"""Treadmill spawn extension.
"""
import os
BUCKETS = 256
MAX_PER_BUCKET = 1000
APPS_DIR = 'apps'
SVSCAN_TREE_DIR = os.path.join(APPS_DIR, 'svscan_tree')
JOBS_DIR = os.path.join(APPS_DIR, 'jobs')
MANIFEST_DIR = 'manifest'
RUNNING_DIR = 'running'
CLEANUP_DIR = 'cleanup'
ZK_MIRROR_DIR = 'zk_mirror'
CELLAPI_SOCK = 'cellapi.sock'
JOB_DATA_DIR = 'data'
JOB_FINISH_TIMEOUT = 0
class SpawnPaths(object):
"""Treadmill spawn manifest watch."""
__slots__ = (
'root',
'jobs_dir',
'manifest_dir',
'running_dir',
'cleanup_dir',
'zk_mirror_dir',
'cellapi_sock',
'svscan_tree_dir',
'buckets',
'env_dir'
)
def __init__(self, root, buckets=BUCKETS):
self.root = root
self.buckets = buckets
self.jobs_dir = os.path.join(self.root, JOBS_DIR)
self.manifest_dir = os.path.join(self.root, MANIFEST_DIR)
self.running_dir = os.path.join(self.root, RUNNING_DIR)
self.cleanup_dir = os.path.join(self.root, CLEANUP_DIR)
self.cellapi_sock = os.path.join(self.root, CELLAPI_SOCK)
self.zk_mirror_dir = os.path.join(self.root, ZK_MIRROR_DIR)
self.svscan_tree_dir = os.path.join(self.root, SVSCAN_TREE_DIR)
self.env_dir = os.path.join(self.svscan_tree_dir, '.s6-svscan', 'env')
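# Illustrative usage (not part of the original module): SpawnPaths only joins
# the configured root with the well-known sub-directories defined above, so a
# hypothetical root yields the layout below.
#
#     >>> paths = SpawnPaths('/var/tmp/treadmill/spawn')
#     >>> paths.jobs_dir
#     '/var/tmp/treadmill/spawn/apps/jobs'
#     >>> paths.env_dir
#     '/var/tmp/treadmill/spawn/apps/svscan_tree/.s6-svscan/env'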
|
{
"content_hash": "3657bcae00d14026462bc4b19aed0066",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 78,
"avg_line_length": 27.75,
"alnum_prop": 0.6088588588588588,
"repo_name": "gaocegege/treadmill",
"id": "e69007ab75a832fd9dcf016d171e2cdf59880cb5",
"size": "1332",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev/2017-5-18",
"path": "treadmill/spawn/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "63"
},
{
"name": "HTML",
"bytes": "3973869"
},
{
"name": "Python",
"bytes": "2127593"
},
{
"name": "R",
"bytes": "2119"
},
{
"name": "Ruby",
"bytes": "3712"
},
{
"name": "Shell",
"bytes": "41660"
}
],
"symlink_target": ""
}
|
import collections
from absl.testing import parameterized
import tensorflow as tf
from tensorflow_federated.python.learning.optimizers import optimizer
class _TestOptimizer(optimizer.Optimizer):
def initialize(self, specs):
del specs
return (0, 1)
def next(self, state, weights, gradients):
del gradients
return state, weights
class GetSetHparamsTest(tf.test.TestCase):
def test_get_hparams_returns_empty_dict(self):
test_optimizer = _TestOptimizer()
state = test_optimizer.initialize(specs=())
hparams = test_optimizer.get_hparams(state)
self.assertEqual(hparams, collections.OrderedDict())
def test_set_hparams_returns_input_state(self):
test_optimizer = _TestOptimizer()
state = test_optimizer.initialize(specs=())
hparams = collections.OrderedDict(a=1, b=2)
updated_state = test_optimizer.set_hparams(state, hparams)
self.assertEqual(state, updated_state)
class OptimizerChecksTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
('bad_shape', tf.zeros([2], tf.float32), tf.zeros([3], tf.float32)),
('bad_dtype', tf.zeros([2], tf.float32), tf.zeros([2], tf.float64)),
('bad_structure', [tf.zeros([2]), tf.zeros([3])
], [tf.zeros([2]), [tf.zeros([3])]]),
)
def test_check_weights_gradients_match(self, weights, gradients):
with self.assertRaises(ValueError):
optimizer.check_weights_gradients_match(weights, gradients)
@parameterized.named_parameters(
('bad_shape', tf.zeros([2], tf.float32), tf.zeros([3], tf.float32)),
('bad_dtype', tf.zeros([2], tf.float32), tf.zeros([2], tf.float64)),
('bad_structure', [tf.zeros([2]), tf.zeros([3])
], [tf.zeros([2]), [tf.zeros([3])]]),
)
def test_check_weights_state_match(self, weights, state):
with self.assertRaisesRegex(ValueError, 'foo'):
optimizer.check_weights_state_match(weights, state, name='foo')
def test_handle_indexed_slices_single_value(self):
gradients = tf.IndexedSlices(
values=tf.constant([[0.0, 1.0], [1.0, 3.0]]),
indices=tf.constant([0, 2]),
dense_shape=tf.constant([4, 2]))
gradients = optimizer.handle_indexed_slices_gradients(gradients)
self.assertIsInstance(gradients, tf.Tensor)
self.assertAllClose([[0.0, 1.0], [0.0, 0.0], [1.0, 3.0], [0.0, 0.0]],
gradients)
def test_handle_indexed_slices_struct(self):
tensor = tf.constant([4.0, 5.5])
slices = tf.IndexedSlices(
values=tf.constant([[0.0, 1.0], [1.0, 3.0]]),
indices=tf.constant([0, 2]),
dense_shape=tf.constant([4, 2]))
gradients = [tensor, slices]
gradients = optimizer.handle_indexed_slices_gradients(gradients)
self.assertIsInstance(gradients[0], tf.Tensor)
self.assertIsInstance(gradients[1], tf.Tensor)
self.assertAllClose(
[[4.0, 5.5], [[0.0, 1.0], [0.0, 0.0], [1.0, 3.0], [0.0, 0.0]]],
gradients)
if __name__ == '__main__':
tf.test.main()
|
{
"content_hash": "6329b4f2230e6540ec47fb2990c2f11a",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 74,
"avg_line_length": 36.05952380952381,
"alnum_prop": 0.6447672499174645,
"repo_name": "tensorflow/federated",
"id": "cf3e64d6c0466bd59505a7179b10755357c16d65",
"size": "3629",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tensorflow_federated/python/learning/optimizers/optimizer_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "729470"
},
{
"name": "Dockerfile",
"bytes": "1983"
},
{
"name": "Python",
"bytes": "6700736"
},
{
"name": "Shell",
"bytes": "7123"
},
{
"name": "Starlark",
"bytes": "387382"
}
],
"symlink_target": ""
}
|
def convert(input_grid):
pass
|
{
"content_hash": "b171995559b73c5df7f517d72e4f97c0",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 24,
"avg_line_length": 17,
"alnum_prop": 0.6764705882352942,
"repo_name": "pheanex/xpython",
"id": "5d753d548dce1a0f04d547bdb8e9aa99339816ed",
"size": "34",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "exercises/ocr-numbers/ocr_numbers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "410838"
},
{
"name": "Shell",
"bytes": "640"
}
],
"symlink_target": ""
}
|
"""Support for ISY994 covers."""
import logging
from typing import Callable
from homeassistant.components.cover import DOMAIN, CoverDevice
from homeassistant.const import (
STATE_CLOSED,
STATE_CLOSING,
STATE_OPEN,
STATE_OPENING,
STATE_UNKNOWN,
)
from homeassistant.helpers.typing import ConfigType
from . import ISY994_NODES, ISY994_PROGRAMS, ISYDevice
_LOGGER = logging.getLogger(__name__)
VALUE_TO_STATE = {
0: STATE_CLOSED,
101: STATE_UNKNOWN,
102: "stopped",
103: STATE_CLOSING,
104: STATE_OPENING,
}
def setup_platform(
hass, config: ConfigType, add_entities: Callable[[list], None], discovery_info=None
):
"""Set up the ISY994 cover platform."""
devices = []
for node in hass.data[ISY994_NODES][DOMAIN]:
devices.append(ISYCoverDevice(node))
for name, status, actions in hass.data[ISY994_PROGRAMS][DOMAIN]:
devices.append(ISYCoverProgram(name, status, actions))
add_entities(devices)
class ISYCoverDevice(ISYDevice, CoverDevice):
"""Representation of an ISY994 cover device."""
@property
def current_cover_position(self) -> int:
"""Return the current cover position."""
if self.is_unknown() or self.value is None:
return None
return sorted((0, self.value, 100))[1]
@property
def is_closed(self) -> bool:
"""Get whether the ISY994 cover device is closed."""
return self.state == STATE_CLOSED
@property
def state(self) -> str:
"""Get the state of the ISY994 cover device."""
if self.is_unknown():
return None
return VALUE_TO_STATE.get(self.value, STATE_OPEN)
def open_cover(self, **kwargs) -> None:
"""Send the open cover command to the ISY994 cover device."""
if not self._node.on(val=100):
_LOGGER.error("Unable to open the cover")
def close_cover(self, **kwargs) -> None:
"""Send the close cover command to the ISY994 cover device."""
if not self._node.off():
_LOGGER.error("Unable to close the cover")
class ISYCoverProgram(ISYCoverDevice):
"""Representation of an ISY994 cover program."""
def __init__(self, name: str, node: object, actions: object) -> None:
"""Initialize the ISY994 cover program."""
super().__init__(node)
self._name = name
self._actions = actions
@property
def state(self) -> str:
"""Get the state of the ISY994 cover program."""
return STATE_CLOSED if bool(self.value) else STATE_OPEN
def open_cover(self, **kwargs) -> None:
"""Send the open cover command to the ISY994 cover program."""
if not self._actions.runThen():
_LOGGER.error("Unable to open the cover")
def close_cover(self, **kwargs) -> None:
"""Send the close cover command to the ISY994 cover program."""
if not self._actions.runElse():
_LOGGER.error("Unable to close the cover")
|
{
"content_hash": "a53b82b7f8a7dcb38875482436f7e45b",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 87,
"avg_line_length": 30.721649484536083,
"alnum_prop": 0.6359060402684564,
"repo_name": "leppa/home-assistant",
"id": "f5e052f6926d447af8349e3deae5441bfc835541",
"size": "2980",
"binary": false,
"copies": "7",
"ref": "refs/heads/dev",
"path": "homeassistant/components/isy994/cover.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18957740"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
}
|
import numpy as np
import matplotlib.pyplot as plt
from random import random
from scipy.interpolate import lagrange
def func(x):
return 5*x*x - 3*x + 2
#Define the true curve
x_true = np.arange(-2,2.1,0.1)
y_true = func(x_true)
#Define 5 "data points" from this curve, with noise
x_data = np.arange(-2,3) #Define a set of five x points
y_data = func(x_data) #Define the y points from an explicit function
y_data += [(random()*2-1)*2 for i in range(len(y_data))] #Add some random noise
#Define the lagrangian interpolated curve
x_interp = np.arange(-2,2.1,0.1)
poly = lagrange(x_data, y_data) #Lagrange returns a function that can be called
y_interp = poly(x_interp)
#Now plot everything up
plt.figure(1)
plt.plot(x_true,y_true,'-',lw=2,label = 'True Curve')
plt.plot(x_data,y_data,' o', label = '"Measured Data"')
plt.plot(x_interp,y_interp,'-k', label = 'Lagrange Interpolation')
plt.xlim(-2.5,2.5)
plt.legend(loc=0)
plt.show(block = False)
|
{
"content_hash": "d895d1e5cf10ec1763fc88318541d2e8",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 80,
"avg_line_length": 33.1,
"alnum_prop": 0.67472306143001,
"repo_name": "BU-PyCon/Meeting-3",
"id": "48a5c4449bb048a2e2944c905e1c0077b68fb756",
"size": "993",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Programs/lagrange.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20305"
}
],
"symlink_target": ""
}
|
from __future__ import division
import logging
import os
import signal
import sys
import time
from optparse import OptionParser
from util import local_libpath
sys.path.insert(0, local_libpath())
from thrift.protocol import TProtocol, TProtocolDecorator
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
class TestHandler(object):
def testVoid(self):
if options.verbose > 1:
logging.info('testVoid()')
def testString(self, str):
if options.verbose > 1:
logging.info('testString(%s)' % str)
return str
def testBool(self, boolean):
if options.verbose > 1:
logging.info('testBool(%s)' % str(boolean).lower())
return boolean
def testByte(self, byte):
if options.verbose > 1:
logging.info('testByte(%d)' % byte)
return byte
def testI16(self, i16):
if options.verbose > 1:
logging.info('testI16(%d)' % i16)
return i16
def testI32(self, i32):
if options.verbose > 1:
logging.info('testI32(%d)' % i32)
return i32
def testI64(self, i64):
if options.verbose > 1:
logging.info('testI64(%d)' % i64)
return i64
def testDouble(self, dub):
if options.verbose > 1:
logging.info('testDouble(%f)' % dub)
return dub
def testBinary(self, thing):
if options.verbose > 1:
logging.info('testBinary()') # TODO: hex output
return thing
def testStruct(self, thing):
if options.verbose > 1:
logging.info('testStruct({%s, %s, %s, %s})' % (thing.string_thing, thing.byte_thing, thing.i32_thing, thing.i64_thing))
return thing
def testException(self, arg):
# if options.verbose > 1:
logging.info('testException(%s)' % arg)
if arg == 'Xception':
raise Xception(errorCode=1001, message=arg)
elif arg == 'TException':
raise TException(message='This is a TException')
def testMultiException(self, arg0, arg1):
if options.verbose > 1:
logging.info('testMultiException(%s, %s)' % (arg0, arg1))
if arg0 == 'Xception':
raise Xception(errorCode=1001, message='This is an Xception')
elif arg0 == 'Xception2':
raise Xception2(
errorCode=2002,
struct_thing=Xtruct(string_thing='This is an Xception2'))
return Xtruct(string_thing=arg1)
def testOneway(self, seconds):
if options.verbose > 1:
logging.info('testOneway(%d) => sleeping...' % seconds)
time.sleep(seconds / 3) # be quick
if options.verbose > 1:
logging.info('done sleeping')
def testNest(self, thing):
if options.verbose > 1:
logging.info('testNest(%s)' % thing)
return thing
def testMap(self, thing):
if options.verbose > 1:
logging.info('testMap(%s)' % thing)
return thing
def testStringMap(self, thing):
if options.verbose > 1:
logging.info('testStringMap(%s)' % thing)
return thing
def testSet(self, thing):
if options.verbose > 1:
logging.info('testSet(%s)' % thing)
return thing
def testList(self, thing):
if options.verbose > 1:
logging.info('testList(%s)' % thing)
return thing
def testEnum(self, thing):
if options.verbose > 1:
logging.info('testEnum(%s)' % thing)
return thing
def testTypedef(self, thing):
if options.verbose > 1:
logging.info('testTypedef(%s)' % thing)
return thing
def testMapMap(self, thing):
if options.verbose > 1:
logging.info('testMapMap(%s)' % thing)
return {
-4: {
-4: -4,
-3: -3,
-2: -2,
-1: -1,
},
4: {
4: 4,
3: 3,
2: 2,
1: 1,
},
}
def testInsanity(self, argument):
if options.verbose > 1:
logging.info('testInsanity(%s)' % argument)
return {
1: {
2: argument,
3: argument,
},
2: {6: Insanity()},
}
def testMulti(self, arg0, arg1, arg2, arg3, arg4, arg5):
if options.verbose > 1:
logging.info('testMulti(%s, %s, %s, %s, %s, %s)' % (arg0, arg1, arg2, arg3, arg4, arg5))
return Xtruct(string_thing='Hello2',
byte_thing=arg0, i32_thing=arg1, i64_thing=arg2)
class SecondHandler(object):
def secondtestString(self, argument):
return "testString(\"" + argument + "\")"
# LAST_SEQID is a global because we have one transport and multiple protocols
# running on it (when multiplexed)
LAST_SEQID = None
class TPedanticSequenceIdProtocolWrapper(TProtocolDecorator.TProtocolDecorator):
"""
Wraps any protocol with sequence ID checking: looks for outbound
uniqueness as well as request/response alignment.
"""
def __init__(self, protocol):
# TProtocolDecorator.__new__ does all the heavy lifting
pass
def readMessageBegin(self):
global LAST_SEQID
(name, type, seqid) =\
super(TPedanticSequenceIdProtocolWrapper, self).readMessageBegin()
if LAST_SEQID is not None and LAST_SEQID == seqid:
raise TProtocol.TProtocolException(
TProtocol.TProtocolException.INVALID_DATA,
"We received the same seqid {0} twice in a row".format(seqid))
LAST_SEQID = seqid
return (name, type, seqid)
def make_pedantic(proto):
""" Wrap a protocol in the pedantic sequence ID wrapper. """
# NOTE: this is disabled for now as many clients send seqid
# of zero and that is okay, need a way to identify
# clients that MUST send seqid unique to function right
# or just force all implementations to send unique seqids (preferred)
return proto # TPedanticSequenceIdProtocolWrapper(proto)
class TPedanticSequenceIdProtocolFactory(TProtocol.TProtocolFactory):
def __init__(self, encapsulated):
super(TPedanticSequenceIdProtocolFactory, self).__init__()
self.encapsulated = encapsulated
def getProtocol(self, trans):
return make_pedantic(self.encapsulated.getProtocol(trans))
def main(options):
# common header allowed client types
allowed_client_types = [
THeaderTransport.THeaderClientType.HEADERS,
THeaderTransport.THeaderClientType.FRAMED_BINARY,
THeaderTransport.THeaderClientType.UNFRAMED_BINARY,
THeaderTransport.THeaderClientType.FRAMED_COMPACT,
THeaderTransport.THeaderClientType.UNFRAMED_COMPACT,
]
# set up the protocol factory from the --protocol option
prot_factories = {
'accel': TBinaryProtocol.TBinaryProtocolAcceleratedFactory(),
'multia': TBinaryProtocol.TBinaryProtocolAcceleratedFactory(),
'accelc': TCompactProtocol.TCompactProtocolAcceleratedFactory(),
'multiac': TCompactProtocol.TCompactProtocolAcceleratedFactory(),
'binary': TPedanticSequenceIdProtocolFactory(TBinaryProtocol.TBinaryProtocolFactory()),
'multi': TPedanticSequenceIdProtocolFactory(TBinaryProtocol.TBinaryProtocolFactory()),
'compact': TCompactProtocol.TCompactProtocolFactory(),
'multic': TCompactProtocol.TCompactProtocolFactory(),
'header': THeaderProtocol.THeaderProtocolFactory(allowed_client_types),
'multih': THeaderProtocol.THeaderProtocolFactory(allowed_client_types),
'json': TJSONProtocol.TJSONProtocolFactory(),
'multij': TJSONProtocol.TJSONProtocolFactory(),
}
pfactory = prot_factories.get(options.proto, None)
if pfactory is None:
raise AssertionError('Unknown --protocol option: %s' % options.proto)
try:
pfactory.string_length_limit = options.string_limit
pfactory.container_length_limit = options.container_limit
except Exception:
# Ignore errors for those protocols that do not support length limits
pass
# get the server type (TSimpleServer, TNonblockingServer, etc...)
if len(args) > 1:
raise AssertionError('Only one server type may be specified, not multiple types.')
server_type = args[0]
if options.trans == 'http':
server_type = 'THttpServer'
# Set up the handler and processor objects
handler = TestHandler()
processor = ThriftTest.Processor(handler)
if options.proto.startswith('multi'):
secondHandler = SecondHandler()
secondProcessor = SecondService.Processor(secondHandler)
multiplexedProcessor = TMultiplexedProcessor()
multiplexedProcessor.registerDefault(processor)
multiplexedProcessor.registerProcessor('ThriftTest', processor)
multiplexedProcessor.registerProcessor('SecondService', secondProcessor)
processor = multiplexedProcessor
global server
# Handle THttpServer as a special case
if server_type == 'THttpServer':
if options.ssl:
__certfile = os.path.join(os.path.dirname(SCRIPT_DIR), "keys", "server.crt")
__keyfile = os.path.join(os.path.dirname(SCRIPT_DIR), "keys", "server.key")
server = THttpServer.THttpServer(processor, ('', options.port), pfactory, cert_file=__certfile, key_file=__keyfile)
else:
server = THttpServer.THttpServer(processor, ('', options.port), pfactory)
server.serve()
sys.exit(0)
# set up server transport and transport factory
abs_key_path = os.path.join(os.path.dirname(SCRIPT_DIR), 'keys', 'server.pem')
host = None
if options.ssl:
from thrift.transport import TSSLSocket
transport = TSSLSocket.TSSLServerSocket(host, options.port, certfile=abs_key_path)
else:
transport = TSocket.TServerSocket(host, options.port, options.domain_socket)
tfactory = TTransport.TBufferedTransportFactory()
if options.trans == 'buffered':
tfactory = TTransport.TBufferedTransportFactory()
elif options.trans == 'framed':
tfactory = TTransport.TFramedTransportFactory()
elif options.trans == '':
raise AssertionError('Unknown --transport option: %s' % options.trans)
else:
    raise AssertionError('Unknown --transport option: %s' % options.trans)
# if --zlib, then wrap server transport, and use a different transport factory
if options.zlib:
transport = TZlibTransport.TZlibTransport(transport) # wrap with zlib
tfactory = TZlibTransport.TZlibTransportFactory()
# do server-specific setup here:
if server_type == "TNonblockingServer":
server = TNonblockingServer.TNonblockingServer(processor, transport, inputProtocolFactory=pfactory)
elif server_type == "TProcessPoolServer":
import signal
from thrift.server import TProcessPoolServer
server = TProcessPoolServer.TProcessPoolServer(processor, transport, tfactory, pfactory)
server.setNumWorkers(5)
def set_alarm():
def clean_shutdown(signum, frame):
for worker in server.workers:
if options.verbose > 0:
logging.info('Terminating worker: %s' % worker)
worker.terminate()
if options.verbose > 0:
logging.info('Requesting server to stop()')
try:
server.stop()
except Exception:
pass
signal.signal(signal.SIGALRM, clean_shutdown)
signal.alarm(4)
set_alarm()
else:
# look up server class dynamically to instantiate server
ServerClass = getattr(TServer, server_type)
server = ServerClass(processor, transport, tfactory, pfactory)
# enter server main loop
server.serve()
def exit_gracefully(signum, frame):
print("SIGINT received\n")
server.shutdown() # doesn't work properly, yet
sys.exit(0)
if __name__ == '__main__':
signal.signal(signal.SIGINT, exit_gracefully)
parser = OptionParser()
parser.add_option('--libpydir', type='string', dest='libpydir',
help='include this directory to sys.path for locating library code')
parser.add_option('--genpydir', type='string', dest='genpydir',
default='gen-py',
help='include this directory to sys.path for locating generated code')
parser.add_option("--port", type="int", dest="port",
help="port number for server to listen on")
parser.add_option("--zlib", action="store_true", dest="zlib",
help="use zlib wrapper for compressed transport")
parser.add_option("--ssl", action="store_true", dest="ssl",
help="use SSL for encrypted transport")
parser.add_option('-v', '--verbose', action="store_const",
dest="verbose", const=2,
help="verbose output")
parser.add_option('-q', '--quiet', action="store_const",
dest="verbose", const=0,
help="minimal output")
parser.add_option('--protocol', dest="proto", type="string",
help="protocol to use, one of: accel, accelc, binary, compact, json, multi, multia, multiac, multic, multih, multij")
parser.add_option('--transport', dest="trans", type="string",
help="transport to use, one of: buffered, framed, http")
parser.add_option('--domain-socket', dest="domain_socket", type="string",
help="Unix domain socket path")
parser.add_option('--container-limit', dest='container_limit', type='int', default=None)
parser.add_option('--string-limit', dest='string_limit', type='int', default=None)
parser.set_defaults(port=9090, verbose=1, proto='binary', trans='buffered')
options, args = parser.parse_args()
# Print TServer log to stdout so that the test-runner can redirect it to log files
logging.basicConfig(level=options.verbose)
sys.path.insert(0, os.path.join(SCRIPT_DIR, options.genpydir))
from ThriftTest import ThriftTest, SecondService
from ThriftTest.ttypes import Xtruct, Xception, Xception2, Insanity
from thrift.Thrift import TException
from thrift.TMultiplexedProcessor import TMultiplexedProcessor
from thrift.transport import THeaderTransport
from thrift.transport import TTransport
from thrift.transport import TSocket
from thrift.transport import TZlibTransport
from thrift.protocol import TBinaryProtocol
from thrift.protocol import TCompactProtocol
from thrift.protocol import THeaderProtocol
from thrift.protocol import TJSONProtocol
from thrift.server import TServer, TNonblockingServer, THttpServer
sys.exit(main(options))
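# Example invocation (illustrative; the cross-test harness normally supplies
# these flags, and the positional argument selects the TServer class):
#
#     python TestServer.py --protocol=binary --transport=buffered --port=9090 TSimpleServer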
|
{
"content_hash": "659044b1cc7d14784febde67664362ba",
"timestamp": "",
"source": "github",
"line_count": 394,
"max_line_length": 139,
"avg_line_length": 38.119289340101524,
"alnum_prop": 0.6296690858246221,
"repo_name": "nsuke/thrift",
"id": "81ae1ad62eeceb55e743be943dc6975d11fa90a5",
"size": "15827",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "test/py/TestServer.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "34900"
},
{
"name": "C",
"bytes": "1068458"
},
{
"name": "C#",
"bytes": "531415"
},
{
"name": "C++",
"bytes": "4752353"
},
{
"name": "CMake",
"bytes": "128611"
},
{
"name": "CSS",
"bytes": "1070"
},
{
"name": "D",
"bytes": "662065"
},
{
"name": "Dart",
"bytes": "181474"
},
{
"name": "Dockerfile",
"bytes": "66393"
},
{
"name": "Emacs Lisp",
"bytes": "5361"
},
{
"name": "Erlang",
"bytes": "323055"
},
{
"name": "Go",
"bytes": "707964"
},
{
"name": "HTML",
"bytes": "36484"
},
{
"name": "Haxe",
"bytes": "319989"
},
{
"name": "Java",
"bytes": "1379918"
},
{
"name": "JavaScript",
"bytes": "456805"
},
{
"name": "Kotlin",
"bytes": "60847"
},
{
"name": "Lex",
"bytes": "10761"
},
{
"name": "Lua",
"bytes": "81630"
},
{
"name": "M4",
"bytes": "172618"
},
{
"name": "Makefile",
"bytes": "216507"
},
{
"name": "OCaml",
"bytes": "39269"
},
{
"name": "PHP",
"bytes": "353558"
},
{
"name": "Pascal",
"bytes": "594372"
},
{
"name": "Perl",
"bytes": "133070"
},
{
"name": "Python",
"bytes": "509091"
},
{
"name": "Ruby",
"bytes": "400013"
},
{
"name": "Rust",
"bytes": "362681"
},
{
"name": "Shell",
"bytes": "61391"
},
{
"name": "Smalltalk",
"bytes": "22944"
},
{
"name": "Swift",
"bytes": "165395"
},
{
"name": "Thrift",
"bytes": "425010"
},
{
"name": "TypeScript",
"bytes": "61760"
},
{
"name": "Vim script",
"bytes": "2846"
},
{
"name": "Yacc",
"bytes": "26413"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .core import UnitedStates
class Wisconsin(UnitedStates):
"""Wisconsin"""
include_columbus_day = False
include_federal_presidents_day = False
include_christmas_eve = True
FIXED_HOLIDAYS = UnitedStates.FIXED_HOLIDAYS + (
(12, 31, "New Years Eve"),
)
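# Illustrative usage (assumes the workalendar package is importable; not part
# of the original module): Core.holidays(year) returns (date, label) pairs, so
# Wisconsin's calendar should include the extra New Year's Eve entry.
#
#     >>> cal = Wisconsin()
#     >>> any(label == "New Years Eve" for _, label in cal.holidays(2021))
#     True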
|
{
"content_hash": "8641823a19762ecf6058c1220b4f65d6",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 66,
"avg_line_length": 28.357142857142858,
"alnum_prop": 0.6498740554156172,
"repo_name": "sayoun/workalendar",
"id": "db7c35ce3b0508f5cfecad51bf0f8cacf8f2a466",
"size": "421",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "workalendar/usa/wisconsin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1164"
},
{
"name": "Python",
"bytes": "383844"
}
],
"symlink_target": ""
}
|
from collections import OrderedDict
from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import render
from django.template import RequestContext
from fle_site.apps.articles.models import Article
def blog_filter_page(request, template='articles/blog_homepage.html'):
"""Display all blog posts, and filters"""
blog_posts = Article.objects.live().order_by('-publish_date')
tags = []
for post in blog_posts:
tags += post.tags.all()
tags = set(tags)
# Create a dict of posts by date for filtering by date
sorted_posts = {}
for post in blog_posts:
year = post.publish_date.year
month = post.publish_date.strftime("%B")
if not sorted_posts.get(year):
sorted_posts[year] = {}
if not sorted_posts[year].get(month):
sorted_posts[year][month] = []
sorted_posts[year][month].append(post)
# Now sort each post list
for year, months in sorted_posts.items():
for month in months:
sorted_posts[year][month] = sorted(sorted_posts[year][month], key=lambda x: x.publish_date, reverse=True)
# Finally sort by year
posts_by_date = OrderedDict(sorted(sorted_posts.items(), reverse=True))
variables = {
'posts': blog_posts,
'tags': tags,
'posts_by_date': posts_by_date,
}
response = render(request, template, variables)
return response
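# For illustration (hypothetical data, not part of the original view):
# posts_by_date built above has the shape {year: {month_name: [posts]}}, with
# years in descending order and each month's posts sorted newest-first, e.g.
#
#     OrderedDict([(2016, {'March': [post_b, post_a]}),
#                  (2015, {'December': [post_c]})])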
def display_article(request, year=None, slug=None, template='articles/article_detail.html'):
"""Displays a single article."""
try:
article = Article.objects.live(user=request.user).get(publish_date__year=year, slug=slug)
except Article.DoesNotExist:
raise Http404
# make sure the user is logged in if the article requires it
if article.login_required and not request.user.is_authenticated():
return HttpResponseRedirect(reverse('auth_login') + '?next=' + request.path)
# Render top 5 most recent posts in the sidebar
blog_posts = Article.objects.live().order_by('-publish_date')
variables = RequestContext(request, {
'article': article,
'disqus_forum': getattr(settings, 'DISQUS_FORUM_SHORTNAME', None),
})
response = render(request, template, variables)
return response
|
{
"content_hash": "c39844806283ea076a7e41a63787ba92",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 117,
"avg_line_length": 34.333333333333336,
"alnum_prop": 0.6699029126213593,
"repo_name": "jtamiace/fle-home",
"id": "71c5831f13a01b0de3751d39561b1fbd9f9df016",
"size": "2369",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "fle_site/apps/articles/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "89436"
},
{
"name": "HTML",
"bytes": "260231"
},
{
"name": "JavaScript",
"bytes": "785155"
},
{
"name": "Python",
"bytes": "277441"
},
{
"name": "Shell",
"bytes": "161"
}
],
"symlink_target": ""
}
|
"""This pip smoke test verifies dependency files exist in the pip package.
This script runs bazel queries to see what python files are required by the
tests and ensures they are in the pip package superset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import subprocess
os.chdir(os.path.abspath(os.path.join(os.path.dirname(__file__), "../../..")))
PIP_PACKAGE_QUERY_EXPRESSION = (
"deps(//tensorflow/tools/pip_package:build_pip_package)")
# List of file paths containing BUILD files that should not be included for the
# pip smoke test.
BUILD_DENYLIST = [
"tensorflow/lite",
"tensorflow/compiler/mlir/lite",
"tensorflow/python/kernel_tests/signal",
"tensorflow/examples",
"tensorflow/tools/android",
"tensorflow/python/eager/benchmarks",
]
def GetBuild(dir_base):
"""Get the list of BUILD file all targets recursively startind at dir_base."""
items = []
for root, _, files in os.walk(dir_base):
for name in files:
if (name == "BUILD" and not any(x in root for x in BUILD_DENYLIST)):
items.append("//" + root + ":all")
return items
def BuildPyTestDependencies():
python_targets = GetBuild("tensorflow/python")
tensorflow_targets = GetBuild("tensorflow")
# Build list of test targets,
# python - attr(manual|no_pip)
targets = " + ".join(python_targets)
targets += ' - attr(tags, "manual|no_pip", %s)' % " + ".join(
tensorflow_targets)
query_kind = "kind(py_test, %s)" % targets
# Skip benchmarks etc.
query_filter = 'filter("^((?!benchmark).)*$", %s)' % query_kind
# Get the dependencies
query_deps = "deps(%s, 1)" % query_filter
return python_targets, query_deps
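# For illustration (hypothetical targets, not the real query output): with
# python_targets == ["//tensorflow/python:all"], BuildPyTestDependencies()
# produces a bazel query expression roughly of the form
#
#     deps(filter("^((?!benchmark).)*$",
#          kind(py_test, //tensorflow/python:all
#               - attr(tags, "manual|no_pip", //tensorflow:all + ...))), 1)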
PYTHON_TARGETS, PY_TEST_QUERY_EXPRESSION = BuildPyTestDependencies()
# TODO(amitpatankar): Clean up denylist.
# List of dependencies that should not be included in the pip package.
DEPENDENCY_DENYLIST = [
"//tensorflow/python:extra_py_tests_deps",
"//tensorflow/cc/saved_model:saved_model_test_files",
"//tensorflow/cc/saved_model:saved_model_half_plus_two",
"//tensorflow:no_tensorflow_py_deps",
"//tensorflow/tools/pip_package:win_pip_package_marker",
"//tensorflow/python:test_ops_2",
"//tensorflow/python:tf_optimizer",
"//tensorflow/python:compare_test_proto_py",
"//tensorflow/core:image_testdata",
"//tensorflow/core/lib/lmdb/testdata:lmdb_testdata",
"//tensorflow/core/kernels/cloud:bigquery_reader_ops",
"//tensorflow/python/debug:grpc_tensorflow_server.par",
"//tensorflow/python/feature_column:vocabulary_testdata",
"//tensorflow/python:framework/test_file_system.so",
"//tensorflow/python:util_nest_test_main_lib",
# lite
"//tensorflow/lite/experimental/examples/lstm:rnn_cell",
"//tensorflow/lite/experimental/examples/lstm:rnn_cell.py",
"//tensorflow/lite/experimental/examples/lstm:unidirectional_sequence_lstm_test", # pylint:disable=line-too-long
"//tensorflow/lite/experimental/examples/lstm:unidirectional_sequence_lstm_test.py", # pylint:disable=line-too-long
"//tensorflow/lite/python:interpreter",
"//tensorflow/lite/python:interpreter_test",
"//tensorflow/lite/python:interpreter.py",
"//tensorflow/lite/python:interpreter_test.py",
]
def main():
"""This script runs the pip smoke test.
Raises:
RuntimeError: If any py_test dependency is missing from the pip package superset.
Prerequisites:
1. Bazel is installed.
2. Running from the root of the TensorFlow GitHub repository.
3. Configure has been run.
"""
# pip_package_dependencies_list is the list of included files in pip packages
pip_package_dependencies = subprocess.check_output(
["bazel", "cquery", PIP_PACKAGE_QUERY_EXPRESSION])
if isinstance(pip_package_dependencies, bytes):
pip_package_dependencies = pip_package_dependencies.decode("utf-8")
pip_package_dependencies_list = pip_package_dependencies.strip().split("\n")
pip_package_dependencies_list = [
x.split()[0] for x in pip_package_dependencies_list
]
print("Pip package superset size: %d" % len(pip_package_dependencies_list))
# tf_py_test_dependencies is the list of dependencies for all python
# tests in tensorflow
tf_py_test_dependencies = subprocess.check_output(
["bazel", "cquery", PY_TEST_QUERY_EXPRESSION])
if isinstance(tf_py_test_dependencies, bytes):
tf_py_test_dependencies = tf_py_test_dependencies.decode("utf-8")
tf_py_test_dependencies_list = tf_py_test_dependencies.strip().split("\n")
tf_py_test_dependencies_list = [
x.split()[0] for x in tf_py_test_dependencies_list
]
print("Pytest dependency subset size: %d" % len(tf_py_test_dependencies_list))
missing_dependencies = []
# File extensions and endings to ignore
ignore_extensions = [
"_test", "_test.py", "_test_gpu", "_test_gpu.py", "_test_lib"
]
ignored_files_count = 0
denylisted_dependencies_count = len(DEPENDENCY_DENYLIST)
# Compare dependencies
for dependency in tf_py_test_dependencies_list:
if dependency and dependency.startswith("//tensorflow"):
ignore = False
# Ignore extensions
if any(dependency.endswith(ext) for ext in ignore_extensions):
ignore = True
ignored_files_count += 1
# Check if the dependency is in the pip package, the dependency denylist,
# or should be ignored because of its file extension.
if not (ignore or dependency in pip_package_dependencies_list or
dependency in DEPENDENCY_DENYLIST):
missing_dependencies.append(dependency)
print("Ignored files count: %d" % ignored_files_count)
print("Denylisted dependencies count: %d" % denylisted_dependencies_count)
if missing_dependencies:
print("Missing the following dependencies from pip_packages:")
for missing_dependency in missing_dependencies:
print("\nMissing dependency: %s " % missing_dependency)
print("Affected Tests:")
rdep_query = ("rdeps(kind(py_test, %s), %s)" %
(" + ".join(PYTHON_TARGETS), missing_dependency))
affected_tests = subprocess.check_output(["bazel", "cquery", rdep_query])
affected_tests_list = affected_tests.split("\n")[:-2]
print("\n".join(affected_tests_list))
raise RuntimeError("""
One or more added test dependencies are not in the pip package.
If these test dependencies need to be in TensorFlow pip package, please add them to //tensorflow/tools/pip_package/BUILD.
Else add no_pip tag to the test.""")
else:
print("TEST PASSED")
if __name__ == "__main__":
main()
|
{
"content_hash": "2506d3fe41c3e2ec85ad4d60bc79a559",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 121,
"avg_line_length": 38.578947368421055,
"alnum_prop": 0.695164468697893,
"repo_name": "aam-at/tensorflow",
"id": "60e1ae5b65623a84dbb034c18793075dc7ea00f6",
"size": "7286",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/tools/pip_package/pip_smoke_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3568"
},
{
"name": "Batchfile",
"bytes": "16049"
},
{
"name": "C",
"bytes": "784149"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "69481042"
},
{
"name": "CMake",
"bytes": "204596"
},
{
"name": "Dockerfile",
"bytes": "73667"
},
{
"name": "Go",
"bytes": "1670128"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "844222"
},
{
"name": "Jupyter Notebook",
"bytes": "1665601"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "101287"
},
{
"name": "Objective-C",
"bytes": "104023"
},
{
"name": "Objective-C++",
"bytes": "182460"
},
{
"name": "PHP",
"bytes": "17733"
},
{
"name": "Pascal",
"bytes": "3407"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "49451363"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "4697"
},
{
"name": "Shell",
"bytes": "495434"
},
{
"name": "Smarty",
"bytes": "27495"
},
{
"name": "Swift",
"bytes": "56155"
},
{
"name": "TSQL",
"bytes": "921"
}
],
"symlink_target": ""
}
|
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
DESCRIPTOR = _descriptor.FileDescriptor(
name='address_book_protobuf.proto',
package='BenchmarkProtobuf',
serialized_pb='\n\x1b\x61\x64\x64ress_book_protobuf.proto\x12\x11\x42\x65nchmarkProtobuf\"\xf7\x01\n\x06Person\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\n\n\x02id\x18\x02 \x02(\x05\x12\r\n\x05\x65mail\x18\x03 \x01(\t\x12\x34\n\x05phone\x18\x04 \x03(\x0b\x32%.BenchmarkProtobuf.Person.PhoneNumber\x1aV\n\x0bPhoneNumber\x12\x0e\n\x06number\x18\x01 \x02(\t\x12\x37\n\x04type\x18\x02 \x01(\x0e\x32#.BenchmarkProtobuf.Person.PhoneType:\x04HOME\"6\n\tPhoneType\x12\n\n\x06MOBILE\x10\x00\x12\x08\n\x04HOME\x10\x01\x12\x08\n\x04WORK\x10\x02\x12\t\n\x05OTHER\x10\x03\"8\n\x0b\x41\x64\x64ressBook\x12)\n\x06person\x18\x01 \x03(\x0b\x32\x19.BenchmarkProtobuf.Person')
_PERSON_PHONETYPE = _descriptor.EnumDescriptor(
name='PhoneType',
full_name='BenchmarkProtobuf.Person.PhoneType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='MOBILE', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='HOME', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WORK', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='OTHER', index=3, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=244,
serialized_end=298,
)
_PERSON_PHONENUMBER = _descriptor.Descriptor(
name='PhoneNumber',
full_name='BenchmarkProtobuf.Person.PhoneNumber',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='number', full_name='BenchmarkProtobuf.Person.PhoneNumber.number', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='type', full_name='BenchmarkProtobuf.Person.PhoneNumber.type', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=156,
serialized_end=242,
)
_PERSON = _descriptor.Descriptor(
name='Person',
full_name='BenchmarkProtobuf.Person',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='BenchmarkProtobuf.Person.name', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='id', full_name='BenchmarkProtobuf.Person.id', index=1,
number=2, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='email', full_name='BenchmarkProtobuf.Person.email', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='phone', full_name='BenchmarkProtobuf.Person.phone', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_PERSON_PHONENUMBER, ],
enum_types=[
_PERSON_PHONETYPE,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=51,
serialized_end=298,
)
_ADDRESSBOOK = _descriptor.Descriptor(
name='AddressBook',
full_name='BenchmarkProtobuf.AddressBook',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='person', full_name='BenchmarkProtobuf.AddressBook.person', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=300,
serialized_end=356,
)
_PERSON_PHONENUMBER.fields_by_name['type'].enum_type = _PERSON_PHONETYPE
_PERSON_PHONENUMBER.containing_type = _PERSON;
_PERSON.fields_by_name['phone'].message_type = _PERSON_PHONENUMBER
_PERSON_PHONETYPE.containing_type = _PERSON;
_ADDRESSBOOK.fields_by_name['person'].message_type = _PERSON
DESCRIPTOR.message_types_by_name['Person'] = _PERSON
DESCRIPTOR.message_types_by_name['AddressBook'] = _ADDRESSBOOK
class Person(_message.Message):
__metaclass__ = _reflection.GeneratedProtocolMessageType
class PhoneNumber(_message.Message):
__metaclass__ = _reflection.GeneratedProtocolMessageType
DESCRIPTOR = _PERSON_PHONENUMBER
# @@protoc_insertion_point(class_scope:BenchmarkProtobuf.Person.PhoneNumber)
DESCRIPTOR = _PERSON
# @@protoc_insertion_point(class_scope:BenchmarkProtobuf.Person)
class AddressBook(_message.Message):
__metaclass__ = _reflection.GeneratedProtocolMessageType
DESCRIPTOR = _ADDRESSBOOK
# @@protoc_insertion_point(class_scope:BenchmarkProtobuf.AddressBook)
# @@protoc_insertion_point(module_scope)
|
{
"content_hash": "3810707cc62402e2f467d4a0f6f0c1d7",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 654,
"avg_line_length": 33.92972972972973,
"alnum_prop": 0.7067070256491955,
"repo_name": "tongsucn/protobuf_thrift_benchmark",
"id": "f8375cd02327cba101418d5ddf554dbb983fde26",
"size": "6375",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/python/address_book_protobuf_pb2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "41046"
},
{
"name": "Python",
"bytes": "32526"
}
],
"symlink_target": ""
}
|
default_app_config = 'apps.auth.apps.AuthConfig'
|
{
"content_hash": "04408c5e9471238a5bab5ca18916f6cc",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 48,
"avg_line_length": 48,
"alnum_prop": 0.7916666666666666,
"repo_name": "ycheng-aa/qr_server",
"id": "518062b98e5e6024507e35496f9d556822ea5763",
"size": "48",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/auth/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31680"
}
],
"symlink_target": ""
}
|
import os.path
# ## Import required libraries
import pandas as pd
from sklearn.metrics import accuracy_score
from ludwig.api import LudwigModel
from ludwig.datasets import mnist
# create data set for predictions
test_data = {'image_path': [], 'label': []}
dataset = mnist.Mnist()
test_dir = os.path.join(dataset.processed_dataset_path, 'testing')
for label in os.listdir(test_dir):
files = os.listdir(os.path.join(test_dir, label))
test_data['image_path'] += [os.path.join(test_dir, label, f) for f in
files]
test_data['label'] += len(files) * [label]
# collect data into a data frame
test_df = pd.DataFrame(test_data)
print(test_df.head())
# retrieve a trained model
model = LudwigModel.load('./results/multiple_experiment_Option3/model')
# make predictions
pred_df, _ = model.predict(dataset=test_df)
print(pred_df.head())
# print accuracy on test data set
print('predicted accuracy', accuracy_score(test_df['label'], pred_df['label_predictions']))
|
{
"content_hash": "96b1d585cf1bd97c82697c67842880eb",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 91,
"avg_line_length": 28.714285714285715,
"alnum_prop": 0.6975124378109453,
"repo_name": "uber/ludwig",
"id": "2af619a4e9e14d62c41c8412bcd19bfba81ac515",
"size": "1124",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/mnist/assess_model_performance.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "466847"
},
{
"name": "Dockerfile",
"bytes": "635"
},
{
"name": "HTML",
"bytes": "292184"
},
{
"name": "JavaScript",
"bytes": "85725"
},
{
"name": "Python",
"bytes": "1241008"
}
],
"symlink_target": ""
}
|
try:
my_id = instance_id()
except NameError:
from serverondemand.xen import instance_id, instance_region
my_id = instance_id()
import os
import requests
username = RS_USERNAME
apikey = RS_APIKEY
payload = '{"auth": {"RAX-KSKEY:apiKeyCredentials": {"username": "%s", "apiKey": "%s"}}}' % (username, apikey)
headers = {'content-type': 'application/json'}
r = requests.post('https://identity.api.rackspacecloud.com/v2.0/tokens', data = payload, headers = headers)
token = r.json()['access']['token']['id']
tenant = r.json()['access']['token']['tenant']['name']
server_url = 'https://%s.servers.api.rackspacecloud.com/v2/%s/servers/%s' % (instance_region(), tenant, my_id)
headers = {'x-auth-token': token}
requests.delete(server_url, headers = headers)
|
{
"content_hash": "40f1096eec47ec9d53803b69f1f5e3c1",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 110,
"avg_line_length": 36.476190476190474,
"alnum_prop": 0.6801566579634465,
"repo_name": "adregner/server-on-demand",
"id": "ea4740e5326c76e845a763ba7bc41dd14a778a3c",
"size": "874",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "serverondemand/resources/demand_done.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12859"
},
{
"name": "Shell",
"bytes": "353"
}
],
"symlink_target": ""
}
|
"""
homeassistant.components.media_player.denon
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Provides an interface to Denon Network Receivers.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.denon/
"""
import logging
import telnetlib
from homeassistant.components.media_player import (
DOMAIN, SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PREVIOUS_TRACK,
SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET,
MediaPlayerDevice)
from homeassistant.const import CONF_HOST, STATE_OFF, STATE_ON, STATE_UNKNOWN
_LOGGER = logging.getLogger(__name__)
SUPPORT_DENON = SUPPORT_PAUSE | SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | \
SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | \
SUPPORT_TURN_ON | SUPPORT_TURN_OFF
def setup_platform(hass, config, add_devices, discovery_info=None):
""" Sets up the Denon platform. """
if not config.get(CONF_HOST):
_LOGGER.error(
"Missing required configuration items in %s: %s",
DOMAIN,
CONF_HOST)
return False
denon = DenonDevice(
config.get("name", "Music station"),
config.get("host")
)
if denon.update():
add_devices([denon])
return True
else:
return False
class DenonDevice(MediaPlayerDevice):
""" Represents a Denon device. """
# pylint: disable=too-many-public-methods, abstract-method
def __init__(self, name, host):
self._name = name
self._host = host
self._pwstate = "PWSTANDBY"
self._volume = 0
self._muted = False
self._mediasource = ""
@classmethod
def telnet_request(cls, telnet, command):
""" Executes `command` and returns the response. """
telnet.write(command.encode("ASCII") + b"\r")
return telnet.read_until(b"\r", timeout=0.2).decode("ASCII").strip()
def telnet_command(self, command):
""" Establishes a telnet connection and sends `command`. """
telnet = telnetlib.Telnet(self._host)
telnet.write(command.encode("ASCII") + b"\r")
telnet.read_very_eager() # skip response
telnet.close()
def update(self):
try:
telnet = telnetlib.Telnet(self._host)
except ConnectionRefusedError:
return False
self._pwstate = self.telnet_request(telnet, "PW?")
# PW? sends also SISTATUS, which is not interesting
telnet.read_until(b"\r", timeout=0.2)
volume_str = self.telnet_request(telnet, "MV?")[len("MV"):]
self._volume = int(volume_str) / 60
self._muted = (self.telnet_request(telnet, "MU?") == "MUON")
self._mediasource = self.telnet_request(telnet, "SI?")[len("SI"):]
telnet.close()
return True
@property
def name(self):
""" Returns the name of the device. """
return self._name
@property
def state(self):
""" Returns the state of the device. """
if self._pwstate == "PWSTANDBY":
return STATE_OFF
if self._pwstate == "PWON":
return STATE_ON
return STATE_UNKNOWN
@property
def volume_level(self):
""" Volume level of the media player (0..1). """
return self._volume
@property
def is_volume_muted(self):
""" Boolean if volume is currently muted. """
return self._muted
@property
def media_title(self):
""" Current media source. """
return self._mediasource
@property
def supported_media_commands(self):
""" Flags of media commands that are supported. """
return SUPPORT_DENON
def turn_off(self):
""" turn_off media player. """
self.telnet_command("PWSTANDBY")
def volume_up(self):
""" volume_up media player. """
self.telnet_command("MVUP")
def volume_down(self):
""" volume_down media player. """
self.telnet_command("MVDOWN")
def set_volume_level(self, volume):
""" set volume level, range 0..1. """
# 60dB max
self.telnet_command("MV" + str(round(volume * 60)).zfill(2))
def mute_volume(self, mute):
""" mute (true) or unmute (false) media player. """
self.telnet_command("MU" + ("ON" if mute else "OFF"))
def media_play(self):
""" media_play media player. """
self.telnet_command("NS9A")
def media_pause(self):
""" media_pause media player. """
self.telnet_command("NS9B")
def media_next_track(self):
""" Send next track command. """
self.telnet_command("NS9D")
def media_previous_track(self):
self.telnet_command("NS9E")
def turn_on(self):
""" turn the media player on. """
self.telnet_command("PWON")
|
{
"content_hash": "fcdc8a72cc8f605c7df4de12fbcc587d",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 79,
"avg_line_length": 29.88888888888889,
"alnum_prop": 0.5966542750929368,
"repo_name": "nnic/home-assistant",
"id": "2853dda90ac477fa6d2ea00fbea630753ffd2500",
"size": "4842",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/media_player/denon.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1482064"
},
{
"name": "Python",
"bytes": "1790232"
},
{
"name": "Shell",
"bytes": "3570"
}
],
"symlink_target": ""
}
|
import os
from unittest import TestCase
from test_base import PackstackTestCaseMixin
from packstack.plugins import serverprep_901
from packstack.installer.setup_controller import Controller
serverprep_901.controller = Controller()
class OSPluginUtilsTestCase(PackstackTestCaseMixin, TestCase):
def test_rhn_creds_quoted(self):
"""Make sure RHN password is quoted"""
password = "dasd|'asda%><?"
serverprep_901.controller.CONF["CONFIG_KEYSTONE_HOST"] = "1.2.3.4"
serverprep_901.controller.CONF["CONFIG_USE_EPEL"] = "n"
serverprep_901.controller.CONF["CONFIG_REPO"] = ""
serverprep_901.controller.CONF["CONFIG_RH_USER"] = "testuser"
serverprep_901.controller.CONF["CONFIG_RH_PW"] = password
serverprep_901.controller.CONF["CONFIG_RH_BETA_REPO"] = "n"
serverprep_901.controller.CONF["CONFIG_SATELLITE_FLAGS"] = ""
serverprep_901.controller.CONF["CONFIG_SATELLITE_URL"] = ""
serverprep_901.controller.CONF["CONFIG_SATELLITE_USER"] = ""
serverprep_901.controller.CONF["CONFIG_SATELLITE_PW"] = ""
serverprep_901.controller.CONF["CONFIG_SATELLITE_CACERT"] = ""
serverprep_901.controller.CONF["CONFIG_SATELLITE_AKEY"] = ""
serverprep_901.controller.CONF["CONFIG_SATELLITE_PROFILE"] = ""
serverprep_901.controller.CONF["CONFIG_SATELLITE_PROXY"] = ""
serverprep_901.controller.CONF["CONFIG_SATELLITE_PROXY_USER"] = ""
serverprep_901.controller.CONF["CONFIG_SATELLITE_PROXY_PW"] = ""
serverprep_901.serverprep()
self.assertNotEqual(
self.fake_popen.data.find('--password="%s"' % password), -1
)
|
{
"content_hash": "257c94903f8318623a30764f97a62b8a",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 74,
"avg_line_length": 42.97435897435897,
"alnum_prop": 0.6789976133651552,
"repo_name": "slagle/packstack",
"id": "730f039d3a26045e741e09f8d156bcfe90111e96",
"size": "2301",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_plugin_serverprep.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
import os
from django.test import override_settings
from django.utils.crypto import get_random_string
from resolwe.flow.models import Process
from resolwe.test import ProcessTestCase
class PurgeTestFieldsMixin:
"""
This class contains tests that validate each field individually. It is used
to perform basic unit tests (on simulated data, without running any processors)
as well as more complex end-to-end tests (by running processors). Having them
in this mixin ensures that both unit and e2e tests are run for each field.
"""
def test_basic_file(self):
self.assertFieldWorks(
"basic:file:",
field_value={"file": "but_not_this"},
script_setup="touch remove_this but_not_this",
script_save="re-save-file sample but_not_this",
removed=["remove_this"],
not_removed=["but_not_this"],
)
def test_basic_file_specialization(self):
self.assertFieldWorks(
"basic:file:html:",
field_value={"file": "but_not_this"},
script_setup="touch remove_this but_not_this",
script_save="re-save-file sample but_not_this",
removed=["remove_this"],
not_removed=["but_not_this"],
)
def test_basic_file_list(self):
self.assertFieldWorks(
"list:basic:file:",
field_value=[{"file": "but_not_this"}, {"file": "and_not_this"}],
script_setup="touch remove_this but_not_this and_not_this",
script_save="re-save-file-list sample but_not_this and_not_this",
removed=["remove_this"],
not_removed=["but_not_this", "and_not_this"],
)
def test_basic_dir(self):
self.assertFieldWorks(
"basic:dir:",
field_value={"dir": "but_not_this"},
script_setup="""
mkdir remove_this but_not_this
touch remove_this/a remove_this/b remove_this/c
touch but_not_this/a but_not_this/b but_not_this/c
""",
script_save="re-save-dir sample but_not_this",
removed=["remove_this/", "remove_this/a", "remove_this/b", "remove_this/c"],
not_removed=["but_not_this/a", "but_not_this/b", "but_not_this/c"],
)
def test_basic_dir_list(self):
self.assertFieldWorks(
"list:basic:dir:",
field_value=[{"dir": "but_not_this"}, {"dir": "and_not_this"}],
script_setup="""
mkdir remove_this but_not_this and_not_this
touch remove_this/a remove_this/b remove_this/c
touch but_not_this/a but_not_this/b but_not_this/c
touch and_not_this/a and_not_this/b and_not_this/c
""",
script_save="re-save-dir-list sample but_not_this and_not_this",
removed=["remove_this/", "remove_this/a", "remove_this/b", "remove_this/c"],
not_removed=[
"but_not_this/a",
"but_not_this/b",
"but_not_this/c",
"and_not_this/a",
"and_not_this/b",
"and_not_this/c",
],
)
@override_settings(TEST_PROCESS_REQUIRE_TAGS=False) # Test uses dynamic processes.
class PurgeE2ETest(PurgeTestFieldsMixin, ProcessTestCase):
def create_and_run_processor(self, processor, **kwargs):
processor_slug = get_random_string(6)
Process.objects.create(
slug=processor_slug,
name="Test Purge Process",
contributor=self.admin,
type="data:test",
version=1,
**processor,
)
data = self.run_process(processor_slug, **kwargs)
return data
def assertFilesRemoved(self, data, *files):
for name in files:
path = data.location.get_path(filename=name)
self.assertFalse(os.path.isfile(path), msg=path)
def assertFilesNotRemoved(self, data, *files):
for name in files:
path = data.location.get_path(filename=name)
self.assertTrue(os.path.isfile(path), msg=path)
def test_complex_processor(self):
data = self.create_and_run_processor(
processor=dict(
input_schema=[],
output_schema=[
{
"name": "sample_file",
"label": "Sample output file",
"type": "basic:file:",
},
{
"name": "another_sample_file",
"label": "Another sample output file",
"type": "basic:file:",
},
{
"name": "sample_dir",
"label": "Sample output directory",
"type": "basic:dir:",
},
{
"name": "sample_file_list",
"label": "Sample list of output files",
"type": "list:basic:file:",
},
{
"name": "sample_dir_list",
"label": "Sample list of output directories",
"type": "list:basic:dir:",
},
],
run={
"language": "bash",
"program": """
touch these files should be removed
touch this_file_should_stay
mkdir -p should_stay/
touch should_stay/file
mkdir -p directory/should/be
touch directory/should/be/removed
touch directory/should/be/removed2
mkdir -p stay/directory
touch stay/directory/file1
touch stay/directory/file2
touch entry1 entry2 entry3
mkdir dir1 dir2 dir3
touch dir1/a dir2/b dir3/c dir3/d
touch ref1 ref2 ref3 ref4
mkdir refs
touch refs/a refs/b
re-save-file another_sample_file should_stay/file
re-save-file sample_file this_file_should_stay ref3
re-save-dir sample_dir stay ref4
re-save-file-list sample_file_list entry1 entry2:ref1 entry3:refs
re-save-dir-list sample_dir_list dir1:ref2 dir2 dir3
""",
},
)
)
self.assertFilesRemoved(
data, "these", "files", "should", "be", "removed", "directory"
)
self.assertFilesNotRemoved(
data,
"this_file_should_stay",
"stay/directory/file1",
"stay/directory/file2",
"entry1",
"entry2",
"entry3",
"dir1/a",
"dir2/b",
"dir3/c",
"dir3/d",
"ref1",
"ref2",
"ref3",
"ref4",
"refs/a",
"refs/b",
"should_stay/file",
)
def assertFieldWorks(
self, field_type, field_value, script_setup, script_save, removed, not_removed
):
"""
        Check that a field is handled correctly when running a processor that
        uses the field.
"""
field_schema = {"name": "sample", "label": "Sample output", "type": field_type}
# Test output.
data = self.create_and_run_processor(
processor=dict(
input_schema=[],
output_schema=[field_schema],
run={"language": "bash", "program": script_setup + "\n" + script_save},
)
)
self.assertFilesRemoved(data, *removed)
self.assertFilesNotRemoved(data, *not_removed)
# Test descriptor.
# descriptor_schema = DescriptorSchema.objects.create(
# slug=get_random_string(6), contributor=self.admin, schema=[field_schema]
# )
# data = self.create_and_run_processor(
# processor=dict(
# input_schema=[],
# output_schema=[],
# run={"language": "bash", "program": script_setup},
# ),
# descriptor_schema=descriptor_schema,
# descriptor={"sample": field_value},
# )
# self.assertFilesRemoved(data, *removed)
# self.assertFilesNotRemoved(data, *not_removed)
|
{
"content_hash": "18130fb3f53a600f89f09a2263208e23",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 88,
"avg_line_length": 34.86147186147186,
"alnum_prop": 0.5340866757730038,
"repo_name": "genialis/resolwe",
"id": "cb701fb4c412d4c0ba9d039094c75f8a4165df91",
"size": "8089",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "resolwe/flow/tests/test_purge.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PLpgSQL",
"bytes": "21533"
},
{
"name": "Python",
"bytes": "1813118"
},
{
"name": "Shell",
"bytes": "6244"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function, with_statement
import unittest
from . import util
class UtilTest(unittest.TestCase):
def test_is_in_dir(self):
self.assertTrue(util.is_in_dir("foo/bar.py", "foo"))
self.assertTrue(util.is_in_dir("foo/bar.py", "foo/"))
self.assertTrue(util.is_in_dir("/foo/bar.py", "/"))
self.assertFalse(util.is_in_dir("foo.py", "foo"))
self.assertFalse(util.is_in_dir("foo/bar.py", "foo/bar"))
self.assertFalse(util.is_in_dir("foo/bars", "foo/bar"))
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "1e4f7a86efd1f1587a1e2dc96a7d9e49",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 80,
"avg_line_length": 31.842105263157894,
"alnum_prop": 0.6264462809917355,
"repo_name": "shs96c/buck",
"id": "c15a58c342eff9992563826b6d8b6e4ea40ae9c8",
"size": "605",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python-dsl/buck_parser/util_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1585"
},
{
"name": "Batchfile",
"bytes": "2563"
},
{
"name": "C",
"bytes": "274563"
},
{
"name": "C#",
"bytes": "237"
},
{
"name": "C++",
"bytes": "18013"
},
{
"name": "CSS",
"bytes": "54894"
},
{
"name": "D",
"bytes": "1017"
},
{
"name": "Dockerfile",
"bytes": "1938"
},
{
"name": "Go",
"bytes": "9630"
},
{
"name": "Groovy",
"bytes": "3362"
},
{
"name": "HTML",
"bytes": "7188"
},
{
"name": "Haskell",
"bytes": "1008"
},
{
"name": "IDL",
"bytes": "480"
},
{
"name": "Java",
"bytes": "28323049"
},
{
"name": "JavaScript",
"bytes": "934510"
},
{
"name": "Kotlin",
"bytes": "21626"
},
{
"name": "Lex",
"bytes": "3241"
},
{
"name": "Makefile",
"bytes": "1916"
},
{
"name": "Matlab",
"bytes": "47"
},
{
"name": "OCaml",
"bytes": "4935"
},
{
"name": "Objective-C",
"bytes": "172139"
},
{
"name": "Objective-C++",
"bytes": "34"
},
{
"name": "PowerShell",
"bytes": "486"
},
{
"name": "Prolog",
"bytes": "1486"
},
{
"name": "Python",
"bytes": "2027822"
},
{
"name": "Roff",
"bytes": "1207"
},
{
"name": "Rust",
"bytes": "5199"
},
{
"name": "Scala",
"bytes": "5082"
},
{
"name": "Shell",
"bytes": "67040"
},
{
"name": "Smalltalk",
"bytes": "3922"
},
{
"name": "Swift",
"bytes": "11377"
},
{
"name": "Thrift",
"bytes": "80526"
},
{
"name": "Yacc",
"bytes": "323"
}
],
"symlink_target": ""
}
|
from datetime import datetime
from david.ext.babel import lazy_gettext as _
from david.core.db import db, orm, func, CatLimitedQuery, UidMixin, SerializeMixin
from david.core.accounts import User
from david.core.attachment import PictureMixin
from david.lib.utils import truncate, striptags
from david.ext.views.static import lazy_static_url
from config import SITE_ROOT
from .tag import tags_table, Tag
K_ARTICLE = 200
class Article(db.Model, UidMixin, PictureMixin, SerializeMixin):
kind = K_ARTICLE
kind_name = 'article'
id = db.Column(db.Integer, primary_key=True)
cat = db.Column(db.SmallInteger, index=True, nullable=False)
owner_id = db.Column(db.Integer, db.ForeignKey(User.id), nullable=False)
title = db.Column(db.String(255), nullable=False)
summary = db.Column(db.Text())
content = db.Column(db.Text())
create_at = db.Column(db.DateTime, default=func.now())
update_at = db.Column(db.DateTime, default=func.now(), onupdate=func.utc_timestamp())
published = db.Column(db.Boolean, default=True)
sticking = db.Column(db.Boolean, default=False)
tags = db.relationship('Tag', secondary=tags_table,
backref=db.backref('articles', lazy='dynamic'))
_DEFAULT_PIC = lazy_static_url('img/article-default-%s.png')
@property
def cat_id(self):
return self.cat
@property
def cat_name(self):
return CAT_NAMES[str(self.cat)]
catname = property(lambda x: _(x.cat_name))
query_class = CatLimitedQuery
def abstract(self, limit=140, killwords=True):
return truncate((self.summary or '').strip() or
striptags(self.content or '').strip(), limit, killwords)
def url(self):
return '%s%s/%s' % (SITE_ROOT, self.cat_name, self.slug)
def listpage_url(self):
page = 1
return '%s%s/p%s' % (SITE_ROOT, self.cat_name, page)
@orm.reconstructor
    def init_on_load(self, *args):
        """Get an article, but return it as the subclassed type."""
pass
def extended_self(self):
return CATS[str(self.cat_id)].get(self.id)
# article cats, will be extended later
CATS = {
}
CAT_NAMES = {
}
def add_cat(cat_id, cls):
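    """Register an Article subclass under the given cat id (illustrative call: add_cat(1, SomeArticleSubclass))."""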
if cat_id in CATS:
raise Exception('cat id %s already in use' % cat_id)
CATS[str(cat_id)] = cls
CAT_NAMES[str(cat_id)] = cls.cat_name
|
{
"content_hash": "3998c916f7590c9006fcf9322f8b2135",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 89,
"avg_line_length": 30.23076923076923,
"alnum_prop": 0.6607294317217981,
"repo_name": "ktmud/david",
"id": "bf6fe10a64ff4d8ea3e37964d6cf08bd14a7a77d",
"size": "2374",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "david/core/article/article.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "83881"
},
{
"name": "JavaScript",
"bytes": "281633"
},
{
"name": "PHP",
"bytes": "2274"
},
{
"name": "Python",
"bytes": "82385"
}
],
"symlink_target": ""
}
|
from BTL.ConvertedMetainfo import ConvertedMetainfo
from BTL.bencode import bencode, bdecode
import os
def file_from_path(path):
assert os.path.splitext(path)[1].lower() == '.torrent'
    with open(path, 'rb') as f:
        return f.read()
def metainfo_from_file(f):
metainfo = ConvertedMetainfo(bdecode(f))
return metainfo
def metainfo_from_path(path):
return metainfo_from_file(file_from_path(path))
def infohash_from_path(path):
return str(metainfo_from_path(path).infohash)
def parse_infohash(ihash):
"""takes a hex-encoded infohash and returns an infohash or None
if the infohash is invalid."""
try:
x = ihash.decode('hex')
except ValueError:
return None
except TypeError:
return None
return x
def is_valid_infohash(x):
"""Determine whether this is a valid hex-encoded infohash."""
    if not x or len(x) != 40:
        return False
    return parse_infohash(x) is not None
def parse_uuid(uuid):
"""takes a hex-encoded uuid and returns an uuid or None
if the uuid is invalid."""
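    # Illustrative (hypothetical value): '12345678-1234-1234-1234-123456789abc'
    # leaves 32 hex digits once the dashes are removed, so it is returned as-is;
    # anything failing the length check or the hex decode yields None.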
try:
# Remove the '-'s at specific points
uuidhash = uuid[:8] + uuid[9:13] + uuid[14:18] + uuid[19:23] + uuid[24:]
if len(uuidhash) != 32:
return None
x = uuidhash.decode('hex')
return uuid
    except Exception:
return None
def is_valid_uuid(x):
"""Determine whether this is a valid hex-encoded uuid."""
if not x or len(x) != 36:
return False
    return parse_uuid(x) is not None
def infohash_from_infohash_or_path(x):
"""Expects a valid path to a .torrent file or a hex-encoded infohash.
Returns a binary infohash."""
if not len(x) == 40:
return infohash_from_path(x)
n = parse_infohash(x)
if n:
return n
## path happens to be 40 chars, or bad infohash
return infohash_from_path(x)
if __name__ == "__main__":
# Test is_valid_infohash()
assert is_valid_infohash("") == False
assert is_valid_infohash("12345") == False
assert is_valid_infohash("12345678901234567890123456789012345678901") == False
assert is_valid_infohash("abcdefghijklmnopqrstuvwxyzabcdefghijklmn") == False
assert is_valid_infohash("1234567890123456789012345678901234567890") == True
assert is_valid_infohash("deadbeefdeadbeefdeadbeefdeadbeefdeadbeef") == True
|
{
"content_hash": "b285df4daa9fc9a817975a4f432841d4",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 81,
"avg_line_length": 29.531645569620252,
"alnum_prop": 0.6545220745820831,
"repo_name": "sauloal/linuxscripts",
"id": "4fd8d379005fe682e498f72fe74c68835bbd65b0",
"size": "2333",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "apache/var/www/html/saulo/torrent/html/bin/clients/mainline/BTL/fileutils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "616304"
},
{
"name": "C++",
"bytes": "24286"
},
{
"name": "JavaScript",
"bytes": "238160"
},
{
"name": "PHP",
"bytes": "9491076"
},
{
"name": "Perl",
"bytes": "877930"
},
{
"name": "Python",
"bytes": "3651261"
},
{
"name": "Racket",
"bytes": "4568"
},
{
"name": "Shell",
"bytes": "157362"
},
{
"name": "XSLT",
"bytes": "28086"
}
],
"symlink_target": ""
}
|
import datetime
from nose.tools import eq_, ok_
import mock
from django.conf import settings
from django.contrib.auth.models import Group
from django.utils import timezone
from funfactory.urlresolvers import reverse
from airmozilla.main.models import (
Event,
EventTweet,
Location,
Approval
)
from .base import ManageTestCase
from airmozilla.base.tests.test_utils import Response
class TestEventTweets(ManageTestCase):
event_base_data = {
'status': Event.STATUS_SCHEDULED,
'description': '...',
'privacy': 'public',
'location': '1',
'channels': '1',
'tags': 'xxx',
'template': '1',
'start_time': '2012-3-4 12:00',
'estimated_duration': '3600',
'timezone': 'US/Pacific'
}
placeholder = 'airmozilla/manage/tests/firefox.png'
@mock.patch('requests.get')
def test_prepare_new_tweet(self, rget):
def mocked_read(url, params):
assert url == settings.BITLY_URL
return Response({
u'status_code': 200,
u'data': {
u'url': u'http://mzl.la/1adh2wT',
u'hash': u'1adh2wT',
u'global_hash': u'1adh2wU',
u'long_url': u'https://air.mozilla.org/it-buildout/',
u'new_hash': 0
},
u'status_txt': u'OK'
})
rget.side_effect = mocked_read
event = Event.objects.get(title='Test event')
# the event must have a real placeholder image
with open(self.placeholder) as fp:
response = self.client.post(
reverse('manage:event_edit', args=(event.pk,)),
dict(self.event_base_data,
title=event.title,
short_description="Check out <b>This!</b>",
description="Something longer",
placeholder_img=fp)
)
assert response.status_code == 302, response.status_code
# on the edit page, there should be a link
response = self.client.get(
reverse('manage:event_edit', args=(event.pk,))
)
assert response.status_code == 200
url = reverse('manage:new_event_tweet', args=(event.pk,))
ok_(url in response.content)
response = self.client.get(url)
eq_(response.status_code, 200)
textarea = (
response.content
.split('<textarea')[1]
.split('>')[1]
.split('</textarea')[0]
)
ok_(textarea.strip().startswith('Check out This!'))
event = Event.objects.get(pk=event.pk)
event_url = 'http://testserver'
event_url += reverse('main:event', args=(event.slug,))
ok_('http://mzl.la/1adh2wT' in textarea)
ok_(event_url not in textarea)
# Sometimes, due to...
# https://bugzilla.mozilla.org/show_bug.cgi?id=1167211
# the session is cleared out here in this test, so we
# really make sure we're signed in
assert self.client.login(username='fake', password='fake')
assert self.client.session.items()
# load the form
response = self.client.get(url)
eq_(response.status_code, 200)
# try to submit it with longer than 140 characters
response = self.client.post(url, {
'text': 'x' * 141,
'include_placeholder': True,
})
eq_(response.status_code, 200)
assert not EventTweet.objects.all().count()
ok_('it has 141' in response.content)
# try again
response = self.client.post(url, {
'text': 'Bla bla #tag',
'include_placeholder': True,
})
eq_(response.status_code, 302)
ok_(EventTweet.objects.all().count())
now = timezone.now()
event_tweet, = EventTweet.objects.all()
_fmt = '%Y%m%d%H%M'
eq_(
event_tweet.send_date.strftime(_fmt),
now.strftime(_fmt)
)
ok_(not event_tweet.sent_date)
ok_(not event_tweet.error)
ok_(not event_tweet.tweet_id)
def test_event_tweets_empty(self):
event = Event.objects.get(title='Test event')
url = reverse('manage:event_tweets', args=(event.pk,))
response = self.client.get(url)
eq_(response.status_code, 200)
def test_event_tweets_states(self):
event = Event.objects.get(title='Test event')
assert event in Event.objects.approved()
group = Group.objects.get(name='testapprover')
Approval.objects.create(
event=event,
group=group,
)
assert event not in Event.objects.approved()
url = reverse('manage:event_tweets', args=(event.pk,))
response = self.client.get(url)
eq_(response.status_code, 200)
tweet = EventTweet.objects.create(
event=event,
text='Bla bla',
send_date=timezone.now(),
)
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('Bla bla' in response.content)
ok_('Needs to be approved first' in response.content)
from airmozilla.main.helpers import js_date
ok_(
js_date(tweet.send_date.replace(microsecond=0))
not in response.content
)
# also check that 'Bla bla' is shown on the Edit Event page
edit_url = reverse('manage:event_edit', args=(event.pk,))
response = self.client.get(edit_url)
eq_(response.status_code, 200)
ok_('Bla bla' in response.content)
tweet.tweet_id = '1234567890'
tweet.sent_date = (
timezone.now()
- datetime.timedelta(days=1)
)
tweet.save()
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('Bla bla' in response.content)
ok_(
'https://twitter.com/%s/status/1234567890'
% settings.TWITTER_USERNAME
in response.content
)
ok_(
js_date(tweet.sent_date.replace(microsecond=0))
in response.content
)
tweet.tweet_id = None
tweet.error = "Some error"
tweet.save()
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('Bla bla' in response.content)
ok_(
'https://twitter.com/%s/status/1234567890'
% settings.TWITTER_USERNAME
not in response.content
)
ok_(
js_date(tweet.sent_date.replace(microsecond=0))
in response.content
)
ok_('Failed to send' in response.content)
def test_all_event_tweets_states(self):
event = Event.objects.get(title='Test event')
assert event in Event.objects.approved()
group = Group.objects.get(name='testapprover')
Approval.objects.create(
event=event,
group=group,
)
assert event not in Event.objects.approved()
url = reverse('manage:all_event_tweets')
response = self.client.get(url)
eq_(response.status_code, 200)
tweet = EventTweet.objects.create(
event=event,
text='Bla bla',
send_date=timezone.now(),
)
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('Bla bla' in response.content)
ok_('Needs to be approved first' in response.content)
from airmozilla.main.helpers import js_date
ok_(
js_date(tweet.send_date.replace(microsecond=0))
not in response.content
)
# also check that 'Bla bla' is shown on the Edit Event page
edit_url = reverse('manage:event_edit', args=(event.pk,))
response = self.client.get(edit_url)
eq_(response.status_code, 200)
ok_('Bla bla' in response.content)
tweet.tweet_id = '1234567890'
tweet.sent_date = (
timezone.now()
- datetime.timedelta(days=1)
)
tweet.save()
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('Bla bla' in response.content)
ok_(
'https://twitter.com/%s/status/1234567890'
% settings.TWITTER_USERNAME
in response.content
)
ok_(
js_date(tweet.sent_date.replace(microsecond=0))
in response.content
)
tweet.tweet_id = None
tweet.error = "Some error"
tweet.save()
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('Bla bla' in response.content)
ok_(
'https://twitter.com/%s/status/1234567890'
% settings.TWITTER_USERNAME
not in response.content
)
ok_(
js_date(tweet.sent_date.replace(microsecond=0))
in response.content
)
ok_('Failed to send' in response.content)
@mock.patch('airmozilla.manage.views.events.send_tweet')
def test_force_send_now(self, mocked_send_tweet):
event = Event.objects.get(title='Test event')
tweet = EventTweet.objects.create(
event=event,
text='Bla bla',
send_date=timezone.now(),
)
def mock_send_tweet(event_tweet):
event_tweet.tweet_id = '1234567890'
event_tweet.save()
mocked_send_tweet.side_effect = mock_send_tweet
url = reverse('manage:event_tweets', args=(event.pk,))
response = self.client.post(url, {
'send': tweet.pk,
})
eq_(response.status_code, 302)
tweet = EventTweet.objects.get(pk=tweet.pk)
eq_(tweet.tweet_id, '1234567890')
def test_view_tweet_error(self):
event = Event.objects.get(title='Test event')
tweet = EventTweet.objects.create(
event=event,
text='Bla bla',
send_date=timezone.now(),
error='Crap!'
)
url = reverse('manage:event_tweets', args=(event.pk,))
response = self.client.post(url, {
'error': tweet.pk,
})
eq_(response.status_code, 200)
eq_(response['content-type'], 'text/plain')
ok_('Crap!' in response.content)
def test_cancel_event_tweet(self):
event = Event.objects.get(title='Test event')
tweet = EventTweet.objects.create(
event=event,
text='Bla bla',
send_date=timezone.now(),
)
url = reverse('manage:event_tweets', args=(event.pk,))
response = self.client.post(url, {
'cancel': tweet.pk,
})
eq_(response.status_code, 302)
ok_(not EventTweet.objects.all().count())
def test_create_event_tweet_with_location_timezone(self):
location = Location.objects.create(
name='Paris',
timezone='Europe/Paris'
)
event = Event.objects.get(title='Test event')
event.location = location
event.save()
# the event must have a real placeholder image
with open(self.placeholder) as fp:
response = self.client.post(
reverse('manage:event_edit', args=(event.pk,)),
dict(self.event_base_data,
title=event.title,
short_description="Check out <b>This!</b>",
description="Something longer",
placeholder_img=fp)
)
assert response.status_code == 302, response.status_code
url = reverse('manage:new_event_tweet', args=(event.pk,))
now = datetime.datetime.utcnow()
response = self.client.post(url, {
'text': 'Bla bla #tag',
'include_placeholder': True,
'send_date': now.strftime('%Y-%m-%d 12:00'),
})
eq_(response.status_code, 302)
event_tweet, = EventTweet.objects.all()
        # we specified it as noon in Paris, but the saved time
        # will be in UTC
ok_(event_tweet.send_date.hour != 12)
assert event_tweet.send_date.strftime('%Z') == 'UTC'
|
{
"content_hash": "1ef750abed19188d23fdf5821e8109a3",
"timestamp": "",
"source": "github",
"line_count": 368,
"max_line_length": 73,
"avg_line_length": 33.27989130434783,
"alnum_prop": 0.5510737323426145,
"repo_name": "Nolski/airmozilla",
"id": "7d0a95159df5cbc528ec300be67d1538b5df8edd",
"size": "12247",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airmozilla/manage/tests/views/test_eventtweets.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4527"
},
{
"name": "Brightscript",
"bytes": "67473"
},
{
"name": "CSS",
"bytes": "1714414"
},
{
"name": "HTML",
"bytes": "2399577"
},
{
"name": "JavaScript",
"bytes": "3194328"
},
{
"name": "Makefile",
"bytes": "13548"
},
{
"name": "Puppet",
"bytes": "6677"
},
{
"name": "Python",
"bytes": "3296951"
},
{
"name": "Ruby",
"bytes": "4978"
},
{
"name": "Shell",
"bytes": "3573"
},
{
"name": "Smarty",
"bytes": "1943"
}
],
"symlink_target": ""
}
|
from swifpy import Dictionary, Int, Optional, Some, String
import unittest
class TestDictionary(unittest.TestCase):
def test_sample(self):
dictionary: Dictionary[String, Int] = Dictionary({'a': 2, 'b': 3, 'c': 5})
a: Optional[Int] = dictionary['a'] # Optional(2)
dictionary['d'] = 7
count: Int = dictionary.count # 4
for key, value in dictionary:
print("%s -> %d" % (key, value))
self.assertEqual(a, Some(2))
self.assertEqual(count, 4)
|
{
"content_hash": "d2381bef7964abd8262bb583b4bfa415",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 82,
"avg_line_length": 30.294117647058822,
"alnum_prop": 0.5941747572815534,
"repo_name": "koher/swifpy",
"id": "1f93fd4dea874e6a0138917c035553a59094de18",
"size": "515",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_dictionary.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14918"
}
],
"symlink_target": ""
}
|
from monolithe.generators.lib import TemplateFileWriter
from monolithe.lib import SDKUtils
class PackageWriter(TemplateFileWriter):
"""
"""
def __init__(self, monolithe_config):
"""
"""
super(PackageWriter, self).__init__(package="monolithe.generators.lang.html")
self.monolithe_config = monolithe_config
self._output = self.monolithe_config.get_option("output", "transformer")
self._product_name = self.monolithe_config.get_option("product_name")
self.output_directory = "%s/html/" % (self._output)
def perform(self, apiversions):
"""
"""
self._write_main_index(apiversions=apiversions)
def _write_main_index(self, apiversions):
"""
"""
versions = {}
for v in apiversions:
versions[v] = SDKUtils.get_string_version(v)
self.write(destination=self.output_directory, filename="index.html", template_name="main_index.html.tpl",
apiversion=versions,
product_name=self._product_name)
|
{
"content_hash": "7fd9519dbf8287a314c622d30e7978d1",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 113,
"avg_line_length": 29.97222222222222,
"alnum_prop": 0.6144578313253012,
"repo_name": "nuagenetworks/monolithe",
"id": "9ebb5dc8c4f5b4d82d1d8dcdf820046324b37bef",
"size": "2677",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "monolithe/generators/lang/html/writers/packagewriter.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "16165"
},
{
"name": "HTML",
"bytes": "983593"
},
{
"name": "JavaScript",
"bytes": "93413"
},
{
"name": "Python",
"bytes": "415189"
},
{
"name": "Smarty",
"bytes": "184108"
}
],
"symlink_target": ""
}
|
from datetime import (
datetime,
timedelta,
)
import operator
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import iNaT
from pandas.core.dtypes.common import is_datetime64_any_dtype
from pandas import (
DatetimeIndex,
DatetimeTZDtype,
Index,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
isna,
offsets,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
PeriodArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
@pytest.mark.parametrize(
"nat,idx",
[
(Timestamp("NaT"), DatetimeArray),
(Timedelta("NaT"), TimedeltaArray),
(Period("NaT", freq="M"), PeriodArray),
],
)
def test_nat_fields(nat, idx):
for field in idx._field_ops:
# weekday is a property of DTI, but a method
# on NaT/Timestamp for compat with datetime
if field == "weekday":
continue
result = getattr(NaT, field)
assert np.isnan(result)
result = getattr(nat, field)
assert np.isnan(result)
for field in idx._bool_ops:
result = getattr(NaT, field)
assert result is False
result = getattr(nat, field)
assert result is False
def test_nat_vector_field_access():
idx = DatetimeIndex(["1/1/2000", None, None, "1/4/2000"])
for field in DatetimeArray._field_ops:
# weekday is a property of DTI, but a method
# on NaT/Timestamp for compat with datetime
if field == "weekday":
continue
result = getattr(idx, field)
expected = Index([getattr(x, field) for x in idx])
tm.assert_index_equal(result, expected)
ser = Series(idx)
for field in DatetimeArray._field_ops:
# weekday is a property of DTI, but a method
# on NaT/Timestamp for compat with datetime
if field == "weekday":
continue
result = getattr(ser.dt, field)
expected = [getattr(x, field) for x in idx]
tm.assert_series_equal(result, Series(expected))
for field in DatetimeArray._bool_ops:
result = getattr(ser.dt, field)
expected = [getattr(x, field) for x in idx]
tm.assert_series_equal(result, Series(expected))
@pytest.mark.parametrize("klass", [Timestamp, Timedelta, Period])
@pytest.mark.parametrize("value", [None, np.nan, iNaT, float("nan"), NaT, "NaT", "nat"])
def test_identity(klass, value):
assert klass(value) is NaT
@pytest.mark.parametrize("klass", [Timestamp, Timedelta, Period])
@pytest.mark.parametrize("value", ["", "nat", "NAT", None, np.nan])
def test_equality(klass, value, request):
if klass is Period and value == "":
request.node.add_marker(
pytest.mark.xfail(reason="Period cannot parse empty string")
)
assert klass(value).value == iNaT
@pytest.mark.parametrize("klass", [Timestamp, Timedelta])
@pytest.mark.parametrize("method", ["round", "floor", "ceil"])
@pytest.mark.parametrize("freq", ["s", "5s", "min", "5min", "h", "5h"])
def test_round_nat(klass, method, freq):
# see gh-14940
ts = klass("nat")
round_method = getattr(ts, method)
assert round_method(freq) is ts
@pytest.mark.parametrize(
"method",
[
"astimezone",
"combine",
"ctime",
"dst",
"fromordinal",
"fromtimestamp",
"fromisocalendar",
"isocalendar",
"strftime",
"strptime",
"time",
"timestamp",
"timetuple",
"timetz",
"toordinal",
"tzname",
"utcfromtimestamp",
"utcnow",
"utcoffset",
"utctimetuple",
"timestamp",
],
)
def test_nat_methods_raise(method):
# see gh-9513, gh-17329
msg = f"NaTType does not support {method}"
with pytest.raises(ValueError, match=msg):
getattr(NaT, method)()
@pytest.mark.parametrize("method", ["weekday", "isoweekday"])
def test_nat_methods_nan(method):
# see gh-9513, gh-17329
assert np.isnan(getattr(NaT, method)())
@pytest.mark.parametrize(
"method", ["date", "now", "replace", "today", "tz_convert", "tz_localize"]
)
def test_nat_methods_nat(method):
# see gh-8254, gh-9513, gh-17329
assert getattr(NaT, method)() is NaT
@pytest.mark.parametrize(
"get_nat", [lambda x: NaT, lambda x: Timedelta(x), lambda x: Timestamp(x)]
)
def test_nat_iso_format(get_nat):
# see gh-12300
assert get_nat("NaT").isoformat() == "NaT"
assert get_nat("NaT").isoformat(timespec="nanoseconds") == "NaT"
@pytest.mark.parametrize(
"klass,expected",
[
(Timestamp, ["normalize", "to_julian_date", "to_period", "unit"]),
(
Timedelta,
[
"components",
"resolution_string",
"to_pytimedelta",
"to_timedelta64",
"unit",
"view",
],
),
],
)
def test_missing_public_nat_methods(klass, expected):
# see gh-17327
#
# NaT should have *most* of the Timestamp and Timedelta methods.
# Here, we check which public methods NaT does not have. We
# ignore any missing private methods.
nat_names = dir(NaT)
klass_names = dir(klass)
missing = [x for x in klass_names if x not in nat_names and not x.startswith("_")]
missing.sort()
assert missing == expected
def _get_overlap_public_nat_methods(klass, as_tuple=False):
"""
Get overlapping public methods between NaT and another class.
Parameters
----------
klass : type
The class to compare with NaT
as_tuple : bool, default False
Whether to return a list of tuples of the form (klass, method).
Returns
-------
overlap : list
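    Examples
    --------
    Illustrative sketch only; the Timedelta overlap shown here is also
    asserted by the parametrized test further below in this module:
    >>> _get_overlap_public_nat_methods(Timedelta)
    ['total_seconds']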
"""
nat_names = dir(NaT)
klass_names = dir(klass)
overlap = [
x
for x in nat_names
if x in klass_names and not x.startswith("_") and callable(getattr(klass, x))
]
# Timestamp takes precedence over Timedelta in terms of overlap.
if klass is Timedelta:
ts_names = dir(Timestamp)
overlap = [x for x in overlap if x not in ts_names]
if as_tuple:
overlap = [(klass, method) for method in overlap]
overlap.sort()
return overlap
@pytest.mark.parametrize(
"klass,expected",
[
(
Timestamp,
[
"as_unit",
"astimezone",
"ceil",
"combine",
"ctime",
"date",
"day_name",
"dst",
"floor",
"fromisocalendar",
"fromisoformat",
"fromordinal",
"fromtimestamp",
"isocalendar",
"isoformat",
"isoweekday",
"month_name",
"now",
"replace",
"round",
"strftime",
"strptime",
"time",
"timestamp",
"timetuple",
"timetz",
"to_datetime64",
"to_numpy",
"to_pydatetime",
"today",
"toordinal",
"tz_convert",
"tz_localize",
"tzname",
"utcfromtimestamp",
"utcnow",
"utcoffset",
"utctimetuple",
"weekday",
],
),
(Timedelta, ["total_seconds"]),
],
)
def test_overlap_public_nat_methods(klass, expected):
# see gh-17327
#
# NaT should have *most* of the Timestamp and Timedelta methods.
    # Where Timestamp, Timedelta, and NaT overlap, the overlap is
    # attributed to Timestamp and NaT, not Timedelta.
assert _get_overlap_public_nat_methods(klass) == expected
@pytest.mark.parametrize(
"compare",
(
_get_overlap_public_nat_methods(Timestamp, True)
+ _get_overlap_public_nat_methods(Timedelta, True)
),
ids=lambda x: f"{x[0].__name__}.{x[1]}",
)
def test_nat_doc_strings(compare):
# see gh-17327
#
# The docstrings for overlapping methods should match.
klass, method = compare
klass_doc = getattr(klass, method).__doc__
# Ignore differences with Timestamp.isoformat() as they're intentional
if klass == Timestamp and method == "isoformat":
return
if method == "to_numpy":
# GH#44460 can return either dt64 or td64 depending on dtype,
# different docstring is intentional
return
nat_doc = getattr(NaT, method).__doc__
assert klass_doc == nat_doc
_ops = {
"left_plus_right": lambda a, b: a + b,
"right_plus_left": lambda a, b: b + a,
"left_minus_right": lambda a, b: a - b,
"right_minus_left": lambda a, b: b - a,
"left_times_right": lambda a, b: a * b,
"right_times_left": lambda a, b: b * a,
"left_div_right": lambda a, b: a / b,
"right_div_left": lambda a, b: b / a,
}
@pytest.mark.parametrize("op_name", list(_ops.keys()))
@pytest.mark.parametrize(
"value,val_type",
[
(2, "scalar"),
(1.5, "floating"),
(np.nan, "floating"),
("foo", "str"),
(timedelta(3600), "timedelta"),
(Timedelta("5s"), "timedelta"),
(datetime(2014, 1, 1), "timestamp"),
(Timestamp("2014-01-01"), "timestamp"),
(Timestamp("2014-01-01", tz="UTC"), "timestamp"),
(Timestamp("2014-01-01", tz="US/Eastern"), "timestamp"),
(pytz.timezone("Asia/Tokyo").localize(datetime(2014, 1, 1)), "timestamp"),
],
)
def test_nat_arithmetic_scalar(op_name, value, val_type):
# see gh-6873
invalid_ops = {
"scalar": {"right_div_left"},
"floating": {
"right_div_left",
"left_minus_right",
"right_minus_left",
"left_plus_right",
"right_plus_left",
},
"str": set(_ops.keys()),
"timedelta": {"left_times_right", "right_times_left"},
"timestamp": {
"left_times_right",
"right_times_left",
"left_div_right",
"right_div_left",
},
}
op = _ops[op_name]
if op_name in invalid_ops.get(val_type, set()):
if (
val_type == "timedelta"
and "times" in op_name
and isinstance(value, Timedelta)
):
typs = "(Timedelta|NaTType)"
msg = rf"unsupported operand type\(s\) for \*: '{typs}' and '{typs}'"
elif val_type == "str":
# un-specific check here because the message comes from str
# and varies by method
msg = "|".join(
[
"can only concatenate str",
"unsupported operand type",
"can't multiply sequence",
"Can't convert 'NaTType'",
"must be str, not NaTType",
]
)
else:
msg = "unsupported operand type"
with pytest.raises(TypeError, match=msg):
op(NaT, value)
else:
if val_type == "timedelta" and "div" in op_name:
expected = np.nan
else:
expected = NaT
assert op(NaT, value) is expected
@pytest.mark.parametrize(
"val,expected", [(np.nan, NaT), (NaT, np.nan), (np.timedelta64("NaT"), np.nan)]
)
def test_nat_rfloordiv_timedelta(val, expected):
# see gh-#18846
#
# See also test_timedelta.TestTimedeltaArithmetic.test_floordiv
td = Timedelta(hours=3, minutes=4)
assert td // val is expected
@pytest.mark.parametrize(
"op_name",
["left_plus_right", "right_plus_left", "left_minus_right", "right_minus_left"],
)
@pytest.mark.parametrize(
"value",
[
DatetimeIndex(["2011-01-01", "2011-01-02"], name="x"),
DatetimeIndex(["2011-01-01", "2011-01-02"], tz="US/Eastern", name="x"),
DatetimeArray._from_sequence(["2011-01-01", "2011-01-02"]),
DatetimeArray._from_sequence(
["2011-01-01", "2011-01-02"], dtype=DatetimeTZDtype(tz="US/Pacific")
),
TimedeltaIndex(["1 day", "2 day"], name="x"),
],
)
def test_nat_arithmetic_index(op_name, value):
# see gh-11718
exp_name = "x"
exp_data = [NaT] * 2
if is_datetime64_any_dtype(value.dtype) and "plus" in op_name:
expected = DatetimeIndex(exp_data, tz=value.tz, name=exp_name)
else:
expected = TimedeltaIndex(exp_data, name=exp_name)
if not isinstance(value, Index):
expected = expected.array
op = _ops[op_name]
result = op(NaT, value)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"op_name",
["left_plus_right", "right_plus_left", "left_minus_right", "right_minus_left"],
)
@pytest.mark.parametrize("box", [TimedeltaIndex, Series, TimedeltaArray._from_sequence])
def test_nat_arithmetic_td64_vector(op_name, box):
# see gh-19124
vec = box(["1 day", "2 day"], dtype="timedelta64[ns]")
box_nat = box([NaT, NaT], dtype="timedelta64[ns]")
tm.assert_equal(_ops[op_name](vec, NaT), box_nat)
@pytest.mark.parametrize(
"dtype,op,out_dtype",
[
("datetime64[ns]", operator.add, "datetime64[ns]"),
("datetime64[ns]", roperator.radd, "datetime64[ns]"),
("datetime64[ns]", operator.sub, "timedelta64[ns]"),
("datetime64[ns]", roperator.rsub, "timedelta64[ns]"),
("timedelta64[ns]", operator.add, "datetime64[ns]"),
("timedelta64[ns]", roperator.radd, "datetime64[ns]"),
("timedelta64[ns]", operator.sub, "datetime64[ns]"),
("timedelta64[ns]", roperator.rsub, "timedelta64[ns]"),
],
)
def test_nat_arithmetic_ndarray(dtype, op, out_dtype):
other = np.arange(10).astype(dtype)
result = op(NaT, other)
expected = np.empty(other.shape, dtype=out_dtype)
expected.fill("NaT")
tm.assert_numpy_array_equal(result, expected)
def test_nat_pinned_docstrings():
# see gh-17327
assert NaT.ctime.__doc__ == datetime.ctime.__doc__
def test_to_numpy_alias():
# GH 24653: alias .to_numpy() for scalars
expected = NaT.to_datetime64()
result = NaT.to_numpy()
assert isna(expected) and isna(result)
# GH#44460
result = NaT.to_numpy("M8[s]")
assert isinstance(result, np.datetime64)
assert result.dtype == "M8[s]"
result = NaT.to_numpy("m8[ns]")
assert isinstance(result, np.timedelta64)
assert result.dtype == "m8[ns]"
result = NaT.to_numpy("m8[s]")
assert isinstance(result, np.timedelta64)
assert result.dtype == "m8[s]"
with pytest.raises(ValueError, match="NaT.to_numpy dtype must be a "):
NaT.to_numpy(np.int64)
@pytest.mark.parametrize(
"other",
[
Timedelta(0),
Timedelta(0).to_pytimedelta(),
pytest.param(
Timedelta(0).to_timedelta64(),
marks=pytest.mark.xfail(
reason="td64 doesn't return NotImplemented, see numpy#17017"
),
),
Timestamp(0),
Timestamp(0).to_pydatetime(),
pytest.param(
Timestamp(0).to_datetime64(),
marks=pytest.mark.xfail(
reason="dt64 doesn't return NotImplemented, see numpy#17017"
),
),
Timestamp(0).tz_localize("UTC"),
NaT,
],
)
def test_nat_comparisons(compare_operators_no_eq_ne, other):
# GH 26039
opname = compare_operators_no_eq_ne
assert getattr(NaT, opname)(other) is False
op = getattr(operator, opname.strip("_"))
assert op(NaT, other) is False
assert op(other, NaT) is False
@pytest.mark.parametrize("other", [np.timedelta64(0, "ns"), np.datetime64("now", "ns")])
def test_nat_comparisons_numpy(other):
# Once numpy#17017 is fixed and the xfailed cases in test_nat_comparisons
# pass, this test can be removed
assert not NaT == other
assert NaT != other
assert not NaT < other
assert not NaT > other
assert not NaT <= other
assert not NaT >= other
@pytest.mark.parametrize("other_and_type", [("foo", "str"), (2, "int"), (2.0, "float")])
@pytest.mark.parametrize(
"symbol_and_op",
[("<=", operator.le), ("<", operator.lt), (">=", operator.ge), (">", operator.gt)],
)
def test_nat_comparisons_invalid(other_and_type, symbol_and_op):
# GH#35585
other, other_type = other_and_type
symbol, op = symbol_and_op
assert not NaT == other
assert not other == NaT
assert NaT != other
assert other != NaT
msg = f"'{symbol}' not supported between instances of 'NaTType' and '{other_type}'"
with pytest.raises(TypeError, match=msg):
op(NaT, other)
msg = f"'{symbol}' not supported between instances of '{other_type}' and 'NaTType'"
with pytest.raises(TypeError, match=msg):
op(other, NaT)
@pytest.mark.parametrize(
"other",
[
np.array(["foo"] * 2, dtype=object),
np.array([2, 3], dtype="int64"),
np.array([2.0, 3.5], dtype="float64"),
],
ids=["str", "int", "float"],
)
def test_nat_comparisons_invalid_ndarray(other):
# GH#40722
expected = np.array([False, False])
result = NaT == other
tm.assert_numpy_array_equal(result, expected)
result = other == NaT
tm.assert_numpy_array_equal(result, expected)
expected = np.array([True, True])
result = NaT != other
tm.assert_numpy_array_equal(result, expected)
result = other != NaT
tm.assert_numpy_array_equal(result, expected)
for symbol, op in [
("<=", operator.le),
("<", operator.lt),
(">=", operator.ge),
(">", operator.gt),
]:
msg = f"'{symbol}' not supported between"
with pytest.raises(TypeError, match=msg):
op(NaT, other)
if other.dtype == np.dtype("object"):
# uses the reverse operator, so symbol changes
msg = None
with pytest.raises(TypeError, match=msg):
op(other, NaT)
def test_compare_date(fixed_now_ts):
# GH#39151 comparing NaT with date object is deprecated
# See also: tests.scalar.timestamps.test_comparisons::test_compare_date
dt = fixed_now_ts.to_pydatetime().date()
msg = "Cannot compare NaT with datetime.date object"
for left, right in [(NaT, dt), (dt, NaT)]:
assert not left == right
assert left != right
with pytest.raises(TypeError, match=msg):
left < right
with pytest.raises(TypeError, match=msg):
left <= right
with pytest.raises(TypeError, match=msg):
left > right
with pytest.raises(TypeError, match=msg):
left >= right
@pytest.mark.parametrize(
"obj",
[
offsets.YearEnd(2),
offsets.YearBegin(2),
offsets.MonthBegin(1),
offsets.MonthEnd(2),
offsets.MonthEnd(12),
offsets.Day(2),
offsets.Day(5),
offsets.Hour(24),
offsets.Hour(3),
offsets.Minute(),
np.timedelta64(3, "h"),
np.timedelta64(4, "h"),
np.timedelta64(3200, "s"),
np.timedelta64(3600, "s"),
np.timedelta64(3600 * 24, "s"),
np.timedelta64(2, "D"),
np.timedelta64(365, "D"),
timedelta(-2),
timedelta(365),
timedelta(minutes=120),
timedelta(days=4, minutes=180),
timedelta(hours=23),
timedelta(hours=23, minutes=30),
timedelta(hours=48),
],
)
def test_nat_addsub_tdlike_scalar(obj):
assert NaT + obj is NaT
assert obj + NaT is NaT
assert NaT - obj is NaT
def test_pickle():
# GH#4606
p = tm.round_trip_pickle(NaT)
assert p is NaT
|
{
"content_hash": "a1b85f6b6498edd32d13729b28e1785a",
"timestamp": "",
"source": "github",
"line_count": 704,
"max_line_length": 88,
"avg_line_length": 28.136363636363637,
"alnum_prop": 0.567296042003231,
"repo_name": "pandas-dev/pandas",
"id": "e310506935729788d5edea7804d58a7087120063",
"size": "19808",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pandas/tests/scalar/test_nat.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "512"
},
{
"name": "C",
"bytes": "366145"
},
{
"name": "CSS",
"bytes": "1800"
},
{
"name": "Cython",
"bytes": "1186787"
},
{
"name": "Dockerfile",
"bytes": "1411"
},
{
"name": "HTML",
"bytes": "456531"
},
{
"name": "Python",
"bytes": "18778786"
},
{
"name": "Shell",
"bytes": "10369"
},
{
"name": "Smarty",
"bytes": "8486"
},
{
"name": "XSLT",
"bytes": "1196"
}
],
"symlink_target": ""
}
|
'''
The MIT License (MIT)
Portions Copyright (c) 2015-2019, The OmniDB Team
Portions Copyright (c) 2017-2019, 2ndQuadrant Limited
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import os.path
import re
from collections import OrderedDict
from enum import Enum
import OmniDB_app.include.Spartacus as Spartacus
import OmniDB_app.include.Spartacus.Database as Database
import OmniDB_app.include.Spartacus.Utils as Utils
'''
------------------------------------------------------------------------
Template
------------------------------------------------------------------------
'''
class TemplateType(Enum):
EXECUTE = 1
SCRIPT = 2
class Template:
def __init__(self, p_text, p_type=TemplateType.EXECUTE):
self.v_text = p_text
self.v_type = p_type
'''
------------------------------------------------------------------------
SQLite
------------------------------------------------------------------------
'''
class SQLite:
def __init__(self, p_service, p_conn_id=0, p_alias='', p_foreignkeys=True):
self.v_alias = p_alias
self.v_db_type = 'sqlite'
self.v_conn_string = ''
self.v_conn_id = p_conn_id
self.v_server = ''
self.v_port = ''
self.v_service = p_service
self.v_active_service = p_service
self.v_user = ''
self.v_active_user = ''
self.v_schema = ''
self.v_connection = Spartacus.Database.SQLite(p_service, p_foreignkeys)
self.v_has_schema = False
self.v_has_functions = False
self.v_has_procedures = False
self.v_has_sequences = False
self.v_has_primary_keys = True
self.v_has_foreign_keys = True
self.v_has_uniques = True
self.v_has_indexes = True
self.v_has_checks = False
self.v_has_excludes = False
self.v_has_rules = False
self.v_has_triggers = True
self.v_has_partitions = True
self.v_has_statistics = False
self.v_has_update_rule = True
self.v_can_rename_table = True
self.v_rename_table_command = "alter table #p_table_name# rename to #p_new_table_name#"
self.v_create_pk_command = "constraint #p_constraint_name# primary key (#p_columns#)"
self.v_create_fk_command = "constraint #p_constraint_name# foreign key (#p_columns#) references #p_r_table_name# (#p_r_columns#) #p_delete_update_rules#"
self.v_create_unique_command = "constraint #p_constraint_name# unique (#p_columns#)"
self.v_can_alter_type = False
self.v_can_alter_nullable = False
self.v_can_rename_column = False
self.v_can_add_column = True
self.v_add_column_command = "alter table #p_table_name# add column #p_column_name# #p_data_type# #p_nullable#"
self.v_can_drop_column = False
self.v_can_add_constraint = False
self.v_can_drop_constraint = False
        self.v_create_index_command = "create index #p_index_name# on #p_table_name# (#p_columns#)"
self.v_create_unique_index_command = "create unique index #p_index_name# on #p_table_name# (#p_columns#)"
self.v_drop_index_command = "drop index #p_index_name#"
self.v_update_rules = [
"NO ACTION",
"RESTRICT",
"SET NULL",
"SET DEFAULT",
"CASCADE"
]
self.v_delete_rules = [
"NO ACTION",
"RESTRICT",
"SET NULL",
"SET DEFAULT",
"CASCADE"
]
self.v_reserved_words = []
self.v_console_help = "Console tab."
self.v_use_server_cursor = False
self.v_version = ''
self.v_version_num = ''
# Decorator to acquire lock before performing action
def lock_required(function):
def wrap(self, *args, **kwargs):
            # v_lock may not be set on this connection; look it up defensively.
            v_lock = getattr(self, 'v_lock', None)
            if v_lock is not None:
                try:
                    v_lock.acquire()
                except Exception:
                    pass
            try:
                return function(self, *args, **kwargs)
            finally:
                if v_lock is not None:
                    try:
                        v_lock.release()
                    except Exception:
                        pass
wrap.__doc__ = function.__doc__
wrap.__name__ = function.__name__
return wrap
@lock_required
def GetVersion(self):
self.v_version = self.v_connection.ExecuteScalar('SELECT sqlite_version()')
v_splitted_version = self.v_version.split('.')
self.v_version_num = '{0}{1}{2}'.format(
v_splitted_version[0].zfill(2),
v_splitted_version[1].zfill(2),
v_splitted_version[2].zfill(2)
)
return 'SQLite ' + self.v_version
def GetName(self):
return self.v_service
def PrintDatabaseInfo(self):
if '/' in self.v_service:
v_strings = self.v_service.split('/')
return v_strings[len(v_strings)-1]
else:
return self.v_service
def PrintDatabaseDetails(self):
return 'Local File'
def HandleUpdateDeleteRules(self, p_update_rule, p_delete_rule):
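        # Illustrative: HandleUpdateDeleteRules('CASCADE', 'SET NULL') returns
        # ' on update CASCADE  on delete SET NULL ' (a rule is skipped when empty).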
v_rules = ''
if p_update_rule.strip() != "":
v_rules += " on update " + p_update_rule + " "
if p_delete_rule.strip() != "":
v_rules += " on delete " + p_delete_rule + " "
return v_rules
def TestConnection(self):
v_return = ''
try:
if os.path.isfile(self.v_service):
v_return = 'Connection successful.'
else:
                v_return = 'File does not exist; if you try to manage this connection, a database file will be created.'
except Exception as exc:
v_return = str(exc)
return v_return
@lock_required
def QueryTables(self):
return self.v_connection.Query('''
select name as table_name
from sqlite_master
where type = 'table'
''', True)
@lock_required
def QueryTablesFields(self, p_table=None):
v_table_columns_all = Spartacus.Database.DataTable()
v_table_columns_all.Columns = [
'column_name',
'data_type',
'nullable',
'data_length',
'data_precision',
'data_scale',
'table_name'
]
if p_table:
v_tables = Spartacus.Database.DataTable()
v_tables.Columns.append('table_name')
v_tables.Rows.append(OrderedDict(zip(v_tables.Columns, [p_table])))
else:
v_tables = self.QueryTables()
for v_table in v_tables.Rows:
v_table_columns_tmp = self.v_connection.Query("pragma table_info('{0}')".format(v_table['table_name']), True)
v_table_columns = Spartacus.Database.DataTable()
v_table_columns.Columns = [
'column_name',
'data_type',
'nullable',
'data_length',
'data_precision',
'data_scale',
'table_name'
]
for r in v_table_columns_tmp.Rows:
v_row = []
v_row.append(r['name'])
if '(' in r['type']:
v_index = r['type'].find('(')
v_data_type = r['type'].lower()[0 : v_index]
if ',' in r['type']:
v_sizes = r['type'][v_index + 1 : r['type'].find(')')].split(',')
v_data_length = ''
v_data_precision = v_sizes[0]
v_data_scale = v_sizes[1]
else:
v_data_length = r['type'][v_index + 1 : r['type'].find(')')]
v_data_precision = ''
v_data_scale = ''
else:
v_data_type = r['type'].lower()
v_data_length = ''
v_data_precision = ''
v_data_scale = ''
v_row.append(v_data_type)
if r['notnull'] == '1':
v_row.append('NO')
else:
v_row.append('YES')
v_row.append(v_data_length)
v_row.append(v_data_precision)
v_row.append(v_data_scale)
v_row.append(v_table['table_name'])
v_table_columns.Rows.append(OrderedDict(zip(v_table_columns.Columns, v_row)))
v_table_columns_all.Merge(v_table_columns)
return v_table_columns_all
@lock_required
def QueryTablesForeignKeys(self, p_table=None):
v_fks_all = Spartacus.Database.DataTable()
v_fks_all.Columns = [
'r_table_name',
'table_name',
'r_column_name',
'column_name',
'constraint_name',
'update_rule',
'delete_rule',
'table_schema',
'r_table_schema'
]
if p_table:
v_tables = Spartacus.Database.DataTable()
v_tables.Columns.append('table_name')
v_tables.Rows.append(OrderedDict(zip(v_tables.Columns, [p_table])))
else:
v_tables = self.QueryTables()
for v_table in v_tables.Rows:
v_fks_tmp = self.v_connection.Query("pragma foreign_key_list('{0}')".format(v_table['table_name']), True)
v_fks = Spartacus.Database.DataTable()
v_fks.Columns = [
'r_table_name',
'table_name',
'r_column_name',
'column_name',
'constraint_name',
'update_rule',
'delete_rule',
'table_schema',
'r_table_schema'
]
for r in v_fks_tmp.Rows:
v_row = []
v_row.append(r['table'])
v_row.append(v_table['table_name'])
v_row.append(r['to'])
v_row.append(r['from'])
v_row.append(v_table['table_name'] + '_fk_' + str(r['id']))
v_row.append(r['on_update'])
v_row.append(r['on_delete'])
v_row.append('')
v_row.append('')
v_fks.Rows.append(OrderedDict(zip(v_fks.Columns, v_row)))
v_fks_all.Merge(v_fks)
return v_fks_all
@lock_required
def QueryTablesForeignKeysColumns(self, p_fkey, p_table=None):
v_fk = Spartacus.Database.DataTable()
v_fk.Columns = [
'r_table_name',
'table_name',
'r_column_name',
'column_name',
'constraint_name',
'update_rule',
'delete_rule',
'table_schema',
'r_table_schema'
]
v_fks_tmp = self.v_connection.Query("pragma foreign_key_list('{0}')".format(p_table), True)
for v_row_tmp in v_fks_tmp.Rows:
if (p_table + '_fk_' + str(v_row_tmp['id'])) == p_fkey:
v_row = []
v_row.append(v_row_tmp['table'])
v_row.append(p_table)
v_row.append(v_row_tmp['to'])
v_row.append(v_row_tmp['from'])
v_row.append(p_table + '_fk_' + str(v_row_tmp['id']))
v_row.append(v_row_tmp['on_update'])
v_row.append(v_row_tmp['on_delete'])
v_row.append('')
v_row.append('')
v_fk.Rows.append(OrderedDict(zip(v_fk.Columns, v_row)))
return v_fk
@lock_required
def QueryTablesPrimaryKeys(self, p_table=None):
v_pks_all = Spartacus.Database.DataTable()
v_pks_all.Columns = [
'constraint_name',
'column_name',
'table_name'
]
if p_table:
v_tables = Spartacus.Database.DataTable()
v_tables.Columns.append('table_name')
v_tables.Rows.append(OrderedDict(zip(v_tables.Columns, [p_table])))
else:
v_tables = self.QueryTables()
for v_table in v_tables.Rows:
v_pks_tmp = self.v_connection.Query("pragma table_info('{0}')".format(v_table['table_name']), True)
v_pks = Spartacus.Database.DataTable()
v_pks.Columns = [
'constraint_name',
'column_name',
'table_name'
]
for r in v_pks_tmp.Rows:
if r['pk'] != '0':
v_row = []
v_row.append('pk_' + v_table['table_name'])
v_row.append(r['name'])
v_row.append(v_table['table_name'])
v_pks.Rows.append(OrderedDict(zip(v_pks.Columns, v_row)))
v_pks_all.Merge(v_pks)
return v_pks_all
@lock_required
def QueryTablesPrimaryKeysColumns(self, p_table=None):
v_pk_tmp = self.v_connection.Query("pragma table_info('{0}')".format(p_table), True)
v_pk = Spartacus.Database.DataTable()
v_pk.Columns = ['column_name']
        for v_row_tmp in v_pk_tmp.Rows:
            if v_row_tmp['pk'] != '0':
                v_row = [v_row_tmp['name']]
                v_pk.Rows.append(OrderedDict(zip(v_pk.Columns, v_row)))
return v_pk
@lock_required
def QueryTablesUniques(self, p_table=None):
v_uniques_all = Spartacus.Database.DataTable()
v_uniques_all.Columns = [
'constraint_name',
'table_name'
]
if p_table:
v_tables = self.v_connection.Query('''
select name
from sqlite_master
where type = 'table'
and name = '{0}'
'''.format(p_table), True)
else:
v_tables = self.v_connection.Query('''
select name
from sqlite_master
where type = 'table'
''', True)
for v_table in v_tables.Rows:
v_uniques = self.v_connection.Query('''
PRAGMA index_list('{0}')
'''.format(
v_table['name']
), True)
for v_unique in v_uniques.Rows:
if v_unique['origin'] == 'u':
v_uniques_all.AddRow([
v_unique['name'],
v_table['name']
])
return v_uniques_all
@lock_required
def QueryTablesUniquesColumns(self, p_unique, p_table=None):
v_uniques_all = Spartacus.Database.DataTable()
v_uniques_all.Columns = [
'constraint_name',
'column_name',
'table_name'
]
if p_table:
v_tables = self.v_connection.Query('''
select name
from sqlite_master
where type = 'table'
and name = '{0}'
'''.format(p_table), True)
else:
v_tables = self.v_connection.Query('''
select name
from sqlite_master
where type = 'table'
''', True)
for v_table in v_tables.Rows:
v_uniques = self.v_connection.Query('''
PRAGMA index_list('{0}')
'''.format(
v_table['name']
), True)
for v_unique in v_uniques.Rows:
if v_unique['origin'] == 'u':
if v_unique['name'] == p_unique:
v_unique_columns = self.v_connection.Query('''
PRAGMA index_info('{0}')
'''.format(
v_unique['name']
), True)
for v_unique_column in v_unique_columns.Rows:
v_uniques_all.AddRow([
v_unique['name'],
v_unique_column['name'],
v_table['name']
])
return v_uniques_all
@lock_required
def QueryTablesIndexes(self, p_table=None):
v_indexes_all = Spartacus.Database.DataTable()
v_indexes_all.Columns = [
'index_name',
'table_name',
'uniqueness'
]
if p_table:
v_tables = self.v_connection.Query('''
select name
from sqlite_master
where type = 'table'
and name = '{0}'
'''.format(p_table), True)
else:
v_tables = self.v_connection.Query('''
select name
from sqlite_master
where type = 'table'
''', True)
for v_table in v_tables.Rows:
v_indexes = self.v_connection.Query('''
PRAGMA index_list('{0}')
'''.format(
v_table['name']
), True)
for v_index in v_indexes.Rows:
if v_index['origin'] == 'c':
v_indexes_all.AddRow([
v_index['name'],
v_table['name'],
'Unique' if v_index['unique'] == '1' else 'Non Unique'
])
return v_indexes_all
@lock_required
def QueryTablesIndexesColumns(self, p_index, p_table=None):
v_indexes_all = Spartacus.Database.DataTable()
v_indexes_all.Columns = [
'index_name',
'column_name',
'table_name'
]
if p_table:
v_tables = self.v_connection.Query('''
select name
from sqlite_master
where type = 'table'
and name = '{0}'
'''.format(p_table), True)
else:
v_tables = self.v_connection.Query('''
select name
from sqlite_master
where type = 'table'
''', True)
for v_table in v_tables.Rows:
v_indexes = self.v_connection.Query('''
PRAGMA index_list('{0}')
'''.format(
v_table['name']
), True)
for v_index in v_indexes.Rows:
if v_index['origin'] == 'c':
if v_index['name'] == p_index:
v_index_columns = self.v_connection.Query('''
PRAGMA index_info('{0}')
'''.format(
v_index['name']
), True)
for v_index_column in v_index_columns.Rows:
v_indexes_all.AddRow([
v_index['name'],
v_index_column['name'],
v_table['name']
])
return v_indexes_all
@lock_required
def QueryViews(self):
return self.v_connection.Query('''
select name as table_name
from sqlite_master
where type = 'view'
''', True)
@lock_required
def QueryViewFields(self, p_table=None):
v_table_columns_all = Spartacus.Database.DataTable()
v_table_columns_all.Columns = [
'column_name',
'data_type',
'nullable',
'data_length',
'data_precision',
'data_scale',
'table_name'
]
if p_table:
v_tables = Spartacus.Database.DataTable()
v_tables.Columns.append('table_name')
v_tables.Rows.append(OrderedDict(zip(v_tables.Columns, [p_table])))
else:
v_tables = self.QueryTables()
for v_table in v_tables.Rows:
v_table_columns_tmp = self.v_connection.Query("pragma table_info('{0}')".format(v_table['table_name']), True)
v_table_columns = Spartacus.Database.DataTable()
v_table_columns.Columns = [
'column_name',
'data_type',
'nullable',
'data_length',
'data_precision',
'data_scale',
'table_name'
]
for r in v_table_columns_tmp.Rows:
v_row = []
v_row.append(r['name'])
if '(' in r['type']:
v_index = r['type'].find('(')
v_data_type = r['type'].lower()[0 : v_index]
if ',' in r['type']:
v_sizes = r['type'][v_index + 1 : r['type'].find(')')].split(',')
v_data_length = ''
v_data_precision = v_sizes[0]
v_data_scale = v_sizes[1]
else:
v_data_length = r['type'][v_index + 1 : r['type'].find(')')]
v_data_precision = ''
v_data_scale = ''
else:
v_data_type = r['type'].lower()
v_data_length = ''
v_data_precision = ''
v_data_scale = ''
v_row.append(v_data_type)
if r['notnull'] == '1':
v_row.append('NO')
else:
v_row.append('YES')
v_row.append(v_data_length)
v_row.append(v_data_precision)
v_row.append(v_data_scale)
v_row.append(v_table['table_name'])
v_table_columns.Rows.append(OrderedDict(zip(v_table_columns.Columns, v_row)))
v_table_columns_all.Merge(v_table_columns)
return v_table_columns_all
@lock_required
def QueryTablesTriggers(self, p_table=None):
return self.v_connection.Query('''
SELECT name AS trigger_name,
tbl_name AS table_name
FROM sqlite_master
WHERE type = 'trigger'
AND tbl_name = '{0}'
'''.format(
p_table
), True)
def TemplateSelect(self, p_table, p_kind):
# table
if p_kind == 't':
v_sql = 'SELECT t.'
v_fields = self.QueryTablesFields(p_table)
if len(v_fields.Rows) > 0:
v_sql += '\n , t.'.join([r['column_name'] for r in v_fields.Rows])
v_sql += '\nFROM {0} t'.format(p_table)
v_pk = self.QueryTablesPrimaryKeys(p_table)
if len(v_pk.Rows) > 0:
v_fields = self.QueryTablesPrimaryKeysColumns(p_table)
if len(v_fields.Rows) > 0:
v_sql += '\nORDER BY t.'
v_sql += '\n , t.'.join([r['column_name'] for r in v_fields.Rows])
# view
elif p_kind == 'v':
v_sql = 'SELECT t.'
v_fields = self.QueryViewFields(p_table)
if len(v_fields.Rows) > 0:
v_sql += '\n , t.'.join([r['column_name'] for r in v_fields.Rows])
v_sql += '\nFROM {0} t'.format(p_table)
return Template(v_sql)
def TemplateInsert(self, p_table):
v_fields = self.QueryTablesFields(p_table)
if len(v_fields.Rows) > 0:
v_sql = 'INSERT INTO {0} (\n'.format(p_table)
v_pk = self.QueryTablesPrimaryKeys(p_table)
if len(v_pk.Rows) > 0:
v_table_pk_fields = self.QueryTablesPrimaryKeysColumns(p_table)
v_pk_fields = [r['column_name'] for r in v_table_pk_fields.Rows]
v_values = []
v_first = True
for r in v_fields.Rows:
if v_first:
v_sql += ' {0}'.format(r['column_name'])
if r['column_name'] in v_pk_fields:
v_values.append(' ? -- {0} {1} PRIMARY KEY'.format(r['column_name'], r['data_type']))
elif r['nullable'] == 'YES':
v_values.append(' ? -- {0} {1} NULLABLE'.format(r['column_name'], r['data_type']))
else:
v_values.append(' ? -- {0} {1}'.format(r['column_name'], r['data_type']))
v_first = False
else:
v_sql += '\n , {0}'.format(r['column_name'])
if r['column_name'] in v_pk_fields:
v_values.append('\n , ? -- {0} {1} PRIMARY KEY'.format(r['column_name'], r['data_type']))
elif r['nullable'] == 'YES':
v_values.append('\n , ? -- {0} {1} NULLABLE'.format(r['column_name'], r['data_type']))
else:
v_values.append('\n , ? -- {0} {1}'.format(r['column_name'], r['data_type']))
else:
v_values = []
v_first = True
for r in v_fields.Rows:
if v_first:
v_sql += ' {0}'.format(r['column_name'])
if r['nullable'] == 'YES':
v_values.append(' ? -- {0} {1} NULLABLE'.format(r['column_name'], r['data_type']))
else:
v_values.append(' ? -- {0} {1}'.format(r['column_name'], r['data_type']))
v_first = False
else:
v_sql += '\n , {0}'.format(r['column_name'])
if r['nullable'] == 'YES':
v_values.append('\n , ? -- {0} {1} NULLABLE'.format(r['column_name'], r['data_type']))
else:
v_values.append('\n , ? -- {0} {1}'.format(r['column_name'], r['data_type']))
v_sql += '\n) VALUES (\n'
for v in v_values:
v_sql += v
v_sql += '\n)'
else:
v_sql = ''
return Template(v_sql)
def TemplateUpdate(self, p_table):
v_fields = self.QueryTablesFields(p_table)
if len(v_fields.Rows) > 0:
v_sql = 'UPDATE {0}\nSET '.format(p_table)
v_pk = self.QueryTablesPrimaryKeys(p_table)
if len(v_pk.Rows) > 0:
v_table_pk_fields = self.QueryTablesPrimaryKeysColumns(p_table)
v_pk_fields = [r['column_name'] for r in v_table_pk_fields.Rows]
v_first = True
for r in v_fields.Rows:
if v_first:
if r['column_name'] in v_pk_fields:
v_sql += '{0} = ? -- {1} PRIMARY KEY'.format(r['column_name'], r['data_type'])
elif r['nullable'] == 'YES':
v_sql += '{0} = ? -- {1} NULLABLE'.format(r['column_name'], r['data_type'])
else:
v_sql += '{0} = ? -- {1}'.format(r['column_name'], r['data_type'])
v_first = False
else:
if r['column_name'] in v_pk_fields:
v_sql += '\n , {0} = ? -- {1} PRIMARY KEY'.format(r['column_name'], r['data_type'])
elif r['nullable'] == 'YES':
v_sql += '\n , {0} = ? -- {1} NULLABLE'.format(r['column_name'], r['data_type'])
else:
v_sql += '\n , {0} = ? -- {1}'.format(r['column_name'], r['data_type'])
else:
v_first = True
for r in v_fields.Rows:
if v_first:
if r['nullable'] == 'YES':
v_sql += '{0} = ? -- {1} NULLABLE'.format(r['column_name'], r['data_type'])
else:
v_sql += '{0} = ? -- {1}'.format(r['column_name'], r['data_type'])
v_first = False
else:
if r['nullable'] == 'YES':
v_sql += '\n , {0} = ? -- {1} NULLABLE'.format(r['column_name'], r['data_type'])
else:
v_sql += '\n , {0} = ? -- {1}'.format(r['column_name'], r['data_type'])
v_sql += '\nWHERE condition'
else:
v_sql = ''
return Template(v_sql)
@lock_required
def QueryDataLimited(self, p_query, p_count=-1):
if p_count != -1:
self.v_connection.Open()
v_data = self.v_connection.QueryBlock(p_query, p_count, True)
self.v_connection.Close()
return v_data
else:
return self.v_connection.Query(p_query, True)
@lock_required
def QueryTableRecords(self, p_column_list, p_table, p_filter, p_count=-1):
v_limit = ''
if p_count != -1:
            v_limit = ' limit ' + str(p_count)
return self.v_connection.Query('''
select {0}
from {1} t
{2}
{3}
'''.format(
p_column_list,
p_table,
p_filter,
v_limit
), True
)
def TemplateCreateView(self):
return Template('''CREATE
--TEMPORARY
VIEW view_name
--( column_definition, ... )
AS
--SELECT...
''')
def TemplateDropView(self):
return Template('DROP VIEW #view_name#')
def TemplateCreateTable(self):
return Template('''CREATE
--TEMPORARY
TABLE table_name
(
column_name data_type
--CONSTRAINT constraint_name
--NOT NULL
--CHECK
--UNIQUE
--PRIMARY KEY
--FOREIGN KEY
)
--WITHOUT ROWID
''')
def TemplateAlterTable(self):
return Template('''ALTER TABLE #table_name#
--RENAME TO new_table_name
--RENAME COLUMN column_name TO new_column_name
--ADD COLUMN column_definition
''')
def TemplateDropTable(self):
return Template('DROP TABLE #table_name#')
def TemplateCreateColumn(self):
return Template('''ALTER TABLE #table_name#
ADD COLUMN column_definition
''')
def TemplateCreateIndex(self):
return Template('''CREATE
--UNIQUE
INDEX index_name ON #table_name# ( column_name, ... )
--WHERE expression
''')
def TemplateReindex(self):
return Template('REINDEX #index_name#')
def TemplateDropIndex(self):
return Template('DROP INDEX #index_name#')
def TemplateDelete(self):
return Template('''DELETE FROM
#table_name#
WHERE condition
''')
def TemplateCreateTrigger(self):
return Template('''CREATE
--TEMPORARY
TRIGGER trigger_name
--BEFORE
--AFTER
--INSTEAD OF
--DELETE
--INSERT
--UPDATE
--OF column_name
ON #table_name#
--FOR EACH ROW
WHEN expression
BEGIN
statement
;
END
''')
def TemplateDropTrigger(self):
return Template('DROP TRIGGER #trigger_name#')
def GetAutocompleteValues(self, p_columns, p_filter):
return None
def GetErrorPosition(self, p_error_message):
vector = str(p_error_message).split('\n')
v_return = None
if len(vector) > 1 and vector[1][0:4]=='LINE':
v_return = {
'row': vector[1].split(':')[0].split(' ')[1],
'col': vector[2].index('^') - len(vector[1].split(':')[0])-2
}
return v_return
def GetPropertiesTable(self, p_object):
return self.v_connection.Query('''
SELECT type AS "Type",
name AS "Name",
rootpage AS "Root Page"
FROM sqlite_master
WHERE type = 'table'
AND name = '{0}'
'''.format(p_object))
def GetPropertiesTableField(self, p_table, p_object):
return self.v_connection.Query('''
SELECT 'Column' AS "Type",
'{0}' AS "Name"
'''.format(p_object))
def GetPropertiesIndex(self, p_object):
return self.v_connection.Query('''
SELECT type AS "Type",
name AS "Name",
rootpage AS "Root Page"
FROM sqlite_master
WHERE type = 'index'
AND name = '{0}'
'''.format(p_object))
def GetPropertiesView(self, p_object):
return self.v_connection.Query('''
SELECT type AS "Type",
name AS "Name",
rootpage AS "Root Page"
FROM sqlite_master
WHERE type = 'view'
AND name = '{0}'
'''.format(p_object))
def GetPropertiesTrigger(self, p_table, p_object):
return self.v_connection.Query('''
SELECT type AS "Type",
name AS "Name",
rootpage AS "Root Page"
FROM sqlite_master
WHERE type = 'trigger'
AND name = '{0}'
AND tbl_name = '{1}'
'''.format(p_object, p_table))
def GetPropertiesPK(self, p_table, p_object):
return self.v_connection.Query('''
SELECT 'PK' AS "Type",
'{0}' AS "Name"
'''.format(p_object))
def GetPropertiesFK(self, p_table, p_object):
return self.v_connection.Query('''
SELECT 'FK' AS "Type",
'{0}' AS "Name"
'''.format(p_object))
def GetPropertiesUnique(self, p_table, p_object):
return self.v_connection.Query('''
SELECT 'Unique' AS "Type",
'{0}' AS "Name"
'''.format(p_object))
def GetProperties(self, p_table, p_object, p_type):
try:
if p_type == 'table':
return self.GetPropertiesTable(p_object).Transpose('Property', 'Value')
elif p_type == 'table_field':
return self.GetPropertiesTableField(p_table, p_object).Transpose('Property', 'Value')
elif p_type == 'index':
return self.GetPropertiesIndex(p_object).Transpose('Property', 'Value')
elif p_type == 'view':
return self.GetPropertiesView(p_object).Transpose('Property', 'Value')
elif p_type == 'trigger':
return self.GetPropertiesTrigger(p_table, p_object).Transpose('Property', 'Value')
elif p_type == 'pk':
return self.GetPropertiesPK(p_table, p_object).Transpose('Property', 'Value')
elif p_type == 'foreign_key':
return self.GetPropertiesFK(p_table, p_object).Transpose('Property', 'Value')
elif p_type == 'unique':
return self.GetPropertiesUnique(p_table, p_object).Transpose('Property', 'Value')
else:
return None
except Spartacus.Database.Exception as exc:
if str(exc) == 'Can only transpose a table with a single row.':
raise Exception('Object {0} does not exist anymore. Please refresh the tree view.'.format(p_object))
else:
raise exc
def GetDDLTable(self, p_object):
return self.v_connection.ExecuteScalar('''
SELECT sql
FROM sqlite_master
WHERE type = 'table'
AND name = '{0}'
'''.format(p_object))
def GetDDLIndex(self, p_object):
return self.v_connection.ExecuteScalar('''
SELECT sql
FROM sqlite_master
WHERE type = 'index'
AND name = '{0}'
'''.format(p_object))
def GetDDLView(self, p_object):
return self.v_connection.ExecuteScalar('''
SELECT sql
FROM sqlite_master
WHERE type = 'view'
AND name = '{0}'
'''.format(p_object))
def GetDDLTrigger(self, p_object, p_table):
return self.v_connection.ExecuteScalar('''
SELECT sql
FROM sqlite_master
WHERE type = 'trigger'
AND name = '{0}'
AND tbl_name = '{1}'
'''.format(p_object, p_table))
def GetDDL(self, p_table, p_object, p_type):
if p_type == 'table':
return self.GetDDLTable(p_object)
elif p_type == 'index':
return self.GetDDLIndex(p_object)
elif p_type == 'view':
return self.GetDDLView(p_object)
elif p_type == 'trigger':
return self.GetDDLTrigger(p_object, p_table)
else:
return ''
|
{
"content_hash": "2986b070db46289fca45628ee471d889",
"timestamp": "",
"source": "github",
"line_count": 1055,
"max_line_length": 161,
"avg_line_length": 35.4739336492891,
"alnum_prop": 0.47706078824315296,
"repo_name": "OmniDB/OmniDB",
"id": "12b3a4a492879f8dfefc571c6caf4aca8213dc2e",
"size": "37425",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "OmniDB/OmniDB_app/include/OmniDatabase/SQLite.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "19630"
},
{
"name": "C++",
"bytes": "302"
},
{
"name": "CSS",
"bytes": "304604"
},
{
"name": "Dockerfile",
"bytes": "13652"
},
{
"name": "HTML",
"bytes": "95804"
},
{
"name": "JavaScript",
"bytes": "20832908"
},
{
"name": "Makefile",
"bytes": "264"
},
{
"name": "PLpgSQL",
"bytes": "6153"
},
{
"name": "Python",
"bytes": "2766750"
},
{
"name": "Ruby",
"bytes": "25824"
},
{
"name": "SQLPL",
"bytes": "88625"
},
{
"name": "Shell",
"bytes": "59204"
},
{
"name": "TSQL",
"bytes": "88280"
}
],
"symlink_target": ""
}
|
import os
dbFilename = "task_database_python.txt"
#dbPath = os.path.join( os.getenv("HOME"), dbFilename )
with open(dbFilename, "r") as myfile:
dbContents = myfile.read()
trimmedContents = dbContents.rstrip()
print(trimmedContents)
|
{
"content_hash": "1ac9f94ade618879aafeb40cc1f64179",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 55,
"avg_line_length": 27.333333333333332,
"alnum_prop": 0.7113821138211383,
"repo_name": "dgoldman916/nyu-python",
"id": "660a291f5bc8404d65f513efb0a2f20ee059b88c",
"size": "270",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "class1/assignment3/list.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "331"
},
{
"name": "HTML",
"bytes": "2658196"
},
{
"name": "Jupyter Notebook",
"bytes": "79592"
},
{
"name": "Python",
"bytes": "314058"
},
{
"name": "Shell",
"bytes": "1110"
},
{
"name": "Vim script",
"bytes": "161"
}
],
"symlink_target": ""
}
|
import random
import re
import string
import uuid
from oslo_config import cfg
from oslo_utils import uuidutils
import six
from sahara import conductor as c
from sahara import context
from sahara.plugins import base as plugin_base
from sahara.service import shares as shares_service
from sahara.swift import swift_helper as sw
from sahara.utils.openstack import manila as m
from sahara.utils import remote
opts = [
cfg.StrOpt('job_workflow_postfix',
default='',
help="Postfix for storing jobs in hdfs. Will be "
"added to '/user/<hdfs user>/' path.")
]
CONF = cfg.CONF
CONF.register_opts(opts)
conductor = c.API
# Prefix used to mark data_source name references in arg lists
DATA_SOURCE_PREFIX = "datasource://"
DATA_SOURCE_SUBST_NAME = "edp.substitute_data_source_for_name"
DATA_SOURCE_SUBST_UUID = "edp.substitute_data_source_for_uuid"
def get_plugin(cluster):
return plugin_base.PLUGINS.get_plugin(cluster.plugin_name)
def create_workflow_dir(where, path, job, use_uuid=None, chmod=""):
if use_uuid is None:
use_uuid = six.text_type(uuid.uuid4())
constructed_dir = _append_slash_if_needed(path)
constructed_dir += '%s/%s' % (job.name, use_uuid)
with remote.get_remote(where) as r:
if chmod:
r.execute_command("mkdir -p -m %s %s" % (chmod, constructed_dir))
else:
r.execute_command("mkdir -p %s" % constructed_dir)
return constructed_dir
def get_data_sources(job_execution, job, data_source_urls, cluster=None):
def _construct(ctx, ds_id):
source = conductor.data_source_get(ctx, ds_id)
if source and source.id not in data_source_urls:
url = _construct_data_source_url(source.url, job_execution.id)
runtime_url = _runtime_url(url, cluster)
data_source_urls[source.id] = (url, runtime_url)
return source
ctx = context.ctx()
input_source = _construct(ctx, job_execution.input_id)
output_source = _construct(ctx, job_execution.output_id)
return input_source, output_source
def _append_slash_if_needed(path):
if path[-1] != '/':
path += '/'
return path
def may_contain_data_source_refs(job_configs):
def _check_data_source_ref_option(option):
truth = job_configs and (
job_configs.get('configs', {}).get(option))
# Config values specified in the UI may be
# passed as strings
return truth in (True, 'True')
return (
_check_data_source_ref_option(DATA_SOURCE_SUBST_NAME),
_check_data_source_ref_option(DATA_SOURCE_SUBST_UUID))
def _data_source_ref_search(job_configs, func, prune=lambda x: x):
"""Return a list of unique values in job_configs filtered by func().
Loop over the 'args', 'configs' and 'params' elements in
job_configs and return a list of all values for which
func(value) is True.
Optionally provide a 'prune' function that is applied
to values before they are added to the return value.
"""
args = set([prune(arg) for arg in job_configs.get(
'args', []) if func(arg)])
configs = set([prune(val) for val in six.itervalues(
job_configs.get('configs', {})) if func(val)])
params = set([prune(val) for val in six.itervalues(
job_configs.get('params', {})) if func(val)])
return list(args | configs | params)
def find_possible_data_source_refs_by_name(job_configs):
"""Find string values in job_configs starting with 'datasource://'.
Loop over the 'args', 'configs', and 'params' elements of
job_configs to find all values beginning with the prefix
'datasource://'. Return a list of unique values with the prefix
removed.
Note that for 'configs' and 'params', which are dictionaries, only
the values are considered and the keys are not relevant.
"""
def startswith(arg):
return isinstance(
arg,
six.string_types) and arg.startswith(DATA_SOURCE_PREFIX)
return _data_source_ref_search(job_configs,
startswith,
prune=lambda x: x[len(DATA_SOURCE_PREFIX):])
def find_possible_data_source_refs_by_uuid(job_configs):
"""Find string values in job_configs which are uuids.
Return a list of unique values in the 'args', 'configs', and 'params'
elements of job_configs which have the form of a uuid.
Note that for 'configs' and 'params', which are dictionaries, only
the values are considered and the keys are not relevant.
"""
return _data_source_ref_search(job_configs, uuidutils.is_uuid_like)
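# Editorial sketch, not part of the original module: how the two ref-search
# helpers above behave on a hand-made job_configs dict. Every value below is
# an assumption chosen only for illustration.
def _example_find_data_source_refs():
    sample_job_configs = {
        'args': ['datasource://raw-input', '803a40e0-88a9-4d52-8c3b-ff5a6c2b2f38'],
        'configs': {'edp.some.option': 'datasource://raw-input'},
        'params': {'plain': 'not-a-reference'},
    }
    # Name references come back with the 'datasource://' prefix stripped;
    # duplicates across args/configs/params collapse to a single entry.
    by_name = find_possible_data_source_refs_by_name(sample_job_configs)
    # Values shaped like a uuid are returned as-is.
    by_uuid = find_possible_data_source_refs_by_uuid(sample_job_configs)
    # -> (['raw-input'], ['803a40e0-88a9-4d52-8c3b-ff5a6c2b2f38'])
    return by_name, by_uuid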
def _add_credentials_for_data_sources(ds_list, configs):
username = password = None
for src in ds_list:
if src.type == "swift" and hasattr(src, "credentials"):
if "user" in src.credentials:
username = src.credentials['user']
if "password" in src.credentials:
password = src.credentials['password']
break
# Don't overwrite if there is already a value here
if configs.get(sw.HADOOP_SWIFT_USERNAME, None) is None and (
username is not None):
configs[sw.HADOOP_SWIFT_USERNAME] = username
if configs.get(sw.HADOOP_SWIFT_PASSWORD, None) is None and (
password is not None):
configs[sw.HADOOP_SWIFT_PASSWORD] = password
def resolve_data_source_references(job_configs,
job_exec_id,
data_source_urls,
cluster=None):
"""Resolve possible data_source references in job_configs.
Look for any string values in the 'args', 'configs', and 'params'
elements of job_configs which start with 'datasource://' or have
the form of a uuid.
For values beginning with 'datasource://', strip off the prefix
and search for a DataSource object with a name that matches the
value.
For values having the form of a uuid, search for a DataSource object
with an id that matches the value.
If a DataSource object is found for the value, replace the value
with the URL from the DataSource object. If any DataSource objects
are found which reference swift paths and contain credentials, set
credential configuration values in job_configs (use the first set
of swift credentials found).
If no values are resolved, return an empty list and a reference
to job_configs.
If any values are resolved, return a list of the referenced
data_source objects and a copy of job_configs with all of the
references replaced with URLs.
"""
by_name, by_uuid = may_contain_data_source_refs(job_configs)
if not (by_name or by_uuid):
return [], job_configs
ctx = context.ctx()
ds_seen = {}
new_configs = {}
def _resolve(value):
kwargs = {}
if by_name and isinstance(
value,
six.string_types) and value.startswith(DATA_SOURCE_PREFIX):
value = value[len(DATA_SOURCE_PREFIX):]
kwargs['name'] = value
elif by_uuid and uuidutils.is_uuid_like(value):
kwargs['id'] = value
if kwargs:
# Name and id are both unique constraints so if there
# is more than 1 something is really wrong
ds = conductor.data_source_get_all(ctx, **kwargs)
if len(ds) == 1:
ds = ds[0]
ds_seen[ds.id] = ds
if ds.id not in data_source_urls:
url = _construct_data_source_url(ds.url, job_exec_id)
runtime_url = _runtime_url(url, cluster)
data_source_urls[ds.id] = (url, runtime_url)
return data_source_urls[ds.id][1]
return value
# Loop over configs/params/args and look up each value as a data_source.
# If we find it, replace the value. In all cases, we've produced a
# copy which is not a FrozenClass type and can be updated.
new_configs['configs'] = {
k: _resolve(v) for k, v in six.iteritems(
job_configs.get('configs', {}))}
new_configs['params'] = {
k: _resolve(v) for k, v in six.iteritems(
job_configs.get('params', {}))}
new_configs['args'] = [_resolve(a) for a in job_configs.get('args', [])]
# If we didn't resolve anything we might as well return the original
ds_seen = ds_seen.values()
if not ds_seen:
return [], job_configs
# If there are no proxy_configs and the user has not already set configs
# for swift credentials, set those configs based on data_sources we found
if not job_configs.get('proxy_configs'):
_add_credentials_for_data_sources(ds_seen, new_configs['configs'])
else:
# we'll need to copy these, too, so job_configs is complete
new_configs['proxy_configs'] = {
k: v for k, v in six.iteritems(job_configs.get('proxy_configs'))}
return ds_seen, new_configs
def _construct_data_source_url(url, job_exec_id):
"""Resolve placeholders in data_source URL.
Supported placeholders:
    * %RANDSTR(len)% - will be replaced with a random string of lowercase
letters of length `len`.
* %JOB_EXEC_ID% - will be replaced with the job execution ID.
"""
def _randstr(match):
len = int(match.group(1))
return ''.join(random.choice(string.ascii_lowercase)
for _ in six.moves.range(len))
url = url.replace("%JOB_EXEC_ID%", job_exec_id)
url = re.sub(r"%RANDSTR\((\d+)\)%", _randstr, url)
return url
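# Editorial sketch, not part of the original module: the placeholder expansion
# implemented above, applied to a made-up URL and job execution id.
def _example_construct_data_source_url():
    url = _construct_data_source_url(
        'swift://demo-container/out-%JOB_EXEC_ID%-%RANDSTR(6)%', 'abc123')
    # '%JOB_EXEC_ID%' is replaced verbatim with 'abc123'; '%RANDSTR(6)%'
    # becomes six random lowercase letters, e.g.
    # 'swift://demo-container/out-abc123-kqzvwm'
    return url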
def _runtime_url(url, cluster):
if url.startswith(m.MANILA_PREFIX) and cluster:
path = shares_service.get_share_path(url, cluster.shares or [])
if path is None:
path = mount_share_at_default_path(url, cluster)
# This gets us the mount point, but we need a file:// scheme to
# indicate a local filesystem path
return "file://{path}".format(path=path)
return url
def to_url_dict(data_source_urls, runtime=False):
idx = 1 if runtime else 0
return {id: urls[idx] for id, urls in six.iteritems(data_source_urls)}
def mount_share_at_default_path(url, cluster):
# Automount this share to the cluster with default path
# url example: 'manila://ManilaShare-uuid/path_to_file'
share_id = six.moves.urllib.parse.urlparse(url).netloc
if cluster.shares:
cluster_shares = [dict(s) for s in cluster.shares]
else:
cluster_shares = []
needed_share = {
'id': share_id,
'path': shares_service.default_mount(share_id),
'access_level': 'rw'
}
cluster_shares.append(needed_share)
cluster = conductor.cluster_update(
context.ctx(), cluster, {'shares': cluster_shares})
shares_service.mount_shares(cluster)
return shares_service.get_share_path(url, cluster.shares)
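# Editorial sketch, not part of the original module: how the manila share id is
# extracted from a data source URL of the form shown in the comment above. The
# sample uuid and path are placeholders.
def _example_manila_share_id():
    sample = 'manila://3f8c1a2e-0000-4000-8000-000000000000/path_to_file'
    # urlparse treats the share uuid as the network location component.
    return six.moves.urllib.parse.urlparse(sample).netloc
    # -> '3f8c1a2e-0000-4000-8000-000000000000'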
|
{
"content_hash": "f829ebd3edf9749265bcfa7170b5c51c",
"timestamp": "",
"source": "github",
"line_count": 319,
"max_line_length": 79,
"avg_line_length": 34.426332288401255,
"alnum_prop": 0.6326716445091969,
"repo_name": "zhangjunli177/sahara",
"id": "a1b702275237d2978e60e6de6eb3572667aa7f35",
"size": "11572",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sahara/service/edp/job_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "3609"
},
{
"name": "Mako",
"bytes": "29432"
},
{
"name": "PigLatin",
"bytes": "792"
},
{
"name": "Python",
"bytes": "3131969"
},
{
"name": "Shell",
"bytes": "60900"
}
],
"symlink_target": ""
}
|
import json
import time
from charmhelpers.core import hookenv
from charms.layer.apache_bigtop_base import get_package_version
from charms.layer.bigtop_zookeeper import Zookeeper
from charms.leadership import leader_set, leader_get
from charms.reactive import (
hook,
is_state,
remove_state,
set_state,
when,
when_not
)
from charms.reactive.helpers import data_changed
import shutil
import os
@when('local-monitors.available')
def local_monitors_available(nagios):
setup_nagios(nagios)
@when('nrpe-external-master.available')
def nrpe_external_master_available(nagios):
setup_nagios(nagios)
def setup_nagios(nagios):
config = hookenv.config()
unit_name = hookenv.local_unit()
checks = [
{
            'name': 'zk_open_file_descriptor_count',
'description': 'ZK_Open_File_Descriptors_Count',
'warn': config['open_file_descriptor_count_warn'],
'crit': config['open_file_descriptor_count_crit']
},
{
'name': 'zk_ephemerals_count',
'description': 'ZK_Ephemerals_Count',
'warn': config['ephemerals_count_warn'],
'crit': config['ephemerals_count_crit']
},
{
'name': 'zk_avg_latency',
'description': 'ZK_Avg_Latency',
'warn': config['avg_latency_warn'],
'crit': config['avg_latency_crit']
},
{
'name': 'zk_max_latency',
'description': 'ZK_Max_Latency',
'warn': config['max_latency_warn'],
'crit': config['max_latency_crit']
},
{
'name': 'zk_min_latency',
'description': 'ZK_Min_Latency',
'warn': config['min_latency_warn'],
'crit': config['min_latency_crit']
},
{
'name': 'zk_outstanding_requests',
'description': 'ZK_Outstanding_Requests',
'warn': config['outstanding_requests_warn'],
'crit': config['outstanding_requests_crit']
},
{
'name': 'zk_watch_count',
'description': 'ZK_Watch_Count',
'warn': config['watch_count_warn'],
'crit': config['watch_count_crit']
},
]
check_cmd = ['/usr/local/lib/nagios/plugins/check_zookeeper.py',
'-o', 'nagios', '-s', 'localhost:2181']
for check in checks:
nagios.add_check(check_cmd + ['--key', check['name'],
'-w', str(check['warn']),
'-c', str(check['crit'])],
name=check['name'],
description=check['description'],
context=config["nagios_context"],
servicegroups=config["nagios_servicegroups"],
unit=unit_name
)
nagios.updated()
@hook('upgrade-charm')
def nrpe_helper_upgrade_charm():
# Make sure the nrpe handler will get replaced at charm upgrade
remove_state('zookeeper.nrpe_helper.installed')
@when('zookeeper.nrpe_helper.registered')
@when_not('zookeeper.nrpe_helper.installed')
def install_nrpe_helper():
dst_dir = '/usr/local/lib/nagios/plugins/'
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
src = '{}/files/check_zookeeper.py'.format(hookenv.charm_dir())
dst = '{}/check_zookeeper.py'.format(dst_dir)
shutil.copy(src, dst)
os.chmod(dst, 0o755)
set_state('zookeeper.nrpe_helper.installed')
@when('bigtop.available')
@when_not('zookeeper.installed')
def install_zookeeper():
'''
After Bigtop has done the initial setup, trigger a puppet install,
    via our Zookeeper library.
puppet will start the service, as a side effect.
'''
hookenv.status_set('maintenance', 'installing zookeeper')
zookeeper = Zookeeper()
# Prime data changed
data_changed('zkpeer.nodes', zookeeper.read_peers())
data_changed(
'zk.network_interface',
hookenv.config().get('network_interface'))
data_changed(
'zk.autopurge_purge_interval',
hookenv.config().get('autopurge_purge_interval'))
data_changed(
'zk.autopurge_snap_retain_count',
hookenv.config().get('autopurge_snap_retain_count'))
zookeeper.install()
zookeeper.open_ports()
set_state('zookeeper.installed')
set_state('zookeeper.started')
hookenv.status_set('active', 'ready {}'.format(zookeeper.quorum_check()))
# set app version string for juju status output
zoo_version = get_package_version('zookeeper') or 'unknown'
hookenv.application_version_set(zoo_version)
def _restart_zookeeper(msg):
'''
Restart Zookeeper by re-running the puppet scripts.
'''
hookenv.status_set('maintenance', msg)
zookeeper = Zookeeper()
zookeeper.install()
hookenv.status_set('active', 'ready {}'.format(zookeeper.quorum_check()))
@when('zookeeper.started')
def update_network_interface():
'''
Possibly restart zookeeper, due to the network interface that it
should listen on changing.
'''
network_interface = hookenv.config().get('network_interface')
if data_changed('zk.network_interface', network_interface):
_restart_zookeeper('updating network interface')
@when('zookeeper.started')
def update_autopurge_purge_interval():
purge_interval = hookenv.config().get('autopurge_purge_interval')
if data_changed('zk.autopurge_purge_interval', purge_interval):
_restart_zookeeper('updating snapshot purge interval')
@when('zookeeper.started')
def update_autopurge_snap_retain_count():
snap_retain = hookenv.config().get('autopurge_snap_retain_count')
if data_changed('zk.autopurge_snap_retain_count', snap_retain):
_restart_zookeeper('updating number of retained snapshots')
@when('zookeeper.started', 'zookeeper.joined')
def serve_client(client):
config = Zookeeper().dist_config
port = config.port('zookeeper')
rest_port = config.port('zookeeper-rest') # TODO: add zookeeper REST
client.send_port(port, rest_port)
#
# Rolling restart -- helpers and handlers
#
# When we add or remove a Zookeeper peer, Zookeeper needs to perform a
# rolling restart of all of its peers, restarting the Zookeeper
# "leader" last.
#
# The following functions accomplish this. Here's how they all fit together:
#
# (As you read, keep in mind that one node functions as the "leader"
# in the context of Juju, and one node functions as the "leader" in
# the context of Zookeeper; these nodes may or may not be the same.)
#
# 0. Whenever the Zookeeper server starts, it attempts to determine
# whether it is the Zookeeper leader. If so, it sets a flag on the
# Juju peer relation data.
#
# 1. When a node is added or removed from the cluster, the Juju leader
# runs `check_cluster`, and generates a "restart queue" comprising
# nodes in the cluster, with the Zookeeper lead node sorted last in
# the queue. It also sets a nonce, to identify this restart queue
# uniquely, and thus handle the situation where another node is
# added or restarted while we're still reacting to the first node's
# addition or removal. The leader drops the queue and nonce into
# the leadership data as "restart_queue" and "restart_nonce",
# respectively.
#
# 2. When any node detects a leadership.changed.restart_queue event,
# it runs `restart_for_quorum`, which is a noop unless the node's
# private address is the first element of the restart queue. In
# that case, if the node is the Juju leader, it will restart, then
# remove itself from the restart queue, triggering another
# leadership.changed.restart_queue event. If the node isn't the
# Juju leader, it will restart itself, then run `inform_restart`.
#
# 3. `inform_restart` will create a relation data changed event, which
# triggers `update_restart_queue` to run on the leader. This method
# will update the restart_queue, clearing any nodes that have
# restarted for the current nonce, and looping us back to step 2.
#
# 4. Once all the nodes have restarted, we should be in the following state:
#
# * All nodes have an updated Zookeeper server running with the new
# * peer data.
#
# * The Zookeeper leader has restarted last, which should help
# prevent orphaned jobs, per the Zookeeper docs.
#
# * peers still have zkpeer.restarted.<nonce> set on their relation
# data. This is okay, as we will generate a new nonce next time,
# and the data is small.
#
# Edge cases and potential bugs:
#
# 1. Juju leader changes in the middle of a restart: this gets a
# little bit dicey, but it should work. The new leader should run
# `check_cluster_departed`, and start a new restart_queue.
#
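# Editorial sketch, not part of the original charm: the restart-queue
# bookkeeping described in the comment block above, reduced to plain lists.
# The addresses are made up for illustration.
def _example_restart_queue_step(queue, restarted_nodes):
    # Drop every peer that has already confirmed a restart for the current
    # nonce; whoever remains at the head of the queue restarts next, and the
    # Zookeeper leader (sorted last) restarts only once everyone else is done.
    return [node for node in queue if node not in restarted_nodes]
# _example_restart_queue_step(['10.0.0.5', '10.0.0.6', '10.0.0.4'], ['10.0.0.5'])
# -> ['10.0.0.6', '10.0.0.4']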
def _ip_list(nodes):
'''
Given a list of nodes, in the format that our peer relation or
zookeeper lib will typically return node lists in, make a list of
just the ips (stripping ports, if they have been added).
We expect the list we passed in to look something like this:
[('zookeeper/0', '10.0.0.4'), ('zookeeper/1', '10.0.0.5')]
or this:
[('0', '10.0.0.4:2888:4888'), ('1', '10.0.0.5:2888:4888')]
We will return a list in the form:
['10.0.0.4', '10.0.0.5']
'''
return [node[1].split(':')[0] for node in nodes]
@when('zookeeper.started', 'leadership.is_leader', 'zkpeer.joined')
@when_not('zkpeer.departed')
def check_cluster(zkpeer):
'''
    Check up on the state of the cluster. Start a rolling restart if
the peers have changed.
'''
zk = Zookeeper()
if data_changed('zkpeer.nodes', zk.read_peers()):
peers = _ip_list(zk.sort_peers(zkpeer))
nonce = time.time()
hookenv.log('Quorum changed. Restart queue: {}'.format(peers))
leader_set(
restart_queue=json.dumps(peers),
restart_nonce=json.dumps(nonce)
)
@when('zookeeper.started', 'leadership.is_leader', 'zkpeer.joined',
'zkpeer.departed')
def check_cluster_departed(zkpeer, zkpeer_departed):
'''
Wrapper around check_cluster.
Together with check_cluster, implements the following logic:
"Run this when zkpeer.joined and zkpeer departed, or zkpeer.joined
and not zkpeer.departed"
'''
check_cluster(zkpeer)
@when('zookeeper.started', 'leadership.is_leader', 'zkpeer.changed')
def check_cluster_changed(zkpeer):
check_cluster(zkpeer)
zkpeer.dismiss_changed()
@when('leadership.changed.restart_queue', 'zkpeer.joined')
def restart_for_quorum(zkpeer):
'''
If we're the next node in the restart queue, restart, and then
inform the leader that we've restarted. (If we are the leader,
remove ourselves from the queue, and update the leadership data.)
'''
private_address = hookenv.unit_get('private-address')
queue = json.loads(leader_get('restart_queue') or '[]')
if not queue:
# Everything has restarted.
return
if private_address == queue[0]:
# It's our turn to restart.
_restart_zookeeper('rolling restart for quorum update')
if is_state('leadership.is_leader'):
queue = queue[1:]
hookenv.log('Leader updating restart queue: {}'.format(queue))
leader_set(restart_queue=json.dumps(queue))
else:
zkpeer.inform_restart()
@when('leadership.is_leader', 'zkpeer.joined')
def update_restart_queue(zkpeer):
'''
If a Zookeeper node has restarted as part of a rolling restart,
pop it off of the queue.
'''
queue = json.loads(leader_get('restart_queue') or '[]')
if not queue:
return
restarted_nodes = _ip_list(zkpeer.restarted_nodes())
new_queue = [node for node in queue if node not in restarted_nodes]
if new_queue != queue:
        hookenv.log('Leader updating restart queue: {}'.format(new_queue))
leader_set(restart_queue=json.dumps(new_queue))
|
{
"content_hash": "76796df0f1dd8bd6798cec44e4597f46",
"timestamp": "",
"source": "github",
"line_count": 352,
"max_line_length": 77,
"avg_line_length": 34.05397727272727,
"alnum_prop": 0.6443647284558272,
"repo_name": "JunHe77/bigtop",
"id": "fe5de90537df6563fcc667c95dd62214ea7a9004",
"size": "12769",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "bigtop-packages/src/charm/zookeeper/layer-zookeeper/reactive/zookeeper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4822"
},
{
"name": "Dockerfile",
"bytes": "2684"
},
{
"name": "Groovy",
"bytes": "632676"
},
{
"name": "HiveQL",
"bytes": "1658"
},
{
"name": "Java",
"bytes": "676559"
},
{
"name": "Makefile",
"bytes": "57346"
},
{
"name": "PigLatin",
"bytes": "3196"
},
{
"name": "Puppet",
"bytes": "180420"
},
{
"name": "Python",
"bytes": "240919"
},
{
"name": "Roff",
"bytes": "45904"
},
{
"name": "Ruby",
"bytes": "19903"
},
{
"name": "Scala",
"bytes": "85334"
},
{
"name": "Shell",
"bytes": "615830"
},
{
"name": "TSQL",
"bytes": "13064"
},
{
"name": "XSLT",
"bytes": "1323"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.test import TestCase
from cms.api import add_plugin, create_page
from cms.models.placeholdermodel import Placeholder
from cmsplugin_cascade.link.plugin_base import LinkElementMixin
from cmsplugin_cascade.bootstrap3.buttons import BootstrapButtonPlugin
class ButtonWrapperPluginTest(TestCase):
def setUp(self):
self.placeholder = Placeholder.objects.create(slot='test')
def test_plugin_context(self):
glossary = {'link_content': 'Knopf', 'button-type': 'btn-default'}
model_instance = add_plugin(self.placeholder, BootstrapButtonPlugin, 'en', glossary=glossary)
button_plugin = model_instance.get_plugin_class_instance()
context = button_plugin.render({}, model_instance, None)
self.assertIn('instance', context)
self.assertIsInstance(context['instance'], LinkElementMixin)
self.assertListEqual(button_plugin.get_css_classes(model_instance), ['btn', 'btn-default'])
self.assertEqual(button_plugin.get_identifier(model_instance), 'Knopf')
def test_external_link(self):
glossary = {'link_content': 'Django', 'button-type': 'btn-primary',
'link': {'url': 'https://www.djangoproject.com/', 'type': 'exturl'}}
model_instance = add_plugin(self.placeholder, BootstrapButtonPlugin, 'en', glossary=glossary)
html = model_instance.render_plugin({})
self.assertHTMLEqual(html, '<a href="https://www.djangoproject.com/" class="btn btn-primary">Django</a>')
def test_internal_link(self):
page = create_page('HOME', 'testing.html', 'en-us')
glossary = {'link_content': 'HOME', 'button-type': 'btn-success',
'link': {'pk': page.id, 'model': 'cms.Page', 'type': 'cmspage'}, 'target': ''}
model_instance = add_plugin(self.placeholder, BootstrapButtonPlugin, 'en', glossary=glossary)
html = model_instance.render_plugin({})
self.assertHTMLEqual(html, '<a href="/" class="btn btn-success">HOME</a>')
|
{
"content_hash": "4e00d01bc8ae050a41698b8b90894757",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 113,
"avg_line_length": 56.388888888888886,
"alnum_prop": 0.680295566502463,
"repo_name": "aldryn/djangocms-cascade",
"id": "a3ca544b6d3a26233b2ae33c4afc4bfc6cac81f3",
"size": "2054",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/bs3demo/tests/test_button.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3472"
},
{
"name": "JavaScript",
"bytes": "86926"
},
{
"name": "Python",
"bytes": "212332"
},
{
"name": "Shell",
"bytes": "5131"
}
],
"symlink_target": ""
}
|
from os import path
from codecs import open
from setuptools import setup, find_packages
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='claimchain',
version='0.3.1',
packages=["claimchain", "claimchain.crypto", "claimchain.utils"],
license='MIT',
description='Implementation of ClaimChain, a cryptographic data structure',
long_description=long_description,
author=('Bogdan Kulynych (EPFL SPRING Lab), '
'Marios Isaakidis, George Danezis (UCL)'),
author_email=('bogdan.kulynych@epfl.ch, '
'm.isaakidis@cs.ucl.ac.uk, g.danezis@ucl.ac.uk'),
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Topic :: Security :: Cryptography'
],
install_requires=[
'six',
'petlib',
'pyyaml',
'attrs',
'base58',
'statistics',
'defaultcontext',
'hippiehug >= 0.1.3',
'profiled'
],
)
|
{
"content_hash": "eb1e64bd490f70a6f0d57375d0994800",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 79,
"avg_line_length": 26,
"alnum_prop": 0.597985347985348,
"repo_name": "gdanezis/claimchain-core",
"id": "45a368bbbfdcab59070be46be31e63e2bf03fd09",
"size": "1092",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "44643"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('djangocms_googlecalendar', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='googlecalendar',
name='title',
field=models.CharField(blank=True, max_length=32),
),
]
|
{
"content_hash": "5d20d7b4386a2efde7fb214860eecdc7",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 62,
"avg_line_length": 22.27777777777778,
"alnum_prop": 0.6084788029925187,
"repo_name": "c4sc/arividam",
"id": "34f1e095e65ec57ff5d36cdfe2732cd9d467915f",
"size": "473",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "arividam/contrib/djangocms_googlecalendar/migrations/0002_googlecalendar_title.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10607"
},
{
"name": "HTML",
"bytes": "49340"
},
{
"name": "JavaScript",
"bytes": "3142"
},
{
"name": "Python",
"bytes": "84703"
},
{
"name": "Shell",
"bytes": "7248"
}
],
"symlink_target": ""
}
|
import time, uuid, json, calendar
from flask import current_app
from app.forms import *
from app.lib import geo, files
from flask import request, redirect
from flask import session, render_template
from flask import make_response, abort
from flask import jsonify, Response
from rfc6266 import build_header
import mimetypes
from flask.ext.login import login_user, current_user
from flask.ext.login import login_required, login_url
def edit_link():
pass
def approve_link_target(link, target):
target_id = target["id"]
content_disposition = build_header(target['title']).encode('ascii')
headers = {'content-type': target['type'],
'content-disposition': content_disposition}
anon_bucket_cn = current_app.redis_client.get("config:%s:cn" %target["bucket"])
cn_bucket = current_app.redis_client.get("config:%s:bucket" % anon_bucket_cn)
s3_bucket = current_app.default_s3_conn.get_bucket(cn_bucket)
s3_bucket.copy_key(target_id, target["bucket"], target_id, metadata=headers)
s3_bucket = current_app.default_s3_conn.get_bucket(target["bucket"])
s3_bucket.delete_key(target_id)
target["bucket"] = cn_bucket
files.create(target)
target = create_link_target(link, target)
return target
def create_link_target(link, target):
target_id = target["id"]
link_id = link["id"]
target_link = target_id
created = int(time.time())
current_app.redis_client.zadd("link_targets:%s" %link_id, target_link, created)
current_app.redis_client.zadd("target_links:%s" %target_link, link_id, created)
target_url = "%s/link/%s/target/%s/%s" \
%(current_app.config.get('HOSTNAME'), link_id,
target_id, target["title"])
target["url"] = target_url
target["count"] = 0
if current_user.is_authenticated():
target["approved"] = True
else:
target["approved"] = False
# doc_script = """if (ctx._source.containsKey("links")) {
# ctx._source.links += link
# } else {
# ctx._source.links = [link]
# }
# """
# doc = {
# "script" : doc_script,
# "params": {
# "link": link_id
# }
# }
# es.update("files", "file", target_id, doc)
return target
def delete_link_target(link, target):
link_id = link["id"]
target_id = target["id"]
link_target = target_id
current_app.redis_client.zrem("link_targets:%s" %link_id, link_target)
current_app.redis_client.zrem("target_links:%s" %link_target, link_id)
current_app.redis_client.srem("link_uploads:%s" %(link_id), target_id)
# doc_script = """if (ctx._source.containsKey("links")) {
# ctx._source.links.remove(link)
# } else {
# ctx._source.links = []
# }
# """
# doc = {
# "script" : doc_script,
# "params": {
# "link": link_id
# }
# }
# es.update("files", "file", target_id, doc)
|
{
"content_hash": "a0ed88c6a72302bed7bfd6abb1d0b153",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 83,
"avg_line_length": 31.622448979591837,
"alnum_prop": 0.585350112939658,
"repo_name": "abhigd/bigboy",
"id": "c1440ec1aa4d90d2b0a451c2ab9db568227483c2",
"size": "3099",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/lib/link.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "162117"
},
{
"name": "JavaScript",
"bytes": "67902"
},
{
"name": "Python",
"bytes": "71037"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import unittest
import unicodedata
import epitran
class TestHungarianGeneral(unittest.TestCase):
def setUp(self):
self.epi = epitran.Epitran(u'hun-Latn')
def _assert_trans(self, src, tar):
trans = self.epi.transliterate(src)
trans = unicodedata.normalize('NFD', trans)
        src = unicodedata.normalize('NFD', src)
# print('{}\t{}\t{}'.format(trans, tar, zip(trans, tar)))
self.assertEqual(trans, tar)
def test_fiaei(self):
self._assert_trans('fiáéi', 'fiaːeːi')
def test_baratnoje(self):
self._assert_trans('barátnője', 'bɒraːtnøːjɛ')
def test_magyar(self):
self._assert_trans('magyar', 'mɒɟɒr')
    def test_nagyszulo(self):
self._assert_trans('nagyszülő', 'nɒɟsyløː')
|
{
"content_hash": "5b078daea9319391092fb21bbb36ed1b",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 65,
"avg_line_length": 26.9,
"alnum_prop": 0.6344485749690211,
"repo_name": "dmort27/epitran",
"id": "d970bcaf6a2f4b0c9c7152162ad3c749c9d623e3",
"size": "851",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "epitran/test/test_hungarian.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "119801"
},
{
"name": "Shell",
"bytes": "73"
},
{
"name": "TeX",
"bytes": "253"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_mamien_ancient.iff"
result.attribute_template_id = 9
result.stfName("monster_name","mamien")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "b61afb6304b1da9a261e7cddcbdacbfa",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 60,
"avg_line_length": 22.23076923076923,
"alnum_prop": 0.6920415224913494,
"repo_name": "anhstudios/swganh",
"id": "b3bf7c91c4b0a7d9e5e7b60fb0ba087e71915c1e",
"size": "434",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/mobile/shared_mamien_ancient.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
from insights.parsers.networkmanager_config import NetworkManagerConfig
from insights.parsers import networkmanager_config
from insights.tests import context_wrap
import doctest
NETWORKMANAGER_CONF = """
# Configuration file for NetworkManager.
#
# See "man 5 NetworkManager.conf" for details.
#
# The directories /usr/lib/NetworkManager/conf.d/ and /var/run/NetworkManager/conf.d/
# can contain additional configuration snippets installed by packages. These files are
# read before NetworkManager.conf and have thus lowest priority.
# The directory /etc/NetworkManager/conf.d/ can contain additional configuration
# snippets. Those snippets are merged last and overwrite the settings from this main
# file.
#
# The files within one conf.d/ directory are read in asciibetical order.
#
# If /etc/NetworkManager/conf.d/ contains a file with the same name as
# /usr/lib/NetworkManager/conf.d/, the latter file is shadowed and thus ignored.
# Hence, to disable loading a file from /usr/lib/NetworkManager/conf.d/ you can
# put an empty file to /etc with the same name. The same applies with respect
# to the directory /var/run/NetworkManager/conf.d where files in /var/run shadow
# /usr/lib and are themselves shadowed by files under /etc.
#
# If two files define the same key, the one that is read afterwards will overwrite
# the previous one.
[main]
#plugins=ifcfg-rh,ibft
dhcp=dhclient
[logging]
# When debugging NetworkManager, enabling debug logging is of great help.
#
# Logfiles contain no passwords and little sensitive information. But please
# check before posting the file online. You can also personally hand over the
# logfile to a NM developer to treat it confidential. Meet us on #nm on freenode.
# Please post full logfiles except minimal modifications of private data.
#
# You can also change the log-level at runtime via
# $ nmcli general logging level TRACE domains ALL
# However, usually it's cleaner to enable debug logging
# in the configuration and restart NetworkManager so that
# debug logging is enabled from the start.
#
# You will find the logfiles in syslog, for example via
# $ journalctl -u NetworkManager
#
# Note that debug logging of NetworkManager can be quite verbose. Some messages
# might be rate-limited by the logging daemon (see RateLimitIntervalSec, RateLimitBurst
# in man journald.conf).
#
#level=TRACE
#domains=ALL
"""
NETWORKMANAGER_CONF_NOTMATCH = """
# Configuration file for NetworkManager.
#
# See "man 5 NetworkManager.conf" for details.
#
# The directories /usr/lib/NetworkManager/conf.d/ and /var/run/NetworkManager/conf.d/
# can contain additional configuration snippets installed by packages. These files are
# read before NetworkManager.conf and have thus lowest priority.
# The directory /etc/NetworkManager/conf.d/ can contain additional configuration
# snippets. Those snippets are merged last and overwrite the settings from this main
# file.
#
# The files within one conf.d/ directory are read in asciibetical order.
#
# If /etc/NetworkManager/conf.d/ contains a file with the same name as
# /usr/lib/NetworkManager/conf.d/, the latter file is shadowed and thus ignored.
# Hence, to disable loading a file from /usr/lib/NetworkManager/conf.d/ you can
# put an empty file to /etc with the same name. The same applies with respect
# to the directory /var/run/NetworkManager/conf.d where files in /var/run shadow
# /usr/lib and are themselves shadowed by files under /etc.
#
# If two files define the same key, the one that is read afterwards will overwrite
# the previous one.
[logging]
# When debugging NetworkManager, enabling debug logging is of great help.
#
# Logfiles contain no passwords and little sensitive information. But please
# check before posting the file online. You can also personally hand over the
# logfile to a NM developer to treat it confidential. Meet us on #nm on freenode.
# Please post full logfiles except minimal modifications of private data.
#
# You can also change the log-level at runtime via
# $ nmcli general logging level TRACE domains ALL
# However, usually it's cleaner to enable debug logging
# in the configuration and restart NetworkManager so that
# debug logging is enabled from the start.
#
# You will find the logfiles in syslog, for example via
# $ journalctl -u NetworkManager
#
# Note that debug logging of NetworkManager can be quite verbose. Some messages
# might be rate-limited by the logging daemon (see RateLimitIntervalSec, RateLimitBurst
# in man journald.conf).
#
#level=TRACE
domains=ALL
"""
def test_networkmanager_config_match():
result = NetworkManagerConfig(context_wrap(NETWORKMANAGER_CONF))
assert result.get('main', 'dhcp') == 'dhclient'
def test_networkmanager_config_notmatch():
result = NetworkManagerConfig(context_wrap(NETWORKMANAGER_CONF_NOTMATCH))
assert result.has_option('main', 'dhcp') is False
def test_networkmanager_config_doc_examples():
env = {
'networkmanager_config_obj': NetworkManagerConfig(context_wrap(NETWORKMANAGER_CONF)),
}
failed, total = doctest.testmod(networkmanager_config, globs=env)
assert failed == 0
|
{
"content_hash": "6e5e63f35fe3a92d84c67f218f12a9d9",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 93,
"avg_line_length": 40.912,
"alnum_prop": 0.7731716855690262,
"repo_name": "RedHatInsights/insights-core",
"id": "042599855f55ef6e094a62a861bfffdb02b6de7a",
"size": "5114",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "insights/tests/parsers/test_networkmanager_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "220"
},
{
"name": "Python",
"bytes": "8219046"
},
{
"name": "Shell",
"bytes": "1754"
}
],
"symlink_target": ""
}
|
import unittest
from io import StringIO
from ...worksheet import Worksheet
class TestWriteSheetFormatPr(unittest.TestCase):
"""
Test the Worksheet _write_sheet_format_pr() method.
"""
def setUp(self):
self.fh = StringIO()
self.worksheet = Worksheet()
self.worksheet._set_filehandle(self.fh)
def test_write_sheet_format_pr(self):
"""Test the _write_sheet_format_pr() method"""
self.worksheet._write_sheet_format_pr()
exp = """<sheetFormatPr defaultRowHeight="15"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
|
{
"content_hash": "b007e57227a60bb9a754bb0fb65774f2",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 58,
"avg_line_length": 24.44,
"alnum_prop": 0.6350245499181669,
"repo_name": "jmcnamara/XlsxWriter",
"id": "b2f7250e61c3705ac7fa79f8743716ea1318b14b",
"size": "824",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "xlsxwriter/test/worksheet/test_write_sheet_format_pr.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5113"
},
{
"name": "CSS",
"bytes": "16544"
},
{
"name": "HTML",
"bytes": "13100"
},
{
"name": "Makefile",
"bytes": "7748"
},
{
"name": "Perl",
"bytes": "3503"
},
{
"name": "Python",
"bytes": "2807230"
},
{
"name": "Shell",
"bytes": "7964"
}
],
"symlink_target": ""
}
|
import bz2
import time
import sys
import numpy
import numpy.matlib
from splicesites.utils import create_dataset
from esvm.utils import calcroc
from esvm.experiment import crossvalidation
from esvm.mldata import init_datasetfile
def test_gc(gcfilename):
"""
Check the gc content files for conflicting labels
"""
fp = init_datasetfile(gcfilename,'vec')
(examples,labels) = fp.readlines()
print '%d positive and %d negative examples' % (sum(labels>0.0),sum(labels<0.0))
distance = sqr_dist(numpy.matrix(examples),numpy.matrix(examples))
labdist = numpy.matrix(labels).T*numpy.matrix(labels)
#difflab = numpy.where(labdist.A<0,distance,numpy.matlib.ones((len(labels),len(labels))))
contracount = 0
for ix in xrange(len(labels)):
for iy in xrange(ix+1,len(labels)):
if labdist[ix,iy]<0 and distance[ix,iy]<0.01:
contracount += 1
print distance.shape, labdist.shape
#print '%d identical examples with opposing labels' %len(numpy.unique(numpy.where(difflab==0)[0]))
print '%d identical examples with opposing labels' % contracount
def sqr_dist(a,b):
"""Compute the square distance between vectors"""
dot_a = numpy.sum(numpy.multiply(a,a),axis=0).T
dot_b = numpy.sum(numpy.multiply(b,b),axis=0).T
unitvec = numpy.matlib.ones(dot_a.shape)
D = 2.0*a.T*b
for ix,bval in enumerate(dot_b):
D[:,ix] = dot_a - D[:,ix] + numpy.kron(bval,unitvec)
return D
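# Editorial sketch, not part of the original script: sqr_dist on two tiny
# column-vector matrices; the values are chosen only to show the layout of the
# result (rows index columns of a, columns index columns of b).
def _example_sqr_dist():
    a = numpy.matrix([[0.0, 1.0], [0.0, 1.0]])  # columns (0,0) and (1,1)
    b = numpy.matrix([[0.0], [2.0]])            # single column (0,2)
    return sqr_dist(a, b)
    # -> matrix([[4.], [2.]]): squared distances from each column of a to b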
if __name__ == '__main__':
test_gc('C_elegans_don_freq.csv')
test_gc('C_elegans_acc_freq.csv')
|
{
"content_hash": "9281081b703254816cbd8681f1da7f00",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 102,
"avg_line_length": 33.0625,
"alnum_prop": 0.6679269061121613,
"repo_name": "ratschlab/oqtans_tools",
"id": "a519891dac9397c59b39959c2752937412d4e65b",
"size": "1610",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "EasySVM/0.3.3/build/lib.linux-x86_64-2.7/splicesites/test_gc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3581137"
},
{
"name": "C++",
"bytes": "5058934"
},
{
"name": "CSS",
"bytes": "10859"
},
{
"name": "Groff",
"bytes": "373250"
},
{
"name": "HTML",
"bytes": "250786"
},
{
"name": "Java",
"bytes": "383194"
},
{
"name": "M",
"bytes": "12429"
},
{
"name": "Makefile",
"bytes": "231176"
},
{
"name": "Matlab",
"bytes": "936612"
},
{
"name": "Objective-C",
"bytes": "8277"
},
{
"name": "Perl",
"bytes": "1834796"
},
{
"name": "Python",
"bytes": "1394743"
},
{
"name": "R",
"bytes": "101693"
},
{
"name": "Shell",
"bytes": "1372819"
},
{
"name": "TeX",
"bytes": "35508"
}
],
"symlink_target": ""
}
|
from ansible.module_utils.basic import *
import sys, os, hashlib, json, yaml
# Calculate md5 hash and return
def md5Checksum(filePath):
with open(filePath, 'rb') as fh:
m = hashlib.md5()
while True:
data = fh.read(8192)
if not data:
break
m.update(data)
return m.hexdigest()
# Calculate sha256 hash and return
def sha256Checksum(filePath):
with open(filePath, 'rb') as fh:
contents = fh.read()
m = hashlib.sha256(contents)
return m.hexdigest()
# Calculate sha512 hash and return
def sha512Checksum(filePath):
with open(filePath, 'rb') as fh:
contents = fh.read()
m = hashlib.sha512(contents)
return m.hexdigest()
def checkPathExists(filePath):
if os.path.exists(filePath):
return True
else:
return False
def generateChecksumData(checksumType, source):
if checksumType == 'all':
return { 'md5': md5Checksum(source), 'sha256': sha256Checksum(source), 'sha512': sha512Checksum(source) }
elif checksumType == 'md5':
return { 'md5': md5Checksum(source) }
elif checksumType == 'sha256':
return { 'sha256': sha256Checksum(source) }
else:
return { 'sha512': sha512Checksum(source) }
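# Editorial sketch, not part of the original module: the dict shape returned by
# generateChecksumData for checksum_type='all'. The scratch path is a made-up
# example and the digests are whatever hashlib produces for its contents.
def _example_generate_checksum_data():
    sample = '/tmp/_checksum_example.txt'
    with open(sample, 'w') as fh:
        fh.write('demo')
    return generateChecksumData('all', sample)
    # -> {'md5': '...', 'sha256': '...', 'sha512': '...'} (hex digest strings)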
def toJSONFile(outData, outFile):
with open(outFile, 'w') as out:
json.dump(outData, out, indent=4)
def toYAMLFile(outData, outFile):
with open(outFile, 'w') as out:
out.write('---\n')
yaml.safe_dump(outData, out, default_flow_style=False)
def main():
module = AnsibleModule(
argument_spec = dict(
src = dict(required=True, type='str'),
dest_path = dict(required=True, type='str'),
dest_file = dict(required=True, type='str'),
outfile_type = dict(required=False, choices=['json', 'yaml'], default='json'),
checksum_type = dict(required=False, choices=['all', 'md5', 'sha256', 'sha512'], default='all'),
)
)
# Some file/path checks on what is passed to the module
if checkPathExists(module.params['src']) == False:
        message = 'Source file {0} does not exist at the supplied location'.format(module.params['src'])
module.exit_json(changed = False, msg = message)
elif checkPathExists(module.params['dest_path']) == False:
        message = 'Destination path {0} does not exist at the supplied location'.format(module.params['dest_path'])
module.exit_json(changed = False, msg = message)
else:
        # Checking which checksum type has been chosen and generating data var based on checksum type
data = generateChecksumData(module.params['checksum_type'], module.params['src'])
# Generating the outfile name with full path
filename = module.params['dest_path'] + '/' + module.params['dest_file']
if module.params['outfile_type'] == 'yaml':
toYAMLFile(data, filename)
else:
toJSONFile(data, filename)
message = 'Generated checksum file: {0}'.format(filename)
module.exit_json(changed = True, msg = message)
if __name__ == '__main__':
main()
|
{
"content_hash": "e77c62e5308d6daaeb64ea87d3cfd155",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 113,
"avg_line_length": 36.310344827586206,
"alnum_prop": 0.6191832858499525,
"repo_name": "shinesolutions/aem-aws-stack-builder",
"id": "90370d27f9944c65f739261729843cd7dbd89199",
"size": "3179",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "provisioners/ansible/library/generate_checksum_file.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "35951"
},
{
"name": "Makefile",
"bytes": "14926"
},
{
"name": "Python",
"bytes": "76637"
},
{
"name": "Shell",
"bytes": "106645"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('_1327.information_pages.views',
url(r"edit/(?P<title>[\w-]+)/$", 'edit', name='edit'),
url(r"(?P<title>[\w-]+)/$", 'view_information', name='view_information'),
)
|
{
"content_hash": "c87077a689ff69019cec8bde3ab50814",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 74,
"avg_line_length": 32,
"alnum_prop": 0.6805555555555556,
"repo_name": "janno42/1327",
"id": "7c88e08f159a9fb5c3676d963099979904682e8e",
"size": "288",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "_1327/information_pages/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "30602"
},
{
"name": "JavaScript",
"bytes": "53800"
},
{
"name": "Python",
"bytes": "21104"
}
],
"symlink_target": ""
}
|
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2010 Nathanael C. Fritz
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
from sleekxmpp.xmlstream import ElementBase, StanzaBase, ET
from sleekxmpp.xmlstream import register_stanza_plugin
class StreamFeatures(StanzaBase):
"""
"""
name = 'features'
namespace = 'http://etherx.jabber.org/streams'
interfaces = set(('features', 'required', 'optional'))
sub_interfaces = interfaces
plugin_tag_map = {}
plugin_attrib_map = {}
def setup(self, xml):
StanzaBase.setup(self, xml)
self.values = self.values
def get_features(self):
"""
"""
return self.plugins
def set_features(self, value):
"""
"""
pass
def del_features(self):
"""
"""
pass
def get_required(self):
"""
"""
features = self['features']
return [f for n, f in features.items() if f['required']]
def get_optional(self):
"""
"""
features = self['features']
return [f for n, f in features.items() if not f['required']]
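# Hedged usage sketch (not part of the original file). Once feature plugins
# (e.g. STARTTLS, bind, session) have been attached via register_stanza_plugin,
# a received <stream:features/> element can be inspected roughly like this:
#
#   features = StreamFeatures(xml=features_xml)
#   must_negotiate = features.get_required()   # plugins whose ['required'] is set
#   may_negotiate = features.get_optional()    # remaining feature plugins
#
# The concrete plugin classes and their 'required' semantics depend on which
# SleekXMPP feature plugins the client has registered.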
|
{
"content_hash": "04d5c514c86247439916b91a6e0b14c7",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 68,
"avg_line_length": 22.01851851851852,
"alnum_prop": 0.5693860386879731,
"repo_name": "Petraea/jsonbot",
"id": "b800011f80d7e1e642c7006fa87553be6f407fa8",
"size": "1189",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "jsb/contrib/sleekxmpp/stanza/stream_features.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "36140"
},
{
"name": "JavaScript",
"bytes": "42430"
},
{
"name": "Python",
"bytes": "3234788"
},
{
"name": "Shell",
"bytes": "1874"
}
],
"symlink_target": ""
}
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test the ability to configure the $TARCOM construction variable.
"""
import os
import string
import sys
import TestSCons
_python_ = TestSCons._python_
test = TestSCons.TestSCons()
test.write('mytar.py', r"""
import sys
outfile = open(sys.argv[1], 'wb')
infile = open(sys.argv[2], 'rb')
for l in filter(lambda l: l != '/*tar*/\n', infile.readlines()):
outfile.write(l)
sys.exit(0)
""")
test.write('SConstruct', """
env = Environment(TOOLS = ['tar'],
TARCOM = r'%(_python_)s mytar.py $TARGET $SOURCE')
env.Tar('test1.tar', 'test1.in')
""" % locals())
test.write('test1.in', """\
test1.in
/*tar*/
""")
test.run()
test.must_match('test1.tar', "test1.in\n")
test.pass_test()
|
{
"content_hash": "331ca2e9cd2945d5160f5a7e89018d45",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 73,
"avg_line_length": 27.558823529411764,
"alnum_prop": 0.7075773745997865,
"repo_name": "datalogics/scons",
"id": "a39dd22f091a1f57ec9039cc5bb083091f3b3c14",
"size": "1874",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/TAR/TARCOM.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1519"
},
{
"name": "HTML",
"bytes": "43855"
},
{
"name": "Perl",
"bytes": "23384"
},
{
"name": "Python",
"bytes": "4756209"
},
{
"name": "Shell",
"bytes": "13866"
}
],
"symlink_target": ""
}
|
import logging
from gensim.corpora import TextCorpus, Dictionary
from gensim.models.doc2vec import LabeledSentence
class SeriesCorpus(TextCorpus):
def __init__(self, series, vocab=None, stem=False, bigram=None,
labels=True):
""" Create a corpus that returns one row at a time out
of a Pandas Series"""
self.series = series
self.metadata = False
if vocab is not None:
vocab = set(vocab)
self.vocab = vocab
self.labels = labels
self.kwargs = dict(stem=stem, bigram=bigram)
logging.info("Building SeriesCorpus")
self.dictionary = Dictionary()
self.dictionary.add_documents(self.get_texts())
def __iter__(self):
if self.labels:
for index, line in zip(self.series.index, self.series.values):
label = ['SENT_%s' % str(index)]
ls = LabeledSentence(line.split(' '), label)
yield ls
else:
            for index, line in zip(self.series.index, self.series.values):
yield line.split(' ')
def line_iter(self, line):
if self.vocab is not None:
for word in line.split(' '):
if word in self.vocab:
yield word
else:
for word in line.split(' '):
yield word
def get_texts(self):
logging.info("Iterating SeriesCorpus")
for lineno, line in enumerate(self.series.values):
if self.metadata:
yield self.line_iter(line), (lineno,)
else:
yield self.line_iter(line)
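# Hedged usage sketch (not part of the original file), assuming a pandas Series
# of whitespace-tokenized documents:
#
#   import pandas as pd
#   docs = pd.Series(['first document text', 'second document text'])
#   corpus = SeriesCorpus(docs, labels=True)
#   for sentence in corpus:          # LabeledSentence objects tagged SENT_<index>
#       print(sentence.labels, sentence.words)
#
# corpus.dictionary is the gensim Dictionary built from get_texts() in __init__.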
|
{
"content_hash": "d994bb905e55f44fc7934bfc88284db3",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 74,
"avg_line_length": 34.744680851063826,
"alnum_prop": 0.5590936925903246,
"repo_name": "cemoody/Document2Vec",
"id": "43536e31000142b3272ced2c81892dd8936b8605",
"size": "1633",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "document2vec/corpora.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12654"
}
],
"symlink_target": ""
}
|
"""
Copyright 2014 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from functionaltests.api.v1.behaviors import base_behaviors
from functionaltests.api.v1.models import container_models
class ContainerBehaviors(base_behaviors.BaseBehaviors):
def create_container(self, model, extra_headers=None):
"""Create a container from the data in the model.
:param model: The metadata used to create the container
:param extra_headers: Headers used to create the container
:return: A tuple containing the response from the create
and the href to the newly created container
"""
resp = self.client.post('containers', request_model=model,
extra_headers=extra_headers)
returned_data = self.get_json(resp)
container_ref = returned_data.get('container_ref')
if container_ref:
self.created_entities.append(container_ref)
return resp, container_ref
def get_container(self, container_ref, extra_headers=None):
"""Handles getting a single container
:param container_ref: Reference to the container to be retrieved
:param extra_headers: Headers used to get the container
:return: The response of the GET.
"""
resp = self.client.get(
container_ref, response_model_type=container_models.ContainerModel)
return resp
def get_containers(self, limit=10, offset=0, extra_headers=None):
"""Handles getting a list of containers.
:param limit: limits number of returned containers
:param offset: represents how many records to skip before retrieving
the list
:param extra_headers: Extra headers used to retrieve a list of
containers
:return: Returns the response, a list of container models, and
references to the next and previous list of containers.
"""
params = {'limit': limit, 'offset': offset}
resp = self.client.get('containers', params=params)
container_list = self.get_json(resp)
containers, next_ref, prev_ref = self.client.get_list_of_models(
container_list, container_models.ContainerModel)
return resp, containers, next_ref, prev_ref
def delete_container(self, container_ref, extra_headers=None,
expected_fail=False):
"""Handles deleting a containers.
:param container_ref: Reference of the container to be deleted
:param extra_headers: Any additional headers needed.
:param expected_fail: If there is a negative test, this should be
marked true if you are trying to delete a container that does
not exist.
:return: Response of the delete.
"""
resp = self.client.delete(container_ref, extra_headers)
if not expected_fail:
self.created_entities.remove(container_ref)
return resp
def delete_all_created_containers(self):
"""Delete all of the containers that we have created."""
containers_to_delete = [container for container
in self.created_entities]
for container_ref in containers_to_delete:
self.delete_container(container_ref)
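# Hedged usage sketch (not part of the original file): inside a functional test
# these behaviors are typically driven roughly as follows, assuming a populated
# ContainerModel instance named `model` and a constructor signature inherited
# from base_behaviors.BaseBehaviors:
#
#   behaviors = ContainerBehaviors(client)
#   resp, container_ref = behaviors.create_container(model)
#   get_resp = behaviors.get_container(container_ref)
#   behaviors.delete_all_created_containers()   # cleanup in tearDown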
|
{
"content_hash": "8c7eec31d88f8a77e17ed7e5f302a848",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 79,
"avg_line_length": 37.46534653465346,
"alnum_prop": 0.6683403805496829,
"repo_name": "jmvrbanac/barbican",
"id": "ade82fe09141497d462f95ebb9e39da8bfa1ae7a",
"size": "3784",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "functionaltests/api/v1/behaviors/container_behaviors.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1217522"
},
{
"name": "Shell",
"bytes": "19818"
}
],
"symlink_target": ""
}
|
"""Google Cloud Platform library - BigQuery UDF Functionality."""
from __future__ import absolute_import
from __future__ import unicode_literals
from past.builtins import basestring
from builtins import object
class UDF(object):
"""Represents a BigQuery UDF declaration.
"""
@property
def name(self):
return self._name
@property
def imports(self):
return self._imports
@property
def code(self):
return self._code
def __init__(self, name, code, return_type, params=None, language='js', imports=None):
"""Initializes a UDF object from its pieces.
Args:
name: the name of the javascript function
code: function body implementing the logic.
return_type: BigQuery data type of the function return. See supported data types in
the BigQuery docs
params: list of parameter tuples: (name, type)
language: see list of supported languages in the BigQuery docs
imports: a list of GCS paths containing further support code.
"""
if not isinstance(return_type, basestring):
raise TypeError('Argument return_type should be a string. Instead got: ', type(return_type))
if params and not isinstance(params, list):
raise TypeError('Argument params should be a list of parameter names and types')
if imports and not isinstance(imports, list):
raise TypeError('Argument imports should be a list of GCS string paths')
if imports and language != 'js':
raise Exception('Imports are available for Javascript UDFs only')
self._name = name
self._code = code
self._return_type = return_type
self._params = params or []
self._language = language
self._imports = imports or []
self._sql = None
def _expanded_sql(self):
"""Get the expanded BigQuery SQL string of this UDF
Returns
The expanded SQL string of this UDF
"""
if not self._sql:
self._sql = UDF._build_udf(self._name, self._code, self._return_type, self._params,
self._language, self._imports)
return self._sql
def _repr_sql_(self):
return self._expanded_sql()
def __repr__(self):
return 'BigQuery UDF - code:\n%s' % self._code
@staticmethod
def _build_udf(name, code, return_type, params, language, imports):
"""Creates the UDF part of a BigQuery query using its pieces
Args:
name: the name of the javascript function
code: function body implementing the logic.
return_type: BigQuery data type of the function return. See supported data types in
the BigQuery docs
      params: list of parameter tuples: (name, type)
language: see list of supported languages in the BigQuery docs
imports: a list of GCS paths containing further support code.
"""
params = ','.join(['%s %s' % named_param for named_param in params])
imports = ','.join(['library="%s"' % i for i in imports])
if language.lower() == 'sql':
udf = 'CREATE TEMPORARY FUNCTION {name} ({params})\n' + \
'RETURNS {return_type}\n' + \
'AS (\n' + \
'{code}\n' + \
');'
else:
udf = 'CREATE TEMPORARY FUNCTION {name} ({params})\n' +\
'RETURNS {return_type}\n' + \
'LANGUAGE {language}\n' + \
'AS """\n' +\
'{code}\n' +\
'"""\n' +\
'OPTIONS (\n' +\
'{imports}\n' +\
');'
return udf.format(name=name, params=params, return_type=return_type,
language=language, code=code, imports=imports)
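# Hedged usage sketch (not part of the original module): building a small
# Javascript UDF and expanding it into the SQL preamble of a query. The type
# names and code below are illustrative only.
#
#   udf = UDF(name='double_it',
#             code='return {y: x * 2};',
#             return_type='STRUCT<y INT64>',
#             params=[('x', 'INT64')],
#             language='js')
#   preamble = udf._repr_sql_()   # "CREATE TEMPORARY FUNCTION double_it (x INT64) ..."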
|
{
"content_hash": "ddbc8ed9e9553e521ca3e9a2c98b06bd",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 98,
"avg_line_length": 34.63461538461539,
"alnum_prop": 0.6204886174347585,
"repo_name": "googledatalab/pydatalab",
"id": "3e4acf54e766c089ec58e2a9549de39a5eb59799",
"size": "4191",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "google/datalab/bigquery/_udf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "7596"
},
{
"name": "Python",
"bytes": "2424850"
},
{
"name": "Shell",
"bytes": "4312"
},
{
"name": "TypeScript",
"bytes": "105381"
}
],
"symlink_target": ""
}
|
"""Test utils for tensorflow."""
import collections
from collections import OrderedDict
import contextlib
import functools
import gc
import itertools
import math
import os
import random
import re
import tempfile
import threading
import time
import unittest
from absl.testing import parameterized
import numpy as np
import six
from google.protobuf import descriptor_pool
from google.protobuf import text_format
from tensorflow.core.config import flags
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import pywrap_sanitizers
from tensorflow.python import tf2
from tensorflow.python.client import device_lib
from tensorflow.python.client import pywrap_tf_session
from tensorflow.python.client import session
from tensorflow.python.compat.compat import forward_compatibility_horizon
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import tape
from tensorflow.python.framework import _test_metrics_util
from tensorflow.python.framework import config
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import gpu_util
from tensorflow.python.framework import importer
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import tfrt_utils
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import control_flow_util_v2
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_ops # pylint: disable=unused-import
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.platform import _pywrap_stacktrace_handler
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util import _pywrap_util_port
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util import traceback_utils
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.protobuf import compare
from tensorflow.python.util.tf_export import tf_export
# If the below import is made available through the BUILD rule, then this
# function is overridden and will instead return True and cause Tensorflow
# graphs to be compiled with XLA.
def is_xla_enabled():
return False
try:
from tensorflow.python.framework.is_xla_test_true import is_xla_enabled # pylint: disable=g-import-not-at-top, unused-import
except Exception: # pylint: disable=broad-except
pass
# Uses the same mechanism as above to selectively enable/disable MLIR
# compilation.
def is_mlir_bridge_enabled():
return None
try:
from tensorflow.python.framework.is_mlir_bridge_test_false import is_mlir_bridge_enabled # pylint: disable=g-import-not-at-top, unused-import
except ImportError:
try:
from tensorflow.python.framework.is_mlir_bridge_test_true import is_mlir_bridge_enabled # pylint: disable=g-import-not-at-top, unused-import
except ImportError:
pass
def is_asan_enabled():
"""Check if ASAN is enabled."""
return pywrap_sanitizers.is_asan_enabled()
def is_msan_enabled():
"""Check if MSAN is enabled."""
return pywrap_sanitizers.is_msan_enabled()
def is_tsan_enabled():
"""Check if TSAN is enabled."""
return pywrap_sanitizers.is_tsan_enabled()
def is_ubsan_enabled():
"""Check if UBSAN is enabled."""
return pywrap_sanitizers.is_ubsan_enabled()
def _get_object_count_by_type(exclude=()):
return (
collections.Counter([type(obj).__name__ for obj in gc.get_objects()]) -
collections.Counter([type(obj).__name__ for obj in exclude]))
@tf_export("test.gpu_device_name")
def gpu_device_name():
"""Returns the name of a GPU device if available or a empty string.
This method should only be used in tests written with `tf.test.TestCase`.
>>> class MyTest(tf.test.TestCase):
...
... def test_add_on_gpu(self):
... if not tf.test.is_built_with_gpu_support():
... self.skipTest("test is only applicable on GPU")
...
... with tf.device(tf.test.gpu_device_name()):
... self.assertEqual(tf.math.add(1.0, 2.0), 3.0)
"""
for x in device_lib.list_local_devices():
if x.device_type == "GPU":
return compat.as_str(x.name)
return ""
def assert_ops_in_graph(expected_ops, graph):
"""Assert all expected operations are found.
Args:
expected_ops: `dict<string, string>` of op name to op type.
graph: Graph to check.
Returns:
`dict<string, node>` of node name to node.
Raises:
ValueError: If the expected ops are not present in the graph.
"""
actual_ops = {}
gd = graph.as_graph_def()
for node in gd.node:
if node.name in expected_ops:
if expected_ops[node.name] != node.op:
raise ValueError("Expected op for node %s is different. %s vs %s" %
(node.name, expected_ops[node.name], node.op))
actual_ops[node.name] = node
if set(expected_ops.keys()) != set(actual_ops.keys()):
raise ValueError("Not all expected ops are present. Expected %s, found %s" %
(expected_ops.keys(), actual_ops.keys()))
return actual_ops
@tf_export("test.assert_equal_graph_def", v1=[])
def assert_equal_graph_def_v2(expected, actual):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent. This function
ignores randomized attribute values that may appear in V2 checkpoints.
Args:
expected: The `GraphDef` we expected.
actual: The `GraphDef` we have.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
assert_equal_graph_def(actual, expected, checkpoint_v2=True,
hash_table_shared_name=True)
@tf_export(v1=["test.assert_equal_graph_def"])
def assert_equal_graph_def_v1(actual, expected, checkpoint_v2=False,
hash_table_shared_name=False):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent.
Args:
actual: The `GraphDef` we have.
expected: The `GraphDef` we expected.
checkpoint_v2: boolean determining whether to ignore randomized attribute
values that appear in V2 checkpoints.
hash_table_shared_name: boolean determining whether to ignore randomized
shared_names that appear in HashTableV2 op defs.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
assert_equal_graph_def(actual, expected, checkpoint_v2,
hash_table_shared_name)
def assert_equal_graph_def(actual, expected, checkpoint_v2=False,
hash_table_shared_name=False):
if not isinstance(actual, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for actual, got %s" %
type(actual).__name__)
if not isinstance(expected, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for expected, got %s" %
type(expected).__name__)
if checkpoint_v2:
_strip_checkpoint_v2_randomized(actual)
_strip_checkpoint_v2_randomized(expected)
if hash_table_shared_name:
_strip_hash_table_shared_name(actual)
_strip_hash_table_shared_name(expected)
diff = pywrap_tf_session.EqualGraphDefWrapper(actual.SerializeToString(),
expected.SerializeToString())
if diff:
raise AssertionError(compat.as_str(diff))
def assert_meta_graph_protos_equal(tester, a, b):
"""Compares MetaGraphDefs `a` and `b` in unit test class `tester`."""
# Carefully check the collection_defs
tester.assertEqual(set(a.collection_def), set(b.collection_def))
collection_keys = a.collection_def.keys()
for k in collection_keys:
a_value = a.collection_def[k]
b_value = b.collection_def[k]
proto_type = ops.get_collection_proto_type(k)
if proto_type:
a_proto = proto_type()
b_proto = proto_type()
# Number of entries in the collections is the same
tester.assertEqual(
len(a_value.bytes_list.value), len(b_value.bytes_list.value))
for (a_value_item, b_value_item) in zip(a_value.bytes_list.value,
b_value.bytes_list.value):
a_proto.ParseFromString(a_value_item)
b_proto.ParseFromString(b_value_item)
tester.assertProtoEquals(a_proto, b_proto)
else:
tester.assertEquals(a_value, b_value)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("collection_def")
b.ClearField("collection_def")
# Check the graph_defs.
assert_equal_graph_def(a.graph_def, b.graph_def, checkpoint_v2=True)
# Check graph_def versions (ignored by assert_equal_graph_def).
tester.assertProtoEquals(a.graph_def.versions, b.graph_def.versions)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("graph_def")
b.ClearField("graph_def")
tester.assertProtoEquals(a, b)
# Matches attributes named via _SHARDED_SUFFIX in
# tensorflow/python/training/saver.py
_SHARDED_SAVE_OP_PATTERN = "_temp_[0-9a-z]{32}/part"
def _strip_checkpoint_v2_randomized(graph_def):
for node in graph_def.node:
delete_keys = []
for attr_key in node.attr:
attr_tensor_value = node.attr[attr_key].tensor
if attr_tensor_value and len(attr_tensor_value.string_val) == 1:
attr_tensor_string_value = attr_tensor_value.string_val[0]
if (attr_tensor_string_value and
re.match(compat.as_bytes(_SHARDED_SAVE_OP_PATTERN),
attr_tensor_string_value)):
delete_keys.append(attr_key)
for attr_key in delete_keys:
del node.attr[attr_key]
_TABLE_SHARED_NAME_PATTERN = r"hash_table_[0-9a-z\-]+"
def _strip_hash_table_shared_name(graph_def):
for node in graph_def.node:
delete_keys = []
if node.op == "HashTableV2" and "shared_name" in node.attr:
if re.match(compat.as_bytes(_TABLE_SHARED_NAME_PATTERN),
node.attr["shared_name"].s):
delete_keys.append("shared_name")
for attr_key in delete_keys:
del node.attr[attr_key]
def IsGoogleCudaEnabled():
return _pywrap_util_port.IsGoogleCudaEnabled()
def IsBuiltWithROCm():
return _pywrap_util_port.IsBuiltWithROCm()
def IsBuiltWithXLA():
return _pywrap_util_port.IsBuiltWithXLA()
def IsBuiltWithNvcc():
return _pywrap_util_port.IsBuiltWithNvcc()
def GpuSupportsHalfMatMulAndConv():
return _pywrap_util_port.GpuSupportsHalfMatMulAndConv()
def IsMklEnabled():
return (_pywrap_util_port.IsMklEnabled() or
os.getenv("TF_ENABLE_ONEDNN_OPTS", "False").lower() in ["true", "1"])
def InstallStackTraceHandler():
_pywrap_stacktrace_handler.InstallStacktraceHandler()
def NHWCToNCHW(input_tensor):
"""Converts the input from the NHWC format to NCHW.
Args:
input_tensor: a 3-, 4-, or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {3: [0, 2, 1], 4: [0, 3, 1, 2], 5: [0, 4, 1, 2, 3]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
def NHWCToNCHW_VECT_C(input_shape_or_tensor):
"""Transforms the input from the NHWC layout to NCHW_VECT_C layout.
Note: Does not include quantization or type conversion steps, which should
be applied afterwards.
Args:
input_shape_or_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NCHW_VECT_C
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not evenly
divisible by 4.
"""
permutations = {5: [0, 3, 1, 2, 4], 6: [0, 4, 1, 2, 3, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
temp_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if temp_shape[-1] % 4 != 0:
raise ValueError(
"Last dimension of input must be evenly divisible by 4 to convert to "
"NCHW_VECT_C.")
temp_shape[-1] //= 4
temp_shape.append(4)
permutation = permutations[len(temp_shape)]
if is_tensor:
t = array_ops.reshape(input_shape_or_tensor, temp_shape)
return array_ops.transpose(t, permutation)
else:
return [temp_shape[a] for a in permutation]
def NCHW_VECT_CToNHWC(input_shape_or_tensor):
"""Transforms the input from the NCHW_VECT_C layout to NHWC layout.
Note: Does not include de-quantization or type conversion steps, which should
be applied beforehand.
Args:
input_shape_or_tensor: a 5- or 6-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NHWC
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not 4.
"""
permutations = {5: [0, 2, 3, 1, 4], 6: [0, 2, 3, 4, 1, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
input_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if input_shape[-1] != 4:
raise ValueError("Last dimension of NCHW_VECT_C must be 4.")
permutation = permutations[len(input_shape)]
nhwc_shape = [input_shape[a] for a in permutation[:-1]]
nhwc_shape[-1] *= input_shape[-1]
if is_tensor:
t = array_ops.transpose(input_shape_or_tensor, permutation)
return array_ops.reshape(t, nhwc_shape)
else:
return nhwc_shape
def NCHWToNHWC(input_tensor):
"""Converts the input from the NCHW format to NHWC.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {4: [0, 2, 3, 1], 5: [0, 2, 3, 4, 1]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
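# Illustrative sketch (not part of the original file): applied to plain shape
# lists rather than tensors, the layout helpers simply permute the entries, e.g.
#
#   NHWCToNCHW([8, 224, 224, 3])   # -> [8, 3, 224, 224]
#   NCHWToNHWC([8, 3, 224, 224])   # -> [8, 224, 224, 3]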
def skip_if(condition):
"""Skips the decorated function if condition is or evaluates to True.
Args:
condition: Either an expression that can be used in "if not condition"
statement, or a callable whose result should be a boolean.
Returns:
The wrapped function
"""
def real_skip_if(fn):
def wrapper(*args, **kwargs):
if callable(condition):
skip = condition()
else:
skip = condition
if not skip:
return fn(*args, **kwargs)
return wrapper
return real_skip_if
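# Illustrative sketch (not part of the original file): the condition may be a
# plain value or a callable evaluated at call time, e.g.
#
#   @skip_if(lambda: not config.list_physical_devices("GPU"))
#   def testGpuOnlyBehavior(self):
#     ...
#
# When the condition is true, the wrapped test body is silently not executed.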
@contextlib.contextmanager
def skip_if_error(test_obj, error_type, messages=None):
"""Context manager to skip cases not considered failures by the tests.
Note that this does not work if used in setUpClass/tearDownClass.
Usage in setUp/tearDown works fine just like regular test methods.
Args:
test_obj: A test object provided as `self` in the test methods; this object
is usually an instance of `unittest.TestCase`'s subclass and should have
`skipTest` method.
error_type: The error type to skip. Note that if `messages` are given, both
`error_type` and `messages` need to match for the test to be skipped.
messages: Optional, a string or list of strings. If `None`, the test will be
skipped if `error_type` matches what is raised; otherwise, the test is
skipped if any of the `messages` is contained in the message of the error
raised, and `error_type` matches the error raised.
Yields:
Nothing.
"""
if messages:
messages = nest.flatten(messages)
try:
yield
except error_type as e:
if not messages or any(message in str(e) for message in messages):
test_obj.skipTest("Skipping error: {}: {}".format(type(e), str(e)))
else:
raise
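# Illustrative sketch (not part of the original file):
#
#   def testFlakyRemoteCall(self):
#     with skip_if_error(self, errors.UnavailableError, "socket closed"):
#       self._run_remote_call()   # matching error -> test is skipped, not failed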
def enable_c_shapes(fn):
"""No-op. TODO(b/74620627): Remove this."""
return fn
def with_c_shapes(cls):
"""No-op. TODO(b/74620627): Remove this."""
return cls
def enable_control_flow_v2(fn):
"""Decorator for enabling CondV2 and WhileV2 on a test.
Note this enables using CondV2 and WhileV2 after running the test class's
setup/teardown methods.
In addition to this, callers must import the while_v2 module in order to set
the _while_v2 module in control_flow_ops.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
enable_control_flow_v2_old = control_flow_util.ENABLE_CONTROL_FLOW_V2
control_flow_util.ENABLE_CONTROL_FLOW_V2 = True
try:
return fn(*args, **kwargs)
finally:
control_flow_util.ENABLE_CONTROL_FLOW_V2 = enable_control_flow_v2_old
return wrapper
def with_control_flow_v2(cls):
"""Adds methods that call original methods with WhileV2 and CondV2 enabled.
Note this enables CondV2 and WhileV2 in new methods after running the test
class's setup method.
In addition to this, callers must import the while_v2 module in order to set
the _while_v2 module in control_flow_ops.
If a test function has _disable_control_flow_v2 attr set to True (using the
@disable_control_flow_v2 decorator), the v2 function is not generated for it.
Example:
@test_util.with_control_flow_v2
class ControlFlowTest(test.TestCase):
def testEnabledForV2(self):
...
@test_util.disable_control_flow_v2("b/xyzabc")
def testDisabledForV2(self):
...
Generated class:
class ControlFlowTest(test.TestCase):
def testEnabledForV2(self):
...
def testEnabledForV2WithControlFlowV2(self):
// Enable V2 flags.
testEnabledForV2(self)
// Restore V2 flags.
def testDisabledForV2(self):
...
Args:
cls: class to decorate
Returns:
cls with new test methods added
"""
if control_flow_util.ENABLE_CONTROL_FLOW_V2:
return cls
for name, value in cls.__dict__.copy().items():
if (callable(value) and
name.startswith(unittest.TestLoader.testMethodPrefix) and
not getattr(value, "_disable_control_flow_v2", False)):
setattr(cls, name + "WithControlFlowV2", enable_control_flow_v2(value))
return cls
def disable_control_flow_v2(unused_msg):
"""Decorator for a function in a with_control_flow_v2 enabled test class.
Blocks the function from being run with v2 control flow ops.
Args:
unused_msg: Reason for disabling.
Returns:
The wrapped function with _disable_control_flow_v2 attr set to True.
"""
def wrapper(func):
func._disable_control_flow_v2 = True
return func
return wrapper
def enable_output_all_intermediates(fn):
"""Force-enable outputing all intermediates from functional control flow ops.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
output_all_intermediates_old = \
control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE
control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE = True
try:
return fn(*args, **kwargs)
finally:
control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE = \
output_all_intermediates_old
return wrapper
def assert_no_new_pyobjects_executing_eagerly(func=None, warmup_iters=2):
"""Decorator for asserting that no new Python objects persist after a test.
Runs the test multiple times executing eagerly, first as a warmup and then to
let objects accumulate. The warmup helps ignore caches which do not grow as
the test is run repeatedly.
Useful for checking that there are no missing Py_DECREFs in the C exercised by
a bit of Python.
Args:
func: The function to test.
    warmup_iters: The number of warmup iterations, excluded from measuring.
Returns:
The wrapped function performing the test.
"""
def wrap_f(f):
def decorator(self, *args, **kwargs):
"""Warms up, gets object counts, runs the test, checks for new objects."""
with context.eager_mode():
gc.disable()
# Run the test 2 times as warmup, in an attempt to fill up caches, which
# should not grow as the test is run repeatedly below.
#
# TODO(b/117156879): Running warmup twice is black magic; we have seen
# tests that fail with 1 warmup run, and pass with 2, on various
# versions of python2.7.x.
for _ in range(warmup_iters):
f(self, *args, **kwargs)
# Since we aren't in the normal test lifecycle, we need to manually run
# cleanups to clear out their object references.
self.doCleanups()
# Some objects are newly created by _get_object_count_by_type(). So
# create and save as a dummy variable to include it as a baseline.
obj_count_by_type = _get_object_count_by_type()
gc.collect()
# Make sure any registered functions are cleaned up in the C++ runtime.
registered_function_names = context.context().list_function_names()
# unittest.doCleanups adds to self._outcome with each unwound call.
# These objects are retained across gc collections so we exclude them
# from the object count calculation.
obj_count_by_type = _get_object_count_by_type(
exclude=gc.get_referents(self._outcome.errors,
self._outcome.skipped))
if ops.has_default_graph():
collection_sizes_before = {
collection: len(ops.get_collection(collection))
for collection in ops.get_default_graph().collections
}
for _ in range(3):
f(self, *args, **kwargs)
# Since we aren't in the normal test lifecycle, we need to manually run
# cleanups to clear out their object references.
self.doCleanups()
# Note that gc.get_objects misses anything that isn't subject to garbage
# collection (C types). Collections are a common source of leaks, so we
# test for collection sizes explicitly.
if ops.has_default_graph():
for collection_key in ops.get_default_graph().collections:
collection = ops.get_collection(collection_key)
size_before = collection_sizes_before.get(collection_key, 0)
if len(collection) > size_before:
raise AssertionError(
("Collection %s increased in size from "
"%d to %d (current items %s).") %
(collection_key, size_before, len(collection), collection))
# Make sure our collection checks don't show up as leaked memory by
# removing references to temporary variables.
del collection
del collection_key
del size_before
del collection_sizes_before
gc.collect()
# There should be no new Python objects hanging around.
obj_count_by_type = (
_get_object_count_by_type(
exclude=gc.get_referents(self._outcome.errors,
self._outcome.skipped)) -
obj_count_by_type)
# There should be no newly registered functions hanging around.
leftover_functions = (
context.context().list_function_names() - registered_function_names)
assert not leftover_functions, (
"The following functions were newly created: %s" %
leftover_functions)
# In some cases (specifically on MacOS), new_count is somehow
# smaller than previous_count.
# Using plain assert because not all classes using this decorator
# have assertLessEqual
assert not obj_count_by_type, (
"The following objects were newly created: %s" %
str(obj_count_by_type))
gc.enable()
return decorator
if func is None:
return wrap_f
else:
return wrap_f(func)
def assert_no_new_tensors(f):
"""Decorator for asserting that no new Tensors persist after a test.
Mainly useful for checking that code using the Python C API has correctly
manipulated reference counts.
Clears the caches that it knows about, runs the garbage collector, then checks
that there are no Tensor or Tensor-like objects still around. This includes
Tensors to which something still has a reference (e.g. from missing
Py_DECREFs) and uncollectable cycles (i.e. Python reference cycles where one
of the objects has __del__ defined).
Args:
f: The test case to run.
Returns:
The decorated test case.
"""
def decorator(self, **kwargs):
"""Finds existing Tensors, runs the test, checks for new Tensors."""
def _is_tensorflow_object(obj):
try:
return isinstance(obj,
(ops.Tensor, variables.Variable,
tensor_shape.Dimension, tensor_shape.TensorShape))
except (ReferenceError, AttributeError):
# If the object no longer exists, we don't care about it.
return False
tensors_before = set(
id(obj) for obj in gc.get_objects() if _is_tensorflow_object(obj))
outside_executed_eagerly = context.executing_eagerly()
# Run the test in a new graph so that collections get cleared when it's
# done, but inherit the graph key so optimizers behave.
outside_graph_key = ops.get_default_graph()._graph_key
with ops.Graph().as_default():
ops.get_default_graph()._graph_key = outside_graph_key
if outside_executed_eagerly:
with context.eager_mode():
result = f(self, **kwargs)
else:
result = f(self, **kwargs)
# Make an effort to clear caches, which would otherwise look like leaked
# Tensors.
context.context()._clear_caches() # pylint: disable=protected-access
gc.collect()
tensors_after = [
obj for obj in gc.get_objects()
if _is_tensorflow_object(obj) and id(obj) not in tensors_before
]
if tensors_after:
raise AssertionError(("%d Tensors not deallocated after test: %s" % (
len(tensors_after),
str(tensors_after),
)))
return result
return decorator
def _find_reference_cycle(objects, idx):
def get_ignore_reason(obj, denylist):
"""Tests whether an object should be omitted from the dependency graph."""
if len(denylist) > 100:
return "<depth limit>"
if tf_inspect.isframe(obj):
if "test_util.py" in tf_inspect.getframeinfo(obj)[0]:
return "<test code>"
for b in denylist:
if b is obj:
return "<test code>"
if obj is denylist:
return "<test code>"
return None
# Note: this function is meant to help with diagnostics. Its output is purely
# a human-readable representation, so you may freely modify it to suit your
# needs.
def describe(obj, denylist, leaves_only=False):
"""Returns a custom human-readable summary of obj.
Args:
obj: the value to describe.
denylist: same as denylist in get_ignore_reason.
leaves_only: boolean flag used when calling describe recursively. Useful
for summarizing collections.
"""
if get_ignore_reason(obj, denylist):
return "{}{}".format(get_ignore_reason(obj, denylist), type(obj))
if tf_inspect.isframe(obj):
return "frame: {}".format(tf_inspect.getframeinfo(obj))
elif tf_inspect.ismodule(obj):
return "module: {}".format(obj.__name__)
else:
if leaves_only:
return "{}, {}".format(type(obj), id(obj))
elif isinstance(obj, list):
return "list({}): {}".format(
id(obj), [describe(e, denylist, leaves_only=True) for e in obj])
elif isinstance(obj, tuple):
return "tuple({}): {}".format(
id(obj), [describe(e, denylist, leaves_only=True) for e in obj])
elif isinstance(obj, dict):
return "dict({}): {} keys".format(id(obj), len(obj.keys()))
elif tf_inspect.isfunction(obj):
return "function({}) {}; globals ID: {}".format(
id(obj), obj.__name__, id(obj.__globals__))
else:
return "{}, {}".format(type(obj), id(obj))
def build_ref_graph(obj, graph, reprs, denylist):
"""Builds a reference graph as <referrer> -> <list of referents>.
Args:
obj: The object to start from. The graph will be built by recursively
adding its referrers.
graph: Dict holding the graph to be built. To avoid creating extra
references, the graph holds object IDs rather than actual objects.
reprs: Auxiliary structure that maps object IDs to their human-readable
description.
denylist: List of objects to ignore.
"""
referrers = gc.get_referrers(obj)
denylist = denylist + (referrers,)
obj_id = id(obj)
for r in referrers:
if get_ignore_reason(r, denylist) is None:
r_id = id(r)
if r_id not in graph:
graph[r_id] = []
if obj_id not in graph[r_id]:
graph[r_id].append(obj_id)
build_ref_graph(r, graph, reprs, denylist)
reprs[r_id] = describe(r, denylist)
def find_cycle(el, graph, reprs, path):
"""Finds and prints a single cycle in the dependency graph."""
if el not in graph:
return
for r in graph[el]:
if r in path:
logging.error("Reference cycle sample:")
for p in path + (r,):
logging.error(reprs.get(p, "unknown object " + str(p)))
return True
else:
if find_cycle(r, graph, reprs, path + (r,)):
return True
return False
obj = objects[idx]
graph = {} # referrer ID -> object ID
reprs = {} # object ID -> description
build_ref_graph(obj, graph, reprs, (objects, graph, reprs, get_ignore_reason,
describe, build_ref_graph, find_cycle))
for k in graph:
if find_cycle(k, graph, reprs, ()):
return True
return False
def assert_no_garbage_created(f):
"""Test method decorator to assert that no garbage has been created.
Note that this decorator sets DEBUG_SAVEALL, which in some Python interpreters
cannot be un-set (i.e. will disable garbage collection for any other unit
tests in the same file/shard).
Args:
f: The function to decorate.
Returns:
The decorated function.
"""
def decorator(self, **kwargs):
"""Sets DEBUG_SAVEALL, runs the test, and checks for new garbage."""
# Force-load `distribution_strategy_context` to prevent GC at
# test time when using eager. Remove once b/117329403 is resolved.
tape.distribution_strategy_context.get_strategy()
gc.disable()
previous_debug_flags = gc.get_debug()
gc.set_debug(gc.DEBUG_SAVEALL)
gc.collect()
previous_garbage = len(gc.garbage)
result = f(self, **kwargs)
gc.collect()
new_garbage = len(gc.garbage)
if new_garbage > previous_garbage:
for i, obj in enumerate(gc.garbage[previous_garbage:]):
# Known false positive for ast.fix_missing_locations.
if getattr(obj, "__module__", "") == "ast":
new_garbage -= 3
if new_garbage > previous_garbage:
logging.error(
"The decorated test created work for Python's garbage collector, "
"likely due to a reference cycle. New objects in cycle(s):")
for i, obj in enumerate(gc.garbage[previous_garbage:]):
try:
logging.error("Object %d of %d", i,
len(gc.garbage) - previous_garbage)
def _safe_object_str(obj):
return "<%s %d>" % (obj.__class__.__name__, id(obj))
logging.error(" Object type: %s", _safe_object_str(obj))
logging.error(
" Referrer types: %s", ", ".join(
[_safe_object_str(ref) for ref in gc.get_referrers(obj)]))
logging.error(
" Referent types: %s", ", ".join(
[_safe_object_str(ref) for ref in gc.get_referents(obj)]))
logging.error(" Object attribute names: %s", dir(obj))
logging.error(" Object __str__:")
logging.error(obj)
logging.error(" Object __repr__:")
logging.error(repr(obj))
except Exception: # pylint: disable=broad-except
logging.error("(Exception while printing object)")
# When garbage is created, this call can help identify reference cycles,
# which are typically the cause of such garbage.
if new_garbage > previous_garbage:
for i in range(previous_garbage, new_garbage):
if _find_reference_cycle(gc.garbage, i):
break
# This will fail if any garbage has been created, typically because of a
# reference cycle.
self.assertEqual(previous_garbage, new_garbage)
# TODO(allenl): Figure out why this debug flag reset doesn't work. It would
# be nice to be able to decorate arbitrary tests in a large test suite and
# not hold on to every object in other tests.
gc.set_debug(previous_debug_flags)
gc.enable()
return result
return decorator
def _combine_named_parameters(**kwargs):
"""Generate combinations based on its keyword arguments.
Two sets of returned combinations can be concatenated using +. Their product
can be computed using `times()`.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]` or
`option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
sort_by_key = lambda k: k[0]
combinations = []
for key, values in sorted(kwargs.items(), key=sort_by_key):
if not isinstance(values, list):
values = [values]
combinations.append([(key, value) for value in values])
return [OrderedDict(result) for result in itertools.product(*combinations)]
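# Illustrative sketch (not part of the original file): keys are sorted and the
# cartesian product of the value lists is taken, e.g.
#
#   _combine_named_parameters(mode=["graph", "eager"], use_gpu=True)
#   # -> [OrderedDict([("mode", "graph"), ("use_gpu", True)]),
#   #     OrderedDict([("mode", "eager"), ("use_gpu", True)])]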
def generate_combinations_with_testcase_name(**kwargs):
"""Generate combinations based on its keyword arguments using combine().
This function calls combine() and appends a testcase name to the list of
  dictionaries returned. The 'testcase_name' key is required for named
parameterized tests.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]` or
`option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
combinations = _combine_named_parameters(**kwargs)
named_combinations = []
for combination in combinations:
assert isinstance(combination, OrderedDict)
name = "".join([
"_{}_{}".format("".join(filter(str.isalnum, key)),
"".join(filter(str.isalnum, str(value))))
for key, value in combination.items()
])
named_combinations.append(
OrderedDict(
list(combination.items()) +
[("testcase_name", "_test{}".format(name))]))
return named_combinations
def run_all_in_graph_and_eager_modes(cls):
"""Execute all test methods in the given class with and without eager."""
base_decorator = run_in_graph_and_eager_modes
for name in dir(cls):
if (not name.startswith(unittest.TestLoader.testMethodPrefix) or
name.startswith("testSkipEager") or
name.startswith("test_skip_eager") or
name == "test_session"):
continue
value = getattr(cls, name, None)
if callable(value):
setattr(cls, name, base_decorator(value))
return cls
def enable_eager_op_as_function(fn):
"""Decorator for enabling eager_op_as_function on a test.
This function returns a decorator intended to be applied to test methods in
a `tf.test.TestCase` class. Doing so will enable run_eager_op_as_function,
reset the context, execute the test, then reset the context to the state
it was in prior to this test.
Example:
class MyTest(test.TestCase):
@enable_eager_op_as_function
def testFoo(self):
...
Args:
fn: the function to be wrapped.
Returns:
The wrapped function.
"""
def wrapper(*args, **kwargs):
# If `run_eager_op_as_function` is already enabled do nothing.
if context.run_eager_op_as_function_enabled():
return fn(*args, **kwargs)
context.enable_run_eager_op_as_function()
try:
return fn(*args, **kwargs)
finally:
context.disable_run_eager_op_as_function()
return wrapper
@tf_export("test.with_eager_op_as_function")
def with_eager_op_as_function(cls=None, only_as_function=False):
"""Adds methods that call original methods with eager_op_as_function enabled.
Example:
@test_util.with_eager_op_as_function
class SessionTest(test.TestCase):
def testEnabledForEagerOpAsFunction(self):
...
@disable_eager_op_as_function("b/xyzabc")
def testDisabledForEagerOpAsFunction(self):
...
Generated class:
class SessionTest(test.TestCase):
def testEnabledForEagerOpAsFunction(self):
...
def testEnabledForEagerOpAsFunctionWithEagerOpAsFunctionEnabled(self):
// Enable run_eager_op_as_function
// Reset context
testEnabledForEagerOpAsFunction(self)
// Disable run_eager_op_as_function
// Reset context
def testDisabledForEagerOpAsFunction(self):
...
Args:
cls: class to decorate.
only_as_function: whether to run all the tests in the TestCase in eager mode
and in eager_op_as_function mode. By default it will run all tests in both
modes. When `only_as_function=True` tests will not be run in eager mode.
Returns:
cls with new test methods added.
"""
def decorator(cls):
if context.run_eager_op_as_function_enabled():
return cls
for name, value in cls.__dict__.copy().items():
if (callable(value) and
(name.startswith(unittest.TestLoader.testMethodPrefix) or
name.startswith("benchmark")) and
not getattr(value, "_disable_eager_op_as_function", False)):
setattr(cls, name + "WithEagerOpAsFunctionEnabled",
enable_eager_op_as_function(value))
if only_as_function:
delattr(cls, name)
return cls
if cls is not None:
return decorator(cls)
return decorator
def enable_graph_building_optimization(fn):
"""Decorator for enabling graph_building_optimization on a test.
This function returns a decorator intended to be applied to test methods in
a `tf.test.TestCase` class. Doing so will enable graph_building_optimization,
execute the test, then reset the feature flag to its default value.
Example:
class MyTest(test.TestCase):
@enable_graph_building_optimization
def testFoo(self):
...
Args:
fn: the function to be wrapped.
Returns:
The wrapped function.
"""
def wrapper(*args, **kwargs):
# If `graph_building_optimization` is already enabled do nothing.
if flags.config().graph_building_optimization.value():
return fn(*args, **kwargs)
flags.config().graph_building_optimization.reset(True)
try:
return fn(*args, **kwargs)
finally:
flags.config().graph_building_optimization.reset(False)
return wrapper
def add_graph_building_optimization_tests(cls=None):
"""Adds methods with graph_building_optimization enabled to the test suite.
Example:
@test_util.add_graph_building_optimization_tests
class FooTest(test.TestCase):
def testBar(self):
...
Generated class:
class FooTest(test.TestCase):
def testBar(self):
...
def testBarWithGraphBuildingOptimization(self):
// Enable graph_building_optimization
testBar(self)
// Disable graph_building_optimization
Args:
cls: class to decorate.
Returns:
cls with new test methods added.
"""
def decorator(cls):
if flags.config().graph_building_optimization.value():
return cls
for name, value in cls.__dict__.copy().items():
if (callable(value) and
(name.startswith(unittest.TestLoader.testMethodPrefix) or
name.startswith("benchmark"))):
setattr(cls, name + "WithGraphBuildingOptimization",
enable_graph_building_optimization(value))
return cls
if cls is not None:
return decorator(cls)
return decorator
def disable_eager_op_as_function(unused_msg):
"""Decorator for a function in a with_eager_op_as_function enabled test class.
Blocks the function from being run with eager_op_as_function enabled.
Args:
unused_msg: Reason for disabling.
Returns:
The wrapped function with _disable_eager_op_as_function attr set to True.
"""
def wrapper(func):
func._disable_eager_op_as_function = True
return func
# Once the environment flag is flipped and `run_eager_op_as_function_enabled`
# is True by default, the `with_eager_op_as_function` wrapper will not add a
# separate test for eager_op_as_function execution. In that case the test with
# the original name needs to be disabled.
if context.run_eager_op_as_function_enabled():
return _disable_test(execute_func=False)
return wrapper
def set_xla_env_flag(func=None, flag=""):
"""Decorator for setting XLA_FLAGS prior to running a test.
This function returns a decorator intended to be applied to test methods in
a `tf.test.TestCase` class. Doing so will allow users to set any xla flags
exposed via the XLA_FLAGS environment variable, execute the test, then reset
the XLA_FLAGS to the state it was in prior to this test.
Example:
class MyTest(test.TestCase):
@set_xla_env_flag(flag='--xla_gpu_enable_fast_min_max=false')
def testFoo(self):
...
Args:
func: The function to be wrapped.
flag: The xla flag to be set in the XLA_FLAGS env variable.
Returns:
The wrapped function.
"""
def decorator(f):
@functools.wraps(f)
def decorated(*args, **kwargs):
original_xla_flags = os.environ.get("XLA_FLAGS")
new_xla_flags = flag
if original_xla_flags:
new_xla_flags = new_xla_flags + " " + original_xla_flags
os.environ["XLA_FLAGS"] = new_xla_flags
try:
return f(*args, **kwargs)
finally:
if original_xla_flags is None:
del os.environ["XLA_FLAGS"]
else:
os.environ["XLA_FLAGS"] = original_xla_flags
return decorated
if func is not None:
return decorator(func)
return decorator
def build_as_function_and_v1_graph(func=None):
"""Run a test case in v1 graph mode and inside tf.function in eager mode.
  WARNING: This decorator can only be used in test cases that statically check
  the generated graph. Attempting to evaluate graph or function results via
session.run() or self.evaluate() will fail.
WARNING: This decorator can only be used for test cases that inherit from
absl.testing.parameterized.TestCase.
Args:
func: Test case function to be decorated.
Returns:
Decorated test case function.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError(
"`run_in_graph_mode_and_function` only supports test methods.")
@parameterized.named_parameters(("_v1_graph", "v1_graph"),
("_function", "function"))
@functools.wraps(f)
def decorated(self, run_mode, *args, **kwargs):
if run_mode == "v1_graph":
with ops.Graph().as_default():
f(self, *args, **kwargs)
elif run_mode == "function":
@def_function.function
def function_in_eager():
f(self, *args, **kwargs)
# Create a new graph for the eagerly executed version of this test for
# better isolation.
graph_for_eager_test = ops.Graph()
with graph_for_eager_test.as_default(), context.eager_mode():
function_in_eager()
ops.dismantle_graph(graph_for_eager_test)
else:
raise ValueError("Unknown run mode %s" % run_mode)
return decorated
if func is not None:
return decorator(func)
return decorator
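# Illustrative sketch (not part of the original file): the decorated test is
# parameterized into a "_v1_graph" and a "_function" variant, e.g.
#
#   class GraphStructureTest(parameterized.TestCase, test.TestCase):
#
#     @build_as_function_and_v1_graph
#     def testBuildsExpectedOps(self):
#       ...  # only inspect the built graph; do not call self.evaluate()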
def run_in_async_and_sync_mode(f):
"""Execute the test in async mode and sync mode."""
@parameterized.named_parameters([("Async", True), ("", False)])
@functools.wraps(f)
def decorator(self, async_mode, *args, **kwargs):
if async_mode:
with context.execution_mode(context.ASYNC):
f(self, *args, **kwargs)
else:
with context.execution_mode(context.SYNC):
f(self, *args, **kwargs)
return decorator
def run_in_graph_and_eager_modes(func=None,
config=None,
use_gpu=True,
assert_no_eager_garbage=False):
"""Execute the decorated test with and without enabling eager execution.
This function returns a decorator intended to be applied to test methods in
a `tf.test.TestCase` class. Doing so will cause the contents of the test
method to be executed twice - once normally, and once with eager execution
enabled. This allows unittests to confirm the equivalence between eager
and graph execution (see `tf.compat.v1.enable_eager_execution`).
For example, consider the following unittest:
```python
class MyTests(tf.test.TestCase):
@run_in_graph_and_eager_modes
def test_foo(self):
x = tf.constant([1, 2])
y = tf.constant([3, 4])
z = tf.add(x, y)
self.assertAllEqual([4, 6], self.evaluate(z))
if __name__ == "__main__":
tf.test.main()
```
This test validates that `tf.add()` has the same behavior when computed with
eager execution enabled as it does when constructing a TensorFlow graph and
executing the `z` tensor in a session.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
config: An optional config_pb2.ConfigProto to use to configure the session
when executing graphs.
use_gpu: If True, attempt to run as many operations as possible on GPU.
assert_no_eager_garbage: If True, sets DEBUG_SAVEALL on the garbage
collector and asserts that no extra garbage has been created when running
the test with eager execution enabled. This will fail if there are
reference cycles (e.g. a = []; a.append(a)). Off by default because some
tests may create garbage for legitimate reasons (e.g. they define a class
which inherits from `object`), and because DEBUG_SAVEALL is sticky in some
Python interpreters (meaning that tests which rely on objects being
collected elsewhere in the unit test file will not work). Additionally,
checks that nothing still has a reference to Tensors that the test
allocated.
Returns:
Returns a decorator that will run the decorated test method twice:
once by constructing and executing a graph in a session and once with
eager execution enabled.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError(
"`run_in_graph_and_eager_modes` only supports test methods. "
"Did you mean to use `run_all_in_graph_and_eager_modes`?")
def decorated(self, *args, **kwargs):
try:
with context.graph_mode():
with self.test_session(use_gpu=use_gpu, config=config):
f(self, *args, **kwargs)
except unittest.case.SkipTest:
pass
def run_eagerly(self, **kwargs):
if not use_gpu:
with ops.device("/device:CPU:0"):
f(self, *args, **kwargs)
else:
f(self, *args, **kwargs)
if assert_no_eager_garbage:
ops.reset_default_graph()
run_eagerly = assert_no_new_tensors(
assert_no_garbage_created(run_eagerly))
# This decorator runs the wrapped test twice.
# Reset the test environment between runs.
self.tearDown()
self._tempdir = None
# Create a new graph for the eagerly executed version of this test for
# better isolation.
graph_for_eager_test = ops.Graph()
with graph_for_eager_test.as_default(), context.eager_mode():
self.setUp()
run_eagerly(self, **kwargs)
ops.dismantle_graph(graph_for_eager_test)
return tf_decorator.make_decorator(f, decorated)
if func is not None:
return decorator(func)
return decorator
def py_func_if_in_function(f):
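  """Wraps `f` in a `script_ops.py_func` when called inside a `tf.function`.
  When the decorated callable is invoked outside of a function (i.e. eagerly),
  `f` runs directly. When invoked while a `tf.function` is being built, any
  Tensor or Variable arguments are routed through `script_ops.py_func` so that
  `f` (typically a test assertion) executes eagerly at graph run time.
  """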
def decorated(*args, **kwds):
if not ops.inside_function():
return f(*args, **kwds)
tensor_args = []
tensor_indices = []
for i, arg in enumerate(args):
if isinstance(arg, (ops.Tensor, variables.Variable)):
tensor_args.append(arg)
tensor_indices.append(i)
def inner_f(*inner_tensor_args):
my_args = list(args)
for i, n in zip(tensor_indices, inner_tensor_args):
my_args[i] = n
return f(*my_args, **kwds)
return script_ops.py_func(inner_f, tensor_args, [])
return tf_decorator.make_decorator(f, decorated)
def also_run_as_tf_function(f):
"""Runs the decorated test twice--once as is, once inside a tf.function.
This allows you to run a test both in eager execution and inside a
tf.function, exercising the two execution modes supported in tf 2.0. The test
assertions are automatically done inside tf.py_funcs, and tf.function ensures
that they run in the proper order and with the proper side effects.
Currently variable creation is not supported in tests annotated with this
decorator since it's tricky to ensure the variable doesn't get repeatedly
created when retracing the tf.function.
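  For example, a minimal sketch (the test class and values below are
  illustrative placeholders, not part of this module):
  ```python
  class MyTests(tf.test.TestCase):
    @also_run_as_tf_function
    def test_add(self):
      z = tf.add(tf.constant([1, 2]), tf.constant([3, 4]))
      self.assertAllEqual([4, 6], z)
  ```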
Args:
f: the test method to be decorated
Returns:
The decorated test method, which will run both in eager and inside a
tf.function.
"""
def decorated(*args, **kwds):
def bound_f():
f(*args, **kwds)
with context.eager_mode():
# Running in eager mode
bound_f()
# Running as TF function
# TODO(b/121143941): Remove the autograph override.
def_function.function(bound_f, autograph=False)()
return decorated
def deprecated_graph_mode_only(func=None):
"""Execute the decorated test in graph mode.
This function returns a decorator intended to be applied to tests that are not
compatible with eager mode. When this decorator is applied, the test body will
be run in an environment where API calls construct graphs instead of executing
eagerly.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
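  For example, a hypothetical v1-style test that depends on placeholders (and
  therefore on graph construction) could be written as:
  ```python
  class MyGraphTests(tf.test.TestCase):
    @deprecated_graph_mode_only
    def test_placeholder(self):
      x = tf.compat.v1.placeholder(tf.float32, shape=[2])
      y = x * 2.0
      with self.cached_session() as sess:
        self.assertAllClose([2.0, 4.0],
                            sess.run(y, feed_dict={x: [1.0, 2.0]}))
  ```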
Args:
func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will run the decorated test method in graph mode.
"""
def decorator(f):
if tf_inspect.isclass(f):
setup = f.__dict__.get("setUp")
if setup is not None:
setattr(f, "setUp", decorator(setup))
for name, value in f.__dict__.copy().items():
if (callable(value) and
name.startswith(unittest.TestLoader.testMethodPrefix)):
setattr(f, name, decorator(value))
return f
def decorated(self, *args, **kwargs):
if context.executing_eagerly():
with context.graph_mode():
return f(self, *args, **kwargs)
else:
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
run_deprecated_v1 = deprecated_graph_mode_only
def run_all_in_deprecated_graph_mode_only(cls):
"""Execute all tests in a class in graph mode."""
base_decorator = deprecated_graph_mode_only
for name in dir(cls):
if (not name.startswith(unittest.TestLoader.testMethodPrefix) or
name == "test_session"):
continue
value = getattr(cls, name, None)
if callable(value):
setattr(cls, name, base_decorator(value))
return cls
def run_v1_only(reason, func=None):
"""Execute the decorated test only if running in v1 mode.
This function is intended to be applied to tests that exercise v1 only
functionality. If the test is run in v2 mode it will simply be skipped.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
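  For example (the names below are illustrative only):
  ```python
  class MyV1Tests(tf.test.TestCase):
    @run_v1_only("Exercises tf.compat.v1.Session directly.")
    def test_session_run(self):
      with tf.compat.v1.Session() as sess:
        self.assertEqual(3, sess.run(tf.constant(3)))
  ```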
Args:
reason: string giving a reason for limiting the test to v1 only.
func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
if not isinstance(reason, str):
raise ValueError("'reason' should be string, got {}".format(type(reason)))
def decorator(f):
if tf_inspect.isclass(f):
# To skip an entire test suite class, we only decorate the setUp method
# to skip all tests. There are cases when setUp is not defined (not
# overridden in subclasses of TestCase, so not available in f.__dict__
# below). For those cases, we walk the method resolution order list and
# pick the first setUp method we find (usually this should be the one in
# the parent class since that's the TestCase class).
for cls in type.mro(f):
setup = cls.__dict__.get("setUp")
if setup is not None:
setattr(f, "setUp", decorator(setup))
break
return f
else:
# If f is just a function, just create a decorator for it and return it
def decorated(self, *args, **kwargs):
if tf2.enabled():
self.skipTest(reason)
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_v2_only(func=None):
"""Execute the decorated test only if running in v2 mode.
This function is intended to be applied to tests that exercise v2 only
functionality. If the test is run in v1 mode it will simply be skipped.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
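  For example (the names below are illustrative only):
  ```python
  class MyV2Tests(tf.test.TestCase):
    @run_v2_only
    def test_eager_numpy(self):
      self.assertEqual(3, tf.constant(3).numpy())
  ```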
Args:
func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_v2_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not tf2.enabled():
self.skipTest("Test is only compatible with v2")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_gpu_only(func=None):
"""Execute the decorated test only if a GPU is available.
This function is intended to be applied to tests that require the presence
of a GPU. If a GPU is absent, it will simply be skipped.
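  For example (the names below are illustrative only):
  ```python
  class MyGpuTests(tf.test.TestCase):
    @run_gpu_only
    def test_matmul_on_gpu(self):
      with tf.device("/device:GPU:0"):
        product = tf.matmul(tf.ones([2, 2]), tf.ones([2, 2]))
      self.assertAllEqual([[2.0, 2.0], [2.0, 2.0]], product)
  ```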
Args:
func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_gpu_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not is_gpu_available():
self.skipTest("Test requires GPU")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_cuda_only(func=None):
"""Execute the decorated test only if a GPU is available.
This function is intended to be applied to tests that require the presence
of a CUDA GPU. If a CUDA GPU is absent, it will simply be skipped.
Args:
func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_cuda_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not is_gpu_available(cuda_only=True):
self.skipTest("Test requires CUDA GPU")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_gpu_or_tpu(func=None):
"""Execute the decorated test only if a physical GPU or TPU is available.
This function is intended to be applied to tests that require the presence
of a physical GPU or TPU. It complies with the following rules:
- If a GPU is available, the test will run on the GPU.
- If a GPU is absent and a TPU is available, the test will run on the TPU.
- If both GPU and TPU are absent, the test will be skipped.
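  For example, a sketch of a hypothetical test; note that the selected device
  type is passed to the test as an extra positional argument:
  ```python
  class MyAcceleratorTests(tf.test.TestCase):
    @run_gpu_or_tpu
    def test_reduce_sum(self, device_type):
      # device_type is the string "GPU" or "TPU".
      self.assertEqual(6.0, self.evaluate(tf.reduce_sum([1.0, 2.0, 3.0])))
  ```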
Args:
func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_gpu_or_tpu` only supports test methods.")
def decorated(self, *args, **kwargs):
if config.list_physical_devices("GPU"):
return f(self, "GPU", *args, **kwargs)
if config.list_physical_devices("TPU"):
return f(self, "TPU", *args, **kwargs)
self.skipTest("Test requires GPU or TPU")
return decorated
return decorator if func is None else decorator(func)
def with_forward_compatibility_horizons(*horizons):
"""Executes the decorated test with the specified forward-compat horizons.
Args:
*horizons: A list of (year, month, day) tuples. If the list includes
`None`, then the test will also be run with no forward-compatibility
horizon set.
Returns:
A decorator that will execute the test with the specified horizons.
"""
if not horizons:
raise ValueError("Expected at least one horizon.")
for horizon in horizons:
if not ((horizon is None) or
(len(horizon) == 3 and all(isinstance(x, int) for x in horizon))):
raise ValueError("Bad horizon value: %r" % horizon)
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`with_forward_compatibility_horizons` only "
"supports test methods.")
def decorated(self, *args, **kwargs):
for horizon in horizons:
if horizon is None:
f(self, *args, **kwargs)
else:
(year, month, day) = horizon
with forward_compatibility_horizon(year, month, day):
f(self, *args, **kwargs)
return decorated
return decorator
@deprecation.deprecated(None,
"Use `tf.config.list_physical_devices('GPU')` instead.")
@tf_export("test.is_gpu_available")
def is_gpu_available(cuda_only=False, min_cuda_compute_capability=None):
"""Returns whether TensorFlow can access a GPU.
Warning: if a non-GPU version of the package is installed, the function would
also return False. Use `tf.test.is_built_with_cuda` to validate if TensorFlow
  was built with CUDA support.
For example,
>>> gpu_available = tf.test.is_gpu_available()
>>> is_cuda_gpu_available = tf.test.is_gpu_available(cuda_only=True)
>>> is_cuda_gpu_min_3 = tf.test.is_gpu_available(True, (3,0))
Args:
cuda_only: limit the search to CUDA GPUs.
min_cuda_compute_capability: a (major,minor) pair that indicates the minimum
CUDA compute capability required, or None if no requirement.
  Note that the keyword arg name "cuda_only" is misleading: the routine will
  return True when a GPU device is available, irrespective of whether TF was
  built with CUDA support or with ROCm support. However, no changes are made
  here because
  ++ Changing the name "cuda_only" to something more generic would break
     backward compatibility
  ++ Adding an equivalent "rocm_only" would require the implementation to
     check the build type. This in turn would require doing the same for CUDA
     and thus potentially break backward compatibility
  ++ Adding a new "cuda_or_rocm_only" would not break backward compatibility,
     but would require most (if not all) callers to update the call to use
     "cuda_or_rocm_only" instead of "cuda_only"
Returns:
True if a GPU device of the requested kind is available.
"""
# This was needed earlier when we had support for SYCL in TensorFlow.
del cuda_only
try:
for local_device in device_lib.list_local_devices():
if local_device.device_type == "GPU":
gpu_info = gpu_util.compute_capability_from_device_desc(local_device)
cc = gpu_info.compute_capability or (0, 0)
if not min_cuda_compute_capability or cc >= min_cuda_compute_capability:
return True
return False
except errors_impl.NotFoundError as e:
if not all(x in str(e) for x in ["CUDA", "not find"]):
raise e
else:
logging.error(str(e))
return False
@contextlib.contextmanager
def device(use_gpu):
"""Uses gpu when requested and available."""
if use_gpu and is_gpu_available():
dev = "/device:GPU:0"
else:
dev = "/device:CPU:0"
with ops.device(dev):
yield
@contextlib.contextmanager
def use_gpu():
"""Uses gpu when requested and available."""
with device(use_gpu=True):
yield
@contextlib.contextmanager
def force_gpu():
"""Force the gpu to be used."""
with ops.device("/device:GPU:0"):
yield
@contextlib.contextmanager
def force_cpu():
"""Force the cpu to be used."""
with ops.device("/device:CPU:0"):
yield
@contextlib.contextmanager
def deterministic_ops():
"""Enables deterministic ops."""
try:
config.enable_op_determinism()
yield
finally:
config.disable_op_determinism()
class CapturedWrites(object):
"""A utility class to load the captured writes made to a stream."""
def __init__(self, capture_location):
self.capture_location = capture_location
def contents(self):
"""Get the captured writes as a single string."""
with open(self.capture_location) as tmp_file:
output_data = "".join(tmp_file.readlines())
return output_data
class FakeEagerSession(object):
"""Fake session so tests that conditionally use placeholders can use eager.
There are a number of tests that conditionally use placeholders for shape
inference. The pattern is demonstrated here:
```python
with self.cached_session() as sess:
if static_shape:
y = math_ops.matmul(x, ...)
feed_dict = {}
else:
x_ph = array_ops.placeholder(...)
y = math_ops.matmul(x_ph, ...)
feed_dict = {x_ph: x}
val = sess.run(y, feed_dict=feed_dict)
```
  Since the feed_dict is empty when not using placeholders we should be able to
  call self.evaluate(); however, that requires rewriting the test case.
This class should be considered a stop-gap solution to get tests running with
eager with minimal changes to the actual test.
"""
def __init__(self, test_case):
self._test_case = test_case
def run(self, fetches, *args, **kwargs):
"""Evaluate `fetches`.
Fail if additional args are specified.
Args:
fetches: A Tensor or a nested list/tuple of Tensors.
*args: Positional arguments
**kwargs: Keyword arguments
Raises:
RuntimeError: If args or kwargs are specified.
Returns:
Tensors as numpy values.
"""
feed_dict = kwargs.pop("feed_dict", {})
if feed_dict:
raise RuntimeError(
"feed_dict is not supported when eager execution is enabled "
"(in this case, sess.run(t) is shorthand for t.numpy()")
if args or kwargs:
raise RuntimeError(
"Optional args are not supported when eager execution is enabled "
"(in this case, sess.run(t) is shorthand for t.numpy()")
return self._test_case.evaluate(fetches)
class ErrorLoggingSession(session.Session):
"""Wrapper around a Session that logs errors in run()."""
def run(self, *args, **kwargs):
try:
return super(ErrorLoggingSession, self).run(*args, **kwargs)
except Exception as e: # pylint: disable=broad-except
# Note: disable the logging for OutOfRangeError, which makes the output
      # of tf.data tests hard to read, because OutOfRangeError is used to
      # signal completion.
if not isinstance(e, errors.OutOfRangeError):
logging.error(str(e))
raise
def disable_cudnn_autotune(func):
"""Disable autotuning during the call to this function.
Some tests want to base assertions on a graph being isomorphic with a copy.
To ensure this, this decorator disables autotuning.
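  For example (a sketch; the test body is illustrative only):
  ```python
  class MyConvTests(tf.test.TestCase):
    @disable_cudnn_autotune
    def test_conv_graph_is_reproducible(self):
      ...  # build the convolution graph twice and compare the two copies
  ```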
Args:
func: Function to run with CuDNN autotuning turned off.
Returns:
Decorated function.
"""
def decorator(f):
def decorated(self, *args, **kwargs):
original_tf_cudnn_use_autotune = os.environ.get("TF_CUDNN_USE_AUTOTUNE")
os.environ["TF_CUDNN_USE_AUTOTUNE"] = "false"
original_xla_flags = os.environ.get("XLA_FLAGS")
new_xla_flags = "--xla_gpu_autotune_level=0"
if original_xla_flags:
new_xla_flags = original_xla_flags + " " + new_xla_flags
os.environ["XLA_FLAGS"] = new_xla_flags
result = f(self, *args, **kwargs)
if (original_tf_cudnn_use_autotune is None):
del os.environ["TF_CUDNN_USE_AUTOTUNE"]
else:
os.environ["TF_CUDNN_USE_AUTOTUNE"] = original_tf_cudnn_use_autotune
if (original_xla_flags is None):
del os.environ["XLA_FLAGS"]
else:
os.environ["XLA_FLAGS"] = original_xla_flags
return result
return decorated
if func is not None:
return decorator(func)
return decorator
# The description is just for documentation purposes.
def enable_tf_xla_constant_folding(description):
if not isinstance(description, str):
raise ValueError("'description' should be string, got {}".format(
type(description)))
def enable_tf_xla_constant_folding_impl(func):
"""Enable constant folding during the call to this function.
Some tests fail without constant folding.
Args:
func: Function to run with constant folding turned on.
Returns:
Decorated function.
"""
def decorator(f):
def decorated(self, *args, **kwargs):
original_var = pywrap_tf_session.TF_GetXlaConstantFoldingDisabled()
pywrap_tf_session.TF_SetXlaConstantFoldingDisabled(False)
result = f(self, *args, **kwargs)
pywrap_tf_session.TF_SetXlaConstantFoldingDisabled(original_var)
return result
return decorated
if func is not None:
return decorator(func)
return decorator
return enable_tf_xla_constant_folding_impl
# Updates test function by selectively disabling it.
def _disable_test(execute_func):
def disable_test_impl(func):
def decorator(func):
def decorated(self, *args, **kwargs):
if execute_func:
return func(self, *args, **kwargs)
return tf_decorator.make_decorator(func, decorated)
if func is not None:
return decorator(func)
return decorator
return disable_test_impl
# The description is just for documentation purposes.
def disable_xla(description): # pylint: disable=unused-argument
"""Execute the test method only if xla is not enabled."""
execute_func = not is_xla_enabled()
return _disable_test(execute_func)
# The description is just for documentation purposes.
def disable_mlir_bridge(description): # pylint: disable=unused-argument
"""Execute the test method only if MLIR bridge is not enabled."""
execute_func = not is_mlir_bridge_enabled()
return _disable_test(execute_func)
# The description is just for documentation purposes.
def disable_asan(description): # pylint: disable=unused-argument
"""Execute the test method only if ASAN is not enabled."""
execute_func = not is_asan_enabled()
return _disable_test(execute_func)
# The description is just for documentation purposes.
def disable_msan(description): # pylint: disable=unused-argument
"""Execute the test method only if MSAN is not enabled."""
execute_func = not is_msan_enabled()
return _disable_test(execute_func)
# The description is just for documentation purposes.
def disable_tsan(description): # pylint: disable=unused-argument
"""Execute the test method only if TSAN is not enabled."""
execute_func = not is_tsan_enabled()
return _disable_test(execute_func)
# The description is just for documentation purposes.
def disable_ubsan(description): # pylint: disable=unused-argument
"""Execute the test method only if UBSAN is not enabled."""
execute_func = not is_ubsan_enabled()
return _disable_test(execute_func)
# The description is just for documentation purposes.
def disable_tfrt(unused_description):
def disable_tfrt_impl(cls_or_func):
"""Execute the test only if tfrt is not enabled."""
if tf_inspect.isclass(cls_or_func):
if tfrt_utils.enabled():
return None
else:
return cls_or_func
else:
def decorator(func):
def decorated(self, *args, **kwargs):
if tfrt_utils.enabled():
return
else:
return func(self, *args, **kwargs)
return decorated
if cls_or_func is not None:
return decorator(cls_or_func)
return decorator
return disable_tfrt_impl
def for_all_test_methods(decorator, *args, **kwargs):
"""Generate class-level decorator from given method-level decorator.
  The given decorator is expected to take some arguments and return a
  method-level decorator that is then called on each test method to produce
  a decorated method.
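  For example, the following sketch applies `run_without_tensor_float_32` to
  every test method in a class (this is how `run_all_without_tensor_float_32`
  below is implemented):
  ```python
  @for_all_test_methods(run_without_tensor_float_32,
                        "Tests assert tight matmul tolerances.")
  class MyPrecisionTests(tf.test.TestCase):
    def test_matmul_close(self):
      ...
  ```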
Args:
decorator: The decorator to apply.
*args: Positional arguments
**kwargs: Keyword arguments
  Returns: Function that will decorate a given class's test methods with the
    decorator.
"""
def all_test_methods_impl(cls):
"""Apply decorator to all test methods in class."""
for name in dir(cls):
value = getattr(cls, name)
if callable(value) and name.startswith(
"test") and (name != "test_session"):
setattr(cls, name, decorator(*args, **kwargs)(value))
return cls
return all_test_methods_impl
# The description is just for documentation purposes.
def no_xla_auto_jit(description): # pylint: disable=unused-argument
"""This test is not intended to be run with XLA auto jit enabled."""
execute_func = not is_xla_enabled()
return _disable_test(execute_func)
# The description is just for documentation purposes.
def xla_allow_fallback(description): # pylint: disable=unused-argument
def xla_allow_fallback_impl(func):
"""Allow fallback to TF even though testing xla."""
def decorator(func):
def decorated(self, *args, **kwargs):
if is_xla_enabled():
# Update the global XLABuildOpsPassFlags to enable lazy compilation,
# which allows the compiler to fall back to TF classic. Remember the
# old value so that we can reset it.
old_value = pywrap_tf_session.TF_SetXlaEnableLazyCompilation(True)
result = func(self, *args, **kwargs)
pywrap_tf_session.TF_SetXlaEnableLazyCompilation(old_value)
return result
else:
return func(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
return xla_allow_fallback_impl
# The description is just for documentation purposes.
def run_without_tensor_float_32(description): # pylint: disable=unused-argument
"""Execute test with TensorFloat-32 disabled.
While almost every real-world deep learning model runs fine with
TensorFloat-32, many tests use assertAllClose or similar methods.
TensorFloat-32 matmuls typically will cause such methods to fail with the
default tolerances.
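  For example (a sketch; the class and values are illustrative only):
  ```python
  class MyMatmulTests(tf.test.TestCase):
    @run_without_tensor_float_32("Asserts tight tolerances on a float32 matmul")
    def test_matmul_precision(self):
      x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
      self.assertAllClose([[7.0, 10.0], [15.0, 22.0]], tf.matmul(x, x))
  ```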
Args:
description: A description used for documentation purposes, describing why
the test requires TensorFloat-32 to be disabled.
Returns:
Decorator which runs a test with TensorFloat-32 disabled.
"""
def decorator(f):
@functools.wraps(f)
def decorated(self, *args, **kwargs):
allowed = config.tensor_float_32_execution_enabled()
try:
config.enable_tensor_float_32_execution(False)
f(self, *args, **kwargs)
finally:
config.enable_tensor_float_32_execution(allowed)
return decorated
return decorator
# The description is just for documentation purposes.
def run_all_without_tensor_float_32(description): # pylint: disable=unused-argument
"""Execute all tests in a class with TensorFloat-32 disabled."""
return for_all_test_methods(run_without_tensor_float_32, description)
def matmul_without_tf32(a, b, *args, **kwargs):
"""Run matmul but cast float32 inputs to float64 if TensorFloat-32 is enabled.
This effectively runs matmul without TensorFloat-32. It should only be used in
  tests when verifying some other op or function works correctly, e.g. to test
`tf.linalg.sqrtm` by matrix multiplying the output of the op by itself. In
such cases, the matmul itself is not being tested so it's OK to run it with
higher precision.
If a matmul itself is being tested, or some other op which uses matmul, use
`run_without_tensor_float_32` instead.
This also casts complex64 inputs to complex128, since TensorFloat-32 can also
  be used with complex64.
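  For example, a sketch of verifying `tf.linalg.sqrtm` (the input values are
  illustrative only):
  ```python
  x = tf.constant([[4.0, 0.0], [0.0, 9.0]])
  sqrt_x = tf.linalg.sqrtm(x)
  self.assertAllClose(x, matmul_without_tf32(sqrt_x, sqrt_x))
  ```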
Args:
a: First input to tf.linalg.matmul
b: Second input to tf.linalg.matmul
args: Other positional arguments to tf.linalg.matmul
**kwargs: Other keyword arguments to tf.linalg.matmul
Returns:
A tensor with the same type as `a`.
"""
if config.tensor_float_32_execution_enabled() and a.dtype == "float32":
a = math_ops.cast(a, "float64")
b = math_ops.cast(b, "float64")
ret = math_ops.matmul(a, b, *args, **kwargs)
return math_ops.cast(ret, a.dtype)
elif config.tensor_float_32_execution_enabled() and a.dtype == "complex64":
a = math_ops.cast(a, "complex128")
b = math_ops.cast(b, "complex128")
ret = math_ops.matmul(a, b, *args, **kwargs)
return math_ops.cast(ret, a.dtype)
else:
return math_ops.matmul(a, b, *args, **kwargs)
class EagerSessionWarner(object):
def __getattr__(self, attr):
raise AttributeError(
"Trying to access properties or call methods on the result of "
"self.session(), self.cached_session(), etc while eager execution "
"is enabled. If you're porting this test case to TF 2.0, either "
"adapt the test to work with eager execution or insert a call to "
"tf.disable_eager_execution() in the main() function of this test "
"file.")
@tf_export("test.TestCase")
class TensorFlowTestCase(googletest.TestCase):
"""Base class for tests that need to test TensorFlow."""
def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
super(TensorFlowTestCase, self).__init__(methodName)
# Make sure we get unfiltered stack traces during the test
traceback_utils.disable_traceback_filtering()
if is_xla_enabled():
pywrap_tf_session.TF_SetXlaAutoJitMode("2")
pywrap_tf_session.TF_SetXlaMinClusterSize(1)
pywrap_tf_session.TF_SetXlaEnableLazyCompilation(False)
pywrap_tf_session.TF_SetTfXlaCpuGlobalJit(True)
# Constant folding secretly runs code on TF:Classic CPU, so we also
# disable it here.
pywrap_tf_session.TF_SetXlaConstantFoldingDisabled(True)
# Check if the mlir bridge has been explicitly enabled or disabled. If
    # is_mlir_bridge_enabled() returns None, the user did not explicitly enable
# or disable the bridge so do not update enable_mlir_bridge.
if is_mlir_bridge_enabled():
context.context().enable_mlir_bridge = True
elif is_mlir_bridge_enabled() is not None:
context.context().enable_mlir_bridge = False
self._threads = []
self._tempdir = None
self._cached_session = None
self._test_start_time = None
# This flag provides the ability to control whether the graph mode gets
# initialized for TF1 or not. Initializing for TF1, which is what was
# happening earlier, was preventing enablement of 'eager mode' in the test.
self._set_default_seed = True
def setUp(self):
super(TensorFlowTestCase, self).setUp()
self._ClearCachedSession()
random.seed(random_seed.DEFAULT_GRAPH_SEED)
np.random.seed(random_seed.DEFAULT_GRAPH_SEED)
# Note: The following line is necessary because some test methods may error
# out from within nested graph contexts (e.g., via assertRaises and
# assertRaisesRegexp), which may leave ops._default_graph_stack non-empty
# under certain versions of Python. That would cause
# ops.reset_default_graph() to throw an exception if the stack were not
# cleared first.
ops._default_graph_stack.reset() # pylint: disable=protected-access
ops.reset_default_graph()
if self._set_default_seed:
random_seed.set_random_seed(random_seed.DEFAULT_GRAPH_SEED)
# Reset summary writer in case another test used set_as_default() with their
# summary writer.
summary_state = summary_ops_v2._summary_state # pylint: disable=protected-access
summary_state.writer = None
# Avoiding calling setUp() for the poorly named test_session method.
if self.id().endswith(".test_session"):
self.skipTest("Not a test.")
self._test_start_time = time.time()
def tearDown(self):
# If a subclass overrides setUp and doesn't call the parent class's setUp,
# then we may not have set the start time.
if self._test_start_time is not None:
logging.info("time(%s): %ss", self.id(),
round(time.time() - self._test_start_time, 2))
for thread in self._threads:
thread.check_termination()
self._ClearCachedSession()
super(TensorFlowTestCase, self).tearDown()
def _ClearCachedSession(self):
if self._cached_session is not None:
self._cached_session.close()
self._cached_session = None
def get_temp_dir(self):
"""Returns a unique temporary directory for the test to use.
    If you call this method multiple times during a test, it will return the
    same folder. However, across different runs the directories will be
    different. This ensures that across different runs tests will not be able
    to pollute each other's environment.
If you need multiple unique directories within a single test, you should
use tempfile.mkdtemp as follows:
      tempfile.mkdtemp(dir=self.get_temp_dir())
Returns:
string, the path to the unique temporary directory created for this test.
"""
if not self._tempdir:
self._tempdir = tempfile.mkdtemp(dir=googletest.GetTempDir())
return self._tempdir
@contextlib.contextmanager
def captureWritesToStream(self, stream):
"""A context manager that captures the writes to a given stream.
This context manager captures all writes to a given stream inside of a
`CapturedWrites` object. When this context manager is created, it yields
the `CapturedWrites` object. The captured contents can be accessed by
calling `.contents()` on the `CapturedWrites`.
For this function to work, the stream must have a file descriptor that
can be modified using `os.dup` and `os.dup2`, and the stream must support
a `.flush()` method. The default python sys.stdout and sys.stderr are
examples of this. Note that this does not work in Colab or Jupyter
notebooks, because those use alternate stdout streams.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
input = [1.0, 2.0, 3.0, 4.0, 5.0]
with self.captureWritesToStream(sys.stdout) as captured:
result = MyOperator(input).eval()
self.assertStartsWith(captured.contents(), "This was printed.")
```
Args:
stream: The stream whose writes should be captured. This stream must have
a file descriptor, support writing via using that file descriptor, and
must have a `.flush()` method.
Yields:
A `CapturedWrites` object that contains all writes to the specified stream
made during this context.
"""
stream.flush()
fd = stream.fileno()
tmp_file, tmp_file_path = tempfile.mkstemp(dir=self.get_temp_dir())
orig_fd = os.dup(fd)
os.dup2(tmp_file, fd)
try:
yield CapturedWrites(tmp_file_path)
finally:
os.close(tmp_file)
os.dup2(orig_fd, fd)
def _AssertProtoEquals(self, a, b, msg=None):
"""Asserts that a and b are the same proto.
Uses ProtoEq() first, as it returns correct results
    for floating point attributes, and then uses assertProtoEqual()
in case of failure as it provides good error messages.
Args:
a: a proto.
b: another proto.
msg: Optional message to report on failure.
"""
if not compare.ProtoEq(a, b):
compare.assertProtoEqual(self, a, b, normalize_numbers=True, msg=msg)
def assertProtoEquals(self, expected_message_maybe_ascii, message, msg=None):
"""Asserts that message is same as parsed expected_message_ascii.
Creates another prototype of message, reads the ascii message into it and
then compares them using self._AssertProtoEqual().
Args:
expected_message_maybe_ascii: proto message in original or ascii form.
message: the message to validate.
msg: Optional message to report on failure.
"""
if isinstance(expected_message_maybe_ascii, type(message)):
expected_message = expected_message_maybe_ascii
self._AssertProtoEquals(expected_message, message, msg=msg)
elif isinstance(expected_message_maybe_ascii, (str, bytes)):
expected_message = type(message)()
text_format.Merge(
expected_message_maybe_ascii,
expected_message,
descriptor_pool=descriptor_pool.Default())
self._AssertProtoEquals(expected_message, message, msg=msg)
else:
assert False, ("Can't compare protos of type %s and %s." %
(type(expected_message_maybe_ascii), type(message)))
def assertProtoEqualsVersion(
self,
expected,
actual,
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER,
msg=None):
expected = "versions { producer: %d min_consumer: %d };\n%s" % (
producer, min_consumer, expected)
self.assertProtoEquals(expected, actual, msg=msg)
def assertStartsWith(self, actual, expected_start, msg=None):
"""Assert that actual.startswith(expected_start) is True.
Args:
actual: str
expected_start: str
msg: Optional message to report on failure.
"""
if not actual.startswith(expected_start):
fail_msg = "%r does not start with %r" % (actual, expected_start)
fail_msg += " : %r" % (msg) if msg else ""
self.fail(fail_msg)
def _eval_tensor(self, tensor):
if tensor is None:
return None
elif callable(tensor):
return self._eval_helper(tensor())
else:
try:
if sparse_tensor.is_sparse(tensor):
return sparse_tensor.SparseTensorValue(tensor.indices.numpy(),
tensor.values.numpy(),
tensor.dense_shape.numpy())
elif ragged_tensor.is_ragged(tensor):
return ragged_tensor_value.RaggedTensorValue(
self._eval_tensor(tensor.values),
self._eval_tensor(tensor.row_splits))
elif isinstance(tensor, indexed_slices.IndexedSlices):
return indexed_slices.IndexedSlicesValue(
values=tensor.values.numpy(),
indices=tensor.indices.numpy(),
dense_shape=tensor.dense_shape.numpy())
# Convert tensors and composite tensors to numpy arrays.
return nest.map_structure(lambda t: t.numpy(), tensor,
expand_composites=True)
except AttributeError as e:
six.raise_from(ValueError("Unsupported type %s." % type(tensor)), e)
def _eval_helper(self, tensors):
if tensors is None:
return None
return nest.map_structure(self._eval_tensor, tensors)
def evaluate(self, tensors):
"""Evaluates tensors and returns numpy values.
Args:
tensors: A Tensor or a nested list/tuple of Tensors.
Returns:
tensors numpy values.
"""
if context.executing_eagerly():
return self._eval_helper(tensors)
else:
sess = ops.get_default_session()
if sess is None:
with self.test_session() as sess:
return sess.run(tensors)
else:
return sess.run(tensors)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def session(self, graph=None, config=None, use_gpu=True, force_gpu=False):
"""A context manager for a TensorFlow Session for use in executing tests.
Note that this will set this session and the graph as global defaults.
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
    possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
``` python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.session():
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
          self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if context.executing_eagerly():
yield EagerSessionWarner()
else:
with self._create_session(graph, config, force_gpu) as sess:
with self._constrain_devices_and_set_default(sess, use_gpu, force_gpu):
yield sess
@contextlib.contextmanager
def cached_session(self,
graph=None,
config=None,
use_gpu=True,
force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
This method behaves differently than self.session(): for performance reasons
`cached_session` will by default reuse the same session within the same
test. The session returned by this function will only be closed at the end
of the test (in the TearDown function).
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
    possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.cached_session() as sess:
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
          self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if context.executing_eagerly():
yield FakeEagerSession(self)
else:
sess = self._get_cached_session(
graph, config, force_gpu, crash_if_inconsistent_args=True)
with self._constrain_devices_and_set_default(sess, use_gpu,
force_gpu) as cached:
yield cached
@contextlib.contextmanager
@deprecation.deprecated(None, "Use `self.session()` or "
"`self.cached_session()` instead.")
def test_session(self,
graph=None,
config=None,
use_gpu=True,
force_gpu=False):
"""Use cached_session instead."""
if self.id().endswith(".test_session"):
self.skipTest(
"Tests that have the name \"test_session\" are automatically skipped "
"by TensorFlow test fixture, as the name is reserved for creating "
"sessions within tests. Please rename your test if you have a test "
"with this name.")
if context.executing_eagerly():
yield None
else:
if graph is None:
sess = self._get_cached_session(
graph, config, force_gpu, crash_if_inconsistent_args=False)
with self._constrain_devices_and_set_default(sess, use_gpu,
force_gpu) as cached:
yield cached
else:
with self.session(graph, config, use_gpu, force_gpu) as sess:
yield sess
# pylint: enable=g-doc-return-or-yield
class _CheckedThread(object):
"""A wrapper class for Thread that asserts successful completion.
This class should be created using the TensorFlowTestCase.checkedThread()
method.
"""
def __init__(self, testcase, target, args=None, kwargs=None):
"""Constructs a new instance of _CheckedThread.
Args:
testcase: The TensorFlowTestCase for which this thread is being created.
target: A callable object representing the code to be executed in the
thread.
args: A tuple of positional arguments that will be passed to target.
kwargs: A dictionary of keyword arguments that will be passed to target.
"""
self._testcase = testcase
self._target = target
self._args = () if args is None else args
self._kwargs = {} if kwargs is None else kwargs
self._thread = threading.Thread(target=self._protected_run)
self._exception = None
self._is_thread_joined = False
def _protected_run(self):
"""Target for the wrapper thread. Sets self._exception on failure."""
try:
self._target(*self._args, **self._kwargs)
except Exception as e: # pylint: disable=broad-except
self._exception = e
def start(self):
"""Starts the thread's activity.
This must be called at most once per _CheckedThread object. It arranges
for the object's target to be invoked in a separate thread of control.
"""
self._thread.start()
def join(self):
"""Blocks until the thread terminates.
Raises:
        self._testcase.failureException: If the thread terminates due to
an exception.
"""
self._is_thread_joined = True
self._thread.join()
if self._exception is not None:
self._testcase.fail("Error in checkedThread: %s" % str(self._exception))
def is_alive(self):
"""Returns whether the thread is alive.
This method returns True just before the run() method starts
until just after the run() method terminates.
Returns:
True if the thread is alive, otherwise False.
"""
return self._thread.is_alive()
def check_termination(self):
"""Returns whether the checked thread was properly used and did terminate.
Every checked thread should be "join"ed after starting, and before the
test tears down. If it is not joined, it is possible the thread will hang
and cause flaky failures in tests.
Raises:
self._testcase.failureException: If check_termination was called before
thread was joined.
RuntimeError: If the thread is not terminated. This means thread was not
joined with the main thread.
"""
if self._is_thread_joined:
if self.is_alive():
raise RuntimeError(
"Thread was not joined with main thread, and is still running "
"when the test finished.")
else:
self._testcase.fail("A checked thread was not joined.")
def checkedThread(self, target, args=None, kwargs=None):
"""Returns a Thread wrapper that asserts 'target' completes successfully.
This method should be used to create all threads in test cases, as
otherwise there is a risk that a thread will silently fail, and/or
assertions made in the thread will not be respected.
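    For example, a sketch inside a test method (`compute_something` is a
    hypothetical helper, not part of this module):
    ```python
    def _worker():
      self.assertGreater(compute_something(), 0)
    t = self.checkedThread(target=_worker)
    t.start()
    t.join()  # Fails the test if _worker raised.
    ```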
Args:
target: A callable object to be executed in the thread.
args: The argument tuple for the target invocation. Defaults to ().
kwargs: A dictionary of keyword arguments for the target invocation.
Defaults to {}.
Returns:
A wrapper for threading.Thread that supports start() and join() methods.
"""
ret = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)
self._threads.append(ret)
return ret
# pylint: enable=invalid-name
@py_func_if_in_function
def assertNear(self, f1, f2, err, msg=None):
"""Asserts that two floats are near each other.
Checks that |f1 - f2| < err and asserts a test failure
if not.
Args:
f1: A float value.
f2: A float value.
err: A float value.
msg: An optional string message to append to the failure message.
"""
# f1 == f2 is needed here as we might have: f1, f2 = inf, inf
self.assertTrue(
f1 == f2 or math.fabs(f1 - f2) <= err, "%f != %f +/- %f%s" %
(f1, f2, err, " (%s)" % msg if msg is not None else ""))
@py_func_if_in_function
def assertArrayNear(self, farray1, farray2, err, msg=None):
"""Asserts that two float arrays are near each other.
Checks that for all elements of farray1 and farray2
|f1 - f2| < err. Asserts a test failure if not.
Args:
farray1: a list of float values.
farray2: a list of float values.
err: a float value.
msg: Optional message to report on failure.
"""
self.assertEqual(len(farray1), len(farray2), msg=msg)
for f1, f2 in zip(farray1, farray2):
self.assertNear(float(f1), float(f2), err, msg=msg)
def _NDArrayNear(self, ndarray1, ndarray2, err):
return np.linalg.norm(ndarray1 - ndarray2) < err
@py_func_if_in_function
def assertNDArrayNear(self, ndarray1, ndarray2, err, msg=None):
"""Asserts that two numpy arrays have near values.
Args:
ndarray1: a numpy ndarray.
ndarray2: a numpy ndarray.
err: a float. The maximum absolute difference allowed.
msg: Optional message to report on failure.
"""
self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err), msg=msg)
def _GetNdArray(self, a):
# If a is tensor-like then convert it to ndarray
if tensor_util.is_tf_type(a):
if isinstance(a, ops._EagerTensorBase):
a = a.numpy()
else:
a = self.evaluate(a)
if not isinstance(a, np.ndarray):
return np.array(a)
return a
def evaluate_if_both_tensors(self, a, b):
if (tensor_util.is_tf_type(a) and tensor_util.is_tf_type(b) and
not isinstance(a, ops._EagerTensorBase) and
not isinstance(b, ops._EagerTensorBase)):
return self.evaluate((a, b))
else:
return (a, b)
def _assertArrayLikeAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
(a, b) = self.evaluate_if_both_tensors(a, b)
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# When the array rank is small, print its contents. Numpy array printing is
# implemented using inefficient recursion so prints can cause tests to
# time out.
if a.shape != b.shape and (b.ndim <= 3 or b.size < 500):
shape_mismatch_msg = ("Shape mismatch: expected %s, got %s with contents "
"%s.") % (a.shape, b.shape, b)
else:
shape_mismatch_msg = "Shape mismatch: expected %s, got %s." % (a.shape,
b.shape)
self.assertEqual(a.shape, b.shape, shape_mismatch_msg)
msgs = [msg]
# np.allclose does not always work for our custom bfloat16 extension type
# when type promotions are involved, so we first cast any bfloat16 arrays
# to float32.
a_dtype = a.dtype
a = a.astype(np.float32) if a.dtype == dtypes.bfloat16.as_numpy_dtype else a
b = b.astype(np.float32) if b.dtype == dtypes.bfloat16.as_numpy_dtype else b
if not np.allclose(a, b, rtol=rtol, atol=atol):
# Adds more details to np.testing.assert_allclose.
#
# NOTE: numpy.allclose (and numpy.testing.assert_allclose)
# checks whether two arrays are element-wise equal within a
# tolerance. The relative difference (rtol * abs(b)) and the
# absolute difference atol are added together to compare against
# the absolute difference between a and b. Here, we want to
      # tell the user which elements violate such conditions.
cond = np.logical_or(
np.abs(a - b) > atol + rtol * np.abs(b),
np.isnan(a) != np.isnan(b))
if a.ndim:
x = a[np.where(cond)]
y = b[np.where(cond)]
msgs.append("not close where = {}".format(np.where(cond)))
else:
# np.where is broken for scalars
x, y = a, b
msgs.append("not close lhs = {}".format(x))
msgs.append("not close rhs = {}".format(y))
msgs.append("not close dif = {}".format(np.abs(x - y)))
msgs.append("not close tol = {}".format(atol + rtol * np.abs(y)))
msgs.append("dtype = {}, shape = {}".format(a_dtype, a.shape))
# TODO(xpan): There seems to be a bug:
# tensorflow/compiler/tests:binary_ops_test pass with float32
# nan even though the equal_nan is False by default internally.
np.testing.assert_allclose(
a, b, rtol=rtol, atol=atol, err_msg="\n".join(msgs), equal_nan=True)
def _assertAllCloseRecursive(self,
a,
b,
rtol=1e-6,
atol=1e-6,
path=None,
msg=None):
if ragged_tensor.is_ragged(a) or ragged_tensor.is_ragged(b):
return self._assertRaggedClose(a, b, rtol, atol, msg)
path = path or []
path_str = (("[" + "][".join(str(p) for p in path) + "]") if path else "")
msg = msg if msg else ""
# Check if a and/or b are namedtuples.
if hasattr(a, "_asdict"):
a = a._asdict()
if hasattr(b, "_asdict"):
b = b._asdict()
a_is_dict = isinstance(a, collections_abc.Mapping)
if a_is_dict != isinstance(b, collections_abc.Mapping):
raise ValueError("Can't compare dict to non-dict, a%s vs b%s. %s" %
(path_str, path_str, msg))
if a_is_dict:
self.assertItemsEqual(
a.keys(),
b.keys(),
msg="mismatched keys: a%s has keys %s, but b%s has keys %s. %s" %
(path_str, a.keys(), path_str, b.keys(), msg))
for k in a:
path.append(k)
self._assertAllCloseRecursive(
a[k], b[k], rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
elif isinstance(a, (list, tuple)):
      # Try to directly compare a, b as ndarrays; if that does not work, then
      # traverse through the sequence, which is more expensive.
try:
(a, b) = self.evaluate_if_both_tensors(a, b)
a_as_ndarray = self._GetNdArray(a)
b_as_ndarray = self._GetNdArray(b)
self._assertArrayLikeAllClose(
a_as_ndarray,
b_as_ndarray,
rtol=rtol,
atol=atol,
msg="Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg))
except (ValueError, TypeError, NotImplementedError) as e:
if len(a) != len(b):
raise ValueError(
"Mismatched length: a%s has %d items, but b%s has %d items. %s" %
(path_str, len(a), path_str, len(b), msg))
for idx, (a_ele, b_ele) in enumerate(zip(a, b)):
path.append(str(idx))
self._assertAllCloseRecursive(
a_ele, b_ele, rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
# a and b are ndarray like objects
else:
try:
self._assertArrayLikeAllClose(
a,
b,
rtol=rtol,
atol=atol,
msg=("Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg)))
except TypeError as e:
msg = ("Error: a%s has %s, but b%s has %s. %s" %
(path_str, type(a), path_str, type(b), msg))
e.args = ((e.args[0] + " : " + msg,) + e.args[1:])
raise
@py_func_if_in_function
def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
"""Asserts that two structures of numpy arrays or Tensors, have near values.
`a` and `b` can be arbitrarily nested structures. A layer of a nested
structure can be a `dict`, `namedtuple`, `tuple` or `list`.
Note: the implementation follows
[`numpy.allclose`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.allclose.html)
(and numpy.testing.assert_allclose). It checks whether two arrays are
element-wise equal within a tolerance. The relative difference
(`rtol * abs(b)`) and the absolute difference `atol` are added together
to compare against the absolute difference between `a` and `b`.
Args:
a: The expected numpy `ndarray`, or anything that can be converted into a
        numpy `ndarray` (including Tensor), or any arbitrarily nested
        structure of these.
b: The actual numpy `ndarray`, or anything that can be converted into a
        numpy `ndarray` (including Tensor), or any arbitrarily nested
        structure of these.
rtol: relative tolerance.
atol: absolute tolerance.
msg: Optional message to report on failure.
Raises:
ValueError: if only one of `a[p]` and `b[p]` is a dict or
`a[p]` and `b[p]` have different length, where `[p]` denotes a path
to the nested structure, e.g. given `a = [(1, 1), {'d': (6, 7)}]` and
`[p] = [1]['d']`, then `a[p] = (6, 7)`.
"""
self._assertAllCloseRecursive(a, b, rtol=rtol, atol=atol, msg=msg)
@py_func_if_in_function
def assertAllCloseAccordingToType(self,
a,
b,
rtol=1e-6,
atol=1e-6,
float_rtol=1e-6,
float_atol=1e-6,
half_rtol=1e-3,
half_atol=1e-3,
bfloat16_rtol=1e-2,
bfloat16_atol=1e-2,
msg=None):
"""Like assertAllClose, but also suitable for comparing fp16 arrays.
In particular, the tolerance is reduced to 1e-3 if at least
one of the arguments is of type float16.
Args:
      a: the expected numpy ndarray or anything that can be converted to one.
      b: the actual numpy ndarray or anything that can be converted to one.
rtol: relative tolerance.
atol: absolute tolerance.
float_rtol: relative tolerance for float32.
float_atol: absolute tolerance for float32.
half_rtol: relative tolerance for float16.
half_atol: absolute tolerance for float16.
bfloat16_rtol: relative tolerance for bfloat16.
bfloat16_atol: absolute tolerance for bfloat16.
msg: Optional message to report on failure.
"""
(a, b) = self.evaluate_if_both_tensors(a, b)
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# types with lower tol are put later to overwrite previous ones.
if (a.dtype == np.float32 or b.dtype == np.float32 or
a.dtype == np.complex64 or b.dtype == np.complex64):
rtol = max(rtol, float_rtol)
atol = max(atol, float_atol)
if a.dtype == np.float16 or b.dtype == np.float16:
rtol = max(rtol, half_rtol)
atol = max(atol, half_atol)
if (a.dtype == dtypes.bfloat16.as_numpy_dtype or
b.dtype == dtypes.bfloat16.as_numpy_dtype):
rtol = max(rtol, bfloat16_rtol)
atol = max(atol, bfloat16_atol)
self.assertAllClose(a, b, rtol=rtol, atol=atol, msg=msg)
@py_func_if_in_function
def assertNotAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
"""Assert that two numpy arrays, or Tensors, do not have near values.
Args:
a: The expected numpy `ndarray`, or anything that can be converted into a
        numpy `ndarray` (including Tensor), or any arbitrarily nested
        structure of these.
b: The actual numpy `ndarray`, or anything that can be converted into a
        numpy `ndarray` (including Tensor), or any arbitrarily nested
        structure of these.
rtol: relative tolerance.
atol: absolute tolerance.
msg: Optional message to report on failure.
Raises:
AssertionError: If `a` and `b` are unexpectedly close at all elements.
"""
try:
self.assertAllClose(a, b, rtol=rtol, atol=atol, msg=msg)
except AssertionError:
return
msg = msg or ""
raise AssertionError("The two values are close at all elements. %s" % msg)
@py_func_if_in_function
def assertAllEqual(self, a, b, msg=None):
"""Asserts that two numpy arrays or Tensors have the same values.
Args:
      a: the expected numpy ndarray or anything that can be converted to one.
      b: the actual numpy ndarray or anything that can be converted to one.
msg: Optional message to report on failure.
"""
if (ragged_tensor.is_ragged(a) or ragged_tensor.is_ragged(b)):
return self._assertRaggedEqual(a, b, msg)
msg = msg if msg else ""
(a, b) = self.evaluate_if_both_tensors(a, b)
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# Arbitrary bounds so that we don't print giant tensors.
if (b.ndim <= 3 or b.size < 500):
self.assertEqual(
a.shape, b.shape, "Shape mismatch: expected %s, got %s."
" Contents: %r. \n%s." % (a.shape, b.shape, b, msg))
else:
self.assertEqual(
a.shape, b.shape, "Shape mismatch: expected %s, got %s."
" %s" % (a.shape, b.shape, msg))
same = (a == b)
if (a.dtype in [
np.float16, np.float32, np.float64, dtypes.bfloat16.as_numpy_dtype
]):
same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
msgs = [msg]
if not np.all(same):
# Adds more details to np.testing.assert_array_equal.
diff = np.logical_not(same)
if a.ndim:
x = a[np.where(diff)]
y = b[np.where(diff)]
msgs.append("not equal where = {}".format(np.where(diff)))
else:
# np.where is broken for scalars
x, y = a, b
msgs.append("not equal lhs = %r" % x)
msgs.append("not equal rhs = %r" % y)
# Handle mixed string types as a result of PY2to3 migration. That is, the
# mixing between bytes (b-prefix strings, PY2 default) and unicodes
# (u-prefix strings, PY3 default).
if six.PY3:
if (a.dtype.kind != b.dtype.kind and
{a.dtype.kind, b.dtype.kind}.issubset({"U", "S", "O"})):
a_list = []
b_list = []
# OK to flatten `a` and `b` because they are guaranteed to have the
# same shape.
for out_list, flat_arr in [(a_list, a.flat), (b_list, b.flat)]:
for item in flat_arr:
if isinstance(item, str):
out_list.append(item.encode("utf-8"))
else:
out_list.append(item)
a = np.array(a_list)
b = np.array(b_list)
np.testing.assert_array_equal(a, b, err_msg="\n".join(msgs))
@py_func_if_in_function
def assertNotAllEqual(self, a, b, msg=None):
"""Asserts that two numpy arrays or Tensors do not have the same values.
Args:
      a: the expected numpy ndarray or anything that can be converted to one.
      b: the actual numpy ndarray or anything that can be converted to one.
msg: Optional message to report on failure.
"""
try:
self.assertAllEqual(a, b)
except AssertionError:
return
raise AssertionError("The two values are equal at all elements. %s" % msg)
@py_func_if_in_function
def assertAllGreater(self, a, comparison_target):
"""Assert element values are all greater than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
(a, comparison_target) = self.evaluate_if_both_tensors(a, comparison_target)
a = self._GetNdArray(a)
self.assertGreater(np.min(a), comparison_target)
@py_func_if_in_function
def assertAllLess(self, a, comparison_target):
"""Assert element values are all less than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
(a, comparison_target) = self.evaluate_if_both_tensors(a, comparison_target)
a = self._GetNdArray(a)
self.assertLess(np.max(a), comparison_target)
@py_func_if_in_function
def assertAllGreaterEqual(self, a, comparison_target):
"""Assert element values are all greater than or equal to a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
(a, comparison_target) = self.evaluate_if_both_tensors(a, comparison_target)
a = self._GetNdArray(a)
self.assertGreaterEqual(np.min(a), comparison_target)
@py_func_if_in_function
def assertAllLessEqual(self, a, comparison_target):
"""Assert element values are all less than or equal to a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
(a, comparison_target) = self.evaluate_if_both_tensors(a, comparison_target)
a = self._GetNdArray(a)
self.assertLessEqual(np.max(a), comparison_target)
def _format_subscripts(self, subscripts, value, limit=10, indent=2):
"""Generate a summary of ndarray subscripts as a list of str.
    If limit == N, this method will print up to the first N subscripts on
    separate lines. A line of ellipses (...) will be appended at the end if
    the number of subscripts exceeds N.
Args:
subscripts: The tensor (np.ndarray) subscripts, of the same format as
np.where()'s return value, i.e., a tuple of arrays with each array
corresponding to a dimension. E.g., (array([1, 1]), array([0, 1])).
value: (np.ndarray) value of the tensor.
limit: (int) The maximum number of indices to print.
indent: (int) Number of characters to indent at the beginning of each
line.
Returns:
(list of str) the multi-line representation of the subscripts and values,
potentially with omission at the end.
"""
lines = []
subscripts = np.transpose(subscripts)
prefix = " " * indent
if np.ndim(value) == 0:
return [prefix + "[0] : " + str(value)]
for subscript in itertools.islice(subscripts, limit):
lines.append(prefix + str(subscript) + " : " +
str(value[tuple(subscript)]))
if len(subscripts) > limit:
lines.append(prefix + "...")
return lines
@py_func_if_in_function
def assertAllInRange(self,
target,
lower_bound,
upper_bound,
open_lower_bound=False,
open_upper_bound=False):
"""Assert that elements in a Tensor are all in a given range.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
lower_bound: lower bound of the range
upper_bound: upper bound of the range
open_lower_bound: (`bool`) whether the lower bound is open (i.e., > rather
than the default >=)
open_upper_bound: (`bool`) whether the upper bound is open (i.e., < rather
than the default <=)
Raises:
AssertionError:
if the value tensor does not have an ordered numeric type (float* or
int*), or
if there are nan values, or
if any of the elements do not fall in the specified range.
"""
target = self._GetNdArray(target)
if not (np.issubdtype(target.dtype, np.floating) or
np.issubdtype(target.dtype, np.integer)):
raise AssertionError(
"The value of %s does not have an ordered numeric type, instead it "
"has type: %s" % (target, target.dtype))
nan_subscripts = np.where(np.isnan(target))
if np.size(nan_subscripts):
raise AssertionError(
"%d of the %d element(s) are NaN. "
"Subscripts(s) and value(s) of the NaN element(s):\n" %
(len(nan_subscripts[0]), np.size(target)) +
"\n".join(self._format_subscripts(nan_subscripts, target)))
range_str = (("(" if open_lower_bound else "[") + str(lower_bound) + ", " +
str(upper_bound) + (")" if open_upper_bound else "]"))
violations = (
np.less_equal(target, lower_bound) if open_lower_bound else np.less(
target, lower_bound))
violations = np.logical_or(
violations,
np.greater_equal(target, upper_bound)
if open_upper_bound else np.greater(target, upper_bound))
violation_subscripts = np.where(violations)
if np.size(violation_subscripts):
raise AssertionError(
"%d of the %d element(s) are outside the range %s. " %
(len(violation_subscripts[0]), np.size(target), range_str) +
"Subscript(s) and value(s) of the offending elements:\n" +
"\n".join(self._format_subscripts(violation_subscripts, target)))
@py_func_if_in_function
def assertAllInSet(self, target, expected_set):
"""Assert that elements of a Tensor are all in a given closed set.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_set: (`list`, `tuple` or `set`) The closed set that the elements
of the value of `target` are expected to fall into.
Raises:
AssertionError:
if any of the elements do not fall into `expected_set`.
"""
target = self._GetNdArray(target)
# Elements in target that are not in expected_set.
diff = np.setdiff1d(target.flatten(), list(expected_set))
if np.size(diff):
raise AssertionError("%d unique element(s) are not in the set %s: %s" %
(np.size(diff), expected_set, diff))
@py_func_if_in_function
def assertDTypeEqual(self, target, expected_dtype):
"""Assert ndarray data type is equal to expected.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_dtype: Expected data type.
"""
target = self._GetNdArray(target)
    if not isinstance(target, list):
      arrays = [target]
    else:
      arrays = target
    for arr in arrays:
      self.assertEqual(arr.dtype, expected_dtype)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def assertRaisesWithPredicateMatch(self, exception_type,
expected_err_re_or_predicate):
"""Returns a context manager to enclose code expected to raise an exception.
If the exception is an OpError, the op stack is also included in the message
predicate search.
Args:
exception_type: The expected type of exception that should be raised.
expected_err_re_or_predicate: If this is callable, it should be a function
of one argument that inspects the passed-in exception and returns True
(success) or False (please fail the test). Otherwise, the error message
is expected to match this regular expression partially.
Returns:
A context manager to surround code that is expected to raise an
exception.
"""
if callable(expected_err_re_or_predicate):
predicate = expected_err_re_or_predicate
else:
def predicate(e):
err_str = e.message if isinstance(e, errors.OpError) else str(e)
op = e.op if isinstance(e, errors.OpError) else None
while op is not None:
err_str += "\nCaused by: " + op.name
op = op._original_op # pylint: disable=protected-access
logging.info("Searching within error strings: '%s' within '%s'",
expected_err_re_or_predicate, err_str)
return re.search(expected_err_re_or_predicate, err_str)
try:
yield
self.fail(exception_type.__name__ + " not raised")
except Exception as e: # pylint: disable=broad-except
if not isinstance(e, exception_type) or not predicate(e):
raise AssertionError("Exception of type %s: %s" %
(str(type(e)), str(e)))
# pylint: enable=g-doc-return-or-yield
def assertRaisesOpError(self, expected_err_re_or_predicate):
return self.assertRaisesWithPredicateMatch(errors.OpError,
expected_err_re_or_predicate)
def assertRaisesIncompatibleShapesError(
self, exception_type=errors.InvalidArgumentError):
return self.assertRaisesWithPredicateMatch(
exception_type, r"Incompatible shapes|Dimensions must be equal|"
r"required broadcastable shapes")
def assertShapeEqual(self, input_a, input_b, msg=None):
"""Asserts that two Numpy or TensorFlow objects have the same shape.
For Tensors, this compares statically known shapes at compile time, not
dynamic shapes at runtime.
Args:
input_a: A Numpy ndarray, Numpy scalar, or a Tensor.
input_b: A Numpy ndarray, Numpy scalar, or a Tensor.
msg: Optional message to report on failure.
Raises:
TypeError: If the arguments have the wrong type.
"""
if not isinstance(input_a, (np.ndarray, np.generic, ops.Tensor)):
      raise TypeError(
          "input_a must be a Numpy ndarray, Numpy scalar, or a Tensor. "
          f"Instead received {type(input_a)}")
if not isinstance(input_b, (np.ndarray, np.generic, ops.Tensor)):
      raise TypeError(
          "input_b must be a Numpy ndarray, Numpy scalar, or a Tensor. "
          f"Instead received {type(input_b)}")
shape_a = input_a.get_shape().as_list() if isinstance(
input_a, ops.Tensor) else input_a.shape
shape_b = input_b.get_shape().as_list() if isinstance(
input_b, ops.Tensor) else input_b.shape
self.assertAllEqual(shape_a, shape_b, msg=msg)
def assertDeviceEqual(self, device1, device2, msg=None):
"""Asserts that the two given devices are the same.
Args:
device1: A string device name or TensorFlow `DeviceSpec` object.
device2: A string device name or TensorFlow `DeviceSpec` object.
msg: Optional message to report on failure.
"""
device1 = pydev.canonical_name(device1)
device2 = pydev.canonical_name(device2)
self.assertEqual(
device1, device2,
"Devices %s and %s are not equal. %s" % (device1, device2, msg))
@py_func_if_in_function
  def assertDictEqual(self, a, b, msg=None):
    """Assert that two given dictionaries of tensors are the same.
Args:
a: Expected dictionary with numpy ndarray or anything else that can be
converted to one as values.
b: Actual dictionary with numpy ndarray or anything else that can be
converted to one as values.
msg: Optional message to report on failure.
"""
# To keep backwards compatibility, we first try the base class
# assertDictEqual. If that fails we try the tensorflow one.
try:
super().assertDictEqual(a, b, msg)
except Exception: # pylint: disable=broad-except
self.assertSameElements(a.keys(), b.keys()) # pylint: disable=g-assert-in-except
for k, v in a.items():
(a_k, b_k) = self.evaluate_if_both_tensors(v, b[k])
a_k = self._GetNdArray(a_k)
b_k = self._GetNdArray(b_k)
if np.issubdtype(a_k.dtype, np.floating):
self.assertAllClose(v, b[k], msg=k)
else:
self.assertAllEqual(v, b[k], msg=k)
def _GetPyList(self, a):
"""Converts `a` to a nested python list."""
if isinstance(a, ragged_tensor.RaggedTensor):
return self.evaluate(a).to_list()
elif isinstance(a, ops.Tensor):
a = self.evaluate(a)
return a.tolist() if isinstance(a, np.ndarray) else a
elif isinstance(a, np.ndarray):
return a.tolist()
elif isinstance(a, ragged_tensor_value.RaggedTensorValue):
return a.to_list()
else:
return np.array(a).tolist()
def _assertRaggedEqual(self, a, b, msg):
"""Asserts that two ragged tensors are equal."""
a_list = self._GetPyList(a)
b_list = self._GetPyList(b)
self.assertEqual(a_list, b_list, msg)
if not (isinstance(a, (list, tuple)) or isinstance(b, (list, tuple))):
a_ragged_rank = a.ragged_rank if ragged_tensor.is_ragged(a) else 0
b_ragged_rank = b.ragged_rank if ragged_tensor.is_ragged(b) else 0
self.assertEqual(a_ragged_rank, b_ragged_rank, msg)
def _assertRaggedClose(self, a, b, rtol, atol, msg=None):
a_list = self._GetPyList(a)
b_list = self._GetPyList(b)
self._assertListCloseRecursive(a_list, b_list, rtol, atol, msg)
if not (isinstance(a, (list, tuple)) or isinstance(b, (list, tuple))):
a_ragged_rank = a.ragged_rank if ragged_tensor.is_ragged(a) else 0
b_ragged_rank = b.ragged_rank if ragged_tensor.is_ragged(b) else 0
self.assertEqual(a_ragged_rank, b_ragged_rank, msg)
def _assertListCloseRecursive(self, a, b, rtol, atol, msg, path="value"):
self.assertEqual(type(a), type(b))
if isinstance(a, (list, tuple)):
self.assertLen(a, len(b), "Length differs for %s" % path)
for i in range(len(a)):
self._assertListCloseRecursive(a[i], b[i], rtol, atol, msg,
"%s[%s]" % (path, i))
else:
self._assertAllCloseRecursive(a, b, rtol, atol, path, msg)
# Fix Python 3+ compatibility issues
# pylint: disable=invalid-name
# Silence a deprecation warning
assertRaisesRegexp = googletest.TestCase.assertRaisesRegex
# assertItemsEqual is assertCountEqual as of 3.2.
assertItemsEqual = googletest.TestCase.assertCountEqual
# pylint: enable=invalid-name
@contextlib.contextmanager
def _constrain_devices_and_set_default(self, sess, use_gpu, force_gpu):
"""Set the session and its graph to global default and constrain devices."""
if context.executing_eagerly():
yield None
else:
with sess.graph.as_default(), sess.as_default():
if force_gpu:
# Use the name of an actual device if one is detected, or
# '/device:GPU:0' otherwise
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with sess.graph.device(gpu_name):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/device:CPU:0"):
yield sess
def _create_session(self, graph, config, force_gpu):
"""See session() for details."""
def prepare_config(config):
"""Returns a config for sessions.
Args:
config: An optional config_pb2.ConfigProto to use to configure the
session.
Returns:
A config_pb2.ConfigProto object.
"""
# TODO(b/114333779): Enforce allow_soft_placement=False when
# use_gpu=False. Currently many tests rely on the fact that any device
# will be used even when a specific device is supposed to be used.
allow_soft_placement = not force_gpu
if config is None:
config = context.context().config
config.allow_soft_placement = allow_soft_placement
elif not allow_soft_placement and config.allow_soft_placement:
config_copy = context.context().config
config = config_copy
config.allow_soft_placement = False
# Don't perform optimizations for tests so we don't inadvertently run
# gpu ops on cpu
config.graph_options.optimizer_options.opt_level = -1
# Disable Grappler constant folding since some tests & benchmarks
# use constant input and become meaningless after constant folding.
# DO NOT DISABLE GRAPPLER OPTIMIZERS WITHOUT CONSULTING WITH THE
# GRAPPLER TEAM.
config.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF)
config.graph_options.rewrite_options.pin_to_host_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
return config
return ErrorLoggingSession(graph=graph, config=prepare_config(config))
def _get_cached_session(self,
graph=None,
config=None,
force_gpu=False,
crash_if_inconsistent_args=True):
"""See cached_session() for documentation."""
if self._cached_session is None:
sess = self._create_session(
graph=graph, config=config, force_gpu=force_gpu)
self._cached_session = sess
self._cached_graph = graph
self._cached_config = config
self._cached_force_gpu = force_gpu
return sess
else:
if crash_if_inconsistent_args and self._cached_graph is not graph:
raise ValueError("The graph used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
if crash_if_inconsistent_args and self._cached_config is not config:
raise ValueError("The config used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
if crash_if_inconsistent_args and (self._cached_force_gpu is
not force_gpu):
raise ValueError(
"The force_gpu value used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
return self._cached_session
@tf_export("test.create_local_cluster")
def create_local_cluster(num_workers,
num_ps,
protocol="grpc",
worker_config=None,
ps_config=None):
"""Create and start local servers and return the associated `Server` objects.
"PS" stands for "parameter server": a task responsible for storing and
updating the model's parameters. Other tasks send updates to these parameters
as they work on optimizing the parameters. This particular division of labor
between tasks is not required, but is common for distributed training.
Read more at https://www.tensorflow.org/guide/extend/architecture

Figure illustrates the interaction of these components.
"/job:worker/task:0" and "/job:ps/task:0" are both tasks with worker services.
Example:
```python
workers, _ = tf.test.create_local_cluster(num_workers=2, num_ps=2)
worker_sessions = [tf.compat.v1.Session(w.target) for w in workers]
with tf.device("/job:ps/task:0"):
...
with tf.device("/job:ps/task:1"):
...
with tf.device("/job:worker/task:0"):
...
with tf.device("/job:worker/task:1"):
...
worker_sessions[0].run(...)
```
Args:
num_workers: Number of worker servers to start.
num_ps: Number of PS servers to start.
protocol: Communication protocol. Allowed values are documented in the
documentation of `tf.distribute.Server`.
worker_config: (optional) `tf.ConfigProto` to initialize workers. Can be
used to instantiate multiple devices etc.
ps_config: (optional) `tf.ConfigProto` to initialize PS servers.
Returns:
A tuple `(worker_servers, ps_servers)`. `worker_servers` is a list
of `num_workers` objects of type `tf.distribute.Server` (all running
locally);
and `ps_servers` is a list of `num_ps` objects of similar type.
Raises:
ImportError: if portpicker module was not found at load time
"""
import portpicker # pylint: disable=g-import-not-at-top
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {
"worker": ["localhost:%s" % port for port in worker_ports],
"ps": ["localhost:%s" % port for port in ps_ports]
}
cs = server_lib.ClusterSpec(cluster_dict)
workers = [
server_lib.Server(
cs,
job_name="worker",
protocol=protocol,
task_index=ix,
config=worker_config,
start=True) for ix in range(num_workers)
]
ps_servers = [
server_lib.Server(
cs,
job_name="ps",
protocol=protocol,
task_index=ix,
config=ps_config,
start=True) for ix in range(num_ps)
]
return workers, ps_servers
def get_node_def_from_graph(node_name, graph_def):
"""Returns the `NodeDef` instance for given node name in the graph def.
This method explores only the NodeDefs in `graph_def.node`.
Args:
node_name: Name of the NodeDef to search for.
graph_def: An instance of `GraphDef` proto.
Returns:
the `NodeDef` instance whose name field matches the given node_name or None.
"""
for node_def in graph_def.node:
if node_def.name == node_name:
return node_def
return None
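# Illustrative sketch only, not part of the original module: a minimal use of
# get_node_def_from_graph. The node name "demo_const" and the hand-built
# GraphDef below are assumptions made up for this example.
def _example_get_node_def_from_graph():
  graph_def = graph_pb2.GraphDef()
  graph_def.node.add(name="demo_const", op="Const")
  node_def = get_node_def_from_graph("demo_const", graph_def)
  assert node_def is not None and node_def.op == "Const"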
def set_producer_version(graph, producer_version):
"""Sets graph.graph_def_versions.producer to `producer_version`."""
# The C API doesn't expose altering GraphDefVersions. We can indirectly set
# it via import_graph_def though.
graph_def = graph_pb2.GraphDef()
graph_def.versions.producer = producer_version
with graph.as_default():
importer.import_graph_def(graph_def)
  assert graph.graph_def_versions.producer == producer_version
@contextlib.contextmanager
def _fake_gradient_tape_context_manager():
"""tf.gradients(...) implemented as tf.GradientTape context manager interface.
This is useful to test tf.gradients() in tests that uses tf.GradientTape().
Yields:
gradient tape instance that's implemented by tf.gradients() underneath.
"""
try:
class FakeGradientTape:
def watch(self, x):
pass
def gradient(self, y, x, grad_ys=None):
result = gradients_impl.gradients(y, x, grad_ys)
# Unlike `tape.gradient()`, `tf.gradients()` returns a list for a single
# element. So unpack if needed to match `tape.gradient()` behavior.
if not isinstance(x, (list, tuple)):
assert len(result) == 1
return result[0]
return result
yield FakeGradientTape()
finally:
pass
class AbstractGradientTape:
"""Abstract GradientTape context manager that has multiple implementations.
This is useful to test both tf.GradientTape() and tf.gradients() without
duplicating tests.
"""
def __init__(self, use_tape, persistent=False):
self._use_tape = use_tape
self._persistent = persistent
def __enter__(self):
if self._use_tape:
self._tape_impl = backprop.GradientTape(persistent=self._persistent)
else:
self._tape_impl = _fake_gradient_tape_context_manager()
return self._tape_impl.__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
self._tape_impl.__exit__(exc_type, exc_val, exc_tb)
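# Illustrative sketch only, not part of the original module: computing
# d(x*x)/dx through either implementation by toggling use_tape. The helper
# name, the constant value, and the local constant_op import are assumptions
# for this example; the use_tape=False branch relies on tf.gradients and so
# needs graph mode.
def _example_abstract_gradient_tape(use_tape):
  from tensorflow.python.framework import constant_op  # assumed importable
  x = constant_op.constant(3.0)
  with AbstractGradientTape(use_tape=use_tape) as tape:
    tape.watch(x)
    y = x * x
  # Evaluates to 6.0 (directly in eager mode with use_tape=True, or after
  # running the returned tensor in a session with use_tape=False).
  return tape.gradient(y, x)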
@contextlib.contextmanager
def run_functions_eagerly(run_eagerly):
"""Runs functions eagerly if `run_eagerly` is true.
WARNING: Setting `run_eagerly` to True in tests running in V1 graph mode
  *WILL NOT* make the tf.function run eagerly because eager is disabled by
default in V1. Instead, tf.function will run as a traced graph function.
Ensures that the state (for running functions eagerly) is back to the initial
`def_function.RUN_FUNCTIONS_EAGERLY` state.
Args:
run_eagerly: Boolean determining whether to run the function eagerly or not.
Raises:
ValueError if `run_eagerly` is not a boolean.
Yields:
Nothing.
"""
if not isinstance(run_eagerly, bool):
raise ValueError(
"Expected bool for `run_eagerly` but got {}".format(run_eagerly))
is_eager = context.executing_eagerly()
if not is_eager and run_eagerly:
logging.warning(
"Running tf.function eagerly in V1 graph mode is not supported. "
"tf.function will be run as a traced graph function.")
initial_state = def_function.functions_run_eagerly()
def_function.run_functions_eagerly(run_eagerly)
try:
yield
finally:
def_function.run_functions_eagerly(initial_state)
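# Illustrative sketch only, not part of the original module: wrapping a call
# in run_functions_eagerly so a tf.function body executes eagerly (useful for
# debugging). The decorated _square helper is an assumption for this example
# and behaves as described only in TF2 eager mode.
def _example_run_functions_eagerly():
  @def_function.function
  def _square(x):
    return x * x
  with run_functions_eagerly(True):
    _square(2)  # the Python body runs eagerly inside this block
  _square(2)  # traced and executed as a graph function again afterwards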
class TestDelta(object):
"""A utility class to track increments to test counters."""
def __init__(self, name, label):
self.name = name
self.label = label
self.Reset()
def Reset(self):
self.last_value = _test_metrics_util.test_counter_value(
self.name, self.label)
def Get(self):
value = _test_metrics_util.test_counter_value(self.name, self.label)
return value - self.last_value
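# Illustrative sketch only, not part of the original module: the intended
# Reset/Get pattern for TestDelta. The counter name and label below are
# assumptions for this example and must correspond to a counter that the
# exercised code actually increments.
def _example_test_delta_usage():
  delta = TestDelta("/tensorflow/example/counter", "label")
  delta.Reset()
  # ... run the code that is expected to bump the counter ...
  return delta.Get()  # number of increments observed since Reset()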
|
{
"content_hash": "75e86ecdebb34b0e4f38c36ab216175b",
"timestamp": "",
"source": "github",
"line_count": 3926,
"max_line_length": 145,
"avg_line_length": 34.90244523688232,
"alnum_prop": 0.6581111751698571,
"repo_name": "gautam1858/tensorflow",
"id": "653032057c4b0b8e6a8eb31e170960f3dc7a4180",
"size": "137748",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/framework/test_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "47492"
},
{
"name": "C",
"bytes": "1129549"
},
{
"name": "C#",
"bytes": "13496"
},
{
"name": "C++",
"bytes": "116904214"
},
{
"name": "CMake",
"bytes": "165809"
},
{
"name": "Cython",
"bytes": "5003"
},
{
"name": "Dockerfile",
"bytes": "341994"
},
{
"name": "Go",
"bytes": "2052513"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "1053827"
},
{
"name": "JavaScript",
"bytes": "5772"
},
{
"name": "Jupyter Notebook",
"bytes": "787371"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "9549263"
},
{
"name": "Makefile",
"bytes": "2760"
},
{
"name": "Objective-C",
"bytes": "180638"
},
{
"name": "Objective-C++",
"bytes": "295149"
},
{
"name": "Pawn",
"bytes": "5336"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "43775271"
},
{
"name": "Roff",
"bytes": "5034"
},
{
"name": "Ruby",
"bytes": "7854"
},
{
"name": "Shell",
"bytes": "566970"
},
{
"name": "Smarty",
"bytes": "89664"
},
{
"name": "SourcePawn",
"bytes": "8509"
},
{
"name": "Starlark",
"bytes": "6897556"
},
{
"name": "Swift",
"bytes": "78435"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
}
|
"""
CartoDB Spatial Analysis Python Library
See:
https://github.com/CartoDB/crankshaft
"""
from setuptools import setup, find_packages
setup(
name='crankshaft',
version='0.0.01',
description='CartoDB Spatial Analysis Python Library',
url='https://github.com/CartoDB/crankshaft',
author='Data Services Team - CartoDB',
author_email='dataservices@cartodb.com',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
        'Intended Audience :: Mapping community',
'Topic :: Maps :: Mapping Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
],
keywords='maps mapping tools spatial analysis geostatistics',
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
extras_require={
'dev': ['unittest'],
'test': ['unittest', 'nose', 'mock'],
},
# The choice of component versions is dictated by what's
# provisioned in the production servers.
    install_requires=['pysal==1.11.0', 'numpy==1.6.1', 'scipy==0.17.0'],
requires=['pysal', 'numpy'],
test_suite='test'
)
|
{
"content_hash": "4bde03a768b1d1938c0349524004498c",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 70,
"avg_line_length": 24.319148936170212,
"alnum_prop": 0.6281714785651793,
"repo_name": "CartoDB/crankshaft",
"id": "f045b6233584d927b2545dcf5e272fe3a5da8c38",
"size": "1144",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "release/python/0.0.1/crankshaft/setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "104176"
},
{
"name": "Makefile",
"bytes": "5965"
},
{
"name": "PLpgSQL",
"bytes": "2108153"
},
{
"name": "Python",
"bytes": "4215676"
},
{
"name": "Shell",
"bytes": "5663"
}
],
"symlink_target": ""
}
|
"""
Demo platform that offers a fake thermostat.
For more details about this platform, please refer to the documentation
https://home-assistant.io/components/demo/
"""
from homeassistant.components.thermostat import ThermostatDevice
from homeassistant.const import TEMP_CELSIUS, TEMP_FAHRENHEIT
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the Demo thermostats."""
add_devices([
DemoThermostat("Nest", 21, TEMP_CELSIUS, False, 19, False),
DemoThermostat("Thermostat", 68, TEMP_FAHRENHEIT, True, 77, True),
])
# pylint: disable=too-many-arguments, abstract-method
class DemoThermostat(ThermostatDevice):
"""Representation of a demo thermostat."""
def __init__(self, name, target_temperature, unit_of_measurement,
away, current_temperature, is_fan_on):
"""Initialize the thermostat."""
self._name = name
self._target_temperature = target_temperature
self._unit_of_measurement = unit_of_measurement
self._away = away
self._current_temperature = current_temperature
self._is_fan_on = is_fan_on
@property
def should_poll(self):
"""No polling needed for a demo thermostat."""
return False
@property
def name(self):
"""Return the name of the thermostat."""
return self._name
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._unit_of_measurement
@property
def current_temperature(self):
"""Return the current temperature."""
return self._current_temperature
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._target_temperature
@property
def is_away_mode_on(self):
"""Return if away mode is on."""
return self._away
@property
def is_fan_on(self):
"""Return true if the fan is on."""
return self._is_fan_on
def set_temperature(self, temperature):
"""Set new target temperature."""
self._target_temperature = temperature
def turn_away_mode_on(self):
"""Turn away mode on."""
self._away = True
def turn_away_mode_off(self):
"""Turn away mode off."""
self._away = False
def turn_fan_on(self):
"""Turn fan on."""
self._is_fan_on = True
def turn_fan_off(self):
"""Turn fan off."""
self._is_fan_on = False
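# Illustrative sketch only, not part of the original platform: exercising the
# demo thermostat directly, without a running Home Assistant instance. The
# helper name and the values below are assumptions for this example, and it
# presumes ThermostatDevice can be instantiated outside of Home Assistant.
def _example_demo_thermostat():
    thermostat = DemoThermostat("Nest", 21, TEMP_CELSIUS, False, 19, False)
    thermostat.set_temperature(23)
    thermostat.turn_away_mode_on()
    assert thermostat.target_temperature == 23
    assert thermostat.is_away_mode_on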
|
{
"content_hash": "024388fa2c7b0d0281c9107147983e18",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 74,
"avg_line_length": 29.162790697674417,
"alnum_prop": 0.6271929824561403,
"repo_name": "leoc/home-assistant",
"id": "7718299ef6a35c8384ea8d7df3e7d12b28712638",
"size": "2508",
"binary": false,
"copies": "9",
"ref": "refs/heads/dev",
"path": "homeassistant/components/thermostat/demo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1366220"
},
{
"name": "Python",
"bytes": "3636900"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "7255"
}
],
"symlink_target": ""
}
|
import unittest
import synapse.daemon as s_daemon
import synapse.telepath as s_telepath
from synapse.tests.common import *
class Foo:
def bar(self):
return 'baz'
class CryptoTest(SynTest):
def test_crypto_rc4(self):
dmon = s_daemon.Daemon()
        dmon.share('foo', Foo())
        link = dmon.listen('tcp://127.0.0.1:0/foo?rc4key=asdfasdf')
        prox = s_telepath.openlink(link)
        self.assertEqual(prox.bar(), 'baz')
prox.fini()
dmon.fini()
def test_crypto_zerosig(self):
dmon = s_daemon.Daemon()
        dmon.share('foo', Foo())
        link = dmon.listen('tcp://127.0.0.1:0/foo?zerosig=1')
        prox = s_telepath.openlink(link)
        self.assertEqual(prox.bar(), 'baz')
prox.fini()
dmon.fini()
|
{
"content_hash": "e7c14f6e972ea25152623e5a6f191896",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 67,
"avg_line_length": 20.92105263157895,
"alnum_prop": 0.5874213836477987,
"repo_name": "imjonsnooow/synapse",
"id": "8c1eed2b59aaf8e0ec5f990c3d5fd7da3e706808",
"size": "795",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "synapse/tests/test_crypto.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "162309"
}
],
"symlink_target": ""
}
|
#############################################################################
##
## Copyright (C) 2004-2005 Trolltech AS. All rights reserved.
##
## This file is part of the example classes of the Qt Toolkit.
##
## This file may be used under the terms of the GNU General Public
## License version 2.0 as published by the Free Software Foundation
## and appearing in the file LICENSE.GPL included in the packaging of
## this file. Please review the following information to ensure GNU
## General Public Licensing requirements will be met:
## http://www.trolltech.com/products/qt/opensource.html
##
## If you are unsure which license is appropriate for your use, please
## review the following information:
## http://www.trolltech.com/products/qt/licensing.html or contact the
## sales department at sales@trolltech.com.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
#############################################################################
from PySide import QtCore, QtGui
class Window(QtGui.QWidget):
def __init__(self):
super(Window, self).__init__()
echoGroup = QtGui.QGroupBox("Echo")
echoLabel = QtGui.QLabel("Mode:")
echoComboBox = QtGui.QComboBox()
echoComboBox.addItem("Normal")
echoComboBox.addItem("Password")
echoComboBox.addItem("PasswordEchoOnEdit")
echoComboBox.addItem("No Echo")
self.echoLineEdit = QtGui.QLineEdit()
self.echoLineEdit.setFocus()
validatorGroup = QtGui.QGroupBox("Validator")
validatorLabel = QtGui.QLabel("Type:")
validatorComboBox = QtGui.QComboBox()
validatorComboBox.addItem("No validator")
validatorComboBox.addItem("Integer validator")
validatorComboBox.addItem("Double validator")
self.validatorLineEdit = QtGui.QLineEdit()
alignmentGroup = QtGui.QGroupBox("Alignment")
alignmentLabel = QtGui.QLabel("Type:")
alignmentComboBox = QtGui.QComboBox()
alignmentComboBox.addItem("Left")
alignmentComboBox.addItem("Centered")
alignmentComboBox.addItem("Right")
self.alignmentLineEdit = QtGui.QLineEdit()
inputMaskGroup = QtGui.QGroupBox("Input mask")
inputMaskLabel = QtGui.QLabel("Type:")
inputMaskComboBox = QtGui.QComboBox()
inputMaskComboBox.addItem("No mask")
inputMaskComboBox.addItem("Phone number")
inputMaskComboBox.addItem("ISO date")
inputMaskComboBox.addItem("License key")
self.inputMaskLineEdit = QtGui.QLineEdit()
accessGroup = QtGui.QGroupBox("Access")
accessLabel = QtGui.QLabel("Read-only:")
accessComboBox = QtGui.QComboBox()
accessComboBox.addItem("False")
accessComboBox.addItem("True")
self.accessLineEdit = QtGui.QLineEdit()
echoComboBox.activated[int].connect(self.echoChanged)
validatorComboBox.activated[int].connect(self.validatorChanged)
alignmentComboBox.activated[int].connect(self.alignmentChanged)
inputMaskComboBox.activated[int].connect(self.inputMaskChanged)
accessComboBox.activated[int].connect(self.accessChanged)
echoLayout = QtGui.QGridLayout()
echoLayout.addWidget(echoLabel, 0, 0)
echoLayout.addWidget(echoComboBox, 0, 1)
echoLayout.addWidget(self.echoLineEdit, 1, 0, 1, 2)
echoGroup.setLayout(echoLayout)
validatorLayout = QtGui.QGridLayout()
validatorLayout.addWidget(validatorLabel, 0, 0)
validatorLayout.addWidget(validatorComboBox, 0, 1)
validatorLayout.addWidget(self.validatorLineEdit, 1, 0, 1, 2)
validatorGroup.setLayout(validatorLayout)
alignmentLayout = QtGui.QGridLayout()
alignmentLayout.addWidget(alignmentLabel, 0, 0)
alignmentLayout.addWidget(alignmentComboBox, 0, 1)
alignmentLayout.addWidget(self.alignmentLineEdit, 1, 0, 1, 2)
        alignmentGroup.setLayout(alignmentLayout)
inputMaskLayout = QtGui.QGridLayout()
inputMaskLayout.addWidget(inputMaskLabel, 0, 0)
inputMaskLayout.addWidget(inputMaskComboBox, 0, 1)
inputMaskLayout.addWidget(self.inputMaskLineEdit, 1, 0, 1, 2)
inputMaskGroup.setLayout(inputMaskLayout)
accessLayout = QtGui.QGridLayout()
accessLayout.addWidget(accessLabel, 0, 0)
accessLayout.addWidget(accessComboBox, 0, 1)
accessLayout.addWidget(self.accessLineEdit, 1, 0, 1, 2)
accessGroup.setLayout(accessLayout)
layout = QtGui.QGridLayout()
layout.addWidget(echoGroup, 0, 0)
layout.addWidget(validatorGroup, 1, 0)
layout.addWidget(alignmentGroup, 2, 0)
layout.addWidget(inputMaskGroup, 0, 1)
layout.addWidget(accessGroup, 1, 1)
self.setLayout(layout)
self.setWindowTitle("Line Edits")
def echoChanged(self, index):
if index == 0:
self.echoLineEdit.setEchoMode(QtGui.QLineEdit.Normal)
elif index == 1:
self.echoLineEdit.setEchoMode(QtGui.QLineEdit.Password)
elif index == 2:
self.echoLineEdit.setEchoMode(QtGui.QLineEdit.PasswordEchoOnEdit)
elif index == 3:
self.echoLineEdit.setEchoMode(QtGui.QLineEdit.NoEcho)
def validatorChanged(self, index):
if index == 0:
self.validatorLineEdit.setValidator(None)
elif index == 1:
self.validatorLineEdit.setValidator(QtGui.QIntValidator(self.validatorLineEdit))
elif index == 2:
self.validatorLineEdit.setValidator(QtGui.QDoubleValidator(-999.0, 999.0, 2, self.validatorLineEdit))
self.validatorLineEdit.clear()
def alignmentChanged(self, index):
if index == 0:
self.alignmentLineEdit.setAlignment(QtCore.Qt.AlignLeft)
elif index == 1:
self.alignmentLineEdit.setAlignment(QtCore.Qt.AlignCenter)
elif index == 2:
self.alignmentLineEdit.setAlignment(QtCore.Qt.AlignRight)
def inputMaskChanged(self, index):
if index == 0:
self.inputMaskLineEdit.setInputMask('')
elif index == 1:
self.inputMaskLineEdit.setInputMask('+99 99 99 99 99;_')
elif index == 2:
self.inputMaskLineEdit.setInputMask('0000-00-00')
self.inputMaskLineEdit.setText('00000000')
self.inputMaskLineEdit.setCursorPosition(0)
elif index == 3:
self.inputMaskLineEdit.setInputMask('>AAAAA-AAAAA-AAAAA-AAAAA-AAAAA;#')
def accessChanged(self, index):
if index == 0:
self.accessLineEdit.setReadOnly(False)
elif index == 1:
self.accessLineEdit.setReadOnly(True)
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
window = Window()
window.show()
sys.exit(app.exec_())
|
{
"content_hash": "886450f9e18aab6f8084e13171a1ab50",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 113,
"avg_line_length": 38.72826086956522,
"alnum_prop": 0.6422958181307886,
"repo_name": "cherry-wb/SideTools",
"id": "ea42df36b9564d4addaf358c1aefd4545b9c796a",
"size": "7149",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/widgets/lineedits.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "9501"
},
{
"name": "Python",
"bytes": "4071976"
},
{
"name": "Shell",
"bytes": "182"
},
{
"name": "TypeScript",
"bytes": "25292"
}
],
"symlink_target": ""
}
|
from mxnet.test_utils import *
from mxnet.base import MXNetError
import pytest
from common import assertRaises
import random
import warnings
def is_scalar(var):
    return not hasattr(var, "__len__")
def get_result_type(call, dflt_stype):
"""Try to infer result storage type for a sparse matrix and a given unary operation"""
if call is not None and dflt_stype != 'default':
zero = np.zeros(([1]))
result = do_normalize(call(zero))
if not almost_equal(result, zero, equal_nan=True):
expected_result_type = 'default'
else:
if dflt_stype is not None:
                expected_result_type = dflt_stype
else:
expected_result_type = 'default'
else:
expected_result_type = 'default'
return expected_result_type
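# Illustrative sketch only, not part of the original tests: get_result_type
# keeps the sparse storage type only for ops that map zero to zero. The two
# numpy ops and the helper name below are assumptions chosen for this example.
def _example_get_result_type():
    assert get_result_type(np.sin, 'row_sparse') == 'row_sparse'  # sin(0) == 0
    assert get_result_type(np.cos, 'row_sparse') == 'default'     # cos(0) == 1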
def get_result_type_with_scalar(call, dflt_stype):
    """Try to infer result storage type when operating on a sparse matrix and a scalar"""
if call is not None and dflt_stype != 'default':
zero = np.zeros(([1]))
result = call(zero, 5)
if not almost_equal(result, zero, equal_nan=True):
expected_result_type = 'default'
else:
if dflt_stype is not None:
                expected_result_type = dflt_stype
else:
expected_result_type = 'default'
else:
expected_result_type = 'default'
return expected_result_type
def get_result_type_2(call, dflt_stype):
"""Try to infer result storage type when operating on two sparse matrices"""
if call is not None and dflt_stype != 'default':
zero = np.zeros(([1]))
need_default = False
for outer in [zero, np.ones(zero.shape)]:
for inner in [zero, np.ones(zero.shape)]:
result = do_normalize(call(outer, inner))
if not almost_equal(result, zero, equal_nan=True):
need_default = True
break
if need_default is True:
break
if not need_default and dflt_stype is not None:
expected_result_type = dflt_stype
else:
expected_result_type = 'default'
else:
expected_result_type = 'default'
return expected_result_type
def get_result_type_3(call, dflt_stype):
"""Try to infer result storage type when operating on three sparse matrices"""
if call is not None and dflt_stype != 'default':
zero = np.zeros(([1]))
need_default = False
for moon in [zero]:
for outer in [zero]:
for inner in [zero]:
res_1, res_2 = call(moon, outer, inner)
result = do_normalize(res_1)
if not almost_equal(result, zero, equal_nan=True):
need_default = True
break
result = do_normalize(res_2)
if not almost_equal(result, zero, equal_nan=True):
need_default = True
break
if need_default is True:
break
if need_default is True:
break
if not need_default and dflt_stype is not None:
expected_result_type = dflt_stype
else:
expected_result_type = 'default'
else:
expected_result_type = 'default'
return expected_result_type
def get_fw_bw_result_types(forward_numpy_call, fwd_res_dflt,
backward_numpy_call, bwd_res_dflt):
return (get_result_type(forward_numpy_call, fwd_res_dflt),
get_result_type(backward_numpy_call, bwd_res_dflt))
def get_fw_bw_result_types_2(forward_numpy_call, fwd_res_dflt,
backward_numpy_call, bwd_res_dflt):
return (get_result_type(forward_numpy_call, fwd_res_dflt),
get_result_type_2(backward_numpy_call, bwd_res_dflt))
def get_fw_bw_result_types_with_scalar(forward_numpy_call, fwd_res_dflt,
backward_numpy_call, bwd_res_dflt):
return (get_result_type_with_scalar(forward_numpy_call, fwd_res_dflt),
get_result_type_with_scalar(backward_numpy_call, bwd_res_dflt))
def gen_rsp_random_indices(shape, density=.5, force_indices=None):
assert density >= 0 and density <= 1
indices = set()
if force_indices is not None:
for val in force_indices:
indices.add(int(val))
if not np.isclose(density, .0, rtol=1.e-3, atol=1.e-3, equal_nan=True) and len(shape) > 0:
row_count = shape[0]
for i in range(row_count):
r = random.uniform(0, 1)
if r <= density and len(indices) < shape[0]:
indices.add(i)
assert len(indices) <= shape[0]
return list(indices)
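# Illustrative sketch only, not part of the original tests: generating the row
# indices used to build a row_sparse array. The shape, density, forced index
# and helper name below are assumptions for this example.
def _example_gen_rsp_random_indices():
    indices = gen_rsp_random_indices((6, 4), density=0.5, force_indices=[3])
    assert 3 in indices                      # the forced row is always present
    assert all(0 <= i < 6 for i in indices)  # indices stay within the row count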
def all_zero(var):
return 0
@pytest.mark.skip(reason="https://github.com/apache/incubator-mxnet/issues/18740")
def test_elemwise_binary_ops():
# skip testing on GPU because only CPU ops are implemented
    if default_device().device_type == 'gpu':
return
def test_elemwise_binary_op(name, lhs_stype, rhs_stype, shape,
forward_mxnet_call, forward_numpy_call, backward_numpy_call,
lhs_grad_stype,
rhs_grad_stype,
expected_result_storage_type=None,
modifier_func=None,
lhs_density=.5,
rhs_density=.5,
force_lr_overlap=False,
force_grad_overlap=False,
ograd_density=0.0,
skip_gradient_check=False,
shuffle_csr_indices=True,
verbose=False):
if lhs_grad_stype is None:
lhs_grad_stype = lhs_stype
if rhs_grad_stype is None:
rhs_grad_stype = rhs_stype
lhs_grad_stype = get_result_type_3(backward_numpy_call, lhs_grad_stype)
rhs_grad_stype = get_result_type_3(backward_numpy_call, rhs_grad_stype)
if verbose is True:
print("testing: {} lhs={}, rhs={}, lhs_grad_stype={}, rhs_grad_stype={}"
.format(name, lhs_stype, rhs_stype, lhs_grad_stype, rhs_grad_stype))
# Output type should be same as lvalue type, unless otherwise specified
if expected_result_storage_type is None:
if lhs_stype == 'default' or rhs_stype == 'default':
expected_result_storage_type = 'default'
else:
expected_result_storage_type = lhs_stype
lhs = mx.symbol.Variable('lhs', stype=lhs_stype)
rhs = mx.symbol.Variable('rhs', stype=rhs_stype)
grad_stypes = dict()
grad_stypes['lhs'] = lhs_grad_stype
grad_stypes['rhs'] = rhs_grad_stype
if lhs_stype == 'default':
lhs_nd = rand_ndarray(shape, 'default')
if abs(lhs_density) < 1e-4:
func = all_zero
else:
func = modifier_func
lhs_nd = mx.nd.array(assign_each(lhs_nd.asnumpy(), func))
else:
lhs_nd = create_sparse_array_zd(
shape, lhs_stype, density=lhs_density,
modifier_func=modifier_func,
shuffle_csr_indices=shuffle_csr_indices,
rsp_indices=gen_rsp_random_indices(
shape,
density=lhs_density,
force_indices=[(shape[0]/2)] if force_lr_overlap is True else None
))
if rhs_stype == 'default':
rhs_nd = rand_ndarray(shape, 'default')
if abs(rhs_density) < 1e-4:
func = all_zero
else:
func = modifier_func
rhs_nd = mx.nd.array(assign_each(rhs_nd.asnumpy(), func))
else:
rhs_nd = create_sparse_array_zd(
shape, rhs_stype, density=rhs_density,
modifier_func=modifier_func,
shuffle_csr_indices=shuffle_csr_indices,
rsp_indices=gen_rsp_random_indices(
shape,
density=rhs_density,
force_indices=[(shape[0]/2)] if force_lr_overlap is True else None
))
lhs_np = lhs_nd.asnumpy()
rhs_np = rhs_nd.asnumpy()
if verbose is True:
print("lhs input: {}".format(lhs_np))
print("rhs input: {}".format(rhs_np))
out_np = forward_numpy_call(lhs_np, rhs_np)
if verbose is True:
print("out_np: {}".format(out_np))
test = forward_mxnet_call(lhs, rhs)
location = {'lhs': lhs_nd, 'rhs': rhs_nd}
outputs = check_symbolic_forward(test, location, [out_np], equal_nan=True)
assert len(outputs) == 1
assert outputs[0].stype == expected_result_storage_type
if verbose is True:
            print("mx forward output: ", outputs[0].asnumpy())
            print("lhs_nd: ", lhs_nd.stype)
            print("rhs_nd: ", rhs_nd.stype)
            print("forward output: ", outputs[0].stype)
if outputs[0].stype != 'default':
out_grad = create_sparse_array_zd(
shape, outputs[0].stype, density=ograd_density,
data_init=1,
modifier_func=lambda x: 2,
shuffle_csr_indices=shuffle_csr_indices,
rsp_indices=gen_rsp_random_indices(
shape,
density=ograd_density,
force_indices=[(shape[0]/2)] if force_grad_overlap is True else None
))
else:
if abs(ograd_density) < 1e-4:
out_grad = mx.nd.array(np.zeros(shape))
else:
out_grad = mx.nd.array(np.ones(shape))
out_grad_np = out_grad.asnumpy()
if verbose is True:
print("out_grad_np", out_grad_np)
ingrad_lhs_np, ingrad_rhs_np = backward_numpy_call(out_grad_np, lhs_np, rhs_np)
if verbose is True:
print("out_grad", out_grad.asnumpy())
print("ingrad_lhs_np", ingrad_lhs_np)
print("ingrad_rhs_np", ingrad_rhs_np)
igrads_result = check_symbolic_backward(test, location, [out_grad],
[ingrad_lhs_np, ingrad_rhs_np],
grad_stypes=grad_stypes,
equal_nan=True)
if verbose is True:
print("ingrad_lhs", igrads_result['lhs'].asnumpy())
print("ingrad_rhs", igrads_result['rhs'].asnumpy())
assert len(igrads_result) == 2
if lhs_grad_stype is not None:
assert igrads_result['lhs'].stype == lhs_grad_stype
if rhs_grad_stype is not None:
assert igrads_result['rhs'].stype == rhs_grad_stype
if not skip_gradient_check:
check_numeric_gradient(test, location,
grad_stype_dict=grad_stypes)
def check_all(l, r, check_function):
assert l.shape == r.shape
return check_function(l, r)
def gt(l, r):
return check_all(l, r, lambda a, b: a > b)
def ge(l, r):
return check_all(l, r, lambda a, b: a >= b)
def lt(l, r):
return check_all(l, r, lambda a, b: a < b)
def le(l, r):
return check_all(l, r, lambda a, b: a <= b)
def elemwise_mul_stype(lstype, rstype):
if lstype == rstype:
return lstype
elif lstype == 'default' and rstype == 'row_sparse':
return 'row_sparse'
elif lstype == 'row_sparse' and rstype == 'default':
return 'row_sparse'
else:
return 'default'
def elemwise_mul_lhs_grad_stype(lstype, rstype):
return elemwise_mul_stype(elemwise_mul_stype(lstype, rstype), rstype)
def elemwise_mul_rhs_grad_stype(lstype, rstype):
return elemwise_mul_stype(elemwise_mul_stype(lstype, rstype), lstype)
def check_elemwise_binary_ops(lhs_stype, rhs_stype, shape,
lhs_grad_stype=None, rhs_grad_stype=None,
lhs_density=.5, rhs_density=.5,
force_lr_overlap=False,
force_grad_overlap=False,
ograd_density=0.0):
test_elemwise_binary_op("elemwise_add", lhs_stype, rhs_stype, shape,
lambda l, r: mx.sym.sparse.elemwise_add(l, r),
lambda l, r: l + r,
lambda outg, l, r: (outg, outg),
lhs_grad_stype, rhs_grad_stype,
ograd_density=ograd_density,
force_lr_overlap=force_lr_overlap,
force_grad_overlap=force_grad_overlap,
lhs_density=lhs_density, rhs_density=rhs_density,
verbose=False)
        if ((lhs_stype == 'default' and rhs_stype == 'row_sparse') or
            (lhs_stype == 'row_sparse' and rhs_stype == 'row_sparse') and (rhs_density == 0.0)):
test_elemwise_binary_op("elemwise_add", lhs_stype, rhs_stype, shape,
lambda l, r: mx.sym.sparse.elemwise_add(l, r, out=l),
lambda l, r: l + r,
lambda outg, l, r: (outg, outg),
lhs_grad_stype, rhs_grad_stype,
ograd_density=ograd_density,
force_lr_overlap=force_lr_overlap,
force_grad_overlap=force_grad_overlap,
lhs_density=lhs_density, rhs_density=rhs_density,
verbose=False)
test_elemwise_binary_op("elemwise_sub", lhs_stype, rhs_stype, shape,
lambda l, r: mx.sym.sparse.elemwise_sub(l, r, out=l),
lambda l, r: l - r,
lambda outg, l, r: (outg, -outg),
lhs_grad_stype, rhs_grad_stype,
ograd_density=ograd_density,
force_lr_overlap=force_lr_overlap,
force_grad_overlap=force_grad_overlap,
lhs_density=lhs_density, rhs_density=rhs_density,
verbose=False)
        if ((lhs_stype == 'row_sparse' and rhs_stype == 'row_sparse') and (lhs_density == 0.0)):
test_elemwise_binary_op("elemwise_add", lhs_stype, rhs_stype, shape,
lambda l, r: mx.sym.sparse.elemwise_add(l, r, out=r),
lambda l, r: l + r,
lambda outg, l, r: (outg, outg),
lhs_grad_stype, rhs_grad_stype,
ograd_density=ograd_density,
force_lr_overlap=force_lr_overlap,
force_grad_overlap=force_grad_overlap,
lhs_density=lhs_density, rhs_density=rhs_density,
verbose=False)
test_elemwise_binary_op("elemwise_sub", lhs_stype, rhs_stype, shape,
lambda l, r: mx.sym.sparse.elemwise_sub(l, r, out=l),
lambda l, r: l - r,
lambda outg, l, r: (outg, -outg),
lhs_grad_stype, rhs_grad_stype,
ograd_density=ograd_density,
force_lr_overlap=force_lr_overlap,
force_grad_overlap=force_grad_overlap,
lhs_density=lhs_density, rhs_density=rhs_density,
verbose=False)
test_elemwise_binary_op("elemwise_sub", lhs_stype, rhs_stype, shape,
lambda l, r: mx.sym.sparse.elemwise_sub(l, r),
lambda l, r: l - r,
lambda outg, l, r: (outg, -outg),
lhs_grad_stype, rhs_grad_stype,
ograd_density=ograd_density,
force_lr_overlap=force_lr_overlap,
force_grad_overlap=force_grad_overlap,
lhs_density=lhs_density,
rhs_density=rhs_density,
verbose=False)
test_elemwise_binary_op("elemwise_mul", lhs_stype, rhs_stype, shape,
lambda l, r: mx.sym.sparse.elemwise_mul(l, r),
lambda l, r: l * r,
lambda outg, l, r: (outg * r, outg * l),
elemwise_mul_lhs_grad_stype(lhs_stype, rhs_stype),
elemwise_mul_rhs_grad_stype(lhs_stype, rhs_stype),
expected_result_storage_type=elemwise_mul_stype(lhs_stype, rhs_stype),
ograd_density=ograd_density,
force_lr_overlap=force_lr_overlap,
force_grad_overlap=force_grad_overlap,
lhs_density=lhs_density, rhs_density=rhs_density,
verbose=False)
test_elemwise_binary_op("elemwise_div", lhs_stype, rhs_stype, shape,
lambda l, r: mx.sym.sparse.elemwise_div(l, r),
lambda l, r: l / r,
lambda outg, l, r: (outg * (1/r), outg * (-l/(r*r))),
lhs_grad_stype, rhs_grad_stype,
modifier_func=lambda a: a if abs(a) > 0.25 else abs(a) + 1,
force_lr_overlap=force_lr_overlap,
force_grad_overlap=force_grad_overlap,
lhs_density=lhs_density, rhs_density=rhs_density,
ograd_density=ograd_density,
expected_result_storage_type='default',
skip_gradient_check=True,
verbose=False)
test_elemwise_binary_op("maximum", lhs_stype, rhs_stype, shape,
lambda l, r: mx.sym._internal._maximum(l, r),
lambda l, r: np.maximum(l, r),
lambda outg, l, r: (outg * ge(l, r), outg * lt(l, r)),
lhs_grad_stype, rhs_grad_stype,
modifier_func=lambda a: a if abs(a) > 0.25 else abs(a) + 1,
force_lr_overlap=force_lr_overlap,
force_grad_overlap=force_grad_overlap,
lhs_density=lhs_density, rhs_density=rhs_density,
skip_gradient_check=True,
ograd_density=ograd_density,
verbose=False)
test_elemwise_binary_op("minimum", lhs_stype, rhs_stype, shape,
lambda l, r: mx.sym._internal._minimum(l, r),
lambda l, r: np.minimum(l, r),
lambda outg, l, r: (outg * le(l, r), outg * gt(l, r)),
lhs_grad_stype, rhs_grad_stype,
modifier_func=lambda a: a if abs(a) > 0.25 else abs(a) + 1,
force_lr_overlap=force_lr_overlap,
force_grad_overlap=force_grad_overlap,
lhs_density=lhs_density, rhs_density=rhs_density,
ograd_density=ograd_density,
skip_gradient_check=True,
verbose=False)
test_elemwise_binary_op("hypot", lhs_stype, rhs_stype, shape,
lambda l, r: mx.sym._internal._hypot(l, r),
lambda l, r: np.hypot(l, r),
lambda outg, l, r: (
outg * assign_each2(
l, r, lambda a, b: a/np.sqrt(a * a + b * b)),
outg * assign_each2(
l, r, lambda a, b: b/np.sqrt(a * a + b * b))
),
lhs_grad_stype, rhs_grad_stype,
force_lr_overlap=force_lr_overlap,
force_grad_overlap=force_grad_overlap,
lhs_density=lhs_density, rhs_density=rhs_density,
ograd_density=ograd_density,
skip_gradient_check=True,
verbose=False)
# Run basic tests
with warnings.catch_warnings():
warnings.simplefilter("ignore")
for _ in range(1):
# Run defaults
check_elemwise_binary_ops('default', 'default', rand_shape_2d(5, 5))
# Try different densities
shape = rand_shape_2d(5, 5)
for lhs_density in [0.0, random.uniform(0, 1), 1.0]:
for rhs_density in [0.0, random.uniform(0, 1), 1.0]:
for ograd_density in [0.0, random.uniform(0, 1), 1.0]:
print("lhs_density={}, rhs_density={}, ograd_density={}, shape: {}".format(
lhs_density, rhs_density, ograd_density, shape))
# Try row_sparse overlaps
for force_lr_overlap in [False, True]:
for force_grad_overlap in [False, True]:
print(" force_lr_overlap={}, force_grad_overlap={}, shape={}".
format(force_lr_overlap, force_grad_overlap, shape))
                                # Back to left-right overlap possibilities
check_elemwise_binary_ops('row_sparse', 'row_sparse', shape,
lhs_grad_stype='row_sparse',
rhs_grad_stype='row_sparse',
lhs_density=lhs_density,
rhs_density=rhs_density,
force_lr_overlap=force_lr_overlap,
force_grad_overlap=force_grad_overlap,
ograd_density=ograd_density)
def test_elemwise_csr_same_zeros():
# Zeroes
a = mx.nd.sparse.zeros('csr', (1,1))
b = mx.nd.elemwise_add(a,a)
res = a.asnumpy() + a.asnumpy()
assert_almost_equal(b.asnumpy(), res)
def as_dense(arr):
if arr.stype != 'default':
return mx.nd.cast_storage(arr, stype='default')
else:
        return arr
# Make sure that 0's look like 0's when we do a comparison
def do_normalize(arr):
ret = arr.copy()
idx = np.isclose(arr, -0, rtol=1.e-3, atol=1.e-3, equal_nan=True)
ret[idx] = 0
return ret
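# Illustrative sketch only, not part of the original tests: do_normalize maps
# negative zero (and anything within its tolerance of zero) to exactly 0 so
# that later comparisons treat -0.0 and 0.0 the same. The array and helper
# name below are assumptions for this example.
def _example_do_normalize():
    arr = np.array([-0.0, 1.0, -2.0])
    normalized = do_normalize(arr)
    assert str(normalized[0]) != '-0.0'       # the sign of zero is dropped
    assert (normalized[1:] == arr[1:]).all()  # other values are unchanged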
def check_sparse_mathematical_core(name, stype,
forward_mxnet_call, forward_numpy_call, backward_numpy_call=None,
rhs_arg=None, data_init=9., grad_init=2., output_grad_stype=None,
input_grad_stype=None, force_overlap=False, density=.5,
ograd_density=.5, verbose=False, shuffle_csr_indices=True):
if verbose is True:
print("TESTING: " + name)
data = mx.symbol.Variable('data', stype=stype)
temp_input_grad_stype = input_grad_stype
if temp_input_grad_stype is None:
temp_input_grad_stype = stype
if rhs_arg is not None:
if is_scalar(rhs_arg):
expected_result_type, expected_grad_result_type = \
get_fw_bw_result_types_with_scalar(forward_numpy_call, stype,
backward_numpy_call, temp_input_grad_stype)
else:
expected_result_type, expected_grad_result_type = \
get_fw_bw_result_types_2(forward_numpy_call, stype,
backward_numpy_call, temp_input_grad_stype)
else:
expected_result_type, expected_grad_result_type = \
get_fw_bw_result_types(forward_numpy_call, stype,
backward_numpy_call, temp_input_grad_stype)
if input_grad_stype is not None and input_grad_stype != expected_grad_result_type:
print("{}: explicit override of deduced input grad type '{}' with '{}'".format(
name, expected_grad_result_type, input_grad_stype))
expected_grad_result_type = input_grad_stype
shape = rand_shape_2d()
if verbose is True:
print("Shape: ", shape, "density: ", density, "force_overlap", force_overlap)
if stype == 'default':
data_tmp = np.zeros(shape)
if abs(density) >= 1e-4:
data_tmp[:] = data_init
arr_data = mx.nd.array(data_tmp)
else:
arr_data = create_sparse_array_zd(
shape, stype, density=density,
data_init=data_init,
shuffle_csr_indices=shuffle_csr_indices,
rsp_indices=gen_rsp_random_indices(
shape,
density=density,
force_indices=[(shape[0]/2)] if force_overlap is True else None
)
)
data_tmp = arr_data.asnumpy()
if verbose is True:
print("arr_data indices", arr_data.indices.asnumpy())
if verbose is True:
print("input", data_tmp)
if backward_numpy_call is None:
arr_grad = None
elif expected_grad_result_type == 'default':
if abs(density) < 1e-4:
arr_grad = mx.nd.zeros(shape)
else:
arr_grad = mx.nd.ones(shape)
else:
arr_grad = create_sparse_array_zd(
shape,
expected_grad_result_type,
density=density,
data_init=1,
shuffle_csr_indices=shuffle_csr_indices,
rsp_indices=gen_rsp_random_indices(
shape,
density=density,
force_indices=[(shape[0]/2)] if force_overlap is True else None
)
)
if rhs_arg is not None:
test = forward_mxnet_call(data, rhs_arg)
else:
test = forward_mxnet_call(data)
args = list()
args.append(arr_data)
if arr_grad is not None:
exe_test = test._bind(default_device(), args=args, args_grad=[arr_grad])
else:
exe_test = test._bind(default_device(), args=args)
exe_test.forward(is_train=True)
assert exe_test.outputs[0].stype == expected_result_type
out = exe_test.outputs[0].asnumpy()
if rhs_arg is not None:
npout = forward_numpy_call(data_tmp, rhs_arg)
else:
npout = forward_numpy_call(data_tmp)
if verbose is True:
print("out", out)
print("npout", npout)
assert_almost_equal(out, npout, equal_nan=True)
if backward_numpy_call is not None:
if output_grad_stype == 'default' or output_grad_stype is None:
out_grad = mx.nd.empty(shape)
out_grad[:] = grad_init
else:
out_grad = create_sparse_array_zd(
shape, output_grad_stype,
density=density,
data_init=grad_init,
shuffle_csr_indices=shuffle_csr_indices,
rsp_indices=gen_rsp_random_indices(
shape,
density=ograd_density,
force_indices=[(shape[0]/2)] if force_overlap is True else None))
npout_grad = out_grad.asnumpy()
if verbose is True:
print("npout_grad", npout_grad)
if rhs_arg is not None:
temp = backward_numpy_call(data_tmp, rhs_arg)
else:
temp = backward_numpy_call(data_tmp)
input_grad = npout_grad * temp
if verbose is True:
print(arr_grad.asnumpy())
exe_test.backward(out_grad)
if verbose is True:
print(arr_grad.asnumpy())
assert arr_grad.stype == expected_grad_result_type
if verbose is True:
print(name)
print("arr_grad", arr_grad.asnumpy())
print("input_grad", input_grad)
assert_almost_equal(arr_grad, input_grad, equal_nan=True)
@pytest.mark.serial
@pytest.mark.skip(reason='https://github.com/apache/incubator-mxnet/issues/18829')
def test_sparse_mathematical_core():
def util_sign(a):
if np.isclose(a, -0, rtol=1.e-3, atol=1.e-3, equal_nan=True):
return 0
elif np.isclose(a, 0, rtol=1.e-3, atol=1.e-3, equal_nan=True):
return 0
elif a < 0.0:
return -1
else: # a > 0.0:
return 1
# Check scalar binary operators
def check_binary_op_with_scalar(stype,
output_grad_stype=None,
input_grad_stype=None,
density=.5, ograd_density=.5,
force_overlap=False,):
# mul_scalar
check_sparse_mathematical_core("mul_scalar", stype,
lambda x, y: x * y,
lambda x, y: x * y,
lambda input, rhs: rhs,
rhs_arg=5.0,
data_init=2, grad_init=3,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
density=density, ograd_density=ograd_density,
force_overlap=force_overlap,
verbose=False)
# plus_scalar
check_sparse_mathematical_core("plus_scalar", stype,
lambda x, y: x + y,
lambda x, y: x + y,
lambda input, rhs: 1,
rhs_arg=5.0,
data_init=2, grad_init=3,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
density=density, ograd_density=ograd_density,
force_overlap=force_overlap,
verbose=False)
# minus_scalar
check_sparse_mathematical_core("minus_scalar", stype,
lambda x, y: x - y,
lambda x, y: x - y,
lambda input, rhs: 1,
rhs_arg=5.0,
data_init=2, grad_init=3,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
density=density, ograd_density=ograd_density,
force_overlap=force_overlap,
verbose=False)
# Check many basic unary operators
def check_mathematical_core(stype, output_grad_stype=None,
input_grad_stype=None, force_overlap=False,
density=.5, ograd_density=.5):
# negative
check_sparse_mathematical_core("negative", stype,
lambda x: mx.sym.sparse.negative(x),
lambda x: np.negative(x),
force_overlap=force_overlap,
density=density,
input_grad_stype=input_grad_stype,
ograd_density=ograd_density)
# square
check_sparse_mathematical_core("square", stype,
lambda x: mx.sym.sparse.square(x),
lambda x: np.square(x),
lambda x: 2 * x,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density,
verbose=False)
# sqrt
check_sparse_mathematical_core("sqrt", stype,
lambda x: mx.sym.sparse.sqrt(x),
lambda x: np.sqrt(x),
lambda x: 1.0/(2.0 * np.sqrt(x)),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density,
verbose=False)
# cbrt
check_sparse_mathematical_core("cbrt", stype,
lambda x: mx.sym.sparse.cbrt(x),
lambda x: np.cbrt(x),
lambda x: 1.0/(3.0 * np.cbrt(x) * np.cbrt(x)),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density,
verbose=False)
# rint
check_sparse_mathematical_core("rint", stype,
lambda x: mx.sym.sparse.rint(x),
lambda x: np.rint(x),
force_overlap=force_overlap, density=density,
input_grad_stype=input_grad_stype,
ograd_density=ograd_density)
# fix
check_sparse_mathematical_core("fix", stype,
lambda x: mx.sym.sparse.fix(x),
lambda x: np.fix(x),
force_overlap=force_overlap, density=density,
input_grad_stype=input_grad_stype,
ograd_density=ograd_density)
# floor
check_sparse_mathematical_core("floor", stype, lambda x: mx.sym.sparse.floor(x),
lambda x: np.floor(x),
force_overlap=force_overlap,
input_grad_stype=input_grad_stype,
density=density, ograd_density=ograd_density)
# ceil
check_sparse_mathematical_core("ceil", stype,
lambda x: mx.sym.sparse.ceil(x),
lambda x: np.ceil(x),
force_overlap=force_overlap,
input_grad_stype=input_grad_stype,
density=density, ograd_density=ograd_density)
# round
check_sparse_mathematical_core("round", stype,
lambda x: mx.sym.sparse.round(x),
lambda x: np.round(x),
force_overlap=force_overlap,
input_grad_stype=input_grad_stype,
density=density, ograd_density=ograd_density)
# trunc
check_sparse_mathematical_core("trunc", stype,
lambda x: mx.sym.sparse.trunc(x),
lambda x: np.trunc(x),
force_overlap=force_overlap,
input_grad_stype=input_grad_stype,
density=density, ograd_density=ograd_density)
# sign
check_sparse_mathematical_core("sign", stype,
lambda x: mx.sym.sparse.sign(x),
lambda x: np.sign(x),
lambda x: np.zeros(x.shape),
output_grad_stype=output_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# log1p
check_sparse_mathematical_core("log1p", stype,
lambda x: mx.sym.sparse.log1p(x),
lambda x: np.log1p(x),
lambda x: 1. / (1.0 + x),
data_init=0.5, grad_init=0.5,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap, density=density,
ograd_density=ograd_density)
# expm1
check_sparse_mathematical_core("expm1", stype,
lambda x: mx.sym.sparse.expm1(x),
lambda x: np.expm1(x),
lambda x: np.exp(x),
data_init=0.5, grad_init=0.5,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap, density=density,
ograd_density=ograd_density)
# sin
check_sparse_mathematical_core("sin", stype,
lambda x: mx.sym.sparse.sin(x),
lambda x: np.sin(x),
lambda x: np.cos(x),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# tan
check_sparse_mathematical_core("tan", stype,
lambda x: mx.sym.sparse.tan(x),
lambda x: np.tan(x),
lambda x: np.tan(x) ** 2 + 1,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
density=density,
ograd_density=ograd_density)
# arcsin
check_sparse_mathematical_core("arcsin", stype,
lambda x: mx.sym.sparse.arcsin(x),
lambda x: np.arcsin(x),
lambda x: 1. / (1. - x ** 2) ** (1. / 2.),
data_init=0.5, grad_init=0.5,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# arctan
check_sparse_mathematical_core("arctan", stype,
lambda x: mx.sym.sparse.arctan(x),
lambda x: np.arctan(x),
lambda x: 1. / (x ** 2. + 1.),
data_init=0.5, grad_init=0.5,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# degrees
check_sparse_mathematical_core("degrees", stype,
lambda x: mx.sym.sparse.degrees(x),
lambda x: np.degrees(x),
lambda x: assign_each(x, lambda a: 180./np.pi),
data_init=0.5, grad_init=0.5,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# radians
check_sparse_mathematical_core("radians", stype,
lambda x: mx.sym.sparse.radians(x),
lambda x: np.radians(x),
lambda x: assign_each(x, lambda a: np.pi / 180.),
data_init=0.6, grad_init=1,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# sinh
check_sparse_mathematical_core("sinh", stype,
lambda x: mx.sym.sparse.sinh(x),
lambda x: np.sinh(x),
lambda x: np.cosh(x),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# tanh
check_sparse_mathematical_core("tanh", stype,
lambda x: mx.sym.sparse.tanh(x),
lambda x: np.tanh(x),
lambda x: 1. - np.tanh(x) ** 2,
data_init=0.5, grad_init=1,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap, density=density,
ograd_density=ograd_density)
# arcsinh
check_sparse_mathematical_core("arcsinh", stype,
lambda x: mx.sym.sparse.arcsinh(x),
lambda x: np.arcsinh(x),
lambda x: 1./(x**2 + 1.)**(1./2.),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap, density=density,
ograd_density=ograd_density)
# arctanh
check_sparse_mathematical_core("arctanh", stype,
lambda x: mx.sym.sparse.arctanh(x),
lambda x: np.arctanh(x),
lambda x: -1./(x**2 - 1.),
data_init=0.5,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap, density=density,
ograd_density=ograd_density)
# abs
check_sparse_mathematical_core("abs", stype,
lambda x: mx.sym.sparse.abs(x),
lambda x: np.abs(x),
lambda x: assign_each(x, function=util_sign),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
if stype != "csr":
# rsqrt
check_sparse_mathematical_core("rsqrt", stype,
lambda x: mx.sym.sparse.rsqrt(x),
lambda x: 1 / np.sqrt(x),
lambda x: -(1.0 / (2.0 * x * np.sqrt(x))),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# cos
check_sparse_mathematical_core("cos", stype,
lambda x: mx.sym.sparse.cos(x),
lambda x: np.cos(x),
lambda x: -np.sin(x),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# arccos
check_sparse_mathematical_core("arccos", stype,
lambda x: mx.sym.sparse.arccos(x),
lambda x: np.arccos(x),
lambda x: -1. / (1. - x ** 2.) ** (1. / 2.),
data_init=0.5, grad_init=0.5,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap, density=density,
ograd_density=ograd_density)
# cosh
check_sparse_mathematical_core("cosh", stype,
lambda x: mx.sym.sparse.cosh(x),
lambda x: np.cosh(x),
lambda x: np.sinh(x),
data_init=5, grad_init=5,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# arccosh
check_sparse_mathematical_core("arccosh", stype,
lambda x: mx.sym.sparse.arccosh(x),
lambda x: np.arccosh(x),
lambda x: 1./(x**2 - 1.)**(1./2.),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap, density=density,
ograd_density=ograd_density)
# log10
check_sparse_mathematical_core("log10", stype,
lambda x: mx.sym.sparse.log10(x),
lambda x: np.log10(x),
lambda x: 1. / (x * np.log(10.)),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap, density=density,
ograd_density=ograd_density)
# log2
check_sparse_mathematical_core("log2", stype,
lambda x: mx.sym.sparse.log2(x),
lambda x: np.log2(x),
lambda x: 1. / (x * np.log(2.)),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap, density=density,
ograd_density=ograd_density)
try:
from scipy import special as scipy_special
# On scipy v1.0, psi([0, -1, -2, -3, ...]) = [ inf, inf, inf, inf, ...]
# On scipy v1.1, psi([0, -1, -2, -3, ...]) = [-inf, nan, nan, nan, ...]
# Map the behavior of v1.1 psi() to that of v1.0 for ints <= 0 for consistency
scipy_psi = np.vectorize(lambda x: np.inf if float(x).is_integer() and x <= 0 else
scipy_special.psi(x))
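                # e.g. under this mapping scipy_psi(0.0) and scipy_psi(-2.0) both give np.inf
                # (the scipy v1.0 convention), while scipy_psi(0.5) is simply scipy_special.psi(0.5).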
# gamma
check_sparse_mathematical_core("gamma", stype,
lambda x: mx.sym.sparse.gamma(x),
lambda x: scipy_special.gamma(x),
lambda x: scipy_special.gamma(x) * scipy_psi(x),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# gammaln
check_sparse_mathematical_core("gammaln", stype,
lambda x: mx.sym.sparse.gammaln(x),
lambda x: scipy_special.gammaln(x),
lambda x: scipy_psi(x),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
except ImportError:
print("Could not import scipy. Skipping unit tests for special functions")
for i in range(1):
print("pass", i)
for density in [0.0, random.uniform(0, 1), 1.0]:
for ograd_density in [0.0, random.uniform(0, 1), 1.0]:
for force_overlap in [False, True]:
print("{}, {}, {}".format(density, ograd_density, force_overlap))
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# Check unary ops (unary fwd, binary bwd)
check_mathematical_core('default', force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
check_mathematical_core('row_sparse', force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
check_mathematical_core('row_sparse', output_grad_stype='default',
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
check_mathematical_core('row_sparse', output_grad_stype='row_sparse',
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
check_mathematical_core('csr', output_grad_stype='default',
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
check_mathematical_core('csr', output_grad_stype='csr',
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# Check binary with scalar ops
check_binary_op_with_scalar('default',
density=density,
ograd_density=ograd_density,
force_overlap=force_overlap)
check_binary_op_with_scalar('row_sparse',
density=density,
ograd_density=ograd_density,
force_overlap=force_overlap)
check_binary_op_with_scalar('row_sparse', output_grad_stype='default',
density=density,
ograd_density=ograd_density,
force_overlap=force_overlap)
check_binary_op_with_scalar('row_sparse',
output_grad_stype='row_sparse',
density=density, ograd_density=ograd_density,
force_overlap=force_overlap)
check_binary_op_with_scalar('csr',
output_grad_stype='csr',
input_grad_stype='default',
density=density,
ograd_density=ograd_density,
force_overlap=force_overlap)
check_binary_op_with_scalar('csr',
output_grad_stype='csr',
input_grad_stype='csr',
density=density,
ograd_density=ograd_density,
force_overlap=force_overlap)
check_binary_op_with_scalar('csr',
output_grad_stype='default',
density=density,
ograd_density=ograd_density,
force_overlap=force_overlap)
@pytest.mark.serial
def test_elemwise_add_ex():
def check_elemwise_add_ex(lhs_stype, rhs_stype, shape, lhs_grad_stype=None, rhs_grad_stype=None):
lhs = mx.symbol.Variable('lhs', stype=lhs_stype)
rhs = mx.symbol.Variable('rhs', stype=rhs_stype)
lhs_nd = rand_ndarray(shape, lhs_stype)
rhs_nd = rand_ndarray(shape, rhs_stype)
lhs_np = lhs_nd.asnumpy()
rhs_np = rhs_nd.asnumpy()
out_np = lhs_np + rhs_np
test = mx.symbol.sparse.elemwise_add(lhs, rhs)
location = {'lhs': lhs_nd, 'rhs': rhs_nd}
check_symbolic_forward(test, location, [out_np])
check_numeric_gradient(test, location)
grad_stypes = {}
if lhs_grad_stype is not None and lhs_grad_stype != 'default':
grad_stypes['lhs'] = lhs_grad_stype
if rhs_grad_stype is not None and rhs_grad_stype != 'default':
grad_stypes['rhs'] = rhs_grad_stype
check_symbolic_backward(test, location, [out_np], [out_np, out_np],
grad_stypes=grad_stypes)
shapes = [rand_shape_2d(), rand_shape_3d()]
for shape in shapes:
check_elemwise_add_ex('default', 'default', shape)
check_elemwise_add_ex('row_sparse', 'row_sparse', shape,
lhs_grad_stype='row_sparse', rhs_grad_stype='row_sparse')
@pytest.mark.serial
def test_cast_storage_ex():
def check_cast_storage(shape, density, from_stype, to_stype, check_numeric_grad=True):
x = mx.symbol.Variable('x', stype=from_stype)
x_nd = rand_ndarray(shape, from_stype, density=density)
x_np = x_nd.asnumpy()
out_np = x_np
test = mx.symbol.cast_storage(x, stype=to_stype)
location = {'x': x_nd}
check_symbolic_forward(test, location, [out_np])
        # consider disabling the numeric grad check for the gpu block kernel since the input is large
if check_numeric_grad:
check_numeric_gradient(test, location)
grad_stypes = {'x': to_stype}
check_symbolic_backward(test, location, [out_np], [out_np], grad_stypes=grad_stypes)
density = [1.00, 0.50, 0.01]
for d in density:
shape_2d = rand_shape_2d()
shape_3d = rand_shape_3d()
check_cast_storage(shape_2d, d, 'csr', 'default')
check_cast_storage(shape_2d, d, 'default', 'csr')
check_cast_storage(shape_2d, d, 'csr', 'csr')
check_cast_storage(shape_2d, d, 'row_sparse', 'default')
check_cast_storage(shape_2d, d, 'default', 'row_sparse')
check_cast_storage(shape_2d, d, 'row_sparse', 'row_sparse')
check_cast_storage(shape_3d, d, 'row_sparse', 'default')
check_cast_storage(shape_3d, d, 'default', 'row_sparse')
check_cast_storage(shape_3d, d, 'row_sparse', 'row_sparse')
for i in range(4, 6):
shape = rand_shape_nd(i, 5)
check_cast_storage(shape, d, 'default', 'row_sparse')
check_cast_storage(shape, d, 'row_sparse', 'default')
# Test specific gpu kernels
        if default_device().device_type == 'gpu':
dim0 = rnd.randint(1, 10)
# test gpu thread kernel
check_cast_storage((dim0, rnd.randint( 1, 32)), d, 'default', 'csr')
# test gpu warp kernel
check_cast_storage((dim0, rnd.randint( 32, 512)), d, 'default', 'csr')
# test gpu block kernel
check_cast_storage((dim0, rnd.randint(512, 1024)), d, 'default', 'csr',
check_numeric_grad=False)
# check race condition in block kernel
check_cast_storage((200, 128 * 2 + 1), d, 'default', 'csr',
check_numeric_grad=False)
# test gpu thread kernel
check_cast_storage((dim0, rnd.randint( 1, 32)), d, 'default', 'row_sparse')
# test gpu warp kernel
check_cast_storage((dim0, rnd.randint( 32, 512)), d, 'default', 'row_sparse')
# test gpu block kernel
check_cast_storage((dim0, rnd.randint(512, 1024)), d, 'default', 'row_sparse',
check_numeric_grad=False)
@pytest.mark.serial
def test_sparse_dot():
def test_infer_forward_stype(lhs_shape, rhs_shape, lhs_density, rhs_density, trans_a, trans_b):
all_stypes = ["default", "csr", "row_sparse"]
lhs_nd = rand_ndarray(lhs_shape, 'default', density=lhs_density)
rhs_nd = rand_ndarray(rhs_shape, 'default', density=rhs_density)
out_nd = mx.nd.dot(lhs_nd, rhs_nd, transpose_a=trans_a, transpose_b=trans_b)
out_np = out_nd.asnumpy()
for lhs_stype in all_stypes:
for rhs_stype in all_stypes:
for forward_stype in all_stypes:
lhs = lhs_nd.tostype(lhs_stype)
rhs = rhs_nd.tostype(rhs_stype)
out = mx.nd.dot(lhs, rhs, forward_stype=forward_stype,
transpose_a=trans_a, transpose_b=trans_b)
assert_almost_equal(out.tostype('default').asnumpy(), out_np, rtol=1e-3, atol=1e-4)
lhs_var = mx.symbol.Variable('lhs', stype=lhs_stype)
rhs_var = mx.symbol.Variable('rhs', stype=rhs_stype)
out = mx.symbol.sparse.dot(lhs_var, rhs_var,
forward_stype=forward_stype,
transpose_a=trans_a, transpose_b=trans_b)
location = {'lhs': lhs, 'rhs': rhs}
check_symbolic_forward(out, location, [out_np], rtol=1e-3, atol=1e-4)
def test_dot_csr(lhs_shape, rhs_shape, rhs_stype, trans_lhs, lhs_density, rhs_density):
lhs_nd = rand_ndarray(lhs_shape, 'csr', density=lhs_density, shuffle_csr_indices=False)
lhs_dns = lhs_nd.tostype('default')
rhs_nd = rand_ndarray(rhs_shape, rhs_stype, density=rhs_density)
rhs_dns = rhs_nd if rhs_stype == 'default' else rhs_nd.tostype('default')
out = mx.nd.dot(lhs_nd, rhs_nd, transpose_a=trans_lhs)
out_dns = mx.nd.dot(lhs_dns, rhs_dns, transpose_a=trans_lhs)
out_np = out_dns.asnumpy()
assert_almost_equal(out.asnumpy(), out_np, rtol=1e-3, atol=1e-5)
# test symbolic forward
lhs = mx.symbol.Variable('lhs', stype='csr')
rhs = mx.symbol.Variable('rhs', stype=rhs_stype)
out = mx.symbol.sparse.dot(lhs, rhs, transpose_a=trans_lhs)
location = {'lhs': lhs_nd, 'rhs': rhs_nd}
check_symbolic_forward(out, location, [out_np], rtol=1e-3, atol=1e-4)
# test symbolic backward
backward_trans = not trans_lhs
rhs_backward_grad = mx.nd.dot(lhs_dns, out_dns, transpose_a=backward_trans).asnumpy()
expected = {'rhs': rhs_backward_grad}
check_symbolic_backward(out, location, [out_np], expected,
grad_req={'lhs': 'null', 'rhs': 'write'},
rtol=1e-3, atol=1e-4)
def test_dot_dns_csr(lhs_shape, rhs_shape, lhs_density, rhs_density, trans_lhs=False, trans_rhs=False):
lhs_nd = rand_ndarray(lhs_shape, stype='default', density=lhs_density)
rhs_nd = rand_ndarray(rhs_shape, stype='csr', density=rhs_density)
rhs_dns = rhs_nd.tostype('default')
if default_device() == mx.cpu():
forward_stype = 'csr'
else:
forward_stype = 'default'
out = mx.nd.sparse.dot(lhs_nd, rhs_nd, transpose_a=trans_lhs, transpose_b=trans_rhs, forward_stype=forward_stype)
out_dns = mx.nd.dot(lhs_nd, rhs_dns, transpose_a=trans_lhs, transpose_b=trans_rhs, forward_stype=forward_stype)
out_np = out_dns.asnumpy()
assert_almost_equal(out.asnumpy(), out_np, rtol=1e-3, atol=1e-5)
# test symbolic forward
lhs = mx.symbol.Variable('lhs', stype='default')
rhs = mx.symbol.Variable('rhs', stype='csr')
out = mx.symbol.sparse.dot(lhs, rhs, transpose_a=trans_lhs, transpose_b=trans_rhs, forward_stype=forward_stype)
location = {'lhs': lhs_nd, 'rhs': rhs_nd}
check_symbolic_forward(out, location, [out_np], rtol=1e-3, atol=1e-4)
if default_device() == mx.cpu():
# test symbolic backward
backward_trans = not trans_lhs
rhs_backward_grad = mx.nd.dot(lhs_nd, out_dns, transpose_a=backward_trans).asnumpy()
if trans_rhs is True:
rhs_backward_grad = rhs_backward_grad.T
expected = {'rhs': rhs_backward_grad}
check_symbolic_backward(out, location, [out_np], expected,
grad_req={'lhs': 'null', 'rhs': 'write'},
rtol=1e-3, atol=1e-4)
else:
transpose_b = not trans_rhs
lhs_backward_grad = mx.nd.dot(out_dns, rhs_dns, transpose_b=transpose_b)
expected = {'lhs': lhs_backward_grad.asnumpy()}
check_symbolic_backward(out, location, [out_np], expected,
grad_req={'lhs': 'write', 'rhs': 'null'},
rtol=1e-3, atol=1e-4)
def test_sparse_dot_zero_output(lhs_shape, trans_lhs, rhs_num_cols):
"""Test for nnr_out = 0. Before the fix, the test would fail."""
lhs = mx.nd.zeros(lhs_shape)
irow = np.random.randint(0, lhs_shape[0])
icol = np.random.randint(0, lhs_shape[1])
lhs[irow, icol] = 1.0
if trans_lhs:
rhs = rand_ndarray(shape=(lhs_shape[0], rhs_num_cols), stype='default')
rhs[irow, :] = 0
else:
rhs = rand_ndarray(shape=(lhs_shape[1], rhs_num_cols), stype='default')
rhs[icol, :] = 0
dns_out = mx.nd.dot(lhs, rhs, transpose_a=trans_lhs)
assert mx.nd.sum(mx.nd.abs(dns_out)).asscalar() == 0
sps_out = mx.nd.sparse.dot(lhs.tostype('csr'), rhs.tostype('row_sparse'), transpose_a=trans_lhs)
assert same(dns_out.asnumpy(), sps_out.asnumpy())
density = [1.00, 0.5, 0.01]
for lhs_d in density:
lhs_shape = rand_shape_2d(50, 200)
rhs_d = 1
test_dot_csr(lhs_shape, (lhs_shape[1], 1), 'default', False, lhs_d, rhs_d) # test gpu SpMV
test_dot_csr(lhs_shape, (lhs_shape[0], 1), 'default', True, lhs_d, rhs_d) # (vector kernel)
test_dot_csr(lhs_shape, (lhs_shape[1], rnd.randint(5, 10)), 'default', False, lhs_d, rhs_d) # test gpu SpMM
test_dot_csr(lhs_shape, (lhs_shape[0], rnd.randint(5, 10)), 'default', True, lhs_d, rhs_d) # (scalar kernel)
test_dot_dns_csr(lhs_shape, (lhs_shape[1], rnd.randint(50, 200)), lhs_d, lhs_d)
test_dot_dns_csr(lhs_shape, (rnd.randint(50, 200), lhs_shape[1]), lhs_d, lhs_d, trans_rhs=True)
for rhs_d in density:
test_dot_csr(lhs_shape, (lhs_shape[1], rnd.randint(1, 10)), 'row_sparse', False, lhs_d, rhs_d)
test_dot_csr(lhs_shape, (lhs_shape[0], rnd.randint(1, 10)), 'row_sparse', True, lhs_d, rhs_d)
test_infer_forward_stype(lhs_shape, (lhs_shape[1], rnd.randint(10, 20)),
lhs_d, rhs_d, False, False)
test_infer_forward_stype(lhs_shape, (rnd.randint(10, 20), lhs_shape[1]),
lhs_d, rhs_d, False, True)
test_infer_forward_stype(lhs_shape, (lhs_shape[0], rnd.randint(10, 20)),
lhs_d, rhs_d, True, False)
test_infer_forward_stype(lhs_shape, (rnd.randint(10, 20), lhs_shape[0]),
lhs_d, rhs_d, True, True)
test_sparse_dot_zero_output(rand_shape_2d(50, 200), False, 40)
test_sparse_dot_zero_output(rand_shape_2d(50, 200), True, 40)
@pytest.mark.serial
def test_sparse_dot_determinism():
def check_dot_determinism(lhs_stype, rhs_stype, lhs_density, rhs_density, transpose_a, transpose_b, forward_stype):
lhs_row = rnd.randint(50, 100)
lhs_col = rnd.randint(50, 100)
if transpose_a:
if transpose_b:
rhs_shape = (rnd.randint(50, 100), lhs_row)
else:
rhs_shape = (lhs_row, rnd.randint(50, 100))
else:
if transpose_b:
rhs_shape = (rnd.randint(50, 100), lhs_col)
else:
rhs_shape = (lhs_col, rnd.randint(50, 100))
lhs_shape = (lhs_row, lhs_col)
lhs = rand_ndarray(lhs_shape, lhs_stype, density=lhs_density)
rhs = rand_ndarray(rhs_shape, rhs_stype, density=rhs_density)
res1 = mx.nd.sparse.dot(lhs, rhs, transpose_a=transpose_a, transpose_b=transpose_b, forward_stype=forward_stype)
res2 = mx.nd.sparse.dot(lhs, rhs, transpose_a=transpose_a, transpose_b=transpose_b, forward_stype=forward_stype)
assert_almost_equal(res1.asnumpy(), res2.asnumpy(), rtol=0.0, atol=0.0)
check_dot_determinism('csr', 'default', 0.1, 1.0, True, False, 'row_sparse')
forward_stype = 'csr' if default_device() == mx.cpu() else 'default'
check_dot_determinism('default', 'csr', 1.0, 0.1, False, False, forward_stype)
check_dot_determinism('default', 'csr', 1.0, 0.1, False, True, forward_stype)
check_dot_determinism('csr', 'default', 0.1, 1.0, True, False, 'default')
def test_sparse_slice():
def check_csr_slice(shape, slice_input):
storage_type = 'csr'
B, _ = rand_sparse_ndarray(shape, storage_type)
        B_np = B.asnumpy()
        begin = rnd.randint(0, B.shape[0] - 1)
        end = rnd.randint(begin + 1, B.shape[0])
        nd_slice = mx.nd.crop(B, begin=begin, end=end)
        assert same(nd_slice.asnumpy(), B_np[begin:end]), (nd_slice.asnumpy(), B_np[begin:end])
shape = (rnd.randint(7, 15), rnd.randint(1, 10))
check_csr_slice(shape, True)
check_csr_slice(shape, False)
@pytest.mark.serial
def test_sparse_retain():
def check_sparse_retain(shape, density, index_type=np.int64):
num_rows = shape[0]
rsp, _ = rand_sparse_ndarray(shape=shape, stype='row_sparse', density=density)
length = np.random.randint(1, num_rows + 1)
idx = random_sample(list(range(0, num_rows)), length)
idx.sort()
dns = rsp.asnumpy()
tensor_retained_expected = np.zeros(shape)
for i in idx:
tensor_retained_expected[i][:] = dns[i]
indices = mx.nd.array(idx, dtype=index_type)
rsp_retained = mx.nd.sparse.retain(rsp, indices=indices)
assert same(tensor_retained_expected, rsp_retained.asnumpy())
# check numeric gradient
data = mx.symbol.Variable('data')
idx = mx.symbol.Variable('indices')
sym = mx.sym.sparse.retain(data=data, indices=idx)
check_numeric_gradient(sym, [rsp, indices], grad_nodes=['data'],
grad_stype_dict={'data': 'row_sparse'})
shape = rand_shape_2d()
shape_3d = rand_shape_3d()
densities = [0.01, 0.5, 1.0]
index_types = [np.float32, np.int32, np.int64]
for density in densities:
for itype in index_types:
check_sparse_retain(shape, density, itype)
check_sparse_retain(shape_3d, density, itype)
def test_sparse_unary_with_numerics():
def check_sparse_simple(name, stype, mxnet_func, forward_numpy_call,
backward_numpy_call, output_grad_stype=None,
backward_is_use_output=False):
if output_grad_stype is None:
output_grad_stype = stype
expected_result_type, expected_grad_result_type = \
get_fw_bw_result_types_2(forward_numpy_call, stype, backward_numpy_call, output_grad_stype)
if backward_is_use_output is True:
expected_grad_result_type = expected_result_type
shape = (3, 4)
data = mx.symbol.Variable("data")
grad_stypes = {'data' : expected_grad_result_type}
y = mxnet_func(data)
if stype == 'default':
xa = np.random.uniform(low=-1.0, high=1.0, size=shape)
xa_np = xa
else:
xa = create_sparse_array(shape, stype, data_init=None, rsp_indices=[1],
modifier_func=lambda a: a - 0.5,
shuffle_csr_indices=True)
xa_np = xa.asnumpy()
if output_grad_stype != 'default':
out_grad = create_sparse_array(shape, output_grad_stype, data_init=None,
rsp_indices=[1, 2],
modifier_func=lambda a: a - 0.5,
shuffle_csr_indices=True)
out_grad_np = out_grad.asnumpy()
else:
out_grad_np = np.ones(xa.shape)
out_grad = mx.nd.array(out_grad_np)
output_np = forward_numpy_call(xa_np)
input_grad_np = backward_numpy_call(output_np, out_grad_np)
outputs = check_symbolic_forward(y, [xa], [output_np])
output = outputs[0]
assert output.stype == expected_result_type
input_grad_dict = check_symbolic_backward(y, location=[xa], out_grads=[out_grad],
expected=[input_grad_np],
grad_stypes=grad_stypes)
inp_grad = input_grad_dict["data"]
assert inp_grad.stype == expected_grad_result_type
def check_sparse_function(name, mxnet_func, forward_numpy_call, backward_numpy_call,
backward_is_use_output=False):
check_sparse_simple(name, 'default', mxnet_func, forward_numpy_call, backward_numpy_call)
for output_grad_stype in [None, "row_sparse", "default"]:
check_sparse_simple(name, 'row_sparse', mxnet_func, forward_numpy_call, backward_numpy_call,
output_grad_stype=output_grad_stype,
backward_is_use_output=backward_is_use_output)
for output_grad_stype in [None, "csr", "default"]:
check_sparse_simple(name, 'csr', mxnet_func, forward_numpy_call, backward_numpy_call,
output_grad_stype=output_grad_stype,
backward_is_use_output=backward_is_use_output)
check_sparse_function('relu',
lambda x: mx.sym.relu(x),
lambda x: np.maximum(x, 0.0),
lambda output, outg: outg * assign_each(output, lambda x: x > 0.0), backward_is_use_output=True)
check_sparse_function('sigmoid',
lambda x: mx.sym.sigmoid(x),
lambda x: np.divide(1.0, (1.0 + np.exp(-x))),
lambda output, outg: outg * assign_each(output, lambda x: x * (1.0 - x)),
backward_is_use_output=True)
@pytest.mark.serial
def test_sparse_nd_zeros():
def check_sparse_nd_zeros(stype, shape):
zero = mx.nd.zeros(shape)
sparse_zero = mx.nd.zeros(shape=shape, stype=stype)
assert_almost_equal(sparse_zero.asnumpy(), zero.asnumpy())
shape = rand_shape_2d()
check_sparse_nd_zeros('row_sparse', shape)
check_sparse_nd_zeros('csr', shape)
check_sparse_nd_zeros('default', shape)
@pytest.mark.serial
def test_sparse_nd_zeros_like():
def check_sparse_nd_zeros_like(stype, shape):
zero = mx.nd.zeros(shape, stype=stype)
zero_like = mx.nd.sparse.zeros_like(zero)
assert_almost_equal(zero.asnumpy(), zero_like.asnumpy())
shape = rand_shape_2d()
check_sparse_nd_zeros_like('row_sparse', shape)
check_sparse_nd_zeros_like('csr', shape)
@pytest.mark.serial
def test_sparse_axis_operations():
def test_variations(func_name):
dim0 = 30
dim1 = 100
axes = [0, 1]
densities = [0, 0.5, 1]
for density in densities:
shape = rand_shape_2d(dim0, dim1)
csr_array = rand_ndarray(shape=shape, stype='csr', density=density)
dns = csr_array.tostype('default')
for axis in axes:
ret = func_name(csr_array, axis=axis)
assert ret.stype == 'default'
ret_expected = func_name(dns, axis=axis)
assert_almost_equal(ret.asnumpy(), ret_expected.asnumpy())
def test_fallback(func_name, axis=0, keepdims=True, exclude=True):
dim0 = 30
dim1 = 100
shape = rand_shape_2d(dim0, dim1)
csr_array = rand_ndarray(shape=shape, stype='csr', density=0.01)
        ret = func_name(csr_array, axis=axis, keepdims=keepdims,
                        exclude=exclude)
test_variations(mx.nd.sum)
test_fallback(mx.nd.sum, axis=0, keepdims=True, exclude=True)
test_variations(mx.nd.mean)
test_fallback(mx.nd.mean, axis=0, keepdims=True, exclude=True)
@pytest.mark.serial
def test_sparse_square_sum():
dim0 = 30
dim1 = 30
axes = [0, 1]
keepdims = [False, True]
densities = [0, 0.01, 0.2, 0.5, 1.0]
for density in densities:
shape = rand_shape_2d(dim0, dim1)
rsp = rand_ndarray(shape, 'row_sparse', density)
dns = rsp.tostype('default')
for axis in axes:
for keepdim in keepdims:
ret = mx.nd._internal._square_sum(rsp, axis=axis, keepdims=keepdim)
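                # Only axis=1 with keepdims maps each original row to a single value (zero rows
                # stay zero), so the row_sparse layout is kept; other reductions produce dense output.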
if axis == 1 and keepdim:
assert ret.stype == 'row_sparse'
else:
assert ret.stype == 'default'
ret_expected = mx.nd.sum(dns*dns, axis=axis, keepdims=keepdim)
# check forward result
assert_almost_equal(ret.asnumpy(), ret_expected.asnumpy())
rsp_data = mx.sym.Variable('data', stype='row_sparse')
test = mx.symbol._internal._square_sum(rsp_data, axis=axis, keepdims=keepdim)
# check symbolic backward since ograd can be an rsp
# and cannot be checked through check_numeric_gradient
# because it will add a loss layer as the output layer
# which makes ograd of the square_sum dense
if axis == 1 and keepdim:
dns_data = mx.sym.Variable('data')
baseline = mx.sym.sum(mx.sym.square(dns_data), axis=axis, keepdims=keepdim)
igrad_expected = mx.nd.empty(dns.shape)
baseline_exec = baseline._bind(default_device(), args=[dns],
args_grad=[igrad_expected])
baseline_exec.forward(is_train=True)
baseline_exec.backward([ret_expected])
# check backward when ograd is row sparse
check_symbolic_backward(test, [rsp], [ret_expected.tostype('row_sparse')],
[igrad_expected.asnumpy()], grad_stypes={'data': 'row_sparse'})
# check backward when ograd is dense
                    # the stype of the output of square_sum is determined at symbol binding stage.
# The ograd stype of the last layer is the same as the output stype of the last layer.
# Need to add one more layer after square_sum to trigger the kernel for ograd
# with default stype in square_sum op.
baseline1 = baseline + 1
baseline_exec1 = baseline1._bind(default_device(), args=[dns],
args_grad=[igrad_expected])
baseline_exec1.forward(is_train=True)
baseline_exec1.backward([ret_expected])
test1 = test + 1
check_symbolic_backward(test1, [rsp], [ret_expected], [igrad_expected.asnumpy()],
grad_stypes={'data': 'row_sparse'})
# check numeric gradient
check_numeric_gradient(test, [rsp], grad_stype_dict={'data': 'row_sparse'},
atol=1e-2, rtol=0.1)
@pytest.mark.serial
@pytest.mark.flaky
def test_sparse_storage_fallback():
""" test operators which don't implement FComputeEx or FStatefulComputeEx """
def check_broadcast_add(shape, lhs_stype, rhs_stype):
lhs = mx.symbol.Variable('lhs', stype=lhs_stype)
rhs = mx.symbol.Variable('rhs', stype=rhs_stype)
lhs_nd = rand_ndarray(shape, lhs_stype)
rhs_nd = rand_ndarray(shape, rhs_stype)
lhs_dns = mx.nd.cast_storage(lhs_nd, stype='default')
rhs_dns = mx.nd.cast_storage(rhs_nd, stype='default')
out_dns = (lhs_dns + rhs_dns).asnumpy()
test = mx.symbol.broadcast_add(lhs, rhs)
location = {'lhs': lhs_nd, 'rhs': rhs_nd}
check_symbolic_forward(test, location, [out_dns])
check_numeric_gradient(test, location)
check_symbolic_backward(test, location, [out_dns], [out_dns, out_dns])
def np_softmax(x, axis=-1):
# fix for old numpy on Travis not supporting keepdims
x = x - np.max(x, axis=axis, keepdims=True)
x = np.exp(x)
x /= np.sum(x, axis=axis, keepdims=True)
return x
def check_concat(shape, lhs_stype, rhs_stype):
x = mx.symbol.Variable('x', stype=lhs_stype)
w = mx.symbol.Variable('w', stype=rhs_stype)
test = mx.sym.Concat(x, w)
x_nd = rand_ndarray(shape, lhs_stype)
w_nd = rand_ndarray(shape, rhs_stype)
location = {'x': x_nd, 'w': w_nd}
check_numeric_gradient(test, location)
def check_operator_with_temp_resource(shape, stype):
x = mx.symbol.Variable('x', stype=stype)
test = mx.sym.sum(x)
x_nd = rand_ndarray(shape, stype)
location = {'x': x_nd}
check_numeric_gradient(test, location)
shape = rand_shape_2d()
stypes = ['default', 'csr', 'row_sparse']
for lhs in stypes:
check_operator_with_temp_resource(shape, lhs)
for rhs in stypes:
check_broadcast_add(shape, lhs, rhs)
check_concat(shape, lhs, rhs)
@pytest.mark.serial
def test_sparse_elementwise_sum():
def check_sparse_elementwise_sum_with_shape(stypes, shape, n):
# forward
inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
out = mx.symbol.sparse.add_n(*inputs, name='esum')
arr = []
arr_grad = [mx.nd.empty(shape, stype=stype) for stype in stypes]
densities = [0, 0.01, 0.5, 1.0]
for stype in stypes:
arr.append(rand_ndarray(shape, stype, densities[np.random.randint(0, len(densities))]))
exec1 = out._bind(default_device(),
args=arr,
args_grad=arr_grad)
exec1.forward(is_train=True)
out1 = exec1.outputs[0].asnumpy()
out = sum(a.asnumpy() for a in arr)
assert_almost_equal(out, out1, atol=1e-5)
out_grad = mx.nd.empty(shape)
out_grad[:] = np.random.uniform(-10, 10, shape)
# backward
exec1.backward([out_grad])
for a in arr_grad:
assert_almost_equal(a.asnumpy(), out_grad.asnumpy(), atol=1e-5)
all_stypes = ['default', 'csr', 'row_sparse']
for dim in range(2, 4):
shape = tuple(np.random.randint(5, 10, size=dim))
rsp_test_cnt = np.random.randint(1, 9)
check_sparse_elementwise_sum_with_shape(['row_sparse' for i in range(rsp_test_cnt)], shape, rsp_test_cnt)
        if dim == 2:
check_sparse_elementwise_sum_with_shape(['default', 'csr', 'default'], shape, 3)
test_len = np.random.randint(5, 10)
# at least one default type
stypes = ['default']
for _ in range(test_len):
pick_side = np.random.randint(2)
pick_type = np.random.randint(3)
                stypes = ([all_stypes[pick_type]] if pick_side == 0 else []) + stypes + ([all_stypes[pick_type]] if pick_side == 1 else [])
check_sparse_elementwise_sum_with_shape(stypes, shape, test_len+1)
@pytest.mark.serial
def test_sparse_embedding():
''' test sparse embedding operator '''
def check_sparse_embedding(in_dim, out_dim, batch, densities, sparse_grad):
target_stype = 'row_sparse' if sparse_grad else 'default'
# init executor
data = mx.sym.Variable("data")
weight = mx.sym.Variable("embed_weight")
embed = mx.sym.sparse.Embedding(data=data, weight=weight, input_dim=in_dim,
sparse_grad=sparse_grad, output_dim=out_dim, name='embed')
grad_req = {'data': 'null', 'embed_weight': 'write'}
args = {'embed_weight': mx.nd.zeros((in_dim, out_dim)), 'data': mx.nd.ones((batch,))}
weight_grad = mx.nd.zeros((in_dim, out_dim))
if sparse_grad:
weight_grad = weight_grad.tostype('row_sparse')
args_grad = {'embed_weight': weight_grad}
exe_test = embed._bind(default_device(), args=args, args_grad=args_grad, grad_req=grad_req)
arg_map = dict(zip(embed.list_arguments(), exe_test.arg_arrays))
grad_map = dict(zip(embed.list_arguments(), exe_test.grad_arrays))
# init data
np_data = np.random.randint(low=0, high=in_dim, size=batch)
np_onehot = np.zeros((batch, in_dim)).astype(np.float32)
np_onehot[np.arange(batch), np_data] = 1.0
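        # An embedding lookup of integer ids is equivalent to multiplying a one-hot encoding of
        # those ids by the weight matrix; the np.dot reference checks below rely on this identity.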
arg_map["data"][:] = np_data
# weight
weight = arg_map["embed_weight"]
for density in densities:
# update weight based on density
weight[:] = rand_ndarray(weight.shape, 'default', density=density)
# check forward
exe_test.forward(is_train=True)
# init grad
np_grad = np.random.uniform(-1, 1, exe_test.outputs[0].shape)
grad = mx.nd.zeros(np_grad.shape)
grad[:] = np_grad
assert_almost_equal(exe_test.outputs[0].asnumpy(), np.dot(np_onehot, weight.asnumpy()), atol=1e-4)
# check backward
exe_test.backward([grad])
assert_almost_equal(grad_map["embed_weight"].asnumpy(), np.dot(np_onehot.T, grad.asnumpy()), atol=1e-4)
# check grad stype
assert(grad_map["embed_weight"].stype == target_stype)
densities = [0, 0.5, 1]
in_dim = 50
out_dim = 3
batch = 8
sparse_grads = [True, False]
for sparse_grad in sparse_grads:
check_sparse_embedding(in_dim, out_dim, batch, densities, sparse_grad)
def test_sparse_broadcast_add_sub():
def check_broadcast_add(mx_lhs, mx_rhs, np_lhs, np_rhs, dtype):
assert_almost_equal(mx.nd.sparse.add(mx_lhs, mx_rhs).asnumpy(), np.add(np_lhs, np_rhs), atol=1e-4)
def check_broadcast_sub(mx_lhs, mx_rhs, np_lhs, np_rhs, dtype):
assert_almost_equal(mx.nd.sparse.subtract(mx_lhs, mx_rhs).asnumpy(), np.subtract(np_lhs, np_rhs), atol=1e-4)
stype = 'csr'
shape = rand_shape_2d()
num_rows = shape[0]
num_cols = shape[1]
for density in [0.1 * i for i in range(10)]:
mx_lhs = rand_ndarray(shape, stype, density)
np_lhs = mx_lhs.asnumpy()
mx_rhs_row_2D = rand_ndarray((1, num_cols), 'default')
mx_rhs_row_1D = mx_rhs_row_2D.reshape((num_cols))
mx_rhs_col = rand_ndarray((num_rows, 1), 'default')
mx_rhs_scalar_2D = rand_ndarray((1, 1), 'default')
mx_rhs_scalar_1D = mx_rhs_scalar_2D.reshape((1, ))
for mx_rhs in [mx_rhs_row_2D, mx_rhs_row_1D, mx_rhs_col, mx_rhs_scalar_2D, mx_rhs_scalar_1D]:
np_rhs = mx_rhs.asnumpy()
check_broadcast_add(mx_lhs, mx_rhs, np_lhs, np_rhs, np.float32)
check_broadcast_sub(mx_lhs, mx_rhs, np_lhs, np_rhs, np.float32)
check_broadcast_add(mx_rhs, mx_lhs, np_rhs, np_lhs, np.float32)
check_broadcast_sub(mx_rhs, mx_lhs, np_rhs, np_lhs, np.float32)
def test_sparse_broadcast_mul_div():
def check_broadcast_mul(mx_lhs, mx_rhs, np_lhs, np_rhs, dtype):
assert_almost_equal(mx.nd.sparse.multiply(mx_lhs, mx_rhs).asnumpy(), np.multiply(np_lhs, np_rhs), atol=1e-4)
def check_broadcast_div(mx_lhs, mx_rhs, np_lhs, np_rhs, dtype):
assert_almost_equal(mx.nd.sparse.divide(mx_lhs, mx_rhs).asnumpy(), np.divide(np_lhs, np_rhs), atol=1e-4)
stype = 'csr'
shape = rand_shape_2d()
num_rows = shape[0]
num_cols = shape[1]
for density in [0.1 * i for i in range(10)]:
mx_lhs = rand_ndarray(shape, stype, density)
np_lhs = mx_lhs.asnumpy()
mx_rhs_row_2D = rand_ndarray((1, num_cols), 'default')
mx_rhs_row_1D = mx_rhs_row_2D.reshape((num_cols))
mx_rhs_col = rand_ndarray((num_rows, 1), 'default')
mx_rhs_scalar_2D = rand_ndarray((1, 1), 'default')
mx_rhs_scalar_1D = mx_rhs_scalar_2D.reshape((1, ))
for mx_rhs in [mx_rhs_row_2D, mx_rhs_row_1D, mx_rhs_col, mx_rhs_scalar_2D, mx_rhs_scalar_1D]:
np_rhs = mx_rhs.asnumpy()
check_broadcast_mul(mx_lhs, mx_rhs, np_lhs, np_rhs, np.float32)
check_broadcast_div(mx_lhs, mx_rhs, np_lhs, np_rhs, np.float32)
def test_batchnorm_fallback():
# same test as test_operator.test_batchnorm_training, but tests fallback logic of batchnorm
stype = 'row_sparse'
for shape in [(2, 3), (2, 3, 2, 2)]:
data_tmp = np.random.normal(-0.1, 0.1, size=shape)
s = shape[1],
gamma = np.ones(s)
beta = np.ones(s)
gamma[1] = 3
beta[0] = 3
rolling_mean = np.random.uniform(size=s)
rolling_std = np.random.uniform(size=s)
data = mx.symbol.Variable('data', stype=stype)
in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
mx.nd.array(beta).tostype(stype)]
mean_std = [mx.nd.array(rolling_mean).tostype(stype), mx.nd.array(rolling_std).tostype(stype)]
test = mx.symbol.BatchNorm(data, fix_gamma=True)
assertRaises(MXNetError, check_numeric_gradient, test, in_location, mean_std, numeric_eps=1e-3, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True)
assertRaises(MXNetError, check_numeric_gradient, test, in_location, mean_std, numeric_eps=1e-3, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=False)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-3, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-3, rtol=0.16, atol=1e-2)
# Test varying channel axis
dim = len(shape)
for chaxis in range(-dim, dim):
chaxis_true = chaxis
if chaxis < 0:
chaxis_true = dim + chaxis
shapex = shape
channel_count = shapex[chaxis_true]
data_tmp = np.random.normal(-0.1, 0.1, size=shapex)
gamma = np.ones(channel_count)
beta = np.ones(channel_count)
if channel_count > 1:
gamma[1] = 3
beta[0] = 3
in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
mx.nd.array(beta).tostype(stype)]
xrolling_mean = np.random.uniform(size=channel_count)
xrolling_std = np.random.uniform(size=channel_count)
xmean_std = [mx.nd.array(xrolling_mean).tostype(stype),
mx.nd.array(xrolling_std).tostype(stype)]
test = mx.symbol.BatchNorm(data, fix_gamma=True, axis=chaxis)
assertRaises(MXNetError, check_numeric_gradient, test, in_location, xmean_std, numeric_eps=1e-3, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True, axis=chaxis)
assertRaises(MXNetError, check_numeric_gradient, test, in_location, xmean_std, numeric_eps=1e-3, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=False, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-3, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-3, rtol=0.2, atol=0.01)
@pytest.mark.serial
def test_dnnl_sparse():
    # This test is trying to create a race condition described in
# https://github.com/apache/incubator-mxnet/issues/10189
arr = mx.nd.random.uniform(shape=(10, 10, 32, 32))
weight1 = mx.nd.random.uniform(shape=(10, 10, 3, 3))
arr = mx.nd.Convolution(data=arr, weight=weight1, no_bias=True, kernel=(3, 3), num_filter=10)
rs_arr = mx.nd.sparse.row_sparse_array((mx.nd.zeros_like(arr), np.arange(arr.shape[0])))
weight2 = mx.nd.random.uniform(shape=(10, np.prod(arr.shape[1:4])))
fc_res = mx.nd.FullyConnected(data=arr, weight=weight2, no_bias=True, num_hidden=10)
sum_res = mx.nd.elemwise_sub(arr, rs_arr)
res1 = np.dot(mx.nd.flatten(sum_res).asnumpy(), weight2.asnumpy().T)
print(res1 - fc_res.asnumpy())
almost_equal(res1, fc_res.asnumpy())
@pytest.mark.serial
def test_sparse_nd_where():
def get_forward_expected_output(condition, x, y):
original_shape = x.shape
out = np.zeros(original_shape)
if condition.shape == x.shape:
for index, c in np.ndenumerate(condition):
if c != 0:
out[index] = x[index]
else:
out[index] = y[index]
else:
raise RuntimeError("Invalid condition shape for where op")
out = out.reshape(original_shape)
return out
def get_forward_inputs_same_shape(shape):
condition_np = np.random.randint(0, 2, np.prod(shape)).reshape(shape)
x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
return condition_np, x_np, y_np
def get_backward_input(shape):
return np.random.randint(20, 30, np.prod(shape)).reshape(shape)
def get_backward_expected_outputs(grad_in, condition):
shape = grad_in.shape
grad_cond = np.zeros(condition.shape)
grad_x = np.empty(shape)
grad_y = np.empty(shape)
for index, c in np.ndenumerate(condition):
if 0 != c:
grad_x[index] = grad_in[index]
grad_y[index] = 0
else:
grad_x[index] = 0
grad_y[index] = grad_in[index]
return grad_cond, grad_x, grad_y
def test_where_helper(shape):
condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
out_expected = get_forward_expected_output(condition_np, x_np, y_np)
grad_in_np = get_backward_input(shape)
grad_expected_cond, grad_expected_x, grad_expected_y \
= get_backward_expected_outputs(grad_in_np, condition_np)
condition = mx.sym.Variable('condition', stype='csr')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
grad_in_mx = mx.nd.array(grad_in_np, dtype=np.int32)
where_sym = mx.sym.where(condition, x, y)
cond_nd = mx.nd.array(condition_np)
args = {'condition': cond_nd.tostype('csr'), 'x': mx.nd.array(x_np),
'y' : mx.nd.array(y_np)}
args_grad = {'condition': mx.nd.zeros_like(cond_nd),
'x': mx.nd.array(x_np).tostype('csr'), 'y' : mx.nd.array(y_np)}
# test req='write'
where_exe_write = where_sym._bind(ctx=default_device(), args=args,
args_grad=args_grad, grad_req='write')
# test forward req='write'
outputs = where_exe_write.forward(is_train=True)
assert same(outputs[0].asnumpy(), out_expected)
# test backward req='write'
where_exe_write.backward(grad_in_mx.astype('float32'))
assert same(where_exe_write.grad_dict['x'].asnumpy(), grad_expected_x)
assert same(where_exe_write.grad_dict['y'].asnumpy(), grad_expected_y)
assert same(where_exe_write.grad_dict['condition'].asnumpy(), grad_expected_cond)
# test req='add'
x_grad_init = np.random.randint(30, 40, np.prod(shape)).reshape(shape)
y_grad_init = np.random.randint(40, 50, np.prod(shape)).reshape(shape)
where_exe_add = where_sym._bind(ctx=default_device(), args=args,
args_grad=args_grad, grad_req='add')
where_exe_add.grad_dict['x'][:] = x_grad_init
where_exe_add.grad_dict['y'][:] = y_grad_init
# test forward req='add'
outputs = where_exe_add.forward(is_train=True)
assert same(outputs[0].asnumpy(), out_expected)
def test_where_numeric_gradient(shape):
condition = mx.sym.Variable('condition', stype='csr')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
where_sym = mx.sym.where(condition, x, y)
condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
check_numeric_gradient(where_sym, [condition_np, x_np, y_np], grad_nodes=['x', 'y'])
test_where_helper((5, 9))
test_where_numeric_gradient((5, 9))
@pytest.mark.serial
def test_sparse_quadratic_function():
def f(x, a, b, c):
return a * x**2 + b * x + c
def check_sparse_quadratic_function(a, b, c, expected_stype):
# check forward and compare the result with dense op
ndim = 2
shape = rand_shape_nd(ndim, 5)
data = rand_ndarray(shape=shape, stype='csr')
data_np = data.asnumpy()
expected = f(data_np, a, b, c)
output = mx.nd.contrib.quadratic(data, a=a, b=b, c=c)
assert(output.stype == expected_stype)
assert_almost_equal(output.asnumpy(), expected)
a = np.random.random_sample()
b = np.random.random_sample()
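    # f(0) = c, so c == 0 keeps zeros at zero and the csr storage can be preserved, while a
    # nonzero c makes every output element nonzero and forces a dense (default) result.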
check_sparse_quadratic_function(a, b, 0.0, 'csr')
check_sparse_quadratic_function(a, b, 1.0, 'default')
def test_reshape_backward_fallback():
"""
out
| \
w_x x
/
w
in which x is a sparse tensor.
Due to sparse gradient optimization in sym.dot, grad(w_x) is sparse.
Though sym.reshape itself does not have sparse version,
if we somehow make grad(w) sparse as well, e.g.,
- by setting args_grad in symbol.bind
- or, we can have out_y = sym.dot(sparse_y, w), then grad(w) will be inferred as sparse
reshape backward (from w_x to w) needs to understand how to handle sparse inputs.
"""
ctx = default_device()
w_shape = (12, 4)
w_x_shape = (1, 48)
x_nd = rand_ndarray((4, 1), 'csr')
w_nd = rand_ndarray(w_shape)
w_x_nd = w_nd.reshape(w_x_shape)
out_x_nd = mx.nd.dot(x_nd, w_x_nd)
w_x_backward_grad = mx.nd.dot(x_nd, out_x_nd, transpose_a=True).asnumpy()
expected_grad_nd = w_x_backward_grad.reshape(w_shape)
x = mx.sym.Variable('x', stype='csr')
w = mx.sym.Variable('w')
w_x = mx.sym.reshape(w, w_x_shape, name="w_x")
out = mx.sym.sparse.dot(x, w_x, name='out_x')
grad_w_nd = rand_ndarray(w_shape, 'row_sparse')
executor = out._bind(ctx=ctx, args={"x": x_nd, "w": w_nd},
args_grad={"w": grad_w_nd})
executor.forward(is_train=True)
executor.backward(out_x_nd)
assert_almost_equal(grad_w_nd.asnumpy(), expected_grad_nd)
|
{
"content_hash": "078a8fbf34ab6bba46f51165c74ebe08",
"timestamp": "",
"source": "github",
"line_count": 2066,
"max_line_length": 139,
"avg_line_length": 48.63988383349468,
"alnum_prop": 0.49526321026967857,
"repo_name": "DickJC123/mxnet",
"id": "d82d1925ca1aadee7a9c9cefb4f4ceca746648f6",
"size": "101276",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/python/unittest/test_sparse_operator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "151356"
},
{
"name": "C++",
"bytes": "12029257"
},
{
"name": "CMake",
"bytes": "213440"
},
{
"name": "Cuda",
"bytes": "1528224"
},
{
"name": "Cython",
"bytes": "26285"
},
{
"name": "Dockerfile",
"bytes": "54893"
},
{
"name": "Groovy",
"bytes": "132682"
},
{
"name": "Jupyter Notebook",
"bytes": "1889643"
},
{
"name": "Makefile",
"bytes": "8991"
},
{
"name": "PowerShell",
"bytes": "6699"
},
{
"name": "Python",
"bytes": "8615578"
},
{
"name": "Shell",
"bytes": "172547"
}
],
"symlink_target": ""
}
|
import sys
sys.path.insert(1, "../../")
import h2o, tests
def hist_test():
kwargs = {}
kwargs['server'] = True
print "Import small prostate dataset"
hex = h2o.import_file(h2o.locate("smalldata/logreg/prostate.csv"))
hex["AGE"].hist(**kwargs)
hex["VOL"].hist(**kwargs)
if __name__ == "__main__":
tests.run_test(sys.argv, hist_test)
|
{
"content_hash": "2afe3dc762b87c78d97fb1c31a546a17",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 70,
"avg_line_length": 20.72222222222222,
"alnum_prop": 0.5924932975871313,
"repo_name": "brightchen/h2o-3",
"id": "2797bee533826e6df60878a87393f78a6bee104d",
"size": "373",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "h2o-py/tests/testdir_misc/pyunit_hist.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5090"
},
{
"name": "CSS",
"bytes": "163561"
},
{
"name": "CoffeeScript",
"bytes": "262107"
},
{
"name": "Emacs Lisp",
"bytes": "8927"
},
{
"name": "Groovy",
"bytes": "78"
},
{
"name": "HTML",
"bytes": "147257"
},
{
"name": "Java",
"bytes": "5463061"
},
{
"name": "JavaScript",
"bytes": "38932"
},
{
"name": "Makefile",
"bytes": "34005"
},
{
"name": "Python",
"bytes": "2096823"
},
{
"name": "R",
"bytes": "1835571"
},
{
"name": "Rebol",
"bytes": "7059"
},
{
"name": "Ruby",
"bytes": "3506"
},
{
"name": "Scala",
"bytes": "16336"
},
{
"name": "Shell",
"bytes": "47507"
},
{
"name": "TeX",
"bytes": "594697"
}
],
"symlink_target": ""
}
|
from nltk.corpus import movie_reviews
from nltk.probability import FreqDist
from nltk.corpus import stopwords
from scipy import sparse
import string
import os
import sys
import re
import json
import nltk
import pdb
import time
import numpy as np
path = '../data'
class gen_graphs(object):
'''
    Class that generates the word-word, word-label and word-document graphs from text
'''
def __init__(self):
'''
Initialize each graph
'''
# word2word graph
self.w2w = {}
# Store all u,v pairs
self.w2w_inv = []
self.w2l = {}
self.w2d = {}
self.all_words = {}
self.all_labels = {}
self.all_documents = {}
self.nedge = 0
self.ndocs = 0
self.nlabels = 0
nltk.data.path.append(path)
def contruct_graphs(self, args):
'''
Function to read text file from path and construct the corresponding
graphs.
'''
# documents = [(list(w.lower() for w in movie_reviews.words(fileid) if w.lower() not in string.punctuation), category, fileid)
# for category in movie_reviews.categories()
# for fileid in movie_reviews.fileids(category)]
documents = []
files = ['../data/train-pos.txt','../data/train-neg.txt','../data/test-pos.txt','../data/test-neg.txt']
class_labels = ['pos','neg','pos','neg']
document_no = 1
index = 0
for file_name in files:
fp = open(file_name)
lines = fp.readlines()
for line in lines:
words = line.split(" ")
document = (words,class_labels[index],document_no)
documents.append(document)
document_no += 1
index += 1
print documents[0]
unique_count = 0
for index in range(len(documents)):
for word_index in range(len(documents[index][0])):
word = documents[index][0][word_index]
if word not in self.all_words:
self.all_words[word] = unique_count
unique_count = unique_count + 1
self.nvertex = unique_count
unique_count = 0
for c in class_labels:
if c not in self.all_labels:
self.all_labels[c] = unique_count
unique_count = unique_count + 1
self.nlabels = unique_count
unique_count = 0
for i in xrange(1,document_no):
self.all_documents[i] = unique_count
unique_count = unique_count + 1
self.ndocs = unique_count
window_size = 10
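        # Co-occurrence edges link each word to the words inside a symmetric window around it,
        # i.e. up to window_size / 2 = 5 neighbours on each side of the current position.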
for index in range(len(documents)):
for word_index in range(len(documents[index][0])):
word = documents[index][0][word_index]
if (word_index - window_size / 2) >= 0:
left = word_index - window_size / 2
else:
left = 0
if (word_index + window_size / 2) < len(documents[index][0]):
right = word_index + window_size / 2 + 1
else:
right = len(documents[index][0])
for i in xrange(left, word_index):
u = self.all_words[word]
v = self.all_words[documents[index][0][i]]
if u not in self.w2w:
self.w2w[u] = {}
if v not in self.w2w[u]:
self.w2w[u][v] = 0
self.w2w[u][v] += 1
if v not in self.w2w:
self.w2w[v] = {}
if u not in self.w2w[v]:
self.w2w[v][u] = 0
self.w2w[v][u] += 1
for i in xrange(word_index + 1, right):
u = self.all_words[word]
v = self.all_words[documents[index][0][i]]
if u not in self.w2w:
self.w2w[u] = {}
if v not in self.w2w[u]:
self.w2w[u][v] = 0
self.w2w[u][v] += 1
if v not in self.w2w:
self.w2w[v] = {}
if u not in self.w2w[v]:
self.w2w[v][u] = 0
self.w2w[v][u] += 1
u = self.all_documents[documents[index][2]]
v = self.all_words[word]
if u not in self.w2d:
self.w2d[u] = {}
if v not in self.w2d[u]:
self.w2d[u][v] = 0
self.w2d[u][v] += 1
u = self.all_labels[documents[index][1]]
if u not in self.w2l:
self.w2l[u] = {}
if v not in self.w2l[u]:
self.w2l[u][v] = 0
self.w2l[u][v] += 1
json.dump(self.all_words, open('word_mapping.json', 'wb'))
json.dump(self.all_labels, open('label_mapping.json', 'wb'))
json.dump(self.all_documents, open('document_mapping.json', 'wb'))
print 'w2l', len(self.w2l.keys())
print 'w2d', len(self.w2d.keys())
print 'w2w', len(self.w2w.keys())
def gen_edgeprob(self):
'''
returns edge probability vector (w2w graph)
'''
# Edge probability vector
p = []
v1 = []
v2 = []
for k in self.w2w.keys():
for kj in self.w2w[k].keys():
p.append(self.w2w[k][kj])
v1.append(k)
v2.append(kj)
self.nedge += 1
p = np.asarray(p, dtype=np.float64)
p = p / float(sum(p))
return p, v1, v2
if __name__ == "__main__":
graph = gen_graphs()
graph.contruct_graphs("graph")
#t = time.time()
#p = graph.gen_edgeprob()
#t = time.time() - t
#print sum(p)
#print len(p), t
|
{
"content_hash": "6849f869428ea7bd7a75f65d4f692838",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 134,
"avg_line_length": 33.96,
"alnum_prop": 0.46828201245162376,
"repo_name": "shashankg7/word2graph2vec",
"id": "86229ca1577893367009e8eb7e46791f98cad304",
"size": "5989",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "word2graph2vec/data_handler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23858"
},
{
"name": "Shell",
"bytes": "31"
}
],
"symlink_target": ""
}
|
DEBUG = True
# Define the application directory
import os
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
# Define the database - we are working with
# SQLite for this example
#SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL')
#DATABASE_CONNECT_OPTIONS = {}
# Application threads. A common general assumption is
# using 2 per available processor cores - to handle
# incoming requests using one and performing background
# operations using the other.
THREADS_PER_PAGE = 2
# Enable protection against *Cross-site Request Forgery (CSRF)*
CSRF_ENABLED = True
# Use a secure, unique and absolutely secret key for
# signing the data.
CSRF_SESSION_KEY = "secret"
# Secret key for signing cookies
SECRET_KEY = "secret"
|
{
"content_hash": "082a53253dd07ccf24b9f18008da09a7",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 62,
"avg_line_length": 27.11111111111111,
"alnum_prop": 0.7486338797814208,
"repo_name": "kevin-hannegan/vps-droplet",
"id": "94df2542f1469fb1b5c661fe78b18d4f1bd47010",
"size": "784",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "website/config.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "263709"
},
{
"name": "HTML",
"bytes": "83775"
},
{
"name": "JavaScript",
"bytes": "86213"
},
{
"name": "PHP",
"bytes": "1242"
},
{
"name": "Python",
"bytes": "3242337"
},
{
"name": "Shell",
"bytes": "5415"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('server', '0022_auto_20151125_1811'),
]
operations = [
migrations.AlterModelOptions(
name='updatehistoryitem',
options={'ordering': ['-recorded']},
),
]
|
{
"content_hash": "e1af594a474fcc5fad764cfd2b4512ca",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 48,
"avg_line_length": 21.176470588235293,
"alnum_prop": 0.6027777777777777,
"repo_name": "erikng/sal",
"id": "0ec603a8b0240060c8c46ec415a0a782090c22ac",
"size": "384",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "server/migrations/0023_auto_20151130_1036.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "254975"
},
{
"name": "HTML",
"bytes": "248381"
},
{
"name": "JavaScript",
"bytes": "1148377"
},
{
"name": "Makefile",
"bytes": "2208"
},
{
"name": "Nginx",
"bytes": "1946"
},
{
"name": "Python",
"bytes": "757954"
},
{
"name": "Shell",
"bytes": "5922"
}
],
"symlink_target": ""
}
|
from . import base
from unittest.mock import patch
import decorators
class TestDecorators(base.BaseCase):
@patch('decorators.LOG.info')
def test_timeit_smoke_test(self, info):
@decorators.timeit
def some_task(param, **kwargs):
pass
some_task(42, option='value')
(args, _) = info.call_args
self.assertIsInstance(args[0], str)
self.assertEqual(args[1], 'some_task')
self.assertEqual(args[2], (42,))
self.assertEqual(args[3], {'option': 'value'})
self.assertIsInstance(args[4], float)
self.assertGreater(args[4], 0.0)
def test_deffile(self):
self.assertEqual('/tmp/template.json', decorators.deffile('template.json'))
def test_setdefault(self):
decorators.setdefault('.active-stack', 'lax--ci')
with open('/tmp/.active-stack') as f:
self.assertEqual(f.read(), 'lax--ci')
@patch('buildercore.core.active_stack_names', return_value=['dummy1--ci'])
@patch('utils.get_input', return_value='1')
def test_requires_aws_project_stack(self, get_input, active_stack_names):
@decorators.requires_aws_project_stack('dummy1')
def some_task(stackname):
self.assertEqual('dummy1--ci', stackname)
return 'result'
self.assertEqual(some_task('dummy1--ci'), 'result')
@patch('buildercore.core.active_stack_names', return_value=['dummy1--ci', 'dummy1--end2end'])
@patch('utils.get_input', return_value='2')
def test_requires_aws_stack(self, get_input, active_stack_names):
@decorators.requires_aws_stack
def some_task(stackname):
self.assertEqual('dummy1--end2end', stackname)
return 'result'
self.assertEqual(some_task('dummy1--end2end'), 'result')
|
{
"content_hash": "3fb4511f557ccb779bccc2477b47c98d",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 97,
"avg_line_length": 39.08695652173913,
"alnum_prop": 0.6312569521690767,
"repo_name": "elifesciences/builder",
"id": "87d904484d37e670cbfaefb4b8ccfa728d3a6106",
"size": "1798",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/tests/test_decorators.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HCL",
"bytes": "1182"
},
{
"name": "Python",
"bytes": "735556"
},
{
"name": "Shell",
"bytes": "33921"
},
{
"name": "Smarty",
"bytes": "142"
},
{
"name": "VCL",
"bytes": "4406"
}
],
"symlink_target": ""
}
|
"""S3 file system implementation for accessing files on AWS S3."""
# pytype: skip-file
from __future__ import absolute_import
from future.utils import iteritems
from apache_beam.io.aws import s3io
from apache_beam.io.filesystem import BeamIOError
from apache_beam.io.filesystem import CompressedFile
from apache_beam.io.filesystem import CompressionTypes
from apache_beam.io.filesystem import FileMetadata
from apache_beam.io.filesystem import FileSystem
__all__ = ['S3FileSystem']
class S3FileSystem(FileSystem):
"""An S3 `FileSystem` implementation for accessing files on AWS S3
"""
CHUNK_SIZE = s3io.MAX_BATCH_OPERATION_SIZE
S3_PREFIX = 's3://'
@classmethod
def scheme(cls):
"""URI scheme for the FileSystem
"""
return 's3'
def join(self, basepath, *paths):
"""Join two or more pathname components for the filesystem
Args:
basepath: string path of the first component of the path
paths: path components to be added
Returns: full path after combining all of the passed components
"""
if not basepath.startswith(S3FileSystem.S3_PREFIX):
raise ValueError('Basepath %r must be S3 path.' % basepath)
path = basepath
for p in paths:
path = path.rstrip('/') + '/' + p.lstrip('/')
return path
def split(self, path):
"""Splits the given path into two parts.
Splits the path into a pair (head, tail) such that tail contains the last
component of the path and head contains everything up to that.
Head will include the S3 prefix ('s3://').
Args:
path: path as a string
Returns:
a pair of path components as strings.
"""
path = path.strip()
if not path.startswith(S3FileSystem.S3_PREFIX):
raise ValueError('Path %r must be S3 path.' % path)
prefix_len = len(S3FileSystem.S3_PREFIX)
last_sep = path[prefix_len:].rfind('/')
if last_sep >= 0:
last_sep += prefix_len
if last_sep > 0:
return (path[:last_sep], path[last_sep + 1:])
elif last_sep < 0:
return (path, '')
else:
raise ValueError('Invalid path: %s' % path)
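# Illustrative behaviour of the path helpers above (these example values are
# not from the original source, just a sketch of what join/split return):
#   join('s3://bucket/dir', 'file.txt')  -> 's3://bucket/dir/file.txt'
#   split('s3://bucket/dir/file.txt')    -> ('s3://bucket/dir', 'file.txt')
#   split('s3://bucket')                 -> ('s3://bucket', '')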
def mkdirs(self, path):
"""Recursively create directories for the provided path.
Args:
path: string path of the directory structure that should be created
Raises:
IOError: if leaf directory already exists.
"""
pass
def has_dirs(self):
"""Whether this FileSystem supports directories."""
return False
def _list(self, dir_or_prefix):
"""List files in a location.
Listing is non-recursive, for filesystems that support directories.
Args:
dir_or_prefix: (string) A directory or location prefix (for filesystems
that don't have directories).
Returns:
Generator of ``FileMetadata`` objects.
Raises:
``BeamIOError``: if listing fails, but not if no files were found.
"""
try:
for path, size in iteritems(s3io.S3IO().list_prefix(dir_or_prefix)):
yield FileMetadata(path, size)
except Exception as e: # pylint: disable=broad-except
raise BeamIOError("List operation failed", {dir_or_prefix: e})
def _path_open(
self,
path,
mode,
mime_type='application/octet-stream',
compression_type=CompressionTypes.AUTO):
"""Helper functions to open a file in the provided mode.
"""
compression_type = FileSystem._get_compression_type(path, compression_type)
mime_type = CompressionTypes.mime_type(compression_type, mime_type)
raw_file = s3io.S3IO().open(path, mode, mime_type=mime_type)
if compression_type == CompressionTypes.UNCOMPRESSED:
return raw_file
return CompressedFile(raw_file, compression_type=compression_type)
def create(
self,
path,
mime_type='application/octet-stream',
compression_type=CompressionTypes.AUTO):
"""Returns a write channel for the given file path.
Args:
path: string path of the file object to be written to the system
mime_type: MIME type to specify the type of content in the file object
compression_type: Type of compression to be used for this object
Returns: file handle with a close function for the user to use
"""
return self._path_open(path, 'wb', mime_type, compression_type)
def open(
self,
path,
mime_type='application/octet-stream',
compression_type=CompressionTypes.AUTO):
"""Returns a read channel for the given file path.
Args:
path: string path of the file object to be read from the system
mime_type: MIME type to specify the type of content in the file object
compression_type: Type of compression to be used for this object
Returns: file handle with a close function for the user to use
"""
return self._path_open(path, 'rb', mime_type, compression_type)
def copy(self, source_file_names, destination_file_names):
"""Recursively copy the file tree from the source to the destination
Args:
source_file_names: list of source file objects that needs to be copied
destination_file_names: list of destination of the new object
Raises:
``BeamIOError``: if any of the copy operations fail
"""
if not len(source_file_names) == len(destination_file_names):
message = 'Unable to copy unequal number of sources and destinations'
raise BeamIOError(message)
src_dest_pairs = list(zip(source_file_names, destination_file_names))
return s3io.S3IO().copy_paths(src_dest_pairs)
def rename(self, source_file_names, destination_file_names):
"""Rename the files at the source list to the destination list.
Source and destination lists should be of the same size.
Args:
source_file_names: List of file paths that need to be moved
destination_file_names: List of destination_file_names for the files
Raises:
``BeamIOError``: if any of the rename operations fail
"""
if not len(source_file_names) == len(destination_file_names):
message = 'Unable to rename unequal number of sources and destinations'
raise BeamIOError(message)
src_dest_pairs = list(zip(source_file_names, destination_file_names))
results = s3io.S3IO().rename_files(src_dest_pairs)
exceptions = {(src, dest): error
for (src, dest, error) in results if error is not None}
if exceptions:
raise BeamIOError("Rename operation failed", exceptions)
def exists(self, path):
"""Check if the provided path exists on the FileSystem.
Args:
path: string path that needs to be checked.
Returns: boolean flag indicating if path exists
"""
try:
return s3io.S3IO().exists(path)
except Exception as e: # pylint: disable=broad-except
raise BeamIOError("exists() operation failed", {path: e})
def size(self, path):
"""Get size of path on the FileSystem.
Args:
path: string path in question.
Returns: int size of path according to the FileSystem.
Raises:
``BeamIOError``: if path doesn't exist.
"""
try:
return s3io.S3IO().size(path)
except Exception as e: # pylint: disable=broad-except
raise BeamIOError("size() operation failed", {path: e})
def last_updated(self, path):
"""Get UNIX Epoch time in seconds on the FileSystem.
Args:
path: string path of file.
Returns: float UNIX Epoch time
Raises:
``BeamIOError``: if path doesn't exist.
"""
try:
return s3io.S3IO().last_updated(path)
except Exception as e: # pylint: disable=broad-except
raise BeamIOError("last_updated operation failed", {path: e})
def checksum(self, path):
"""Fetch checksum metadata of a file on the
:class:`~apache_beam.io.filesystem.FileSystem`.
Args:
path: string path of a file.
Returns: string containing checksum
Raises:
``BeamIOError``: if path isn't a file or doesn't exist.
"""
try:
return s3io.S3IO().checksum(path)
except Exception as e: # pylint: disable=broad-except
raise BeamIOError("Checksum operation failed", {path: e})
def delete(self, paths):
"""Deletes files or directories at the provided paths.
Directories will be deleted recursively.
Args:
paths: list of paths that give the file objects to be deleted
"""
results = s3io.S3IO().delete_paths(paths)
exceptions = {
path: error
for (path, error) in results.items() if error is not None
}
if exceptions:
raise BeamIOError("Delete operation failed", exceptions)
|
{
"content_hash": "514a57f68a42c0eb8c027248731ec04a",
"timestamp": "",
"source": "github",
"line_count": 273,
"max_line_length": 79,
"avg_line_length": 31.36996336996337,
"alnum_prop": 0.6719990658570761,
"repo_name": "iemejia/incubator-beam",
"id": "4bb0b6b4b218293db58f32afb138be8e73f7b22e",
"size": "9349",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/io/aws/s3filesystem.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "22216"
},
{
"name": "Java",
"bytes": "9687045"
},
{
"name": "Protocol Buffer",
"bytes": "1407"
},
{
"name": "Shell",
"bytes": "10104"
}
],
"symlink_target": ""
}
|
import sys
import os

from ghosting_model import GhostingModel
from flask.ext.restful import Resource, reqparse
from flask import Flask, jsonify, request, make_response
from database import db
from flask.ext.security import current_user
from json import dumps


class Ghosting_resource(Resource):
    def post(self):
        feature = request.args.get('metric')
        username = current_user.email
        new_ghost = GhostingModel(username=username, feature=feature)
        db.session.add(new_ghost)
        db.session.commit()
        return make_response(dumps([{'msg': "Click added."}]))
|
{
"content_hash": "9cc48e2eb0d3e3217ce217c0089c6636",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 68,
"avg_line_length": 33.388888888888886,
"alnum_prop": 0.7271214642262895,
"repo_name": "wigginslab/lean-workbench",
"id": "762361104c58b7e5f9d4cefc7b51b777524fe9d3",
"size": "601",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lean_workbench/ghosting/ghosting_resource.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "203"
},
{
"name": "CSS",
"bytes": "8523037"
},
{
"name": "HTML",
"bytes": "1204783"
},
{
"name": "JavaScript",
"bytes": "1385939"
},
{
"name": "Makefile",
"bytes": "1307"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "353032"
},
{
"name": "Shell",
"bytes": "2058"
}
],
"symlink_target": ""
}
|
"""
This module deals with managing packages installed on the host system.
It attempts to do so in a distribution-agnostic way, but actually only
supports a strict set of distributions with well-known package managers.
"""
import logging
import subprocess
from .configuration import Configuration
from .utils import getYesNoResponse as getYN
class _MetaPackage(type):
"""
This factory is responsible for constructing a :class:`Package` class which properly
utilizes the host system's package management platform.
"""
def __new__(mcs, name, bases, dct) -> type:
"""
Customization point for distribution-dependent functionality
"""
from .configuration import DISTRO
pack = super().__new__(mcs, name, bases, dct)
if DISTRO in {'fedora', 'centos', 'rhel'}:
concat = '-'
pack.checkInstallList = lambda x: [ p
for p in
subprocess.Popen(["/bin/rpm", "-q", x.name],
stdout=subprocess.PIPE)
.communicate()[0].decode().splitlines()
if not p.endswith("is not installed")]
pack.installArgs = ["/bin/dnf", "install", "-y"]
pack.uninstallArgs = ["/bin/dnf", "remove", "-y"]
elif DISTRO in {'ubuntu', 'linuxmint', 'debian'}:
concat = '='
pack.checkInstallList = lambda x: [ "{1}={2}".format(*p.split())
for p in
subprocess.Popen(["/usr/bin/dpkg", "-l", x.name],
stdout=subprocess.PIPE)
.communicate()[0].decode().splitlines()[5:]
if p ]
pack.installArgs = ["/usr/bin/apt-get", "install", "-y"]
pack.uninstallArgs = ["/usr/bin/apt-get", "purge", "-y"]
# TODO - is this reasonable? I mean, I KNOW it's unreasonable because this whole module
# shouldn't exist, but is this a good fallback since it DOES exist?
else:
concat = " v"
pack.checkInstallList = lambda x: []
pack.installArgs = ["/bin/true"]
pack.uninstallArgs = ["/bin/true"]
pack.__str__ = lambda x: concat.join((x.name, x.version)) if x.version else x.name
return pack
class Package(metaclass=_MetaPackage):
"""
Represents a package installed (or about to be installed) on the host system
"""
#: The package's name
name = None
#: Optionally, a specific version of the package
version = None
def __init__(self, pkg:dict):
"""
Constructs a :class:`Package` object from a raw JSON response
:param pkg: a parsed JSON response, expected to contain a ``"name"`` key and optionally a
``"version"`` key.
:raises ValueError: if ``pkg`` does not faithfully represent a package
"""
if "name" not in pkg:
raise ValueError("%r does not represent a package!" % pkg)
self.name = pkg["name"]
self.version = pkg["version"] if "version" in pkg else ""
# These are defined in the metaclass based on the host system's Linux distribution, but are
# specified here for the benefit of static analysis tools
self.checkInstallList = getattr(self, 'checkInstallList', lambda: ())
self.installArgs = getattr(self, 'installArgs', None)
self.uninstallArgs = getattr(self, 'uninstallArgs', None)
def __repr__(self) -> str:
"""
Implements ``repr(self)``
"""
if self.version:
return "Package(name=%r, version=%r)" % (self.name, self.version)
return "Package(name=%r)" % (self.name,)
def isInstalled(self) -> bool:
"""
Checks if this package is already present on the system
:returns: whether or not the package could be found
"""
for pkg in self.checkInstallList():
if (self.version and pkg == str(self)) or pkg.startswith(self.name):
return True
return False
def install(self, conf:Configuration) -> int:
"""
Installs this package.
:param conf: An object containing the configuration for :program:`traffic_ops_ort`
:returns: the exit code of the install process
"""
if self.isInstalled():
logging.info("%s is already installed - nothing to do", self)
return 0
if conf.mode is Configuration.Modes.INTERACTIVE and not getYN("Install %s?" % self, 'Y'):
logging.warning("%s will not be installed, dependencies may be unsatisfied!", self)
return 0
logging.info("Installing %s", self)
if conf.mode is Configuration.Modes.REPORT:
return 0
try:
sub = subprocess.Popen(self.installArgs + [str(self)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = sub.communicate(timeout=60)
except subprocess.CalledProcessError as e:
logging.debug("%r", e, stack_info=True, exc_info=True)
logging.error("%s could not be installed!", self)
return -1
except TimeoutError as e:
logging.debug("%r", e, stack_info=True, exc_info=True)
logging.error("Package install timed out!")
return -1
logging.debug("STDOUT: %s", out.decode())
logging.debug("STDERR: %s", err.decode())
return sub.returncode
def uninstall(self, conf:Configuration) -> int:
"""
Uninstalls this package. I have no idea how one would make use of this from within ATC...
:returns: the exit code of the uninstall process
"""
if not self.isInstalled():
logging.info("%s is not installed - nothing to do", self)
return 0
if conf.mode is Configuration.Modes.INTERACTIVE and not getYN("Uninstall %s?" % self, 'Y'):
logging.warning("%s will not be installed, dependencies may be out of date!", self)
return 0
logging.info("Uninstalling %s", self)
if conf.mode is Configuration.Modes.REPORT:
return 0
try:
sub = subprocess.Popen(self.uninstallArgs + [str(self)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = sub.communicate(timeout=60)
except subprocess.CalledProcessError as e:
logging.debug("%r", e, stack_info=True, exc_info=True)
logging.error("%s could not be uninstalled!", self)
return -1
except TimeoutError as e:
logging.debug("%r", e, stack_info=True, exc_info=True)
logging.error("Package uninstall timed out!")
return -1
logging.debug("STDOUT: %s", out.decode())
logging.debug("STDERR: %s", err.decode())
return sub.returncode
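# --- Illustrative usage (a sketch added for clarity; not part of the original
# module). It assumes a Configuration object `conf` obtained elsewhere and a
# hypothetical package name/version:
#
#   pkg = Package({"name": "traffic_ops", "version": "4.0.0"})
#   if not pkg.isInstalled():
#       exit_code = pkg.install(conf)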
|
{
"content_hash": "4dd4a3c94cd01b09b0131dc129c4ea6e",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 93,
"avg_line_length": 33.47894736842105,
"alnum_prop": 0.6305612325106116,
"repo_name": "hbeatty/incubator-trafficcontrol",
"id": "a5701036a0d7e2fdbc78997aaed0516d58663c1f",
"size": "7147",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "infrastructure/cdn-in-a-box/ort/traffic_ops_ort/packaging.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "21929"
},
{
"name": "CSS",
"bytes": "194544"
},
{
"name": "Go",
"bytes": "1227299"
},
{
"name": "HTML",
"bytes": "664484"
},
{
"name": "Java",
"bytes": "1231693"
},
{
"name": "JavaScript",
"bytes": "1860092"
},
{
"name": "Makefile",
"bytes": "1047"
},
{
"name": "PLSQL",
"bytes": "3450"
},
{
"name": "PLpgSQL",
"bytes": "70798"
},
{
"name": "Perl",
"bytes": "2781374"
},
{
"name": "Perl 6",
"bytes": "630546"
},
{
"name": "Python",
"bytes": "11054"
},
{
"name": "Roff",
"bytes": "4011"
},
{
"name": "Ruby",
"bytes": "4090"
},
{
"name": "Shell",
"bytes": "165277"
}
],
"symlink_target": ""
}
|
import math
import bayeslite.core
from bayeslite import bql_quote_name
from bayeslite.exception import BayesLiteException as BLE
def extract_target_cols(bdb, generator, targets=None):
"""Extract target columns (helper for LL/KL query).
If targets is None, then a list of all sqlite3 quoted column names from
generator are returned.
Parameters
----------
bdb : bayeslite.BayesDB
Active BayesDB instance.
generator : str
Name of generator.
targets : list<str>, optional
List of columns in the table.
Returns
-------
target_cols : list<str>
"""
if targets is None:
generator_id = bayeslite.core.bayesdb_get_generator(bdb, generator)
targets = bayeslite.core.bayesdb_generator_column_names(bdb,
generator_id)
return map(bql_quote_name, targets)
def extract_given_cols_vals(givens=None):
"""Extract given columns and values (helper for LL/KL).
If givens is None, then an empty list is returned. Otherwise an appropriate
list of tuples is returned, where the first element is the sqlite3 quoted
name and the second element is the constraint value.
Parameters
----------
givens : list<str>, optional
List of columns in the table.
Returns
-------
given_cols_vals : list<tuple>
"""
if givens is None:
return []
given_cols = map(bql_quote_name, givens[::2])
given_vals = givens[1::2]
assert len(given_cols) == len(given_vals)
return zip(given_cols, given_vals)
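# Illustrative example (added for clarity, not in the original source): a flat
# list of alternating columns and values such as ['age', 17, 'nationality', 'USA']
# yields [(quoted 'age', 17), (quoted 'nationality', 'USA')], where the column
# names are quoted by bql_quote_name.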
def estimate_log_likelihood(bdb, table, generator, targets=None, givens=None,
n_samples=None):
"""Estimate the log likelihood for obsevations in a table.
Parameters
----------
bdb : bayeslite.BayesDB
Active BayesDB instance.
table : str
Name of table.
generator : str
Name of generator.
targets : list<str>, optional
List of columns in the table for which to compute the log-likelihood.
Defaults to all the columns.
givens : list<tuple>, optional
A list of [(column, value)] pairs on which to condition. Defaults to
no conditionals. See example for more details.
n_samples : int, optional
Number of rows from table to use in the computation. Defaults to all
the rows.
Returns
-------
ll : float
The log likelihood of the table[columns] under the conditional
distribution (specified by givens) of generator.
Example:
estimate_log_likelihood(bdb, 'people', 'people_gen',
targets=['weight', 'height'],
givens=[('nationality', 'USA'), ('age', 17)])
"""
# Defaults to all columns if targets is None.
targets = extract_target_cols(bdb, generator, targets=targets)
# Defaults to no givens if givens is None
givens = extract_given_cols_vals(givens=givens)
givens = ','.join(['{}={}'.format(c,v) for (c,v) in givens])
# Obtain the dataset table.
table = bql_quote_name(table.strip(';'))
sql = '''
SELECT {} FROM {};
'''.format(','.join(targets), table)
dataset = bdb.execute(sql)
# Obtain number of rows in the dataset and samples to use.
n_samples = n_samples
n_rows = bdb.execute('''
SELECT COUNT(*) FROM {}'''.format(table)).fetchvalue()
if n_samples is None or n_rows < n_samples:
n_samples = n_rows
# Compute the log-likelihood of the targets, subject to givens.
# XXX This code is currently wrong due to shortcomings in BQL:
# - BQL cannot evaluate joint density. Assume that all the rows are IID,
# and that all the columns factor into their marginal density.
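# Stated explicitly (this comment is an added clarification, not original):
# the value returned below is approximately
#   sum_{rows r} sum_{cols c} log P(c = r[c] | givens)
# i.e. a product-of-marginals, IID-rows approximation to the joint
# log likelihood of the selected rows.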
ll, i = 0, 0
for row in dataset:
if i > n_samples:
break
else:
i += 1
# XXX Wrong: assume joint factors into product of marginals.
for col, val in zip(targets, row):
if givens:
# XXX TODO write GIVEN in this query using bindings.
bql = '''
ESTIMATE PROBABILITY OF {}=? GIVEN ({}) FROM {} LIMIT 1
'''.format(col, givens, bql_quote_name(generator))
else:
bql = '''
ESTIMATE PROBABILITY OF {}=? FROM {} LIMIT 1
'''.format(col, bql_quote_name(generator))
ll += math.log(bdb.execute(bql, (val,)).fetchvalue())
return ll
def estimate_kl_divergence(bdb, generatorA, generatorB, targets=None,
givens=None, n_samples=None):
"""Estimate the KL divergence.
The KL divergence is a measure of the "information lost" when generatorB
(the approximating generator) is used to approximate generatorA (the base
generator). KL divergence is not symmetric, and KL(genA||genB) is not
necessarily equal to KL(genB||genA).
TODO: Monte Carlo estimation is a terrible way to compute the KL divergence.
(Not to say there are better methods in general). One illustration of this
is that the estimated KL divergence has empirically been shown to obtain
negative realizations for high-dimensional data.
Computing the KL divergence in general (of high dimensional distributions)
is a very hard problem; most research uses the structure of the
distributions to find good estimators. Adaptive quadrature or exact methods
for numerical integration could outperform Monte Carlo?
TODO: More sophisticated algorithm for detecting cases where absolute
continuity could be a problem (currently have a heuristic).
As it stands, Monte Carlo estimates may have infinite variance depending
on simulated values from generatorA.
Parameters
----------
bdb : bayeslite.BayesDB
Active BayesDB instance.
generatorA : str
Name of base generator.
generatorB : str
Name of approximating generator.
targets : list<str>, optional
List of columns in the table for which to compute the log-likelihood.
Defaults to all the columns.
givens : list<tuple>, optional
A list of [(column, value)] pairs on which to condition. Defaults to
no conditionals. See example for more details.
n_samples: int, optional
Number of simulated samples to use in the Monte Carlo estimate.
Returns
-------
kl : float
The KL divergence. May be infinity.
Example:
estimate_kl_divergence(bdb, 'crosscat_gen', 'baxcat_gen',
targets=['weight', 'height'],
givens=[('nationality', 'USA'), ('age', 17)])
"""
# XXX Default to 10,000 samples
if n_samples is None:
n_samples = 10000
# Defaults to all columns if targets is None.
targets = extract_target_cols(bdb, generatorA, targets=targets)
# Defaults to no givens if givens is None
givens = extract_given_cols_vals(givens=givens)
givens = ','.join(['{}={}'.format(c,v) for (c,v) in givens])
# Obtain samples from the base distribution.
if givens:
# XXX TODO write GIVEN in this query using bindings.
bql = '''
SIMULATE {} FROM {} GIVEN {} LIMIT {}
'''.format(','.join(targets), bql_quote_name(generatorA),
givens, n_samples)
else:
bql = '''
SIMULATE {} FROM {} LIMIT {}
'''.format(','.join(targets), bql_quote_name(generatorA),
n_samples)
samples = bdb.execute(bql)
kl = 0
for s in samples:
logp_a, logp_b = 0, 0
# XXX Assume joint probability factors by summing univariate
# (conditional) probability of each cell value. This is clearly wrong,
# until we can evaluate joint densities in BQL.
for col, val in zip(targets, s):
bql = '''
ESTIMATE PROBABILITY OF {}=? FROM {} LIMIT 1
'''.format(col, bql_quote_name(generatorA))
crs = bdb.execute(bql, (val,))
p_a = crs.fetchvalue()
bql = '''
ESTIMATE PROBABILITY OF {}=? FROM {} LIMIT 1
'''.format(col, bql_quote_name(generatorB))
crs = bdb.execute(bql, (val,))
p_b = crs.fetchvalue()
# XXX Heuristic to detect when genA is not absolutely
# continuous wrt genB
if p_a == 0:
# How on earth did we simulate a value from genA with zero
# density/prob under genA?
raise BLE(ValueError(
'Fatal error: simulated a (col,val)=({},{}) '
'from base generatorA ({}) with zero density. Check '
'implementation of simulate and/or logpdf of '
'generator.'.format(col,val,generatorA)))
if p_b == 0:
# Detected failure of absolute continuity
# (under assumption that joint factors into marginals)
return float('inf')
logp_a += math.log(p_a)
logp_b += math.log(p_b)
kl += (logp_a - logp_b)
# XXX Assertion may fail, see TODO in docstring.
# assert kl > 0
if kl < 0:
raise BLE(ValueError(
'Cannot compute reasonable value for KL divergence. '
'Try increasing the number of samples (currently using {} '
'samples).'.format(n_samples)))
return kl / n_samples
# TODO: Migrate from hooks/contrib_diagnostics. Need users run experiments?
# def run_bdb_experiment(bdb, exp_args):
# pass
|
{
"content_hash": "2635e4a3e95783fd4280dfe79fc523ea",
"timestamp": "",
"source": "github",
"line_count": 266,
"max_line_length": 80,
"avg_line_length": 35.63533834586466,
"alnum_prop": 0.613777824665049,
"repo_name": "probcomp/bdbcontrib",
"id": "ef0be1aed3a483ceb457385621a0446f22acfba6",
"size": "10134",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/diagnostic_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "709"
},
{
"name": "Jupyter Notebook",
"bytes": "55896"
},
{
"name": "Makefile",
"bytes": "2016"
},
{
"name": "Python",
"bytes": "406288"
},
{
"name": "Shell",
"bytes": "1578"
}
],
"symlink_target": ""
}
|
from typing import Iterator, List
import uuid
from google.cloud import bigquery
import pytest
import authorized_view_tutorial
@pytest.fixture(scope="module")
def client() -> bigquery.Client:
return bigquery.Client()
@pytest.fixture
def datasets_to_delete(client: bigquery.Client) -> Iterator[List[str]]:
doomed: List[str] = []
yield doomed
for item in doomed:
client.delete_dataset(item, delete_contents=True, not_found_ok=True)
def test_authorized_view_tutorial(
client: bigquery.Client, datasets_to_delete: List[str]
) -> None:
override_values = {
"source_dataset_id": "github_source_data_{}".format(
str(uuid.uuid4()).replace("-", "_")
),
"shared_dataset_id": "shared_views_{}".format(
str(uuid.uuid4()).replace("-", "_")
),
}
source_dataset_ref = "{}.{}".format(
client.project, override_values["source_dataset_id"]
)
shared_dataset_ref = "{}.{}".format(
client.project, override_values["shared_dataset_id"]
)
datasets_to_delete.extend(
[override_values["source_dataset_id"], override_values["shared_dataset_id"]]
)
authorized_view_tutorial.run_authorized_view_tutorial(override_values)
source_dataset = client.get_dataset(source_dataset_ref)
shared_dataset = client.get_dataset(shared_dataset_ref)
analyst_email = "example-analyst-group@google.com"
analyst_entries = [
entry
for entry in shared_dataset.access_entries
if entry.entity_id == analyst_email
]
assert len(analyst_entries) == 1
assert analyst_entries[0].role == "READER"
authorized_view_entries = [
entry for entry in source_dataset.access_entries if entry.entity_type == "view"
]
expected_view_ref = {
"projectId": client.project,
"datasetId": override_values["shared_dataset_id"],
"tableId": "github_analyst_view",
}
assert len(authorized_view_entries) == 1
assert authorized_view_entries[0].entity_id == expected_view_ref
|
{
"content_hash": "955be42a9b3f624af486b6801a1f6046",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 87,
"avg_line_length": 31.12121212121212,
"alnum_prop": 0.6484907497565725,
"repo_name": "googleapis/python-bigquery",
"id": "cae8704864eb8884d078ed9174a1c8096c3ff636",
"size": "2630",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/snippets/authorized_view_tutorial_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "2520564"
},
{
"name": "Shell",
"bytes": "31939"
}
],
"symlink_target": ""
}
|
from __future__ import print_function

import random
import time
import sys
import threading

sys.path.append(".")

import stackimpact


agent = stackimpact.start(
    agent_key = 'agent key here',
    app_name = 'MyPythonApp')


def simulate_cpu_work():
    for j in range(0, 100000):
        random.randint(1, 1000000)


def handle_some_event():
    span = agent.profile('some event')
    simulate_cpu_work()
    span.stop()

    response = {
        "statusCode": 200,
        "body": 'Done'
    }
    return response


# Simulate events
while True:
    handle_some_event()
    time.sleep(2)
|
{
"content_hash": "0d0b175c651411add113e84c1a2ddc24",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 38,
"avg_line_length": 15.894736842105264,
"alnum_prop": 0.6258278145695364,
"repo_name": "stackimpact/stackimpact-python",
"id": "b8566e960912b927b5e44d4e0cb421e95f197973",
"size": "604",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/focused/app.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "93356"
},
{
"name": "Shell",
"bytes": "273"
}
],
"symlink_target": ""
}
|
import subprocess
import threading


class MKFlowSocat:
    def __init__(self, port1, port2):
        self.buffer = []
        self.port1 = port1
        self.port2 = port2

    def start(self):
        try:
            self.alive = True
            self.thread = threading.Thread(target=self.loop)
            self.thread.daemon = True  # never care about it anymore
            self.thread.start()
        except:
            print 'error starting thread'

    def stop(self):
        try:
            self.close()
        except:
            print 'error stopping thread'
        else:
            self.alive = False

    def join(self):
        self.thread.join()

    def open(self):
        exe = 'socat -x %s,raw,echo=0,b38400,crnl %s,raw,echo=0,b38400,crnl' % (self.port1, self.port2)
        self.popen = subprocess.Popen(exe.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
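        # Note (added for clarity, not part of the original source): socat
        # bridges the two serial ports; the -x flag dumps the transferred
        # bytes in hex, which arrives on the merged stdout/stderr pipe above
        # and is collected into self.buffer by loop().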
    def close(self):
        try:
            self.popen.terminate()
            self.popen.wait()
        except:
            print "error closing socat"
            raise

    def loop(self):
        for line in iter(self.popen.stdout.readline, b''):
            self.buffer.append(line)

    def read(self):
        if self.buffer[0][0] == "<" or self.buffer[0][0] == ">":
            return self.buffer.pop(0), self.buffer.pop(0)
        elif len(self.buffer[0]) == 0:
            # message 1 is empty. pop two messages
            return self.buffer.pop(0), self.buffer.pop(0)
        else:
            # message 1 is missing in rpi's socat
            return '', self.buffer.pop(0)

    def isReady(self):
        size = self.bufferSize()
        if size > 30:
            self.buffer = [self.buffer[-1]]
            print "buffer overflow"
            return True
        return size > 0

    def bufferSize(self):
        return len(self.buffer)

    def isAlive(self):
        return self.alive
|
{
"content_hash": "71c5b8dfd67009f8d877e2200062ff90",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 103,
"avg_line_length": 27.897058823529413,
"alnum_prop": 0.5413811280969952,
"repo_name": "ukos-git/python-flowmeter",
"id": "655994303e2df69693f6c74d68ca571b374a67aa",
"size": "1920",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/MKFlowSocat.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "93330"
},
{
"name": "Shell",
"bytes": "1082"
}
],
"symlink_target": ""
}
|
import logging
import warnings

from oslo_serialization import jsonutils
from oslo_utils import strutils

from oslo_messaging.notify import notifier


class LogDriver(notifier.Driver):

    "Publish notifications via Python logging infrastructure."

    # NOTE(dhellmann): For backwards-compatibility with configurations
    # that may have modified the settings for this logger using a
    # configuration file, we keep the name
    # 'oslo.messaging.notification' even though the package is now
    # 'oslo_messaging'.
    LOGGER_BASE = 'oslo.messaging.notification'

    def notify(self, ctxt, message, priority, retry):
        logger = logging.getLogger('%s.%s' % (self.LOGGER_BASE,
                                              message['event_type']))
        method = getattr(logger, priority.lower(), None)
        if method:
            method(jsonutils.dumps(strutils.mask_dict_password(message)))
        else:
            warnings.warn('Unable to log message as notify cannot find a '
                          'logger with the priority specified '
                          '%s' % priority.lower())
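

# Illustrative note (not from the original file): a notification whose
# event_type is "compute.instance.create.end" is emitted through the logger
# named "oslo.messaging.notification.compute.instance.create.end", so standard
# Python logging configuration can filter or route notifications per event type.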
|
{
"content_hash": "2de6b74775aba531dca2a9a836a01635",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 74,
"avg_line_length": 37.06666666666667,
"alnum_prop": 0.6483812949640287,
"repo_name": "ozamiatin/oslo.messaging",
"id": "7322f07b6ab6bb30416d4d94a72aefe3c57a6f59",
"size": "1781",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "oslo_messaging/notify/_impl_log.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1221796"
},
{
"name": "Shell",
"bytes": "8290"
}
],
"symlink_target": ""
}
|
"""This module tests the RDFValue implementation for performance."""
from grr.lib import test_lib
from grr.lib import type_info
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import structs as rdf_structs
from grr.proto import jobs_pb2
class StructGrrMessage(rdf_structs.RDFProtoStruct):
"""A serialization agnostic GrrMessage."""
type_description = type_info.TypeDescriptorSet(
type_info.ProtoString(
name="session_id", field_number=1,
description="Every Flow has a unique session id."),
type_info.ProtoUnsignedInteger(
name="request_id", field_number=2,
description="This message is in response to this request number"),
type_info.ProtoUnsignedInteger(
name="response_id", field_number=3,
description="Responses for each request."),
type_info.ProtoString(
name="name", field_number=4,
description=("This is the name of the client action that will be "
"executed. It is set by the flow and is executed by "
"the client.")),
type_info.ProtoBinary(
name="args", field_number=5,
description="This field contains an encoded rdfvalue."),
type_info.ProtoString(
name="source", field_number=6,
description=("Client name where the message came from (This is "
"copied from the MessageList)")),
)
class FastGrrMessageList(rdf_structs.RDFProtoStruct):
"""A Faster implementation of GrrMessageList."""
type_description = type_info.TypeDescriptorSet(
type_info.ProtoList(type_info.ProtoEmbedded(
name="job", field_number=1,
nested=StructGrrMessage))
)
class RDFValueBenchmark(test_lib.AverageMicroBenchmarks):
"""Microbenchmark tests for RDFProtos."""
REPEATS = 1000
units = "us"
USER_ACCOUNT = dict(
username=u"user", full_name=u"John Smith",
comment=u"This is a user", last_logon=10000,
domain=u"Some domain name",
homedir=u"/home/user",
sid=u"some sid")
def testObjectCreation(self):
"""Compare the speed of object creation to raw protobufs."""
test_proto = jobs_pb2.User(**self.USER_ACCOUNT)
test_proto = test_proto.SerializeToString()
def RDFStructCreateAndSerialize():
s = rdf_client.User(**self.USER_ACCOUNT)
s.SerializeToString()
def RDFStructCreateAndSerializeSetValue():
s = rdf_client.User()
for k, v in self.USER_ACCOUNT.iteritems():
setattr(s, k, v)
s.SerializeToString()
def RDFStructCreateAndSerializeFromProto():
s = rdf_client.User(test_proto)
s.SerializeToString()
def ProtoCreateAndSerialize():
s = jobs_pb2.User(**self.USER_ACCOUNT)
s.SerializeToString()
def ProtoCreateAndSerializeSetValue():
s = jobs_pb2.User()
for k, v in self.USER_ACCOUNT.iteritems():
setattr(s, k, v)
s.SerializeToString()
def ProtoCreateAndSerializeFromProto():
s = jobs_pb2.User()
s.ParseFromString(test_proto)
self.assertEqual(s.SerializeToString(), test_proto)
self.TimeIt(RDFStructCreateAndSerialize,
"SProto Create from keywords and serialize.")
self.TimeIt(RDFStructCreateAndSerializeSetValue,
"SProto Create, Set And Serialize")
self.TimeIt(RDFStructCreateAndSerializeFromProto,
"SProto from serialized and serialize.")
self.TimeIt(ProtoCreateAndSerialize,
"Protobuf from keywords and serialize.")
self.TimeIt(ProtoCreateAndSerializeSetValue,
"Protobuf Create, Set and serialize")
self.TimeIt(ProtoCreateAndSerializeFromProto,
"Protobuf from serialized and serialize.")
def testObjectCreation2(self):
def ProtoCreateAndSerialize():
s = jobs_pb2.GrrMessage(name=u"foo", request_id=1, response_id=1,
session_id=u"session")
return len(s.SerializeToString())
def RDFStructCreateAndSerialize():
s = StructGrrMessage(name=u"foo", request_id=1, response_id=1,
session_id=u"session")
return len(s.SerializeToString())
self.TimeIt(ProtoCreateAndSerialize,
"Protobuf from keywords")
self.TimeIt(RDFStructCreateAndSerialize,
"RDFStruct from keywords")
def testDecodeRepeatedFields(self):
"""Test decoding of repeated fields."""
repeats = self.REPEATS / 50
s = jobs_pb2.MessageList()
for i in range(self.REPEATS):
s.job.add(session_id="test", name="foobar", request_id=i)
test_data = s.SerializeToString()
def ProtoDecode():
s = jobs_pb2.MessageList()
s.ParseFromString(test_data)
self.assertEqual(s.job[100].request_id, 100)
def SProtoDecode():
s = FastGrrMessageList(test_data)
self.assertEqual(s.job[100].request_id, 100)
self.TimeIt(SProtoDecode, "SProto Repeated Decode",
repetitions=repeats)
self.TimeIt(ProtoDecode, "Protobuf Repeated Decode",
repetitions=repeats)
def testRepeatedFields(self):
"""Test serialization and construction of repeated fields."""
repeats = self.REPEATS / 50
def ProtoCreateAndSerialize():
s = jobs_pb2.MessageList()
for i in range(self.REPEATS):
s.job.add(session_id="test", name="foobar", request_id=i)
return len(s.SerializeToString())
def RDFStructCreateAndSerialize():
s = FastGrrMessageList()
for i in range(self.REPEATS):
s.job.Append(session_id="test", name="foobar", request_id=i)
return len(s.SerializeToString())
self.TimeIt(RDFStructCreateAndSerialize, "RDFStruct Repeated Fields",
repetitions=repeats)
self.TimeIt(ProtoCreateAndSerialize, "Protobuf Repeated Fields",
repetitions=repeats)
# Check that we can unserialize a protobuf encoded using the standard
# library.
s = jobs_pb2.MessageList()
for i in range(self.REPEATS):
s.job.add(session_id="test", name="foobar", request_id=i)
serialized = s.SerializeToString()
unserialized = FastGrrMessageList(serialized)
self.assertEqual(len(unserialized.job), len(s.job))
self.assertEqual(unserialized.job[134].session_id, "test")
self.assertEqual(unserialized.job[100].request_id, 100)
def testDecode(self):
"""Test decoding performance."""
s = jobs_pb2.GrrMessage(name=u"foo", request_id=1, response_id=1,
session_id=u"session")
data = s.SerializeToString()
def ProtoDecode():
new_s = jobs_pb2.GrrMessage()
new_s.ParseFromString(data)
self.assertEqual(new_s.session_id, "session")
self.assertEqual(new_s.session_id.__class__, unicode)
def RDFStructDecode():
new_s = StructGrrMessage()
new_s.ParseFromString(data)
self.assertEqual(new_s.session_id, "session")
self.assertEqual(new_s.session_id.__class__, unicode)
self.TimeIt(RDFStructDecode)
self.TimeIt(ProtoDecode)
def testDecode2(self):
"""Test decoding performance.
This benchmarks the lazy decoding feature where a large protobuf is decoded
but only a few fields are examined.
"""
s = jobs_pb2.User(**self.USER_ACCOUNT)
data = s.SerializeToString()
def ProtoDecode():
new_s = jobs_pb2.User()
new_s.ParseFromString(data)
self.assertEqual(new_s.username, "user")
self.assertEqual(new_s.username.__class__, unicode)
def RDFStructDecode():
new_s = rdf_client.User()
new_s.ParseFromString(data)
self.assertEqual(new_s.username, "user")
self.assertEqual(new_s.username.__class__, unicode)
self.TimeIt(RDFStructDecode)
self.TimeIt(ProtoDecode)
def testEncode(self):
"""Comparing encoding speed of a typical protobuf."""
s = jobs_pb2.GrrMessage(name=u"foo", request_id=1, response_id=1,
session_id=u"session")
serialized = s.SerializeToString()
def ProtoEncode():
s1 = jobs_pb2.GrrMessage(name=u"foo", request_id=1, response_id=1,
session_id=u"session")
test = s1.SerializeToString()
self.assertEqual(len(serialized), len(test))
def RDFStructEncode():
s2 = StructGrrMessage(name=u"foo", request_id=1, response_id=1,
session_id=u"session")
test = s2.SerializeToString()
self.assertEqual(len(serialized), len(test))
self.TimeIt(RDFStructEncode)
self.TimeIt(ProtoEncode)
def testEncodeDecode(self):
"""Test performance of encode/decode cycle."""
def Check(s, new_s):
self.assertEqual(s.name, new_s.name)
self.assertEqual(s.name, u"foo")
self.assertEqual(s.request_id, new_s.request_id)
self.assertEqual(s.request_id, 1)
self.assertEqual(s.response_id, new_s.response_id)
self.assertEqual(s.response_id, 1)
self.assertEqual(s.session_id, new_s.session_id)
self.assertEqual(s.session_id, u"session")
def ProtoEncodeDecode():
s = jobs_pb2.GrrMessage(name=u"foo", request_id=1, response_id=1,
session_id=u"session")
data = s.SerializeToString()
new_s = jobs_pb2.GrrMessage()
new_s.ParseFromString(data)
return s, new_s
def RDFStructEncodeDecode():
s = StructGrrMessage(name=u"foo", request_id=1, response_id=1,
session_id=u"session")
data = s.SerializeToString()
new_s = StructGrrMessage(initializer=data)
return s, new_s
# Make sure everything is sane first.
Check(*ProtoEncodeDecode())
Check(*RDFStructEncodeDecode())
self.TimeIt(RDFStructEncodeDecode)
self.TimeIt(ProtoEncodeDecode)
def testDecodeEncode(self):
"""Test performance of decode/encode cycle."""
s = jobs_pb2.GrrMessage(name=u"foo", request_id=1, response_id=1,
session_id=u"session")
data = s.SerializeToString()
def ProtoDecodeEncode():
new_s = jobs_pb2.GrrMessage()
new_s.ParseFromString(data)
new_s.SerializeToString()
def RDFStructDecodeEncode():
new_s = StructGrrMessage(initializer=data)
new_s.SerializeToString()
self.TimeIt(RDFStructDecodeEncode)
self.TimeIt(ProtoDecodeEncode)
|
{
"content_hash": "ae5a8947277db2ea4a0c78b612eebb93",
"timestamp": "",
"source": "github",
"line_count": 337,
"max_line_length": 79,
"avg_line_length": 30.783382789317507,
"alnum_prop": 0.650472334682861,
"repo_name": "pchaigno/grr",
"id": "fc9bc5ca348bcc3c102cc53b69a5a07a7461054d",
"size": "10396",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "lib/rdfvalues/benchmark_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "Batchfile",
"bytes": "14896"
},
{
"name": "C",
"bytes": "10598"
},
{
"name": "C++",
"bytes": "276081"
},
{
"name": "CMake",
"bytes": "3044"
},
{
"name": "CSS",
"bytes": "12677"
},
{
"name": "Groff",
"bytes": "444"
},
{
"name": "HTML",
"bytes": "71587"
},
{
"name": "JavaScript",
"bytes": "228300"
},
{
"name": "Makefile",
"bytes": "6232"
},
{
"name": "Protocol Buffer",
"bytes": "197889"
},
{
"name": "Python",
"bytes": "5172085"
},
{
"name": "Ruby",
"bytes": "5103"
},
{
"name": "Shell",
"bytes": "43112"
},
{
"name": "Standard ML",
"bytes": "8172"
}
],
"symlink_target": ""
}
|
import os
import sys

if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "photogram.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)
|
{
"content_hash": "17db95cb7af03d3f4233170f6a6642a4",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 77,
"avg_line_length": 37.38095238095238,
"alnum_prop": 0.621656050955414,
"repo_name": "KirovVerst/photogram",
"id": "051f788c9085914b950530a0a6b4f2166788eeb0",
"size": "807",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "221"
},
{
"name": "JavaScript",
"bytes": "1197"
},
{
"name": "Python",
"bytes": "11009"
}
],
"symlink_target": ""
}
|
from crawler import Crawler


class MSDManualCrawler(Crawler):

    def get_p(self, url):
        if 'msdmanual.pl' in url:
            return 1
        return 0

    def filename_from_url(self, url):
        return url.split('m=')[1] + '.txt'


if __name__ == '__main__':
    crawler = MSDManualCrawler('http://www.msd-manual.de/msdmanual/htbin/msdmanual.pl?m=0-0')
    crawler.crawl()
|
{
"content_hash": "a4f2ca0cc012b3582faa79346aea0888",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 93,
"avg_line_length": 25.866666666666667,
"alnum_prop": 0.6005154639175257,
"repo_name": "eonum/medtextcollector",
"id": "084fbcce62236e67f2d6669b3079972a24cd99d9",
"size": "388",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "msdmanual_crawler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "963789"
},
{
"name": "Python",
"bytes": "38437"
},
{
"name": "Ruby",
"bytes": "4674"
}
],
"symlink_target": ""
}
|
"""
idflow.Docker
"""
from __future__ import print_function
import os
import json
import getpass
class Docker:
@staticmethod
def __print_line(line):
"""
"""
try:
line = line.decode('utf-8')
except:
print("Could not decode line")
return
try:
line = json.loads(line)
except:
print(" {0}".format(line), flush=True)
return
if "stream" in line:
print(" {0}".format(line["stream"]), end="", flush=True)
pass
elif "status" in line:
if line["status"] == "Downloading" or line["status"] == "Extracting":
return
o = line["status"]
if "progress" in line:
o += " " + line["progress"]
if "id" in line:
o = line["id"] + " " + o
print(" {0}".format(o), flush=True)
@staticmethod
def build(cli, dockerfile, tag):
print("#")
print("# Building Docker image from '{0}' with tag '{1}'".format(
dockerfile, tag))
print("#")
for line in cli.build(
dockerfile=dockerfile,
pull=True,
path=".",
rm=True,
tag=tag):
Docker.__print_line(line)
print()
@staticmethod
def execute(cli, container_id, cmd):
print("#")
print("# Executing on {1}: {0}".format(cmd, container_id))
print("#")
execute = cli.exec_create(
container=container_id,
cmd=cmd
# user='root' if CI else 'app'
)
for line in cli.exec_start(
exec_id=execute.get('Id'),
stream=True):
Docker.__print_line(line)
print()
inspect = cli.exec_inspect(execute.get('Id'))
exit_code = inspect.get('ExitCode')
if exit_code != 0:
cli.stop(container_id)
cli.remove_container(container_id)
raise Exception("Exit Code: {0}\n{1}".format(exit_code, inspect))
@staticmethod
def clean(cli, objs):
print("#")
print("# Cleaning files & directories: {0}".format(objs))
print("#")
cli.pull("alpine:latest")
container = cli.create_container(
image='alpine:latest',
volumes=[
'{0}:/app'.format(os.getcwd())
],
working_dir='/app',
host_config=cli.create_host_config(binds=[
'{0}:/app'.format(os.getcwd())
]),
command='/bin/sh -c "rm -rf {0}"'.format(" ".join(objs))
)
# print('/bin/sh -c "rm -rf {0}"'.format(" ".join(objs)))
response = cli.start(container=container.get('Id'))
cli.wait(container=container.get('Id'), timeout=600)
print(response)
cli.remove_container(container.get('Id'))
print()
@staticmethod
def push(cli, tags):
"""
"""
for tag in tags:
print("#")
print("# Pushing {0} to Registry".format(tag))
print("#")
for line in cli.push(tag, stream=True):
Docker.__print_line(line)
print()
@staticmethod
def login(cli):
"""
"""
if os.getenv('DOCKER_EMAIL') and os.getenv('DOCKER_USERNAME') and os.getenv('DOCKER_PASSWORD'):
email = os.getenv('DOCKER_EMAIL')
username = os.getenv('DOCKER_USERNAME')
password = os.getenv('DOCKER_PASSWORD')
else:
email = input('Docker email:')
username = input('Docker username:')
password = getpass.getpass('Docker password:')
cli.login(
username=username,
email=email,
password=password,
registry='https://index.docker.io/v1/'
)
print()
return cli, username
@staticmethod
def run(
cli,
tag,
command,
volumes=[],
working_dir="",
environment={}):
"""
"""
print("#")
print("# Running on {1}: {0}".format(command, tag))
print("#")
params = dict()
params['image'] = tag
params['command'] = command
if len(volumes) > 0:
params['volumes'] = volumes
params['host_config'] = cli.create_host_config(binds=volumes)
if working_dir != "":
params['working_dir'] = working_dir
if environment:
params['environment'] = environment
container = cli.create_container(**params)
cli.start(container.get('Id'))
for line in cli.attach(container=container.get('Id'), stream=True, logs=True):
Docker.__print_line(line)
exit_code = cli.wait(container=container.get('Id'))
if exit_code != 0:
raise Exception("Exit Code: {0}".format(exit_code))
|
{
"content_hash": "014e802d7884718d37f2b529f5e0d10b",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 103,
"avg_line_length": 28.443181818181817,
"alnum_prop": 0.4876148621654015,
"repo_name": "VJftw/invoke-tools",
"id": "a564ece16c1aa4f5ece2613e8259501e8ca52ba4",
"size": "5006",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "idflow/docker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38314"
},
{
"name": "Shell",
"bytes": "621"
}
],
"symlink_target": ""
}
|
"""
Chat object providing simple forum representation.
@author: Milos Prchlik
@contact: U{happz@happz.cz}
@license: DPL (U{http://www.php-suit.com/dpl})
"""
import time
import hlib.api
import hlib.events
import hlib.format
import hlib.pageable
import hlib.database
# Validators
from hlib.input import validator_factory, CommonString, MaxLength
# pylint: disable-msg=F0401
import hruntime # @UnresolvedImport
ValidateChatPost = validator_factory(CommonString(), MaxLength(65535))
class ChatPost(hlib.database.DBObject):
PACK_OBJECTS = ['user', 'stamp', 'message']
def __init__(self, entity, user, stamp, message):
hlib.database.DBObject.__init__(self)
self.id = None
self.entity = entity
self.user = user
self.stamp = stamp
self.message = message
self._v_formatted = None
@property
def formatted(self):
if not hasattr(self, '_v_formatted') or self._v_formatted == None:
self._v_formatted = hlib.format.tagize(self.message)
return self._v_formatted
def to_api(self):
return {
'id': self.id,
'user': hlib.api.User(self.user),
'stamp': self.stamp,
'time': time.strftime(self.user.date_format, time.localtime(self.stamp)),
'message': self.formatted,
'raw_message': self.message
}
class ChatPosts(hlib.database.IndexedMapping):
def __getattr__(self, name):
if name == 'total':
return len(self)
if name == 'length':
return len(self)
return hlib.database.IndexedMapping.__getattr__(self, name)
def get_posts(self, start, length):
if len(self) <= 0:
return []
l = len(self)
_start = l - start
_end = l - start - length
if start != 0:
_start -= 1
return self.values(min = _end, max = _start)
class ChatPager(hlib.pageable.Pageable):
def __init__(self, entity, accessed_by):
super(ChatPager, self).__init__(default_length = 20)
self._entity = entity
self._accessed_by = accessed_by
# pylint: disable-msg=W0212
entity = property(lambda self: self._entity and self._entity or hruntime.dbroot.server)
accessed_by = property(lambda self: self._accessed_by if self._accessed_by != None else hruntime.user)
unread = property(lambda self: 0 if self.total == 0 else max(list(self.entity.chat_posts.keys())) - self.accessed_by.last_board)
total = property(lambda self: len(self.entity.chat_posts))
length = property(lambda self: self.total)
def trigger_event(self):
pass
def add(self, text = None):
text = text or ''
cp = ChatPost(self.entity, hruntime.user, hruntime.time, text)
self.entity.chat_posts.push(cp)
self.trigger_event()
def update_last_access(self, new_last):
self.accessed_by.last_board = new_last
# Pageable interface implementation
def get_records(self, start, length):
records = self.entity.chat_posts.get_posts(start, length)
records = [cp for cp in reversed(records)]
last_access = None
if len(records) > 0 and self.accessed_by.last_board < records[0].id:
last_access = records[0].id
return (records, self.length, last_access)
class ChatPagerGame(ChatPager):
def __init__(self, game, accessed_by = None):
accessed_by = accessed_by or game.my_player
super(ChatPagerGame, self).__init__(game, accessed_by)
def update_last_access(self, new_last):
super(ChatPagerGame, self).update_last_access(new_last)
hruntime.cache.remove_for_users([p.user for p in self.accessed_by.game.players.values()], 'recent_events')
def trigger_event(self):
hlib.events.trigger('game.ChatPost', self.entity, hidden = True, user = hruntime.user, game = self.entity)
class ChatPagerTournament(ChatPager):
def __init__(self, tour, accessed_by = None):
accessed_by = accessed_by or tour.my_player
super(ChatPagerTournament, self).__init__(tour, accessed_by)
def update_last_access(self, new_last):
super(ChatPagerTournament, self).update_last_access(new_last)
hruntime.cache.remove_for_users([p.user for p in self.accessed_by.tournament.players.values()], 'recent_events')
def trigger_event(self):
hlib.events.trigger('tournament.ChatPost', self.entity, hidden = True, user = hruntime.user, tournament = self.entity)
class ChatPagerGlobal(ChatPager):
def __init__(self):
super(ChatPagerGlobal, self).__init__(None, None)
def trigger_event(self):
hlib.events.trigger('system.ChatPost', self.entity, hidden = True, user = hruntime.user)
|
{
"content_hash": "30488199bf4df1738883f16c906dbd37",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 131,
"avg_line_length": 30.09271523178808,
"alnum_prop": 0.670774647887324,
"repo_name": "happz/settlers",
"id": "c6b1e37a1c69277ded51e7ed71fd81fdd268063d",
"size": "4544",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/lib/chat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "75190"
},
{
"name": "CoffeeScript",
"bytes": "111627"
},
{
"name": "Inno Setup",
"bytes": "3439"
},
{
"name": "JavaScript",
"bytes": "30274"
},
{
"name": "Makefile",
"bytes": "5003"
},
{
"name": "Mako",
"bytes": "66564"
},
{
"name": "Python",
"bytes": "278777"
},
{
"name": "Shell",
"bytes": "833"
}
],
"symlink_target": ""
}
|
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from smtplib import SMTP
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("skyline.analyzer.alerts")

"""
Create any alerter you want here. The function will be invoked from trigger_alert.
Five arguments will be passed to each alerter:
metric: the anomalous metric
datapoint: the anomalous datapoint (timestamp, value)
ensemble: the ensemble result dictionary
args: alert specific settings (e.g. notification recipient)
settings: general alert settings (e.g. authentication information)
"""


# This specifies the mailserver to connect to.
# If user or password are blank no authentication is used.
def alert_smtp(metric, datapoint, ensemble, args, settings):
    # Connect to the mail server
    conn = SMTP(settings["host"])
    user = settings.get("user")
    password = settings.get("password")
    if user and password:
        conn.login(user, password)

    sender = settings['sender']
    recipients = args['recipients']

    for recipient in recipients:
        msg = MIMEMultipart('alternative')
        msg['Subject'] = '[Skyline] {0}'.format(metric)
        msg['From'] = sender
        msg['To'] = recipient

        link = ""  # settings.GRAPH_URL.format(metric)
        # body = 'Anomalous metric: {0} (datapoint: {1})<br><a href="{2}"><img src="{2}"/></a>'.format(metric, datapoint, link)
        body = 'Anomalous metric: {0} (datapoint: {1})'.format(metric, datapoint)

        msg.attach(MIMEText(body, 'html'))
        conn.sendmail(sender, recipients, msg.as_string())

    conn.quit()


def alert_pagerduty(metric, datapoint, ensemble, args, settings):
    import pygerduty
    pager = pygerduty.PagerDuty(settings['subdomain'], settings['auth_token'])
    pager.trigger_incident(settings['key'], "Anomalous metric: {0} (datapoint: {1})".format(metric, datapoint))


def alert_hipchat(metric, datapoint, ensemble, args, settings):
    import hipchat
    hipster = hipchat.HipChat(token=settings['auth_token'])
    rooms = args['rooms']

    link = ""  # settings.GRAPH_URL.format(metric)
    # body = 'Anomalous metric: {0} (datapoint: {1})<br><a href="{2}"><img src="{2}"/></a>'.format(metric, datapoint, link)
    body = 'Anomalous metric: {0} (datapoint: {1})'.format(metric, datapoint)

    for room in rooms:
        hipster.method('rooms/message', method='POST', parameters={'room_id': room,
                                                                   'from': 'Skyline',
                                                                   'color': settings.get('color', 'red'),
                                                                   'message': body})


def alert_stdout(metric, datapoint, ensemble, args, settings):
    logger.warning("metric={0} datapoint={1}".format(metric, datapoint))


def alert_syslog(metric, datapoint, ensemble, args, settings):
    import syslog
    syslog.openlog("skyline", syslog.LOG_PID, syslog.LOG_LOCAL4)
    syslog.syslog(syslog.LOG_LOCAL4, str("metric={0} datapoint={1}".format(metric, datapoint)))
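

# Example of adding a custom alerter (an illustrative sketch, not part of the
# original file): it follows the same five-argument signature described in the
# module docstring above and simply appends anomalies to a local file. The
# 'path' key in args is a made-up setting for this example.
def alert_file(metric, datapoint, ensemble, args, settings):
    path = args.get('path', '/tmp/skyline_alerts.log')
    with open(path, 'a') as f:
        f.write("metric={0} datapoint={1}\n".format(metric, datapoint))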
|
{
"content_hash": "d306c77da3fccf1923822dd8e3bc6de0",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 127,
"avg_line_length": 42.06666666666667,
"alnum_prop": 0.6481774960380349,
"repo_name": "klynch/skyline",
"id": "16d6a8c6c2790e254d2ab5a7640404d6db3896eb",
"size": "3155",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "skyline/analyzer/alerts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3890"
},
{
"name": "HTML",
"bytes": "1547"
},
{
"name": "JavaScript",
"bytes": "6108"
},
{
"name": "Python",
"bytes": "59605"
}
],
"symlink_target": ""
}
|
import calendar
import datetime
import time
import requests
import uuid
import random
from django.conf import settings
import simplejson as json
from dateutil import parser
ACTIVITY_EXPIRES = 5
def get_rdio_user_data(rdio_user_key):
response = requests.post('https://services.rdio.com/api/1/get', {
'keys': rdio_user_key,
'method': 'get',
'access_token': settings.RDIO_ACCESS_TOKEN
})
return json.loads(response.text)['result'][rdio_user_key]
def get_rdio_track_data(rdio_track_key):
response = requests.post('https://services.rdio.com/api/1/get', {
'keys': rdio_track_key,
'method': 'get',
'access_token': settings.RDIO_ACCESS_TOKEN
})
return json.loads(response.text)['result'][rdio_track_key]
class Party(object):
def __init__(self):
self.id = None
self.name = "unnamed"
self.playing_track_key = None
self.playing_track_start_time = datetime.datetime.utcnow()
self.playing_track_user_key = None
self.theme = 'Click me to change the theme!'
self._users = {}
self.queue = []
self.skippers = set()
self.messages = []
def add_message(self, message):
self.messages.append(message)
def active_users(self):
return [user for user in self._users.values() if user.is_active(self.id)]
def get_player_state_payload(self):
return {
'type': 'player',
'data': {
'playing_track_key': self.playing_track_key,
'playing_track_position': self.current_track_position,
'playing_track_user_key': self.playing_track_user_key
}
}
def get_queue_state_payload(self):
return {
'type': 'queue',
'data': self.queue_to_dict()
}
def get_user_list_state_payload(self):
return {
'type': 'user_list',
'data': self.users_to_dict()
}
def get_messages_state_payload(self, redis):
recent_messages = Message.get_recent(redis, self.id)
return {
'type': 'messages',
'data': [
message.to_dict() for message in recent_messages
]
}
def get_message_added_payload(self, message):
return {
'type': 'message_added',
'data': message.to_dict()
}
def get_theme_state_payload(self):
return {
'type': 'theme',
'data': self.theme_to_dict()
}
def theme_to_dict(self):
return {
'theme': self.theme
}
def broadcast_player_state(self, connection):
connection.publish('sutrofm:broadcast:parties:%s' % self.id, json.dumps(self.get_player_state_payload()))
def broadcast_queue_state(self, connection):
connection.publish('sutrofm:broadcast:parties:%s' % self.id, json.dumps(self.get_queue_state_payload()))
def broadcast_user_list_state(self, connection):
connection.publish('sutrofm:broadcast:parties:%s' % self.id, json.dumps(self.get_user_list_state_payload()))
def broadcast_messages_state(self, connection):
connection.publish(
'sutrofm:broadcast:parties:%s' % self.id,
json.dumps(self.get_messages_state_payload(connection))
)
def broadcast_message_added(self, connection, message):
connection.publish('sutrofm:broadcast:parties:%s' % self.id, json.dumps(self.get_message_added_payload(message)))
def broadcast_theme_state(self, connection):
connection.publish('sutrofm:broadcast:parties:%s' % self.id, json.dumps(self.get_theme_state_payload()))
@property
def current_track_position(self):
return (datetime.datetime.utcnow() - self.playing_track_start_time).seconds
def play_track(self, track_key, user):
self.playing_track_key = track_key
self.playing_track_start_time = datetime.datetime.utcnow()
def skip_stop(self):
self.playing_track_key = None
self.playing_track_start_time = datetime.datetime.utcnow()
self.playing_track_user_key = None
def play_next_track(self):
""" Dequeue the next song and play it """
next_track_entry = self.dequeue_next_song()
if next_track_entry:
self.play_track(next_track_entry.track_key, next_track_entry.submitter)
else:
self.skip_stop()
self.clear_skippers()
def clear_skippers(self):
"""GILLIGANNNNNN!!!"""
self.skippers = set()
def vote_to_skip(self, user):
self.skippers.add(user.id)
def should_skip(self):
return len(self.skippers) > (len(self.active_users()) / 2)
@staticmethod
def get(connection, id):
data = connection.hgetall('parties:%s' % id)
if data:
output = Party()
output.id = id
output.name = data.get('name', 'No name')
output.playing_track_key = data.get('playing_track_key', None)
output.playing_track_start_time = parser.parse(
data.get('playing_track_start_time', datetime.datetime.utcnow().isoformat()))
output.playing_track_user_key = data.get('playing_track_user_key', None)
# Get users
user_keys = connection.smembers('parties:%s:users' % id)
output._users = {
key: User.get(connection, key) for key in user_keys
}
# Get queue
queue_keys = connection.smembers('parties:%s:queue' % id)
output.queue = [QueueEntry.get(connection, id, key) for key in queue_keys if key]
# Get skippers
skippers = data.get('skippers', None)
output.skippers = set(skippers.split(',') if skippers else [])
# Get theme
output.theme = data.get('theme', '')
return output
else:
return None
@staticmethod
def getall(connection):
ids = connection.smembers('parties')
return [
Party.get(connection, i) for i in ids
]
def save(self, connection):
if not self.id:
self.id = uuid.uuid4().hex
connection.hmset("parties:%s" % self.id, {
"name": self.name,
"playing_track_key": self.playing_track_key or '',
"playing_track_start_time": self.playing_track_start_time,
"playing_track_user_key": self.playing_track_user_key,
"skippers": ",".join(self.skippers),
"theme": self.theme,
})
# Save users
def _save_users(pipe):
old_users = pipe.smembers('parties:%s:users' % self.id)
for old_user_id in old_users:
if old_user_id not in self._users:
pipe.srem('parties:%s:users' % self.id, old_user_id)
for user_id in self._users:
pipe.sadd('parties:%s:users' % self.id, user_id)
# Save queue
def _save_queue(pipe):
old_queue_entries = pipe.smembers('parties:%s:queue' % self.id)
for old_queue_entry_id in old_queue_entries:
if old_queue_entry_id not in self.queue:
pipe.srem('parties:%s:queue' % self.id, old_queue_entry_id)
for queue_entry in self.queue:
queue_entry.save(pipe)
pipe.sadd('parties:%s:queue' % self.id, queue_entry.id)
connection.transaction(_save_users, 'parties:%s:users' % self.id)
connection.transaction(_save_queue, 'parties:%s:queue' % self.id)
connection.sadd('parties', self.id)
def add_user(self, connection, user):
should_save = user.id not in self._users
self._users[user.id] = user
user.visit_party(self.id)
if should_save:
self.save(connection)
def enqueue_song(self, user, track_key):
qe = QueueEntry()
qe.track_key = track_key
qe.submitter = user
qe.party_id = self.id
# Assume the queueing user wants to upvote their own song
qe.upvote(user)
self.queue.append(qe)
return qe
def remove_queue_entry(self, queue_entry):
self.queue.remove(queue_entry)
def dequeue_next_song(self):
if self.queue:
self.queue.sort(reverse=True)
return self.queue.pop()
else:
return None
def to_dict(self):
return {
"id": self.id,
"name": self.name,
"people": [user.to_dict() for user in self._users.values()],
"player": {
"playingTrack": {
"trackKey": self.playing_track_key
}
}
}
def to_json(self):
return json.dumps(self.to_dict())
def get_queue_entry(self, queue_entry_id):
for queue_entry in self.queue:
if queue_entry.id == queue_entry_id:
return queue_entry
return None
def queue_to_dict(self):
return [
{
'queue_entry_id': entry.id,
'track_key': entry.track_key,
'submitter': entry.submitter.to_dict(),
'upvotes': list(entry.upvotes),
'downvotes': list(entry.downvotes),
'timestamp': entry.timestamp.isoformat(),
} for entry in self.queue
]
def users_to_dict(self):
return [
user.to_dict() for user in self._users.values()
]
def messages_to_dict(self):
return [
m.to_dict() for m in self.messages
]
class QueueEntry(object):
def __init__(self):
self.id = None
self.upvotes = set() # Set of user ids
self.downvotes = set() # Set of user ids
self.track_key = ''
self.submitter = None
self.timestamp = datetime.datetime.utcnow()
self.party_id = ''
@staticmethod
def get(connection, party_id, id):
data = connection.hgetall('parties:%s:queue:%s' % (party_id, id))
if data:
output = QueueEntry()
output.id = id
output.party_id = party_id
output.track_key = data.get('track_key', '')
output.submitter = User.get(connection, data.get('submitter', ''))
output.upvotes = data.get('upvotes', '').split(",")
output.downvotes = data.get('downvotes', '').split(",")
# Filter out empty strings
output.upvotes = set(x for x in output.upvotes if x)
output.downvotes = set(x for x in output.downvotes if x)
output.timestamp = parser.parse(data.get('timestamp', datetime.datetime.utcnow().isoformat()))
return output
else:
return None
def save(self, connection):
if not self.id:
self.id = uuid.uuid4().hex
connection.hmset('parties:%s:queue:%s' % (self.party_id, self.id), {
'track_key': self.track_key,
'submitter': self.submitter.id,
'upvotes': ",".join((str(x) for x in self.upvotes if x)),
'downvotes': ",".join((str(x) for x in self.downvotes if x)),
'timestamp': self.timestamp.isoformat()
})
@property
def score(self):
return len(self.upvotes) - len(self.downvotes)
def upvote(self, user):
if user.id in self.downvotes:
self.downvotes.remove(user.id)
self.upvotes.add(user.id)
def downvote(self, user):
if user.id in self.upvotes:
self.upvotes.remove(user.id)
self.downvotes.add(user.id)
def __cmp__(self, other):
if isinstance(other, QueueEntry):
if other.score == self.score:
return cmp(self.timestamp, other.timestamp)
return cmp(other.score, self.score)
else:
return -1
def to_dict(self):
queue_dict = {
'track_key': self.track_key,
'submitter': self.submitter.id,
'upvotes': ",".join(self.upvotes),
'downvotes': ",".join(self.downvotes),
'timestamp': self.timestamp.isoformat()
}
return queue_dict
def to_json(self):
return json.dumps(self.to_dict())
class User(object):
def __init__(self):
self.id = None
self.display_name = None
self.icon_url = None
self.user_url = None
self.last_check_in = None
self.party_id = None
@property
def active(self):
    # Active means the last check-in happened within the last five minutes.
    return datetime.datetime.utcnow() - self.last_check_in <= datetime.timedelta(minutes=5)
@staticmethod
def get(connection, id):
data = connection.hgetall('users:%s' % id)
if data:
output = User()
output.id = id
output.display_name = data.get('display_name', '')
output.icon_url = data.get('icon', '')
output.user_url = data.get('user_url', '')
output.last_check_in = parser.parse(data.get('last_check_in', datetime.datetime.utcnow().isoformat()))
output.party_id = data.get('party_id', '')
return output
else:
return None
@staticmethod
def getall(connection):
ids = connection.smembers('users')
return [
User.get(connection, i) for i in ids
]
@staticmethod
def from_request(connection, request):
uuid = request.session.get('uuid')
user = User.get(connection, uuid)
if not user:
user = User()
user.id = uuid
user.last_check_in = datetime.datetime.utcnow()
icons = [
'/static/img/icons/husky.jpeg',
'/static/img/icons/raccoon.jpeg',
'/static/img/icons/glasses_cat.jpeg',
'/static/img/icons/shepherd.jpeg',
'/static/img/icons/rhino.jpeg',
]
user.icon_url = random.choice(icons)
user.display_name = request.session.get('display_name')
user.save(connection)
return user
def checked_in_recently(self):
return datetime.datetime.utcnow() - self.last_check_in <= datetime.timedelta(seconds=ACTIVITY_EXPIRES)
def is_active(self, party_id):
return self.party_id == party_id and self.checked_in_recently()
def visit_party(self, party_id):
self.party_id = party_id
self.last_check_in = datetime.datetime.utcnow()
def save(self, connection):
if not self.id:
self.id = connection.scard('users') + 1
connection.hmset("users:%s" % self.id, {
"display_name": self.display_name,
"icon": self.icon_url,
"user_url": self.user_url,
"last_check_in": self.last_check_in,
"party_id": self.party_id
})
connection.sadd('users', self.id)
def to_dict(self):
return {
"id": self.id,
"display_name": self.display_name,
"icon": self.icon_url,
"user_url": self.user_url,
"last_check_in": self.last_check_in.isoformat(),
"is_active": self.is_active(self.party_id),
"party_id": self.party_id
}
def to_json(self):
return json.dumps(self.to_dict())
class Message(object):
def __init__(self):
self.message_type = None
self.timestamp = datetime.datetime.utcnow()
# For type == 'chat'
self.user_id = None
self.text = None
# For type == 'new_track'
self.track_key = None
self.track_title = None
self.track_artist = None
self.track_url = None
self.icon_url = None
@staticmethod
def get_recent(connection, party_id, count=50):
message_ids = connection.zrange('parties:%s:messages' % party_id, -count, -1)
messages = [
Message.get(connection, party_id, message_id) for message_id in message_ids
]
return messages
@staticmethod
def make_now_playing_message(connection, party, track_key):
output = Message.for_party(connection, party)
output.message_type = 'new_track'
output.track_key = track_key
if track_key:
track_info = get_rdio_track_data(track_key)
output.track_title = track_info['name']
output.track_artist = track_info['artist']
output.track_url = 'http://rdio.com%s' % track_info['url']
output.icon_url = track_info['dynamicIcon']
return output
@staticmethod
def for_party(connection, party):
m = Message()
m.id = Message.get_next_message_id(connection, party)
m.party_id = party.id
return m
@staticmethod
def get_next_message_id(connection, party):
return connection.incr('parties:%s:message_id' % party.id)
@staticmethod
def get(connection, party_id, message_id):
schema = {
'message_type': None,
'text': None,
'user_id': None,
'track': None,
'track_key': None,
'track_title': None,
'track_artist': None,
'track_url': None,
'icon_url': None,
'timestamp': None,
}
data = {}
values = connection.hmget('parties:%s:messages:%s' % (party_id, message_id), schema.keys())
output = Message()
output.id = message_id
for index, key in enumerate(schema.keys()):
data[key] = values[index]
setattr(output, key, values[index])
output.timestamp = parser.parse(data['timestamp']) if data['timestamp'] else datetime.datetime.utcnow()
return output
def save(self, connection):
redis_dict = {k: v for k, v in self.to_dict().iteritems() if v is not None}
# redis hmsets None as the string 'None', so delete those fields.
delete_fields = [k for k, v in self.to_dict().iteritems() if v is None]
    if delete_fields:
      connection.hdel('parties:%s:messages:%s' % (self.party_id, self.id), *delete_fields)
connection.hmset('parties:%s:messages:%s' % (self.party_id, self.id), redis_dict)
connection.zadd('parties:%s:messages' % self.party_id, calendar.timegm(time.gmtime()), self.id)
def to_dict(self):
data = {
'message_type': self.message_type,
'timestamp': self.timestamp.isoformat(),
}
if (self.message_type == "chat"):
data.update({
'text': self.text,
'user_id': self.user_id,
})
elif (self.message_type == "new_track"):
data.update({
'track_key': self.track_key,
'track_title': self.track_title,
'track_artist': self.track_artist,
'track_url': self.track_url,
'icon_url': self.icon_url
})
return data
def to_json(self):
return json.dumps(self.to_dict())
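# Illustrative usage sketch, not part of the original module: the queue logic
# below runs purely in memory; ids are assigned by hand instead of via save(),
# and no Redis connection is involved.
if __name__ == '__main__':
  party = Party()
  party.id = 'demo-party'
  dj = User()
  dj.id = 'user-1'
  dj.display_name = 'demo dj'
  party._users[dj.id] = dj
  entry = party.enqueue_song(dj, 't12345')  # queueing auto-upvotes for the submitter
  assert entry.score == 1
  party.play_next_track()                   # pops the only entry and starts playback
  assert party.playing_track_key == 't12345'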
|
{
"content_hash": "8fa88fbd3944fb6dc8e3a6ac3c263171",
"timestamp": "",
"source": "github",
"line_count": 577,
"max_line_length": 117,
"avg_line_length": 29.395147313691506,
"alnum_prop": 0.6275573374211426,
"repo_name": "mkapolka/rdiodj",
"id": "7574b1f720eb03aad9dcc340553934fe28ed20d7",
"size": "16961",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "sutrofm/redis_models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11614"
},
{
"name": "HTML",
"bytes": "19739"
},
{
"name": "JavaScript",
"bytes": "59301"
},
{
"name": "Python",
"bytes": "51584"
}
],
"symlink_target": ""
}
|
from django.apps import AppConfig
class ExtcommentsConfig(AppConfig):
name = "extcomments"
|
{
"content_hash": "dac4fda88bcdf16144865c0b466abc3c",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 35,
"avg_line_length": 19.4,
"alnum_prop": 0.7731958762886598,
"repo_name": "pbanaszkiewicz/amy",
"id": "e02c2aba8a8c5b3c14e32440cc5c73c71acb55ed",
"size": "97",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "amy/extcomments/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5850"
},
{
"name": "Dockerfile",
"bytes": "1034"
},
{
"name": "HTML",
"bytes": "313293"
},
{
"name": "JavaScript",
"bytes": "39427"
},
{
"name": "Makefile",
"bytes": "1780"
},
{
"name": "Python",
"bytes": "2707815"
}
],
"symlink_target": ""
}
|
import os
import sys
import warnings
from itertools import chain
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
sys.path.insert(0, './wlauto/core/')
from version import get_wa_version
# happens if falling back to distutils
warnings.filterwarnings('ignore', "Unknown distribution option: 'install_requires'")
warnings.filterwarnings('ignore', "Unknown distribution option: 'extras_require'")
try:
os.remove('MANIFEST')
except OSError:
pass
packages = []
data_files = {}
source_dir = os.path.dirname(__file__)
for root, dirs, files in os.walk('wlauto'):
rel_dir = os.path.relpath(root, source_dir)
data = []
if '__init__.py' in files:
for f in files:
if os.path.splitext(f)[1] not in ['.py', '.pyc', '.pyo']:
data.append(f)
package_name = rel_dir.replace(os.sep, '.')
package_dir = root
packages.append(package_name)
data_files[package_name] = data
else:
# use previous package name
filepaths = [os.path.join(root, f) for f in files]
data_files[package_name].extend([os.path.relpath(f, package_dir) for f in filepaths])
scripts = [os.path.join('scripts', s) for s in os.listdir('scripts')]
params = dict(
name='wlauto',
    description='A framework for automating workload execution and measurement collection on ARM devices.',
version=get_wa_version(),
packages=packages,
package_data=data_files,
scripts=scripts,
url='N/A',
license='Apache v2',
maintainer='ARM Architecture & Technology Device Lab',
maintainer_email='workload-automation@arm.com',
install_requires=[
'python-dateutil', # converting between UTC and local time.
        'pexpect>=3.3',         # Send/receive to/from device
'pyserial', # Serial port interface
'colorama', # Printing with colors
'pyYAML', # YAML-formatted agenda parsing
],
extras_require={
'other': ['jinja2', 'pandas>=0.13.1'],
'test': ['nose'],
'mongodb': ['pymongo'],
'doc': ['sphinx'],
},
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
],
)
all_extras = list(chain(params['extras_require'].itervalues()))
params['extras_require']['everything'] = all_extras
setup(**params)
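# Not part of the original setup script: with the 'everything' extra built
# above, an editable install with all optional dependencies would look roughly
# like "pip install -e .[everything]".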
|
{
"content_hash": "2402c0a655ba30e4562920a8aab131b3",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 106,
"avg_line_length": 31.666666666666668,
"alnum_prop": 0.6350877192982456,
"repo_name": "rockyzhang/workload-automation",
"id": "4eb13f9893d8445bf7c7ec5a28d8da334613627b",
"size": "3151",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "35633"
},
{
"name": "HTML",
"bytes": "8402"
},
{
"name": "Java",
"bytes": "91333"
},
{
"name": "JavaScript",
"bytes": "6578"
},
{
"name": "Makefile",
"bytes": "430"
},
{
"name": "Prolog",
"bytes": "31390"
},
{
"name": "Python",
"bytes": "968893"
},
{
"name": "Shell",
"bytes": "23204"
},
{
"name": "VimL",
"bytes": "901"
}
],
"symlink_target": ""
}
|
"""Local file storage."""
import errno
import hashlib
import os
import shutil
import jinja2
from grow.storage import base_storage
class FileStorage(base_storage.BaseStorage):
@staticmethod
def open(filename, mode=None):
if mode is None:
mode = 'r'
return open(filename, mode=mode)
@staticmethod
def read(filename):
fp = open(filename)
content = fp.read()
fp.close()
return content
@staticmethod
def modified(filename):
return os.stat(filename).st_mtime
@staticmethod
def size(filename):
return os.path.getsize(filename)
@staticmethod
def stat(filename):
return os.stat(filename)
@staticmethod
def hash(filename):
hash_digest = hashlib.sha256()
with open(filename, "rb") as source_file:
for chunk in iter(lambda: source_file.read(4096), b""):
hash_digest.update(chunk)
return hash_digest.hexdigest()
@staticmethod
def listdir(dirpath, recursive=True):
paths = []
for root, _, files in os.walk(dirpath, topdown=True, followlinks=True):
for filename in files:
path = os.path.join(root, filename)[len(dirpath):]
paths.append(path)
# if not recursive, break after walking top-level dir
if not recursive:
break
return paths
@staticmethod
def walk(dirpath):
return os.walk(dirpath, followlinks=True)
@staticmethod
def JinjaLoader(path):
return jinja2.FileSystemLoader(path)
@classmethod
def write(cls, path, content):
dirname = os.path.dirname(path)
try:
os.makedirs(dirname)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(dirname):
pass
else:
raise
with cls.open(path, mode='w') as fp:
fp.write(content)
@staticmethod
def exists(filename):
return os.path.exists(filename)
@staticmethod
def delete(filename):
return os.remove(filename)
@staticmethod
def delete_dir(dirpath):
shutil.rmtree(dirpath)
@staticmethod
def delete_files(dirpaths, recursive=False, pattern=None):
"""Delete files from within the dirpaths that match a pattern."""
for dirpath in dirpaths:
for root, _, files in os.walk(dirpath, followlinks=True):
for filename in files:
if not pattern or pattern.search(filename):
os.remove(os.path.join(root, filename))
if not recursive:
break
@staticmethod
def copy_to(paths, target_paths):
# TODO(jeremydw): Rename to bulk_copy_to.
for i, path in enumerate(paths):
target_path = target_paths[i]
shutil.copyfile(path, target_path)
shutil.copystat(path, target_path)
@staticmethod
def move_to(path, target_path):
os.rename(path, target_path)
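# Illustrative usage sketch, not part of the original module: round-trip a file
# through the static helpers above (the temp path is an assumption).
if __name__ == '__main__':
    import tempfile
    demo_path = os.path.join(tempfile.mkdtemp(), 'demo', 'hello.txt')
    FileStorage.write(demo_path, 'hello grow')           # creates parent dirs as needed
    assert FileStorage.read(demo_path) == 'hello grow'
    print(FileStorage.hash(demo_path))                   # sha256 hex digest of the contents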
|
{
"content_hash": "4a4eb1005dc92f078d121689410c7fb6",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 79,
"avg_line_length": 27.81081081081081,
"alnum_prop": 0.5892452218982831,
"repo_name": "grow/pygrow",
"id": "217450307df80ba91346dc6d479eb31250aadf74",
"size": "3087",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grow/storage/file_storage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "527"
},
{
"name": "HTML",
"bytes": "8714"
},
{
"name": "Python",
"bytes": "309004"
},
{
"name": "Shell",
"bytes": "4219"
}
],
"symlink_target": ""
}
|
import os
import sys
from tools import impl
parser = impl.argparser()
parser.add_argument("-o", dest="output", action=impl.StripQuotesAction)
parser.add_argument("-test-arg", action=impl.StripQuotesAction)
(options, args) = parser.parse_known_args()
assert os.path.exists(options.test_arg), options.test_arg
# ranlib may have hid the archive next to what buck thinks the archive is
input = args[-1] + ".secret"
if not os.path.exists(input):
input = args[-1]
with open(options.output, "w") as output:
output.write("linker:\n")
with open(input) as inputfile:
output.write(inputfile.read())
sys.exit(0)
|
{
"content_hash": "03cb16c4367c21a0b3e80ce78bed3b6d",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 73,
"avg_line_length": 23.333333333333332,
"alnum_prop": 0.7111111111111111,
"repo_name": "kageiit/buck",
"id": "963b2a50726f9ab101e461583f22d8a407c73e40",
"size": "654",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "test/com/facebook/buck/android/testdata/ndk_toolchain/ndk_toolchain/ndk_linker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1692"
},
{
"name": "C",
"bytes": "250514"
},
{
"name": "CSS",
"bytes": "56119"
},
{
"name": "Dockerfile",
"bytes": "2094"
},
{
"name": "HTML",
"bytes": "11770"
},
{
"name": "Java",
"bytes": "33114896"
},
{
"name": "JavaScript",
"bytes": "931240"
},
{
"name": "Kotlin",
"bytes": "310039"
},
{
"name": "Lex",
"bytes": "14469"
},
{
"name": "Makefile",
"bytes": "1704"
},
{
"name": "PowerShell",
"bytes": "2154"
},
{
"name": "Python",
"bytes": "2152087"
},
{
"name": "Shell",
"bytes": "43626"
},
{
"name": "Smalltalk",
"bytes": "194"
},
{
"name": "Thrift",
"bytes": "18638"
}
],
"symlink_target": ""
}
|
"""
Auto-generated class for Cluster
"""
from .EnumClusterDriveType import EnumClusterDriveType
from .EnumClusterStatus import EnumClusterStatus
from .StorageServer import StorageServer
from . import client_support
class Cluster(object):
"""
auto-generated. don't touch.
"""
@staticmethod
def create(clusterType, driveType, label, nodes, status, storageServers):
"""
:type clusterType: str
:type driveType: EnumClusterDriveType
:type label: str
:type nodes: list[str]
:type status: EnumClusterStatus
:type storageServers: list[StorageServer]
:rtype: Cluster
"""
return Cluster(
clusterType=clusterType,
driveType=driveType,
label=label,
nodes=nodes,
status=status,
storageServers=storageServers,
)
def __init__(self, json=None, **kwargs):
if json is None and not kwargs:
raise ValueError('No data or kwargs present')
class_name = 'Cluster'
create_error = '{cls}: unable to create {prop} from value: {val}: {err}'
required_error = '{cls}: missing required property {prop}'
data = json or kwargs
property_name = 'clusterType'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.clusterType = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'driveType'
val = data.get(property_name)
if val is not None:
datatypes = [EnumClusterDriveType]
try:
self.driveType = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'label'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.label = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'nodes'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.nodes = client_support.list_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'status'
val = data.get(property_name)
if val is not None:
datatypes = [EnumClusterStatus]
try:
self.status = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'storageServers'
val = data.get(property_name)
if val is not None:
datatypes = [StorageServer]
try:
self.storageServers = client_support.list_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
def __str__(self):
return self.as_json(indent=4)
def as_json(self, indent=0):
return client_support.to_json(self, indent=indent)
def as_dict(self):
return client_support.to_dict(self)
|
{
"content_hash": "0a1f6764127a4446a3d161800d67737a",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 107,
"avg_line_length": 36.166666666666664,
"alnum_prop": 0.6013824884792627,
"repo_name": "g8os/grid",
"id": "74c5276d2c3d7f59ca4e38c6637ce513ffdfca98",
"size": "4340",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyclient/zeroos/orchestrator/client/Cluster.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
from templar.api.config import ConfigBuilder
configuration = ConfigBuilder().build()
|
{
"content_hash": "d1eacf71b413d64b290fba82493cbdbc",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 44,
"avg_line_length": 42.5,
"alnum_prop": 0.8235294117647058,
"repo_name": "albert12132/templar",
"id": "aba1e4b9254b73c3bca8e0e9bca9677097b83960",
"size": "175",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/cli/test_data/no_variable_config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "29"
},
{
"name": "Python",
"bytes": "174492"
}
],
"symlink_target": ""
}
|
import json
import time
import urllib
from ..auth import digest
from ..import rpc
# @gist PutPolicy
class PutPolicy(object):
    scope = None             # either bucketName or bucketName:key
    expires = 3600           # 3600 seconds by default
callbackUrl = None
callbackBody = None
returnUrl = None
returnBody = None
endUser = None
asyncOps = None
saveKey = None
insertOnly = None
detectMime = None
fsizeLimit = None
persistentNotifyUrl = None
persistentOps = None
def __init__(self, scope):
self.scope = scope
# @endgist
def token(self, mac=None):
if mac is None:
mac = digest.Mac()
token = dict(
scope = self.scope,
deadline = int(time.time()) + self.expires,
)
if self.callbackUrl is not None:
token["callbackUrl"] = self.callbackUrl
if self.callbackBody is not None:
token["callbackBody"] = self.callbackBody
if self.returnUrl is not None:
token["returnUrl"] = self.returnUrl
if self.returnBody is not None:
token["returnBody"] = self.returnBody
if self.endUser is not None:
token["endUser"] = self.endUser
if self.asyncOps is not None:
token["asyncOps"] = self.asyncOps
if self.saveKey is not None:
token["saveKey"] = self.saveKey
if self.insertOnly is not None:
token["exclusive"] = self.insertOnly
if self.detectMime is not None:
token["detectMime"] = self.detectMime
if self.fsizeLimit is not None:
token["fsizeLimit"] = self.fsizeLimit
if self.persistentOps is not None:
token["persistentOps"] = self.persistentOps
if self.persistentNotifyUrl is not None:
token["persistentNotifyUrl"] = self.persistentNotifyUrl
b = json.dumps(token, separators=(',',':'))
return mac.sign_with_data(b)
class GetPolicy(object):
expires = 3600
def __init__(self):
pass
def make_request(self, base_url, mac=None):
'''
* return private_url
'''
if mac is None:
mac = digest.Mac()
deadline = int(time.time()) + self.expires
if '?' in base_url:
base_url += '&'
else:
base_url += '?'
base_url = '%se=%s' % (base_url, str(deadline))
token = mac.sign(base_url)
return '%s&token=%s' % (base_url, token)
def make_base_url(domain, key):
'''
* domain => str
* key => str
* return base_url
'''
key = rpc.encode_unicode(key)
return 'http://%s/%s' % (domain, urllib.quote(key))
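# Illustrative sketch, not part of the original module: the bucket, key and
# domain below are made up, and digest.Mac() is assumed to pick up configured
# access/secret keys for the account.
if __name__ == '__main__':
    put = PutPolicy('my-bucket:photo.jpg')
    print put.token()                                     # signed upload token
    base = make_base_url('cdn.example.com', 'photo.jpg')
    print GetPolicy().make_request(base)                  # signed private download url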
|
{
"content_hash": "d186f0e4dcbac9702300177209b2cb36",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 60,
"avg_line_length": 21.49532710280374,
"alnum_prop": 0.6630434782608695,
"repo_name": "yobin/saepy-log",
"id": "fad90198119b83037cf2e291a61902d239e63177",
"size": "2342",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "qiniu/rs/rs_token.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16475"
},
{
"name": "HTML",
"bytes": "162448"
},
{
"name": "JavaScript",
"bytes": "34474"
},
{
"name": "Python",
"bytes": "2561292"
}
],
"symlink_target": ""
}
|
import io
import os
import sys
import random
import hashlib
import unittest
import pickle
import json
from heartbeat.exc import HeartbeatError
from heartbeat import Merkle
from GenericCorrectnessTests import GenericCorrectnessTests
class TestMerkleHelper(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_get_next_seed(self):
seed = os.urandom(32)
key = os.urandom(32)
self.assertEqual(Merkle.MerkleHelper.get_next_seed(key,seed),Merkle.MerkleHelper.get_next_seed(key,seed))
def test_get_file_hash(self):
seed = os.urandom(32)
key = os.urandom(32)
for i in range(0,10):
seed = Merkle.MerkleHelper.get_next_seed(key,seed)
with open('files/test.txt','rb') as file:
hash = Merkle.MerkleHelper.get_file_hash(file,seed)
with open('files/test2.txt','rb') as file:
hash2 = Merkle.MerkleHelper.get_file_hash(file,seed)
self.assertEqual(hash,hash2)
def test_get_chunk_hash(self):
seed = os.urandom(32)
key = os.urandom(32)
for i in range(0,100):
seed = Merkle.MerkleHelper.get_next_seed(key,seed)
with open('files/test.txt','rb') as file:
hash = Merkle.MerkleHelper.get_chunk_hash(file,seed)
with open('files/test2.txt','rb') as file:
hash2 = Merkle.MerkleHelper.get_chunk_hash(file,seed)
self.assertEqual(hash,hash2)
class TestMerkleTree(unittest.TestCase):
def test_build(self):
leaf_counts = [1, 9, 257]
for i in leaf_counts:
mt = Merkle.MerkleTree()
for j in range(0,i):
mt.add_leaf(os.urandom(32))
mt.build()
# check all the leaves
for j in range(0,i):
self.assertTrue(Merkle.MerkleTree.verify_branch(mt.leaves[j],mt.get_branch(j),mt.get_root()))
def test_invalid_leaf(self):
self.assertFalse(Merkle.MerkleTree.verify_branch([],[],None))
def test_get_partner(self):
for i in range(0,20):
j = random.randint(0,100)
p = Merkle.MerkleTree.get_partner(j)
if (Merkle.MerkleTree.is_left(j)):
p2 = j+1
else:
p2 = j-1
self.assertEqual(p,p2)
def test_invalid_root(self):
mt = Merkle.MerkleTree()
for i in range(0,10):
mt.add_leaf(os.urandom(32))
mt.build()
for i in range(0,10):
self.assertFalse(Merkle.MerkleTree.verify_branch(mt.leaves[i],mt.get_branch(i),os.urandom(32)))
def test_serialization(self):
mt = Merkle.MerkleTree()
for i in range(0,10):
mt.add_leaf(os.urandom(32))
mt.build()
d = mt.todict()
mt2 = Merkle.MerkleTree.fromdict(d)
self.assertEqual(mt,mt2)
class TestMerkle(unittest.TestCase):
def test_signing(self):
state = Merkle.State(0,os.urandom(32),256,os.urandom(32))
key = os.urandom(32)
state.sign(key)
state.checksig(key)
# modify
state.seed = os.urandom(32)
with self.assertRaises(HeartbeatError) as ex:
state.checksig(key)
ex_msg = ex.exception.message
self.assertEqual("Signature invalid on state.",ex_msg)
def test_init(self):
k = os.urandom(32)
beat = Merkle.Merkle(key=k)
self.assertEqual(k,beat.key)
def test_run_out_of_challenges(self):
beat = Merkle.Merkle()
# encode with 200 challenges
with open('files/test.txt','rb') as file:
(tag,state) = beat.encode(file,200)
with self.assertRaises(HeartbeatError) as ex:
for i in range(0,201):
chal = beat.gen_challenge(state)
ex_msg = ex.exception.message
self.assertEqual("Out of challenges.",ex_msg)
def test_comparison(self):
k = os.urandom(32)
k3 = os.urandom(32)
beat1 = Merkle.Merkle(key=k)
beat2 = Merkle.Merkle(key=k)
beat3 = Merkle.Merkle(key=k3)
s = os.urandom(32)
s3 = os.urandom(32)
with open('files/test.txt','rb') as file:
(tag1,state1) = beat1.encode(file,200,s)
file.seek(0)
(tag2,state2) = beat2.encode(file,200,s)
file.seek(0)
(tag3,state3) = beat3.encode(file,200,s3)
chal1 = beat1.gen_challenge(state1)
chal2 = beat2.gen_challenge(state2)
chal3 = beat3.gen_challenge(state3)
with open('files/test.txt','rb') as file:
proof1 = beat1.prove(file,chal1,tag1)
file.seek(0)
proof2 = beat2.prove(file,chal2,tag2)
file.seek(0)
proof3 = beat3.prove(file,chal3,tag3)
self.assertEqual(beat1,beat2)
self.assertNotEqual(beat1,beat3)
self.assertEqual(tag1,tag2)
self.assertNotEqual(tag1,tag3)
self.assertEqual(state1,state2)
self.assertNotEqual(state1,state3)
self.assertEqual(chal1,chal2)
self.assertNotEqual(chal1,chal3)
self.assertEqual(proof1,proof2)
self.assertNotEqual(proof1,proof3)
class TestCorrectness(unittest.TestCase):
def test_correctness(self):
GenericCorrectnessTests.generic_correctness_test(self,Merkle.Merkle)
def test_scheme(self):
GenericCorrectnessTests.generic_scheme_test(self,Merkle.Merkle)
def test_repeated(self):
GenericCorrectnessTests.generic_test_repeated_challenge(self,Merkle.Merkle)
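# Illustrative proof-of-storage round trip, not part of the original tests:
# the challenge count and file path are assumptions, and verify() is assumed
# from the scheme interface exercised by GenericCorrectnessTests above.
def example_merkle_roundtrip():
    beat = Merkle.Merkle()
    with open('files/test.txt', 'rb') as f:
        tag, state = beat.encode(f, 10)
    challenge = beat.gen_challenge(state)
    with open('files/test.txt', 'rb') as f:
        proof = beat.prove(f, challenge, tag)
    return beat.verify(proof, challenge, state)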
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "0708cfbd2cc80ba62be1ef9ab4b311fc",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 113,
"avg_line_length": 32.12707182320442,
"alnum_prop": 0.5800515907136715,
"repo_name": "Storj/heartbeat",
"id": "e9db05891ed0904c2ec3e3ff44bb88f27c7ee629",
"size": "7148",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/tests_unit_merkle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "15181"
},
{
"name": "C++",
"bytes": "580197"
},
{
"name": "Python",
"bytes": "111727"
}
],
"symlink_target": ""
}
|
import networkx as nx
from bokeh.io import show, output_file
from bokeh.models import Plot, Range1d, MultiLine, Circle, HoverTool, TapTool, BoxSelectTool
from bokeh.models.graphs import from_networkx, NodesAndLinkedEdges, EdgesAndLinkedNodes
from bokeh.palettes import Spectral4
G=nx.karate_club_graph()
plot = Plot(plot_width=400, plot_height=400,
x_range=Range1d(-1.1,1.1), y_range=Range1d(-1.1,1.1))
plot.title.text = "Graph Interaction Demonstration"
plot.add_tools(HoverTool(tooltips=None), TapTool(), BoxSelectTool())
graph_renderer = from_networkx(G, nx.circular_layout, scale=1, center=(0,0))
graph_renderer.node_renderer.glyph = Circle(size=15, fill_color=Spectral4[0])
graph_renderer.node_renderer.selection_glyph = Circle(size=15, fill_color=Spectral4[2])
graph_renderer.node_renderer.hover_glyph = Circle(size=15, fill_color=Spectral4[1])
graph_renderer.edge_renderer.glyph = MultiLine(line_color="#CCCCCC", line_alpha=0.8, line_width=5)
graph_renderer.edge_renderer.selection_glyph = MultiLine(line_color=Spectral4[2], line_width=5)
graph_renderer.edge_renderer.hover_glyph = MultiLine(line_color=Spectral4[1], line_width=5)
graph_renderer.selection_policy = NodesAndLinkedEdges()
graph_renderer.inspection_policy = EdgesAndLinkedNodes()
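# Not part of the original example: swapping the two policies above would make
# hover highlight a node plus its linked edges and selection highlight an edge
# plus its linked nodes instead:
#   graph_renderer.selection_policy = EdgesAndLinkedNodes()
#   graph_renderer.inspection_policy = NodesAndLinkedEdges()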
plot.renderers.append(graph_renderer)
output_file("interactive_graphs.html")
show(plot)
|
{
"content_hash": "19e38f2f9737e7ebcca19c29a1040029",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 98,
"avg_line_length": 42.53125,
"alnum_prop": 0.7773695811903012,
"repo_name": "timsnyder/bokeh",
"id": "5c6da2e933d1924ad5f2c3042d064ffdee48102d",
"size": "1361",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "sphinx/source/docs/user_guide/examples/graph_interaction.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1442"
},
{
"name": "CSS",
"bytes": "24877"
},
{
"name": "Dockerfile",
"bytes": "4099"
},
{
"name": "HTML",
"bytes": "54062"
},
{
"name": "JavaScript",
"bytes": "27797"
},
{
"name": "Makefile",
"bytes": "886"
},
{
"name": "PowerShell",
"bytes": "713"
},
{
"name": "Python",
"bytes": "3827067"
},
{
"name": "Roff",
"bytes": "495"
},
{
"name": "Shell",
"bytes": "9953"
},
{
"name": "TypeScript",
"bytes": "2145262"
}
],
"symlink_target": ""
}
|
import functools
import inspect
from .patch import monkey_patch
def wraps(wrapped):
""" a convenience function on top of functools.wraps:
- adds the original function to the wrapped function as __wrapped__ attribute."""
def new_decorator(f):
returned = functools.wraps(wrapped)(f)
returned.__wrapped__ = wrapped
return returned
return new_decorator
def inspect_getargspec_patch(func):
"""calls inspect's getargspec with func.__wrapped__ if exists, else with func"""
return inspect._infi_patched_getargspec(_get_innner_func(func))
def ipython_getargspec_patch(func):
return _ipython_inspect_module._infi_patched_getargspec(_get_innner_func(func))
def _get_innner_func(f):
while True:
wrapped = getattr(f, "__wrapped__", None)
if wrapped is None:
return f
f = wrapped
monkey_patch(inspect, "getargspec", inspect_getargspec_patch)
_ipython_inspect_module = None
try:
# ipython 0.11
from IPython.core import oinspect as _ipython_inspect_module
except ImportError:
try:
# ipython 0.10.2
from IPython import OInspect as _ipython_inspect_module
except ImportError:
pass
if _ipython_inspect_module is not None:
monkey_patch(_ipython_inspect_module, "getargspec", ipython_getargspec_patch)
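# Illustrative sketch, not part of the original module (names are made up):
# because the patched getargspec follows __wrapped__, introspection still
# reports the wrapped function's original signature.
if __name__ == '__main__':
    def _noisy(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs)
        return wrapper

    @_noisy
    def greet(name, excited=False):
        return 'hi %s%s' % (name, '!' if excited else '')

    assert inspect.getargspec(greet).args == ['name', 'excited']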
|
{
"content_hash": "ee5069c29bb8d3f9b98ae5a77615a31b",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 85,
"avg_line_length": 31.523809523809526,
"alnum_prop": 0.6910876132930514,
"repo_name": "Infinidat/infi.pyutils",
"id": "90e3939e5c016ee2d290480d58fb6799fc4756a2",
"size": "1324",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "infi/pyutils/decorators.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "97746"
}
],
"symlink_target": ""
}
|
"""
Salt states to create and manage VMware vSphere datacenters (datacenters).
:codeauthor: `Alexandru Bleotu <alexandru.bleotu@morganstaley.com>`
Dependencies
============
- pyVmomi Python Module
States
======
datacenter_configured
---------------------
Makes sure a datacenter exists and is correctly configured.
If the state is run by an ``esxdatacenter`` minion, the name of the datacenter
is retrieved from the proxy details, otherwise the datacenter has the same name
as the state.
Supported proxies: esxdatacenter
Example:
1. Make sure that a datacenter named ``target_dc`` exists on the vCenter, using a
``esxdatacenter`` proxy:
Proxy minion configuration (connects passthrough to the vCenter):
.. code-block:: yaml
proxy:
proxytype: esxdatacenter
datacenter: target_dc
vcenter: vcenter.fake.com
mechanism: sspi
domain: fake.com
principal: host
State configuration:
.. code-block:: yaml
datacenter_state:
esxdatacenter.datacenter_configured
"""
import logging
from functools import wraps
import salt.exceptions
import salt.utils.args
import salt.utils.versions
# Get Logging Started
log = logging.getLogger(__name__)
LOGIN_DETAILS = {}
def __virtual__():
return "esxdatacenter"
def _deprecation_message(function):
"""
    Decorator wrapper to warn about esxdatacenter deprecation
"""
@wraps(function)
def wrapped(*args, **kwargs):
salt.utils.versions.warn_until(
"Argon",
"The 'esxdatacenter' functionality in Salt has been deprecated and its "
"functionality will be removed in version 3008 in favor of the "
"saltext.vmware Salt Extension. "
"(https://github.com/saltstack/salt-ext-modules-vmware)",
category=FutureWarning,
)
ret = function(*args, **salt.utils.args.clean_kwargs(**kwargs))
return ret
return wrapped
@_deprecation_message
def mod_init(low):
return True
@_deprecation_message
def datacenter_configured(name):
"""
Makes sure a datacenter exists.
If the state is run by an ``esxdatacenter`` minion, the name of the
datacenter is retrieved from the proxy details, otherwise the datacenter
has the same name as the state.
Supported proxies: esxdatacenter
name:
Datacenter name. Ignored if the proxytype is ``esxdatacenter``.
"""
proxy_type = __salt__["vsphere.get_proxy_type"]()
if proxy_type == "esxdatacenter":
dc_name = __salt__["esxdatacenter.get_details"]()["datacenter"]
else:
dc_name = name
log.info("Running datacenter_configured for datacenter '%s'", dc_name)
ret = {"name": name, "changes": {}, "result": None, "comment": "Default"}
comments = []
si = None
try:
si = __salt__["vsphere.get_service_instance_via_proxy"]()
dcs = __salt__["vsphere.list_datacenters_via_proxy"](
datacenter_names=[dc_name], service_instance=si
)
if not dcs:
if __opts__["test"]:
comments.append("State will create datacenter '{}'.".format(dc_name))
else:
log.debug("Creating datacenter '%s'", dc_name)
__salt__["vsphere.create_datacenter"](dc_name, si)
comments.append("Created datacenter '{}'.".format(dc_name))
log.info(comments[-1])
ret["changes"].update({"new": {"name": dc_name}})
else:
comments.append(
"Datacenter '{}' already exists. Nothing to be done.".format(dc_name)
)
log.info(comments[-1])
__salt__["vsphere.disconnect"](si)
ret["comment"] = "\n".join(comments)
ret["result"] = None if __opts__["test"] and ret["changes"] else True
return ret
except salt.exceptions.CommandExecutionError as exc:
log.error("Error: %s", exc)
if si:
__salt__["vsphere.disconnect"](si)
ret.update(
{"result": False if not __opts__["test"] else None, "comment": str(exc)}
)
return ret
|
{
"content_hash": "958a0f1cd569ac812f557f1bd7f440dd",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 85,
"avg_line_length": 27.944827586206898,
"alnum_prop": 0.6184600197433366,
"repo_name": "saltstack/salt",
"id": "3bc3d1dcc02ab0e9c22f19a7f1c60862e6aafe7f",
"size": "4052",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "salt/states/esxdatacenter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "14911"
},
{
"name": "C",
"bytes": "1571"
},
{
"name": "Cython",
"bytes": "1458"
},
{
"name": "Dockerfile",
"bytes": "184"
},
{
"name": "Groovy",
"bytes": "12318"
},
{
"name": "HCL",
"bytes": "257"
},
{
"name": "HTML",
"bytes": "8031"
},
{
"name": "Jinja",
"bytes": "45598"
},
{
"name": "Makefile",
"bytes": "713"
},
{
"name": "NSIS",
"bytes": "76572"
},
{
"name": "PowerShell",
"bytes": "75891"
},
{
"name": "Python",
"bytes": "41444811"
},
{
"name": "Rich Text Format",
"bytes": "6242"
},
{
"name": "Roff",
"bytes": "191"
},
{
"name": "Ruby",
"bytes": "961"
},
{
"name": "SaltStack",
"bytes": "35856"
},
{
"name": "Scheme",
"bytes": "895"
},
{
"name": "Scilab",
"bytes": "1147"
},
{
"name": "Shell",
"bytes": "524917"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, division, absolute_import
import flask
weberrors = flask.Blueprint('web_error_handlers', __name__)
|
{
"content_hash": "d1f336ea5ade96e26d866731ef385b41",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 64,
"avg_line_length": 17.875,
"alnum_prop": 0.7342657342657343,
"repo_name": "bretthandrews/marvin",
"id": "f56fc0489b9754f85876803b5e3d513b6b186232",
"size": "374",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/marvin/web/error_handlers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "210355"
},
{
"name": "HTML",
"bytes": "60149"
},
{
"name": "JavaScript",
"bytes": "207386"
},
{
"name": "Python",
"bytes": "921930"
},
{
"name": "SQLPL",
"bytes": "141212"
},
{
"name": "Shell",
"bytes": "1108"
}
],
"symlink_target": ""
}
|
import argparse
import os
import sys
import lasio
def convert_version():
args = get_convert_version_parser().parse_args(sys.argv[1:])
assert os.path.isfile(args.input)
las = lasio.read(args.input, ignore_header_errors=args.ignore_header_errors)
if os.path.isfile(args.output) and not args.overwrite:
raise OSError("Output file already exists")
with open(args.output, "w") as f:
las.write(f, version=float(args.to))
def get_convert_version_parser():
parser = argparse.ArgumentParser(
"Convert LAS file version",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("-t", "--to", default=2, help="Version to convert to")
parser.add_argument(
"--overwrite",
action="store_true",
default=False,
help="Overwrite output file if it already exists",
)
parser.add_argument(
"-i",
"--ignore-header-errors",
action="store_true",
help="Ignore header section errors.",
default=False,
)
parser.add_argument("input")
parser.add_argument("output")
return parser
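# Illustrative sketch, not part of the original module: the file names are made
# up; this mirrors what convert_version() does, minus the argparse plumbing.
def example_convert(src="input.las", dst="output_v2.las"):
    las = lasio.read(src)
    with open(dst, "w") as f:
        las.write(f, version=2.0)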
|
{
"content_hash": "222067bce4e34fbf7624fbfc44670af7",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 80,
"avg_line_length": 26.6046511627907,
"alnum_prop": 0.6433566433566433,
"repo_name": "kinverarity1/lasio",
"id": "085ee199b197388b27c94c892c22c6be9d56af69",
"size": "1144",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "lasio/convert_version.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "260"
},
{
"name": "Jupyter Notebook",
"bytes": "247701"
},
{
"name": "Python",
"bytes": "250503"
}
],
"symlink_target": ""
}
|
"""
Tests of simple concepts.
"""
import pytest
def test_one_equals_one():
assert 1 == 1
@pytest.mark.xfail
def test_one_equals_two():
assert 1 == 2
def test_ping(client):
res = client.get('/ping')
assert res.status_code == 200
assert res.text == 'pong'
|
{
"content_hash": "57ad05348451c8daab0d601b7e66ba4a",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 33,
"avg_line_length": 14.05,
"alnum_prop": 0.6192170818505338,
"repo_name": "odarbelaeze/condor-api",
"id": "9c74af75d2dc78226991afd50745eea77417c051",
"size": "281",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_concepts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16662"
}
],
"symlink_target": ""
}
|
from lib import actions
class SetHomeAction(actions.BaseAction):
def run(self, structure=None):
if structure:
s = self._get_structure(structure)
else:
s = self._get_default_structure()
s.away = False
return s.away
|
{
"content_hash": "4fc3079374aa5f086a0e83bf9845a2cf",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 46,
"avg_line_length": 23.083333333333332,
"alnum_prop": 0.592057761732852,
"repo_name": "pinterb/st2contrib",
"id": "ce443cd588ee8680776addd22aa681007dc05292",
"size": "277",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "packs/nest/actions/set_home.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2124"
},
{
"name": "Python",
"bytes": "297511"
},
{
"name": "Shell",
"bytes": "2556"
}
],
"symlink_target": ""
}
|
__author__ = 'Tom Schaul, tom@idsia.ch'
from pybrain.structure.parametercontainer import ParameterContainer
from connection import Connection
from full import FullConnection
class OwnershipViolation(Exception):
"""Exception raised when one attempts to write-access the parameters of the
SharedConnection, instead of its mother."""
pass
class MotherConnection(ParameterContainer):
"""The container for the shared parameters of connections (just a container
with a constructor, actually)."""
hasDerivatives = True
nbparams = None
def __init__(self, nbparams, **args):
assert nbparams > 0
ParameterContainer.__init__(self, nbparams, **args)
self.setArgs(nbparams = self.paramdim)
class SharedConnection(Connection):
"""A shared connection can link different couples of modules, with a single
set of parameters (encapsulated in a MotherConnection)."""
#: pointer to MotherConnection
mother = None
def __init__(self, mother, *args, **kwargs):
Connection.__init__(self, *args, **kwargs)
self._replaceParamsByMother(mother)
def _replaceParamsByMother(self, mother):
self.setArgs(mother = mother)
self.paramdim = self.mother.paramdim
def initParams(self, *args): raise OwnershipViolation
@property
def params(self): return self.mother.params
@property
def derivs(self): return self.mother.derivs
def _getName(self):
return self.mother.name if self._name is None else self._name
def _setName(self, newname):
self._name = newname
name = property(_getName, _setName)
class SharedFullConnection(SharedConnection, FullConnection):
"""Shared version of FullConnection."""
def _forwardImplementation(self, inbuf, outbuf):
FullConnection._forwardImplementation(self, inbuf, outbuf)
def _backwardImplementation(self, outerr, inerr, inbuf):
FullConnection._backwardImplementation(self, outerr, inerr, inbuf)
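# Illustrative sketch, not part of the original module: two connections sharing
# one parameter set through a common MotherConnection (layer sizes are made up).
if __name__ == '__main__':
    from pybrain.structure.modules import LinearLayer
    inp, out_a, out_b = LinearLayer(3), LinearLayer(2), LinearLayer(2)
    mother = MotherConnection(6)                 # 3 * 2 weights, shared
    conn_a = SharedFullConnection(mother, inp, out_a)
    conn_b = SharedFullConnection(mother, inp, out_b)
    assert conn_a.params is conn_b.params        # same underlying parameter array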
|
{
"content_hash": "161e90dd5aca872421630fa5883391b3",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 79,
"avg_line_length": 32.06153846153846,
"alnum_prop": 0.6765834932821497,
"repo_name": "rbalda/neural_ocr",
"id": "6493a32b057f634b4b06dc4bee5143ff64e2a135",
"size": "2084",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "env/lib/python2.7/site-packages/pybrain/structure/connections/shared.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "497604"
},
{
"name": "C++",
"bytes": "3309990"
},
{
"name": "CSS",
"bytes": "135235"
},
{
"name": "FORTRAN",
"bytes": "10375"
},
{
"name": "HTML",
"bytes": "215390"
},
{
"name": "JavaScript",
"bytes": "206780"
},
{
"name": "Jupyter Notebook",
"bytes": "16254"
},
{
"name": "Makefile",
"bytes": "214"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "26980034"
},
{
"name": "Shell",
"bytes": "3895"
}
],
"symlink_target": ""
}
|
import os
import numpy as np
import pandas as pd
import sct_utils as sct
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from spinalcordtoolbox.image import Image
from config_file import config
def run_optic(fname_in, contrast, ofolder):
cmd = ['sct_get_centerline', '-i', fname_in, '-c', contrast, '-ofolder', ofolder]
sct.run(cmd)
def run_flat(fname_in, fname_ctr, ofolder):
cmd = ['sct_flatten_sagittal', '-i', fname_in, '-s', fname_ctr]
try:
sct.run(cmd)
except:
pass
def create_qc(fname_in, fname_gt, fname_out):
img, gt = Image(fname_in), Image(fname_gt)
img.change_orientation('RPI')
gt.change_orientation('RPI')
coord_c2c3 = np.where(gt.data == 1)
y_c2c3, z_c2c3 = coord_c2c3[1][0], coord_c2c3[2][0]
sag_slice = img.data[0, :, :]
del img, gt
ax = plt.gca()
ax.imshow(sag_slice, interpolation='nearest', cmap='gray', aspect='auto')
circ = Circle((z_c2c3, y_c2c3), 2, facecolor='chartreuse')
ax.add_patch(circ)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.savefig(fname_out)
plt.close()
def run_crop(fname_in, fname_out, nb_slice_average=1.0):
img = Image(fname_in).change_orientation('RPI')
if len(list(np.where(img.data == 3)[2])) == 1: # if label file
x_start, x_end = str(np.where(img.data == 3)[0][0]), str(np.where(img.data == 3)[0][0])
nb_slice_average_each_side = 0
img.data[np.where(img.data != 3)] = 0
img.data[np.where(img.data == 3)] = 1
img.change_orientation('PIR')
img.save(fname_out)
del img
else: # if grayscale image file
x_med = int(np.rint(img.dim[0] * 1.0 / 2))
nb_slice_average_each_side = int(nb_slice_average / 2 / img.dim[4])
x_start, x_end = str(x_med-nb_slice_average_each_side), str(x_med+nb_slice_average_each_side)
del img
cmd_orient = ['sct_image', '-i', fname_in, '-setorient', 'PIR', '-o', fname_out]
sct.run(cmd_orient)
cmd_crop = ['sct_crop_image', '-i', fname_out, '-zmin', x_start, '-zmax', x_end, '-o', fname_out]
sct.run(cmd_crop)
if nb_slice_average_each_side:
cmd_mean = ['sct_maths', '-i', fname_out, '-mean', 'z', '-o', fname_out]
sct.run(cmd_mean)
def preprocessing(df, folder_out, contrast_centerline):
sct.printv("Preprocessing...")
qc_fold = os.path.join(folder_out, 'qc')
if not os.path.isdir(qc_fold):
os.makedirs(qc_fold)
for idx, row in df.iterrows():
if row.contrast.startswith(contrast_centerline):
sct.printv("\t" + row.subject)
img = row['img']
labels = row['labels']
img_head, img_tail = os.path.split(img)
img_basename = img_tail.split('.nii')[0]
folder_out_cur = os.path.join(folder_out, row.subject)
if not os.path.isdir(folder_out_cur):
os.makedirs(folder_out_cur)
ctr = os.path.join(folder_out_cur, img_basename + '_centerline_optic.nii.gz')
if not os.path.isfile(ctr):
run_optic(img, contrast_centerline, folder_out_cur)
flat_in = os.path.join(img_head, img_basename + '_flatten.nii.gz')
flat = os.path.join(folder_out_cur, img_basename + '_flatten.nii.gz')
if not os.path.isfile(flat) and os.path.isfile(ctr):
run_flat(img, ctr, folder_out_cur)
sct.mv(flat_in, flat)
oneslice = os.path.join(folder_out_cur, img_basename + '_oneslice.nii')
oneslice_gt = os.path.join(folder_out_cur, img_basename + '_oneslice_gt.nii')
if os.path.isfile(flat) and not os.path.isfile(oneslice):
run_crop(flat, oneslice, 7.0)
if os.path.isfile(labels) and not os.path.isfile(oneslice_gt):
run_crop(labels, oneslice_gt, 1.0)
if os.path.isfile(oneslice) and os.path.isfile(oneslice_gt):
df.loc[idx, 'train'] = os.path.abspath(oneslice)
df.loc[idx, 'gt'] = os.path.abspath(oneslice_gt)
qc_file = os.path.join(qc_fold, '_'.join([row.subject, row.contrast]) + '.png')
if not os.path.isfile(qc_file):
create_qc(oneslice, oneslice_gt, qc_file)
return df
def train_model(df, model_name):
sct.printv("Training...")
train_txt = 'train_lst.txt'
train_gt_txt = 'train_gt_lst.txt'
if os.path.isfile(train_txt) or os.path.isfile(train_gt_txt):
sct.rm(train_txt)
sct.rm(train_gt_txt)
stg_train = '\n'.join([os.path.abspath(f).split('.nii')[0] for f in df['train'].values if str(f) != 'nan'])
stg_gt_train = '\n'.join([os.path.abspath(f).split('.nii')[0] for f in df['gt'].values if str(f) != 'nan'])
with open(train_txt, 'w') as text_file:
text_file.write(stg_train)
text_file.close()
with open(train_gt_txt, 'w') as text_file:
text_file.write(stg_gt_train)
text_file.close()
model_path = os.getcwd() + '/trained_model_t1.yml'
if os.path.isfile(model_path):
sct.rm(model_path)
cmd_train = 'isct_train_svm -hogsg -incr=20 ' + model_name + ' ' + train_txt + ' ' + train_gt_txt + ' --list True'
sct.run(cmd_train, verbose=0, raise_exception=False)
def main():
df = pd.read_pickle(config['dataframe_database'])
folder_out = config['folder_out']
model_name = config['model_name']
contrast_centerline = config['contrast_centerline']
df = preprocessing(df, folder_out, contrast_centerline)
# train_model(df, model_name)
if __name__ == '__main__':
main()
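# Illustrative sketch of the expected config_file.config contents, not part of
# the original script (paths and values are made up; the keys are the ones read
# in main() above):
#   config = {
#       'dataframe_database': '/data/df_database.pkl',
#       'folder_out': '/data/c2c3_out',
#       'model_name': 'c2c3_model',
#       'contrast_centerline': 't1',
#   }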
|
{
"content_hash": "1e5ba2984fc5fa7d377a1e3044e5ddd9",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 118,
"avg_line_length": 37.450331125827816,
"alnum_prop": 0.5906277630415562,
"repo_name": "neuropoly/spinalcordtoolbox",
"id": "9a3bb54b47e08935983ccf578ac8c5b6fa477a3b",
"size": "5655",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dev/detect_c2c3/train.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5931"
},
{
"name": "C++",
"bytes": "629016"
},
{
"name": "CMake",
"bytes": "7000"
},
{
"name": "CSS",
"bytes": "1237"
},
{
"name": "Dockerfile",
"bytes": "293"
},
{
"name": "HTML",
"bytes": "11480"
},
{
"name": "JavaScript",
"bytes": "3171"
},
{
"name": "MATLAB",
"bytes": "120557"
},
{
"name": "Python",
"bytes": "2052822"
},
{
"name": "Rich Text Format",
"bytes": "1619"
},
{
"name": "Shell",
"bytes": "61227"
}
],
"symlink_target": ""
}
|
import logging
import logging.config
import os
from flask import Flask, render_template
from environments.environments import environments
from releases.releases import releases
app = Flask(__name__)
app.config.from_object("observatory.settings")
for setting, value in app.config.iteritems():
if setting in os.environ:
app.config[setting] = os.environ.get(setting, value)
logging_conf = app.config.get("LOGGING_CONF")
if logging_conf and os.path.exists(logging_conf):
logging.config.fileConfig(logging_conf)
logger_name = app.config.get("LOGGER_NAME")
app.register_blueprint(environments, url_prefix='/environments')
app.register_blueprint(releases, url_prefix='/releases')
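# Illustrative dev-server entry point, not part of the original module (host
# and port are assumptions).
if __name__ == '__main__':
    app.run(host='127.0.0.1', port=5000)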
|
{
"content_hash": "e261240220fecd1719908883cb0a9510",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 64,
"avg_line_length": 27.92,
"alnum_prop": 0.7679083094555874,
"repo_name": "lonnen/observatory",
"id": "8f92029608a76314a0742c509d2cda79c43a52d7",
"size": "698",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "observatory/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11084"
}
],
"symlink_target": ""
}
|
def reverse_sentence(string):
string_list = string.split() # split string by word into list
output = ' '.join([word[::-1] for word in string_list]) # reverse each element/word in list and consolidate into single string
print output
# test cases
test = "Hey dude!"
reverse_sentence(test)
test2 = "dude"
reverse_sentence(test2)
|
{
"content_hash": "fec0b80fb6666d00354fc2590b073bff",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 127,
"avg_line_length": 30.09090909090909,
"alnum_prop": 0.7280966767371602,
"repo_name": "derekmpham/interview-prep",
"id": "1c8ca4faef1bf67268e3507f026983677189f28a",
"size": "366",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "string/reverse.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "42856"
},
{
"name": "Python",
"bytes": "27173"
}
],
"symlink_target": ""
}
|
import uuid
import mock
import six
import webob
from nova.api.openstack.compute import floating_ips as fips_v21
from nova.api.openstack.compute.legacy_v2.contrib import floating_ips \
as fips_v2
from nova.api.openstack import extensions
from nova import compute
from nova.compute import utils as compute_utils
from nova import context
from nova import db
from nova import exception
from nova import network
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_network
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
TEST_INST = 1
WRONG_INST = 9999
def network_api_get_floating_ip(self, context, id):
return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova',
'fixed_ip_id': None}
def network_api_get_floating_ip_by_address(self, context, address):
return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova',
'fixed_ip_id': 10}
def network_api_get_floating_ips_by_project(self, context):
return [{'id': 1,
'address': '10.10.10.10',
'pool': 'nova',
'fixed_ip': {'address': '10.0.0.1',
'instance_uuid': FAKE_UUID,
'instance': objects.Instance(
**{'uuid': FAKE_UUID})}},
{'id': 2,
'pool': 'nova', 'interface': 'eth0',
'address': '10.10.10.11',
'fixed_ip': None}]
def compute_api_get(self, context, instance_id, expected_attrs=None,
want_objects=False):
return objects.Instance(uuid=FAKE_UUID, id=instance_id,
instance_type_id=1, host='bob')
def network_api_allocate(self, context):
return '10.10.10.10'
def network_api_release(self, context, address):
pass
def compute_api_associate(self, context, instance_id, address):
pass
def network_api_associate(self, context, floating_address, fixed_address):
pass
def network_api_disassociate(self, context, instance, floating_address):
pass
def fake_instance_get(context, instance_id):
return objects.Instance(**{
"id": 1,
"uuid": uuid.uuid4(),
"name": 'fake',
"user_id": 'fakeuser',
"project_id": '123'})
def stub_nw_info(stubs):
def get_nw_info_for_instance(instance):
return fake_network.fake_get_instance_nw_info(stubs)
return get_nw_info_for_instance
def get_instance_by_floating_ip_addr(self, context, address):
return None
class FloatingIpTestNeutronV21(test.NoDBTestCase):
floating_ips = fips_v21
def setUp(self):
super(FloatingIpTestNeutronV21, self).setUp()
self.flags(network_api_class='nova.network.neutronv2.api.API')
self.controller = self.floating_ips.FloatingIPController()
def test_floatingip_delete(self):
req = fakes.HTTPRequest.blank('')
fip_val = {'address': '1.1.1.1', 'fixed_ip_id': '192.168.1.2'}
with test.nested(
mock.patch.object(self.controller.network_api,
'disassociate_floating_ip'),
mock.patch.object(self.controller.network_api,
'disassociate_and_release_floating_ip'),
mock.patch.object(self.controller.network_api,
'release_floating_ip'),
mock.patch.object(self.controller.network_api,
'get_instance_id_by_floating_address',
return_value=None),
mock.patch.object(self.controller.network_api,
'get_floating_ip',
return_value=fip_val)) as (
disoc_fip, dis_and_del, rel_fip, _, _):
self.controller.delete(req, 1)
self.assertFalse(disoc_fip.called)
self.assertFalse(rel_fip.called)
# Only disassociate_and_release_floating_ip is
# called if using neutron
self.assertTrue(dis_and_del.called)
def _test_floatingip_delete_not_found(self, ex,
expect_ex=webob.exc.HTTPNotFound):
req = fakes.HTTPRequest.blank('')
with mock.patch.object(self.controller.network_api,
'get_floating_ip', side_effect=ex):
self.assertRaises(expect_ex,
self.controller.delete, req, 1)
def test_floatingip_delete_not_found_ip(self):
ex = exception.FloatingIpNotFound(id=1)
self._test_floatingip_delete_not_found(ex)
def test_floatingip_delete_not_found(self):
ex = exception.NotFound
self._test_floatingip_delete_not_found(ex)
def test_floatingip_delete_invalid_id(self):
ex = exception.InvalidID(id=1)
self._test_floatingip_delete_not_found(ex, webob.exc.HTTPBadRequest)
class FloatingIpTestNeutronV2(FloatingIpTestNeutronV21):
floating_ips = fips_v2
def test_floatingip_delete_invalid_id(self):
ex = exception.InvalidID(id=1)
self._test_floatingip_delete_not_found(ex, webob.exc.HTTPNotFound)
class FloatingIpTestV21(test.TestCase):
floating_ip = "10.10.10.10"
floating_ip_2 = "10.10.10.11"
floating_ips = fips_v21
validation_error = exception.ValidationError
def _create_floating_ips(self, floating_ips=None):
"""Create a floating ip object."""
if floating_ips is None:
floating_ips = [self.floating_ip]
elif not isinstance(floating_ips, (list, tuple)):
floating_ips = [floating_ips]
        def make_ip_dict(ip):
            """Shortcut for creating floating ip dict."""
            return {'address': ip, 'pool': 'nova', 'host': 'fake_host'}
        return db.floating_ip_bulk_create(
            self.context, [make_ip_dict(ip) for ip in floating_ips],
        )
def _delete_floating_ip(self):
db.floating_ip_destroy(self.context, self.floating_ip)
def setUp(self):
super(FloatingIpTestV21, self).setUp()
self.stubs.Set(compute.api.API, "get",
compute_api_get)
self.stubs.Set(network.api.API, "get_floating_ip",
network_api_get_floating_ip)
self.stubs.Set(network.api.API, "get_floating_ip_by_address",
network_api_get_floating_ip_by_address)
self.stubs.Set(network.api.API, "get_floating_ips_by_project",
network_api_get_floating_ips_by_project)
self.stubs.Set(network.api.API, "release_floating_ip",
network_api_release)
self.stubs.Set(network.api.API, "disassociate_floating_ip",
network_api_disassociate)
self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
get_instance_by_floating_ip_addr)
self.stubs.Set(compute_utils, "get_nw_info_for_instance",
stub_nw_info(self.stubs))
fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
self.stubs.Set(db, 'instance_get',
fake_instance_get)
self.context = context.get_admin_context()
self._create_floating_ips()
self.ext_mgr = extensions.ExtensionManager()
self.ext_mgr.extensions = {}
self.controller = self.floating_ips.FloatingIPController()
self.manager = self.floating_ips.\
FloatingIPActionController(self.ext_mgr)
self.fake_req = fakes.HTTPRequest.blank('')
def tearDown(self):
self._delete_floating_ip()
super(FloatingIpTestV21, self).tearDown()
def test_floatingip_delete(self):
fip_val = {'address': '1.1.1.1', 'fixed_ip_id': '192.168.1.2'}
with test.nested(
mock.patch.object(self.controller.network_api,
'disassociate_floating_ip'),
mock.patch.object(self.controller.network_api,
'release_floating_ip'),
mock.patch.object(self.controller.network_api,
'get_instance_id_by_floating_address',
return_value=None),
mock.patch.object(self.controller.network_api,
'get_floating_ip',
return_value=fip_val)) as (
disoc_fip, rel_fip, _, _):
self.controller.delete(self.fake_req, 1)
self.assertTrue(disoc_fip.called)
self.assertTrue(rel_fip.called)
def _test_floatingip_delete_not_found(self, ex,
expect_ex=webob.exc.HTTPNotFound):
with mock.patch.object(self.controller.network_api,
'get_floating_ip', side_effect=ex):
self.assertRaises(expect_ex,
self.controller.delete, self.fake_req, 1)
def test_floatingip_delete_not_found_ip(self):
ex = exception.FloatingIpNotFound(id=1)
self._test_floatingip_delete_not_found(ex)
def test_floatingip_delete_not_found(self):
ex = exception.NotFound
self._test_floatingip_delete_not_found(ex)
def test_floatingip_delete_invalid_id(self):
ex = exception.InvalidID(id=1)
self._test_floatingip_delete_not_found(ex, webob.exc.HTTPBadRequest)
def test_translate_floating_ip_view(self):
floating_ip_address = self.floating_ip
floating_ip = db.floating_ip_get_by_address(self.context,
floating_ip_address)
# NOTE(vish): network_get uses the id not the address
floating_ip = db.floating_ip_get(self.context, floating_ip['id'])
view = self.floating_ips._translate_floating_ip_view(floating_ip)
self.assertIn('floating_ip', view)
self.assertTrue(view['floating_ip']['id'])
self.assertEqual(view['floating_ip']['ip'], self.floating_ip)
self.assertIsNone(view['floating_ip']['fixed_ip'])
self.assertIsNone(view['floating_ip']['instance_id'])
def test_translate_floating_ip_view_dict(self):
floating_ip = {'id': 0, 'address': '10.0.0.10', 'pool': 'nova',
'fixed_ip': None}
view = self.floating_ips._translate_floating_ip_view(floating_ip)
self.assertIn('floating_ip', view)
def test_floating_ips_list(self):
res_dict = self.controller.index(self.fake_req)
response = {'floating_ips': [{'instance_id': FAKE_UUID,
'ip': '10.10.10.10',
'pool': 'nova',
'fixed_ip': '10.0.0.1',
'id': 1},
{'instance_id': None,
'ip': '10.10.10.11',
'pool': 'nova',
'fixed_ip': None,
'id': 2}]}
self.assertEqual(res_dict, response)
def test_floating_ip_release_nonexisting(self):
def fake_get_floating_ip(*args, **kwargs):
raise exception.FloatingIpNotFound(id=id)
self.stubs.Set(network.api.API, "get_floating_ip",
fake_get_floating_ip)
ex = self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, self.fake_req, '9876')
self.assertIn("Floating ip not found for id 9876", ex.explanation)
def test_floating_ip_release_race_cond(self):
def fake_get_floating_ip(*args, **kwargs):
return {'fixed_ip_id': 1, 'address': self.floating_ip}
def fake_get_instance_by_floating_ip_addr(*args, **kwargs):
return 'test-inst'
def fake_disassociate_floating_ip(*args, **kwargs):
raise exception.FloatingIpNotAssociated(args[3])
self.stubs.Set(network.api.API, "get_floating_ip",
fake_get_floating_ip)
self.stubs.Set(self.floating_ips, "get_instance_by_floating_ip_addr",
fake_get_instance_by_floating_ip_addr)
self.stubs.Set(self.floating_ips, "disassociate_floating_ip",
fake_disassociate_floating_ip)
res = self.controller.delete(self.fake_req, '9876')
# NOTE: on v2.1, http status code is set as wsgi_code of API
# method instead of status_int in a response object.
if isinstance(self.controller,
fips_v21.FloatingIPController):
status_int = self.controller.delete.wsgi_code
else:
status_int = res.status_int
self.assertEqual(status_int, 202)
def test_floating_ip_show(self):
res_dict = self.controller.show(self.fake_req, 1)
self.assertEqual(res_dict['floating_ip']['id'], 1)
self.assertEqual(res_dict['floating_ip']['ip'], '10.10.10.10')
self.assertIsNone(res_dict['floating_ip']['instance_id'])
def test_floating_ip_show_not_found(self):
def fake_get_floating_ip(*args, **kwargs):
raise exception.FloatingIpNotFound(id='fake')
self.stubs.Set(network.api.API, "get_floating_ip",
fake_get_floating_ip)
ex = self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, self.fake_req, '9876')
self.assertIn("Floating ip not found for id 9876", ex.explanation)
def test_show_associated_floating_ip(self):
def get_floating_ip(self, context, id):
return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova',
'fixed_ip': {'address': '10.0.0.1',
'instance_uuid': FAKE_UUID,
'instance': {'uuid': FAKE_UUID}}}
self.stubs.Set(network.api.API, "get_floating_ip", get_floating_ip)
res_dict = self.controller.show(self.fake_req, 1)
self.assertEqual(res_dict['floating_ip']['id'], 1)
self.assertEqual(res_dict['floating_ip']['ip'], '10.10.10.10')
self.assertEqual(res_dict['floating_ip']['fixed_ip'], '10.0.0.1')
self.assertEqual(res_dict['floating_ip']['instance_id'], FAKE_UUID)
def test_recreation_of_floating_ip(self):
self._delete_floating_ip()
self._create_floating_ips()
def test_floating_ip_in_bulk_creation(self):
self._delete_floating_ip()
self._create_floating_ips([self.floating_ip, self.floating_ip_2])
all_ips = db.floating_ip_get_all(self.context)
ip_list = [ip['address'] for ip in all_ips]
self.assertIn(self.floating_ip, ip_list)
self.assertIn(self.floating_ip_2, ip_list)
def test_fail_floating_ip_in_bulk_creation(self):
self.assertRaises(exception.FloatingIpExists,
self._create_floating_ips,
[self.floating_ip, self.floating_ip_2])
all_ips = db.floating_ip_get_all(self.context)
ip_list = [ip['address'] for ip in all_ips]
self.assertIn(self.floating_ip, ip_list)
self.assertNotIn(self.floating_ip_2, ip_list)
def test_floating_ip_allocate_no_free_ips(self):
def fake_allocate(*args, **kwargs):
raise exception.NoMoreFloatingIps()
self.stubs.Set(network.api.API, "allocate_floating_ip", fake_allocate)
ex = self.assertRaises(webob.exc.HTTPNotFound,
self.controller.create, self.fake_req)
self.assertIn('No more floating ips', ex.explanation)
def test_floating_ip_allocate_no_free_ips_pool(self):
def fake_allocate(*args, **kwargs):
raise exception.NoMoreFloatingIps()
self.stubs.Set(network.api.API, "allocate_floating_ip", fake_allocate)
ex = self.assertRaises(webob.exc.HTTPNotFound,
self.controller.create, self.fake_req,
{'pool': 'non_existent_pool'})
self.assertIn('No more floating ips in pool non_existent_pool',
ex.explanation)
@mock.patch.object(network.api.API, 'allocate_floating_ip',
side_effect=exception.FloatingIpBadRequest(
'Bad floatingip request: Network '
'c8f0e88f-ae41-47cb-be6c-d8256ba80576 does not contain any '
'IPv4 subnet'))
def test_floating_ip_allocate_no_ipv4_subnet(self, allocate_mock):
ex = self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, self.fake_req,
{'pool': 'non_existent_pool'})
self.assertIn("does not contain any IPv4 subnet",
six.text_type(ex))
@mock.patch('nova.network.api.API.allocate_floating_ip',
side_effect=exception.FloatingIpLimitExceeded())
def test_floating_ip_allocate_over_quota(self, allocate_mock):
ex = self.assertRaises(webob.exc.HTTPForbidden,
self.controller.create, self.fake_req)
self.assertIn('IP allocation over quota', ex.explanation)
@mock.patch('nova.network.api.API.allocate_floating_ip',
side_effect=exception.FloatingIpLimitExceeded())
def test_floating_ip_allocate_quota_exceed_in_pool(self, allocate_mock):
ex = self.assertRaises(webob.exc.HTTPForbidden,
self.controller.create, self.fake_req,
{'pool': 'non_existent_pool'})
self.assertIn('IP allocation over quota in pool non_existent_pool.',
ex.explanation)
@mock.patch('nova.network.api.API.allocate_floating_ip',
side_effect=exception.FloatingIpPoolNotFound())
def test_floating_ip_create_with_unknown_pool(self, allocate_mock):
ex = self.assertRaises(webob.exc.HTTPNotFound,
self.controller.create, self.fake_req,
{'pool': 'non_existent_pool'})
self.assertIn('Floating ip pool not found.', ex.explanation)
def test_floating_ip_allocate(self):
def fake1(*args, **kwargs):
pass
def fake2(*args, **kwargs):
return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova'}
self.stubs.Set(network.api.API, "allocate_floating_ip",
fake1)
self.stubs.Set(network.api.API, "get_floating_ip_by_address",
fake2)
res_dict = self.controller.create(self.fake_req)
ip = res_dict['floating_ip']
expected = {
"id": 1,
"instance_id": None,
"ip": "10.10.10.10",
"fixed_ip": None,
"pool": 'nova'}
self.assertEqual(ip, expected)
def test_floating_ip_release(self):
self.controller.delete(self.fake_req, 1)
def _test_floating_ip_associate(self, fixed_address):
def fake_associate_floating_ip(*args, **kwargs):
self.assertEqual(fixed_address, kwargs['fixed_address'])
self.stubs.Set(network.api.API, "associate_floating_ip",
fake_associate_floating_ip)
body = dict(addFloatingIp=dict(address=self.floating_ip))
rsp = self.manager._add_floating_ip(self.fake_req, TEST_INST,
body=body)
self.assertEqual(202, rsp.status_int)
def test_floating_ip_associate(self):
self._test_floating_ip_associate(fixed_address='192.168.1.100')
@mock.patch.object(network.model.NetworkInfo, 'fixed_ips')
def test_associate_floating_ip_v4v6_fixed_ip(self, fixed_ips_mock):
fixed_address = '192.168.1.100'
fixed_ips_mock.return_value = [{'address': 'fc00:2001:db8::100'},
{'address': fixed_address}]
self._test_floating_ip_associate(fixed_address=fixed_address)
@mock.patch.object(network.model.NetworkInfo, 'fixed_ips',
return_value=[{'address': 'fc00:2001:db8::100'}])
def test_associate_floating_ip_v6_fixed_ip(self, fixed_ips_mock):
body = dict(addFloatingIp=dict(address=self.floating_ip))
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._add_floating_ip, self.fake_req,
TEST_INST, body=body)
def test_floating_ip_associate_invalid_instance(self):
def fake_get(self, context, id, expected_attrs=None,
want_objects=False):
raise exception.InstanceNotFound(instance_id=id)
self.stubs.Set(compute.api.API, "get", fake_get)
body = dict(addFloatingIp=dict(address=self.floating_ip))
self.assertRaises(webob.exc.HTTPNotFound,
self.manager._add_floating_ip, self.fake_req,
'test_inst', body=body)
def test_associate_not_allocated_floating_ip_to_instance(self):
def fake_associate_floating_ip(self, context, instance,
floating_address, fixed_address,
affect_auto_assigned=False):
raise exception.FloatingIpNotFoundForAddress(
address=floating_address)
self.stubs.Set(network.api.API, "associate_floating_ip",
fake_associate_floating_ip)
floating_ip = '10.10.10.11'
body = dict(addFloatingIp=dict(address=floating_ip))
ex = self.assertRaises(webob.exc.HTTPNotFound,
self.manager._add_floating_ip,
self.fake_req, TEST_INST, body=body)
self.assertIn("floating ip not found", ex.explanation)
@mock.patch.object(network.api.API, 'associate_floating_ip',
side_effect=exception.Forbidden)
def test_associate_floating_ip_forbidden(self, associate_mock):
body = dict(addFloatingIp=dict(address='10.10.10.11'))
self.assertRaises(webob.exc.HTTPForbidden,
self.manager._add_floating_ip, self.fake_req,
TEST_INST, body=body)
def test_associate_floating_ip_bad_address_key(self):
body = dict(addFloatingIp=dict(bad_address='10.10.10.11'))
req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
self.assertRaises(self.validation_error,
self.manager._add_floating_ip, req, 'test_inst',
body=body)
def test_associate_floating_ip_bad_addfloatingip_key(self):
body = dict(bad_addFloatingIp=dict(address='10.10.10.11'))
req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
self.assertRaises(self.validation_error,
self.manager._add_floating_ip, req, 'test_inst',
body=body)
def test_floating_ip_disassociate(self):
def get_instance_by_floating_ip_addr(self, context, address):
if address == '10.10.10.10':
return TEST_INST
self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
get_instance_by_floating_ip_addr)
body = dict(removeFloatingIp=dict(address='10.10.10.10'))
rsp = self.manager._remove_floating_ip(self.fake_req, TEST_INST,
body=body)
self.assertEqual(202, rsp.status_int)
def test_floating_ip_disassociate_missing(self):
body = dict(removeFloatingIp=dict(address='10.10.10.10'))
self.assertRaises(webob.exc.HTTPConflict,
self.manager._remove_floating_ip,
self.fake_req, 'test_inst', body=body)
def test_floating_ip_associate_non_existent_ip(self):
def fake_network_api_associate(self, context, instance,
floating_address=None,
fixed_address=None):
floating_ips = ["10.10.10.10", "10.10.10.11"]
if floating_address not in floating_ips:
raise exception.FloatingIpNotFoundForAddress(
address=floating_address)
self.stubs.Set(network.api.API, "associate_floating_ip",
fake_network_api_associate)
body = dict(addFloatingIp=dict(address='1.1.1.1'))
self.assertRaises(webob.exc.HTTPNotFound,
self.manager._add_floating_ip,
self.fake_req, TEST_INST, body=body)
def test_floating_ip_disassociate_non_existent_ip(self):
def network_api_get_floating_ip_by_address(self, context,
floating_address):
floating_ips = ["10.10.10.10", "10.10.10.11"]
if floating_address not in floating_ips:
raise exception.FloatingIpNotFoundForAddress(
address=floating_address)
self.stubs.Set(network.api.API, "get_floating_ip_by_address",
network_api_get_floating_ip_by_address)
body = dict(removeFloatingIp=dict(address='1.1.1.1'))
self.assertRaises(webob.exc.HTTPNotFound,
self.manager._remove_floating_ip,
self.fake_req, TEST_INST, body=body)
def test_floating_ip_disassociate_wrong_instance_uuid(self):
def get_instance_by_floating_ip_addr(self, context, address):
if address == '10.10.10.10':
return TEST_INST
self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
get_instance_by_floating_ip_addr)
wrong_uuid = 'aaaaaaaa-ffff-ffff-ffff-aaaaaaaaaaaa'
body = dict(removeFloatingIp=dict(address='10.10.10.10'))
self.assertRaises(webob.exc.HTTPConflict,
self.manager._remove_floating_ip,
self.fake_req, wrong_uuid, body=body)
def test_floating_ip_disassociate_wrong_instance_id(self):
def get_instance_by_floating_ip_addr(self, context, address):
if address == '10.10.10.10':
return WRONG_INST
self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
get_instance_by_floating_ip_addr)
body = dict(removeFloatingIp=dict(address='10.10.10.10'))
self.assertRaises(webob.exc.HTTPConflict,
self.manager._remove_floating_ip,
self.fake_req, TEST_INST, body=body)
def test_floating_ip_disassociate_auto_assigned(self):
def fake_get_floating_ip_addr_auto_assigned(self, context, address):
return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova',
'fixed_ip_id': 10, 'auto_assigned': 1}
def get_instance_by_floating_ip_addr(self, context, address):
if address == '10.10.10.10':
return TEST_INST
def network_api_disassociate(self, context, instance,
floating_address):
raise exception.CannotDisassociateAutoAssignedFloatingIP()
self.stubs.Set(network.api.API, "get_floating_ip_by_address",
fake_get_floating_ip_addr_auto_assigned)
self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
get_instance_by_floating_ip_addr)
self.stubs.Set(network.api.API, "disassociate_floating_ip",
network_api_disassociate)
body = dict(removeFloatingIp=dict(address='10.10.10.10'))
self.assertRaises(webob.exc.HTTPForbidden,
self.manager._remove_floating_ip,
self.fake_req, TEST_INST, body=body)
def test_floating_ip_disassociate_map_authorization_exc(self):
def fake_get_floating_ip_addr_auto_assigned(self, context, address):
return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova',
'fixed_ip_id': 10, 'auto_assigned': 1}
def get_instance_by_floating_ip_addr(self, context, address):
if address == '10.10.10.10':
return TEST_INST
def network_api_disassociate(self, context, instance, address):
raise exception.Forbidden()
self.stubs.Set(network.api.API, "get_floating_ip_by_address",
fake_get_floating_ip_addr_auto_assigned)
self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
get_instance_by_floating_ip_addr)
self.stubs.Set(network.api.API, "disassociate_floating_ip",
network_api_disassociate)
body = dict(removeFloatingIp=dict(address='10.10.10.10'))
self.assertRaises(webob.exc.HTTPForbidden,
self.manager._remove_floating_ip,
self.fake_req, TEST_INST, body=body)
# these are a few bad param tests
def test_bad_address_param_in_remove_floating_ip(self):
body = dict(removeFloatingIp=dict(badparam='11.0.0.1'))
self.assertRaises(self.validation_error,
self.manager._remove_floating_ip, self.fake_req,
TEST_INST, body=body)
def test_missing_dict_param_in_remove_floating_ip(self):
body = dict(removeFloatingIp='11.0.0.1')
self.assertRaises(self.validation_error,
self.manager._remove_floating_ip, self.fake_req,
TEST_INST, body=body)
def test_missing_dict_param_in_add_floating_ip(self):
body = dict(addFloatingIp='11.0.0.1')
self.assertRaises(self.validation_error,
self.manager._add_floating_ip, self.fake_req,
TEST_INST, body=body)
class FloatingIpTestV2(FloatingIpTestV21):
floating_ips = fips_v2
validation_error = webob.exc.HTTPBadRequest
def test_not_extended_floating_ip_associate_fixed(self):
# Check that fixed_address is ignored if os-extended-floating-ips
# is not loaded
fixed_address_requested = '192.168.1.101'
fixed_address_allocated = '192.168.1.100'
def fake_associate_floating_ip(*args, **kwargs):
self.assertEqual(fixed_address_allocated,
kwargs['fixed_address'])
self.stubs.Set(network.api.API, "associate_floating_ip",
fake_associate_floating_ip)
body = dict(addFloatingIp=dict(address=self.floating_ip,
fixed_address=fixed_address_requested))
rsp = self.manager._add_floating_ip(self.fake_req, TEST_INST, body)
self.assertEqual(202, rsp.status_int)
def test_floatingip_delete_invalid_id(self):
ex = exception.InvalidID(id=1)
self._test_floatingip_delete_not_found(ex, webob.exc.HTTPNotFound)
class ExtendedFloatingIpTestV21(test.TestCase):
floating_ip = "10.10.10.10"
floating_ip_2 = "10.10.10.11"
floating_ips = fips_v21
def _create_floating_ips(self, floating_ips=None):
"""Create a floating ip object."""
if floating_ips is None:
floating_ips = [self.floating_ip]
elif not isinstance(floating_ips, (list, tuple)):
floating_ips = [floating_ips]
dict_ = {'pool': 'nova', 'host': 'fake_host'}
return db.floating_ip_bulk_create(
self.context, [dict(address=ip, **dict_) for ip in floating_ips],
)
def _delete_floating_ip(self):
db.floating_ip_destroy(self.context, self.floating_ip)
def setUp(self):
super(ExtendedFloatingIpTestV21, self).setUp()
self.stubs.Set(compute.api.API, "get",
compute_api_get)
self.stubs.Set(network.api.API, "get_floating_ip",
network_api_get_floating_ip)
self.stubs.Set(network.api.API, "get_floating_ip_by_address",
network_api_get_floating_ip_by_address)
self.stubs.Set(network.api.API, "get_floating_ips_by_project",
network_api_get_floating_ips_by_project)
self.stubs.Set(network.api.API, "release_floating_ip",
network_api_release)
self.stubs.Set(network.api.API, "disassociate_floating_ip",
network_api_disassociate)
self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
get_instance_by_floating_ip_addr)
self.stubs.Set(compute_utils, "get_nw_info_for_instance",
stub_nw_info(self.stubs))
fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
self.stubs.Set(db, 'instance_get',
fake_instance_get)
self.context = context.get_admin_context()
self._create_floating_ips()
self.ext_mgr = extensions.ExtensionManager()
self.ext_mgr.extensions = {}
self.ext_mgr.extensions['os-floating-ips'] = True
self.ext_mgr.extensions['os-extended-floating-ips'] = True
self.controller = self.floating_ips.FloatingIPController()
self.manager = self.floating_ips.\
FloatingIPActionController(self.ext_mgr)
self.fake_req = fakes.HTTPRequest.blank('')
def tearDown(self):
self._delete_floating_ip()
super(ExtendedFloatingIpTestV21, self).tearDown()
def test_extended_floating_ip_associate_fixed(self):
fixed_address = '192.168.1.101'
def fake_associate_floating_ip(*args, **kwargs):
self.assertEqual(fixed_address, kwargs['fixed_address'])
self.stubs.Set(network.api.API, "associate_floating_ip",
fake_associate_floating_ip)
body = dict(addFloatingIp=dict(address=self.floating_ip,
fixed_address=fixed_address))
rsp = self.manager._add_floating_ip(self.fake_req, TEST_INST,
body=body)
self.assertEqual(202, rsp.status_int)
def test_extended_floating_ip_associate_fixed_not_allocated(self):
def fake_associate_floating_ip(*args, **kwargs):
pass
self.stubs.Set(network.api.API, "associate_floating_ip",
fake_associate_floating_ip)
body = dict(addFloatingIp=dict(address=self.floating_ip,
fixed_address='11.11.11.11'))
ex = self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._add_floating_ip,
self.fake_req, TEST_INST, body=body)
self.assertIn("Specified fixed address not assigned to instance",
ex.explanation)
class ExtendedFloatingIpTestV2(ExtendedFloatingIpTestV21):
floating_ips = fips_v2
class FloatingIPPolicyEnforcementV21(test.NoDBTestCase):
def setUp(self):
super(FloatingIPPolicyEnforcementV21, self).setUp()
self.controller = fips_v21.FloatingIPController()
self.req = fakes.HTTPRequest.blank('')
def _common_policy_check(self, func, *arg, **kwarg):
rule_name = "os_compute_api:os-floating-ips"
rule = {rule_name: "project:non_fake"}
self.policy.set_rules(rule)
exc = self.assertRaises(
exception.PolicyNotAuthorized, func, *arg, **kwarg)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_index_policy_failed(self):
self._common_policy_check(self.controller.index, self.req)
def test_show_policy_failed(self):
self._common_policy_check(self.controller.show, self.req, FAKE_UUID)
def test_create_policy_failed(self):
self._common_policy_check(self.controller.create, self.req)
def test_delete_policy_failed(self):
self._common_policy_check(self.controller.delete, self.req, FAKE_UUID)
class FloatingIPActionPolicyEnforcementV21(test.NoDBTestCase):
def setUp(self):
super(FloatingIPActionPolicyEnforcementV21, self).setUp()
self.controller = fips_v21.FloatingIPActionController()
self.req = fakes.HTTPRequest.blank('')
def _common_policy_check(self, func, *arg, **kwarg):
rule_name = "os_compute_api:os-floating-ips"
rule = {rule_name: "project:non_fake"}
self.policy.set_rules(rule)
exc = self.assertRaises(
exception.PolicyNotAuthorized, func, *arg, **kwarg)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_add_policy_failed(self):
body = dict(addFloatingIp=dict(address='10.10.10.11'))
self._common_policy_check(
self.controller._add_floating_ip, self.req, FAKE_UUID, body=body)
def test_remove_policy_failed(self):
body = dict(removeFloatingIp=dict(address='10.10.10.10'))
self._common_policy_check(
self.controller._remove_floating_ip, self.req,
FAKE_UUID, body=body)
|
{
"content_hash": "c61008d6249001f5cbad1f701e74f549",
"timestamp": "",
"source": "github",
"line_count": 889,
"max_line_length": 78,
"avg_line_length": 41.87739032620922,
"alnum_prop": 0.5870423594509656,
"repo_name": "apporc/nova",
"id": "67f4b42a76c73b2cc8aed8fd741bc470de86e46f",
"size": "37919",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "nova/tests/unit/api/openstack/compute/test_floating_ips.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16560867"
},
{
"name": "Shell",
"bytes": "24210"
},
{
"name": "Smarty",
"bytes": "335237"
}
],
"symlink_target": ""
}
|
"""
Invoke tasks helper functions
=============================
"""
import logging
import os
log = logging.getLogger(__name__) # pylint: disable=invalid-name
def download_file(
url,
local_filepath,
chunk_size=1024*512,
lock_timeout=10,
http_timeout=None,
session=None
):
# pylint: disable=too-many-arguments
"""
A helper function which can download a file from a specified ``url`` to a
local file ``local_filepath`` in chunks and using a file lock to prevent
a concurrent download of the same file.
"""
# Avoid unnecessary dependencies when the function is not used.
import lockfile
import requests
log.debug("Checking file existance in '%s'", local_filepath)
lock = lockfile.LockFile(local_filepath)
try:
lock.acquire(timeout=lock_timeout)
except lockfile.LockTimeout:
log.info(
"File '%s' is locked. Probably another instance is still downloading it.",
local_filepath
)
raise
try:
if not os.path.exists(local_filepath):
log.info("Downloading a file from '%s' to '%s'", url, local_filepath)
if session is None:
session = requests
response = session.get(url, stream=True, timeout=http_timeout)
if response.status_code != 200:
log.error("Download '%s' is failed: %s", url, response)
response.raise_for_status()
with open(local_filepath, 'wb') as local_file:
for chunk in response.iter_content(chunk_size=chunk_size):
# filter out keep-alive new chunks
if chunk:
local_file.write(chunk)
log.debug("File '%s' has been downloaded", local_filepath)
return local_filepath
finally:
lock.release()
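# Hedged usage sketch (an illustrative addition, not part of the original
# module): one way download_file() might be invoked. The URL and the local
# path below are made-up placeholders.
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    # Download to a temporary location, waiting up to 30s on a competing lock.
    download_file(
        'https://example.com/archive.tar.gz',  # hypothetical URL
        '/tmp/archive.tar.gz',                 # hypothetical destination
        lock_timeout=30,
    )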
|
{
"content_hash": "0abfbc578cb1bde092b29c97cfb554dd",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 86,
"avg_line_length": 32.96491228070175,
"alnum_prop": 0.5875465673230442,
"repo_name": "frol/flask-restplus-server-example",
"id": "29553dab4b128e5bf7fd827ff4f720d7d98d1909",
"size": "1879",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tasks/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1730"
},
{
"name": "HTML",
"bytes": "6943"
},
{
"name": "Mako",
"bytes": "1637"
},
{
"name": "Python",
"bytes": "239920"
}
],
"symlink_target": ""
}
|
from lang import lang
from framework import framework
from header import headers
from cms import cms
from server import server
from waf import waf
from os import os
from lib.net import http
from lib.net import utils
from lib.utils import printer
class CheckAll():
""" Docstring for CheckAll """
def __init__(self,url,agent,proxy,redirect):
self.url = url
self.printer = printer.Printer()
self.request = http.Http(agent=agent,proxy=proxy,redirect=redirect)
self.checker = utils.Checker()
def Run(self):
info = {
'name' : 'CheckAll',
'author' : 'Momo Outaadi (@M4ll0k)',
'description' : 'Checking all fingerprints'
}
try:
resp = self.request.Send(self.url)
serv = server.Server(self.url).Run(resp.headers)
self.printer.plus('Server: %s'%serv)
            # Report the first firewall and OS match, and every language,
            # framework and CMS match found in the response.
            for x in waf.Waf(resp.headers):
                if x is not None:
                    self.printer.plus('Firewall: %s' % x)
                    break
            for x in os.Os(resp.headers):
                if x is not None:
                    self.printer.plus('Operating System: %s' % x)
                    break
            for x in lang.Lang(resp._content, resp.headers):
                if x is not None:
                    self.printer.plus('Language: %s' % x)
            for x in framework.Framework(resp.headers):
                if x is not None:
                    self.printer.plus('Web Framework: %s' % x)
            for x in cms.Cms(resp._content):
                if x is not None:
                    self.printer.plus('CMS: %s' % x)
headers.Headers().Run(resp.headers)
except Exception as Error:
print Error
|
{
"content_hash": "6571bc75a94b8b2625587cdf67a03984",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 69,
"avg_line_length": 27.263157894736842,
"alnum_prop": 0.6531531531531531,
"repo_name": "Yukinoshita47/Yuki-Chan-The-Auto-Pentest",
"id": "2e23a61215399e78fd9f010a5f93cf913bc1e094",
"size": "1762",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Module/Spaghetti/modules/fingerprints/CheckAll.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "36211"
},
{
"name": "JavaScript",
"bytes": "3038"
},
{
"name": "Makefile",
"bytes": "1360"
},
{
"name": "Perl",
"bytes": "108876"
},
{
"name": "Python",
"bytes": "3034585"
},
{
"name": "Roff",
"bytes": "6738"
},
{
"name": "Ruby",
"bytes": "2693582"
},
{
"name": "Shell",
"bytes": "53755"
},
{
"name": "XSLT",
"bytes": "5475"
}
],
"symlink_target": ""
}
|
'''run_client.py - An example client using the python socket implementation of
the Google Protocol Buffers.
This module is an executable script demonstrating the usage of the python socket
implementation of the Google Protocol Buffers. To work correctly, the script
requires a server to be running first (i.e. run_server.py).
Authors: Martin Norbury (mnorbury@lcogt.net)
Eric Saunders (esaunders@lcogt.net)
Zach Walker (zwalker@lcogt.net)
May 2009
'''
# Add main protobuf module to classpath
import sys
sys.path.append('../../main')
import time_pb2 as proto
import protobuf
import logging
log = logging.getLogger(__name__)
hostname = 'localhost'
port = 8090
if __name__=='__main__':
logging.basicConfig(level=logging.DEBUG)
log.debug("test")
# Create request message
request = proto.TimeRequest()
service = protobuf.RpcService(proto.TimeService_Stub, port, hostname)
try:
response = service.getTime(request, timeout=1000)
log.info(response)
except Exception, ex:
log.exception(ex)
|
{
"content_hash": "aec4d1b78d451ade1838e895e590b0df",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 80,
"avg_line_length": 26.725,
"alnum_prop": 0.7090739008419084,
"repo_name": "nowelium/protobuf-socket-rpc",
"id": "c5eab9607d97ec9d75ef72b3f31012e210e7c168",
"size": "2206",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/src/example/time/run_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "169955"
},
{
"name": "Python",
"bytes": "142900"
}
],
"symlink_target": ""
}
|
import os
import sys
import glob
import pep8
from pyflakes.scripts import pyflakes
def findpy(path):
for cfile in glob.glob(os.path.join(path, '*')):
if os.path.isdir(cfile):
for py in findpy(cfile):
yield py
if cfile.endswith('.py'):
yield cfile
def check_pyflakes(srcdir):
print(">>> Running pyflakes...")
clean = True
for pyfile in findpy(srcdir):
if pyflakes.checkPath(pyfile) != 0:
clean = False
return clean
def check_pep8(srcdir):
print(">>> Running pep8...")
clean = True
pep8.process_options([''])
for pyfile in findpy(srcdir):
if pep8.Checker(pyfile).check_all() != 0:
clean = False
return clean
def main():
src = os.path.dirname(sys.argv[0])
if not check_pep8(src):
        print("")
err = "ERROR: pep8 failed on some source files\n"
err += "ERROR: please fix the errors and re-run this script"
print(err)
elif not check_pyflakes(src):
        print("")
err = "ERROR: pyflakes failed on some source files\n"
err += "ERROR: please fix the errors and re-run this script"
print(err)
else:
print(">>> Clean!")
if __name__ == '__main__':
main()
|
{
"content_hash": "00bde1e4c4fef72a4726d5bd84829416",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 68,
"avg_line_length": 23.867924528301888,
"alnum_prop": 0.5754940711462451,
"repo_name": "oubiwann/workerpool",
"id": "e526f696df5ff213fc807ba581b0bc496573b4b9",
"size": "1287",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "check.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15873"
}
],
"symlink_target": ""
}
|
import random
import numpy as np
from six.moves import xrange
import tensorflow as tf
from tensorflow.contrib import rnn, seq2seq
from tensorflow.python.layers.core import Dense
import special_vocab as config
import util.vocabutils as vocab_utils
class ChatbotModel(object):
def __init__(self, vocab_size, hidden_size, dropout,
num_layers, max_gradient_norm, batch_size, learning_rate,
lr_decay_factor, max_target_length,
max_source_length, decoder_mode=False):
'''
vocab_size: number of vocab tokens
buckets: buckets of max sequence lengths
hidden_size: dimension of hidden layers
num_layers: number of hidden layers
max_gradient_norm: maximum gradient magnitude
batch_size: number of training examples fed to network at once
learning_rate: starting learning rate of network
lr_decay_factor: amount by which to decay learning rate
num_samples: number of samples for sampled softmax
decoder_mode: Whether to build backpass nodes or not
'''
GO_ID = config.GO_ID
EOS_ID = config.EOS_ID
self.max_source_length = max_source_length
self.max_target_length = max_target_length
self.vocab_size = vocab_size
self.batch_size = batch_size
self.global_step = tf.Variable(0, trainable=False)
self.learning_rate = learning_rate
self.encoder_inputs = tf.placeholder(shape=(None, None), dtype=tf.int32, name='encoder_inputs')
self.source_lengths = tf.placeholder(shape=(None,), dtype=tf.int32, name='source_lengths')
self.decoder_targets = tf.placeholder(shape=(None, None), dtype=tf.int32, name='decoder_targets')
self.target_lengths = tf.placeholder(shape=(None,), dtype=tf.int32, name="target_lengths")
with tf.variable_scope('embeddings') as scope:
embeddings = tf.Variable(tf.random_uniform([vocab_size, hidden_size], -1.0, 1.0), dtype=tf.float32)
encoder_inputs_embedded = tf.nn.embedding_lookup(embeddings, self.encoder_inputs)
targets_embedding = tf.nn.embedding_lookup(embeddings, self.decoder_targets)
with tf.variable_scope('encoder') as scope:
encoder_cell = rnn.LSTMCell(hidden_size)
encoder_cell = rnn.DropoutWrapper(encoder_cell,
input_keep_prob=dropout)
encoder_cell = rnn.MultiRNNCell([encoder_cell] * num_layers)
encoder_outputs,encoder_state=tf.nn.dynamic_rnn(cell=encoder_cell,
inputs=encoder_inputs_embedded,
sequence_length=self.source_lengths,
dtype=tf.float32,
time_major=False)
with tf.variable_scope('decoder') as scope:
decoder_cell = rnn.LSTMCell(hidden_size)
decoder_cell = rnn.DropoutWrapper(decoder_cell,
input_keep_prob=dropout)
decoder_cell = rnn.MultiRNNCell([decoder_cell] * num_layers,state_is_tuple=True)
            if decoder_mode:
                beam_width = 2
                # Tile the encoder state across beams so beam search starts
                # from a correctly shaped initial state.
                tiled_state = seq2seq.tile_batch(encoder_state, multiplier=beam_width)
                decoder = seq2seq.BeamSearchDecoder(cell=decoder_cell,
                                    embedding=embeddings,
                                    start_tokens=tf.tile([GO_ID], [batch_size]),
                                    end_token=EOS_ID,
                                    initial_state=tiled_state,
                                    beam_width=beam_width,
                                    output_layer=Dense(vocab_size))
                final_outputs, _, _ = seq2seq.dynamic_decode(
                    decoder=decoder, maximum_iterations=self.max_target_length)
                self.logits = final_outputs.predicted_ids
else:
helper=seq2seq.TrainingHelper(targets_embedding,self.target_lengths)
decoder=seq2seq.BasicDecoder(decoder_cell,helper,encoder_state,Dense(vocab_size))
final_outputs, final_state, final_sequence_lengths =\
seq2seq.dynamic_decode(decoder=decoder)
self.logits = final_outputs.rnn_output
if not decoder_mode:
with tf.variable_scope("loss") as scope:
#have to pad logits, dynamic decode produces results not consistent
#in shape with targets
pad_size = self.max_target_length - tf.reduce_max(final_sequence_lengths)
self.logits = tf.pad(self.logits, [[0, 0], [0,pad_size], [0, 0]])
weights = tf.sequence_mask(lengths=final_sequence_lengths,
maxlen=self.max_target_length,
dtype=tf.float32,
name='weights')
x_entropy_loss = seq2seq.sequence_loss(logits=self.logits,
targets=self.decoder_targets,
weights=weights)#cross-entropy loss function
self.loss = tf.reduce_mean(x_entropy_loss)
optimizer = tf.train.AdamOptimizer()#Adam optimization algorithm
gradients = optimizer.compute_gradients(x_entropy_loss)
capped_grads = [(tf.clip_by_value(grad, -max_gradient_norm, max_gradient_norm), var) for grad, var in gradients]
self.train_op = optimizer.apply_gradients(capped_grads,
global_step=self.global_step)
self.saver = tf.train.Saver(tf.global_variables())
def step(self, sess, inputs,
targets, source_lengths,
target_lengths, test_mode=False):
        '''Run one batch through the network and return the loss.
        Gradients are applied unless test_mode is True.
        '''
if test_mode:
loss = sess.run([self.loss],
{self.encoder_inputs : inputs,
self.source_lengths : source_lengths,
self.decoder_targets : targets,
self.target_lengths : target_lengths})
else:
_, loss = sess.run([self.train_op, self.loss],
{self.encoder_inputs : inputs,
self.source_lengths : source_lengths,
self.decoder_targets : targets,
self.target_lengths : target_lengths})
return loss
def test(self, sess, inputs,
targets, source_lengths,
target_lengths):
logits=sess.run([self.logits],
{self.encoder_inputs : inputs,
self.source_lengths : source_lengths,
self.decoder_targets : targets,
self.target_lengths : target_lengths})
return logits
def get_batch(self, dataset):
'''
Obtains batch from dataset
Inputs: dataset - list of [input, target] sentence pairs
Outputs:
source_batch_major- [batch_size x max_sequence_length] inputs
target_batch_major- [batch_size x max_sequence_length] targets
source_seq_lengths- list of input seq lengths
target_seq_lengths- list of target seq lengths
'''
source_seq_lengths = []
target_seq_lengths =[]
for seq in dataset:
source_seq_lengths.append(len(seq[0]))
target_seq_lengths.append(len(seq[1]))
#max_target_length = max(target_seq_lengths)
source_batch_major = np.zeros(shape=[len(dataset), self.max_source_length], dtype=np.int32)
target_batch_major = np.zeros(shape=[len(dataset), self.max_target_length], dtype=np.int32)
for i, seq in enumerate(dataset):
for j, element in enumerate(seq[0]):
source_batch_major[i, j] = element
for j, element in enumerate(seq[1]):
target_batch_major[i,j] = element
return source_batch_major, target_batch_major, source_seq_lengths, target_seq_lengths
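# Hedged usage sketch (added for illustration; not part of the original
# module). Every hyperparameter value and the toy token-id dataset below are
# arbitrary placeholders, chosen only to show how the constructor,
# get_batch() and step() fit together.
if __name__ == '__main__':
    toy_dataset = [([1, 2, 3], [4, 5]), ([2, 2], [3, 3, 3])]
    model = ChatbotModel(vocab_size=10, hidden_size=16, dropout=1.0,
                         num_layers=1, max_gradient_norm=5.0, batch_size=2,
                         learning_rate=0.001, lr_decay_factor=0.99,
                         max_target_length=5, max_source_length=5)
    sources, targets, src_lens, tgt_lens = model.get_batch(toy_dataset)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        loss = model.step(sess, sources, targets, src_lens, tgt_lens)
        print('toy batch loss: %s' % loss)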
|
{
"content_hash": "a7a86e785471dd7fa6f6742ce70f4c1f",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 124,
"avg_line_length": 44.94827586206897,
"alnum_prop": 0.5684695051783659,
"repo_name": "wangcan04/chatbot",
"id": "f6a3b3147ec9d8d99fd12e02709d5555903ca4ed",
"size": "7821",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "NeuralChat/models/seq2seq.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "98600"
}
],
"symlink_target": ""
}
|
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals)
from builtins import *
def {{ cookiecutter.cli_entry_point_function_name }}():
""" Command-line entry point for {{ cookiecutter.repo_name }} """
print('{{ cookiecutter.repo_name }} placeholder CLI entry point')
def {{ cookiecutter.gui_entry_point_function_name }}():
""" GUI entry point for {{ cookiecutter.repo_name }} """
print('{{ cookiecutter.repo_name }} placeholder GUI entry point')
if __name__ == '__main__':
{{ cookiecutter.cli_entry_point_function_name }}()
|
{
"content_hash": "96284a8ae90f8187e2ba49d3a91ae57a",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 69,
"avg_line_length": 29.5,
"alnum_prop": 0.6593220338983051,
"repo_name": "DC23/cookiecutter-dcpypackage",
"id": "2c57d04ccba195df63cd6202e8e28fa948364848",
"size": "662",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3117"
},
{
"name": "Python",
"bytes": "8146"
},
{
"name": "Shell",
"bytes": "311"
}
],
"symlink_target": ""
}
|
import os
import stat
import config
import MySQLdb
def regenerate_keys():
db = None
try:
db = MySQLdb.connect(host=config.val('db.host'),
user=config.val('db.user'),
passwd=config.val('db.pwd'),
db=config.val('db.name'))
except:
return False
c = db.cursor()
c.execute("SELECT * FROM ssh_keys")
if not os.path.exists(os.path.join(config.val('home_dir'), '.ssh')):
os.makedirs(os.path.join(config.val('home_dir'), '.ssh'))
auth_keys = os.path.join(config.val('home_dir'), ".ssh/authorized_keys")
key_file = open(auth_keys, 'w')
for row in c:
key_file.write("command=\"bzr anv-serve %d --inet\" %s\n" % (row[1], row[2]))
key_file.close()
os.chmod(auth_keys, stat.S_IRWXU)
return True
|
{
"content_hash": "3d0ad7ec759e111cda76a3eda37e49bf",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 85,
"avg_line_length": 33.96,
"alnum_prop": 0.5512367491166078,
"repo_name": "Etenil/anvil",
"id": "1e1fc2f30c0c2677fef900df3b8e65b8ec35e0b0",
"size": "849",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "anvillib/ssh.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "41953"
},
{
"name": "Python",
"bytes": "118009"
}
],
"symlink_target": ""
}
|
"""The Met Office integration."""
from __future__ import annotations
import asyncio
import logging
import re
from typing import Any
import datapoint
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_API_KEY,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_NAME,
Platform,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import entity_registry
from homeassistant.helpers.device_registry import DeviceEntryType
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .const import (
DEFAULT_SCAN_INTERVAL,
DOMAIN,
METOFFICE_COORDINATES,
METOFFICE_DAILY_COORDINATOR,
METOFFICE_HOURLY_COORDINATOR,
METOFFICE_NAME,
MODE_3HOURLY,
MODE_DAILY,
)
from .data import MetOfficeData
from .helpers import fetch_data, fetch_site
_LOGGER = logging.getLogger(__name__)
PLATFORMS = [Platform.SENSOR, Platform.WEATHER]
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up a Met Office entry."""
latitude = entry.data[CONF_LATITUDE]
longitude = entry.data[CONF_LONGITUDE]
api_key = entry.data[CONF_API_KEY]
site_name = entry.data[CONF_NAME]
coordinates = f"{latitude}_{longitude}"
@callback
def update_unique_id(
entity_entry: entity_registry.RegistryEntry,
) -> dict[str, Any] | None:
"""Update unique ID of entity entry."""
if entity_entry.domain != Platform.SENSOR:
return None
name_to_key = {
"Station Name": "name",
"Weather": "weather",
"Temperature": "temperature",
"Feels Like Temperature": "feels_like_temperature",
"Wind Speed": "wind_speed",
"Wind Direction": "wind_direction",
"Wind Gust": "wind_gust",
"Visibility": "visibility",
"Visibility Distance": "visibility_distance",
"UV Index": "uv",
"Probability of Precipitation": "precipitation",
"Humidity": "humidity",
}
match = re.search(f"(?P<name>.*)_{coordinates}.*", entity_entry.unique_id)
if match is None:
return None
if (name := match.group("name")) in name_to_key:
return {
"new_unique_id": entity_entry.unique_id.replace(name, name_to_key[name])
}
return None
await entity_registry.async_migrate_entries(hass, entry.entry_id, update_unique_id)
connection = datapoint.connection(api_key=api_key)
site = await hass.async_add_executor_job(
fetch_site, connection, latitude, longitude
)
if site is None:
raise ConfigEntryNotReady()
async def async_update_3hourly() -> MetOfficeData:
return await hass.async_add_executor_job(
fetch_data, connection, site, MODE_3HOURLY
)
async def async_update_daily() -> MetOfficeData:
return await hass.async_add_executor_job(
fetch_data, connection, site, MODE_DAILY
)
metoffice_hourly_coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name=f"MetOffice Hourly Coordinator for {site_name}",
update_method=async_update_3hourly,
update_interval=DEFAULT_SCAN_INTERVAL,
)
metoffice_daily_coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name=f"MetOffice Daily Coordinator for {site_name}",
update_method=async_update_daily,
update_interval=DEFAULT_SCAN_INTERVAL,
)
metoffice_hass_data = hass.data.setdefault(DOMAIN, {})
metoffice_hass_data[entry.entry_id] = {
METOFFICE_HOURLY_COORDINATOR: metoffice_hourly_coordinator,
METOFFICE_DAILY_COORDINATOR: metoffice_daily_coordinator,
METOFFICE_NAME: site_name,
METOFFICE_COORDINATES: coordinates,
}
# Fetch initial data so we have data when entities subscribe
await asyncio.gather(
metoffice_hourly_coordinator.async_config_entry_first_refresh(),
metoffice_daily_coordinator.async_config_entry_first_refresh(),
)
await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
if not hass.data[DOMAIN]:
hass.data.pop(DOMAIN)
return unload_ok
def get_device_info(coordinates: str, name: str) -> DeviceInfo:
"""Return device registry information."""
return DeviceInfo(
entry_type=DeviceEntryType.SERVICE,
identifiers={(DOMAIN, coordinates)},
manufacturer="Met Office",
name=f"Met Office {name}",
)
|
{
"content_hash": "6416da88270286075481dc0bfe32e4c1",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 88,
"avg_line_length": 30.919254658385093,
"alnum_prop": 0.6597026918441141,
"repo_name": "mezz64/home-assistant",
"id": "057947d76e478bb10ea7aeb3c68508190f598df3",
"size": "4978",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/metoffice/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52481895"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
from .Command import Command
class Init(Command):
command = 'init'
help = "Initialize new document repository"
def set_args(self, subparser):
subparser.add_argument("--force", help="Overwrite existing document repository", action='store_true')
def run(self, args):
from ..Database import Database
Database.init(dataDir=args.data_dir, clobber=args.force)
|
{
"content_hash": "fdc4bbc727b25a9d73ec70f65d98ef03",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 109,
"avg_line_length": 33.083333333333336,
"alnum_prop": 0.690176322418136,
"repo_name": "tmearnest/sbd",
"id": "8b71ca45a1f47fa3ed165f7c221a0286060f7311",
"size": "397",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pdfs/Commands/Init.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4560"
},
{
"name": "HTML",
"bytes": "2544"
},
{
"name": "Python",
"bytes": "134132"
}
],
"symlink_target": ""
}
|
import io
import os
import pytest
from dvc.command.experiments import CmdExperimentsInit
from dvc.exceptions import DvcException
from dvc.main import main
from dvc.repo.experiments.init import init
from dvc.stage.exceptions import DuplicateStageName
# the tests may hang on prompts on failure
pytestmark = pytest.mark.timeout(2, func_only=True)
def test_init_simple(tmp_dir, scm, dvc, capsys):
tmp_dir.gen(
{
CmdExperimentsInit.CODE: {"copy.py": ""},
"data": "data",
"params.yaml": '{"foo": 1}',
"dvclive": {},
"plots": {},
}
)
code_path = os.path.join(CmdExperimentsInit.CODE, "copy.py")
script = f"python {code_path}"
capsys.readouterr()
assert main(["exp", "init", script]) == 0
out, err = capsys.readouterr()
assert not err
assert "Created default stage in dvc.yaml" in out
assert (tmp_dir / "dvc.yaml").parse() == {
"stages": {
"default": {
"cmd": script,
"deps": ["data", "src"],
"metrics": [{"metrics.json": {"cache": False}}],
"outs": ["models"],
"params": ["foo"],
"plots": [{"plots": {"cache": False}}],
}
}
}
@pytest.mark.parametrize("interactive", [True, False])
@pytest.mark.parametrize("typ", ["default", "live"])
def test_when_stage_already_exists_with_same_name(
tmp_dir, dvc, interactive, typ
):
(tmp_dir / "dvc.yaml").dump({"stages": {typ: {"cmd": "test"}}})
with pytest.raises(DuplicateStageName) as exc:
init(
dvc,
interactive=interactive,
type=typ,
overrides={"cmd": "true"},
defaults=CmdExperimentsInit.DEFAULTS,
)
assert (
str(exc.value) == f"Stage '{typ}' already exists in 'dvc.yaml'. "
"Use '--force' to overwrite."
)
@pytest.mark.parametrize("typ", ["default", "live"])
def test_when_stage_force_if_already_exists(tmp_dir, dvc, typ):
(tmp_dir / "params.yaml").dump({"foo": 1})
(tmp_dir / "dvc.yaml").dump({"stages": {typ: {"cmd": "test"}}})
init(
dvc,
type=typ,
force=True,
overrides={"cmd": "true"},
defaults=CmdExperimentsInit.DEFAULTS,
)
d = (tmp_dir / "dvc.yaml").parse()
assert d["stages"][typ]["cmd"] == "true"
def test_with_a_custom_name(tmp_dir, dvc):
init(dvc, name="custom", overrides={"cmd": "cmd"})
assert (tmp_dir / "dvc.yaml").parse() == {
"stages": {"custom": {"cmd": "cmd"}}
}
def test_init_with_no_defaults_non_interactive(tmp_dir, scm, dvc):
init(dvc, defaults={}, overrides={"cmd": "python script.py"})
assert (tmp_dir / "dvc.yaml").parse() == {
"stages": {"default": {"cmd": "python script.py"}}
}
scm._reset()
assert not (tmp_dir / "dvc.lock").exists()
assert scm.is_tracked("dvc.yaml")
def test_abort_confirmation(tmp_dir, dvc):
(tmp_dir / "param").dump({"foo": 1})
inp = io.StringIO("./script\nscript\ndata\nmodel\nparam\nmetric\nplt\nn")
with pytest.raises(DvcException) as exc:
init(
dvc,
interactive=True,
defaults=CmdExperimentsInit.DEFAULTS,
stream=inp,
)
assert str(exc.value) == "Aborting ..."
assert not (tmp_dir / "dvc.yaml").exists()
assert not (tmp_dir / "dvc.lock").exists()
@pytest.mark.parametrize(
"extra_overrides, inp",
[
({"cmd": "cmd"}, io.StringIO()),
({}, io.StringIO("cmd")),
],
)
def test_init_interactive_when_no_path_prompts_need_to_be_asked(
tmp_dir, dvc, extra_overrides, inp
):
"""When we pass everything that's required of, it should not prompt us."""
(tmp_dir / "params.yaml").dump({"foo": 1})
init(
dvc,
interactive=True,
defaults=CmdExperimentsInit.DEFAULTS,
overrides={**CmdExperimentsInit.DEFAULTS, **extra_overrides},
stream=inp, # we still need to confirm
)
assert (tmp_dir / "dvc.yaml").parse() == {
"stages": {
"default": {
"cmd": "cmd",
"deps": ["data", "src"],
"live": {"dvclive": {"html": True, "summary": True}},
"metrics": [{"metrics.json": {"cache": False}}],
# we specify `live` through `overrides`,
# so it creates checkpoint-based output.
"outs": [{"models": {"checkpoint": True}}],
"params": ["foo"],
"plots": [{"plots": {"cache": False}}],
}
}
}
def test_when_params_is_omitted_in_interactive_mode(tmp_dir, scm, dvc):
(tmp_dir / "params.yaml").dump({"foo": 1})
inp = io.StringIO("python script.py\nscript.py\ndata\nmodels\nn")
init(
dvc, interactive=True, stream=inp, defaults=CmdExperimentsInit.DEFAULTS
)
assert (tmp_dir / "dvc.yaml").parse() == {
"stages": {
"default": {
"cmd": "python script.py",
"deps": ["data", "script.py"],
"metrics": [{"metrics.json": {"cache": False}}],
"outs": ["models"],
"plots": [{"plots": {"cache": False}}],
}
}
}
assert not (tmp_dir / "dvc.lock").exists()
scm._reset()
assert scm.is_tracked("dvc.yaml")
assert not scm.is_tracked("params.yaml")
assert scm.is_tracked(".gitignore")
assert scm.is_ignored("models")
def test_init_interactive_params_validation(tmp_dir, dvc, capsys):
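    """A non-existent file and a directory are both rejected with a retry
    message; the stage ends up using the default params.yaml."""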
tmp_dir.gen({"data": {"foo": "foo"}})
(tmp_dir / "params.yaml").dump({"foo": 1})
inp = io.StringIO(
"python script.py\nscript.py\ndata\nmodels\nparams.json\ndata\n"
)
init(
dvc, stream=inp, interactive=True, defaults=CmdExperimentsInit.DEFAULTS
)
assert (tmp_dir / "dvc.yaml").parse() == {
"stages": {
"default": {
"cmd": "python script.py",
"deps": ["data", "script.py"],
"metrics": [{"metrics.json": {"cache": False}}],
"outs": ["models"],
"params": ["foo"],
"plots": [{"plots": {"cache": False}}],
}
}
}
out, err = capsys.readouterr()
assert (
"Path to a parameters file [params.yaml, n to omit]: "
"'params.json' does not exist. "
"Please retry with an existing parameters file.\n"
"Path to a parameters file [params.yaml, n to omit]: "
"'data' is a directory. "
"Please retry with an existing parameters file.\n"
"Path to a parameters file [params.yaml, n to omit]:"
) in err
assert not out
def test_init_with_no_defaults_interactive(tmp_dir, dvc):
inp = io.StringIO(
"python script.py\n"
"script.py\n"
"data\n"
"model\n"
"n\n"
"metric\n"
"n\n"
)
init(
dvc,
defaults={},
overrides={"cmd": "python script.py"},
interactive=True,
stream=inp,
)
assert (tmp_dir / "dvc.yaml").parse() == {
"stages": {
"default": {
"cmd": "python script.py",
"deps": ["python script.py", "script.py"],
"metrics": [{"metric": {"cache": False}}],
"outs": ["data"],
}
}
}
@pytest.mark.parametrize(
"interactive, overrides, inp",
[
(False, {"cmd": "python script.py", "code": "script.py"}, None),
(
True,
{},
io.StringIO(
"python script.py\n"
"script.py\n"
"data\n"
"models\n"
"params.yaml\n"
"metrics.json\n"
"plots\n"
"y"
),
),
],
ids=["non-interactive", "interactive"],
)
def test_init_interactive_default(
tmp_dir, scm, dvc, interactive, overrides, inp, capsys
):
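    """The non-interactive (overrides) and fully prompted interactive flows
    should produce the same default stage and git-tracking results."""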
(tmp_dir / "params.yaml").dump({"foo": {"bar": 1}})
init(
dvc,
interactive=interactive,
defaults=CmdExperimentsInit.DEFAULTS,
overrides=overrides,
stream=inp,
)
assert (tmp_dir / "dvc.yaml").parse() == {
"stages": {
"default": {
"cmd": "python script.py",
"deps": ["data", "script.py"],
"metrics": [{"metrics.json": {"cache": False}}],
"outs": ["models"],
"params": ["foo"],
"plots": [{"plots": {"cache": False}}],
}
}
}
assert not (tmp_dir / "dvc.lock").exists()
scm._reset()
assert scm.is_tracked("dvc.yaml")
assert scm.is_tracked("params.yaml")
assert scm.is_tracked(".gitignore")
assert scm.is_ignored("models")
out, err = capsys.readouterr()
if interactive:
assert "'script.py' does not exist in the workspace." in err
assert "'data' does not exist in the workspace." in err
assert not out
@pytest.mark.parametrize(
"interactive, overrides, inp",
[
(False, {"cmd": "python script.py", "code": "script.py"}, None),
(
True,
{},
io.StringIO(
"python script.py\n"
"script.py\n"
"data\n"
"models\n"
"params.yaml\n"
"dvclive\n"
"y"
),
),
(
True,
{"cmd": "python script.py"},
io.StringIO(
"script.py\n"
"data\n"
"models\n"
"params.yaml\n"
"dvclive\n"
"y"
),
),
(
True,
{"cmd": "python script.py", "models": "models"},
io.StringIO("script.py\ndata\nparams.yaml\ndvclive\ny"),
),
],
ids=[
"non-interactive",
"interactive",
"interactive-cmd-provided",
"interactive-cmd-models-provided",
],
)
def test_init_interactive_live(
tmp_dir, scm, dvc, interactive, overrides, inp, capsys
):
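    """type='live' should generate a stage with a dvclive section and a
    checkpoint-enabled models output; values already supplied via overrides
    are not prompted for again."""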
(tmp_dir / "params.yaml").dump({"foo": {"bar": 1}})
init(
dvc,
type="live",
interactive=interactive,
defaults=CmdExperimentsInit.DEFAULTS,
overrides=overrides,
stream=inp,
)
assert (tmp_dir / "dvc.yaml").parse() == {
"stages": {
"live": {
"cmd": "python script.py",
"deps": ["data", "script.py"],
"live": {"dvclive": {"html": True, "summary": True}},
"outs": [{"models": {"checkpoint": True}}],
"params": ["foo"],
}
}
}
assert not (tmp_dir / "dvc.lock").exists()
scm._reset()
assert scm.is_tracked("dvc.yaml")
assert scm.is_tracked("params.yaml")
assert scm.is_tracked(".gitignore")
assert scm.is_ignored("models")
out, err = capsys.readouterr()
if interactive:
assert "'script.py' does not exist in the workspace." in err
assert "'data' does not exist in the workspace." in err
assert not out
@pytest.mark.parametrize(
"interactive, inp",
[
(False, None),
(True, io.StringIO()),
],
)
def test_init_with_type_live_and_models_plots_provided(
tmp_dir, dvc, interactive, inp
):
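    """Explicit `metrics` and `plots` overrides are kept in the live stage
    alongside the dvclive section."""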
(tmp_dir / "params.yaml").dump({"foo": 1})
init(
dvc,
type="live",
interactive=interactive,
stream=inp,
defaults=CmdExperimentsInit.DEFAULTS,
overrides={"cmd": "cmd", "metrics": "m", "plots": "p"},
)
assert (tmp_dir / "dvc.yaml").parse() == {
"stages": {
"live": {
"cmd": "cmd",
"deps": ["data", "src"],
"live": {"dvclive": {"html": True, "summary": True}},
"metrics": [{"m": {"cache": False}}],
"outs": [{"models": {"checkpoint": True}}],
"params": ["foo"],
"plots": [{"p": {"cache": False}}],
}
}
}
@pytest.mark.parametrize(
"interactive, inp",
[
(False, None),
(True, io.StringIO()),
],
)
def test_init_with_type_default_and_live_provided(
tmp_dir, dvc, interactive, inp
):
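    """Passing a `live` override to the default type adds a live section and
    a checkpoint-based output on top of the usual metrics/plots entries."""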
(tmp_dir / "params.yaml").dump({"foo": 1})
init(
dvc,
interactive=interactive,
stream=inp,
defaults=CmdExperimentsInit.DEFAULTS,
overrides={"cmd": "cmd", "live": "live"},
)
assert (tmp_dir / "dvc.yaml").parse() == {
"stages": {
"default": {
"cmd": "cmd",
"deps": ["data", "src"],
"live": {"live": {"html": True, "summary": True}},
"metrics": [{"metrics.json": {"cache": False}}],
"outs": [{"models": {"checkpoint": True}}],
"params": ["foo"],
"plots": [{"plots": {"cache": False}}],
}
}
}
|
{
"content_hash": "db479068891a91e7c077b7e0975c77cd",
"timestamp": "",
"source": "github",
"line_count": 449,
"max_line_length": 79,
"avg_line_length": 29.14476614699332,
"alnum_prop": 0.49335167354424575,
"repo_name": "dmpetrov/dataversioncontrol",
"id": "05bf99bab83059206ac3e78d8ec1295c407dd71d",
"size": "13086",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/func/experiments/test_init.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "127601"
},
{
"name": "Shell",
"bytes": "1677"
}
],
"symlink_target": ""
}
|
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Question'
db.create_table('questions_question', (
('id', self.gf('django.db.models.fields.AutoField')(
primary_key=True)),
('subject', self.gf('django.db.models.fields.CharField')(
max_length=25)),
('content', self.gf('django.db.models.fields.TextField')(
default='', null=True, blank=True)),
('status', self.gf('django.db.models.fields.CharField')(
default='asked', max_length=10)),
))
db.send_create_signal('questions', ['Question'])
def backwards(self, orm):
# Deleting model 'Question'
db.delete_table('questions_question')
models = {
'questions.question': {
'Meta': {'object_name': 'Question'},
'content': ('django.db.models.fields.TextField', [],
{'default': "''", 'null': 'True',
'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [],
{'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [],
{'default': "'asked'", 'max_length': '10'}),
'subject': ('django.db.models.fields.CharField', [],
{'max_length': '25'})
}
}
complete_apps = ['questions']
|
{
"content_hash": "a1b4df0ffa6c9536ec55f055901abefb",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 71,
"avg_line_length": 37.975,
"alnum_prop": 0.5016458196181699,
"repo_name": "reinbach/tutorus",
"id": "369d7b6b0d8baf7b533332f5a64e547db3cae03c",
"size": "1543",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tutorus/questions/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "160397"
},
{
"name": "Python",
"bytes": "168905"
}
],
"symlink_target": ""
}
|