| text (string, lengths 4–1.02M) | meta (dict) |
|---|---|
from oozappa.config import OozappaSetting
_ = OozappaSetting
settings = _(
domain = 'tsuyukimakoto.com',
instance_type = 'c3.large',
)
|
{
"content_hash": "0cbf0d22f737f9c744f09e7d89456a14",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 41,
"avg_line_length": 20.571428571428573,
"alnum_prop": 0.6944444444444444,
"repo_name": "frkwy/oozappa",
"id": "b87b6518ba8fedbe5748a0c1728fd64f3ae0594d",
"size": "166",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sample/ops/production/vars.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "171966"
},
{
"name": "HTML",
"bytes": "19245"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "51449"
}
],
"symlink_target": ""
}
|
from pyston.serializer import Serializable, SerializableObj
from .models import User
class CountIssuesPerUserTable(Serializable):
def serialize(self, serialization_format, **kwargs):
return [
{
'email': user.email,
'created_issues_count': user.created_issues.count(),
}
for user in User.objects.all()
]
class CountWatchersPerIssue(SerializableObj):

    def __init__(self, issue):
        # Call the base initializer through this class rather than its parent,
        # so SerializableObj.__init__ is not skipped in the MRO.
        super(CountWatchersPerIssue, self).__init__()
self.name = issue.name
self.watchers_count = issue.watched_by.count()
class RestMeta:
fields = ('name', 'watchers_count')
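# A usage sketch (illustrative; it assumes a configured Django project with the
# example app's models loaded, that these classes take no extra constructor
# arguments, and a hypothetical Issue model alongside User in .models):
#
#   rows = CountIssuesPerUserTable().serialize('json')
#   # -> [{'email': ..., 'created_issues_count': ...}, ...]
#
#   summary = CountWatchersPerIssue(some_issue)
#   print(summary.name, summary.watchers_count)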
|
{
"content_hash": "e637be111030df0c8202a9aaaacc00ee",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 68,
"avg_line_length": 25.923076923076923,
"alnum_prop": 0.6097922848664689,
"repo_name": "druids/django-pyston",
"id": "4c9755689c0ca62349af109410d93acf6ca99b6a",
"size": "674",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/dj/apps/app/serializable.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9676"
},
{
"name": "Makefile",
"bytes": "3369"
},
{
"name": "Python",
"bytes": "405274"
}
],
"symlink_target": ""
}
|
import os
from subprocess import Popen, STDOUT, PIPE, call
import filecmp
import glob
from optparse import OptionParser
parser = OptionParser()
parser.add_option('--mpi_exec', dest='mpi_exec', default='')
parser.add_option('--mpi_np', dest='mpi_np', default='3')
parser.add_option('--exe', dest='exe')
(opts, args) = parser.parse_args()
cwd = os.getcwd()
def test_run():
if opts.mpi_exec != '':
proc = Popen([opts.mpi_exec, '-np', opts.mpi_np, opts.exe, cwd],
stderr=STDOUT, stdout=PIPE)
else:
proc = Popen([opts.exe, cwd], stderr=STDOUT, stdout=PIPE)
print(proc.communicate()[0])
returncode = proc.returncode
assert returncode == 0, 'OpenMC did not exit successfully.'
def test_created_statepoint():
statepoint = glob.glob(cwd + '/statepoint.7.*')
assert len(statepoint) == 1, 'Either multiple or no statepoint files exist.'
assert statepoint[0].endswith('binary') or statepoint[0].endswith('h5'),\
'Statepoint file is not a binary or hdf5 file.'
def test_results():
statepoint = glob.glob(cwd + '/statepoint.7.*')
call(['python', 'results.py', statepoint[0]])
compare = filecmp.cmp('results_test.dat', 'results_true.dat')
if not compare:
os.rename('results_test.dat', 'results_error.dat')
assert compare, 'Results do not agree.'
def teardown():
output = glob.glob(cwd + '/statepoint.7.*')
output.append(cwd + '/results_test.dat')
for f in output:
if os.path.exists(f):
os.remove(f)
if __name__ == '__main__':
# test for openmc executable
    if opts.exe is None:
raise Exception('Must specify OpenMC executable from command line with --exe.')
# run tests
test_run()
test_created_statepoint()
test_results()
teardown()
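# Invocation sketch (the flags below mirror the OptionParser options defined above;
# the executable path is illustrative):
#
#   python test_eigenvalue_genperbatch.py --exe /path/to/openmc
#   python test_eigenvalue_genperbatch.py --mpi_exec mpiexec --mpi_np 3 --exe /path/to/openmc
#
# The script runs OpenMC in the current directory, checks that exactly one
# statepoint.7.* file exists, compares results_test.dat against results_true.dat,
# and removes the generated files on teardown.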
|
{
"content_hash": "a380bebf82b4e05b3334aab1cd8bc0df",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 87,
"avg_line_length": 32.527272727272724,
"alnum_prop": 0.6411403018446059,
"repo_name": "bhermanmit/cdash",
"id": "1e61af4a2b734fbee1cc5fc6f3e87f25065099ee",
"size": "1812",
"binary": false,
"copies": "1",
"ref": "refs/heads/ctests",
"path": "tests/test_eigenvalue_genperbatch/test_eigenvalue_genperbatch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "FORTRAN",
"bytes": "2289329"
},
{
"name": "Python",
"bytes": "311545"
}
],
"symlink_target": ""
}
|
__author__ = 'Douglas'
'''
The function cifraCesar(<message>, <shift>) returns the message passed as the first
parameter encrypted with the shift passed as the second parameter.
Ex.:  'Aula de matemática: Capítulo nove'
Ret.: 'DXOD*GH*PDWHPÄWLFD=*FDSÐWXOR*QRYH'
'''
def cifraCesar(mensagem, ndesloc):
    textoCifra = ''
    mensagem = mensagem.upper()
    for ch in mensagem:
        if ch == ' ':
            textoCifra += '*'
        else:
            textoCifra += chr(ord(ch) + ndesloc)
        # end if
    # end for
    return textoCifra
# end cifraCesar
'''
The function decifraCesar(<message>, <shift>) returns the message passed as the first
parameter decrypted with the shift passed as the second parameter.
Ex.:  'DXOD*GH*PDWHPÄWLFD=*FDSÐWXOR*QRYH'
Ret.: 'Aula de matemática: Capítulo nove'
'''
def decifraCesar(textoCifrado, ndesloc):
    textoDecifra = ''
    textoCifrado = textoCifrado.upper()
    for ch in textoCifrado:
        if ch == '*':
            textoDecifra += ' '
        else:
            textoDecifra += chr(ord(ch) - ndesloc)
        # end if
    # end for
    return textoDecifra.lower()
# end decifraCesar
def cifraEspartana(mensagem):
    return mensagem
# end cifraEspartana
def decifraEspartana(mensagem):
    return mensagem
# end decifraEspartana
def criptografa(mensagem):
    desloc = 13
    return cifraEspartana(cifraCesar(mensagem, desloc))
# end criptografa
def descriptografa(mensagem):
    desloc = 13
    return decifraCesar(decifraEspartana(mensagem), desloc)
# end descriptografa
def main():
    arqRead = open('cripto.txt', 'rt')
    linha = arqRead.readline()
    while linha != '':
        print(descriptografa(criptografa(descriptografa(linha.strip()))))
        linha = arqRead.readline()
    # end while
    arqRead.close()
# end main
main()
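# Roundtrip sketch (illustrative only; not part of the original assignment file):
#
#   cifrado = cifraCesar('Aula de matematica', 13)   # spaces become '*', letters shift by 13
#   print(cifrado)
#   print(decifraCesar(cifrado, 13))                 # -> 'aula de matematica'
#
# Note that decifraCesar() lower-cases its result, so the roundtrip restores the
# words but not the original capitalization.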
|
{
"content_hash": "3c917b2bb0097f0fcb9d1ca4826d537b",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 93,
"avg_line_length": 21.0625,
"alnum_prop": 0.7222551928783383,
"repo_name": "douglasbolis/prog_2",
"id": "ff7d5d534fba1573b035ba4951fa41d13488fc77",
"size": "1701",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "atividades/atividade_4/criptografia.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "96508"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import stats
import tensorflow as tf
class DirichletTest(tf.test.TestCase):
def testSimpleShapes(self):
with self.test_session():
alpha = np.random.rand(3)
dist = tf.contrib.distributions.Dirichlet(alpha)
self.assertEqual(3, dist.event_shape().eval())
self.assertAllEqual([], dist.batch_shape().eval())
self.assertEqual(tf.TensorShape([3]), dist.get_event_shape())
self.assertEqual(tf.TensorShape([]), dist.get_batch_shape())
def testComplexShapes(self):
with self.test_session():
alpha = np.random.rand(3, 2, 2)
dist = tf.contrib.distributions.Dirichlet(alpha)
self.assertEqual(2, dist.event_shape().eval())
self.assertAllEqual([3, 2], dist.batch_shape().eval())
self.assertEqual(tf.TensorShape([2]), dist.get_event_shape())
self.assertEqual(tf.TensorShape([3, 2]), dist.get_batch_shape())
def testAlphaProperty(self):
alpha = [[1., 2, 3]]
with self.test_session():
dist = tf.contrib.distributions.Dirichlet(alpha)
self.assertEqual([1, 3], dist.alpha.get_shape())
self.assertAllClose(alpha, dist.alpha.eval())
def testPdfXProper(self):
alpha = [[1., 2, 3]]
with self.test_session():
dist = tf.contrib.distributions.Dirichlet(alpha, validate_args=True)
dist.pdf([.1, .3, .6]).eval()
dist.pdf([.2, .3, .5]).eval()
# Either condition can trigger.
with self.assertRaisesOpError("Condition x > 0.*|Condition x < y.*"):
dist.pdf([-1., 1, 1]).eval()
with self.assertRaisesOpError("Condition x > 0.*"):
dist.pdf([0., .1, .9]).eval()
with self.assertRaisesOpError("Condition x ~= y.*"):
dist.pdf([.1, .2, .8]).eval()
def testPdfZeroBatches(self):
with self.test_session():
alpha = [1., 2]
x = [.5, .5]
dist = tf.contrib.distributions.Dirichlet(alpha)
pdf = dist.pdf(x)
self.assertAllClose(1., pdf.eval())
self.assertEqual((), pdf.get_shape())
def testPdfZeroBatchesNontrivialX(self):
with self.test_session():
alpha = [1., 2]
x = [.3, .7]
dist = tf.contrib.distributions.Dirichlet(alpha)
pdf = dist.pdf(x)
self.assertAllClose(7./5, pdf.eval())
self.assertEqual((), pdf.get_shape())
def testPdfUniformZeroBatches(self):
with self.test_session():
# Corresponds to a uniform distribution
alpha = [1., 1, 1]
x = [[.2, .5, .3], [.3, .4, .3]]
dist = tf.contrib.distributions.Dirichlet(alpha)
pdf = dist.pdf(x)
self.assertAllClose([2., 2.], pdf.eval())
self.assertEqual((2), pdf.get_shape())
def testPdfAlphaStretchedInBroadcastWhenSameRank(self):
with self.test_session():
alpha = [[1., 2]]
x = [[.5, .5], [.3, .7]]
dist = tf.contrib.distributions.Dirichlet(alpha)
pdf = dist.pdf(x)
self.assertAllClose([1., 7./5], pdf.eval())
self.assertEqual((2), pdf.get_shape())
def testPdfAlphaStretchedInBroadcastWhenLowerRank(self):
with self.test_session():
alpha = [1., 2]
x = [[.5, .5], [.2, .8]]
pdf = tf.contrib.distributions.Dirichlet(alpha).pdf(x)
self.assertAllClose([1., 8./5], pdf.eval())
self.assertEqual((2), pdf.get_shape())
def testPdfXStretchedInBroadcastWhenSameRank(self):
with self.test_session():
alpha = [[1., 2], [2., 3]]
x = [[.5, .5]]
pdf = tf.contrib.distributions.Dirichlet(alpha).pdf(x)
self.assertAllClose([1., 3./2], pdf.eval())
self.assertEqual((2), pdf.get_shape())
def testPdfXStretchedInBroadcastWhenLowerRank(self):
with self.test_session():
alpha = [[1., 2], [2., 3]]
x = [.5, .5]
pdf = tf.contrib.distributions.Dirichlet(alpha).pdf(x)
self.assertAllClose([1., 3./2], pdf.eval())
self.assertEqual((2), pdf.get_shape())
def testDirichletMean(self):
with self.test_session():
alpha = [1., 2, 3]
expected_mean = stats.dirichlet.mean(alpha)
dirichlet = tf.contrib.distributions.Dirichlet(alpha=alpha)
self.assertEqual(dirichlet.mean().get_shape(), (3,))
self.assertAllClose(dirichlet.mean().eval(), expected_mean)
def testDirichletVariance(self):
with self.test_session():
alpha = [1., 2, 3]
denominator = np.sum(alpha)**2 * (np.sum(alpha) + 1)
expected_variance = np.diag(stats.dirichlet.var(alpha))
expected_variance += [
[0., -2, -3], [-2, 0, -6], [-3, -6, 0]] / denominator
dirichlet = tf.contrib.distributions.Dirichlet(alpha=alpha)
self.assertEqual(dirichlet.variance().get_shape(), (3, 3))
self.assertAllClose(dirichlet.variance().eval(), expected_variance)
def testDirichletMode(self):
with self.test_session():
alpha = np.array([1.1, 2, 3])
expected_mode = (alpha - 1)/(np.sum(alpha) - 3)
dirichlet = tf.contrib.distributions.Dirichlet(alpha=alpha)
self.assertEqual(dirichlet.mode().get_shape(), (3,))
self.assertAllClose(dirichlet.mode().eval(), expected_mode)
def testDirichletModeInvalid(self):
with self.test_session():
alpha = np.array([1., 2, 3])
dirichlet = tf.contrib.distributions.Dirichlet(
alpha=alpha, allow_nan_stats=False)
with self.assertRaisesOpError("Condition x < y.*"):
dirichlet.mode().eval()
def testDirichletModeEnableAllowNanStats(self):
with self.test_session():
alpha = np.array([1., 2, 3])
dirichlet = tf.contrib.distributions.Dirichlet(
alpha=alpha, allow_nan_stats=True)
expected_mode = (alpha - 1)/(np.sum(alpha) - 3)
expected_mode[0] = np.nan
self.assertEqual(dirichlet.mode().get_shape(), (3,))
self.assertAllClose(dirichlet.mode().eval(), expected_mode)
def testDirichletEntropy(self):
with self.test_session():
alpha = [1., 2, 3]
expected_entropy = stats.dirichlet.entropy(alpha)
dirichlet = tf.contrib.distributions.Dirichlet(alpha=alpha)
self.assertEqual(dirichlet.entropy().get_shape(), ())
self.assertAllClose(dirichlet.entropy().eval(), expected_entropy)
def testDirichletSample(self):
with self.test_session():
alpha = [1., 2]
dirichlet = tf.contrib.distributions.Dirichlet(alpha)
n = tf.constant(100000)
samples = dirichlet.sample_n(n)
sample_values = samples.eval()
self.assertEqual(sample_values.shape, (100000, 2))
self.assertTrue(np.all(sample_values > 0.0))
self.assertLess(
stats.kstest(
# Beta is a univariate distribution.
sample_values[:, 0], stats.beta(a=1., b=2.).cdf)[0],
0.01)
if __name__ == "__main__":
tf.test.main()
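# Worked check for testPdfZeroBatchesNontrivialX above (illustrative, using SciPy
# instead of TensorFlow): for alpha = [1., 2] the Dirichlet density at x = [.3, .7] is
#   Gamma(3) / (Gamma(1) * Gamma(2)) * 0.3**0 * 0.7**1 = 2 * 0.7 = 1.4 = 7/5,
# which is the value asserted in that test. The same number can be reproduced with
#
#   from scipy import stats
#   stats.dirichlet.pdf([.3, .7], [1., 2])   # -> 1.4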
|
{
"content_hash": "e666ea4baf56c7bfd413bccbd5879142",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 75,
"avg_line_length": 37.26923076923077,
"alnum_prop": 0.6240601503759399,
"repo_name": "tongwang01/tensorflow",
"id": "59a6216baaccc7e2155114bd55c445c2a1fe568d",
"size": "7472",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/distributions/python/kernel_tests/dirichlet_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "177722"
},
{
"name": "C++",
"bytes": "11252614"
},
{
"name": "CMake",
"bytes": "36462"
},
{
"name": "CSS",
"bytes": "107"
},
{
"name": "GCC Machine Description",
"bytes": "2"
},
{
"name": "HTML",
"bytes": "968188"
},
{
"name": "Java",
"bytes": "41615"
},
{
"name": "JavaScript",
"bytes": "10844"
},
{
"name": "Jupyter Notebook",
"bytes": "1974767"
},
{
"name": "Makefile",
"bytes": "21265"
},
{
"name": "Objective-C",
"bytes": "6942"
},
{
"name": "Objective-C++",
"bytes": "61636"
},
{
"name": "Protocol Buffer",
"bytes": "122032"
},
{
"name": "Python",
"bytes": "9724114"
},
{
"name": "Shell",
"bytes": "243989"
},
{
"name": "TypeScript",
"bytes": "429623"
}
],
"symlink_target": ""
}
|
'''
## License
The MIT License (MIT)
Jetduino for the Jetson TK1/TX1: an open source platform for connecting
Grove Sensors to the Jetson embedded supercomputers.
Copyright (C) 2016 NeuroRobotic Technologies
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import time
import jetduino
from jetduino_pins import *
servo = 1
# Set the dynamixel so that it only returns data for read commands.
print ("setting return status level to 1")
jetduino.dynamixelSetRegister(servo, jetduino.AX_RETURN_LEVEL, 1, 1)
jetduino.dynamixelSetRegister(servo, jetduino.AX_CW_ANGLE_LIMIT_L, 2, 0)
jetduino.dynamixelSetRegister(servo, jetduino.AX_CCW_ANGLE_LIMIT_L, 2, 1023)
jetduino.dynamixelSetEndless(servo, 0)
print ("Move to 0 at fastest speed")
jetduino.dynamixelMove(servo, 0, 0)
time.sleep(2)
print ("Move to 1023 slowly")
jetduino.dynamixelMove(servo, 1023, 200)
time.sleep(1)
print ("stop the servo at its current position.")
jetduino.dynamixelStop(servo)
time.sleep(1)
while True:
try:
print ("Moving to 1023 at 100")
jetduino.dynamixelMove(servo, 1023, 100)
for num in range(1, 30):
pos = jetduino.dynamixelGetRegister(servo, jetduino.AX_PRESENT_POSITION_L, 2)
print ("Pos: %d" % pos)
print ("Moving to 10 at 1000")
jetduino.dynamixelMove(servo, 10, 1000)
for num in range(1, 30):
pos = jetduino.dynamixelGetRegister(servo, jetduino.AX_PRESENT_POSITION_L, 2)
print ("Pos: %d" % pos)
except KeyboardInterrupt:
print("Exiting loop")
break
except IOError:
print ("Error")
print ("stopping servo.")
jetduino.dynamixelStop(servo)
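# A small helper sketch (illustrative; it only reuses the jetduino calls shown above):
#
#   def move_and_poll(servo_id, position, speed, samples=30):
#       """Start a move and print the present position a fixed number of times."""
#       jetduino.dynamixelMove(servo_id, position, speed)
#       for _ in range(samples):
#           pos = jetduino.dynamixelGetRegister(servo_id, jetduino.AX_PRESENT_POSITION_L, 2)
#           print("Pos: %d" % pos)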
|
{
"content_hash": "544ad0f9c91c0dafdc9ece593e86ea86",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 89,
"avg_line_length": 33.97435897435897,
"alnum_prop": 0.7362264150943396,
"repo_name": "NeuroRoboticTech/Jetduino",
"id": "24d066e1b5b424b32d4925dd7306709db108d67d",
"size": "3006",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Software/Python/dynamixel_move.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "37042"
},
{
"name": "C",
"bytes": "38867"
},
{
"name": "C#",
"bytes": "33014"
},
{
"name": "C++",
"bytes": "101883"
},
{
"name": "CMake",
"bytes": "3553"
},
{
"name": "Go",
"bytes": "3646"
},
{
"name": "JavaScript",
"bytes": "30142"
},
{
"name": "Python",
"bytes": "568027"
},
{
"name": "Shell",
"bytes": "17661"
}
],
"symlink_target": ""
}
|
import pytest
import sys
import cPickle as pickle
from test_base_class import TestBaseClass
import os
import shutil
import time
aerospike = pytest.importorskip("aerospike")
try:
from aerospike.exception import *
except:
print "Please install aerospike python client."
sys.exit(1)
from aerospike import predicates as p
class TestAggregate(TestBaseClass):
def setup_class(cls):
hostlist, user, password = TestBaseClass.get_hosts()
config = {
'hosts': hostlist,
'lua':{'user_path': '/tmp/',
'system_path':'../aerospike-client-c/lua/'}}
if user == None and password == None:
client = aerospike.client(config).connect()
else:
client = aerospike.client(config).connect(user, password)
TestAggregate.skip_old_server = True
versioninfo = client.info('version')
for keys in versioninfo:
for value in versioninfo[keys]:
if value != None:
versionlist = value[value.find("build") + 6:value.find("\n")].split(".")
if int(versionlist[0]) >= 3 and int(versionlist[1]) >= 6:
TestAggregate.skip_old_server = False
client.index_integer_create('test', 'demo', 'test_age',
'test_demo_test_age_idx')
client.index_integer_create('test', 'demo', 'age1', 'test_demo_age1_idx')
time.sleep(2)
filename = "stream_example.lua"
udf_type = aerospike.UDF_TYPE_LUA
status = client.udf_put(filename, udf_type)
shutil.copyfile(filename, config['lua']['user_path'] +
'stream_example.lua')
client.close()
    def teardown_class(cls):
        # NOTE: the early return below disables the cleanup code that follows.
        return
hostlist, user, password = TestBaseClass.get_hosts()
config = {
'hosts': hostlist,
'lua':{'user_path': '/tmp/',
'system_path':'../aerospike-client-c/lua/'}}
if user == None and password == None:
client = aerospike.client(config).connect()
else:
client = aerospike.client(config).connect(user, password)
client.index_remove('test', 'test_demo_test_age_idx')
client.index_remove('test', 'test_demo_age1_idx')
module = "stream_example.lua"
status = client.udf_remove(module)
os.remove(config['lua']['user_path'] + 'stream_example.lua')
client.close()
def setup_method(self, method):
"""
Setup method.
"""
hostlist, user, password = TestBaseClass.get_hosts()
config = {
'hosts': hostlist,
'lua':{'user_path': '/tmp/',
'system_path':'../aerospike-client-c/lua/'}}
if TestBaseClass.user == None and TestBaseClass.password == None:
self.client = aerospike.client(config).connect()
else:
self.client = aerospike.client(config).connect(
TestBaseClass.user, TestBaseClass.password)
for i in xrange(5):
key = ('test', 'demo', i)
rec = {
'name': 'name%s' % (str(i)),
'addr': 'name%s' % (str(i)),
'test_age': i,
'no': i
}
self.client.put(key, rec)
def teardown_method(self, method):
"""
Teardown method.
"""
for i in xrange(5):
key = ('test', 'demo', i)
#self.client.remove(key)
self.client.close()
def test_aggregate_with_no_parameters(self):
"""
Invoke aggregate() without any mandatory parameters.
"""
try:
query = self.client.query()
query.select()
query.where()
except ParamError as exception:
assert exception.code == -2
assert exception.msg == 'query() expects atleast 1 parameter'
#assert "where() takes at least 1 argument (0 given)" in typeError.value
def test_aggregate_no_sec_index(self):
"""
Invoke aggregate() with no secondary index
"""
try:
query = self.client.query('test', 'demo')
query.select('name', 'no')
query.where(p.between('no', 1, 5))
query.apply('stream_example', 'count')
result = None
def user_callback(value):
result = value
query.foreach(user_callback)
except IndexNotFound as exception:
assert exception.code == 201L
assert exception.msg == 'AEROSPIKE_ERR_INDEX_NOT_FOUND'
def test_aggregate_with_incorrect_ns_set(self):
"""
Invoke aggregate() with incorrect ns and set
"""
try:
query = self.client.query('test1', 'demo1')
query.select('name', 'test_age')
query.where(p.equals('test_age', 1))
query.apply('stream_example', 'count')
result = 1
def user_callback(value):
result = value
query.foreach(user_callback)
except InvalidRequest as exception:
assert exception.code == 4L
assert exception.msg == 'AEROSPIKE_ERR_REQUEST_INVALID'
#@pytest.mark.xfail(reason="C client incorrectly sent status AEROSPIKE_ERR_UDF")
def test_aggregate_with_where_incorrect(self):
"""
Invoke aggregate() with where is incorrect
"""
query = self.client.query('test', 'demo')
query.select('name', 'test_age')
query.where(p.equals('test_age', 165))
query.apply('stream_example', 'count')
records = []
def user_callback(value):
records.append(value)
query.foreach(user_callback)
assert records == []
def test_aggregate_with_where_none_value(self):
"""
Invoke aggregate() with where is null value
"""
query = self.client.query('test', 'demo')
query.select('name', 'test_age')
try:
query.where(p.equals('test_age', None))
query.apply('stream_example', 'count')
result = 1
def user_callback(value):
result = value
query.foreach(user_callback)
except ParamError as exception:
assert exception.code == -2L
assert exception.msg == 'predicate is invalid.'
#@pytest.mark.xfail(reason="C client incorrectly sent status AEROSPIKE_ERR_UDF")
def test_aggregate_with_where_bool_value(self):
"""
Invoke aggregate() with where is bool value
"""
query = self.client.query('test', 'demo')
query.select('name', 'test_age')
query.where(p.between('test_age', True, True))
query.apply('stream_example', 'count')
records = []
def user_callback(value):
records.append(value)
query.foreach(user_callback)
assert records[0] == 1
def test_aggregate_with_where_equals_value(self):
"""
Invoke aggregate() with where is equal
"""
query = self.client.query('test', 'demo')
query.select('name', 'test_age')
query.where(p.equals('test_age', 2))
query.apply('stream_example', 'count')
records = []
def user_callback(value):
records.append(value)
query.foreach(user_callback)
assert records[0] == 1
def test_aggregate_with_empty_module_function(self):
"""
Invoke aggregate() with empty module and function
"""
query = self.client.query('test', 'demo')
query.select('name', 'test_age')
query.where(p.between('test_age', 1, 5))
query.apply('', '')
result = None
def user_callback(value):
result = value
query.foreach(user_callback)
assert result == None
"""
def test_aggregate_with_incorrect_module(self):
#Invoke aggregate() with incorrect module
try:
query = self.client.query('test', 'demo')
query.select('name', 'test_age')
query.where(p.between('test_age', 1, 5))
query.apply('streamwrong', 'count')
result = None
def user_callback(value):
result = value
query.foreach(user_callback)
except ClientError as exception:
assert exception.code == -1L
assert exception.msg == 'UDF: Execution Error 1'
def test_aggregate_with_incorrect_function(self):
#Invoke aggregate() with incorrect function
try:
query = self.client.query('test', 'demo')
query.select('name', 'test_age')
query.where(p.between('test_age', 1, 5))
query.apply('stream_example', 'countno')
records = []
def user_callback(value):
records.append(value)
query.foreach(user_callback)
except ClientError as exception:
assert exception.code == -1L
assert exception.msg == 'UDF: Execution Error 2 : function not found'
"""
def test_aggregate_with_correct_parameters(self):
"""
Invoke aggregate() with correct arguments
"""
query = self.client.query('test', 'demo')
query.select('name', 'test_age')
query.where(p.between('test_age', 1, 5))
query.apply('stream_example', 'count')
records = []
def user_callback(value):
records.append(value)
query.foreach(user_callback)
assert records[0] == 4
def test_aggregate_with_policy(self):
"""
Invoke aggregate() with policy
"""
policy = {'timeout': 1000}
query = self.client.query('test', 'demo')
query.select('name', 'test_age')
query.where(p.between('test_age', 1, 5))
query.apply('stream_example', 'count')
records = []
def user_callback(value):
records.append(value)
query.foreach(user_callback, policy)
assert records[0] == 4
def test_aggregate_with_extra_parameter(self):
"""
Invoke aggregate() with extra parameter
"""
policy = {'timeout': 1000}
with pytest.raises(TypeError) as typeError:
query = self.client.query('test', 'demo')
query.select('name', 'test_age')
query.where(p.between('test_age', 1, 5))
query.apply('stream_example', 'count')
result = None
def user_callback(value):
result = value
query.foreach(user_callback, policy, "")
assert "foreach() takes at most 2 arguments (3 given)" in typeError.value
def test_aggregate_with_extra_parameters_to_lua(self):
"""
Invoke aggregate() with extra arguments
"""
query = self.client.query('test', 'demo')
query.select('name', 'test_age')
query.where(p.between('test_age', 1, 5))
stream = None
query.apply('stream_example', 'count', [stream])
records = []
def user_callback(value):
records.append(value)
query.foreach(user_callback)
assert records[0] == 4
def test_aggregate_with_extra_parameter_in_lua(self):
"""
Invoke aggregate() with extra parameter in lua
"""
query = self.client.query('test', 'demo')
query.select('name', 'test_age')
query.where(p.between('test_age', 1, 5))
query.apply('stream_example', 'count_extra')
records = []
def user_callback(value):
records.append(value)
query.foreach(user_callback)
assert records[0] == 4
"""
def test_aggregate_with_less_parameter_in_lua(self):
#Invoke aggregate() with less parameter in lua
try:
query = self.client.query('test', 'demo')
query.select('name', 'test_age')
query.where(p.between('test_age', 1, 5))
query.apply('stream_example', 'count_less')
records = []
def user_callback(value):
records.append(value)
query.foreach(user_callback)
except ClientError as exception:
assert exception.code == -1L
"""
def test_aggregate_with_arguments_to_lua_function(self):
"""
Invoke aggregate() with unicode arguments to lua function.
"""
query = self.client.query('test', 'demo')
query.where(p.between('test_age', 0, 5))
query.apply('stream_example', 'group_count', [u"name", u"addr"])
rec = []
def callback(value):
rec.append(value)
query.foreach(callback)
assert rec == [
{u'name4': 1,
u'name2': 1,
u'name3': 1,
u'name0': 1,
u'name1': 1}
]
def test_aggregate_with_arguments_to_lua_function_having_float_value(self):
"""
Invoke aggregate() with unicode arguments to lua function having a
float value
"""
if TestAggregate.skip_old_server == True:
pytest.skip("Server does not support aggregate on float type as lua argument")
query = self.client.query('test', 'demo')
query.where(p.between('test_age', 0, 5))
query.apply('stream_example', 'double_group_count', [u"name", u"addr", 2.5])
rec = []
def callback(value):
rec.append(value)
query.foreach(callback)
assert rec == [
{u'name4': 3.5,
u'name2': 3.5,
u'name3': 3.5,
u'name0': 3.5,
u'name1': 3.5}
]
def test_aggregate_with_unicode_module_and_function_name(self):
"""
Invoke aggregate() with unicode module and function names
"""
query = self.client.query('test', 'demo')
query.select(u'name', 'test_age')
query.where(p.between('test_age', 1, 5))
query.apply(u'stream_example', u'count')
records = []
def user_callback(value):
records.append(value)
query.foreach(user_callback)
assert records[0] == 4
def test_aggregate_with_multiple_foreach_on_same_query_object(self):
"""
Invoke aggregate() with multiple foreach on same query object.
"""
query = self.client.query('test', 'demo')
query.select('name', 'test_age')
query.where(p.between('test_age', 1, 5))
query.apply('stream_example', 'count')
records = []
def user_callback(value):
records.append(value)
query.foreach(user_callback)
assert records[0] == 4
records = []
query.foreach(user_callback)
assert records[0] == 4
def test_aggregate_with_multiple_results_call_on_same_query_object(self):
"""
Invoke aggregate() with multiple foreach on same query object.
"""
query = self.client.query('test', 'demo')
query.select('name', 'test_age')
query.where(p.between('test_age', 1, 5))
query.apply('stream_example', 'count')
records = []
records = query.results()
assert records[0] == 4
records = []
records = query.results()
assert records[0] == 4
def test_aggregate_with_correct_parameters_without_connection(self):
"""
Invoke aggregate() with correct arguments without connection
"""
config = {'hosts': [('127.0.0.1', 3000)]}
client1 = aerospike.client(config)
try:
query = client1.query('test', 'demo')
query.select('name', 'test_age')
query.where(p.between('test_age', 1, 5))
query.apply('stream_example', 'count')
records = []
def user_callback(value):
records.append(value)
query.foreach(user_callback)
except ClusterError as exception:
assert exception.code == 11L
assert exception.msg == 'No connection to aerospike cluster'
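    # Distilled pattern exercised by the tests above (a sketch of the aggregation flow;
    # it assumes a reachable cluster and the stream_example.lua UDF registered in setup_class):
    #
    #   query = self.client.query('test', 'demo')
    #   query.select('name', 'test_age')
    #   query.where(p.between('test_age', 1, 5))
    #   query.apply('stream_example', 'count')
    #   records = []
    #   query.foreach(lambda value: records.append(value))   # or: records = query.results()
    #
    # Each test varies one element of this flow (predicate, UDF name/arguments, policy,
    # connection state) and asserts either the aggregated value or the raised exception.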
|
{
"content_hash": "ee4627579ec047cdfb7fd34c26768e30",
"timestamp": "",
"source": "github",
"line_count": 517,
"max_line_length": 92,
"avg_line_length": 31.448742746615086,
"alnum_prop": 0.5456055107940218,
"repo_name": "trupty/aerospike-client-python",
"id": "cb64a90f751ada01564e17e0f1650727767f086a",
"size": "16284",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/test_aggregate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "649569"
},
{
"name": "Lua",
"bytes": "6124"
},
{
"name": "Python",
"bytes": "524649"
},
{
"name": "Shell",
"bytes": "13148"
}
],
"symlink_target": ""
}
|
"""
Abstraction for an SSH2 channel.
"""
import binascii
import sys
import time
import threading
import socket
import os
from paramiko.common import *
from paramiko import util
from paramiko.message import Message
from paramiko.ssh_exception import SSHException
from paramiko.file import BufferedFile
from paramiko.buffered_pipe import BufferedPipe, PipeTimeout
from paramiko import pipe
# lower bound on the max packet size we'll accept from the remote host
MIN_PACKET_SIZE = 1024
class Channel (object):
"""
A secure tunnel across an SSH L{Transport}. A Channel is meant to behave
like a socket, and has an API that should be indistinguishable from the
python socket API.
Because SSH2 has a windowing kind of flow control, if you stop reading data
from a Channel and its buffer fills up, the server will be unable to send
you any more data until you read some of it. (This won't affect other
channels on the same transport -- all channels on a single transport are
flow-controlled independently.) Similarly, if the server isn't reading
data you send, calls to L{send} may block, unless you set a timeout. This
is exactly like a normal network socket, so it shouldn't be too surprising.
"""
def __init__(self, chanid):
"""
Create a new channel. The channel is not associated with any
particular session or L{Transport} until the Transport attaches it.
Normally you would only call this method from the constructor of a
subclass of L{Channel}.
@param chanid: the ID of this channel, as passed by an existing
L{Transport}.
@type chanid: int
"""
self.chanid = chanid
self.remote_chanid = 0
self.transport = None
self.active = False
self.eof_received = 0
self.eof_sent = 0
self.in_buffer = BufferedPipe()
self.in_stderr_buffer = BufferedPipe()
self.timeout = None
self.closed = False
self.ultra_debug = False
self.lock = threading.Lock()
self.out_buffer_cv = threading.Condition(self.lock)
self.in_window_size = 0
self.out_window_size = 0
self.in_max_packet_size = 0
self.out_max_packet_size = 0
self.in_window_threshold = 0
self.in_window_sofar = 0
self.status_event = threading.Event()
self._name = str(chanid)
self.logger = util.get_logger('paramiko.transport')
self._pipe = None
self.event = threading.Event()
self.event_ready = False
self.combine_stderr = False
self.exit_status = -1
self.origin_addr = None
def __del__(self):
try:
self.close()
except:
pass
def __repr__(self):
"""
Return a string representation of this object, for debugging.
@rtype: str
"""
out = '<paramiko.Channel %d' % self.chanid
if self.closed:
out += ' (closed)'
elif self.active:
if self.eof_received:
out += ' (EOF received)'
if self.eof_sent:
out += ' (EOF sent)'
out += ' (open) window=%d' % (self.out_window_size)
if len(self.in_buffer) > 0:
out += ' in-buffer=%d' % (len(self.in_buffer),)
out += ' -> ' + repr(self.transport)
out += '>'
return out
def get_pty(self, term='vt100', width=80, height=24, width_pixels=0,
height_pixels=0):
"""
Request a pseudo-terminal from the server. This is usually used right
after creating a client channel, to ask the server to provide some
basic terminal semantics for a shell invoked with L{invoke_shell}.
It isn't necessary (or desirable) to call this method if you're going
        to execute a single command with L{exec_command}.
@param term: the terminal type to emulate (for example, C{'vt100'})
@type term: str
@param width: width (in characters) of the terminal screen
@type width: int
@param height: height (in characters) of the terminal screen
@type height: int
@param width_pixels: width (in pixels) of the terminal screen
@type width_pixels: int
@param height_pixels: height (in pixels) of the terminal screen
@type height_pixels: int
@raise SSHException: if the request was rejected or the channel was
closed
"""
if self.closed or self.eof_received or self.eof_sent or not self.active:
raise SSHException('Channel is not open')
m = Message()
m.add_byte(chr(MSG_CHANNEL_REQUEST))
m.add_int(self.remote_chanid)
m.add_string('pty-req')
m.add_boolean(True)
m.add_string(term)
m.add_int(width)
m.add_int(height)
m.add_int(width_pixels)
m.add_int(height_pixels)
m.add_string('')
self._event_pending()
self.transport._send_user_message(m)
self._wait_for_event()
def invoke_shell(self):
"""
Request an interactive shell session on this channel. If the server
allows it, the channel will then be directly connected to the stdin,
stdout, and stderr of the shell.
Normally you would call L{get_pty} before this, in which case the
shell will operate through the pty, and the channel will be connected
to the stdin and stdout of the pty.
When the shell exits, the channel will be closed and can't be reused.
You must open a new channel if you wish to open another shell.
@raise SSHException: if the request was rejected or the channel was
closed
"""
if self.closed or self.eof_received or self.eof_sent or not self.active:
raise SSHException('Channel is not open')
m = Message()
m.add_byte(chr(MSG_CHANNEL_REQUEST))
m.add_int(self.remote_chanid)
m.add_string('shell')
m.add_boolean(1)
self._event_pending()
self.transport._send_user_message(m)
self._wait_for_event()
def exec_command(self, command):
"""
Execute a command on the server. If the server allows it, the channel
will then be directly connected to the stdin, stdout, and stderr of
the command being executed.
When the command finishes executing, the channel will be closed and
can't be reused. You must open a new channel if you wish to execute
another command.
@param command: a shell command to execute.
@type command: str
@raise SSHException: if the request was rejected or the channel was
closed
"""
if self.closed or self.eof_received or self.eof_sent or not self.active:
raise SSHException('Channel is not open')
m = Message()
m.add_byte(chr(MSG_CHANNEL_REQUEST))
m.add_int(self.remote_chanid)
m.add_string('exec')
m.add_boolean(True)
m.add_string(command)
self._event_pending()
self.transport._send_user_message(m)
self._wait_for_event()
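    # Client-side usage sketch (illustrative only, not part of this module). Given an
    # authenticated paramiko Transport `t`, the typical exec_command flow looks like:
    #
    #   chan = t.open_session()
    #   chan.exec_command('ls -l')
    #   output = chan.recv(4096)             # read stdout (see recv below)
    #   status = chan.recv_exit_status()     # block until the remote command exits
    #   chan.close()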
def invoke_subsystem(self, subsystem):
"""
Request a subsystem on the server (for example, C{sftp}). If the
server allows it, the channel will then be directly connected to the
requested subsystem.
When the subsystem finishes, the channel will be closed and can't be
reused.
@param subsystem: name of the subsystem being requested.
@type subsystem: str
@raise SSHException: if the request was rejected or the channel was
closed
"""
if self.closed or self.eof_received or self.eof_sent or not self.active:
raise SSHException('Channel is not open')
m = Message()
m.add_byte(chr(MSG_CHANNEL_REQUEST))
m.add_int(self.remote_chanid)
m.add_string('subsystem')
m.add_boolean(True)
m.add_string(subsystem)
self._event_pending()
self.transport._send_user_message(m)
self._wait_for_event()
def resize_pty(self, width=80, height=24, width_pixels=0, height_pixels=0):
"""
Resize the pseudo-terminal. This can be used to change the width and
height of the terminal emulation created in a previous L{get_pty} call.
@param width: new width (in characters) of the terminal screen
@type width: int
@param height: new height (in characters) of the terminal screen
@type height: int
@param width_pixels: new width (in pixels) of the terminal screen
@type width_pixels: int
@param height_pixels: new height (in pixels) of the terminal screen
@type height_pixels: int
@raise SSHException: if the request was rejected or the channel was
closed
"""
if self.closed or self.eof_received or self.eof_sent or not self.active:
raise SSHException('Channel is not open')
m = Message()
m.add_byte(chr(MSG_CHANNEL_REQUEST))
m.add_int(self.remote_chanid)
m.add_string('window-change')
m.add_boolean(False)
m.add_int(width)
m.add_int(height)
m.add_int(width_pixels)
m.add_int(height_pixels)
self.transport._send_user_message(m)
def exit_status_ready(self):
"""
Return true if the remote process has exited and returned an exit
status. You may use this to poll the process status if you don't
want to block in L{recv_exit_status}. Note that the server may not
return an exit status in some cases (like bad servers).
@return: True if L{recv_exit_status} will return immediately
@rtype: bool
@since: 1.7.3
"""
return self.closed or self.status_event.isSet()
def recv_exit_status(self):
"""
Return the exit status from the process on the server. This is
        mostly useful for retrieving the results of an L{exec_command}.
If the command hasn't finished yet, this method will wait until
it does, or until the channel is closed. If no exit status is
provided by the server, -1 is returned.
@return: the exit code of the process on the server.
@rtype: int
@since: 1.2
"""
self.status_event.wait()
assert self.status_event.isSet()
return self.exit_status
def send_exit_status(self, status):
"""
Send the exit status of an executed command to the client. (This
really only makes sense in server mode.) Many clients expect to
get some sort of status code back from an executed command after
it completes.
@param status: the exit code of the process
@type status: int
@since: 1.2
"""
# in many cases, the channel will not still be open here.
# that's fine.
m = Message()
m.add_byte(chr(MSG_CHANNEL_REQUEST))
m.add_int(self.remote_chanid)
m.add_string('exit-status')
m.add_boolean(False)
m.add_int(status)
self.transport._send_user_message(m)
def request_x11(self, screen_number=0, auth_protocol=None, auth_cookie=None,
single_connection=False, handler=None):
"""
Request an x11 session on this channel. If the server allows it,
further x11 requests can be made from the server to the client,
when an x11 application is run in a shell session.
From RFC4254::
It is RECOMMENDED that the 'x11 authentication cookie' that is
sent be a fake, random cookie, and that the cookie be checked and
replaced by the real cookie when a connection request is received.
If you omit the auth_cookie, a new secure random 128-bit value will be
generated, used, and returned. You will need to use this value to
verify incoming x11 requests and replace them with the actual local
        x11 cookie (which requires some knowledge of the x11 protocol).
If a handler is passed in, the handler is called from another thread
whenever a new x11 connection arrives. The default handler queues up
incoming x11 connections, which may be retrieved using
L{Transport.accept}. The handler's calling signature is::
handler(channel: Channel, (address: str, port: int))
@param screen_number: the x11 screen number (0, 10, etc)
@type screen_number: int
@param auth_protocol: the name of the X11 authentication method used;
if none is given, C{"MIT-MAGIC-COOKIE-1"} is used
@type auth_protocol: str
@param auth_cookie: hexadecimal string containing the x11 auth cookie;
if none is given, a secure random 128-bit value is generated
@type auth_cookie: str
@param single_connection: if True, only a single x11 connection will be
forwarded (by default, any number of x11 connections can arrive
over this session)
@type single_connection: bool
@param handler: an optional handler to use for incoming X11 connections
@type handler: function
@return: the auth_cookie used
"""
if self.closed or self.eof_received or self.eof_sent or not self.active:
raise SSHException('Channel is not open')
if auth_protocol is None:
auth_protocol = 'MIT-MAGIC-COOKIE-1'
if auth_cookie is None:
auth_cookie = binascii.hexlify(self.transport.rng.read(16))
m = Message()
m.add_byte(chr(MSG_CHANNEL_REQUEST))
m.add_int(self.remote_chanid)
m.add_string('x11-req')
m.add_boolean(True)
m.add_boolean(single_connection)
m.add_string(auth_protocol)
m.add_string(auth_cookie)
m.add_int(screen_number)
self._event_pending()
self.transport._send_user_message(m)
self._wait_for_event()
self.transport._set_x11_handler(handler)
return auth_cookie
def request_forward_agent(self, handler):
"""
        Request SSH agent forwarding on this channel.
        This is only valid for an ssh-agent from OpenSSH.
        @param handler: a required handler to use for incoming SSH agent connections
        @type handler: function
        @return: True if the request was sent (at present this is always the case)
        @rtype: boolean
        @raise SSHException: if the channel is not open
"""
if self.closed or self.eof_received or self.eof_sent or not self.active:
raise SSHException('Channel is not open')
m = Message()
m.add_byte(chr(MSG_CHANNEL_REQUEST))
m.add_int(self.remote_chanid)
m.add_string('auth-agent-req@openssh.com')
m.add_boolean(False)
self.transport._send_user_message(m)
self.transport._set_forward_agent_handler(handler)
return True
def get_transport(self):
"""
Return the L{Transport} associated with this channel.
@return: the L{Transport} that was used to create this channel.
@rtype: L{Transport}
"""
return self.transport
def set_name(self, name):
"""
Set a name for this channel. Currently it's only used to set the name
of the channel in logfile entries. The name can be fetched with the
L{get_name} method.
@param name: new channel name
@type name: str
"""
self._name = name
def get_name(self):
"""
Get the name of this channel that was previously set by L{set_name}.
@return: the name of this channel.
@rtype: str
"""
return self._name
def get_id(self):
"""
Return the ID # for this channel. The channel ID is unique across
a L{Transport} and usually a small number. It's also the number
passed to L{ServerInterface.check_channel_request} when determining
whether to accept a channel request in server mode.
@return: the ID of this channel.
@rtype: int
"""
return self.chanid
def set_combine_stderr(self, combine):
"""
Set whether stderr should be combined into stdout on this channel.
The default is C{False}, but in some cases it may be convenient to
have both streams combined.
If this is C{False}, and L{exec_command} is called (or C{invoke_shell}
with no pty), output to stderr will not show up through the L{recv}
and L{recv_ready} calls. You will have to use L{recv_stderr} and
L{recv_stderr_ready} to get stderr output.
If this is C{True}, data will never show up via L{recv_stderr} or
L{recv_stderr_ready}.
@param combine: C{True} if stderr output should be combined into
stdout on this channel.
@type combine: bool
@return: previous setting.
@rtype: bool
@since: 1.1
"""
data = ''
self.lock.acquire()
try:
old = self.combine_stderr
self.combine_stderr = combine
if combine and not old:
# copy old stderr buffer into primary buffer
data = self.in_stderr_buffer.empty()
finally:
self.lock.release()
if len(data) > 0:
self._feed(data)
return old
### socket API
def settimeout(self, timeout):
"""
Set a timeout on blocking read/write operations. The C{timeout}
argument can be a nonnegative float expressing seconds, or C{None}. If
a float is given, subsequent channel read/write operations will raise
a timeout exception if the timeout period value has elapsed before the
operation has completed. Setting a timeout of C{None} disables
timeouts on socket operations.
C{chan.settimeout(0.0)} is equivalent to C{chan.setblocking(0)};
C{chan.settimeout(None)} is equivalent to C{chan.setblocking(1)}.
@param timeout: seconds to wait for a pending read/write operation
before raising C{socket.timeout}, or C{None} for no timeout.
@type timeout: float
"""
self.timeout = timeout
def gettimeout(self):
"""
Returns the timeout in seconds (as a float) associated with socket
operations, or C{None} if no timeout is set. This reflects the last
call to L{setblocking} or L{settimeout}.
@return: timeout in seconds, or C{None}.
@rtype: float
"""
return self.timeout
def setblocking(self, blocking):
"""
Set blocking or non-blocking mode of the channel: if C{blocking} is 0,
the channel is set to non-blocking mode; otherwise it's set to blocking
mode. Initially all channels are in blocking mode.
In non-blocking mode, if a L{recv} call doesn't find any data, or if a
L{send} call can't immediately dispose of the data, an error exception
is raised. In blocking mode, the calls block until they can proceed. An
EOF condition is considered "immediate data" for L{recv}, so if the
channel is closed in the read direction, it will never block.
C{chan.setblocking(0)} is equivalent to C{chan.settimeout(0)};
C{chan.setblocking(1)} is equivalent to C{chan.settimeout(None)}.
@param blocking: 0 to set non-blocking mode; non-0 to set blocking
mode.
@type blocking: int
"""
if blocking:
self.settimeout(None)
else:
self.settimeout(0.0)
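    # Usage sketch for the timeout API above (illustrative only):
    #
    #   chan.settimeout(5.0)        # recv/send raise socket.timeout after 5 seconds
    #   try:
    #       data = chan.recv(1024)
    #   except socket.timeout:
    #       data = ''
    #   chan.setblocking(1)         # back to blocking mode (same as settimeout(None))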
def getpeername(self):
"""
Return the address of the remote side of this Channel, if possible.
This is just a wrapper around C{'getpeername'} on the Transport, used
to provide enough of a socket-like interface to allow asyncore to work.
(asyncore likes to call C{'getpeername'}.)
        @return: the address of the remote host, if known
@rtype: tuple(str, int)
"""
return self.transport.getpeername()
def close(self):
"""
Close the channel. All future read/write operations on the channel
will fail. The remote end will receive no more data (after queued data
is flushed). Channels are automatically closed when their L{Transport}
is closed or when they are garbage collected.
"""
self.lock.acquire()
try:
# only close the pipe when the user explicitly closes the channel.
# otherwise they will get unpleasant surprises. (and do it before
# checking self.closed, since the remote host may have already
# closed the connection.)
if self._pipe is not None:
self._pipe.close()
self._pipe = None
if not self.active or self.closed:
return
msgs = self._close_internal()
finally:
self.lock.release()
for m in msgs:
if m is not None:
self.transport._send_user_message(m)
def recv_ready(self):
"""
Returns true if data is buffered and ready to be read from this
channel. A C{False} result does not mean that the channel has closed;
it means you may need to wait before more data arrives.
@return: C{True} if a L{recv} call on this channel would immediately
return at least one byte; C{False} otherwise.
@rtype: boolean
"""
return self.in_buffer.read_ready()
def recv(self, nbytes):
"""
Receive data from the channel. The return value is a string
representing the data received. The maximum amount of data to be
received at once is specified by C{nbytes}. If a string of length zero
is returned, the channel stream has closed.
@param nbytes: maximum number of bytes to read.
@type nbytes: int
@return: data.
@rtype: str
@raise socket.timeout: if no data is ready before the timeout set by
L{settimeout}.
"""
try:
out = self.in_buffer.read(nbytes, self.timeout)
except PipeTimeout, e:
raise socket.timeout()
ack = self._check_add_window(len(out))
# no need to hold the channel lock when sending this
if ack > 0:
m = Message()
m.add_byte(chr(MSG_CHANNEL_WINDOW_ADJUST))
m.add_int(self.remote_chanid)
m.add_int(ack)
self.transport._send_user_message(m)
return out
def recv_stderr_ready(self):
"""
Returns true if data is buffered and ready to be read from this
channel's stderr stream. Only channels using L{exec_command} or
L{invoke_shell} without a pty will ever have data on the stderr
stream.
@return: C{True} if a L{recv_stderr} call on this channel would
immediately return at least one byte; C{False} otherwise.
@rtype: boolean
@since: 1.1
"""
return self.in_stderr_buffer.read_ready()
def recv_stderr(self, nbytes):
"""
Receive data from the channel's stderr stream. Only channels using
L{exec_command} or L{invoke_shell} without a pty will ever have data
on the stderr stream. The return value is a string representing the
data received. The maximum amount of data to be received at once is
specified by C{nbytes}. If a string of length zero is returned, the
channel stream has closed.
@param nbytes: maximum number of bytes to read.
@type nbytes: int
@return: data.
@rtype: str
@raise socket.timeout: if no data is ready before the timeout set by
L{settimeout}.
@since: 1.1
"""
try:
out = self.in_stderr_buffer.read(nbytes, self.timeout)
except PipeTimeout, e:
raise socket.timeout()
ack = self._check_add_window(len(out))
# no need to hold the channel lock when sending this
if ack > 0:
m = Message()
m.add_byte(chr(MSG_CHANNEL_WINDOW_ADJUST))
m.add_int(self.remote_chanid)
m.add_int(ack)
self.transport._send_user_message(m)
return out
def send_ready(self):
"""
Returns true if data can be written to this channel without blocking.
This means the channel is either closed (so any write attempt would
return immediately) or there is at least one byte of space in the
outbound buffer. If there is at least one byte of space in the
outbound buffer, a L{send} call will succeed immediately and return
the number of bytes actually written.
@return: C{True} if a L{send} call on this channel would immediately
succeed or fail
@rtype: boolean
"""
self.lock.acquire()
try:
if self.closed or self.eof_sent:
return True
return self.out_window_size > 0
finally:
self.lock.release()
def send(self, s):
"""
Send data to the channel. Returns the number of bytes sent, or 0 if
the channel stream is closed. Applications are responsible for
checking that all data has been sent: if only some of the data was
transmitted, the application needs to attempt delivery of the remaining
data.
@param s: data to send
@type s: str
@return: number of bytes actually sent
@rtype: int
@raise socket.timeout: if no data could be sent before the timeout set
by L{settimeout}.
"""
size = len(s)
self.lock.acquire()
try:
size = self._wait_for_send_window(size)
if size == 0:
# eof or similar
return 0
m = Message()
m.add_byte(chr(MSG_CHANNEL_DATA))
m.add_int(self.remote_chanid)
m.add_string(s[:size])
finally:
self.lock.release()
# Note: We release self.lock before calling _send_user_message.
# Otherwise, we can deadlock during re-keying.
self.transport._send_user_message(m)
return size
def send_stderr(self, s):
"""
Send data to the channel on the "stderr" stream. This is normally
only used by servers to send output from shell commands -- clients
won't use this. Returns the number of bytes sent, or 0 if the channel
stream is closed. Applications are responsible for checking that all
data has been sent: if only some of the data was transmitted, the
application needs to attempt delivery of the remaining data.
@param s: data to send.
@type s: str
@return: number of bytes actually sent.
@rtype: int
@raise socket.timeout: if no data could be sent before the timeout set
by L{settimeout}.
@since: 1.1
"""
size = len(s)
self.lock.acquire()
try:
size = self._wait_for_send_window(size)
if size == 0:
# eof or similar
return 0
m = Message()
m.add_byte(chr(MSG_CHANNEL_EXTENDED_DATA))
m.add_int(self.remote_chanid)
m.add_int(1)
m.add_string(s[:size])
finally:
self.lock.release()
# Note: We release self.lock before calling _send_user_message.
# Otherwise, we can deadlock during re-keying.
self.transport._send_user_message(m)
return size
def sendall(self, s):
"""
Send data to the channel, without allowing partial results. Unlike
L{send}, this method continues to send data from the given string until
either all data has been sent or an error occurs. Nothing is returned.
@param s: data to send.
@type s: str
@raise socket.timeout: if sending stalled for longer than the timeout
set by L{settimeout}.
@raise socket.error: if an error occured before the entire string was
sent.
        @note: If the channel is closed while only part of the data has been
sent, there is no way to determine how much data (if any) was sent.
This is irritating, but identically follows python's API.
"""
while s:
if self.closed:
# this doesn't seem useful, but it is the documented behavior of Socket
raise socket.error('Socket is closed')
sent = self.send(s)
s = s[sent:]
return None
def sendall_stderr(self, s):
"""
Send data to the channel's "stderr" stream, without allowing partial
results. Unlike L{send_stderr}, this method continues to send data
from the given string until all data has been sent or an error occurs.
Nothing is returned.
@param s: data to send to the client as "stderr" output.
@type s: str
@raise socket.timeout: if sending stalled for longer than the timeout
set by L{settimeout}.
@raise socket.error: if an error occured before the entire string was
sent.
@since: 1.1
"""
while s:
if self.closed:
raise socket.error('Socket is closed')
sent = self.send_stderr(s)
s = s[sent:]
return None
def makefile(self, *params):
"""
Return a file-like object associated with this channel. The optional
C{mode} and C{bufsize} arguments are interpreted the same way as by
the built-in C{file()} function in python.
@return: object which can be used for python file I/O.
@rtype: L{ChannelFile}
"""
return ChannelFile(*([self] + list(params)))
def makefile_stderr(self, *params):
"""
Return a file-like object associated with this channel's stderr
stream. Only channels using L{exec_command} or L{invoke_shell}
without a pty will ever have data on the stderr stream.
The optional C{mode} and C{bufsize} arguments are interpreted the
same way as by the built-in C{file()} function in python. For a
client, it only makes sense to open this file for reading. For a
server, it only makes sense to open this file for writing.
@return: object which can be used for python file I/O.
@rtype: L{ChannelFile}
@since: 1.1
"""
return ChannelStderrFile(*([self] + list(params)))
def fileno(self):
"""
        Returns an OS-level file descriptor which can be used for polling, but
        I{not} for reading or writing.  This is primarily to allow python's
C{select} module to work.
The first time C{fileno} is called on a channel, a pipe is created to
simulate real OS-level file descriptor (FD) behavior. Because of this,
two OS-level FDs are created, which will use up FDs faster than normal.
(You won't notice this effect unless you have hundreds of channels
open at the same time.)
@return: an OS-level file descriptor
@rtype: int
@warning: This method causes channel reads to be slightly less
efficient.
"""
self.lock.acquire()
try:
if self._pipe is not None:
return self._pipe.fileno()
# create the pipe and feed in any existing data
self._pipe = pipe.make_pipe()
p1, p2 = pipe.make_or_pipe(self._pipe)
self.in_buffer.set_event(p1)
self.in_stderr_buffer.set_event(p2)
return self._pipe.fileno()
finally:
self.lock.release()
def shutdown(self, how):
"""
Shut down one or both halves of the connection. If C{how} is 0,
further receives are disallowed. If C{how} is 1, further sends
are disallowed. If C{how} is 2, further sends and receives are
disallowed. This closes the stream in one or both directions.
@param how: 0 (stop receiving), 1 (stop sending), or 2 (stop
receiving and sending).
@type how: int
"""
if (how == 0) or (how == 2):
# feign "read" shutdown
self.eof_received = 1
if (how == 1) or (how == 2):
self.lock.acquire()
try:
m = self._send_eof()
finally:
self.lock.release()
if m is not None:
self.transport._send_user_message(m)
def shutdown_read(self):
"""
Shutdown the receiving side of this socket, closing the stream in
the incoming direction. After this call, future reads on this
channel will fail instantly. This is a convenience method, equivalent
to C{shutdown(0)}, for people who don't make it a habit to
memorize unix constants from the 1970s.
@since: 1.2
"""
self.shutdown(0)
def shutdown_write(self):
"""
Shutdown the sending side of this socket, closing the stream in
the outgoing direction. After this call, future writes on this
channel will fail instantly. This is a convenience method, equivalent
to C{shutdown(1)}, for people who don't make it a habit to
memorize unix constants from the 1970s.
@since: 1.2
"""
self.shutdown(1)
### calls from Transport
def _set_transport(self, transport):
self.transport = transport
self.logger = util.get_logger(self.transport.get_log_channel())
def _set_window(self, window_size, max_packet_size):
self.in_window_size = window_size
self.in_max_packet_size = max_packet_size
# threshold of bytes we receive before we bother to send a window update
self.in_window_threshold = window_size // 10
self.in_window_sofar = 0
self._log(DEBUG, 'Max packet in: %d bytes' % max_packet_size)
def _set_remote_channel(self, chanid, window_size, max_packet_size):
self.remote_chanid = chanid
self.out_window_size = window_size
self.out_max_packet_size = max(max_packet_size, MIN_PACKET_SIZE)
self.active = 1
self._log(DEBUG, 'Max packet out: %d bytes' % max_packet_size)
def _request_success(self, m):
self._log(DEBUG, 'Sesch channel %d request ok' % self.chanid)
self.event_ready = True
self.event.set()
return
def _request_failed(self, m):
self.lock.acquire()
try:
msgs = self._close_internal()
finally:
self.lock.release()
for m in msgs:
if m is not None:
self.transport._send_user_message(m)
def _feed(self, m):
if type(m) is str:
# passed from _feed_extended
s = m
else:
s = m.get_string()
self.in_buffer.feed(s)
def _feed_extended(self, m):
code = m.get_int()
s = m.get_string()
if code != 1:
self._log(ERROR, 'unknown extended_data type %d; discarding' % code)
return
if self.combine_stderr:
self._feed(s)
else:
self.in_stderr_buffer.feed(s)
def _window_adjust(self, m):
nbytes = m.get_int()
self.lock.acquire()
try:
if self.ultra_debug:
self._log(DEBUG, 'window up %d' % nbytes)
self.out_window_size += nbytes
self.out_buffer_cv.notifyAll()
finally:
self.lock.release()
def _handle_request(self, m):
key = m.get_string()
want_reply = m.get_boolean()
server = self.transport.server_object
ok = False
if key == 'exit-status':
self.exit_status = m.get_int()
self.status_event.set()
ok = True
elif key == 'xon-xoff':
# ignore
ok = True
elif key == 'pty-req':
term = m.get_string()
width = m.get_int()
height = m.get_int()
pixelwidth = m.get_int()
pixelheight = m.get_int()
modes = m.get_string()
if server is None:
ok = False
else:
ok = server.check_channel_pty_request(self, term, width, height, pixelwidth,
pixelheight, modes)
elif key == 'shell':
if server is None:
ok = False
else:
ok = server.check_channel_shell_request(self)
elif key == 'env':
name = m.get_string()
value = m.get_string()
if server is None:
ok = False
else:
ok = server.check_channel_env_request(self, name, value)
elif key == 'exec':
cmd = m.get_string()
if server is None:
ok = False
else:
ok = server.check_channel_exec_request(self, cmd)
elif key == 'subsystem':
name = m.get_string()
if server is None:
ok = False
else:
ok = server.check_channel_subsystem_request(self, name)
elif key == 'window-change':
width = m.get_int()
height = m.get_int()
pixelwidth = m.get_int()
pixelheight = m.get_int()
if server is None:
ok = False
else:
ok = server.check_channel_window_change_request(self, width, height, pixelwidth,
pixelheight)
elif key == 'x11-req':
single_connection = m.get_boolean()
auth_proto = m.get_string()
auth_cookie = m.get_string()
screen_number = m.get_int()
if server is None:
ok = False
else:
ok = server.check_channel_x11_request(self, single_connection,
auth_proto, auth_cookie, screen_number)
elif key == 'auth-agent-req@openssh.com':
if server is None:
ok = False
else:
ok = server.check_channel_forward_agent_request(self)
else:
self._log(DEBUG, 'Unhandled channel request "%s"' % key)
ok = False
if want_reply:
m = Message()
if ok:
m.add_byte(chr(MSG_CHANNEL_SUCCESS))
else:
m.add_byte(chr(MSG_CHANNEL_FAILURE))
m.add_int(self.remote_chanid)
self.transport._send_user_message(m)
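    # Illustrative sketch (not part of the original source): the check_channel_*
    # calls dispatched above are implemented by the application's
    # ServerInterface subclass. A minimal server that only accepts "exec"
    # requests could look roughly like this (the surrounding transport setup
    # is assumed):
    #
    #     from paramiko import ServerInterface, OPEN_SUCCEEDED
    #
    #     class ExecOnlyServer (ServerInterface):
    #         def check_channel_request(self, kind, chanid):
    #             return OPEN_SUCCEEDED
    #         def check_channel_exec_request(self, channel, command):
    #             # run ``command``, write output with channel.sendall(),
    #             # then call channel.send_exit_status(0)
    #             return True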
def _handle_eof(self, m):
self.lock.acquire()
try:
if not self.eof_received:
self.eof_received = True
self.in_buffer.close()
self.in_stderr_buffer.close()
if self._pipe is not None:
self._pipe.set_forever()
finally:
self.lock.release()
self._log(DEBUG, 'EOF received (%s)', self._name)
def _handle_close(self, m):
self.lock.acquire()
try:
msgs = self._close_internal()
self.transport._unlink_channel(self.chanid)
finally:
self.lock.release()
for m in msgs:
if m is not None:
self.transport._send_user_message(m)
### internals...
def _log(self, level, msg, *args):
self.logger.log(level, "[chan " + self._name + "] " + msg, *args)
def _event_pending(self):
self.event.clear()
self.event_ready = False
def _wait_for_event(self):
self.event.wait()
assert self.event.isSet()
if self.event_ready:
return
e = self.transport.get_exception()
if e is None:
e = SSHException('Channel closed.')
raise e
def _set_closed(self):
# you are holding the lock.
self.closed = True
self.in_buffer.close()
self.in_stderr_buffer.close()
self.out_buffer_cv.notifyAll()
# Notify any waiters that we are closed
self.event.set()
self.status_event.set()
if self._pipe is not None:
self._pipe.set_forever()
def _send_eof(self):
# you are holding the lock.
if self.eof_sent:
return None
m = Message()
m.add_byte(chr(MSG_CHANNEL_EOF))
m.add_int(self.remote_chanid)
self.eof_sent = True
self._log(DEBUG, 'EOF sent (%s)', self._name)
return m
def _close_internal(self):
# you are holding the lock.
if not self.active or self.closed:
return None, None
m1 = self._send_eof()
m2 = Message()
m2.add_byte(chr(MSG_CHANNEL_CLOSE))
m2.add_int(self.remote_chanid)
self._set_closed()
# can't unlink from the Transport yet -- the remote side may still
# try to send meta-data (exit-status, etc)
return m1, m2
def _unlink(self):
# server connection could die before we become active: still signal the close!
if self.closed:
return
self.lock.acquire()
try:
self._set_closed()
self.transport._unlink_channel(self.chanid)
finally:
self.lock.release()
def _check_add_window(self, n):
self.lock.acquire()
try:
if self.closed or self.eof_received or not self.active:
return 0
if self.ultra_debug:
self._log(DEBUG, 'addwindow %d' % n)
self.in_window_sofar += n
if self.in_window_sofar <= self.in_window_threshold:
return 0
if self.ultra_debug:
self._log(DEBUG, 'addwindow send %d' % self.in_window_sofar)
out = self.in_window_sofar
self.in_window_sofar = 0
return out
finally:
self.lock.release()
def _wait_for_send_window(self, size):
"""
(You are already holding the lock.)
Wait for the send window to open up, and allocate up to C{size} bytes
for transmission. If no space opens up before the timeout, a timeout
exception is raised. Returns the number of bytes available to send
(may be less than requested).
"""
# you are already holding the lock
if self.closed or self.eof_sent:
return 0
if self.out_window_size == 0:
# should we block?
if self.timeout == 0.0:
raise socket.timeout()
# loop here in case we get woken up but a different thread has filled the buffer
timeout = self.timeout
while self.out_window_size == 0:
if self.closed or self.eof_sent:
return 0
then = time.time()
self.out_buffer_cv.wait(timeout)
if timeout != None:
timeout -= time.time() - then
if timeout <= 0.0:
raise socket.timeout()
# we have some window to squeeze into
if self.closed or self.eof_sent:
return 0
if self.out_window_size < size:
size = self.out_window_size
if self.out_max_packet_size - 64 < size:
size = self.out_max_packet_size - 64
self.out_window_size -= size
if self.ultra_debug:
self._log(DEBUG, 'window down to %d' % self.out_window_size)
return size
class ChannelFile (BufferedFile):
"""
A file-like wrapper around L{Channel}. A ChannelFile is created by calling
L{Channel.makefile}.
@bug: To correctly emulate the file object created from a socket's
C{makefile} method, a L{Channel} and its C{ChannelFile} should be able
to be closed or garbage-collected independently. Currently, closing
the C{ChannelFile} does nothing but flush the buffer.
"""
def __init__(self, channel, mode = 'r', bufsize = -1):
self.channel = channel
BufferedFile.__init__(self)
self._set_mode(mode, bufsize)
def __repr__(self):
"""
Returns a string representation of this object, for debugging.
@rtype: str
"""
return '<paramiko.ChannelFile from ' + repr(self.channel) + '>'
def _read(self, size):
return self.channel.recv(size)
def _write(self, data):
self.channel.sendall(data)
return len(data)
class ChannelStderrFile (ChannelFile):
def __init__(self, channel, mode = 'r', bufsize = -1):
ChannelFile.__init__(self, channel, mode, bufsize)
def _read(self, size):
return self.channel.recv_stderr(size)
def _write(self, data):
self.channel.sendall_stderr(data)
return len(data)
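# Illustrative usage sketch (not part of the original source): the file-like
# wrappers above are what a client normally sees. Assuming ``client`` is a
# connected paramiko SSHClient:
#
#     stdin, stdout, stderr = client.exec_command('uname -a')
#     print stdout.read()      # stdout and stderr are ChannelFile objects
#     print stderr.read()
#
# or, starting from a raw Channel:
#
#     chan = client.get_transport().open_session()
#     chan.exec_command('uname -a')
#     print chan.makefile('r').read()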
# vim: set shiftwidth=4 expandtab :
|
{
"content_hash": "e3628d2c3d7c30eeb243e8e8b9659df3",
"timestamp": "",
"source": "github",
"line_count": 1261,
"max_line_length": 96,
"avg_line_length": 36.45915939730373,
"alnum_prop": 0.5794453507340946,
"repo_name": "midma101/AndIWasJustGoingToBed",
"id": "d1e6333c99bde4f7085b3bdbb5f018077ffbd559",
"size": "46791",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": ".venv/lib/python2.7/site-packages/paramiko/channel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "29472"
},
{
"name": "JavaScript",
"bytes": "3277638"
},
{
"name": "PHP",
"bytes": "4548"
},
{
"name": "Python",
"bytes": "15564"
}
],
"symlink_target": ""
}
|
print("Loading tensorflow...")
import tensorflow as tf
from libs import utils, gif
# dja
import numpy as np
import matplotlib.pyplot as plt
import os
plt.style.use('bmh')
import datetime
#np.set_printoptions(threshold=np.inf) # display FULL array (infinite)
plt.ion()
plt.figure(figsize=(5, 5))
TID=datetime.date.today().strftime("%Y%m%d")+"_"+datetime.datetime.now().time().strftime("%H%M%S")
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
from matplotlib.cbook import MatplotlibDeprecationWarning
warnings.filterwarnings("ignore", category=MatplotlibDeprecationWarning)
def wait(n):
#plt.pause(n)
plt.pause(1)
#input("(press enter)")
fncontent="anu455.jpg"
#fnstyle="WP_000478.jpg"
fnstyle="Sharp_Scientific_Calculator_480x800.jpg"
#fncontent=os.path.expanduser("~/fot2.jpg")
#fnstyle="letters-beige.jpg"
# WARNING! 500 MB download (VGG16 model)
from libs import vgg16
print("DOWNLOADING VGG16")
net = vgg16.get_vgg_model()
g = tf.Graph()
with tf.Session(graph=g) as sess, g.device('/cpu:0'):
tf.import_graph_def(net['graph_def'], name='vgg')
names = [op.name for op in g.get_operations()]
x = g.get_tensor_by_name(names[0] + ':0')
softmax = g.get_tensor_by_name(names[-2] + ':0')
#from skimage.data import coffee
#og = coffee()
og=plt.imread(fncontent)
#plt.imshow(og)
print("IMAGE CONTENT: ", fncontent)
wait(3)
img = vgg16.preprocess(og, dsize=(448,448))
#plt.imshow(vgg16.deprocess(img))
#wait(3)
img_4d = img[np.newaxis]
#
# Dropout
#
#
# Defining the Content Features
#
# DEFINES content_layer!
with tf.Session(graph=g) as sess, g.device('/cpu:0'):
content_layer = 'vgg/conv4_2/conv4_2:0'
content_features = g.get_tensor_by_name(content_layer).eval(
session=sess,
feed_dict={x: img_4d,
'vgg/dropout_1/random_uniform:0': [[1.0]],
'vgg/dropout/random_uniform:0': [[1.0]]
})
print("content_features.shape: ", content_features.shape)
#
# Defining the Style Features
#
# Note: Unlike in the lecture, I've cropped the image a bit as the
# borders took over too much...
style_og = plt.imread(fnstyle)##[15:-15, 190:-190, :]
#plt.title(fnstyle)
#plt.imshow(style_og)
print("IMAGE STYLE: ", fnstyle)
wait(3)
style_img = vgg16.preprocess(style_og, dsize=(448,448))
style_img_4d = style_img[np.newaxis]
style_layers = ['vgg/conv1_1/conv1_1:0',
'vgg/conv2_1/conv2_1:0',
'vgg/conv3_1/conv3_1:0',
'vgg/conv4_1/conv4_1:0',
'vgg/conv5_1/conv5_1:0']
style_activations = []
with tf.Session(graph=g) as sess, g.device('/cpu:0'):
for style_i in style_layers:
style_activation_i = g.get_tensor_by_name(style_i).eval(
feed_dict={
x: style_img_4d,
'vgg/dropout_1/random_uniform:0': [[1.0]],
'vgg/dropout/random_uniform:0': [[1.0]]})
style_activations.append(style_activation_i)
style_features = []
for style_activation_i in style_activations:
s_i = np.reshape(style_activation_i, [-1, style_activation_i.shape[-1]])
gram_matrix = np.matmul(s_i.T, s_i) / s_i.size
style_features.append(gram_matrix.astype(np.float32))
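# For reference (restating the loop above, not new behaviour): with a layer
# activation A reshaped to [n_positions, n_channels], the style feature is the
# normalized Gram matrix
#     G = A^T . A / A.size
# i.e. channel-to-channel correlations summed over spatial positions and
# divided by the number of entries; matching G between the style image and the
# synthesis is what the style loss below penalizes.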
#
# Remapping the Input
#
tf.reset_default_graph()
g = tf.Graph()
net = vgg16.get_vgg_model()
# net_input = tf.get_variable(
# name='input',
# shape=(1, 224, 224, 3),
# dtype=tf.float32,
# initializer=tf.random_normal_initializer(
# mean=np.mean(img), stddev=np.std(img)))
with tf.Session(graph=g) as sess, g.device('/cpu:0'):
net_input = tf.Variable(img_4d)
tf.import_graph_def(
net['graph_def'],
name='vgg',
input_map={'images:0': net_input})
# Let's take a look at the graph now:
names = [op.name for op in g.get_operations()]
print("vgg graph names: ", names)
#
# Defining the Content Loss
#
with tf.Session(graph=g) as sess, g.device('/cpu:0'):
content_loss = tf.nn.l2_loss((g.get_tensor_by_name(content_layer)
- content_features) / content_features.size)
#
# Defining the Style Loss
#
with tf.Session(graph=g) as sess, g.device('/cpu:0'):
style_loss = np.float32(0.0)
for style_layer_i, style_gram_i in zip(style_layers, style_features):
layer_i = g.get_tensor_by_name(style_layer_i)
layer_shape = layer_i.get_shape().as_list()
layer_size = layer_shape[1] * layer_shape[2] * layer_shape[3]
layer_flat = tf.reshape(layer_i, [-1, layer_shape[3]])
gram_matrix = tf.matmul(tf.transpose(layer_flat), layer_flat) / layer_size
style_loss = tf.add(style_loss, tf.nn.l2_loss((gram_matrix - style_gram_i) / np.float32(style_gram_i.size)))
#
# Defining the Total Variation Loss
#
def total_variation_loss(x):
    h, w = x.get_shape().as_list()[1], x.get_shape().as_list()[2]
dx = tf.square(x[:, :h-1, :w-1, :] - x[:, :h-1, 1:, :])
dy = tf.square(x[:, :h-1, :w-1, :] - x[:, 1:, :w-1, :])
return tf.reduce_sum(tf.pow(dx + dy, 1.25))
with tf.Session(graph=g) as sess, g.device('/cpu:0'):
tv_loss = total_variation_loss(net_input)
#
# Training
#
# With both content and style losses, we can combine the two,
# optimizing our loss function, and creating a stylized coffee cup.
with tf.Session(graph=g) as sess, g.device('/cpu:0'):
loss = 0.1 * content_loss + 5.0 * style_loss + 0.01 * tv_loss
optimizer = tf.train.AdamOptimizer(0.01).minimize(loss)
print("Training...")
t1 = datetime.datetime.now()
with tf.Session(graph=g) as sess, g.device('/cpu:0'):
sess.run(tf.initialize_all_variables())
# map input to noise
n_iterations = 200
og_img = net_input.eval()
imgs = []
for it_i in range(n_iterations):
_, this_loss, synth = sess.run([optimizer, loss, net_input],
feed_dict={
'vgg/dropout_1/random_uniform:0':
np.ones(g.get_tensor_by_name(
'vgg/dropout_1/random_uniform:0').get_shape().as_list()),
'vgg/dropout/random_uniform:0':
np.ones(g.get_tensor_by_name(
'vgg/dropout/random_uniform:0').get_shape().as_list())})
print("It: %d: loss: %f, (min: %f - max: %f)" %
(it_i, this_loss, np.min(synth), np.max(synth)))
if it_i % (n_iterations//50) == 0:
imgs.append(np.clip(synth[0], 0, 1))
#fig, ax = plt.subplots(1, 3, figsize=(22, 5))
#plt.imshow(vgg16.deprocess(img))
#plt.set_title('content image')
#plt.imshow(vgg16.deprocess(style_img))
#plt.set_title('style image')
plt.title('synthesis #'+str(it_i))
lastimg=vgg16.deprocess(synth[0])
plt.imshow(lastimg)
plt.show()
#wait(3)
plt.pause(1)
# ?fig.canvas.draw()
plt.imsave(fname='stylenet_last_synth_'+TID+'.png', arr=lastimg)
gif.build_gif(imgs, saveto='stylenet-test_'+TID+'.gif', interval=200)
t2 = datetime.datetime.now()
delta = t2 - t1
print(" Total animation time: ", delta.total_seconds())
# Ref: Xubuntu bisal: 408s
# eop
|
{
"content_hash": "5a16d499dec12e403c86c1cefb1c838a",
"timestamp": "",
"source": "github",
"line_count": 277,
"max_line_length": 116,
"avg_line_length": 25.902527075812273,
"alnum_prop": 0.6076655052264809,
"repo_name": "dariox2/CADL",
"id": "8c2ee684b8b6485d45f2156f4174c57b7ca24656",
"size": "7217",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "session-4/teststylenet.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "67315621"
},
{
"name": "Python",
"bytes": "940923"
},
{
"name": "Shell",
"bytes": "2596"
}
],
"symlink_target": ""
}
|
"""This module contains a client for interacting with the CloudSQL API.
"""
from datetime import datetime
from datetime import timedelta
import httplib2
import json
import logging
import os
import pprint
import time
from google.appengine.api import rdbms
class CloudSQLClient(object):
"""CloudSQL client.
"""
def __init__(self, instance, database):
"""Constructor.
Args:
instance (string): CloudSQL instance, eg "mlab-metrics:api".
database (string): CloudSQL database, eg "data".
"""
self._instance = instance
self._database = database
def Query(self, query):
"""Issues a query to CloudSQL.
Args:
query (string): The query to be issued.
Returns:
(dict) Dictionary of results, split among 'fields' which describe
the columns of the result and 'data' which contains rows of result
data.
"""
# Issue the query.
conn = rdbms.connect(instance=self._instance, database=self._database)
cursor = conn.cursor()
cursor.execute(query)
# Parse the response data into a more convenient dict, with members
# 'fields' for row names and 'data' for row data.
if cursor.description is None: # Probably not a SELECT.
result = None
else:
result = { 'fields': tuple(d[0] for d in cursor.description),
'data': cursor.fetchall() }
conn.commit()
conn.close()
return result
def Update(self, table_name, metric_name, data):
"""Updates data for a given table and metric name.
Basically an SQL 'UPDATE'.
Args:
table_name (string): The table to edit.
metric_name (string): The name of the metric to update.
data (dict): Dictionary of key-value pair data to update.
"""
new_settings = ['%s="%s"' % (k, v) for (k, v) in data.iteritems()]
self.Query('UPDATE %s'
' SET %s'
' WHERE name="%s"' %
(table_name, ', '.join(new_settings), metric_name))
def Create(self, table_name, metric_name, data):
"""Creates new metric data for the given name.
Basically an SQL 'INSERT'.
Args:
table_name (string): The table to edit.
metric_name (string): The name of the metric to create.
data (dict): Dictionary of key-value pair data to add.
"""
new_settings = ['%s="%s"' % (k, v) for (k, v) in data.iteritems()]
self.Query('INSERT INTO %s'
' SET name="%s", %s' %
(table_name, metric_name, ', '.join(new_settings)))
def Delete(self, table_name, metric_name):
"""Deletes the specified metric from the given table.
Basically an SQL 'DELETE'.
Args:
table_name (string): The table to edit.
metric_name (string): The name of the metric to delete.
"""
self.Query('DELETE FROM %s'
' WHERE name="%s"' %
(table_name, metric_name))
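# Illustrative usage sketch (not part of the original module): the table name
# 'metrics' and the column 'units' below are made-up examples, and a real
# CloudSQL instance is assumed.
#
#     client = CloudSQLClient('mlab-metrics:api', 'data')
#     client.Create('metrics', 'num_users', {'units': 'count'})
#     result = client.Query('SELECT name, units FROM metrics')
#     if result is not None:
#         for row in result['data']:
#             print dict(zip(result['fields'], row))
#     client.Delete('metrics', 'num_users')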
|
{
"content_hash": "09f1756d076f0a6c11191a108b92a785",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 78,
"avg_line_length": 31.405940594059405,
"alnum_prop": 0.5595838587641866,
"repo_name": "dcurley/mlab-metrics-api-server",
"id": "fa8d66da5f5254f916ecdf708dafac8324d003e3",
"size": "3794",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "common/cloud_sql_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1936"
},
{
"name": "Python",
"bytes": "249888"
},
{
"name": "Smarty",
"bytes": "29365"
}
],
"symlink_target": ""
}
|
from flask import current_app
from flask_login import login_required
from whyis.blueprint.nanopub.nanopub_utils import get_nanopub_uri
from .nanopub_blueprint import nanopub_blueprint
#@nanopub_blueprint.route('/pub/<ident>', methods=['DELETE'])
#@login_required
def delete_nanopub(ident):
#print(request.method, 'delete_nanopub()', ident)
ident = ident.split("_")[0]
uri = get_nanopub_uri(ident)
if not current_app._can_edit(uri):
return '<h1>Not Authorized</h1>', 401
current_app.nanopub_manager.retire(uri)
return '', 204
|
{
"content_hash": "0196cd78500e11a9e617c42430538a95",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 65,
"avg_line_length": 32.94117647058823,
"alnum_prop": 0.7107142857142857,
"repo_name": "tetherless-world/graphene",
"id": "13a7b4c6c2d1f877bed522ff4579b8e43de1d8b4",
"size": "560",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "whyis/blueprint/nanopub/delete_nanopub.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "460"
},
{
"name": "HTML",
"bytes": "82771"
},
{
"name": "JavaScript",
"bytes": "65463"
},
{
"name": "Puppet",
"bytes": "14733"
},
{
"name": "Python",
"bytes": "80312"
},
{
"name": "Shell",
"bytes": "1982"
}
],
"symlink_target": ""
}
|
import socket
import gobject
class TrivialStream:
def __init__(self, socket_address=None):
self.socket_address = socket_address
def read_socket(self, s):
try:
data = s.recv(1024)
if len(data) > 0:
print "received:", data
except socket.error, e:
pass
return True
def write_socket(self, s, msg):
print "send:", msg
try:
s = s.send(msg)
except socket.error, e:
pass
return True
class TrivialStreamServer(TrivialStream):
def __init__(self):
TrivialStream.__init__(self)
self._socket = None
def run(self):
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setblocking(1)
self._socket.settimeout(0.1)
self._socket.bind(("127.0.0.1", 0))
self.socket_address = self._socket.getsockname()
print "Trivial Server launched on socket", self.socket_address
self._socket.listen(1)
gobject.timeout_add(1000, self.accept_client, self._socket)
def accept_client(self, s):
try:
s2, addr = s.accept()
s2.setblocking(1)
            s2.settimeout(0.1)
self.handle_client(s2)
return True
except socket.timeout:
return True
def handle_client(self, s):
pass
class TrivialStreamClient(TrivialStream):
def connect(self):
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.connect(self.socket_address)
print "Trivial client connected to", self.socket_address
|
{
"content_hash": "371eccbe52b79a78648c56ded0b66495",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 72,
"avg_line_length": 27.19672131147541,
"alnum_prop": 0.5786618444846293,
"repo_name": "opendreambox/python-coherence",
"id": "01bd93a1a92aa9bb766de07fc32942153834a6c7",
"size": "1659",
"binary": false,
"copies": "6",
"ref": "refs/heads/develop",
"path": "coherence/extern/telepathy/stream.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1247145"
},
{
"name": "Roff",
"bytes": "712"
},
{
"name": "Shell",
"bytes": "1569"
}
],
"symlink_target": ""
}
|
"""
MoinMoin - lists of translateable strings
MoinMoin uses some translateable strings that do not appear at other
places in the source code (and thus, are not found by gettext when
extracting translateable strings).
Also, some strings need to be organized somehow.
TODO i18n.strings / general:
* fix other translations (can be done using ##master-page, but help
from a native speaker would be the preferred solution)
* delete other SystemPagesInXXXGroup if their po file is complete
@copyright: 2009 MoinMoin:ThomasWaldmann
@license: GNU GPL, see COPYING for details.
"""
_ = lambda x: x # dummy translation function
# Some basic pages used for every language, but we only need them once in English (don't translate!):
not_translated_system_pages = [
'LanguageSetup',
'InterWikiMap',
'BadContent',
'LocalBadContent',
'EditedSystemPages',
'LocalSpellingWords',
'SystemAdmin',
'SystemInfo',
'ProjectTemplate',
'ProjectGroupsTemplate',
'PermissionDeniedPage',
]
essential_system_pages = [
_('RecentChanges'),
_('WikiTipOfTheDay'), # used by RecentChanges
_('TitleIndex'),
_('WordIndex'),
_('FindPage'),
_('MissingPage'),
_('MissingHomePage'),
_('WikiHomePage'), # used by CategoryHomepage
# these are still in use, but should be killed:
_('WikiName'), # linked from misc. help/tips pages
_('WikiWikiWeb'), # used by FrontPage/WikiHomePage
]
optional_system_pages = [
_('FrontPage'),
_('WikiSandBox'),
_('InterWiki'),
_('AbandonedPages'),
_('OrphanedPages'),
_('WantedPages'),
_('EventStats'),
_('EventStats/HitCounts'),
_('EventStats/Languages'),
_('EventStats/UserAgents'),
_('PageSize'),
_('PageHits'),
_('RandomPage'),
_('XsltVersion'),
_('FortuneCookies'), # use by RandomQuote macro
_('WikiLicense'), # does not exist, but can be created by wiki admin
]
translated_system_pages = essential_system_pages + optional_system_pages
all_system_pages = not_translated_system_pages + translated_system_pages
essential_category_pages = [
_('CategoryCategory'),
_('CategoryHomepage'),
]
optional_category_pages = [
]
all_category_pages = essential_category_pages + optional_category_pages
essential_template_pages = [
_('CategoryTemplate'),
_('HomepageTemplate'),
]
optional_template_pages = [
_('HelpTemplate'),
_('HomepageReadWritePageTemplate'),
_('HomepageReadPageTemplate'),
_('HomepagePrivatePageTemplate'),
_('HomepageGroupsTemplate'),
_('SlideShowHandOutTemplate'),
_('SlideShowTemplate'),
_('SlideTemplate'),
_('SyncJobTemplate'),
]
all_template_pages = essential_template_pages + optional_template_pages
# Installation / Configuration / Administration Help:
admin_pages = [
_('HelpOnConfiguration'),
_('HelpOnConfiguration/EmailSupport'),
_('HelpOnConfiguration/SecurityPolicy'),
_('HelpOnConfiguration/FileAttachments'),
_('HelpOnConfiguration/SupplementationPage'),
_('HelpOnConfiguration/SurgeProtection'),
_('HelpOnConfiguration/UserPreferences'),
_('HelpOnPackageInstaller'),
_('HelpOnUpdatingPython'),
_('HelpOnAdministration'),
_('HelpOnAuthentication'),
_('HelpOnAuthentication/ExternalCookie'),
_('HelpOnMoinCommand'),
_('HelpOnMoinCommand/ExportDump'),
_('HelpOnNotification'),
_('HelpOnRobots'),
_('HelpOnSessions'),
_('HelpOnUserHandling'),
_('HelpOnXapian'),
]
# Stuff that should live on moinmo.in wiki:
obsolete_pages = [
]
essential_help_pages = [
_('HelpOnMoinWikiSyntax'), # used by edit action
_('HelpOnCreoleSyntax'), # used by edit action
# HelpOnParsers/ReStructuredText/RstPrimer could be renamed and used in a similar way
]
optional_help_pages = [
_('HelpOnFormatting'), # still needed?
_('MoinMoin'),
_('HelpContents'),
_('HelpForBeginners'),
_('HelpForUsers'),
_('HelpIndex'),
_('HelpOnAccessControlLists'),
_('HelpOnActions'),
_('HelpOnActions/AttachFile'),
_('HelpOnAdmonitions'),
_('HelpOnAutoAdmin'),
_('HelpOnCategories'),
_('HelpOnDictionaries'),
_('HelpOnDrawings'),
_('HelpOnEditLocks'),
_('HelpOnEditing'), # used by edit action!
_('HelpOnEditing/SubPages'),
_('HelpOnGraphicalEditor'),
_('HelpOnGroups'),
_('HelpOnHeadlines'),
_('HelpOnImages'),
_('HelpOnLanguages'),
_('HelpOnLinking'),
_('HelpOnLinking/NotesLinks'),
_('HelpOnLists'),
_('HelpOnLogin'),
_('HelpOnMacros'),
_('HelpOnMacros/EmbedObject'),
_('HelpOnMacros/Include'),
_('HelpOnMacros/MailTo'),
_('HelpOnMacros/MonthCalendar'),
_('HelpOnNavigation'),
_('HelpOnOpenIDProvider'),
_('HelpOnPageCreation'),
_('HelpOnPageDeletion'),
_('HelpOnParsers'),
_('HelpOnParsers/ReStructuredText'),
_('HelpOnParsers/ReStructuredText/RstPrimer'),
_('HelpOnProcessingInstructions'),
_('HelpOnRules'),
_('HelpOnSearching'),
_('HelpOnSlideShows'),
_('HelpOnSlideShows/000 Introduction'),
_('HelpOnSlideShows/100 Creating the slides'),
_('HelpOnSlideShows/900 Last but not least: Running your presentation'),
_('HelpOnSmileys'),
_('HelpOnSpam'),
_('HelpOnSpellCheck'),
_('HelpOnSuperUser'),
_('HelpOnSynchronisation'),
_('HelpOnTables'),
_('HelpOnTemplates'),
_('HelpOnThemes'),
_('HelpOnUserPreferences'),
_('HelpOnVariables'),
_('HelpOnXmlPages'),
_('HelpOnComments'),
_('HelpOnSubscribing'),
# these are still in use, but should be killed:
_('CamelCase'), # linked from misc. help/course pages
]
all_help_pages = essential_help_pages + optional_help_pages
# Wiki Course:
course_pages = [
_('WikiCourse'),
_('WikiCourse/01 What is a MoinMoin wiki?'),
_('WikiCourse/02 Finding information'),
_('WikiCourse/03 Staying up to date'),
_('WikiCourse/04 Creating a wiki account'),
_('WikiCourse/05 User preferences'),
_('WikiCourse/06 Your own wiki homepage'),
_('WikiCourse/07 The text editor'),
_('WikiCourse/08 Hot Keys'),
_('WikiCourse/10 Text layout with wiki markup'),
_('WikiCourse/11 Paragraphs'),
_('WikiCourse/12 Headlines'),
_('WikiCourse/13 Lists'),
_('WikiCourse/14 Text styles'),
_('WikiCourse/15 Tables'),
_('WikiCourse/16 Wiki internal links'),
_('WikiCourse/17 External links'),
_('WikiCourse/18 Attachments'),
_('WikiCourse/19 Symbols'),
_('WikiCourse/20 Dynamic content'),
_('WikiCourse/21 Macros'),
_('WikiCourse/22 Parsers'),
_('WikiCourse/23 Actions'),
_('WikiCourse/30 The graphical editor'),
_('WikiCourse/40 Creating more pages'),
_('WikiCourse/50 Wiki etiquette'),
_('WikiCourse/51 Applications'),
_('WikiCourse/52 Structure in the wiki'),
_('WikiCourseHandOut'),
]
essential_pages = (
essential_system_pages +
essential_category_pages +
essential_template_pages +
essential_help_pages
)
optional_pages = (
optional_system_pages +
optional_category_pages +
optional_template_pages +
optional_help_pages
)
all_pages = (
all_system_pages +
all_category_pages +
all_template_pages +
all_help_pages +
admin_pages +
obsolete_pages +
course_pages
)
# an list of page sets translators should look at,
# ordered in the order translators should look at them
pagesets = [
'not_translated_system_pages',
'essential_system_pages',
'essential_help_pages',
'essential_category_pages',
'essential_template_pages',
'essential_pages',
'optional_system_pages',
'optional_help_pages',
'optional_category_pages',
'optional_template_pages',
'optional_pages',
'translated_system_pages',
'all_system_pages',
'all_help_pages',
'all_category_pages',
'all_template_pages',
'admin_pages',
'course_pages',
'obsolete_pages',
'all_pages',
]
# we use Sun at index 0 and 7 to be compatible with EU and US day indexing
# schemes, like it is also done within crontab entries:
weekdays = [_('Sun'), _('Mon'), _('Tue'), _('Wed'), _('Thu'), _('Fri'), _('Sat'), _('Sun')]
actions = [
_('AttachFile'),
_('DeletePage'),
_('LikePages'),
_('LocalSiteMap'),
_('RenamePage'),
_('SpellCheck'),
]
misc = [
# the editbar link text of the default supplementation page link:
_('Discussion'),
]
del _ # delete the dummy translation function
|
{
"content_hash": "f43684c8ab4e7027525b0d92542d8a85",
"timestamp": "",
"source": "github",
"line_count": 302,
"max_line_length": 101,
"avg_line_length": 28.12251655629139,
"alnum_prop": 0.6480631107971271,
"repo_name": "RealTimeWeb/wikisite",
"id": "9fab71d2beb6c03f9b9fab6ab7f6ac3c3041e87f",
"size": "8522",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MoinMoin/i18n/strings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "49395"
},
{
"name": "CSS",
"bytes": "204104"
},
{
"name": "ColdFusion",
"bytes": "142312"
},
{
"name": "Java",
"bytes": "491798"
},
{
"name": "JavaScript",
"bytes": "2107106"
},
{
"name": "Lasso",
"bytes": "23464"
},
{
"name": "Makefile",
"bytes": "4950"
},
{
"name": "PHP",
"bytes": "144585"
},
{
"name": "Perl",
"bytes": "44627"
},
{
"name": "Python",
"bytes": "7647140"
},
{
"name": "Shell",
"bytes": "335"
}
],
"symlink_target": ""
}
|
"Define basic subroutines useful for all AI players"
from ..board import black, white, empty, Board, InvalidMoveError
import numpy as np
import unittest
class Playerlibrary(object):
"""
A library class that holds basic subroutines that are useful for all
kinds of artificial-intelligence-type (AI-type) players, e.g. the
function ``win_if_possible`` that checks if the game can be won in
the next move.
All the functions are written to take the same arguments as
``Player.make_move`` such that the call from within ``make_move``
looks like e.g. ``self.win_if_possible(gui)``.
"""
def line_getter_functions(self, gui, length=5):
return [lambda x,y: gui.board.get_column(x,y,length=length), lambda x,y: gui.board.get_row(x,y, length=length),
lambda x,y: gui.board.get_diagonal_upleft_to_lowright(x,y, length=length),
lambda x,y: gui.board.get_diagonal_lowleft_to_upright(x,y, length=length)]
def random_move(self, gui):
moves_left = gui.board.moves_left
while moves_left == gui.board.moves_left:
x = np.random.randint(gui.board.width)
y = np.random.randint(gui.board.height)
try:
gui.board[y,x] = self.color
except InvalidMoveError:
continue
def extend_one(self, gui):
"Place a stone next to another one but only if extendable to five."
for i in range(gui.board.height):
for j in range(gui.board.width):
for f in self.line_getter_functions(gui):
try:
line, positions = f(i,j)
except IndexError:
continue
# search pattern: one of own color and four empty
if len(np.where(line == empty)[0]) == 4 and len(np.where(line == self.color)[0]) == 1:
index_own_color = np.where(line == self.color)[0][0]
if index_own_color == 0:
gui.board[positions[1]] = self.color
return True
else:
gui.board[positions[index_own_color - 1]] = self.color
return True
return False
def block_open_four(self, gui):
"Block a line of four stones if at least one end open."
for i in range(gui.board.height):
for j in range(gui.board.width):
for f in self.line_getter_functions(gui):
try:
line, positions = f(i,j)
except IndexError:
continue
# selection: search four of opponent's color and one empty
if len(np.where(line == empty)[0]) == 1 and len(np.where(line == -self.color)[0]) == 4:
index_of_empty = np.where(line == empty)[0][0]
gui.board[positions[index_of_empty]] = self.color
return True
return False
def block_doubly_open_two(self, gui):
"Block a line of two if both sides are open."
for i in range(gui.board.height):
for j in range(gui.board.width):
for f in self.line_getter_functions(gui):
try:
line, positions = f(i,j)
except IndexError:
continue
# select pattern [<all empty>, <opponent's color>, <opponent's color>, <all empty>]
if ( line == (empty, -self.color, -self.color, empty, empty) ).all():
gui.board[positions[3]] = self.color
return True
elif ( line == (empty, empty, -self.color, -self.color, empty) ).all():
gui.board[positions[1]] = self.color
return True
return False
def block_twice_to_three_or_more(self, gui):
        'Prevent the opponent from completing two crossing lines of three or more simultaneously.'
line_getter_functions = self.line_getter_functions(gui)
line_positions = []
getter_functions = []
for i in range(gui.board.height):
for j in range(gui.board.width):
for f in line_getter_functions:
try:
line, positions = f(i,j)
except IndexError:
continue
# search two of opponent's color and three empty in two crossing lines at an empty position
opponent_stones_in_line = len(np.where(line == -self.color)[0])
if opponent_stones_in_line >= 2 and len(np.where(line == empty)[0]) == 5 - opponent_stones_in_line:
for oldpos, old_getter in zip(line_positions, getter_functions):
for pos in positions:
if f != old_getter and pos in oldpos and gui.board[pos] == empty:
gui.board[pos] = self.color
return True
line_positions.append(positions)
getter_functions.append(f)
return False
def block_open_three(self, gui):
"Block a line of three."
for i in range(gui.board.height):
for j in range(gui.board.width):
for f in self.line_getter_functions(gui):
try:
line, positions = f(i,j)
except IndexError:
continue
# selection: search three of opponent's color and two empty
if len(np.where(line == empty)[0]) == 2 and len(np.where(line == -self.color)[0]) == 3:
indices_opponent = np.where(line == -self.color)[0]
if not (indices_opponent[1] == indices_opponent[0] + 1 and \
indices_opponent[2] == indices_opponent[1] + 1):
continue
if 0 not in indices_opponent:
gui.board[positions[indices_opponent[0] - 1]] = self.color
return True
else:
gui.board[positions[3]] = self.color
return True
return False
def block_open_two(self, gui):
"Block a line of two."
for i in range(gui.board.height):
for j in range(gui.board.width):
for f in self.line_getter_functions(gui):
try:
line, positions = f(i,j)
except IndexError:
continue
                    # selection: search pattern [<all empty or boundary>, opponent, opponent, <all empty or boundary>]
if len(np.where(line == empty)[0]) == 3 and len(np.where(line == -self.color)[0]) == 2:
indices_opponent = np.where(line == -self.color)[0]
if indices_opponent[1] == indices_opponent[0] + 1:
if indices_opponent[0] == 0:
gui.board[positions[3]] = self.color
return True
else:
gui.board[positions[indices_opponent[0]-1]] = self.color
return True
return False
def block_doubly_open_three(self, gui):
"Block a line of three but only if both sides are open."
for i in range(gui.board.height):
for j in range(gui.board.width):
for f in self.line_getter_functions(gui):
try:
line, positions = f(i,j)
except IndexError:
continue
if ( line == (empty, -self.color, -self.color, -self.color, empty) ).all():
gui.board[positions[0]] = self.color
return True
return False
def extend_three_to_four(self, gui):
"""
Extend a line of three stones to a line of four stones but only
if there is enough space to be completed to five.
"""
for i in range(gui.board.height):
for j in range(gui.board.width):
for f in self.line_getter_functions(gui):
try:
line, positions = f(i,j)
except IndexError:
continue
# selection: search three of own color and two empty
if len(np.where(line == empty)[0]) == 2 and len(np.where(line == self.color)[0]) == 3:
indices_empty = np.where(line == empty)[0]
if 0 not in indices_empty:
gui.board[positions[indices_empty[0]]] = self.color
return True
else:
gui.board[positions[indices_empty[1]]] = self.color
return True
return False
def block_to_doubly_open_four(self, gui):
"""
Prevent the opponent from getting a line of four with both ends
open.
"""
for i in range(gui.board.height):
for j in range(gui.board.width):
for f in self.line_getter_functions(gui, length=6):
try:
line, positions = f(i,j)
except IndexError:
continue
# selection: search pattern [empty, <extendable to 4 times opponent>, empty]
if len(np.where(line == empty)[0]) == 3 and len(np.where(line == -self.color)[0]) == 3:
indices_empty = np.where(line == empty)[0]
if not (line[0] == empty and line[-1] == empty):
continue
else:
gui.board[positions[indices_empty[1]]] = self.color
return True
return False
def extend_three_to_doubly_open_four(self, gui):
"""
Extend a line of three stones to a line of four stones but only
if there is enough space to be completed to five ON BOTH SIDES.
"""
for i in range(gui.board.height):
for j in range(gui.board.width):
for f in self.line_getter_functions(gui, length=6):
try:
line, positions = f(i,j)
except IndexError:
continue
# selection: search pattern [empty, <extendable to 4 times own>, empty]
if len(np.where(line == empty)[0]) == 3 and len(np.where(line == self.color)[0]) == 3:
indices_empty = np.where(line == empty)[0]
if not (line[0] == empty and line[-1] == empty):
continue
else:
gui.board[positions[indices_empty[1]]] = self.color
return True
return False
def extend_two_to_three(self, gui):
"""
Extend a line of two stones to a line of three stones but only
if there is enough space to be completed to five.
"""
for i in range(gui.board.height):
for j in range(gui.board.width):
for f in self.line_getter_functions(gui):
try:
line, positions = f(i,j)
except IndexError:
continue
# selection: search two of own color and three empty
if len(np.where(line == empty)[0]) == 3 and len(np.where(line == self.color)[0]) == 2:
indices_empty = np.where(line == empty)[0]
gui.board[positions[indices_empty[np.random.randint(3)]]] = self.color
return True
return False
def extend_twice_two_to_three(self, gui):
"""
Extend two crossing lines of two stones to two lines of three
stones but only if there is enough space to be completed to five.
"""
line_positions = []
getter_functions = []
for f in self.line_getter_functions(gui):
for i in range(gui.board.height):
for j in range(gui.board.width):
try:
line, positions = f(i,j)
except IndexError:
continue
# search two of own color and three empty in two crossing lines at an empty position
if len(np.where(line == empty)[0]) == 3 and len(np.where(line == self.color)[0]) == 2:
for oldpos, old_getter in zip(line_positions, getter_functions):
for pos in positions:
if f != old_getter and pos in oldpos and gui.board[pos] == empty:
gui.board[pos] = self.color
return True
line_positions.append(positions)
getter_functions.append(f)
return False
def check_if_immediate_win_possible(self, gui):
"""
        Check if it is possible to place a stone such that the player wins
immediately.
Return the position to place the stone if possible, otherwise return None.
"""
for i in range(gui.board.height):
for j in range(gui.board.width):
for f in self.line_getter_functions(gui):
try:
line, positions = f(i,j)
except IndexError:
continue
# selection:
# - can only place stones where field is ``empty``
# - line must sum to "+" or "-" 4 (4 times black=+1 or white=-1 and once empty=0)
# place stone if that leads to winning the game
if empty in line and line.sum() == self.color * 4:
for pos in positions:
if gui.board[pos] == empty:
return pos
raise RuntimeError("Check the implementation of ``check_if_immediate_win_possible``.")
# control reaches this point only if no winning move is found => return None
def win_if_possible(self, gui):
"""
Place a stone where the player wins immediately if possible.
Return ``True`` if a stone has been placed, otherwise return False.
"""
pos = self.check_if_immediate_win_possible(gui)
if pos is None:
return False
else:
gui.board[pos] = self.color
return True
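# Illustrative sketch (not part of the original module): a concrete AI player
# typically chains these subroutines from strongest to weakest move inside
# ``make_move`` and falls back to a random move. The ``Player`` base class
# supplying ``self.color`` is assumed to come from the surrounding package:
#
#     class SimpleAI(Player, Playerlibrary):
#         def make_move(self, gui):
#             if self.win_if_possible(gui): return
#             if self.block_open_four(gui): return
#             if self.block_doubly_open_three(gui): return
#             if self.extend_three_to_doubly_open_four(gui): return
#             if self.extend_twice_two_to_three(gui): return
#             if self.extend_one(gui): return
#             self.random_move(gui)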
class PlayerTest(unittest.TestCase):
"""
Library class for testing AI players.
Usage:
Create a subclass and set the member variable ``Player`` to the
AI you want to test:
>>> class MyTest(PlayerTest):
... Player = <Your AI>
"""
Player = None
@classmethod
def build_board(self, board_array):
"""
Build up a valid ``GameBoard`` holding the desired ``board_array``.
.. note::
            You probably want `.build_gui` instead.
:param board_array:
2D-array; e.g. [[white, empty],
[black, black]]
"""
board_array = np.asarray(board_array, dtype=int)
assert len(board_array.shape) == 2
height = board_array.shape[0]
width = board_array.shape[1]
board = Board(width=width, height=height)
white_indices = []
black_indices = []
# find positions that are not empty
for i in range(height):
for j in range(width):
value = board_array[i,j]
if value == empty:
continue
elif value == white:
white_indices.append((i,j))
elif value == black:
black_indices.append((i,j))
else:
raise AssertionError("Invalid ``board_array``")
# in a valid board, there are equally many black and white stones or
        # one more white than black stone since white begins
assert len(white_indices) == len(black_indices) or len(white_indices) == len(black_indices) + 1
while black_indices:
board[white_indices.pop()] = white
board[black_indices.pop()] = black
assert board.winner()[0] is None
# if there is one more white stone
if white_indices:
board[white_indices.pop()] = white
return board
@classmethod
def build_gui(self, board_array):
"""
Build up a valid ``GameBoard`` packed in a ``BoardGui`` holding
the desired ``board_array``. The returned instance of ``BoardGui``
is ready to use in ``Player.make_move()``.
:param board_array:
2D-array; e.g. [[white, empty],
[black, black]]
"""
from ..gui import BoardGui, tk
board = self.build_board(board_array)
gui = BoardGui(board, tk.Tk())
gui.in_game = True
return gui
def base_test(self):
width = 20
height = 10
board = Board(height, width)
from ..gui import BoardGui, tk
board_gui = BoardGui(board, tk.Tk())
board_gui.in_game = True
if self.Player is not None:
white_player = self.Player(white)
black_player = self.Player(black)
while board_gui.board.winner()[0] is None and not board_gui.board.full():
white_player.make_move(board_gui)
black_player.make_move(board_gui)
|
{
"content_hash": "2b5a0e786bf01bbd60f4f789519183a3",
"timestamp": "",
"source": "github",
"line_count": 433,
"max_line_length": 119,
"avg_line_length": 41.98845265588915,
"alnum_prop": 0.49496727352730874,
"repo_name": "jPhy/Gomoku",
"id": "9ec75a0650b669b94b46825bef70073ed36db893",
"size": "18181",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/player/lib.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3032"
},
{
"name": "Python",
"bytes": "85896"
}
],
"symlink_target": ""
}
|
"""
Classes allowing "generic" relations through ContentType and object-id fields.
"""
from __future__ import unicode_literals
from collections import defaultdict
from functools import partial
from django.core.exceptions import ObjectDoesNotExist
from django.db import connection
from django.db import models, router, transaction, DEFAULT_DB_ALIAS
from django.db.models import signals
from django.db.models.fields.related import ForeignObject, ForeignObjectRel
from django.db.models.related import PathInfo
from django.db.models.sql.where import Constraint
from django.forms import ModelForm, ALL_FIELDS
from django.forms.models import (BaseModelFormSet, modelformset_factory,
modelform_defines_fields)
from django.contrib.admin.options import InlineModelAdmin, flatten_fieldsets
from django.contrib.contenttypes.models import ContentType
from django.utils import six
from django.utils.deprecation import RenameMethodsBase
from django.utils.encoding import smart_text
class RenameGenericForeignKeyMethods(RenameMethodsBase):
renamed_methods = (
('get_prefetch_query_set', 'get_prefetch_queryset', DeprecationWarning),
)
class GenericForeignKey(six.with_metaclass(RenameGenericForeignKeyMethods)):
"""
Provides a generic relation to any object through content-type/object-id
fields.
"""
def __init__(self, ct_field="content_type", fk_field="object_id", for_concrete_model=True):
self.ct_field = ct_field
self.fk_field = fk_field
self.for_concrete_model = for_concrete_model
self.editable = False
def contribute_to_class(self, cls, name):
self.name = name
self.model = cls
self.cache_attr = "_%s_cache" % name
cls._meta.add_virtual_field(self)
# Only run pre-initialization field assignment on non-abstract models
if not cls._meta.abstract:
signals.pre_init.connect(self.instance_pre_init, sender=cls)
setattr(cls, name, self)
def instance_pre_init(self, signal, sender, args, kwargs, **_kwargs):
"""
Handles initializing an object with the generic FK instead of
content-type/object-id fields.
"""
if self.name in kwargs:
value = kwargs.pop(self.name)
if value is not None:
kwargs[self.ct_field] = self.get_content_type(obj=value)
kwargs[self.fk_field] = value._get_pk_val()
else:
kwargs[self.ct_field] = None
kwargs[self.fk_field] = None
def get_content_type(self, obj=None, id=None, using=None):
if obj is not None:
return ContentType.objects.db_manager(obj._state.db).get_for_model(
obj, for_concrete_model=self.for_concrete_model)
elif id is not None:
return ContentType.objects.db_manager(using).get_for_id(id)
else:
# This should never happen. I love comments like this, don't you?
raise Exception("Impossible arguments to GFK.get_content_type!")
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is not None:
raise ValueError("Custom queryset can't be used for this lookup.")
# For efficiency, group the instances by content type and then do one
# query per model
fk_dict = defaultdict(set)
# We need one instance for each group in order to get the right db:
instance_dict = {}
ct_attname = self.model._meta.get_field(self.ct_field).get_attname()
for instance in instances:
# We avoid looking for values if either ct_id or fkey value is None
ct_id = getattr(instance, ct_attname)
if ct_id is not None:
fk_val = getattr(instance, self.fk_field)
if fk_val is not None:
fk_dict[ct_id].add(fk_val)
instance_dict[ct_id] = instance
ret_val = []
for ct_id, fkeys in fk_dict.items():
instance = instance_dict[ct_id]
ct = self.get_content_type(id=ct_id, using=instance._state.db)
ret_val.extend(ct.get_all_objects_for_this_type(pk__in=fkeys))
# For doing the join in Python, we have to match both the FK val and the
# content type, so we use a callable that returns a (fk, class) pair.
def gfk_key(obj):
ct_id = getattr(obj, ct_attname)
if ct_id is None:
return None
else:
model = self.get_content_type(id=ct_id,
using=obj._state.db).model_class()
return (model._meta.pk.get_prep_value(getattr(obj, self.fk_field)),
model)
return (ret_val,
lambda obj: (obj._get_pk_val(), obj.__class__),
gfk_key,
True,
self.cache_attr)
def is_cached(self, instance):
return hasattr(instance, self.cache_attr)
def __get__(self, instance, instance_type=None):
if instance is None:
return self
try:
return getattr(instance, self.cache_attr)
except AttributeError:
rel_obj = None
# Make sure to use ContentType.objects.get_for_id() to ensure that
# lookups are cached (see ticket #5570). This takes more code than
# the naive ``getattr(instance, self.ct_field)``, but has better
# performance when dealing with GFKs in loops and such.
f = self.model._meta.get_field(self.ct_field)
ct_id = getattr(instance, f.get_attname(), None)
if ct_id is not None:
ct = self.get_content_type(id=ct_id, using=instance._state.db)
try:
rel_obj = ct.get_object_for_this_type(pk=getattr(instance, self.fk_field))
except ObjectDoesNotExist:
pass
setattr(instance, self.cache_attr, rel_obj)
return rel_obj
def __set__(self, instance, value):
ct = None
fk = None
if value is not None:
ct = self.get_content_type(obj=value)
fk = value._get_pk_val()
setattr(instance, self.ct_field, ct)
setattr(instance, self.fk_field, fk)
setattr(instance, self.cache_attr, value)
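# Illustrative sketch (not part of this module): the conventional model layout
# this descriptor is designed for. The model and field names below are
# examples only:
#
#     class TaggedItem(models.Model):
#         tag = models.SlugField()
#         content_type = models.ForeignKey(ContentType)
#         object_id = models.PositiveIntegerField()
#         content_object = GenericForeignKey('content_type', 'object_id')
#
#     # TaggedItem(content_object=obj, tag='fun') fills content_type and
#     # object_id automatically via instance_pre_init above.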
class GenericRelation(ForeignObject):
"""Provides an accessor to generic related objects (e.g. comments)"""
def __init__(self, to, **kwargs):
kwargs['verbose_name'] = kwargs.get('verbose_name', None)
kwargs['rel'] = GenericRel(
self, to, related_name=kwargs.pop('related_name', None),
limit_choices_to=kwargs.pop('limit_choices_to', None),)
# Override content-type/object-id field names on the related class
self.object_id_field_name = kwargs.pop("object_id_field", "object_id")
self.content_type_field_name = kwargs.pop("content_type_field", "content_type")
self.for_concrete_model = kwargs.pop("for_concrete_model", True)
kwargs['blank'] = True
kwargs['editable'] = False
kwargs['serialize'] = False
# This construct is somewhat of an abuse of ForeignObject. This field
# represents a relation from pk to object_id field. But, this relation
# isn't direct, the join is generated reverse along foreign key. So,
# the from_field is object_id field, to_field is pk because of the
# reverse join.
super(GenericRelation, self).__init__(
to, to_fields=[],
from_fields=[self.object_id_field_name], **kwargs)
def resolve_related_fields(self):
self.to_fields = [self.model._meta.pk.name]
return [(self.rel.to._meta.get_field_by_name(self.object_id_field_name)[0],
self.model._meta.pk)]
def get_reverse_path_info(self):
opts = self.rel.to._meta
target = opts.get_field_by_name(self.object_id_field_name)[0]
return [PathInfo(self.model._meta, opts, (target,), self.rel, True, False)]
def get_choices_default(self):
return super(GenericRelation, self).get_choices(include_blank=False)
def value_to_string(self, obj):
qs = getattr(obj, self.name).all()
return smart_text([instance._get_pk_val() for instance in qs])
def get_joining_columns(self, reverse_join=False):
if not reverse_join:
# This error message is meant for the user, and from user
# perspective this is a reverse join along the GenericRelation.
raise ValueError('Joining in reverse direction not allowed.')
return super(GenericRelation, self).get_joining_columns(reverse_join)
def contribute_to_class(self, cls, name):
super(GenericRelation, self).contribute_to_class(cls, name, virtual_only=True)
# Save a reference to which model this class is on for future use
self.model = cls
# Add the descriptor for the relation
setattr(cls, self.name, ReverseGenericRelatedObjectsDescriptor(self, self.for_concrete_model))
def contribute_to_related_class(self, cls, related):
pass
def set_attributes_from_rel(self):
pass
def get_internal_type(self):
return "ManyToManyField"
def get_content_type(self):
"""
Returns the content type associated with this field's model.
"""
return ContentType.objects.get_for_model(self.model,
for_concrete_model=self.for_concrete_model)
def get_extra_restriction(self, where_class, alias, remote_alias):
field = self.rel.to._meta.get_field_by_name(self.content_type_field_name)[0]
contenttype_pk = self.get_content_type().pk
cond = where_class()
cond.add((Constraint(remote_alias, field.column, field), 'exact', contenttype_pk), 'AND')
return cond
def bulk_related_objects(self, objs, using=DEFAULT_DB_ALIAS):
"""
Return all objects related to ``objs`` via this ``GenericRelation``.
"""
return self.rel.to._base_manager.db_manager(using).filter(**{
"%s__pk" % self.content_type_field_name: ContentType.objects.db_manager(using).get_for_model(
self.model, for_concrete_model=self.for_concrete_model).pk,
"%s__in" % self.object_id_field_name: [obj.pk for obj in objs]
})
class ReverseGenericRelatedObjectsDescriptor(object):
"""
This class provides the functionality that makes the related-object
managers available as attributes on a model class, for fields that have
multiple "remote" values and have a GenericRelation defined in their model
(rather than having another model pointed *at* them). In the example
"article.publications", the publications attribute is a
ReverseGenericRelatedObjectsDescriptor instance.
"""
def __init__(self, field, for_concrete_model=True):
self.field = field
self.for_concrete_model = for_concrete_model
def __get__(self, instance, instance_type=None):
if instance is None:
return self
# Dynamically create a class that subclasses the related model's
# default manager.
rel_model = self.field.rel.to
superclass = rel_model._default_manager.__class__
RelatedManager = create_generic_related_manager(superclass)
qn = connection.ops.quote_name
content_type = ContentType.objects.db_manager(instance._state.db).get_for_model(
instance, for_concrete_model=self.for_concrete_model)
join_cols = self.field.get_joining_columns(reverse_join=True)[0]
manager = RelatedManager(
model=rel_model,
instance=instance,
source_col_name=qn(join_cols[0]),
target_col_name=qn(join_cols[1]),
content_type=content_type,
content_type_field_name=self.field.content_type_field_name,
object_id_field_name=self.field.object_id_field_name,
prefetch_cache_name=self.field.attname,
)
return manager
def __set__(self, instance, value):
manager = self.__get__(instance)
manager.clear()
for obj in value:
manager.add(obj)
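# Illustrative sketch (not part of this module): with a GenericRelation
# declared on a model (names below are examples only), this descriptor exposes
# a manager over the generically related objects:
#
#     class Bookmark(models.Model):
#         url = models.URLField()
#         tags = GenericRelation(TaggedItem)    # TaggedItem as sketched above
#
#     b = Bookmark.objects.create(url='https://example.com/')
#     b.tags.create(tag='django')   # sets content_type/object_id for us
#     b.tags.all()                  # filtered by content type and object id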
def create_generic_related_manager(superclass):
"""
Factory function for a manager that subclasses 'superclass' (which is a
Manager) and adds behavior for generic related objects.
"""
class GenericRelatedObjectManager(superclass):
def __init__(self, model=None, instance=None, symmetrical=None,
source_col_name=None, target_col_name=None, content_type=None,
content_type_field_name=None, object_id_field_name=None,
prefetch_cache_name=None):
super(GenericRelatedObjectManager, self).__init__()
self.model = model
self.content_type = content_type
self.symmetrical = symmetrical
self.instance = instance
self.source_col_name = source_col_name
self.target_col_name = target_col_name
self.content_type_field_name = content_type_field_name
self.object_id_field_name = object_id_field_name
self.prefetch_cache_name = prefetch_cache_name
self.pk_val = self.instance._get_pk_val()
self.core_filters = {
'%s__pk' % content_type_field_name: content_type.id,
'%s__exact' % object_id_field_name: instance._get_pk_val(),
}
def __call__(self, **kwargs):
# We use **kwargs rather than a kwarg argument to enforce the
# `manager='manager_name'` syntax.
manager = getattr(self.model, kwargs.pop('manager'))
manager_class = create_generic_related_manager(manager.__class__)
return manager_class(
model=self.model,
instance=self.instance,
symmetrical=self.symmetrical,
source_col_name=self.source_col_name,
target_col_name=self.target_col_name,
content_type=self.content_type,
content_type_field_name=self.content_type_field_name,
object_id_field_name=self.object_id_field_name,
prefetch_cache_name=self.prefetch_cache_name,
)
do_not_call_in_templates = True
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
except (AttributeError, KeyError):
db = self._db or router.db_for_read(self.model, instance=self.instance)
return super(GenericRelatedObjectManager, self).get_queryset().using(db).filter(**self.core_filters)
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = super(GenericRelatedObjectManager, self).get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
query = {
'%s__pk' % self.content_type_field_name: self.content_type.id,
'%s__in' % self.object_id_field_name: set(obj._get_pk_val() for obj in instances)
}
# We (possibly) need to convert object IDs to the type of the
# instances' PK in order to match up instances:
object_id_converter = instances[0]._meta.pk.to_python
return (queryset.filter(**query),
lambda relobj: object_id_converter(getattr(relobj, self.object_id_field_name)),
lambda obj: obj._get_pk_val(),
False,
self.prefetch_cache_name)
def add(self, *objs):
for obj in objs:
if not isinstance(obj, self.model):
raise TypeError("'%s' instance expected" % self.model._meta.object_name)
setattr(obj, self.content_type_field_name, self.content_type)
setattr(obj, self.object_id_field_name, self.pk_val)
obj.save()
add.alters_data = True
def remove(self, *objs, **kwargs):
if not objs:
return
bulk = kwargs.pop('bulk', True)
self._clear(self.filter(pk__in=[o.pk for o in objs]), bulk)
remove.alters_data = True
def clear(self, **kwargs):
bulk = kwargs.pop('bulk', True)
self._clear(self, bulk)
clear.alters_data = True
def _clear(self, queryset, bulk):
db = router.db_for_write(self.model, instance=self.instance)
queryset = queryset.using(db)
if bulk:
queryset.delete()
else:
with transaction.commit_on_success_unless_managed(using=db, savepoint=False):
for obj in queryset:
obj.delete()
_clear.alters_data = True
def create(self, **kwargs):
kwargs[self.content_type_field_name] = self.content_type
kwargs[self.object_id_field_name] = self.pk_val
db = router.db_for_write(self.model, instance=self.instance)
return super(GenericRelatedObjectManager, self).using(db).create(**kwargs)
create.alters_data = True
return GenericRelatedObjectManager
class GenericRel(ForeignObjectRel):
def __init__(self, field, to, related_name=None, limit_choices_to=None):
super(GenericRel, self).__init__(field, to, related_name, limit_choices_to)
class BaseGenericInlineFormSet(BaseModelFormSet):
"""
A formset for generic inline objects to a parent.
"""
def __init__(self, data=None, files=None, instance=None, save_as_new=None,
prefix=None, queryset=None, **kwargs):
opts = self.model._meta
self.instance = instance
self.rel_name = '-'.join((
opts.app_label, opts.model_name,
self.ct_field.name, self.ct_fk_field.name,
))
if self.instance is None or self.instance.pk is None:
qs = self.model._default_manager.none()
else:
if queryset is None:
queryset = self.model._default_manager
qs = queryset.filter(**{
self.ct_field.name: ContentType.objects.get_for_model(
self.instance, for_concrete_model=self.for_concrete_model),
self.ct_fk_field.name: self.instance.pk,
})
super(BaseGenericInlineFormSet, self).__init__(
queryset=qs, data=data, files=files,
prefix=prefix,
**kwargs
)
@classmethod
def get_default_prefix(cls):
opts = cls.model._meta
return '-'.join(
(opts.app_label, opts.model_name,
cls.ct_field.name, cls.ct_fk_field.name)
)
def save_new(self, form, commit=True):
setattr(form.instance, self.ct_field.get_attname(),
ContentType.objects.get_for_model(self.instance).pk)
setattr(form.instance, self.ct_fk_field.get_attname(),
self.instance.pk)
return form.save(commit=commit)
def generic_inlineformset_factory(model, form=ModelForm,
formset=BaseGenericInlineFormSet,
ct_field="content_type", fk_field="object_id",
fields=None, exclude=None,
extra=3, can_order=False, can_delete=True,
max_num=None,
formfield_callback=None, validate_max=False,
for_concrete_model=True):
"""
Returns a ``GenericInlineFormSet`` for the given kwargs.
You must provide ``ct_field`` and ``fk_field`` if they are different from
the defaults ``content_type`` and ``object_id`` respectively.
"""
opts = model._meta
# if there is no field called `ct_field` let the exception propagate
ct_field = opts.get_field(ct_field)
if not isinstance(ct_field, models.ForeignKey) or ct_field.rel.to != ContentType:
raise Exception("fk_name '%s' is not a ForeignKey to ContentType" % ct_field)
fk_field = opts.get_field(fk_field) # let the exception propagate
if exclude is not None:
exclude = list(exclude)
exclude.extend([ct_field.name, fk_field.name])
else:
exclude = [ct_field.name, fk_field.name]
FormSet = modelformset_factory(model, form=form,
formfield_callback=formfield_callback,
formset=formset,
extra=extra, can_delete=can_delete, can_order=can_order,
fields=fields, exclude=exclude, max_num=max_num,
validate_max=validate_max)
FormSet.ct_field = ct_field
FormSet.ct_fk_field = fk_field
FormSet.for_concrete_model = for_concrete_model
return FormSet
class GenericInlineModelAdmin(InlineModelAdmin):
ct_field = "content_type"
ct_fk_field = "object_id"
formset = BaseGenericInlineFormSet
def get_formset(self, request, obj=None, **kwargs):
if 'fields' in kwargs:
fields = kwargs.pop('fields')
else:
fields = flatten_fieldsets(self.get_fieldsets(request, obj))
if self.exclude is None:
exclude = []
else:
exclude = list(self.exclude)
exclude.extend(self.get_readonly_fields(request, obj))
if self.exclude is None and hasattr(self.form, '_meta') and self.form._meta.exclude:
# Take the custom ModelForm's Meta.exclude into account only if the
# GenericInlineModelAdmin doesn't define its own.
exclude.extend(self.form._meta.exclude)
exclude = exclude or None
can_delete = self.can_delete and self.has_delete_permission(request, obj)
defaults = {
"ct_field": self.ct_field,
"fk_field": self.ct_fk_field,
"form": self.form,
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
"formset": self.formset,
"extra": self.extra,
"can_delete": can_delete,
"can_order": False,
"fields": fields,
"max_num": self.max_num,
"exclude": exclude
}
defaults.update(kwargs)
if defaults['fields'] is None and not modelform_defines_fields(defaults['form']):
defaults['fields'] = ALL_FIELDS
return generic_inlineformset_factory(self.model, **defaults)
class GenericStackedInline(GenericInlineModelAdmin):
template = 'admin/edit_inline/stacked.html'
class GenericTabularInline(GenericInlineModelAdmin):
template = 'admin/edit_inline/tabular.html'
|
{
"content_hash": "27a1544657a000cd11d49cc9dfacaaf7",
"timestamp": "",
"source": "github",
"line_count": 552,
"max_line_length": 116,
"avg_line_length": 41.77355072463768,
"alnum_prop": 0.6037122164881391,
"repo_name": "Beeblio/django",
"id": "1ec5475f5b3b9ef994a0a296c043892f1bb74401",
"size": "23059",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "django/contrib/contenttypes/generic.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "42830"
},
{
"name": "HTML",
"bytes": "173915"
},
{
"name": "JavaScript",
"bytes": "102290"
},
{
"name": "Makefile",
"bytes": "239"
},
{
"name": "Python",
"bytes": "9172420"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
}
|
from django.conf.urls import url
from .views import index
from .views import mypage
from .views import login_view
from .views import signup
from .views import new_server_group
from .views import add_player
from .views import server_group_detail
from .views import player_detail
from .views import start_server
from .views import stop_server
urlpatterns = [
url(r'^$', index, name='index'),
url(r'^mypage/$', mypage, name='mypage'),
url(r'^login/$', login_view, name='login'),
url(r'^signup/$', signup, name='signup'),
url(r'^new_server_group/$', new_server_group, name='new_server_group'),
url(r'^server_group/(?P<pk>[0-9]+)/$', server_group_detail, name='server_group_detail'),
url(r'^add_player/$', add_player, name='add_player'),
url(r'^player_detail/(?P<pk>[0-9]+)/$', player_detail, name='player_detail'),
url(r'^start_server/(?P<pk>[0-9]+)/$', start_server, name='start_server'),
url(r'^stop_server/(?P<pk>[0-9]+)/$', stop_server, name='stop_server'),
]
|
{
"content_hash": "e15f261a20dfe23a8bd4020d94c812b7",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 92,
"avg_line_length": 38.57692307692308,
"alnum_prop": 0.6630109670987039,
"repo_name": "uehara1414/serverctl-prototype",
"id": "b8bef0da994d1a298ffc5c9d854aff517270cd0b",
"size": "1003",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "serverctl/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4918"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "32067"
}
],
"symlink_target": ""
}
|
"""Example of streaming a file and printing status updates.
python stream.py 10.0.0.4 file.mp3
"""
import asyncio
import sys
import pyatv
from pyatv.interface import Playing, PushListener
LOOP = asyncio.get_event_loop()
class PushUpdatePrinter(PushListener):
"""Print push updates to console."""
def playstatus_update(self, updater, playstatus: Playing) -> None:
"""Inform about changes to what is currently playing."""
print(30 * "-" + "\n", playstatus)
def playstatus_error(self, updater, exception: Exception) -> None:
"""Inform about an error when updating play status."""
print("Error:", exception)
async def stream_with_push_updates(
address: str, filename: str, loop: asyncio.AbstractEventLoop
):
"""Find a device and print what is playing."""
print("* Discovering device on network...")
atvs = await pyatv.scan(loop, hosts=[address], timeout=5)
if not atvs:
print("* Device found", file=sys.stderr)
return
conf = atvs[0]
print("* Connecting to", conf.address)
atv = await pyatv.connect(conf, loop)
listener = PushUpdatePrinter()
atv.push_updater.listener = listener
atv.push_updater.start()
try:
print("* Starting to stream", filename)
await atv.stream.stream_file(filename)
await asyncio.sleep(1)
finally:
atv.close()
if __name__ == "__main__":
LOOP.run_until_complete(stream_with_push_updates(sys.argv[1], sys.argv[2], LOOP))
|
{
"content_hash": "49ea5d186f06b7d2fe0e4b5c344b95ae",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 85,
"avg_line_length": 26.803571428571427,
"alnum_prop": 0.6568954030646236,
"repo_name": "postlund/pyatv",
"id": "a6a0a465764b64edd1d806b6266630e091cdc001",
"size": "1501",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/stream.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "456"
},
{
"name": "Python",
"bytes": "1432120"
},
{
"name": "Shell",
"bytes": "2108"
}
],
"symlink_target": ""
}
|
from urllib import parse
from oslo_log import log as logging
from oslo_serialization import jsonutils
import requests
LOG = logging.getLogger(__name__)
class APIResponse(object):
"""Decoded API Response
    This provides a decoded version of the Requests response which
    includes a json decoded body, far more convenient for testing that
    returned structures are correct, or for using parts of returned
    structures in tests.
    This class is a simple wrapper around dictionaries for API
    responses in tests. It includes extra attributes (status, content,
    headers) so that they can be inspected in addition to the decoded body.
All json responses from Nova APIs are dictionary compatible, or
blank, so other possible base classes are not needed.
"""
status = 200
"""The HTTP status code as an int"""
content = ""
"""The Raw HTTP response body as a string"""
body = {}
"""The decoded json body as a dictionary"""
headers = {}
"""Response headers as a dictionary"""
def __init__(self, response):
"""Construct an API response from a Requests response
:param response: a ``requests`` library response
"""
super(APIResponse, self).__init__()
self.status = response.status_code
self.content = response.content
if self.content:
# The Compute API and Placement API handle error responses a bit
# differently so we need to check the content-type header to
# figure out what to do.
content_type = response.headers.get('content-type')
if 'application/json' in content_type:
self.body = response.json()
elif 'text/html' in content_type:
self.body = response.text
else:
raise ValueError('Unexpected response content-type: %s' %
content_type)
self.headers = response.headers
def __str__(self):
# because __str__ falls back to __repr__ we can still use repr
# on self but add in the other attributes.
return "<Response body:%r, status_code:%s>" % (self.body, self.status)
class OpenStackApiException(Exception):
def __init__(self, message=None, response=None):
self.response = response
if not message:
message = 'Unspecified error'
if response:
_status = response.status_code
_body = response.content
message = ('%(message)s\nStatus Code: %(_status)s\n'
'Body: %(_body)s' %
{'message': message, '_status': _status,
'_body': _body})
super(OpenStackApiException, self).__init__(message)
class OpenStackApiAuthenticationException(OpenStackApiException):
def __init__(self, response=None, message=None):
if not message:
message = "Authentication error"
super(OpenStackApiAuthenticationException, self).__init__(message,
response)
class OpenStackApiAuthorizationException(OpenStackApiException):
def __init__(self, response=None, message=None):
if not message:
message = "Authorization error"
super(OpenStackApiAuthorizationException, self).__init__(message,
response)
class OpenStackApiNotFoundException(OpenStackApiException):
def __init__(self, response=None, message=None):
if not message:
message = "Item not found"
super(OpenStackApiNotFoundException, self).__init__(message, response)
class TestOpenStackClient(object):
"""Simple OpenStack API Client.
This is a really basic OpenStack API client that is under our control,
so we can make changes / insert hooks for testing
"""
def __init__(self, auth_user, base_url, project_id=None):
super(TestOpenStackClient, self).__init__()
self.auth_user = auth_user
self.base_url = base_url
if project_id is None:
self.project_id = "6f70656e737461636b20342065766572"
else:
self.project_id = project_id
self.microversion = None
def request(self, url, method='GET', body=None, headers=None):
_headers = {'Content-Type': 'application/json'}
_headers.update(headers or {})
response = requests.request(method, url, data=body, headers=_headers)
return response
def api_request(self, relative_uri, check_response_status=None,
strip_version=False, **kwargs):
base_uri = self.base_url
if strip_version:
# The base_uri is either http://%(host)s:%(port)s/%(api_version)s
# or http://%(host)s:%(port)s/%(api_version)s/%(project_id)s
# NOTE(efried): Using urlparse was not easier :)
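            # e.g. (illustrative) 'http://127.0.0.1:8774/v2.1/<project_id>'
            # is reduced to 'http://127.0.0.1:8774/<project_id>'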
chunked = base_uri.split('/')
base_uri = '/'.join(chunked[:3])
# Restore the project ID if present
if len(chunked) == 5:
base_uri += '/' + chunked[-1]
full_uri = '%s/%s' % (base_uri, relative_uri)
headers = kwargs.setdefault('headers', {})
if ('X-OpenStack-Nova-API-Version' in headers or
'OpenStack-API-Version' in headers):
raise Exception('Microversion should be set via '
'microversion attribute in API client.')
elif self.microversion:
headers['X-OpenStack-Nova-API-Version'] = self.microversion
headers['OpenStack-API-Version'] = 'compute %s' % self.microversion
headers.setdefault('X-Auth-User', self.auth_user)
headers.setdefault('X-User-Id', self.auth_user)
headers.setdefault('X-Auth-Project-Id', self.project_id)
response = self.request(full_uri, **kwargs)
http_status = response.status_code
LOG.debug("%(relative_uri)s => code %(http_status)s",
{'relative_uri': relative_uri, 'http_status': http_status})
if check_response_status:
if http_status not in check_response_status:
if http_status == 404:
raise OpenStackApiNotFoundException(response=response)
elif http_status == 401:
raise OpenStackApiAuthorizationException(response=response)
else:
raise OpenStackApiException(
message="Unexpected status code: %s" % response.text,
response=response)
return response
def _decode_json(self, response):
resp = APIResponse(status=response.status_code)
if response.content:
resp.body = jsonutils.loads(response.content)
return resp
def api_get(self, relative_uri, **kwargs):
kwargs.setdefault('check_response_status', [200])
return APIResponse(self.api_request(relative_uri, **kwargs))
def api_post(self, relative_uri, body, **kwargs):
kwargs['method'] = 'POST'
if body:
headers = kwargs.setdefault('headers', {})
headers['Content-Type'] = 'application/json'
kwargs['body'] = jsonutils.dumps(body)
kwargs.setdefault('check_response_status', [200, 201, 202, 204])
return APIResponse(self.api_request(relative_uri, **kwargs))
def api_put(self, relative_uri, body, **kwargs):
kwargs['method'] = 'PUT'
if body:
headers = kwargs.setdefault('headers', {})
headers['Content-Type'] = 'application/json'
kwargs['body'] = jsonutils.dumps(body)
kwargs.setdefault('check_response_status', [200, 202, 204])
return APIResponse(self.api_request(relative_uri, **kwargs))
def api_delete(self, relative_uri, **kwargs):
kwargs['method'] = 'DELETE'
kwargs.setdefault('check_response_status', [200, 202, 204])
return APIResponse(self.api_request(relative_uri, **kwargs))
#####################################
#
# Convenience methods
#
    # The following is a set of convenience methods to get well-known
    # resources; they can be helpful in setting up resources in
    # tests. All of these convenience methods throw exceptions if they
# get a non 20x status code, so will appropriately abort tests if
# they fail.
#
# They all return the most relevant part of their response body as
# decoded data structure.
#
#####################################
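    # For example, get_server() below returns the decoded 'server' dict from
    # a 200 response, while a 404 raises OpenStackApiNotFoundException.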
def get_server(self, server_id):
return self.api_get('/servers/%s' % server_id).body['server']
def get_servers(self, detail=True, search_opts=None):
rel_url = '/servers/detail' if detail else '/servers'
if search_opts is not None:
qparams = {}
for opt, val in search_opts.items():
qparams[opt] = val
if qparams:
query_string = "?%s" % parse.urlencode(qparams)
rel_url += query_string
return self.api_get(rel_url).body['servers']
def post_server(self, server):
response = self.api_post('/servers', server).body
if 'reservation_id' in response:
return response
else:
return response['server']
def put_server(self, server_id, server):
return self.api_put('/servers/%s' % server_id, server).body
def post_server_action(self, server_id, data, **kwargs):
return self.api_post(
'/servers/%s/action' % server_id, data, **kwargs).body
def delete_server(self, server_id):
return self.api_delete('/servers/%s' % server_id)
def force_down_service(self, host, binary, forced_down):
req = {
"host": host,
"binary": binary,
"forced_down": forced_down
}
return self.api_put('/os-services/force-down', req).body['service']
def get_image(self, image_id):
return self.api_get('/images/%s' % image_id).body['image']
def get_images(self, detail=True):
rel_url = '/images/detail' if detail else '/images'
return self.api_get(rel_url).body['images']
def post_image(self, image):
return self.api_post('/images', image).body['image']
def delete_image(self, image_id):
return self.api_delete('/images/%s' % image_id)
def put_image_meta_key(self, image_id, key, value):
"""Creates or updates a given image metadata key/value pair."""
req_body = {
'meta': {
key: value
}
}
return self.api_put('/images/%s/metadata/%s' % (image_id, key),
req_body)
def get_flavor(self, flavor_id):
return self.api_get('/flavors/%s' % flavor_id).body['flavor']
def get_flavors(self, detail=True):
rel_url = '/flavors/detail' if detail else '/flavors'
return self.api_get(rel_url).body['flavors']
def post_flavor(self, flavor):
return self.api_post('/flavors', flavor).body['flavor']
def delete_flavor(self, flavor_id):
return self.api_delete('/flavors/%s' % flavor_id)
def get_extra_specs(self, flavor_id):
return self.api_get(
'/flavors/%s/os-extra_specs' % flavor_id
).body['extra_specs']
def get_extra_spec(self, flavor_id, spec_id):
return self.api_get(
'/flavors/%s/os-extra_specs/%s' % (flavor_id, spec_id),
).body
def post_extra_spec(self, flavor_id, body, **_params):
url = '/flavors/%s/os-extra_specs' % flavor_id
if _params:
query_string = '?%s' % parse.urlencode(list(_params.items()))
url += query_string
return self.api_post(url, body)
def put_extra_spec(self, flavor_id, spec_id, body, **_params):
url = '/flavors/%s/os-extra_specs/%s' % (flavor_id, spec_id)
if _params:
query_string = '?%s' % parse.urlencode(list(_params.items()))
url += query_string
return self.api_put(url, body)
def get_volume(self, volume_id):
return self.api_get('/os-volumes/%s' % volume_id).body['volume']
def get_volumes(self, detail=True):
rel_url = '/os-volumes/detail' if detail else '/os-volumes'
return self.api_get(rel_url).body['volumes']
def post_volume(self, volume):
return self.api_post('/os-volumes', volume).body['volume']
def delete_volume(self, volume_id):
return self.api_delete('/os-volumes/%s' % volume_id)
def get_snapshot(self, snap_id):
return self.api_get('/os-snapshots/%s' % snap_id).body['snapshot']
def get_snapshots(self, detail=True):
rel_url = '/os-snapshots/detail' if detail else '/os-snapshots'
return self.api_get(rel_url).body['snapshots']
def post_snapshot(self, snapshot):
return self.api_post('/os-snapshots', snapshot).body['snapshot']
def delete_snapshot(self, snap_id):
return self.api_delete('/os-snapshots/%s' % snap_id)
def get_server_volume(self, server_id, volume_id):
return self.api_get('/servers/%s/os-volume_attachments/%s' %
(server_id, volume_id)
).body['volumeAttachment']
def get_server_volumes(self, server_id):
return self.api_get('/servers/%s/os-volume_attachments' %
(server_id)).body['volumeAttachments']
def post_server_volume(self, server_id, volume_attachment):
return self.api_post('/servers/%s/os-volume_attachments' %
(server_id), volume_attachment
).body['volumeAttachment']
def put_server_volume(self, server_id, original_volume_id, volume_id):
return self.api_put('/servers/%s/os-volume_attachments/%s' %
(server_id, original_volume_id),
{"volumeAttachment": {"volumeId": volume_id}})
def delete_server_volume(self, server_id, volume_id):
return self.api_delete('/servers/%s/os-volume_attachments/%s' %
(server_id, volume_id))
def post_server_metadata(self, server_id, metadata):
post_body = {'metadata': {}}
post_body['metadata'].update(metadata)
return self.api_post('/servers/%s/metadata' % server_id,
post_body).body['metadata']
def delete_server_metadata(self, server_id, key):
return self.api_delete('/servers/%s/metadata/%s' %
(server_id, key))
def get_server_groups(self, all_projects=None):
if all_projects:
return self.api_get(
'/os-server-groups?all_projects').body['server_groups']
else:
return self.api_get('/os-server-groups').body['server_groups']
def get_server_group(self, group_id):
return self.api_get('/os-server-groups/%s' %
group_id).body['server_group']
def post_server_groups(self, group):
response = self.api_post('/os-server-groups', {"server_group": group})
return response.body['server_group']
def delete_server_group(self, group_id):
self.api_delete('/os-server-groups/%s' % group_id)
def create_server_external_events(self, events):
body = {'events': events}
return self.api_post('/os-server-external-events', body).body['events']
def get_instance_actions(self, server_id):
return self.api_get('/servers/%s/os-instance-actions' %
(server_id)).body['instanceActions']
def get_instance_action_details(self, server_id, request_id):
return self.api_get('/servers/%s/os-instance-actions/%s' %
(server_id, request_id)).body['instanceAction']
def post_aggregate(self, aggregate):
return self.api_post('/os-aggregates', aggregate).body['aggregate']
def delete_aggregate(self, aggregate_id):
self.api_delete('/os-aggregates/%s' % aggregate_id)
def add_host_to_aggregate(self, aggregate_id, host):
return self.api_post('/os-aggregates/%s/action' % aggregate_id,
{'add_host': {'host': host}})
def remove_host_from_aggregate(self, aggregate_id, host):
return self.api_post('/os-aggregates/%s/action' % aggregate_id,
{'remove_host': {'host': host}})
def get_limits(self):
return self.api_get('/limits').body['limits']
def get_server_tags(self, server_id):
"""Get the tags on the given server.
:param server_id: The server uuid
:return: The list of tags from the response
"""
return self.api_get('/servers/%s/tags' % server_id).body['tags']
def put_server_tags(self, server_id, tags):
"""Put (or replace) a list of tags on the given server.
Returns the list of tags from the response.
"""
return self.api_put('/servers/%s/tags' % server_id,
{'tags': tags}).body['tags']
def get_port_interfaces(self, server_id):
return self.api_get('/servers/%s/os-interface' %
(server_id)).body['interfaceAttachments']
def attach_interface(self, server_id, post):
return self.api_post('/servers/%s/os-interface' % server_id, post)
def detach_interface(self, server_id, port_id):
return self.api_delete('/servers/%s/os-interface/%s' %
(server_id, port_id))
def get_services(self, binary=None, host=None):
url = '/os-services?'
if binary:
url += 'binary=%s&' % binary
if host:
url += 'host=%s&' % host
return self.api_get(url).body['services']
def put_service(self, service_id, req):
return self.api_put(
'/os-services/%s' % service_id, req).body['service']
def post_keypair(self, keypair):
return self.api_post('/os-keypairs', keypair).body['keypair']
def delete_keypair(self, keypair_name):
self.api_delete('/os-keypairs/%s' % keypair_name)
def post_aggregate_action(self, aggregate_id, body):
return self.api_post(
'/os-aggregates/%s/action' % aggregate_id, body).body['aggregate']
def get_active_migrations(self, server_id):
return self.api_get('/servers/%s/migrations' %
server_id).body['migrations']
def get_migrations(self, user_id=None, project_id=None):
url = '/os-migrations?'
if user_id:
url += 'user_id=%s&' % user_id
if project_id:
url += 'project_id=%s&' % project_id
return self.api_get(url).body['migrations']
def force_complete_migration(self, server_id, migration_id):
return self.api_post(
'/servers/%s/migrations/%s/action' % (server_id, migration_id),
{'force_complete': None})
def delete_migration(self, server_id, migration_id):
return self.api_delete(
'/servers/%s/migrations/%s' % (server_id, migration_id))
def put_aggregate(self, aggregate_id, body):
return self.api_put(
'/os-aggregates/%s' % aggregate_id, body).body['aggregate']
def get_hypervisor_stats(self):
return self.api_get(
'/os-hypervisors/statistics').body['hypervisor_statistics']
def get_service_id(self, binary_name):
for service in self.get_services():
if service['binary'] == binary_name:
return service['id']
raise OpenStackApiNotFoundException('Service cannot be found.')
def put_service_force_down(self, service_id, forced_down):
req = {
'forced_down': forced_down
}
return self.api_put('os-services/%s' % service_id, req).body['service']
def get_server_diagnostics(self, server_id):
return self.api_get('/servers/%s/diagnostics' % server_id).body
def get_quota_detail(self, project_id=None, user_id=None):
if not project_id:
project_id = self.project_id
url = '/os-quota-sets/%s/detail'
if user_id:
url += '?user_id=%s' % user_id
return self.api_get(url % project_id).body['quota_set']
def update_quota(self, quotas, project_id=None, user_id=None):
if not project_id:
project_id = self.project_id
url = '/os-quota-sets/%s'
if user_id:
url += '?user_id=%s' % user_id
body = {'quota_set': {}}
body['quota_set'].update(quotas)
return self.api_put(url % project_id, body).body['quota_set']
|
{
"content_hash": "3bfc4cfdd6fff369fc2dd9f6e38b71ca",
"timestamp": "",
"source": "github",
"line_count": 545,
"max_line_length": 79,
"avg_line_length": 38.02201834862385,
"alnum_prop": 0.5848373709101438,
"repo_name": "klmitch/nova",
"id": "21028d56017be05136d323e9602840ac0d302523",
"size": "21342",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/tests/functional/api/client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "851"
},
{
"name": "HTML",
"bytes": "1386"
},
{
"name": "PHP",
"bytes": "44222"
},
{
"name": "Python",
"bytes": "22328409"
},
{
"name": "Shell",
"bytes": "29138"
},
{
"name": "Smarty",
"bytes": "405441"
}
],
"symlink_target": ""
}
|
r"""
QSeq format (:mod:`skbio.io.qseq`)
==================================
.. currentmodule:: skbio.io.qseq
The QSeq format (`qseq`) is a record-based, plain text output format produced
by some DNA sequencers for storing biological sequence data, quality scores,
per-sequence filtering information, and run-specific metadata.
Format Support
--------------
**Has Sniffer: Yes**
+------+------+---------------------------------------------------------------+
|Reader|Writer| Object Class |
+======+======+===============================================================+
|Yes |No |generator of :mod:`skbio.sequence.Sequence` objects |
+------+------+---------------------------------------------------------------+
|Yes |No |:mod:`skbio.alignment.SequenceCollection` |
+------+------+---------------------------------------------------------------+
|Yes |No |:mod:`skbio.sequence.Sequence` |
+------+------+---------------------------------------------------------------+
|Yes |No |:mod:`skbio.sequence.DNA` |
+------+------+---------------------------------------------------------------+
|Yes |No |:mod:`skbio.sequence.RNA` |
+------+------+---------------------------------------------------------------+
|Yes |No |:mod:`skbio.sequence.Protein` |
+------+------+---------------------------------------------------------------+
Format Specification
--------------------
A QSeq file is composed of single-line records, delimited by tabs. There are
11 fields in a record:
- Machine name
- Run number
- Lane number (positive int)
- Tile number (positive int)
- X coordinate (integer)
- Y coordinate (integer)
- Index
- Read number (1-3)
- Sequence data (typically IUPAC characters)
- Quality scores (quality scores encoded as printable ASCII)
- Filter boolean (1 if sequence has passed CASAVA's filter, 0 otherwise)
For more details please refer to the CASAVA documentation [1]_.
.. note:: scikit-bio allows for the filter field to be omitted, but it is not
clear if this is part of the original format specification.
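For illustration, the first record from the Examples section below maps onto
these fields as follows (fields are tab-separated in a real file)::
    illumina  1    3     34    -30  30  0      1     ACG....ACGTAC   ruBBBBrBCEFGH   1
    machine   run  lane  tile  x    y   index  read  sequence        quality         filter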
Format Parameters
-----------------
The following parameters are the same as in FASTQ format
(:mod:`skbio.io.fastq`):
- ``variant``: see ``variant`` parameter in FASTQ format
- ``phred_offset``: see ``phred_offset`` parameter in FASTQ format
The following additional parameters are the same as in FASTA format
(:mod:`skbio.io.fasta`):
- ``constructor``: see ``constructor`` parameter in FASTA format
- ``seq_num``: see ``seq_num`` parameter in FASTA format
SequenceCollection and Generators Only
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- ``filter``: If `True`, excludes sequences that did not pass filtering
(i.e., filter field is 0). Default is `True`.
Examples
--------
Suppose we have the following QSeq file::
illumina 1 3 34 -30 30 0 1 ACG....ACGTAC ruBBBBrBCEFGH 1
illumina 1 3 34 30 -30 0 1 CGGGCATTGCA CGGGCasdGCA 0
illumina 1 3 35 -30 30 0 2 ACGTA.AATAAAC geTaAafhwqAAf 1
illumina 1 3 35 30 -30 0 3 CATTTAGGA.TGCA tjflkAFnkKghvM 0
Let's define this file in-memory as a ``StringIO``, though this could be a real
file path, file handle, or anything that's supported by scikit-bio's I/O
registry in practice:
>>> from StringIO import StringIO
>>> fs = '\n'.join([
... 'illumina\t1\t3\t34\t-30\t30\t0\t1\tACG....ACGTAC\truBBBBrBCEFGH\t1',
... 'illumina\t1\t3\t34\t30\t-30\t0\t1\tCGGGCATTGCA\tCGGGCasdGCA\t0',
... 'illumina\t1\t3\t35\t-30\t30\t0\t2\tACGTA.AATAAAC\tgeTaAafhwqAAf\t1',
... 'illumina\t1\t3\t35\t30\t-30\t0\t3\tCATTTAGGA.TGCA\ttjflkAFnkKghvM\t0'
... ])
>>> fh = StringIO(fs)
To load the sequences into a ``SequenceCollection``, we run:
>>> from skbio import SequenceCollection
>>> sc = SequenceCollection.read(fh, variant='illumina1.3')
>>> sc
<SequenceCollection: n=2; mean +/- std length=13.00 +/- 0.00>
Note that only two sequences were loaded because the QSeq reader filters out
sequences whose filter field is 0 (unless ``filter=False`` is supplied).
References
----------
.. [1] http://biowulf.nih.gov/apps/CASAVA_UG_15011196B.pdf
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from future.builtins import zip, range
from skbio.io import register_reader, register_sniffer, QSeqFormatError
from skbio.io._base import _decode_qual_to_phred, _get_nth_sequence
from skbio.alignment import SequenceCollection
from skbio.sequence import Sequence, DNA, RNA, Protein
_default_phred_offset = None
_default_variant = None
_will_filter = True
@register_sniffer('qseq')
def _qseq_sniffer(fh):
empty = True
try:
for _, line in zip(range(10), fh):
_record_parser(line)
empty = False
return not empty, {}
except QSeqFormatError:
return False, {}
@register_reader('qseq')
def _qseq_to_generator(fh, constructor=Sequence, filter=_will_filter,
phred_offset=_default_phred_offset,
variant=_default_variant, **kwargs):
for line in fh:
(machine_name, run, lane, tile, x, y, index, read, seq, raw_qual,
filtered) = _record_parser(line)
if not filter or not filtered:
phred = _decode_qual_to_phred(raw_qual, variant, phred_offset)
seq_id = '%s_%s:%s:%s:%s:%s#%s/%s' % (
machine_name, run, lane, tile, x, y, index, read)
yield constructor(seq, metadata={'id': seq_id,
'machine_name': machine_name,
'run_number': int(run),
'lane_number': int(lane),
'tile_number': int(tile),
'x': int(x),
'y': int(y),
'index': int(index),
'read_number': int(read)},
positional_metadata={'quality': phred},
**kwargs)
@register_reader('qseq', SequenceCollection)
def _qseq_to_sequence_collection(fh, constructor=Sequence,
filter=_will_filter,
phred_offset=_default_phred_offset,
variant=_default_variant):
return SequenceCollection(list(_qseq_to_generator(
fh, constructor=constructor, filter=filter, phred_offset=phred_offset,
variant=variant)))
@register_reader('qseq', Sequence)
def _qseq_to_biological_sequence(fh, seq_num=1,
phred_offset=_default_phred_offset,
variant=_default_variant):
return _get_nth_sequence(_qseq_to_generator(fh, filter=False,
phred_offset=phred_offset, variant=variant,
constructor=Sequence), seq_num)
@register_reader('qseq', DNA)
def _qseq_to_dna_sequence(fh, seq_num=1,
phred_offset=_default_phred_offset,
variant=_default_variant, **kwargs):
return _get_nth_sequence(_qseq_to_generator(fh, filter=False,
phred_offset=phred_offset, variant=variant,
constructor=DNA, **kwargs),
seq_num)
@register_reader('qseq', RNA)
def _qseq_to_rna_sequence(fh, seq_num=1,
phred_offset=_default_phred_offset,
variant=_default_variant, **kwargs):
return _get_nth_sequence(_qseq_to_generator(fh, filter=False,
phred_offset=phred_offset, variant=variant,
constructor=RNA, **kwargs),
seq_num)
@register_reader('qseq', Protein)
def _qseq_to_protein_sequence(fh, seq_num=1,
phred_offset=_default_phred_offset,
variant=_default_variant, **kwargs):
return _get_nth_sequence(_qseq_to_generator(fh, filter=False,
phred_offset=phred_offset, variant=variant,
constructor=Protein, **kwargs),
seq_num)
def _record_parser(line):
fields = line.rstrip('\n')
if fields:
fields = fields.split('\t')
else:
raise QSeqFormatError('Found blank line.')
f_len = len(fields)
if not (10 <= f_len <= 11):
raise QSeqFormatError('Expected 10 or 11 fields, found %d.' % f_len)
    # If the filter field was omitted, assume that it passed filtering:
if f_len == 10:
fields.append('1')
    (machine, run, lane, tile, x, y, index, read, seq, raw_qual,
filter) = fields
_test_fields([('filter', filter)], lambda x: x in '01',
"0 or 1")
_test_fields([('read', read)], lambda x: x in '123',
"in the range [1, 3]")
_test_fields([('x', x), ('y', y)], lambda x: int(x) is not None,
"an integer")
_test_fields([('lane', lane), ('tile', tile)], lambda x: int(x) >= 0,
"a positive integer")
    return (machine, run, lane, tile, x, y, index, read, seq, raw_qual,
filter == '0')
def _test_fields(iterkv, test, efrag):
try:
for k, v in iterkv:
if not test(v):
raise ValueError()
except ValueError:
raise QSeqFormatError('Field %r is not %s.' % (k, efrag))
|
{
"content_hash": "8dd6f5f0b843bb998597e8cd7fe5be03",
"timestamp": "",
"source": "github",
"line_count": 255,
"max_line_length": 79,
"avg_line_length": 39.34901960784314,
"alnum_prop": 0.5262108829978075,
"repo_name": "Achuth17/scikit-bio",
"id": "8609a528af6237478b6ac6708c8ef0bfd67030e7",
"size": "10034",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "skbio/io/qseq.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "39087"
},
{
"name": "CSS",
"bytes": "4379"
},
{
"name": "Groff",
"bytes": "259"
},
{
"name": "Makefile",
"bytes": "585"
},
{
"name": "Python",
"bytes": "1852175"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from builtins import range
from future import standard_library
standard_library.install_aliases()
import sys
PYTHON_VERSION = sys.version_info[:3]
PY2 = (PYTHON_VERSION[0] == 2)
if PY2:
if PYTHON_VERSION < (2, 7, 9):
raise Exception('Must use Python 2.7.9 or later')
elif PYTHON_VERSION < (3, 4):
raise Exception('Must use Python 3.4 or later')
from pprint import pprint
import hpOneView as hpov
def acceptEULA(con):
# See if we need to accept the EULA before we try to log in
con.get_eula_status()
try:
if con.get_eula_status() is True:
print('EULA display needed')
con.set_eula('no')
except Exception as e:
print('EXCEPTION:')
print(e)
def login(con, credential):
    # Log in with the given credentials
try:
con.login(credential)
except:
print('Login failed')
def getservers(srv):
ret = srv.get_servers()
pprint(ret)
def main():
parser = argparse.ArgumentParser(add_help=True,
formatter_class=argparse.RawTextHelpFormatter,
description='''
Display Server Resources
Usage: ''')
parser.add_argument('-a', dest='host', required=True,
help='''
HPE OneView Appliance hostname or IP address''')
parser.add_argument('-u', dest='user', required=False,
default='Administrator',
help='''
HPE OneView Username''')
parser.add_argument('-p', dest='passwd', required=True,
help='''
HPE OneView Password''')
parser.add_argument('-c', dest='cert', required=False,
help='''
Trusted SSL Certificate Bundle in PEM (Base64 Encoded DER) Format''')
parser.add_argument('-y', dest='proxy', required=False,
help='''
    Proxy (host:port format)''')
parser.add_argument('-j', dest='domain', required=False,
default='Local',
help='''
HPE OneView Authorized Login Domain''')
args = parser.parse_args()
credential = {'authLoginDomain': args.domain.upper(), 'userName': args.user, 'password': args.passwd}
con = hpov.connection(args.host)
srv = hpov.servers(con)
if args.proxy:
con.set_proxy(args.proxy.split(':')[0], args.proxy.split(':')[1])
if args.cert:
con.set_trusted_ssl_bundle(args.cert)
login(con, credential)
acceptEULA(con)
getservers(srv)
if __name__ == '__main__':
import sys
import argparse
sys.exit(main())
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
|
{
"content_hash": "ca07be0f406b6265ef491211397e7ea5",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 105,
"avg_line_length": 29.15625,
"alnum_prop": 0.5973561986423723,
"repo_name": "andreadean5/python-hpOneView",
"id": "0b293d3441590df9b630f5483af1e4d1b5e6cb2b",
"size": "3956",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/scripts/get-servers.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "920844"
}
],
"symlink_target": ""
}
|
extensions = [
'reno.sphinxext',
'openstackdocstheme',
]
# openstackdocstheme options
openstackdocs_repo_name = 'openstack/manila-ui'
openstackdocs_auto_name = False
openstackdocs_bug_project = 'manila-ui'
openstackdocs_bug_tag = 'release notes'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'manila-ui Release Notes'
copyright = '2016, Manila Developers'
# Release notes are version independent
release = ''
# The short X.Y version.
version = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'openstackdocs'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ManilaUIReleaseNotesdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'ManilaUIReleaseNotes.tex',
'manila-ui Release Notes Documentation',
'Manila Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'manilauireleasenotes',
'manila-ui Release Notes Documentation',
['Manila Developers'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ManilaUIReleaseNotes',
'manila-ui Release Notes Documentation',
'Manila Developers', 'ManilaUIReleaseNotes',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# -- Options for Internationalization output ------------------------------
locale_dirs = ['locale/']
|
{
"content_hash": "a1e08d1457db9b3af27bbe56ccb2b684",
"timestamp": "",
"source": "github",
"line_count": 237,
"max_line_length": 79,
"avg_line_length": 31.143459915611814,
"alnum_prop": 0.6988212979271101,
"repo_name": "openstack/manila-ui",
"id": "ed96703c024bc02cf9e9f605f78a32f99e798307",
"size": "8964",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "releasenotes/source/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "72666"
},
{
"name": "Python",
"bytes": "756045"
},
{
"name": "Shell",
"bytes": "20977"
}
],
"symlink_target": ""
}
|
"""Define metadata constants."""
LABEL_COLUMN = 'labelArray'
KEY_COLUMN = 'fullVisitorId'
# columns to omit from model features
NON_FEATURE_COLUMNS = [LABEL_COLUMN, KEY_COLUMN]
NUM_INTERVALS = 4 # number of bounded churn duration intervals
SEED = 123
|
{
"content_hash": "2f68f54676a06cdda0ff5a61d67be608",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 63,
"avg_line_length": 25.5,
"alnum_prop": 0.7450980392156863,
"repo_name": "GoogleCloudPlatform/professional-services",
"id": "085a6aba2415fd3765bb263e82edf75b0706f9b2",
"size": "852",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "examples/cloudml-churn-prediction/trainer/trainer/metadata.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "117994"
},
{
"name": "C++",
"bytes": "174"
},
{
"name": "CSS",
"bytes": "13405"
},
{
"name": "Component Pascal",
"bytes": "798"
},
{
"name": "Dockerfile",
"bytes": "15093"
},
{
"name": "Go",
"bytes": "352968"
},
{
"name": "HCL",
"bytes": "204776"
},
{
"name": "HTML",
"bytes": "1229668"
},
{
"name": "Java",
"bytes": "338810"
},
{
"name": "JavaScript",
"bytes": "59905"
},
{
"name": "Jinja",
"bytes": "60083"
},
{
"name": "Makefile",
"bytes": "14129"
},
{
"name": "Python",
"bytes": "2250081"
},
{
"name": "Scala",
"bytes": "978327"
},
{
"name": "Shell",
"bytes": "109299"
},
{
"name": "Smarty",
"bytes": "19839"
},
{
"name": "TypeScript",
"bytes": "147194"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, division, absolute_import
from marvin.web.controllers.galaxy import make_nsa_dict
from marvin.web.controllers.galaxy import getWebMap
from marvin.tools.cube import Cube
from marvin.tests.conftest import set_the_config
import pytest
@pytest.fixture()
def cube(galaxy, mode):
set_the_config(galaxy.release)
cube = Cube(plateifu=galaxy.plateifu, mode=mode, release=galaxy.release)
cube.exp_nsa_plotcols = galaxy.nsa_data
return cube
@pytest.fixture()
def params(galaxy):
return {'release': galaxy.release}
@pytest.mark.parametrize('page', [('galaxy_page', 'Galaxy:index')], ids=['galaxy'], indirect=True)
class TestGalaxyPage(object):
def test_assert_galaxy_template_used(self, page, get_templates):
page.load_page('get', page.url)
assert '' == page.data
template, context = get_templates[0]
assert 'galaxy.html' == template.name, 'Template used should be galaxy.html'
@pytest.mark.parametrize('page', [('galaxy_page', 'initnsaplot')], ids=['initnsa'], indirect=True)
class TestNSA(object):
#@marvin_test_if(mark='skip', cube=dict(nsa=[None]))
def test_nsadict_correct(self, cube, page):
nsa, cols = make_nsa_dict(cube.nsa)
for value in cube.exp_nsa_plotcols.values():
assert set(value.keys()).issubset(set(cols))
page.assert_dict_contains_subset(value, nsa)
page.assertListIn(value.keys(), cols)
@pytest.mark.skip('these magically worked when they should not have and now they actually do not')
def test_initnsa_method_not_allowed(self, page, params, get_templates):
page.load_page('get', page.url, params=params)
template, context = get_templates[0]
assert template.name == 'errors/method_not_allowed.html'
def test_initnsa_no_plateifu(self, page, get_templates):
errmsg = 'Field may not be null.'
page.load_page('post', page.url)
template, context = get_templates[0]
page.route_no_valid_webparams(template, context, 'plateifu', reqtype='post', errmsg=errmsg)
class TestWebMap(object):
@pytest.mark.parametrize('parameter, channel',
[('emline_gflux', 'ha_6564'),
('emline_gsigma', 'ha_6564'),
('stellar_sigma', None)],
ids=['gflux', 'gsigma', 'stellarsigma'])
def test_getmap(self, cube, parameter, channel):
webmap, mapmsg = getWebMap(cube, parameter=parameter, channel=channel)
assert isinstance(webmap, dict)
assert 'values' in webmap
assert isinstance(webmap['values'], list)
assert parameter in mapmsg
if 'sigma' in parameter and cube.release != 'MPL-6':
assert 'Corrected' in mapmsg
def test_getmap_failed(self, cube):
webmap, mapmsg = getWebMap(cube, parameter='crap')
assert webmap is None
assert 'Could not get map' in mapmsg
|
{
"content_hash": "ceb30b847fcb56831ebde52e2b1d916a",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 102,
"avg_line_length": 36.95061728395062,
"alnum_prop": 0.6475108586702305,
"repo_name": "albireox/marvin",
"id": "d4a1ca019eeaabf5beec85b165fccea9059d1e0b",
"size": "3224",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/marvin/tests/web/test_galaxy.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "210343"
},
{
"name": "HTML",
"bytes": "68596"
},
{
"name": "JavaScript",
"bytes": "217699"
},
{
"name": "PLpgSQL",
"bytes": "1577"
},
{
"name": "Python",
"bytes": "1390874"
},
{
"name": "SQLPL",
"bytes": "141212"
},
{
"name": "Shell",
"bytes": "1150"
}
],
"symlink_target": ""
}
|
from ginga.misc import Bunch
__all__ = ['PluginError', 'GlobalPlugin', 'LocalPlugin']
class PluginError(Exception):
"""Plugin related error."""
pass
class BasePlugin(object):
"""Base class for all plugins."""
def __init__(self, fv):
super(BasePlugin, self).__init__()
self.fv = fv
self.logger = fv.logger
# Holds GUI widgets
self.w = Bunch.Bunch()
# def build_gui(self, container):
# """
# If a plugin defines this method, it will be called with a
# container object in which to build its GUI. It should finish
# by packing into this container. This will be called every
# time the plugin is activated.
# """
# pass
def start(self):
"""
This method is called to start the plugin.
It is called after build_gui().
"""
pass
def stop(self):
"""This method is called to stop the plugin."""
pass
def _help_docstring(self):
import inspect
# Insert section title at the beginning
plg_name = self.__class__.__name__
plg_mod = inspect.getmodule(self)
plg_doc = ('{}\n{}\n'.format(plg_name, '=' * len(plg_name)) +
plg_mod.__doc__)
self.fv.help_text(plg_name, plg_doc, text_kind='rst', trim_pfx=4)
def help(self):
"""Display help for the plugin."""
if not self.fv.gpmon.has_plugin('WBrowser'):
self._help_docstring()
return
self.fv.start_global_plugin('WBrowser')
# need to let GUI finish processing, it seems
self.fv.update_pending()
obj = self.fv.gpmon.get_plugin('WBrowser')
obj.show_help(plugin=self, no_url_callback=self._help_docstring)
class GlobalPlugin(BasePlugin):
"""Class to handle a global plugin."""
def __init__(self, fv):
super(GlobalPlugin, self).__init__(fv)
def redo(self, channel, image):
"""This method is called when an image is set in a channel."""
pass
def blank(self, channel):
"""This method is called when a channel is no longer displaying any object."""
pass
class LocalPlugin(BasePlugin):
"""Class to handle a local plugin."""
def __init__(self, fv, fitsimage):
super(LocalPlugin, self).__init__(fv)
self.fitsimage = fitsimage
# find our channel info
if self.fitsimage is not None:
self.chname = self.fv.get_channel_name(self.fitsimage)
self.channel = self.fv.get_channel(self.chname)
# TO BE DEPRECATED
self.chinfo = self.channel
def modes_off(self):
"""Turn off any mode user may be in."""
bm = self.fitsimage.get_bindmap()
bm.reset_mode(self.fitsimage)
def pause(self):
"""
This method is called when the plugin is defocused.
The plugin should disable any user input that it responds to.
"""
pass
def resume(self):
"""
This method is called when the plugin is focused.
The plugin should enable any user input that it responds to.
"""
pass
def redo(self):
"""
This method is called when a new image arrives in the channel
associated with the plugin. It can optionally redo whatever operation
it is doing.
"""
pass
def blank(self):
"""
This method is called when no object is displayed in the channel
associated with the plugin. It can optionally clear whatever operation
it is doing.
"""
pass
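# A minimal sketch of a (hypothetical) local plugin, illustrating how the
# lifecycle hooks above fit together -- start() runs after the GUI is built,
# redo() runs when a new image arrives, pause()/resume() bracket focus changes:
#
#     class MyInspector(LocalPlugin):
#         def start(self):
#             self.resume()
#         def redo(self):
#             image = self.fitsimage.get_image()
#             self.logger.info("new image arrived: %s", image)
#         def stop(self):
#             self.pause()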
# END
|
{
"content_hash": "3ceeb87fbe5574b5003ca704aed30268",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 86,
"avg_line_length": 28.286821705426355,
"alnum_prop": 0.5787887092354069,
"repo_name": "pllim/ginga",
"id": "c90fc41d0cfcac5a75e23334633bf8cb932e55d2",
"size": "3834",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "ginga/GingaPlugin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2781"
},
{
"name": "GLSL",
"bytes": "7344"
},
{
"name": "HTML",
"bytes": "2129"
},
{
"name": "JavaScript",
"bytes": "87198"
},
{
"name": "Jupyter Notebook",
"bytes": "2691970"
},
{
"name": "Makefile",
"bytes": "85"
},
{
"name": "Python",
"bytes": "4359761"
}
],
"symlink_target": ""
}
|
from unipath import Path
from lino.api import dd, rt
from lino_xl.lib.mailbox.models import get_new_mail
def objects():
Mailbox = rt.models.django_mailbox.Mailbox
mp = rt.settings.SITE.cache_dir.child("media", "mailbox")
rt.settings.SITE.makedirs_if_missing(mp)
dd.logger.info("Mailbox path is %s", mp)
for (protocol, name, origin) in dd.plugins.mailbox.mailbox_templates:
filename = mp.child(name)
origin.copy(filename)
yield Mailbox(
name=name,
uri=protocol + "://" + filename)
name = 'team.mbox'
origin = Path(__file__).parent.child(name)
filename = mp.child(name)
origin.copy(filename)
mbx = Mailbox(name=name, uri="mbox://" + filename)
yield mbx
get_new_mail()
|
{
"content_hash": "01214c3394b9cfe0d162de26bace11be",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 73,
"avg_line_length": 33.391304347826086,
"alnum_prop": 0.6393229166666666,
"repo_name": "khchine5/xl",
"id": "d3bd0d489f49913475597808f2c23b6f6ea073f1",
"size": "768",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lino_xl/lib/mailbox/fixtures/demo.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "186625"
},
{
"name": "HTML",
"bytes": "1412921"
},
{
"name": "JavaScript",
"bytes": "1630816"
},
{
"name": "PHP",
"bytes": "40437"
},
{
"name": "Python",
"bytes": "2113065"
}
],
"symlink_target": ""
}
|
"""
multicast.py
Created by Thomas Morin on 2014-06-23.
Copyright (c) 2014-2015 Orange. All rights reserved.
"""
from exabgp.protocol.ip import IP
from exabgp.bgp.message.update.nlri.qualifier import RouteDistinguisher
from exabgp.bgp.message.update.nlri.qualifier import EthernetTag
from exabgp.bgp.message.update.nlri.evpn.nlri import EVPN
# +---------------------------------------+
# | RD (8 octets) |
# +---------------------------------------+
# | Ethernet Tag ID (4 octets) |
# +---------------------------------------+
# | IP Address Length (1 octet) |
# +---------------------------------------+
# | Originating Router's IP Addr |
# | (4 or 16 octets) |
# +---------------------------------------+
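#
# With an IPv4 originating router's address the value field above therefore
# packs to 8 + 4 + 1 + 4 = 17 octets (29 octets with an IPv6 address).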
# ===================================================================== EVPNNLRI
@EVPN.register
class Multicast (EVPN):
CODE = 3
NAME = "Inclusive Multicast Ethernet Tag"
SHORT_NAME = "Multicast"
def __init__ (self, rd, etag, ip, packed=None,nexthop=None,action=None,addpath=None):
EVPN.__init__(self,action,addpath)
self.nexthop = nexthop
self.rd = rd
self.etag = etag
self.ip = ip
self._pack(packed)
def __ne__ (self, other):
return not self.__eq__(other)
def __str__ (self):
return "%s:%s:%s:%s" % (
self._prefix(),
self.rd._str(),
self.etag,
self.ip,
)
def __hash__ (self):
return hash((self.afi,self.safi,self.CODE,self.rd,self.etag,self.ip))
def _pack (self, packed=None):
if self._packed:
return self._packed
if packed:
self._packed = packed
return packed
self._packed = '%s%s%s%s' % (
self.rd.pack(),
self.etag.pack(),
chr(len(self.ip)*8),
self.ip.pack()
)
return self._packed
@classmethod
def unpack (cls, data):
rd = RouteDistinguisher.unpack(data[:8])
etag = EthernetTag.unpack(data[8:12])
iplen = ord(data[12])
if iplen not in (4*8,16*8):
raise Exception("IP len is %d, but EVPN route currently support only IPv4" % iplen)
ip = IP.unpack(data[13:13+iplen/8])
return cls(rd,etag,ip,data)
def json (self, compact=None):
content = ' "code": %d, ' % self.CODE
content += '"parsed": true, '
content += '"raw": "%s", ' % self._raw()
content += '"name": "%s", ' % self.NAME
content += '%s, ' % self.rd.json()
content += self.etag.json()
if self.ip:
content += ', "ip": "%s"' % str(self.ip)
return '{%s }' % content
|
{
"content_hash": "251aa952f702b1bd5e3cacace7d2552e",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 86,
"avg_line_length": 26.65934065934066,
"alnum_prop": 0.5412201154163232,
"repo_name": "benagricola/exabgp",
"id": "c180ea920bcdc06d60ccb01a4196c4f48e9aa7d4",
"size": "2426",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/exabgp/bgp/message/update/nlri/evpn/multicast.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Perl",
"bytes": "1516"
},
{
"name": "Python",
"bytes": "1225011"
},
{
"name": "Shell",
"bytes": "18795"
}
],
"symlink_target": ""
}
|
"""
.. _opt-conv-tensorcore:
How to optimize convolution using TensorCores
=============================================
**Author**: `Siyuan Feng <https://github.com/Hzfengsy>`_
In this tutorial, we will demonstrate how to write a high performance convolution
schedule using TensorCores in TVM. In this example, we assume the input to
convolution has a large batch. We strongly recommend covering the :ref:`opt-conv-gpu` tutorial first.
"""
################################################################
# TensorCore Introduction
# -----------------------
# Each Tensor Core provides a 4x4x4 matrix processing array that performs
# :code:`D = A * B + C`, where A, B, C and D are 4x4 matrices as the figure shows.
# The matrix multiplication inputs A and B are FP16 matrices, while the accumulation
# matrices C and D may be FP16 or FP32 matrices.
#
# However, CUDA programmers can only use the warp-level primitive
# :code:`wmma::mma_sync(acc_frag, a_frag, b_frag, acc_frag)` to perform
# 16x16x16 half-precision matrix multiplication on tensor cores. Before invoking
# the matrix multiplication, programmers must explicitly load data from memory into
# registers with the primitive :code:`wmma::load_matrix_sync`. The NVCC compiler translates
# that primitive into multiple memory load instructions. At run time, every thread loads
# 16 elements from matrix A and 16 elements from B.
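################################################################
# As a rough mental model only (this is not the wmma API, which operates on
# opaque register fragments), a single warp-level :code:`mma_sync` call computes
# :code:`D = A * B + C` on 16x16 tiles with float16 inputs and float32
# accumulation. The small NumPy sketch below is an added illustration of those
# numeric semantics and is not part of the schedule built in this tutorial.
import numpy as np  # imported again below with the other imports
_frag_a = np.random.rand(16, 16).astype("float16")
_frag_b = np.random.rand(16, 16).astype("float16")
_frag_c = np.zeros((16, 16), dtype="float32")
# Inputs are promoted to float32 before multiply-accumulate, mirroring fp32 accumulation.
_frag_d = np.dot(_frag_a.astype("float32"), _frag_b.astype("float32")) + _frag_c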
################################################################
# Preparation and Algorithm
# -------------------------
# We use fixed sizes for the input tensors, with 256 channels and 14 x 14 dimensions.
# The batch size is 256. Convolution filters contain 512 filters of size 3 x 3.
# We use stride size 1 and padding size 1 for the convolution. In the example, we use
# NHWCnc memory layout. The following code defines the convolution algorithm in TVM.
import tvm
from tvm import te
import numpy as np
from tvm.contrib import nvcc
# The sizes of inputs and filters
batch_size = 256
height = 14
width = 14
in_channels = 256
out_channels = 512
kernel_h = 3
kernel_w = 3
pad_h = 1
pad_w = 1
stride_h = 1
stride_w = 1
# TensorCore shape
block_size = 16
assert batch_size % block_size == 0
assert in_channels % block_size == 0
assert out_channels % block_size == 0
# Input feature map: (N, H, W, IC, n, ic)
data_shape = (
batch_size // block_size,
height,
width,
in_channels // block_size,
block_size,
block_size,
)
# Kernel: (H, W, IC, OC, ic, oc)
kernel_shape = (
kernel_h,
kernel_w,
in_channels // block_size,
out_channels // block_size,
block_size,
block_size,
)
# Output feature map: (N, H, W, OC, n, oc)
output_shape = (
batch_size // block_size,
height,
width,
out_channels // block_size,
block_size,
block_size,
)
# Reduction axes
kh = te.reduce_axis((0, kernel_h), name="kh")
kw = te.reduce_axis((0, kernel_w), name="kw")
ic = te.reduce_axis((0, in_channels // block_size), name="ic")
ii = te.reduce_axis((0, block_size), name="ii")
# Algorithm
A = te.placeholder(data_shape, name="A", dtype="float16")
W = te.placeholder(kernel_shape, name="W", dtype="float16")
Apad = te.compute(
(
batch_size // block_size,
height + 2 * pad_h,
width + 2 * pad_w,
in_channels // block_size,
block_size,
block_size,
),
lambda n, h, w, i, nn, ii: tvm.tir.if_then_else(
tvm.tir.all(h >= pad_h, h - pad_h < height, w >= pad_w, w - pad_w < width),
A[n, h - pad_h, w - pad_w, i, nn, ii],
tvm.tir.const(0.0, "float16"),
),
name="Apad",
)
Conv = te.compute(
output_shape,
lambda n, h, w, o, nn, oo: te.sum(
Apad[n, h * stride_h + kh, w * stride_w + kw, ic, nn, ii].astype("float32")
* W[kh, kw, ic, o, ii, oo].astype("float32"),
axis=[ic, kh, kw, ii],
),
name="Conv",
)
s = te.create_schedule(Conv.op)
s[Apad].compute_inline()
###############################################################################
# Memory Scope
# ------------
# In traditional GPU schedule, we have global, shared and local memory scope.
# To support TensorCores, we add three more special memory scopes: :code:`wmma.matrix_a`,
# :code:`wmma.matrix_b` and :code:`wmma.accumulator`. On hardware, all fragment scopes
# are stored at the on-chip register level, the same place as local memory.
# Designate the memory hierarchy
AS = s.cache_read(Apad, "shared", [Conv])
WS = s.cache_read(W, "shared", [Conv])
AF = s.cache_read(AS, "wmma.matrix_a", [Conv])
WF = s.cache_read(WS, "wmma.matrix_b", [Conv])
ConvF = s.cache_write(Conv, "wmma.accumulator")
###############################################################################
# Define Tensor Intrinsic
# -----------------------
# In fact, TensorCore is a special hardware operation, so we can just use tensorize
# to replace a unit of computation with the TensorCore instruction. The first step is
# to define the tensor intrinsics.
#
# There are four basic operations in TensorCore: :code:`fill_fragment`, :code:`load_matrix`,
# :code:`mma_sync` and :code:`store_matrix`. Since :code:`fill_fragment` and :code:`mma_sync`
# are both used in matrix multiplication, we only need to write the following three intrinsics.
def intrin_wmma_load_matrix(scope):
n = 16
A = te.placeholder((n, n), name="A", dtype="float16")
BA = tvm.tir.decl_buffer(A.shape, A.dtype, scope="shared", data_alignment=32, offset_factor=256)
C = te.compute((n, n), lambda i, j: A[i, j], name="C")
BC = tvm.tir.decl_buffer(C.shape, C.dtype, scope=scope, data_alignment=32, offset_factor=256)
def intrin_func(ins, outs):
ib = tvm.tir.ir_builder.create()
BA = ins[0]
BC = outs[0]
ib.emit(
tvm.tir.call_intrin(
"handle",
"tir.tvm_load_matrix_sync",
BC.data,
n,
n,
n,
BC.elem_offset // 256,
BA.access_ptr("r"),
n,
"row_major",
)
)
return ib.get()
return te.decl_tensor_intrin(C.op, intrin_func, binds={A: BA, C: BC})
def intrin_wmma_gemm():
n = 16
A = te.placeholder((n, n), name="A", dtype="float16")
B = te.placeholder((n, n), name="B", dtype="float16")
k = te.reduce_axis((0, n), name="k")
C = te.compute(
(n, n),
lambda ii, jj: te.sum(A[ii, k].astype("float") * B[k, jj].astype("float"), axis=k),
name="C",
)
BA = tvm.tir.decl_buffer(
A.shape, A.dtype, name="BA", scope="wmma.matrix_a", data_alignment=32, offset_factor=256
)
BB = tvm.tir.decl_buffer(
B.shape, B.dtype, name="BB", scope="wmma.matrix_b", data_alignment=32, offset_factor=256
)
BC = tvm.tir.decl_buffer(
C.shape, C.dtype, name="BC", scope="wmma.accumulator", data_alignment=32, offset_factor=256
)
def intrin_func(ins, outs):
BA, BB = ins
(BC,) = outs
def init():
ib = tvm.tir.ir_builder.create()
ib.emit(
tvm.tir.call_intrin(
"handle", "tir.tvm_fill_fragment", BC.data, n, n, n, BC.elem_offset // 256, 0.0
)
)
return ib.get()
def update():
ib = tvm.tir.ir_builder.create()
ib.emit(
tvm.tir.call_intrin(
"handle",
"tir.tvm_mma_sync",
BC.data,
BC.elem_offset // 256,
BA.data,
BA.elem_offset // 256,
BB.data,
BB.elem_offset // 256,
BC.data,
BC.elem_offset // 256,
)
)
return ib.get()
return update(), init(), update()
return te.decl_tensor_intrin(C.op, intrin_func, binds={A: BA, B: BB, C: BC})
def intrin_wmma_store_matrix():
n = 16
A = te.placeholder((n, n), name="A", dtype="float32")
BA = tvm.tir.decl_buffer(
A.shape, A.dtype, scope="wmma.accumulator", data_alignment=32, offset_factor=256
)
C = te.compute((n, n), lambda i, j: A[i, j], name="C")
BC = tvm.tir.decl_buffer(C.shape, C.dtype, scope="global", data_alignment=32, offset_factor=256)
def intrin_func(ins, outs):
ib = tvm.tir.ir_builder.create()
BA = ins[0]
BC = outs[0]
ib.emit(
tvm.tir.call_intrin(
"handle",
"tir.tvm_store_matrix_sync",
BA.data,
n,
n,
n,
BA.elem_offset // 256,
BC.access_ptr("w"),
n,
"row_major",
)
)
return ib.get()
return te.decl_tensor_intrin(C.op, intrin_func, binds={A: BA, C: BC})
###############################################################################
# Scheduling the Computation
# --------------------------
# To use TensorCores in TVM, we must schedule the computation into a specific structure
# that matches the tensor intrinsics. As with traditional GPU programs, we can also use
# shared memory to boost the speed. If you have any questions about blocking and shared
# memory, please refer to :ref:`opt-conv-gpu`.
#
# In this example, each block contains 2x4 warps, and each warp calls 4x2 TensorCore
# instructions. Thus, the output shape of each warp is 64x32 and each block outputs
# 128x128 tiles. Due to the limit of shared memory space, we only load 2 blocks (2x128x128 tiles)
# at a time.
#
# .. note::
#
# *Warp-level Operation*
#
#   Note that all TensorCore instructions are warp-level instructions, which means all 32 threads
#   in a warp should execute this instruction simultaneously. Making threadIdx.x extent=32 is one of the
#   easiest ways to solve this. Then we can bind threadIdx.x to any loops except those that contain
#   TensorCore intrinsics directly or indirectly. Also note that it is not the only solution.
#   The only thing we must ensure is that all threads in a warp can call TensorCore at the same time.
# Define tiling sizes
block_row_warps = 4
block_col_warps = 2
warp_row_tiles = 2
warp_col_tiles = 4
warp_size = 32
chunk = 2
block_x = te.thread_axis("blockIdx.x")
block_y = te.thread_axis("blockIdx.y")
block_z = te.thread_axis("blockIdx.z")
thread_x = te.thread_axis("threadIdx.x")
thread_y = te.thread_axis("threadIdx.y")
thread_z = te.thread_axis("threadIdx.z")
nc, hc, wc, oc, nnc, ooc = Conv.op.axis
block_k = s[Conv].fuse(hc, wc)
s[Conv].bind(block_k, block_z)
nc, nci = s[Conv].split(nc, factor=warp_row_tiles)
block_i, nc = s[Conv].split(nc, factor=block_row_warps)
oc, oci = s[Conv].split(oc, factor=warp_col_tiles)
block_j, oc = s[Conv].split(oc, factor=block_col_warps)
s[Conv].reorder(block_k, block_i, block_j, nc, oc, nci, oci, nnc, ooc)
s[Conv].bind(block_i, block_x)
s[Conv].bind(block_j, block_y)
s[Conv].bind(nc, thread_y)
s[Conv].bind(oc, thread_z)
# Schedule local computation
s[ConvF].compute_at(s[Conv], oc)
n, h, w, o, nnf, oof = ConvF.op.axis
ko, ki = s[ConvF].split(ic, factor=chunk)
s[ConvF].reorder(ko, kh, ki, kw, n, o, nnf, oof, ii)
# Move intermediate computation into each output compute tile
s[AF].compute_at(s[ConvF], kw)
s[WF].compute_at(s[ConvF], kw)
# Schedule for A's share memory
s[AS].compute_at(s[ConvF], kh)
n, h, w, i, nn, ii = AS.op.axis
tx, xo = s[AS].split(n, nparts=block_row_warps)
ty, yo = s[AS].split(xo, nparts=block_col_warps)
t = s[AS].fuse(nn, ii)
to, ti = s[AS].split(t, factor=warp_size)
s[AS].bind(tx, thread_y)
s[AS].bind(ty, thread_z)
s[AS].bind(ti, thread_x)
# Schedule for W's share memory
s[WS].compute_at(s[ConvF], kh)
kh, kw, ic, o, ii, oo = WS.op.axis
tx, xo = s[WS].split(o, nparts=block_row_warps)
ty, yo = s[WS].split(xo, nparts=block_col_warps)
t = s[WS].fuse(ii, oo)
to, ti = s[WS].split(t, nparts=warp_size)
s[WS].bind(tx, thread_y)
s[WS].bind(ty, thread_z)
s[WS].bind(to, thread_x)
s[WS].vectorize(ti)
print(tvm.lower(s, [A, W, Conv], simple_mode=True))
###############################################################################
# Lowering Computation to Intrinsics
# ----------------------------------
# The last phase is to lower the computation loops down to TensorCore hardware intrinsics
# by mapping the 2D convolution to tensor intrinsics
s[AF].tensorize(AF.op.axis[-2], intrin_wmma_load_matrix("wmma.matrix_a"))
s[WF].tensorize(WF.op.axis[-2], intrin_wmma_load_matrix("wmma.matrix_b"))
s[Conv].tensorize(nnc, intrin_wmma_store_matrix())
s[ConvF].tensorize(nnf, intrin_wmma_gemm())
print(tvm.lower(s, [A, W, Conv], simple_mode=True))
###############################################################################
# Generate CUDA Kernel
# --------------------
# Finally we use TVM to generate and compile the CUDA kernel, and evaluate the latency of convolution.
# Since TensorCores are only supported in NVIDIA GPUs with Compute Capability 7.0 or higher, this
# example may not be able to run on our build server.
ctx = tvm.gpu(0)
if nvcc.have_tensorcore(ctx.compute_version):
with tvm.transform.PassContext(config={"tir.UnrollLoop": {"auto_max_step": 16}}):
func = tvm.build(s, [A, W, Conv], "cuda")
a_np = np.random.uniform(size=data_shape).astype(A.dtype)
w_np = np.random.uniform(size=kernel_shape).astype(W.dtype)
a = tvm.nd.array(a_np, ctx)
w = tvm.nd.array(w_np, ctx)
c = tvm.nd.array(np.zeros(output_shape, dtype=Conv.dtype), ctx)
evaluator = func.time_evaluator(func.entry_name, ctx, number=10)
print("conv2d with tensor core: %f ms" % (evaluator(a, w, c).mean * 1e3))
###############################################################################
# Summary
# -------
# This tutorial demonstrates how TVM scheduling primitives can be used to
# call TensorCores on specific GPUs.
|
{
"content_hash": "abac70906d2fd0b421e09c205dca3b50",
"timestamp": "",
"source": "github",
"line_count": 395,
"max_line_length": 107,
"avg_line_length": 35.10632911392405,
"alnum_prop": 0.5901781207182519,
"repo_name": "sxjscience/tvm",
"id": "0cbcf7e0334265083b661841399f67d43bd92277",
"size": "14652",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tutorials/optimize/opt_conv_tensorcore.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "6056"
},
{
"name": "C",
"bytes": "95567"
},
{
"name": "C++",
"bytes": "5565032"
},
{
"name": "CMake",
"bytes": "67305"
},
{
"name": "Go",
"bytes": "112376"
},
{
"name": "HTML",
"bytes": "8625"
},
{
"name": "Java",
"bytes": "173219"
},
{
"name": "JavaScript",
"bytes": "49801"
},
{
"name": "Makefile",
"bytes": "50818"
},
{
"name": "Objective-C",
"bytes": "15264"
},
{
"name": "Objective-C++",
"bytes": "46673"
},
{
"name": "Python",
"bytes": "6763729"
},
{
"name": "Rust",
"bytes": "182027"
},
{
"name": "Scala",
"bytes": "184105"
},
{
"name": "Shell",
"bytes": "96967"
},
{
"name": "Tcl",
"bytes": "53645"
},
{
"name": "Verilog",
"bytes": "30605"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
from sphinx_celery import conf
globals().update(conf.build_config(
'kombu', __file__,
project='Kombu',
version_dev='4.4',
version_stable='4.3',
canonical_url='https://kombu.readthedocs.io/',
webdomain='kombu.readthedocs.io',
github_project='celery/kombu',
author='Ask Solem & contributors',
author_name='Ask Solem',
copyright='2009-2019',
publisher='Celery Project',
html_logo='images/kombusmall.jpg',
html_favicon='images/favicon.ico',
html_prepend_sidebars=['sidebardonations.html'],
extra_extensions=['sphinx.ext.napoleon'],
apicheck_ignore_modules=[
'kombu.entity',
'kombu.messaging',
'kombu.asynchronous.aws.ext',
'kombu.asynchronous.aws.sqs.ext',
'kombu.transport.qpid_patches',
'kombu.utils',
'kombu.transport.virtual.base',
],
))
|
{
"content_hash": "fe0452097eb72cfbe34bb8edb539167c",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 56,
"avg_line_length": 30.7,
"alnum_prop": 0.6471226927252985,
"repo_name": "kawamon/hue",
"id": "3c25ea0c3b90e83a644fae76f01c78a032fbddc1",
"size": "945",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/kombu-4.3.0/docs/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "5786"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "Batchfile",
"bytes": "118907"
},
{
"name": "C",
"bytes": "3196521"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "308860"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "1050129"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "10981"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "7312"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "24999718"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "JSONiq",
"bytes": "4"
},
{
"name": "Java",
"bytes": "471854"
},
{
"name": "JavaScript",
"bytes": "28075556"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "Jupyter Notebook",
"bytes": "73168"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Lex",
"bytes": "264449"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1377"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "269655"
},
{
"name": "Mako",
"bytes": "3614942"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "31565"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "1412"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "3204"
},
{
"name": "Python",
"bytes": "76440000"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "95764"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "190718"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "TSQL",
"bytes": "10013"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "317058"
},
{
"name": "TypeScript",
"bytes": "1607"
},
{
"name": "VBA",
"bytes": "2884"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "521413"
},
{
"name": "Yacc",
"bytes": "2133855"
}
],
"symlink_target": ""
}
|
"""Support for Meteo-France weather data."""
from datetime import timedelta
import logging
from meteofrance_api.client import MeteoFranceClient
from meteofrance_api.helpers import is_valid_warning_department
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import CONF_LATITUDE, CONF_LONGITUDE
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import ConfigType
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .const import (
CONF_CITY,
COORDINATOR_ALERT,
COORDINATOR_FORECAST,
COORDINATOR_RAIN,
DOMAIN,
PLATFORMS,
UNDO_UPDATE_LISTENER,
)
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL_RAIN = timedelta(minutes=5)
SCAN_INTERVAL = timedelta(minutes=15)
CITY_SCHEMA = vol.Schema({vol.Required(CONF_CITY): cv.string})
CONFIG_SCHEMA = vol.Schema(
vol.All(
cv.deprecated(DOMAIN),
{DOMAIN: vol.Schema(vol.All(cv.ensure_list, [CITY_SCHEMA]))},
),
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up Meteo-France from legacy config file."""
if not (conf := config.get(DOMAIN)):
return True
for city_conf in conf:
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=city_conf
)
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up an Meteo-France account from a config entry."""
hass.data.setdefault(DOMAIN, {})
client = MeteoFranceClient()
latitude = entry.data[CONF_LATITUDE]
longitude = entry.data[CONF_LONGITUDE]
async def _async_update_data_forecast_forecast():
"""Fetch data from API endpoint."""
return await hass.async_add_executor_job(
client.get_forecast, latitude, longitude
)
async def _async_update_data_rain():
"""Fetch data from API endpoint."""
return await hass.async_add_executor_job(client.get_rain, latitude, longitude)
async def _async_update_data_alert():
"""Fetch data from API endpoint."""
return await hass.async_add_executor_job(
client.get_warning_current_phenomenoms, department, 0, True
)
coordinator_forecast = DataUpdateCoordinator(
hass,
_LOGGER,
name=f"Météo-France forecast for city {entry.title}",
update_method=_async_update_data_forecast_forecast,
update_interval=SCAN_INTERVAL,
)
coordinator_rain = None
coordinator_alert = None
# Fetch initial data so we have data when entities subscribe
await coordinator_forecast.async_refresh()
if not coordinator_forecast.last_update_success:
raise ConfigEntryNotReady
# Check if rain forecast is available.
if coordinator_forecast.data.position.get("rain_product_available") == 1:
coordinator_rain = DataUpdateCoordinator(
hass,
_LOGGER,
name=f"Météo-France rain for city {entry.title}",
update_method=_async_update_data_rain,
update_interval=SCAN_INTERVAL_RAIN,
)
await coordinator_rain.async_refresh()
if not coordinator_rain.last_update_success:
raise ConfigEntryNotReady
else:
_LOGGER.warning(
"1 hour rain forecast not available. %s is not in covered zone",
entry.title,
)
department = coordinator_forecast.data.position.get("dept")
_LOGGER.debug(
"Department corresponding to %s is %s",
entry.title,
department,
)
if is_valid_warning_department(department):
if not hass.data[DOMAIN].get(department):
coordinator_alert = DataUpdateCoordinator(
hass,
_LOGGER,
name=f"Météo-France alert for department {department}",
update_method=_async_update_data_alert,
update_interval=SCAN_INTERVAL,
)
await coordinator_alert.async_refresh()
if not coordinator_alert.last_update_success:
raise ConfigEntryNotReady
hass.data[DOMAIN][department] = True
else:
_LOGGER.warning(
"Weather alert for department %s won't be added with city %s, as it has already been added within another city",
department,
entry.title,
)
else:
_LOGGER.warning(
"Weather alert not available: The city %s is not in metropolitan France or Andorre",
entry.title,
)
undo_listener = entry.add_update_listener(_async_update_listener)
hass.data[DOMAIN][entry.entry_id] = {
COORDINATOR_FORECAST: coordinator_forecast,
COORDINATOR_RAIN: coordinator_rain,
COORDINATOR_ALERT: coordinator_alert,
UNDO_UPDATE_LISTENER: undo_listener,
}
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
if hass.data[DOMAIN][entry.entry_id][COORDINATOR_ALERT]:
department = hass.data[DOMAIN][entry.entry_id][
COORDINATOR_FORECAST
].data.position.get("dept")
hass.data[DOMAIN][department] = False
_LOGGER.debug(
"Weather alert for depatment %s unloaded and released. It can be added now by another city",
department,
)
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
hass.data[DOMAIN][entry.entry_id][UNDO_UPDATE_LISTENER]()
hass.data[DOMAIN].pop(entry.entry_id)
if not hass.data[DOMAIN]:
hass.data.pop(DOMAIN)
return unload_ok
async def _async_update_listener(hass: HomeAssistant, entry: ConfigEntry):
"""Handle options update."""
await hass.config_entries.async_reload(entry.entry_id)
|
{
"content_hash": "be666cd37fb1c02c681a320c1ad778db",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 128,
"avg_line_length": 32.54973821989529,
"alnum_prop": 0.653048093935982,
"repo_name": "home-assistant/home-assistant",
"id": "2f47aee9c02fc63c3d0ee2aba27f7032a5b85127",
"size": "6223",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/meteo_france/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20557383"
},
{
"name": "Shell",
"bytes": "6671"
}
],
"symlink_target": ""
}
|
"""This directory is setup with configurations to run the main functional test.
It exercises a full analysis pipeline on a smaller subset of data.
"""
import os
import subprocess
import unittest
import shutil
import contextlib
import collections
import functools
from nose import SkipTest
from nose.plugins.attrib import attr
import yaml
from bcbio.pipeline.config_utils import load_system_config
@contextlib.contextmanager
def make_workdir():
remove_old_dir = True
#remove_old_dir = False
dirname = os.path.join(os.path.dirname(__file__), "test_automated_output")
if remove_old_dir:
if os.path.exists(dirname):
shutil.rmtree(dirname)
os.makedirs(dirname)
orig_dir = os.getcwd()
try:
os.chdir(dirname)
yield dirname
finally:
os.chdir(orig_dir)
def expected_failure(test):
"""Small decorator to mark tests as expected failure.
Useful for tests that are work-in-progress.
"""
@functools.wraps(test)
def inner(*args, **kwargs):
try:
test(*args, **kwargs)
except Exception:
raise SkipTest
else:
raise AssertionError('Failure expected')
return inner
def get_post_process_yaml(data_dir, workdir):
"""Prepare a bcbio_system YAML file pointing to test data.
"""
try:
from bcbiovm.docker.defaults import get_datadir
datadir = get_datadir()
system = os.path.join(datadir, "galaxy", "bcbio_system.yaml") if datadir else None
except ImportError:
system = None
if system is None or not os.path.exists(system):
try:
_, system = load_system_config("bcbio_system.yaml")
except ValueError:
system = None
if system is None or not os.path.exists(system):
system = os.path.join(data_dir, "post_process-sample.yaml")
# create local config pointing to reduced genomes
test_system = os.path.join(workdir, "bcbio_system.yaml")
with open(system) as in_handle:
config = yaml.load(in_handle)
config["galaxy_config"] = os.path.join(data_dir, "universe_wsgi.ini")
with open(test_system, "w") as out_handle:
yaml.dump(config, out_handle)
return test_system
class AutomatedAnalysisTest(unittest.TestCase):
"""Setup a full automated analysis and run the pipeline.
"""
def setUp(self):
self.data_dir = os.path.join(os.path.dirname(__file__), "data", "automated")
def _install_test_files(self, data_dir):
"""Download required sequence and reference files.
"""
DlInfo = collections.namedtuple("DlInfo", "fname dirname version")
download_data = [DlInfo("110106_FC70BUKAAXX.tar.gz", None, None),
DlInfo("genomes_automated_test.tar.gz", "genomes", 26),
DlInfo("110907_ERP000591.tar.gz", None, None),
DlInfo("100326_FC6107FAAXX.tar.gz", None, 9),
DlInfo("tcga_benchmark.tar.gz", None, 3)]
for dl in download_data:
url = "http://chapmanb.s3.amazonaws.com/{fname}".format(fname=dl.fname)
dirname = os.path.join(data_dir, os.pardir,
dl.fname.replace(".tar.gz", "") if dl.dirname is None
else dl.dirname)
if os.path.exists(dirname) and dl.version is not None:
version_file = os.path.join(dirname, "VERSION")
is_old = True
if os.path.exists(version_file):
with open(version_file) as in_handle:
version = int(in_handle.read())
is_old = version < dl.version
if is_old:
shutil.rmtree(dirname)
if not os.path.exists(dirname):
self._download_to_dir(url, dirname)
def _download_to_dir(self, url, dirname):
print dirname
cl = ["wget", url]
subprocess.check_call(cl)
cl = ["tar", "-xzvpf", os.path.basename(url)]
subprocess.check_call(cl)
shutil.move(os.path.basename(dirname), dirname)
os.remove(os.path.basename(url))
@attr(speed=3)
def IGNOREtest_3_full_pipeline(self):
"""Run full automated analysis pipeline with multiplexing.
        XXX Multiplexing not supported in latest versions.
"""
self._install_test_files(self.data_dir)
with make_workdir() as workdir:
cl = ["bcbio_nextgen.py",
get_post_process_yaml(self.data_dir, workdir),
os.path.join(self.data_dir, os.pardir, "110106_FC70BUKAAXX"),
os.path.join(self.data_dir, "run_info.yaml")]
subprocess.check_call(cl)
@attr(speed=3)
def IGNOREtest_4_empty_fastq(self):
"""Handle analysis of empty fastq inputs from failed runs.
        XXX Multiplexing not supported in latest versions.
"""
with make_workdir() as workdir:
cl = ["bcbio_nextgen.py",
get_post_process_yaml(self.data_dir, workdir),
os.path.join(self.data_dir, os.pardir, "110221_empty_FC12345AAXX"),
os.path.join(self.data_dir, "run_info-empty.yaml")]
subprocess.check_call(cl)
@attr(stranded=True)
@attr(rnaseq=True)
def test_2_stranded(self):
"""Run an RNA-seq analysis with TopHat and generate gene-level counts.
"""
self._install_test_files(self.data_dir)
with make_workdir() as workdir:
cl = ["bcbio_nextgen.py",
get_post_process_yaml(self.data_dir, workdir),
os.path.join(self.data_dir, os.pardir, "test_stranded"),
os.path.join(self.data_dir, "run_info-stranded.yaml")]
subprocess.check_call(cl)
@attr(rnaseq=True)
@attr(tophat=True)
def test_2_rnaseq(self):
"""Run an RNA-seq analysis with TopHat and generate gene-level counts.
"""
self._install_test_files(self.data_dir)
with make_workdir() as workdir:
cl = ["bcbio_nextgen.py",
get_post_process_yaml(self.data_dir, workdir),
os.path.join(self.data_dir, os.pardir, "110907_ERP000591"),
os.path.join(self.data_dir, "run_info-rnaseq.yaml")]
subprocess.check_call(cl)
@attr(rnaseq=True)
@attr(sailfish=True)
@unittest.skip('sailfish support is in progress, skipping the unit test.')
def test_2_sailfish(self):
"""Run an RNA-seq analysis with Sailfish
"""
self._install_test_files(self.data_dir)
with make_workdir() as workdir:
cl = ["bcbio_nextgen.py",
get_post_process_yaml(self.data_dir, workdir),
os.path.join(self.data_dir, os.pardir, "110907_ERP000591"),
os.path.join(self.data_dir, "run_info-sailfish.yaml")]
subprocess.check_call(cl)
@attr(fusion=True)
def test_2_fusion(self):
"""Run an RNA-seq analysis and test fusion genes
"""
self._install_test_files(self.data_dir)
with make_workdir() as workdir:
cl = ["bcbio_nextgen.py",
get_post_process_yaml(self.data_dir, workdir),
os.path.join(self.data_dir, os.pardir, "test_fusion"),
os.path.join(self.data_dir, "run_info-fusion.yaml")]
subprocess.check_call(cl)
@attr(rnaseq=True)
@attr(rnaseq_standard=True)
@attr(star=True)
def test_2_star(self):
"""Run an RNA-seq analysis with STAR and generate gene-level counts.
"""
self._install_test_files(self.data_dir)
with make_workdir() as workdir:
cl = ["bcbio_nextgen.py",
get_post_process_yaml(self.data_dir, workdir),
os.path.join(self.data_dir, os.pardir, "110907_ERP000591"),
os.path.join(self.data_dir, "run_info-star.yaml")]
subprocess.check_call(cl)
@attr(rnaseq=True)
@attr(rnaseq_standard=True)
@attr(hisat2=True)
def test_2_hisat2(self):
"""Run an RNA-seq analysis with hisat2 and generate gene-level counts.
"""
self._install_test_files(self.data_dir)
with make_workdir() as workdir:
cl = ["bcbio_nextgen.py",
get_post_process_yaml(self.data_dir, workdir),
os.path.join(self.data_dir, os.pardir, "110907_ERP000591"),
os.path.join(self.data_dir, "run_info-hisat2.yaml")]
subprocess.check_call(cl)
@attr(explant=True)
@attr(singleend=True)
@attr(rnaseq=True)
def test_explant(self):
"""
Run an explant RNA-seq analysis with TopHat and generate gene-level counts.
"""
self._install_test_files(self.data_dir)
with make_workdir() as workdir:
cl = ["bcbio_nextgen.py",
get_post_process_yaml(self.data_dir, workdir),
os.path.join(self.data_dir, os.pardir, "1_explant"),
os.path.join(self.data_dir, "run_info-explant.yaml")]
subprocess.check_call(cl)
@attr(srnaseq=True)
@attr(srnaseq_star=True)
def test_srnaseq_star(self):
"""Run an sRNA-seq analysis.
"""
self._install_test_files(self.data_dir)
with make_workdir() as workdir:
cl = ["bcbio_nextgen.py",
get_post_process_yaml(self.data_dir, workdir),
os.path.join(self.data_dir, "run_info-srnaseq_star.yaml")]
subprocess.check_call(cl)
@attr(srnaseq=True)
@attr(srnaseq_bowtie=True)
def test_srnaseq_bowtie(self):
"""Run an sRNA-seq analysis.
"""
self._install_test_files(self.data_dir)
with make_workdir() as workdir:
cl = ["bcbio_nextgen.py",
get_post_process_yaml(self.data_dir, workdir),
os.path.join(self.data_dir, "run_info-srnaseq_bowtie.yaml")]
subprocess.check_call(cl)
@attr(chipseq=True)
def test_chipseq(self):
"""
Run a chip-seq alignment with Bowtie2
"""
self._install_test_files(self.data_dir)
with make_workdir() as workdir:
cl = ["bcbio_nextgen.py",
get_post_process_yaml(self.data_dir, workdir),
os.path.join(self.data_dir, os.pardir, "test_chipseq"),
os.path.join(self.data_dir, "run_info-chipseq.yaml")]
subprocess.check_call(cl)
@attr(speed=1)
@attr(ensemble=True)
def test_1_variantcall(self):
"""Test variant calling with GATK pipeline.
"""
self._install_test_files(self.data_dir)
with make_workdir() as workdir:
cl = ["bcbio_nextgen.py",
get_post_process_yaml(self.data_dir, workdir),
os.path.join(self.data_dir, os.pardir, "100326_FC6107FAAXX"),
os.path.join(self.data_dir, "run_info-variantcall.yaml")]
subprocess.check_call(cl)
@attr(speed=1)
@attr(devel=True)
def test_5_bam(self):
"""Allow BAM files as input to pipeline.
"""
self._install_test_files(self.data_dir)
with make_workdir() as workdir:
cl = ["bcbio_nextgen.py",
get_post_process_yaml(self.data_dir, workdir),
os.path.join(self.data_dir, "run_info-bam.yaml")]
subprocess.check_call(cl)
@attr(speed=2)
def test_6_bamclean(self):
"""Clean problem BAM input files that do not require alignment.
"""
self._install_test_files(self.data_dir)
with make_workdir() as workdir:
cl = ["bcbio_nextgen.py",
get_post_process_yaml(self.data_dir, workdir),
os.path.join(self.data_dir, os.pardir, "100326_FC6107FAAXX"),
os.path.join(self.data_dir, "run_info-bamclean.yaml")]
subprocess.check_call(cl)
@attr(speed=2)
@attr(cancer=True)
@attr(cancermulti=True)
def test_7_cancer(self):
"""Test paired tumor-normal calling using multiple calling approaches: MuTect, VarScan, FreeBayes.
"""
self._install_test_files(self.data_dir)
with make_workdir() as workdir:
cl = ["bcbio_nextgen.py",
get_post_process_yaml(self.data_dir, workdir),
os.path.join(self.data_dir, "run_info-cancer.yaml")]
subprocess.check_call(cl)
@attr(cancer=True)
@attr(cancerpanel=True)
def test_7_cancer_nonormal(self):
"""Test cancer calling without normal samples or with normal VCF panels.
"""
self._install_test_files(self.data_dir)
with make_workdir() as workdir:
cl = ["bcbio_nextgen.py",
get_post_process_yaml(self.data_dir, workdir),
os.path.join(self.data_dir, "run_info-cancer2.yaml")]
subprocess.check_call(cl)
@attr(speed=1)
@attr(template=True)
def test_8_template(self):
"""Create a project template from input files and metadata configuration.
"""
self._install_test_files(self.data_dir)
fc_dir = os.path.join(self.data_dir, os.pardir, "100326_FC6107FAAXX")
with make_workdir() as workdir:
cl = ["bcbio_nextgen.py", "-w", "template", "--only-metadata",
"freebayes-variant",
os.path.join(fc_dir, "100326.csv"),
os.path.join(fc_dir, "7_100326_FC6107FAAXX_1_fastq.txt"),
os.path.join(fc_dir, "7_100326_FC6107FAAXX_2_fastq.txt"),
os.path.join(fc_dir, "8_100326_FC6107FAAXX.bam")]
subprocess.check_call(cl)
@attr(joint=True)
def test_9_joint(self):
"""Perform joint calling/backfilling/squaring off following variant calling.
"""
self._install_test_files(self.data_dir)
with make_workdir() as workdir:
cl = ["bcbio_nextgen.py",
get_post_process_yaml(self.data_dir, workdir),
os.path.join(self.data_dir, "run_info-joint.yaml")]
subprocess.check_call(cl)
@attr(docker=True)
def test_docker(self):
"""Run an analysis with code and tools inside a docker container.
Requires https://github.com/chapmanb/bcbio-nextgen-vm
"""
self._install_test_files(self.data_dir)
with make_workdir() as workdir:
cl = ["bcbio_vm.py",
"--datadir=%s" % self.data_dir,
"run",
"--systemconfig=%s" % get_post_process_yaml(self.data_dir, workdir),
"--fcdir=%s" % os.path.join(self.data_dir, os.pardir, "100326_FC6107FAAXX"),
os.path.join(self.data_dir, "run_info-bam.yaml")]
subprocess.check_call(cl)
@attr(docker_ipython=True)
def test_docker_ipython(self):
"""Run an analysis with code and tools inside a docker container, driven via IPython.
Requires https://github.com/chapmanb/bcbio-nextgen-vm
"""
self._install_test_files(self.data_dir)
with make_workdir() as workdir:
cl = ["bcbio_vm.py",
"--datadir=%s" % self.data_dir,
"ipython",
"--systemconfig=%s" % get_post_process_yaml(self.data_dir, workdir),
"--fcdir=%s" % os.path.join(self.data_dir, os.pardir, "100326_FC6107FAAXX"),
os.path.join(self.data_dir, "run_info-bam.yaml"),
"lsf", "localrun"]
subprocess.check_call(cl)
class CWLTest(unittest.TestCase):
""" Run simple CWL workflows.
Requires https://github.com/chapmanb/bcbio-nextgen-vm
"""
def setUp(self):
self.data_dir = os.path.join(os.path.dirname(__file__), "data", "automated")
@attr(speed=2)
@attr(cwl=True)
@attr(cwl_local=True)
def test_1_cwl_local(self):
"""Create a common workflow language description and run on local installation.
"""
with make_workdir() as workdir:
cl = ["bcbio_vm.py", "cwl", "../data/automated/run_info-bam.yaml",
"--systemconfig", get_post_process_yaml(self.data_dir, workdir)]
subprocess.check_call(cl)
out_base = "run_info-bam-workflow/run_info-bam-main"
cl = ["cwltool", "--verbose", "--preserve-environment", "PATH", "HOME", "--no-container",
out_base + ".cwl", out_base + "-samples.json"]
subprocess.check_call(cl)
print
print "To run with a CWL tool, cd test_automated_output and:"
print " ".join(cl)
@attr(speed=2)
@attr(cwl=True)
@attr(cwl_docker=True)
def test_2_cwl_docker(self):
"""Create a common workflow language description and run on a Docker installation.
"""
with make_workdir() as workdir:
cl = ["bcbio_vm.py", "cwl", "../data/automated/run_info-bam.yaml",
"--systemconfig", get_post_process_yaml(self.data_dir, workdir)]
subprocess.check_call(cl)
out_base = "run_info-bam-workflow/run_info-bam-main"
cl = ["cwltool", "--verbose", out_base + ".cwl", out_base + "-samples.json"]
subprocess.check_call(cl)
print
print "To run with a CWL tool, cd test_automated_output and:"
print " ".join(cl)
|
{
"content_hash": "4d9b03966642c23c100db035069c3888",
"timestamp": "",
"source": "github",
"line_count": 437,
"max_line_length": 106,
"avg_line_length": 40.224256292906176,
"alnum_prop": 0.5759472067356923,
"repo_name": "lpantano/bcbio-nextgen",
"id": "c5a80654e3b91de92593d340e66c22a9e9a9581f",
"size": "17578",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_automated_analysis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1553199"
},
{
"name": "Ruby",
"bytes": "624"
},
{
"name": "Shell",
"bytes": "14377"
}
],
"symlink_target": ""
}
|
"""Deploy the database.
This module contains the logic to deploy the database
to the staging server
"""
import os, subprocess
def deploy_database(settings, dbconn, database=None):
"""Deploy the database
Args:
database: An open connection to the Drupal database.
Deploy the database tables into the staging server.
"""
deployed = False
try:
custom_sql = settings['sql']['deploy_sql_filename']
    except (AttributeError, KeyError):
print "Could not find custom deploy script."
else:
if os.path.isfile(custom_sql):
deployed = dbconn.execute_sql_file(custom_sql, database)
else:
print "No custom deploy SQL found at {}".format(custom_sql)
#################################
# Put any custom steps here
#################################
# Begin custom steps
# End custom steps
#################################
return deployed
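# Example invocation (sketch only; names are illustrative). It assumes dbconn
# exposes execute_sql_file(path, database) as used above and that settings
# follows the structure read by deploy_database():
#   settings = {'sql': {'deploy_sql_filename': 'sql/custom_deploy.sql'}}
#   deploy_database(settings, dbconn, database='drupal_staging')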
|
{
"content_hash": "bc1b3af658bae42f0b04439700bc3e93",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 71,
"avg_line_length": 27.166666666666668,
"alnum_prop": 0.5531697341513292,
"repo_name": "anthonylv/pyD2W",
"id": "ac371bc5394027f96c33f0d030d23396f8d54053",
"size": "1020",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deploy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "92285"
}
],
"symlink_target": ""
}
|
import os
import socket
import struct
try:
import mmap
except ImportError:
mmap = None
__all__ = ['IPv4Database', 'find']
_unpack_V = lambda b: struct.unpack("<L", b)[0]
_unpack_N = lambda b: struct.unpack(">L", b)[0]
def _unpack_C(b):
if isinstance(b, int):
return b
return struct.unpack("B", b)[0]
datfile = os.path.join(os.path.dirname(__file__), "17monipdb.dat")
class IPv4Database(object):
"""Database for search IPv4 address.
The 17mon dat file format in bytes::
-----------
| 4 bytes | <- offset number
-----------------
| 256 * 4 bytes | <- first ip number index
-----------------------
| offset - 1028 bytes | <- ip index
-----------------------
| data storage |
-----------------------
"""
def __init__(self, filename=None, use_mmap=True):
if filename is None:
filename = datfile
with open(filename, 'rb') as f:
if use_mmap and mmap is not None:
buf = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
else:
buf = f.read()
use_mmap = False
self._use_mmap = use_mmap
self._buf = buf
self._offset = _unpack_N(buf[:4])
self._is_closed = False
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def close(self):
if self._use_mmap:
self._buf.close()
self._is_closed = True
def _lookup_ipv4(self, ip):
nip = socket.inet_aton(ip)
# first IP number
fip = bytearray(nip)[0]
        # index table starts at byte 4; each first-octet entry is 4 bytes (4 + fip * 4)
fip_offset = fip * 4 + 4
# position in the index block
count = _unpack_V(self._buf[fip_offset:fip_offset + 4])
pos = count * 8
offset = pos + 1028
data_length = 0
data_pos = 0
lo, hi = 0, (self._offset - offset) // 8
while lo < hi:
mid = (lo + hi) // 2
mid_offset = pos + 1028 + 8 * mid
mid_val = self._buf[mid_offset: mid_offset+4]
if mid_val < nip:
lo = mid + 1
else:
hi = mid
offset = pos + 1028 + 8 * lo
if offset == self._offset:
return None
data_pos = _unpack_V(self._buf[offset + 4:offset + 7] + b'\0')
data_length = _unpack_C(self._buf[offset + 7])
offset = self._offset + data_pos - 1024
value = self._buf[offset:offset + data_length]
return value.decode('utf-8').strip()
def find(self, ip):
if self._is_closed:
raise ValueError('I/O operation on closed dat file')
return self._lookup_ipv4(ip)
def find(ip):
# keep find for compatibility
try:
ip = socket.gethostbyname(ip)
except socket.gaierror:
return
with IPv4Database() as db:
return db.find(ip)
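if __name__ == "__main__":
    # Minimal usage sketch; it assumes the bundled 17monipdb.dat sits next to
    # this module (the default datfile path above). The sample IP address and
    # hostname are illustrative only.
    with IPv4Database() as db:
        print(db.find("8.8.8.8"))
    print(find("example.com"))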
|
{
"content_hash": "da4ebbdc312e7813c9fb85fbce2fa698",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 71,
"avg_line_length": 24.892561983471076,
"alnum_prop": 0.48871181938911024,
"repo_name": "phyng/phyip",
"id": "61652d6bfb98b6f33008a983602f6b5429a01c22",
"size": "3108",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ipdata/mon17.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22381"
}
],
"symlink_target": ""
}
|
from pyflink.common import *
from pyflink.dataset import *
from pyflink.datastream import *
from pyflink.table import *
from pyflink.table.catalog import *
from pyflink.table.descriptors import *
from pyflink.table.window import *
import pyflink
from py4j.java_gateway import java_import
intp = gateway.entry_point
pyflink.java_gateway._gateway = gateway
pyflink.java_gateway.import_flink_view(gateway)
pyflink.java_gateway.install_exception_handler()
b_env = pyflink.dataset.ExecutionEnvironment(intp.getJavaExecutionEnvironment())
bt_env = BatchTableEnvironment.create(b_env)
s_env = StreamExecutionEnvironment(intp.getJavaStreamExecutionEnvironment())
st_env = StreamTableEnvironment.create(s_env)
from zeppelin_context import PyZeppelinContext
#TODO(zjffdu) merge it with IPyFlinkZeppelinContext
class PyFlinkZeppelinContext(PyZeppelinContext):
def __init__(self, z, gateway):
super(PyFlinkZeppelinContext, self).__init__(z, gateway)
def show(self, obj):
from pyflink.table import Table
if isinstance(obj, Table):
print(self.z.showData(obj._j_table))
else:
super(PyFlinkZeppelinContext, self).show(obj)
z = __zeppelin__ = PyFlinkZeppelinContext(intp.getZeppelinContext(), gateway)
__zeppelin__._setup_matplotlib()
|
{
"content_hash": "ef423a60798ab14c02a18a58cd217b67",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 80,
"avg_line_length": 31.5,
"alnum_prop": 0.7793650793650794,
"repo_name": "zetaris/zeppelin",
"id": "86e1a50d7a6b41e87c9cc1597ba7f13bd6048812",
"size": "2045",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "flink/src/main/resources/python/zeppelin_pyflink.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "12046"
},
{
"name": "CSS",
"bytes": "101478"
},
{
"name": "Dockerfile",
"bytes": "22145"
},
{
"name": "Groovy",
"bytes": "9274"
},
{
"name": "HTML",
"bytes": "353455"
},
{
"name": "Java",
"bytes": "6479989"
},
{
"name": "JavaScript",
"bytes": "1309634"
},
{
"name": "Jupyter Notebook",
"bytes": "84915"
},
{
"name": "Python",
"bytes": "114062"
},
{
"name": "R",
"bytes": "21511"
},
{
"name": "Roff",
"bytes": "63155"
},
{
"name": "Ruby",
"bytes": "3102"
},
{
"name": "Scala",
"bytes": "446610"
},
{
"name": "Shell",
"bytes": "96091"
},
{
"name": "Thrift",
"bytes": "7842"
},
{
"name": "XSLT",
"bytes": "1326"
}
],
"symlink_target": ""
}
|
"""Gradient tape utilites."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from autograd import container_types
from autograd import core as ag_core
from tensorflow.python.framework import dtypes
from tensorflow.python.util import nest
from tensorflow.python.util import tf_contextlib
def tensor_id(t):
"""Returns a unique identifier for this Tensor."""
t = ag_core.getval(t)
return t._id # pylint: disable=protected-access
class ImplicitTape(object):
"""Global object which can watch tensors and wrap them with autograd."""
def __init__(self):
self.tensors = {}
self.gradients = []
def __eq__(self, other):
return self is other
def __hash__(self):
return id(self)
@ag_core.primitive
def _watch_with_tape_internal(_, tensor):
"""Primitive to wrap a tensor around an ImplicitTape progenitor."""
return tensor
def _watch_with_tape(tape, tensor):
"""Wraps a watched Tensor and keeps track of it in the implicit tape."""
w = _watch_with_tape_internal(tape, tensor)
if ag_core.isnode(tape):
tape.value.tensors[tensor_id(tensor)] = w
return w
def _watch_with_tape_vjp(g, ans, vs, gvs, tape, tensor):
"""Gradient for _watch_with_tape_internal."""
del ans, gvs, tape
def mut_add(implicit_tape):
t = ag_core.getval(tensor)
implicit_tape.gradients.append((t, g))
return implicit_tape
return ag_core.SparseObject(vs, mut_add)
_watch_with_tape_internal.defvjp(_watch_with_tape_vjp, argnum=0)
_watch_with_tape_internal.defvjp(
lambda g, ans, vs, gvs, tape, tensor: g,
argnum=1)
class ImplicitTapeVSpace(ag_core.VSpace):
"""VSpace needed to have ImplicitTape be a valid progenitor."""
def zeros(self):
return ImplicitTape()
class ImplicitTapeNode(ag_core.Node):
"""Node to wrap ImplicitTape in."""
def __eq__(self, other):
return self is other
def __hash__(self):
return id(self)
ag_core.register_node(ImplicitTapeNode, ImplicitTape)
ag_core.register_vspace(ImplicitTapeVSpace, ImplicitTape)
# TODO(apassos) try to not do this.
class NoneVSpace(ag_core.VSpace):
"""VSpace for python None."""
def __init__(self, _):
self.size = 0
ag_core.register_vspace(NoneVSpace, type(None))
class _TapeStack(threading.local):
def __init__(self):
super(_TapeStack, self).__init__()
self._stack = []
@property
def stack(self):
return self._stack
@tf_contextlib.contextmanager
def replace_stack(self, new_stack):
old = self._stack
self._stack = new_stack
yield
self._stack = old
# The global tape stack.
_tape_stack = _TapeStack()
def push_new_tape():
"""Pushes a new tape onto the tape stack."""
progenitor = ag_core.new_progenitor(ImplicitTape())
_tape_stack.stack.append(progenitor)
ag_core.active_progenitors.add(progenitor)
def watch(tensor):
"""Marks this tensor to be watched by all tapes in the stack.
Args:
tensor: tensor to be watched.
Returns:
The tensor, potentially wrapped by all tapes in the stack.
"""
for t in _tape_stack.stack:
tensor = _watch_with_tape(t, tensor)
return tensor
def pop_tape():
"""Pops the top tape in the stack, if any."""
if _tape_stack.stack:
return _tape_stack.stack.pop()
return None
def any_tape_has(tensor):
for t in _tape_stack.stack:
if tensor_id(tensor) in t.value.tensors:
return True
return False
def should_record(tensors):
"""Returns true if any tape in the stach watches any of these tensors."""
return any(ag_core.isnode(x) for x in tensors)
class _EagerSequenceNode(container_types.SequenceNode):
"""Eager version of SequenceNode, to live in EagerSequenceVSpace."""
pass
class _EagerSequenceVSpace(container_types.SequenceVSpace):
"""Changes equality on SequenceVSpace to conform to tfe requirements."""
def __init__(self, value):
self.shape = [ag_core.vspace(x) for x in value]
self.size = sum(s.size for s in self.shape)
self.sequence_type = type(value)
def __eq__(self, other):
if type(self) != type(other): # pylint: disable=unidiomatic-typecheck
return False
if len(self.shape) != len(other.shape):
# TODO(apassos) function gradients sometimes return gradients for side
# inputs which breaks this assertion. Understand how to fix it.
return True
for ss, os in zip(self.shape, other.shape):
if ss != os:
if isinstance(ss, NoneVSpace) or isinstance(os, NoneVSpace):
continue
if ss.dtype == dtypes.resource or os.dtype == dtypes.resource:
continue
return False
return True
class _EagerList(list):
"""Type used to bypass SequenceVSpace."""
def __init__(self, value):
super(_EagerList, self).__init__(value)
for v in value:
assert not ag_core.isnode(v)
ag_core.register_vspace(_EagerSequenceVSpace, _EagerList)
ag_core.register_node(_EagerSequenceNode, _EagerList)
@ag_core.primitive
def _record_operation(output_tensors, input_tensors, side_outputs,
backward_function):
del input_tensors, side_outputs, backward_function
return _EagerList(output_tensors)
def record_operation(o, i, s, b):
"""Primitive to trigger autograd tracing on outputs from inputs."""
inputs = container_types.make_sequence(_EagerList, *i)
return _record_operation(o, inputs, s, b)
def _record_operation_vjp(g, ans, vs, gvs, output_tensors, input_tensors,
side_outputs, backward_function):
"""Gradient for _record_operation."""
del ans, vs, gvs, output_tensors, input_tensors
backward_args = tuple(g) + tuple(side_outputs)
if ag_core.isnode(backward_args):
backward_args = list(backward_args)
tensors = nest.flatten(backward_function(*backward_args))
return _EagerList([ag_core.getval(t) for t in tensors])
_record_operation.defvjp(_record_operation_vjp, argnum=1)
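# A minimal sketch of how the pieces above fit together (hypothetical tensors;
# the real callers live inside the eager execution machinery):
#   push_new_tape()            # start recording onto a fresh implicit tape
#   x = watch(x)               # mark x so gradients are accumulated for it
#   ...                        # ops call record_operation(outputs, inputs,
#                              #   side_outputs, backward_function)
#   tape = pop_tape()          # pop the progenitor holding the recorded tape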
|
{
"content_hash": "35a97aecc3db73d3e53c3ce53bf24b34",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 76,
"avg_line_length": 26.18141592920354,
"alnum_prop": 0.6880175764745649,
"repo_name": "xuleiboy1234/autoTitle",
"id": "1cab4346b0c7b6bfdb9c456bd915a71c517a483d",
"size": "6606",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/tensorflow/python/eager/tape.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7666"
},
{
"name": "C",
"bytes": "196965"
},
{
"name": "C++",
"bytes": "28230132"
},
{
"name": "CMake",
"bytes": "624472"
},
{
"name": "Go",
"bytes": "941453"
},
{
"name": "Java",
"bytes": "380704"
},
{
"name": "Jupyter Notebook",
"bytes": "1833674"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37232"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63210"
},
{
"name": "Perl",
"bytes": "5350"
},
{
"name": "Perl 6",
"bytes": "1365"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "25123920"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Shell",
"bytes": "358280"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('UserManagement', '0026_auto_20170315_1915'),
]
operations = [
migrations.RemoveField(
model_name='studentprofile',
name='team',
),
]
|
{
"content_hash": "81635e9e4a6f28a2c1a3994317683d79",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 54,
"avg_line_length": 19.58823529411765,
"alnum_prop": 0.6006006006006006,
"repo_name": "SkillSmart/ConferenceManagementSystem",
"id": "9223fa2e5fa5cd1dcead2fc01a460fbbfd3c5c24",
"size": "406",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "UserManagement/migrations/0027_remove_studentprofile_team.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1259"
},
{
"name": "C",
"bytes": "487034"
},
{
"name": "C++",
"bytes": "26115"
},
{
"name": "CSS",
"bytes": "53731"
},
{
"name": "HTML",
"bytes": "172113"
},
{
"name": "JavaScript",
"bytes": "126279"
},
{
"name": "PowerShell",
"bytes": "8175"
},
{
"name": "Python",
"bytes": "217460"
},
{
"name": "Tcl",
"bytes": "1237789"
}
],
"symlink_target": ""
}
|
"""SCons.Tool.sgicc
Tool-specific initialization for MIPSPro cc on SGI.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/sgicc.py 2014/08/24 12:12:31 garyo"
import cc
def generate(env):
"""Add Builders and construction variables for gcc to an Environment."""
cc.generate(env)
env['CXX'] = 'CC'
env['SHOBJSUFFIX'] = '.o'
env['STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME'] = 1
def exists(env):
return env.Detect('cc')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
{
"content_hash": "ccd0f62300e9122310bba2594628f57a",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 76,
"avg_line_length": 34.320754716981135,
"alnum_prop": 0.742715777899945,
"repo_name": "engineer0x47/SCONS",
"id": "f50d06f9fda0d1b136e33ac04e08e7e143403ead",
"size": "1819",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "build/lib/SCons/Tool/sgicc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3707391"
},
{
"name": "Shell",
"bytes": "2934"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("djasana", "0022_alter_attachment_url_length"),
]
operations = [
migrations.AlterField(
model_name="attachment",
name="download_url",
field=models.URLField(max_length=5120),
),
]
|
{
"content_hash": "74d670bd4c4e425786bffd2317390ae2",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 56,
"avg_line_length": 22.3125,
"alnum_prop": 0.5882352941176471,
"repo_name": "sbywater/django-asana",
"id": "27889e6650d0add3654137941d58788b15f94fee",
"size": "407",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djasana/migrations/0023_alter_attachment_url_download_length.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "199690"
}
],
"symlink_target": ""
}
|
"""
.. _plot_source_alignment:
Source alignment and coordinate frames
======================================
This tutorial shows how to visually assess the spatial alignment of MEG sensor
locations, digitized scalp landmark and sensor locations, and MRI volumes. This
alignment process is crucial for computing the forward solution, as is
understanding the different coordinate frames involved in this process.
.. contents:: Page contents
:local:
:depth: 2
Let's start out by loading some data.
"""
import os.path as op
import numpy as np
import nibabel as nib
from scipy import linalg
import mne
from mne.io.constants import FIFF
data_path = mne.datasets.sample.data_path()
subjects_dir = op.join(data_path, 'subjects')
raw_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif')
trans_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_raw-trans.fif')
raw = mne.io.read_raw_fif(raw_fname)
trans = mne.read_trans(trans_fname)
src = mne.read_source_spaces(op.join(subjects_dir, 'sample', 'bem',
'sample-oct-6-src.fif'))
# Load the T1 file and change the header information to the correct units
t1w = nib.load(op.join(data_path, 'subjects', 'sample', 'mri', 'T1.mgz'))
t1w = nib.Nifti1Image(t1w.dataobj, t1w.affine)
t1w.header['xyzt_units'] = np.array(10, dtype='uint8')
t1_mgh = nib.MGHImage(t1w.dataobj, t1w.affine)
###############################################################################
# .. raw:: html
#
# <style>
# .pink {color:DarkSalmon; font-weight:bold}
# .blue {color:DeepSkyBlue; font-weight:bold}
# .gray {color:Gray; font-weight:bold}
# .magenta {color:Magenta; font-weight:bold}
# .purple {color:Indigo; font-weight:bold}
# .green {color:LimeGreen; font-weight:bold}
# .red {color:Red; font-weight:bold}
# </style>
#
# .. role:: pink
# .. role:: blue
# .. role:: gray
# .. role:: magenta
# .. role:: purple
# .. role:: green
# .. role:: red
#
#
# Understanding coordinate frames
# -------------------------------
# For M/EEG source imaging, there are three **coordinate frames** that must be
# brought into alignment using two 3D `transformation matrices <wiki_xform_>`_
# that define how to rotate and translate points in one coordinate frame
# to their equivalent locations in another. The three main coordinate frames
# are:
#
# * :blue:`"meg"`: the coordinate frame for the physical locations of MEG
# sensors
# * :gray:`"mri"`: the coordinate frame for MRI images, and scalp/skull/brain
# surfaces derived from the MRI images
# * :pink:`"head"`: the coordinate frame for digitized sensor locations and
# scalp landmarks ("fiducials")
#
#
# Each of these are described in more detail in the next section.
#
# A good way to start visualizing these coordinate frames is to use the
# `mne.viz.plot_alignment` function, which is used for creating or inspecting
# the transformations that bring these coordinate frames into alignment, and
# displaying the resulting alignment of EEG sensors, MEG sensors, brain
# sources, and conductor models. If you provide ``subjects_dir`` and
# ``subject`` parameters, the function automatically loads the subject's
# Freesurfer MRI surfaces. Important for our purposes, passing
# ``show_axes=True`` to `~mne.viz.plot_alignment` will draw the origin of each
# coordinate frame in a different color, with axes indicated by different sized
# arrows:
#
# * shortest arrow: (**R**)ight / X
# * medium arrow: forward / (**A**)nterior / Y
# * longest arrow: up / (**S**)uperior / Z
#
# Note that all three coordinate systems are **RAS** coordinate frames and
# hence are also `right-handed`_ coordinate systems. Finally, note that the
# ``coord_frame`` parameter sets which coordinate frame the camera
# should initially be aligned with. Let's take a look:
fig = mne.viz.plot_alignment(raw.info, trans=trans, subject='sample',
subjects_dir=subjects_dir, surfaces='head-dense',
show_axes=True, dig=True, eeg=[], meg='sensors',
coord_frame='meg', mri_fiducials='estimated')
mne.viz.set_3d_view(fig, 45, 90, distance=0.6, focalpoint=(0., 0., 0.))
print('Distance from head origin to MEG origin: %0.1f mm'
% (1000 * np.linalg.norm(raw.info['dev_head_t']['trans'][:3, 3])))
print('Distance from head origin to MRI origin: %0.1f mm'
% (1000 * np.linalg.norm(trans['trans'][:3, 3])))
dists = mne.dig_mri_distances(raw.info, trans, 'sample',
subjects_dir=subjects_dir)
print('Distance from %s digitized points to head surface: %0.1f mm'
% (len(dists), 1000 * np.mean(dists)))
###############################################################################
# Coordinate frame definitions
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# 1. Neuromag/Elekta/MEGIN head coordinate frame ("head", :pink:`pink axes`)
# The head coordinate frame is defined through the coordinates of
# anatomical landmarks on the subject's head: usually the Nasion (`NAS`_),
# and the left and right preauricular points (`LPA`_ and `RPA`_).
# Different MEG manufacturers may have different definitions of the head
# coordinate frame. A good overview can be seen in the
# `FieldTrip FAQ on coordinate systems`_.
#
# For Neuromag/Elekta/MEGIN, the head coordinate frame is defined by the
# intersection of
#
# 1. the line between the LPA (:red:`red sphere`) and RPA
# (:purple:`purple sphere`), and
#    2. the line perpendicular to this LPA-RPA line that goes through
# the Nasion (:green:`green sphere`).
#
# The axes are oriented as **X** origin→RPA, **Y** origin→NAS,
# **Z** origin→upward (orthogonal to X and Y).
#
# .. note:: The required 3D coordinates for defining the head coordinate
# frame (NAS, LPA, RPA) are measured at a stage separate from
# the MEG data recording. There exist numerous devices to
# perform such measurements, usually called "digitizers". For
# example, see the devices by the company `Polhemus`_.
#
# 2. MEG device coordinate frame ("meg", :blue:`blue axes`)
# The MEG device coordinate frame is defined by the respective MEG
# manufacturers. All MEG data is acquired with respect to this coordinate
# frame. To account for the anatomy and position of the subject's head, we
# use so-called head position indicator (HPI) coils. The HPI coils are
# placed at known locations on the scalp of the subject and emit
# high-frequency magnetic fields used to coregister the head coordinate
# frame with the device coordinate frame.
#
# From the Neuromag/Elekta/MEGIN user manual:
#
# The origin of the device coordinate system is located at the center
# of the posterior spherical section of the helmet with X axis going
# from left to right and Y axis pointing front. The Z axis is, again
# normal to the plane with positive direction up.
#
# .. note:: The HPI coils are shown as :magenta:`magenta spheres`.
# Coregistration happens at the beginning of the recording and
# the head↔meg transformation matrix is stored in
# ``raw.info['dev_head_t']``.
#
# 3. MRI coordinate frame ("mri", :gray:`gray axes`)
# Defined by Freesurfer, the "MRI surface RAS" coordinate frame has its
#    origin at the center of a 256×256×256 1mm isotropic volume (though the
# center may not correspond to the anatomical center of the subject's
# head).
#
# .. note:: We typically align the MRI coordinate frame to the head
# coordinate frame through a
# `rotation and translation matrix <wiki_xform_>`_,
# that we refer to in MNE as ``trans``.
#
# A bad example
# ^^^^^^^^^^^^^
# Let's try using `~mne.viz.plot_alignment` with ``trans=None``, which
# (incorrectly!) equates the MRI and head coordinate frames.
mne.viz.plot_alignment(raw.info, trans=None, subject='sample', src=src,
subjects_dir=subjects_dir, dig=True,
surfaces=['head-dense', 'white'], coord_frame='meg')
###############################################################################
# A good example
# ^^^^^^^^^^^^^^
# Here is the same plot, this time with the ``trans`` properly defined
# (using a precomputed transformation matrix).
mne.viz.plot_alignment(raw.info, trans=trans, subject='sample',
src=src, subjects_dir=subjects_dir, dig=True,
surfaces=['head-dense', 'white'], coord_frame='meg')
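###############################################################################
# As a quick check (a minimal sketch that only reuses objects already loaded
# above), the two transformations discussed so far can also be printed
# directly: the head↔meg transform is stored in ``raw.info['dev_head_t']``
# and the head↔mri transform is the ``trans`` we read from disk. Both are
# rigid (rotation + translation) transforms stored as 4×4 matrices.
print(raw.info['dev_head_t'])
print(trans)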
###############################################################################
# Visualizing the transformations
# -------------------------------
# Let's visualize these coordinate frames using just the scalp surface; this
# will make it easier to see their relative orientations. To do this we'll
# first load the Freesurfer scalp surface, then apply a few different
# transforms to it. In addition to the three coordinate frames discussed above,
# we'll also show the "mri_voxel" coordinate frame. Unlike MRI Surface RAS,
# "mri_voxel" has its origin in the corner of the volume (the left-most,
# posterior-most coordinate on the inferior-most MRI slice) instead of at the
# center of the volume. "mri_voxel" is also **not** an RAS coordinate system:
# rather, its XYZ directions are based on the acquisition order of the T1 image
# slices.
# The head surface is stored in "mri" coordinate frame
# (origin at center of volume, units=mm)
seghead_rr, seghead_tri = mne.read_surface(
op.join(subjects_dir, 'sample', 'surf', 'lh.seghead'))
# To put the scalp in the "head" coordinate frame, we apply the inverse of
# the precomputed `trans` (which maps head → mri)
mri_to_head = linalg.inv(trans['trans'])
scalp_pts_in_head_coord = mne.transforms.apply_trans(
mri_to_head, seghead_rr, move=True)
# To put the scalp in the "meg" coordinate frame, we use the inverse of
# raw.info['dev_head_t']
head_to_meg = linalg.inv(raw.info['dev_head_t']['trans'])
scalp_pts_in_meg_coord = mne.transforms.apply_trans(
head_to_meg, scalp_pts_in_head_coord, move=True)
# The "mri_voxel"→"mri" transform is embedded in the header of the T1 image
# file. We'll invert it and then apply it to the original `seghead_rr` points.
# No unit conversion necessary: this transform expects mm and the scalp surface
# is defined in mm.
vox_to_mri = t1_mgh.header.get_vox2ras_tkr()
mri_to_vox = linalg.inv(vox_to_mri)
scalp_points_in_vox = mne.transforms.apply_trans(
mri_to_vox, seghead_rr, move=True)
###############################################################################
# Now that we've transformed all the points, let's plot them. We'll use the
# same colors used by `~mne.viz.plot_alignment` and use :green:`green` for the
# "mri_voxel" coordinate frame:
def add_head(renderer, points, color, opacity=0.95):
renderer.mesh(*points.T, triangles=seghead_tri, color=color,
opacity=opacity)
renderer = mne.viz.backends.renderer.create_3d_figure(
size=(600, 600), bgcolor='w', scene=False)
add_head(renderer, seghead_rr, 'gray')
add_head(renderer, scalp_pts_in_meg_coord, 'blue')
add_head(renderer, scalp_pts_in_head_coord, 'pink')
add_head(renderer, scalp_points_in_vox, 'green')
mne.viz.set_3d_view(figure=renderer.figure, distance=800,
focalpoint=(0., 30., 30.), elevation=105, azimuth=180)
renderer.show()
###############################################################################
# The relative orientations of the coordinate frames can be inferred by
# observing the direction of the subject's nose. Notice also how the origin of
# the :green:`mri_voxel` coordinate frame is in the corner of the volume
# (above, behind, and to the left of the subject), whereas the other three
# coordinate frames have their origin roughly in the center of the head.
#
# Example: MRI defacing
# ^^^^^^^^^^^^^^^^^^^^^
# For a real-world example of using these transforms, consider the task of
# defacing the MRI to preserve subject anonymity. If you know the points in
# the "head" coordinate frame (as you might if you're basing the defacing on
# digitized points) you would need to transform them into "mri" or "mri_voxel"
# in order to apply the blurring or smoothing operations to the MRI surfaces or
# images. Here's what that would look like (we'll use the nasion landmark as a
# representative example):
# Get the nasion
nasion = [p for p in raw.info['dig'] if
p['kind'] == FIFF.FIFFV_POINT_CARDINAL and
p['ident'] == FIFF.FIFFV_POINT_NASION][0]
assert nasion['coord_frame'] == FIFF.FIFFV_COORD_HEAD
nasion = nasion['r'] # get just the XYZ values
# Transform it from head to MRI space (recall that `trans` is head → mri)
nasion_mri = mne.transforms.apply_trans(trans, nasion, move=True)
# Then transform to voxel space, after converting from meters to millimeters
nasion_vox = mne.transforms.apply_trans(
mri_to_vox, nasion_mri * 1e3, move=True)
# Plot it to make sure the transforms worked
renderer = mne.viz.backends.renderer.create_3d_figure(
size=(400, 400), bgcolor='w', scene=False)
add_head(renderer, scalp_points_in_vox, 'green', opacity=1)
renderer.sphere(center=nasion_vox, color='orange', scale=10)
mne.viz.set_3d_view(figure=renderer.figure, distance=600.,
focalpoint=(0., 125., 250.), elevation=45, azimuth=180)
renderer.show()
###############################################################################
# Defining the head↔MRI ``trans`` using the GUI
# ---------------------------------------------
# You can try creating the head↔MRI transform yourself using
# :func:`mne.gui.coregistration`.
#
# * First you must load the digitization data from the raw file
# (``Head Shape Source``). The MRI data is already loaded if you provide the
# ``subject`` and ``subjects_dir``. Toggle ``Always Show Head Points`` to see
# the digitization points.
# * To set the landmarks, toggle ``Edit`` radio button in ``MRI Fiducials``.
# * Set the landmarks by clicking the radio button (LPA, Nasion, RPA) and then
# clicking the corresponding point in the image.
# * After doing this for all the landmarks, toggle ``Lock`` radio button. You
# can omit outlier points, so that they don't interfere with the finetuning.
#
# .. note:: You can save the fiducials to a file and pass
# ``mri_fiducials=True`` to plot them in
# :func:`mne.viz.plot_alignment`. The fiducials are saved to the
# subject's bem folder by default.
# * Click ``Fit Head Shape``. This will align the digitization points to the
# head surface. Sometimes the fitting algorithm doesn't find the correct
# alignment immediately. You can try first fitting using LPA/RPA or fiducials
# and then align according to the digitization. You can also finetune
# manually with the controls on the right side of the panel.
# * Click ``Save As...`` (lower right corner of the panel), set the filename
# and read it with :func:`mne.read_trans`.
#
# For more information, see step by step instructions
# `in these slides
# <https://www.slideshare.net/mne-python/mnepython-coregistration>`_.
# Uncomment the following line to align the data yourself.
# mne.gui.coregistration(subject='sample', subjects_dir=subjects_dir)
###############################################################################
# .. _plot_source_alignment_without_mri:
#
# Alignment without MRI
# ---------------------
# The surface alignments above are possible if you have the surfaces available
# from Freesurfer. :func:`mne.viz.plot_alignment` automatically searches for
# the correct surfaces from the provided ``subjects_dir``. Another option is
# to use a :ref:`spherical conductor model <eeg_sphere_model>`. It is
# passed through ``bem`` parameter.
sphere = mne.make_sphere_model(info=raw.info, r0='auto', head_radius='auto')
src = mne.setup_volume_source_space(sphere=sphere, pos=10.)
mne.viz.plot_alignment(
raw.info, eeg='projected', bem=sphere, src=src, dig=True,
surfaces=['brain', 'outer_skin'], coord_frame='meg', show_axes=True)
###############################################################################
# It is also possible to use :func:`mne.gui.coregistration`
# to warp a subject (usually ``fsaverage``) to subject digitization data, see
# `these slides
# <https://www.slideshare.net/mne-python/mnepython-scale-mri>`_.
#
# .. _right-handed: https://en.wikipedia.org/wiki/Right-hand_rule
# .. _wiki_xform: https://en.wikipedia.org/wiki/Transformation_matrix
# .. _NAS: https://en.wikipedia.org/wiki/Nasion
# .. _LPA: http://www.fieldtriptoolbox.org/faq/how_are_the_lpa_and_rpa_points_defined/ # noqa:E501
# .. _RPA: http://www.fieldtriptoolbox.org/faq/how_are_the_lpa_and_rpa_points_defined/ # noqa:E501
# .. _Polhemus: https://polhemus.com/scanning-digitizing/digitizing-products/
# .. _FieldTrip FAQ on coordinate systems: http://www.fieldtriptoolbox.org/faq/how_are_the_different_head_and_mri_coordinate_systems_defined/ # noqa:E501
|
{
"content_hash": "2f1b10e02a535ed6461b1bc23a0b460f",
"timestamp": "",
"source": "github",
"line_count": 359,
"max_line_length": 154,
"avg_line_length": 47.729805013927574,
"alnum_prop": 0.6583017216224103,
"repo_name": "mne-tools/mne-tools.github.io",
"id": "9739eb83d6e67696d36d504f5484f927be6f4bcb",
"size": "17179",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "0.22/_downloads/4bd3132f565f8eeb8f92269a858f1f3f/plot_source_alignment.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "708696"
},
{
"name": "Dockerfile",
"bytes": "1820"
},
{
"name": "HTML",
"bytes": "1526247783"
},
{
"name": "JavaScript",
"bytes": "1323087"
},
{
"name": "Jupyter Notebook",
"bytes": "24820047"
},
{
"name": "Python",
"bytes": "18575494"
}
],
"symlink_target": ""
}
|
from celery import task
from openpds.core.models import IPReferral, Profile, Emoji, Notification, Device, QuestionInstance, QuestionType
from bson import ObjectId
from pymongo import Connection
from django.conf import settings
import time
from datetime import date, timedelta, datetime
import json
import pdb
from gcm import GCM
from SPARQLWrapper import SPARQLWrapper, JSON
from collections import Counter
import sqlite3
import random
from openpds.questions.socialhealth_tasks import getStartTime
from openpds import getInternalDataStore
from django.utils import timezone
import requests
"""
connection = Connection(
host=random.choice(getattr(settings, "MONGODB_HOST", None)),
port=getattr(settings, "MONGODB_PORT", None),
readPreference='nearest'
)
"""
@task()
def ensureFunfIndexes():
profiles = Profile.objects.all()
for profile in profiles:
ensureFunfIndex.delay(profile.pk)
@task()
def ensureFunfIndex(pk):
profile = Profile.objects.get(pk=pk)
dbName = profile.getDBName()
connection = Connection(
host=random.choice(getattr(settings, "MONGODB_HOST", None)),
port=getattr(settings, "MONGODB_PORT", None),
readPreference='nearest'
)
try:
connection.admin.command('enablesharding', dbName)
except:
pass
collection = connection[dbName]["funf"]
collection.ensure_index([("time", -1), ("key", 1)], cache_for=7200, background=True, unique=True, dropDups=True)
connection.close()
# This might be causing a bug so I removed it from openpds_scheduled_tasks.py
@task()
def deleteUnusedProfiles():
profiles = Profile.objects.all()
for profile in profiles:
deleteUnusedProfile.delay(profile.pk)
#deprecated - do not use this
@task()
def deleteUnusedProfile(pk):
#start = getStartTime(60, False)
profile = Profile.objects.get(pk=pk)
dbName = profile.getDBName()
connection = Connection(
host=random.choice(getattr(settings, "MONGODB_HOST", None)),
port=getattr(settings, "MONGODB_PORT", None),
readPreference='nearest'
)
db = connection[dbName]#["funf"]
#if collection.find({"time": { "$gte": start}}).count() == 0:
if 'funf' not in db.collection_names():
connection.drop_database(dbName)
if Emoji.objects.filter(profile=profile).count() == 0 and not profile.fbid and not profile.referral:
profile.delete()
connection.close()
@task()
def recentProbeCounts():
profiles = Profile.objects.all()
for profile in profiles:
recentProbeCounts2.delay(profile.pk)
@task()
def recentProbeCounts2(pk):
startTime = getStartTime(1, False)
profile = Profile.objects.get(pk=pk)
ids = getInternalDataStore(profile, "", "Living Lab", "")
probes = ["ActivityProbe", "SimpleLocationProbe", "CallLogProbe", "SmsProbe", "WifiProbe", "BluetoothProbe"]
answer = {}
for probe in probes:
data = ids.getData(probe, startTime, None)
answer[probe] = data.count()
ids.saveAnswer("RecentProbeCounts", answer)
def addNotification(profile, notificationType, title, content, uri):
notification, created = Notification.objects.get_or_create(datastore_owner=profile, type=notificationType)
notification.title = title
notification.content = content
notification.datastore_owner = profile
if uri is not None:
notification.uri = uri
notification.save()
def addNotificationAndNotify(profile, notificationType, title, content, uri):
addNotification(profile, notificationType, title, content, uri)
if Device.objects.filter(datastore_owner = profile).count() > 0:
gcm = GCM(settings.GCM_API_KEY)
for device in Device.objects.filter(datastore_owner = profile):
try:
gcm.plaintext_request(registration_id=device.gcm_reg_id, data= {"action":"notify"})
except Exception as e:
print "Issue with sending notification to: %s, %s" % (profile.id, profile.uuid)
print e
def notifyAll():
for profile in Profile.objects.all():
if Device.objects.filter(datastore_owner = profile).count() > 0:
gcm = GCM(settings.GCM_API_KEY)
for device in Device.objects.filter(datastore_owner = profile):
try:
gcm.plaintext_request(registration_id=device.gcm_reg_id, data={"action":"notify"})
except Exception as e:
print "Issue with sending notification to: %s, %s" % (profile.id, profile.uuid)
print e
def broadcastNotification(notificationType, title, content, uri):
for profile in Profile.objects.all():
addNotificationAndNotify(profile, notificationType, title, content, uri)
@task()
def sendVerificationSurvey():
broadcastNotification(2, "Social Health Survey", "Please take a moment to complete this social health survey", "/survey/?survey=8")
@task()
def sendPast3DaysSurvey():
broadcastNotification(2, "Social Health Survey", "Please take a moment to complete this social health survey", "/survey/?survey=5")
@task()
def sendExperienceSampleSurvey():
broadcastNotification(2, "Social Health Survey", "Please take a moment to complete this social health survey", "/survey/?survey=9")
@task()
def sendSleepStartSurvey():
broadcastNotification(2, "Sleep Tracker", "Please take this survey right before bed", "/survey/?survey=10")
@task()
def sendSleepEndSurvey():
broadcastNotification(2, "Sleep Tracker", "Please take this survey right after waking up", "/survey/?survey=11")
def minDiff(elements, item):
return min([abs(el - item) for el in elements])
@task()
def scheduleExperienceSamplesForToday():
# We're scheduling 4 surveys / day, starting in the morning, with at least an hour of time in between each
# assuming we send the first within 2 hours of running this, and need to get all surveys done within 8 hours,
# we can build the list of delays via simple rejection
maxDelay = 3600 * 8
delays = [random.randint(0,maxDelay)]
while len(delays) < 4:
nextDelay = random.randint(0, maxDelay)
if minDiff(delays, nextDelay) >= 3600:
delays.append(nextDelay)
print delays
print [time.strftime("%H:%M", time.localtime(1385042444 + d)) for d in delays]
for t in delays:
print "sending survey with %s second delay..." % str(t)
sendExperienceSampleSurvey.apply_async(countdown = t)
@task()
def findMusicGenres():
profiles = Profile.objects.all()
sparql = SPARQLWrapper("http://dbpedia.org/sparql")
sparql.setReturnFormat(JSON)
artistQuery = "PREFIX dbpprop: <http://dbpedia.org/property/> PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> select ?artist ?genre from <http://dbpedia.org> where { ?artist rdfs:label \"%s\"@en . ?artist dbpprop:genre ?genre }"
albumQuery = "PREFIX dbpedia-owl: <http://dbpedia.org/ontology/> PREFIX dbpprop: <http://dbpedia.org/property/> select ?album ?genre from <http://dbpedia.org> where { ?album a dbpedia-owl:Album . ?album dbpprop:name '%s'@en . ?album dbpprop:genre ?genre }"
for profile in profiles:
dbName = profile.getDBName()
connection = Connection(
host=random.choice(getattr(settings, "MONGODB_HOST", None)),
port=getattr(settings, "MONGODB_PORT", None),
readPreference='nearest'
)
answerListCollection = connection[dbName]["answerlist"]
collection = connection[dbName]["funf"]
songs = [song["value"] for song in collection.find({ "key": { "$regex": "AudioMediaProbe$"}})]
        artists = set([str(song["artist"]) for song in songs if str(song["artist"]) != "<unknown>" and '"' not in str(song["artist"])])
# albums = set([str(song["album"]) for song in songs if str(song["album"]) != "<unknown>" and '"' not in str(song["album"])])
genres = []
for artist in artists:
temp = artistQuery % artist
print temp
sparql.setQuery(temp)
results = sparql.query().convert()
genres.extend([binding["genre"]["value"] for binding in results["results"]["bindings"]])
# for album in albums:
# temp = albumQuery % album
# print temp
# sparql.setQuery(temp)
# results = sparql.query().convert()
# genres.extend([binding["genre"]["value"] for binding in results["results"]["bindings"]])
if len(genres) > 0:
counts = Counter(genres).most_common(10)
musicGenres = answerListCollection.find({ "key": "MusicGenres" })
musicGenres = musicGenres[0] if musicGenres.count() > 0 else { "key": "MusicGenres", "value": [] }
musicGenres["value"] = [count[0] for count in counts]
answerListCollection.save(musicGenres)
connection.close()
@task()
def dumpFunfData():
profiles = Profile.objects.all()
for profile in profiles:
dumpFunfData2.delay(profile.pk)
@task()
def dumpFunfData2(pk):
outputConnection = sqlite3.connect("openpds/static/dump.db")
c = outputConnection.cursor()
c.execute("CREATE TABLE IF NOT EXISTS funf (user_id integer, key text, time real, value text, PRIMARY KEY (user_id, key, time) on conflict ignore)")
startTime = getStartTime(3, False)#max(1378008000, startTimeRow[0]) if startTimeRow is not None else 1378008000
profile = Profile.objects.get(pk=pk)
dbName = profile.getDBName()
connection = Connection(
host=random.choice(getattr(settings, "MONGODB_HOST", None)),
port=getattr(settings, "MONGODB_PORT", None),
readPreference='nearest'
)
try:
connection.admin.command('enablesharding', dbName)
except:
pass
funf = connection[dbName]["funf"]
user = int(profile.id)
c.executemany("INSERT INTO funf VALUES (?,?,?,?)", [(user,d["key"][d["key"].rfind(".")+1:],d["time"],"%s"%d["value"]) for d in funf.find({"time": {"$gte": startTime}}) if d["key"] is not None])
outputConnection.commit()
outputConnection.close()
connection.close()
@task()
def dumpSurveyData():
profiles = Profile.objects.all()
outputConnection = sqlite3.connect("openpds/static/dump.db")
c = outputConnection.cursor()
#c.execute("DROP TABLE IF EXISTS survey;")
c.execute("CREATE TABLE IF NOT EXISTS survey (user_id integer, key text, time real, value text, PRIMARY KEY (user_id, key, time) on conflict ignore);")
for profile in profiles:
dbName = profile.getDBName()
connection = Connection(
host=random.choice(getattr(settings, "MONGODB_HOST", None)),
port=getattr(settings, "MONGODB_PORT", None),
readPreference='nearest'
)
try:
connection.admin.command('enablesharding', dbName)
except:
pass
answerlist = connection[dbName]["answerlist"]
user = int(profile.id)
for datum in answerlist.find({ "key": { "$regex": "Past3Days$"}}):
for answer in datum["value"]:
#print type(user), type(datum["key"]), type(answer)#, type(datum["value"])
c.execute("INSERT INTO survey VALUES (?,?,?,?);", (user,datum["key"],answer["time"],"%s"%answer["value"]))
for datum in answerlist.find({ "key": { "$regex": "Verification"}}):
for answer in datum["value"]:
c.execute("INSERT INTO survey VALUES (?,?,?,?);", (user,datum["key"],answer["time"],"%s"%answer["value"]))
for datum in answerlist.find({ "key": { "$regex": "Last15Minutes"}}):
for answer in datum["value"]:
c.execute("INSERT INTO survey VALUES (?,?,?,?);", (user,datum["key"],answer["time"],"%s"%answer["value"]))
connection.close()
outputConnection.commit()
outputConnection.close()
def addNotification(profile, notificationType, title, content, uri):
notification, created = Notification.objects.get_or_create(datastore_owner=profile, type=notificationType)
if uri != -1:
notification.title = uri # Set title as URI for now
notification.content = content
notification.datastore_owner = profile
if uri is not None:
notification.uri = uri
notification.save()
# formats a notification the way the SmartCATCH client understands it
def formatNotification(question, type="Picker", description="", items=[], **kwargs):
s1 = "<startTitle>" + question + "<endTitle><startType>" + type + "<endType><startDescription>" + description + "<endDescription>"
s2 = "<startNumItems>%d<endNumItems>" % len(items) + ''.join(["<startI%d>%s<endI%d>" % (i+1,items[i],i+1) for i in xrange(len(items))])
    s3 = "<startNegButton>Delay<endNegButton><startPosButton>Submit<endPosButton><startNumRepeats>3<endNumRepeats><startTimeRepeat>5000<endTimeRepeat>" # TODO: format this. Unclear about this param
return json.dumps({ 's1': s1, 's2': s2, 's3': s3 })
# return s1, s2, s3 # TODO: JSONfy
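# Illustrative call (the question text and answer items here are made up):
#   formatNotification("How are you feeling?", items=["Good", "OK", "Bad"])
# returns a JSON string with keys 's1', 's2' and 's3' wrapping the tagged
# <start...>/<end...> segments built above.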
# REPEAT_INTERVAL (minutes to wait before re-sending a question) is used below
# but never defined in this module; 5 is an assumed placeholder value.
REPEAT_INTERVAL = 5
def fetchQuestion(profile, device):
ret_val = None
q_list = []
qtypes = QuestionType.objects.filter(frequency_interval__isnull=False, frequency_interval__lt=5256000)
qtypes = qtypes.filter(goal__isnull=True) | qtypes.filter(goal=profile.goal)
print "DEBUG: got %d question types" % qtypes.count()
for qtype in qtypes:
# Get previous recent questions asked
# TODO: d needs to be based on wakeup time of the current user, and not on NOW.
# d = date.today() - timedelta(minutes=qtype.frequency_interval)
# questions = QuestionInstance.objects.filter(profile=profile).filter(datetime__gt=d).filter(question_type=qtype) # TODO: probably need an index: (profile, datetime).
questions = QuestionInstance.objects.filter(profile=profile).filter(question_type=qtype).filter(expired=False)
print "QUESTIONS = %s" % questions
if qtype.sleep_offset < 0:
d_ask = datetime.combine(datetime.date(datetime.today()), profile.sleep) - timedelta(qtype.sleep_offset)
else:
d_ask = datetime.combine(datetime.date(datetime.today()), profile.wake) + timedelta(qtype.sleep_offset)
if (questions.count() == 0 and d_ask.time() < datetime.time(datetime.now()) ):
# this question wasn't asked yet. Generate it.
q = QuestionInstance(question_type=qtype, profile=profile)
q_list.append({'instance': q, 'type': qtype})
qf = None
if qtype.followup_question:
# generate the data question as well
qf = QuestionInstance(question_type=qtype.followup_question, profile=profile)
q.notification_counter = 1
q.save()
if qf:
qf.save()
else:
ql = list(questions.reverse()[:1])
q = None
if ql:
q = ql[0]
if (q and qtype.resend_quantity > q.notification_counter):
# Check if q needs to be reasked and act accordingly
next_qtime = q.datetime + timedelta(minutes=REPEAT_INTERVAL)
if (next_qtime < timezone.now()):
if not q.answer or (qtype.followup_key != None and q.answer != qtype.followup_key):
print 'Updating question = %s' % q
q.notification_counter = q.notification_counter + 1
q.save()
q_list.append({'instance': q, 'type': qtype})
# at this point q_list contains notification questions (not followups)
if len(q_list) > 1:
ret_val = {'question': ('You have %d unanswered questions' % len(q_list)), 'description': 'Multiple questions', 'action': -1}
elif len(q_list) == 1:
ret_val = {'question': q_list[0]['type'].text, 'description': q_list[0]['type'].text, 'action': q_list[0]['instance'].id}
return ret_val
def expireQuestions():
questions = QuestionInstance.objects.all().filter(expired=False)
for question in questions:
dt = timezone.now() - timedelta(minutes=question.question_type.expiry)
if ( dt > question.datetime ):
# This question is expired. Update it
question.expired = True
print "Expiring question %d" % question.id
question.save()
@task()
def flumojiNotifications():
print "Starting notifications task"
expireQuestions()
profiles = Profile.objects.all()
for profile in profiles:
if Device.objects.filter(datastore_owner = profile).count() > 0:
gcm = GCM(settings.GCM_API_KEY)
for device in Device.objects.filter(datastore_owner = profile):
try:
# add the notification to the D
q_params = fetchQuestion(profile, device)
except Exception as e:
print "NotificationError1: Issue with sending notification to: %s, %s" % (profile.id, profile.uuid)
print e
try:
print 'q_params = %s' % q_params
if q_params is not None:
js = formatNotification(q_params['question'],
description=q_params['description'],
items=[q_params['action']])
addNotification(profile, 2, 'SmartCATCH',
q_params['question'],
q_params['action'])
# send an alert that a notification is ready (app will call back to fetch the notification data)
print "id=%s, uuid=%s, device=%s" % (profile.id, profile.uuid,device.gcm_reg_id)
gcm.plaintext_request(registration_id=device.gcm_reg_id,
data={"action":"notify"},
collapse_key=q_params['question'])
except Exception as e:
print "NotificationError2: Issue with sending notification to: %s, %s" % (profile.id, profile.uuid)
print e
def setProfileLocation(profile):
dbName = profile.getDBName()
connection = Connection(
host=random.choice(getattr(settings, "MONGODB_HOST", None)),
port=getattr(settings, "MONGODB_PORT", None),
readPreference='nearest'
)
collection = connection[dbName]["funf"]
location = collection.find_one({"key": "edu.mit.media.funf.probe.builtin.LocationProbe"})
try:
lat = location["value"]["mlatitude"]
lng = location["value"]["mlongitude"]
profile.lat = int(lat*1000000.0)
profile.lng = int(lng*1000000.0)
profile.location_last_set = datetime.now()
profile.save()
except:
pass
connection.close()
@task()
def emojiLocations():
SIX_HOURS_AGO = datetime.now() - timedelta(hours=6)
emojis = Emoji.objects.filter(lat__isnull=True).order_by('-created')
for emoji in emojis:
setProfileLocation(emoji.profile)
if emoji.profile.lat:
emoji.lat = emoji.profile.lat
emoji.lng = emoji.profile.lng
emoji.save()
@task()
def profileLocations():
SIX_HOURS_AGO = datetime.now() - timedelta(hours=6)
profiles = Profile.objects.filter(location_last_set__lt=SIX_HOURS_AGO).order_by('location_last_set')
for profile in profiles:
profileLocation.delay(profile.pk)
@task()
def profileLocation(pk):
profile = Profile.objects.get(pk=pk)
setProfileLocation(profile)
@task()
def setInfluenceScores():
profiles = Profile.objects.filter(referral__isnull=False).order_by('-created')
for profile in profiles:
setInfluenceScore.delay(profile.referral.pk)
@task()
def setInfluenceScore(pk):
profile = Profile.objects.get(pk=pk)
score = 0
for child in profile.profile_set.all():
score += int(child.score * .333) + 10
if profile.score != score:
profile.score = score
profile.save()
@task()
def cleanExpiredReferrals():
ONE_MONTH_AGO = datetime.now() - timedelta(days=30)
IPReferral.objects.filter(created__lt=ONE_MONTH_AGO).delete()
@task()
def checkForProfileReferral(pk, ip):
profile = Profile.objects.get(pk=pk)
if not profile.referral:
ref = IPReferral.objects.filter(created__lt=profile.created, ip=ip).order_by('-created')
if len(ref) > 0:
profile.referral = ref[0].profile
profile.save()
fcm_url = 'https://fcm.googleapis.com/fcm/send'
@task()
def howAreYouFeelingTodayAllUsers():
    profiles = Profile.objects.all().exclude(emoji__created__gte=timezone.now()-timedelta(days=4), location_last_set__gte=timezone.now()-timedelta(days=4))
    # use location_last_set in this
#howAreYouFeelingToday(100)
@task()
def howAreYouFeelingToday(pk):
    # use datetime.datetime.now() for east coast time
body = {
"notification":{
"title":"Flumoji",
"body":"How are you feeling today?",
"content_available": "true"
},
"registration_ids":["c2tns8d8y_U:APA91bFLDla_bzUn9-wzyREjcTFMSGZkBPzjJL3YRlZ8fj5rGrz1GPEuZYs89Nl9W-Tok283zLgP6z-8jInt93jFmiJljQVLRle4izEibSOh2nBAuJNZNCk_hfBGe0gz1mewPAoWwh-b",]
}
headers = {"Content-Type":"application/json",
"Authorization": "key=%s" % settings.FCM_SERVER_KEY }
requests.post(fcm_url, data=json.dumps(body), headers=headers)
|
{
"content_hash": "f5b1f7c9bfa97f5b726c3da04626bdce",
"timestamp": "",
"source": "github",
"line_count": 517,
"max_line_length": 261,
"avg_line_length": 41.97872340425532,
"alnum_prop": 0.6307422936921163,
"repo_name": "eschloss/FluFuture",
"id": "868f35603f3d40631d3ddf342e4f90ca9213669e",
"size": "21703",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openpds/questions/tasks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "246641"
},
{
"name": "HTML",
"bytes": "145880"
},
{
"name": "JavaScript",
"bytes": "1185030"
},
{
"name": "Python",
"bytes": "328789"
}
],
"symlink_target": ""
}
|
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._ssh_public_keys_operations import (
build_create_request,
build_delete_request,
build_generate_key_pair_request,
build_get_request,
build_list_by_resource_group_request,
build_list_by_subscription_request,
build_update_request,
)
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SshPublicKeysOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.compute.v2021_11_01.aio.ComputeManagementClient`'s
:attr:`ssh_public_keys` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list_by_subscription(self, **kwargs: Any) -> AsyncIterable["_models.SshPublicKeyResource"]:
"""Lists all of the SSH public keys in the subscription. Use the nextLink property in the response
to get the next page of SSH public keys.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SshPublicKeyResource or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2021_11_01.models.SshPublicKeyResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-11-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.SshPublicKeysGroupListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_subscription.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("SshPublicKeysGroupListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_by_subscription.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/sshPublicKeys"} # type: ignore
@distributed_trace
def list_by_resource_group(
self, resource_group_name: str, **kwargs: Any
) -> AsyncIterable["_models.SshPublicKeyResource"]:
"""Lists all of the SSH public keys in the specified resource group. Use the nextLink property in
the response to get the next page of SSH public keys.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SshPublicKeyResource or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2021_11_01.models.SshPublicKeyResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-11-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.SshPublicKeysGroupListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_resource_group.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("SshPublicKeysGroupListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_by_resource_group.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys"} # type: ignore
@overload
async def create(
self,
resource_group_name: str,
ssh_public_key_name: str,
parameters: _models.SshPublicKeyResource,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.SshPublicKeyResource:
"""Creates a new SSH public key resource.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param ssh_public_key_name: The name of the SSH public key. Required.
:type ssh_public_key_name: str
:param parameters: Parameters supplied to create the SSH public key. Required.
:type parameters: ~azure.mgmt.compute.v2021_11_01.models.SshPublicKeyResource
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SshPublicKeyResource or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_11_01.models.SshPublicKeyResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def create(
self,
resource_group_name: str,
ssh_public_key_name: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.SshPublicKeyResource:
"""Creates a new SSH public key resource.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param ssh_public_key_name: The name of the SSH public key. Required.
:type ssh_public_key_name: str
:param parameters: Parameters supplied to create the SSH public key. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SshPublicKeyResource or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_11_01.models.SshPublicKeyResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def create(
self,
resource_group_name: str,
ssh_public_key_name: str,
parameters: Union[_models.SshPublicKeyResource, IO],
**kwargs: Any
) -> _models.SshPublicKeyResource:
"""Creates a new SSH public key resource.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param ssh_public_key_name: The name of the SSH public key. Required.
:type ssh_public_key_name: str
:param parameters: Parameters supplied to create the SSH public key. Is either a model type or
         an IO type. Required.
:type parameters: ~azure.mgmt.compute.v2021_11_01.models.SshPublicKeyResource or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SshPublicKeyResource or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_11_01.models.SshPublicKeyResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-11-01")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.SshPublicKeyResource]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IO, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "SshPublicKeyResource")
request = build_create_request(
resource_group_name=resource_group_name,
ssh_public_key_name=ssh_public_key_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.create.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("SshPublicKeyResource", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("SshPublicKeyResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{sshPublicKeyName}"} # type: ignore
@overload
async def update(
self,
resource_group_name: str,
ssh_public_key_name: str,
parameters: _models.SshPublicKeyUpdateResource,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.SshPublicKeyResource:
"""Updates a new SSH public key resource.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param ssh_public_key_name: The name of the SSH public key. Required.
:type ssh_public_key_name: str
:param parameters: Parameters supplied to update the SSH public key. Required.
:type parameters: ~azure.mgmt.compute.v2021_11_01.models.SshPublicKeyUpdateResource
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SshPublicKeyResource or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_11_01.models.SshPublicKeyResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def update(
self,
resource_group_name: str,
ssh_public_key_name: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.SshPublicKeyResource:
"""Updates a new SSH public key resource.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param ssh_public_key_name: The name of the SSH public key. Required.
:type ssh_public_key_name: str
:param parameters: Parameters supplied to update the SSH public key. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SshPublicKeyResource or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_11_01.models.SshPublicKeyResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def update(
self,
resource_group_name: str,
ssh_public_key_name: str,
parameters: Union[_models.SshPublicKeyUpdateResource, IO],
**kwargs: Any
) -> _models.SshPublicKeyResource:
"""Updates a new SSH public key resource.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param ssh_public_key_name: The name of the SSH public key. Required.
:type ssh_public_key_name: str
:param parameters: Parameters supplied to update the SSH public key. Is either a model type or
         an IO type. Required.
:type parameters: ~azure.mgmt.compute.v2021_11_01.models.SshPublicKeyUpdateResource or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SshPublicKeyResource or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_11_01.models.SshPublicKeyResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-11-01")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.SshPublicKeyResource]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IO, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "SshPublicKeyUpdateResource")
request = build_update_request(
resource_group_name=resource_group_name,
ssh_public_key_name=ssh_public_key_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.update.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("SshPublicKeyResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{sshPublicKeyName}"} # type: ignore
@distributed_trace_async
async def delete( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, ssh_public_key_name: str, **kwargs: Any
) -> None:
"""Delete an SSH public key.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param ssh_public_key_name: The name of the SSH public key. Required.
:type ssh_public_key_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-11-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[None]
request = build_delete_request(
resource_group_name=resource_group_name,
ssh_public_key_name=ssh_public_key_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{sshPublicKeyName}"} # type: ignore
@distributed_trace_async
async def get(
self, resource_group_name: str, ssh_public_key_name: str, **kwargs: Any
) -> _models.SshPublicKeyResource:
"""Retrieves information about an SSH public key.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param ssh_public_key_name: The name of the SSH public key. Required.
:type ssh_public_key_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SshPublicKeyResource or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_11_01.models.SshPublicKeyResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-11-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.SshPublicKeyResource]
request = build_get_request(
resource_group_name=resource_group_name,
ssh_public_key_name=ssh_public_key_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("SshPublicKeyResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{sshPublicKeyName}"} # type: ignore
@distributed_trace_async
async def generate_key_pair(
self, resource_group_name: str, ssh_public_key_name: str, **kwargs: Any
) -> _models.SshPublicKeyGenerateKeyPairResult:
"""Generates and returns a public/private key pair and populates the SSH public key resource with
the public key. The length of the key will be 3072 bits. This operation can only be performed
once per SSH public key resource.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param ssh_public_key_name: The name of the SSH public key. Required.
:type ssh_public_key_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SshPublicKeyGenerateKeyPairResult or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_11_01.models.SshPublicKeyGenerateKeyPairResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-11-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.SshPublicKeyGenerateKeyPairResult]
request = build_generate_key_pair_request(
resource_group_name=resource_group_name,
ssh_public_key_name=ssh_public_key_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.generate_key_pair.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("SshPublicKeyGenerateKeyPairResult", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
generate_key_pair.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{sshPublicKeyName}/generateKeyPair"} # type: ignore
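# --- Hedged usage sketch (not part of the generated client). The resource
# group, key name and subscription id below are placeholders, and the async
# credential from azure-identity is an assumed extra dependency. ---
async def _example_generate_key_pair() -> None:
    """Illustrates calling the async SshPublicKeys operations defined above."""
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.compute.aio import ComputeManagementClient
    async with DefaultAzureCredential() as credential:
        async with ComputeManagementClient(credential, "<subscription-id>") as client:
            key_pair = await client.ssh_public_keys.generate_key_pair(
                resource_group_name="example-rg", ssh_public_key_name="example-key"
            )
            print(key_pair.public_key)
# To run: asyncio.run(_example_generate_key_pair())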
|
{
"content_hash": "cb11f09d7fa5b6c998b9e24ef11386de",
"timestamp": "",
"source": "github",
"line_count": 677,
"max_line_length": 203,
"avg_line_length": 44.65435745937962,
"alnum_prop": 0.6373259237206841,
"repo_name": "Azure/azure-sdk-for-python",
"id": "cc071bfa7710fea2ca5b691beb29c646d9920031",
"size": "30731",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_11_01/aio/operations/_ssh_public_keys_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
"""
Simple echo server, using nonblocking I/O
"""
from OpenSSL import SSL
import sys, os, select, socket
def verify_cb(conn, cert, errnum, depth, ok):
# This obviously has to be updated
print 'Got certificate: %s' % cert.get_subject()
return ok
if len(sys.argv) < 2:
print 'Usage: python[2] server.py PORT'
sys.exit(1)
dir = os.path.dirname(sys.argv[0])
if dir == '':
dir = os.curdir
# Initialize context
ctx = SSL.Context(SSL.TLSv1_1_METHOD)
ctx.set_options(SSL.OP_NO_SSLv2)
ctx.set_verify(SSL.VERIFY_PEER|SSL.VERIFY_FAIL_IF_NO_PEER_CERT, verify_cb) # Demand a certificate
ctx.use_privatekey_file (os.path.join(dir, 'server.pkey'))
ctx.use_certificate_file(os.path.join(dir, 'server.cert'))
ctx.load_verify_locations(os.path.join(dir, 'CA.cert'))
# Set up server
server = SSL.Connection(ctx, socket.socket(socket.AF_INET, socket.SOCK_STREAM))
server.bind(('', int(sys.argv[1])))
server.listen(3)
server.setblocking(0)
clients = {}
writers = {}
def dropClient(cli, errors=None):
if errors:
print 'Client %s left unexpectedly:' % (clients[cli],)
print ' ', errors
else:
print 'Client %s left politely' % (clients[cli],)
del clients[cli]
if writers.has_key(cli):
del writers[cli]
if not errors:
cli.shutdown()
cli.close()
while 1:
try:
r,w,_ = select.select([server]+clients.keys(), writers.keys(), [])
except:
break
for cli in r:
if cli == server:
cli,addr = server.accept()
print 'Connection from %s' % (addr,)
clients[cli] = addr
else:
try:
ret = cli.recv(1024)
except (SSL.WantReadError, SSL.WantWriteError, SSL.WantX509LookupError):
pass
except SSL.ZeroReturnError:
dropClient(cli)
except SSL.Error, errors:
dropClient(cli, errors)
else:
if not writers.has_key(cli):
writers[cli] = ''
writers[cli] = writers[cli] + ret
for cli in w:
try:
ret = cli.send(writers[cli])
except (SSL.WantReadError, SSL.WantWriteError, SSL.WantX509LookupError):
pass
except SSL.ZeroReturnError:
dropClient(cli)
except SSL.Error, errors:
dropClient(cli, errors)
else:
writers[cli] = writers[cli][ret:]
if writers[cli] == '':
del writers[cli]
for cli in clients.keys():
cli.close()
server.close()
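# Hedged illustration (port and certificate file names are placeholders): with
# the server listening, it can be exercised from another shell with OpenSSL's
# s_client, presenting a client certificate because the context demands one:
#   openssl s_client -connect localhost:8443 -tls1_1 \
#       -cert client.cert -key client.pkey -CAfile CA.cert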
|
{
"content_hash": "d32702bdcb234200ac6bbad20edbbfdf",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 97,
"avg_line_length": 27.361702127659573,
"alnum_prop": 0.5824261275272161,
"repo_name": "EnerNOC/pyopenssl",
"id": "d508c4c1c92d7c5d989a99f8b182d4073fdebeed",
"size": "2690",
"binary": false,
"copies": "1",
"ref": "refs/heads/tls1.2",
"path": "examples/simple/server_tls_v1_1.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "284169"
},
{
"name": "Python",
"bytes": "431046"
},
{
"name": "Shell",
"bytes": "9030"
}
],
"symlink_target": ""
}
|
import concurrent.futures
import ssl
import pytest
import socket
import logging
from six.moves import queue
from gremlin_python.driver.client import Client
from gremlin_python.driver.connection import Connection
from gremlin_python.driver import serializer
from gremlin_python.driver.driver_remote_connection import (
DriverRemoteConnection)
from gremlin_python.driver.protocol import GremlinServerWSProtocol
from gremlin_python.driver.serializer import (
GraphSONMessageSerializer, GraphSONSerializersV2d0, GraphSONSerializersV3d0,
GraphBinarySerializersV1)
from gremlin_python.driver.aiohttp.transport import AiohttpTransport
gremlin_server_url = 'ws://localhost:{}/gremlin'
anonymous_url = gremlin_server_url.format(45940)
basic_url = 'wss://localhost:{}/gremlin'.format(45941)
kerberos_url = gremlin_server_url.format(45942)
kerberized_service = 'test-service@{}'.format(socket.gethostname())
verbose_logging = False
logging.basicConfig(format='%(asctime)s [%(levelname)8s] [%(filename)15s:%(lineno)d - %(funcName)10s()] - %(message)s',
level=logging.DEBUG if verbose_logging else logging.INFO)
@pytest.fixture
def connection(request):
protocol = GremlinServerWSProtocol(
GraphSONMessageSerializer(),
username='stephen', password='password')
executor = concurrent.futures.ThreadPoolExecutor(5)
pool = queue.Queue()
try:
conn = Connection(anonymous_url, 'gmodern', protocol,
lambda: AiohttpTransport(), executor, pool)
except OSError:
executor.shutdown()
pytest.skip('Gremlin Server is not running')
else:
def fin():
executor.shutdown()
conn.close()
request.addfinalizer(fin)
return conn
@pytest.fixture
def client(request):
try:
client = Client(anonymous_url, 'gmodern')
except OSError:
pytest.skip('Gremlin Server is not running')
else:
def fin():
client.close()
request.addfinalizer(fin)
return client
@pytest.fixture(params=['basic', 'kerberos'])
def authenticated_client(request):
try:
if request.param == 'basic':
# turn off certificate verification for testing purposes only
ssl_opts = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
ssl_opts.verify_mode = ssl.CERT_NONE
client = Client(basic_url, 'gmodern', username='stephen', password='password',
transport_factory=lambda: AiohttpTransport(ssl_options=ssl_opts))
elif request.param == 'kerberos':
client = Client(kerberos_url, 'gmodern', kerberized_service=kerberized_service)
else:
raise ValueError("Invalid authentication option - " + request.param)
except OSError:
pytest.skip('Gremlin Server is not running')
else:
def fin():
client.close()
request.addfinalizer(fin)
return client
@pytest.fixture(params=['graphsonv2', 'graphsonv3', 'graphbinaryv1'])
def remote_connection(request):
try:
if request.param == 'graphbinaryv1':
remote_conn = DriverRemoteConnection(anonymous_url, 'gmodern',
message_serializer=serializer.GraphBinarySerializersV1())
elif request.param == 'graphsonv2':
remote_conn = DriverRemoteConnection(anonymous_url, 'gmodern',
message_serializer=serializer.GraphSONSerializersV2d0())
elif request.param == 'graphsonv3':
remote_conn = DriverRemoteConnection(anonymous_url, 'gmodern',
message_serializer=serializer.GraphSONSerializersV3d0())
else:
raise ValueError("Invalid serializer option - " + request.param)
except OSError:
pytest.skip('Gremlin Server is not running')
else:
def fin():
remote_conn.close()
request.addfinalizer(fin)
return remote_conn
@pytest.fixture(params=['graphsonv2', 'graphsonv3', 'graphbinaryv1'])
def remote_transaction_connection(request):
try:
if request.param == 'graphbinaryv1':
remote_conn = DriverRemoteConnection(anonymous_url, 'gtx',
message_serializer=serializer.GraphBinarySerializersV1())
elif request.param == 'graphsonv2':
remote_conn = DriverRemoteConnection(anonymous_url, 'gtx',
message_serializer=serializer.GraphSONSerializersV2d0())
elif request.param == 'graphsonv3':
remote_conn = DriverRemoteConnection(anonymous_url, 'gtx',
message_serializer=serializer.GraphSONSerializersV3d0())
else:
raise ValueError("Invalid serializer option - " + request.param)
except OSError:
pytest.skip('Gremlin Server is not running')
else:
def fin():
remote_conn.close()
request.addfinalizer(fin)
return remote_conn
@pytest.fixture(params=['basic', 'kerberos'])
def remote_connection_authenticated(request):
try:
if request.param == 'basic':
# turn off certificate verification for testing purposes only
ssl_opts = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
ssl_opts.verify_mode = ssl.CERT_NONE
remote_conn = DriverRemoteConnection(basic_url, 'gmodern',
username='stephen', password='password',
message_serializer=serializer.GraphSONSerializersV2d0(),
transport_factory=lambda: AiohttpTransport(ssl_options=ssl_opts))
elif request.param == 'kerberos':
remote_conn = DriverRemoteConnection(kerberos_url, 'gmodern', kerberized_service=kerberized_service,
message_serializer=serializer.GraphSONSerializersV2d0())
else:
raise ValueError("Invalid authentication option - " + request.param)
except OSError:
pytest.skip('Gremlin Server is not running')
else:
def fin():
remote_conn.close()
request.addfinalizer(fin)
return remote_conn
@pytest.fixture
def remote_connection_graphsonV2(request):
try:
remote_conn = DriverRemoteConnection(anonymous_url,
message_serializer=serializer.GraphSONSerializersV2d0())
except OSError:
pytest.skip('Gremlin Server is not running')
else:
def fin():
remote_conn.close()
request.addfinalizer(fin)
return remote_conn
@pytest.fixture
def graphson_serializer_v2(request):
return GraphSONSerializersV2d0()
@pytest.fixture
def graphson_serializer_v3(request):
return GraphSONSerializersV3d0()
@pytest.fixture
def graphbinary_serializer_v1(request):
return GraphBinarySerializersV1()
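# --- Hedged illustration (not a fixture): how a test module might consume the
# ``remote_connection`` fixture above. The expected count of six assumes the
# server is loaded with the TinkerPop "modern" toy graph. ---
from gremlin_python.process.anonymous_traversal import traversal
def example_modern_vertex_count(remote_connection):
    g = traversal().withRemote(remote_connection)
    assert g.V().count().next() == 6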
|
{
"content_hash": "720de80d67c4a50ce7768c7cee2b98a9",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 119,
"avg_line_length": 37.27368421052632,
"alnum_prop": 0.6304716181869529,
"repo_name": "apache/incubator-tinkerpop",
"id": "fc3ef8bd9bf8a027d6d06a0d0132ed2e66c15852",
"size": "7872",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gremlin-python/src/main/python/tests/conftest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "4544"
},
{
"name": "Groovy",
"bytes": "369370"
},
{
"name": "Java",
"bytes": "6510259"
},
{
"name": "Python",
"bytes": "1481"
},
{
"name": "Shell",
"bytes": "24104"
}
],
"symlink_target": ""
}
|
from .geom import geom
import matplotlib.patches as patches
class geom_polygon(geom):
"""
Polygon specified by (x, y) coordinates
Parameters
----------
x:
x values for (x, y) coordinates
y:
y values for (x, y) coordinates
color:
color of outer line
alpha:
transparency of fill
linetype:
type of the line ('solid', 'dashed', 'dashdot', 'dotted')
fill:
color of the inside of the shape
Examples
--------
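    A minimal sketch, assuming the package's usual ``ggplot``/``aes`` entry
    points; the data frame and column names are illustrative only.
    >>> import pandas as pd
    >>> from ggplot import ggplot, aes
    >>> df = pd.DataFrame({'x': [1, 2, 3], 'y': [1, 3, 1]})
    >>> print(ggplot(df, aes(x='x', y='y')) + geom_polygon(fill='steelblue'))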
"""
DEFAULT_AES = {'alpha': None, 'color': None, 'fill': '#333333',
'linetype': 'solid', 'size': 1.0}
REQUIRED_AES = {'x', 'y'}
DEFAULT_PARAMS = {}
_aes_renames = {'linetype': 'linestyle', 'size': 'linewidth',
'fill': 'facecolor', 'color': 'edgecolor'}
def plot(self, ax, data, _aes):
(data, _aes) = self._update_data(data, _aes)
params = self._get_plot_args(data, _aes)
variables = _aes.data
x = data[variables['x']]
y = data[variables['y']]
        coordinates = list(zip(x, y))  # materialize; matplotlib needs a sequence and zip() is lazy on Python 3
ax.add_patch(
patches.Polygon(
coordinates,
closed=True,
fill=True,
**params
)
)
# matplotlib patches don't automatically impact the scale of the ax, so
# we manually autoscale the x and y axes
ax.autoscale_view()
|
{
"content_hash": "eafdf5af72e5ba70f3505d44cdb91e9d",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 79,
"avg_line_length": 26.88679245283019,
"alnum_prop": 0.5164912280701754,
"repo_name": "yhat/ggplot",
"id": "8158b1fe16ea80139e54170871806729caa37f59",
"size": "1425",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ggplot/geoms/geom_polygon.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "265064"
},
{
"name": "Shell",
"bytes": "450"
}
],
"symlink_target": ""
}
|
"""
werkzeug.debug.repr
~~~~~~~~~~~~~~~~~~~
This module implements object representations for debugging purposes.
Unlike the default repr these reprs expose a lot more information and
produce HTML instead of ASCII.
Together with the CSS and JavaScript files of the debugger this gives
a colorful and more compact output.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import codecs
import re
import sys
from collections import deque
from traceback import format_exception_only
from .._compat import integer_types
from .._compat import iteritems
from .._compat import PY2
from .._compat import string_types
from .._compat import text_type
from ..utils import escape
missing = object()
_paragraph_re = re.compile(r"(?:\r\n|\r|\n){2,}")
RegexType = type(_paragraph_re)
HELP_HTML = """\
<div class=box>
<h3>%(title)s</h3>
<pre class=help>%(text)s</pre>
</div>\
"""
OBJECT_DUMP_HTML = """\
<div class=box>
<h3>%(title)s</h3>
%(repr)s
<table>%(items)s</table>
</div>\
"""
def debug_repr(obj):
"""Creates a debug repr of an object as HTML unicode string."""
return DebugReprGenerator().repr(obj)
def dump(obj=missing):
"""Print the object details to stdout._write (for the interactive
    console of the web debugger).
"""
gen = DebugReprGenerator()
if obj is missing:
rv = gen.dump_locals(sys._getframe(1).f_locals)
else:
rv = gen.dump_object(obj)
sys.stdout._write(rv)
class _Helper(object):
"""Displays an HTML version of the normal help, for the interactive
debugger only because it requires a patched sys.stdout.
"""
def __repr__(self):
return "Type help(object) for help about object."
def __call__(self, topic=None):
if topic is None:
sys.stdout._write("<span class=help>%s</span>" % repr(self))
return
import pydoc
pydoc.help(topic)
rv = sys.stdout.reset()
if isinstance(rv, bytes):
rv = rv.decode("utf-8", "ignore")
paragraphs = _paragraph_re.split(rv)
if len(paragraphs) > 1:
title = paragraphs[0]
text = "\n\n".join(paragraphs[1:])
else: # pragma: no cover
title = "Help"
text = paragraphs[0]
sys.stdout._write(HELP_HTML % {"title": title, "text": text})
helper = _Helper()
def _add_subclass_info(inner, obj, base):
if isinstance(base, tuple):
for base in base:
if type(obj) is base:
return inner
elif type(obj) is base:
return inner
module = ""
if obj.__class__.__module__ not in ("__builtin__", "exceptions"):
module = '<span class="module">%s.</span>' % obj.__class__.__module__
return "%s%s(%s)" % (module, obj.__class__.__name__, inner)
class DebugReprGenerator(object):
def __init__(self):
self._stack = []
def _sequence_repr_maker(left, right, base=object(), limit=8): # noqa: B008, B902
def proxy(self, obj, recursive):
if recursive:
return _add_subclass_info(left + "..." + right, obj, base)
buf = [left]
have_extended_section = False
for idx, item in enumerate(obj):
if idx:
buf.append(", ")
if idx == limit:
buf.append('<span class="extended">')
have_extended_section = True
buf.append(self.repr(item))
if have_extended_section:
buf.append("</span>")
buf.append(right)
return _add_subclass_info(u"".join(buf), obj, base)
return proxy
list_repr = _sequence_repr_maker("[", "]", list)
tuple_repr = _sequence_repr_maker("(", ")", tuple)
set_repr = _sequence_repr_maker("set([", "])", set)
frozenset_repr = _sequence_repr_maker("frozenset([", "])", frozenset)
deque_repr = _sequence_repr_maker(
'<span class="module">collections.' "</span>deque([", "])", deque
)
del _sequence_repr_maker
def regex_repr(self, obj):
pattern = repr(obj.pattern)
if PY2:
pattern = pattern.decode("string-escape", "ignore")
else:
pattern = codecs.decode(pattern, "unicode-escape", "ignore")
if pattern[:1] == "u":
pattern = "ur" + pattern[1:]
else:
pattern = "r" + pattern
return u're.compile(<span class="string regex">%s</span>)' % pattern
def string_repr(self, obj, limit=70):
buf = ['<span class="string">']
r = repr(obj)
# shorten the repr when the hidden part would be at least 3 chars
if len(r) - limit > 2:
buf.extend(
(
escape(r[:limit]),
'<span class="extended">',
escape(r[limit:]),
"</span>",
)
)
else:
buf.append(escape(r))
buf.append("</span>")
out = u"".join(buf)
# if the repr looks like a standard string, add subclass info if needed
if r[0] in "'\"" or (r[0] in "ub" and r[1] in "'\""):
return _add_subclass_info(out, obj, (bytes, text_type))
# otherwise, assume the repr distinguishes the subclass already
return out
def dict_repr(self, d, recursive, limit=5):
if recursive:
return _add_subclass_info(u"{...}", d, dict)
buf = ["{"]
have_extended_section = False
for idx, (key, value) in enumerate(iteritems(d)):
if idx:
buf.append(", ")
if idx == limit - 1:
buf.append('<span class="extended">')
have_extended_section = True
buf.append(
'<span class="pair"><span class="key">%s</span>: '
'<span class="value">%s</span></span>'
% (self.repr(key), self.repr(value))
)
if have_extended_section:
buf.append("</span>")
buf.append("}")
return _add_subclass_info(u"".join(buf), d, dict)
def object_repr(self, obj):
r = repr(obj)
if PY2:
r = r.decode("utf-8", "replace")
return u'<span class="object">%s</span>' % escape(r)
def dispatch_repr(self, obj, recursive):
if obj is helper:
return u'<span class="help">%r</span>' % helper
if isinstance(obj, (integer_types, float, complex)):
return u'<span class="number">%r</span>' % obj
if isinstance(obj, string_types) or isinstance(obj, bytes):
return self.string_repr(obj)
if isinstance(obj, RegexType):
return self.regex_repr(obj)
if isinstance(obj, list):
return self.list_repr(obj, recursive)
if isinstance(obj, tuple):
return self.tuple_repr(obj, recursive)
if isinstance(obj, set):
return self.set_repr(obj, recursive)
if isinstance(obj, frozenset):
return self.frozenset_repr(obj, recursive)
if isinstance(obj, dict):
return self.dict_repr(obj, recursive)
if deque is not None and isinstance(obj, deque):
return self.deque_repr(obj, recursive)
return self.object_repr(obj)
def fallback_repr(self):
try:
info = "".join(format_exception_only(*sys.exc_info()[:2]))
except Exception: # pragma: no cover
info = "?"
if PY2:
info = info.decode("utf-8", "ignore")
return u'<span class="brokenrepr"><broken repr (%s)>' u"</span>" % escape(
info.strip()
)
def repr(self, obj):
recursive = False
for item in self._stack:
if item is obj:
recursive = True
break
self._stack.append(obj)
try:
try:
return self.dispatch_repr(obj, recursive)
except Exception:
return self.fallback_repr()
finally:
self._stack.pop()
def dump_object(self, obj):
repr = items = None
if isinstance(obj, dict):
title = "Contents of"
items = []
for key, value in iteritems(obj):
if not isinstance(key, string_types):
items = None
break
items.append((key, self.repr(value)))
if items is None:
items = []
repr = self.repr(obj)
for key in dir(obj):
try:
items.append((key, self.repr(getattr(obj, key))))
except Exception:
pass
title = "Details for"
title += " " + object.__repr__(obj)[1:-1]
return self.render_object_dump(items, title, repr)
def dump_locals(self, d):
items = [(key, self.repr(value)) for key, value in d.items()]
return self.render_object_dump(items, "Local variables in frame")
def render_object_dump(self, items, title, repr=None):
html_items = []
for key, value in items:
html_items.append(
"<tr><th>%s<td><pre class=repr>%s</pre>" % (escape(key), value)
)
if not html_items:
html_items.append("<tr><td><em>Nothing</em>")
return OBJECT_DUMP_HTML % {
"title": escape(title),
"repr": "<pre class=repr>%s</pre>" % repr if repr else "",
"items": "\n".join(html_items),
}
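# Hedged usage sketch (illustrative, not part of the module's public API docs):
# ``debug_repr`` is the main entry point and, following the dispatch rules
# above, a small list is rendered roughly like this:
# >>> debug_repr([1, 2, 3])
# u'[<span class="number">1</span>, <span class="number">2</span>, <span class="number">3</span>]'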
|
{
"content_hash": "585d8857ec9011b3ce68cf29d302251d",
"timestamp": "",
"source": "github",
"line_count": 296,
"max_line_length": 88,
"avg_line_length": 32.4222972972973,
"alnum_prop": 0.5338126497863915,
"repo_name": "turbomanage/training-data-analyst",
"id": "d7a7285ca95dfbc64d765d1a949580d07f2f7a44",
"size": "9621",
"binary": false,
"copies": "19",
"ref": "refs/heads/master",
"path": "courses/machine_learning/deepdive2/structured/labs/serving/application/lib/werkzeug/debug/repr.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "19768"
},
{
"name": "C++",
"bytes": "30926"
},
{
"name": "CSS",
"bytes": "13208"
},
{
"name": "Dockerfile",
"bytes": "35682"
},
{
"name": "HTML",
"bytes": "2069111"
},
{
"name": "Java",
"bytes": "1539437"
},
{
"name": "JavaScript",
"bytes": "2540305"
},
{
"name": "Jsonnet",
"bytes": "5696"
},
{
"name": "Jupyter Notebook",
"bytes": "61371931"
},
{
"name": "Makefile",
"bytes": "4118"
},
{
"name": "PLpgSQL",
"bytes": "5868"
},
{
"name": "PigLatin",
"bytes": "393"
},
{
"name": "Python",
"bytes": "9553863"
},
{
"name": "R",
"bytes": "68"
},
{
"name": "Shell",
"bytes": "390786"
},
{
"name": "TSQL",
"bytes": "34160"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import print_function
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.optimizers import SGD
from keras.utils import np_utils, generic_utils
from six.moves import range
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
from keras.regularizers import l2, activity_l2
from keras.layers.advanced_activations import PReLU
from data_multi_classes import load_data
import h5py
import argparse
from keras.models import model_from_json
#import numpy as np
#np.random.seed(1337) # for reproducibility
#from keras.utils.visualize_util import plot
#import tensorflow as tf
#import theano
#theano.config.device = 'gpu1'
#theano.config.floatX = 'float32'
#Number of samples per gradient update
batch_size = 100
# number of classes
nb_classes = 3
#the number of times to iterate over the training data arrays
nb_epoch = 50
# number of CNN models to train
nb_loop = 10
# input image dimensions
img_rows, img_cols = 64, 32
# the images are RGB
img_channels = 3
'''
ap = argparse.ArgumentParser()
ap.add_argument("--train", required=True, help="Path to the training set")
ap.add_argument("--valid", required=True, help="Path to the validation set")
ap.add_argument("--test", required=True, help="Path to the testing set")
ap.add_argument("--0", required=True, help="Path to the negative set")
args = vars(ap.parse_args())
# load the data
path_train = args["train"]
path_valid = args["valid"]
path_test = args["test"]
path_0 = args["0"]
'''
#path_train_1='/home/cad/dataset/pinkall/train_1'
#path_valid_1='/home/cad/dataset/pinkall/valid_test_1'
#path_train_2='/home/cad/dataset/bigyellow/train_1'
#path_valid_2='/home/cad/dataset/bigyellow/valid_test_1'
#path_train_0='/home/cad/dataset/negative/pink_yellow_158'
#path_valid_0='/home/cad/dataset/bigyellow/valid_test_0'
data_path = '/home/cad/dataset_new'
X_train,Y_train_label,X_valid,Y_valid_label = load_data(data_path)
print(X_train.shape[0], 'training samples')
print(X_valid.shape[0],'validation samples')
#print(X_test.shape[0],'testing samples')
# labels are integer class ids; Keras expects binary class matrices, so convert them with the helper Keras provides
Y_train = np_utils.to_categorical(Y_train_label, nb_classes)
Y_valid = np_utils.to_categorical(Y_valid_label, nb_classes)
#Y_test = np_utils.to_categorical(Y_test_label, nb_classes)
dist={}
#prelu=PReLU(init='zero', weights=None)
for i in range(nb_loop):
###############
    # build the CNN model
###############
model = Sequential()
    # first convolutional layer: 32 kernels, each 5x5
    # border_mode is 'valid'
    # relu activation
model.add(Convolution2D(32, 5, 5,
# border_mode='valid',
# activation='relu',
# W_regularizer=l2(0.01),
# b_regularizer=l2(0.01),
# activity_regularizer=activity_l2(0.001),
input_shape=(img_channels, img_rows, img_cols)))
#input_shape=(X_train.shape[1:])))
model.add(Activation('relu'))
# model.add(prelu)
# model.add(MaxPooling2D(pool_size=(2, 2)))
# model.add(Dropout(0.5))
    # second convolutional layer: 32 kernels, each 5x5
    # max pooling with pool size (2, 2)
    # apply Dropout
model.add(Convolution2D(32, 5, 5))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
    # third convolutional layer: 64 kernels, each 3x3
model.add(Convolution2D(64, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
    # fully connected layer
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
    # softmax regression, outputs nb_classes classes
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
##############
    # train the model
##############
#lr: float >= 0. Learning rate.
#momentum: float >= 0. Parameter updates momentum.
#decay: float >= 0. Learning rate decay over each update.
#nesterov: boolean. Whether to apply Nesterov momentum.
    # the loss argument of model.compile is the objective function; binary_crossentropy is the logistic loss
sgd = SGD(lr=0.004, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
#this will do preprocessing and realtime data augmentation
'''
datagen = ImageDataGenerator(
featurewise_center=False, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=False, # divide inputs by std of the dataset
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
rotation_range=5, # randomly rotate images in the range (degrees, 0 to 180)
width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)
height_shift_range=0.1, # randomly shift images vertically (fraction of total height)
horizontal_flip=False, # randomly flip images
vertical_flip=False) # randomly flip images
'''
#datagen.fit(X_train)
#Stop training when a monitored quantity has stopped improving.
#monitor: quantity to be monitored.
#patience: number of epochs with no improvement after which training will be stopped.
#verbose: verbosity mode.
#mode: one of {auto, min, max}. In 'min' mode, training will stop when the quantity monitored has stopped decreasing; in 'max' mode it will stop when the quantity monitored has stopped increasing.
earlyStopping= EarlyStopping(monitor='val_loss', patience=5, verbose=1, mode='auto')
checkpointer = ModelCheckpoint(filepath="../model/model/model_weights_"+str(i)+".h5", verbose=1, save_best_only=True)
# class_weight = {0:1,1:5}
    # call the fit method
    # shuffle=True: shuffle the data each epoch
    # verbose=1: print a progress bar while training
    # validation_data: the validation set
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, shuffle=True, verbose=1, validation_data=(X_valid,Y_valid),callbacks=[checkpointer,earlyStopping])
'''
model.fit_generator(datagen.flow(X_train, Y_train,
batch_size=batch_size,
shuffle=True),
samples_per_epoch=X_train.shape[0],
nb_epoch=nb_epoch,
verbose=1,
validation_data=(X_valid, Y_valid),
callbacks=[checkpointer,earlyStopping])
'''
#load the best model
model.load_weights('../model/model/model_weights_'+str(i)+'.h5')
score = model.evaluate(X_train, Y_train, verbose=1)
print('Train loss:', score[0])
print('Train accuracy:', score[1])
count = 0
result = model.predict(X_train, batch_size=100, verbose = 0)
for j in range(X_train.shape[0]):
for label in range(nb_classes):
if label == 0:
if Y_train_label[j] == 0 and result[j][0] > 0.01:
count += 1
else:
if Y_train_label[j] == label and result[j][label] > 0.99:
count += 1
print(('Strict Train accuracy:', float(count)/X_train.shape[0]))
score = model.evaluate(X_valid, Y_valid, verbose=1)
print('Validation loss:', score[0])
print('Validation accuracy:', score[1])
count = 0
result = model.predict(X_valid, batch_size=100, verbose = 0)
for j in range(X_valid.shape[0]):
for label in range(nb_classes):
if label == 0:
if Y_valid_label[j] == 0 and result[j][0] > 0.01:
count += 1
else:
if Y_valid_label[j] == label and result[j][label] > 0.99:
count += 1
print(('Strict Validation accuracy:', float(count)/X_valid.shape[0]))
'''
score = model.evaluate(X_test, Y_test, verbose=1)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
count = 0
result = model.predict(X_test, batch_size=100, verbose = 0)
for j in range(X_test.shape[0]):
if Y_test_label[j] == 1 and result[j][1] > 0.9:
count += 1
elif Y_test_label[j] == 0 and result[j][0] > 0.5:
count += 1
print(('Strict Test accuracy:', float(count)/X_test.shape[0]))
'''
# dist[i]=score[0]
dist[i]=float(count)/X_valid.shape[0]
######################################
# save the CNN model
######################################
path_save_model = '/home/cad/model'
json_string = model.to_json()
open(path_save_model+'/model_architecture.json','w').write(json_string)
## rank the models by dist.values() and keep the best few (5 or 10) CNN models separately; they are used for the final sliding-window decision
print(dist)
import subprocess
for i in range(3):
index = dist.keys()[dist.values().index(max(dist.values()))]
subprocess.call(["mv", "../model/model/model_weights_"+str(index)+".h5",path_save_model+"/model_weights_"+str(index)+".h5"])
dist.pop(index)
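# Hedged follow-up sketch (the paths mirror those used above; the weight-file
# index is a placeholder because which indices survive depends on the ranking):
# model = model_from_json(open(path_save_model + '/model_architecture.json').read())
# model.load_weights(path_save_model + '/model_weights_0.h5')
# model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
# predictions = model.predict(X_valid, batch_size=100, verbose=0)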
|
{
"content_hash": "ae6cb5f8192045bfae02e521bdc1b88f",
"timestamp": "",
"source": "github",
"line_count": 233,
"max_line_length": 200,
"avg_line_length": 38.02145922746781,
"alnum_prop": 0.6561688678180382,
"repo_name": "AKAMobi/goods-counter",
"id": "e3bcd83d7a8c5ae98a9d9a38fda55ba1ab2d9784",
"size": "9311",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/train_goods_multi_sku.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "51947"
}
],
"symlink_target": ""
}
|
import threading
from helpers import CountingHandler
import osmium as o
def test_threaded_processing():
""" Process a file in a different thread and make sure that processing
completes.
"""
function_complete = threading.Event()
c = CountingHandler()
def import_data():
c.apply_buffer('n1 x67.8 y-45.6932'.encode('utf-8'), 'opl')
function_complete.set()
t = threading.Thread(target=import_data)
t.start()
function_complete.wait(timeout=2)
assert function_complete.is_set()
assert c.counts == [1, 0, 0 ,0]
|
{
"content_hash": "8bb89b9e6664a7117352464e7fd565c4",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 74,
"avg_line_length": 26.045454545454547,
"alnum_prop": 0.6579406631762653,
"repo_name": "osmcode/pyosmium",
"id": "6517d5ebca6e7388e4d363c01699943231a19ce1",
"size": "678",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_threaded.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "278"
},
{
"name": "C++",
"bytes": "74226"
},
{
"name": "CMake",
"bytes": "19992"
},
{
"name": "Python",
"bytes": "128846"
}
],
"symlink_target": ""
}
|
import logging
import chryso.errors
import flask
import flask_login
import dominus.platform
LOGGER = logging.getLogger(__name__)
blueprint = flask.Blueprint('auth', __name__)
@blueprint.route('/login/', methods=['GET'])
def get_login():
return flask.render_template('login.html')
@blueprint.route('/login/', methods=['POST'])
def create_login():
username = flask.request.form.get('username')
password = flask.request.form.get('password')
LOGGER.debug("Checking credentials for %s %s", username, password)
record = dominus.platform.user_by_credentials(username, password)
LOGGER.debug("Resulting user: %s", record)
if not record:
return flask.make_response('Your username or password is incorrect', 403)
user = User(record['username'], record['uuid'])
flask_login.login_user(user)
return flask.redirect('/')
@blueprint.route('/login/', methods=['DELETE'])
def delete_login():
flask_login.logout_user()
return flask.redirect('/login/')
@blueprint.route('/register/', methods=['GET'])
def get_register():
error = {
'already-exists' : 'That username already exists',
}.get(flask.request.args.get('error'))
return flask.render_template('register.html', error=error)
@blueprint.route('/register/', methods=['POST'])
def do_register():
username = flask.request.form.get('username')
password = flask.request.form.get('password')
try:
dominus.platform.create_user(username, password)
except chryso.errors.DuplicateKeyError:
return flask.redirect('/register/?error=already-exists')
record = dominus.platform.user_by_credentials(username, password)
user = User(record['username'], record['uuid'])
flask_login.login_user(user)
return flask.redirect('/')
class User():
def __init__(self, username, uuid):
self.username = username
self.uuid = uuid
def is_active(self): # pylint: disable=no-self-use
return True
def get_id(self):
return self.uuid
def is_authenticated(self): # pylint: disable=no-self-use
return True
def is_anonymous(self): # pylint: disable=no-self-use
return False
def load_user(user_id):
return dominus.platform.user_by_uuid(user_id)
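# Hedged note (not in the original module): flask_login expects ``load_user``
# to be registered as the user loader on the application's LoginManager,
# typically in the app factory, e.g.:
#     login_manager = flask_login.LoginManager(app)
#     login_manager.user_loader(load_user)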
|
{
"content_hash": "7093c74056c45e12146eeb1bd5fe3f1a",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 81,
"avg_line_length": 30.972602739726028,
"alnum_prop": 0.6727111897390535,
"repo_name": "EliRibble/dominus",
"id": "0cf337110c986de4980c7f06c7eedd298fe61539",
"size": "2261",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dominus/auth.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "15158"
},
{
"name": "HTML",
"bytes": "16056"
},
{
"name": "JavaScript",
"bytes": "351"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "79247"
},
{
"name": "Shell",
"bytes": "700"
}
],
"symlink_target": ""
}
|
'''
Problem 5
@author: Kevin Ji
'''
def get_factor_repetitions( number ):
# Data of form
# factor => repetitions
factor_repetitions = {}
# Start with 2
cur_factor = 2
while number % cur_factor == 0:
factor_repetitions[ cur_factor ] = factor_repetitions.get( cur_factor, 0 ) + 1
number = number // cur_factor
# Start with 3, increment by 2
cur_factor = 3
while number != 1 and cur_factor < number:
while number % cur_factor == 0:
factor_repetitions[ cur_factor ] = factor_repetitions.get( cur_factor, 0 ) + 1
number = number // cur_factor
cur_factor += 2
if number != 1:
factor_repetitions[ number ] = 1
return factor_repetitions
def get_smallest_evenly_divisible( largest_divisor ):
number = 1
number_factors = {}
divisor = 1
# Get the list of divisors
while divisor <= largest_divisor:
factors = get_factor_repetitions( divisor )
for factor in factors:
if factors[ factor ] > number_factors.get( factor, 0 ):
number_factors[ factor ] = factors[ factor ]
divisor += 1
# Now create the number
    for factor, num_times in number_factors.items():  # items() works on both Python 2 and 3
for unused in range( 0, num_times ):
number *= factor
return number
print( get_smallest_evenly_divisible( 10 ) ) # 2520
print( get_smallest_evenly_divisible( 20 ) )
|
{
"content_hash": "479a151fa4c36faf40dc6734fd677a3e",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 90,
"avg_line_length": 26.29824561403509,
"alnum_prop": 0.5770513675783856,
"repo_name": "mc10/project-euler",
"id": "3a4ee6b8d9a76e3d4c63ca77d079e462a4f1b83c",
"size": "1501",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "problem_5.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "176163"
}
],
"symlink_target": ""
}
|
from flask import Flask, render_template,session
app = Flask(__name__)
app.secret_key= "ThisIsSecret"
@app.route('/')
def myfirstfunction():
if not 'title' in session:
session['title'] = 'hello world'
return render_template('index.html', name="Mike")
if __name__ == '__main__':
app.run(debug = True)
|
{
"content_hash": "6e73b1d9b01303d4a32b1d1abb5e62e3",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 53,
"avg_line_length": 26.666666666666668,
"alnum_prop": 0.64375,
"repo_name": "dallaspythondojo/python",
"id": "53d71da9031190736f76915e72654522527965d3",
"size": "320",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Preston_Kellen/Assignments/flaskolympics/olympics3/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "25381"
},
{
"name": "HTML",
"bytes": "256675"
},
{
"name": "JavaScript",
"bytes": "528"
},
{
"name": "Python",
"bytes": "399336"
}
],
"symlink_target": ""
}
|
"""
This module stores session-level commands.
"""
from evennia.commands.cmdset import CmdSet
from evennia.commands.default import account
class SessionCmdSet(CmdSet):
"""
Sets up the unlogged cmdset.
"""
key = "DefaultSession"
priority = -20
def at_cmdset_creation(self):
"Populate the cmdset"
self.add(account.CmdSessions())
|
{
"content_hash": "ffefb76e5baf01dd94e1e7fb2cb4e8ba",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 44,
"avg_line_length": 20.61111111111111,
"alnum_prop": 0.6765498652291105,
"repo_name": "jamesbeebop/evennia",
"id": "f81e8b9636ba38a51ef104164b7a3794cdecfd11",
"size": "371",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "evennia/commands/default/cmdset_session.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "19127"
},
{
"name": "Emacs Lisp",
"bytes": "2734"
},
{
"name": "HTML",
"bytes": "13558"
},
{
"name": "JavaScript",
"bytes": "24398"
},
{
"name": "Python",
"bytes": "2143170"
}
],
"symlink_target": ""
}
|
"""'logging sinks delete' command."""
from apitools.base.py import exceptions as apitools_exceptions
from googlecloudsdk.api_lib.logging import util
from googlecloudsdk.api_lib.util import exceptions
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions as calliope_exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core.console import console_io
class Delete(base.DeleteCommand):
"""Deletes a sink."""
@staticmethod
def Args(parser):
"""Register flags for this command."""
parser.add_argument('sink_name', help='The name of the sink to delete.')
util.AddNonProjectArgs(parser, 'Delete a sink')
def DeleteLogSink(self):
"""Deletes a log sink specified by the arguments."""
messages = util.GetMessagesV1()
sink_ref = self.context['sink_reference']
return util.GetClientV1().projects_logs_sinks.Delete(
messages.LoggingProjectsLogsSinksDeleteRequest(
projectsId=sink_ref.projectsId, logsId=sink_ref.logsId,
sinksId=sink_ref.sinksId))
def DeleteLogServiceSink(self):
"""Deletes a log service sink specified by the arguments."""
messages = util.GetMessagesV1()
sink_ref = self.context['sink_reference']
return util.GetClientV1().projects_logServices_sinks.Delete(
messages.LoggingProjectsLogServicesSinksDeleteRequest(
projectsId=sink_ref.projectsId,
logServicesId=sink_ref.logServicesId, sinksId=sink_ref.sinksId))
def DeleteSink(self, parent):
"""Deletes a sink specified by the arguments."""
# Use V2 logging API.
messages = util.GetMessages()
sink_ref = self.context['sink_reference']
return util.GetClient().projects_sinks.Delete(
messages.LoggingProjectsSinksDeleteRequest(
sinkName=util.CreateResourceName(
parent, 'sinks', sink_ref.sinksId)))
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
"""
util.CheckLegacySinksCommandArguments(args)
sink_ref = self.context['sink_reference']
if args.log:
sink_description = 'log sink [%s] from [%s]' % (
sink_ref.sinksId, sink_ref.logsId)
elif args.service:
sink_description = 'log-service sink [%s] from [%s]' % (
sink_ref.sinksId, sink_ref.logServicesId)
else:
sink_description = 'sink [%s]' % sink_ref.sinksId
if not console_io.PromptContinue('Really delete %s?' % sink_description):
raise calliope_exceptions.ToolException('action canceled by user')
try:
if args.log:
self.DeleteLogSink()
elif args.service:
self.DeleteLogServiceSink()
else:
self.DeleteSink(util.GetParentFromArgs(args))
log.DeletedResource(sink_ref)
except apitools_exceptions.HttpError as error:
v2_sink = not args.log and not args.service
# Suggest the user to add --log or --log-service flag.
if v2_sink and exceptions.HttpException(
error).payload.status_code == 404:
log.status.Print(('Sink was not found. '
'Did you forget to add --log or --log-service flag?'))
raise error
Delete.detailed_help = {
'DESCRIPTION': """\
Deletes a sink and halts the export of log entries associated
with that sink.
If you don't include one of the *--log* or *--log-service* flags,
this command deletes a v2 sink.
Deleting a sink does not affect log entries already exported
through the deleted sink, and will not affect other sinks that are
exporting the same log(s).
""",
}
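# Hedged CLI illustration (the sink name is a placeholder): a v2 sink is
# removed with ``gcloud logging sinks delete my-sink``; legacy v1 sinks
# additionally take the --log or --log-service flag referenced above.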
|
{
"content_hash": "59d533eea54a91a8f3e11a52f878f50a",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 80,
"avg_line_length": 37.39,
"alnum_prop": 0.6793260230008024,
"repo_name": "KaranToor/MA450",
"id": "80e8d727b2155d696422d04fa048c9a4af49a586",
"size": "4335",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "google-cloud-sdk/lib/surface/logging/sinks/delete.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3162"
},
{
"name": "CSS",
"bytes": "1930"
},
{
"name": "HTML",
"bytes": "13381"
},
{
"name": "Java",
"bytes": "151442"
},
{
"name": "JavaScript",
"bytes": "4906"
},
{
"name": "Makefile",
"bytes": "1636"
},
{
"name": "Objective-C",
"bytes": "13335"
},
{
"name": "PHP",
"bytes": "9086"
},
{
"name": "Pascal",
"bytes": "62"
},
{
"name": "Python",
"bytes": "19710731"
},
{
"name": "Roff",
"bytes": "2069494"
},
{
"name": "Ruby",
"bytes": "690"
},
{
"name": "Shell",
"bytes": "32272"
},
{
"name": "Smarty",
"bytes": "4968"
},
{
"name": "SourcePawn",
"bytes": "616"
},
{
"name": "Swift",
"bytes": "14225"
}
],
"symlink_target": ""
}
|
"""
Installation of Windows Updates using the Windows Update Agent
.. versionadded:: 2017.7.0
Salt can manage Windows updates via the "wua" state module. Updates can be
installed and removed. Update management declarations are as follows:
For installation:
.. code-block:: yaml
# Install a single update using the KB
KB3194343:
wua.installed
# Install a single update using the name parameter
install_update:
wua.installed:
- name: KB3194343
# Install multiple updates using the updates parameter and a combination of
# KB number and GUID
install_updates:
wua.installed:
- updates:
- KB3194343
- bb1dbb26-3fb6-45fd-bb05-e3c8e379195c
For removal:
.. code-block:: yaml
# Remove a single update using the KB
KB3194343:
wua.removed
# Remove a single update using the name parameter
remove_update:
wua.removed:
- name: KB3194343
# Remove multiple updates using the updates parameter and a combination of
# KB number and GUID
remove_updates:
wua.removed:
- updates:
- KB3194343
- bb1dbb26-3fb6-45fd-bb05-e3c8e379195c
"""
import logging
import salt.utils.data
import salt.utils.platform
import salt.utils.win_update
log = logging.getLogger(__name__)
__virtualname__ = "wua"
def __virtual__():
"""
Only valid on Windows machines
"""
if not salt.utils.platform.is_windows():
return False, "WUA: Only available on Window systems"
if not salt.utils.win_update.HAS_PYWIN32:
return False, "WUA: Requires PyWin32 libraries"
return __virtualname__
def installed(name, updates=None):
"""
Ensure Microsoft Updates are installed. Updates will be downloaded if
needed.
Args:
name (str):
The identifier of a single update to install.
updates (list):
A list of identifiers for updates to be installed. Overrides
``name``. Default is None.
.. note:: Identifiers can be the GUID, the KB number, or any part of the
Title of the Microsoft update. GUIDs and KBs are the preferred method
to ensure you're installing the correct update.
.. warning:: Using a partial KB number or a partial Title could result in
more than one update being installed.
Returns:
dict: A dictionary containing the results of the update. There are three
keys under changes. `installed` is a list of updates that were
successfully installed. `failed` is a list of updates that failed
to install. `superseded` is a list of updates that were not
installed because they were superseded by another update.
CLI Example:
.. code-block:: yaml
# using a GUID
install_update:
wua.installed:
- name: 28cf1b09-2b1a-458c-9bd1-971d1b26b211
# using a KB
install_update:
wua.installed:
- name: KB3194343
# using the full Title
install_update:
wua.installed:
- name: Security Update for Adobe Flash Player for Windows 10 Version 1607 (for x64-based Systems) (KB3194343)
# Install multiple updates
install_updates:
wua.installed:
- updates:
- KB3194343
- 28cf1b09-2b1a-458c-9bd1-971d1b26b211
"""
if isinstance(updates, str):
updates = [updates]
if not updates:
updates = name
ret = {"name": name, "changes": {}, "result": True, "comment": ""}
wua = salt.utils.win_update.WindowsUpdateAgent()
# Search for updates
install_list = wua.search(updates)
# No updates found
if install_list.count() == 0:
ret["comment"] = "No updates found"
return ret
# List of updates to download
download = salt.utils.win_update.Updates()
for item in install_list.updates:
if not salt.utils.data.is_true(item.IsDownloaded):
download.updates.Add(item)
# List of updates to install
install = salt.utils.win_update.Updates()
installed_updates = []
for item in install_list.updates:
if not salt.utils.data.is_true(item.IsInstalled):
install.updates.Add(item)
else:
installed_updates.extend("KB" + kb for kb in item.KBArticleIDs)
if install.count() == 0:
ret["comment"] = "Updates already installed: "
ret["comment"] += "\n - ".join(installed_updates)
return ret
# Return comment of changes if test.
if __opts__["test"]:
ret["result"] = None
ret["comment"] = "Updates will be installed:"
for update in install.updates:
ret["comment"] += "\n"
ret["comment"] += ": ".join([update.Identity.UpdateID, update.Title])
return ret
# Download updates
wua.download(download)
# Install updates
wua.install(install)
# Refresh windows update info
wua.refresh()
post_info = wua.updates().list()
# superseded_updates is a list of updates that the WUA first requested to be
# installed but became ineligible for installation because they were
# superseded
superseded_updates = {}
failed_updates = {}
installed_updates = {}
# Verify the installation
installed_items = install.list()
for item in installed_items:
if item not in post_info:
# Update (item) was not installed for valid reason
superseded_updates[item] = {
"Title": installed_items[item]["Title"],
"KBs": installed_items[item]["KBs"],
}
else:
if not salt.utils.data.is_true(post_info[item]["Installed"]):
failed_updates[item] = {
"Title": post_info[item]["Title"],
"KBs": post_info[item]["KBs"],
}
else:
installed_updates[item] = {
"Title": post_info[item]["Title"],
"NeedsReboot": post_info[item]["NeedsReboot"],
"KBs": post_info[item]["KBs"],
}
comments = []
if installed_updates:
comments.append("Updates installed successfully")
ret["changes"]["installed"] = installed_updates
if failed_updates:
comments.append("Some updates failed to install")
ret["changes"]["failed"] = failed_updates
ret["result"] = False
# Add the list of updates not installed to the return
if superseded_updates:
comments.append("Some updates were superseded")
ret["changes"]["superseded"] = superseded_updates
ret["comment"] = "\n".join(comments)
return ret
def removed(name, updates=None):
"""
Ensure Microsoft Updates are uninstalled.
Args:
name (str):
The identifier of a single update to uninstall.
updates (list):
A list of identifiers for updates to be removed. Overrides ``name``.
Default is None.
.. note:: Identifiers can be the GUID, the KB number, or any part of the
Title of the Microsoft update. GUIDs and KBs are the preferred method
to ensure you're uninstalling the correct update.
.. warning:: Using a partial KB number or a partial Title could result in
more than one update being removed.
Returns:
dict: A dictionary containing the results of the removal. There are
            two keys under changes. `removed` is a list of updates that
were successfully removed. `failed` is a list of updates that
failed to be removed.
CLI Example:
.. code-block:: yaml
# using a GUID
uninstall_update:
wua.removed:
- name: 28cf1b09-2b1a-458c-9bd1-971d1b26b211
# using a KB
uninstall_update:
wua.removed:
- name: KB3194343
# using the full Title
uninstall_update:
wua.removed:
- name: Security Update for Adobe Flash Player for Windows 10 Version 1607 (for x64-based Systems) (KB3194343)
# Install multiple updates
uninstall_updates:
wua.removed:
- updates:
- KB3194343
- 28cf1b09-2b1a-458c-9bd1-971d1b26b211
"""
if isinstance(updates, str):
updates = [updates]
if not updates:
updates = name
ret = {"name": name, "changes": {}, "result": True, "comment": ""}
wua = salt.utils.win_update.WindowsUpdateAgent()
# Search for updates
updates = wua.search(updates)
# No updates found
if updates.count() == 0:
ret["comment"] = "No updates found"
return ret
# List of updates to uninstall
uninstall = salt.utils.win_update.Updates()
removed_updates = []
for item in updates.updates:
if salt.utils.data.is_true(item.IsInstalled):
uninstall.updates.Add(item)
else:
removed_updates.extend("KB" + kb for kb in item.KBArticleIDs)
if uninstall.count() == 0:
ret["comment"] = "Updates already removed: "
ret["comment"] += "\n - ".join(removed_updates)
return ret
# Return comment of changes if test.
if __opts__["test"]:
ret["result"] = None
ret["comment"] = "Updates will be removed:"
for update in uninstall.updates:
ret["comment"] += "\n"
ret["comment"] += ": ".join([update.Identity.UpdateID, update.Title])
return ret
# Install updates
wua.uninstall(uninstall)
# Refresh windows update info
wua.refresh()
post_info = wua.updates().list()
failed_updates = {}
removed_updates = {}
# Verify the installation
for item in uninstall.list():
if salt.utils.data.is_true(post_info[item]["Installed"]):
failed_updates[item] = {
"Title": post_info[item]["Title"],
"KBs": post_info[item]["KBs"],
}
else:
removed_updates[item] = {
"Title": post_info[item]["Title"],
"NeedsReboot": post_info[item]["NeedsReboot"],
"KBs": post_info[item]["KBs"],
}
if removed_updates:
ret["comment"] = "Updates removed successfully"
ret["changes"]["removed"] = removed_updates
if failed_updates:
ret["comment"] = "Some updates failed to uninstall"
ret["changes"]["failed"] = failed_updates
ret["result"] = False
return ret
def uptodate(
name,
software=True,
drivers=False,
skip_hidden=False,
skip_mandatory=False,
skip_reboot=True,
categories=None,
severities=None,
):
"""
Ensure Microsoft Updates that match the passed criteria are installed.
Updates will be downloaded if needed.
This state allows you to update a system without specifying a specific
update to apply. All matching updates will be installed.
Args:
name (str):
The name has no functional value and is only used as a tracking
reference
software (bool):
Include software updates in the results (default is True)
drivers (bool):
Include driver updates in the results (default is False)
skip_hidden (bool):
Skip updates that have been hidden. Default is False.
skip_mandatory (bool):
Skip mandatory updates. Default is False.
skip_reboot (bool):
Skip updates that require a reboot. Default is True.
categories (list):
Specify the categories to list. Must be passed as a list. All
categories returned by default.
Categories include the following:
* Critical Updates
* Definition Updates
* Drivers (make sure you set drivers=True)
* Feature Packs
* Security Updates
* Update Rollups
* Updates
* Update Rollups
* Windows 7
* Windows 8.1
* Windows 8.1 drivers
* Windows 8.1 and later drivers
* Windows Defender
severities (list):
Specify the severities to include. Must be passed as a list. All
severities returned by default.
Severities include the following:
* Critical
* Important
Returns:
dict: A dictionary containing the results of the update. There are three
keys under changes. `installed` is a list of updates that were
successfully installed. `failed` is a list of updates that failed
to install. `superseded` is a list of updates that were not
installed because they were superseded by another update.
CLI Example:
.. code-block:: yaml
# Update the system using the state defaults
update_system:
wua.uptodate
# Update the drivers
update_drivers:
wua.uptodate:
- software: False
- drivers: True
- skip_reboot: False
# Apply all critical updates
update_critical:
wua.uptodate:
- severities:
- Critical
"""
ret = {"name": name, "changes": {}, "result": True, "comment": ""}
wua = salt.utils.win_update.WindowsUpdateAgent()
available_updates = wua.available(
skip_hidden=skip_hidden,
skip_installed=True,
skip_mandatory=skip_mandatory,
skip_reboot=skip_reboot,
software=software,
drivers=drivers,
categories=categories,
severities=severities,
)
# No updates found
if available_updates.count() == 0:
ret["comment"] = "No updates found"
return ret
updates = list(available_updates.list().keys())
# Search for updates
install_list = wua.search(updates)
# List of updates to download
download = salt.utils.win_update.Updates()
for item in install_list.updates:
if not salt.utils.data.is_true(item.IsDownloaded):
download.updates.Add(item)
# List of updates to install
install = salt.utils.win_update.Updates()
for item in install_list.updates:
if not salt.utils.data.is_true(item.IsInstalled):
install.updates.Add(item)
# Return comment of changes if test.
if __opts__["test"]:
ret["result"] = None
ret["comment"] = "Updates will be installed:"
for update in install.updates:
ret["comment"] += "\n"
ret["comment"] += ": ".join([update.Identity.UpdateID, update.Title])
return ret
# Download updates
wua.download(download)
# Install updates
wua.install(install)
# Refresh windows update info
wua.refresh()
post_info = wua.updates().list()
# superseded_updates is a list of updates that the WUA first requested to be
# installed but became ineligible for installation because they were
# superseded by other updates
superseded_updates = {}
failed_updates = {}
installed_updates = {}
# Verify the installation
installed_items = install.list()
for item in installed_items:
if item not in post_info:
            # Update (item) was not installed for a valid reason (e.g. it was superseded)
superseded_updates[item] = {
"Title": installed_items[item]["Title"],
"KBs": installed_items[item]["KBs"],
}
else:
if not salt.utils.data.is_true(post_info[item]["Installed"]):
failed_updates[item] = {
"Title": post_info[item]["Title"],
"KBs": post_info[item]["KBs"],
}
else:
installed_updates[item] = {
"Title": post_info[item]["Title"],
"NeedsReboot": post_info[item]["NeedsReboot"],
"KBs": post_info[item]["KBs"],
}
comments = []
if installed_updates:
comments.append("Updates installed successfully")
ret["changes"]["installed"] = installed_updates
if failed_updates:
comments.append("Some updates failed to install")
ret["changes"]["failed"] = failed_updates
ret["result"] = False
# Add the list of updates not installed to the return
if superseded_updates:
comments.append("Some updates were superseded")
ret["changes"]["superseded"] = superseded_updates
ret["comment"] = "\n".join(comments)
return ret
|
{
"content_hash": "82a5dfd9316fabecbc2a680d24370ec4",
"timestamp": "",
"source": "github",
"line_count": 564,
"max_line_length": 122,
"avg_line_length": 29.54609929078014,
"alnum_prop": 0.5927148343734998,
"repo_name": "saltstack/salt",
"id": "0c00d5808963354f8f7bff2b3ea57242400f6d59",
"size": "16664",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "salt/states/win_wua.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "14911"
},
{
"name": "C",
"bytes": "1571"
},
{
"name": "Cython",
"bytes": "1458"
},
{
"name": "Dockerfile",
"bytes": "184"
},
{
"name": "Groovy",
"bytes": "12318"
},
{
"name": "HCL",
"bytes": "257"
},
{
"name": "HTML",
"bytes": "8031"
},
{
"name": "Jinja",
"bytes": "45598"
},
{
"name": "Makefile",
"bytes": "713"
},
{
"name": "NSIS",
"bytes": "76572"
},
{
"name": "PowerShell",
"bytes": "75891"
},
{
"name": "Python",
"bytes": "41444811"
},
{
"name": "Rich Text Format",
"bytes": "6242"
},
{
"name": "Roff",
"bytes": "191"
},
{
"name": "Ruby",
"bytes": "961"
},
{
"name": "SaltStack",
"bytes": "35856"
},
{
"name": "Scheme",
"bytes": "895"
},
{
"name": "Scilab",
"bytes": "1147"
},
{
"name": "Shell",
"bytes": "524917"
}
],
"symlink_target": ""
}
|
from boole.core.goals import *
from nose.tools import *
def test_create():
|
{
"content_hash": "affb486e656eb1bc554df058278c20dd",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 30,
"avg_line_length": 13.666666666666666,
"alnum_prop": 0.6829268292682927,
"repo_name": "avigad/boole",
"id": "cd4ae48d4fe7211505a195faa32e345eb17aba77",
"size": "230",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "boole/tests/goal_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Emacs Lisp",
"bytes": "4537"
},
{
"name": "Python",
"bytes": "473511"
}
],
"symlink_target": ""
}
|
import re
from streamlink.plugin import Plugin
from streamlink.plugins.brightcove import BrightcovePlayer
from streamlink.stream import RTMPStream
class AlJazeeraEnglish(Plugin):
url_re = re.compile(r"https?://(?:\w+\.)?aljazeera\.com")
account_id = 665003303001
render_re = re.compile(r'''RenderPagesVideo\((?P<q>['"])(?P<id>\d+)(?P=q)''') # VOD
video_id_re = re.compile(r'''videoId=(?P<id>\d+)["']''') # Live
@classmethod
def can_handle_url(cls, url):
return cls.url_re.match(url) is not None
def _get_streams(self):
res = self.session.http.get(self.url)
# check two different styles to include the video id in the page
video_id_m = self.render_re.search(res.text) or self.video_id_re.search(res.text)
video_id = video_id_m and video_id_m.group("id")
if not video_id:
self.logger.error("Could not find a video ID on this page")
return
# Use BrightcovePlayer class to get the streams
self.logger.debug("Found video ID: {0}", video_id)
bp = BrightcovePlayer(self.session, self.account_id)
for q, s in bp.get_streams(video_id):
# RTMP Streams are listed, but they do not appear to work
if not isinstance(s, RTMPStream):
yield q, s
__plugin__ = AlJazeeraEnglish
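# Illustrative usage (an assumption for clarity, not part of the original
# plugin file): once Streamlink loads this plugin, matching URLs are resolved
# through it from the command line, e.g.
#
#     streamlink "https://www.aljazeera.com/live/" best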
|
{
"content_hash": "37bdceaebbb4d285eb828ac50c428cbb",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 89,
"avg_line_length": 34.48717948717949,
"alnum_prop": 0.6289962825278811,
"repo_name": "back-to/streamlink",
"id": "9d8431f683cddb2382449a1bf7cae786988f27ca",
"size": "1345",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/streamlink/plugins/aljazeeraen.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "Python",
"bytes": "1451380"
},
{
"name": "Shell",
"bytes": "18044"
}
],
"symlink_target": ""
}
|
import urllib.request
import re
from bs4 import BeautifulSoup
url = 'http://bj.fangjia.com/ershoufang/'
response = urllib.request.urlopen(url)
html_doc = response.read()
soup = BeautifulSoup(html_doc, "html.parser")
house_list = soup.select('.xq_list')[0].find_all('li')
house_img = ''
house_name = ''
house_address = ''
house_baidu_key = ''
house_total = ''
house_price = ''
house_area = ''
house_type = ''
house_floor = ''
house_info_url = ''
for house in house_list:
#house_img = house.find('img')['src']
house_name = house.select('.h_name')[0].text.replace('\n','')
house_address = house.select('.address')[0].text.replace('\n','')
house_baidu_key = house_address.split('】')[1]
house_total = house.select('.xq_aprice')[0].text.replace('\n','')\
.split('万')[0]
house_price = house.select('.xq_aprice')[0].text.replace('\n','')\
.split('万')[1].split('元/㎡')[0]
house_attr = house.select('.attribute')[0].text.replace('\n','').replace('面积:','$$$')\
.replace(',房型:','$$$').replace(',楼层:','$$$').split('$$$')
house_area = house_attr[1].replace('平米','')
house_type = house_attr[2]
house_floor = house_attr[3]
house_info_url = house.select('.xq_pic')[0]['href']
print(house_info_url,house_name,house_address,house_baidu_key,house_total
,house_price,house_area,house_type,house_floor, '\n')
|
{
"content_hash": "3822b380fc98e4b7ba4518579188869c",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 91,
"avg_line_length": 38.32432432432432,
"alnum_prop": 0.5895627644569816,
"repo_name": "cxyxc/webgis-house",
"id": "11c0d70c7e795b562c1cefe24b7b583ee89ea818",
"size": "1454",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spider/fangjiawang/test_request.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "623"
},
{
"name": "HTML",
"bytes": "1792"
},
{
"name": "JavaScript",
"bytes": "71994"
},
{
"name": "PHP",
"bytes": "675"
},
{
"name": "Python",
"bytes": "14319"
}
],
"symlink_target": ""
}
|
""":mod:`rawkit.options` --- High level options for processing raw files
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import ctypes
from collections import namedtuple
class option(object):
"""
The :class:`option` decorator is an internal decorator which allows you to
define an option in a clean manner (specifying its name and how it maps to
the libraw params).
"""
def __init__(self, param=None, ctype=None):
if callable(param):
func = param
param = None
else:
func = None
self._prepare_func(func)
self.param = param
self.ctype = ctype
self.setter_func = None
self.param_func = None
def _prepare_func(self, func):
self.func = func
if func is not None:
self.__doc__ = getattr(func, '__doc__')
self.__name__ = getattr(func, '__name__')
self.internal_name = '_{name}'.format(name=self.__name__)
self.__module__ = getattr(func, '__module__')
def __call__(self, func=None):
self._prepare_func(func)
if func is None:
raise TypeError("option should not be called except as a property")
self.func = func
return self
def setter(self, func):
self.setter_func = func
return self
def param_writer(self, func):
self.param_func = func
return self
def write_param(self, obj, params):
if self.param_func is None:
val = self.__get__(obj, None)
try:
setattr(params, self.param, self.ctype(*val))
except TypeError:
setattr(params, self.param, self.ctype(val))
else:
self.param_func(obj, params)
def __set__(self, obj, value):
if self.setter_func is None:
setattr(obj, self.internal_name, value)
else:
self.setter_func(obj, value)
def __get__(self, obj, cls):
try:
val = getattr(obj, self.internal_name)
if val is None:
return self.func(obj)
else:
return val
except AttributeError:
# We're probably generating the documentation...
return self
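# Sketch of how the decorator above is used later in this module (comment only,
# added for clarity). A bare ``@option`` relies on a custom ``param_writer``,
# while ``@option(param=..., ctype=...)`` maps the property straight onto a
# libraw params field:
#
#     @option(param='bright', ctype=ctypes.c_float)
#     def brightness(self):
#         return None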
highlight_modes = namedtuple(
'HighlightMode', ['clip', 'ignore', 'blend', 'reconstruct']
)(0, 1, 2, 5)
"""
Constants for setting the highlight mode.
- ``clip`` --- Clip all highlights to white (default).
- ``ignore`` --- Leave highlights unclipped.
- ``blend`` --- Blend clipped and unclipped highlights.
- ``reconstruct`` --- A good average value for reconstruction of clipped
highlights which compromises between favoring whites and favoring colors.
"""
gamma_curves = namedtuple(
'GammaCurves', ['linear', 'bt709', 'srgb', 'adobe_rgb']
)([1, 1], [1 / 2.222, 4.5], [1 / 2.4, 12.92], [256 / float(563)])
"""
Gamma curves for a few common color profiles.
- ``linear`` --- A basic linear transfer function.
- ``bt709`` --- The BT.709 (Rec. 709) curve used by HDTVs (uses the median
power of sRGB, and a similar but shifted transfer function).
- ``srgb`` --- The sRGB gamma curve (uses the max power to account for linear
discontinuity and to attain the standard `IEC 61966-2-1` solution $K_0
\\\\approx 0.04045 $).
- ``adobe_rgb`` --- The correction function power for the Adobe RGB
colorspace. The toe-slope is left off.
"""
colorspaces = namedtuple(
'ColorSpaces', ['raw', 'srgb', 'adobe_rgb', 'wide_gammut_rgb',
'kodak_prophoto_rgb', 'xyz']
)(0, 1, 2, 3, 4, 5)
"""
Constants for setting the colorspace.
  - ``raw`` --- Raw colorspace (unique to each camera)
- ``srgb`` --- sRGB D65 (default colorspace)
- ``adobe_rgb`` --- Adobe RGB (1998) D65
- ``wide_gammut_rgb`` --- Wide Gamut RGB D65
- ``kodak_prophoto_rgb`` --- Kodak ProPhoto RGB D65
- ``xyz`` --- XYZ colorspace
"""
interpolation = namedtuple(
'InterpolationAlgo', ['linear', 'vng', 'ppg', 'ahd', 'dcb', 'modified_ahd',
'afd', 'vcd', 'mixed_vcd_modified_ahd', 'lmmse',
'amaze']
)(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
"""
Constants for setting the interpolation algorithm:
0. Linear
1. VNG
2. PPG
3. AHD
4. DCB
5. Modified AHD
6. AFD
7. VCD
8. Mixed VCD and Modified AHD
9. LMMSE
10. AMaZE
Modified AHD (5) through LMMSE (9) are only useful if you're using a version of
LibRaw with the "`LibRaw Demosaic Pack GPL2`_" built in and AMaZE (10) is only
useful if LibRaw was built with the "`LibRaw Demosaic Pack GPL3`_". If you
attempt to use an interpolation method that's not built into your version of
LibRaw, it will silently fallback to AHD.
Usage example:
.. sourcecode:: python
from rawkit.raw import Raw
from rawkit.options import interpolation
with Raw(filename="RawFile.CR2") as raw:
raw.options.interpolation = interpolation.ahd
raw.save("RawFile.ppm")
.. _LibRaw Demosaic Pack GPL2:
https://github.com/LibRaw/LibRaw-demosaic-pack-GPL2
.. _LibRaw Demosaic Pack GPL3:
https://github.com/LibRaw/LibRaw-demosaic-pack-GPL3
"""
class WhiteBalance(namedtuple('WhiteBalance',
['auto', 'camera', 'greybox', 'rgbg'])):
"""
Represents the white balance of a photo. If the camera white balance is
    used, but not present, we fall back to the other options. White balance
    multipliers from the other options stack (eg. you can use auto white balance, and
then specify a manual ``rgbg`` multiplier on top of that).
Args:
auto (boolean): Determines if we should automatically set the WB.
camera (boolean): Causes us to use the camera defined WB if present.
greybox (4 int tuple): Set the WB based on a neutral grey region of the
image.
rgbg (4 float tuple): Set the WB manually based on an RGBG channel
multiplier.
Returns:
WhiteBalance: A white balance object.
"""
__slots__ = ()
def __new__(cls, auto=False, camera=False, greybox=None, rgbg=None):
return super(WhiteBalance, cls).__new__(
cls, auto, camera, greybox, rgbg)
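# Minimal usage sketch (illustrative values, not from the original module):
# camera white balance as the baseline with a manual RGBG multiplier stacked
# on top, passed into the Options container defined below.
#
#     wb = WhiteBalance(camera=True, rgbg=(2.1, 1.0, 1.5, 1.0))
#     opts = Options(attrs={'white_balance': wb})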
class Options(object):
"""
Represents a set of options which can be used when processing raw data.
Args:
attrs (dict): A subscriptable object from which to take the initial
state of the options object.
"""
__slots__ = [
'_bps',
'_brightness',
'_auto_brightness',
'_auto_brightness_threshold',
'_chromatic_aberration',
'_darkness',
'_half_size',
'_noise_threshold',
'_rgbg_interpolation',
'_saturation',
'_shot',
'_use_camera_matrix',
'_white_balance',
'_highlight_mode',
'_colorspace',
'_output_profile',
'_input_profile',
'_use_camera_profile',
'_cropbox',
'_gamma',
'_interpolation',
'_auto_stretch',
'_rotation',
'_dark_frame',
'_green_matching',
'_bad_pixels_file',
'_median_filter_passes',
'_adjust_maximum_threshold',
]
def __init__(self, attrs=None):
"""
Create the options object, initializing values to ``None`` or their
corresponding value from `attrs`.
"""
for i in self.__slots__:
try:
param = i[1:]
setattr(self, param, attrs[param])
except (KeyError, TypeError):
setattr(self, i, None)
def __iter__(self):
"""Allow iterating over the options."""
idx = 0
while True:
idx += 1
try:
yield self.keys()[idx - 1]
except IndexError:
return
def __repr__(self):
"""Represents the options as a dict."""
return repr(dict(self))
def keys(self):
"""
A list of keys which have a value other than ``None`` and which have
been set by the user (even if those options are set to the default
value).
Returns:
tuple: List of option keys which have been set.
"""
return [slot[1:] for slot in self.__slots__ if getattr(self, slot) is
not None]
def values(self):
"""
The values of all options which appear in :func:`keys`.
Returns:
tuple: List of options values.
"""
return [self.__getitem__(k) for k in self.keys()]
def __getitem__(self, k):
"""
Allow accessing options with dictionary syntax eg. ::
opts['half_size'].
"""
return getattr(self, k)
@option(param='output_color', ctype=ctypes.c_int)
def colorspace(self):
"""
Sets the colorspace used for the output image. Supported colorspaces
are defined as constants in :class:`rawkit.options.colorspaces`.
:type: :class:`int`
:default: :class:`rawkit.options.colorspaces.srgb`
:dcraw: ``-o``
:libraw: :class:`libraw.structs.libraw_output_params_t.output_color`
"""
return colorspaces.srgb
@option(param='highlight', ctype=ctypes.c_int)
def highlight_mode(self):
"""
The mode for dealing with highlights in the image. Some constants have
been defined in :class:`rawkit.options.highlight_modes` to make things
easier, or you can set an integer directly.
Integers that are greater than or equal to 3 will attempt to
        reconstruct highlights. Lower numbers favor whites, and higher numbers
        favor colors. :class:`rawkit.options.highlight_modes.reconstruct` (5) is a good
compromise.
:type: :class:`int`
:default: :class:`rawkit.options.highlight_modes.clip`
:dcraw: ``-H``
:libraw: :class:`libraw.structs.libraw_output_params_t.highlight`
"""
return highlight_modes.clip
@option
def white_balance(self):
"""
The white balance of the image.
:type: :class:`rawkit.options.WhiteBalance`
:default: WhiteBalance(auto=True, camera=True)
:dcraw: ``-a``
``-w``
``-A``
``-r``
:libraw: :class:`libraw.structs.libraw_output_params_t.use_auto_wb`
:class:`libraw.structs.libraw_output_params_t.use_camera_wb`
:class:`libraw.structs.libraw_output_params_t.greybox`
:class:`libraw.structs.libraw_output_params_t.user_mul`
"""
return WhiteBalance(auto=True, camera=True)
@white_balance.param_writer
def white_balance(self, params):
if self.white_balance.greybox is not None:
params.greybox = (ctypes.c_uint * 4)(*self.white_balance.greybox)
if self.white_balance.rgbg is not None:
params.user_mul = (ctypes.c_float * 4)(*self.white_balance.rgbg)
params.use_camera_wb = ctypes.c_int(self.white_balance.camera)
params.use_auto_wb = ctypes.c_int(self.white_balance.auto)
@option(param='use_camera_matrix', ctype=ctypes.c_int)
def use_camera_matrix(self):
"""
Use the color matrix from the raw's metadata. Only affects Olympus,
Leaf, and Phase One cameras (and DNG files).
Note that we differ from the LibRaw defaults on this option. LibRaw
defaults to true if the photo is in DNG format or the camera white
balance is being used, and false otherwise. rawkit always defaults to
true.
:type: :class:`boolean`
:default: True
:dcraw: ``+M``
``-M``
:libraw: :class:`libraw.libraw_output_params_t.use_camera_matrix`
"""
return True
@option(param='shot_select', ctype=ctypes.c_uint)
def shot(self):
"""
Selects the shot to process for raw images that contain multiple
images.
:type: :class:`int`
:default: 0
:dcraw: ``-s``
:libraw: :class:`libraw.structs.libraw_output_params_t.shot_select`
"""
return None
@option(param='user_sat', ctype=ctypes.c_int)
def saturation(self):
"""
Determines the saturation level of the output image.
:type: :class:`int`
:default: None
:dcraw: ``-S``
:libraw: :class:`libraw.structs.libraw_output_params_t.user_sat`
"""
return None
@option(param='four_color_rgb', ctype=ctypes.c_int)
def rgbg_interpolation(self):
"""
Determines if we should use four channel RGB interpolation.
:type: :class:`boolean`
:default: False
:dcraw: ``-f``
:libraw: :class:`libraw.structs.libraw_output_params_t.four_color_rgb`
"""
return False
@option(param='threshold', ctype=ctypes.c_float)
def noise_threshold(self):
"""
Sets the threshold for noise reduction using wavelet denoising.
:type: :class:`float`
:default: None
:dcraw: ``-n``
:libraw: :class:`libraw.structs.libraw_output_params_t.threshold`
"""
return None
@option(param='half_size', ctype=ctypes.c_int)
def half_size(self):
"""
When developing the image, output it at 50% size. This makes developing
preview images much faster.
:type: :class:`boolean`
:default: False
:dcraw: ``-h``
:libraw: :class:`libraw.structs.libraw_output_params_t.half_size`
"""
return False
@option(param='user_black', ctype=ctypes.c_int)
def darkness(self):
"""
Raise the black level of a photo.
:type: :class:`int`
:default: None
:dcraw: ``-k``
:libraw: :class:`libraw.structs.libraw_output_params_t.user_black`
"""
return None
@option
def chromatic_aberration(self):
"""
A Red-Blue scale factor that's used to correct for chromatic aberration
by scaling the respective channels.
eg. ::
# (red_scale, blue_scale)
raw.options.chromatic_aberration = (0.999, 1.001)
:type: :class:`double tuple`
:default: (1, 1)
:dcraw: ``-C``
:libraw: :class:`libraw.structs.libraw_output_params_t.aber`
"""
return (1, 1)
@chromatic_aberration.param_writer
def chromatic_aberration(self, params):
params.aber = (ctypes.c_double * 4)(*(
self.chromatic_aberration[0],
0, # TODO: What is this index used for?
self.chromatic_aberration[1],
0 # TODO: What is this index used for?
))
@option(param='output_bps', ctype=ctypes.c_int)
def bps(self):
"""
Set the bits per sample used for the photo (8 or 16).
Setting this to 16 is effectively the same as running dcraw with the
``-4`` option.
:type: :class:`int`
:default: 8
:dcraw: ``-4``
:libraw: :class:`libraw.structs.libraw_output_params_t.output_bps`
"""
return 8
@bps.setter
def bps(self, value):
if value in (8, 16):
self._bps = value
else:
raise ValueError("BPS must be 8 or 16")
@option(param='cropbox', ctype=(ctypes.c_uint * 4))
def cropbox(self):
"""
Crops the image.
:type: :class:`4 float tuple`
:default: None
:dcraw: None
:libraw: :class:`libraw.structs.libraw_output_params_t.cropbox`
"""
return None
@option(param='gamm', ctype=(ctypes.c_double * 6))
def gamma(self):
"""
Sets the gamma-curve of the photo. The two values in the tuple
correspond to:
- gamma[0] --- Correction function power (inverted Gamma power,
$\\\\gamma^{-1}$)
- gamma[1] --- toe-slope ($\\\\phi$)
For a simple power curve, set the toe-slope to zero.
:type: :class:`2 double tuple`
:default: None
:dcraw: ``-g``
:libraw: :class:`libraw.structs.libraw_output_params_t.gamm`
"""
return None
@option(param='interpolation', ctype=ctypes.c_uint)
def interpolation(self):
"""
Sets the interpolation algorithm.
:type: :class:`rawkit.options.interpolation`
:default: `ahd`
:dcraw: ``-q``
:libraw: :class:`libraw.structs.libraw_output_params_t.user_qual`
"""
return interpolation.ahd
@option(param='bright', ctype=ctypes.c_float)
def brightness(self):
"""
Sets the brightness level by dividing the white level by this value.
This is ignored if :class:`~auto_brightness` is ``True``.
:type: :class:`float`
:default: 1.0
:dcraw: ``-b``
:libraw: :class:`libraw.structs.libraw_output_params_t.bright`
"""
return None
@option(param='auto_bright_thr', ctype=ctypes.c_float)
def auto_brightness_threshold(self):
"""
The allowable percentage of clipped pixels when
:class:`~auto_brightness` is used.
:type: :class:`float`
:default: 0.001 (0.1%)
:dcraw: None
:libraw: :class:`libraw.structs.libraw_output_params_t.auto_bright_thr`
"""
return 0.001
@option
def auto_brightness(self):
"""
Set the brightness automatically based on the image histogram and the
:class:`~auto_brightness_threshold`.
:type: :class:`boolean`
:default: True
:dcraw: ``-W``
:libraw: :class:`libraw.structs.libraw_output_params_t.no_auto_bright`
"""
return True
@auto_brightness.param_writer
def auto_brightness(self, param):
param.no_auto_bright = ctypes.c_int(not self.auto_brightness)
@option(param='use_fuji_rotate', ctype=ctypes.c_int)
def auto_stretch(self):
"""
Stretches images taken on cameras with non-square pixels to the correct
aspect ratio. For Fuji Super CCD cameras, rotates the image 45 degrees.
This guarantees that the output pixels share a 1:1 correspondence with
the raw pixels.
:type: :class:`boolean`
:default: True
:dcraw: ``-j``
:libraw: :class:`libraw.structs.libraw_output_params_t.use_fuji_rotate`
"""
return True
@option
def rotation(self):
"""
Rotates the image by the given number of degrees. Must be a multiple of
90 (0, 90, 180, 270, etc).
The default (None) is to use the rotation provided by the camera.
:type: :class:`int`
:default: None
:dcraw: ``-t``
:libraw: :class:`libraw.structs.libraw_output_params_t.user_flip`
"""
return None
@rotation.setter
def rotation(self, value):
if value is not None and value > 0:
value = ((value + 3600) % 360)
if value in (None, 0, 90, 180, 270):
self._rotation = value
else:
raise ValueError('Rotation must be None (use camera rotation) or '
'a multiple of 90')
@rotation.param_writer
def rotation(self, params):
params.user_flip = ctypes.c_int({
270: 5,
180: 3,
90: 6,
0: 0
}[self.rotation])
@option
def dark_frame(self):
"""
A dark frame in 16-bit PGM format. This may either be a path to an
existing file, or an instance of :class:`rawkit.raw.DarkFrame`.
:type: :class:`rawkit.raw.DarkFrame`
:class:`str`
:default: None
:dcraw: ``-K``
:libraw: :class:`libraw.structs.libraw_output_params_t.dark_frame`
"""
return None
@dark_frame.setter
def dark_frame(self, value):
self._dark_frame = value
@dark_frame.param_writer
def dark_frame(self, params):
try:
self.dark_frame.save()
params.dark_frame = ctypes.c_char_p(
self.dark_frame.name.encode('utf-8')
)
except AttributeError:
params.dark_frame = ctypes.c_char_p(
self.dark_frame.encode('utf-8')
)
@option(param='green_matching', ctype=ctypes.c_int)
def green_matching(self):
"""
Performs a second post-processing pass to correct for green channel
imbalance.
:type: :class:`boolean`
:default: False
:dcraw: None
:libraw: :class:`libraw.structs.libraw_output_params_t.green_matching`
"""
return False
@option(param='output_profile', ctype=ctypes.c_char_p)
def output_profile(self):
"""
Path to an ICC color profile file containing the output profile. Only
used if the version of LibRaw that you're linking against was compiled
with LCMS support.
:type: :class:`string`
:default: None
:dcraw: ``-o``
``-p``
:libraw: :class:`libraw.structs.libraw_output_params_t.output_profile`
"""
return None
@option(param='camera_profile', ctype=ctypes.c_char_p)
def input_profile(self):
"""
Path to an ICC color profile file containing the input profile. Only
used if the version of LibRaw that you're linking against was compiled
with LCMS support.
Note that LibRaw defines a magic string, 'embed', which causes it to
use the profile embedded in the raw image if present. This is the same
as setting the :class:`~use_camera_profile` option.
:type: :class:`string`
:default: None
:dcraw: ``-o``
``-p``
:libraw: :class:`libraw.structs.libraw_output_params_t.camera_profile`
"""
return None
@option
def use_camera_profile(self):
"""
True if we should use the embedded camera profile (if present in the
raw file and we're linking against a version of LibRaw with LCMS
support).
:type: :class:`boolean`
:default: True
:dcraw: ``-o``
``-p``
:libraw: :class:`libraw.structs.libraw_output_params_t.camera_profile`
"""
return True
@use_camera_profile.setter
def use_camera_profile(self, value):
self._use_camera_profile = value
@use_camera_profile.param_writer
def use_camera_profile(self, params):
if self.use_camera_profile:
params.camera_profile = ctypes.c_char_p(b'embed')
else:
params.camera_profile = None
@option(param='bad_pixels', ctype=ctypes.c_char_p)
def bad_pixels_file(self):
"""
Points to a bad pixels map in dcraw format: ::
column row unix-timestamp\\n
:type: :class:`str`
:default: None
:dcraw: ``-P``
:libraw: :class:`libraw.structs.libraw_output_params_t.bad_pixels`
"""
return None
@option(param='med_passes', ctype=ctypes.c_int)
def median_filter_passes(self):
"""
Useful for cleaning up color artifacts by running a 3x3 median filter
over the R-G and B-G channels.
:type: :class:`int`
:default: 0
:dcraw: ``-m``
:libraw: :class:`libraw.structs.libraw_output_params_t.med_passes`
"""
return 0
@option(param='adjust_maximum_thr', ctype=ctypes.c_float)
def adjust_maximum_threshold(self):
"""
Automatically adjusts the maximum pixel value based on per channel
maximum data.
Note:
If this value is set above 0.99999, the default value will be used
instead. If it is set below 0.00001, no adjustment will happen.
:type: :class:`float`
:default: 0.75
:dcraw: None
:libraw:
:class:`libraw.structs.libraw_output_params_t.adjust_maximum_thr`
"""
        return 0.75
def _map_to_libraw_params(self, params):
"""
Internal method that writes rawkit options into the libraw options
struct with the proper C data types.
Args:
params (libraw.structs.libraw_output_params_t):
The output params struct to set.
"""
for slot in self.__slots__:
prop = slot[1:]
opt = getattr(Options, prop)
if type(opt) is option and getattr(self, prop) is not None:
opt.write_param(self, params)
# This generally isn't needed, except for testing.
return params
|
{
"content_hash": "65be5dadc0b1a08fa54c905fb5c71fdf",
"timestamp": "",
"source": "github",
"line_count": 794,
"max_line_length": 79,
"avg_line_length": 31.234256926952142,
"alnum_prop": 0.5710887096774193,
"repo_name": "photoshell/rawkit",
"id": "678ad96432c533206e0aa89248c4100f218b9033",
"size": "24800",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rawkit/options.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1313"
},
{
"name": "Python",
"bytes": "134997"
}
],
"symlink_target": ""
}
|
import os
import sys
from threading import Thread
from pyelectro import analysis
import numpy
import math
import pprint
pp = pprint.PrettyPrinter(indent=4)
def alpha_normalised_cost_function(value, target, base=10):
"""Fitness of a value-target pair from 0 to 1
.. WARNING:
I've found that this cost function is producing some odd behaviour.
It is best avoided until this is investigated
    For any value/target pair this will give a normalised value for
    agreement: 1 is a complete value-target match and 0 is no match.
A mirrored exponential function is used.
The fitness is given by the expression :math:`fitness = base^{-x}`
where:
.. math::
        x = \dfrac{(value-target)^2}{(target + 0.01)^2}
:param value: value measured
    :param target: the target value
:param base: the value 'base' in the above mathematical expression for x
:return: fitness - a real number from 0 to 1
"""
value = float(value)
target = float(target)
x = (
(value - target) / (target + 0.01)
) ** 2 # the 0.01 thing is a bit of a hack at the moment.
fitness = base ** (-x)
return fitness
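# Illustrative spot-check for alpha_normalised_cost_function (added for
# clarity, not part of the original module). With base=10 the fitness is 1.0
# for an exact match and decays as the value drifts from the target:
#
#     >>> round(alpha_normalised_cost_function(100, 100), 3)
#     1.0
#     >>> round(alpha_normalised_cost_function(110, 100), 3)
#     0.977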
def normalised_cost_function(value, target, Q=None):
"""Returns fitness of a value-target pair from 0 to 1
    For any value/target pair this will give a normalised value for
    agreement: 0 is a complete value-target match and 1 is "no" match.
If no Q is assigned, it is set such that it satisfies the condition
    fitness=0.7 when (target-value)=10*target. This is essentially
empirical and seems to work. Mathematical derivation is on Mike Vella's
Lab Book 1 p.42 (page dated 15/12/11).
:param value: value measured
    :param target: the target value
:param Q: This is the sharpness of the cost function, higher values correspond
to a sharper cost function. A high Q-Value may lead an optimizer to a solution
quickly once it nears the solution.
:return: fitness value from 0 to 1
"""
value = float(value)
target = float(target)
if Q is None:
if target != 0:
Q = 7 / (300 * (target ** 2))
else:
Q = 0.023333 # PG: Gives fitness = 0.023333 when value = 1; fitness = 0.7 when value = 10
try:
fitness = 1 - 1 / (Q * (target - value) ** 2 + 1)
except:
print(
"Exeption when calculating the fitness function; target: %s; value %s; Q: %s"
% (target, value, Q)
)
fitness = 1
return fitness
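# Illustrative spot-check for normalised_cost_function (added for clarity,
# not part of the original module). Fitness is 0 for a perfect match and
# tends towards 1 as the value drifts away; with the automatically chosen Q
# it reaches 0.7 when the error equals ten times the target:
#
#     >>> normalised_cost_function(100, 100)
#     0.0
#     >>> round(normalised_cost_function(1100, 100), 3)
#     0.7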
class __CandidateData(object):
"""Container for information about a candidate (chromosome)"""
def __init__(self, chromosome):
self.chromosome = chromosome
def set_dbpath(self, dbpath):
self.dbpath = dbpath
def set_exp_id(self, exp_id):
self.exp_id = exp_id
def set_job_num(self, job_num):
self.job_num = job_num
class __Evaluator(object):
"""Base class for Evaluators"""
def __init__(self, parameters, weights, targets, controller):
self.parameters = parameters
self.weights = weights
self.targets = targets
self.controller = controller
'''
PG: Disabling these until they're tested again...
class __CondorContext(object):
"""manager for dealing with a condor-based grid"""
def __split_list(self,alist, wanted_parts=1):
length = len(alist)
return [ alist[i*length // wanted_parts: (i+1)*length // wanted_parts]
for i in range(wanted_parts) ]
def __prepare_candidates(self,candidates,candidates_per_job=1):
#Split candidate list into smaller ones (jobs):
#and make a job list
if optimizer_params.candidates_in_job != None:
candidates_in_job=optimizer_params.candidates_in_job
else:
candidates_in_job=candidates_per_job
num_candidates=len(candidates)
ids=range(num_candidates)
enumerated_candidates=zip(candidates,ids)
num_jobs=num_candidates/candidates_in_job
self.num_jobs=num_jobs
self.job_list=self.__split_list(enumerated_candidates,wanted_parts=self.num_jobs)
def __make_job_file(self,job,job_number):
#write the header:
filepath = os.path.join(self.tmpdir, 'run' + str(job_number) + '.sh')
run_shell = open(filepath, 'w')
run_shell.write('#!/bin/bash\n')
run_shell.write('reldir=`dirname $0`\n')
run_shell.write('cd $reldir\n')
run_shell.write('directory=`pwd`\n')
run_shell.write('pndirectory=$directory\n')
run_shell.write('#Untar the file:\n')
run_shell.write('/bin/tar xzf ./portable-neuron.tar.gz\n')
tarfile_name=optimizer_params.tarred_nrnproj
run_shell.write('/bin/tar xzf ./'+tarfile_name+'\n')
#CandidateData_list=[]
for enumerated_candidate in job:
chromosome = enumerated_candidate[0]
candidate_info = CandidateData(chromosome)
exp_id = enumerated_candidate[1]
candidate_info.set_exp_id(exp_id)
candidate_info.set_job_num(job_number)
self.CandidateData_list.append(candidate_info)
nproj = controllers.NrnProjSimRun(optimizer_params.project_path, chromosome)
run_shell.write('#issue the commands\n')
run_shell.write('$pndirectory/pnpython.sh \
$directory/src/simrunner.py "sim_var[\'exp_id\'] \
= ' + str(exp_id) + '\" ' + '"sim_var[\'''dbname''\'] \
= \'outputdb' + str(job_number) + '.sqlite\'"' +
nproj.sim_var_string + '\n')
run_shell.write('echo \'done\'\n')
run_shell.write('cp $directory/sims/outputdb' + str(job_number) + '.sqlite $directory\n')
#self.CandidateData_list=CandidateData_list
run_shell.close()
def __make_submit_file(self):
#now we write the submit file
filepath = os.path.join(self.tmpdir, 'submitfile.submit')
submit_file=open(filepath,'w')
submit_file.write('universe = vanilla\n')
submit_file.write('log = pneuron.log\n')
submit_file.write('Error = err.$(Process)\n')
submit_file.write('Output = out.$(Process)\n')
submit_file.write('requirements = GLIBC == "2.11"\n')
tarfile_name=optimizer_params.tarred_nrnproj
submit_file.write('transfer_input_files = portable-neuron.tar.gz,'+tarfile_name+'\n')
submit_file.write('should_transfer_files = yes\n')
submit_file.write('when_to_transfer_output = on_exit_or_evict\n')
#this is where you have to do the clever stuff:
for shellno in range(self.num_jobs):
submit_file.write('executable = run'+str(shellno)+'.sh\n')
submit_file.write('queue\n')
#finally close the submit file
submit_file.close()
def __build_condor_files(self,candidates,candidates_per_job=100):
#prepare list of candidates to be farmed on grid:
self.__prepare_candidates(candidates,candidates_per_job=100)
#make the job files (shell scripts to be executed on the execute nodes)
job_number=0 #run shell script number
for job in self.job_list:
self.__make_job_file(job,job_number)
job_number+=1
#now make the submit file
self.__make_submit_file()
def __delete_remote_files(self,host):
import ssh_utils
command='rm -rf ./*'
ssh_utils.issue_command(host, command)
def __put_multiple_files(self,host,filelist,localdir='/',remotedir='/'):
import ssh_utils
for file in filelist:
localpath=os.path.join(localdir,file)
remotepath=os.path.join(remotedir,file)
ssh_utils.put_file(host,localpath,remotepath)
'''
class DumbEvaluator(__Evaluator):
"""
The simulations themselves report their fitness. The evaluator
just reads them from a file. Requires the appropriate controller.
"""
def __init__(self, controller, fitness_filename_prefix, threads_number=1):
self.controller = controller
self.fitness_filename_prefix = fitness_filename_prefix
self.threads_number = threads_number
def evaluate(self, candidates, args):
threads_number = int(self.threads_number)
        candidates_per_thread = len(candidates) // threads_number  # integer division; used as a slice bound below
remainder_candidates = len(candidates) % threads_number
chunk_begin = 0
chunk_end = candidates_per_thread
if remainder_candidates != 0:
chunk_end += 1
threads = []
try:
for i in range(0, threads_number):
                # if the fitness file already exists it needs to be removed first:
file_name = self.fitness_filename_prefix + str(i)
if os.path.exists(file_name):
os.remove(file_name)
# run the candidates:
candidate_section = candidates[chunk_begin:chunk_end]
threads.append(
Thread(
target=self.controller.run,
args=(
candidate_section,
args,
file_name,
),
)
)
threads[i].daemon = True
threads[i].start()
chunk_begin = chunk_end
chunk_end += candidates_per_thread
if i < (remainder_candidates - 1):
chunk_end += 1
fitness = []
for i in range(0, threads_number):
                # we should let the main thread handle keyboard interrupts
while True:
threads[i].join(1)
                    if not threads[i].is_alive():
break
# get their fitness from the file
file_name = self.fitness_filename_prefix + str(i)
threads[i].join()
fitness = fitness + [float(i) for i in open(file_name).readlines()]
os.remove(file_name)
except (KeyboardInterrupt, SystemExit):
sys.exit("Interrupted by ctrl+c\n")
return fitness
class IClampEvaluator(__Evaluator):
"""
Locally-evaluates (not using cluster or grid computing) a model.
The evaluate routine runs the model and returns its fitness value
"""
def __init__(
self,
analysis_start_time,
controller,
analysis_end_time,
target_data_path,
parameters,
analysis_var,
weights,
targets=None,
automatic=False,
verbose=True,
):
super(IClampEvaluator, self).__init__(parameters, weights, targets, controller)
self.analysis_start_time = analysis_start_time
self.analysis_end_time = analysis_end_time
self.target_data_path = target_data_path
self.analysis_var = analysis_var
self.verbose = verbose
print("target data path in evaluator:" + target_data_path)
if automatic is True:
t, v_raw = analysis.load_csv_data(target_data_path)
v = numpy.array(v_raw)
v_smooth = list(analysis.smooth(v))
ic_analysis = analysis.IClampAnalysis(
v_smooth,
t,
analysis_var,
start_analysis=analysis_start_time,
end_analysis=analysis_end_time,
)
ic_analysis.analyse()
self.targets = ic_analysis.analysis_results
print("Obtained targets are:")
print(self.targets)
def evaluate(self, candidates, args):
print("\n>>>>> Evaluating: ")
for cand in candidates:
print(">>>>> %s" % cand)
simulations_data = self.controller.run(candidates, self.parameters)
fitness = []
for data in simulations_data:
times = data[0]
samples = data[1]
data_analysis = analysis.IClampAnalysis(
samples,
times,
self.analysis_var,
start_analysis=self.analysis_start_time,
end_analysis=self.analysis_end_time,
target_data_path=self.target_data_path,
)
try:
data_analysis.analyse()
except:
data_analysis.analysable_data = False
fitness_value = self.evaluate_fitness(
data_analysis,
self.targets,
self.weights,
cost_function=normalised_cost_function,
)
fitness.append(fitness_value)
print("Fitness: %s\n" % fitness_value)
return fitness
def evaluate_fitness(
self,
data_analysis,
target_dict={},
target_weights=None,
cost_function=normalised_cost_function,
):
"""
Return the estimated fitness of the data, based on the cost function being used.
:param data_analysis: IClampAnalysis instance
:param target_dict: key-value pairs for targets
:param target_weights: key-value pairs for target weights
:param cost_function: cost function (callback) to assign individual targets sub-fitness.
"""
# calculate max fitness value (TODO: there may be a more pythonic way to do this..)
worst_cumulative_fitness = 0
for target in target_dict.keys():
if target_weights is None:
target_weight = 1
else:
if target in target_weights.keys():
target_weight = target_weights[target]
else:
target_weight = 1.0
worst_cumulative_fitness += target_weight
# if we have 1 or 0 peaks we won't conduct any analysis
if data_analysis.analysable_data is False:
print("Data is non-analysable")
return worst_cumulative_fitness
else:
fitness = 0
for target in target_dict.keys():
target_value = target_dict[target]
cost = "?"
if target_weights is None:
target_weight = 1
else:
if target in target_weights.keys():
target_weight = target_weights[target]
else:
target_weight = 1.0
if target_weight > 0:
value = data_analysis.analysis_results[target]
# let function pick Q automatically
cost = cost_function(value, target_value)
inc = target_weight * cost
fitness += inc
if self.verbose:
print(
"Target %s (weight %s): target val: %s, actual: %s, cost: %s, fitness inc: %s"
% (target, target_weight, target_value, value, cost, inc)
)
return fitness
class NetworkEvaluator(__Evaluator):
"""
Locally-evaluates (not using cluster or grid computing) a model.
The evaluate routine runs the model and returns its fitness value
"""
def __init__(
self,
analysis_start_time,
controller,
analysis_end_time,
parameters,
analysis_var,
weights,
targets=None,
):
super(NetworkEvaluator, self).__init__(parameters, weights, targets, controller)
self.analysis_start_time = analysis_start_time
self.analysis_end_time = analysis_end_time
self.analysis_var = analysis_var
self.targets = targets
def evaluate(self, candidates, args):
print("\n>>>>> Evaluating: ")
for cand in candidates:
print(">>>>> %s" % cand)
simulations_data = self.controller.run(candidates, self.parameters)
fitness = []
for i in range(len(simulations_data)):
data = simulations_data[i]
candidate = candidates[i]
times = data[0]
volts = data[1]
data_analysis = analysis.NetworkAnalysis(
volts,
times,
self.analysis_var,
start_analysis=self.analysis_start_time,
end_analysis=self.analysis_end_time,
)
print(
"- Evaluating %s from %s -> %s (data %s -> %s)"
% (
candidate,
self.analysis_start_time,
self.analysis_end_time,
times[0],
times[-1],
)
)
data_analysis.analyse(self.targets)
fitness_value = self.evaluate_fitness(
data_analysis,
self.targets,
self.weights,
cost_function=normalised_cost_function,
)
fitness.append(fitness_value)
print("Fitness: %s\n" % fitness_value)
return fitness
def evaluate_fitness(
self,
data_analysis,
target_dict={},
target_weights=None,
cost_function=normalised_cost_function,
):
"""
Return the estimated fitness of the data, based on the cost function being used.
:param data_analysis: NetworkAnalysis instance
:param target_dict: key-value pairs for targets
:param target_weights: key-value pairs for target weights
:param cost_function: cost function (callback) to assign individual targets sub-fitness.
"""
fitness = 0
for target in target_dict.keys():
target_value = target_dict[target]
if target_weights is None:
target_weight = 1
else:
if target in target_weights.keys():
target_weight = target_weights[target]
else:
                    target_weight = 0  # If it's not mentioned assume weight = 0!
if target_weight > 0:
inc = target_weight # default...
cost = "?"
if target in data_analysis.analysis_results:
value = data_analysis.analysis_results[target]
if not math.isnan(value):
# let function pick Q automatically
cost = cost_function(value, target_value)
inc = target_weight * cost
else:
value = "<<infinite value!>>"
inc = target_weight
else:
# Check if any targets for the provided entity are included
# in the analysis results. If not, something is wrong.
valid = False
entity = target.split("/")[0].split(":")[0]
for atarget in data_analysis.analysis_results.keys():
if entity in atarget:
valid = True
break
if not valid:
raise RuntimeError("No target values for entity {} were found in the analysis results. Please check your target parameter strings.\nAll target parameters are: {}".format(entity, data_analysis.analysis_results.keys()))
value = (
"<<cannot be calculated! (only: %s; peak_threshold: %s)>>"
% (
data_analysis.analysis_results.keys(),
self.analysis_var["peak_threshold"],
)
)
fitness += inc
print(
"Target %s (weight %s): target val: %s, actual: %s, cost: %s, fitness inc: %s"
% (target, target_weight, target_value, value, cost, inc)
)
return fitness
'''
class IClampCondorEvaluator(IClampEvaluator):
"""
Evaluate simulations and return their fitness on a condor grid.
Tested and known to work on CamGrid
(http://www.escience.cam.ac.uk/projects/camgrid/)
WARNING:
this entire class should now be considered obsolete, the evaluator
is just an IClampEvaluator and everything here that is different
from that class needs to become its own controller
"""
def __init__(self,local_analysis=False):
super(IClampCondorEvaluator,self).__init__()
#other things like the number of nodes to divide the work onto and
#host connection parameters need to go into this constructor
if local_analysis:
self.evaluate=self.__local_evaluate
else:
self.evaluate=self.__remote_evaluate__
def __condor_evaluate(self,candidates,args):
"""
Run simulations on grid and analyse data locally
WARNING: (???I'm quite confused here...there is a mistake somewhere
as the name doesn't match the description - which method is which?)
Once each generation has finished, all data is pulled to local
workstation in form of sqlite databases (1 database per job)
and these are analysed and the fitness estimated sequentially
the fitness array is then returned.
"""
import time
import ssh_utils
self.CandidateData_list=[]
self.__build_condor_files(candidates) #Build submit and runx.sh files, exp_id now corresponds to position in chromosome and fitness arrays
messagehost=ssh_utils.host(optimizer_params.host,optimizer_params.username,optimizer_params.password,optimizer_params.port)
self.__delete_remote_files__(messagehost)#delete everything in thssh_utilse directory you're about to put files in
filelist=os.listdir(self.tmpdir)
self.__put_multiple_files(messagehost,filelist,localdir=self.tmpdir,remotedir=optimizer_params.remotedir)#copy local files over
filelist=os.listdir(self.portableswdir)
self.__put_multiple_files(messagehost,filelist,localdir=self.portableswdir,remotedir=optimizer_params.remotedir)#copy local files over
ssh_utils.issue_command(messagehost,'export PATH=/opt/Condor/release/bin:$PATH\ncondor_submit submitfile.submit')
self.jobdbnames=[]
for job_num in range(self.num_jobs): #make a list of the databases we need:
jobdbname='outputdb'+str(job_num)+'.sqlite'
self.jobdbnames.append(jobdbname)
#wait till you know file exists:
dbs_created=False
pulled_dbs=[] # list of databases which have been extracted from remote server
while (dbs_created==False):
print('waiting..')
time.sleep(20)
print('checking if dbs created:')
command='ls'
remote_filelist=ssh_utils.issue_command(messagehost, command)
for jobdbname in self.jobdbnames:
db_exists=jobdbname+'\n' in remote_filelist
if (db_exists==False):
print(jobdbname +' has not been generated')
dbs_created=False
elif db_exists==True and jobdbname not in pulled_dbs:
print(jobdbname +' has been generated')
remotefile=optimizer_params.remotedir+jobdbname
localpath=os.path.join(self.datadir,str(self.generation)+jobdbname)
ssh_utils.get_file(messagehost,remotefile,localpath)
pulled_dbs.append(jobdbname) #so that it is not extracted more than once
#here pop-in the fitness evaluation
if len(pulled_dbs)==len(self.jobdbnames):
dbs_created=True
fitness=[]
for CandidateData in self.CandidateData_list:
job_num = CandidateData.job_num
dbname=str(self.generation)+'outputdb'+str(job_num)+'.sqlite'
dbpath=os.path.join(self.datadir,dbname)
exp_id=CandidateData.exp_id
connection=sqldbutils.db_connect(dbpath) #establish a database connection
query='SELECT numerical_value\
FROM output_params WHERE experiment_id=\
'+str(exp_id)+' AND parameter="fitness"'
exp_fitness=sqldbutils.execute_query(connection,query)
exp_fitness=exp_fitness.fetchall()
exp_fitness=exp_fitness[0][0]
#print('fitness: %s'%exp_fitness)
fitness.append(exp_fitness)
self.generation+=1
return fitness
def __local_evaluate(self,candidates,args):
import time
self.CandidateData_list=[]
analysis_var=self.analysis_var
#Build submitfile.submit and runx.sh files:
self.__build_condor_files(candidates) #exp_id now corresponds to position in chromosome/fitness array
fitness=[]
#submit the jobs to the grid
os.chdir(self.tmpdir)
os.system('condor_submit submitfile.submit')
#wait till you know file exists:
dbs_created=False
while (dbs_created==False):
print('checking if dbs created:')
for job_num in range(self.num_jobs):
jobdbname='outputdb'+str(job_num)+'.sqlite'
jobdbpath=os.path.join(self.datadir,jobdbname)
print(jobdbpath)
db_exists=os.path.exists(jobdbpath)
if (db_exists==False):
time.sleep(60)
dbs_created=False
break
dbs_created=True
for CandidateData in self.CandidateData_list:
job_num = CandidateData.job_num
dbname='/outputdb'+str(job_num)+'.sqlite'
dbpath=self.datadir+dbname
exp_id=CandidateData.exp_id
exp_data=sqldbutils.sim_data(dbpath,exp_id)
analysis=analysis.IClampAnalysis(exp_data.samples,exp_data.t,analysis_var,5000,10000)
exp_fitness=analysis.evaluate_fitness(optimizer_params.targets,optimizer_params.weights,cost_function=analysis.normalised_cost_function)
fitness.append(exp_fitness)
for job_num in range(self.num_jobs):
jobdbname='outputdb'+str(job_num)+'.sqlite'
jobdbpath=os.path.join(self.datadir,jobdbname)
print(jobdbpath)
os.remove(jobdbpath)
return fitness
'''
class PointBasedAnalysis(object):
def __init__(self, v, t):
self.v = numpy.array(v)
self.t = numpy.array(t)
def analyse(self, targets):
analysis_results = {}
for target in targets:
target_time = float(target.split("_")[1])
i = 0
while self.t[i] < target_time:
value = self.v[i]
i += 1
analysis_results[target] = value
return analysis_results
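# Illustrative note (an assumption based on the parsing above, not from the
# original module): target keys are expected to carry the sample time after an
# underscore, so the evaluator below can be driven with something like
#
#     targets = {'v_150.0': -55.2}   # expected trace value at t = 150.0
#     weights = {'v_150.0': 1.0}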
class PointValueEvaluator(__Evaluator):
"""
Locally-evaluates (not using cluster or grid computing) a model.
The evaluate routine runs the model and returns its fitness value
"""
def __init__(self, controller, parameters, weights, targets=None):
super(PointValueEvaluator, self).__init__(
parameters, weights, targets, controller
)
def evaluate(self, candidates, args):
print("\n>>>>> Evaluating: ")
for cand in candidates:
print(">>>>> %s" % cand)
simulations_data = self.controller.run(candidates, self.parameters)
fitness = []
for data in simulations_data:
times = data[0]
samples = data[1]
data_analysis = PointBasedAnalysis(samples, times)
fitness_value = self.evaluate_fitness(
data_analysis, self.targets, self.weights
)
fitness.append(fitness_value)
print("Fitness: %s\n" % fitness_value)
return fitness
def evaluate_fitness(
self,
data_analysis,
target_dict={},
target_weights=None,
cost_function=normalised_cost_function,
):
"""
Return the estimated fitness of the data, based on the cost function being used.
:param data_analysis: PointBasedAnalysis instance
:param target_dict: key-value pairs for targets
:param target_weights: key-value pairs for target weights
:param cost_function: cost function (callback) to assign individual targets sub-fitness.
"""
fitness = 0
analysed = data_analysis.analyse(target_dict)
for target in target_dict.keys():
target_value = target_dict[target]
if target_weights is None:
target_weight = 1
else:
if target in target_weights.keys():
target_weight = target_weights[target]
else:
target_weight = 1.0
if target_weight > 0:
# let function pick Q automatically
inc = target_weight * cost_function(analysed[target], target_value)
fitness += inc
print(
"Target %s (weight %s): target val: %s, actual: %s, fitness increment: %s"
% (target, target_weight, target_value, analysed[target], inc)
)
return fitness
|
{
"content_hash": "5360ade3f8be67903f9459747cbeb3e9",
"timestamp": "",
"source": "github",
"line_count": 878,
"max_line_length": 242,
"avg_line_length": 34.29726651480638,
"alnum_prop": 0.5579317902566998,
"repo_name": "NeuralEnsemble/neurotune",
"id": "2f8a01d699b25fc6740fe19cdcd7eb60af9d26ff",
"size": "30113",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neurotune/evaluators.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "62856"
},
{
"name": "Shell",
"bytes": "665"
}
],
"symlink_target": ""
}
|
import os
from numpy.distutils.core import setup
from numpy.distutils.misc_util import Configuration
from numpy import get_include
from scipy._build_utils import numpy_nodepr_api
def configuration(parent_package='', top_path=None):
config = Configuration('ndimage', parent_package, top_path)
include_dirs = ['src',
get_include(),
os.path.join(os.path.dirname(__file__), '..', '_lib', 'src')]
config.add_extension("_nd_image",
sources=["src/nd_image.c",
"src/ni_filters.c",
"src/ni_fourier.c",
"src/ni_interpolation.c",
"src/ni_measure.c",
"src/ni_morphology.c",
"src/ni_splines.c",
"src/ni_support.c"],
include_dirs=include_dirs,
**numpy_nodepr_api)
# Cython wants the .c and .pyx to have the underscore.
config.add_extension("_ni_label",
sources=["src/_ni_label.c",],
include_dirs=['src']+[get_include()])
config.add_extension("_ctest",
sources=["src/_ctest.c"],
include_dirs=[get_include()],
**numpy_nodepr_api)
_define_macros = [("OLDAPI", 1)]
if 'define_macros' in numpy_nodepr_api:
_define_macros.extend(numpy_nodepr_api['define_macros'])
config.add_extension("_ctest_oldapi",
sources=["src/_ctest.c"],
include_dirs=[get_include()],
define_macros=_define_macros)
config.add_extension("_cytest",
sources=["src/_cytest.c"])
config.add_data_dir('tests')
return config
if __name__ == '__main__':
setup(**configuration(top_path='').todict())
|
{
"content_hash": "ad7921d5df21d51cd5d62e818f77bd3e",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 81,
"avg_line_length": 32.01754385964912,
"alnum_prop": 0.5210958904109589,
"repo_name": "person142/scipy",
"id": "d59462951e163b6f6bf18fbbc65b804561e9632e",
"size": "1825",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "scipy/ndimage/setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4399737"
},
{
"name": "C++",
"bytes": "654192"
},
{
"name": "Dockerfile",
"bytes": "1291"
},
{
"name": "Fortran",
"bytes": "5368529"
},
{
"name": "MATLAB",
"bytes": "4346"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Python",
"bytes": "12769355"
},
{
"name": "Shell",
"bytes": "538"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
}
|
__author__ = 'Tara Crittenden'
# Displays the state of the game in a simple text format.
import Observer
from Message import *
class Display(Observer.Observer):
#Determine which method to display
def notify(self, msg):
if msg.msgtype == 1:
#start of a tournament
self.display_start_tournament(msg)
elif msg.msgtype == 2:
#end of a tournament
self.display_end_tournament(msg)
elif msg.msgtype == 3:
#start of a match
self.display_start_match(msg)
elif msg.msgtype == 4:
#end of a match
self.display_end_match(msg)
elif msg.msgtype == 5:
#start of a round
self.display_start_round(msg)
elif msg.msgtype == 6:
#end of a round
self.display_end_round(msg)
else:
print('Unknown message type')
    #Provides easy readability
def indent_cushion(self):
for i in range(4):
print('+')
    #Helper method for deconstructing the info portion of an end round message
#Returns the char representation of the move
def get_move(self, mademove):
"""
:param mademove: move that was made in int form
:return: move that was made in char form
"""
if mademove == 0:
return 'Rock'
elif mademove == 1:
return 'Paper'
elif mademove == 2:
return 'Scissors'
#Display the start of a tournament
def display_start_tournament(self, msg):
"""
:param msg: message to be displayed
"""
self.indent_cushion()
print(' Tournament Start! ')
self.indent_cushion()
m = Message.get_players(msg)
print('\nPlayers: ' + m)
#assuming for the time being that info will hold the specified game
m = Message.get_info(msg)
print('\nGame: ' + m)
#Display the end of a tournament
def display_end_tournament(self, msg):
"""
:param msg: message to be displayed
"""
self.indent_cushion()
print(' Tournament End! ')
self.indent_cushion()
#assuming for the time being that info will hold the winner of the tournament
m = Message.get_info(msg)
print('\nWinner: ' + m)
self.indent_cushion()
self.indent_cushion()
print('\n')
self.indent_cushion()
self.indent_cushion()
#Display the start of a match
def display_start_match(self, msg):
"""
:param msg: message to be displayed
"""
self.indent_cushion()
print(' Match Start! ')
self.indent_cushion()
players = Message.get_players(msg)
print('\nPlayers: ')
for player in players:
print(player.get_name())
#Display the end of a match
def display_end_match(self, msg):
"""
:param msg: message to be displayed
"""
self.indent_cushion()
print ("Match end!")
# TODO fix
"""
self.indent_cushion()
print(' Match End! ')
self.indent_cushion()
m = Message.get_info(msg)
#r is the winner
#winnings is the number of times that r won
if m[1] > m[2]:
#player 1 won
r = 'Player 1 '
winnings = m[1]
else:
#player 2 won
r = 'Player 2 '
winnings = m[2]
print('Winner: ' + r + '( ' + winnings + ' out of ' + (m[1] + m[2]) + ')')
#
"""
#Display the start of a round
def display_start_round(self, msg):
"""
:param msg: message to be displayed
"""
pass
#Display the end of a round
def display_end_round(self, msg):
"""
:param msg: message to be displayed
"""
print('\nRound Results: ')
m = Message.get_info(msg)
#r is the winner of the round
if m[1] == (0,0):
r = 'Tied'
elif m[1] == (1,0):
#player 1 won
r = 'Player 1 '
#elif m[1] == (0,1):
else:
#player 2 won
r = 'Player 2 '
print('Winner: ' + r)
#find the moves that were played during this round
moves = m[0]
a = self.get_move(moves[0])
b = self.get_move(moves[1])
print(' Moves made: Player 1: ' + a + ' Player 2: ' + b)
|
{
"content_hash": "b17daa61a6021472938964e2311577f2",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 85,
"avg_line_length": 27.51219512195122,
"alnum_prop": 0.5152925531914894,
"repo_name": "PaulieC/RPSPlayer",
"id": "e35db8f9e3bd499f88bc4b5d8af2081ef0fb6613",
"size": "4512",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Display.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "39259"
}
],
"symlink_target": ""
}
|
"""
wandb TensorBoard integration module.
"""
from .log import _log, log, reset_state, tf_summary_to_dict # noqa: F401
from .monkeypatch import patch, unpatch
__all__ = [
"patch",
"unpatch",
"log",
]
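# Illustrative usage sketch (not part of this module); assumes a wandb run has
# already been started and that TensorBoard event files are written under "logs":
#   import wandb.integration.tensorboard as wandb_tb
#   wandb_tb.patch(root_logdir="logs")  # the root_logdir argument is an assumption
#   ...  # training code that writes tf.summary events
#   wandb_tb.unpatch()  # restore the original TensorBoard behaviour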
|
{
"content_hash": "cf8248438d1905e257e4b85d334bf497",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 73,
"avg_line_length": 17.916666666666668,
"alnum_prop": 0.6325581395348837,
"repo_name": "wandb/client",
"id": "265a6e3bb55543f6170fcdad236367a3b3b2002f",
"size": "215",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wandb/integration/tensorboard/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4902"
},
{
"name": "Dockerfile",
"bytes": "3491"
},
{
"name": "Jupyter Notebook",
"bytes": "7751"
},
{
"name": "Makefile",
"bytes": "1863"
},
{
"name": "Objective-C",
"bytes": "80764"
},
{
"name": "Python",
"bytes": "3634228"
},
{
"name": "Shell",
"bytes": "4662"
}
],
"symlink_target": ""
}
|
import functools
import logging
from django import forms
from django.conf import settings
from django.utils.translation import ugettext as _
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from olympia import amo
from olympia.access import acl
from olympia.addons.models import Addon
from olympia.amo.decorators import use_master
from olympia.api.authentication import JWTKeyAuthentication
from olympia.devhub.views import handle_upload
from olympia.files.models import FileUpload
from olympia.files.utils import parse_addon
from olympia.versions import views as version_views
from olympia.versions.models import Version
from olympia.signing.serializers import FileUploadSerializer
log = logging.getLogger('signing')
def handle_read_only_mode(fn):
@functools.wraps(fn)
def inner(*args, **kwargs):
if settings.READ_ONLY:
return Response(
{'error': _("Some features are temporarily disabled while we "
"perform website maintenance. We'll be back to "
"full capacity shortly.")},
status=503)
else:
return fn(*args, **kwargs)
return inner
def with_addon(allow_missing=False):
    """Call the view function with an addon instead of a guid. This will try
    to find an addon with the guid and verify the user's permissions. If the
    add-on is not found it will 404 when allow_missing is False, otherwise it
    will call the view with addon set to None."""
def wrapper(fn):
@functools.wraps(fn)
def inner(view, request, **kwargs):
guid = kwargs.get('guid', None)
try:
if guid is None:
raise Addon.DoesNotExist('No GUID')
addon = Addon.unfiltered.get(guid=guid)
except Addon.DoesNotExist:
if allow_missing:
addon = None
else:
return Response({'error': _('Could not find add-on with '
'id "{}".').format(guid)},
status=status.HTTP_404_NOT_FOUND)
# Call the view if there is no add-on, the current user is an
            # author of the add-on or the current user is an admin and the
# request is a GET.
has_perm = (
addon is None or
(addon.has_author(request.user) or
(request.method == 'GET' and
acl.action_allowed_user(
request.user, 'Addons', 'Edit'))))
if has_perm:
return fn(view, request, addon=addon, **kwargs)
else:
return Response(
{'error': _('You do not own this addon.')},
status=status.HTTP_403_FORBIDDEN)
return inner
return wrapper
class VersionView(APIView):
authentication_classes = [JWTKeyAuthentication]
permission_classes = [IsAuthenticated]
@handle_read_only_mode
def post(self, request, *args, **kwargs):
version_string = request.data.get('version', None)
try:
file_upload, _ = self.handle_upload(request, None, version_string)
except forms.ValidationError as exc:
return Response(
{'error': exc.message},
status=exc.code or status.HTTP_400_BAD_REQUEST)
return Response(FileUploadSerializer(file_upload).data,
status=status.HTTP_201_CREATED)
@handle_read_only_mode
@with_addon(allow_missing=True)
def put(self, request, addon, version_string, guid=None):
try:
file_upload, created = self.handle_upload(
request, addon, version_string, guid=guid)
except forms.ValidationError as exc:
return Response(
{'error': exc.message},
status=exc.code or status.HTTP_400_BAD_REQUEST)
status_code = (
status.HTTP_201_CREATED if created else status.HTTP_202_ACCEPTED)
return Response(FileUploadSerializer(file_upload).data,
status=status_code)
def handle_upload(self, request, addon, version_string, guid=None):
if 'upload' in request.FILES:
filedata = request.FILES['upload']
else:
raise forms.ValidationError(
_(u'Missing "upload" key in multipart file data.'),
status.HTTP_400_BAD_REQUEST)
# Parse the file to get and validate package data with the addon.
pkg = parse_addon(filedata, addon)
if not acl.submission_allowed(request.user, pkg):
raise forms.ValidationError(
_(u'You cannot submit this type of add-on'),
status.HTTP_400_BAD_REQUEST)
if addon is not None and addon.status == amo.STATUS_DISABLED:
raise forms.ValidationError(
_('You cannot add versions to an addon that has status: %s.' %
amo.STATUS_CHOICES_ADDON[amo.STATUS_DISABLED]),
status.HTTP_400_BAD_REQUEST)
version_string = version_string or pkg['version']
if version_string and pkg['version'] != version_string:
raise forms.ValidationError(
_('Version does not match the manifest file.'),
status.HTTP_400_BAD_REQUEST)
if (addon is not None and
addon.versions.filter(version=version_string).exists()):
raise forms.ValidationError(
_('Version already exists.'),
status.HTTP_409_CONFLICT)
package_guid = pkg.get('guid', None)
dont_allow_no_guid = (
not addon and not package_guid and
not pkg.get('is_webextension', False))
if dont_allow_no_guid:
raise forms.ValidationError(
_('Only WebExtensions are allowed to omit the GUID'),
status.HTTP_400_BAD_REQUEST)
if guid is not None and not addon and not package_guid:
# No guid was present in the package, but one was provided in the
# URL, so we take it instead of generating one ourselves. But
# first, validate it properly.
if not amo.ADDON_GUID_PATTERN.match(guid):
raise forms.ValidationError(
_('Invalid GUID in URL'), status.HTTP_400_BAD_REQUEST)
pkg['guid'] = guid
# channel will be ignored for new addons.
if addon is None:
channel = amo.RELEASE_CHANNEL_UNLISTED # New is always unlisted.
addon = Addon.create_addon_from_upload_data(
data=pkg, user=request.user, upload=filedata, channel=channel)
created = True
else:
created = False
channel_param = request.POST.get('channel')
channel = amo.CHANNEL_CHOICES_LOOKUP.get(channel_param)
if not channel:
last_version = (
addon.find_latest_version_including_rejected(None))
if last_version:
channel = last_version.channel
else:
channel = amo.RELEASE_CHANNEL_UNLISTED # Treat as new.
will_have_listed = channel == amo.RELEASE_CHANNEL_LISTED
if not addon.has_complete_metadata(
has_listed_versions=will_have_listed):
raise forms.ValidationError(
_('You cannot add a listed version to this addon '
'via the API due to missing metadata. '
'Please submit via the website'),
status.HTTP_400_BAD_REQUEST)
file_upload = handle_upload(
filedata=filedata, user=request.user, addon=addon, submit=True,
channel=channel)
return file_upload, created
@use_master
@with_addon()
def get(self, request, addon, version_string, uuid=None, guid=None):
file_upload_qs = FileUpload.objects.filter(
addon=addon, version=version_string)
try:
if uuid is None:
file_upload = file_upload_qs.latest()
log.info('getting latest upload for {addon} {version}: '
'{file_upload.uuid}'.format(
addon=addon, version=version_string,
file_upload=file_upload))
else:
file_upload = file_upload_qs.get(uuid=uuid)
log.info('getting specific upload for {addon} {version} '
'{uuid}: {file_upload.uuid}'.format(
addon=addon, version=version_string, uuid=uuid,
file_upload=file_upload))
except FileUpload.DoesNotExist:
return Response(
{'error': _('No uploaded file for that addon and version.')},
status=status.HTTP_404_NOT_FOUND)
try:
version = addon.versions.filter(version=version_string).latest()
except Version.DoesNotExist:
version = None
serializer = FileUploadSerializer(file_upload, version=version)
return Response(serializer.data)
class SignedFile(APIView):
authentication_classes = [JWTKeyAuthentication]
permission_classes = [IsAuthenticated]
@use_master
def get(self, request, file_id):
return version_views.download_file(request, file_id)
|
{
"content_hash": "5c35cb1e3855a70213f511e852865391",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 78,
"avg_line_length": 39.732510288065846,
"alnum_prop": 0.5813568099430347,
"repo_name": "mstriemer/addons-server",
"id": "3299829133a36ec61af515fa2ba90b72f3e155fb",
"size": "9655",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/olympia/signing/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "834050"
},
{
"name": "HTML",
"bytes": "729865"
},
{
"name": "JavaScript",
"bytes": "1324474"
},
{
"name": "Makefile",
"bytes": "7937"
},
{
"name": "PLSQL",
"bytes": "74"
},
{
"name": "PLpgSQL",
"bytes": "2381"
},
{
"name": "Python",
"bytes": "4348593"
},
{
"name": "SQLPL",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "9643"
},
{
"name": "Smarty",
"bytes": "1824"
}
],
"symlink_target": ""
}
|
import sys, random
from PyQt5.QtWidgets import QWidget, QApplication
from PyQt5.QtGui import QPainter, QColor, QPen
from PyQt5.QtCore import Qt
class Example (QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.setGeometry(300, 300, 280, 170)
self.setWindowTitle('Points')
self.show()
def paintEvent(self, e):
qp = QPainter()
qp.begin(self)
self.drawPoints(qp)
qp.end()
def drawPoints(self, qp):
qp.setPen(Qt.blue)
        size = self.size()
        for i in range(1000):
            x = random.randint(1, size.width() - 1)
            y = random.randint(1, size.height() - 1)
qp.drawPoint(x, y)
if __name__ == '__main__':
app = QApplication(sys.argv)
    ex = Example()
sys.exit(app.exec_())
|
{
"content_hash": "bf7173c047ef8bc1bd2abc57671fca39",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 51,
"avg_line_length": 26.375,
"alnum_prop": 0.5675355450236966,
"repo_name": "potato16/pythonl",
"id": "c29506f372bb111830c70d1cea7eac523c891987",
"size": "844",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dirtylook/drawpoint.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3381"
},
{
"name": "OpenEdge ABL",
"bytes": "5002"
},
{
"name": "Python",
"bytes": "80892"
}
],
"symlink_target": ""
}
|
class BaseKVMComponent:
def __init__(self, controller):
self.controller = controller
@property
def is_created(self):
return True
@property
def is_started(self):
return True
    def create(self):
        raise NotImplementedError()
    def start(self):
        raise NotImplementedError()
    def delete(self):
        raise NotImplementedError()
    def stop(self):
        raise NotImplementedError()
def to_xml(self):
raise NotImplementedError()
@classmethod
def from_xml(cls, controller, xml):
raise NotImplementedError()
@classmethod
def get_by_name(cls, controller, name):
raise NotImplementedError()
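# Illustrative sketch only (not part of this module): a minimal subclass showing
# how the hooks above are meant to be overridden. The component name, attribute
# names and XML layout below are assumptions for demonstration purposes.
class _ExampleNetwork(BaseKVMComponent):
    def __init__(self, controller, name):
        super(_ExampleNetwork, self).__init__(controller)
        self.name = name
    def to_xml(self):
        return "<network><name>%s</name></network>" % self.name
    @classmethod
    def from_xml(cls, controller, xml):
        # naive parsing, for illustration only
        name = xml.split("<name>")[1].split("</name>")[0]
        return cls(controller, name)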
|
{
"content_hash": "d39ae47af0483b29c05fe5c6da0cb6d0",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 43,
"avg_line_length": 20.228571428571428,
"alnum_prop": 0.6271186440677966,
"repo_name": "Jumpscale/jumpscale_core8",
"id": "00b060ca90f6f51568bc7fdce50bb80cd4c8cce1",
"size": "708",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/JumpScale/sal/kvm/BaseKVMComponent.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1113"
},
{
"name": "Cap'n Proto",
"bytes": "9033"
},
{
"name": "Lua",
"bytes": "12538"
},
{
"name": "Python",
"bytes": "4343122"
},
{
"name": "Shell",
"bytes": "7091"
}
],
"symlink_target": ""
}
|
from __future__ import division, print_function, absolute_import
import functools
import operator
import numpy as np
from scipy.linalg import (get_lapack_funcs, LinAlgError,
cholesky_banded, cho_solve_banded)
from . import _bspl
from . import _fitpack_impl
from . import _fitpack as _dierckx
__all__ = ["BSpline", "make_interp_spline", "make_lsq_spline"]
# copy-paste from interpolate.py
def prod(x):
"""Product of a list of numbers; ~40x faster vs np.prod for Python tuples"""
if len(x) == 0:
return 1
return functools.reduce(operator.mul, x)
def _get_dtype(dtype):
"""Return np.complex128 for complex dtypes, np.float64 otherwise."""
if np.issubdtype(dtype, np.complexfloating):
return np.complex_
else:
return np.float_
def _as_float_array(x, check_finite=False):
"""Convert the input into a C contiguous float array.
NB: Upcasts half- and single-precision floats to double precision.
"""
x = np.ascontiguousarray(x)
dtyp = _get_dtype(x.dtype)
x = x.astype(dtyp, copy=False)
if check_finite and not np.isfinite(x).all():
raise ValueError("Array must not contain infs or nans.")
return x
class BSpline(object):
r"""Univariate spline in the B-spline basis.
.. math::
S(x) = \sum_{j=0}^{n-1} c_j B_{j, k; t}(x)
where :math:`B_{j, k; t}` are B-spline basis functions of degree `k`
and knots `t`.
Parameters
----------
t : ndarray, shape (n+k+1,)
knots
c : ndarray, shape (>=n, ...)
spline coefficients
k : int
B-spline order
extrapolate : bool, optional
whether to extrapolate beyond the base interval, ``t[k] .. t[n]``,
or to return nans.
If True, extrapolates the first and last polynomial pieces of b-spline
functions active on the base interval.
Default is True.
axis : int, optional
Interpolation axis. Default is zero.
Attributes
----------
t : ndarray
knot vector
c : ndarray
spline coefficients
k : int
spline degree
extrapolate : bool
If True, extrapolates the first and last polynomial pieces of b-spline
functions active on the base interval.
axis : int
Interpolation axis.
tck : tuple
A read-only equivalent of ``(self.t, self.c, self.k)``
Methods
-------
__call__
basis_element
derivative
antiderivative
integrate
construct_fast
Notes
-----
B-spline basis elements are defined via
.. math::
B_{i, 0}(x) = 1, \textrm{if $t_i \le x < t_{i+1}$, otherwise $0$,}
B_{i, k}(x) = \frac{x - t_i}{t_{i+k} - t_i} B_{i, k-1}(x)
+ \frac{t_{i+k+1} - x}{t_{i+k+1} - t_{i+1}} B_{i+1, k-1}(x)
**Implementation details**
- At least ``k+1`` coefficients are required for a spline of degree `k`,
so that ``n >= k+1``. Additional coefficients, ``c[j]`` with
``j > n``, are ignored.
- B-spline basis elements of degree `k` form a partition of unity on the
*base interval*, ``t[k] <= x <= t[n]``.
Examples
--------
Translating the recursive definition of B-splines into Python code, we have:
>>> def B(x, k, i, t):
... if k == 0:
... return 1.0 if t[i] <= x < t[i+1] else 0.0
... if t[i+k] == t[i]:
... c1 = 0.0
... else:
... c1 = (x - t[i])/(t[i+k] - t[i]) * B(x, k-1, i, t)
... if t[i+k+1] == t[i+1]:
... c2 = 0.0
... else:
... c2 = (t[i+k+1] - x)/(t[i+k+1] - t[i+1]) * B(x, k-1, i+1, t)
... return c1 + c2
>>> def bspline(x, t, c, k):
... n = len(t) - k - 1
... assert (n >= k+1) and (len(c) >= n)
... return sum(c[i] * B(x, k, i, t) for i in range(n))
Note that this is an inefficient (if straightforward) way to
evaluate B-splines --- this spline class does it in an equivalent,
but much more efficient way.
Here we construct a quadratic spline function on the base interval
``2 <= x <= 4`` and compare with the naive way of evaluating the spline:
>>> from scipy.interpolate import BSpline
>>> k = 2
>>> t = [0, 1, 2, 3, 4, 5, 6]
>>> c = [-1, 2, 0, -1]
>>> spl = BSpline(t, c, k)
>>> spl(2.5)
array(1.375)
>>> bspline(2.5, t, c, k)
1.375
Note that outside of the base interval results differ. This is because
`BSpline` extrapolates the first and last polynomial pieces of b-spline
functions active on the base interval.
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> xx = np.linspace(1.5, 4.5, 50)
>>> ax.plot(xx, [bspline(x, t, c ,k) for x in xx], 'r-', lw=3, label='naive')
>>> ax.plot(xx, spl(xx), 'b-', lw=4, alpha=0.7, label='BSpline')
>>> ax.grid(True)
>>> ax.legend(loc='best')
>>> plt.show()
References
----------
.. [1] Tom Lyche and Knut Morken, Spline methods,
http://www.uio.no/studier/emner/matnat/ifi/INF-MAT5340/v05/undervisningsmateriale/
.. [2] Carl de Boor, A practical guide to splines, Springer, 2001.
"""
def __init__(self, t, c, k, extrapolate=True, axis=0):
super(BSpline, self).__init__()
self.k = int(k)
self.c = np.asarray(c)
self.t = np.ascontiguousarray(t, dtype=np.float64)
self.extrapolate = bool(extrapolate)
n = self.t.shape[0] - self.k - 1
if not (0 <= axis < self.c.ndim):
raise ValueError("%s must be between 0 and %s" % (axis, c.ndim))
self.axis = axis
if axis != 0:
# roll the interpolation axis to be the first one in self.c
# More specifically, the target shape for self.c is (n, ...),
# and axis !=0 means that we have c.shape (..., n, ...)
# ^
# axis
self.c = np.rollaxis(self.c, axis)
if k < 0:
raise ValueError("Spline order cannot be negative.")
if int(k) != k:
raise ValueError("Spline order must be integer.")
if self.t.ndim != 1:
raise ValueError("Knot vector must be one-dimensional.")
if n < self.k + 1:
raise ValueError("Need at least %d knots for degree %d" %
(2*k + 2, k))
if (np.diff(self.t) < 0).any():
raise ValueError("Knots must be in a non-decreasing order.")
if len(np.unique(self.t[k:n+1])) < 2:
raise ValueError("Need at least two internal knots.")
if not np.isfinite(self.t).all():
raise ValueError("Knots should not have nans or infs.")
if self.c.ndim < 1:
raise ValueError("Coefficients must be at least 1-dimensional.")
if self.c.shape[0] < n:
raise ValueError("Knots, coefficients and degree are inconsistent.")
dt = _get_dtype(self.c.dtype)
self.c = np.ascontiguousarray(self.c, dtype=dt)
@classmethod
def construct_fast(cls, t, c, k, extrapolate=True, axis=0):
"""Construct a spline without making checks.
Accepts same parameters as the regular constructor. Input arrays
`t` and `c` must of correct shape and dtype.
"""
self = object.__new__(cls)
self.t, self.c, self.k = t, c, k
self.extrapolate = extrapolate
self.axis = axis
return self
@property
    def tck(self):
        """Equivalent to ``(self.t, self.c, self.k)`` (read-only).
        """
return self.t, self.c, self.k
@classmethod
def basis_element(cls, t, extrapolate=True):
"""Return a B-spline basis element ``B(x | t[0], ..., t[k+1])``.
Parameters
----------
t : ndarray, shape (k+1,)
internal knots
extrapolate : bool, optional
whether to extrapolate beyond the base interval, ``t[0] .. t[k+1]``,
or to return nans. Default is True.
Returns
-------
basis_element : callable
A callable representing a B-spline basis element for the knot
vector `t`.
Notes
-----
The order of the b-spline, `k`, is inferred from the length of `t` as
``len(t)-2``. The knot vector is constructed by appending and prepending
``k+1`` elements to internal knots `t`.
Examples
--------
Construct a cubic b-spline:
>>> from scipy.interpolate import BSpline
>>> b = BSpline.basis_element([0, 1, 2, 3, 4])
>>> k = b.k
>>> b.t[k:-k]
array([ 0., 1., 2., 3., 4.])
>>> k
3
Construct a second order b-spline on ``[0, 1, 1, 2]``, and compare
to its explicit form:
>>> t = [-1, 0, 1, 1, 2]
>>> b = BSpline.basis_element(t[1:])
>>> def f(x):
... return np.where(x < 1, x*x, (2. - x)**2)
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> x = np.linspace(0, 2, 51)
>>> ax.plot(x, b(x), 'g', lw=3)
>>> ax.plot(x, f(x), 'r', lw=8, alpha=0.4)
>>> ax.grid(True)
>>> plt.show()
"""
k = len(t) - 2
t = _as_float_array(t)
t = np.r_[(t[0]-1,) * k, t, (t[-1]+1,) * k]
c = np.zeros_like(t)
c[k] = 1.
return cls.construct_fast(t, c, k, extrapolate)
def __call__(self, x, nu=0, extrapolate=None):
"""
Evaluate a spline function.
Parameters
----------
x : array_like
points to evaluate the spline at.
nu: int, optional
derivative to evaluate (default is 0).
extrapolate : bool, optional
whether to extrapolate based on the first and last intervals
or return nans. Default is `self.extrapolate`.
Returns
-------
y : array_like
Shape is determined by replacing the interpolation axis
in the coefficient array with the shape of `x`.
"""
if extrapolate is None:
extrapolate = self.extrapolate
x = np.asarray(x)
x_shape, x_ndim = x.shape, x.ndim
x = np.ascontiguousarray(x.ravel(), dtype=np.float_)
out = np.empty((len(x), prod(self.c.shape[1:])), dtype=self.c.dtype)
self._ensure_c_contiguous()
self._evaluate(x, nu, extrapolate, out)
out = out.reshape(x_shape + self.c.shape[1:])
if self.axis != 0:
# transpose to move the calculated values to the interpolation axis
l = list(range(out.ndim))
l = l[x_ndim:x_ndim+self.axis] + l[:x_ndim] + l[x_ndim+self.axis:]
out = out.transpose(l)
return out
def _evaluate(self, xp, nu, extrapolate, out):
_bspl.evaluate_spline(self.t, self.c.reshape(self.c.shape[0], -1),
self.k, xp, nu, extrapolate, out)
def _ensure_c_contiguous(self):
"""
c and t may be modified by the user. The Cython code expects
that they are C contiguous.
"""
if not self.t.flags.c_contiguous:
self.t = self.t.copy()
if not self.c.flags.c_contiguous:
self.c = self.c.copy()
def derivative(self, nu=1):
"""Return a b-spline representing the derivative.
Parameters
----------
nu : int, optional
Derivative order.
Default is 1.
Returns
-------
b : BSpline object
A new instance representing the derivative.
See Also
--------
splder, splantider
"""
c = self.c
# pad the c array if needed
ct = len(self.t) - len(c)
if ct > 0:
c = np.r_[c, np.zeros((ct,) + c.shape[1:])]
tck = _fitpack_impl.splder((self.t, c, self.k), nu)
return self.construct_fast(*tck, extrapolate=self.extrapolate,
axis=self.axis)
def antiderivative(self, nu=1):
"""Return a b-spline representing the antiderivative.
Parameters
----------
nu : int, optional
Antiderivative order. Default is 1.
Returns
-------
b : BSpline object
A new instance representing the antiderivative.
See Also
--------
splder, splantider
"""
c = self.c
# pad the c array if needed
ct = len(self.t) - len(c)
if ct > 0:
c = np.r_[c, np.zeros((ct,) + c.shape[1:])]
tck = _fitpack_impl.splantider((self.t, c, self.k), nu)
return self.construct_fast(*tck, extrapolate=self.extrapolate,
axis=self.axis)
def integrate(self, a, b, extrapolate=None):
"""Compute a definite integral of the spline.
Parameters
----------
a : float
Lower limit of integration.
b : float
Upper limit of integration.
extrapolate : bool, optional
whether to extrapolate beyond the base interval, ``t[k] .. t[-k-1]``,
or take the spline to be zero outside of the base interval.
Default is True.
Returns
-------
I : array_like
Definite integral of the spline over the interval ``[a, b]``.
Examples
--------
Construct the linear spline ``x if x < 1 else 2 - x`` on the base
interval :math:`[0, 2]`, and integrate it
>>> from scipy.interpolate import BSpline
>>> b = BSpline.basis_element([0, 1, 2])
>>> b.integrate(0, 1)
array(0.5)
If the integration limits are outside of the base interval, the result
is controlled by the `extrapolate` parameter
>>> b.integrate(-1, 1)
array(0.0)
>>> b.integrate(-1, 1, extrapolate=False)
array(0.5)
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> ax.grid(True)
>>> ax.axvline(0, c='r', lw=5, alpha=0.5) # base interval
>>> ax.axvline(2, c='r', lw=5, alpha=0.5)
>>> xx = [-1, 1, 2]
>>> ax.plot(xx, b(xx))
>>> plt.show()
"""
if extrapolate is None:
extrapolate = self.extrapolate
if not extrapolate:
# shrink the integration interval, if needed
a = max(a, self.t[self.k])
b = min(b, self.t[-self.k - 1])
if self.c.ndim == 1:
# fast path: use FITPACK's routine (cf _fitpack_impl.splint)
t, c, k = self.tck
aint, wrk = _dierckx._splint(t, c, k, a, b)
return aint
# prepare t & c
self._ensure_c_contiguous()
# compute the antiderivative
c = self.c
ct = len(self.t) - len(c)
if ct > 0:
c = np.r_[c, np.zeros((ct,) + c.shape[1:])]
t, c, k = _fitpack_impl.splantider((self.t, c, self.k), 1)
# evaluate the diff of antiderivatives
x = np.asarray([a, b], dtype=np.float_)
out = np.empty((2, prod(c.shape[1:])), dtype=c.dtype)
_bspl.evaluate_spline(t, c.reshape(c.shape[0], -1),
k, x, 0, extrapolate, out)
out = out[1] - out[0]
return out.reshape(c.shape[1:])
#################################
# Interpolating spline helpers #
#################################
def _not_a_knot(x, k):
"""Given data x, construct the knot vector w/ not-a-knot BC.
cf de Boor, XIII(12)."""
x = np.asarray(x)
if k % 2 != 1:
raise ValueError("Odd degree for now only. Got %s." % k)
m = (k - 1) // 2
t = x[m+1:-m-1]
t = np.r_[(x[0],)*(k+1), t, (x[-1],)*(k+1)]
return t
def _augknt(x, k):
"""Construct a knot vector appropriate for the order-k interpolation."""
return np.r_[(x[0],)*k, x, (x[-1],)*k]
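# Illustrative check (not part of the original module): for x = [0., 1., 2., 3.]
# and k = 2, _augknt returns [0., 0., 0., 1., 2., 3., 3., 3.], i.e. each boundary
# site is repeated k extra times so that the knot vector is (k+1)-regular.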
def make_interp_spline(x, y, k=3, t=None, bc_type=None, axis=0,
check_finite=True):
"""Compute the (coefficients of) interpolating B-spline.
Parameters
----------
x : array_like, shape (n,)
Abscissas.
y : array_like, shape (n, ...)
Ordinates.
k : int, optional
B-spline degree. Default is cubic, k=3.
t : array_like, shape (nt + k + 1,), optional.
Knots.
The number of knots needs to agree with the number of datapoints and
the number of derivatives at the edges. Specifically, ``nt - n`` must
equal ``len(deriv_l) + len(deriv_r)``.
bc_type : 2-tuple or None
Boundary conditions.
Default is None, which means choosing the boundary conditions
automatically. Otherwise, it must be a length-two tuple where the first
element sets the boundary conditions at ``x[0]`` and the second
element sets the boundary conditions at ``x[-1]``. Each of these must
be an iterable of pairs ``(order, value)`` which gives the values of
derivatives of specified orders at the given edge of the interpolation
interval.
axis : int, optional
Interpolation axis. Default is 0.
check_finite : bool, optional
Whether to check that the input arrays contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default is True.
Returns
-------
b : a BSpline object of the degree ``k`` and with knots ``t``.
Examples
--------
Use cubic interpolation on Chebyshev nodes:
>>> def cheb_nodes(N):
... jj = 2.*np.arange(N) + 1
... x = np.cos(np.pi * jj / 2 / N)[::-1]
... return x
>>> x = cheb_nodes(20)
>>> y = np.sqrt(1 - x**2)
>>> from scipy.interpolate import BSpline, make_interp_spline
>>> b = make_interp_spline(x, y)
>>> np.allclose(b(x), y)
True
Note that the default is a cubic spline with a not-a-knot boundary condition
>>> b.k
3
Here we use a 'natural' spline, with zero 2nd derivatives at edges:
>>> l, r = [(2, 0)], [(2, 0)]
>>> b_n = make_interp_spline(x, y, bc_type=(l, r))
>>> np.allclose(b_n(x), y)
True
>>> x0, x1 = x[0], x[-1]
>>> np.allclose([b_n(x0, 2), b_n(x1, 2)], [0, 0])
True
Interpolation of parametric curves is also supported. As an example, we
compute a discretization of a snail curve in polar coordinates
>>> phi = np.linspace(0, 2.*np.pi, 40)
>>> r = 0.3 + np.cos(phi)
>>> x, y = r*np.cos(phi), r*np.sin(phi) # convert to Cartesian coordinates
Build an interpolating curve, parameterizing it by the angle
>>> from scipy.interpolate import make_interp_spline
>>> spl = make_interp_spline(phi, np.c_[x, y])
Evaluate the interpolant on a finer grid (note that we transpose the result
to unpack it into a pair of x- and y-arrays)
>>> phi_new = np.linspace(0, 2.*np.pi, 100)
>>> x_new, y_new = spl(phi_new).T
Plot the result
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o')
>>> plt.plot(x_new, y_new, '-')
>>> plt.show()
See Also
--------
BSpline : base class representing the B-spline objects
CubicSpline : a cubic spline in the polynomial basis
make_lsq_spline : a similar factory function for spline fitting
UnivariateSpline : a wrapper over FITPACK spline fitting routines
splrep : a wrapper over FITPACK spline fitting routines
"""
if bc_type is None:
bc_type = (None, None)
deriv_l, deriv_r = bc_type
# special-case k=0 right away
if k == 0:
if any(_ is not None for _ in (t, deriv_l, deriv_r)):
raise ValueError("Too much info for k=0: t and bc_type can only "
"be None.")
x = _as_float_array(x, check_finite)
t = np.r_[x, x[-1]]
c = np.asarray(y)
c = np.ascontiguousarray(c, dtype=_get_dtype(c.dtype))
return BSpline.construct_fast(t, c, k, axis=axis)
# special-case k=1 (e.g., Lyche and Morken, Eq.(2.16))
if k == 1 and t is None:
if not (deriv_l is None and deriv_r is None):
raise ValueError("Too much info for k=1: bc_type can only be None.")
x = _as_float_array(x, check_finite)
t = np.r_[x[0], x, x[-1]]
c = np.asarray(y)
c = np.ascontiguousarray(c, dtype=_get_dtype(c.dtype))
return BSpline.construct_fast(t, c, k, axis=axis)
# come up with a sensible knot vector, if needed
if t is None:
if deriv_l is None and deriv_r is None:
if k == 2:
# OK, it's a bit ad hoc: Greville sites + omit
# 2nd and 2nd-to-last points, a la not-a-knot
t = (x[1:] + x[:-1]) / 2.
t = np.r_[(x[0],)*(k+1),
t[1:-1],
(x[-1],)*(k+1)]
else:
t = _not_a_knot(x, k)
else:
t = _augknt(x, k)
x = _as_float_array(x, check_finite)
y = _as_float_array(y, check_finite)
t = _as_float_array(t, check_finite)
k = int(k)
axis = axis % y.ndim
y = np.rollaxis(y, axis) # now internally interp axis is zero
if x.ndim != 1 or np.any(x[1:] <= x[:-1]):
raise ValueError("Expect x to be a 1-D sorted array_like.")
if k < 0:
raise ValueError("Expect non-negative k.")
if t.ndim != 1 or np.any(t[1:] < t[:-1]):
raise ValueError("Expect t to be a 1-D sorted array_like.")
if x.size != y.shape[0]:
raise ValueError('x and y are incompatible.')
if t.size < x.size + k + 1:
raise ValueError('Got %d knots, need at least %d.' %
(t.size, x.size + k + 1))
if (x[0] < t[k]) or (x[-1] > t[-k]):
raise ValueError('Out of bounds w/ x = %s.' % x)
# Here : deriv_l, r = [(nu, value), ...]
if deriv_l is not None:
deriv_l_ords, deriv_l_vals = zip(*deriv_l)
else:
deriv_l_ords, deriv_l_vals = [], []
deriv_l_ords, deriv_l_vals = np.atleast_1d(deriv_l_ords, deriv_l_vals)
nleft = deriv_l_ords.shape[0]
if deriv_r is not None:
deriv_r_ords, deriv_r_vals = zip(*deriv_r)
else:
deriv_r_ords, deriv_r_vals = [], []
deriv_r_ords, deriv_r_vals = np.atleast_1d(deriv_r_ords, deriv_r_vals)
nright = deriv_r_ords.shape[0]
# have `n` conditions for `nt` coefficients; need nt-n derivatives
n = x.size
nt = t.size - k - 1
if nt - n != nleft + nright:
        raise ValueError("The number of derivatives at boundaries does not "
                         "match: expected %s, got %s+%s" % (nt-n, nleft, nright))
# set up the LHS: the collocation matrix + derivatives at boundaries
kl = ku = k
ab = np.zeros((2*kl + ku + 1, nt), dtype=np.float_, order='F')
_bspl._colloc(x, t, k, ab, offset=nleft)
if nleft > 0:
_bspl._handle_lhs_derivatives(t, k, x[0], ab, kl, ku, deriv_l_ords)
if nright > 0:
_bspl._handle_lhs_derivatives(t, k, x[-1], ab, kl, ku, deriv_r_ords,
offset=nt-nright)
# set up the RHS: values to interpolate (+ derivative values, if any)
extradim = prod(y.shape[1:])
rhs = np.empty((nt, extradim), dtype=y.dtype)
if nleft > 0:
rhs[:nleft] = deriv_l_vals.reshape(-1, extradim)
rhs[nleft:nt - nright] = y.reshape(-1, extradim)
if nright > 0:
rhs[nt - nright:] = deriv_r_vals.reshape(-1, extradim)
# solve Ab @ x = rhs; this is the relevant part of linalg.solve_banded
if check_finite:
ab, rhs = map(np.asarray_chkfinite, (ab, rhs))
gbsv, = get_lapack_funcs(('gbsv',), (ab, rhs))
lu, piv, c, info = gbsv(kl, ku, ab, rhs,
overwrite_ab=True, overwrite_b=True)
if info > 0:
        raise LinAlgError("Collocation matrix is singular.")
elif info < 0:
raise ValueError('illegal value in %d-th argument of internal gbsv' % -info)
c = np.ascontiguousarray(c.reshape((nt,) + y.shape[1:]))
return BSpline.construct_fast(t, c, k, axis=axis)
def make_lsq_spline(x, y, t, k=3, w=None, axis=0, check_finite=True):
r"""Compute the (coefficients of) an LSQ B-spline.
The result is a linear combination
.. math::
S(x) = \sum_j c_j B_j(x; t)
of the B-spline basis elements, :math:`B_j(x; t)`, which minimizes
.. math::
\sum_{j} \left( w_j \times (S(x_j) - y_j) \right)^2
Parameters
----------
x : array_like, shape (m,)
Abscissas.
y : array_like, shape (m, ...)
Ordinates.
t : array_like, shape (n + k + 1,).
Knots.
Knots and data points must satisfy Schoenberg-Whitney conditions.
k : int, optional
B-spline degree. Default is cubic, k=3.
w : array_like, shape (n,), optional
Weights for spline fitting. Must be positive. If ``None``,
then weights are all equal.
Default is ``None``.
axis : int, optional
Interpolation axis. Default is zero.
check_finite : bool, optional
Whether to check that the input arrays contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default is True.
Returns
-------
b : a BSpline object of the degree `k` with knots `t`.
Notes
-----
The number of data points must be larger than the spline degree `k`.
Knots `t` must satisfy the Schoenberg-Whitney conditions,
i.e., there must be a subset of data points ``x[j]`` such that
``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.
Examples
--------
Generate some noisy data:
>>> x = np.linspace(-3, 3, 50)
>>> y = np.exp(-x**2) + 0.1 * np.random.randn(50)
Now fit a smoothing cubic spline with a pre-defined internal knots.
Here we make the knot vector (k+1)-regular by adding boundary knots:
>>> from scipy.interpolate import make_lsq_spline, BSpline
>>> t = [-1, 0, 1]
>>> k = 3
>>> t = np.r_[(x[0],)*(k+1),
... t,
... (x[-1],)*(k+1)]
>>> spl = make_lsq_spline(x, y, t, k)
For comparison, we also construct an interpolating spline for the same
set of data:
>>> from scipy.interpolate import make_interp_spline
>>> spl_i = make_interp_spline(x, y)
Plot both:
>>> import matplotlib.pyplot as plt
>>> xs = np.linspace(-3, 3, 100)
>>> plt.plot(x, y, 'ro', ms=5)
>>> plt.plot(xs, spl(xs), 'g-', lw=3, label='LSQ spline')
>>> plt.plot(xs, spl_i(xs), 'b-', lw=3, alpha=0.7, label='interp spline')
>>> plt.legend(loc='best')
>>> plt.show()
**NaN handling**: If the input arrays contain ``nan`` values, the result is
not useful since the underlying spline fitting routines cannot deal with
``nan``. A workaround is to use zero weights for not-a-number data points:
>>> y[8] = np.nan
>>> w = np.isnan(y)
>>> y[w] = 0.
>>> tck = make_lsq_spline(x, y, t, w=~w)
Notice the need to replace a ``nan`` by a numerical value (precise value
does not matter as long as the corresponding weight is zero.)
See Also
--------
BSpline : base class representing the B-spline objects
make_interp_spline : a similar factory function for interpolating splines
LSQUnivariateSpline : a FITPACK-based spline fitting routine
splrep : a FITPACK-based fitting routine
"""
x = _as_float_array(x, check_finite)
y = _as_float_array(y, check_finite)
t = _as_float_array(t, check_finite)
if w is not None:
w = _as_float_array(w, check_finite)
else:
w = np.ones_like(x)
k = int(k)
axis = axis % y.ndim
y = np.rollaxis(y, axis) # now internally interp axis is zero
if x.ndim != 1 or np.any(x[1:] - x[:-1] <= 0):
raise ValueError("Expect x to be a 1-D sorted array_like.")
if x.shape[0] < k+1:
raise ValueError("Need more x points.")
if k < 0:
raise ValueError("Expect non-negative k.")
if t.ndim != 1 or np.any(t[1:] - t[:-1] < 0):
raise ValueError("Expect t to be a 1-D sorted array_like.")
if x.size != y.shape[0]:
raise ValueError('x & y are incompatible.')
if k > 0 and np.any((x < t[k]) | (x > t[-k])):
raise ValueError('Out of bounds w/ x = %s.' % x)
if x.size != w.size:
raise ValueError('Incompatible weights.')
# number of coefficients
n = t.size - k - 1
# construct A.T @ A and rhs with A the collocation matrix, and
# rhs = A.T @ y for solving the LSQ problem ``A.T @ A @ c = A.T @ y``
lower = True
extradim = prod(y.shape[1:])
ab = np.zeros((k+1, n), dtype=np.float_, order='F')
rhs = np.zeros((n, extradim), dtype=y.dtype, order='F')
_bspl._norm_eq_lsq(x, t, k,
y.reshape(-1, extradim),
w,
ab, rhs)
rhs = rhs.reshape((n,) + y.shape[1:])
# have observation matrix & rhs, can solve the LSQ problem
cho_decomp = cholesky_banded(ab, overwrite_ab=True, lower=lower,
check_finite=check_finite)
c = cho_solve_banded((cho_decomp, lower), rhs, overwrite_b=True,
check_finite=check_finite)
c = np.ascontiguousarray(c)
return BSpline.construct_fast(t, c, k, axis=axis)
|
{
"content_hash": "e77b0c5ece7574d6ef549d883fc39983",
"timestamp": "",
"source": "github",
"line_count": 892,
"max_line_length": 90,
"avg_line_length": 33.06502242152467,
"alnum_prop": 0.5445853393910626,
"repo_name": "apbard/scipy",
"id": "f3690474428272157ccdc3b72c7443a4b2082cc9",
"size": "29494",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scipy/interpolate/_bsplines.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4188494"
},
{
"name": "C++",
"bytes": "497823"
},
{
"name": "Fortran",
"bytes": "5572451"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "11553222"
},
{
"name": "Shell",
"bytes": "2226"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
}
|
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class FindPlacesByName(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the FindPlacesByName Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(FindPlacesByName, self).__init__(temboo_session, '/Library/Factual/FindPlacesByName')
def new_input_set(self):
return FindPlacesByNameInputSet()
def _make_result_set(self, result, path):
return FindPlacesByNameResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return FindPlacesByNameChoreographyExecution(session, exec_id, path)
class FindPlacesByNameInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the FindPlacesByName
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_APIKey(self, value):
"""
Set the value of the APIKey input for this Choreo. ((optional, string) The API Key provided by Factual (AKA the OAuth Consumer Key).)
"""
super(FindPlacesByNameInputSet, self)._set_input('APIKey', value)
def set_APISecret(self, value):
"""
Set the value of the APISecret input for this Choreo. ((optional, string) The API Secret provided by Factual (AKA the OAuth Consumer Secret).)
"""
super(FindPlacesByNameInputSet, self)._set_input('APISecret', value)
def set_Query(self, value):
"""
Set the value of the Query input for this Choreo. ((required, string) A search string (i.e. Starbucks))
"""
super(FindPlacesByNameInputSet, self)._set_input('Query', value)
class FindPlacesByNameResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the FindPlacesByName Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Factual.)
"""
return self._output.get('Response', None)
class FindPlacesByNameChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return FindPlacesByNameResultSet(response, path)
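# Illustrative usage sketch (not part of this generated module); the Temboo
# account, application-key names and query value are placeholders, and the
# session import path is assumed from the Temboo Python SDK conventions:
#   from temboo.core.session import TembooSession
#   session = TembooSession("ACCOUNT_NAME", "APP_KEY_NAME", "APP_KEY_VALUE")
#   choreo = FindPlacesByName(session)
#   inputs = choreo.new_input_set()
#   inputs.set_Query("Starbucks")
#   results = choreo.execute_with_results(inputs)
#   print(results.get_Response())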
|
{
"content_hash": "ea46b5bdf118337101b388c2db487c71",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 150,
"avg_line_length": 39.878787878787875,
"alnum_prop": 0.7006079027355623,
"repo_name": "lupyuen/RaspberryPiImage",
"id": "4a8cda081561937908babf4ce0842faa56f52e9a",
"size": "3480",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "home/pi/GrovePi/Software/Python/others/temboo/Library/Factual/FindPlacesByName.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Arduino",
"bytes": "82308"
},
{
"name": "C",
"bytes": "3197439"
},
{
"name": "C#",
"bytes": "33056"
},
{
"name": "C++",
"bytes": "1020255"
},
{
"name": "CSS",
"bytes": "208338"
},
{
"name": "CoffeeScript",
"bytes": "87200"
},
{
"name": "Eagle",
"bytes": "1632170"
},
{
"name": "Go",
"bytes": "3646"
},
{
"name": "Groff",
"bytes": "286691"
},
{
"name": "HTML",
"bytes": "41527"
},
{
"name": "JavaScript",
"bytes": "403603"
},
{
"name": "Makefile",
"bytes": "33808"
},
{
"name": "Objective-C",
"bytes": "69457"
},
{
"name": "Perl",
"bytes": "96047"
},
{
"name": "Processing",
"bytes": "1304"
},
{
"name": "Python",
"bytes": "13358098"
},
{
"name": "Shell",
"bytes": "68795"
},
{
"name": "TeX",
"bytes": "4317"
}
],
"symlink_target": ""
}
|
import pyspark
spark = (
pyspark.sql.SparkSession.builder.appName("FromDatabase")
.config("spark.driver.extraClassPath", "<driver_location>/postgresql-42.2.18.jar")
.getOrCreate()
)
# Read table from db using Spark JDBC
def extract_movies_to_df():
movies_df = (
spark.read.format("jdbc")
.option("url", "jdbc:postgresql://localhost:5432/etl_pipeline")
.option("dbtable", "movies")
        .option("user", "<username>")
.option("password", "<password>")
.option("driver", "org.postgresql.Driver")
.load()
)
return movies_df
# Read users table from db using Spark JDBC
def extract_users_to_df():
users_df = (
spark.read.format("jdbc")
.option("url", "jdbc:postgresql://localhost:5432/etl_pipeline")
.option("dbtable", "users")
        .option("user", "<username>")
.option("password", "<password>")
.option("driver", "org.postgresql.Driver")
.load()
)
return users_df
# transforming tables
def transform_avg_ratings(movies_df, users_df):
avg_rating = users_df.groupby("movie_id").mean("rating")
# join movies_df and avg_rating table on id
    df = movies_df.join(avg_rating, movies_df.id == avg_rating.movie_id)
df = df.drop("movie_id")
return df
# Write the result into avg_ratings table in db
def load_df_to_db(df):
mode = "overwrite"
url = "jdbc:postgresql://localhost:5432/etl_pipeline"
properties = {
"user": "<username>",
"password": "<password>",
"driver": "org.postgresql.Driver",
}
df.write.jdbc(url=url, table="avg_ratings", mode=mode, properties=properties)
if __name__ == "__main__":
movies_df = extract_movies_to_df()
users_df = extract_users_to_df()
ratings_df = transform_avg_ratings(movies_df, users_df)
load_df_to_db(ratings_df)
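# Illustrative only: one way to run this script end to end, assuming the
# PostgreSQL JDBC driver jar has been downloaded next to it (the jar path
# and filename are assumptions):
#   spark-submit --jars postgresql-42.2.18.jar movies_avg_etl.py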
|
{
"content_hash": "0e72ca62fc5580f253e8aabd2b988950",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 86,
"avg_line_length": 28.515151515151516,
"alnum_prop": 0.6179596174282678,
"repo_name": "searchs/bigdatabox",
"id": "b23df1b773c5176b3d7340bf15cd6bdf4f368660",
"size": "1882",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "movies_avg_etl.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HiveQL",
"bytes": "1277"
},
{
"name": "Jupyter Notebook",
"bytes": "2101868"
},
{
"name": "Python",
"bytes": "13876"
},
{
"name": "Scala",
"bytes": "4940"
},
{
"name": "Shell",
"bytes": "11703"
}
],
"symlink_target": ""
}
|
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'xmpp2'
copyright = u'2010, Alex Lee'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.4'
# The full version, including alpha/beta/rc tags.
release = '0.4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'xmpp2doc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'xmpp2.tex', u'xmpp2 Documentation',
u'Alex Lee', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
{
"content_hash": "d36375a3be6bf83a6c21fc5374fdda73",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 121,
"avg_line_length": 32.93370165745856,
"alnum_prop": 0.7087736956886428,
"repo_name": "easies/xmpp2",
"id": "e5a81c91ff3a5c1cbcc26f2d5fb90150c0167abd",
"size": "6377",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/source/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "41925"
}
],
"symlink_target": ""
}
|
from traitsui.wx.file_editor import *
|
{
"content_hash": "ab121e20c8bbfc07a56a240df82f534b",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 37,
"avg_line_length": 38,
"alnum_prop": 0.7894736842105263,
"repo_name": "enthought/etsproxy",
"id": "eaf2ee268fbb86043b7b26293e4e2c1c6fead116",
"size": "53",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "enthought/traits/ui/wx/file_editor.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "363714"
}
],
"symlink_target": ""
}
|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Anscombe'] , ['ConstantTrend'] , ['Seasonal_MonthOfYear'] , ['LSTM'] );
|
{
"content_hash": "a31be3545d1057bcf243dba0cdb34875",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 94,
"avg_line_length": 41.75,
"alnum_prop": 0.7245508982035929,
"repo_name": "antoinecarme/pyaf",
"id": "105405fac004860a80ccdea01b3a036c96305714",
"size": "167",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/model_control/detailed/transf_Anscombe/model_control_one_enabled_Anscombe_ConstantTrend_Seasonal_MonthOfYear_LSTM.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
from hazelcast.protocol.client_message import OutboundMessage, REQUEST_HEADER_SIZE, create_initial_buffer
from hazelcast.protocol.builtin import StringCodec
from hazelcast.protocol.builtin import DataCodec
# hex: 0x013E00
_REQUEST_MESSAGE_TYPE = 81408
# hex: 0x013E01
_RESPONSE_MESSAGE_TYPE = 81409
_REQUEST_INITIAL_FRAME_SIZE = REQUEST_HEADER_SIZE
def encode_request(name, predicate):
buf = create_initial_buffer(_REQUEST_INITIAL_FRAME_SIZE, _REQUEST_MESSAGE_TYPE)
StringCodec.encode(buf, name)
DataCodec.encode(buf, predicate, True)
return OutboundMessage(buf, False)
|
{
"content_hash": "2b2ea74099d971ef59ebdc18abda4df4",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 105,
"avg_line_length": 34.705882352941174,
"alnum_prop": 0.7864406779661017,
"repo_name": "hazelcast/hazelcast-python-client",
"id": "2df01d2b074ccefe7ee8b89da40526ad11f8f7bb",
"size": "590",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hazelcast/protocol/codec/map_remove_all_codec.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2300326"
},
{
"name": "Shell",
"bytes": "1900"
}
],
"symlink_target": ""
}
|
"""
Volume manager manages creating, attaching, detaching, and persistent storage.
Persistent storage volumes keep their state independent of instances. You can
attach to an instance, terminate the instance, spawn a new instance (even
one from a different image) and re-attach the volume with the same data
intact.
**Related Flags**
:volume_manager: The module name of a class derived from
:class:`manager.Manager` (default:
:class:`cinder.volume.manager.Manager`).
:volume_driver: Used by :class:`Manager`. Defaults to
:class:`cinder.volume.drivers.lvm.LVMVolumeDriver`.
:volume_group: Name of the group that will contain exported volumes (default:
`cinder-volumes`)
:num_shell_tries: Number of times to attempt to run commands (default: 3)
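**Illustrative example** (an assumption, not part of the original notes): a
minimal ``cinder.conf`` backend section that the flags above map onto; the
section name ``[lvm-1]`` is a placeholder::
    [lvm-1]
    volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
    volume_group = cinder-volumes
    num_shell_tries = 3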
"""
import requests
import time
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_service import periodic_task
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import units
from oslo_utils import uuidutils
profiler = importutils.try_import('osprofiler.profiler')
import six
from taskflow import exceptions as tfe
from cinder.common import constants
from cinder import compute
from cinder import context
from cinder import coordination
from cinder import exception
from cinder import flow_utils
from cinder import keymgr as key_manager
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import cache as image_cache
from cinder.image import glance
from cinder import manager
from cinder.message import api as message_api
from cinder.message import defined_messages
from cinder.message import resource_types
from cinder import objects
from cinder.objects import fields
from cinder import quota
from cinder import utils
from cinder import volume as cinder_volume
from cinder.volume import configuration as config
from cinder.volume.flows.manager import create_volume
from cinder.volume.flows.manager import manage_existing
from cinder.volume.flows.manager import manage_existing_snapshot
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import utils as vol_utils
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
CGQUOTAS = quota.CGQUOTAS
GROUP_QUOTAS = quota.GROUP_QUOTAS
VALID_REMOVE_VOL_FROM_CG_STATUS = (
'available',
'in-use',
'error',
'error_deleting')
VALID_REMOVE_VOL_FROM_GROUP_STATUS = (
'available',
'in-use',
'error',
'error_deleting')
VALID_ADD_VOL_TO_CG_STATUS = (
'available',
'in-use')
VALID_ADD_VOL_TO_GROUP_STATUS = (
'available',
'in-use')
VALID_CREATE_CG_SRC_SNAP_STATUS = (fields.SnapshotStatus.AVAILABLE,)
VALID_CREATE_GROUP_SRC_SNAP_STATUS = (fields.SnapshotStatus.AVAILABLE,)
VALID_CREATE_CG_SRC_CG_STATUS = ('available',)
VALID_CREATE_GROUP_SRC_GROUP_STATUS = ('available',)
volume_manager_opts = [
cfg.StrOpt('volume_driver',
default='cinder.volume.drivers.lvm.LVMVolumeDriver',
help='Driver to use for volume creation'),
cfg.IntOpt('migration_create_volume_timeout_secs',
default=300,
help='Timeout for creating the volume to migrate to '
'when performing volume migration (seconds)'),
cfg.BoolOpt('volume_service_inithost_offload',
default=False,
help='Offload pending volume delete during '
'volume service startup'),
cfg.StrOpt('zoning_mode',
help='FC Zoning mode configured'),
cfg.StrOpt('extra_capabilities',
default='{}',
help='User defined capabilities, a JSON formatted string '
'specifying key/value pairs. The key/value pairs can '
'be used by the CapabilitiesFilter to select between '
'backends when requests specify volume types. For '
'example, specifying a service level or the geographical '
'location of a backend, then creating a volume type to '
'allow the user to select by these different '
'properties.'),
cfg.BoolOpt('suppress_requests_ssl_warnings',
default=False,
help='Suppress requests library SSL certificate warnings.'),
]
CONF = cfg.CONF
CONF.register_opts(volume_manager_opts)
MAPPING = {
'cinder.volume.drivers.hds.nfs.HDSNFSDriver':
'cinder.volume.drivers.hitachi.hnas_nfs.HNASNFSDriver',
'cinder.volume.drivers.hds.iscsi.HDSISCSIDriver':
'cinder.volume.drivers.hitachi.hnas_iscsi.HNASISCSIDriver',
'cinder.volume.drivers.hitachi.hnas_nfs.HDSNFSDriver':
'cinder.volume.drivers.hitachi.hnas_nfs.HNASNFSDriver',
'cinder.volume.drivers.hitachi.hnas_iscsi.HDSISCSIDriver':
'cinder.volume.drivers.hitachi.hnas_iscsi.HNASISCSIDriver',
'cinder.volume.drivers.ibm.xiv_ds8k':
'cinder.volume.drivers.ibm.ibm_storage',
}
class VolumeManager(manager.SchedulerDependentManager):
"""Manages attachable block storage devices."""
RPC_API_VERSION = volume_rpcapi.VolumeAPI.RPC_API_VERSION
target = messaging.Target(version=RPC_API_VERSION)
# On cloning a volume, we shouldn't copy volume_type, consistencygroup
# and volume_attachment, because the db sets that according to [field]_id,
# which we do copy. We also skip some other values that are set during
# creation of Volume object.
_VOLUME_CLONE_SKIP_PROPERTIES = {
'id', '_name_id', 'name_id', 'name', 'status',
'attach_status', 'migration_status', 'volume_type',
'consistencygroup', 'volume_attachment', 'group'}
def __init__(self, volume_driver=None, service_name=None,
*args, **kwargs):
"""Load the driver from the one specified in args, or from flags."""
# update_service_capabilities needs service_name to be volume
super(VolumeManager, self).__init__(service_name='volume',
*args, **kwargs)
self.configuration = config.Configuration(volume_manager_opts,
config_group=service_name)
self.stats = {}
if not volume_driver:
# Get from configuration, which will get the default
            # if it's not using the multi-backend configuration
volume_driver = self.configuration.volume_driver
if volume_driver in MAPPING:
LOG.warning(_LW("Driver path %s is deprecated, update your "
"configuration to the new path."), volume_driver)
volume_driver = MAPPING[volume_driver]
vol_db_empty = self._set_voldb_empty_at_startup_indicator(
context.get_admin_context())
LOG.debug("Cinder Volume DB check: vol_db_empty=%s", vol_db_empty)
# We pass the current setting for service.active_backend_id to
        # the driver on init, in case there was a restart or something
curr_active_backend_id = None
svc_host = vol_utils.extract_host(self.host, 'backend')
try:
service = objects.Service.get_by_args(
context.get_admin_context(),
svc_host,
constants.VOLUME_BINARY)
except exception.ServiceNotFound:
# NOTE(jdg): This is to solve problems with unit tests
LOG.info(_LI("Service not found for updating "
"active_backend_id, assuming default "
"for driver init."))
else:
curr_active_backend_id = service.active_backend_id
if self.configuration.suppress_requests_ssl_warnings:
LOG.warning(_LW("Suppressing requests library SSL Warnings"))
requests.packages.urllib3.disable_warnings(
requests.packages.urllib3.exceptions.InsecureRequestWarning)
requests.packages.urllib3.disable_warnings(
requests.packages.urllib3.exceptions.InsecurePlatformWarning)
self.key_manager = key_manager.API(CONF)
self.driver = importutils.import_object(
volume_driver,
configuration=self.configuration,
db=self.db,
host=self.host,
is_vol_db_empty=vol_db_empty,
active_backend_id=curr_active_backend_id)
self.message_api = message_api.API()
if CONF.profiler.enabled and profiler is not None:
self.driver = profiler.trace_cls("driver")(self.driver)
try:
self.extra_capabilities = jsonutils.loads(
self.driver.configuration.extra_capabilities)
except AttributeError:
self.extra_capabilities = {}
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Invalid JSON: %s"),
self.driver.configuration.extra_capabilities)
if self.driver.configuration.safe_get(
'image_volume_cache_enabled'):
max_cache_size = self.driver.configuration.safe_get(
'image_volume_cache_max_size_gb')
max_cache_entries = self.driver.configuration.safe_get(
'image_volume_cache_max_count')
self.image_volume_cache = image_cache.ImageVolumeCache(
self.db,
cinder_volume.API(),
max_cache_size,
max_cache_entries
)
LOG.info(_LI('Image-volume cache enabled for host %(host)s.'),
{'host': self.host})
else:
LOG.info(_LI('Image-volume cache disabled for host %(host)s.'),
{'host': self.host})
self.image_volume_cache = None
def _count_allocated_capacity(self, ctxt, volume):
pool = vol_utils.extract_host(volume['host'], 'pool')
if pool is None:
# No pool name encoded in host, so this is a legacy
# volume created before pool is introduced, ask
# driver to provide pool info if it has such
# knowledge and update the DB.
try:
pool = self.driver.get_pool(volume)
except Exception:
LOG.exception(_LE('Fetch volume pool name failed.'),
resource=volume)
return
if pool:
new_host = vol_utils.append_host(volume['host'],
pool)
self.db.volume_update(ctxt, volume['id'],
{'host': new_host})
else:
# Otherwise, put them into a special fixed pool with
# volume_backend_name being the pool name, if
# volume_backend_name is None, use default pool name.
            # This is only for counting purposes; it doesn't update the DB.
pool = (self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume['host'], 'pool', True))
try:
pool_stat = self.stats['pools'][pool]
except KeyError:
# First volume in the pool
self.stats['pools'][pool] = dict(
allocated_capacity_gb=0)
pool_stat = self.stats['pools'][pool]
pool_sum = pool_stat['allocated_capacity_gb']
pool_sum += volume['size']
self.stats['pools'][pool]['allocated_capacity_gb'] = pool_sum
self.stats['allocated_capacity_gb'] += volume['size']
def _set_voldb_empty_at_startup_indicator(self, ctxt):
"""Determine if the Cinder volume DB is empty.
A check of the volume DB is done to determine whether it is empty or
not at this point.
:param ctxt: our working context
"""
vol_entries = self.db.volume_get_all(ctxt, None, 1, filters=None)
if len(vol_entries) == 0:
LOG.info(_LI("Determined volume DB was empty at startup."))
return True
else:
LOG.info(_LI("Determined volume DB was not empty at startup."))
return False
def _sync_provider_info(self, ctxt, volumes, snapshots):
        # NOTE(jdg): For now this just updates provider_id, we can add more
        # items to the update if they're relevant, but we need to be safe in
        # what we allow and add a list of allowed keys. Things that make
        # sense are provider_*, replication_status etc.
updates, snapshot_updates = self.driver.update_provider_info(
volumes, snapshots)
if updates:
for volume in volumes:
                # NOTE(JDG): Make sure the returned item is in this host's volumes
update = (
[updt for updt in updates if updt['id'] ==
volume['id']])
if update:
update = update[0]
self.db.volume_update(
ctxt,
update['id'],
{'provider_id': update['provider_id']})
        # NOTE(jdg): snapshots are slightly harder, because
# we do not have a host column and of course no get
# all by host, so we use a get_all and bounce our
# response off of it
if snapshot_updates:
cinder_snaps = self.db.snapshot_get_all(ctxt)
for snap in cinder_snaps:
# NOTE(jdg): For now we only update those that have no entry
if not snap.get('provider_id', None):
update = (
[updt for updt in snapshot_updates if updt['id'] ==
snap['id']][0])
if update:
self.db.snapshot_update(
ctxt,
update['id'],
{'provider_id': update['provider_id']})
def _include_resources_in_cluster(self, ctxt):
LOG.info(_LI('Including all resources from host %(host)s in cluster '
'%(cluster)s.'),
{'host': self.host, 'cluster': self.cluster})
num_vols = objects.VolumeList.include_in_cluster(
ctxt, self.cluster, host=self.host)
num_cgs = objects.ConsistencyGroupList.include_in_cluster(
ctxt, self.cluster, host=self.host)
LOG.info(_LI('%(num_vols)s volumes and %(num_cgs)s consistency groups '
'from host %(host)s have been included in cluster '
'%(cluster)s.'),
{'num_vols': num_vols, 'num_cgs': num_cgs,
'host': self.host, 'cluster': self.cluster})
def init_host(self, added_to_cluster=None):
"""Perform any required initialization."""
ctxt = context.get_admin_context()
if not self.driver.supported:
utils.log_unsupported_driver_warning(self.driver)
if not self.configuration.enable_unsupported_driver:
LOG.error(_LE("Unsupported drivers are disabled."
" You can re-enable by adding "
"enable_unsupported_driver=True to the "
"driver section in cinder.conf"),
resource={'type': 'driver',
'id': self.__class__.__name__})
return
# If we have just added this host to a cluster we have to include all
# our resources in that cluster.
if added_to_cluster:
self._include_resources_in_cluster(ctxt)
LOG.info(_LI("Starting volume driver %(driver_name)s (%(version)s)"),
{'driver_name': self.driver.__class__.__name__,
'version': self.driver.get_version()})
try:
self.driver.do_setup(ctxt)
self.driver.check_for_setup_error()
except Exception:
LOG.exception(_LE("Failed to initialize driver."),
resource={'type': 'driver',
'id': self.__class__.__name__})
# we don't want to continue since we failed
# to initialize the driver correctly.
return
# Initialize backend capabilities list
self.driver.init_capabilities()
volumes = objects.VolumeList.get_all_by_host(ctxt, self.host)
snapshots = objects.SnapshotList.get_by_host(ctxt, self.host)
self._sync_provider_info(ctxt, volumes, snapshots)
# FIXME volume count for exporting is wrong
try:
self.stats['pools'] = {}
self.stats.update({'allocated_capacity_gb': 0})
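            # Walk all volumes on this host: rebuild allocated-capacity
            # stats, re-export in-use volumes, and resolve volumes stuck in
            # transient states (downloading/creating become error, uploading
            # is resolved from its attachments).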
for volume in volumes:
                # Available volumes should also be counted into allocated capacity
if volume['status'] in ['in-use', 'available']:
# calculate allocated capacity for driver
self._count_allocated_capacity(ctxt, volume)
try:
if volume['status'] in ['in-use']:
self.driver.ensure_export(ctxt, volume)
except Exception:
LOG.exception(_LE("Failed to re-export volume, "
"setting to ERROR."),
resource=volume)
volume.status = 'error'
volume.save()
elif volume['status'] in ('downloading', 'creating'):
LOG.warning(_LW("Detected volume stuck "
"in %(curr_status)s "
"status, setting to ERROR."),
{'curr_status': volume['status']},
resource=volume)
if volume['status'] == 'downloading':
self.driver.clear_download(ctxt, volume)
volume.status = 'error'
volume.save()
elif volume.status == 'uploading':
# Set volume status to available or in-use.
self.db.volume_update_status_based_on_attachment(
ctxt, volume.id)
else:
pass
snapshots = objects.SnapshotList.get_by_host(
ctxt, self.host, {'status': fields.SnapshotStatus.CREATING})
for snapshot in snapshots:
LOG.warning(_LW("Detected snapshot stuck in creating "
"status, setting to ERROR."), resource=snapshot)
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
except Exception:
LOG.exception(_LE("Error during re-export on driver init."),
resource=volume)
return
self.driver.set_throttle()
# at this point the driver is considered initialized.
# NOTE(jdg): Careful though because that doesn't mean
# that an entry exists in the service table
self.driver.set_initialized()
for volume in volumes:
if volume['status'] == 'deleting':
if CONF.volume_service_inithost_offload:
# Offload all the pending volume delete operations to the
# threadpool to prevent the main volume service thread
# from being blocked.
self._add_to_threadpool(self.delete_volume, ctxt, volume,
cascade=True)
else:
# By default, delete volumes sequentially
self.delete_volume(ctxt, volume, cascade=True)
LOG.info(_LI("Resume volume delete completed successfully."),
resource=volume)
# collect and publish service capabilities
self.publish_service_capabilities(ctxt)
LOG.info(_LI("Driver initialization completed successfully."),
resource={'type': 'driver',
'id': self.driver.__class__.__name__})
def init_host_with_rpc(self):
LOG.info(_LI("Initializing RPC dependent components of volume "
"driver %(driver_name)s (%(version)s)"),
{'driver_name': self.driver.__class__.__name__,
'version': self.driver.get_version()})
try:
# Make sure the driver is initialized first
utils.log_unsupported_driver_warning(self.driver)
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
LOG.error(_LE("Cannot complete RPC initialization because "
"driver isn't initialized properly."),
resource={'type': 'driver',
'id': self.driver.__class__.__name__})
return
stats = self.driver.get_volume_stats(refresh=True)
svc_host = vol_utils.extract_host(self.host, 'backend')
try:
service = objects.Service.get_by_args(
context.get_admin_context(),
svc_host,
constants.VOLUME_BINARY)
except exception.ServiceNotFound:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Service not found for updating "
"replication_status."))
if service.replication_status != (
fields.ReplicationStatus.FAILED_OVER):
if stats and stats.get('replication_enabled', False):
service.replication_status = fields.ReplicationStatus.ENABLED
else:
service.replication_status = fields.ReplicationStatus.DISABLED
service.save()
LOG.info(_LI("Driver post RPC initialization completed successfully."),
resource={'type': 'driver',
'id': self.driver.__class__.__name__})
def is_working(self):
"""Return if Manager is ready to accept requests.
        This is to inform the Service class that, in case of a volume driver
        initialization failure, the manager is actually down and not ready
        to accept any requests.
"""
return self.driver.initialized
def create_volume(self, context, volume, request_spec=None,
filter_properties=None, allow_reschedule=True):
"""Creates the volume."""
# Log about unsupported drivers
utils.log_unsupported_driver_warning(self.driver)
context_elevated = context.elevated()
if filter_properties is None:
filter_properties = {}
if request_spec is None:
request_spec = objects.RequestSpec()
try:
# NOTE(flaper87): Driver initialization is
# verified by the task itself.
flow_engine = create_volume.get_flow(
context_elevated,
self,
self.db,
self.driver,
self.scheduler_rpcapi,
self.host,
volume,
allow_reschedule,
context,
request_spec,
filter_properties,
image_volume_cache=self.image_volume_cache,
)
except Exception:
msg = _("Create manager volume flow failed.")
LOG.exception(msg, resource={'type': 'volume', 'id': volume.id})
raise exception.CinderException(msg)
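        # Work out which source resource (if any) must be locked so it is
        # not deleted while the create flow is still using it.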
snapshot_id = request_spec.get('snapshot_id')
source_volid = request_spec.get('source_volid')
source_replicaid = request_spec.get('source_replicaid')
if snapshot_id is not None:
# Make sure the snapshot is not deleted until we are done with it.
locked_action = "%s-%s" % (snapshot_id, 'delete_snapshot')
elif source_volid is not None:
# Make sure the volume is not deleted until we are done with it.
locked_action = "%s-%s" % (source_volid, 'delete_volume')
elif source_replicaid is not None:
# Make sure the volume is not deleted until we are done with it.
locked_action = "%s-%s" % (source_replicaid, 'delete_volume')
else:
locked_action = None
def _run_flow():
# This code executes create volume flow. If something goes wrong,
# flow reverts all job that was done and reraises an exception.
# Otherwise, all data that was generated by flow becomes available
# in flow engine's storage.
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
# NOTE(dulek): Flag to indicate if volume was rescheduled. Used to
# decide if allocated_capacity should be incremented.
rescheduled = False
try:
if locked_action is None:
_run_flow()
else:
with coordination.Lock(locked_action):
_run_flow()
finally:
try:
flow_engine.storage.fetch('refreshed')
except tfe.NotFound:
                # If there's no vol_ref, then the flow was reverted. Let's
                # check whether rescheduling occurred.
try:
rescheduled = flow_engine.storage.get_revert_result(
create_volume.OnFailureRescheduleTask.make_name(
[create_volume.ACTION]))
except tfe.NotFound:
pass
if not rescheduled:
# NOTE(dulek): Volume wasn't rescheduled so we need to update
# volume stats as these are decremented on delete.
self._update_allocated_capacity(volume)
LOG.info(_LI("Created volume successfully."), resource=volume)
return volume.id
@coordination.synchronized('{volume.id}-{f_name}')
def delete_volume(self, context, volume, unmanage_only=False,
cascade=False):
"""Deletes and unexports volume.
        1. Delete a volume (normal case)
Delete a volume and update quotas.
2. Delete a migration volume
If deleting the volume in a migration, we want to skip
quotas but we need database updates for the volume.
"""
context = context.elevated()
try:
volume.refresh()
except exception.VolumeNotFound:
# NOTE(thingee): It could be possible for a volume to
# be deleted when resuming deletes from init_host().
LOG.debug("Attempted delete of non-existent volume: %s", volume.id)
return
if context.project_id != volume.project_id:
project_id = volume.project_id
else:
project_id = context.project_id
if volume['attach_status'] == "attached":
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=volume.id)
if vol_utils.extract_host(volume.host) != self.host:
raise exception.InvalidVolume(
reason=_("volume is not local to this node"))
if unmanage_only and cascade:
# This could be done, but is ruled out for now just
# for simplicity.
raise exception.Invalid(
reason=_("Unmanage and cascade delete options "
"are mutually exclusive."))
# The status 'deleting' is not included, because it only applies to
# the source volume to be deleted after a migration. No quota
# needs to be handled for it.
is_migrating = volume.migration_status not in (None, 'error',
'success')
is_migrating_dest = (is_migrating and
volume.migration_status.startswith(
'target:'))
self._notify_about_volume_usage(context, volume, "delete.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
self.driver.remove_export(context, volume)
if unmanage_only:
self.driver.unmanage(volume)
elif cascade:
LOG.debug('Performing cascade delete.')
snapshots = objects.SnapshotList.get_all_for_volume(context,
volume.id)
for s in snapshots:
if s.status != 'deleting':
self._clear_db(context, is_migrating_dest, volume,
'error_deleting')
msg = (_("Snapshot %(id)s was found in state "
"%(state)s rather than 'deleting' during "
"cascade delete.") % {'id': s.id,
'state': s.status})
raise exception.InvalidSnapshot(reason=msg)
self.delete_snapshot(context, s)
LOG.debug('Snapshots deleted, issuing volume delete')
self.driver.delete_volume(volume)
else:
self.driver.delete_volume(volume)
except exception.VolumeIsBusy:
LOG.error(_LE("Unable to delete busy volume."),
resource=volume)
# If this is a destination volume, we have to clear the database
# record to avoid user confusion.
self._clear_db(context, is_migrating_dest, volume,
'available')
return
except Exception:
with excutils.save_and_reraise_exception():
# If this is a destination volume, we have to clear the
# database record to avoid user confusion.
self._clear_db(context, is_migrating_dest, volume,
'error_deleting')
# If deleting source/destination volume in a migration, we should
# skip quotas.
if not is_migrating:
# Get reservations
try:
reservations = None
if volume.status != 'error_managing_deleting':
reserve_opts = {'volumes': -1,
'gigabytes': -volume.size}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume.volume_type_id)
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
LOG.exception(_LE("Failed to update usages deleting volume."),
resource=volume)
# Delete glance metadata if it exists
self.db.volume_glance_metadata_delete_by_volume(context, volume.id)
volume.destroy()
# If deleting source/destination volume in a migration, we should
# skip quotas.
if not is_migrating:
self._notify_about_volume_usage(context, volume, "delete.end")
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
pool = vol_utils.extract_host(volume.host, 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume.host, 'pool', True)
size = volume.size
try:
self.stats['pools'][pool]['allocated_capacity_gb'] -= size
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=-size)
self.publish_service_capabilities(context)
LOG.info(_LI("Deleted volume successfully."), resource=volume)
def _clear_db(self, context, is_migrating_dest, volume_ref, status):
# This method is called when driver.unmanage() or
# driver.delete_volume() fails in delete_volume(), so it is already
# in the exception handling part.
if is_migrating_dest:
volume_ref.destroy()
LOG.error(_LE("Unable to delete the destination volume "
"during volume migration, (NOTE: database "
"record needs to be deleted)."), resource=volume_ref)
else:
volume_ref.status = status
volume_ref.save()
def create_snapshot(self, context, snapshot):
"""Creates and exports the snapshot."""
context = context.elevated()
self._notify_about_snapshot_usage(
context, snapshot, "create.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the snapshot status updated.
utils.require_driver_initialized(self.driver)
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
snapshot.context = context
model_update = self.driver.create_snapshot(snapshot)
if model_update:
snapshot.update(model_update)
snapshot.save()
except Exception:
with excutils.save_and_reraise_exception():
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
vol_ref = self.db.volume_get(context, snapshot.volume_id)
if vol_ref.bootable:
try:
self.db.volume_glance_metadata_copy_to_snapshot(
context, snapshot.id, snapshot.volume_id)
except exception.GlanceMetadataNotFound:
                # If the volume was not created from an image, no glance
                # metadata will be available for it in the volume glance
                # metadata table
pass
except exception.CinderException as ex:
LOG.exception(_LE("Failed updating snapshot"
" metadata using the provided volumes"
" %(volume_id)s metadata"),
{'volume_id': snapshot.volume_id},
resource=snapshot)
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
raise exception.MetadataCopyFailure(reason=six.text_type(ex))
snapshot.status = fields.SnapshotStatus.AVAILABLE
snapshot.progress = '100%'
snapshot.save()
self._notify_about_snapshot_usage(context, snapshot, "create.end")
LOG.info(_LI("Create snapshot completed successfully"),
resource=snapshot)
return snapshot.id
@coordination.synchronized('{snapshot.id}-{f_name}')
def delete_snapshot(self, context, snapshot, unmanage_only=False):
"""Deletes and unexports snapshot."""
context = context.elevated()
snapshot._context = context
project_id = snapshot.project_id
self._notify_about_snapshot_usage(
context, snapshot, "delete.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the snapshot status updated.
utils.require_driver_initialized(self.driver)
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
snapshot.context = context
snapshot.save()
if unmanage_only:
self.driver.unmanage_snapshot(snapshot)
else:
self.driver.delete_snapshot(snapshot)
except exception.SnapshotIsBusy:
LOG.error(_LE("Delete snapshot failed, due to snapshot busy."),
resource=snapshot)
snapshot.status = fields.SnapshotStatus.AVAILABLE
snapshot.save()
return
except Exception:
with excutils.save_and_reraise_exception():
snapshot.status = fields.SnapshotStatus.ERROR_DELETING
snapshot.save()
# Get reservations
try:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': -1}
else:
reserve_opts = {
'snapshots': -1,
'gigabytes': -snapshot.volume_size,
}
volume_ref = self.db.volume_get(context, snapshot.volume_id)
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Update snapshot usages failed."),
resource=snapshot)
self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot.id)
snapshot.destroy()
self._notify_about_snapshot_usage(context, snapshot, "delete.end")
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
LOG.info(_LI("Delete snapshot completed successfully"),
resource=snapshot)
@coordination.synchronized('{volume_id}')
def attach_volume(self, context, volume_id, instance_uuid, host_name,
mountpoint, mode):
"""Updates db to show volume is attached."""
# check the volume status before attaching
volume = self.db.volume_get(context, volume_id)
volume_metadata = self.db.volume_admin_metadata_get(
context.elevated(), volume_id)
if volume['status'] == 'attaching':
if (volume_metadata.get('attached_mode') and
volume_metadata.get('attached_mode') != mode):
raise exception.InvalidVolume(
reason=_("being attached by different mode"))
if (volume['status'] == 'in-use' and not volume['multiattach']
and not volume['migration_status']):
raise exception.InvalidVolume(
reason=_("volume is already attached"))
host_name_sanitized = utils.sanitize_hostname(
host_name) if host_name else None
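        # Look up existing attachments for this instance/host so that a
        # repeated attach request for the same volume is handled
        # idempotently below.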
if instance_uuid:
attachments = \
self.db.volume_attachment_get_all_by_instance_uuid(
context, instance_uuid)
else:
attachments = (
self.db.volume_attachment_get_all_by_host(
context,
host_name_sanitized))
if attachments:
# check if volume<->instance mapping is already tracked in DB
for attachment in attachments:
if attachment['volume_id'] == volume_id:
self.db.volume_update(context, volume_id,
{'status': 'in-use'})
return attachment
self._notify_about_volume_usage(context, volume,
"attach.start")
values = {'volume_id': volume_id,
'attach_status': 'attaching', }
attachment = self.db.volume_attach(context.elevated(), values)
volume_metadata = self.db.volume_admin_metadata_update(
context.elevated(), volume_id,
{"attached_mode": mode}, False)
attachment_id = attachment['id']
if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
self.db.volume_attachment_update(context, attachment_id,
{'attach_status':
'error_attaching'})
raise exception.InvalidUUID(uuid=instance_uuid)
volume = self.db.volume_get(context, volume_id)
if volume_metadata.get('readonly') == 'True' and mode != 'ro':
self.db.volume_update(context, volume_id,
{'status': 'error_attaching'})
self.message_api.create(
context, defined_messages.ATTACH_READONLY_VOLUME,
context.project_id, resource_type=resource_types.VOLUME,
resource_uuid=volume_id)
raise exception.InvalidVolumeAttachMode(mode=mode,
volume_id=volume_id)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
LOG.debug('Attaching volume %(volume_id)s to instance '
'%(instance)s at mountpoint %(mount)s on host '
'%(host)s.',
{'volume_id': volume_id, 'instance': instance_uuid,
'mount': mountpoint, 'host': host_name_sanitized},
resource=volume)
self.driver.attach_volume(context,
volume,
instance_uuid,
host_name_sanitized,
mountpoint)
except Exception:
with excutils.save_and_reraise_exception():
self.db.volume_attachment_update(
context, attachment_id,
{'attach_status': 'error_attaching'})
volume = self.db.volume_attached(context.elevated(),
attachment_id,
instance_uuid,
host_name_sanitized,
mountpoint,
mode)
self._notify_about_volume_usage(context, volume, "attach.end")
LOG.info(_LI("Attach volume completed successfully."),
resource=volume)
return self.db.volume_attachment_get(context, attachment_id)
@coordination.synchronized('{volume_id}-{f_name}')
def detach_volume(self, context, volume_id, attachment_id=None):
"""Updates db to show volume is detached."""
# TODO(vish): refactor this into a more general "unreserve"
volume = self.db.volume_get(context, volume_id)
attachment = None
if attachment_id:
try:
attachment = self.db.volume_attachment_get(context,
attachment_id)
except exception.VolumeAttachmentNotFound:
LOG.info(_LI("Volume detach called, but volume not attached."),
resource=volume)
# We need to make sure the volume status is set to the correct
# status. It could be in detaching status now, and we don't
# want to leave it there.
self.db.volume_detached(context, volume_id, attachment_id)
return
else:
            # We can try to degrade gracefully by detaching the volume
            # without an attachment_id if the volume only has one
            # attachment. This is for backwards compatibility.
attachments = self.db.volume_attachment_get_all_by_volume_id(
context, volume_id)
if len(attachments) > 1:
                # There is more than one attachment for this volume,
                # so we must have an attachment_id.
msg = _("Detach volume failed: More than one attachment, "
"but no attachment_id provided.")
LOG.error(msg, resource=volume)
raise exception.InvalidVolume(reason=msg)
elif len(attachments) == 1:
attachment = attachments[0]
else:
# there aren't any attachments for this volume.
# so set the status to available and move on.
LOG.info(_LI("Volume detach called, but volume not attached."),
resource=volume)
self.db.volume_update(context, volume_id,
{'status': 'available',
'attach_status': 'detached'})
return
self._notify_about_volume_usage(context, volume, "detach.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
LOG.debug('Detaching volume %(volume_id)s from instance '
'%(instance)s.',
{'volume_id': volume_id,
'instance': attachment.get('instance_uuid')},
resource=volume)
self.driver.detach_volume(context, volume, attachment)
except Exception:
with excutils.save_and_reraise_exception():
self.db.volume_attachment_update(
context, attachment.get('id'),
{'attach_status': 'error_detaching'})
# NOTE(jdg): We used to do an ensure export here to
# catch upgrades while volumes were attached (E->F)
# this was necessary to convert in-use volumes from
        # int IDs to UUIDs. We don't need this any longer.
# We're going to remove the export here
# (delete the iscsi target)
volume = self.db.volume_get(context, volume_id)
try:
utils.require_driver_initialized(self.driver)
self.driver.remove_export(context.elevated(), volume)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Detach volume failed, due to "
"uninitialized driver."),
resource=volume)
except Exception as ex:
LOG.exception(_LE("Detach volume failed, due to "
"remove-export failure."),
resource=volume)
raise exception.RemoveExportException(volume=volume_id,
reason=six.text_type(ex))
self.db.volume_detached(context.elevated(), volume_id,
attachment.get('id'))
self.db.volume_admin_metadata_delete(context.elevated(), volume_id,
'attached_mode')
self._notify_about_volume_usage(context, volume, "detach.end")
LOG.info(_LI("Detach volume completed successfully."), resource=volume)
def _create_image_cache_volume_entry(self, ctx, volume_ref,
image_id, image_meta):
"""Create a new image-volume and cache entry for it.
This assumes that the image has already been downloaded and stored
in the volume described by the volume_ref.
"""
image_volume = None
try:
if not self.image_volume_cache.ensure_space(
ctx,
volume_ref['size'],
volume_ref['host']):
LOG.warning(_LW('Unable to ensure space for image-volume in'
' cache. Will skip creating entry for image'
' %(image)s on host %(host)s.'),
{'image': image_id, 'host': volume_ref['host']})
return
image_volume = self._clone_image_volume(ctx,
volume_ref,
image_meta)
if not image_volume:
LOG.warning(_LW('Unable to clone image_volume for image '
'%(image_id)s will not create cache entry.'),
{'image_id': image_id})
return
self.image_volume_cache.create_cache_entry(
ctx,
image_volume,
image_id,
image_meta
)
except exception.CinderException as e:
LOG.warning(_LW('Failed to create new image-volume cache entry.'
' Error: %(exception)s'), {'exception': e})
if image_volume:
self.delete_volume(ctx, image_volume)
def _clone_image_volume(self, ctx, volume, image_meta):
volume_type_id = volume.get('volume_type_id')
reserve_opts = {'volumes': 1, 'gigabytes': volume.size}
QUOTAS.add_volume_type_opts(ctx, reserve_opts, volume_type_id)
reservations = QUOTAS.reserve(ctx, **reserve_opts)
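        # Build the new volume record from the source volume, skipping fields
        # that must not be copied (_VOLUME_CLONE_SKIP_PROPERTIES).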
try:
new_vol_values = {k: volume[k] for k in set(volume.keys()) -
self._VOLUME_CLONE_SKIP_PROPERTIES}
new_vol_values['volume_type_id'] = volume_type_id
new_vol_values['attach_status'] = 'detached'
new_vol_values['status'] = 'creating'
new_vol_values['project_id'] = ctx.project_id
new_vol_values['display_name'] = 'image-%s' % image_meta['id']
new_vol_values['source_volid'] = volume.id
LOG.debug('Creating image volume entry: %s.', new_vol_values)
image_volume = objects.Volume(context=ctx, **new_vol_values)
image_volume.create()
except Exception as ex:
            LOG.exception(_LE('Create clone_image_volume: %(volume_id)s '
'for image %(image_id)s, '
'failed (Exception: %(except)s)'),
{'volume_id': volume.id,
'image_id': image_meta['id'],
'except': ex})
QUOTAS.rollback(ctx, reservations)
return
QUOTAS.commit(ctx, reservations,
project_id=new_vol_values['project_id'])
try:
self.create_volume(ctx, image_volume, allow_reschedule=False)
image_volume = self.db.volume_get(ctx, image_volume.id)
if image_volume.status != 'available':
raise exception.InvalidVolume(_('Volume is not available.'))
self.db.volume_admin_metadata_update(ctx.elevated(),
image_volume.id,
{'readonly': 'True'},
False)
return image_volume
except exception.CinderException:
LOG.exception(_LE('Failed to clone volume %(volume_id)s for '
'image %(image_id)s.'),
{'volume_id': volume.id,
'image_id': image_meta['id']})
try:
self.delete_volume(ctx, image_volume)
except exception.CinderException:
LOG.exception(_LE('Could not delete the image volume %(id)s.'),
{'id': volume.id})
return
def _clone_image_volume_and_add_location(self, ctx, volume, image_service,
image_meta):
"""Create a cloned volume and register its location to the image."""
if (image_meta['disk_format'] != 'raw' or
image_meta['container_format'] != 'bare'):
return False
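        # If configured, run the clone under the internal tenant context so
        # the image volume is created in that project rather than the
        # caller's.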
image_volume_context = ctx
if self.driver.configuration.image_upload_use_internal_tenant:
internal_ctx = context.get_internal_tenant_context()
if internal_ctx:
image_volume_context = internal_ctx
image_volume = self._clone_image_volume(image_volume_context,
volume,
image_meta)
if not image_volume:
return False
uri = 'cinder://%s' % image_volume.id
image_registered = None
try:
image_registered = image_service.add_location(
ctx, image_meta['id'], uri, {})
except (exception.NotAuthorized, exception.Invalid,
exception.NotFound):
LOG.exception(_LE('Failed to register image volume location '
'%(uri)s.'), {'uri': uri})
if not image_registered:
LOG.warning(_LW('Registration of image volume URI %(uri)s '
'to image %(image_id)s failed.'),
{'uri': uri, 'image_id': image_meta['id']})
try:
self.delete_volume(image_volume_context, image_volume)
except exception.CinderException:
LOG.exception(_LE('Could not delete failed image volume '
'%(id)s.'), {'id': image_volume.id})
return False
image_volume_meta = {'glance_image_id': image_meta['id'],
'image_owner': ctx.project_id}
self.db.volume_metadata_update(image_volume_context,
image_volume.id,
image_volume_meta,
False)
return True
def copy_volume_to_image(self, context, volume_id, image_meta):
"""Uploads the specified volume to Glance.
image_meta is a dictionary containing the following keys:
'id', 'container_format', 'disk_format'
"""
payload = {'volume_id': volume_id, 'image_id': image_meta['id']}
image_service = None
try:
volume = self.db.volume_get(context, volume_id)
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
image_service, image_id = \
glance.get_remote_image_service(context, image_meta['id'])
if (self.driver.configuration.image_upload_use_cinder_backend
and self._clone_image_volume_and_add_location(
context, volume, image_service, image_meta)):
LOG.debug("Registered image volume location to glance "
"image-id: %(image_id)s.",
{'image_id': image_meta['id']},
resource=volume)
else:
self.driver.copy_volume_to_image(context, volume,
image_service, image_meta)
LOG.debug("Uploaded volume to glance image-id: %(image_id)s.",
{'image_id': image_meta['id']},
resource=volume)
except Exception as error:
LOG.error(_LE("Upload volume to image encountered an error "
"(image-id: %(image_id)s)."),
{'image_id': image_meta['id']},
resource=volume)
if image_service is not None:
# Deletes the image if it is in queued or saving state
self._delete_image(context, image_meta['id'], image_service)
with excutils.save_and_reraise_exception():
payload['message'] = six.text_type(error)
if isinstance(error, exception.ImageLimitExceeded):
self.message_api.create(
context, defined_messages.IMAGE_FROM_VOLUME_OVER_QUOTA,
context.project_id,
resource_type=resource_types.VOLUME,
resource_uuid=volume_id)
finally:
self.db.volume_update_status_based_on_attachment(context,
volume_id)
LOG.info(_LI("Copy volume to image completed successfully."),
resource=volume)
def _delete_image(self, context, image_id, image_service):
"""Deletes an image stuck in queued or saving state."""
try:
image_meta = image_service.show(context, image_id)
image_status = image_meta.get('status')
if image_status == 'queued' or image_status == 'saving':
LOG.warning(_LW("Deleting image in unexpected status: "
"%(image_status)s."),
{'image_status': image_status},
resource={'type': 'image', 'id': image_id})
image_service.delete(context, image_id)
except Exception:
LOG.warning(_LW("Image delete encountered an error."),
exc_info=True, resource={'type': 'image',
'id': image_id})
def initialize_connection(self, context, volume, connector):
"""Prepare volume for connection from host represented by connector.
This method calls the driver initialize_connection and returns
it to the caller. The connector parameter is a dictionary with
information about the host that will connect to the volume in the
following format::
{
'ip': ip,
'initiator': initiator,
}
ip: the ip address of the connecting machine
initiator: the iscsi initiator name of the connecting machine.
This can be None if the connecting machine does not support iscsi
connections.
driver is responsible for doing any necessary security setup and
returning a connection_info dictionary in the following format::
{
'driver_volume_type': driver_volume_type,
'data': data,
}
driver_volume_type: a string to identify the type of volume. This
can be used by the calling code to determine the
strategy for connecting to the volume. This could
be 'iscsi', 'rbd', 'sheepdog', etc.
data: this is the data that the calling code will use to connect
to the volume. Keep in mind that this will be serialized to
json in various places, so it should not contain any non-json
data types.
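        An illustrative connection_info for an iSCSI backend (all field
        values below are examples only, not a contract) might look like::
            {
                'driver_volume_type': 'iscsi',
                'data': {
                    'target_iqn': 'iqn.2010-10.org.openstack:volume-<id>',
                    'target_portal': '10.0.0.1:3260',
                    'target_lun': 1,
                },
            }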
"""
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
try:
self.driver.validate_connector(connector)
except exception.InvalidConnectorException as err:
raise exception.InvalidInput(reason=six.text_type(err))
except Exception as err:
err_msg = (_("Validate volume connection failed "
"(error: %(err)s).") % {'err': six.text_type(err)})
LOG.error(err_msg, resource=volume)
raise exception.VolumeBackendAPIException(data=err_msg)
try:
model_update = self.driver.create_export(context.elevated(),
volume, connector)
except exception.CinderException:
err_msg = (_("Create export for volume failed."))
LOG.exception(err_msg, resource=volume)
raise exception.VolumeBackendAPIException(data=err_msg)
try:
if model_update:
volume.update(model_update)
volume.save()
except exception.CinderException as ex:
LOG.exception(_LE("Model update failed."), resource=volume)
raise exception.ExportFailure(reason=six.text_type(ex))
try:
conn_info = self.driver.initialize_connection(volume, connector)
except Exception as err:
err_msg = (_("Driver initialize connection failed "
"(error: %(err)s).") % {'err': six.text_type(err)})
LOG.exception(err_msg, resource=volume)
self.driver.remove_export(context.elevated(), volume)
raise exception.VolumeBackendAPIException(data=err_msg)
# Add qos_specs to connection info
typeid = volume.volume_type_id
specs = None
if typeid:
res = volume_types.get_volume_type_qos_specs(typeid)
qos = res['qos_specs']
# only pass qos_specs that is designated to be consumed by
# front-end, or both front-end and back-end.
if qos and qos.get('consumer') in ['front-end', 'both']:
specs = qos.get('specs')
qos_spec = dict(qos_specs=specs)
conn_info['data'].update(qos_spec)
# Add access_mode to connection info
volume_metadata = volume.admin_metadata
access_mode = volume_metadata.get('attached_mode')
if access_mode is None:
# NOTE(zhiyan): client didn't call 'os-attach' before
access_mode = ('ro'
if volume_metadata.get('readonly') == 'True'
else 'rw')
conn_info['data']['access_mode'] = access_mode
# Add encrypted flag to connection_info if not set in the driver.
if conn_info['data'].get('encrypted') is None:
encrypted = bool(volume.encryption_key_id)
conn_info['data']['encrypted'] = encrypted
# Add discard flag to connection_info if not set in the driver and
# configured to be reported.
if conn_info['data'].get('discard') is None:
discard_supported = (self.driver.configuration
.safe_get('report_discard_supported'))
if discard_supported:
conn_info['data']['discard'] = True
LOG.info(_LI("Initialize volume connection completed successfully."),
resource=volume)
return conn_info
def terminate_connection(self, context, volume_id, connector, force=False):
"""Cleanup connection from host represented by connector.
The format of connector is the same as for initialize_connection.
"""
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
volume_ref = self.db.volume_get(context, volume_id)
try:
self.driver.terminate_connection(volume_ref, connector,
force=force)
except Exception as err:
err_msg = (_('Terminate volume connection failed: %(err)s')
% {'err': six.text_type(err)})
LOG.exception(err_msg, resource=volume_ref)
raise exception.VolumeBackendAPIException(data=err_msg)
LOG.info(_LI("Terminate volume connection completed successfully."),
resource=volume_ref)
def remove_export(self, context, volume_id):
"""Removes an export for a volume."""
utils.require_driver_initialized(self.driver)
volume_ref = self.db.volume_get(context, volume_id)
try:
self.driver.remove_export(context, volume_ref)
except Exception:
msg = _("Remove volume export failed.")
LOG.exception(msg, resource=volume_ref)
raise exception.VolumeBackendAPIException(data=msg)
LOG.info(_LI("Remove volume export completed successfully."),
resource=volume_ref)
def accept_transfer(self, context, volume_id, new_user, new_project):
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
# NOTE(jdg): need elevated context as we haven't "given" the vol
# yet
volume_ref = self.db.volume_get(context.elevated(), volume_id)
# NOTE(jdg): Some drivers tie provider info (CHAP) to tenant
# for those that do allow them to return updated model info
model_update = self.driver.accept_transfer(context,
volume_ref,
new_user,
new_project)
if model_update:
try:
self.db.volume_update(context.elevated(),
volume_id,
model_update)
except exception.CinderException:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Update volume model for "
"transfer operation failed."),
resource=volume_ref)
self.db.volume_update(context.elevated(),
volume_id,
{'status': 'error'})
LOG.info(_LI("Transfer volume completed successfully."),
resource=volume_ref)
return model_update
def _connect_device(self, conn):
use_multipath = self.configuration.use_multipath_for_image_xfer
device_scan_attempts = self.configuration.num_volume_device_scan_tries
protocol = conn['driver_volume_type']
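        # Build an os-brick connector for the transport protocol and attach
        # the backend device locally (used for data copy operations).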
connector = utils.brick_get_connector(
protocol,
use_multipath=use_multipath,
device_scan_attempts=device_scan_attempts,
conn=conn)
vol_handle = connector.connect_volume(conn['data'])
root_access = True
if not connector.check_valid_device(vol_handle['path'], root_access):
if isinstance(vol_handle['path'], six.string_types):
raise exception.DeviceUnavailable(
path=vol_handle['path'],
reason=(_("Unable to access the backend storage via the "
"path %(path)s.") %
{'path': vol_handle['path']}))
else:
raise exception.DeviceUnavailable(
path=None,
reason=(_("Unable to access the backend storage via file "
"handle.")))
return {'conn': conn, 'device': vol_handle, 'connector': connector}
def _attach_volume(self, ctxt, volume, properties, remote=False,
attach_encryptor=False):
status = volume['status']
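        # For volumes owned by another host go through the volume RPC API;
        # otherwise initialize the connection with the local driver.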
if remote:
rpcapi = volume_rpcapi.VolumeAPI()
try:
conn = rpcapi.initialize_connection(ctxt, volume, properties)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to attach volume %(vol)s."),
{'vol': volume['id']})
self.db.volume_update(ctxt, volume['id'],
{'status': status})
else:
conn = self.initialize_connection(ctxt, volume, properties)
attach_info = self._connect_device(conn)
try:
if attach_encryptor and (
volume_types.is_encrypted(ctxt,
volume.volume_type_id)):
encryption = self.db.volume_encryption_metadata_get(
ctxt.elevated(), volume.id)
if encryption:
utils.brick_attach_volume_encryptor(ctxt,
attach_info,
encryption)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to attach volume encryptor"
" %(vol)s."), {'vol': volume['id']})
self._detach_volume(ctxt, attach_info, volume, properties)
return attach_info
def _detach_volume(self, ctxt, attach_info, volume, properties,
force=False, remote=False,
attach_encryptor=False):
connector = attach_info['connector']
if attach_encryptor and (
volume_types.is_encrypted(ctxt,
volume.volume_type_id)):
encryption = self.db.volume_encryption_metadata_get(
ctxt.elevated(), volume.id)
if encryption:
utils.brick_detach_volume_encryptor(attach_info, encryption)
connector.disconnect_volume(attach_info['conn']['data'],
attach_info['device'])
if remote:
rpcapi = volume_rpcapi.VolumeAPI()
rpcapi.terminate_connection(ctxt, volume, properties, force=force)
rpcapi.remove_export(ctxt, volume)
else:
try:
self.terminate_connection(ctxt, volume['id'], properties,
force=force)
self.remove_export(ctxt, volume['id'])
except Exception as err:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Unable to terminate volume connection: '
'%(err)s.') % {'err': err})
def _copy_volume_data(self, ctxt, src_vol, dest_vol, remote=None):
"""Copy data from src_vol to dest_vol."""
LOG.debug('copy_data_between_volumes %(src)s -> %(dest)s.',
{'src': src_vol['name'], 'dest': dest_vol['name']})
attach_encryptor = False
# If the encryption method or key is changed, we have to
# copy data through dm-crypt.
if volume_types.volume_types_encryption_changed(
ctxt,
src_vol.volume_type_id,
dest_vol.volume_type_id):
attach_encryptor = True
properties = utils.brick_get_connector_properties()
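        # Attach the destination volume first; if attaching the source fails
        # afterwards, the except block below detaches the destination again.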
dest_remote = remote in ['dest', 'both']
dest_attach_info = self._attach_volume(
ctxt, dest_vol, properties,
remote=dest_remote,
attach_encryptor=attach_encryptor)
try:
src_remote = remote in ['src', 'both']
src_attach_info = self._attach_volume(
ctxt, src_vol, properties,
remote=src_remote,
attach_encryptor=attach_encryptor)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to attach source volume for copy."))
self._detach_volume(ctxt, dest_attach_info, dest_vol,
properties, remote=dest_remote,
attach_encryptor=attach_encryptor)
# Check the backend capabilities of migration destination host.
rpcapi = volume_rpcapi.VolumeAPI()
capabilities = rpcapi.get_capabilities(ctxt, dest_vol['host'],
False)
sparse_copy_volume = bool(capabilities and
capabilities.get('sparse_copy_volume',
False))
copy_error = True
try:
size_in_mb = int(src_vol['size']) * units.Ki # vol size is in GB
vol_utils.copy_volume(src_attach_info['device']['path'],
dest_attach_info['device']['path'],
size_in_mb,
self.configuration.volume_dd_blocksize,
sparse=sparse_copy_volume)
copy_error = False
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to copy volume %(src)s to %(dest)s."),
{'src': src_vol['id'], 'dest': dest_vol['id']})
finally:
try:
self._detach_volume(ctxt, dest_attach_info, dest_vol,
properties, force=copy_error,
remote=dest_remote,
attach_encryptor=attach_encryptor)
finally:
self._detach_volume(ctxt, src_attach_info, src_vol,
properties, force=copy_error,
remote=src_remote,
attach_encryptor=attach_encryptor)
def _migrate_volume_generic(self, ctxt, volume, host, new_type_id):
rpcapi = volume_rpcapi.VolumeAPI()
# Create new volume on remote host
tmp_skip = {'snapshot_id', 'source_volid'}
skip = self._VOLUME_CLONE_SKIP_PROPERTIES | tmp_skip | {'host'}
new_vol_values = {k: volume[k] for k in set(volume.keys()) - skip}
if new_type_id:
new_vol_values['volume_type_id'] = new_type_id
if volume_types.volume_types_encryption_changed(
ctxt, volume.volume_type_id, new_type_id):
encryption_key_id = vol_utils.create_encryption_key(
ctxt, self.key_manager, new_type_id)
new_vol_values['encryption_key_id'] = encryption_key_id
new_volume = objects.Volume(
context=ctxt,
host=host['host'],
status='creating',
attach_status='detached',
migration_status='target:%s' % volume['id'],
**new_vol_values
)
new_volume.create()
rpcapi.create_volume(ctxt, new_volume, host['host'],
None, None, allow_reschedule=False)
# Wait for new_volume to become ready
starttime = time.time()
deadline = starttime + CONF.migration_create_volume_timeout_secs
# TODO(thangp): Replace get_by_id with refresh when it is available
new_volume = objects.Volume.get_by_id(ctxt, new_volume.id)
tries = 0
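        # Poll the new volume until it becomes available, backing off
        # quadratically between checks; bail out and clean up on error or
        # timeout.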
while new_volume.status != 'available':
tries += 1
now = time.time()
if new_volume.status == 'error':
msg = _("failed to create new_volume on destination host")
self._clean_temporary_volume(ctxt, volume,
new_volume,
clean_db_only=True)
raise exception.VolumeMigrationFailed(reason=msg)
elif now > deadline:
msg = _("timeout creating new_volume on destination host")
self._clean_temporary_volume(ctxt, volume,
new_volume,
clean_db_only=True)
raise exception.VolumeMigrationFailed(reason=msg)
else:
time.sleep(tries ** 2)
# TODO(thangp): Replace get_by_id with refresh when it is
# available
new_volume = objects.Volume.get_by_id(ctxt, new_volume.id)
        # snapshot_id and source_volid were excluded above so that the
        # destination volume is created as a raw volume; restore those
        # skipped values on the new volume record now.
tmp_skipped_values = {k: volume[k] for k in tmp_skip if volume.get(k)}
if tmp_skipped_values:
new_volume.update(tmp_skipped_values)
new_volume.save()
# Copy the source volume to the destination volume
try:
attachments = volume.volume_attachment
if not attachments:
# Pre- and post-copy driver-specific actions
self.driver.before_volume_copy(ctxt, volume, new_volume,
remote='dest')
self._copy_volume_data(ctxt, volume, new_volume, remote='dest')
self.driver.after_volume_copy(ctxt, volume, new_volume,
remote='dest')
# The above call is synchronous so we complete the migration
self.migrate_volume_completion(ctxt, volume, new_volume,
error=False)
else:
nova_api = compute.API()
# This is an async call to Nova, which will call the completion
# when it's done
for attachment in attachments:
instance_uuid = attachment['instance_uuid']
nova_api.update_server_volume(ctxt, instance_uuid,
volume.id,
new_volume.id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE(
"Failed to copy volume %(vol1)s to %(vol2)s"), {
'vol1': volume.id, 'vol2': new_volume.id})
self._clean_temporary_volume(ctxt, volume,
new_volume)
def _clean_temporary_volume(self, ctxt, volume, new_volume,
clean_db_only=False):
        # If we're in the migrating phase, we need to clean up the
        # destination volume because the source volume remains.
if volume.migration_status == 'migrating':
try:
if clean_db_only:
# The temporary volume is not created, only DB data
# is created
new_volume.destroy()
else:
# The temporary volume is already created
rpcapi = volume_rpcapi.VolumeAPI()
rpcapi.delete_volume(ctxt, new_volume)
except exception.VolumeNotFound:
LOG.info(_LI("Couldn't find the temporary volume "
"%(vol)s in the database. There is no need "
"to clean up this volume."),
{'vol': new_volume.id})
else:
# If we're in the completing phase don't delete the
# destination because we may have already deleted the
# source! But the migration_status in database should
# be cleared to handle volume after migration failure
try:
new_volume.migration_status = None
new_volume.save()
except exception.VolumeNotFound:
LOG.info(_LI("Couldn't find destination volume "
"%(vol)s in the database. The entry might be "
"successfully deleted during migration "
"completion phase."),
{'vol': new_volume.id})
LOG.warning(_LW("Failed to migrate volume. The destination "
"volume %(vol)s is not deleted since the "
"source volume may have been deleted."),
{'vol': new_volume.id})
def migrate_volume_completion(self, ctxt, volume, new_volume, error=False):
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the migration status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
volume.migration_status = 'error'
volume.save()
LOG.debug("migrate_volume_completion: completing migration for "
"volume %(vol1)s (temporary volume %(vol2)s",
{'vol1': volume.id, 'vol2': new_volume.id})
rpcapi = volume_rpcapi.VolumeAPI()
orig_volume_status = volume.previous_status
if error:
LOG.info(_LI("migrate_volume_completion is cleaning up an error "
"for volume %(vol1)s (temporary volume %(vol2)s"),
{'vol1': volume['id'], 'vol2': new_volume.id})
rpcapi.delete_volume(ctxt, new_volume)
updates = {'migration_status': 'error',
'status': orig_volume_status}
volume.update(updates)
volume.save()
return volume.id
volume.migration_status = 'completing'
volume.save()
        # Detach the source volume (if it fails, don't fail the migration).
        # After detach and refresh, volume_attachments will be None, so we
        # keep the attachment list for the later re-attach.
if orig_volume_status == 'in-use':
attachments = volume.volume_attachment
else:
attachments = None
try:
            for attachment in attachments or []:
self.detach_volume(ctxt, volume.id, attachment['id'])
except Exception as ex:
LOG.error(_LE("Detach migration source volume failed: %(err)s"),
{'err': ex}, resource=volume)
# Give driver (new_volume) a chance to update things as needed
# after a successful migration.
        # Note this needs to go through RPC to the host of the new volume;
        # the current host and driver object are for the "existing" volume.
rpcapi.update_migrated_volume(ctxt, volume, new_volume,
orig_volume_status)
volume.refresh()
new_volume.refresh()
# Swap src and dest DB records so we can continue using the src id and
# asynchronously delete the destination id
updated_new = volume.finish_volume_migration(new_volume)
updates = {'status': orig_volume_status,
'previous_status': volume.status,
'migration_status': 'success'}
if orig_volume_status == 'in-use':
for attachment in attachments:
rpcapi.attach_volume(ctxt, volume,
attachment['instance_uuid'],
attachment['attached_host'],
attachment['mountpoint'],
'rw')
volume.update(updates)
volume.save()
# Asynchronous deletion of the source volume in the back-end (now
        # pointed to by the target volume id)
try:
rpcapi.delete_volume(ctxt, updated_new)
except Exception as ex:
LOG.error(_LE('Failed to request async delete of migration source '
'vol %(vol)s: %(err)s'),
{'vol': volume.id, 'err': ex})
LOG.info(_LI("Complete-Migrate volume completed successfully."),
resource=volume)
return volume.id
def migrate_volume(self, ctxt, volume, host, force_host_copy=False,
new_type_id=None):
"""Migrate the volume to the specified host (called on source host)."""
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the migration status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
volume.migration_status = 'error'
volume.save()
model_update = None
moved = False
status_update = None
if volume.status in ('retyping', 'maintenance'):
status_update = {'status': volume.previous_status}
volume.migration_status = 'migrating'
volume.save()
if not force_host_copy and new_type_id is None:
try:
LOG.debug("Issue driver.migrate_volume.", resource=volume)
moved, model_update = self.driver.migrate_volume(ctxt,
volume,
host)
if moved:
updates = {'host': host['host'],
'migration_status': 'success',
'previous_status': volume.status}
if status_update:
updates.update(status_update)
if model_update:
updates.update(model_update)
volume.update(updates)
volume.save()
except Exception:
with excutils.save_and_reraise_exception():
updates = {'migration_status': 'error'}
if status_update:
updates.update(status_update)
volume.update(updates)
volume.save()
if not moved:
try:
self._migrate_volume_generic(ctxt, volume, host,
new_type_id)
except Exception:
with excutils.save_and_reraise_exception():
updates = {'migration_status': 'error'}
if status_update:
updates.update(status_update)
volume.update(updates)
volume.save()
LOG.info(_LI("Migrate volume completed successfully."),
resource=volume)
@periodic_task.periodic_task
def _report_driver_status(self, context):
if not self.driver.initialized:
if self.driver.configuration.config_group is None:
config_group = ''
else:
config_group = ('(config name %s)' %
self.driver.configuration.config_group)
LOG.warning(_LW("Update driver status failed: %(config_group)s "
"is uninitialized."),
{'config_group': config_group},
resource={'type': 'driver',
'id': self.driver.__class__.__name__})
else:
volume_stats = self.driver.get_volume_stats(refresh=True)
if self.extra_capabilities:
volume_stats.update(self.extra_capabilities)
if volume_stats:
# Append volume stats with 'allocated_capacity_gb'
self._append_volume_stats(volume_stats)
# Append filter and goodness function if needed
volume_stats = (
self._append_filter_goodness_functions(volume_stats))
# queue it to be sent to the Schedulers.
self.update_service_capabilities(volume_stats)
def _append_volume_stats(self, vol_stats):
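        # Merge the per-pool 'allocated_capacity_gb' tracked by this manager
        # into the pool entries reported by the driver, defaulting to 0 for
        # pools the manager has not tracked yet.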
pools = vol_stats.get('pools', None)
if pools and isinstance(pools, list):
for pool in pools:
pool_name = pool['pool_name']
try:
pool_stats = self.stats['pools'][pool_name]
except KeyError:
# Pool not found in volume manager
pool_stats = dict(allocated_capacity_gb=0)
pool.update(pool_stats)
def _append_filter_goodness_functions(self, volume_stats):
"""Returns volume_stats updated as needed."""
# Append filter_function if needed
if 'filter_function' not in volume_stats:
volume_stats['filter_function'] = (
self.driver.get_filter_function())
# Append goodness_function if needed
if 'goodness_function' not in volume_stats:
volume_stats['goodness_function'] = (
self.driver.get_goodness_function())
return volume_stats
def publish_service_capabilities(self, context):
"""Collect driver status and then publish."""
self._report_driver_status(context)
self._publish_service_capabilities(context)
def _notify_about_volume_usage(self,
context,
volume,
event_suffix,
extra_usage_info=None):
vol_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_snapshot_usage(self,
context,
snapshot,
event_suffix,
extra_usage_info=None):
vol_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_consistencygroup_usage(self,
context,
group,
event_suffix,
volumes=None,
extra_usage_info=None):
vol_utils.notify_about_consistencygroup_usage(
context, group, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not volumes:
volumes = self.db.volume_get_all_by_group(context, group.id)
if volumes:
for volume in volumes:
vol_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_group_usage(self,
context,
group,
event_suffix,
volumes=None,
extra_usage_info=None):
vol_utils.notify_about_group_usage(
context, group, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not volumes:
volumes = self.db.volume_get_all_by_generic_group(
context, group.id)
if volumes:
for volume in volumes:
vol_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_cgsnapshot_usage(self,
context,
cgsnapshot,
event_suffix,
snapshots=None,
extra_usage_info=None):
vol_utils.notify_about_cgsnapshot_usage(
context, cgsnapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not snapshots:
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot.id)
if snapshots:
for snapshot in snapshots:
vol_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_group_snapshot_usage(self,
context,
group_snapshot,
event_suffix,
snapshots=None,
extra_usage_info=None):
vol_utils.notify_about_group_snapshot_usage(
context, group_snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not snapshots:
snapshots = objects.SnapshotList.get_all_for_group_snapshot(
context, group_snapshot.id)
if snapshots:
for snapshot in snapshots:
vol_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def extend_volume(self, context, volume, new_size, reservations):
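        """Extend an existing volume to the requested new_size."""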
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
volume.status = 'error_extending'
volume.save()
project_id = volume.project_id
size_increase = (int(new_size)) - volume.size
self._notify_about_volume_usage(context, volume, "resize.start")
try:
self.driver.extend_volume(volume, new_size)
except Exception:
LOG.exception(_LE("Extend volume failed."),
resource=volume)
try:
self.db.volume_update(context, volume.id,
{'status': 'error_extending'})
raise exception.CinderException(_("Volume %s: Error trying "
"to extend volume") %
volume.id)
finally:
QUOTAS.rollback(context, reservations, project_id=project_id)
return
QUOTAS.commit(context, reservations, project_id=project_id)
volume.update({'size': int(new_size), 'status': 'available'})
volume.save()
pool = vol_utils.extract_host(volume.host, 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume.host, 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] += size_increase
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=size_increase)
self._notify_about_volume_usage(
context, volume, "resize.end",
extra_usage_info={'size': int(new_size)})
LOG.info(_LI("Extend volume completed successfully."),
resource=volume)
def retype(self, context, volume, new_type_id, host,
migration_policy='never', reservations=None,
old_reservations=None):
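        """Change the volume's type, falling back to a volume migration
        (subject to migration_policy) when the driver cannot retype in place.
        """
        # Helper: restore the volume's previous status and roll back both
        # sets of quota reservations when the retype fails.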
def _retype_error(context, volume, old_reservations,
new_reservations, status_update):
try:
volume.update(status_update)
volume.save()
finally:
QUOTAS.rollback(context, old_reservations)
QUOTAS.rollback(context, new_reservations)
status_update = {'status': volume.previous_status}
        project_id = volume.project_id
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
# NOTE(flaper87): Other exceptions in this method don't
# set the volume status to error. Should that be done
                # here? Setting the volume back to its original status
# for now.
volume.update(status_update)
volume.save()
# If old_reservations has been passed in from the API, we should
# skip quotas.
# TODO(ntpttr): These reservation checks are left in to be backwards
# compatible with Liberty and can be removed in N.
if not old_reservations:
# Get old reservations
try:
reserve_opts = {'volumes': -1, 'gigabytes': -volume.size}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume.volume_type_id)
                # NOTE(wanghao): We don't need to reserve volumes and gigabytes
                # quota for the retype operation since they don't change; just
                # reserving volume_type and per-type gigabytes is fine.
reserve_opts.pop('volumes')
reserve_opts.pop('gigabytes')
old_reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
volume.update(status_update)
volume.save()
msg = _("Failed to update quota usage while retyping volume.")
LOG.exception(msg, resource=volume)
raise exception.CinderException(msg)
# We already got the new reservations
new_reservations = reservations
# If volume types have the same contents, no need to do anything
retyped = False
diff, all_equal = volume_types.volume_types_diff(
context, volume.volume_type_id, new_type_id)
if all_equal:
retyped = True
# Call driver to try and change the type
retype_model_update = None
# NOTE(jdg): Check to see if the destination host is the same
# as the current. If it's not don't call the driver.retype
# method, otherwise drivers that implement retype may report
# success, but it's invalid in the case of a migrate.
# We assume that those that support pools do this internally
# so we strip off the pools designation
if (not retyped and
not diff.get('encryption') and
vol_utils.hosts_are_equivalent(self.driver.host,
host['host'])):
try:
new_type = volume_types.get_volume_type(context, new_type_id)
ret = self.driver.retype(context,
volume,
new_type,
diff,
host)
# Check if the driver retype provided a model update or
# just a retype indication
if type(ret) == tuple:
retyped, retype_model_update = ret
else:
retyped = ret
if retyped:
LOG.info(_LI("Volume %s: retyped successfully"), volume.id)
except Exception:
retyped = False
LOG.exception(_LE("Volume %s: driver error when trying to "
"retype, falling back to generic "
"mechanism."), volume.id)
# We could not change the type, so we need to migrate the volume, where
# the destination volume will be of the new type
if not retyped:
if migration_policy == 'never':
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
msg = _("Retype requires migration but is not allowed.")
raise exception.VolumeMigrationFailed(reason=msg)
snaps = objects.SnapshotList.get_all_for_volume(context,
volume.id)
if snaps:
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
msg = _("Volume must not have snapshots.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# Don't allow volume with replicas to be migrated
rep_status = volume.replication_status
if rep_status is not None and rep_status != 'disabled':
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
msg = _("Volume must not be replicated.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
volume.migration_status = 'starting'
volume.save()
try:
self.migrate_volume(context, volume, host,
new_type_id=new_type_id)
except Exception:
with excutils.save_and_reraise_exception():
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
else:
model_update = {'volume_type_id': new_type_id,
'host': host['host'],
'status': status_update['status']}
if retype_model_update:
model_update.update(retype_model_update)
volume.update(model_update)
volume.save()
if old_reservations:
QUOTAS.commit(context, old_reservations, project_id=project_id)
if new_reservations:
QUOTAS.commit(context, new_reservations, project_id=project_id)
self._notify_about_volume_usage(
context, volume, "retype",
extra_usage_info={'volume_type': new_type_id})
self.publish_service_capabilities(context)
LOG.info(_LI("Retype volume completed successfully."),
resource=volume)
def manage_existing(self, ctxt, volume, ref=None):
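        """Bring an existing backend volume under Cinder management."""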
vol_ref = self._run_manage_existing_flow_engine(
ctxt, volume, ref)
self._update_stats_for_managed(vol_ref)
LOG.info(_LI("Manage existing volume completed successfully."),
resource=vol_ref)
return vol_ref.id
def _update_stats_for_managed(self, volume_reference):
# Update volume stats
pool = vol_utils.extract_host(volume_reference.host, 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume_reference.host, 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] \
+= volume_reference.size
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=volume_reference.size)
def _run_manage_existing_flow_engine(self, ctxt, volume, ref):
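        # Build and run the manage_existing TaskFlow, then return the managed
        # volume object fetched from the flow's storage.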
try:
flow_engine = manage_existing.get_flow(
ctxt,
self.db,
self.driver,
self.host,
volume,
ref,
)
except Exception:
msg = _("Failed to create manage_existing flow.")
LOG.exception(msg, resource={'type': 'volume', 'id': volume.id})
raise exception.CinderException(msg)
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
# Fetch created volume from storage
vol_ref = flow_engine.storage.fetch('volume')
return vol_ref
def get_manageable_volumes(self, ctxt, marker, limit, offset, sort_keys,
sort_dirs):
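        """List volumes on the backend available for management, as reported
        by the driver.
        """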
try:
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Listing manageable volumes failed, due "
"to uninitialized driver."))
cinder_volumes = objects.VolumeList.get_all_by_host(ctxt, self.host)
try:
driver_entries = self.driver.get_manageable_volumes(
cinder_volumes, marker, limit, offset, sort_keys, sort_dirs)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Listing manageable volumes failed, due "
"to driver error."))
return driver_entries
def create_consistencygroup(self, context, group):
"""Creates the consistency group."""
return self._create_group(context, group, False)
def create_group(self, context, group):
"""Creates the group."""
return self._create_group(context, group)
def _create_group(self, context, group, is_generic_group=True):
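        # Shared implementation for generic groups and consistency groups;
        # generic group creation falls back to _create_group_generic when the
        # driver does not implement create_group.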
context = context.elevated()
status = fields.GroupStatus.AVAILABLE
model_update = None
if is_generic_group:
self._notify_about_group_usage(
context, group, "create.start")
else:
self._notify_about_consistencygroup_usage(
context, group, "create.start")
try:
utils.require_driver_initialized(self.driver)
LOG.info(_LI("Group %s: creating"), group.name)
if is_generic_group:
try:
model_update = self.driver.create_group(context,
group)
except NotImplementedError:
model_update = self._create_group_generic(context,
group)
else:
model_update = self.driver.create_consistencygroup(context,
group)
if model_update:
if (model_update['status'] ==
fields.GroupStatus.ERROR):
msg = (_('Create group failed.'))
LOG.error(msg,
resource={'type': 'group',
'id': group.id})
raise exception.VolumeDriverException(message=msg)
else:
group.update(model_update)
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = fields.GroupStatus.ERROR
group.save()
LOG.error(_LE("Group %s: create failed"),
group.name)
group.status = status
group.created_at = timeutils.utcnow()
group.save()
LOG.info(_LI("Group %s: created successfully"),
group.name)
if is_generic_group:
self._notify_about_group_usage(
context, group, "create.end")
else:
self._notify_about_consistencygroup_usage(
context, group, "create.end")
LOG.info(_LI("Create group completed successfully."),
resource={'type': 'group',
'id': group.id})
return group
def create_consistencygroup_from_src(self, context, group,
cgsnapshot=None, source_cg=None):
"""Creates the consistency group from source.
The source can be a CG snapshot or a source CG.
"""
source_name = None
snapshots = None
source_vols = None
try:
volumes = self.db.volume_get_all_by_group(context, group.id)
if cgsnapshot:
try:
# Check if cgsnapshot still exists
cgsnapshot = objects.CGSnapshot.get_by_id(
context, cgsnapshot.id)
except exception.CgSnapshotNotFound:
LOG.error(_LE("Create consistency group "
"from snapshot-%(snap)s failed: "
"SnapshotNotFound."),
{'snap': cgsnapshot.id},
resource={'type': 'consistency_group',
'id': group.id})
raise
source_name = _("snapshot-%s") % cgsnapshot.id
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot.id)
for snap in snapshots:
if (snap.status not in
VALID_CREATE_CG_SRC_SNAP_STATUS):
msg = (_("Cannot create consistency group "
"%(group)s because snapshot %(snap)s is "
"not in a valid state. Valid states are: "
"%(valid)s.") %
{'group': group.id,
'snap': snap['id'],
'valid': VALID_CREATE_CG_SRC_SNAP_STATUS})
raise exception.InvalidConsistencyGroup(reason=msg)
if source_cg:
try:
source_cg = objects.ConsistencyGroup.get_by_id(
context, source_cg.id)
except exception.ConsistencyGroupNotFound:
LOG.error(_LE("Create consistency group "
"from source cg-%(cg)s failed: "
"ConsistencyGroupNotFound."),
{'cg': source_cg.id},
resource={'type': 'consistency_group',
'id': group.id})
raise
source_name = _("cg-%s") % source_cg.id
source_vols = self.db.volume_get_all_by_group(
context, source_cg.id)
for source_vol in source_vols:
if (source_vol['status'] not in
VALID_CREATE_CG_SRC_CG_STATUS):
msg = (_("Cannot create consistency group "
"%(group)s because source volume "
"%(source_vol)s is not in a valid "
"state. Valid states are: "
"%(valid)s.") %
{'group': group.id,
'source_vol': source_vol['id'],
'valid': VALID_CREATE_CG_SRC_CG_STATUS})
raise exception.InvalidConsistencyGroup(reason=msg)
# Sort source snapshots so that they are in the same order as their
# corresponding target volumes.
sorted_snapshots = None
if cgsnapshot and snapshots:
sorted_snapshots = self._sort_snapshots(volumes, snapshots)
# Sort source volumes so that they are in the same order as their
# corresponding target volumes.
sorted_source_vols = None
if source_cg and source_vols:
sorted_source_vols = self._sort_source_vols(volumes,
source_vols)
self._notify_about_consistencygroup_usage(
context, group, "create.start")
utils.require_driver_initialized(self.driver)
model_update, volumes_model_update = (
self.driver.create_consistencygroup_from_src(
context, group, volumes, cgsnapshot,
sorted_snapshots, source_cg, sorted_source_vols))
if volumes_model_update:
for update in volumes_model_update:
self.db.volume_update(context, update['id'], update)
if model_update:
group.update(model_update)
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = 'error'
group.save()
LOG.error(_LE("Create consistency group "
"from source %(source)s failed."),
{'source': source_name},
resource={'type': 'consistency_group',
'id': group.id})
# Update volume status to 'error' as well.
for vol in volumes:
self.db.volume_update(
context, vol['id'], {'status': 'error'})
now = timeutils.utcnow()
status = 'available'
for vol in volumes:
update = {'status': status, 'created_at': now}
self._update_volume_from_src(context, vol, update, group=group)
self._update_allocated_capacity(vol)
group.status = status
group.created_at = now
group.save()
self._notify_about_consistencygroup_usage(
context, group, "create.end")
LOG.info(_LI("Create consistency group "
"from source-%(source)s completed successfully."),
{'source': source_name},
resource={'type': 'consistency_group',
'id': group.id})
return group
def create_group_from_src(self, context, group,
group_snapshot=None, source_group=None):
"""Creates the group from source.
The source can be a group snapshot or a source group.
"""
source_name = None
snapshots = None
source_vols = None
try:
volumes = objects.VolumeList.get_all_by_generic_group(context,
group.id)
if group_snapshot:
try:
# Check if group_snapshot still exists
group_snapshot = objects.GroupSnapshot.get_by_id(
context, group_snapshot.id)
except exception.GroupSnapshotNotFound:
LOG.error(_LE("Create group "
"from snapshot-%(snap)s failed: "
"SnapshotNotFound."),
{'snap': group_snapshot.id},
resource={'type': 'group',
'id': group.id})
raise
source_name = _("snapshot-%s") % group_snapshot.id
snapshots = objects.SnapshotList.get_all_for_group_snapshot(
context, group_snapshot.id)
for snap in snapshots:
if (snap.status not in
VALID_CREATE_GROUP_SRC_SNAP_STATUS):
msg = (_("Cannot create group "
"%(group)s because snapshot %(snap)s is "
"not in a valid state. Valid states are: "
"%(valid)s.") %
{'group': group.id,
'snap': snap['id'],
'valid': VALID_CREATE_GROUP_SRC_SNAP_STATUS})
raise exception.InvalidGroup(reason=msg)
if source_group:
try:
source_group = objects.Group.get_by_id(
context, source_group.id)
except exception.GroupNotFound:
LOG.error(_LE("Create group "
"from source group-%(group)s failed: "
"GroupNotFound."),
{'group': source_group.id},
resource={'type': 'group',
'id': group.id})
raise
source_name = _("group-%s") % source_group.id
source_vols = objects.VolumeList.get_all_by_generic_group(
context, source_group.id)
for source_vol in source_vols:
if (source_vol.status not in
VALID_CREATE_GROUP_SRC_GROUP_STATUS):
msg = (_("Cannot create group "
"%(group)s because source volume "
"%(source_vol)s is not in a valid "
"state. Valid states are: "
"%(valid)s.") %
{'group': group.id,
'source_vol': source_vol.id,
'valid': VALID_CREATE_GROUP_SRC_GROUP_STATUS})
raise exception.InvalidGroup(reason=msg)
# Sort source snapshots so that they are in the same order as their
# corresponding target volumes.
sorted_snapshots = None
if group_snapshot and snapshots:
sorted_snapshots = self._sort_snapshots(volumes, snapshots)
# Sort source volumes so that they are in the same order as their
# corresponding target volumes.
sorted_source_vols = None
if source_group and source_vols:
sorted_source_vols = self._sort_source_vols(volumes,
source_vols)
self._notify_about_group_usage(
context, group, "create.start")
utils.require_driver_initialized(self.driver)
try:
model_update, volumes_model_update = (
self.driver.create_group_from_src(
context, group, volumes, group_snapshot,
sorted_snapshots, source_group, sorted_source_vols))
except NotImplementedError:
model_update, volumes_model_update = (
self._create_group_from_src_generic(
context, group, volumes, group_snapshot,
sorted_snapshots, source_group, sorted_source_vols))
if volumes_model_update:
for update in volumes_model_update:
self.db.volume_update(context, update['id'], update)
if model_update:
group.update(model_update)
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = 'error'
group.save()
LOG.error(_LE("Create group "
"from source %(source)s failed."),
{'source': source_name},
resource={'type': 'group',
'id': group.id})
# Update volume status to 'error' as well.
for vol in volumes:
vol.status = 'error'
vol.save()
now = timeutils.utcnow()
status = 'available'
for vol in volumes:
update = {'status': status, 'created_at': now}
self._update_volume_from_src(context, vol, update, group=group)
self._update_allocated_capacity(vol)
group.status = status
group.created_at = now
group.save()
self._notify_about_group_usage(
context, group, "create.end")
LOG.info(_LI("Create group "
"from source-%(source)s completed successfully."),
{'source': source_name},
resource={'type': 'group',
'id': group.id})
return group
def _create_group_from_src_generic(self, context, group, volumes,
group_snapshot=None, snapshots=None,
source_group=None, source_vols=None):
"""Creates a group from source.
:param context: the context of the caller.
:param group: the Group object to be created.
:param volumes: a list of volume objects in the group.
:param group_snapshot: the GroupSnapshot object as source.
:param snapshots: a list of snapshot objects in group_snapshot.
:param source_group: the Group object as source.
:param source_vols: a list of volume objects in the source_group.
:returns: model_update, volumes_model_update
"""
        for vol in volumes:
            if snapshots:
                for snapshot in snapshots:
                    if vol.snapshot_id == snapshot.id:
                        self.driver.create_volume_from_snapshot(
                            vol, snapshot)
                        break
            if source_vols:
                for source_vol in source_vols:
                    if vol.source_volid == source_vol.id:
                        self.driver.create_cloned_volume(vol, source_vol)
                        break
        return None, None
def _sort_snapshots(self, volumes, snapshots):
# Sort source snapshots so that they are in the same order as their
# corresponding target volumes. Each source snapshot in the snapshots
# list should have a corresponding target volume in the volumes list.
if not volumes or not snapshots or len(volumes) != len(snapshots):
msg = _("Input volumes or snapshots are invalid.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
sorted_snapshots = []
for vol in volumes:
found_snaps = [snap for snap in snapshots
if snap['id'] == vol['snapshot_id']]
if not found_snaps:
LOG.error(_LE("Source snapshot cannot be found for target "
"volume %(volume_id)s."),
{'volume_id': vol['id']})
raise exception.SnapshotNotFound(
snapshot_id=vol['snapshot_id'])
sorted_snapshots.extend(found_snaps)
return sorted_snapshots
def _sort_source_vols(self, volumes, source_vols):
# Sort source volumes so that they are in the same order as their
# corresponding target volumes. Each source volume in the source_vols
# list should have a corresponding target volume in the volumes list.
if not volumes or not source_vols or len(volumes) != len(source_vols):
msg = _("Input volumes or source volumes are invalid.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
sorted_source_vols = []
for vol in volumes:
found_source_vols = [source_vol for source_vol in source_vols
if source_vol['id'] == vol['source_volid']]
if not found_source_vols:
LOG.error(_LE("Source volumes cannot be found for target "
"volume %(volume_id)s."),
{'volume_id': vol['id']})
raise exception.VolumeNotFound(
volume_id=vol['source_volid'])
sorted_source_vols.extend(found_source_vols)
return sorted_source_vols
def _update_volume_from_src(self, context, vol, update, group=None):
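        # Copy the bootable flag and Glance image metadata from the source
        # snapshot's original volume (if any) before applying the update;
        # the volume (and group, if given) are marked 'error' on failure.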
try:
snapshot_id = vol.get('snapshot_id')
if snapshot_id:
snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
orig_vref = self.db.volume_get(context,
snapshot.volume_id)
if orig_vref.bootable:
update['bootable'] = True
self.db.volume_glance_metadata_copy_to_volume(
context, vol['id'], snapshot_id)
except exception.SnapshotNotFound:
LOG.error(_LE("Source snapshot %(snapshot_id)s cannot be found."),
{'snapshot_id': vol['snapshot_id']})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group:
group.status = 'error'
group.save()
raise
except exception.VolumeNotFound:
LOG.error(_LE("The source volume %(volume_id)s "
"cannot be found."),
{'volume_id': snapshot.volume_id})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group:
group.status = 'error'
group.save()
raise
except exception.CinderException as ex:
LOG.error(_LE("Failed to update %(volume_id)s"
" metadata using the provided snapshot"
" %(snapshot_id)s metadata."),
{'volume_id': vol['id'],
'snapshot_id': vol['snapshot_id']})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group:
group.status = 'error'
group.save()
raise exception.MetadataCopyFailure(reason=six.text_type(ex))
self.db.volume_update(context, vol['id'], update)
def _update_allocated_capacity(self, vol):
# Update allocated capacity in volume stats
pool = vol_utils.extract_host(vol['host'], 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
vol['host'], 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] += (
vol['size'])
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=vol['size'])
def delete_consistencygroup(self, context, group):
"""Deletes consistency group and the volumes in the group."""
context = context.elevated()
        project_id = group.project_id
volumes = self.db.volume_get_all_by_group(context, group.id)
for volume_ref in volumes:
if volume_ref['attach_status'] == "attached":
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=volume_ref['id'])
# self.host is 'host@backend'
# volume_ref['host'] is 'host@backend#pool'
# Extract host before doing comparison
if volume_ref['host']:
new_host = vol_utils.extract_host(volume_ref['host'])
if new_host != self.host:
raise exception.InvalidVolume(
reason=_("Volume is not local to this node"))
self._notify_about_consistencygroup_usage(
context, group, "delete.start")
volumes_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
model_update, volumes_model_update = (
self.driver.delete_consistencygroup(context, group, volumes))
if volumes_model_update:
for volume in volumes_model_update:
update = {'status': volume['status']}
self.db.volume_update(context, volume['id'],
update)
# If we failed to delete a volume, make sure the status
# for the cg is set to error as well
if (volume['status'] in ['error_deleting', 'error'] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = volume['status']
if model_update:
if model_update['status'] in ['error_deleting', 'error']:
msg = (_('Delete consistency group failed.'))
LOG.error(msg,
resource={'type': 'consistency_group',
'id': group.id})
raise exception.VolumeDriverException(message=msg)
else:
group.update(model_update)
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = 'error'
group.save()
# Update volume status to 'error' if driver returns
# None for volumes_model_update.
if not volumes_model_update:
for vol in volumes:
self.db.volume_update(
context, vol['id'], {'status': 'error'})
# Get reservations for group
try:
reserve_opts = {'consistencygroups': -1}
cgreservations = CGQUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
cgreservations = None
LOG.exception(_LE("Delete consistency group "
"failed to update usages."),
resource={'type': 'consistency_group',
'id': group.id})
for volume_ref in volumes:
# Get reservations for volume
try:
volume_id = volume_ref['id']
reserve_opts = {'volumes': -1,
'gigabytes': -volume_ref['size']}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Delete consistency group "
"failed to update usages."),
resource={'type': 'consistency_group',
'id': group.id})
# Delete glance metadata if it exists
self.db.volume_glance_metadata_delete_by_volume(context, volume_id)
self.db.volume_destroy(context, volume_id)
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
self.stats['allocated_capacity_gb'] -= volume_ref['size']
if cgreservations:
CGQUOTAS.commit(context, cgreservations,
project_id=project_id)
group.destroy()
self._notify_about_consistencygroup_usage(
context, group, "delete.end", volumes)
self.publish_service_capabilities(context)
LOG.info(_LI("Delete consistency group "
"completed successfully."),
resource={'type': 'consistency_group',
'id': group.id})
def delete_group(self, context, group):
"""Deletes group and the volumes in the group."""
context = context.elevated()
        project_id = group.project_id
volumes = objects.VolumeList.get_all_by_generic_group(
context, group.id)
for vol_obj in volumes:
if vol_obj.attach_status == "attached":
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=vol_obj.id)
# self.host is 'host@backend'
# vol_obj.host is 'host@backend#pool'
# Extract host before doing comparison
if vol_obj.host:
new_host = vol_utils.extract_host(vol_obj.host)
msg = (_("Volume %(vol_id)s is not local to this node "
"%(host)s") % {'vol_id': vol_obj.id,
'host': self.host})
if new_host != self.host:
raise exception.InvalidVolume(reason=msg)
self._notify_about_group_usage(
context, group, "delete.start")
volumes_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
try:
model_update, volumes_model_update = (
self.driver.delete_group(context, group, volumes))
except NotImplementedError:
model_update, volumes_model_update = (
self._delete_group_generic(context, group, volumes))
if volumes_model_update:
for update in volumes_model_update:
# If we failed to delete a volume, make sure the
# status for the group is set to error as well
if (update['status'] in ['error_deleting', 'error']
and model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = update['status']
self.db.volumes_update(context, volumes_model_update)
if model_update:
if model_update['status'] in ['error_deleting', 'error']:
msg = (_('Delete group failed.'))
LOG.error(msg,
resource={'type': 'group',
'id': group.id})
raise exception.VolumeDriverException(message=msg)
else:
group.update(model_update)
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = 'error'
group.save()
# Update volume status to 'error' if driver returns
# None for volumes_model_update.
if not volumes_model_update:
for vol_obj in volumes:
vol_obj.status = 'error'
vol_obj.save()
# Get reservations for group
try:
reserve_opts = {'groups': -1}
grpreservations = GROUP_QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
grpreservations = None
LOG.exception(_LE("Delete group "
"failed to update usages."),
resource={'type': 'group',
'id': group.id})
for vol in volumes:
# Get reservations for volume
try:
reserve_opts = {'volumes': -1,
'gigabytes': -vol.size}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
vol.volume_type_id)
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Delete group "
"failed to update usages."),
resource={'type': 'group',
'id': group.id})
# Delete glance metadata if it exists
self.db.volume_glance_metadata_delete_by_volume(context, vol.id)
vol.destroy()
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
self.stats['allocated_capacity_gb'] -= vol.size
if grpreservations:
GROUP_QUOTAS.commit(context, grpreservations,
project_id=project_id)
group.destroy()
self._notify_about_group_usage(
context, group, "delete.end")
self.publish_service_capabilities(context)
LOG.info(_LI("Delete group "
"completed successfully."),
resource={'type': 'group',
'id': group.id})
def _create_group_generic(self, context, group):
"""Creates a group."""
        # A group entry is already created in the db. Just return a status here.
model_update = {'status': fields.GroupStatus.AVAILABLE,
'created_at': timeutils.utcnow()}
return model_update
def _delete_group_generic(self, context, group, volumes):
"""Deletes a group and volumes in the group."""
model_update = {'status': group.status}
volume_model_updates = []
for volume_ref in volumes:
volume_model_update = {'id': volume_ref.id}
try:
self.driver.remove_export(context, volume_ref)
self.driver.delete_volume(volume_ref)
volume_model_update['status'] = 'deleted'
except exception.VolumeIsBusy:
volume_model_update['status'] = 'available'
except Exception:
volume_model_update['status'] = 'error'
model_update['status'] = fields.GroupStatus.ERROR
volume_model_updates.append(volume_model_update)
return model_update, volume_model_updates
def _update_group_generic(self, context, group,
add_volumes=None, remove_volumes=None):
"""Updates a group."""
# NOTE(xyang): The volume manager adds/removes the volume to/from the
# group in the database. This default implementation does not do
# anything in the backend storage.
return None, None, None
def update_consistencygroup(self, context, group,
add_volumes=None, remove_volumes=None):
"""Updates consistency group.
Update consistency group by adding volumes to the group,
or removing volumes from the group.
"""
add_volumes_ref = []
remove_volumes_ref = []
add_volumes_list = []
remove_volumes_list = []
if add_volumes:
add_volumes_list = add_volumes.split(',')
if remove_volumes:
remove_volumes_list = remove_volumes.split(',')
for add_vol in add_volumes_list:
try:
add_vol_ref = self.db.volume_get(context, add_vol)
except exception.VolumeNotFound:
LOG.error(_LE("Update consistency group "
"failed to add volume-%(volume_id)s: "
"VolumeNotFound."),
                          {'volume_id': add_vol},
resource={'type': 'consistency_group',
'id': group.id})
raise
if add_vol_ref['status'] not in VALID_ADD_VOL_TO_CG_STATUS:
msg = (_("Cannot add volume %(volume_id)s to consistency "
"group %(group_id)s because volume is in an invalid "
"state: %(status)s. Valid states are: %(valid)s.") %
{'volume_id': add_vol_ref['id'],
'group_id': group.id,
'status': add_vol_ref['status'],
'valid': VALID_ADD_VOL_TO_CG_STATUS})
raise exception.InvalidVolume(reason=msg)
# self.host is 'host@backend'
            # add_vol_ref['host'] is 'host@backend#pool'
# Extract host before doing comparison
new_host = vol_utils.extract_host(add_vol_ref['host'])
if new_host != self.host:
raise exception.InvalidVolume(
reason=_("Volume is not local to this node."))
add_volumes_ref.append(add_vol_ref)
for remove_vol in remove_volumes_list:
try:
remove_vol_ref = self.db.volume_get(context, remove_vol)
except exception.VolumeNotFound:
LOG.error(_LE("Update consistency group "
"failed to remove volume-%(volume_id)s: "
"VolumeNotFound."),
                          {'volume_id': remove_vol},
resource={'type': 'consistency_group',
'id': group.id})
raise
if remove_vol_ref['status'] not in VALID_REMOVE_VOL_FROM_CG_STATUS:
msg = (_("Cannot remove volume %(volume_id)s from consistency "
"group %(group_id)s because volume is in an invalid "
"state: %(status)s. Valid states are: %(valid)s.") %
{'volume_id': remove_vol_ref['id'],
'group_id': group.id,
'status': remove_vol_ref['status'],
'valid': VALID_REMOVE_VOL_FROM_CG_STATUS})
raise exception.InvalidVolume(reason=msg)
remove_volumes_ref.append(remove_vol_ref)
self._notify_about_consistencygroup_usage(
context, group, "update.start")
try:
utils.require_driver_initialized(self.driver)
model_update, add_volumes_update, remove_volumes_update = (
self.driver.update_consistencygroup(
context, group,
add_volumes=add_volumes_ref,
remove_volumes=remove_volumes_ref))
if add_volumes_update:
for update in add_volumes_update:
self.db.volume_update(context, update['id'], update)
if remove_volumes_update:
for update in remove_volumes_update:
self.db.volume_update(context, update['id'], update)
if model_update:
if model_update['status'] in (
[fields.ConsistencyGroupStatus.ERROR]):
msg = (_('Error occurred when updating consistency group '
'%s.') % group.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
group.update(model_update)
group.save()
except exception.VolumeDriverException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred in the volume driver when "
"updating consistency group %(group_id)s."),
{'group_id': group.id})
group.status = 'error'
group.save()
for add_vol in add_volumes_ref:
self.db.volume_update(context, add_vol['id'],
{'status': 'error'})
for rem_vol in remove_volumes_ref:
self.db.volume_update(context, rem_vol['id'],
{'status': 'error'})
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred when updating consistency "
"group %(group_id)s."),
{'group_id': group.id})
group.status = 'error'
group.save()
for add_vol in add_volumes_ref:
self.db.volume_update(context, add_vol['id'],
{'status': 'error'})
for rem_vol in remove_volumes_ref:
self.db.volume_update(context, rem_vol['id'],
{'status': 'error'})
now = timeutils.utcnow()
group.status = 'available'
        group.updated_at = now
group.save()
for add_vol in add_volumes_ref:
self.db.volume_update(context, add_vol['id'],
{'consistencygroup_id': group.id,
'updated_at': now})
for rem_vol in remove_volumes_ref:
self.db.volume_update(context, rem_vol['id'],
{'consistencygroup_id': None,
'updated_at': now})
self._notify_about_consistencygroup_usage(
context, group, "update.end")
LOG.info(_LI("Update consistency group "
"completed successfully."),
resource={'type': 'consistency_group',
'id': group.id})
def update_group(self, context, group,
add_volumes=None, remove_volumes=None):
"""Updates group.
Update group by adding volumes to the group,
or removing volumes from the group.
"""
add_volumes_ref = []
remove_volumes_ref = []
add_volumes_list = []
remove_volumes_list = []
if add_volumes:
add_volumes_list = add_volumes.split(',')
if remove_volumes:
remove_volumes_list = remove_volumes.split(',')
for add_vol in add_volumes_list:
try:
add_vol_ref = objects.Volume.get_by_id(context, add_vol)
except exception.VolumeNotFound:
LOG.error(_LE("Update group "
"failed to add volume-%(volume_id)s: "
"VolumeNotFound."),
                          {'volume_id': add_vol},
resource={'type': 'group',
'id': group.id})
raise
if add_vol_ref.status not in VALID_ADD_VOL_TO_GROUP_STATUS:
msg = (_("Cannot add volume %(volume_id)s to "
"group %(group_id)s because volume is in an invalid "
"state: %(status)s. Valid states are: %(valid)s.") %
{'volume_id': add_vol_ref.id,
'group_id': group.id,
'status': add_vol_ref.status,
'valid': VALID_ADD_VOL_TO_GROUP_STATUS})
raise exception.InvalidVolume(reason=msg)
# self.host is 'host@backend'
            # add_vol_ref.host is 'host@backend#pool'
# Extract host before doing comparison
new_host = vol_utils.extract_host(add_vol_ref.host)
if new_host != self.host:
raise exception.InvalidVolume(
reason=_("Volume is not local to this node."))
add_volumes_ref.append(add_vol_ref)
for remove_vol in remove_volumes_list:
try:
remove_vol_ref = objects.Volume.get_by_id(context, remove_vol)
except exception.VolumeNotFound:
LOG.error(_LE("Update group "
"failed to remove volume-%(volume_id)s: "
"VolumeNotFound."),
                          {'volume_id': remove_vol},
resource={'type': 'group',
'id': group.id})
raise
if (remove_vol_ref.status not in
VALID_REMOVE_VOL_FROM_GROUP_STATUS):
msg = (_("Cannot remove volume %(volume_id)s from "
"group %(group_id)s because volume is in an invalid "
"state: %(status)s. Valid states are: %(valid)s.") %
{'volume_id': remove_vol_ref.id,
'group_id': group.id,
'status': remove_vol_ref.status,
'valid': VALID_REMOVE_VOL_FROM_GROUP_STATUS})
raise exception.InvalidVolume(reason=msg)
remove_volumes_ref.append(remove_vol_ref)
self._notify_about_group_usage(
context, group, "update.start")
try:
utils.require_driver_initialized(self.driver)
try:
model_update, add_volumes_update, remove_volumes_update = (
self.driver.update_group(
context, group,
add_volumes=add_volumes_ref,
remove_volumes=remove_volumes_ref))
except NotImplementedError:
model_update, add_volumes_update, remove_volumes_update = (
self._update_group_generic(
context, group,
add_volumes=add_volumes_ref,
remove_volumes=remove_volumes_ref))
if add_volumes_update:
self.db.volumes_update(context, add_volumes_update)
if remove_volumes_update:
self.db.volumes_update(context, remove_volumes_update)
if model_update:
if model_update['status'] in (
[fields.GroupStatus.ERROR]):
msg = (_('Error occurred when updating group '
'%s.') % group.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
group.update(model_update)
group.save()
except exception.VolumeDriverException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred in the volume driver when "
"updating group %(group_id)s."),
{'group_id': group.id})
group.status = 'error'
group.save()
for add_vol in add_volumes_ref:
add_vol.status = 'error'
add_vol.save()
for rem_vol in remove_volumes_ref:
rem_vol.status = 'error'
rem_vol.save()
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred when updating "
"group %(group_id)s."),
{'group_id': group.id})
group.status = 'error'
group.save()
for add_vol in add_volumes_ref:
add_vol.status = 'error'
add_vol.save()
for rem_vol in remove_volumes_ref:
rem_vol.status = 'error'
rem_vol.save()
group.status = 'available'
group.save()
for add_vol in add_volumes_ref:
add_vol.group_id = group.id
add_vol.save()
for rem_vol in remove_volumes_ref:
rem_vol.group_id = None
rem_vol.save()
self._notify_about_group_usage(
context, group, "update.end")
LOG.info(_LI("Update group completed successfully."),
resource={'type': 'group',
'id': group.id})
def create_cgsnapshot(self, context, cgsnapshot):
"""Creates the cgsnapshot."""
caller_context = context
context = context.elevated()
LOG.info(_LI("Cgsnapshot %s: creating."), cgsnapshot.id)
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot.id)
self._notify_about_cgsnapshot_usage(
context, cgsnapshot, "create.start")
snapshots_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
LOG.debug("Cgsnapshot %(cgsnap_id)s: creating.",
{'cgsnap_id': cgsnapshot.id})
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
cgsnapshot.context = caller_context
for snapshot in snapshots:
snapshot.context = caller_context
model_update, snapshots_model_update = (
self.driver.create_cgsnapshot(context, cgsnapshot,
snapshots))
if snapshots_model_update:
for snap_model in snapshots_model_update:
# Update db for snapshot.
# NOTE(xyang): snapshots is a list of snapshot objects.
# snapshots_model_update should be a list of dicts.
self.db.snapshot_update(context,
snap_model['id'],
snap_model)
if (snap_model['status'] in [
fields.SnapshotStatus.ERROR_DELETING,
fields.SnapshotStatus.ERROR] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = snap_model['status']
if model_update:
if model_update['status'] == 'error':
msg = (_('Error occurred when creating cgsnapshot '
'%s.') % cgsnapshot.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
cgsnapshot.update(model_update)
cgsnapshot.save()
except exception.CinderException:
with excutils.save_and_reraise_exception():
cgsnapshot.status = 'error'
cgsnapshot.save()
# Update snapshot status to 'error' if driver returns
# None for snapshots_model_update.
if not snapshots_model_update:
for snapshot in snapshots:
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
for snapshot in snapshots:
volume_id = snapshot['volume_id']
snapshot_id = snapshot['id']
vol_ref = self.db.volume_get(context, volume_id)
if vol_ref.bootable:
try:
self.db.volume_glance_metadata_copy_to_snapshot(
context, snapshot_id, volume_id)
except exception.GlanceMetadataNotFound:
                    # If the volume is not created from an image, no Glance
                    # metadata would be available for that volume in the
                    # volume glance metadata table.
pass
except exception.CinderException as ex:
LOG.error(_LE("Failed updating %(snapshot_id)s"
" metadata using the provided volumes"
" %(volume_id)s metadata"),
{'volume_id': volume_id,
'snapshot_id': snapshot_id})
# TODO(thangp): Switch over to use snapshot.update()
# after cgsnapshot-objects bugs are fixed
self.db.snapshot_update(
context, snapshot_id, {
'status': fields.SnapshotStatus.ERROR})
raise exception.MetadataCopyFailure(
reason=six.text_type(ex))
self.db.snapshot_update(context,
snapshot['id'],
{'status': fields.SnapshotStatus.AVAILABLE,
'progress': '100%'})
cgsnapshot.status = 'available'
cgsnapshot.save()
LOG.info(_LI("cgsnapshot %s: created successfully"),
cgsnapshot.id)
self._notify_about_cgsnapshot_usage(
context, cgsnapshot, "create.end")
return cgsnapshot
def create_group_snapshot(self, context, group_snapshot):
"""Creates the group_snapshot."""
caller_context = context
context = context.elevated()
LOG.info(_LI("GroupSnapshot %s: creating."), group_snapshot.id)
snapshots = objects.SnapshotList.get_all_for_group_snapshot(
context, group_snapshot.id)
self._notify_about_group_snapshot_usage(
context, group_snapshot, "create.start")
snapshots_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
LOG.debug("Group snapshot %(grp_snap_id)s: creating.",
{'grp_snap_id': group_snapshot.id})
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
group_snapshot.context = caller_context
for snapshot in snapshots:
snapshot.context = caller_context
try:
model_update, snapshots_model_update = (
self.driver.create_group_snapshot(context, group_snapshot,
snapshots))
except NotImplementedError:
model_update, snapshots_model_update = (
self._create_group_snapshot_generic(
context, group_snapshot, snapshots))
if snapshots_model_update:
for snap_model in snapshots_model_update:
# Update db for snapshot.
# NOTE(xyang): snapshots is a list of snapshot objects.
# snapshots_model_update should be a list of dicts.
snap_id = snap_model.pop('id')
snap_obj = objects.Snapshot.get_by_id(context, snap_id)
snap_obj.update(snap_model)
snap_obj.save()
if (snap_model['status'] in [
fields.SnapshotStatus.ERROR_DELETING,
fields.SnapshotStatus.ERROR] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = snap_model['status']
if model_update:
if model_update['status'] == 'error':
msg = (_('Error occurred when creating group_snapshot '
'%s.') % group_snapshot.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
group_snapshot.update(model_update)
group_snapshot.save()
except exception.CinderException:
with excutils.save_and_reraise_exception():
group_snapshot.status = 'error'
group_snapshot.save()
# Update snapshot status to 'error' if driver returns
# None for snapshots_model_update.
if not snapshots_model_update:
for snapshot in snapshots:
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
for snapshot in snapshots:
volume_id = snapshot.volume_id
snapshot_id = snapshot.id
vol_obj = objects.Volume.get_by_id(context, volume_id)
if vol_obj.bootable:
try:
self.db.volume_glance_metadata_copy_to_snapshot(
context, snapshot_id, volume_id)
except exception.GlanceMetadataNotFound:
                    # If the volume is not created from an image, no Glance
                    # metadata would be available for that volume in the
                    # volume glance metadata table.
pass
except exception.CinderException as ex:
LOG.error(_LE("Failed updating %(snapshot_id)s"
" metadata using the provided volumes"
" %(volume_id)s metadata"),
{'volume_id': volume_id,
'snapshot_id': snapshot_id})
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
raise exception.MetadataCopyFailure(
reason=six.text_type(ex))
snapshot.status = fields.SnapshotStatus.AVAILABLE
snapshot.progress = '100%'
snapshot.save()
group_snapshot.status = 'available'
group_snapshot.save()
LOG.info(_LI("group_snapshot %s: created successfully"),
group_snapshot.id)
self._notify_about_group_snapshot_usage(
context, group_snapshot, "create.end")
return group_snapshot
def _create_group_snapshot_generic(self, context, group_snapshot,
snapshots):
"""Creates a group_snapshot."""
model_update = {'status': 'available'}
snapshot_model_updates = []
for snapshot in snapshots:
snapshot_model_update = {'id': snapshot.id}
try:
self.driver.create_snapshot(snapshot)
snapshot_model_update['status'] = 'available'
except Exception:
snapshot_model_update['status'] = 'error'
model_update['status'] = 'error'
snapshot_model_updates.append(snapshot_model_update)
return model_update, snapshot_model_updates
def _delete_group_snapshot_generic(self, context, group_snapshot,
snapshots):
"""Deletes a group_snapshot."""
model_update = {'status': group_snapshot.status}
snapshot_model_updates = []
for snapshot in snapshots:
snapshot_model_update = {'id': snapshot.id}
try:
self.driver.delete_snapshot(snapshot)
snapshot_model_update['status'] = 'deleted'
except exception.SnapshotIsBusy:
snapshot_model_update['status'] = 'available'
except Exception:
snapshot_model_update['status'] = 'error'
model_update['status'] = 'error'
snapshot_model_updates.append(snapshot_model_update)
return model_update, snapshot_model_updates
def delete_cgsnapshot(self, context, cgsnapshot):
"""Deletes cgsnapshot."""
caller_context = context
context = context.elevated()
project_id = cgsnapshot.project_id
LOG.info(_LI("cgsnapshot %s: deleting"), cgsnapshot.id)
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot.id)
self._notify_about_cgsnapshot_usage(
context, cgsnapshot, "delete.start")
snapshots_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
LOG.debug("cgsnapshot %(cgsnap_id)s: deleting",
{'cgsnap_id': cgsnapshot.id})
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
cgsnapshot.context = caller_context
for snapshot in snapshots:
snapshot.context = caller_context
model_update, snapshots_model_update = (
self.driver.delete_cgsnapshot(context, cgsnapshot,
snapshots))
if snapshots_model_update:
for snap_model in snapshots_model_update:
# NOTE(xyang): snapshots is a list of snapshot objects.
# snapshots_model_update should be a list of dicts.
snap = next((item for item in snapshots if
item.id == snap_model['id']), None)
if snap:
snap.status = snap_model['status']
snap.save()
if (snap_model['status'] in
[fields.SnapshotStatus.ERROR_DELETING,
fields.SnapshotStatus.ERROR] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = snap_model['status']
if model_update:
if model_update['status'] in ['error_deleting', 'error']:
msg = (_('Error occurred when deleting cgsnapshot '
'%s.') % cgsnapshot.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
else:
cgsnapshot.update(model_update)
cgsnapshot.save()
except exception.CinderException:
with excutils.save_and_reraise_exception():
cgsnapshot.status = 'error'
cgsnapshot.save()
# Update snapshot status to 'error' if driver returns
# None for snapshots_model_update.
if not snapshots_model_update:
for snapshot in snapshots:
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
for snapshot in snapshots:
# Get reservations
try:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': -1}
else:
reserve_opts = {
'snapshots': -1,
'gigabytes': -snapshot['volume_size'],
}
volume_ref = self.db.volume_get(context, snapshot['volume_id'])
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Failed to update usages deleting snapshot"))
self.db.volume_glance_metadata_delete_by_snapshot(context,
snapshot['id'])
# TODO(thangp): Switch over to use snapshot.destroy()
# after cgsnapshot-objects bugs are fixed
self.db.snapshot_destroy(context, snapshot['id'])
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
cgsnapshot.destroy()
LOG.info(_LI("cgsnapshot %s: deleted successfully"), cgsnapshot.id)
self._notify_about_cgsnapshot_usage(context, cgsnapshot, "delete.end",
snapshots)
def delete_group_snapshot(self, context, group_snapshot):
"""Deletes group_snapshot."""
caller_context = context
context = context.elevated()
project_id = group_snapshot.project_id
LOG.info(_LI("group_snapshot %s: deleting"), group_snapshot.id)
snapshots = objects.SnapshotList.get_all_for_group_snapshot(
context, group_snapshot.id)
self._notify_about_group_snapshot_usage(
context, group_snapshot, "delete.start")
snapshots_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
LOG.debug("group_snapshot %(grp_snap_id)s: deleting",
{'grp_snap_id': group_snapshot.id})
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
group_snapshot.context = caller_context
for snapshot in snapshots:
snapshot.context = caller_context
try:
model_update, snapshots_model_update = (
self.driver.delete_group_snapshot(context, group_snapshot,
snapshots))
except NotImplementedError:
model_update, snapshots_model_update = (
self._delete_group_snapshot_generic(
context, group_snapshot, snapshots))
if snapshots_model_update:
for snap_model in snapshots_model_update:
# NOTE(xyang): snapshots is a list of snapshot objects.
# snapshots_model_update should be a list of dicts.
snap = next((item for item in snapshots if
item.id == snap_model['id']), None)
if snap:
snap_model.pop('id')
snap.update(snap_model)
snap.save()
if (snap_model['status'] in
[fields.SnapshotStatus.ERROR_DELETING,
fields.SnapshotStatus.ERROR] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = snap_model['status']
if model_update:
if model_update['status'] in ['error_deleting', 'error']:
msg = (_('Error occurred when deleting group_snapshot '
'%s.') % group_snapshot.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
else:
group_snapshot.update(model_update)
group_snapshot.save()
except exception.CinderException:
with excutils.save_and_reraise_exception():
group_snapshot.status = 'error'
group_snapshot.save()
# Update snapshot status to 'error' if driver returns
# None for snapshots_model_update.
if not snapshots_model_update:
for snapshot in snapshots:
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
for snapshot in snapshots:
# Get reservations
try:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': -1}
else:
reserve_opts = {
'snapshots': -1,
'gigabytes': -snapshot.volume_size,
}
volume_ref = objects.Volume.get_by_id(context,
snapshot.volume_id)
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.volume_type_id)
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Failed to update usages deleting snapshot"))
self.db.volume_glance_metadata_delete_by_snapshot(context,
snapshot.id)
snapshot.destroy()
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
group_snapshot.destroy()
LOG.info(_LI("group_snapshot %s: deleted successfully"),
group_snapshot.id)
self._notify_about_group_snapshot_usage(context, group_snapshot,
"delete.end",
snapshots)
def update_migrated_volume(self, ctxt, volume, new_volume, volume_status):
"""Finalize migration process on backend device."""
model_update = None
model_update_default = {'_name_id': new_volume.name_id,
'provider_location':
new_volume.provider_location}
try:
model_update = self.driver.update_migrated_volume(ctxt,
volume,
new_volume,
volume_status)
except NotImplementedError:
# If update_migrated_volume is not implemented for the driver,
# _name_id and provider_location will be set with the values
# from new_volume.
model_update = model_update_default
if model_update:
model_update_default.update(model_update)
# Swap keys that were changed in the source so we keep their values
# in the temporary volume's DB record.
# Need to convert 'metadata' and 'admin_metadata' since
# they are not keys of volume, their corresponding keys are
# 'volume_metadata' and 'volume_admin_metadata'.
model_update_new = dict()
for key in model_update:
if key == 'metadata':
if volume.get('volume_metadata'):
model_update_new[key] = {
metadata['key']: metadata['value']
for metadata in volume.volume_metadata}
elif key == 'admin_metadata':
model_update_new[key] = {
metadata['key']: metadata['value']
for metadata in volume.volume_admin_metadata}
else:
model_update_new[key] = volume[key]
with new_volume.obj_as_admin():
new_volume.update(model_update_new)
new_volume.save()
with volume.obj_as_admin():
volume.update(model_update_default)
volume.save()
# Replication V2.1 methods
def failover_host(self, context,
secondary_backend_id=None):
"""Failover a backend to a secondary replication target.
        Instructs a replication capable/configured backend to failover
        to one of its secondary replication targets. host=None is
        an acceptable input, and leaves it to the driver to failover
        to the only configured target, or to choose a target on its
        own. All of the host's volumes will be passed on to the driver
        in order for it to determine the replicated volumes on the host,
        if needed.
:param context: security context
:param secondary_backend_id: Specifies backend_id to fail over to
"""
svc_host = vol_utils.extract_host(self.host, 'backend')
service = objects.Service.get_by_args(
context,
svc_host,
constants.VOLUME_BINARY)
volumes = objects.VolumeList.get_all_by_host(context, self.host)
exception_encountered = False
try:
# expected form of volume_update_list:
# [{volume_id: <cinder-volid>, updates: {'provider_id': xxxx....}},
# {volume_id: <cinder-volid>, updates: {'provider_id': xxxx....}}]
(active_backend_id, volume_update_list) = (
self.driver.failover_host(
context,
volumes,
secondary_id=secondary_backend_id))
except exception.UnableToFailOver:
LOG.exception(_LE("Failed to perform replication failover"))
service.replication_status = (
fields.ReplicationStatus.FAILOVER_ERROR)
service.save()
exception_encountered = True
except exception.InvalidReplicationTarget:
LOG.exception(_LE("Invalid replication target specified "
"for failover"))
# Preserve the replication_status
if secondary_backend_id == "default":
service.replication_status = (
fields.ReplicationStatus.FAILED_OVER)
else:
service.replication_status = fields.ReplicationStatus.ENABLED
service.save()
exception_encountered = True
except exception.VolumeDriverException:
# NOTE(jdg): Drivers need to be aware if they fail during
# a failover sequence, we're expecting them to cleanup
# and make sure the driver state is such that the original
# backend is still set as primary as per driver memory
LOG.error(_LE("Driver reported error during "
"replication failover."))
service.status = 'error'
service.save()
exception_encountered = True
if exception_encountered:
LOG.error(
_LE("Error encountered during failover on host: "
"%(host)s invalid target ID %(backend_id)s"),
{'host': self.host, 'backend_id':
secondary_backend_id})
return
if secondary_backend_id == "default":
service.replication_status = fields.ReplicationStatus.ENABLED
service.active_backend_id = ""
if service.frozen:
service.disabled = True
service.disabled_reason = "frozen"
else:
service.disabled = False
service.disabled_reason = ""
service.save()
else:
service.replication_status = fields.ReplicationStatus.FAILED_OVER
service.active_backend_id = active_backend_id
service.disabled = True
service.disabled_reason = "failed-over"
service.save()
for update in volume_update_list:
# Response must include an id key: {volume_id: <cinder-uuid>}
if not update.get('volume_id'):
raise exception.UnableToFailOver(
                    reason=_("Update list doesn't include volume_id"))
# Key things to consider (attaching failed-over volumes):
# provider_location
# provider_auth
# provider_id
# replication_status
vobj = objects.Volume.get_by_id(context, update['volume_id'])
vobj.update(update.get('updates', {}))
vobj.save()
LOG.info(_LI("Failed over to replication target successfully."))
def freeze_host(self, context):
"""Freeze management plane on this backend.
Basically puts the control/management plane into a
        Read Only state. We should handle this in the scheduler;
        however, this is provided to let the driver know in case it
needs/wants to do something specific on the backend.
:param context: security context
"""
# TODO(jdg): Return from driver? or catch?
# Update status column in service entry
try:
self.driver.freeze_backend(context)
except exception.VolumeDriverException:
# NOTE(jdg): In the case of freeze, we don't really
# need the backend's consent or anything, we'll just
# disable the service, so we can just log this and
# go about our business
LOG.warning(_LW('Error encountered on Cinder backend during '
'freeze operation, service is frozen, however '
'notification to driver has failed.'))
svc_host = vol_utils.extract_host(self.host, 'backend')
service = objects.Service.get_by_args(
context,
svc_host,
constants.VOLUME_BINARY)
service.disabled = True
service.disabled_reason = "frozen"
service.save()
LOG.info(_LI("Set backend status to frozen successfully."))
return True
def thaw_host(self, context):
"""UnFreeze management plane on this backend.
Basically puts the control/management plane back into
a normal state. We should handle this in the scheduler,
however this is provided to let the driver know in case it
needs/wants to do something specific on the backend.
:param context: security context
"""
# TODO(jdg): Return from driver? or catch?
# Update status column in service entry
try:
self.driver.thaw_backend(context)
except exception.VolumeDriverException:
# NOTE(jdg): Thaw actually matters, if this call
# to the backend fails, we're stuck and can't re-enable
LOG.error(_LE('Error encountered on Cinder backend during '
'thaw operation, service will remain frozen.'))
return False
svc_host = vol_utils.extract_host(self.host, 'backend')
service = objects.Service.get_by_args(
context,
svc_host,
constants.VOLUME_BINARY)
service.disabled = False
service.disabled_reason = ""
service.save()
LOG.info(_LI("Thawed backend successfully."))
return True
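    # Hedged illustration: freeze_host()/thaw_host() above are meant to be
    # used as a pair around maintenance or a failover (``manager`` and
    # ``context`` are assumptions for illustration):
    #
    #     manager.freeze_host(context)   # disable service, reason "frozen"
    #     ...                            # do maintenance / failover work
    #     manager.thaw_host(context)     # re-enable once the backend is ready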
def manage_existing_snapshot(self, ctxt, snapshot, ref=None):
LOG.debug('manage_existing_snapshot: managing %s.', ref)
try:
flow_engine = manage_existing_snapshot.get_flow(
ctxt,
self.db,
self.driver,
self.host,
snapshot.id,
ref)
except Exception:
msg = _LE("Failed to create manage_existing flow: "
"%(object_type)s %(object_id)s.")
LOG.exception(msg, {'object_type': 'snapshot',
'object_id': snapshot.id})
raise exception.CinderException(
_("Failed to create manage existing flow."))
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
return snapshot.id
def get_manageable_snapshots(self, ctxt, marker, limit, offset,
sort_keys, sort_dirs):
try:
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Listing manageable snapshots failed, due "
"to uninitialized driver."))
cinder_snapshots = self.db.snapshot_get_by_host(ctxt, self.host)
try:
driver_entries = self.driver.get_manageable_snapshots(
cinder_snapshots, marker, limit, offset, sort_keys, sort_dirs)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Listing manageable snapshots failed, due "
"to driver error."))
return driver_entries
def get_capabilities(self, context, discover):
"""Get capabilities of backend storage."""
if discover:
self.driver.init_capabilities()
capabilities = self.driver.capabilities
LOG.debug("Obtained capabilities list: %s.", capabilities)
return capabilities
def get_backup_device(self, ctxt, backup):
(backup_device, is_snapshot) = (
self.driver.get_backup_device(ctxt, backup))
secure_enabled = self.driver.secure_file_operations_enabled()
backup_device_dict = {'backup_device': backup_device,
'secure_enabled': secure_enabled,
'is_snapshot': is_snapshot, }
return backup_device_dict
def secure_file_operations_enabled(self, ctxt, volume):
secure_enabled = self.driver.secure_file_operations_enabled()
return secure_enabled
|
{
"content_hash": "953429799b719d4cfc9addfef8ba2f33",
"timestamp": "",
"source": "github",
"line_count": 4182,
"max_line_length": 79,
"avg_line_length": 44.43519846963176,
"alnum_prop": 0.5185009794003057,
"repo_name": "Hybrid-Cloud/cinder",
"id": "ef6f560945f801b13ba207a3cc7e00f9c1c82751",
"size": "186560",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cinder/volume/manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17513896"
},
{
"name": "Shell",
"bytes": "8187"
}
],
"symlink_target": ""
}
|
'''
As of Dec 2014 this successfully scrapes
http://www.olympic.it/english/game/id_W2014
for the winter olympics results for all years.
The website might get changed.
This can all be tested in a Scrapy shell, that's how
I first built it.
Created on 23 Oct 2014
@author: chris
'''
import scrapy
from country_medals_c import country_medals
class MedalsSpider(scrapy.Spider):
name = "medals"
allowed_domains = ["olympic.it"]
start_urls = [
"http://www.olympic.it/english/game/id_W2014"
]
def parse(self, response):
# filename = response.url.split("/")[-2]
for_cleaning = response.xpath('//table/tr[@valign="top"]/td[@class="testo"]')
for i, a_i in enumerate(for_cleaning):
test = a_i.xpath('text()').extract()
if len(test) > 0:
test = test[0].encode('ascii','ignore')
if test == '1.':
break
for_cleaning = for_cleaning[i:]
rot = 6
N = len(for_cleaning)/rot
for_cleaning = for_cleaning[:N*rot] # not sure about this line - it's meant to remove excess entries, but it's unsophisticated, could probably do this with "the final + 1 entry has italics"
countr_list = []
for j in range(N):
print j
i_0 = j*rot
rank = int(for_cleaning[i_0+0].xpath('text()').extract()[0].encode('ascii','ignore')[:-1])
name = for_cleaning[i_0+1].xpath('a/text()').extract()[0]
countr = country_medals(rank,name)
medals = []
for k in range(3):
no = int(for_cleaning[i_0+2+k].xpath('text()').extract()[0])
medals.append(no)
countr.medals_in(medals)
countr_list.append(countr)
filename = response.xpath('//title/text()').extract()[0][-4:] + '.p'
import pickle
with open(filename, 'w+') as f:
pickle.dump(countr_list,f)
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors import LinkExtractor
class ManySitesMedals(CrawlSpider):
name = 'all_medals'
allowed_domains = ['olympic.it']
start_urls = ['http://www.olympic.it/english/game']
rules = (
# Extract links matching 'item.php' and parse them with the spider's method parse_item
Rule(LinkExtractor(allow=('/english/game/id_W\d{4}', )), callback='parse_item'),
)
def parse_item(self, response):
for_cleaning = response.xpath('//table/tr[@valign="top"]/td[@class="testo"]')
for i, a_i in enumerate(for_cleaning):
test = a_i.xpath('text()').extract()
if len(test) > 0:
test = test[0].encode('ascii','ignore')
if test == '1.':
break
for_cleaning = for_cleaning[i:]
rot = 6
N = len(for_cleaning)/rot
for_cleaning = for_cleaning[:N*rot] # not sure about this line - it's meant to remove excess entries, but it's unsophisticated, could probably do this with "the final + 1 entry has italics"
countr_list = []
for j in range(N):
print j
i_0 = j*rot
rank = int(for_cleaning[i_0+0].xpath('text()').extract()[0].encode('ascii','ignore')[:-1])
name = for_cleaning[i_0+1].xpath('a/text()').extract()[0]
countr = country_medals(rank,name)
medals = []
for k in range(3):
no = int(for_cleaning[i_0+2+k].xpath('text()').extract()[0])
medals.append(no)
countr.medals_in(medals)
countr_list.append(countr)
filename = response.xpath('//title/text()').extract()[0][-4:] + '.p'
import pickle
with open(filename, 'w+') as f:
pickle.dump(countr_list,f)
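# A hedged usage sketch (assuming this module sits inside a normal Scrapy
# project created with `scrapy startproject`), run from the project root:
#
#     scrapy crawl medals        # just the 2014 page, via MedalsSpider
#     scrapy crawl all_medals    # follows every /english/game/id_WYYYY link
#
# Each parsed page pickles its list of country_medals objects to '<year>.p'
# in the working directory.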
|
{
"content_hash": "befa843990943743d2a81511ef40c3eb",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 197,
"avg_line_length": 32.14173228346457,
"alnum_prop": 0.5269475747182754,
"repo_name": "chrisjdavie/Olympics_redo",
"id": "9bd1ff0fd886ce3e25da603e06e6857627901aa9",
"size": "4082",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Scrapy_spider/build_medals_table/build_medals_table/spiders/extract_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "OpenEdge ABL",
"bytes": "50331"
},
{
"name": "Python",
"bytes": "35087"
}
],
"symlink_target": ""
}
|
"""Development settings and globals."""
from __future__ import absolute_import
from os.path import join, normpath
from .base import *
########## DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
########## END DEBUG CONFIGURATION
########## EMAIL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
########## END EMAIL CONFIGURATION
########## DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': env.db(default='sqlite:///{0}'.format(normpath(join(RESOURCES_PATH, 'db', 'default.db'))))
}
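# Hedged note: assuming `env` is django-environ's Env set up in base settings,
# env.db() also reads a DATABASE_URL environment variable, so the SQLite
# default above can be overridden without editing this file, e.g.
# DATABASE_URL=postgres://user:password@localhost:5432/surveydb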
########## END DATABASE CONFIGURATION
########## CACHE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#caches
# CACHES = {
# 'default': env.cache(default='locmem://'),
# }
########## END CACHE CONFIGURATION
########## TOOLBAR CONFIGURATION
# See: http://django-debug-toolbar.readthedocs.org/en/latest/installation.html#explicit-setup
INSTALLED_APPS += (
'debug_toolbar',
)
MIDDLEWARE_CLASSES += (
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
DEBUG_TOOLBAR_PATCH_SETTINGS = False
# http://django-debug-toolbar.readthedocs.org/en/latest/installation.html
INTERNAL_IPS = ('127.0.0.1',)
########## END TOOLBAR CONFIGURATION
|
{
"content_hash": "211e44c000f36211a1113d9d53fd8fec",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 105,
"avg_line_length": 27.01818181818182,
"alnum_prop": 0.6938088829071333,
"repo_name": "DeppSRL/odl_datasets_survey",
"id": "d1e583024ee676a4a633c316ae0f6135732f86b1",
"size": "1486",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/datasets_survey/settings/development.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "38"
},
{
"name": "JavaScript",
"bytes": "44"
},
{
"name": "Python",
"bytes": "34551"
},
{
"name": "Shell",
"bytes": "5121"
}
],
"symlink_target": ""
}
|
from pipeline.util import config
from pipeline.lab import Experiment
from pipeline.lab.util import group_by
ex = Experiment(config['logger'])
ex.get(_id=['5744d47f6fdf1e2f69f0716a'])
ex.get(key='im super cool')
ex.records
model = ex.records[0]
model['key'] = 'new value2'
new_model = ex.record()
new_model['key'] = 'im super cool'
ex.save()
group_by(ex.records, 'model')
|
{
"content_hash": "296db6f3610a0d1b9d35a705cc9cb789",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 40,
"avg_line_length": 20.88888888888889,
"alnum_prop": 0.7207446808510638,
"repo_name": "edublancas/pipeline",
"id": "3f83c35d9b049ff39bf0df21cf6afcacd112586f",
"size": "376",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "usage/lab/lab_read.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31058"
},
{
"name": "Shell",
"bytes": "69"
}
],
"symlink_target": ""
}
|
from os import environ
bind = 'unix:/tmp/nginx.socket'
workers = int(environ.get('WORKERS', '4'))
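# Hedged note: binding to a unix socket and touching /tmp/app-initialized in
# post_fork (below) follows the convention of nginx front-end buildpacks
# (e.g. Heroku's), where nginx waits for that marker file before proxying to
# the socket. That deployment target is an assumption; gunicorn itself
# attaches no meaning to the file.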
def post_fork(server, worker):
with open('/tmp/app-initialized', 'w'):
pass
|
{
"content_hash": "745b4d828a911eaccee54ed9c0adc509",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 43,
"avg_line_length": 21,
"alnum_prop": 0.6455026455026455,
"repo_name": "appropriatetech/icat-symposion",
"id": "4b44b2719242c8bd1b39503fd80d2e6b7a5ecae4",
"size": "189",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gunicorn.conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "33184"
},
{
"name": "HTML",
"bytes": "102440"
},
{
"name": "Makefile",
"bytes": "1404"
},
{
"name": "Python",
"bytes": "18897"
}
],
"symlink_target": ""
}
|
'''
BenchExec is a framework for reliable benchmarking.
This file is part of BenchExec.
Copyright (C) 2007-2015 Dirk Beyer
All rights reserved.
Licensed under the Apache License, Version 2.0 (the 'License');
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an 'AS IS' BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import os
import re
import setuptools
import sys
import warnings
warnings.filterwarnings('default', module="^benchexec\..*")
# Links for documentation on how to build and use Python packages:
# http://python-packaging-user-guide.readthedocs.org/en/latest/
# http://gehrcke.de/2014/02/distributing-a-python-command-line-application/
# http://www.jeffknupp.com/blog/2013/08/16/open-sourcing-a-python-project-the-right-way/
# https://pythonhosted.org/setuptools/setuptools.html
# https://docs.python.org/3/distutils/index.html
# determine version (more robust than importing benchexec)
# c.f. http://gehrcke.de/2014/02/distributing-a-python-command-line-application/
with open('benchexec/__init__.py') as f:
version = re.search('^__version__\s*=\s*\'(.*)\'', f.read(), re.M).group(1)
# Get the long description from the relevant file
readme = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'README.md')
try:
import pypandoc
long_description = pypandoc.convert(readme, 'rst', format='markdown_github-hard_line_breaks')
except (IOError, ImportError):
with open(readme, 'rb') as f:
long_description = f.read().decode('utf-8')
PY2 = sys.version_info[0] == 2
setuptools.setup(
name = 'BenchExec',
version = version,
author = 'Dirk Beyer',
description = ('A Framework for Reliable Benchmarking and Resource Measurement.'),
long_description = long_description,
url = 'https://github.com/sosy-lab/benchexec/',
license = 'Apache 2.0 License',
keywords = 'benchmarking resource measurement',
classifiers = [
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3 :: Only',
'Topic :: System :: Benchmark',
],
platforms = ['Linux'],
packages = ['benchexec'] +
(['benchexec.tablegenerator', 'benchexec.tools'] if not PY2 else []),
package_data = {'benchexec.tablegenerator': ['template.*']} if not PY2 else {},
entry_points = {
"console_scripts": [
'runexec = benchexec.runexecutor:main',
'containerexec = benchexec.containerexecutor:main',
] + ([
'benchexec = benchexec.benchexec:main',
'table-generator = benchexec.tablegenerator:main',
] if not PY2 else []),
},
install_requires = ['tempita==0.5.2'],
setup_requires=['nose>=1.0'] + ['lxml'] if not PY2 else [],
test_suite = 'nose.collector' if not PY2 else 'benchexec.test_python2.Python2Tests',
zip_safe = True,
)
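# A hedged usage note: installing the package (for example with `pip install .`
# from the repository root) exposes the console scripts declared above, e.g.
# `runexec --help` and `containerexec --help`, plus `benchexec` and
# `table-generator` on Python 3.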
|
{
"content_hash": "e724b31a2bb6190276998e36ca30f9ce",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 97,
"avg_line_length": 38.89655172413793,
"alnum_prop": 0.6793735224586288,
"repo_name": "martin-neuhaeusser/benchexec",
"id": "480bb427738c7431ab680e04afa59a08871e83c3",
"size": "3407",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Gnuplot",
"bytes": "3882"
},
{
"name": "HTML",
"bytes": "63337"
},
{
"name": "PHP",
"bytes": "4704"
},
{
"name": "Python",
"bytes": "797325"
},
{
"name": "Shell",
"bytes": "5328"
},
{
"name": "TeX",
"bytes": "6538"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from future.moves.urllib.parse import urlencode
import logging
import re
from flexget import plugin
from flexget.event import event
from flexget.plugin import PluginError
from requests import Request, RequestException
from requests.utils import dict_from_cookiejar, cookiejar_from_dict
log = logging.getLogger('wordpress_auth')
def construct_request(url, username='', password='', redirect='/wp-admin/'):
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/50.0.2661.102 Safari/537.36',
'Content-Type': 'application/x-www-form-urlencoded',
'DNT': '1'
}
data = {
'log': username,
'pwd': password,
'wp-submit': 'Log In',
'testcookie': '1',
'redirect_to': redirect
}
return Request(method='POST', url=url, headers=headers, data=urlencode(data).encode('UTF-8')).prepare()
def collect_cookies(response):
cookies = dict_from_cookiejar(response.cookies)
for h_resp in response.history:
cookies.update(dict_from_cookiejar(h_resp.cookies))
return cookiejar_from_dict(cookies)
def get_valid_cookies(cookies):
def is_wp_cookie(key):
return re.match(r'(wordpress|wp)(?!_*test)[A-z0-9]*', key, re.IGNORECASE)
valid_cookies = {key: value for key, value in cookies.items() if is_wp_cookie(key)}
return cookiejar_from_dict(valid_cookies)
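# For illustration (cookie names are invented): collect_cookies() merges the
# cookies set on the login response and on every redirect in its history,
# while get_valid_cookies() keeps only keys that look WordPress-issued, e.g.
# 'wordpress_logged_in_abc123' or 'wpSettings1', per the regex above.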
class PluginWordPress(object):
"""
Supports accessing feeds and media that require wordpress account credentials
Usage:
wordpress_auth:
url: 'your wordpress blog login page (ex http://example.org/wp-login.php)'
username: 'your username'
password: 'your password'
"""
schema = {'type': 'object',
'properties': {
'url': {'type': 'string', 'oneOf': [{'format': 'url'}]},
'username': {'type': 'string', 'default': ''},
'password': {'type': 'string', 'default': ''}
},
'required': ['url'],
'additionalProperties': False
}
@plugin.priority(135)
def on_task_start(self, task, config):
url = config['url']
username = config['username']
password = config['password']
try:
response = task.requests.send(construct_request(url, username=username, password=password))
if not response.ok:
raise RequestException(str(response))
cookies = collect_cookies(response)
if len(get_valid_cookies(cookies)) < 1:
raise RequestException('No recognized WordPress cookies found. Perhaps username/password is invalid?')
task.requests.add_cookiejar(cookies)
except RequestException as err:
log.error('%s', err)
raise PluginError('WordPress Authentication at %s failed' % (url,))
@event('plugin.register')
def register_plugin():
plugin.register(PluginWordPress, 'wordpress_auth', api_ver=2)
|
{
"content_hash": "d5c904014f191049dee83e46c39f7f0b",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 118,
"avg_line_length": 35.16483516483517,
"alnum_prop": 0.6271875,
"repo_name": "jawilson/Flexget",
"id": "04deda49bfe2f1c286d1125398488640b7d4ff41",
"size": "3200",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "flexget/plugins/sites/wordpress.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11875"
},
{
"name": "Dockerfile",
"bytes": "1988"
},
{
"name": "HTML",
"bytes": "79800"
},
{
"name": "JavaScript",
"bytes": "263723"
},
{
"name": "Python",
"bytes": "3364620"
},
{
"name": "SRecode Template",
"bytes": "3"
},
{
"name": "Shell",
"bytes": "1576"
}
],
"symlink_target": ""
}
|
META_FILENAME = '{}_metadata.json'
MEASUREMENT_FILENAME = '{}_measurement_{}.png'
ALIGNMENT_FILENAME = '{}_alignment_guide.png'
LUT_JSON_FILENAME = '{}.json'
LUT_CUBE_FILENAME = '{}.cube'
LUT_PNG_FILENAME = '{}.png'
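# For illustration: these are str.format templates, e.g.
# MEASUREMENT_FILENAME.format('mylut', 2) -> 'mylut_measurement_2.png'
# (the arguments lut_maker actually passes elsewhere are an assumption here).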
|
{
"content_hash": "e3ad6769c5ab31ce85c1522db820806d",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 46,
"avg_line_length": 36,
"alnum_prop": 0.6759259259259259,
"repo_name": "faymontage/lut-maker",
"id": "73b0042e8f6acfc3650b946a02b9976095056a68",
"size": "216",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lut_maker/constants.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11098"
}
],
"symlink_target": ""
}
|
import os
import time
from datetime import date, datetime, timedelta
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.core.management import call_command
import mock
from nose.tools import eq_
import amo
import amo.tests
import mkt
from mkt.api.models import Nonce
from mkt.developers.models import ActivityLog
from mkt.files.models import File, FileUpload
from mkt.site.fixtures import fixture
from mkt.users.models import UserProfile
from mkt.versions.models import Version
from mkt.webapps import cron
from mkt.webapps.cron import (clean_old_signed, mkt_gc, update_app_trending,
update_downloads)
from mkt.webapps.models import Addon, Webapp
from mkt.webapps.tasks import _get_trending
class TestLastUpdated(amo.tests.TestCase):
fixtures = fixture('webapp_337141')
def test_catchall(self):
"""Make sure the catch-all last_updated is stable and accurate."""
# Nullify all datestatuschanged so the public add-ons hit the
# catch-all.
(File.objects.filter(status=amo.STATUS_PUBLIC)
.update(datestatuschanged=None))
Addon.objects.update(last_updated=None)
cron.addon_last_updated()
for addon in Addon.objects.filter(status=amo.STATUS_PUBLIC):
eq_(addon.last_updated, addon.created)
# Make sure it's stable.
cron.addon_last_updated()
for addon in Addon.objects.filter(status=amo.STATUS_PUBLIC):
eq_(addon.last_updated, addon.created)
class TestHideDisabledFiles(amo.tests.TestCase):
fixtures = fixture('webapp_337141')
msg = 'Moving disabled file: %s => %s'
def setUp(self):
self.addon = Webapp.objects.get(pk=337141)
self.version = self.addon.latest_version
self.f1 = self.version.all_files[0]
@mock.patch('mkt.files.models.os')
def test_leave_nondisabled_files(self, os_mock):
stati = [(amo.STATUS_PUBLIC, amo.STATUS_PUBLIC)]
for addon_status, file_status in stati:
self.addon.update(status=addon_status)
File.objects.update(status=file_status)
cron.hide_disabled_files()
assert not os_mock.path.exists.called, (addon_status, file_status)
@mock.patch('mkt.files.models.File.mv')
@mock.patch('mkt.files.models.storage')
def test_move_user_disabled_addon(self, m_storage, mv_mock):
# Use Addon.objects.update so the signal handler isn't called.
Addon.objects.filter(id=self.addon.id).update(
status=amo.STATUS_PUBLIC, disabled_by_user=True)
File.objects.update(status=amo.STATUS_PUBLIC)
cron.hide_disabled_files()
# Check that f1 was moved.
mv_mock.assert_called_with(self.f1.file_path,
self.f1.guarded_file_path, self.msg)
# There's only 1 file.
eq_(mv_mock.call_count, 1)
@mock.patch('mkt.files.models.File.mv')
@mock.patch('mkt.files.models.storage')
def test_move_admin_disabled_addon(self, m_storage, mv_mock):
Addon.objects.filter(id=self.addon.id).update(
status=amo.STATUS_DISABLED)
File.objects.update(status=amo.STATUS_PUBLIC)
cron.hide_disabled_files()
# Check that f1 was moved.
mv_mock.assert_called_with(self.f1.file_path,
self.f1.guarded_file_path, self.msg)
# There's only 1 file.
eq_(mv_mock.call_count, 1)
@mock.patch('mkt.files.models.File.mv')
@mock.patch('mkt.files.models.storage')
def test_move_disabled_file(self, m_storage, mv_mock):
Addon.objects.filter(id=self.addon.id).update(
status=amo.STATUS_REJECTED)
File.objects.filter(id=self.f1.id).update(status=amo.STATUS_DISABLED)
cron.hide_disabled_files()
# f1 should have been moved.
mv_mock.assert_called_with(self.f1.file_path,
self.f1.guarded_file_path, self.msg)
eq_(mv_mock.call_count, 1)
@mock.patch('mkt.files.models.File.mv')
@mock.patch('mkt.files.models.storage')
def test_ignore_deleted_versions(self, m_storage, mv_mock):
# Apps only have 1 file and version delete only deletes one.
self.version.delete()
mv_mock.reset_mock()
# Create a new version/file just like the one we deleted.
version = Version.objects.create(addon=self.addon)
File.objects.create(version=version, filename='f2')
cron.hide_disabled_files()
# Mock shouldn't have been called.
assert not mv_mock.called, mv_mock.call_args
class TestWeeklyDownloads(amo.tests.TestCase):
def setUp(self):
self.app = Webapp.objects.create(type=amo.ADDON_WEBAPP,
status=amo.STATUS_PUBLIC)
def get_app(self):
return Webapp.objects.get(pk=self.app.pk)
@mock.patch('mkt.webapps.tasks.get_monolith_client')
def test_weekly_downloads(self, _mock):
client = mock.Mock()
raw = {
'facets': {
'installs': {
'_type': 'date_histogram',
'entries': [
{'count': 3,
'time': 1390780800000,
'total': 19.0},
{'count': 62,
'time': 1391385600000,
'total': 236.0}
]
}
}
}
client.raw.return_value = raw
_mock.return_value = client
eq_(self.app.weekly_downloads, 0)
update_downloads([self.app.pk])
self.app.reload()
eq_(self.app.weekly_downloads, 255)
@mock.patch('mkt.webapps.tasks.get_monolith_client')
def test_total_downloads(self, _mock):
client = mock.Mock()
raw = {
'facets': {
'installs': {
u'_type': u'statistical',
u'count': 49,
u'total': 6638.0
}
}
}
client.raw.return_value = raw
_mock.return_value = client
eq_(self.app.total_downloads, 0)
update_downloads([self.app.pk])
self.app.reload()
eq_(self.app.total_downloads, 6638)
@mock.patch('mkt.webapps.tasks.get_monolith_client')
def test_monolith_error(self, _mock):
client = mock.Mock()
client.side_effect = ValueError
client.raw.side_effect = Exception
_mock.return_value = client
update_downloads([self.app.pk])
self.app.reload()
eq_(self.app.weekly_downloads, 0)
eq_(self.app.total_downloads, 0)
class TestCleanup(amo.tests.TestCase):
def setUp(self):
self.file = os.path.join(settings.SIGNED_APPS_REVIEWER_PATH,
'1', 'x.z')
def test_not_cleaned(self):
storage.open(self.file, 'w')
clean_old_signed()
assert storage.exists(self.file)
def test_cleaned(self):
storage.open(self.file, 'w')
clean_old_signed(-60)
assert not storage.exists(self.file)
@mock.patch('lib.crypto.packaged.sign_app')
class TestSignApps(amo.tests.TestCase):
fixtures = fixture('webapp_337141')
def setUp(self):
self.app = Webapp.objects.get(id=337141)
self.app.update(is_packaged=True)
self.app2 = amo.tests.app_factory(
name=u'Mozillaball ょ', app_slug='test',
is_packaged=True, version_kw={'version': '1.0',
'created': None})
self.app3 = amo.tests.app_factory(
name='Test app 3', app_slug='test3', status=amo.STATUS_REJECTED,
is_packaged=True, version_kw={'version': '1.0',
'created': None})
def test_by_webapp(self, sign_mock):
v1 = self.app.current_version
call_command('sign_apps', webapps=str(v1.pk))
file1 = v1.all_files[0]
assert sign_mock.called_with(((file1.file_path,
file1.signed_file_path),))
def test_all(self, sign_mock):
v1 = self.app.current_version
v2 = self.app2.current_version
call_command('sign_apps')
file1 = v1.all_files[0]
file2 = v2.all_files[0]
eq_(len(sign_mock.mock_calls), 2)
eq_(sign_mock.mock_calls[0][1][:2],
(file1.file_path, file1.signed_file_path))
eq_(sign_mock.mock_calls[1][1][:2],
(file2.file_path, file2.signed_file_path))
class TestUpdateTrending(amo.tests.TestCase):
def setUp(self):
self.app = Webapp.objects.create(type=amo.ADDON_WEBAPP,
status=amo.STATUS_PUBLIC)
@mock.patch('mkt.webapps.tasks._get_trending')
def test_trending_saved(self, _mock):
_mock.return_value = 12.0
update_app_trending()
eq_(self.app.get_trending(), 12.0)
for region in mkt.regions.REGIONS_DICT.values():
eq_(self.app.get_trending(region=region), 12.0)
# Test running again updates the values as we'd expect.
_mock.return_value = 2.0
update_app_trending()
eq_(self.app.get_trending(), 2.0)
for region in mkt.regions.REGIONS_DICT.values():
eq_(self.app.get_trending(region=region), 2.0)
@mock.patch('mkt.webapps.tasks.get_monolith_client')
def test_get_trending(self, _mock):
client = mock.Mock()
client.return_value = [
{'count': 133.0, 'date': date(2013, 8, 26)},
{'count': 122.0, 'date': date(2013, 9, 2)},
]
_mock.return_value = client
# 1st week count: 133 + 122 = 255
# Prior 3 weeks get averaged: (133 + 122) / 3 = 85
# (255 - 85) / 85 = 2.0
eq_(_get_trending(self.app.id), 2.0)
@mock.patch('mkt.webapps.tasks.get_monolith_client')
def test_get_trending_threshold(self, _mock):
client = mock.Mock()
client.return_value = [
{'count': 49.0, 'date': date(2013, 8, 26)},
{'count': 50.0, 'date': date(2013, 9, 2)},
]
_mock.return_value = client
# 1st week count: 49 + 50 = 99
# 99 is less than 100 so we return 0.0.
eq_(_get_trending(self.app.id), 0.0)
@mock.patch('mkt.webapps.tasks.get_monolith_client')
def test_get_trending_monolith_error(self, _mock):
client = mock.Mock()
client.side_effect = ValueError
_mock.return_value = client
eq_(_get_trending(self.app.id), 0.0)
@mock.patch('os.stat')
@mock.patch('os.listdir')
@mock.patch('os.remove')
class TestGarbage(amo.tests.TestCase):
def setUp(self):
self.user = UserProfile.objects.create(
email='gc_test@example.com', name='gc_test')
amo.log(amo.LOG.CUSTOM_TEXT, 'testing', user=self.user,
created=datetime(2001, 1, 1))
def test_garbage_collection(self, rm_mock, ls_mock, stat_mock):
eq_(ActivityLog.objects.all().count(), 1)
mkt_gc()
eq_(ActivityLog.objects.all().count(), 0)
def test_nonce(self, rm_mock, ls_mock, stat_mock):
nonce = Nonce.objects.create(nonce='a', timestamp=1, client_key='b')
nonce.update(created=self.days_ago(2))
eq_(Nonce.objects.count(), 1)
mkt_gc()
eq_(Nonce.objects.count(), 0)
def test_dump_delete(self, rm_mock, ls_mock, stat_mock):
ls_mock.return_value = ['lol']
stat_mock.return_value = StatMock(days_ago=1000)
mkt_gc()
assert rm_mock.call_args_list[0][0][0].endswith('lol')
def test_new_no_delete(self, rm_mock, ls_mock, stat_mock):
ls_mock.return_value = ['lol']
stat_mock.return_value = StatMock(days_ago=1)
mkt_gc()
assert not rm_mock.called
def test_old_and_new(self, rm_mock, ls_mock, stat_mock):
fu_new = FileUpload.objects.create(path='/tmp/bar', name='bar')
fu_new.created = self.days_ago(5)
fu_old = FileUpload.objects.create(path='/tmp/foo', name='foo')
fu_old.update(created=self.days_ago(91))
mkt_gc()
eq_(FileUpload.objects.count(), 1)
assert rm_mock.called
eq_(rm_mock.call_args[0][0], fu_old.path)
def test_old_no_path(self, rm_mock, ls_mock, stat_mock):
fu_old = FileUpload.objects.create(path='', name='foo')
fu_old.update(created=self.days_ago(91))
mkt_gc()
eq_(FileUpload.objects.count(), 0)
assert not rm_mock.called
class StatMock(object):
def __init__(self, days_ago):
self.st_mtime = time.mktime(
(datetime.now() - timedelta(days_ago)).timetuple())
self.st_size = 100
|
{
"content_hash": "6af06994c3dc00abd571a3ff53c5a839",
"timestamp": "",
"source": "github",
"line_count": 368,
"max_line_length": 78,
"avg_line_length": 34.80163043478261,
"alnum_prop": 0.5878816272351058,
"repo_name": "andymckay/zamboni",
"id": "6223f8a5b5bad078b9b6272ed063ca1ded3bf0f8",
"size": "12833",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mkt/webapps/tests/test_crons.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "357533"
},
{
"name": "JavaScript",
"bytes": "524153"
},
{
"name": "Python",
"bytes": "3863676"
},
{
"name": "Shell",
"bytes": "14980"
}
],
"symlink_target": ""
}
|
""" Implementation of webservices API 0.9 """
import tornado.httpserver
import tornado.ioloop
import tornado.web
class WebService(tornado.web.Application):
""" A implementation of web services for tornado web server.
import tornado.httpserver
import tornado.ioloop
from tornadows import webservices
from tornadows import xmltypes
from tornadows import soaphandler
from tornadows.soaphandler import webservice
class MyService(soaphandler.SoapHandler):
@webservice(_params=[xmltypes.Integer, xmltypes.Integer],_returns=xmltypes.Integer)
def sum(self, value1, value2):
result = value1 + value2
return result
if __name__ == "__main__":
app = webservices.WebService("MyService",MyService)
ws_server = tornado.httpserver.HTTPServer(app)
ws_server.listen(8080)
tornado.ioloop.IOLoop.instance().start()
"""
def __init__(self,services,object=None,wsdl=None):
""" Initializes the application for web services
Instances of this class are callable and can be passed to
HTTPServer of tornado to serve the web services.
            The constructor for this class takes the name for the web
            service (services), the class with the web service (object)
            and wsdl with the wsdl file path (if one exists).
"""
if isinstance(services,list) and object == None:
srvs = []
for s in services:
srv = s[0]
obj = s[1]
srvs.append((r"/"+str(srv),obj))
srvs.append((r"/"+str(srv)+"/",obj))
tornado.web.Application.__init__(self,srvs)
else:
self._service = services
self._object = object
self._services = [(r"/"+str(self._service),self._object),
(r"/"+str(self._service)+"/",self._object),]
tornado.web.Application.__init__(self,self._services)
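# A hedged sketch of the list form handled above (the handler classes are
# made up): several services can be mounted in one application by passing
# (name, handler) pairs instead of a single service name and object, e.g.
#
#     app = WebService([("MyService", MyService), ("OtherService", OtherService)])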
|
{
"content_hash": "3c5139ad23e3b4b62a8e8c23227d05cb",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 86,
"avg_line_length": 31.818181818181817,
"alnum_prop": 0.6914285714285714,
"repo_name": "leonevo/euao",
"id": "594fbc09c70d695202472728d7902a4237fcfd01",
"size": "2359",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tornadows/webservices.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "188862"
},
{
"name": "Shell",
"bytes": "118"
}
],
"symlink_target": ""
}
|
RECORD_TYPES = ['A', 'AAAA', 'TXT', 'CNAME', 'MX', 'PTR', 'SRV', 'SPF']
from boto.resultset import ResultSet
class ResourceRecordSets(ResultSet):
"""
A list of resource records.
:ivar hosted_zone_id: The ID of the hosted zone.
:ivar comment: A comment that will be stored with the change.
:ivar changes: A list of changes.
"""
ChangeResourceRecordSetsBody = """<?xml version="1.0" encoding="UTF-8"?>
<ChangeResourceRecordSetsRequest xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
<ChangeBatch>
<Comment>%(comment)s</Comment>
<Changes>%(changes)s</Changes>
</ChangeBatch>
</ChangeResourceRecordSetsRequest>"""
ChangeXML = """<Change>
<Action>%(action)s</Action>
%(record)s
</Change>"""
def __init__(self, connection=None, hosted_zone_id=None, comment=None):
self.connection = connection
self.hosted_zone_id = hosted_zone_id
self.comment = comment
self.changes = []
self.next_record_name = None
self.next_record_type = None
super(ResourceRecordSets, self).__init__([('ResourceRecordSet', Record)])
def __repr__(self):
if self.changes:
record_list = ','.join([c.__repr__() for c in self.changes])
else:
record_list = ','.join([record.__repr__() for record in self])
return '<ResourceRecordSets:%s [%s]' % (self.hosted_zone_id,
record_list)
def add_change(self, action, name, type, ttl=600,
alias_hosted_zone_id=None, alias_dns_name=None, identifier=None,
weight=None, region=None, alias_evaluate_target_health=None,
health_check=None, failover=None):
"""
Add a change request to the set.
:type action: str
:param action: The action to perform ('CREATE'|'DELETE'|'UPSERT')
:type name: str
:param name: The name of the domain you want to perform the action on.
:type type: str
:param type: The DNS record type. Valid values are:
* A
* AAAA
* CNAME
* MX
* NS
* PTR
* SOA
* SPF
* SRV
* TXT
:type ttl: int
:param ttl: The resource record cache time to live (TTL), in seconds.
        :type alias_hosted_zone_id: str
        :param alias_hosted_zone_id: *Alias resource record sets only* The value
            of the hosted zone ID, CanonicalHostedZoneNameId, for
            the LoadBalancer.
        :type alias_dns_name: str
        :param alias_dns_name: *Alias resource record sets only*
            Information about the domain to which you are redirecting traffic.
:type identifier: str
:param identifier: *Weighted and latency-based resource record sets
only* An identifier that differentiates among multiple resource
record sets that have the same combination of DNS name and type.
:type weight: int
:param weight: *Weighted resource record sets only* Among resource
record sets that have the same combination of DNS name and type,
a value that determines what portion of traffic for the current
resource record set is routed to the associated location
:type region: str
:param region: *Latency-based resource record sets only* Among resource
record sets that have the same combination of DNS name and type,
a value that determines which region this should be associated with
for the latency-based routing
:type alias_evaluate_target_health: Boolean
:param alias_evaluate_target_health: *Required for alias resource record sets* Indicates
whether this Resource Record Set should respect the health status of
any health checks associated with the ALIAS target record which it is
linked to.
:type health_check: str
:param health_check: Health check to associate with this record
:type failover: str
:param failover: *Failover resource record sets only* Whether this is the
primary or secondary resource record set.
"""
change = Record(name, type, ttl,
alias_hosted_zone_id=alias_hosted_zone_id,
alias_dns_name=alias_dns_name, identifier=identifier,
weight=weight, region=region,
alias_evaluate_target_health=alias_evaluate_target_health,
health_check=health_check, failover=failover)
self.changes.append([action, change])
return change
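    # A hedged usage sketch (zone id and record data are made up):
    #
    #     rrsets = ResourceRecordSets(conn, 'Z123EXAMPLE', comment='add www')
    #     change = rrsets.add_change('CREATE', 'www.example.com.', 'A', ttl=300)
    #     change.add_value('192.0.2.10')
    #     rrsets.commit()
    #
    # commit() serializes the batch via to_xml() and posts it with the
    # connection's change_rrsets(), as defined below.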
def add_change_record(self, action, change):
"""Add an existing record to a change set with the specified action"""
self.changes.append([action, change])
return
def to_xml(self):
"""Convert this ResourceRecordSet into XML
to be saved via the ChangeResourceRecordSetsRequest"""
changesXML = ""
for change in self.changes:
changeParams = {"action": change[0], "record": change[1].to_xml()}
changesXML += self.ChangeXML % changeParams
params = {"comment": self.comment, "changes": changesXML}
return self.ChangeResourceRecordSetsBody % params
def commit(self):
"""Commit this change"""
if not self.connection:
import boto
self.connection = boto.connect_route53()
return self.connection.change_rrsets(self.hosted_zone_id, self.to_xml())
def endElement(self, name, value, connection):
"""Overwritten to also add the NextRecordName and
NextRecordType to the base object"""
if name == 'NextRecordName':
self.next_record_name = value
elif name == 'NextRecordType':
self.next_record_type = value
else:
return super(ResourceRecordSets, self).endElement(name, value, connection)
def __iter__(self):
"""Override the next function to support paging"""
results = super(ResourceRecordSets, self).__iter__()
truncated = self.is_truncated
while results:
for obj in results:
yield obj
if self.is_truncated:
self.is_truncated = False
results = self.connection.get_all_rrsets(self.hosted_zone_id, name=self.next_record_name, type=self.next_record_type)
else:
results = None
self.is_truncated = truncated
class Record(object):
"""An individual ResourceRecordSet"""
HealthCheckBody = """<HealthCheckId>%s</HealthCheckId>"""
XMLBody = """<ResourceRecordSet>
<Name>%(name)s</Name>
<Type>%(type)s</Type>
%(weight)s
%(body)s
%(health_check)s
</ResourceRecordSet>"""
WRRBody = """
<SetIdentifier>%(identifier)s</SetIdentifier>
<Weight>%(weight)s</Weight>
"""
RRRBody = """
<SetIdentifier>%(identifier)s</SetIdentifier>
<Region>%(region)s</Region>
"""
FailoverBody = """
<SetIdentifier>%(identifier)s</SetIdentifier>
<Failover>%(failover)s</Failover>
"""
ResourceRecordsBody = """
<TTL>%(ttl)s</TTL>
<ResourceRecords>
%(records)s
</ResourceRecords>"""
ResourceRecordBody = """<ResourceRecord>
<Value>%s</Value>
</ResourceRecord>"""
AliasBody = """<AliasTarget>
<HostedZoneId>%(hosted_zone_id)s</HostedZoneId>
<DNSName>%(dns_name)s</DNSName>
%(eval_target_health)s
</AliasTarget>"""
EvaluateTargetHealth = """<EvaluateTargetHealth>%s</EvaluateTargetHealth>"""
def __init__(self, name=None, type=None, ttl=600, resource_records=None,
alias_hosted_zone_id=None, alias_dns_name=None, identifier=None,
weight=None, region=None, alias_evaluate_target_health=None,
health_check=None, failover=None):
self.name = name
self.type = type
self.ttl = ttl
if resource_records is None:
resource_records = []
self.resource_records = resource_records
self.alias_hosted_zone_id = alias_hosted_zone_id
self.alias_dns_name = alias_dns_name
self.identifier = identifier
self.weight = weight
self.region = region
self.alias_evaluate_target_health = alias_evaluate_target_health
self.health_check = health_check
self.failover = failover
def __repr__(self):
return '<Record:%s:%s:%s>' % (self.name, self.type, self.to_print())
def add_value(self, value):
"""Add a resource record value"""
self.resource_records.append(value)
def set_alias(self, alias_hosted_zone_id, alias_dns_name,
alias_evaluate_target_health=False):
"""Make this an alias resource record set"""
self.alias_hosted_zone_id = alias_hosted_zone_id
self.alias_dns_name = alias_dns_name
self.alias_evaluate_target_health = alias_evaluate_target_health
def to_xml(self):
"""Spit this resource record set out as XML"""
if self.alias_hosted_zone_id is not None and self.alias_dns_name is not None:
# Use alias
if self.alias_evaluate_target_health is not None:
eval_target_health = self.EvaluateTargetHealth % ('true' if self.alias_evaluate_target_health else 'false')
else:
eval_target_health = ""
body = self.AliasBody % { "hosted_zone_id": self.alias_hosted_zone_id,
"dns_name": self.alias_dns_name,
"eval_target_health": eval_target_health }
else:
# Use resource record(s)
records = ""
for r in self.resource_records:
records += self.ResourceRecordBody % r
body = self.ResourceRecordsBody % {
"ttl": self.ttl,
"records": records,
}
weight = ""
if self.identifier is not None and self.weight is not None:
weight = self.WRRBody % {"identifier": self.identifier, "weight":
self.weight}
elif self.identifier is not None and self.region is not None:
weight = self.RRRBody % {"identifier": self.identifier, "region":
self.region}
elif self.identifier is not None and self.failover is not None:
weight = self.FailoverBody % {"identifier": self.identifier, "failover":
self.failover}
health_check = ""
if self.health_check is not None:
health_check = self.HealthCheckBody % (self.health_check)
params = {
"name": self.name,
"type": self.type,
"weight": weight,
"body": body,
"health_check": health_check
}
return self.XMLBody % params
def to_print(self):
rr = ""
if self.alias_hosted_zone_id is not None and self.alias_dns_name is not None:
# Show alias
rr = 'ALIAS ' + self.alias_hosted_zone_id + ' ' + self.alias_dns_name
if self.alias_evaluate_target_health is not None:
rr += ' (EvalTarget %s)' % self.alias_evaluate_target_health
else:
# Show resource record(s)
rr = ",".join(self.resource_records)
if self.identifier is not None and self.weight is not None:
rr += ' (WRR id=%s, w=%s)' % (self.identifier, self.weight)
elif self.identifier is not None and self.region is not None:
rr += ' (LBR id=%s, region=%s)' % (self.identifier, self.region)
elif self.identifier is not None and self.failover is not None:
rr += ' (FAILOVER id=%s, failover=%s)' % (self.identifier, self.failover)
return rr
def endElement(self, name, value, connection):
if name == 'Name':
self.name = value
elif name == 'Type':
self.type = value
elif name == 'TTL':
self.ttl = value
elif name == 'Value':
self.resource_records.append(value)
elif name == 'HostedZoneId':
self.alias_hosted_zone_id = value
elif name == 'DNSName':
self.alias_dns_name = value
elif name == 'SetIdentifier':
self.identifier = value
elif name == 'EvaluateTargetHealth':
self.alias_evaluate_target_health = value.lower() == 'true'
elif name == 'Weight':
self.weight = value
elif name == 'Region':
self.region = value
elif name == 'Failover':
self.failover = value
elif name == 'HealthCheckId':
self.health_check = value
def startElement(self, name, attrs, connection):
return None
|
{
"content_hash": "516d11df7657ee66f61eaffc7d1101ae",
"timestamp": "",
"source": "github",
"line_count": 346,
"max_line_length": 133,
"avg_line_length": 37.627167630057805,
"alnum_prop": 0.5833013288270988,
"repo_name": "ychen820/microblog",
"id": "664739b8552c00fed1a27849401e59f44a689875",
"size": "14254",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "y/google-cloud-sdk/.install/.backup/platform/gsutil/third_party/boto/boto/route53/record.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "414229"
},
{
"name": "CSS",
"bytes": "257787"
},
{
"name": "Emacs Lisp",
"bytes": "4733"
},
{
"name": "Groff",
"bytes": "1236200"
},
{
"name": "HTML",
"bytes": "2617468"
},
{
"name": "JavaScript",
"bytes": "1106437"
},
{
"name": "Makefile",
"bytes": "15714"
},
{
"name": "Objective-C",
"bytes": "26302"
},
{
"name": "PHP",
"bytes": "2511443"
},
{
"name": "Perl",
"bytes": "1109010"
},
{
"name": "Python",
"bytes": "71588489"
},
{
"name": "R",
"bytes": "548"
},
{
"name": "Shell",
"bytes": "49796"
},
{
"name": "TeX",
"bytes": "3149"
},
{
"name": "VimL",
"bytes": "5645"
}
],
"symlink_target": ""
}
|
import os
import io
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
from scipy.io import savemat, loadmat
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
MATLAB_FILENAME = "pipeline_out.mat"
def load_MAT(var_name):
loaded = loadmat(file_name=MATLAB_FILENAME, variable_names=[var_name])
if var_name in loaded:
return loaded[var_name]
else:
print("MATLAB File Load Error")
return None
# Font must be provided in working directory
FONT_SIZE = 12
FONT = ImageFont.truetype("fonts/FreeMono.otf", size=FONT_SIZE)
FONT_BOLD = ImageFont.truetype("fonts/FreeMonoBold.otf", size=FONT_SIZE)
FONT_EM = ImageFont.truetype(
"fonts/FreeMonoBoldOblique.otf", size=2 * FONT_SIZE)
class LabeledFrame(object):
"""
Wrapper class for PIL.Image
"""
def __init__(self, filename, frame_id, intensity_predicted=-1, intensity_actual=-1, bounds=(-2, -1)):
# bounds are the bounds of the frame id range (for arange)
# PIL.Image "isn't meant to be subclassed", so we have to wrap it
self.frame_id = frame_id
self.filename = filename
self.intensity_predicted = intensity_predicted
self.intensity_actual = intensity_actual
self.bounds = bounds
# filename=None when testing. Generate an empty test image
if filename is None:
self.clean_image = Image.new("RGB", (320, 240), "navy")
draw = ImageDraw.Draw(self.clean_image)
draw.text((120, 100), "test" + str(self.frame_id),
"wheat", font=FONT_EM)
self.filename = "test" + str(self.frame_id)
else:
try:
self.clean_image = Image.open(filename)
self.clean_image = self.clean_image.convert(mode="RGB")
except IOError:
print("ERROR: Failed to open " + filename)
self.clean_image = Image.new("RGB", (400, 400), "grey")
def label(self):
"""
Draw information onto the frame
"""
error = abs(self.intensity_actual - self.intensity_predicted)
# if error == 0:
# e_color = "aqua"
# elif error < 0.75:
# e_color = "chartreuse"
# elif error < 2.5:
# e_color = "gold"
# else:
# e_color = "crimson"
e_color = "white"
self.labeled_image = self.clean_image.copy()
draw = ImageDraw.Draw(self.labeled_image)
draw.text((10, 490 - 5 * (FONT_SIZE + 10)), "Filename: " +
os.path.basename(self.filename), "white", font=FONT)
draw.text((10, 490 - 4 * (FONT_SIZE + 10)), "Frame ID: " +
str(self.frame_id), "white", font=FONT)
# draw.text((10, 3 * (FONT_SIZE + 10)),
# "Intensities", "white", font=FONT_BOLD)
draw.text((10, 490 - 3 * (FONT_SIZE + 10)), "Ground Truth: " +
str(self.intensity_actual), "white", font=FONT)
draw.text((10, 490 - 2 * (FONT_SIZE + 10)), "Predicted: " +
str(self.intensity_predicted), "white", font=FONT)
draw.text((10, 490 - 1 * (FONT_SIZE + 10)),
"Error: " + str(error), e_color, font=FONT)
return self.labeled_image
def overlay_image(self, image):
"""
Overlay an image (like a graph) in the bottom right-hand corner of frame
:param image: the image to insert
:returns: the new image
"""
self.labeled_image.paste(image, (470, 380), image)
return self.labeled_image
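# A minimal hedged example of the wrapper above (paths and values invented):
#
#     frame = LabeledFrame('frames/0042.png', 42,
#                          intensity_predicted=3, intensity_actual=2)
#     frame.label().save('out/0042_labeled.png')
#
# The test_* functions below exercise the same flow end to end.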
def test_lf_fdata_fframes():
"""
Test for the LabeledFrame class with fake data & frames generated on the fly
"""
# IO dirs should exist
output_dir = "out/"
# What to append to a frame ID to get the corresponding image file
file_suffix = ".png"
# IDs for each frame
frame_ids = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
# Intensities for each frame
intensities_predicted = [1, 1, 1, 2, 4, 6, 6, 6, 6, 3, 2, 1, 0]
intensities_actual = [1, 0, 0, 2, 3, 5, 6, 6, 5, 3, 1, 1, 1]
gif_images = []
# Loop through provided frame ids
for f_id, i_pred, i_act in zip(frame_ids, intensities_predicted, intensities_actual):
#print("Loading " + input_dir + str(f_id) + file_suffix)
#frame = LabeledFrame(input_dir + str(f_id) + file_suffix, f_id, i_pred, i_act)
frame = LabeledFrame(None, f_id, i_pred, i_act)
#print("Labelling " + frame.filename)
l_image = frame.label()
print("Saving labeled " + str(frame.frame_id) +
file_suffix + " to " + output_dir)
l_image.save(output_dir + str(frame.frame_id) + file_suffix)
gif_images.append(l_image)
# Generate GIF
print("Saving animated GIF")
gif_images[0].save(output_dir + "animated.gif", format="gif",
save_all=True, append_images=gif_images[1:], duration=500)
def test_lf_rdata_fframes():
"""
Test for the LabeledFrame class with real data, but frames generated on the fly
"""
# IO dirs should exist
output_dir = "out/"
# What to append to a frame ID to get the corresponding image file
file_suffix = ".png"
# Intensities for each frame
intensities_predicted = load_MAT("dec_values").flatten().tolist()
intensities_actual = load_MAT("test_label").flatten().tolist()
# IDs for each frame
frame_ids = range(len(intensities_predicted))
gif_images = []
# Loop through provided frame ids
for f_id, i_pred, i_act in zip(frame_ids, intensities_predicted, intensities_actual):
#print("Loading " + input_dir + str(f_id) + file_suffix)
#frame = LabeledFrame(input_dir + str(f_id) + file_suffix, f_id, i_pred, i_act)
frame = LabeledFrame(None, f_id, i_pred, i_act)
print("Labeling " + frame.filename)
l_image = frame.label()
#print("Saving labeled " + str(frame.frame_id) + file_suffix + " to " + output_dir)
# l_image.save(output_dir+str(frame.frame_id)+file_suffix)
gif_images.append(l_image)
# Generate GIF
print("Saving animated GIF")
gif_images[0].save(output_dir + "animated.gif", format="gif",
save_all=True, append_images=gif_images[1:], duration=500)
def test_lf_rdata_rframes_nc():
"""
Test for the LabeledFrame class with real data, and real frames, but the data and frames don't correspond
"""
# IO dirs should exist
input_dir = "images/jh123t1aeaff"
output_dir = "out/"
# What to append to a frame ID to get the corresponding image file
file_suffix = ".png"
# Intensities for each frame
intensities_predicted = load_MAT("dec_values").flatten().tolist()
intensities_actual = load_MAT("test_label").flatten().tolist()
# IDs for each frame
frame_ids = range(56, 360)
gif_images = []
plt.figure(figsize=(1.5, 1.15), dpi=100)
plt.axis('off')
plt.plot(frame_ids, intensities_predicted, "b-", label="predicted")
plt.plot(frame_ids, intensities_actual, "r-", label="actual")
#plt.vlines(self.frame_id,-1, 10)
#plt.legend(loc='upper right')
data_max = max(intensities_predicted + intensities_actual)
data_min = min(intensities_predicted + intensities_actual)
# Loop through provided frame ids
for f_id, i_pred, i_act in zip(frame_ids, intensities_predicted, intensities_actual):
#print("Loading " + input_dir + str(f_id) + file_suffix)
frame = LabeledFrame(input_dir + ('0' if f_id < 100 else '') +
str(f_id) + file_suffix, f_id, i_pred, i_act)
#frame = LabeledFrame(None, f_id, i_pred, i_act)
print("Labeling " + frame.filename)
l_image = frame.label()
# Add vertical line for this frame
ln = plt.vlines(f_id, data_min, data_max,
linestyles='solid', linewidth=".5", zorder=3)
buf = io.BytesIO()
plt.savefig(buf, format='png', transparent=True,
bbox_inches='tight', pad_inches=0)
# Remove the vert line for the next figure
ln.remove()
buf.seek(0)
overlay = Image.open(buf)
l_image = frame.overlay_image(overlay)
#print("Saving labeled " + str(frame.frame_id) + file_suffix + " to " + output_dir)
# l_image.save(output_dir+str(frame.frame_id)+file_suffix)
gif_images.append(l_image)
# Generate GIF
print("Saving animated GIF")
gif_images[0].save(output_dir + "animated.gif", format="gif",
save_all=True, append_images=gif_images[1:], duration=120)
def test_77out(images, output_dir):
"""
Test for the LabeledFrame class with real data and real frames from the leave 77
out experiment
:param images: a list of full paths to the frames used
:param output_dir: the path to the directory where the animated GIF should be saved
"""
# Intensities for each frame
intensities_predicted = load_MAT("dec_values").flatten().tolist()
intensities_actual = load_MAT("test_label").flatten().tolist()
# IDs for each frame
frame_ids = range(0, len(intensities_actual))
gif_images = []
plt.figure(figsize=(1.5, 1.15), dpi=100)
plt.axis('off')
plt.plot(frame_ids, intensities_predicted, "b-", label="predicted")
plt.plot(frame_ids, intensities_actual, "r-", label="actual")
#plt.vlines(self.frame_id,-1, 10)
#plt.legend(loc='upper right')
data_max = max(intensities_predicted + intensities_actual)
data_min = min(intensities_predicted + intensities_actual)
# Loop through provided frame ids
for p, f_id, i_pred, i_act in zip(images, frame_ids, intensities_predicted, intensities_actual):
#print("Loading " + input_dir + str(f_id) + file_suffix)
frame = LabeledFrame(p, f_id, i_pred, i_act)
print("Labeling " + frame.filename)
l_image = frame.label()
# Add vertical line for this frame
ln = plt.vlines(f_id, data_min, data_max,
linestyles='solid', linewidth=".5", zorder=3)
buf = io.BytesIO()
plt.savefig(buf, format='png', transparent=True,
bbox_inches='tight', pad_inches=0)
# Remove the vert line for the next figure
ln.remove()
buf.seek(0)
overlay = Image.open(buf)
l_image = frame.overlay_image(overlay)
gif_images.append(l_image)
# Generate GIF
print("Saving animated GIF")
gif_images[0].save(output_dir + "animated.gif", format="gif",
save_all=True, append_images=gif_images[1:], duration=220)
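# A minimal, hypothetical usage sketch for test_77out (the frame directory,
# glob pattern, and output path below are illustrative only; the function
# itself only assumes load_MAT("dec_values") and load_MAT("test_label")
# return one intensity per frame, as in the body above):
#
#     import glob
#     frames = sorted(glob.glob("images/leave77out/*.png"))
#     test_77out(frames, "out/")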
|
{
"content_hash": "9c6cbca044a22fc9f532ade5b07bcfcb",
"timestamp": "",
"source": "github",
"line_count": 302,
"max_line_length": 109,
"avg_line_length": 35.56953642384106,
"alnum_prop": 0.5989573636194377,
"repo_name": "abyrne55/osvr-review",
"id": "1e85037f6a7a46d76b8eb1ae74e5f3f51f06d1b0",
"size": "10880",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "image-labeler/labeler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "71438"
}
],
"symlink_target": ""
}
|
'''
Covenant Add-on
Copyright (C) 2017 homik
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import urlparse
from resources.lib.modules import source_utils, dom_parser, client
class source:
def __init__(self):
self.priority = 1
self.language = ['pl']
self.domains = ['filmwebbooster.pl']
self.base_link = 'http://178.19.110.218/filmweb/'
self.search_more = 'wiecejzrodel.php'
self.search_tvshow = 'search.php'
self.search_movie = 'search_film.php'
def create_search_more(self, title, localtitle, year):
return {'tytul':localtitle, 'engTitle':title, 'rok':year}
def movie(self, imdb, title, localtitle, aliases, year):
result = {}
result['url'] = urlparse.urljoin(self.base_link, self.search_movie)
result['post'] = {'engTitle':title, 'szukany':localtitle, 'rok':year}
result['more'] = self.create_search_more(title, localtitle, year)
return result
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
result = {}
result['url'] = urlparse.urljoin(self.base_link, self.search_tvshow)
result['post'] = {'title':localtvshowtitle}
result['more'] = self.create_search_more(tvshowtitle, localtvshowtitle, year)
return result
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
url['more']['se'] = season + '-' + episode
url['post']['odcinek'] = episode
url['post']['sezon'] = season
return url
def get_info_from_others(self, sources):
infos = []
for source in sources:
info = source['info']
if info :
infos.append(info)
infos.sort()
if infos:
return infos[0]
return ''
def sources(self, url, hostDict, hostprDict):
try:
search_url = url['url']
post = url['post']
search_more_post = url['more']
result = client.request(search_url, post=post)
sources = []
if not result.startswith('http'):
return sources
valid, host = source_utils.is_host_valid(result, hostDict)
q = source_utils.check_sd_url(result)
first_found = {'source': host, 'quality': q, 'language': 'pl', 'url': result, 'info': '', 'direct': False, 'debridonly': False}
search_url = urlparse.urljoin(self.base_link, self.search_more)
result = client.request(search_url, post=search_more_post)
result = dom_parser.parse_dom(result, 'a')
for el in result :
desc = el.content
info = desc[desc.find("(") + 1:desc.find(")")]
lang = 'pl'
if info.lower() == 'eng':
lang='en'
info=None
link = el.attrs['href']
valid, host = source_utils.is_host_valid(link, hostDict)
if not valid: continue
q = source_utils.check_sd_url(link)
sources.append({'source': host, 'quality': q, 'language': lang, 'url': link, 'info': info, 'direct': False, 'debridonly': False})
first_found['info'] = self.get_info_from_others(sources)
sources.append(first_found)
return sources
except:
return sources
def resolve(self, url):
return url
|
{
"content_hash": "8bd3aa227860457d829460d9b9822383",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 147,
"avg_line_length": 38.909909909909906,
"alnum_prop": 0.5489696689048391,
"repo_name": "TheWardoctor/Wardoctors-repo",
"id": "62b69af0bfebcd91c73df67a319706d921295661",
"size": "4344",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "script.module.uncoded/lib/resources/lib/sources/pl/filmwebbooster.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3208"
},
{
"name": "JavaScript",
"bytes": "115722"
},
{
"name": "Python",
"bytes": "34405207"
},
{
"name": "Shell",
"bytes": "914"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/intangible/pet/shared_r2.iff"
result.attribute_template_id = -1
result.stfName("","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "15f19906463adc0920ef7ffba92c4f65",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 56,
"avg_line_length": 20.76923076923077,
"alnum_prop": 0.6703703703703704,
"repo_name": "anhstudios/swganh",
"id": "6312809966b2124ae71b3a3bf13901d1bc8e279f",
"size": "415",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/intangible/pet/shared_r2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
import gym
from gym import spaces
import numpy as np
# from os import path
import snakeoil_gym as snakeoil
import numpy as np
import copy
import collections as col
import os
import time
class TorcsEnv:
    terminal_judge_start = 100  # If there is still no progress after 100 timesteps, the episode is terminated
termination_limit_progress = 2 # [km/h], episode terminates if car is running slower than this limit
termination_limit_stuck_cnt = 50
default_speed = 50
initial_reset = True
def __init__(self, throttle=False, gear_change=False):
self.throttle = throttle
self.gear_change = gear_change
self.stuck_cnt = 0
self.pre_action_0 = np.zeros(3, dtype=np.float32)
self.pre_action_1 = np.zeros(3, dtype=np.float32)
print("launch torcs")
os.system('pkill torcs')
time.sleep(0.5)
os.system('torcs -nofuel -nolaptime &')
time.sleep(0.5)
os.system('sh autostart.sh')
time.sleep(0.5)
if throttle is False:
self.action_space = spaces.Box(low=-1.0, high=1.0, shape=(1,))
else:
self.action_space = spaces.Box(low=-1.0, high=1.0, shape=(2,))
high = np.array([1., np.inf, np.inf, np.inf, 1., np.inf, 1., np.inf, 255])
low = np.array([0., -np.inf, -np.inf, -np.inf, 0., -np.inf, 0., -np.inf, 0])
self.observation_space = spaces.Box(low=low, high=high)
def step(self, u):
# convert thisAction to the actual torcs actionstr
client = self.client
this_action = self.agent_to_torcs(u)
# Apply Action
action_torcs = client.R.d
# Steering
action_torcs['steer'] = this_action['steer'] # in [-1, 1]
# Simple Automatic Throttle Control by Snakeoil
if self.throttle is False:
target_speed = self.default_speed
if client.S.d['speedX'] < target_speed - (client.R.d['steer'] * 50):
client.R.d['accel'] += .01
else:
client.R.d['accel'] -= .01
if client.R.d['accel'] > 0.2:
client.R.d['accel'] = 0.2
if client.S.d['speedX'] < 10:
client.R.d['accel'] += 1 / (client.S.d['speedX'] + .1)
# Traction Control System
if ((client.S.d['wheelSpinVel'][2] + client.S.d['wheelSpinVel'][3]) -
(client.S.d['wheelSpinVel'][0] + client.S.d['wheelSpinVel'][1]) > 5):
action_torcs['accel'] -= .2
else:
action_torcs['accel'] = this_action['accel']
action_torcs['brake'] = this_action['brake']
# Automatic Gear Change by Snakeoil
if self.gear_change is True:
action_torcs['gear'] = this_action['gear']
else:
# Automatic Gear Change by Snakeoil is possible
action_torcs['gear'] = 1
if self.throttle:
if client.S.d['speedX'] > 80:
action_torcs['gear'] = 2
if client.S.d['speedX'] > 110:
action_torcs['gear'] = 3
if client.S.d['speedX'] > 140:
action_torcs['gear'] = 4
if client.S.d['speedX'] > 170:
action_torcs['gear'] = 5
if client.S.d['speedX'] > 200:
action_torcs['gear'] = 6
        # Save the previous full-obs from torcs for the reward calculation
obs_pre = copy.deepcopy(client.S.d)
self.pre_action_0 = self.pre_action_1
self.pre_action_1 = np.array([action_torcs['steer'],
action_torcs['accel'],
action_torcs['brake']])
# One-Step Dynamics Update #################################
# Apply the Agent's action into torcs
client.respond_to_server()
# Get the response of TORCS
client.get_servers_input()
client.get_servers_input_tcp()
# Get the current full-observation from torcs
obs = client.S.d
        # Make an observation from a raw observation vector from TORCS
self.observation = self.make_observation(obs)
# Reward setting Here #######################################
# direction-dependent positive reward
track = np.array(obs['track'])
trackPos = np.array(obs['trackPos'])
sp = np.array(obs['speedX'])
damage = np.array(obs['damage'])
rpm = np.array(obs['rpm'])
reward = 0
# collision detection
if obs['damage'] - obs_pre['damage'] > 0:
print("collide")
reward = -200
episode_terminate = True
client.R.d['meta'] = True
# Episode is terminated if the car is out of track
if (abs(track.any()) > 1 or abs(trackPos) > 1):
print("Out of track")
reward = -200
episode_terminate = True
client.R.d['meta'] = True
# Episode is terminated if the agent stuck for a long time
progress = sp * np.cos(obs['angle'])
if self.terminal_judge_start < self.time_step:
if progress < self.termination_limit_progress:
self.stuck_cnt += 1
if self.stuck_cnt > self.termination_limit_stuck_cnt:
print("No progress")
reward = -200
episode_terminate = True
client.R.d['meta'] = True
else:
self.stuck_cnt = 0
# Episode is terminated if the agent runs backward
if np.cos(obs['angle']) < 0:
print("Run backward")
reward = -200
episode_terminate = True
client.R.d['meta'] = True
if client.R.d['meta'] is True: # Send a reset signal
client.respond_to_server()
self.time_step += 1
return self.get_obs(), reward, client.R.d['meta'], {}
def reset(self, relaunch=False):
#print("Reset")
self.time_step = 0
self.stuck_cnt = 0
if self.initial_reset is not True:
self.client.R.d['meta'] = True
self.client.respond_to_server()
        ## TENTATIVE. Restarting TORCS every episode suffers from the memory leak bug!
if relaunch is True:
self.reset_torcs()
print("### TORCS is RELAUNCHED ###")
# Modify here if you use multiple tracks in the environment
self.client = snakeoil.Client(p=3001) # Open new UDP in vtorcs
self.client.MAX_STEPS = np.inf
client = self.client
client.get_servers_input() # Get the initial input from torcs
client.get_servers_input_tcp()
obs = client.S.d # Get the current full-observation from torcs
self.observation = self.make_observation(obs)
self.initial_reset = False
return self.get_obs()
def end(self):
os.system('pkill torcs')
def get_obs(self):
return self.observation
def reset_torcs(self):
#print("relaunch torcs")
os.system('pkill torcs')
time.sleep(0.5)
os.system('torcs -nofuel -nolaptime &')
time.sleep(0.5)
os.system('sh autostart.sh')
time.sleep(0.5)
def agent_to_torcs(self, u):
torcs_action = {'steer': u[0]}
if self.throttle is True: # throttle action is enabled
torcs_action.update({'accel': u[1]})
torcs_action.update({'brake': u[2]})
if self.gear_change is True: # gear change action is enabled
torcs_action.update({'gear': int(u[3])})
return torcs_action
def make_observation(self, raw_obs):
names = ['focus', 'speedX', 'speedY', 'speedZ', 'angle', 'damage',
'opponents', 'rpm', 'track', 'trackPos', 'wheelSpinVel',
'img', 'pre_action_0', 'pre_action_1', 'distFromStart']
        Observation = col.namedtuple('Observation', names)
        # Scale the raw sensor readings into roughly unit range before returning.
        return Observation(
            focus=np.array(raw_obs['focus'], dtype=np.float32) / 200.,
            speedX=np.array(raw_obs['speedX'], dtype=np.float32) / 200.,
            speedY=np.array(raw_obs['speedY'], dtype=np.float32) / 200.,
            speedZ=np.array(raw_obs['speedZ'], dtype=np.float32) / 200.,
            angle=np.array(raw_obs['angle'], dtype=np.float32) / 3.1416,
            damage=np.array(raw_obs['damage'] / 200., dtype=np.float32),
            opponents=np.array(raw_obs['opponents'], dtype=np.float32) / 200.,
            rpm=np.array(raw_obs['rpm'], dtype=np.float32) / 1000,
            track=np.array(raw_obs['track'], dtype=np.float32) / 200.,
            trackPos=np.array(raw_obs['trackPos'], dtype=np.float32),
            wheelSpinVel=np.array(raw_obs['wheelSpinVel'], dtype=np.float32) / 200.,
            img=raw_obs['img'],
            pre_action_0=self.pre_action_0,
            pre_action_1=self.pre_action_1,
            distFromStart=raw_obs['distFromStart'])
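# A minimal interaction sketch, kept as a comment. It assumes a TORCS
# installation that the snakeoil client can reach on port 3001 plus the
# autostart.sh script used in __init__ above. The action layout follows
# agent_to_torcs: [steer] when throttle is False, or [steer, accel, brake]
# when throttle=True.
#
#     env = TorcsEnv(throttle=True)
#     ob = env.reset(relaunch=True)
#     for _ in range(100):
#         ob, reward, done, _ = env.step(np.array([0.0, 0.2, 0.0]))
#         if done:
#             ob = env.reset()
#     env.end()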
|
{
"content_hash": "c51e7ca99555b9ca9683f4413db1e298",
"timestamp": "",
"source": "github",
"line_count": 253,
"max_line_length": 105,
"avg_line_length": 38.07905138339921,
"alnum_prop": 0.506746937928171,
"repo_name": "YunzhuLi/InfoGAIL",
"id": "b0641ccb6a0883465a8e8535206f719d0d618b46",
"size": "9634",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "wgail_info_0/gym_torcs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "449392"
},
{
"name": "C",
"bytes": "1222309"
},
{
"name": "C++",
"bytes": "3766464"
},
{
"name": "CSS",
"bytes": "2093"
},
{
"name": "HTML",
"bytes": "731905"
},
{
"name": "JavaScript",
"bytes": "360"
},
{
"name": "M",
"bytes": "831"
},
{
"name": "M4",
"bytes": "5404"
},
{
"name": "Makefile",
"bytes": "383529"
},
{
"name": "Python",
"bytes": "155368"
},
{
"name": "RobotFramework",
"bytes": "23937"
},
{
"name": "Roff",
"bytes": "302175"
},
{
"name": "Shell",
"bytes": "59343"
},
{
"name": "TeX",
"bytes": "156855"
},
{
"name": "XSLT",
"bytes": "27681"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, absolute_import, unicode_literals
__all__ = ["corner", "hist2d"]
__version__ = "0.2.0"
__author__ = "Dan Foreman-Mackey (danfm@nyu.edu)"
__copyright__ = "Copyright 2013 Daniel Foreman-Mackey"
__contributors__ = [
# Alphabetical by first name.
"Adrian Price-Whelan @adrn",
"Brendon Brewer @eggplantbren",
"Ekta Patel @ekta1224",
"Emily Rice @emilurice",
"Geoff Ryan @geoffryan",
"Guillaume @ceyzeriat",
"Kelle Cruz @kelle",
"Kyle Barbary @kbarbary",
"Marco Tazzari @mtazzari",
"Phil Marshall @drphilmarshall",
"Pierre Gratier @pirg",
"Stephan Hoyer @shoyer",
"Will Vousden @willvousden",
"Wolfgang Kerzendorf @wkerzendorf",
]
import logging
import numpy as np
import matplotlib.pyplot as pl
from matplotlib.ticker import MaxNLocator
from matplotlib.colors import LinearSegmentedColormap, colorConverter
try:
from scipy.ndimage import gaussian_filter
except ImportError:
gaussian_filter = None
def corner(xs, bins=20, range=None, weights=None, color="k",
smooth=None, smooth1d=None,
labels=None, label_kwargs=None,
show_titles=False, title_fmt=".2f", title_kwargs=None,
truths=None, truth_color="#4682b4",
scale_hist=False, quantiles=None, verbose=False, fig=None,
max_n_ticks=5, top_ticks=False,
maxlike=None, maxpost=None, maxlike_color="r", maxpost_color="g",
hist_kwargs=None, **hist2d_kwargs):
"""
Make a *sick* corner plot showing the projections of a data set in a
multi-dimensional space. kwargs are passed to hist2d() or used for
`matplotlib` styling.
Parameters
----------
xs : array_like (nsamples, ndim)
The samples. This should be a 1- or 2-dimensional array. For a 1-D
array this results in a simple histogram. For a 2-D array, the zeroth
axis is the list of samples and the next axis are the dimensions of
the space.
weights : array_like (nsamples,)
The weight of each sample. If `None` (default), samples are given
equal weight.
labels : iterable (ndim,) (optional)
A list of names for the dimensions. If a ``xs`` is a
``pandas.DataFrame``, labels will default to column names.
show_titles : bool (optional)
Displays a title above each 1-D histogram showing the 0.5 quantile
with the upper and lower errors supplied by the quantiles argument.
title_fmt : string (optional)
The format string for the quantiles given in titles.
(default: `.2f`)
    title_kwargs : dict (optional)
        Any extra keyword arguments to send to the `set_title` command.
    range : iterable (ndim,) (optional)
A list where each element is either a length 2 tuple containing
lower and upper bounds (extents) or a float in range (0., 1.)
giving the fraction of samples to include in bounds, e.g.,
[(0.,10.), (1.,5), 0.999, etc.].
If a fraction, the bounds are chosen to be equal-tailed.
truths : iterable (ndim,) (optional)
A list of reference values to indicate on the plots.
truth_color : str (optional)
A ``matplotlib`` style color for the ``truths`` makers.
scale_hist : bool (optional)
Should the 1-D histograms be scaled in such a way that the zero line
is visible?
quantiles : iterable (optional)
A list of fractional quantiles to show on the 1-D histograms as
vertical dashed lines.
verbose : bool (optional)
If true, print the values of the computed quantiles.
plot_contours : bool (optional)
Draw contours for dense regions of the plot.
plot_datapoints : bool (optional)
Draw the individual data points.
max_n_ticks: int (optional)
maximum number of ticks to try to use
fig : matplotlib.Figure (optional)
Overplot onto the provided figure object.
"""
if quantiles is None:
quantiles = []
if title_kwargs is None:
title_kwargs = dict()
if label_kwargs is None:
label_kwargs = dict()
# Try filling in labels from pandas.DataFrame columns.
if labels is None:
try:
labels = xs.columns
except AttributeError:
pass
# Deal with 1D sample lists.
xs = np.atleast_1d(xs)
if len(xs.shape) == 1:
xs = np.atleast_2d(xs)
else:
assert len(xs.shape) == 2, "The input sample array must be 1- or 2-D."
xs = xs.T
assert xs.shape[0] <= xs.shape[1], "I don't believe that you want more " \
"dimensions than samples!"
# Parse the weight array.
if weights is not None:
weights = np.asarray(weights)
if weights.ndim != 1:
raise ValueError("Weights must be 1-D")
if xs.shape[1] != weights.shape[0]:
raise ValueError("Lengths of weights must match number of samples")
# Parse the parameter ranges.
if range is None:
if "extents" in hist2d_kwargs:
logging.warn("Deprecated keyword argument 'extents'. "
"Use 'range' instead.")
range = hist2d_kwargs.pop("extents")
else:
range = [[x.min(), x.max()] for x in xs]
# Check for parameters that never change.
m = np.array([e[0] == e[1] for e in range], dtype=bool)
if np.any(m):
raise ValueError(("It looks like the parameter(s) in "
"column(s) {0} have no dynamic range. "
"Please provide a `range` argument.")
.format(", ".join(map(
"{0}".format, np.arange(len(m))[m]))))
else:
# If any of the extents are percentiles, convert them to ranges.
for i, _ in enumerate(range):
try:
emin, emax = range[i]
except TypeError:
q = [0.5 - 0.5*range[i], 0.5 + 0.5*range[i]]
range[i] = quantile(xs[i], q, weights=weights)
if len(range) != xs.shape[0]:
raise ValueError("Dimension mismatch between samples and range")
# Parse the bin specifications.
try:
bins = [float(bins) for _ in range]
except TypeError:
if len(bins) != len(range):
raise ValueError("Dimension mismatch between bins and range")
# Some magic numbers for pretty axis layout.
K = len(xs)
factor = 2.0 # size of one side of one panel
lbdim = 0.5 * factor # size of left/bottom margin
trdim = 0.2 * factor # size of top/right margin
whspace = 0.05 # w/hspace size
plotdim = factor * K + factor * (K - 1.) * whspace
dim = lbdim + plotdim + trdim
# Create a new figure if one wasn't provided.
if fig is None:
fig, axes = pl.subplots(K, K, figsize=(dim, dim), tight_layout=False)
else:
try:
axes = np.array(fig.axes).reshape((K, K))
except:
raise ValueError("Provided figure has {0} axes, but data has "
"dimensions K={1}".format(len(fig.axes), K))
fig.set_tight_layout(False)
# Format the figure.
lb = lbdim / dim
tr = (lbdim + plotdim) / dim
fig.subplots_adjust(left=lb, bottom=lb, right=tr, top=tr,
wspace=whspace, hspace=whspace)
# Set up the default histogram keywords.
if hist_kwargs is None:
hist_kwargs = dict()
hist_kwargs["color"] = hist_kwargs.get("color", color)
if smooth1d is None:
hist_kwargs["histtype"] = hist_kwargs.get("histtype", "step")
for i, x in enumerate(xs):
# Deal with masked arrays.
if hasattr(x, "compressed"):
x = x.compressed()
if np.shape(xs)[0] == 1:
ax = axes
else:
ax = axes[i, i]
# Plot the histograms.
if smooth1d is None:
n, _, _ = ax.hist(x, bins=bins[i], weights=weights,
range=range[i], **hist_kwargs)
else:
if gaussian_filter is None:
raise ImportError("Please install scipy for smoothing")
n, b = np.histogram(x, bins=bins[i], weights=weights,
range=range[i])
n = gaussian_filter(n, smooth1d)
            x0 = np.array(list(zip(b[:-1], b[1:]))).flatten()
            y0 = np.array(list(zip(n, n))).flatten()
ax.plot(x0, y0, **hist_kwargs)
if truths is not None:
ax.axvline(truths[i], color=truth_color, ls='-', lw=2)
if maxlike is not None:
ax.axvline(maxlike[i], color=maxlike_color, ls='-.', lw=2)
if maxpost is not None:
ax.axvline(maxpost[i], color=maxpost_color, ls=':', lw=2)
# Plot quantiles if wanted.
if len(quantiles) > 0:
qvalues = quantile(x, quantiles, weights=weights)
for q in qvalues:
ax.axvline(q, ls='--', color=color, lw=1.5)
if verbose:
print("Quantiles:")
print([item for item in zip(quantiles, qvalues)])
if show_titles:
# Compute the quantiles for the title. This might redo
# unneeded computation but who cares.
q_05, q_50, q_95 = quantile(x, [0.05, 0.50, 0.95], weights=weights)
q_m, q_p = q_50-q_05, q_95-q_50
# Format the quantile display.
fmt = "{{0:{0}}}".format(title_fmt).format
title = r"${{{0}}}_{{-{1}}}^{{+{2}}}$"
title = title.format(fmt(q_50), fmt(q_m), fmt(q_p))
# Add in the column name if it's given.
if labels is not None:
title = "{0} = {1}".format(labels[i], title)
# Add the title to the axis.
ax.set_title(title, **title_kwargs)
# Set up the axes.
ax.set_xlim(range[i])
if scale_hist:
maxn = np.max(n)
ax.set_ylim(-0.1 * maxn, 1.1 * maxn)
else:
ax.set_ylim(0, 1.1 * np.max(n))
ax.set_yticklabels([])
ax.xaxis.set_major_locator(MaxNLocator(max_n_ticks, prune="lower"))
if i < K - 1:
if top_ticks:
ax.xaxis.set_ticks_position("top")
[l.set_rotation(45) for l in ax.get_xticklabels()]
else:
ax.set_xticklabels([])
else:
[l.set_rotation(45) for l in ax.get_xticklabels()]
if labels is not None:
ax.set_xlabel(labels[i], **label_kwargs)
ax.xaxis.set_label_coords(0.5, -0.3)
for j, y in enumerate(xs):
if np.shape(xs)[0] == 1:
ax = axes
else:
ax = axes[i, j]
if j > i:
ax.set_visible(False)
ax.set_frame_on(False)
continue
elif j == i:
continue
# Deal with masked arrays.
if hasattr(y, "compressed"):
y = y.compressed()
hist2d(y, x, ax=ax, range=[range[j], range[i]], weights=weights,
color=color, smooth=smooth, **hist2d_kwargs)
if truths is not None:
ax.plot(truths[j], truths[i], "s", color=truth_color, ms=8)
ax.axvline(truths[j], color=truth_color, lw=2)
ax.axhline(truths[i], color=truth_color, lw=2)
if maxlike is not None:
ax.plot(maxlike[j], maxlike[i], "o", color=maxlike_color, ms=8)
ax.axvline(maxlike[j], color=maxlike_color, ls='-.', lw=2)
ax.axhline(maxlike[i], color=maxlike_color, ls='-.', lw=2)
if maxpost is not None:
ax.plot(maxpost[j], maxpost[i], "d", color=maxpost_color, ms=8)
ax.axvline(maxpost[j], color=maxpost_color, ls=':', lw=2)
ax.axhline(maxpost[i], color=maxpost_color, ls=':', lw=2)
ax.xaxis.set_major_locator(MaxNLocator(max_n_ticks, prune="lower"))
ax.yaxis.set_major_locator(MaxNLocator(max_n_ticks, prune="lower"))
if i < K - 1:
ax.set_xticklabels([])
else:
[l.set_rotation(45) for l in ax.get_xticklabels()]
if labels is not None:
ax.set_xlabel(labels[j], **label_kwargs)
ax.xaxis.set_label_coords(0.5, -0.3)
if j > 0:
ax.set_yticklabels([])
else:
[l.set_rotation(45) for l in ax.get_yticklabels()]
if labels is not None:
ax.set_ylabel(labels[i], **label_kwargs)
ax.yaxis.set_label_coords(-0.3, 0.5)
return fig
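# A minimal usage sketch for corner() (synthetic data, illustrative names;
# not part of the original module):
#
#     samples = np.random.randn(5000, 3)
#     fig = corner(samples, labels=["a", "b", "c"],
#                  quantiles=[0.05, 0.5, 0.95], show_titles=True)
#     fig.savefig("corner_demo.png")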
def quantile(x, q, weights=None):
"""
Like numpy.percentile, but:
* Values of q are quantiles [0., 1.] rather than percentiles [0., 100.]
* scalar q not supported (q must be iterable)
* optional weights on x
"""
if weights is None:
return np.percentile(x, [100. * qi for qi in q])
else:
idx = np.argsort(x)
xsorted = x[idx]
cdf = np.add.accumulate(weights[idx])
cdf /= cdf[-1]
return np.interp(q, cdf, xsorted).tolist()
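# Illustration of the weighted path above: quantile(x, [0.5]) is the median of
# x, while passing `weights` reweights the empirical CDF before the np.interp
# step; e.g. weights=[0, 0, 1, 1] on x=[1, 2, 3, 4] pulls the 0.5 quantile
# from 2.5 up to 3.0. (q must always be an iterable, per the docstring.)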
def hist2d(x, y, bins=20, range=None, weights=None, levels=None, smooth=None,
ax=None, color=None, plot_datapoints=True, plot_density=True,
plot_contours=True, fill_contours=False,
contour_kwargs=None, contourf_kwargs=None, data_kwargs=None,
**kwargs):
"""
Plot a 2-D histogram of samples.
"""
if ax is None:
ax = pl.gca()
# Set the default range based on the data range if not provided.
if range is None:
if "extent" in kwargs:
logging.warn("Deprecated keyword argument 'extent'. "
"Use 'range' instead.")
range = kwargs["extent"]
else:
range = [[x.min(), x.max()], [y.min(), y.max()]]
# Set up the default plotting arguments.
if color is None:
color = "k"
# Choose the default "sigma" contour levels.
if levels is None:
levels = 1.0 - np.exp(-0.5 * np.arange(0.5, 3.1, 0.5) ** 2)
# This is the color map for the density plot, over-plotted to indicate the
# density of the points near the center.
density_cmap = LinearSegmentedColormap.from_list(
"density_cmap", [color, (1, 1, 1, 0)])
# This color map is used to hide the points at the high density areas.
white_cmap = LinearSegmentedColormap.from_list(
"white_cmap", [(1, 1, 1), (1, 1, 1)], N=2)
# This "color map" is the list of colors for the contour levels if the
# contours are filled.
rgba_color = colorConverter.to_rgba(color)
contour_cmap = [rgba_color] + [list(rgba_color) for l in levels]
for i, l in enumerate(levels):
contour_cmap[i+1][-1] *= float(len(levels) - i) / (len(levels)+1)
# We'll make the 2D histogram to directly estimate the density.
try:
H, X, Y = np.histogram2d(x.flatten(), y.flatten(), bins=bins,
range=range, weights=weights)
except ValueError:
raise ValueError("It looks like at least one of your sample columns "
"have no dynamic range. You could try using the "
"'range' argument.")
if smooth is not None:
if gaussian_filter is None:
raise ImportError("Please install scipy for smoothing")
H = gaussian_filter(H, smooth)
# Compute the density levels.
Hflat = H.flatten()
inds = np.argsort(Hflat)[::-1]
Hflat = Hflat[inds]
sm = np.cumsum(Hflat)
sm /= sm[-1]
V = np.empty(len(levels))
for i, v0 in enumerate(levels):
try:
V[i] = Hflat[sm <= v0][-1]
except:
V[i] = Hflat[0]
# Compute the bin centers.
X1, Y1 = 0.5 * (X[1:] + X[:-1]), 0.5 * (Y[1:] + Y[:-1])
# Extend the array for the sake of the contours at the plot edges.
H2 = H.min() + np.zeros((H.shape[0] + 4, H.shape[1] + 4))
H2[2:-2, 2:-2] = H
H2[2:-2, 1] = H[:, 0]
H2[2:-2, -2] = H[:, -1]
H2[1, 2:-2] = H[0]
H2[-2, 2:-2] = H[-1]
H2[1, 1] = H[0, 0]
H2[1, -2] = H[0, -1]
H2[-2, 1] = H[-1, 0]
H2[-2, -2] = H[-1, -1]
X2 = np.concatenate([
X1[0] + np.array([-2, -1]) * np.diff(X1[:2]),
X1,
X1[-1] + np.array([1, 2]) * np.diff(X1[-2:]),
])
Y2 = np.concatenate([
Y1[0] + np.array([-2, -1]) * np.diff(Y1[:2]),
Y1,
Y1[-1] + np.array([1, 2]) * np.diff(Y1[-2:]),
])
if plot_datapoints:
if data_kwargs is None:
data_kwargs = dict()
data_kwargs["color"] = data_kwargs.get("color", color)
data_kwargs["ms"] = data_kwargs.get("ms", 2.0)
data_kwargs["mec"] = data_kwargs.get("mec", "none")
data_kwargs["alpha"] = data_kwargs.get("alpha", 0.1)
ax.plot(x, y, "o", zorder=-1, rasterized=True, **data_kwargs)
# Plot the base fill to hide the densest data points.
if plot_contours or plot_density:
ax.contourf(X2, Y2, H2.T, [V[-1], H.max()],
cmap=white_cmap, antialiased=False)
if plot_contours and fill_contours:
if contourf_kwargs is None:
contourf_kwargs = dict()
contourf_kwargs["colors"] = contourf_kwargs.get("colors", contour_cmap)
contourf_kwargs["antialiased"] = contourf_kwargs.get("antialiased",
False)
ax.contourf(X2, Y2, H2.T, np.concatenate([[H.max()], V, [0]]),
**contourf_kwargs)
# Plot the density map. This can't be plotted at the same time as the
# contour fills.
elif plot_density:
ax.pcolor(X, Y, H.max() - H.T, cmap=density_cmap)
# Plot the contour edge colors.
if plot_contours:
if contour_kwargs is None:
contour_kwargs = dict()
contour_kwargs["colors"] = contour_kwargs.get("colors", color)
ax.contour(X2, Y2, H2.T, V, **contour_kwargs)
ax.set_xlim(range[0])
ax.set_ylim(range[1])
|
{
"content_hash": "745b79c35fbe0942b2d091b081e371a3",
"timestamp": "",
"source": "github",
"line_count": 505,
"max_line_length": 79,
"avg_line_length": 36.21980198019802,
"alnum_prop": 0.549559892843475,
"repo_name": "PBGraff/SwiftGRB_PEanalysis",
"id": "02ed13d4f537e719ae07cfc173a95a5b16be13e3",
"size": "18316",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "triangle.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "122073"
},
{
"name": "C++",
"bytes": "89724"
},
{
"name": "Makefile",
"bytes": "4635"
},
{
"name": "Python",
"bytes": "33927"
}
],
"symlink_target": ""
}
|
import logging
import uuid
import ldap
import six
# Django
from django.dispatch import receiver
from django.contrib.auth.models import User
from django.conf import settings as django_settings
from django.core.signals import setting_changed
# django-auth-ldap
from django_auth_ldap.backend import LDAPSettings as BaseLDAPSettings
from django_auth_ldap.backend import LDAPBackend as BaseLDAPBackend
from django_auth_ldap.backend import populate_user
from django.core.exceptions import ImproperlyConfigured
# radiusauth
from radiusauth.backends import RADIUSBackend as BaseRADIUSBackend
# tacacs+ auth
import tacacs_plus
# social
from social_core.backends.saml import OID_USERID
from social_core.backends.saml import SAMLAuth as BaseSAMLAuth
from social_core.backends.saml import SAMLIdentityProvider as BaseSAMLIdentityProvider
# Ansible Tower
from awx.conf.license import feature_enabled
from awx.sso.models import UserEnterpriseAuth
logger = logging.getLogger('awx.sso.backends')
class LDAPSettings(BaseLDAPSettings):
defaults = dict(BaseLDAPSettings.defaults.items() + {
'ORGANIZATION_MAP': {},
'TEAM_MAP': {},
'GROUP_TYPE_PARAMS': {},
}.items())
def __init__(self, prefix='AUTH_LDAP_', defaults={}):
super(LDAPSettings, self).__init__(prefix, defaults)
# If a DB-backed setting is specified that wipes out the
# OPT_NETWORK_TIMEOUT, fall back to a sane default
if ldap.OPT_NETWORK_TIMEOUT not in getattr(self, 'CONNECTION_OPTIONS', {}):
options = getattr(self, 'CONNECTION_OPTIONS', {})
options[ldap.OPT_NETWORK_TIMEOUT] = 30
self.CONNECTION_OPTIONS = options
class LDAPBackend(BaseLDAPBackend):
'''
Custom LDAP backend for AWX.
'''
settings_prefix = 'AUTH_LDAP_'
def __init__(self, *args, **kwargs):
self._dispatch_uid = uuid.uuid4()
super(LDAPBackend, self).__init__(*args, **kwargs)
setting_changed.connect(self._on_setting_changed, dispatch_uid=self._dispatch_uid)
def __del__(self):
setting_changed.disconnect(dispatch_uid=self._dispatch_uid)
def _on_setting_changed(self, sender, **kwargs):
# If any AUTH_LDAP_* setting changes, force settings to be reloaded for
# this backend instance.
if kwargs.get('setting', '').startswith(self.settings_prefix):
self._settings = None
def _get_settings(self):
if self._settings is None:
self._settings = LDAPSettings(self.settings_prefix)
return self._settings
def _set_settings(self, settings):
self._settings = settings
settings = property(_get_settings, _set_settings)
def authenticate(self, username, password):
if self.settings.START_TLS and ldap.OPT_X_TLS_REQUIRE_CERT in self.settings.CONNECTION_OPTIONS:
# with python-ldap, if you want to set connection-specific TLS
# parameters, you must also specify OPT_X_TLS_NEWCTX = 0
# see: https://stackoverflow.com/a/29722445
# see: https://stackoverflow.com/a/38136255
self.settings.CONNECTION_OPTIONS[ldap.OPT_X_TLS_NEWCTX] = 0
if not self.settings.SERVER_URI:
return None
if not feature_enabled('ldap'):
logger.error("Unable to authenticate, license does not support LDAP authentication")
return None
try:
user = User.objects.get(username=username)
if user and (not user.profile or not user.profile.ldap_dn):
return None
except User.DoesNotExist:
pass
try:
for setting_name, type_ in [
('GROUP_SEARCH', 'LDAPSearch'),
('GROUP_TYPE', 'LDAPGroupType'),
]:
if getattr(self.settings, setting_name) is None:
raise ImproperlyConfigured(
"{} must be an {} instance.".format(setting_name, type_)
)
return super(LDAPBackend, self).authenticate(username, password)
except Exception:
logger.exception("Encountered an error authenticating to LDAP")
return None
def get_user(self, user_id):
if not self.settings.SERVER_URI:
return None
if not feature_enabled('ldap'):
logger.error("Unable to get_user, license does not support LDAP authentication")
return None
return super(LDAPBackend, self).get_user(user_id)
# Disable any LDAP based authorization / permissions checking.
def has_perm(self, user, perm, obj=None):
return False
def has_module_perms(self, user, app_label):
return False
def get_all_permissions(self, user, obj=None):
return set()
def get_group_permissions(self, user, obj=None):
return set()
class LDAPBackend1(LDAPBackend):
settings_prefix = 'AUTH_LDAP_1_'
class LDAPBackend2(LDAPBackend):
settings_prefix = 'AUTH_LDAP_2_'
class LDAPBackend3(LDAPBackend):
settings_prefix = 'AUTH_LDAP_3_'
class LDAPBackend4(LDAPBackend):
settings_prefix = 'AUTH_LDAP_4_'
class LDAPBackend5(LDAPBackend):
settings_prefix = 'AUTH_LDAP_5_'
def _decorate_enterprise_user(user, provider):
user.set_unusable_password()
user.save()
enterprise_auth, _ = UserEnterpriseAuth.objects.get_or_create(user=user, provider=provider)
return enterprise_auth
def _get_or_set_enterprise_user(username, password, provider):
created = False
try:
user = User.objects.all().prefetch_related('enterprise_auth').get(username=username)
except User.DoesNotExist:
user = User(username=username)
enterprise_auth = _decorate_enterprise_user(user, provider)
logger.debug("Created enterprise user %s via %s backend." %
(username, enterprise_auth.get_provider_display()))
created = True
if created or user.is_in_enterprise_category(provider):
return user
logger.warn("Enterprise user %s already defined in Tower." % username)
class RADIUSBackend(BaseRADIUSBackend):
'''
Custom Radius backend to verify license status
'''
def authenticate(self, username, password):
if not django_settings.RADIUS_SERVER:
return None
if not feature_enabled('enterprise_auth'):
logger.error("Unable to authenticate, license does not support RADIUS authentication")
return None
return super(RADIUSBackend, self).authenticate(username, password)
def get_user(self, user_id):
if not django_settings.RADIUS_SERVER:
return None
if not feature_enabled('enterprise_auth'):
logger.error("Unable to get_user, license does not support RADIUS authentication")
return None
user = super(RADIUSBackend, self).get_user(user_id)
if not user.has_usable_password():
return user
def get_django_user(self, username, password=None):
return _get_or_set_enterprise_user(username, password, 'radius')
class TACACSPlusBackend(object):
'''
Custom TACACS+ auth backend for AWX
'''
def authenticate(self, username, password):
if not django_settings.TACACSPLUS_HOST:
return None
if not feature_enabled('enterprise_auth'):
logger.error("Unable to authenticate, license does not support TACACS+ authentication")
return None
try:
# Upstream TACACS+ client does not accept non-string, so convert if needed.
auth = tacacs_plus.TACACSClient(
django_settings.TACACSPLUS_HOST.encode('utf-8'),
django_settings.TACACSPLUS_PORT,
django_settings.TACACSPLUS_SECRET.encode('utf-8'),
timeout=django_settings.TACACSPLUS_SESSION_TIMEOUT,
).authenticate(
username.encode('utf-8'), password.encode('utf-8'),
authen_type=tacacs_plus.TAC_PLUS_AUTHEN_TYPES[django_settings.TACACSPLUS_AUTH_PROTOCOL],
)
except Exception as e:
logger.exception("TACACS+ Authentication Error: %s" % (e.message,))
return None
if auth.valid:
return _get_or_set_enterprise_user(username, password, 'tacacs+')
def get_user(self, user_id):
if not django_settings.TACACSPLUS_HOST:
return None
if not feature_enabled('enterprise_auth'):
logger.error("Unable to get user, license does not support TACACS+ authentication")
return None
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
class TowerSAMLIdentityProvider(BaseSAMLIdentityProvider):
'''
    Custom Identity Provider to map attributes to what we expect.
'''
def get_user_permanent_id(self, attributes):
uid = attributes[self.conf.get('attr_user_permanent_id', OID_USERID)]
if isinstance(uid, six.string_types):
return uid
return uid[0]
def get_attr(self, attributes, conf_key, default_attribute):
"""
Get the attribute 'default_attribute' out of the attributes,
unless self.conf[conf_key] overrides the default by specifying
another attribute to use.
"""
key = self.conf.get(conf_key, default_attribute)
value = attributes[key] if key in attributes else None
# In certain implementations (like https://pagure.io/ipsilon) this value is a string, not a list
if isinstance(value, (list, tuple)):
value = value[0]
if conf_key in ('attr_first_name', 'attr_last_name', 'attr_username', 'attr_email') and value is None:
logger.warn("Could not map user detail '%s' from SAML attribute '%s'; "
"update SOCIAL_AUTH_SAML_ENABLED_IDPS['%s']['%s'] with the correct SAML attribute.",
conf_key[5:], key, self.name, conf_key)
return six.text_type(value) if value is not None else value
class SAMLAuth(BaseSAMLAuth):
'''
Custom SAMLAuth backend to verify license status
'''
def get_idp(self, idp_name):
idp_config = self.setting('ENABLED_IDPS')[idp_name]
return TowerSAMLIdentityProvider(idp_name, **idp_config)
def authenticate(self, *args, **kwargs):
if not all([django_settings.SOCIAL_AUTH_SAML_SP_ENTITY_ID, django_settings.SOCIAL_AUTH_SAML_SP_PUBLIC_CERT,
django_settings.SOCIAL_AUTH_SAML_SP_PRIVATE_KEY, django_settings.SOCIAL_AUTH_SAML_ORG_INFO,
django_settings.SOCIAL_AUTH_SAML_TECHNICAL_CONTACT, django_settings.SOCIAL_AUTH_SAML_SUPPORT_CONTACT,
django_settings.SOCIAL_AUTH_SAML_ENABLED_IDPS]):
return None
if not feature_enabled('enterprise_auth'):
logger.error("Unable to authenticate, license does not support SAML authentication")
return None
user = super(SAMLAuth, self).authenticate(*args, **kwargs)
# Comes from https://github.com/omab/python-social-auth/blob/v0.2.21/social/backends/base.py#L91
if getattr(user, 'is_new', False):
_decorate_enterprise_user(user, 'saml')
elif user and not user.is_in_enterprise_category('saml'):
return None
return user
def get_user(self, user_id):
if not all([django_settings.SOCIAL_AUTH_SAML_SP_ENTITY_ID, django_settings.SOCIAL_AUTH_SAML_SP_PUBLIC_CERT,
django_settings.SOCIAL_AUTH_SAML_SP_PRIVATE_KEY, django_settings.SOCIAL_AUTH_SAML_ORG_INFO,
django_settings.SOCIAL_AUTH_SAML_TECHNICAL_CONTACT, django_settings.SOCIAL_AUTH_SAML_SUPPORT_CONTACT,
django_settings.SOCIAL_AUTH_SAML_ENABLED_IDPS]):
return None
if not feature_enabled('enterprise_auth'):
logger.error("Unable to get_user, license does not support SAML authentication")
return None
return super(SAMLAuth, self).get_user(user_id)
def _update_m2m_from_groups(user, ldap_user, rel, opts, remove=True):
'''
    Helper function to update an m2m relationship based on LDAP group membership.
'''
should_add = False
if opts is None:
return
elif not opts:
pass
elif opts is True:
should_add = True
else:
if isinstance(opts, six.string_types):
opts = [opts]
for group_dn in opts:
if not isinstance(group_dn, six.string_types):
continue
if ldap_user._get_groups().is_member_of(group_dn):
should_add = True
if should_add:
rel.add(user)
elif remove and user in rel.all():
rel.remove(user)
@receiver(populate_user, dispatch_uid='populate-ldap-user')
def on_populate_user(sender, **kwargs):
'''
Handle signal from LDAP backend to populate the user object. Update user
organization/team memberships according to their LDAP groups.
'''
from awx.main.models import Organization, Team
user = kwargs['user']
ldap_user = kwargs['ldap_user']
backend = ldap_user.backend
# Prefetch user's groups to prevent LDAP queries for each org/team when
# checking membership.
ldap_user._get_groups().get_group_dns()
# If the LDAP user has a first or last name > $maxlen chars, truncate it
for field in ('first_name', 'last_name'):
max_len = User._meta.get_field(field).max_length
field_len = len(getattr(user, field))
if field_len > max_len:
setattr(user, field, getattr(user, field)[:max_len])
logger.warn(six.text_type(
'LDAP user {} has {} > max {} characters'
).format(user.username, field, max_len))
# Update organization membership based on group memberships.
org_map = getattr(backend.settings, 'ORGANIZATION_MAP', {})
for org_name, org_opts in org_map.items():
org, created = Organization.objects.get_or_create(name=org_name)
remove = bool(org_opts.get('remove', True))
admins_opts = org_opts.get('admins', None)
remove_admins = bool(org_opts.get('remove_admins', remove))
_update_m2m_from_groups(user, ldap_user, org.admin_role.members, admins_opts,
remove_admins)
users_opts = org_opts.get('users', None)
remove_users = bool(org_opts.get('remove_users', remove))
_update_m2m_from_groups(user, ldap_user, org.member_role.members, users_opts,
remove_users)
# Update team membership based on group memberships.
team_map = getattr(backend.settings, 'TEAM_MAP', {})
for team_name, team_opts in team_map.items():
if 'organization' not in team_opts:
continue
org, created = Organization.objects.get_or_create(name=team_opts['organization'])
team, created = Team.objects.get_or_create(name=team_name, organization=org)
users_opts = team_opts.get('users', None)
remove = bool(team_opts.get('remove', True))
_update_m2m_from_groups(user, ldap_user, team.member_role.members, users_opts,
remove)
# Update user profile to store LDAP DN.
profile = user.profile
if profile.ldap_dn != ldap_user.dn:
profile.ldap_dn = ldap_user.dn
profile.save()
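# Illustrative shape of the settings consumed by on_populate_user above
# (organization, team, and group DNs are made up; the keys mirror what the
# code reads via ORGANIZATION_MAP / TEAM_MAP with the AUTH_LDAP_ prefix):
#
#     AUTH_LDAP_ORGANIZATION_MAP = {
#         'Engineering': {
#             'admins': 'cn=eng-admins,ou=groups,dc=example,dc=org',
#             'users': ['cn=engineers,ou=groups,dc=example,dc=org'],
#             'remove_admins': True,
#             'remove_users': True,
#         },
#     }
#     AUTH_LDAP_TEAM_MAP = {
#         'Backend': {
#             'organization': 'Engineering',
#             'users': 'cn=backend,ou=groups,dc=example,dc=org',
#             'remove': True,
#         },
#     }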
|
{
"content_hash": "615bc113ce96ae9a00016335de095a91",
"timestamp": "",
"source": "github",
"line_count": 400,
"max_line_length": 121,
"avg_line_length": 38.6,
"alnum_prop": 0.6397668393782383,
"repo_name": "wwitzel3/awx",
"id": "7b4bcfbafc903f398fe749127fe2a5c006ecfed2",
"size": "15508",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "awx/sso/backends.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "303046"
},
{
"name": "Dockerfile",
"bytes": "5713"
},
{
"name": "HTML",
"bytes": "496559"
},
{
"name": "JavaScript",
"bytes": "3513112"
},
{
"name": "Makefile",
"bytes": "21133"
},
{
"name": "PowerShell",
"bytes": "10176"
},
{
"name": "Python",
"bytes": "3904288"
},
{
"name": "Shell",
"bytes": "13833"
}
],
"symlink_target": ""
}
|
# the fact that the old ID was still used in the Terms od, on reloading we now fail
# __setter__ validation on value for non generated values needs to happen
# deal with how to initialize a header (i.e. remove the od dependence?) maybe deferred relationship resolution could solve this?
# pass tvpairstores to tvpair so that we can walk all the way back up the chain if needs be
# nlx_qual_ is not getting split correctly to nlx_qual:
"""
Usage:
obo_io.py <obofile>
obo_io.py --ttl <obofile> [<ttlfile>]
obo_io.py --help
Options:
-h --help show this
-t --ttl convert obo file to ttl and exit
obo_io.py
python .obo file parser and writer for the obo 1.2 spec defined at
https://oboformat.googlecode.com/svn/trunk/doc/GO.format.obo-1_2.html
acts as a command line script or as a python module
can also output to ttl format but conversion is currently ill defined
ALWAYS MANUALLY CHECK YOUR OUTPUT THIS SUCKER IS FLAKY
"""
__title__ = 'obo_io'
__author__ = 'Tom Gillespie'
import os
import inspect
from datetime import datetime
from getpass import getuser
from collections import OrderedDict as od
from docopt import docopt
from IPython import embed
N = -1 # used to define 'many' for tag counts
TW = 4 # tab width
od.__repr__ = dict.__repr__
# this is our current (horrible) conversion from obo to ttl
obo_tag_to_ttl = {
'id':'%s rdf:type owl:Class ;\n',
'name':' ' * TW + 'rdfs:label "%s"@en ;\n',
'def':' ' * TW + 'nsu:definition "%s"@en ;\n',
'acronym':' ' * TW + 'nsu:acronym "%s"@en ;\n',
'synonym':' ' * TW + 'nsu:synonym "%s"@en ;\n',
'is_a':' ' * TW + 'rdfs:subClassOf %s ;\n',
#'xref':
}
def id_fix(value):
""" fix @prefix values for ttl """
if value.startswith('KSC_M'):
pass
else:
value = value.replace(':','_')
if value.startswith('ERO') or value.startswith('OBI') or value.startswith('GO') or value.startswith('UBERON') or value.startswith('IAO'):
value = 'obo:' + value
elif value.startswith('birnlex') or value.startswith('nlx'):
value = 'nifstd:' + value
elif value.startswith('MESH'):
value = ':'.join(value.split('_'))
else:
value = ':' + value
return value
class OboFile:
""" Python representation of the obo file structure split into tag-value
        pair stanzas; the header is currently its own special stanza.
type_def = ('<header>','<stanza>')
    Usage: To load an obo file from somewhere on disk, initialize an OboFile
        instance with the full path to the obo file you want to load.
To write an obofile call obofileinstance.write(). If the filename for
the obofile exists it will not overwrite what you have but will append
a number to the end.
To output to ttl format call str_to_write = obofileinstance.__ttl__()
and then write str_to_write to file. TODO implement .writettl()
"""
def __init__(self, filename=None, header=None, terms=None, typedefs=None, instances=None):
self.filename = filename
self.Terms = od()
self.Terms.names = {}
self.Typedefs = od()
self.Typedefs.names = {}
self.Instances = od()
self.Instances.names = {}
self.Headers = od() #LOL STUPID FIXME
self.Headers.names = {} # FIXME do not want? what about imports?
if filename is not None: # FIXME could spec filename here?
#od_types = {type_.__name__:type_od for type_,type_od in zip((Term, Typedef, Instance),(self.Terms,self.Typedefs,self.Instances))}
#LOL GETATTR
with open(filename, 'rt') as f:
data = f.read()
#deal with \<newline> escape
                data = data.replace(' \n','\n') # FIXME needed for arbitrary whitespace
data = data.replace('\<newline>\n',' ')
# TODO remove \n!.+\n
sections = data.split('\n[')
header_block = sections[0]
self.header = Header(header_block, self)
stanzas = sections[1:]
for block in stanzas:
block_type, block = block.split(']\n',1)
type_ = stanza_types[block_type]
#odt = od_type[block_type]
t = type_(block, self) # FIXME :/
self.add_tvpair_store(t)
elif header is not None:
self.header = header
self.Terms = terms # TODO this should take iters not ods
self.Typedefs = typedefs
self.Instances = instances
elif header is None:
self.header = None
def add_tvpair_store(self, tvpair_store):
# TODO resolve terms
#add store to od
#add store to od.__dict__
#add store to od.names
tvpair_store.append_to_obofile(self)
def write(self, filename, type_='obo'): #FIXME this is bugged
""" Write file, will not overwrite files with the same name
outputs to obo by default but can also output to ttl if
passed type_='ttl' when called.
"""
if os.path.exists(filename):
name, ext = filename.rsplit('.',1)
try:
prefix, num = name.rsplit('_',1)
n = int(num)
n += 1
filename = prefix + '_' + str(n) + '.' + ext
except ValueError:
filename = name + '_1.' + ext
print('file exists, renaming to %s' % filename)
self.write(filename)
else:
with open(filename, 'wt', encoding='utf-8') as f:
if type_ == 'obo':
f.write(str(self)) # FIXME this is incredibly slow for big files :/
elif type_ == 'ttl':
f.write(self.__ttl__())
else:
raise TypeError('No exporter for file type %s!' % type_)
def __ttl__(self):
#stores = [self.header.__ttl__()]
stores = []
stores += [s.__ttl__() for s in self.Terms.values()]
stores += [s.__ttl__() for s in self.Typedefs.values()]
stores += [s.__ttl__() for s in self.Instances.values()]
return '\n'.join(stores)
def __str__(self):
stores = [str(self.header)]
stores += [str(s) for s in self.Terms.values()]
stores += [str(s) for s in self.Typedefs.values()]
stores += [str(s) for s in self.Instances.values()]
return '\n'.join(stores) + '\n'
def __repr__(self):
s = 'OboFile instance with %s Terms, %s Typedefs, and %s Instances' % (
len(self.Terms), len(self.Typedefs), len(self.Instances))
return s
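# A minimal usage sketch for OboFile, following the class docstring above
# (file names are examples only):
#
#     of = OboFile('my_ontology.obo')           # parse an existing .obo file
#     of.write('my_ontology_out.obo')           # obo output; never overwrites
#     of.write('my_ontology.ttl', type_='ttl')  # experimental ttl output
#     print(repr(of))                           # term/typedef/instance counts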
class TVPair: #TODO these need to be parented to something!
""" Python representation of obo tag-value pairs, all tag-value pairs that
require specially structured values are implemented below in the
special children section.
_type_ = '<tag-value pair>'
_type_def = ('<tag>', '<value>', '{<trailing modifiers>}', '<comment>')
"""
_reserved_ids = ('OBO:TYPE','OBO:TERM','OBO:TERM_OR_TYPE','OBO:INSTANCE')
_escapes = {
'\\n':'\n',
'\W':' ',
'\\t':'\t',
'\:':':',
'\,':',',
'\\"':'"',
'\\\\':'\\',
'\(':'(',
'\)':')',
'\[':'[',
'\]':']',
'\{':'{',
'\}':'}',
}
def __init__(self, line=None, tag=None, value=None, modifiers=None, comment=None, parent=None, type_od=None, **kwargs): # TODO kwargs for specific tags
self.parent = parent
self.type_od = type_od
if line is not None:
self.parse(line)
#print(self)
self.validate(warn=True)
else:
self.make(tag, value, modifiers, comment, **kwargs)
self.validate()
@staticmethod
def factory(tag, value=None, modifiers=None, comment=None, dict_=None, parent=None, type_od=None, **kwargs):
        # Pass modifiers and parent through instead of silently dropping them.
        tvp = TVPair(tag=tag, value=value, modifiers=modifiers, comment=comment, parent=parent, type_od=type_od, **kwargs)
if dict_:
dict_[TVPair.esc_(tag)] = tvp
else:
return tvp
def validate(self, warn=False): # TODO
if self.tag == 'id':
if self.value in self._reserved_ids:
raise AttributeError('You may not use reserved term %s as an id.' % self.value)
# TODO validate kwargs
#
#warn if we are loading an ontology and there is an error but don't fail
#id
#name
#def
#synonym
if not warn:
#print('PLS IMPLMENT ME! ;_;')
pass # TODO
def _value(self):
return self.value
@property
def __value(self):
return self._value()
def parse(self, line):
# we will handle extra parse values by sticking them on the tvpair instance
try:
tag, value = line.split(':',1)
self.tag = tag
            value = value.strip()
comm_split = value.split('\!')
try:
# comment
tail, comment = comm_split[-1].split('!',1)
comment = comment.strip()
comm_split[-1] = tail
value = '\!'.join(comm_split)
except ValueError:
comment = None
value = value.strip()
# DEAL WITH TRAILING MODIFIERS
trailing_modifiers = None
if tag in special_children:
self._value = special_children[tag].parse(value, self)
self.value = self.__value
if type(self.value) == DynamicValue:
self._comment = self._value.target.name.value # LOL
self.comment = self.__comment
else:
self.comment = comment
else:
self.value = value
self.comment = comment
except BaseException as e:
embed()
raise
self.tag = tag
self.trailing_modifiers = trailing_modifiers
def _comment(self):
return self.comment
@property
def __comment(self):
return self._comment()
def make(self, tag, value=None, modifiers=None, comment=None, **kwargs):
self.tag = tag
self.trailing_modifiers = modifiers
self.comment = comment
if tag in special_children:
kwargs['tvpair'] = self
self._value = special_children[tag](**kwargs)
self.value = self.__value
if type(self.value) == DynamicValue:
self._comment = self._value.target.name.value # LOL
self.comment = self.__comment
else:
self.value = value
def __eq__(self, other):
if type(self) == type(other):
if self.value == other.value:
return True
else:
return False
else:
return False
def __ne__(self, other):
return not other == self
def __str__(self):
string = '{}: {}'.format(self.tag, self._value())
if self.trailing_modifiers:
string += " " + str(self.trailing_modifiers)
if self.comment:
# TODO: autofill is_a comments
string += " ! " + self._comment()
return string
def __ttl__(self):
if self.tag in obo_tag_to_ttl:
if self.tag == 'id':
value = id_fix(self.value)
elif self.tag == 'def':
value = self._value.text.replace('"','\\"')
elif self.tag == 'synonym':
value = self._value.text.lower()
elif self.tag == 'is_a':
if type(self._value.target) == str: # we dangling
value = self._value.target_id
else:
value = id_fix(self._value.target.id_.value)
elif self.tag == 'name':
value = self.value.lower() # capitalize only proper nouns as needed
else:
value = self.value
return obo_tag_to_ttl[self.tag] % value
else:
return ''
def __repr__(self):
return str(self)
@staticmethod
def esc(string):
for f, r in TVPair._escapes:
string = string.replace(f, r)
return string
@staticmethod
def esc_(string):
""" fix strings for use as names in classes """
if string == 'id': # dont clobber id
return 'id_'
elif string == 'def': # avoid syntax errors
return 'def_'
return string.replace('-','_').replace(':','')
class TVPairStore:
"""
Ancestor class for stanzas and headers.
"""
def __new__(cls, *args, **kwargs):
cls._tags = od()
for tag, limit in cls._all_tags:
cls._tags[tag] = limit
cls.__new__ = cls.___new__ # enforce runonce
return super().__new__(cls)
@classmethod
def ___new__(cls, *args, **kwargs):
return super().__new__(cls)
def __init__(self, block=None, obofile=None, tvpairs=None):
# keep _tags out of self.__dict__ and add new tags for all instances
if obofile is not None:
type_od = getattr(obofile, self.__class__.__name__+'s')
else:
type_od = None
#raise TypeError('TVPairStores need an OboFile, even if it is a fake one.') # FIXME just don't check stuff instead?
for tag, limit in self._tags.items():
if limit == N:
self.__dict__[TVPair.esc_(tag)] = [] # may need a list
if block is not None:
lines = block.split('\n')
for line in lines:
if line:
if line[0] != '!': # we do not parse comments
tvpair = TVPair(line, parent=self, type_od=type_od)
self.add_tvpair(tvpair)
warn = True
else:
for tvpair in tvpairs: # FIXME, sorta need a way to get the type_od to them more naturally?
self.add_tvpair(tvpair)
warn = False
#clean up empty tags
to_pop = []
for tag, value in self.__dict__.items():
if not value:
to_pop.append(tag)
for tag in to_pop:
self.__dict__.pop(tag)
self.validate(warn)
def append_to_obofile(self, obofile):
        raise NotImplementedError('Please implement me in your subclass!')
def add_tvpair(self, tvpair):
tag = tvpair.tag
dict_tag = TVPair.esc_(tag)
if tag not in self.__dict__:
if tag not in self._tags:
print('TAG NOT IN', tag)
self._tags[tag] = N
print(self._tags[tag])
self.__dict__[dict_tag] = []
elif self._tags[tag] == N:
self.__dict__[dict_tag] = []
if self._tags[tag] == N:
try:
self.__dict__[dict_tag].append(tvpair)
except KeyError:
embed()
raise
else:
self.__dict__[dict_tag] = tvpair
@property
def tvpairs(self):
return self._tvpairs()
def _tvpairs(self, source_dict=None):
index = tuple(self._tags)
def key_(tvpair):
out = index.index(tvpair.tag)
if self._tags[tvpair.tag] == N:
tosort = []
for tvp in self.__dict__[TVPair.esc_(tvpair.tag)]:
tosort.append(tvp._value())
                sord = sorted(tosort, key=lambda a: a.lower()) # FIXME isn't quite right
out += sord.index(tvpair._value()) / (len(sord) + 1)
return out
tosort = []
if not source_dict:
source_dict = self.__dict__
for tvp in source_dict.values():
if type(tvp) == list:
tosort.extend(tvp)
elif type(tvp) == property:
embed()
else:
tosort.append(tvp)
return sorted(tosort, key=key_)
def __ttl__(self):
block = ''.join(tvpair.__ttl__() for tvpair in self.tvpairs)
return block.rstrip('\n').rstrip(';') + '.\n'
def __str__(self):
return '\n'.join(str(tvpair) for tvpair in self.tvpairs) + '\n'
def __repr__(self):
return ' '.join(str(tvpair) for tvpair in self.tvpairs) + ' '
def validate(self, warn=False):
tags = []
for tag, tvp in self.__dict__.items():
#print(tvp)
if tvp:
if type(tvp) == list:
tags.append(tvp[0].tag)
else:
try:
tags.append(tvp.tag)
except AttributeError:
embed()
raise
else:
raise AttributeError('Tag %s has no values!' % tag)
for tag in self._r_tags:
if tag not in tags:
if warn:
print('probably a multipart definition') # TODO
#raise ImportWarning('%s %s is missing a required tag %s' %
#(self.__class__.__name__, str(self), tag))
else:
raise AttributeError('%s must have a tag of type %s' %
(self.__class__.__name__, tag))
class Header(TVPairStore):
""" Header class. """
_r_tags = ('format-version', )
_r_defaults = ('1.2',)
_all_tags = (
('format-version', 1),
('data-version', 1),
('date', 1),
('saved-by', 1),
('auto-generated-by', 1),
('ontology', 1),
('import', N),
('subsetdef', N),
('synonymtypedef', N),
('idspace', N), # PREFIX http://uri
('id-mapping', N),
('default-relationship-id-previx', 1),
('default-namespace', 1),
('remark', N),
)
_datetime_fmt = '%d:%m:%Y %H:%M' # WE USE ZULU
def append_to_obofile(self, obofile):
obofile.header = self
def __str__(self):
""" When we write to file overwrite the relevant variables without
also overwriting the original data.
"""
updated = {k:v for k, v in self.__dict__.items()}
print(updated.keys())
TVPair.factory('date', datetime.strftime(datetime.utcnow(), self._datetime_fmt),dict_=updated)
TVPair.factory('auto-generated-by', __title__, dict_=updated)
TVPair.factory('saved-by', getuser(), dict_=updated)
tvpairs = self._tvpairs(updated)
return '\n'.join(str(tvpair) for tvpair in tvpairs) + '\n'
class Stanza(TVPairStore):
""" Stanza class.
_types = ('Term', 'Typedef', 'Instance')
"""
_type_ = '<stanza>'
_type_def = ('[<Stanza name>]','<tag-value pair>')
_r_tags = ['id', 'name',]
_all_tags = (
('id', 1),
('is_anonymous', 1),
('name',1),
('namespace', 1),
('alt_id', N),
('def', 1),
('comment', 1),
('subset', N),
('synonym', N),
('acronym', N), # i think it is just better to add this
('xref', N),
('instance_of', 1), ##
('domain', 1), #
('range', 1), #
('is_anti_symmetric', 1), #
('is_cyclic', 1), #
('is_reflexive', 1), #
('is_symmetric', 1), #
('is_transitive', 1), #
('is_a', N),
('inverse_of', 1), #
('transitive_over', N), #
('intersection_of', N), # no relationships, typedefs
('union_of', N), # min 2, no relationships, typedefs
('disjoint_from', N), # no relationships, typedefs
('relationship', N),
('property_value', N), ##
('is_obsolete', 1),
('replaced_by', N),
('consider', N),
('created_by', 1),
('creation_date', 1),
)
_typedef_only_tags = [
        'domain',
'range',
'inverse_of',
'transitive_over',
'is_cyclic',
'is_reflexive',
'is_symmetric',
'is_anti_symmetric',
'is_transitive',
'is_metadata_tag',
]
def __new__(cls, *args, **kwargs):
cls._all_tags = [tag for tag in cls._all_tags if tag[0] not in cls._bad_tags]
instance = super().__new__(cls, *args, **kwargs)
cls.__new__ = super().__new__ # enforce runonce
return instance # we return here so we chain the runonce
def __init__(self, block=None, obofile=None, tvpairs=None):
if block is not None and obofile is not None:
super().__init__(block, obofile)
else:
super().__init__(tvpairs=tvpairs)
if obofile is not None:
self.append_to_obofile(obofile)
else:
#print('Please be sure to add this to the typd_od yourself!')
pass # TODO
def append_to_obofile(self, obofile):
type_od = getattr(obofile, self.__class__.__name__+'s')
callbacks = type_od.get(self.id_.value, None)
if type(callbacks) == list:
for callback in callbacks:
callback(self) # fill in is_a
type_od.pop(self.id_.value) # reset the order
elif type(callbacks) == type(self):
print(self.id_)
if set(self.__dict__) == set(callbacks.__dict__):
pass
else:
callbacks.__dict__.update(self.__dict__) # last one wins
#raise ValueError('IT WOULD SEEM WE ALREADY EXIST! PLS HALP') # TODO
type_od[self.id_.value] = self
type_od.__dict__[TVPair.esc_(self.id_.value)] = self
if self.name.value not in type_od.names: # add to names
type_od.names[self.name.value] = self
elif type(type_od.names[self.name.value]) == list:
type_od.names[self.name.value].append(self)
else:
existing = type_od.names.pop(self.name.value)
type_od.names[self.name.value] = [existing, self]
def __str__(self):
return '['+ self.__class__.__name__ +']\n' + super().__str__()
class Term(Stanza):
_bad_tags = ['instance_of']
def __new__(cls, *args, **kwargs):
cls._bad_tags += cls._typedef_only_tags
instance = super().__new__(cls, *args, **kwargs)
cls.__new__ = super().__new__
return instance
def dedupe_synonyms(self):
if getattr(self, 'synonym', None):
last_wins = {}
for s in self.synonym:
if type(s._value) == str:
print(s._value)
key = s.value
else:
key = s._value.text
last_wins[key] = s
self.synonym = sorted(list(last_wins.values()),key=lambda a:a.value)
class Typedef(Stanza):
_bad_tags = ('union_of', 'intersection_of', 'disjoint_from', 'instance_of')
class Instance(Stanza):
_r_tags = ['instance_of',]
def __new__(cls, *args, **kwargs):
cls._bad_tags += cls._typedef_only_tags
cls._r_tags = super()._r_tags + cls._r_tags
instance = super().__new__(cls, *args, **kwargs)
cls.__new__ = super().__new__
return instance
stanza_types = {type_.__name__:type_ for type_ in (Term, Typedef, Instance)}
###
# Special children
###
class Value:
tag = None
seps = ' ',
brackets = {'[':']', '{':'}', '(':')', '<':'>', '"':'"', ' ':' '}
brackets.update({v:k for k, v in brackets.items()})
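    # The brackets mapping pairs each opening delimiter with its closer and,
    # after the update above, the reverse as well, so value() implementations
    # can look up the matching closing delimiter for whatever separator a
    # subclass declares in seps.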
def __new__(cls, *args, **kwargs):
return super().__new__(cls)
def __init__(self, value, *args):
self.value = value
def value(self):
        raise NotImplementedError('Implement in a subclass.')
def __str__(self):
return str(self.value())
def __repr__(self):
return str(self.value())
def __call__(self):
return self.value()
@classmethod
def parse(cls, value, *args):
if type(value) == tuple: # make nice for super()
new_args = value
elif type(value) == str:
new_args = value,
#kwargs = {name:value for name, value in zip(cls.fields, split)}
#return cls.__new__(cls, **kwargs)
#instance = cls.__new__(cls, *new_args)
instance = cls(*new_args)
return instance
class DynamicValue(Value):
""" callbacks need to be isolated here for relationship, is_a and internal xrefs"""
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def get_target(self, tvpair):
def callback(target):
print(target, 'calling back',self)
self.target = target
self.target = 'DANGLING'
if tvpair.type_od is None: # TODO need a way to fill these in on add
self.target += ' ' + self.target_id
return
target = tvpair.type_od.get(self.target_id, None)
if type(target) == list:
tvpair.type_od[self.target_id].append(callback)
elif target is None:
tvpair.type_od[self.target_id] = [callback]
else: # its a Term or something
#print('map', test.id_, 'to', tag, value)
self.target = target
def value(self):
#for arg, sep in (self.args, self.seps): # TODO
#print(arg, sep)
        return str(self.target)
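# A note on DynamicValue resolution (summarizing get_target above): if the
# referenced id is not yet in the parent OboFile's type_od, the target is left
# as the placeholder 'DANGLING' and a callback is registered under that id;
# when the referenced stanza is parsed, Stanza.append_to_obofile fires the
# callbacks so the real object replaces the placeholder. With no type_od at
# all, the target stays as a 'DANGLING <id>' string.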
class Is_a(DynamicValue):
tag = 'is_a'
seps = ' ',
def __init__(self, target_id, tvpair):
self.target_id = target_id
self.get_target(tvpair)
#print('yes i have a target id you lying sack of shit',self.target_id)
def value(self):
if type(self.target) == str:
return self.target
else:
return str(self.target.id_.value)
@classmethod
def parse(cls, value, tvpair):
target_id = value
split = (target_id, tvpair)
return super().parse(split)
class Relationship(DynamicValue):
tag = 'relationship'
seps = ' ', ' '
def __init__(self, typedef, target_id, tvpair):
self.typedef = typedef #FIXME this is also an issue
self.target_id = target_id
self.get_target(tvpair)
def value(self):
if type(self.target) == str:
return self.target
else:
return str(self.target.id_.value)
@classmethod
def parse(cls, value, tvpair):
typedef, target_id = value.split(' ')
split = (typedef, target_id, tvpair)
return super().parse(split)
class Def_(Value):
tag = 'def'
seps = '"', '['
def __init__(self, text, xrefs=[], **kwargs):
self.text = text
self.xrefs = xrefs
def value(self):
out = ''
out += self.seps[0]
out += self.text
out += self.seps[0]
out += ' '
out += self.seps[1]
out += ', '.join([str(xref) for xref in self.xrefs])
out += self.brackets[self.seps[1]]
return out
@classmethod
def parse(cls, value, tvpair):
try:
text, xrefs = value[1:-1].split('" [')
except ValueError:
raise ValueError('No xrefs found! Please add square brackets [] at the end of each def:') # FIXME?!?
xrefs = [Xref.parse(xref, tvpair) for xref in xrefs.split(',')]
split = (text, xrefs)
return super().parse(split)
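# A minimal illustrative sketch (not used elsewhere in this module) of how a
# def tag value parses and re-serializes with Def_ above; the assumed line
# shape is:  def: "free text" [XREF:0000001]
# Passing None for tvpair only works because Xref.parse does not use it;
# real callers pass the owning TVPair.
def _example_def_roundtrip():
    d = Def_.parse('"a free text definition" [XREF:0000001]', None)
    # d.text  -> 'a free text definition'
    # d.xrefs -> a single Xref named 'XREF:0000001'
    return d.value()  # '"a free text definition" [XREF:0000001]'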
class Id_mapping(Value):
tag = 'id-mapping'
seps = ' ', ' '
def __init__(self, id_, target, **kwargs):
self.id_ = id_
self.target = target
def value(self):
out = ''
out += self.id_
out += self.seps[0]
out += self.target
return out
@classmethod
def parse(cls, value, *args):
id_, target = value.split(' ')
split = (id_, target)
return super().parse(split)
class Idspace(Value):
tag = 'idspace'
seps = ' ', ' ', '"'
def __init__(self, name, uri, desc=None, **kwargs):
self.name = name
self.uri = uri
self.desc = desc
def value(self):
out = ''
out += self.name
out += self.seps[0]
out += self.uri
if self.desc:
out += self.seps[1]
out += self.seps[2]
out += self.desc
out += self.seps[2]
return out
@classmethod
def parse(cls, value, *args):
name, uri_description = value.split(' ', 1)
uri, description = uri_description.split(' "')
description = description[:-1]
split = (name, uri, description)
return super().parse(split)
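# A small illustrative sketch of the idspace header line handled by Idspace
# above; the prefix, URI and description here are placeholders, not a real
# mapping.
def _example_idspace_roundtrip():
    i = Idspace.parse('NIF http://example.org/nif/ "illustrative description"')
    return i.value()  # 'NIF http://example.org/nif/ "illustrative description"'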
class Property_value(Value):
tag = 'property_value'
seps = ' ', ' ', ' '
def __init__(self, type_id, val, datatype=None, **kwargs):
self.type_id = type_id
self.val = val
self.datatype = datatype
def value(self):
s = self.seps[0]
out = ''
out += self.type_id + s + self.val
if self.datatype:
out += s + self.datatype
return out
@classmethod
def parse(cls, value, *args):
type_id, val_datatype = value.split(' ', 1)
try:
val, datatype = val_datatype.split(' ', 1)
except ValueError:
val = val_datatype
datatype = None
split = (type_id, val, datatype)
return super().parse(split)
class Subsetdef(Value):
tag = 'subsetdef'
seps = ' ', '"'
filed = 'name', 'desc'
def __init__(self, name, desc, **kwargs):
self.name = name
self.desc = desc
def value(self):
return self.name + self.seps[0] + self.seps[1] + self.desc + self.seps[1]
@classmethod
def parse(cls, value, *args):
name, description = value.split(' "', 1)
description = description[:-1]
split = (name, description)
return super().parse(split)
class Synonym(Value):
tag = 'synonym'
seps = '"', ' ', ' ', '['
def __init__(self, text, scope=None, typedef=None, xrefs=[], **kwargs):
self.text = text
self.scope = scope # FIXME scope defaults
self.typedef = typedef
self.xrefs = xrefs
def value(self):
out = ''
out += self.seps[0]
out += self.text
out += self.seps[0]
if self.scope:
out += self.seps[1]
out += self.scope
if self.typedef:
out += self.seps[2]
out += self.typedef
out += ' '
out += self.seps[3]
out += ', '.join([str(xref) for xref in self.xrefs])
out += self.brackets[self.seps[3]]
return out
@classmethod
def parse(cls, value, tvpair):
try:
text, scope_typedef_xrefs = value[1:-1].split('" ', 1)
except ValueError:
            raise ValueError('Malformed synonym line: you are probably missing xrefs.') # FIXME?!?
try:
            if scope_typedef_xrefs.startswith('['): # text followed directly by '[': no scope or typedef present
scope_typedef, xrefs = scope_typedef_xrefs.split('[', 1)
else:
scope_typedef, xrefs = scope_typedef_xrefs.split(' [', 1)
except ValueError:
raise ValueError('No xrefs found! Please add square brackets [] at the end of each synonym:') # FIXME?!?
xrefs = [Xref.parse(xref, tvpair) for xref in xrefs.split(',')]
        scope_typedef = scope_typedef.strip()
if scope_typedef:
try:
scope, typedef = scope_typedef.split(' ')
except ValueError: # TODO look in tvpair.parent.header for synonymtypedef
scope = scope_typedef
typedef = None
else:
scope = None
typedef = None
split = (text, scope, typedef, xrefs)
return super().parse(split)
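# A minimal illustrative sketch of synonym parsing with Synonym above,
# assuming the common line shape:  synonym: "text" SCOPE [XREF:0000001]
# (scope and typedef are optional, the bracketed xref list is required).
# As in the def sketch, None stands in for the unused tvpair argument.
def _example_synonym_roundtrip():
    syn = Synonym.parse('"a synonym" EXACT [XREF:0000001]', None)
    # syn.text  -> 'a synonym'
    # syn.scope -> 'EXACT'
    return syn.value()  # '"a synonym" EXACT [XREF:0000001]'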
class Synonymtypedef(Value):
tag = 'synonymtypedef'
seps = ' ', '"', ' '
def __init__(self, name, desc, scope=None, **kwargs): #FIXME '' instead of None?
self.name = name
self.desc = desc
self.scope = scope
def value(self):
out = ''
out += self.name
out += self.seps[0]
out += self.seps[1]
out += self.desc
out += self.seps[1]
if self.scope:
out += self.seps[2]
out += self.scope
return out
@classmethod
def parse(cls, value, *args):
name, description_scope = value.split(' "', 1)
description, scope = description_scope.split('"', 1) # FIXME escapes :/
scope = scope.strip()
split = (name, description, scope)
return super().parse(split)
class Xref(Value): # TODO link internal ids, finalize will require cleanup, lots of work required here
tag = 'xref'
seps = ' ', '"'
def __init__(self, name, desc=None, **kwargs):
self.name = name
self.desc = desc
def value(self):
out = ''
out += self.name
if self.desc:
out += self.seps[0]
out += self.seps[1]
out += self.desc
out += self.seps[1]
return out
@classmethod
def parse(cls, value, tvpair):
        value = value.strip()  # in case we get garbage in from a bad split
try:
name, description = value.split(' "', 1)
description = description[:-1]
except ValueError:
name = value # TODO dangling stuff?
description = None
split = (name, description)
return super().parse(split)
special_children = {sc.tag:sc for sc in (Subsetdef, Synonymtypedef, Idspace, Id_mapping, Def_, Synonym, Xref, Relationship, Is_a)}
def deNone(*args):
for arg in args:
        if arg is None:
yield ''
else:
yield arg
__all__ = [OboFile.__name__, TVPair.__name__, Header.__name__, Term.__name__, Typedef.__name__, Instance.__name__]
def main():
args = docopt(__doc__, version='obo_io 0')
if args['--ttl']:
filename = args['<obofile>']
if os.path.exists(filename):
of = OboFile(filename)
if args['<ttlfile>']:
ttlfilename = args['<ttlfile>']
else:
fname, ext = filename.rsplit('.',1)
if ext != 'obo': # FIXME pretty sure a successful parse should be the measure here?
# TODO TEST ME!
raise TypeError('%s has wrong extension %s != obo !' % (filename, ext) )
ttlfilename = fname + '.ttl'
of.write(ttlfilename, type_='ttl')
else:
raise FileNotFoundError('No file named %s exists at that path!' % filename)
else:
filename = args['<obofile>']
of = OboFile(filename=filename)
ttl = of.__ttl__()
embed()
if __name__ == '__main__':
main()
|
{
"content_hash": "ed85e585f4abe9ba83d75066a64418e0",
"timestamp": "",
"source": "github",
"line_count": 1077,
"max_line_length": 156,
"avg_line_length": 32.54131847725162,
"alnum_prop": 0.5150226838245784,
"repo_name": "tgbugs/methodsOntology",
"id": "d4c92a3c66af9bbd38a2e8a561e0561f28ec4b36",
"size": "36016",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/obo_io.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36016"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('granted', '0005_resource'),
]
operations = [
migrations.CreateModel(
name='UserResourceAccessRoleMapping',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('access_role', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='granted.AccessRole')),
('resource', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='granted.Resource')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='granted.User')),
],
options={
'db_table': 'user_resource_access_role_mapping',
'verbose_name': 'User Resource Access Role Mapping',
'verbose_name_plural': 'User Resource Access Role Mappings',
},
),
migrations.AlterUniqueTogether(
name='userresourceaccessrolemapping',
unique_together=set([('user', 'resource', 'access_role')]),
),
]
|
{
"content_hash": "46c4001408c2bf2b32c734bf786c656b",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 121,
"avg_line_length": 41.11764705882353,
"alnum_prop": 0.600143061516452,
"repo_name": "mahithmukundan/Granted",
"id": "48700419b7a90b76614ed259b5abea3d2954a581",
"size": "1470",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "granted/migrations/0006_auto_20170728_0556.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1578"
},
{
"name": "Python",
"bytes": "24766"
}
],
"symlink_target": ""
}
|
import pytz
import pytest
from datetime import timedelta, datetime
from distutils.version import LooseVersion
from numpy import nan
import numpy as np
import pandas as pd
from pandas import (Series, DataFrame, isna, date_range,
MultiIndex, Index, Timestamp, NaT, IntervalIndex,
Categorical)
from pandas.compat import range
from pandas._libs.tslib import iNaT
from pandas.core.series import remove_na
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.errors import PerformanceWarning
try:
import scipy
_is_scipy_ge_0190 = (LooseVersion(scipy.__version__) >=
LooseVersion('0.19.0'))
except ImportError:
_is_scipy_ge_0190 = False
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
import pytest
pytest.skip('scipy.interpolate.pchip missing')
def _skip_if_no_akima():
try:
from scipy.interpolate import Akima1DInterpolator # noqa
except ImportError:
import pytest
pytest.skip('scipy.interpolate.Akima1DInterpolator missing')
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
class TestSeriesMissingData():
def test_remove_na_deprecation(self):
# see gh-16971
with tm.assert_produces_warning(FutureWarning):
remove_na(Series([]))
def test_timedelta_fillna(self):
# GH 3371
s = Series([Timestamp('20130101'), Timestamp('20130101'),
Timestamp('20130102'), Timestamp('20130103 9:01:01')])
td = s.diff()
# reg fillna
result = td.fillna(0)
expected = Series([timedelta(0), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
        # interpreted as seconds
result = td.fillna(1)
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(timedelta(days=1, seconds=1))
expected = Series([timedelta(days=1, seconds=1), timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(np.timedelta64(int(1e9)))
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(NaT)
expected = Series([NaT, timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)],
dtype='m8[ns]')
assert_series_equal(result, expected)
# ffill
td[2] = np.nan
result = td.ffill()
expected = td.fillna(0)
expected[0] = np.nan
assert_series_equal(result, expected)
# bfill
td[2] = np.nan
result = td.bfill()
expected = td.fillna(0)
expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1)
assert_series_equal(result, expected)
def test_datetime64_fillna(self):
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
s[2] = np.nan
# reg fillna
result = s.fillna(Timestamp('20130104'))
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130104'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
result = s.fillna(NaT)
expected = s
assert_series_equal(result, expected)
# ffill
result = s.ffill()
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130101'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
# bfill
result = s.bfill()
expected = Series([Timestamp('20130101'), Timestamp('20130101'),
Timestamp('20130103 9:01:01'), Timestamp(
'20130103 9:01:01')])
assert_series_equal(result, expected)
# GH 6587
# make sure that we are treating as integer when filling
# this also tests inference of a datetime-like with NaT's
s = Series([pd.NaT, pd.NaT, '2013-08-05 15:30:00.000001'])
expected = Series(
['2013-08-05 15:30:00.000001', '2013-08-05 15:30:00.000001',
'2013-08-05 15:30:00.000001'], dtype='M8[ns]')
result = s.fillna(method='backfill')
assert_series_equal(result, expected)
def test_datetime64_tz_fillna(self):
for tz in ['US/Eastern', 'Asia/Tokyo']:
# DatetimeBlock
s = Series([Timestamp('2011-01-01 10:00'), pd.NaT,
Timestamp('2011-01-03 10:00'), pd.NaT])
null_loc = pd.Series([False, True, False, True])
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00'),
Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-02 10:00')])
tm.assert_series_equal(expected, result)
# check s is not changed
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz=tz),
Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-02 10:00', tz=tz)])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00'), 'AAA',
Timestamp('2011-01-03 10:00'), 'AAA'],
dtype=object)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz=tz),
Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-04 10:00')])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00'),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00'),
Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-04 10:00')])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
# DatetimeBlockTZ
idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,
'2011-01-03 10:00', pd.NaT], tz=tz)
s = pd.Series(idx)
assert s.dtype == 'datetime64[ns, {0}]'.format(tz)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2011-01-02 10:00'),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2011-01-02 10:00')])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
'2011-01-03 10:00', '2011-01-02 10:00'],
tz=tz)
expected = Series(idx)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna(pd.Timestamp('2011-01-02 10:00',
tz=tz).to_pydatetime())
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
'2011-01-03 10:00', '2011-01-02 10:00'],
tz=tz)
expected = Series(idx)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), 'AAA',
Timestamp('2011-01-03 10:00', tz=tz), 'AAA'],
dtype=object)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2011-01-02 10:00', tz=tz),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2011-01-04 10:00')])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00', tz=tz)})
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2011-01-02 10:00', tz=tz),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2011-01-04 10:00', tz=tz)])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
# filling with a naive/other zone, coerce to object
result = s.fillna(Timestamp('20130101'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2013-01-01'),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2013-01-01')])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna(Timestamp('20130101', tz='US/Pacific'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2013-01-01', tz='US/Pacific'),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2013-01-01', tz='US/Pacific')])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
# with timezone
# GH 15855
df = pd.Series([pd.Timestamp('2012-11-11 00:00:00+01:00'), pd.NaT])
exp = pd.Series([pd.Timestamp('2012-11-11 00:00:00+01:00'),
pd.Timestamp('2012-11-11 00:00:00+01:00')])
assert_series_equal(df.fillna(method='pad'), exp)
df = pd.Series([pd.NaT, pd.Timestamp('2012-11-11 00:00:00+01:00')])
exp = pd.Series([pd.Timestamp('2012-11-11 00:00:00+01:00'),
pd.Timestamp('2012-11-11 00:00:00+01:00')])
assert_series_equal(df.fillna(method='bfill'), exp)
def test_fillna_consistency(self):
# GH 16402
        # fillna with a tz-aware value on a tz-naive series should result in object dtype
s = Series([Timestamp('20130101'), pd.NaT])
result = s.fillna(Timestamp('20130101', tz='US/Eastern'))
expected = Series([Timestamp('20130101'),
Timestamp('2013-01-01', tz='US/Eastern')],
dtype='object')
assert_series_equal(result, expected)
# where (we ignore the errors=)
result = s.where([True, False],
Timestamp('20130101', tz='US/Eastern'),
errors='ignore')
assert_series_equal(result, expected)
result = s.where([True, False],
Timestamp('20130101', tz='US/Eastern'),
errors='ignore')
assert_series_equal(result, expected)
# with a non-datetime
result = s.fillna('foo')
expected = Series([Timestamp('20130101'),
'foo'])
assert_series_equal(result, expected)
# assignment
s2 = s.copy()
s2[1] = 'foo'
assert_series_equal(s2, expected)
def test_datetime64tz_fillna_round_issue(self):
# GH 14872
data = pd.Series([pd.NaT, pd.NaT,
datetime(2016, 12, 12, 22, 24, 6, 100001,
tzinfo=pytz.utc)])
filled = data.fillna(method='bfill')
expected = pd.Series([datetime(2016, 12, 12, 22, 24, 6,
100001, tzinfo=pytz.utc),
datetime(2016, 12, 12, 22, 24, 6,
100001, tzinfo=pytz.utc),
datetime(2016, 12, 12, 22, 24, 6,
100001, tzinfo=pytz.utc)])
assert_series_equal(filled, expected)
def test_fillna_downcast(self):
# GH 15277
# infer int64 from float64
s = pd.Series([1., np.nan])
result = s.fillna(0, downcast='infer')
expected = pd.Series([1, 0])
assert_series_equal(result, expected)
# infer int64 from float64 when fillna value is a dict
s = pd.Series([1., np.nan])
result = s.fillna({1: 0}, downcast='infer')
expected = pd.Series([1, 0])
assert_series_equal(result, expected)
def test_fillna_int(self):
s = Series(np.random.randint(-100, 100, 50))
s.fillna(method='ffill', inplace=True)
assert_series_equal(s.fillna(method='ffill', inplace=False), s)
def test_fillna_raise(self):
s = Series(np.random.randint(-100, 100, 50))
pytest.raises(TypeError, s.fillna, [1, 2])
pytest.raises(TypeError, s.fillna, (1, 2))
# related GH 9217, make sure limit is an int and greater than 0
s = Series([1, 2, 3, None])
for limit in [-1, 0, 1., 2.]:
for method in ['backfill', 'bfill', 'pad', 'ffill', None]:
with pytest.raises(ValueError):
s.fillna(1, limit=limit, method=method)
def test_categorical_nan_equality(self):
cat = Series(Categorical(["a", "b", "c", np.nan]))
exp = Series([True, True, True, False])
res = (cat == cat)
tm.assert_series_equal(res, exp)
def test_categorical_nan_handling(self):
# NaNs are represented as -1 in labels
s = Series(Categorical(["a", "b", np.nan, "a"]))
tm.assert_index_equal(s.cat.categories, Index(["a", "b"]))
tm.assert_numpy_array_equal(s.values.codes,
np.array([0, 1, -1, 0], dtype=np.int8))
@pytest.mark.parametrize('fill_value, expected_output', [
('a', ['a', 'a', 'b', 'a', 'a']),
({1: 'a', 3: 'b', 4: 'b'}, ['a', 'a', 'b', 'b', 'b']),
({1: 'a'}, ['a', 'a', 'b', np.nan, np.nan]),
({1: 'a', 3: 'b'}, ['a', 'a', 'b', 'b', np.nan]),
(Series('a'), ['a', np.nan, 'b', np.nan, np.nan]),
(Series('a', index=[1]), ['a', 'a', 'b', np.nan, np.nan]),
(Series({1: 'a', 3: 'b'}), ['a', 'a', 'b', 'b', np.nan]),
(Series(['a', 'b'], index=[3, 4]), ['a', np.nan, 'b', 'a', 'b'])
])
def test_fillna_categorical(self, fill_value, expected_output):
# GH 17033
# Test fillna for a Categorical series
data = ['a', np.nan, 'b', np.nan, np.nan]
s = Series(Categorical(data, categories=['a', 'b']))
exp = Series(Categorical(expected_output, categories=['a', 'b']))
tm.assert_series_equal(s.fillna(fill_value), exp)
def test_fillna_categorical_raise(self):
data = ['a', np.nan, 'b', np.nan, np.nan]
s = Series(Categorical(data, categories=['a', 'b']))
with tm.assert_raises_regex(ValueError,
"fill value must be in categories"):
s.fillna('d')
with tm.assert_raises_regex(ValueError,
"fill value must be in categories"):
s.fillna(Series('d'))
with tm.assert_raises_regex(ValueError,
"fill value must be in categories"):
s.fillna({1: 'd', 3: 'a'})
with tm.assert_raises_regex(TypeError,
'"value" parameter must be a scalar or '
'dict, but you passed a "list"'):
s.fillna(['a', 'b'])
with tm.assert_raises_regex(TypeError,
'"value" parameter must be a scalar or '
'dict, but you passed a "tuple"'):
s.fillna(('a', 'b'))
with tm.assert_raises_regex(TypeError,
'"value" parameter must be a scalar, dict '
'or Series, but you passed a "DataFrame"'):
s.fillna(DataFrame({1: ['a'], 3: ['b']}))
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_isna_for_inf(self):
s = Series(['a', np.inf, np.nan, 1.0])
with pd.option_context('mode.use_inf_as_na', True):
r = s.isna()
dr = s.dropna()
e = Series([False, True, True, False])
de = Series(['a', 1.0], index=[0, 3])
tm.assert_series_equal(r, e)
tm.assert_series_equal(dr, de)
@tm.capture_stdout
def test_isnull_for_inf_deprecated(self):
# gh-17115
s = Series(['a', np.inf, np.nan, 1.0])
with pd.option_context('mode.use_inf_as_null', True):
r = s.isna()
dr = s.dropna()
e = Series([False, True, True, False])
de = Series(['a', 1.0], index=[0, 3])
tm.assert_series_equal(r, e)
tm.assert_series_equal(dr, de)
def test_fillna(self, datetime_series):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
tm.assert_series_equal(ts, ts.fillna(method='ffill'))
ts[2] = np.NaN
exp = Series([0., 1., 1., 3., 4.], index=ts.index)
tm.assert_series_equal(ts.fillna(method='ffill'), exp)
exp = Series([0., 1., 3., 3., 4.], index=ts.index)
tm.assert_series_equal(ts.fillna(method='backfill'), exp)
exp = Series([0., 1., 5., 3., 4.], index=ts.index)
tm.assert_series_equal(ts.fillna(value=5), exp)
pytest.raises(ValueError, ts.fillna)
pytest.raises(ValueError, datetime_series.fillna, value=0,
method='ffill')
# GH 5703
s1 = Series([np.nan])
s2 = Series([1])
result = s1.fillna(s2)
expected = Series([1.])
assert_series_equal(result, expected)
result = s1.fillna({})
assert_series_equal(result, s1)
result = s1.fillna(Series(()))
assert_series_equal(result, s1)
result = s2.fillna(s1)
assert_series_equal(result, s2)
result = s1.fillna({0: 1})
assert_series_equal(result, expected)
result = s1.fillna({1: 1})
assert_series_equal(result, Series([np.nan]))
result = s1.fillna({0: 1, 1: 1})
assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}))
assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}, index=[4, 5]))
assert_series_equal(result, s1)
s1 = Series([0, 1, 2], list('abc'))
s2 = Series([0, np.nan, 2], list('bac'))
result = s2.fillna(s1)
expected = Series([0, 0, 2.], list('bac'))
assert_series_equal(result, expected)
# limit
s = Series(np.nan, index=[0, 1, 2])
result = s.fillna(999, limit=1)
expected = Series([999, np.nan, np.nan], index=[0, 1, 2])
assert_series_equal(result, expected)
result = s.fillna(999, limit=2)
expected = Series([999, 999, np.nan], index=[0, 1, 2])
assert_series_equal(result, expected)
# GH 9043
# make sure a string representation of int/float values can be filled
# correctly without raising errors or being converted
vals = ['0', '1.5', '-0.3']
for val in vals:
s = Series([0, 1, np.nan, np.nan, 4], dtype='float64')
result = s.fillna(val)
expected = Series([0, 1, val, val, 4], dtype='object')
assert_series_equal(result, expected)
def test_fillna_bug(self):
x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])
filled = x.fillna(method='ffill')
expected = Series([nan, 1., 1., 3., 3.], x.index)
assert_series_equal(filled, expected)
filled = x.fillna(method='bfill')
expected = Series([1., 1., 3., 3., nan], x.index)
assert_series_equal(filled, expected)
def test_fillna_inplace(self):
x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])
y = x.copy()
y.fillna(value=0, inplace=True)
expected = x.fillna(value=0)
assert_series_equal(y, expected)
def test_fillna_invalid_method(self, datetime_series):
try:
datetime_series.fillna(method='ffil')
except ValueError as inst:
assert 'ffil' in str(inst)
def test_ffill(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
ts[2] = np.NaN
assert_series_equal(ts.ffill(), ts.fillna(method='ffill'))
def test_ffill_mixed_dtypes_without_missing_data(self):
# GH14956
series = pd.Series([datetime(2015, 1, 1, tzinfo=pytz.utc), 1])
result = series.ffill()
assert_series_equal(series, result)
def test_bfill(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
ts[2] = np.NaN
assert_series_equal(ts.bfill(), ts.fillna(method='bfill'))
def test_timedelta64_nan(self):
td = Series([timedelta(days=i) for i in range(10)])
# nan ops on timedeltas
td1 = td.copy()
td1[0] = np.nan
assert isna(td1[0])
assert td1[0].value == iNaT
td1[0] = td[0]
assert not isna(td1[0])
td1[1] = iNaT
assert isna(td1[1])
assert td1[1].value == iNaT
td1[1] = td[1]
assert not isna(td1[1])
td1[2] = NaT
assert isna(td1[2])
assert td1[2].value == iNaT
td1[2] = td[2]
assert not isna(td1[2])
# boolean setting
# this doesn't work, not sure numpy even supports it
# result = td[(td>np.timedelta64(timedelta(days=3))) &
# td<np.timedelta64(timedelta(days=7)))] = np.nan
# assert isna(result).sum() == 7
    # NumPy limitation =(
# def test_logical_range_select(self):
# np.random.seed(12345)
# selector = -0.5 <= datetime_series <= 0.5
# expected = (datetime_series >= -0.5) & (datetime_series <= 0.5)
# assert_series_equal(selector, expected)
def test_dropna_empty(self):
s = Series([])
assert len(s.dropna()) == 0
s.dropna(inplace=True)
assert len(s) == 0
# invalid axis
pytest.raises(ValueError, s.dropna, axis=1)
def test_datetime64_tz_dropna(self):
# DatetimeBlock
s = Series([Timestamp('2011-01-01 10:00'), pd.NaT, Timestamp(
'2011-01-03 10:00'), pd.NaT])
result = s.dropna()
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-03 10:00')], index=[0, 2])
tm.assert_series_equal(result, expected)
# DatetimeBlockTZ
idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,
'2011-01-03 10:00', pd.NaT],
tz='Asia/Tokyo')
s = pd.Series(idx)
assert s.dtype == 'datetime64[ns, Asia/Tokyo]'
result = s.dropna()
expected = Series([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-03 10:00', tz='Asia/Tokyo')],
index=[0, 2])
assert result.dtype == 'datetime64[ns, Asia/Tokyo]'
tm.assert_series_equal(result, expected)
def test_dropna_no_nan(self):
for s in [Series([1, 2, 3], name='x'), Series(
[False, True, False], name='x')]:
result = s.dropna()
tm.assert_series_equal(result, s)
assert result is not s
s2 = s.copy()
s2.dropna(inplace=True)
tm.assert_series_equal(s2, s)
def test_dropna_intervals(self):
s = Series([np.nan, 1, 2, 3], IntervalIndex.from_arrays(
[np.nan, 0, 1, 2],
[np.nan, 1, 2, 3]))
result = s.dropna()
expected = s.iloc[1:]
assert_series_equal(result, expected)
def test_valid(self, datetime_series):
ts = datetime_series.copy()
ts[::2] = np.NaN
result = ts.dropna()
assert len(result) == ts.count()
tm.assert_series_equal(result, ts[1::2])
tm.assert_series_equal(result, ts[pd.notna(ts)])
def test_isna(self):
ser = Series([0, 5.4, 3, nan, -0.001])
expected = Series([False, False, False, True, False])
tm.assert_series_equal(ser.isna(), expected)
ser = Series(["hi", "", nan])
expected = Series([False, False, True])
tm.assert_series_equal(ser.isna(), expected)
def test_notna(self):
ser = Series([0, 5.4, 3, nan, -0.001])
expected = Series([True, True, True, False, True])
tm.assert_series_equal(ser.notna(), expected)
ser = Series(["hi", "", nan])
expected = Series([True, True, False])
tm.assert_series_equal(ser.notna(), expected)
def test_pad_nan(self):
x = Series([np.nan, 1., np.nan, 3., np.nan], ['z', 'a', 'b', 'c', 'd'],
dtype=float)
x.fillna(method='pad', inplace=True)
expected = Series([np.nan, 1.0, 1.0, 3.0, 3.0],
['z', 'a', 'b', 'c', 'd'], dtype=float)
assert_series_equal(x[1:], expected[1:])
assert np.isnan(x[0]), np.isnan(expected[0])
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
        # neither monotonically increasing nor decreasing
rng2 = rng[[1, 0, 2]]
pytest.raises(ValueError, rng2.get_indexer, rng, method='pad')
def test_dropna_preserve_name(self, datetime_series):
datetime_series[:5] = np.nan
result = datetime_series.dropna()
assert result.name == datetime_series.name
name = datetime_series.name
ts = datetime_series.copy()
ts.dropna(inplace=True)
assert ts.name == name
def test_fill_value_when_combine_const(self):
# GH12723
s = Series([0, 1, np.nan, 3, 4, 5])
exp = s.fillna(0).add(2)
res = s.add(2, fill_value=0)
assert_series_equal(res, exp)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
        # TODO: what is this test doing? why are result and expected
        # the same call to fillna?
with tm.assert_produces_warning(PerformanceWarning):
# TODO: release-note fillna performance warning
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
with tm.assert_produces_warning(PerformanceWarning):
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
with tm.assert_produces_warning(PerformanceWarning):
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
with tm.assert_produces_warning(PerformanceWarning):
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
class TestSeriesInterpolateData():
def test_interpolate(self, datetime_series, string_series):
ts = Series(np.arange(len(datetime_series), dtype=float),
datetime_series.index)
ts_copy = ts.copy()
ts_copy[5:10] = np.NaN
linear_interp = ts_copy.interpolate(method='linear')
tm.assert_series_equal(linear_interp, ts)
ord_ts = Series([d.toordinal() for d in datetime_series.index],
index=datetime_series.index).astype(float)
ord_ts_copy = ord_ts.copy()
ord_ts_copy[5:10] = np.NaN
time_interp = ord_ts_copy.interpolate(method='time')
tm.assert_series_equal(time_interp, ord_ts)
# try time interpolation on a non-TimeSeries
# Only raises ValueError if there are NaNs.
non_ts = string_series.copy()
non_ts[0] = np.NaN
pytest.raises(ValueError, non_ts.interpolate, method='time')
@td.skip_if_no_scipy
def test_interpolate_pchip(self):
_skip_if_no_pchip()
ser = Series(np.sort(np.random.uniform(size=100)))
# interpolate at new_index
new_index = ser.index.union(Index([49.25, 49.5, 49.75, 50.25, 50.5,
50.75]))
interp_s = ser.reindex(new_index).interpolate(method='pchip')
# does not blow up, GH5977
interp_s[49:51]
@td.skip_if_no_scipy
def test_interpolate_akima(self):
_skip_if_no_akima()
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(method='akima')
assert_series_equal(interp_s[1:3], expected)
@td.skip_if_no_scipy
def test_interpolate_piecewise_polynomial(self):
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(
method='piecewise_polynomial')
assert_series_equal(interp_s[1:3], expected)
@td.skip_if_no_scipy
def test_interpolate_from_derivatives(self):
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(
method='from_derivatives')
assert_series_equal(interp_s[1:3], expected)
@pytest.mark.parametrize("kwargs", [
{},
pytest.param({'method': 'polynomial', 'order': 1},
marks=td.skip_if_no_scipy)
])
def test_interpolate_corners(self, kwargs):
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(**kwargs), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(**kwargs), s)
def test_interpolate_index_values(self):
s = Series(np.nan, index=np.sort(np.random.rand(30)))
s[::3] = np.random.randn(10)
vals = s.index.values.astype(float)
result = s.interpolate(method='index')
expected = s.copy()
bad = isna(expected.values)
good = ~bad
expected = Series(np.interp(vals[bad], vals[good],
s.values[good]),
index=s.index[bad])
assert_series_equal(result[bad], expected)
# 'values' is synonymous with 'index' for the method kwarg
other_result = s.interpolate(method='values')
assert_series_equal(other_result, result)
assert_series_equal(other_result[bad], expected)
def test_interpolate_non_ts(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
with pytest.raises(ValueError):
s.interpolate(method='time')
@pytest.mark.parametrize("kwargs", [
{},
pytest.param({'method': 'polynomial', 'order': 1},
marks=td.skip_if_no_scipy)
])
def test_nan_interpolate(self, kwargs):
s = Series([0, 1, np.nan, 3])
result = s.interpolate(**kwargs)
expected = Series([0., 1., 2., 3.])
assert_series_equal(result, expected)
def test_nan_irregular_index(self):
s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9])
result = s.interpolate()
expected = Series([1., 2., 3., 4.], index=[1, 3, 5, 9])
assert_series_equal(result, expected)
def test_nan_str_index(self):
s = Series([0, 1, 2, np.nan], index=list('abcd'))
result = s.interpolate()
expected = Series([0., 1., 2., 2.], index=list('abcd'))
assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_interp_quad(self):
sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4])
result = sq.interpolate(method='quadratic')
expected = Series([1., 4., 9., 16.], index=[1, 2, 3, 4])
assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_interp_scipy_basic(self):
s = Series([1, 3, np.nan, 12, np.nan, 25])
# slinear
expected = Series([1., 3., 7.5, 12., 18.5, 25.])
result = s.interpolate(method='slinear')
assert_series_equal(result, expected)
result = s.interpolate(method='slinear', downcast='infer')
assert_series_equal(result, expected)
# nearest
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='nearest')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='nearest', downcast='infer')
assert_series_equal(result, expected)
# zero
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='zero')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='zero', downcast='infer')
assert_series_equal(result, expected)
# quadratic
# GH #15662.
# new cubic and quadratic interpolation algorithms from scipy 0.19.0.
# previously `splmake` was used. See scipy/scipy#6710
if _is_scipy_ge_0190:
expected = Series([1, 3., 6.823529, 12., 18.058824, 25.])
else:
expected = Series([1, 3., 6.769231, 12., 18.230769, 25.])
result = s.interpolate(method='quadratic')
assert_series_equal(result, expected)
result = s.interpolate(method='quadratic', downcast='infer')
assert_series_equal(result, expected)
# cubic
expected = Series([1., 3., 6.8, 12., 18.2, 25.])
result = s.interpolate(method='cubic')
assert_series_equal(result, expected)
def test_interp_limit(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2)
assert_series_equal(result, expected)
# GH 9217, make sure limit is an int and greater than 0
methods = ['linear', 'time', 'index', 'values', 'nearest', 'zero',
'slinear', 'quadratic', 'cubic', 'barycentric', 'krogh',
'polynomial', 'spline', 'piecewise_polynomial', None,
'from_derivatives', 'pchip', 'akima']
s = pd.Series([1, 2, np.nan, np.nan, 5])
for limit in [-1, 0, 1., 2.]:
for method in methods:
with pytest.raises(ValueError):
s.interpolate(limit=limit, method=method)
def test_interp_limit_forward(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
# Provide 'forward' (the default) explicitly here.
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='forward')
assert_series_equal(result, expected)
result = s.interpolate(method='linear', limit=2,
limit_direction='FORWARD')
assert_series_equal(result, expected)
def test_interp_unlimited(self):
        # these tests are for issue #16282: default limit=None is unlimited
s = Series([np.nan, 1., 3., np.nan, np.nan, np.nan, 11., np.nan])
expected = Series([1., 1., 3., 5., 7., 9., 11., 11.])
result = s.interpolate(method='linear',
limit_direction='both')
assert_series_equal(result, expected)
expected = Series([np.nan, 1., 3., 5., 7., 9., 11., 11.])
result = s.interpolate(method='linear',
limit_direction='forward')
assert_series_equal(result, expected)
expected = Series([1., 1., 3., 5., 7., 9., 11., np.nan])
result = s.interpolate(method='linear',
limit_direction='backward')
assert_series_equal(result, expected)
def test_interp_limit_bad_direction(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
pytest.raises(ValueError, s.interpolate, method='linear', limit=2,
limit_direction='abc')
# raises an error even if no limit is specified.
pytest.raises(ValueError, s.interpolate, method='linear',
limit_direction='abc')
# limit_area introduced GH #16284
def test_interp_limit_area(self):
# These tests are for issue #9218 -- fill NaNs in both directions.
s = Series([nan, nan, 3, nan, nan, nan, 7, nan, nan])
expected = Series([nan, nan, 3., 4., 5., 6., 7., nan, nan])
result = s.interpolate(method='linear', limit_area='inside')
assert_series_equal(result, expected)
expected = Series([nan, nan, 3., 4., nan, nan, 7., nan, nan])
result = s.interpolate(method='linear', limit_area='inside',
limit=1)
expected = Series([nan, nan, 3., 4., nan, 6., 7., nan, nan])
result = s.interpolate(method='linear', limit_area='inside',
limit_direction='both', limit=1)
assert_series_equal(result, expected)
expected = Series([nan, nan, 3., nan, nan, nan, 7., 7., 7.])
result = s.interpolate(method='linear', limit_area='outside')
assert_series_equal(result, expected)
expected = Series([nan, nan, 3., nan, nan, nan, 7., 7., nan])
result = s.interpolate(method='linear', limit_area='outside',
limit=1)
expected = Series([nan, 3., 3., nan, nan, nan, 7., 7., nan])
result = s.interpolate(method='linear', limit_area='outside',
limit_direction='both', limit=1)
assert_series_equal(result, expected)
expected = Series([3., 3., 3., nan, nan, nan, 7., nan, nan])
result = s.interpolate(method='linear', limit_area='outside',
direction='backward')
# raises an error even if limit type is wrong.
pytest.raises(ValueError, s.interpolate, method='linear',
limit_area='abc')
def test_interp_limit_direction(self):
# These tests are for issue #9218 -- fill NaNs in both directions.
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., np.nan, 7., 9., 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([1., 3., 5., np.nan, 9., 11.])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
# Check that this works on a longer series of nans.
s = Series([1, 3, np.nan, np.nan, np.nan, 7, 9, np.nan, np.nan, 12,
np.nan])
expected = Series([1., 3., 4., 5., 6., 7., 9., 10., 11., 12., 12.])
result = s.interpolate(method='linear', limit=2,
limit_direction='both')
assert_series_equal(result, expected)
expected = Series([1., 3., 4., np.nan, 6., 7., 9., 10., 11., 12., 12.])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_limit_to_ends(self):
        # These tests are for issue #10420 -- flow back to beginning.
s = Series([np.nan, np.nan, 5, 7, 9, np.nan])
expected = Series([5., 5., 5., 7., 9., np.nan])
result = s.interpolate(method='linear', limit=2,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([5., 5., 5., 7., 9., 9.])
result = s.interpolate(method='linear', limit=2,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_limit_before_ends(self):
        # These tests are for issue #11115 -- limit ends properly.
s = Series([np.nan, np.nan, 5, 7, np.nan, np.nan])
expected = Series([np.nan, np.nan, 5., 7., 7., np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='forward')
assert_series_equal(result, expected)
expected = Series([np.nan, 5., 5., 7., np.nan, np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([np.nan, 5., 5., 7., 7., np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_interp_all_good(self):
s = Series([1, 2, 3])
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, s)
# non-scipy
result = s.interpolate()
assert_series_equal(result, s)
@pytest.mark.parametrize("check_scipy", [
False,
pytest.param(True, marks=td.skip_if_no_scipy)
])
def test_interp_multiIndex(self, check_scipy):
idx = MultiIndex.from_tuples([(0, 'a'), (1, 'b'), (2, 'c')])
s = Series([1, 2, np.nan], index=idx)
expected = s.copy()
expected.loc[2] = 2
result = s.interpolate()
assert_series_equal(result, expected)
if check_scipy:
with pytest.raises(ValueError):
s.interpolate(method='polynomial', order=1)
@td.skip_if_no_scipy
def test_interp_nonmono_raise(self):
s = Series([1, np.nan, 3], index=[0, 2, 1])
with pytest.raises(ValueError):
s.interpolate(method='krogh')
@td.skip_if_no_scipy
def test_interp_datetime64(self):
df = Series([1, np.nan, 3], index=date_range('1/1/2000', periods=3))
result = df.interpolate(method='nearest')
expected = Series([1., 1., 3.],
index=date_range('1/1/2000', periods=3))
assert_series_equal(result, expected)
def test_interp_limit_no_nans(self):
# GH 7173
s = pd.Series([1., 2., 3.])
result = s.interpolate(limit=1)
expected = s
assert_series_equal(result, expected)
@td.skip_if_no_scipy
@pytest.mark.parametrize("method", ['polynomial', 'spline'])
def test_no_order(self, method):
s = Series([0, 1, np.nan, 3])
with pytest.raises(ValueError):
s.interpolate(method=method)
@td.skip_if_no_scipy
def test_spline(self):
s = Series([1, 2, np.nan, 4, 5, np.nan, 7])
result = s.interpolate(method='spline', order=1)
expected = Series([1., 2., 3., 4., 5., 6., 7.])
assert_series_equal(result, expected)
@td.skip_if_no('scipy', min_version='0.15')
def test_spline_extrapolate(self):
s = Series([1, 2, 3, 4, np.nan, 6, np.nan])
result3 = s.interpolate(method='spline', order=1, ext=3)
expected3 = Series([1., 2., 3., 4., 5., 6., 6.])
assert_series_equal(result3, expected3)
result1 = s.interpolate(method='spline', order=1, ext=0)
expected1 = Series([1., 2., 3., 4., 5., 6., 7.])
assert_series_equal(result1, expected1)
@td.skip_if_no_scipy
def test_spline_smooth(self):
s = Series([1, 2, np.nan, 4, 5.1, np.nan, 7])
assert (s.interpolate(method='spline', order=3, s=0)[5] !=
s.interpolate(method='spline', order=3)[5])
@td.skip_if_no_scipy
def test_spline_interpolation(self):
s = Series(np.arange(10) ** 2)
s[np.random.randint(0, 9, 3)] = np.nan
result1 = s.interpolate(method='spline', order=1)
expected1 = s.interpolate(method='spline', order=1)
assert_series_equal(result1, expected1)
@td.skip_if_no_scipy
def test_spline_error(self):
# see gh-10633
s = pd.Series(np.arange(10) ** 2)
s[np.random.randint(0, 9, 3)] = np.nan
with pytest.raises(ValueError):
s.interpolate(method='spline')
with pytest.raises(ValueError):
s.interpolate(method='spline', order=0)
def test_interp_timedelta64(self):
# GH 6424
df = Series([1, np.nan, 3],
index=pd.to_timedelta([1, 2, 3]))
result = df.interpolate(method='time')
expected = Series([1., 2., 3.],
index=pd.to_timedelta([1, 2, 3]))
assert_series_equal(result, expected)
# test for non uniform spacing
df = Series([1, np.nan, 3],
index=pd.to_timedelta([1, 2, 4]))
result = df.interpolate(method='time')
expected = Series([1., 1.666667, 3.],
index=pd.to_timedelta([1, 2, 4]))
assert_series_equal(result, expected)
def test_series_interpolate_method_values(self):
# #1646
ts = _simple_ts('1/1/2000', '1/20/2000')
ts[::2] = np.nan
result = ts.interpolate(method='values')
exp = ts.interpolate()
assert_series_equal(result, exp)
def test_series_interpolate_intraday(self):
# #1698
index = pd.date_range('1/1/2012', periods=4, freq='12D')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(days=1)).sort_values()
exp = ts.reindex(new_index).interpolate(method='time')
index = pd.date_range('1/1/2012', periods=4, freq='12H')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(hours=1)).sort_values()
result = ts.reindex(new_index).interpolate(method='time')
tm.assert_numpy_array_equal(result.values, exp.values)
|
{
"content_hash": "da08f04c8e86fa7c94b8d016428fec79",
"timestamp": "",
"source": "github",
"line_count": 1324,
"max_line_length": 79,
"avg_line_length": 39.222809667673715,
"alnum_prop": 0.5369817642641197,
"repo_name": "cython-testbed/pandas",
"id": "dcc4845f274ba8465a5ecc52a04b947016518d76",
"size": "51981",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pandas/tests/series/test_missing.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4907"
},
{
"name": "C",
"bytes": "404689"
},
{
"name": "C++",
"bytes": "17194"
},
{
"name": "HTML",
"bytes": "551714"
},
{
"name": "Makefile",
"bytes": "574"
},
{
"name": "Python",
"bytes": "14136208"
},
{
"name": "Shell",
"bytes": "27731"
},
{
"name": "Smarty",
"bytes": "2045"
}
],
"symlink_target": ""
}
|
"""Quotas for DB instances and resources."""
from trove.openstack.common import log as logging
from trove.openstack.common.gettextutils import _
from oslo.config import cfg
from trove.common import exception
from trove.openstack.common import importutils
from trove.quota.models import Quota
from trove.quota.models import QuotaUsage
from trove.quota.models import Reservation
from trove.quota.models import Resource
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class DbQuotaDriver(object):
"""
Driver to perform necessary checks to enforce quotas and obtain
quota information. The default driver utilizes the local
database.
"""
def __init__(self, resources):
self.resources = resources
def get_quota_by_tenant(self, tenant_id, resource):
"""Get a specific quota by tenant."""
quotas = Quota.find_all(tenant_id=tenant_id, resource=resource).all()
if len(quotas) == 0:
return Quota(tenant_id, resource, self.resources[resource].default)
return quotas[0]
def get_all_quotas_by_tenant(self, tenant_id, resources):
"""
Retrieve the quotas for the given tenant.
:param resources: A list of the registered resource to get.
:param tenant_id: The ID of the tenant to return quotas for.
"""
all_quotas = Quota.find_all(tenant_id=tenant_id).all()
result_quotas = dict((quota.resource, quota)
for quota in all_quotas
if quota.resource in resources)
if len(result_quotas) != len(resources):
for resource in resources:
# Not in the DB, return default value
if resource not in result_quotas:
quota = Quota(tenant_id,
resource,
self.resources[resource].default)
result_quotas[resource] = quota
return result_quotas
def get_quota_usage_by_tenant(self, tenant_id, resource):
"""Get a specific quota usage by tenant."""
quotas = QuotaUsage.find_all(tenant_id=tenant_id,
resource=resource).all()
if len(quotas) == 0:
return QuotaUsage.create(tenant_id=tenant_id,
in_use=0,
reserved=0,
resource=resource)
return quotas[0]
def get_all_quota_usages_by_tenant(self, tenant_id, resources):
"""
        Retrieve the quota usages for the given tenant.
:param tenant_id: The ID of the tenant to return quotas for.
:param resources: A list of the registered resources to get.
"""
all_usages = QuotaUsage.find_all(tenant_id=tenant_id).all()
result_usages = dict((usage.resource, usage)
for usage in all_usages
if usage.resource in resources)
if len(result_usages) != len(resources):
for resource in resources:
# Not in the DB, return default value
if resource not in result_usages:
usage = QuotaUsage.create(tenant_id=tenant_id,
in_use=0,
reserved=0,
resource=resource)
result_usages[resource] = usage
return result_usages
def get_defaults(self, resources):
"""Given a list of resources, retrieve the default quotas.
:param resources: A list of the registered resources.
"""
quotas = {}
for resource in resources.values():
quotas[resource.name] = resource.default
return quotas
def reserve(self, tenant_id, resources, deltas):
"""Check quotas and reserve resources for a tenant.
This method checks quotas against current usage,
reserved resources and the desired deltas.
        If any of the proposed values exceeds the defined quota, a
QuotaExceeded exception will be raised with the sorted list of the
resources which are too high. Otherwise, the method returns a
list of reservation objects which were created.
:param tenant_id: The ID of the tenant reserving the resources.
:param resources: A dictionary of the registered resources.
:param deltas: A dictionary of the proposed delta changes.
"""
unregistered_resources = [delta for delta in deltas
if delta not in resources]
if unregistered_resources:
raise exception.QuotaResourceUnknown(unknown=
unregistered_resources)
quotas = self.get_all_quotas_by_tenant(tenant_id, deltas.keys())
quota_usages = self.get_all_quota_usages_by_tenant(tenant_id,
deltas.keys())
overs = [resource for resource in deltas
if (int(deltas[resource]) > 0 and
(quota_usages[resource].in_use +
quota_usages[resource].reserved +
int(deltas[resource])) > quotas[resource].hard_limit)]
if overs:
raise exception.QuotaExceeded(overs=sorted(overs))
reservations = []
for resource in deltas:
reserved = deltas[resource]
usage = quota_usages[resource]
usage.reserved = reserved
usage.save()
resv = Reservation.create(usage_id=usage.id,
delta=usage.reserved,
status=Reservation.Statuses.RESERVED)
reservations.append(resv)
return reservations
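    # A worked example of the over-quota check above (purely illustrative
    # numbers, not from any real deployment): with hard_limit=5, in_use=3 and
    # reserved=1, a requested delta of 2 gives 3 + 1 + 2 = 6 > 5, so the
    # resource lands in `overs` and QuotaExceeded is raised; a delta of 1
    # (3 + 1 + 1 = 5) is not over the limit and would be reserved normally.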
def commit(self, reservations):
"""Commit reservations.
:param reservations: A list of the reservation UUIDs, as
returned by the reserve() method.
"""
for reservation in reservations:
usage = QuotaUsage.find_by(id=reservation.usage_id)
usage.in_use += reservation.delta
usage.reserved -= reservation.delta
reservation.status = Reservation.Statuses.COMMITTED
usage.save()
reservation.save()
def rollback(self, reservations):
"""Roll back reservations.
:param reservations: A list of the reservation UUIDs, as
returned by the reserve() method.
"""
for reservation in reservations:
usage = QuotaUsage.find_by(id=reservation.usage_id)
usage.reserved -= reservation.delta
reservation.status = Reservation.Statuses.ROLLEDBACK
usage.save()
reservation.save()
class QuotaEngine(object):
"""Represent the set of recognized quotas."""
def __init__(self, quota_driver_class=None):
"""Initialize a Quota object."""
self._resources = {}
if not quota_driver_class:
quota_driver_class = CONF.quota_driver
if isinstance(quota_driver_class, basestring):
quota_driver_class = importutils.import_object(quota_driver_class,
self._resources)
self._driver = quota_driver_class
def __contains__(self, resource):
return resource in self._resources
def register_resource(self, resource):
"""Register a resource."""
self._resources[resource.name] = resource
def register_resources(self, resources):
"""Register a dictionary of resources."""
for resource in resources:
self.register_resource(resource)
def get_quota_by_tenant(self, tenant_id, resource):
"""Get a specific quota by tenant."""
return self._driver.get_quota_by_tenant(tenant_id, resource)
def get_defaults(self):
"""Retrieve the default quotas."""
return self._driver.get_defaults(self._resources)
def get_all_quotas_by_tenant(self, tenant_id):
"""Retrieve the quotas for the given tenant.
:param tenant_id: The ID of the tenant to return quotas for.
"""
return self._driver.get_all_quotas_by_tenant(tenant_id,
self._resources)
def reserve(self, tenant_id, **deltas):
"""Check quotas and reserve resources.
For counting quotas--those quotas for which there is a usage
synchronization function--this method checks quotas against
current usage and the desired deltas. The deltas are given as
keyword arguments, and current usage and other reservations
are factored into the quota check.
This method will raise a QuotaResourceUnknown exception if a
given resource is unknown or if it does not have a usage
synchronization function.
        If any of the proposed values exceeds the defined quota, a
QuotaExceeded exception will be raised with the sorted list of the
resources which are too high. Otherwise, the method returns a
list of reservation UUIDs which were created.
:param tenant_id: The ID of the tenant to reserve quotas for.
"""
reservations = self._driver.reserve(tenant_id, self._resources, deltas)
LOG.debug(_("Created reservations %(reservations)s") %
{'reservations': reservations})
return reservations
def commit(self, reservations):
"""Commit reservations.
:param reservations: A list of the reservation UUIDs, as
returned by the reserve() method.
"""
try:
self._driver.commit(reservations)
except Exception:
LOG.exception(_("Failed to commit reservations "
"%(reservations)s") % {'reservations': reservations})
def rollback(self, reservations):
"""Roll back reservations.
:param reservations: A list of the reservation UUIDs, as
returned by the reserve() method.
"""
try:
self._driver.rollback(reservations)
except Exception:
LOG.exception(_("Failed to roll back reservations "
"%(reservations)s") % {'reservations': reservations})
@property
def resources(self):
return sorted(self._resources.keys())
QUOTAS = QuotaEngine()
''' Define all kinds of resources here '''
resources = [Resource(Resource.INSTANCES, 'max_instances_per_user'),
Resource(Resource.BACKUPS, 'max_backups_per_user')]
if CONF.trove_volume_support:
resources.append(Resource(Resource.VOLUMES, 'max_volumes_per_user'))
QUOTAS.register_resources(resources)
def run_with_quotas(tenant_id, deltas, f):
""" Quota wrapper """
reservations = QUOTAS.reserve(tenant_id, **deltas)
result = None
try:
result = f()
except Exception:
QUOTAS.rollback(reservations)
raise
else:
QUOTAS.commit(reservations)
return result
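# Minimal usage sketch for run_with_quotas (not executed here; the tenant id
# and the provisioning callable are hypothetical):
#
#     def _create_instance():
#         ...  # the work that actually consumes the quota
#
#     run_with_quotas('some-tenant-id', {Resource.INSTANCES: 1},
#                     _create_instance)
#
# On success the reservation is committed and the callable's return value is
# returned; if the callable raises, the reservation is rolled back and the
# exception propagates.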
|
{
"content_hash": "293a6e322bb5c4a373307c9365cf6e9d",
"timestamp": "",
"source": "github",
"line_count": 317,
"max_line_length": 79,
"avg_line_length": 35.53312302839117,
"alnum_prop": 0.5886008522727273,
"repo_name": "citrix-openstack-build/trove",
"id": "27c995aebf443236a1997d9c8c73de284a0de5a6",
"size": "11945",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "trove/quota/quota.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "19900"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "1725275"
},
{
"name": "Shell",
"bytes": "5512"
}
],
"symlink_target": ""
}
|
from untwisted.server import create_server
from untwisted.event import ACCEPT, LOAD
from untwisted import core
class EchoServer:
def __init__(self, server):
server.add_map(ACCEPT, lambda server, con:
con.add_map(LOAD, lambda con, data: con.dump(data)))
if __name__ == '__main__':
EchoServer(create_server('0.0.0.0', 1234, 5))
core.gear.mainloop()
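# Quick manual check (assumes netcat is installed): with the server running,
#
#     nc localhost 1234
#
# any line typed is sent back unchanged by the LOAD handler above.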
|
{
"content_hash": "5d46b16700e85a2b2f22512bde2b8536",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 73,
"avg_line_length": 24.6875,
"alnum_prop": 0.640506329113924,
"repo_name": "iogf/untwisted",
"id": "cd5f398d1a338ade63ef002dd3706525f745dd48",
"size": "395",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo/echo_server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "53752"
},
{
"name": "Shell",
"bytes": "3745"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, absolute_import, division
import screed
import docopt
CLI = """
USAGE:
fasplit <fasta> <prefix>
"""
opts = docopt.docopt(CLI)
prefix = opts['<prefix>']
with screed.open(opts['<fasta>']) as fh:
for record in fh:
fname = "{}{}.fasta".format(prefix, record.name)
with open(fname, 'w') as ofh:
print(">", record.name, sep='', file=ofh)
print(record.sequence, file=ofh)
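# Example invocation (file names are hypothetical): split reads.fasta into one
# FASTA file per record, each named with the prefix out/seq_:
#
#     python splitfa.py reads.fasta out/seq_
#
# A record named "chr1" would be written to out/seq_chr1.fasta (the out/
# directory must already exist).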
|
{
"content_hash": "7f5fb89018ba89f0199d39b782b468ed",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 64,
"avg_line_length": 24,
"alnum_prop": 0.6096491228070176,
"repo_name": "kdmurray91/kwip-experiments",
"id": "19c7f91e6b67eef97aa131619fe3df8c5bce5937",
"size": "478",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "abcd-tree-sim/splitfa.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "329242"
},
{
"name": "Python",
"bytes": "56929"
},
{
"name": "R",
"bytes": "17363"
},
{
"name": "Shell",
"bytes": "7080"
}
],
"symlink_target": ""
}
|
"""This module is deprecated. Please use :mod:`airflow.providers.amazon.aws.sensors.emr`."""
import warnings
from airflow.providers.amazon.aws.sensors.emr import EmrJobFlowSensor, EmrStepSensor # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.amazon.aws.sensors.emr`.",
DeprecationWarning,
stacklevel=2,
)
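# Migration sketch: import the sensors from the provider package directly
# instead of this shim, e.g.
#
#     from airflow.providers.amazon.aws.sensors.emr import EmrStepSensor
#
# which is the same class re-exported above.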
|
{
"content_hash": "0e8f5591d2389b87b016e6e85b66d33f",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 92,
"avg_line_length": 32.09090909090909,
"alnum_prop": 0.7592067988668555,
"repo_name": "bolkedebruin/airflow",
"id": "aca71619089e7436487a0897489bb81d2d82495b",
"size": "1142",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "airflow/providers/amazon/aws/sensors/emr_step.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25286"
},
{
"name": "Dockerfile",
"bytes": "40459"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "157840"
},
{
"name": "JavaScript",
"bytes": "167972"
},
{
"name": "Jinja",
"bytes": "33382"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "19287942"
},
{
"name": "Shell",
"bytes": "645244"
},
{
"name": "TypeScript",
"bytes": "173854"
}
],
"symlink_target": ""
}
|
"""
Cross Site Flashing semi passive plugin: Tries to retrieve the crossdomain.xml
file and display it for review
"""
from framework.dependency_management.dependency_resolver import ServiceLocator
DESCRIPTION = "Normal requests for XSF analysis"
def run(PluginInfo):
url_list = []
for File in ["crossdomain.xml", "clientaccesspolicy.xml"]:
for url in ServiceLocator.get_component("target").GetAsList(['target_url', 'top_url']):
url_list.append(url + "/" + File) # Compute all URL + File combinations
# The requester framework component will unique the URLs
TransactionList = ServiceLocator.get_component("requester").GetTransactions(True, url_list)
# Even though we have transaction list, those transactions do not have id
# because our proxy stores the transactions and not the requester. So the
# best way is to use the url list to retrieve transactions while making the
# report
return ServiceLocator.get_component("plugin_helper").TransactionTableForURLList(True, url_list, "GET")
|
{
"content_hash": "8846f1bb8cda8f67d4f9c3a5479b8e81",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 106,
"avg_line_length": 45.52173913043478,
"alnum_prop": 0.7373447946513849,
"repo_name": "DarKnight--/owtf",
"id": "6b9c1cfed2db6c8f499577ae800eb54567a60206",
"size": "1047",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "plugins/web/semi_passive/Testing_for_Cross_site_flashing@OWTF-DV-004.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "163045"
},
{
"name": "JavaScript",
"bytes": "16960"
},
{
"name": "Python",
"bytes": "893900"
},
{
"name": "Shell",
"bytes": "62217"
}
],
"symlink_target": ""
}
|
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
# Examples:
# url(r'^$', 'meliscore.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^$', 'front.views.home', name='home'),
url(r'^score/(?P<itemid>[\S]+)$', 'front.views.score', name='score'),
url(r'^score/$', 'front.views.score', name='score'),
url(r'^sweetspot/(?P<itemid>[\S]+)$', 'front.views.sweetspot', name='sweetspot'),
url(r'^salespeed/(?P<itemid>[\S]+)$', 'front.views.salespeed', name='salespeed'),
url(r'^admin/', include(admin.site.urls)),
]
|
{
"content_hash": "28540ce1e9e104730d0728be79fba7ba",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 85,
"avg_line_length": 38.125,
"alnum_prop": 0.6049180327868853,
"repo_name": "jairot/meliscore",
"id": "f699f78a98b8cbce837082dbb0c381c82d0b6306",
"size": "610",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "meliscore/meliscore/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "16563"
},
{
"name": "CSS",
"bytes": "902296"
},
{
"name": "HTML",
"bytes": "20242"
},
{
"name": "JavaScript",
"bytes": "5276852"
},
{
"name": "Python",
"bytes": "24479"
}
],
"symlink_target": ""
}
|
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class Session100Session(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
Session100Session - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'odata_context': 'Odata400Context',
'odata_id': 'Odata400Id',
'odata_type': 'Odata400Type',
'description': 'ResourceDescription',
'id': 'ResourceId',
'name': 'ResourceName',
'oem': 'ResourceOem'
}
self.attribute_map = {
'odata_context': '@odata.context',
'odata_id': '@odata.id',
'odata_type': '@odata.type',
'description': 'Description',
'id': 'Id',
'name': 'Name',
'oem': 'Oem'
}
self._odata_context = None
self._odata_id = None
self._odata_type = None
self._description = None
self._id = None
self._name = None
self._oem = None
@property
def odata_context(self):
"""
Gets the odata_context of this Session100Session.
:return: The odata_context of this Session100Session.
:rtype: Odata400Context
"""
return self._odata_context
@odata_context.setter
def odata_context(self, odata_context):
"""
Sets the odata_context of this Session100Session.
:param odata_context: The odata_context of this Session100Session.
:type: Odata400Context
"""
self._odata_context = odata_context
@property
def odata_id(self):
"""
Gets the odata_id of this Session100Session.
:return: The odata_id of this Session100Session.
:rtype: Odata400Id
"""
return self._odata_id
@odata_id.setter
def odata_id(self, odata_id):
"""
Sets the odata_id of this Session100Session.
:param odata_id: The odata_id of this Session100Session.
:type: Odata400Id
"""
self._odata_id = odata_id
@property
def odata_type(self):
"""
Gets the odata_type of this Session100Session.
:return: The odata_type of this Session100Session.
:rtype: Odata400Type
"""
return self._odata_type
@odata_type.setter
def odata_type(self, odata_type):
"""
Sets the odata_type of this Session100Session.
:param odata_type: The odata_type of this Session100Session.
:type: Odata400Type
"""
self._odata_type = odata_type
@property
def description(self):
"""
Gets the description of this Session100Session.
:return: The description of this Session100Session.
:rtype: ResourceDescription
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this Session100Session.
:param description: The description of this Session100Session.
:type: ResourceDescription
"""
self._description = description
@property
def id(self):
"""
Gets the id of this Session100Session.
:return: The id of this Session100Session.
:rtype: ResourceId
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this Session100Session.
:param id: The id of this Session100Session.
:type: ResourceId
"""
self._id = id
@property
def name(self):
"""
Gets the name of this Session100Session.
:return: The name of this Session100Session.
:rtype: ResourceName
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this Session100Session.
:param name: The name of this Session100Session.
:type: ResourceName
"""
self._name = name
@property
def oem(self):
"""
Gets the oem of this Session100Session.
This is the manufacturer/provider specific extension moniker used to divide the Oem object into sections.
:return: The oem of this Session100Session.
:rtype: ResourceOem
"""
return self._oem
@oem.setter
def oem(self, oem):
"""
Sets the oem of this Session100Session.
This is the manufacturer/provider specific extension moniker used to divide the Oem object into sections.
:param oem: The oem of this Session100Session.
:type: ResourceOem
"""
self._oem = oem
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
{
"content_hash": "0c3152750673b97102e3976489cd53c5",
"timestamp": "",
"source": "github",
"line_count": 262,
"max_line_length": 113,
"avg_line_length": 26.053435114503817,
"alnum_prop": 0.5665104014063873,
"repo_name": "jlongever/redfish-client-python",
"id": "2a062e2283534a7f3a68bef8cb4c11a16b36f628",
"size": "6843",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "on_http_redfish_1_0/models/session_1_0_0_session.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "939832"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import pyeapi
pyeapi.load_config('nodes.conf')
node = pyeapi.connect_to('veos01')
output = node.enable('show version')
print('My System MAC address is', output[0]['result']['systemMacAddress'])
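# The 'veos01' connection is expected to come from nodes.conf, written in
# pyeapi's eapi.conf INI format, roughly (all values are placeholders):
#
#     [connection:veos01]
#     host: 192.168.1.16
#     username: admin
#     password: admin
#     transport: https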
|
{
"content_hash": "2d8746a95d86a98e912b5820b350175c",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 76,
"avg_line_length": 26.333333333333332,
"alnum_prop": 0.7257383966244726,
"repo_name": "mzbenami/pyeapi",
"id": "af1cb3e764348ab0c9267f72258dda105c68db0c",
"size": "259",
"binary": false,
"copies": "6",
"ref": "refs/heads/develop",
"path": "examples/sysmac.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1560"
},
{
"name": "Python",
"bytes": "599128"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import sys
from thrift.util.Recursive import fix_spec
from thrift.Thrift import TType, TMessageType, TPriority, TRequestContext, TProcessorEventHandler, TServerInterface, TProcessor, TException, TApplicationException, UnimplementedTypedef
from thrift.protocol.TProtocol import TProtocolException
from json import loads
import sys
if sys.version_info[0] >= 3:
long = int
import thrift.annotation.thrift.ttypes
import thrift.annotation.scope.ttypes
import thrift.annotation.cpp.ttypes
import thrift.lib.thrift.standard.ttypes
import pprint
import warnings
from thrift import Thrift
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.protocol import TCompactProtocol
from thrift.protocol import THeaderProtocol
fastproto = None
try:
from thrift.protocol import fastproto
except ImportError:
pass
all_structs = []
UTF8STRINGS = bool(0) or sys.version_info.major >= 3
__all__ = ['UTF8STRINGS', 'PatchOp', 'GeneratePatch', 'AssignOnlyPatch', 'BoolPatch', 'BytePatch', 'I16Patch', 'I32Patch', 'I64Patch', 'FloatPatch', 'DoublePatch', 'StringPatch', 'BinaryPatch', 'DurationPatch', 'TimePatch']
class PatchOp:
"""
The meaning of the patch op field ids, in all properly formulated patch
definitions.
Patch field ids are interpreted at runtime, as a dynamic patch protocol,
without any additional schema derived from IDL patch definitions.
"""
Assign = 1
Clear = 2
PatchPrior = 3
EnsureUnion = 4
EnsureStruct = 5
PatchAfter = 6
Remove = 7
Add = 8
Put = 9
Unspecified = 0
_VALUES_TO_NAMES = {
1: "Assign",
2: "Clear",
3: "PatchPrior",
4: "EnsureUnion",
5: "EnsureStruct",
6: "PatchAfter",
7: "Remove",
8: "Add",
9: "Put",
0: "Unspecified",
}
_NAMES_TO_VALUES = {
"Assign": 1,
"Clear": 2,
"PatchPrior": 3,
"EnsureUnion": 4,
"EnsureStruct": 5,
"PatchAfter": 6,
"Remove": 7,
"Add": 8,
"Put": 9,
"Unspecified": 0,
}
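    # Illustrative lookups against the maps above (a sketch, not part of the
    # generated code):
    #
    #     PatchOp._VALUES_TO_NAMES[3]      # -> "PatchPrior"
    #     PatchOp._NAMES_TO_VALUES["Put"]  # -> 9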
class GeneratePatch:
"""
An annotation that indicates a patch representation
should be generated for the associated definition.
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('GeneratePatch')
oprot.writeFieldStop()
oprot.writeStructEnd()
def readFromJson(self, json, is_text=True, **kwargs):
relax_enum_validation = bool(kwargs.pop('relax_enum_validation', False))
set_cls = kwargs.pop('custom_set_cls', set)
dict_cls = kwargs.pop('custom_dict_cls', dict)
if kwargs:
extra_kwargs = ', '.join(kwargs.keys())
raise ValueError(
'Unexpected keyword arguments: ' + extra_kwargs
)
json_obj = json
if is_text:
json_obj = loads(json)
def __repr__(self):
L = []
padding = ' ' * 4
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
def __dir__(self):
return (
)
# Override the __hash__ function for Python3 - t10434117
__hash__ = object.__hash__
def _to_python(self):
import importlib
import thrift.python.converter
python_types = importlib.import_module("apache.thrift.op.patch.thrift_types")
return thrift.python.converter.to_python_struct(python_types.GeneratePatch, self)
def _to_py3(self):
import importlib
import thrift.py3.converter
py3_types = importlib.import_module("apache.thrift.op.patch.types")
return thrift.py3.converter.to_py3_struct(py3_types.GeneratePatch, self)
def _to_py_deprecated(self):
return self
class AssignOnlyPatch:
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('AssignOnlyPatch')
oprot.writeFieldStop()
oprot.writeStructEnd()
def readFromJson(self, json, is_text=True, **kwargs):
relax_enum_validation = bool(kwargs.pop('relax_enum_validation', False))
set_cls = kwargs.pop('custom_set_cls', set)
dict_cls = kwargs.pop('custom_dict_cls', dict)
if kwargs:
extra_kwargs = ', '.join(kwargs.keys())
raise ValueError(
'Unexpected keyword arguments: ' + extra_kwargs
)
json_obj = json
if is_text:
json_obj = loads(json)
def __repr__(self):
L = []
padding = ' ' * 4
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
def __dir__(self):
return (
)
# Override the __hash__ function for Python3 - t10434117
__hash__ = object.__hash__
def _to_python(self):
import importlib
import thrift.python.converter
python_types = importlib.import_module("apache.thrift.op.patch.thrift_types")
return thrift.python.converter.to_python_struct(python_types.AssignOnlyPatch, self)
def _to_py3(self):
import importlib
import thrift.py3.converter
py3_types = importlib.import_module("apache.thrift.op.patch.types")
return thrift.py3.converter.to_py3_struct(py3_types.AssignOnlyPatch, self)
def _to_py_deprecated(self):
return self
class BoolPatch:
"""
A patch for a boolean value.
Attributes:
- assign: Assigns to a (set) value.
If set, all other patch operations are ignored.
Note: Only modifies set field values.
- clear: Clear any set value.
- invert: If the bool value should be inverted.
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.BOOL:
self.assign = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.BOOL:
self.clear = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 9:
if ftype == TType.BOOL:
self.invert = iprot.readBool()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('BoolPatch')
if self.assign != None:
oprot.writeFieldBegin('assign', TType.BOOL, 1)
oprot.writeBool(self.assign)
oprot.writeFieldEnd()
if self.clear != None:
oprot.writeFieldBegin('clear', TType.BOOL, 2)
oprot.writeBool(self.clear)
oprot.writeFieldEnd()
if self.invert != None:
oprot.writeFieldBegin('invert', TType.BOOL, 9)
oprot.writeBool(self.invert)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def readFromJson(self, json, is_text=True, **kwargs):
relax_enum_validation = bool(kwargs.pop('relax_enum_validation', False))
set_cls = kwargs.pop('custom_set_cls', set)
dict_cls = kwargs.pop('custom_dict_cls', dict)
if kwargs:
extra_kwargs = ', '.join(kwargs.keys())
raise ValueError(
'Unexpected keyword arguments: ' + extra_kwargs
)
json_obj = json
if is_text:
json_obj = loads(json)
if 'assign' in json_obj and json_obj['assign'] is not None:
self.assign = json_obj['assign']
if 'clear' in json_obj and json_obj['clear'] is not None:
self.clear = json_obj['clear']
if 'invert' in json_obj and json_obj['invert'] is not None:
self.invert = json_obj['invert']
def __repr__(self):
L = []
padding = ' ' * 4
if self.assign is not None:
value = pprint.pformat(self.assign, indent=0)
value = padding.join(value.splitlines(True))
L.append(' assign=%s' % (value))
if self.clear is not None:
value = pprint.pformat(self.clear, indent=0)
value = padding.join(value.splitlines(True))
L.append(' clear=%s' % (value))
if self.invert is not None:
value = pprint.pformat(self.invert, indent=0)
value = padding.join(value.splitlines(True))
L.append(' invert=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
def __dir__(self):
return (
'assign',
'clear',
'invert',
)
# Override the __hash__ function for Python3 - t10434117
__hash__ = object.__hash__
def _to_python(self):
import importlib
import thrift.python.converter
python_types = importlib.import_module("apache.thrift.op.patch.thrift_types")
return thrift.python.converter.to_python_struct(python_types.BoolPatch, self)
def _to_py3(self):
import importlib
import thrift.py3.converter
py3_types = importlib.import_module("apache.thrift.op.patch.types")
return thrift.py3.converter.to_py3_struct(py3_types.BoolPatch, self)
def _to_py_deprecated(self):
return self
class BytePatch:
"""
A patch for an 8-bit integer value.
Attributes:
- assign: Assigns to a (set) value.
If set, all other patch operations are ignored.
Note: Only modifies set field values.
- clear: Clear any set value.
- add: Add to a given value.
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.BYTE:
self.assign = iprot.readByte()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.BOOL:
self.clear = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.BYTE:
self.add = iprot.readByte()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('BytePatch')
if self.assign != None:
oprot.writeFieldBegin('assign', TType.BYTE, 1)
oprot.writeByte(self.assign)
oprot.writeFieldEnd()
if self.clear != None:
oprot.writeFieldBegin('clear', TType.BOOL, 2)
oprot.writeBool(self.clear)
oprot.writeFieldEnd()
if self.add != None:
oprot.writeFieldBegin('add', TType.BYTE, 8)
oprot.writeByte(self.add)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def readFromJson(self, json, is_text=True, **kwargs):
relax_enum_validation = bool(kwargs.pop('relax_enum_validation', False))
set_cls = kwargs.pop('custom_set_cls', set)
dict_cls = kwargs.pop('custom_dict_cls', dict)
if kwargs:
extra_kwargs = ', '.join(kwargs.keys())
raise ValueError(
'Unexpected keyword arguments: ' + extra_kwargs
)
json_obj = json
if is_text:
json_obj = loads(json)
if 'assign' in json_obj and json_obj['assign'] is not None:
self.assign = json_obj['assign']
if self.assign > 0x7f or self.assign < -0x80:
raise TProtocolException(TProtocolException.INVALID_DATA, 'number exceeds limit in field')
if 'clear' in json_obj and json_obj['clear'] is not None:
self.clear = json_obj['clear']
if 'add' in json_obj and json_obj['add'] is not None:
self.add = json_obj['add']
if self.add > 0x7f or self.add < -0x80:
raise TProtocolException(TProtocolException.INVALID_DATA, 'number exceeds limit in field')
def __repr__(self):
L = []
padding = ' ' * 4
if self.assign is not None:
value = pprint.pformat(self.assign, indent=0)
value = padding.join(value.splitlines(True))
L.append(' assign=%s' % (value))
if self.clear is not None:
value = pprint.pformat(self.clear, indent=0)
value = padding.join(value.splitlines(True))
L.append(' clear=%s' % (value))
if self.add is not None:
value = pprint.pformat(self.add, indent=0)
value = padding.join(value.splitlines(True))
L.append(' add=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
def __dir__(self):
return (
'assign',
'clear',
'add',
)
# Override the __hash__ function for Python3 - t10434117
__hash__ = object.__hash__
def _to_python(self):
import importlib
import thrift.python.converter
python_types = importlib.import_module("apache.thrift.op.patch.thrift_types")
return thrift.python.converter.to_python_struct(python_types.BytePatch, self)
def _to_py3(self):
import importlib
import thrift.py3.converter
py3_types = importlib.import_module("apache.thrift.op.patch.types")
return thrift.py3.converter.to_py3_struct(py3_types.BytePatch, self)
def _to_py_deprecated(self):
return self
class I16Patch:
"""
A patch for a 16-bit integer value.
Attributes:
- assign: Assigns to a (set) value.
If set, all other patch operations are ignored.
Note: Only modifies set field values.
- clear: Clear any set value.
- add: Add to a given value.
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I16:
self.assign = iprot.readI16()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.BOOL:
self.clear = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.I16:
self.add = iprot.readI16()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('I16Patch')
if self.assign != None:
oprot.writeFieldBegin('assign', TType.I16, 1)
oprot.writeI16(self.assign)
oprot.writeFieldEnd()
if self.clear != None:
oprot.writeFieldBegin('clear', TType.BOOL, 2)
oprot.writeBool(self.clear)
oprot.writeFieldEnd()
if self.add != None:
oprot.writeFieldBegin('add', TType.I16, 8)
oprot.writeI16(self.add)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def readFromJson(self, json, is_text=True, **kwargs):
relax_enum_validation = bool(kwargs.pop('relax_enum_validation', False))
set_cls = kwargs.pop('custom_set_cls', set)
dict_cls = kwargs.pop('custom_dict_cls', dict)
if kwargs:
extra_kwargs = ', '.join(kwargs.keys())
raise ValueError(
'Unexpected keyword arguments: ' + extra_kwargs
)
json_obj = json
if is_text:
json_obj = loads(json)
if 'assign' in json_obj and json_obj['assign'] is not None:
self.assign = json_obj['assign']
if self.assign > 0x7fff or self.assign < -0x8000:
raise TProtocolException(TProtocolException.INVALID_DATA, 'number exceeds limit in field')
if 'clear' in json_obj and json_obj['clear'] is not None:
self.clear = json_obj['clear']
if 'add' in json_obj and json_obj['add'] is not None:
self.add = json_obj['add']
if self.add > 0x7fff or self.add < -0x8000:
raise TProtocolException(TProtocolException.INVALID_DATA, 'number exceeds limit in field')
def __repr__(self):
L = []
padding = ' ' * 4
if self.assign is not None:
value = pprint.pformat(self.assign, indent=0)
value = padding.join(value.splitlines(True))
L.append(' assign=%s' % (value))
if self.clear is not None:
value = pprint.pformat(self.clear, indent=0)
value = padding.join(value.splitlines(True))
L.append(' clear=%s' % (value))
if self.add is not None:
value = pprint.pformat(self.add, indent=0)
value = padding.join(value.splitlines(True))
L.append(' add=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
def __dir__(self):
return (
'assign',
'clear',
'add',
)
# Override the __hash__ function for Python3 - t10434117
__hash__ = object.__hash__
def _to_python(self):
import importlib
import thrift.python.converter
python_types = importlib.import_module("apache.thrift.op.patch.thrift_types")
return thrift.python.converter.to_python_struct(python_types.I16Patch, self)
def _to_py3(self):
import importlib
import thrift.py3.converter
py3_types = importlib.import_module("apache.thrift.op.patch.types")
return thrift.py3.converter.to_py3_struct(py3_types.I16Patch, self)
def _to_py_deprecated(self):
return self
class I32Patch:
"""
A patch for a 32-bit integer value.
Attributes:
- assign: Assigns to a (set) value.
If set, all other patch operations are ignored.
Note: Only modifies set field values.
- clear: Clears any set value.
- add: Add to a given value.
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.assign = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.BOOL:
self.clear = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.I32:
self.add = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('I32Patch')
if self.assign != None:
oprot.writeFieldBegin('assign', TType.I32, 1)
oprot.writeI32(self.assign)
oprot.writeFieldEnd()
if self.clear != None:
oprot.writeFieldBegin('clear', TType.BOOL, 2)
oprot.writeBool(self.clear)
oprot.writeFieldEnd()
if self.add != None:
oprot.writeFieldBegin('add', TType.I32, 8)
oprot.writeI32(self.add)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def readFromJson(self, json, is_text=True, **kwargs):
relax_enum_validation = bool(kwargs.pop('relax_enum_validation', False))
set_cls = kwargs.pop('custom_set_cls', set)
dict_cls = kwargs.pop('custom_dict_cls', dict)
if kwargs:
extra_kwargs = ', '.join(kwargs.keys())
raise ValueError(
'Unexpected keyword arguments: ' + extra_kwargs
)
json_obj = json
if is_text:
json_obj = loads(json)
if 'assign' in json_obj and json_obj['assign'] is not None:
self.assign = json_obj['assign']
if self.assign > 0x7fffffff or self.assign < -0x80000000:
raise TProtocolException(TProtocolException.INVALID_DATA, 'number exceeds limit in field')
if 'clear' in json_obj and json_obj['clear'] is not None:
self.clear = json_obj['clear']
if 'add' in json_obj and json_obj['add'] is not None:
self.add = json_obj['add']
if self.add > 0x7fffffff or self.add < -0x80000000:
raise TProtocolException(TProtocolException.INVALID_DATA, 'number exceeds limit in field')
def __repr__(self):
L = []
padding = ' ' * 4
if self.assign is not None:
value = pprint.pformat(self.assign, indent=0)
value = padding.join(value.splitlines(True))
L.append(' assign=%s' % (value))
if self.clear is not None:
value = pprint.pformat(self.clear, indent=0)
value = padding.join(value.splitlines(True))
L.append(' clear=%s' % (value))
if self.add is not None:
value = pprint.pformat(self.add, indent=0)
value = padding.join(value.splitlines(True))
L.append(' add=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
def __dir__(self):
return (
'assign',
'clear',
'add',
)
# Override the __hash__ function for Python3 - t10434117
__hash__ = object.__hash__
def _to_python(self):
import importlib
import thrift.python.converter
python_types = importlib.import_module("apache.thrift.op.patch.thrift_types")
return thrift.python.converter.to_python_struct(python_types.I32Patch, self)
def _to_py3(self):
import importlib
import thrift.py3.converter
py3_types = importlib.import_module("apache.thrift.op.patch.types")
return thrift.py3.converter.to_py3_struct(py3_types.I32Patch, self)
def _to_py_deprecated(self):
return self
class I64Patch:
"""
A patch for a 64-bit integer value.
Attributes:
- assign: Assigns to a (set) value.
If set, all other patch operations are ignored.
Note: Only modifies set field values.
- clear: Clear any set value.
- add: Add to a given value.
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.assign = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.BOOL:
self.clear = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.I64:
self.add = iprot.readI64()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('I64Patch')
if self.assign != None:
oprot.writeFieldBegin('assign', TType.I64, 1)
oprot.writeI64(self.assign)
oprot.writeFieldEnd()
if self.clear != None:
oprot.writeFieldBegin('clear', TType.BOOL, 2)
oprot.writeBool(self.clear)
oprot.writeFieldEnd()
if self.add != None:
oprot.writeFieldBegin('add', TType.I64, 8)
oprot.writeI64(self.add)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def readFromJson(self, json, is_text=True, **kwargs):
relax_enum_validation = bool(kwargs.pop('relax_enum_validation', False))
set_cls = kwargs.pop('custom_set_cls', set)
dict_cls = kwargs.pop('custom_dict_cls', dict)
if kwargs:
extra_kwargs = ', '.join(kwargs.keys())
raise ValueError(
'Unexpected keyword arguments: ' + extra_kwargs
)
json_obj = json
if is_text:
json_obj = loads(json)
if 'assign' in json_obj and json_obj['assign'] is not None:
self.assign = long(json_obj['assign'])
if 'clear' in json_obj and json_obj['clear'] is not None:
self.clear = json_obj['clear']
if 'add' in json_obj and json_obj['add'] is not None:
self.add = long(json_obj['add'])
def __repr__(self):
L = []
padding = ' ' * 4
if self.assign is not None:
value = pprint.pformat(self.assign, indent=0)
value = padding.join(value.splitlines(True))
L.append(' assign=%s' % (value))
if self.clear is not None:
value = pprint.pformat(self.clear, indent=0)
value = padding.join(value.splitlines(True))
L.append(' clear=%s' % (value))
if self.add is not None:
value = pprint.pformat(self.add, indent=0)
value = padding.join(value.splitlines(True))
L.append(' add=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
def __dir__(self):
return (
'assign',
'clear',
'add',
)
# Override the __hash__ function for Python3 - t10434117
__hash__ = object.__hash__
def _to_python(self):
import importlib
import thrift.python.converter
python_types = importlib.import_module("apache.thrift.op.patch.thrift_types")
return thrift.python.converter.to_python_struct(python_types.I64Patch, self)
def _to_py3(self):
import importlib
import thrift.py3.converter
py3_types = importlib.import_module("apache.thrift.op.patch.types")
return thrift.py3.converter.to_py3_struct(py3_types.I64Patch, self)
def _to_py_deprecated(self):
return self
class FloatPatch:
"""
A patch for a 32-bit floating point value.
Attributes:
- assign: Assigns to a (set) value.
If set, all other patch operations are ignored.
Note: Only modifies set field values.
- clear: Clear any set value.
- add: Add to a given value.
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.FLOAT:
self.assign = iprot.readFloat()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.BOOL:
self.clear = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.FLOAT:
self.add = iprot.readFloat()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('FloatPatch')
if self.assign != None:
oprot.writeFieldBegin('assign', TType.FLOAT, 1)
oprot.writeFloat(self.assign)
oprot.writeFieldEnd()
if self.clear != None:
oprot.writeFieldBegin('clear', TType.BOOL, 2)
oprot.writeBool(self.clear)
oprot.writeFieldEnd()
if self.add != None:
oprot.writeFieldBegin('add', TType.FLOAT, 8)
oprot.writeFloat(self.add)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def readFromJson(self, json, is_text=True, **kwargs):
relax_enum_validation = bool(kwargs.pop('relax_enum_validation', False))
set_cls = kwargs.pop('custom_set_cls', set)
dict_cls = kwargs.pop('custom_dict_cls', dict)
if kwargs:
extra_kwargs = ', '.join(kwargs.keys())
raise ValueError(
'Unexpected keyword arguments: ' + extra_kwargs
)
json_obj = json
if is_text:
json_obj = loads(json)
if 'assign' in json_obj and json_obj['assign'] is not None:
self.assign = float(json_obj['assign'])
if 'clear' in json_obj and json_obj['clear'] is not None:
self.clear = json_obj['clear']
if 'add' in json_obj and json_obj['add'] is not None:
self.add = float(json_obj['add'])
def __repr__(self):
L = []
padding = ' ' * 4
if self.assign is not None:
value = pprint.pformat(self.assign, indent=0)
value = padding.join(value.splitlines(True))
L.append(' assign=%s' % (value))
if self.clear is not None:
value = pprint.pformat(self.clear, indent=0)
value = padding.join(value.splitlines(True))
L.append(' clear=%s' % (value))
if self.add is not None:
value = pprint.pformat(self.add, indent=0)
value = padding.join(value.splitlines(True))
L.append(' add=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
def __dir__(self):
return (
'assign',
'clear',
'add',
)
# Override the __hash__ function for Python3 - t10434117
__hash__ = object.__hash__
def _to_python(self):
import importlib
import thrift.python.converter
python_types = importlib.import_module("apache.thrift.op.patch.thrift_types")
return thrift.python.converter.to_python_struct(python_types.FloatPatch, self)
def _to_py3(self):
import importlib
import thrift.py3.converter
py3_types = importlib.import_module("apache.thrift.op.patch.types")
return thrift.py3.converter.to_py3_struct(py3_types.FloatPatch, self)
def _to_py_deprecated(self):
return self
class DoublePatch:
"""
  A patch for a 64-bit floating point value.
Attributes:
- assign: Assigns to a (set) value.
If set, all other patch operations are ignored.
Note: Only modifies set field values.
- clear: Clear any set value.
- add: Add to a given value.
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.DOUBLE:
self.assign = iprot.readDouble()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.BOOL:
self.clear = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.DOUBLE:
self.add = iprot.readDouble()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('DoublePatch')
if self.assign != None:
oprot.writeFieldBegin('assign', TType.DOUBLE, 1)
oprot.writeDouble(self.assign)
oprot.writeFieldEnd()
if self.clear != None:
oprot.writeFieldBegin('clear', TType.BOOL, 2)
oprot.writeBool(self.clear)
oprot.writeFieldEnd()
if self.add != None:
oprot.writeFieldBegin('add', TType.DOUBLE, 8)
oprot.writeDouble(self.add)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def readFromJson(self, json, is_text=True, **kwargs):
relax_enum_validation = bool(kwargs.pop('relax_enum_validation', False))
set_cls = kwargs.pop('custom_set_cls', set)
dict_cls = kwargs.pop('custom_dict_cls', dict)
if kwargs:
extra_kwargs = ', '.join(kwargs.keys())
raise ValueError(
'Unexpected keyword arguments: ' + extra_kwargs
)
json_obj = json
if is_text:
json_obj = loads(json)
if 'assign' in json_obj and json_obj['assign'] is not None:
self.assign = float(json_obj['assign'])
if 'clear' in json_obj and json_obj['clear'] is not None:
self.clear = json_obj['clear']
if 'add' in json_obj and json_obj['add'] is not None:
self.add = float(json_obj['add'])
def __repr__(self):
L = []
padding = ' ' * 4
if self.assign is not None:
value = pprint.pformat(self.assign, indent=0)
value = padding.join(value.splitlines(True))
L.append(' assign=%s' % (value))
if self.clear is not None:
value = pprint.pformat(self.clear, indent=0)
value = padding.join(value.splitlines(True))
L.append(' clear=%s' % (value))
if self.add is not None:
value = pprint.pformat(self.add, indent=0)
value = padding.join(value.splitlines(True))
L.append(' add=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
def __dir__(self):
return (
'assign',
'clear',
'add',
)
# Override the __hash__ function for Python3 - t10434117
__hash__ = object.__hash__
def _to_python(self):
import importlib
import thrift.python.converter
python_types = importlib.import_module("apache.thrift.op.patch.thrift_types")
return thrift.python.converter.to_python_struct(python_types.DoublePatch, self)
def _to_py3(self):
import importlib
import thrift.py3.converter
py3_types = importlib.import_module("apache.thrift.op.patch.types")
return thrift.py3.converter.to_py3_struct(py3_types.DoublePatch, self)
def _to_py_deprecated(self):
return self
class StringPatch:
"""
A patch for a string value.
Attributes:
- assign: Assigns to a (set) value.
If set, all other patch operations are ignored.
Note: Only modifies set field values.
- clear: Clear a given string.
- prepend: Prepend to a given value.
- append: Append to a given value.
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.assign = iprot.readString().decode('utf-8') if UTF8STRINGS else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.BOOL:
self.clear = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.STRING:
self.prepend = iprot.readString().decode('utf-8') if UTF8STRINGS else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 9:
if ftype == TType.STRING:
self.append = iprot.readString().decode('utf-8') if UTF8STRINGS else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('StringPatch')
if self.assign != None:
oprot.writeFieldBegin('assign', TType.STRING, 1)
oprot.writeString(self.assign.encode('utf-8')) if UTF8STRINGS and not isinstance(self.assign, bytes) else oprot.writeString(self.assign)
oprot.writeFieldEnd()
if self.clear != None:
oprot.writeFieldBegin('clear', TType.BOOL, 2)
oprot.writeBool(self.clear)
oprot.writeFieldEnd()
if self.prepend != None:
oprot.writeFieldBegin('prepend', TType.STRING, 8)
oprot.writeString(self.prepend.encode('utf-8')) if UTF8STRINGS and not isinstance(self.prepend, bytes) else oprot.writeString(self.prepend)
oprot.writeFieldEnd()
if self.append != None:
oprot.writeFieldBegin('append', TType.STRING, 9)
oprot.writeString(self.append.encode('utf-8')) if UTF8STRINGS and not isinstance(self.append, bytes) else oprot.writeString(self.append)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def readFromJson(self, json, is_text=True, **kwargs):
relax_enum_validation = bool(kwargs.pop('relax_enum_validation', False))
set_cls = kwargs.pop('custom_set_cls', set)
dict_cls = kwargs.pop('custom_dict_cls', dict)
if kwargs:
extra_kwargs = ', '.join(kwargs.keys())
raise ValueError(
'Unexpected keyword arguments: ' + extra_kwargs
)
json_obj = json
if is_text:
json_obj = loads(json)
if 'assign' in json_obj and json_obj['assign'] is not None:
self.assign = json_obj['assign']
if 'clear' in json_obj and json_obj['clear'] is not None:
self.clear = json_obj['clear']
if 'prepend' in json_obj and json_obj['prepend'] is not None:
self.prepend = json_obj['prepend']
if 'append' in json_obj and json_obj['append'] is not None:
self.append = json_obj['append']
def __repr__(self):
L = []
padding = ' ' * 4
if self.assign is not None:
value = pprint.pformat(self.assign, indent=0)
value = padding.join(value.splitlines(True))
L.append(' assign=%s' % (value))
if self.clear is not None:
value = pprint.pformat(self.clear, indent=0)
value = padding.join(value.splitlines(True))
L.append(' clear=%s' % (value))
if self.prepend is not None:
value = pprint.pformat(self.prepend, indent=0)
value = padding.join(value.splitlines(True))
L.append(' prepend=%s' % (value))
if self.append is not None:
value = pprint.pformat(self.append, indent=0)
value = padding.join(value.splitlines(True))
L.append(' append=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
def __dir__(self):
return (
'assign',
'clear',
'prepend',
'append',
)
# Override the __hash__ function for Python3 - t10434117
__hash__ = object.__hash__
def _to_python(self):
import importlib
import thrift.python.converter
python_types = importlib.import_module("apache.thrift.op.patch.thrift_types")
return thrift.python.converter.to_python_struct(python_types.StringPatch, self)
def _to_py3(self):
import importlib
import thrift.py3.converter
py3_types = importlib.import_module("apache.thrift.op.patch.types")
return thrift.py3.converter.to_py3_struct(py3_types.StringPatch, self)
def _to_py_deprecated(self):
return self
class BinaryPatch:
"""
A patch for a binary value.
Attributes:
- assign: Assigns to a (set) value.
If set, all other patch operations are ignored.
Note: Only modifies set field values.
- clear: Clear a given binary.
- prepend: Prepend to a given value.
- append: Append to a given value.
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.assign = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.BOOL:
self.clear = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.STRING:
self.prepend = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 9:
if ftype == TType.STRING:
self.append = iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('BinaryPatch')
if self.assign != None:
oprot.writeFieldBegin('assign', TType.STRING, 1)
oprot.writeString(self.assign)
oprot.writeFieldEnd()
if self.clear != None:
oprot.writeFieldBegin('clear', TType.BOOL, 2)
oprot.writeBool(self.clear)
oprot.writeFieldEnd()
if self.prepend != None:
oprot.writeFieldBegin('prepend', TType.STRING, 8)
oprot.writeString(self.prepend)
oprot.writeFieldEnd()
if self.append != None:
oprot.writeFieldBegin('append', TType.STRING, 9)
oprot.writeString(self.append)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def readFromJson(self, json, is_text=True, **kwargs):
relax_enum_validation = bool(kwargs.pop('relax_enum_validation', False))
set_cls = kwargs.pop('custom_set_cls', set)
dict_cls = kwargs.pop('custom_dict_cls', dict)
if kwargs:
extra_kwargs = ', '.join(kwargs.keys())
raise ValueError(
'Unexpected keyword arguments: ' + extra_kwargs
)
json_obj = json
if is_text:
json_obj = loads(json)
if 'assign' in json_obj and json_obj['assign'] is not None:
self.assign = json_obj['assign']
if 'clear' in json_obj and json_obj['clear'] is not None:
self.clear = json_obj['clear']
if 'prepend' in json_obj and json_obj['prepend'] is not None:
self.prepend = json_obj['prepend']
if 'append' in json_obj and json_obj['append'] is not None:
self.append = json_obj['append']
def __repr__(self):
L = []
padding = ' ' * 4
if self.assign is not None:
value = pprint.pformat(self.assign, indent=0)
value = padding.join(value.splitlines(True))
L.append(' assign=%s' % (value))
if self.clear is not None:
value = pprint.pformat(self.clear, indent=0)
value = padding.join(value.splitlines(True))
L.append(' clear=%s' % (value))
if self.prepend is not None:
value = pprint.pformat(self.prepend, indent=0)
value = padding.join(value.splitlines(True))
L.append(' prepend=%s' % (value))
if self.append is not None:
value = pprint.pformat(self.append, indent=0)
value = padding.join(value.splitlines(True))
L.append(' append=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
def __dir__(self):
return (
'assign',
'clear',
'prepend',
'append',
)
# Override the __hash__ function for Python3 - t10434117
__hash__ = object.__hash__
def _to_python(self):
import importlib
import thrift.python.converter
python_types = importlib.import_module("apache.thrift.op.patch.thrift_types")
return thrift.python.converter.to_python_struct(python_types.BinaryPatch, self)
def _to_py3(self):
import importlib
import thrift.py3.converter
py3_types = importlib.import_module("apache.thrift.op.patch.types")
return thrift.py3.converter.to_py3_struct(py3_types.BinaryPatch, self)
def _to_py_deprecated(self):
return self
class DurationPatch:
"""
A patch for a Duration value.
Attributes:
- assign: Assigns to a (set) value.
If set, all other patch operations are ignored.
Note: Only modifies set field values.
- clear: Clear any set value.
- add: Add to a given value.
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.assign = thrift.lib.thrift.standard.ttypes.DurationStruct()
self.assign.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.BOOL:
self.clear = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.STRUCT:
self.add = thrift.lib.thrift.standard.ttypes.DurationStruct()
self.add.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('DurationPatch')
if self.assign != None:
oprot.writeFieldBegin('assign', TType.STRUCT, 1)
self.assign.write(oprot)
oprot.writeFieldEnd()
if self.clear != None:
oprot.writeFieldBegin('clear', TType.BOOL, 2)
oprot.writeBool(self.clear)
oprot.writeFieldEnd()
if self.add != None:
oprot.writeFieldBegin('add', TType.STRUCT, 8)
self.add.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def readFromJson(self, json, is_text=True, **kwargs):
relax_enum_validation = bool(kwargs.pop('relax_enum_validation', False))
set_cls = kwargs.pop('custom_set_cls', set)
dict_cls = kwargs.pop('custom_dict_cls', dict)
if kwargs:
extra_kwargs = ', '.join(kwargs.keys())
raise ValueError(
'Unexpected keyword arguments: ' + extra_kwargs
)
json_obj = json
if is_text:
json_obj = loads(json)
if 'assign' in json_obj and json_obj['assign'] is not None:
self.assign = thrift.lib.thrift.standard.ttypes.DurationStruct()
self.assign.readFromJson(json_obj['assign'], is_text=False, relax_enum_validation=relax_enum_validation, custom_set_cls=set_cls, custom_dict_cls=dict_cls)
if 'clear' in json_obj and json_obj['clear'] is not None:
self.clear = json_obj['clear']
if 'add' in json_obj and json_obj['add'] is not None:
self.add = thrift.lib.thrift.standard.ttypes.DurationStruct()
self.add.readFromJson(json_obj['add'], is_text=False, relax_enum_validation=relax_enum_validation, custom_set_cls=set_cls, custom_dict_cls=dict_cls)
def __repr__(self):
L = []
padding = ' ' * 4
if self.assign is not None:
value = pprint.pformat(self.assign, indent=0)
value = padding.join(value.splitlines(True))
L.append(' assign=%s' % (value))
if self.clear is not None:
value = pprint.pformat(self.clear, indent=0)
value = padding.join(value.splitlines(True))
L.append(' clear=%s' % (value))
if self.add is not None:
value = pprint.pformat(self.add, indent=0)
value = padding.join(value.splitlines(True))
L.append(' add=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
def __dir__(self):
return (
'assign',
'clear',
'add',
)
# Override the __hash__ function for Python3 - t10434117
__hash__ = object.__hash__
def _to_python(self):
import importlib
import thrift.python.converter
python_types = importlib.import_module("apache.thrift.op.patch.thrift_types")
return thrift.python.converter.to_python_struct(python_types.DurationPatch, self)
def _to_py3(self):
import importlib
import thrift.py3.converter
py3_types = importlib.import_module("apache.thrift.op.patch.types")
return thrift.py3.converter.to_py3_struct(py3_types.DurationPatch, self)
def _to_py_deprecated(self):
return self
class TimePatch:
"""
A patch for a Time value.
Attributes:
- assign: Assigns to a (set) value.
If set, all other patch operations are ignored.
Note: Only modifies set field values.
- clear: Clear any set value.
- add: Add to a given value.
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.assign = thrift.lib.thrift.standard.ttypes.TimeStruct()
self.assign.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.BOOL:
self.clear = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.STRUCT:
self.add = thrift.lib.thrift.standard.ttypes.DurationStruct()
self.add.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('TimePatch')
if self.assign != None:
oprot.writeFieldBegin('assign', TType.STRUCT, 1)
self.assign.write(oprot)
oprot.writeFieldEnd()
if self.clear != None:
oprot.writeFieldBegin('clear', TType.BOOL, 2)
oprot.writeBool(self.clear)
oprot.writeFieldEnd()
if self.add != None:
oprot.writeFieldBegin('add', TType.STRUCT, 8)
self.add.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def readFromJson(self, json, is_text=True, **kwargs):
relax_enum_validation = bool(kwargs.pop('relax_enum_validation', False))
set_cls = kwargs.pop('custom_set_cls', set)
dict_cls = kwargs.pop('custom_dict_cls', dict)
if kwargs:
extra_kwargs = ', '.join(kwargs.keys())
raise ValueError(
'Unexpected keyword arguments: ' + extra_kwargs
)
json_obj = json
if is_text:
json_obj = loads(json)
if 'assign' in json_obj and json_obj['assign'] is not None:
self.assign = thrift.lib.thrift.standard.ttypes.TimeStruct()
self.assign.readFromJson(json_obj['assign'], is_text=False, relax_enum_validation=relax_enum_validation, custom_set_cls=set_cls, custom_dict_cls=dict_cls)
if 'clear' in json_obj and json_obj['clear'] is not None:
self.clear = json_obj['clear']
if 'add' in json_obj and json_obj['add'] is not None:
self.add = thrift.lib.thrift.standard.ttypes.DurationStruct()
self.add.readFromJson(json_obj['add'], is_text=False, relax_enum_validation=relax_enum_validation, custom_set_cls=set_cls, custom_dict_cls=dict_cls)
def __repr__(self):
L = []
padding = ' ' * 4
if self.assign is not None:
value = pprint.pformat(self.assign, indent=0)
value = padding.join(value.splitlines(True))
L.append(' assign=%s' % (value))
if self.clear is not None:
value = pprint.pformat(self.clear, indent=0)
value = padding.join(value.splitlines(True))
L.append(' clear=%s' % (value))
if self.add is not None:
value = pprint.pformat(self.add, indent=0)
value = padding.join(value.splitlines(True))
L.append(' add=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
def __dir__(self):
return (
'assign',
'clear',
'add',
)
# Override the __hash__ function for Python3 - t10434117
__hash__ = object.__hash__
def _to_python(self):
import importlib
import thrift.python.converter
python_types = importlib.import_module("apache.thrift.op.patch.thrift_types")
return thrift.python.converter.to_python_struct(python_types.TimePatch, self)
def _to_py3(self):
import importlib
import thrift.py3.converter
py3_types = importlib.import_module("apache.thrift.op.patch.types")
return thrift.py3.converter.to_py3_struct(py3_types.TimePatch, self)
def _to_py_deprecated(self):
return self
all_structs.append(GeneratePatch)
GeneratePatch.thrift_spec = (
)
GeneratePatch.thrift_struct_annotations = {
}
GeneratePatch.thrift_field_annotations = {
}
all_structs.append(AssignOnlyPatch)
AssignOnlyPatch.thrift_spec = (
)
AssignOnlyPatch.thrift_struct_annotations = {
}
AssignOnlyPatch.thrift_field_annotations = {
}
all_structs.append(BoolPatch)
BoolPatch.thrift_spec = (
None, # 0
(1, TType.BOOL, 'assign', None, None, 1, ), # 1
(2, TType.BOOL, 'clear', None, None, 2, ), # 2
None, # 3
None, # 4
None, # 5
None, # 6
None, # 7
None, # 8
(9, TType.BOOL, 'invert', None, None, 2, ), # 9
)
BoolPatch.thrift_struct_annotations = {
}
BoolPatch.thrift_field_annotations = {
}
def BoolPatch__init__(self, assign=None, clear=None, invert=None,):
self.assign = assign
self.clear = clear
self.invert = invert
BoolPatch.__init__ = BoolPatch__init__
def BoolPatch__setstate__(self, state):
state.setdefault('assign', None)
state.setdefault('clear', None)
state.setdefault('invert', None)
self.__dict__ = state
BoolPatch.__getstate__ = lambda self: self.__dict__.copy()
BoolPatch.__setstate__ = BoolPatch__setstate__
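# Illustrative sketch (added for clarity, not emitted by the Thrift compiler):
# once the spec setup above has bound __init__, BoolPatch can be constructed
# directly. The field values are arbitrary examples; this helper is never
# invoked by the module itself.
def _example_bool_patch():
    toggle = BoolPatch(invert=True)   # flip the current value
    reset = BoolPatch(clear=True)     # clear back to the intrinsic default
    assert toggle != reset            # __eq__ compares the instance __dict__
    return repr(toggle)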
all_structs.append(BytePatch)
BytePatch.thrift_spec = (
None, # 0
(1, TType.BYTE, 'assign', None, None, 1, ), # 1
(2, TType.BOOL, 'clear', None, None, 2, ), # 2
None, # 3
None, # 4
None, # 5
None, # 6
None, # 7
(8, TType.BYTE, 'add', None, None, 2, ), # 8
)
BytePatch.thrift_struct_annotations = {
}
BytePatch.thrift_field_annotations = {
}
def BytePatch__init__(self, assign=None, clear=None, add=None,):
self.assign = assign
self.clear = clear
self.add = add
BytePatch.__init__ = BytePatch__init__
def BytePatch__setstate__(self, state):
state.setdefault('assign', None)
state.setdefault('clear', None)
state.setdefault('add', None)
self.__dict__ = state
BytePatch.__getstate__ = lambda self: self.__dict__.copy()
BytePatch.__setstate__ = BytePatch__setstate__
all_structs.append(I16Patch)
I16Patch.thrift_spec = (
None, # 0
(1, TType.I16, 'assign', None, None, 1, ), # 1
(2, TType.BOOL, 'clear', None, None, 2, ), # 2
None, # 3
None, # 4
None, # 5
None, # 6
None, # 7
(8, TType.I16, 'add', None, None, 2, ), # 8
)
I16Patch.thrift_struct_annotations = {
}
I16Patch.thrift_field_annotations = {
}
def I16Patch__init__(self, assign=None, clear=None, add=None,):
self.assign = assign
self.clear = clear
self.add = add
I16Patch.__init__ = I16Patch__init__
def I16Patch__setstate__(self, state):
state.setdefault('assign', None)
state.setdefault('clear', None)
state.setdefault('add', None)
self.__dict__ = state
I16Patch.__getstate__ = lambda self: self.__dict__.copy()
I16Patch.__setstate__ = I16Patch__setstate__
all_structs.append(I32Patch)
I32Patch.thrift_spec = (
None, # 0
(1, TType.I32, 'assign', None, None, 1, ), # 1
(2, TType.BOOL, 'clear', None, None, 2, ), # 2
None, # 3
None, # 4
None, # 5
None, # 6
None, # 7
(8, TType.I32, 'add', None, None, 2, ), # 8
)
I32Patch.thrift_struct_annotations = {
}
I32Patch.thrift_field_annotations = {
}
def I32Patch__init__(self, assign=None, clear=None, add=None,):
self.assign = assign
self.clear = clear
self.add = add
I32Patch.__init__ = I32Patch__init__
def I32Patch__setstate__(self, state):
state.setdefault('assign', None)
state.setdefault('clear', None)
state.setdefault('add', None)
self.__dict__ = state
I32Patch.__getstate__ = lambda self: self.__dict__.copy()
I32Patch.__setstate__ = I32Patch__setstate__
all_structs.append(I64Patch)
I64Patch.thrift_spec = (
None, # 0
(1, TType.I64, 'assign', None, None, 1, ), # 1
(2, TType.BOOL, 'clear', None, None, 2, ), # 2
None, # 3
None, # 4
None, # 5
None, # 6
None, # 7
(8, TType.I64, 'add', None, None, 2, ), # 8
)
I64Patch.thrift_struct_annotations = {
}
I64Patch.thrift_field_annotations = {
}
def I64Patch__init__(self, assign=None, clear=None, add=None,):
self.assign = assign
self.clear = clear
self.add = add
I64Patch.__init__ = I64Patch__init__
def I64Patch__setstate__(self, state):
state.setdefault('assign', None)
state.setdefault('clear', None)
state.setdefault('add', None)
self.__dict__ = state
I64Patch.__getstate__ = lambda self: self.__dict__.copy()
I64Patch.__setstate__ = I64Patch__setstate__
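# Illustrative sketch (added): an I64Patch either adds a delta or assigns
# outright; per the struct docstring, assign takes precedence when set. The
# numbers are arbitrary and the helper is not called anywhere in this module.
def _example_i64_patch():
    delta = I64Patch(add=5)           # add 5 to whatever value is currently set
    overwrite = I64Patch(assign=42)   # assign ignores the other operations
    return delta.add, overwrite.assign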
all_structs.append(FloatPatch)
FloatPatch.thrift_spec = (
None, # 0
(1, TType.FLOAT, 'assign', None, None, 1, ), # 1
(2, TType.BOOL, 'clear', None, None, 2, ), # 2
None, # 3
None, # 4
None, # 5
None, # 6
None, # 7
(8, TType.FLOAT, 'add', None, None, 2, ), # 8
)
FloatPatch.thrift_struct_annotations = {
}
FloatPatch.thrift_field_annotations = {
}
def FloatPatch__init__(self, assign=None, clear=None, add=None,):
self.assign = assign
self.clear = clear
self.add = add
FloatPatch.__init__ = FloatPatch__init__
def FloatPatch__setstate__(self, state):
state.setdefault('assign', None)
state.setdefault('clear', None)
state.setdefault('add', None)
self.__dict__ = state
FloatPatch.__getstate__ = lambda self: self.__dict__.copy()
FloatPatch.__setstate__ = FloatPatch__setstate__
all_structs.append(DoublePatch)
DoublePatch.thrift_spec = (
None, # 0
(1, TType.DOUBLE, 'assign', None, None, 1, ), # 1
(2, TType.BOOL, 'clear', None, None, 2, ), # 2
None, # 3
None, # 4
None, # 5
None, # 6
None, # 7
(8, TType.DOUBLE, 'add', None, None, 2, ), # 8
)
DoublePatch.thrift_struct_annotations = {
}
DoublePatch.thrift_field_annotations = {
}
def DoublePatch__init__(self, assign=None, clear=None, add=None,):
self.assign = assign
self.clear = clear
self.add = add
DoublePatch.__init__ = DoublePatch__init__
def DoublePatch__setstate__(self, state):
state.setdefault('assign', None)
state.setdefault('clear', None)
state.setdefault('add', None)
self.__dict__ = state
DoublePatch.__getstate__ = lambda self: self.__dict__.copy()
DoublePatch.__setstate__ = DoublePatch__setstate__
all_structs.append(StringPatch)
StringPatch.thrift_spec = (
None, # 0
(1, TType.STRING, 'assign', True, None, 1, ), # 1
(2, TType.BOOL, 'clear', None, None, 2, ), # 2
None, # 3
None, # 4
None, # 5
None, # 6
None, # 7
(8, TType.STRING, 'prepend', True, None, 2, ), # 8
(9, TType.STRING, 'append', True, None, 2, ), # 9
)
StringPatch.thrift_struct_annotations = {
}
StringPatch.thrift_field_annotations = {
}
def StringPatch__init__(self, assign=None, clear=None, prepend=None, append=None,):
self.assign = assign
self.clear = clear
self.prepend = prepend
self.append = append
StringPatch.__init__ = StringPatch__init__
def StringPatch__setstate__(self, state):
state.setdefault('assign', None)
state.setdefault('clear', None)
state.setdefault('prepend', None)
state.setdefault('append', None)
self.__dict__ = state
StringPatch.__getstate__ = lambda self: self.__dict__.copy()
StringPatch.__setstate__ = StringPatch__setstate__
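# Illustrative sketch (added): StringPatch combines prepend/append edits, and
# readFromJson accepts either a JSON string or a pre-parsed dict. The payload
# below is a made-up example; the helper is never invoked by this module.
def _example_string_patch():
    patch = StringPatch(prepend="<<", append=">>")
    parsed = StringPatch()
    parsed.readFromJson('{"prepend": "<<", "append": ">>"}')
    assert patch == parsed            # both leave assign/clear unset
    return repr(patch)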
all_structs.append(BinaryPatch)
BinaryPatch.thrift_spec = (
None, # 0
(1, TType.STRING, 'assign', False, None, 1, ), # 1
(2, TType.BOOL, 'clear', None, None, 2, ), # 2
None, # 3
None, # 4
None, # 5
None, # 6
None, # 7
(8, TType.STRING, 'prepend', False, None, 2, ), # 8
(9, TType.STRING, 'append', False, None, 2, ), # 9
)
BinaryPatch.thrift_struct_annotations = {
}
BinaryPatch.thrift_field_annotations = {
}
def BinaryPatch__init__(self, assign=None, clear=None, prepend=None, append=None,):
self.assign = assign
self.clear = clear
self.prepend = prepend
self.append = append
BinaryPatch.__init__ = BinaryPatch__init__
def BinaryPatch__setstate__(self, state):
state.setdefault('assign', None)
state.setdefault('clear', None)
state.setdefault('prepend', None)
state.setdefault('append', None)
self.__dict__ = state
BinaryPatch.__getstate__ = lambda self: self.__dict__.copy()
BinaryPatch.__setstate__ = BinaryPatch__setstate__
all_structs.append(DurationPatch)
DurationPatch.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'assign', [thrift.lib.thrift.standard.ttypes.DurationStruct, thrift.lib.thrift.standard.ttypes.DurationStruct.thrift_spec, False], None, 1, ), # 1
(2, TType.BOOL, 'clear', None, None, 2, ), # 2
None, # 3
None, # 4
None, # 5
None, # 6
None, # 7
(8, TType.STRUCT, 'add', [thrift.lib.thrift.standard.ttypes.DurationStruct, thrift.lib.thrift.standard.ttypes.DurationStruct.thrift_spec, False], None, 2, ), # 8
)
DurationPatch.thrift_struct_annotations = {
"thrift.uri": "facebook.com/thrift/type/DurationPatch",
}
DurationPatch.thrift_field_annotations = {
}
def DurationPatch__init__(self, assign=None, clear=None, add=None,):
self.assign = assign
self.clear = clear
self.add = add
DurationPatch.__init__ = DurationPatch__init__
def DurationPatch__setstate__(self, state):
state.setdefault('assign', None)
state.setdefault('clear', None)
state.setdefault('add', None)
self.__dict__ = state
DurationPatch.__getstate__ = lambda self: self.__dict__.copy()
DurationPatch.__setstate__ = DurationPatch__setstate__
all_structs.append(TimePatch)
TimePatch.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'assign', [thrift.lib.thrift.standard.ttypes.TimeStruct, thrift.lib.thrift.standard.ttypes.TimeStruct.thrift_spec, False], None, 1, ), # 1
(2, TType.BOOL, 'clear', None, None, 2, ), # 2
None, # 3
None, # 4
None, # 5
None, # 6
None, # 7
(8, TType.STRUCT, 'add', [thrift.lib.thrift.standard.ttypes.DurationStruct, thrift.lib.thrift.standard.ttypes.DurationStruct.thrift_spec, False], None, 2, ), # 8
)
TimePatch.thrift_struct_annotations = {
"thrift.uri": "facebook.com/thrift/type/TimePatch",
}
TimePatch.thrift_field_annotations = {
}
def TimePatch__init__(self, assign=None, clear=None, add=None,):
self.assign = assign
self.clear = clear
self.add = add
TimePatch.__init__ = TimePatch__init__
def TimePatch__setstate__(self, state):
state.setdefault('assign', None)
state.setdefault('clear', None)
state.setdefault('add', None)
self.__dict__ = state
TimePatch.__getstate__ = lambda self: self.__dict__.copy()
TimePatch.__setstate__ = TimePatch__setstate__
fix_spec(all_structs)
del all_structs
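# Illustrative round-trip sketch (added, not part of the generated output). It
# relies only on Thrift runtime pieces this module already references
# (TTransport.TMemoryBuffer and the plain TBinaryProtocol); treat the exact
# runtime behavior as an assumption rather than something stated in this file.
if __name__ == "__main__":
    _buf = TTransport.TMemoryBuffer()
    _out = StringPatch(append="!")
    _out.write(TBinaryProtocol.TBinaryProtocol(_buf))   # plain protocol, so the slow path is used
    _in = StringPatch()
    _in.read(TBinaryProtocol.TBinaryProtocol(TTransport.TMemoryBuffer(_buf.getvalue())))
    assert _in == _out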
|
{
"content_hash": "5cb26e527060379ef166235f6e0f5aad",
"timestamp": "",
"source": "github",
"line_count": 2329,
"max_line_length": 339,
"avg_line_length": 37.21983683984543,
"alnum_prop": 0.6691123031666378,
"repo_name": "facebook/fbthrift",
"id": "dd13c85d8a194eeb3aa5ff98514d4ca8b2e4022c",
"size": "86799",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "thrift/compiler/test/fixtures/patch/gen-py/thrift/lib/thrift/patch/ttypes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "15608"
},
{
"name": "C++",
"bytes": "10658844"
},
{
"name": "CMake",
"bytes": "147347"
},
{
"name": "CSS",
"bytes": "4028"
},
{
"name": "Cython",
"bytes": "339005"
},
{
"name": "Emacs Lisp",
"bytes": "11229"
},
{
"name": "Go",
"bytes": "447092"
},
{
"name": "Hack",
"bytes": "313122"
},
{
"name": "Java",
"bytes": "1990062"
},
{
"name": "JavaScript",
"bytes": "38872"
},
{
"name": "Mustache",
"bytes": "1269560"
},
{
"name": "Python",
"bytes": "1623026"
},
{
"name": "Ruby",
"bytes": "6111"
},
{
"name": "Rust",
"bytes": "283392"
},
{
"name": "Shell",
"bytes": "6615"
},
{
"name": "Thrift",
"bytes": "1859041"
},
{
"name": "Vim Script",
"bytes": "2887"
}
],
"symlink_target": ""
}
|
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class UpdateApplication(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the UpdateApplication Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(UpdateApplication, self).__init__(temboo_session, '/Library/Twilio/Applications/UpdateApplication')
def new_input_set(self):
return UpdateApplicationInputSet()
def _make_result_set(self, result, path):
return UpdateApplicationResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return UpdateApplicationChoreographyExecution(session, exec_id, path)
class UpdateApplicationInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the UpdateApplication
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_APIVersion(self, value):
"""
Set the value of the APIVersion input for this Choreo. ((optional, string) Requests to this application's URLs will start a new TwiML session with this API version. Either 2010-04-01 or 2008-08-01. Defaults to your account's default API version.)
"""
super(UpdateApplicationInputSet, self)._set_input('APIVersion', value)
def set_AccountSID(self, value):
"""
Set the value of the AccountSID input for this Choreo. ((required, string) The AccountSID provided when you signed up for a Twilio account.)
"""
super(UpdateApplicationInputSet, self)._set_input('AccountSID', value)
def set_ApplicationSID(self, value):
"""
Set the value of the ApplicationSID input for this Choreo. ((required, string) The id of the application to update.)
"""
super(UpdateApplicationInputSet, self)._set_input('ApplicationSID', value)
def set_AuthToken(self, value):
"""
Set the value of the AuthToken input for this Choreo. ((required, string) The authorization token provided when you signed up for a Twilio account.)
"""
super(UpdateApplicationInputSet, self)._set_input('AuthToken', value)
def set_FriendlyName(self, value):
"""
Set the value of the FriendlyName input for this Choreo. ((optional, string) A human readable description of the new application. Maximum 64 characters.)
"""
super(UpdateApplicationInputSet, self)._set_input('FriendlyName', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are: json (the default) and xml.)
"""
super(UpdateApplicationInputSet, self)._set_input('ResponseFormat', value)
def set_SmsFallbackMethod(self, value):
"""
Set the value of the SmsFallbackMethod input for this Choreo. ((optional, string) The HTTP method that should be used to request the SmsFallbackUrl. Must be either GET or POST. Defaults to POST.)
"""
super(UpdateApplicationInputSet, self)._set_input('SmsFallbackMethod', value)
def set_SmsFallbackURL(self, value):
"""
Set the value of the SmsFallbackURL input for this Choreo. ((optional, string) A URL that Twilio will request if an error occurs requesting or executing the TwiML defined by SmsUrl.)
"""
super(UpdateApplicationInputSet, self)._set_input('SmsFallbackURL', value)
def set_SmsMethod(self, value):
"""
Set the value of the SmsMethod input for this Choreo. ((optional, string) The HTTP method that should be used to request the SmsUrl. Must be either GET or POST. Defaults to POST.)
"""
super(UpdateApplicationInputSet, self)._set_input('SmsMethod', value)
def set_SmsStatusCallback(self, value):
"""
Set the value of the SmsStatusCallback input for this Choreo. ((optional, string) Twilio will make a POST request to this URL to pass status parameters (such as sent or failed) to your application.)
"""
super(UpdateApplicationInputSet, self)._set_input('SmsStatusCallback', value)
def set_SmsURL(self, value):
"""
Set the value of the SmsURL input for this Choreo. ((optional, string) The URL that Twilio should request when somebody sends an SMS to a phone number assigned to this application.)
"""
super(UpdateApplicationInputSet, self)._set_input('SmsURL', value)
def set_StatusCallbackMethod(self, value):
"""
Set the value of the StatusCallbackMethod input for this Choreo. ((optional, string) The HTTP method Twilio will use to make requests to the StatusCallback URL. Either GET or POST. Defaults to POST.)
"""
super(UpdateApplicationInputSet, self)._set_input('StatusCallbackMethod', value)
def set_StatusCallback(self, value):
"""
Set the value of the StatusCallback input for this Choreo. ((optional, string) The URL that Twilio will request to pass status parameters (such as call ended) to your application.)
"""
super(UpdateApplicationInputSet, self)._set_input('StatusCallback', value)
def set_VoiceApplicationSID(self, value):
"""
Set the value of the VoiceApplicationSID input for this Choreo. ((optional, string) The 34 character sid of the application Twilio should use to handle phone calls to this number.)
"""
super(UpdateApplicationInputSet, self)._set_input('VoiceApplicationSID', value)
def set_VoiceCallerIDLookup(self, value):
"""
Set the value of the VoiceCallerIDLookup input for this Choreo. ((optional, string) Do a lookup of a caller's name from the CNAM database and post it to your app. Either true or false. Defaults to false.)
"""
super(UpdateApplicationInputSet, self)._set_input('VoiceCallerIDLookup', value)
def set_VoiceFallbackMethod(self, value):
"""
Set the value of the VoiceFallbackMethod input for this Choreo. ((optional, string) The HTTP method that should be used to request the VoiceFallbackUrl. Either GET or POST. Defaults to POST.)
"""
super(UpdateApplicationInputSet, self)._set_input('VoiceFallbackMethod', value)
def set_VoiceFallbackURL(self, value):
"""
Set the value of the VoiceFallbackURL input for this Choreo. ((optional, string) A URL that Twilio will request if an error occurs requesting or executing the TwiML at Url.)
"""
super(UpdateApplicationInputSet, self)._set_input('VoiceFallbackURL', value)
def set_VoiceMethod(self, value):
"""
Set the value of the VoiceMethod input for this Choreo. ((optional, string) The HTTP method that should be used to request the VoiceUrl. Must be either GET or POST. Defaults to POST.)
"""
super(UpdateApplicationInputSet, self)._set_input('VoiceMethod', value)
def set_VoiceURL(self, value):
"""
Set the value of the VoiceURL input for this Choreo. ((optional, string) The URL that Twilio should request when somebody dials a phone number assigned to this application.)
"""
super(UpdateApplicationInputSet, self)._set_input('VoiceURL', value)
class UpdateApplicationResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the UpdateApplication Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Twilio.)
"""
return self._output.get('Response', None)
class UpdateApplicationChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return UpdateApplicationResultSet(response, path)
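# Illustrative usage sketch (added, not part of the Temboo-generated library).
# The SIDs and token strings are placeholders, and execute_with_results is
# assumed from the Temboo SDK's Choreography base class; verify against your
# SDK version before relying on it. This helper is not called by the module.
def _example_update_application(temboo_session):
    choreo = UpdateApplication(temboo_session)
    inputs = choreo.new_input_set()
    inputs.set_AccountSID("ACxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx")      # placeholder
    inputs.set_AuthToken("your-auth-token")                          # placeholder
    inputs.set_ApplicationSID("APxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx")  # placeholder
    inputs.set_FriendlyName("Renamed application")
    results = choreo.execute_with_results(inputs)
    return results.get_Response()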
|
{
"content_hash": "d84a1bbfd8f6aba8eaf82ca0ac3a3a70",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 254,
"avg_line_length": 56.18493150684932,
"alnum_prop": 0.6986468365232232,
"repo_name": "lupyuen/RaspberryPiImage",
"id": "b821f66a7e6cf962afae1f90e2306b63b9e1feeb",
"size": "9078",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "home/pi/GrovePi/Software/Python/others/temboo/Library/Twilio/Applications/UpdateApplication.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Arduino",
"bytes": "82308"
},
{
"name": "C",
"bytes": "3197439"
},
{
"name": "C#",
"bytes": "33056"
},
{
"name": "C++",
"bytes": "1020255"
},
{
"name": "CSS",
"bytes": "208338"
},
{
"name": "CoffeeScript",
"bytes": "87200"
},
{
"name": "Eagle",
"bytes": "1632170"
},
{
"name": "Go",
"bytes": "3646"
},
{
"name": "Groff",
"bytes": "286691"
},
{
"name": "HTML",
"bytes": "41527"
},
{
"name": "JavaScript",
"bytes": "403603"
},
{
"name": "Makefile",
"bytes": "33808"
},
{
"name": "Objective-C",
"bytes": "69457"
},
{
"name": "Perl",
"bytes": "96047"
},
{
"name": "Processing",
"bytes": "1304"
},
{
"name": "Python",
"bytes": "13358098"
},
{
"name": "Shell",
"bytes": "68795"
},
{
"name": "TeX",
"bytes": "4317"
}
],
"symlink_target": ""
}
|