import os

import pytest


def assert_true(condition):
    __tracebackhide__ = True
    assert condition


def assert_false(condition):
    __tracebackhide__ = True
    assert not condition


def assert_raises(expected_exception, *args, **kwargs):
    __tracebackhide__ = True
    return pytest.raises(expected_exception, *args, **kwargs)


def assert_equal(first, second):
    __tracebackhide__ = True
    assert first == second


def assert_not_equal(first, second):
    __tracebackhide__ = True
    assert first != second


@pytest.fixture
def in_tmp_path(tmp_path):
    cwd = os.getcwd()
    os.chdir(tmp_path)
    yield tmp_path
    os.chdir(cwd)
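
# A minimal sketch of how these helpers read in a test module (the test names
# are hypothetical; pytest picks up the fixture because it is imported by name):
#
# from delocate.tests.pytest_tools import assert_equal, assert_raises, in_tmp_path  # noqa: F401
#
# def test_write_in_tmp_dir(in_tmp_path):
#     # in_tmp_path has already chdir'd into pytest's tmp_path
#     with open("scratch.txt", "w") as fobj:
#         fobj.write("hello")
#     with open("scratch.txt") as fobj:
#         assert_equal(fobj.read(), "hello")
#
# def test_missing_file():
#     with assert_raises(FileNotFoundError):
#         open("no-such-file")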
|
{
"content_hash": "661c78e243830543fce1696ab487323d",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 61,
"avg_line_length": 18.11111111111111,
"alnum_prop": 0.6687116564417178,
"repo_name": "matthew-brett/delocate",
"id": "ec2d3175faa25037a1375c035ff9f841a228b703",
"size": "652",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "delocate/tests/pytest_tools.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "2768"
},
{
"name": "Makefile",
"bytes": "227"
},
{
"name": "Python",
"bytes": "369715"
},
{
"name": "Shell",
"bytes": "8987"
}
],
"symlink_target": ""
}
|
class FormData(dict):
    ''' A dict that can be built from a form or formset instance, and will fill
    itself with request.POST-like data, allowing easier testing of form submissions.
    See forms_inline.TestTranslationsInline for example uses.
    '''

    def __init__(self, form_or_set):
        if hasattr(form_or_set, 'forms'):
            # It is a formset
            self.update(FormData(form_or_set.management_form))
            for form in form_or_set:
                self.update(FormData(form))
        else:
            # It is a form
            for field in form_or_set:
                value = field.value()
                initial = form_or_set.initial.get(field.name, field.field.initial)
                if value is not None:
                    self[field.html_name] = value
                if initial is not None:
                    self[field.html_initial_name] = initial

    def set_form_field(self, form, name, value):
        key = form[name].html_name
        if value is None:
            self.pop(key, None)
        else:
            self[key] = value

    def set_formset_field(self, formset, index, name, value):
        key = formset[index][name].html_name
        if value is None:
            self.pop(key, None)
        else:
            self[key] = value
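
# A short usage sketch inside a hypothetical Django TestCase; the form class,
# field name and URL are invented for illustration:
#
# form = ArticleForm(instance=article)
# data = FormData(form)                          # POST-like dict from the bound form
# data.set_form_field(form, 'title', 'Updated')  # tweak one submitted field
# response = self.client.post('/articles/edit/', data)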
|
{
"content_hash": "ed969fdffdf115c9f0cd2d6b3b494d72",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 84,
"avg_line_length": 37.02857142857143,
"alnum_prop": 0.5547839506172839,
"repo_name": "philippeowagner/django-hvad",
"id": "de822b6d7d12410fc0f1b7f312df7960476ef69b",
"size": "1297",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "hvad/test_utils/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "13766"
},
{
"name": "Python",
"bytes": "445037"
}
],
"symlink_target": ""
}
|
import random
import math

import numpy


def gen_toy_data(data_dim=500, set_size=100, freq_range=[20, 40], phase_range=[20, 40], amplitude_range=[10, 50], delay=5, input_noise=0.0, target_noise=0.0):
    # generates toy wavy data
    # data_dim is the number of points per wave
    # set_size is the number of waves
    # the target is delayed by delay
    # sets start empty
    input_set = []
    target_set = []
    # set range extrema
    [min_freq, max_freq] = freq_range
    [min_phase, max_phase] = phase_range
    [min_amplitude, max_amplitude] = amplitude_range
    # generate set_size signals
    for nb in range(set_size):
        input_wave = []
        target_wave = []
        # pick random freq, phase and amplitude
        freq1 = random.randint(min_freq, max_freq)
        phase1 = random.randint(min_phase, max_phase)
        amplitude1 = random.randint(min_amplitude, max_amplitude)
        # amplitude1 = 1.0
        freq2 = random.randint(min_freq, max_freq)
        phase2 = random.randint(0, freq2)
        amplitude2 = random.random()
        # test: remove randomness
        # freq1 = 25
        # phase1 = 0
        # amplitude1 = 10
        # generate a signal of data_dim points
        for i in range(data_dim):
            # generate data point with input noise
            noise = input_noise * (2.0 * random.random() - 1.0)
            point1 = noise + amplitude1 * math.sin(2.0 * math.pi * (i + phase1) / freq1)
            point2 = noise + amplitude2 * math.sin(2.0 * math.pi * (i + phase2) / freq2)
            # add to input_wave
            input_wave.append(numpy.array([point1]))
            # input_wave.append([point1, point2])
            # test: make inputs different sizes
            # if random.random() < 0.5:
            #     input_wave.append(numpy.array([point1]))
            # else:
            #     input_wave.append(numpy.array([point1, point1]))
            # generate target point, delayed
            if i < delay:
                target1 = 0
                target2 = 0
            else:
                # generate target noise
                noise = target_noise * (2.0 * random.random() - 1.0)
                target1 = noise + amplitude1 * math.sin(2.0 * math.pi * (i + phase1 - delay) / freq1)
                target2 = noise + amplitude2 * math.sin(2.0 * math.pi * (i + phase2 - delay) / freq2)
            # add to target_wave
            # target_wave.append([target1, target2])
            target_wave.append(numpy.array([target1]))
        # add signals to data sets
        input_set.append(numpy.array(input_wave))
        target_set.append(numpy.array(target_wave))
    return numpy.array([input_set, target_set])
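
# A quick sanity check of the generator; the sizes below are just illustrative.
# The return value stacks to shape (2, set_size, data_dim, 1): inputs, then targets.
#
# data = gen_toy_data(data_dim=100, set_size=5, delay=3)
# inputs, targets = data[0], data[1]
# print(inputs.shape, targets.shape)  # (5, 100, 1) (5, 100, 1)
# # Each target wave is its input wave shifted right by `delay` samples
# # (zero-padded for the first `delay` points).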
|
{
"content_hash": "0316910f2981e558a097a52fed0d7f67",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 155,
"avg_line_length": 33.50617283950617,
"alnum_prop": 0.5644804716285925,
"repo_name": "grezesf/Research",
"id": "1cbf1af4deabbeb7c886c395132643e4f0264a86",
"size": "2732",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Reservoirs/Task1_Toy_Examples/lib_task1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1537965"
},
{
"name": "PLSQL",
"bytes": "2457"
},
{
"name": "PostScript",
"bytes": "123584"
},
{
"name": "Python",
"bytes": "62253"
},
{
"name": "Shell",
"bytes": "2500"
},
{
"name": "TeX",
"bytes": "284079"
}
],
"symlink_target": ""
}
|
from fontTools.misc.textTools import bytechr, byteord, bytesjoin, tobytes, tostr
from fontTools.misc import eexec
from .psOperators import (
    PSOperators,
    ps_StandardEncoding,
    ps_array,
    ps_boolean,
    ps_dict,
    ps_integer,
    ps_literal,
    ps_mark,
    ps_name,
    ps_operator,
    ps_procedure,
    ps_procmark,
    ps_real,
    ps_string,
)
import re
from collections.abc import Callable
from string import whitespace
import logging


log = logging.getLogger(__name__)

ps_special = b'()<>[]{}%'  # / is one too, but we take care of that one differently

skipwhiteRE = re.compile(bytesjoin([b"[", whitespace, b"]*"]))
endofthingPat = bytesjoin([b"[^][(){}<>/%", whitespace, b"]*"])
endofthingRE = re.compile(endofthingPat)
commentRE = re.compile(b"%[^\n\r]*")
# XXX This is not entirely correct, as it doesn't allow *nested* embedded parens:
stringPat = br"""
    \(
        (
            (
                [^()]* \ [()]
            )
            |
            (
                [^()]* \( [^()]* \)
            )
        )*
        [^()]*
    \)
"""
stringPat = b"".join(stringPat.split())
stringRE = re.compile(stringPat)

hexstringRE = re.compile(bytesjoin([b"<[", whitespace, b"0-9A-Fa-f]*>"]))
class PSTokenError(Exception): pass
class PSError(Exception): pass
class PSTokenizer(object):

    def __init__(self, buf=b'', encoding="ascii"):
        # Force self.buf to be a byte string
        buf = tobytes(buf)
        self.buf = buf
        self.len = len(buf)
        self.pos = 0
        self.closed = False
        self.encoding = encoding

    def read(self, n=-1):
        """Read at most 'n' bytes from the buffer, or less if the read
        hits EOF before obtaining 'n' bytes.
        If 'n' is negative or omitted, read all data until EOF is reached.
        """
        if self.closed:
            raise ValueError("I/O operation on closed file")
        if n is None or n < 0:
            newpos = self.len
        else:
            newpos = min(self.pos + n, self.len)
        r = self.buf[self.pos:newpos]
        self.pos = newpos
        return r

    def close(self):
        if not self.closed:
            self.closed = True
            del self.buf, self.pos

    def getnexttoken(self,
                     # localize some stuff, for performance
                     len=len,
                     ps_special=ps_special,
                     stringmatch=stringRE.match,
                     hexstringmatch=hexstringRE.match,
                     commentmatch=commentRE.match,
                     endmatch=endofthingRE.match):
        self.skipwhite()
        if self.pos >= self.len:
            return None, None
        pos = self.pos
        buf = self.buf
        char = bytechr(byteord(buf[pos]))
        if char in ps_special:
            if char in b'{}[]':
                tokentype = 'do_special'
                token = char
            elif char == b'%':
                tokentype = 'do_comment'
                _, nextpos = commentmatch(buf, pos).span()
                token = buf[pos:nextpos]
            elif char == b'(':
                tokentype = 'do_string'
                m = stringmatch(buf, pos)
                if m is None:
                    raise PSTokenError('bad string at character %d' % pos)
                _, nextpos = m.span()
                token = buf[pos:nextpos]
            elif char == b'<':
                tokentype = 'do_hexstring'
                m = hexstringmatch(buf, pos)
                if m is None:
                    raise PSTokenError('bad hexstring at character %d' % pos)
                _, nextpos = m.span()
                token = buf[pos:nextpos]
            else:
                raise PSTokenError('bad token at character %d' % pos)
        else:
            if char == b'/':
                tokentype = 'do_literal'
                m = endmatch(buf, pos + 1)
            else:
                tokentype = ''
                m = endmatch(buf, pos)
            if m is None:
                raise PSTokenError('bad token at character %d' % pos)
            _, nextpos = m.span()
            token = buf[pos:nextpos]
        self.pos = pos + len(token)
        token = tostr(token, encoding=self.encoding)
        return tokentype, token

    def skipwhite(self, whitematch=skipwhiteRE.match):
        _, nextpos = whitematch(self.buf, self.pos).span()
        self.pos = nextpos

    def starteexec(self):
        self.pos = self.pos + 1
        self.dirtybuf = self.buf[self.pos:]
        self.buf, R = eexec.decrypt(self.dirtybuf, 55665)
        self.len = len(self.buf)
        self.pos = 4

    def stopeexec(self):
        if not hasattr(self, 'dirtybuf'):
            return
        self.buf = self.dirtybuf
        del self.dirtybuf
class PSInterpreter(PSOperators):

    def __init__(self, encoding="ascii"):
        systemdict = {}
        userdict = {}
        self.encoding = encoding
        self.dictstack = [systemdict, userdict]
        self.stack = []
        self.proclevel = 0
        self.procmark = ps_procmark()
        self.fillsystemdict()

    def fillsystemdict(self):
        systemdict = self.dictstack[0]
        systemdict['['] = systemdict['mark'] = self.mark = ps_mark()
        systemdict[']'] = ps_operator(']', self.do_makearray)
        systemdict['true'] = ps_boolean(1)
        systemdict['false'] = ps_boolean(0)
        systemdict['StandardEncoding'] = ps_array(ps_StandardEncoding)
        systemdict['FontDirectory'] = ps_dict({})
        self.suckoperators(systemdict, self.__class__)

    def suckoperators(self, systemdict, klass):
        for name in dir(klass):
            attr = getattr(self, name)
            if isinstance(attr, Callable) and name[:3] == 'ps_':
                name = name[3:]
                systemdict[name] = ps_operator(name, attr)
        for baseclass in klass.__bases__:
            self.suckoperators(systemdict, baseclass)

    def interpret(self, data, getattr=getattr):
        tokenizer = self.tokenizer = PSTokenizer(data, self.encoding)
        getnexttoken = tokenizer.getnexttoken
        do_token = self.do_token
        handle_object = self.handle_object
        try:
            while 1:
                tokentype, token = getnexttoken()
                if not token:
                    break
                if tokentype:
                    handler = getattr(self, tokentype)
                    object = handler(token)
                else:
                    object = do_token(token)
                if object is not None:
                    handle_object(object)
            tokenizer.close()
            self.tokenizer = None
        except:
            if self.tokenizer is not None:
                log.debug(
                    'ps error:\n'
                    '- - - - - - -\n'
                    '%s\n'
                    '>>>\n'
                    '%s\n'
                    '- - - - - - -',
                    self.tokenizer.buf[self.tokenizer.pos-50:self.tokenizer.pos],
                    self.tokenizer.buf[self.tokenizer.pos:self.tokenizer.pos+50])
            raise

    def handle_object(self, object):
        if not (self.proclevel or object.literal or object.type == 'proceduretype'):
            if object.type != 'operatortype':
                object = self.resolve_name(object.value)
            if object.literal:
                self.push(object)
            else:
                if object.type == 'proceduretype':
                    self.call_procedure(object)
                else:
                    object.function()
        else:
            self.push(object)

    def call_procedure(self, proc):
        handle_object = self.handle_object
        for item in proc.value:
            handle_object(item)

    def resolve_name(self, name):
        dictstack = self.dictstack
        for i in range(len(dictstack)-1, -1, -1):
            if name in dictstack[i]:
                return dictstack[i][name]
        raise PSError('name error: ' + str(name))

    def do_token(self, token,
                 int=int,
                 float=float,
                 ps_name=ps_name,
                 ps_integer=ps_integer,
                 ps_real=ps_real):
        try:
            num = int(token)
        except (ValueError, OverflowError):
            try:
                num = float(token)
            except (ValueError, OverflowError):
                if '#' in token:
                    hashpos = token.find('#')
                    try:
                        base = int(token[:hashpos])
                        num = int(token[hashpos+1:], base)
                    except (ValueError, OverflowError):
                        return ps_name(token)
                    else:
                        return ps_integer(num)
                else:
                    return ps_name(token)
            else:
                return ps_real(num)
        else:
            return ps_integer(num)

    def do_comment(self, token):
        pass

    def do_literal(self, token):
        return ps_literal(token[1:])

    def do_string(self, token):
        return ps_string(token[1:-1])

    def do_hexstring(self, token):
        hexStr = "".join(token[1:-1].split())
        if len(hexStr) % 2:
            hexStr = hexStr + '0'
        cleanstr = []
        for i in range(0, len(hexStr), 2):
            cleanstr.append(chr(int(hexStr[i:i+2], 16)))
        cleanstr = "".join(cleanstr)
        return ps_string(cleanstr)

    def do_special(self, token):
        if token == '{':
            self.proclevel = self.proclevel + 1
            return self.procmark
        elif token == '}':
            proc = []
            while 1:
                topobject = self.pop()
                if topobject == self.procmark:
                    break
                proc.append(topobject)
            self.proclevel = self.proclevel - 1
            proc.reverse()
            return ps_procedure(proc)
        elif token == '[':
            return self.mark
        elif token == ']':
            return ps_name(']')
        else:
            raise PSTokenError('huh?')

    def push(self, object):
        self.stack.append(object)

    def pop(self, *types):
        stack = self.stack
        if not stack:
            raise PSError('stack underflow')
        object = stack[-1]
        if types:
            if object.type not in types:
                raise PSError('typecheck, expected %s, found %s' % (repr(types), object.type))
        del stack[-1]
        return object

    def do_makearray(self):
        array = []
        while 1:
            topobject = self.pop()
            if topobject == self.mark:
                break
            array.append(topobject)
        array.reverse()
        self.push(ps_array(array))

    def close(self):
        """Remove circular references."""
        del self.stack
        del self.dictstack
def unpack_item(item):
    tp = type(item.value)
    if tp == dict:
        newitem = {}
        for key, value in item.value.items():
            newitem[key] = unpack_item(value)
    elif tp == list:
        newitem = [None] * len(item.value)
        for i in range(len(item.value)):
            newitem[i] = unpack_item(item.value[i])
        if item.type == 'proceduretype':
            newitem = tuple(newitem)
    else:
        newitem = item.value
    return newitem


def suckfont(data, encoding="ascii"):
    m = re.search(br"/FontName\s+/([^ \t\n\r]+)\s+def", data)
    if m:
        fontName = m.group(1)
        fontName = fontName.decode()
    else:
        fontName = None
    interpreter = PSInterpreter(encoding=encoding)
    interpreter.interpret(b"/Helvetica 4 dict dup /Encoding StandardEncoding put definefont pop")
    interpreter.interpret(data)
    fontdir = interpreter.dictstack[0]['FontDirectory'].value
    if fontName in fontdir:
        rawfont = fontdir[fontName]
    else:
        # fall back, in case fontName wasn't found
        fontNames = list(fontdir.keys())
        if len(fontNames) > 1:
            fontNames.remove("Helvetica")
        fontNames.sort()
        rawfont = fontdir[fontNames[0]]
    interpreter.close()
    return unpack_item(rawfont)
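
# A minimal sketch of driving this parser by hand on a Type 1 font program
# already read into memory; the file name is hypothetical, and real callers
# (e.g. fontTools.t1Lib) take care of PFB unwrapping before reaching this code.
#
# with open("MyFont.pfa", "rb") as f:
#     data = f.read()
# font = suckfont(data)           # the font as plain Python dicts/lists/tuples
# print(sorted(font.keys()))      # e.g. Encoding, FontMatrix, FontName, ...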
|
{
"content_hash": "fee8b1fd7ebf0b80e6e61bb29076f522",
"timestamp": "",
"source": "github",
"line_count": 385,
"max_line_length": 94,
"avg_line_length": 24.945454545454545,
"alnum_prop": 0.6487921699291962,
"repo_name": "fonttools/fonttools",
"id": "a6c8b8b5ac062c0754f75b9c32e8a661bbfffea0",
"size": "9604",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "Lib/fontTools/misc/psLib.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "3522"
},
{
"name": "Makefile",
"bytes": "352"
},
{
"name": "Python",
"bytes": "5442538"
}
],
"symlink_target": ""
}
|
import jsonschema
import mock
from rally.plugins.common.runners import rps
from rally.task import runner
from tests.unit import fakes
from tests.unit import test
RUNNERS_BASE = "rally.task.runner."
RUNNERS = "rally.plugins.common.runners."
class RPSScenarioRunnerTestCase(test.TestCase):
    def setUp(self):
        super(RPSScenarioRunnerTestCase, self).setUp()
        self.task = mock.MagicMock()

    def test_validate(self):
        config = {
            "type": "rps",
            "times": 1,
            "rps": 100,
            "max_concurrency": 50,
            "max_cpu_count": 8,
            "timeout": 1
        }
        rps.RPSScenarioRunner.validate(config)

    def test_rps_parameter_validate(self):
        config = {
            "type": "rps",
            "rps": 0.0000001
        }
        rps.RPSScenarioRunner.validate(config)

    def test_rps_parameter_validate_failed(self):
        config = {
            "type": "rps",
            "rps": 0
        }
        self.assertRaises(jsonschema.ValidationError,
                          rps.RPSScenarioRunner.validate, config)

    def test_validate_failed(self):
        config = {"type": "rps", "a": 10}
        self.assertRaises(jsonschema.ValidationError,
                          rps.RPSScenarioRunner.validate, config)
    @mock.patch(RUNNERS + "rps.LOG")
    @mock.patch(RUNNERS + "rps.time")
    @mock.patch(RUNNERS + "rps.threading.Thread")
    @mock.patch(RUNNERS + "rps.multiprocessing.Queue")
    @mock.patch(RUNNERS + "rps.runner")
    def test__worker_process(self, mock_runner, mock_queue, mock_thread,
                             mock_time, mock_log):
        def time_side():
            time_side.last += 0.03
            time_side.count += 1
            return time_side.last
        time_side.last = 0
        time_side.count = 0

        mock_time.time = time_side
        mock_thread_instance = mock.MagicMock(
            isAlive=mock.MagicMock(return_value=False))
        mock_thread.return_value = mock_thread_instance
        mock_event = mock.MagicMock(
            is_set=mock.MagicMock(return_value=False))

        times = 4
        max_concurrent = 3
        fake_ram_int = iter(range(10))
        context = {"users": [{"tenant_id": "t1", "credential": "c1",
                              "id": "uuid1"}]}
        info = {"processes_to_start": 1, "processes_counter": 1}

        rps._worker_process(mock_queue, fake_ram_int, 1, 10, times,
                            max_concurrent, context, "Dummy", "dummy",
                            (), mock_event, info)

        self.assertEqual(times, mock_log.debug.call_count)
        self.assertEqual(times + 1, mock_thread.call_count)
        self.assertEqual(times + 1, mock_thread_instance.start.call_count)
        self.assertEqual(times + 1, mock_thread_instance.join.call_count)
        # NOTE(rvasilets): `times` + 1 here because `times` is the number of
        # scenario repetitions, and one more is needed for the
        # "initialization" stage of the thread machinery.
        self.assertEqual(1, mock_time.sleep.call_count)
        self.assertEqual(2, mock_thread_instance.isAlive.call_count)
        self.assertEqual(times * 4 - 1, mock_time.time.count)

        self.assertEqual(times, mock_runner._get_scenario_context.call_count)

        for i in range(times):
            scenario_context = mock_runner._get_scenario_context(context)
            call = mock.call(args=(mock_queue,
                                   (i, "Dummy", "dummy",
                                    scenario_context, ())),
                             target=mock_runner._worker_thread)
            self.assertIn(call, mock_thread.mock_calls)
    @mock.patch(RUNNERS + "rps.runner._run_scenario_once")
    def test__worker_thread(self, mock__run_scenario_once):
        mock_queue = mock.MagicMock()
        args = ("some_args",)

        runner._worker_thread(mock_queue, args)

        self.assertEqual(1, mock_queue.put.call_count)

        expected_calls = [mock.call(("some_args",))]
        self.assertEqual(expected_calls, mock__run_scenario_once.mock_calls)

    @mock.patch(RUNNERS + "rps.time.sleep")
    def test__run_scenario(self, mock_sleep):
        config = {"times": 20, "rps": 20, "timeout": 5, "max_concurrency": 15}
        runner_obj = rps.RPSScenarioRunner(self.task, config)

        runner_obj._run_scenario(fakes.FakeScenario, "do_it",
                                 fakes.FakeContext({}).context, {})

        self.assertEqual(len(runner_obj.result_queue), config["times"])

        for result_batch in runner_obj.result_queue:
            for result in result_batch:
                self.assertIsNotNone(runner.ScenarioRunnerResult(result))

    @mock.patch(RUNNERS + "rps.time.sleep")
    def test__run_scenario_exception(self, mock_sleep):
        config = {"times": 4, "rps": 10}
        runner_obj = rps.RPSScenarioRunner(self.task, config)

        runner_obj._run_scenario(fakes.FakeScenario, "something_went_wrong",
                                 fakes.FakeContext({}).context, {})

        self.assertEqual(len(runner_obj.result_queue), config["times"])

        for result_batch in runner_obj.result_queue:
            for result in result_batch:
                self.assertIsNotNone(runner.ScenarioRunnerResult(result))

    @mock.patch(RUNNERS + "rps.time.sleep")
    def test__run_scenario_aborted(self, mock_sleep):
        config = {"times": 20, "rps": 20, "timeout": 5}
        runner_obj = rps.RPSScenarioRunner(self.task, config)

        runner_obj.abort()
        runner_obj._run_scenario(fakes.FakeScenario, "do_it",
                                 fakes.FakeUser().context, {})

        self.assertEqual(len(runner_obj.result_queue), 0)

        for result in runner_obj.result_queue:
            self.assertIsNotNone(runner.ScenarioRunnerResult(result))
@mock.patch(RUNNERS + "constant.multiprocessing.Queue")
@mock.patch(RUNNERS + "rps.multiprocessing.cpu_count")
@mock.patch(RUNNERS + "rps.RPSScenarioRunner._log_debug_info")
@mock.patch(RUNNERS +
"rps.RPSScenarioRunner._create_process_pool")
@mock.patch(RUNNERS + "rps.RPSScenarioRunner._join_processes")
def test_that_cpu_count_is_adjusted_properly(
self, mock__join_processes, mock__create_process_pool,
mock__log_debug_info, mock_cpu_count, mock_queue):
samples = [
{
"input": {"times": 20, "rps": 20, "max_concurrency": 10,
"max_cpu_count": 1},
"real_cpu": 2,
"expected": {
# max_cpu_used equals to min(max_cpu_count, real_cpu)
"max_cpu_used": 1,
# processes_to_start equals to
# min(max_cpu_used, times, max_concurrency))
"processes_to_start": 1,
"rps_per_worker": 20,
"times_per_worker": 20,
"times_overhead": 0,
"concurrency_per_worker": 10,
"concurrency_overhead": 0
}
},
{
"input": {"times": 20, "rps": 9, "max_concurrency": 5,
"max_cpu_count": 3},
"real_cpu": 4,
"expected": {
"max_cpu_used": 3,
"processes_to_start": 3,
"rps_per_worker": 3,
"times_per_worker": 6,
"times_overhead": 2,
"concurrency_per_worker": 1,
"concurrency_overhead": 2
}
},
{
"input": {"times": 10, "rps": 20, "max_concurrency": 12,
"max_cpu_count": 20},
"real_cpu": 20,
"expected": {
"max_cpu_used": 20,
"processes_to_start": 10,
"rps_per_worker": 2,
"times_per_worker": 1,
"times_overhead": 0,
"concurrency_per_worker": 1,
"concurrency_overhead": 2
}
},
{
"input": {"times": 20, "rps": 20, "max_concurrency": 10,
"max_cpu_count": 20},
"real_cpu": 20,
"expected": {
"max_cpu_used": 20,
"processes_to_start": 10,
"rps_per_worker": 2,
"times_per_worker": 2,
"times_overhead": 0,
"concurrency_per_worker": 1,
"concurrency_overhead": 0
}
}
]
for sample in samples:
mock__log_debug_info.reset_mock()
mock_cpu_count.reset_mock()
mock__create_process_pool.reset_mock()
mock__join_processes.reset_mock()
mock_queue.reset_mock()
mock_cpu_count.return_value = sample["real_cpu"]
runner_obj = rps.RPSScenarioRunner(self.task, sample["input"])
runner_obj._run_scenario(fakes.FakeScenario, "do_it",
fakes.FakeUser().context, {})
mock_cpu_count.assert_called_once_with()
mock__log_debug_info.assert_called_once_with(
times=sample["input"]["times"],
timeout=0,
max_cpu_used=sample["expected"]["max_cpu_used"],
processes_to_start=sample["expected"]["processes_to_start"],
rps_per_worker=sample["expected"]["rps_per_worker"],
times_per_worker=sample["expected"]["times_per_worker"],
times_overhead=sample["expected"]["times_overhead"],
concurrency_per_worker=(
sample["expected"]["concurrency_per_worker"]),
concurrency_overhead=(
sample["expected"]["concurrency_overhead"]))
args, kwargs = mock__create_process_pool.call_args
self.assertIn(sample["expected"]["processes_to_start"], args)
self.assertIn(rps._worker_process, args)
mock__join_processes.assert_called_once_with(
mock__create_process_pool.return_value,
mock_queue.return_value)
def test_abort(self):
config = {"times": 4, "rps": 10}
runner_obj = rps.RPSScenarioRunner(self.task, config)
self.assertFalse(runner_obj.aborted.is_set())
runner_obj.abort()
self.assertTrue(runner_obj.aborted.is_set())
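
# For reference, a minimal sketch of exercising the schema validation these
# tests cover, outside the test suite (the config values are examples):
#
# import jsonschema
# from rally.plugins.common.runners import rps
#
# config = {"type": "rps", "times": 10, "rps": 5}
# try:
#     rps.RPSScenarioRunner.validate(config)
#     print("config accepted")
# except jsonschema.ValidationError as exc:
#     print("config rejected:", exc)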
|
{
"content_hash": "fabdbfaa1dcb7a63e8c54a99325b57b5",
"timestamp": "",
"source": "github",
"line_count": 275,
"max_line_length": 78,
"avg_line_length": 38.589090909090906,
"alnum_prop": 0.531379570297776,
"repo_name": "amit0701/rally",
"id": "ffdb9f5a3b165b20df0ffa0736f187bbc107a235",
"size": "11242",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/plugins/common/runners/test_rps.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "41314"
},
{
"name": "Mako",
"bytes": "17949"
},
{
"name": "Python",
"bytes": "3136918"
},
{
"name": "Shell",
"bytes": "39567"
}
],
"symlink_target": ""
}
|
from datetime import datetime
from botplugin import BotPluginInterface
COMMANDS = {'/uptime': 'Time this bot is up and running'}
class BotPlugin(BotPluginInterface):
    startTime = 0

    def __init__(self, logger):
        BotPluginInterface.__init__(self)
        self.commands = COMMANDS
        self.startTime = datetime.now().replace(microsecond=0)

    def getname(self):
        return "Uptime tracker"

    def getdescription(self, command=""):
        if command:
            return self.commands[command]
        return "Time this bot is up and running"

    def getcommands(self):
        return self.commands

    def reply(self, message):
        uptime = datetime.now().replace(microsecond=0) - self.startTime
        return str(uptime)
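
# A small sketch of poking the plugin directly, assuming botplugin is
# importable; the logger argument is accepted but unused by this plugin:
#
# import logging
#
# plugin = BotPlugin(logging.getLogger("uptime"))
# print(plugin.getname())         # Uptime tracker
# print(plugin.reply("/uptime"))  # e.g. "0:00:05"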
|
{
"content_hash": "1ffb7f4fca3d35ba6be162229c27d946",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 71,
"avg_line_length": 26.928571428571427,
"alnum_prop": 0.656498673740053,
"repo_name": "artemy/telegrambot",
"id": "7f6cf01949d130f04cabd92e17827ee927f1be9a",
"size": "754",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/uptime/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13783"
},
{
"name": "Shell",
"bytes": "1593"
}
],
"symlink_target": ""
}
|
from datetime import datetime
from sqlalchemy import create_engine
from sqlalchemy.schema import Column
from sqlalchemy.orm import sessionmaker
from sqlalchemy.types import String, DateTime, Integer, Boolean
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Mail(Base):
    __tablename__ = 'mails'

    _id = Column(Integer, primary_key = True)
    _To = Column(String(50))
    _From = Column(String(50))
    _Content = Column(String(200))
    _Datetime = Column(DateTime)
    _Status = Column(Boolean)

    def __init__(self, To, From, Content, Datetime, Status):
        self._To = To
        self._From = From
        self._Content = Content
        self._Datetime = Datetime
        self._Status = Status

    def __repr__(self):
        return "<Mail(_To = '%s', _From = '%s', _Content = '%s', _Datetime = '%r', _Status = '%r')>"\
            % (self._To, self._From, self._Content, self._Datetime, self._Status)


class SQLAlchemyUtils(object):
    def __init__(self, dialect = "mysql", driver = "mysqldb", username = "root",
                 password = "wjy", host = "localhost", dbName = "mailDB"):
        self._dbName = dbName
        self._url = '%s+%s://%s:%s@%s/' % (dialect, driver, username, password, host)

    def database_exists(self):
        engine = create_engine(self._url)
        text = ("SELECT SCHEMA_NAME FROM INFORMATION_SCHEMA.SCHEMATA "
                "WHERE SCHEMA_NAME = '%s'" % self._dbName)
        return bool(engine.execute(text).scalar())

    def init_database(self):
        if self.database_exists() is False:
            query = "CREATE DATABASE " + self._dbName
            create_engine(self._url, echo = False).execute(query)
        url = self._url + self._dbName
        engine = create_engine(url, echo = False)
        Base.metadata.create_all(engine)
        Session = sessionmaker(bind = engine)
        self._session = Session()

    def insert_database(self, entry):
        self._session.add(entry)
        self._session.commit()


#################
# USAGE EXAMPLE #
#################
if __name__ == '__main__':
    db = SQLAlchemyUtils(dbName = "Maildb")
    db.init_database()
    entry = Mail(To = 'jianywan@126.com', From = 'jianywan@gmail.com',
                 Content = 'http://docs.sqlalchemy.org/en/rel_1_0/_modules/examples/adjacency_list/adjacency_list.html', Datetime = datetime.today(), Status = True)
    db.insert_database(entry)
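
    # A hypothetical follow-up sketch: reading back through the same session,
    # using the 1.x-style query API that matches the engine usage above.
    #
    # mails = db._session.query(Mail).filter(Mail._To == 'jianywan@126.com').all()
    # for mail in mails:
    #     print(mail)  # rendered via Mail.__repr__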
|
{
"content_hash": "4b6f54fb44ff802807b21619669a63ee",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 152,
"avg_line_length": 33.298507462686565,
"alnum_prop": 0.6629314208874943,
"repo_name": "Wangjianyong/mailer-beebee",
"id": "45084c69de49e416bab60e9270092f0365bd91b8",
"size": "2231",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/mysql.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "8677"
},
{
"name": "Python",
"bytes": "48617"
}
],
"symlink_target": ""
}
|
from logging import getLogger
from multiprocessing import Queue
from .log_configures import configure_logger
def log_listener_process(queue: Queue, log_level: int, echo: bool, file_log: bool, file_path: str, prefix: str):
    """
    Configure the logger for the log-listener process, then pull log records
    from the Queue and handle them.

    Args:
        queue: multiprocessing.Queue object
        log_level: Set log level. 1 == DEBUG, 2 == INFO, 3 == WARNING
        echo: flag to also write logs to stdout
        file_log: enable recording to a log file
        file_path: path of the directory that holds the log files
        prefix: prefix for the log file names
    """
    path = file_path if file_path else "log"
    configure_logger(log_level, echo, file_log=file_log, file_path=path, prefix=prefix)
    while True:
        try:
            record = queue.get()
            if record is None:
                break
            logger = getLogger(record.name)
            logger.handle(record)
        except Exception as e:
            import sys
            import traceback
            print(e.args, file=sys.stderr)
            traceback.print_exc(file=sys.stderr)
    print("[End ListenerProcess]")
|
{
"content_hash": "bf9522c20587755b523aca6c0ddd4270",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 112,
"avg_line_length": 34.03030303030303,
"alnum_prop": 0.6268922528940338,
"repo_name": "pddg/qkouserver",
"id": "c48a740cd98babf19044efe1499880cc780af724",
"size": "1243",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "log_modules/log_process.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "87540"
},
{
"name": "Shell",
"bytes": "3826"
}
],
"symlink_target": ""
}
|
"""
Copyright 2012 Ali Ok (aliokATapacheDOTorg)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from trnltk.morphology.model.lexeme import SecondarySyntacticCategory, SyntacticCategory
from trnltk.morphology.contextless.parser.suffixapplier import *
from trnltk.morphology.model.morpheme import SuffixForm
from trnltk.morphology.model.morphemecontainer import MorphemeContainer
class PredefinedPaths(object):
    def __init__(self, root_map, suffix_graph):
        self._root_map = root_map
        self._suffix_graph = suffix_graph
        self._morpheme_container_map = {}

    def _find_root(self, root_str, syntactic_category, secondary_syntactic_category):
        if self._root_map.has_key(root_str):
            roots_for_root_str = self._root_map[root_str]
            for root in roots_for_root_str:
                if root.lexeme.syntactic_category == syntactic_category and root.lexeme.secondary_syntactic_category == secondary_syntactic_category:
                    return root
        raise Exception(u'Unable to find _root {}+{}+{}'.format(root_str, syntactic_category, secondary_syntactic_category))

    def _add_transition(self, morpheme_container, suffix_form_application_str, suffix, to_state, whole_word):
        suffix_form = SuffixForm(suffix_form_application_str)
        suffix_form.suffix = suffix
        new_morpheme_container = try_suffix_form(morpheme_container, suffix_form, to_state, whole_word)
        if not new_morpheme_container:
            raise Exception('Unable to add transition {} {} {} {} {}'.format(morpheme_container, suffix_form_application_str, suffix, to_state, whole_word))
        return new_morpheme_container

    def _find_to_state(self, state, suffix):
        for (out_suffix, out_state) in state.outputs:
            if out_suffix == suffix:
                return out_state
        return None

    def _discover_intermediate_state_and_suffix(self, state, suffix):
        # go only one level deep
        found_state = None
        found_suffix = None
        for (out_suffix, out_state) in state.outputs:
            for (deep_out_suffix, deep_out_state) in out_state.outputs:
                if deep_out_suffix == suffix:
                    if found_state:
                        raise Exception(u'Output state not found for {} {}. Tried states that are accessible, but found two states: {}, {}'.format(state, suffix, found_state, deep_out_state))
                    else:
                        found_state = out_state
                        found_suffix = out_suffix
        return found_state, found_suffix
    def _follow_path(self, root, path_edges):
        morpheme_container = MorphemeContainer(root, self._suffix_graph.get_default_root_state(root), u'')
        for path_edge in path_edges:
            suffix = None
            suffix_form_application_str = None

            if isinstance(path_edge, tuple):
                suffix = path_edge[0]
                suffix_form_application_str = path_edge[1] if len(path_edge) > 1 else u''
            else:
                suffix = path_edge
                suffix_form_application_str = u''

            surface_so_far = morpheme_container.get_surface_so_far()
            path_result = surface_so_far + suffix_form_application_str

            to_state = self._find_to_state(morpheme_container.get_last_state(), suffix)
            if not to_state:
                intermediate_state, intermediate_suffix = self._discover_intermediate_state_and_suffix(morpheme_container.get_last_state(), suffix)
                if not intermediate_state:
                    raise Exception(u'Also tried to discover intermediate states, but unable to find output state for {} {}'.format(to_state, suffix))
                morpheme_container = self._add_transition(morpheme_container, u'', intermediate_suffix, intermediate_state, surface_so_far)
                to_state = self._find_to_state(morpheme_container.get_last_state(), suffix)
                if not to_state:
                    raise Exception(u'Unable to find output state which has been suggested by intermediate state before, for {} {}'.format(to_state, suffix))
                morpheme_container = self._add_transition(morpheme_container, suffix_form_application_str, suffix, to_state, path_result)
            else:
                morpheme_container = self._add_transition(morpheme_container, suffix_form_application_str, suffix, to_state, path_result)

        return morpheme_container

    def _add_morpheme_container(self, root, path_tuples):
        morpheme_container = self._follow_path(root, path_tuples)
        if not self._morpheme_container_map.has_key(root):
            self._morpheme_container_map[root] = []
        self._morpheme_container_map[root].append(morpheme_container)

    def has_paths(self, lexeme):
        if not self._morpheme_container_map:
            raise Exception(u"Predefined paths are not yet created. Maybe you forgot to run 'create_predefined_paths'?")
        return self._morpheme_container_map.has_key(lexeme)

    def get_paths(self, lexeme):
        if not self._morpheme_container_map:
            raise Exception("Predefined paths are not yet created. Maybe you forgot to run 'create_predefined_paths'?")
        return self._morpheme_container_map[lexeme]
    def create_predefined_paths(self):
        self._create_predefined_path_of_di()
        self._create_predefined_path_of_yi()

        self._create_predefined_path_of_ben()
        self._create_predefined_path_of_sen()
        self._create_predefined_path_of_o_pron_pers()
        self._create_predefined_path_of_biz()
        self._create_predefined_path_of_siz()
        self._create_predefined_path_of_onlar_pron_pers()

        self._create_predefined_path_of_bu_pron_demons()
        self._create_predefined_path_of_su_pron_demons()
        self._create_predefined_path_of_o_pron_demons()
        self._create_predefined_path_of_bunlar_pron_demons()
        self._create_predefined_path_of_sunlar_pron_demons()
        self._create_predefined_path_of_onlar_pron_demons()

        self._create_predefined_path_of_kendi()
        self._create_predefined_path_of_hepsi()
        self._create_predefined_path_of_herkes()

        self._create_predefined_path_of_question_particles()

        self._create_predefined_path_of_ne()

        self._create_predefined_path_of_ora_bura_sura_nere()
        self._create_predefined_path_of_iceri_disari()

        self._create_predefined_path_of_bazilari_bazisi()
        self._create_predefined_path_of_kimileri_kimisi_kimi()
        self._create_predefined_path_of_birileri_birisi_biri()
        self._create_predefined_path_of_hicbirisi_hicbiri()
        self._create_predefined_path_of_birbiri()
        self._create_predefined_path_of_cogu_bircogu_coklari_bircoklari()
        self._create_predefined_path_of_birkaci()
        self._create_predefined_path_of_cumlesi()
        self._create_predefined_path_of_digeri_digerleri()
    def _create_predefined_path_of_di(self):
        root_di = self._find_root(u'di', SyntacticCategory.VERB, None)

        Positive = self._suffix_graph.get_suffix(u'Pos')
        Negative = self._suffix_graph.get_suffix(u'Neg')

        self._add_morpheme_container(root_di, [Positive, (self._suffix_graph.get_suffix(u'Fut'), u'yecek')])
        self._add_morpheme_container(root_di, [Positive, (self._suffix_graph.get_suffix(u'Fut'), u'yeceğ')])
        self._add_morpheme_container(root_di, [Positive, (self._suffix_graph.get_suffix(u'Future_to_Adj'), u'yecek')])
        self._add_morpheme_container(root_di, [Positive, (self._suffix_graph.get_suffix(u'Future_to_Adj'), u'yeceğ')])
        self._add_morpheme_container(root_di, [Positive, (self._suffix_graph.get_suffix(u'FutPart_Noun'), u'yecek')])
        self._add_morpheme_container(root_di, [Positive, (self._suffix_graph.get_suffix(u'FutPart_Noun'), u'yeceğ')])
        self._add_morpheme_container(root_di, [Positive, (self._suffix_graph.get_suffix(u'FutPart_Adj'), u'yecek')])
        self._add_morpheme_container(root_di, [Positive, (self._suffix_graph.get_suffix(u'FutPart_Adj'), u'yeceğ')])
        self._add_morpheme_container(root_di, [Positive, (self._suffix_graph.get_suffix(u'Prog'), u'yor')])
        self._add_morpheme_container(root_di, [Positive, (self._suffix_graph.get_suffix(u'PresPart'), u'yen')])
        self._add_morpheme_container(root_di, [(self._suffix_graph.get_suffix(u'Able'), u'yebil'), Positive])
        self._add_morpheme_container(root_di, [(self._suffix_graph.get_suffix(u'Able'), u'ye'), (Negative, "me")])
        self._add_morpheme_container(root_di, [Positive, (self._suffix_graph.get_suffix(u'Opt'), u'ye')])
        self._add_morpheme_container(root_di, [Positive, (self._suffix_graph.get_suffix(u'ByDoingSo'), u'yerek')])

    def _create_predefined_path_of_yi(self):
        root_yi = self._find_root(u'yi', SyntacticCategory.VERB, None)

        Positive = self._suffix_graph.get_suffix(u'Pos')
        Negative = self._suffix_graph.get_suffix(u'Neg')

        self._add_morpheme_container(root_yi, [Positive, (self._suffix_graph.get_suffix(u'Fut'), u'yecek')])
        self._add_morpheme_container(root_yi, [Positive, (self._suffix_graph.get_suffix(u'Fut'), u'yeceğ')])
        self._add_morpheme_container(root_yi, [Positive, (self._suffix_graph.get_suffix(u'Future_to_Adj'), u'yecek')])
        self._add_morpheme_container(root_yi, [Positive, (self._suffix_graph.get_suffix(u'Future_to_Adj'), u'yeceğ')])
        self._add_morpheme_container(root_yi, [Positive, (self._suffix_graph.get_suffix(u'FutPart_Noun'), u'yecek')])
        self._add_morpheme_container(root_yi, [Positive, (self._suffix_graph.get_suffix(u'FutPart_Noun'), u'yeceğ')])
        self._add_morpheme_container(root_yi, [Positive, (self._suffix_graph.get_suffix(u'FutPart_Adj'), u'yecek')])
        self._add_morpheme_container(root_yi, [Positive, (self._suffix_graph.get_suffix(u'FutPart_Adj'), u'yeceğ')])
        self._add_morpheme_container(root_yi, [Positive, (self._suffix_graph.get_suffix(u'Prog'), u'yor')])
        self._add_morpheme_container(root_yi, [Positive, (self._suffix_graph.get_suffix(u'PresPart'), u'yen')])
        self._add_morpheme_container(root_yi, [(self._suffix_graph.get_suffix(u'Able'), u'yebil'), Positive])
        self._add_morpheme_container(root_yi, [(self._suffix_graph.get_suffix(u'Able'), u'ye'), (Negative, "me")])
        self._add_morpheme_container(root_yi, [Positive, (self._suffix_graph.get_suffix(u'Opt'), u'ye')])
        self._add_morpheme_container(root_yi, [Positive, (self._suffix_graph.get_suffix(u'ByDoingSo'), u'yerek')])
        # different from "demek"
        self._add_morpheme_container(root_yi, [Positive, (self._suffix_graph.get_suffix(u'AfterDoingSo'), u'yip')])
    def _create_predefined_path_of_ben(self):
        root_ben = self._find_root(u'ben', SyntacticCategory.PRONOUN, SecondarySyntacticCategory.PERSONAL)
        root_ban = self._find_root(u'ban', SyntacticCategory.PRONOUN, SecondarySyntacticCategory.PERSONAL)

        A1Sg_Pron = self._suffix_graph.get_suffix(u'A1Sg_Pron')
        Pnon_Pron = self._suffix_graph.get_suffix(u'Pnon_Pron')

        self._add_morpheme_container(root_ben, [A1Sg_Pron, Pnon_Pron, self._suffix_graph.get_suffix(u'Nom_Pron')])
        self._add_morpheme_container(root_ben, [A1Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Acc_Pron'), u'i')])
        self._add_morpheme_container(root_ban, [A1Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Dat_Pron'), u'a')])
        self._add_morpheme_container(root_ben, [A1Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Loc_Pron'), u'de')])
        self._add_morpheme_container(root_ben, [A1Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Abl_Pron'), u'den')])
        self._add_morpheme_container(root_ben, [A1Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Ins_Pron'), u'le')])
        self._add_morpheme_container(root_ben, [A1Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Ins_Pron'), u'imle')])
        self._add_morpheme_container(root_ben, [A1Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Gen_Pron'), u'im')])
        self._add_morpheme_container(root_ben, [A1Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'AccordingTo'), u'ce')])
        self._add_morpheme_container(root_ben, [A1Sg_Pron, Pnon_Pron, self._suffix_graph.get_suffix(u'Nom_Pron_Deriv')])

    def _create_predefined_path_of_sen(self):
        root_sen = self._find_root(u'sen', SyntacticCategory.PRONOUN, SecondarySyntacticCategory.PERSONAL)
        root_san = self._find_root(u'san', SyntacticCategory.PRONOUN, SecondarySyntacticCategory.PERSONAL)

        A2Sg_Pron = self._suffix_graph.get_suffix(u'A2Sg_Pron')
        Pnon_Pron = self._suffix_graph.get_suffix(u'Pnon_Pron')

        self._add_morpheme_container(root_sen, [A2Sg_Pron, Pnon_Pron, self._suffix_graph.get_suffix(u'Nom_Pron')])
        self._add_morpheme_container(root_sen, [A2Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Acc_Pron'), u'i')])
        self._add_morpheme_container(root_san, [A2Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Dat_Pron'), u'a')])
        self._add_morpheme_container(root_sen, [A2Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Loc_Pron'), u'de')])
        self._add_morpheme_container(root_sen, [A2Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Abl_Pron'), u'den')])
        self._add_morpheme_container(root_sen, [A2Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Ins_Pron'), u'le')])
        self._add_morpheme_container(root_sen, [A2Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Ins_Pron'), u'inle')])
        self._add_morpheme_container(root_sen, [A2Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Gen_Pron'), u'in')])
        self._add_morpheme_container(root_sen, [A2Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'AccordingTo'), u'ce')])
        self._add_morpheme_container(root_sen, [A2Sg_Pron, Pnon_Pron, self._suffix_graph.get_suffix(u'Nom_Pron_Deriv')])

    def _create_predefined_path_of_o_pron_pers(self):
        root_o = self._find_root(u'o', SyntacticCategory.PRONOUN, SecondarySyntacticCategory.PERSONAL)

        A3Sg_Pron = self._suffix_graph.get_suffix(u'A3Sg_Pron')
        Pnon_Pron = self._suffix_graph.get_suffix(u'Pnon_Pron')

        self._add_morpheme_container(root_o, [A3Sg_Pron, Pnon_Pron, self._suffix_graph.get_suffix(u'Nom_Pron')])
        self._add_morpheme_container(root_o, [A3Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Acc_Pron'), u'nu')])
        self._add_morpheme_container(root_o, [A3Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Dat_Pron'), u'na')])
        self._add_morpheme_container(root_o, [A3Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Loc_Pron'), u'nda')])
        self._add_morpheme_container(root_o, [A3Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Abl_Pron'), u'ndan')])
        self._add_morpheme_container(root_o, [A3Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Ins_Pron'), u'nla')])
        self._add_morpheme_container(root_o, [A3Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Ins_Pron'), u'nunla')])
        self._add_morpheme_container(root_o, [A3Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Gen_Pron'), u'nun')])
        self._add_morpheme_container(root_o, [A3Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'AccordingTo'), u'nca')])
        self._add_morpheme_container(root_o, [A3Sg_Pron, Pnon_Pron, self._suffix_graph.get_suffix(u'Nom_Pron_Deriv')])
    def _create_predefined_path_of_biz(self):
        root_biz = self._find_root(u'biz', SyntacticCategory.PRONOUN, SecondarySyntacticCategory.PERSONAL)

        A1Pl_Pron = self._suffix_graph.get_suffix(u'A1Pl_Pron')
        Pnon_Pron = self._suffix_graph.get_suffix(u'Pnon_Pron')

        self._add_morpheme_container(root_biz, [A1Pl_Pron, Pnon_Pron, self._suffix_graph.get_suffix(u'Nom_Pron')])
        self._add_morpheme_container(root_biz, [A1Pl_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Acc_Pron'), u'i')])
        self._add_morpheme_container(root_biz, [A1Pl_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Dat_Pron'), u'e')])
        self._add_morpheme_container(root_biz, [A1Pl_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Loc_Pron'), u'de')])
        self._add_morpheme_container(root_biz, [A1Pl_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Abl_Pron'), u'den')])
        self._add_morpheme_container(root_biz, [A1Pl_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Ins_Pron'), u'le')])
        self._add_morpheme_container(root_biz, [A1Pl_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Ins_Pron'), u'imle')])
        self._add_morpheme_container(root_biz, [A1Pl_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Gen_Pron'), u'im')])
        self._add_morpheme_container(root_biz, [A1Pl_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'AccordingTo'), u'ce')])
        self._add_morpheme_container(root_biz, [A1Pl_Pron, Pnon_Pron, self._suffix_graph.get_suffix(u'Nom_Pron_Deriv')])

        self._add_morpheme_container(root_biz, [(A1Pl_Pron, u'ler'), Pnon_Pron, self._suffix_graph.get_suffix(u'Nom_Pron')])
        self._add_morpheme_container(root_biz, [(A1Pl_Pron, u'ler'), Pnon_Pron, (self._suffix_graph.get_suffix(u'Acc_Pron'), u'i')])
        self._add_morpheme_container(root_biz, [(A1Pl_Pron, u'ler'), Pnon_Pron, (self._suffix_graph.get_suffix(u'Dat_Pron'), u'e')])
        self._add_morpheme_container(root_biz, [(A1Pl_Pron, u'ler'), Pnon_Pron, (self._suffix_graph.get_suffix(u'Loc_Pron'), u'de')])
        self._add_morpheme_container(root_biz, [(A1Pl_Pron, u'ler'), Pnon_Pron, (self._suffix_graph.get_suffix(u'Abl_Pron'), u'den')])
        self._add_morpheme_container(root_biz, [(A1Pl_Pron, u'ler'), Pnon_Pron, (self._suffix_graph.get_suffix(u'Ins_Pron'), u'le')])
        self._add_morpheme_container(root_biz, [(A1Pl_Pron, u'ler'), Pnon_Pron, (self._suffix_graph.get_suffix(u'Gen_Pron'), u'in')])
        self._add_morpheme_container(root_biz, [(A1Pl_Pron, u'ler'), Pnon_Pron, (self._suffix_graph.get_suffix(u'AccordingTo'), u'ce')])
        self._add_morpheme_container(root_biz, [(A1Pl_Pron, u'ler'), Pnon_Pron, self._suffix_graph.get_suffix(u'Nom_Pron_Deriv')])

    def _create_predefined_path_of_siz(self):
        root_siz = self._find_root(u'siz', SyntacticCategory.PRONOUN, SecondarySyntacticCategory.PERSONAL)

        A2Pl_Pron = self._suffix_graph.get_suffix(u'A2Pl_Pron')
        Pnon_Pron = self._suffix_graph.get_suffix(u'Pnon_Pron')

        self._add_morpheme_container(root_siz, [A2Pl_Pron, Pnon_Pron, self._suffix_graph.get_suffix(u'Nom_Pron')])
        self._add_morpheme_container(root_siz, [A2Pl_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Acc_Pron'), u'i')])
        self._add_morpheme_container(root_siz, [A2Pl_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Dat_Pron'), u'e')])
        self._add_morpheme_container(root_siz, [A2Pl_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Loc_Pron'), u'de')])
        self._add_morpheme_container(root_siz, [A2Pl_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Abl_Pron'), u'den')])
        self._add_morpheme_container(root_siz, [A2Pl_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Ins_Pron'), u'le')])
        self._add_morpheme_container(root_siz, [A2Pl_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Ins_Pron'), u'inle')])
        self._add_morpheme_container(root_siz, [A2Pl_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Gen_Pron'), u'in')])
        self._add_morpheme_container(root_siz, [A2Pl_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'AccordingTo'), u'ce')])
        self._add_morpheme_container(root_siz, [A2Pl_Pron, Pnon_Pron, self._suffix_graph.get_suffix(u'Nom_Pron_Deriv')])

        self._add_morpheme_container(root_siz, [(A2Pl_Pron, u'ler'), Pnon_Pron, self._suffix_graph.get_suffix(u'Nom_Pron')])
        self._add_morpheme_container(root_siz, [(A2Pl_Pron, u'ler'), Pnon_Pron, (self._suffix_graph.get_suffix(u'Acc_Pron'), u'i')])
        self._add_morpheme_container(root_siz, [(A2Pl_Pron, u'ler'), Pnon_Pron, (self._suffix_graph.get_suffix(u'Dat_Pron'), u'e')])
        self._add_morpheme_container(root_siz, [(A2Pl_Pron, u'ler'), Pnon_Pron, (self._suffix_graph.get_suffix(u'Loc_Pron'), u'de')])
        self._add_morpheme_container(root_siz, [(A2Pl_Pron, u'ler'), Pnon_Pron, (self._suffix_graph.get_suffix(u'Abl_Pron'), u'den')])
        self._add_morpheme_container(root_siz, [(A2Pl_Pron, u'ler'), Pnon_Pron, (self._suffix_graph.get_suffix(u'Ins_Pron'), u'le')])
        self._add_morpheme_container(root_siz, [(A2Pl_Pron, u'ler'), Pnon_Pron, (self._suffix_graph.get_suffix(u'Gen_Pron'), u'in')])
        self._add_morpheme_container(root_siz, [(A2Pl_Pron, u'ler'), Pnon_Pron, (self._suffix_graph.get_suffix(u'AccordingTo'), u'ce')])
        self._add_morpheme_container(root_siz, [(A2Pl_Pron, u'ler'), Pnon_Pron, self._suffix_graph.get_suffix(u'Nom_Pron_Deriv')])
    def _create_predefined_path_of_onlar_pron_pers(self):
        root_o = self._find_root(u'o', SyntacticCategory.PRONOUN, SecondarySyntacticCategory.PERSONAL)

        A3Pl_Pron = self._suffix_graph.get_suffix(u'A3Pl_Pron')
        Pnon_Pron = self._suffix_graph.get_suffix(u'Pnon_Pron')

        self._add_morpheme_container(root_o, [(A3Pl_Pron, 'nlar'), Pnon_Pron, self._suffix_graph.get_suffix(u'Nom_Pron')])
        self._add_morpheme_container(root_o, [(A3Pl_Pron, 'nlar'), Pnon_Pron, (self._suffix_graph.get_suffix(u'Acc_Pron'), u'ı')])
        self._add_morpheme_container(root_o, [(A3Pl_Pron, 'nlar'), Pnon_Pron, (self._suffix_graph.get_suffix(u'Dat_Pron'), u'a')])
        self._add_morpheme_container(root_o, [(A3Pl_Pron, 'nlar'), Pnon_Pron, (self._suffix_graph.get_suffix(u'Loc_Pron'), u'da')])
        self._add_morpheme_container(root_o, [(A3Pl_Pron, 'nlar'), Pnon_Pron, (self._suffix_graph.get_suffix(u'Abl_Pron'), u'dan')])
        self._add_morpheme_container(root_o, [(A3Pl_Pron, 'nlar'), Pnon_Pron, (self._suffix_graph.get_suffix(u'Ins_Pron'), u'la')])
        self._add_morpheme_container(root_o, [(A3Pl_Pron, 'nlar'), Pnon_Pron, (self._suffix_graph.get_suffix(u'Gen_Pron'), u'ın')])
        self._add_morpheme_container(root_o, [(A3Pl_Pron, 'nlar'), Pnon_Pron, (self._suffix_graph.get_suffix(u'AccordingTo'), u'ca')])
        self._add_morpheme_container(root_o, [(A3Pl_Pron, 'nlar'), Pnon_Pron, self._suffix_graph.get_suffix(u'Nom_Pron_Deriv')])

    def _create_predefined_path_of_bu_pron_demons(self):
        root_bu = self._find_root(u'bu', SyntacticCategory.PRONOUN, SecondarySyntacticCategory.DEMONSTRATIVE)

        A3Sg_Pron = self._suffix_graph.get_suffix(u'A3Sg_Pron')
        Pnon_Pron = self._suffix_graph.get_suffix(u'Pnon_Pron')

        self._add_morpheme_container(root_bu, [A3Sg_Pron, Pnon_Pron, self._suffix_graph.get_suffix(u'Nom_Pron')])
        self._add_morpheme_container(root_bu, [A3Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Acc_Pron'), u'nu')])
        self._add_morpheme_container(root_bu, [A3Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Dat_Pron'), u'na')])
        self._add_morpheme_container(root_bu, [A3Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Loc_Pron'), u'nda')])
        self._add_morpheme_container(root_bu, [A3Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Abl_Pron'), u'ndan')])
        self._add_morpheme_container(root_bu, [A3Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Ins_Pron'), u'nla')])
        self._add_morpheme_container(root_bu, [A3Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Ins_Pron'), u'nunla')])
        self._add_morpheme_container(root_bu, [A3Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Gen_Pron'), u'nun')])
        self._add_morpheme_container(root_bu, [A3Sg_Pron, Pnon_Pron, self._suffix_graph.get_suffix(u'Nom_Pron_Deriv')])

    def _create_predefined_path_of_su_pron_demons(self):
        root_su = self._find_root(u'şu', SyntacticCategory.PRONOUN, SecondarySyntacticCategory.DEMONSTRATIVE)

        A3Sg_Pron = self._suffix_graph.get_suffix(u'A3Sg_Pron')
        Pnon_Pron = self._suffix_graph.get_suffix(u'Pnon_Pron')

        self._add_morpheme_container(root_su, [A3Sg_Pron, Pnon_Pron, self._suffix_graph.get_suffix(u'Nom_Pron')])
        self._add_morpheme_container(root_su, [A3Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Acc_Pron'), u'nu')])
        self._add_morpheme_container(root_su, [A3Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Dat_Pron'), u'na')])
        self._add_morpheme_container(root_su, [A3Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Loc_Pron'), u'nda')])
        self._add_morpheme_container(root_su, [A3Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Abl_Pron'), u'ndan')])
        self._add_morpheme_container(root_su, [A3Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Ins_Pron'), u'nla')])
        self._add_morpheme_container(root_su, [A3Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Ins_Pron'), u'nunla')])
        self._add_morpheme_container(root_su, [A3Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Gen_Pron'), u'nun')])
        self._add_morpheme_container(root_su, [A3Sg_Pron, Pnon_Pron, self._suffix_graph.get_suffix(u'Nom_Pron_Deriv')])

    def _create_predefined_path_of_o_pron_demons(self):
        root_o = self._find_root(u'o', SyntacticCategory.PRONOUN, SecondarySyntacticCategory.DEMONSTRATIVE)

        A3Sg_Pron = self._suffix_graph.get_suffix(u'A3Sg_Pron')
        Pnon_Pron = self._suffix_graph.get_suffix(u'Pnon_Pron')

        self._add_morpheme_container(root_o, [A3Sg_Pron, Pnon_Pron, self._suffix_graph.get_suffix(u'Nom_Pron')])
        self._add_morpheme_container(root_o, [A3Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Acc_Pron'), u'nu')])
        self._add_morpheme_container(root_o, [A3Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Dat_Pron'), u'na')])
        self._add_morpheme_container(root_o, [A3Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Loc_Pron'), u'nda')])
        self._add_morpheme_container(root_o, [A3Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Abl_Pron'), u'ndan')])
        self._add_morpheme_container(root_o, [A3Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Ins_Pron'), u'nla')])
        self._add_morpheme_container(root_o, [A3Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Ins_Pron'), u'nunla')])
        self._add_morpheme_container(root_o, [A3Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Gen_Pron'), u'nun')])
        self._add_morpheme_container(root_o, [A3Sg_Pron, Pnon_Pron, self._suffix_graph.get_suffix(u'Nom_Pron_Deriv')])

    def _create_predefined_path_of_bunlar_pron_demons(self):
        root_bu = self._find_root(u'bu', SyntacticCategory.PRONOUN, SecondarySyntacticCategory.DEMONSTRATIVE)

        A3Pl_Pron = self._suffix_graph.get_suffix(u'A3Pl_Pron')
        Pnon_Pron = self._suffix_graph.get_suffix(u'Pnon_Pron')

        self._add_morpheme_container(root_bu, [(A3Pl_Pron, 'nlar'), Pnon_Pron, self._suffix_graph.get_suffix(u'Nom_Pron')])
        self._add_morpheme_container(root_bu, [(A3Pl_Pron, 'nlar'), Pnon_Pron, (self._suffix_graph.get_suffix(u'Acc_Pron'), u'ı')])
        self._add_morpheme_container(root_bu, [(A3Pl_Pron, 'nlar'), Pnon_Pron, (self._suffix_graph.get_suffix(u'Dat_Pron'), u'a')])
        self._add_morpheme_container(root_bu, [(A3Pl_Pron, 'nlar'), Pnon_Pron, (self._suffix_graph.get_suffix(u'Loc_Pron'), u'da')])
        self._add_morpheme_container(root_bu, [(A3Pl_Pron, 'nlar'), Pnon_Pron, (self._suffix_graph.get_suffix(u'Abl_Pron'), u'dan')])
        self._add_morpheme_container(root_bu, [(A3Pl_Pron, 'nlar'), Pnon_Pron, (self._suffix_graph.get_suffix(u'Ins_Pron'), u'la')])
        self._add_morpheme_container(root_bu, [(A3Pl_Pron, 'nlar'), Pnon_Pron, (self._suffix_graph.get_suffix(u'Gen_Pron'), u'ın')])
        self._add_morpheme_container(root_bu, [(A3Pl_Pron, 'nlar'), Pnon_Pron, self._suffix_graph.get_suffix(u'Nom_Pron_Deriv')])

    def _create_predefined_path_of_sunlar_pron_demons(self):
        root_su = self._find_root(u'şu', SyntacticCategory.PRONOUN, SecondarySyntacticCategory.DEMONSTRATIVE)

        A3Pl_Pron = self._suffix_graph.get_suffix(u'A3Pl_Pron')
        Pnon_Pron = self._suffix_graph.get_suffix(u'Pnon_Pron')

        self._add_morpheme_container(root_su, [(A3Pl_Pron, 'nlar'), Pnon_Pron, self._suffix_graph.get_suffix(u'Nom_Pron')])
        self._add_morpheme_container(root_su, [(A3Pl_Pron, 'nlar'), Pnon_Pron, (self._suffix_graph.get_suffix(u'Acc_Pron'), u'ı')])
        self._add_morpheme_container(root_su, [(A3Pl_Pron, 'nlar'), Pnon_Pron, (self._suffix_graph.get_suffix(u'Dat_Pron'), u'a')])
        self._add_morpheme_container(root_su, [(A3Pl_Pron, 'nlar'), Pnon_Pron, (self._suffix_graph.get_suffix(u'Loc_Pron'), u'da')])
        self._add_morpheme_container(root_su, [(A3Pl_Pron, 'nlar'), Pnon_Pron, (self._suffix_graph.get_suffix(u'Abl_Pron'), u'dan')])
        self._add_morpheme_container(root_su, [(A3Pl_Pron, 'nlar'), Pnon_Pron, (self._suffix_graph.get_suffix(u'Ins_Pron'), u'la')])
        self._add_morpheme_container(root_su, [(A3Pl_Pron, 'nlar'), Pnon_Pron, (self._suffix_graph.get_suffix(u'Gen_Pron'), u'ın')])
        self._add_morpheme_container(root_su, [(A3Pl_Pron, 'nlar'), Pnon_Pron, self._suffix_graph.get_suffix(u'Nom_Pron_Deriv')])

    def _create_predefined_path_of_onlar_pron_demons(self):
        root_o = self._find_root(u'o', SyntacticCategory.PRONOUN, SecondarySyntacticCategory.DEMONSTRATIVE)

        A3Pl_Pron = self._suffix_graph.get_suffix(u'A3Pl_Pron')
        Pnon_Pron = self._suffix_graph.get_suffix(u'Pnon_Pron')

        self._add_morpheme_container(root_o, [(A3Pl_Pron, 'nlar'), Pnon_Pron, self._suffix_graph.get_suffix(u'Nom_Pron')])
        self._add_morpheme_container(root_o, [(A3Pl_Pron, 'nlar'), Pnon_Pron, (self._suffix_graph.get_suffix(u'Acc_Pron'), u'ı')])
        self._add_morpheme_container(root_o, [(A3Pl_Pron, 'nlar'), Pnon_Pron, (self._suffix_graph.get_suffix(u'Dat_Pron'), u'a')])
        self._add_morpheme_container(root_o, [(A3Pl_Pron, 'nlar'), Pnon_Pron, (self._suffix_graph.get_suffix(u'Loc_Pron'), u'da')])
        self._add_morpheme_container(root_o, [(A3Pl_Pron, 'nlar'), Pnon_Pron, (self._suffix_graph.get_suffix(u'Abl_Pron'), u'dan')])
        self._add_morpheme_container(root_o, [(A3Pl_Pron, 'nlar'), Pnon_Pron, (self._suffix_graph.get_suffix(u'Ins_Pron'), u'la')])
        self._add_morpheme_container(root_o, [(A3Pl_Pron, 'nlar'), Pnon_Pron, (self._suffix_graph.get_suffix(u'Gen_Pron'), u'ın')])
        self._add_morpheme_container(root_o, [(A3Pl_Pron, 'nlar'), Pnon_Pron, self._suffix_graph.get_suffix(u'Nom_Pron_Deriv')])
def _create_predefined_path_of_kendi(self):
root_kendi = self._find_root(u'kendi', SyntacticCategory.PRONOUN, SecondarySyntacticCategory.REFLEXIVE)
A1Sg_Pron = self._suffix_graph.get_suffix(u'A1Sg_Pron')
P1Sg_Pron = self._suffix_graph.get_suffix(u'P1Sg_Pron')
A2Sg_Pron = self._suffix_graph.get_suffix(u'A2Sg_Pron')
P2Sg_Pron = self._suffix_graph.get_suffix(u'P2Sg_Pron')
A3Sg_Pron = self._suffix_graph.get_suffix(u'A3Sg_Pron')
P3Sg_Pron = self._suffix_graph.get_suffix(u'P3Sg_Pron')
A1Pl_Pron = self._suffix_graph.get_suffix(u'A1Pl_Pron')
P1Pl_Pron = self._suffix_graph.get_suffix(u'P1Pl_Pron')
A2Pl_Pron = self._suffix_graph.get_suffix(u'A2Pl_Pron')
P2Pl_Pron = self._suffix_graph.get_suffix(u'P2Pl_Pron')
A3Pl_Pron = self._suffix_graph.get_suffix(u'A3Pl_Pron')
P3Pl_Pron = self._suffix_graph.get_suffix(u'P3Pl_Pron')
##### A1Sg
self._add_morpheme_container(root_kendi, [A1Sg_Pron, (P1Sg_Pron,'m'), self._suffix_graph.get_suffix(u'Nom_Pron')])
self._add_morpheme_container(root_kendi, [A1Sg_Pron, (P1Sg_Pron,'m'), (self._suffix_graph.get_suffix(u'Acc_Pron'), u'i')])
self._add_morpheme_container(root_kendi, [A1Sg_Pron, (P1Sg_Pron,'m'), (self._suffix_graph.get_suffix(u'Dat_Pron'), u'e')])
self._add_morpheme_container(root_kendi, [A1Sg_Pron, (P1Sg_Pron,'m'), (self._suffix_graph.get_suffix(u'Loc_Pron'), u'de')])
self._add_morpheme_container(root_kendi, [A1Sg_Pron, (P1Sg_Pron,'m'), (self._suffix_graph.get_suffix(u'Abl_Pron'), u'den')])
self._add_morpheme_container(root_kendi, [A1Sg_Pron, (P1Sg_Pron,'m'), (self._suffix_graph.get_suffix(u'Ins_Pron'), u'le')])
self._add_morpheme_container(root_kendi, [A1Sg_Pron, (P1Sg_Pron,'m'), (self._suffix_graph.get_suffix(u'Gen_Pron'), u'in')])
self._add_morpheme_container(root_kendi, [A1Sg_Pron, (P1Sg_Pron,'m'), self._suffix_graph.get_suffix(u'Nom_Pron_Deriv')])
##### A2Sg
self._add_morpheme_container(root_kendi, [A2Sg_Pron, (P2Sg_Pron,'n'), self._suffix_graph.get_suffix(u'Nom_Pron')])
self._add_morpheme_container(root_kendi, [A2Sg_Pron, (P2Sg_Pron,'n'), (self._suffix_graph.get_suffix(u'Acc_Pron'), u'i')])
self._add_morpheme_container(root_kendi, [A2Sg_Pron, (P2Sg_Pron,'n'), (self._suffix_graph.get_suffix(u'Dat_Pron'), u'e')])
self._add_morpheme_container(root_kendi, [A2Sg_Pron, (P2Sg_Pron,'n'), (self._suffix_graph.get_suffix(u'Loc_Pron'), u'de')])
self._add_morpheme_container(root_kendi, [A2Sg_Pron, (P2Sg_Pron,'n'), (self._suffix_graph.get_suffix(u'Abl_Pron'), u'den')])
self._add_morpheme_container(root_kendi, [A2Sg_Pron, (P2Sg_Pron,'n'), (self._suffix_graph.get_suffix(u'Ins_Pron'), u'le')])
self._add_morpheme_container(root_kendi, [A2Sg_Pron, (P2Sg_Pron,'n'), (self._suffix_graph.get_suffix(u'Gen_Pron'), u'in')])
self._add_morpheme_container(root_kendi, [A2Sg_Pron, (P2Sg_Pron,'n'), self._suffix_graph.get_suffix(u'Nom_Pron_Deriv')])
##### A3Sg
self._add_morpheme_container(root_kendi, [A3Sg_Pron, P3Sg_Pron, self._suffix_graph.get_suffix(u'Nom_Pron')])
self._add_morpheme_container(root_kendi, [A3Sg_Pron, P3Sg_Pron, (self._suffix_graph.get_suffix(u'Acc_Pron'), u'ni')])
self._add_morpheme_container(root_kendi, [A3Sg_Pron, P3Sg_Pron, (self._suffix_graph.get_suffix(u'Dat_Pron'), u'ne')])
self._add_morpheme_container(root_kendi, [A3Sg_Pron, P3Sg_Pron, (self._suffix_graph.get_suffix(u'Loc_Pron'), u'nde')])
self._add_morpheme_container(root_kendi, [A3Sg_Pron, P3Sg_Pron, (self._suffix_graph.get_suffix(u'Abl_Pron'), u'nden')])
self._add_morpheme_container(root_kendi, [A3Sg_Pron, P3Sg_Pron, (self._suffix_graph.get_suffix(u'Ins_Pron'), u'yle')])
self._add_morpheme_container(root_kendi, [A3Sg_Pron, P3Sg_Pron, (self._suffix_graph.get_suffix(u'Gen_Pron'), u'nin')])
self._add_morpheme_container(root_kendi, [A3Sg_Pron, P3Sg_Pron, self._suffix_graph.get_suffix(u'Nom_Pron_Deriv')])
self._add_morpheme_container(root_kendi, [A3Sg_Pron, (P3Sg_Pron,'si'), self._suffix_graph.get_suffix(u'Nom_Pron')])
self._add_morpheme_container(root_kendi, [A3Sg_Pron, (P3Sg_Pron,'si'), (self._suffix_graph.get_suffix(u'Acc_Pron'), u'ni')])
self._add_morpheme_container(root_kendi, [A3Sg_Pron, (P3Sg_Pron,'si'), (self._suffix_graph.get_suffix(u'Dat_Pron'), u'ne')])
self._add_morpheme_container(root_kendi, [A3Sg_Pron, (P3Sg_Pron,'si'), (self._suffix_graph.get_suffix(u'Loc_Pron'), u'nde')])
self._add_morpheme_container(root_kendi, [A3Sg_Pron, (P3Sg_Pron,'si'), (self._suffix_graph.get_suffix(u'Abl_Pron'), u'nden')])
self._add_morpheme_container(root_kendi, [A3Sg_Pron, (P3Sg_Pron,'si'), (self._suffix_graph.get_suffix(u'Ins_Pron'), u'yle')])
self._add_morpheme_container(root_kendi, [A3Sg_Pron, (P3Sg_Pron,'si'), (self._suffix_graph.get_suffix(u'Gen_Pron'), u'nin')])
self._add_morpheme_container(root_kendi, [A3Sg_Pron, (P3Sg_Pron,'si'), self._suffix_graph.get_suffix(u'Nom_Pron_Deriv')])
##### A1pl
self._add_morpheme_container(root_kendi, [A1Pl_Pron, (P1Pl_Pron,'miz'), self._suffix_graph.get_suffix(u'Nom_Pron')])
self._add_morpheme_container(root_kendi, [A1Pl_Pron, (P1Pl_Pron,'miz'), (self._suffix_graph.get_suffix(u'Acc_Pron'), u'i')])
self._add_morpheme_container(root_kendi, [A1Pl_Pron, (P1Pl_Pron,'miz'), (self._suffix_graph.get_suffix(u'Dat_Pron'), u'e')])
self._add_morpheme_container(root_kendi, [A1Pl_Pron, (P1Pl_Pron,'miz'), (self._suffix_graph.get_suffix(u'Loc_Pron'), u'de')])
self._add_morpheme_container(root_kendi, [A1Pl_Pron, (P1Pl_Pron,'miz'), (self._suffix_graph.get_suffix(u'Abl_Pron'), u'den')])
self._add_morpheme_container(root_kendi, [A1Pl_Pron, (P1Pl_Pron,'miz'), (self._suffix_graph.get_suffix(u'Ins_Pron'), u'le')])
self._add_morpheme_container(root_kendi, [A1Pl_Pron, (P1Pl_Pron,'miz'), (self._suffix_graph.get_suffix(u'Gen_Pron'), u'in')])
self._add_morpheme_container(root_kendi, [A1Pl_Pron, (P1Pl_Pron,'miz'), self._suffix_graph.get_suffix(u'Nom_Pron_Deriv')])
self._add_morpheme_container(root_kendi, [(A1Pl_Pron,'ler'), (P1Pl_Pron,'imiz'), self._suffix_graph.get_suffix(u'Nom_Pron')])
self._add_morpheme_container(root_kendi, [(A1Pl_Pron,'ler'), (P1Pl_Pron,'imiz'), (self._suffix_graph.get_suffix(u'Acc_Pron'), u'i')])
self._add_morpheme_container(root_kendi, [(A1Pl_Pron,'ler'), (P1Pl_Pron,'imiz'), (self._suffix_graph.get_suffix(u'Dat_Pron'), u'e')])
self._add_morpheme_container(root_kendi, [(A1Pl_Pron,'ler'), (P1Pl_Pron,'imiz'), (self._suffix_graph.get_suffix(u'Loc_Pron'), u'de')])
self._add_morpheme_container(root_kendi, [(A1Pl_Pron,'ler'), (P1Pl_Pron,'imiz'), (self._suffix_graph.get_suffix(u'Abl_Pron'), u'den')])
self._add_morpheme_container(root_kendi, [(A1Pl_Pron,'ler'), (P1Pl_Pron,'imiz'), (self._suffix_graph.get_suffix(u'Ins_Pron'), u'le')])
self._add_morpheme_container(root_kendi, [(A1Pl_Pron,'ler'), (P1Pl_Pron,'imiz'), (self._suffix_graph.get_suffix(u'Gen_Pron'), u'in')])
self._add_morpheme_container(root_kendi, [(A1Pl_Pron,'ler'), (P1Pl_Pron,'imiz'), self._suffix_graph.get_suffix(u'Nom_Pron_Deriv')])
##### A2pl
self._add_morpheme_container(root_kendi, [A2Pl_Pron, (P2Pl_Pron,'niz'), self._suffix_graph.get_suffix(u'Nom_Pron')])
self._add_morpheme_container(root_kendi, [A2Pl_Pron, (P2Pl_Pron,'niz'), (self._suffix_graph.get_suffix(u'Acc_Pron'), u'i')])
self._add_morpheme_container(root_kendi, [A2Pl_Pron, (P2Pl_Pron,'niz'), (self._suffix_graph.get_suffix(u'Dat_Pron'), u'e')])
self._add_morpheme_container(root_kendi, [A2Pl_Pron, (P2Pl_Pron,'niz'), (self._suffix_graph.get_suffix(u'Loc_Pron'), u'de')])
self._add_morpheme_container(root_kendi, [A2Pl_Pron, (P2Pl_Pron,'niz'), (self._suffix_graph.get_suffix(u'Abl_Pron'), u'den')])
self._add_morpheme_container(root_kendi, [A2Pl_Pron, (P2Pl_Pron,'niz'), (self._suffix_graph.get_suffix(u'Ins_Pron'), u'le')])
self._add_morpheme_container(root_kendi, [A2Pl_Pron, (P2Pl_Pron,'niz'), (self._suffix_graph.get_suffix(u'Gen_Pron'), u'in')])
self._add_morpheme_container(root_kendi, [A2Pl_Pron, (P2Pl_Pron,'niz'), self._suffix_graph.get_suffix(u'Nom_Pron_Deriv')])
self._add_morpheme_container(root_kendi, [(A2Pl_Pron,'ler'), (P2Pl_Pron,'iniz'), self._suffix_graph.get_suffix(u'Nom_Pron')])
self._add_morpheme_container(root_kendi, [(A2Pl_Pron,'ler'), (P2Pl_Pron,'iniz'), (self._suffix_graph.get_suffix(u'Acc_Pron'), u'i')])
self._add_morpheme_container(root_kendi, [(A2Pl_Pron,'ler'), (P2Pl_Pron,'iniz'), (self._suffix_graph.get_suffix(u'Dat_Pron'), u'e')])
self._add_morpheme_container(root_kendi, [(A2Pl_Pron,'ler'), (P2Pl_Pron,'iniz'), (self._suffix_graph.get_suffix(u'Loc_Pron'), u'de')])
self._add_morpheme_container(root_kendi, [(A2Pl_Pron,'ler'), (P2Pl_Pron,'iniz'), (self._suffix_graph.get_suffix(u'Abl_Pron'), u'den')])
self._add_morpheme_container(root_kendi, [(A2Pl_Pron,'ler'), (P2Pl_Pron,'iniz'), (self._suffix_graph.get_suffix(u'Ins_Pron'), u'le')])
self._add_morpheme_container(root_kendi, [(A2Pl_Pron,'ler'), (P2Pl_Pron,'iniz'), (self._suffix_graph.get_suffix(u'Gen_Pron'), u'in')])
self._add_morpheme_container(root_kendi, [(A2Pl_Pron,'ler'), (P2Pl_Pron,'iniz'), self._suffix_graph.get_suffix(u'Nom_Pron_Deriv')])
##### A3pl
self._add_morpheme_container(root_kendi, [(A3Pl_Pron,'leri'), P3Pl_Pron, self._suffix_graph.get_suffix(u'Nom_Pron')])
self._add_morpheme_container(root_kendi, [(A3Pl_Pron,'leri'), P3Pl_Pron, (self._suffix_graph.get_suffix(u'Acc_Pron'), u'ni')])
self._add_morpheme_container(root_kendi, [(A3Pl_Pron,'leri'), P3Pl_Pron, (self._suffix_graph.get_suffix(u'Dat_Pron'), u'ne')])
self._add_morpheme_container(root_kendi, [(A3Pl_Pron,'leri'), P3Pl_Pron, (self._suffix_graph.get_suffix(u'Loc_Pron'), u'nde')])
self._add_morpheme_container(root_kendi, [(A3Pl_Pron,'leri'), P3Pl_Pron, (self._suffix_graph.get_suffix(u'Abl_Pron'), u'nden')])
self._add_morpheme_container(root_kendi, [(A3Pl_Pron,'leri'), P3Pl_Pron, (self._suffix_graph.get_suffix(u'Ins_Pron'), u'yle')])
self._add_morpheme_container(root_kendi, [(A3Pl_Pron,'leri'), P3Pl_Pron, (self._suffix_graph.get_suffix(u'Gen_Pron'), u'nin')])
self._add_morpheme_container(root_kendi, [(A3Pl_Pron,'leri'), P3Pl_Pron, self._suffix_graph.get_suffix(u'Nom_Pron_Deriv')])
def _create_predefined_path_of_hepsi(self):
root_hep = self._find_root(u'hep', SyntacticCategory.PRONOUN, None)
root_hepsi = self._find_root(u'hepsi', SyntacticCategory.PRONOUN, None)
A1Pl_Pron = self._suffix_graph.get_suffix(u'A1Pl_Pron')
P1Pl_Pron = self._suffix_graph.get_suffix(u'P1Pl_Pron')
A2Pl_Pron = self._suffix_graph.get_suffix(u'A2Pl_Pron')
P2Pl_Pron = self._suffix_graph.get_suffix(u'P2Pl_Pron')
A3Pl_Pron = self._suffix_graph.get_suffix(u'A3Pl_Pron')
P3Pl_Pron = self._suffix_graph.get_suffix(u'P3Pl_Pron')
##### No A1Sg
##### No A2Sg
##### No A3Sg
##### A1pl
self._add_morpheme_container(root_hep, [A1Pl_Pron, (P1Pl_Pron,'imiz'), self._suffix_graph.get_suffix(u'Nom_Pron')])
self._add_morpheme_container(root_hep, [A1Pl_Pron, (P1Pl_Pron,'imiz'), (self._suffix_graph.get_suffix(u'Acc_Pron'), u'i')])
self._add_morpheme_container(root_hep, [A1Pl_Pron, (P1Pl_Pron,'imiz'), (self._suffix_graph.get_suffix(u'Dat_Pron'), u'e')])
self._add_morpheme_container(root_hep, [A1Pl_Pron, (P1Pl_Pron,'imiz'), (self._suffix_graph.get_suffix(u'Loc_Pron'), u'de')])
self._add_morpheme_container(root_hep, [A1Pl_Pron, (P1Pl_Pron,'imiz'), (self._suffix_graph.get_suffix(u'Abl_Pron'), u'den')])
self._add_morpheme_container(root_hep, [A1Pl_Pron, (P1Pl_Pron,'imiz'), (self._suffix_graph.get_suffix(u'Ins_Pron'), u'le')])
self._add_morpheme_container(root_hep, [A1Pl_Pron, (P1Pl_Pron,'imiz'), (self._suffix_graph.get_suffix(u'Gen_Pron'), u'in')])
self._add_morpheme_container(root_hep, [A1Pl_Pron, (P1Pl_Pron,'imiz'), (self._suffix_graph.get_suffix(u'AccordingTo'), u'ce')])
self._add_morpheme_container(root_hep, [A1Pl_Pron, (P1Pl_Pron,'imiz'), self._suffix_graph.get_suffix(u'Nom_Pron_Deriv')])
##### A2pl
self._add_morpheme_container(root_hep, [A2Pl_Pron, (P2Pl_Pron,'iniz'), self._suffix_graph.get_suffix(u'Nom_Pron')])
self._add_morpheme_container(root_hep, [A2Pl_Pron, (P2Pl_Pron,'iniz'), (self._suffix_graph.get_suffix(u'Acc_Pron'), u'i')])
self._add_morpheme_container(root_hep, [A2Pl_Pron, (P2Pl_Pron,'iniz'), (self._suffix_graph.get_suffix(u'Dat_Pron'), u'e')])
self._add_morpheme_container(root_hep, [A2Pl_Pron, (P2Pl_Pron,'iniz'), (self._suffix_graph.get_suffix(u'Loc_Pron'), u'de')])
self._add_morpheme_container(root_hep, [A2Pl_Pron, (P2Pl_Pron,'iniz'), (self._suffix_graph.get_suffix(u'Abl_Pron'), u'den')])
self._add_morpheme_container(root_hep, [A2Pl_Pron, (P2Pl_Pron,'iniz'), (self._suffix_graph.get_suffix(u'Ins_Pron'), u'le')])
self._add_morpheme_container(root_hep, [A2Pl_Pron, (P2Pl_Pron,'iniz'), (self._suffix_graph.get_suffix(u'Gen_Pron'), u'in')])
self._add_morpheme_container(root_hep, [A2Pl_Pron, (P2Pl_Pron,'iniz'), (self._suffix_graph.get_suffix(u'AccordingTo'), u'ce')])
self._add_morpheme_container(root_hep, [A2Pl_Pron, (P2Pl_Pron,'iniz'), self._suffix_graph.get_suffix(u'Nom_Pron_Deriv')])
##### A3pl
self._add_morpheme_container(root_hepsi, [A3Pl_Pron, P3Pl_Pron, self._suffix_graph.get_suffix(u'Nom_Pron')])
self._add_morpheme_container(root_hepsi, [A3Pl_Pron, P3Pl_Pron, (self._suffix_graph.get_suffix(u'Acc_Pron'), u'ni')])
self._add_morpheme_container(root_hepsi, [A3Pl_Pron, P3Pl_Pron, (self._suffix_graph.get_suffix(u'Dat_Pron'), u'ne')])
self._add_morpheme_container(root_hepsi, [A3Pl_Pron, P3Pl_Pron, (self._suffix_graph.get_suffix(u'Loc_Pron'), u'nde')])
self._add_morpheme_container(root_hepsi, [A3Pl_Pron, P3Pl_Pron, (self._suffix_graph.get_suffix(u'Abl_Pron'), u'nden')])
self._add_morpheme_container(root_hepsi, [A3Pl_Pron, P3Pl_Pron, (self._suffix_graph.get_suffix(u'Ins_Pron'), u'yle')])
self._add_morpheme_container(root_hepsi, [A3Pl_Pron, P3Pl_Pron, (self._suffix_graph.get_suffix(u'Gen_Pron'), u'nin')])
self._add_morpheme_container(root_hepsi, [A3Pl_Pron, P3Pl_Pron, (self._suffix_graph.get_suffix(u'AccordingTo'), u'nce')])
self._add_morpheme_container(root_hepsi, [A3Pl_Pron, P3Pl_Pron, self._suffix_graph.get_suffix(u'Nom_Pron_Deriv')])
def _create_predefined_path_of_herkes(self):
root_herkes = self._find_root(u'herkes', SyntacticCategory.PRONOUN, None)
self._add_morpheme_container(root_herkes, [self._suffix_graph.get_suffix(u'A3Sg_Pron'), self._suffix_graph.get_suffix(u'Pnon_Pron')])
def _create_predefined_path_of_question_particles(self):
root_mii = self._find_root(u'mı', SyntacticCategory.QUESTION, None)
root_mi = self._find_root(u'mi', SyntacticCategory.QUESTION, None)
root_mu = self._find_root(u'mu', SyntacticCategory.QUESTION, None)
root_muu = self._find_root(u'mü', SyntacticCategory.QUESTION, None)
Pres_Ques = self._suffix_graph.get_suffix(u'Pres_Ques')
Past_Ques = self._suffix_graph.get_suffix(u'Past_Ques')
Narr_Ques = self._suffix_graph.get_suffix(u'Narr_Ques')
A1Sg_Ques = self._suffix_graph.get_suffix(u'A1Sg_Ques')
A2Sg_Ques = self._suffix_graph.get_suffix(u'A2Sg_Ques')
A3Sg_Ques = self._suffix_graph.get_suffix(u'A3Sg_Ques')
A1Pl_Ques = self._suffix_graph.get_suffix(u'A1Pl_Ques')
A2Pl_Ques = self._suffix_graph.get_suffix(u'A2Pl_Ques')
A3Pl_Ques = self._suffix_graph.get_suffix(u'A3Pl_Ques')
##### Pres
self._add_morpheme_container(root_mii, [Pres_Ques, (A1Sg_Ques,u'yım')])
self._add_morpheme_container(root_mii, [Pres_Ques, (A2Sg_Ques,u'sın')])
self._add_morpheme_container(root_mii, [Pres_Ques, (A3Sg_Ques,u'')])
self._add_morpheme_container(root_mii, [Pres_Ques, (A1Pl_Ques,u'yız')])
self._add_morpheme_container(root_mii, [Pres_Ques, (A2Pl_Ques,u'sınız')])
self._add_morpheme_container(root_mii, [Pres_Ques, (A3Pl_Ques,u'lar')])
self._add_morpheme_container(root_mi , [Pres_Ques, (A1Sg_Ques,u'yim')])
self._add_morpheme_container(root_mi , [Pres_Ques, (A2Sg_Ques,u'sin')])
self._add_morpheme_container(root_mi , [Pres_Ques, (A3Sg_Ques,u'')])
self._add_morpheme_container(root_mi , [Pres_Ques, (A1Pl_Ques,u'yiz')])
self._add_morpheme_container(root_mi , [Pres_Ques, (A2Pl_Ques,u'siniz')])
self._add_morpheme_container(root_mi , [Pres_Ques, (A3Pl_Ques,u'ler')])
self._add_morpheme_container(root_mu , [Pres_Ques, (A1Sg_Ques,u'yum')])
self._add_morpheme_container(root_mu , [Pres_Ques, (A2Sg_Ques,u'sun')])
self._add_morpheme_container(root_mu , [Pres_Ques, (A3Sg_Ques,u'')])
self._add_morpheme_container(root_mu , [Pres_Ques, (A1Pl_Ques,u'yuz')])
self._add_morpheme_container(root_mu , [Pres_Ques, (A2Pl_Ques,u'sunuz')])
self._add_morpheme_container(root_mu , [Pres_Ques, (A3Pl_Ques,u'lar')])
self._add_morpheme_container(root_muu, [Pres_Ques, (A1Sg_Ques,u'yüm')])
self._add_morpheme_container(root_muu, [Pres_Ques, (A2Sg_Ques,u'sün')])
self._add_morpheme_container(root_muu, [Pres_Ques, (A3Sg_Ques,u'')])
self._add_morpheme_container(root_muu, [Pres_Ques, (A1Pl_Ques,u'yüz')])
self._add_morpheme_container(root_muu, [Pres_Ques, (A2Pl_Ques,u'sünüz')])
self._add_morpheme_container(root_muu, [Pres_Ques, (A3Pl_Ques,u'ler')])
##### Past
self._add_morpheme_container(root_mii, [(Past_Ques,u'ydı'), (A1Sg_Ques,u'm')])
self._add_morpheme_container(root_mii, [(Past_Ques,u'ydı'), (A2Sg_Ques,u'n')])
self._add_morpheme_container(root_mii, [(Past_Ques,u'ydı'), (A3Sg_Ques,u'')])
self._add_morpheme_container(root_mii, [(Past_Ques,u'ydı'), (A1Pl_Ques,u'k')])
self._add_morpheme_container(root_mii, [(Past_Ques,u'ydı'), (A2Pl_Ques,u'nız')])
self._add_morpheme_container(root_mii, [(Past_Ques,u'ydı'), (A3Pl_Ques,u'lar')])
self._add_morpheme_container(root_mi , [(Past_Ques,u'ydi'), (A1Sg_Ques,u'm')])
self._add_morpheme_container(root_mi , [(Past_Ques,u'ydi'), (A2Sg_Ques,u'n')])
self._add_morpheme_container(root_mi , [(Past_Ques,u'ydi'), (A3Sg_Ques,u'')])
self._add_morpheme_container(root_mi , [(Past_Ques,u'ydi'), (A1Pl_Ques,u'k')])
self._add_morpheme_container(root_mi , [(Past_Ques,u'ydi'), (A2Pl_Ques,u'niz')])
self._add_morpheme_container(root_mi , [(Past_Ques,u'ydi'), (A3Pl_Ques,u'ler')])
self._add_morpheme_container(root_mu , [(Past_Ques,u'ydu'), (A1Sg_Ques,u'm')])
self._add_morpheme_container(root_mu , [(Past_Ques,u'ydu'), (A2Sg_Ques,u'n')])
self._add_morpheme_container(root_mu , [(Past_Ques,u'ydu'), (A3Sg_Ques,u'')])
self._add_morpheme_container(root_mu , [(Past_Ques,u'ydu'), (A1Pl_Ques,u'k')])
self._add_morpheme_container(root_mu , [(Past_Ques,u'ydu'), (A2Pl_Ques,u'nuz')])
self._add_morpheme_container(root_mu , [(Past_Ques,u'ydu'), (A3Pl_Ques,u'lar')])
self._add_morpheme_container(root_muu, [(Past_Ques,u'ydü'), (A1Sg_Ques,u'm')])
self._add_morpheme_container(root_muu, [(Past_Ques,u'ydü'), (A2Sg_Ques,u'n')])
self._add_morpheme_container(root_muu, [(Past_Ques,u'ydü'), (A3Sg_Ques,u'')])
self._add_morpheme_container(root_muu, [(Past_Ques,u'ydü'), (A1Pl_Ques,u'k')])
self._add_morpheme_container(root_muu, [(Past_Ques,u'ydü'), (A2Pl_Ques,u'nüz')])
self._add_morpheme_container(root_muu, [(Past_Ques,u'ydü'), (A3Pl_Ques,u'ler')])
##### Narr
self._add_morpheme_container(root_mii, [(Narr_Ques,u'ymış'), (A1Sg_Ques,u'ım')])
self._add_morpheme_container(root_mii, [(Narr_Ques,u'ymış'), (A2Sg_Ques,u'sın')])
self._add_morpheme_container(root_mii, [(Narr_Ques,u'ymış'), (A3Sg_Ques,u'')])
self._add_morpheme_container(root_mii, [(Narr_Ques,u'ymış'), (A1Pl_Ques,u'ız')])
self._add_morpheme_container(root_mii, [(Narr_Ques,u'ymış'), (A2Pl_Ques,u'sınız')])
self._add_morpheme_container(root_mii, [(Narr_Ques,u'ymış'), (A3Pl_Ques,u'lar')])
self._add_morpheme_container(root_mi , [(Narr_Ques,u'ymiş'), (A1Sg_Ques,u'im')])
self._add_morpheme_container(root_mi , [(Narr_Ques,u'ymiş'), (A2Sg_Ques,u'sin')])
self._add_morpheme_container(root_mi , [(Narr_Ques,u'ymiş'), (A3Sg_Ques,u'')])
self._add_morpheme_container(root_mi , [(Narr_Ques,u'ymiş'), (A1Pl_Ques,u'iz')])
self._add_morpheme_container(root_mi , [(Narr_Ques,u'ymiş'), (A2Pl_Ques,u'siniz')])
self._add_morpheme_container(root_mi , [(Narr_Ques,u'ymiş'), (A3Pl_Ques,u'ler')])
self._add_morpheme_container(root_mu , [(Narr_Ques,u'ymuş'), (A1Sg_Ques,u'um')])
self._add_morpheme_container(root_mu , [(Narr_Ques,u'ymuş'), (A2Sg_Ques,u'sun')])
self._add_morpheme_container(root_mu , [(Narr_Ques,u'ymuş'), (A3Sg_Ques,u'')])
self._add_morpheme_container(root_mu , [(Narr_Ques,u'ymuş'), (A1Pl_Ques,u'uz')])
self._add_morpheme_container(root_mu , [(Narr_Ques,u'ymuş'), (A2Pl_Ques,u'sunuz')])
self._add_morpheme_container(root_mu , [(Narr_Ques,u'ymuş'), (A3Pl_Ques,u'lar')])
self._add_morpheme_container(root_muu, [(Narr_Ques,u'ymüş'), (A1Sg_Ques,u'üm')])
self._add_morpheme_container(root_muu, [(Narr_Ques,u'ymüş'), (A2Sg_Ques,u'sün')])
self._add_morpheme_container(root_muu, [(Narr_Ques,u'ymüş'), (A3Sg_Ques,u'')])
self._add_morpheme_container(root_muu, [(Narr_Ques,u'ymüş'), (A1Pl_Ques,u'üz')])
self._add_morpheme_container(root_muu, [(Narr_Ques,u'ymüş'), (A2Pl_Ques,u'sünüz')])
self._add_morpheme_container(root_muu, [(Narr_Ques,u'ymüş'), (A3Pl_Ques,u'ler')])
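# Illustration (not in the original source): the paths above license the
# fused question-particle forms, e.g. "mıydım" (mı + ydı + m) and
# "muymuş" (mu + ymuş), one morpheme container per person/tense combination.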
def _create_predefined_path_of_ne(self):
root_ne = self._find_root(u'ne', SyntacticCategory.PRONOUN, SecondarySyntacticCategory.QUESTION)
A3Sg_Pron = self._suffix_graph.get_suffix(u'A3Sg_Pron')
self._add_morpheme_container(root_ne, [A3Sg_Pron, (self._suffix_graph.get_suffix(u'P1Sg_Pron'),u'm')])
self._add_morpheme_container(root_ne, [A3Sg_Pron, (self._suffix_graph.get_suffix(u'P1Sg_Pron'),u'yim')])
self._add_morpheme_container(root_ne, [A3Sg_Pron, (self._suffix_graph.get_suffix(u'P2Sg_Pron'),u'n')])
self._add_morpheme_container(root_ne, [A3Sg_Pron, (self._suffix_graph.get_suffix(u'P2Sg_Pron'),u'yin')])
self._add_morpheme_container(root_ne, [A3Sg_Pron, (self._suffix_graph.get_suffix(u'P3Sg_Pron'),u'yi')])
self._add_morpheme_container(root_ne, [A3Sg_Pron, (self._suffix_graph.get_suffix(u'P3Sg_Pron'),u'si')])
self._add_morpheme_container(root_ne, [A3Sg_Pron, (self._suffix_graph.get_suffix(u'P1Pl_Pron'),u'yimiz')])
self._add_morpheme_container(root_ne, [A3Sg_Pron, (self._suffix_graph.get_suffix(u'P2Pl_Pron'),u'yiniz')])
self._add_morpheme_container(root_ne, [A3Sg_Pron, (self._suffix_graph.get_suffix(u'P3Pl_Pron'),u'leri')])
self._add_morpheme_container(root_ne, [A3Sg_Pron, self._suffix_graph.get_suffix(u'Pnon_Pron'), (self._suffix_graph.get_suffix(u'Gen_Pron'), u'yin')])
self._add_morpheme_container(root_ne, [A3Sg_Pron, self._suffix_graph.get_suffix(u'Pnon_Pron')])
self._add_morpheme_container(root_ne, [(self._suffix_graph.get_suffix(u'A3Pl_Pron'), u'ler'), self._suffix_graph.get_suffix(u'Pnon_Pron')])
def _create_predefined_path_of_ora_bura_sura_nere(self):
root_or = self._find_root(u'or', SyntacticCategory.PRONOUN, None)
root_bur = self._find_root(u'bur', SyntacticCategory.PRONOUN, None)
root_sur = self._find_root(u'şur', SyntacticCategory.PRONOUN, None)
root_ner = self._find_root(u'ner', SyntacticCategory.PRONOUN, SecondarySyntacticCategory.QUESTION)
A3Sg_Pron = self._suffix_graph.get_suffix(u'A3Sg_Pron')
Pnon_Pron = self._suffix_graph.get_suffix(u'Pnon_Pron')
# define predefined paths for "orda" and "ordan" etc.
self._add_morpheme_container(root_or, [A3Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Loc_Pron'),'da')])
self._add_morpheme_container(root_or, [A3Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Abl_Pron'),'dan')])
self._add_morpheme_container(root_bur, [A3Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Loc_Pron'),'da')])
self._add_morpheme_container(root_bur, [A3Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Abl_Pron'),'dan')])
self._add_morpheme_container(root_sur, [A3Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Loc_Pron'),'da')])
self._add_morpheme_container(root_sur, [A3Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Abl_Pron'),'dan')])
self._add_morpheme_container(root_ner, [A3Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Loc_Pron'),'de')])
self._add_morpheme_container(root_ner, [A3Sg_Pron, Pnon_Pron, (self._suffix_graph.get_suffix(u'Abl_Pron'),'den')])
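# Illustration (not in the original source): the first pair of paths above
# licenses the colloquial forms "orda" (or + A3Sg + Pnon + Loc['da']) and
# "ordan" (or + A3Sg + Pnon + Abl['dan']) alongside regular "orada"/"oradan".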
def _create_predefined_path_of_iceri_disari(self):
root_icer = self._find_root(u'içer', SyntacticCategory.NOUN, None)
root_disar = self._find_root(u'dışar', SyntacticCategory.NOUN, None)
A3Sg_Noun = self._suffix_graph.get_suffix(u'A3Sg_Noun')
Pnon_Noun = self._suffix_graph.get_suffix(u'Pnon_Noun')
P3Sg_Noun = self._suffix_graph.get_suffix(u'P3Sg_Noun')
# define predefined paths for "içerde" and "dışardan" etc.
self._add_morpheme_container(root_icer, [A3Sg_Noun, Pnon_Noun, (self._suffix_graph.get_suffix(u'Loc_Noun'),'de')])
self._add_morpheme_container(root_icer, [A3Sg_Noun, Pnon_Noun, (self._suffix_graph.get_suffix(u'Abl_Noun'),'den')])
self._add_morpheme_container(root_icer, [A3Sg_Noun, (P3Sg_Noun,'si')])
self._add_morpheme_container(root_disar, [A3Sg_Noun, Pnon_Noun, (self._suffix_graph.get_suffix(u'Loc_Noun'),'da')])
self._add_morpheme_container(root_disar, [A3Sg_Noun, Pnon_Noun, (self._suffix_graph.get_suffix(u'Abl_Noun'),'dan')])
self._add_morpheme_container(root_disar, [A3Sg_Noun, (P3Sg_Noun,u'sı')])
def _create_predefined_path_of_bazilari_bazisi(self):
root_bazisi = self._find_root(u'bazısı', SyntacticCategory.PRONOUN, None)
root_bazilari = self._find_root(u'bazıları', SyntacticCategory.PRONOUN, None)
A3Sg_Pron = self._suffix_graph.get_suffix(u'A3Sg_Pron')
self._add_morpheme_container(root_bazilari, [A3Sg_Pron, self._suffix_graph.get_suffix(u'P3Sg_Pron')])
self._add_morpheme_container(root_bazilari, [A3Sg_Pron, (self._suffix_graph.get_suffix(u'P1Pl_Pron'), u'mız')])
self._add_morpheme_container(root_bazilari, [A3Sg_Pron, (self._suffix_graph.get_suffix(u'P2Pl_Pron'), u'nız')])
self._add_morpheme_container(root_bazisi, [A3Sg_Pron, self._suffix_graph.get_suffix(u'P3Sg_Pron')])
def _create_predefined_path_of_kimileri_kimisi_kimi(self):
root_kimi = self._find_root(u'kimi', SyntacticCategory.PRONOUN, None)
root_kimisi = self._find_root(u'kimisi', SyntacticCategory.PRONOUN, None)
root_kimileri = self._find_root(u'kimileri', SyntacticCategory.PRONOUN, None)
A3Sg_Pron = self._suffix_graph.get_suffix(u'A3Sg_Pron')
self._add_morpheme_container(root_kimileri, [A3Sg_Pron, self._suffix_graph.get_suffix(u'P3Sg_Pron')])
self._add_morpheme_container(root_kimileri, [A3Sg_Pron, (self._suffix_graph.get_suffix(u'P1Pl_Pron'), u'miz')])
self._add_morpheme_container(root_kimileri, [A3Sg_Pron, (self._suffix_graph.get_suffix(u'P2Pl_Pron'), u'niz')])
self._add_morpheme_container(root_kimi, [A3Sg_Pron, self._suffix_graph.get_suffix(u'P3Sg_Pron')])
self._add_morpheme_container(root_kimi, [A3Sg_Pron, (self._suffix_graph.get_suffix(u'P1Pl_Pron'), u'miz')])
self._add_morpheme_container(root_kimi, [A3Sg_Pron, (self._suffix_graph.get_suffix(u'P2Pl_Pron'), u'niz')])
self._add_morpheme_container(root_kimisi, [A3Sg_Pron, self._suffix_graph.get_suffix(u'P3Sg_Pron')])
def _create_predefined_path_of_birileri_birisi_biri(self):
root_biri = self._find_root(u'biri', SyntacticCategory.PRONOUN, None)
root_birisi = self._find_root(u'birisi', SyntacticCategory.PRONOUN, None)
root_birileri = self._find_root(u'birileri', SyntacticCategory.PRONOUN, None)
A3Sg_Pron = self._suffix_graph.get_suffix(u'A3Sg_Pron')
self._add_morpheme_container(root_birileri, [A3Sg_Pron, self._suffix_graph.get_suffix(u'P3Sg_Pron')])
self._add_morpheme_container(root_birileri, [A3Sg_Pron, (self._suffix_graph.get_suffix(u'P1Pl_Pron'), u'miz')])
self._add_morpheme_container(root_birileri, [A3Sg_Pron, (self._suffix_graph.get_suffix(u'P2Pl_Pron'), u'niz')])
self._add_morpheme_container(root_biri, [A3Sg_Pron, self._suffix_graph.get_suffix(u'P3Sg_Pron')])
self._add_morpheme_container(root_biri, [A3Sg_Pron, (self._suffix_graph.get_suffix(u'P1Pl_Pron'), u'miz')])
self._add_morpheme_container(root_biri, [A3Sg_Pron, (self._suffix_graph.get_suffix(u'P2Pl_Pron'), u'niz')])
self._add_morpheme_container(root_birisi, [A3Sg_Pron, self._suffix_graph.get_suffix(u'P3Sg_Pron')])
def _create_predefined_path_of_hicbirisi_hicbiri(self):
root_hicbiri = self._find_root(u'hiçbiri', SyntacticCategory.PRONOUN, None)
root_hicbirisi = self._find_root(u'hiçbirisi', SyntacticCategory.PRONOUN, None)
A3Sg_Pron = self._suffix_graph.get_suffix(u'A3Sg_Pron')
self._add_morpheme_container(root_hicbiri, [A3Sg_Pron, self._suffix_graph.get_suffix(u'P3Sg_Pron')])
self._add_morpheme_container(root_hicbiri, [A3Sg_Pron, (self._suffix_graph.get_suffix(u'P1Pl_Pron'), u'miz')])
self._add_morpheme_container(root_hicbiri, [A3Sg_Pron, (self._suffix_graph.get_suffix(u'P2Pl_Pron'), u'niz')])
self._add_morpheme_container(root_hicbirisi, [A3Sg_Pron, self._suffix_graph.get_suffix(u'P3Sg_Pron')])
def _create_predefined_path_of_birbiri(self):
root_birbir = self._find_root(u'birbir', SyntacticCategory.PRONOUN, None)
root_birbiri = self._find_root(u'birbiri', SyntacticCategory.PRONOUN, None)
self._add_morpheme_container(root_birbiri, [self._suffix_graph.get_suffix(u'A3Sg_Pron'), self._suffix_graph.get_suffix(u'P3Sg_Pron')])
self._add_morpheme_container(root_birbiri, [self._suffix_graph.get_suffix(u'A1Pl_Pron'), (self._suffix_graph.get_suffix(u'P1Pl_Pron'), u'miz')])
self._add_morpheme_container(root_birbiri, [self._suffix_graph.get_suffix(u'A2Pl_Pron'), (self._suffix_graph.get_suffix(u'P2Pl_Pron'), u'niz')])
self._add_morpheme_container(root_birbir, [self._suffix_graph.get_suffix(u'A3Pl_Pron'), (self._suffix_graph.get_suffix(u'P3Pl_Pron'), u'leri')])
def _create_predefined_path_of_cogu_bircogu_coklari_bircoklari(self):
root_cogu = self._find_root(u'çoğu', SyntacticCategory.PRONOUN, None)
root_bircogu = self._find_root(u'birçoğu', SyntacticCategory.PRONOUN, None)
root_coklari = self._find_root(u'çokları', SyntacticCategory.PRONOUN, None)
root_bircoklari = self._find_root(u'birçokları', SyntacticCategory.PRONOUN, None)
A3Sg_Pron = self._suffix_graph.get_suffix(u'A3Sg_Pron')
self._add_morpheme_container(root_cogu, [A3Sg_Pron, self._suffix_graph.get_suffix(u'P3Sg_Pron')])
self._add_morpheme_container(root_cogu, [A3Sg_Pron, (self._suffix_graph.get_suffix(u'P1Pl_Pron'), u'muz')])
self._add_morpheme_container(root_cogu, [A3Sg_Pron, (self._suffix_graph.get_suffix(u'P2Pl_Pron'), u'nuz')])
self._add_morpheme_container(root_bircogu, [A3Sg_Pron, self._suffix_graph.get_suffix(u'P3Sg_Pron')])
self._add_morpheme_container(root_bircogu, [A3Sg_Pron, (self._suffix_graph.get_suffix(u'P1Pl_Pron'), u'muz')])
self._add_morpheme_container(root_bircogu, [A3Sg_Pron, (self._suffix_graph.get_suffix(u'P2Pl_Pron'), u'nuz')])
self._add_morpheme_container(root_coklari, [A3Sg_Pron, self._suffix_graph.get_suffix(u'P3Pl_Pron')])
self._add_morpheme_container(root_bircoklari, [A3Sg_Pron, self._suffix_graph.get_suffix(u'P3Pl_Pron')])
def _create_predefined_path_of_birkaci(self):
root_birkaci = self._find_root(u'birkaçı', SyntacticCategory.PRONOUN, None)
A3Sg_Pron = self._suffix_graph.get_suffix(u'A3Sg_Pron')
self._add_morpheme_container(root_birkaci, [A3Sg_Pron, self._suffix_graph.get_suffix(u'P3Sg_Pron')])
self._add_morpheme_container(root_birkaci, [A3Sg_Pron, (self._suffix_graph.get_suffix(u'P1Pl_Pron'), u'mız')])
self._add_morpheme_container(root_birkaci, [A3Sg_Pron, (self._suffix_graph.get_suffix(u'P2Pl_Pron'), u'nız')])
def _create_predefined_path_of_cumlesi(self):
root_cumlesi = self._find_root(u'cümlesi', SyntacticCategory.PRONOUN, None)
self._add_morpheme_container(root_cumlesi, [self._suffix_graph.get_suffix(u'A3Sg_Pron'), self._suffix_graph.get_suffix(u'P3Sg_Pron')])
def _create_predefined_path_of_digeri_digerleri(self):
root_digeri = self._find_root(u'diğeri', SyntacticCategory.PRONOUN, None)
root_digerleri = self._find_root(u'diğerleri', SyntacticCategory.PRONOUN, None)
A3Sg_Pron = self._suffix_graph.get_suffix(u'A3Sg_Pron')
self._add_morpheme_container(root_digeri, [A3Sg_Pron, self._suffix_graph.get_suffix(u'P3Sg_Pron')])
self._add_morpheme_container(root_digeri, [A3Sg_Pron, (self._suffix_graph.get_suffix(u'P1Pl_Pron'), u'miz')])
self._add_morpheme_container(root_digeri, [A3Sg_Pron, (self._suffix_graph.get_suffix(u'P2Pl_Pron'), u'niz')])
self._add_morpheme_container(root_digerleri, [A3Sg_Pron, self._suffix_graph.get_suffix(u'P3Pl_Pron')])
self._add_morpheme_container(root_digerleri, [A3Sg_Pron, (self._suffix_graph.get_suffix(u'P1Pl_Pron'), u'miz')])
self._add_morpheme_container(root_digerleri, [A3Sg_Pron, (self._suffix_graph.get_suffix(u'P2Pl_Pron'), u'niz')])
|
{
"content_hash": "c63160809885c5eda1fd803c276da1da",
"timestamp": "",
"source": "github",
"line_count": 902,
"max_line_length": 191,
"avg_line_length": 73.31818181818181,
"alnum_prop": 0.6536827302556969,
"repo_name": "aliok/trnltk",
"id": "55545b5853e9983dd1c85c8c304edbc57d532c38",
"size": "66269",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trnltk/morphology/morphotactics/predefinedpaths.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "60232"
},
{
"name": "Python",
"bytes": "1320401"
},
{
"name": "Shell",
"bytes": "2191"
}
],
"symlink_target": ""
}
|
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
import cdrouter
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'cdrouter.py'
copyright = u'2017-2022, QA Cafe'
author = u'QA Cafe'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = cdrouter.__version__
# The full version, including alpha/beta/rc tags.
release = cdrouter.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'cdrouterpydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'cdrouterpy.tex', u'cdrouter.py Documentation',
u'QA Cafe', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'cdrouterpy', u'cdrouter.py Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'cdrouterpy', u'cdrouter.py Documentation',
author, 'cdrouterpy', 'One line description of project.',
'Miscellaneous'),
]
|
{
"content_hash": "9ce5b1105cc29812a0c982cac3762dd7",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 79,
"avg_line_length": 30.161971830985916,
"alnum_prop": 0.6672892832127014,
"repo_name": "qacafe/cdrouter.py",
"id": "9d7f8f77186f475d2dd797171d110024c876e03f",
"size": "4707",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "150"
},
{
"name": "Python",
"bytes": "409892"
},
{
"name": "Shell",
"bytes": "2426"
}
],
"symlink_target": ""
}
|
from django.db import connections, DatabaseError
from django.db.models.manager import Manager as DJManager
import re
import copy
# This module targets the PyMongo 1.x series: Code lives in pymongo.code
# and ObjectId in pymongo.objectid (both moved in later releases).
import pymongo
import pymongo.code
from pymongo.objectid import ObjectId
from .utils import dict_keys_to_str
try:
from django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist
except ImportError:
class ObjectDoesNotExist(Exception):
pass
class MultipleObjectsReturned(Exception):
pass
DoesNotExist = ObjectDoesNotExist
__all__ = ['queryset_manager', 'Q', 'InvalidQueryError',
'InvalidCollectionError']
# The maximum number of items to display in a QuerySet.__repr__
REPR_OUTPUT_SIZE = 20
class InvalidQueryError(Exception):
pass
class OperationError(Exception):
pass
class InvalidCollectionError(Exception):
pass
DoesNotExist = ObjectDoesNotExist
RE_TYPE = type(re.compile(''))
class Q(object):
OR = '||'
AND = '&&'
OPERATORS = {
'eq': 'this.%(field)s == %(value)s',
'ne': 'this.%(field)s != %(value)s',
'gt': 'this.%(field)s > %(value)s',
'gte': 'this.%(field)s >= %(value)s',
'lt': 'this.%(field)s < %(value)s',
'lte': 'this.%(field)s <= %(value)s',
'in': '%(value)s.indexOf(this.%(field)s) != -1',
'nin': '%(value)s.indexOf(this.%(field)s) == -1',
'mod': '%(field)s %% %(value)s',
'all': ('%(value)s.every(function(a){'
'return this.%(field)s.indexOf(a) != -1 })'),
'size': 'this.%(field)s.length == %(value)s',
'exists': 'this.%(field)s != null',
'regex_eq': '%(value)s.test(this.%(field)s)',
'regex_ne': '!%(value)s.test(this.%(field)s)',
}
def __init__(self, **query):
self.query = [query]
def _combine(self, other, op):
obj = Q()
obj.query = ['('] + copy.deepcopy(self.query) + [op]
obj.query += copy.deepcopy(other.query) + [')']
return obj
def __or__(self, other):
return self._combine(other, self.OR)
def __and__(self, other):
return self._combine(other, self.AND)
def as_js(self, document):
js = []
js_scope = {}
for i, item in enumerate(self.query):
if isinstance(item, dict):
item_query = QuerySet._transform_query(document, **item)
# item_query's values will either be a plain value or a dict
js.append(self._item_query_as_js(item_query, js_scope, i))
else:
js.append(item)
return pymongo.code.Code(' '.join(js), js_scope)
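# Usage sketch (hypothetical ``Person`` document with ``age`` and ``name``
# fields; illustrative only, not part of this module):
#
#   q = Q(age__gte=18) & Q(name='Test')
#   code = q.as_js(Person)
#   # -> a pymongo Code object wrapping a JS expression like
#   #    "( this.age >= i1f0o0 && this.name == i3f0 )" whose scope maps
#   #    the generated variable names to 18 and 'Test'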
def _item_query_as_js(self, item_query, js_scope, item_num):
# item_query will be in one of the following forms
# {'age': 25, 'name': 'Test'}
# {'age': {'$lt': 25}, 'name': {'$in': ['Test', 'Example']}
# {'age': {'$lt': 25, '$gt': 18}}
js = []
for i, (key, value) in enumerate(item_query.items()):
op = 'eq'
# Construct a variable name for the value in the JS
value_name = 'i%sf%s' % (item_num, i)
if isinstance(value, dict):
# Multiple operators for this field
for j, (op, value) in enumerate(value.items()):
# Create a custom variable name for this operator
op_value_name = '%so%s' % (value_name, j)
# Construct the JS that uses this op
value, operation_js = self._build_op_js(op, key, value,
op_value_name)
# Update the js scope with the value for this op
js_scope[op_value_name] = value
js.append(operation_js)
else:
# Construct the JS for this field
value, field_js = self._build_op_js(op, key, value, value_name)
js_scope[value_name] = value
js.append(field_js)
return ' && '.join(js)
def _build_op_js(self, op, key, value, value_name):
"""Substitute the values in to the correct chunk of Javascript.
"""
if isinstance(value, RE_TYPE):
# Regexes are handled specially
if op.strip('$') == 'ne':
op_js = Q.OPERATORS['regex_ne']
else:
op_js = Q.OPERATORS['regex_eq']
else:
op_js = Q.OPERATORS[op.strip('$')]
# Perform the substitution
operation_js = op_js % {
'field': key,
'value': value_name
}
return value, operation_js
class InternalMetadata:
def __init__(self, meta):
self.object_name = meta["object_name"]
class InternalModel:
"""
An internal queryset model to be embedded in a query set for django compatibility.
"""
def __init__(self, document):
self.document = document
self._meta = InternalMetadata(document._meta)
self.DoesNotExist = ObjectDoesNotExist
class QuerySet(object):
"""A set of results returned from a query. Wraps a ES cursor,
providing :class:`~mongoengine.Document` objects as the results.
"""
def __init__(self, document, collection):
self._document = document
self._collection_obj = collection
self._accessed_collection = False
self._query = {}
self._where_clause = None
self._loaded_fields = []
self._ordering = []
self.transform = TransformDjango()
# If inheritance is allowed, only return instances and instances of
# subclasses of the class being used
#if document._meta.get('allow_inheritance'):
#self._query = {'_types': self._document._class_name}
self._cursor_obj = None
self._limit = None
self._skip = None
#required for compatibility with django
#self.model = InternalModel(document)
def __call__(self, q_obj=None, **query):
"""Filter the selected documents by calling the
:class:`~mongoengine.queryset.QuerySet` with a query.
:param q_obj: a :class:`~mongoengine.queryset.Q` object to be used in
the query; the :class:`~mongoengine.queryset.QuerySet` is filtered
multiple times with different :class:`~mongoengine.queryset.Q`
objects, only the last one will be used
:param query: Django-style query keyword arguments
"""
if q_obj:
self._where_clause = q_obj.as_js(self._document)
query = QuerySet._transform_query(_doc_cls=self._document, **query)
self._query.update(query)
return self
def filter(self, *q_objs, **query):
"""An alias of :meth:`~mongoengine.queryset.QuerySet.__call__`
"""
return self.__call__(*q_objs, **query)
def find(self, query):
self._query.update(self.transform.transform_incoming(query, self._collection))
return self
def exclude(self, *q_objs, **query):
"""An alias of :meth:`~mongoengine.queryset.QuerySet.__call__`
"""
query["not"] = True
return self.__call__(*q_objs, **query)
def all(self):
"""An alias of :meth:`~mongoengine.queryset.QuerySet.__call__`
"""
return self.__call__()
def distinct(self, *args, **kwargs):
"""
Distinct method
"""
return self._cursor.distinct(*args, **kwargs)
@property
def _collection(self):
"""Property that returns the collection object. This allows us to
perform operations only if the collection is accessed.
"""
return self._collection_obj
def values(self, *args):
return [dict(zip(args, [getattr(doc, key) for key in args])) for doc in self] if args else [obj for obj in self._cursor.clone()]
def values_list(self, *args, **kwargs):
flat = kwargs.pop("flat", False)
if flat and len(args) != 1:
raise Exception("args len must be 1 when flat=True")
return self.distinct(args[0] if args[0] not in ("id", "pk") else "_id") if flat else zip(*[self.distinct(field if field not in ("id", "pk") else "_id") for field in args])
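# e.g. ``qs.values_list('name', flat=True)`` returns the distinct values of
# ``name``; note that, unlike Django's values_list, this implementation is
# backed by distinct() and therefore de-duplicates.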
@property
def _cursor(self):
if self._cursor_obj is None:
cursor_args = {}
if self._loaded_fields:
cursor_args = {'fields': self._loaded_fields}
self._cursor_obj = self._collection.find(self._query,
**cursor_args)
# Apply where clauses to cursor
if self._where_clause:
self._cursor_obj.where(self._where_clause)
# apply default ordering
# if self._document._meta['ordering']:
# self.order_by(*self._document._meta['ordering'])
return self._cursor_obj.clone()
@classmethod
def _lookup_field(cls, document, fields):
"""
Looks for "field" in "document"
"""
if isinstance(fields, (tuple, list)):
return [document._meta.get_field_by_name((field == "pk" and "id") or field)[0] for field in fields]
return document._meta.get_field_by_name((fields == "pk" and "id") or fields)[0]
@classmethod
def _translate_field_name(cls, doc_cls, field, sep='.'):
"""Translate a field attribute name to a database field name.
"""
parts = field.split(sep)
parts = [f.attname for f in QuerySet._lookup_field(doc_cls, parts)]
return '.'.join(parts)
@classmethod
def _transform_query(self, _doc_cls=None, **parameters):
"""
Converts parameters to elasticsearch queries.
"""
spec = {}
operators = ['ne', 'gt', 'gte', 'lt', 'lte', 'in', 'nin', 'mod', 'all', 'size', 'exists']
match_operators = ['contains', 'icontains', 'startswith', 'istartswith', 'endswith', 'iendswith', 'exact', 'iexact']
exclude = parameters.pop("not", False)
for key, value in parameters.items():
parts = key.split("__")
lookup_type = (len(parts)>=2) and ( parts[-1] in operators + match_operators and parts.pop()) or ""
# Let's get the right field and be sure that it exists
parts[0] = QuerySet._lookup_field(_doc_cls, parts[0]).attname
if not lookup_type and len(parts)==1:
if exclude:
value = {"$ne" : value}
spec.update({parts[0] : value})
continue
if parts[0] == "id":
parts[0] = "_id"
# convert raw ids to ObjectId instances, mirroring in_bulk()
value = [ObjectId(par) if not isinstance(par, ObjectId) else par for par in value]
if lookup_type in ['contains', 'icontains',
'startswith', 'istartswith',
'endswith', 'iendswith',
'exact', 'iexact']:
flags = 0
if lookup_type.startswith('i'):
flags = re.IGNORECASE
lookup_type = lookup_type.lstrip('i')
regex = r'%s'
if lookup_type == 'startswith':
regex = r'^%s'
elif lookup_type == 'endswith':
regex = r'%s$'
elif lookup_type == 'exact':
regex = r'^%s$'
value = re.compile(regex % value, flags)
elif lookup_type in operators:
value = { "$" + lookup_type : value}
elif lookup_type and len(parts)==1:
raise DatabaseError("Unsupported lookup type: %r" % lookup_type)
key = '.'.join(parts)
if exclude:
value = {"$ne" : value}
spec.update({key : value})
return spec
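# Transformation sketch (hypothetical ``Person`` document; values shown
# are illustrative):
#
#   QuerySet._transform_query(_doc_cls=Person,
#                             name__istartswith='te', age__gte=18)
#   # -> {'name': re.compile('^te', re.IGNORECASE), 'age': {'$gte': 18}}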
def get(self, *q_objs, **query):
"""Retrieve the the matching object raising id django is available
:class:`~django.core.exceptions.MultipleObjectsReturned` or
:class:`~django.core.exceptions.ObjectDoesNotExist` exceptions if multiple or
no results are found.
If django is not available:
:class:`~mongoengine.queryset.MultipleObjectsReturned` or
`DocumentName.MultipleObjectsReturned` exception if multiple results and
:class:`~mongoengine.queryset.DoesNotExist` or `DocumentName.DoesNotExist`
if no results are found.
.. versionadded:: 0.3
"""
self.__call__(*q_objs, **query)
count = self.count()
if count == 1:
return self[0]
elif count > 1:
message = u'%d items returned, instead of 1' % count
raise self._document.MultipleObjectsReturned(message)
else:
raise self._document.DoesNotExist("%s matching query does not exist."
% self._document._meta.object_name)
def get_or_create(self, *q_objs, **query):
"""Retrieve unique object or create, if it doesn't exist. Returns a tuple of
``(object, created)``, where ``object`` is the retrieved or created object
and ``created`` is a boolean specifying whether a new object was created. Raises
:class:`~mongoengine.queryset.MultipleObjectsReturned` or
`DocumentName.MultipleObjectsReturned` if multiple results are found.
A new document will be created if the document doesn't exist; a
dictionary of default values for the new document may be provided as a
keyword argument called :attr:`defaults`.
.. versionadded:: 0.3
"""
defaults = query.get('defaults', {})
if 'defaults' in query:
del query['defaults']
self.__call__(*q_objs, **query)
count = self.count()
if count == 0:
query.update(defaults)
doc = self._document(**query)
doc.save()
return doc, True
elif count == 1:
return self.first(), False
else:
message = u'%d items returned, instead of 1' % count
raise self._document.MultipleObjectsReturned(message)
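# Usage sketch (hypothetical ``Person`` document):
#
#   person, created = Person.objects.get_or_create(
#       name='Test', defaults={'age': 30})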
def first(self):
"""Retrieve the first object matching the query.
"""
try:
result = self[0]
except IndexError:
result = None
return result
def with_id(self, object_id):
"""Retrieve the object matching the id provided.
:param object_id: the value for the id of the document to look up
"""
id_field = self._document._meta['id_field']
object_id = self._document._fields[id_field].to_mongo(object_id)
result = self._collection.find_one({'_id': (not isinstance(object_id, ObjectId) and ObjectId(object_id)) or object_id})
if result is not None:
result = self._document(**dict_keys_to_str(result))
return result
def in_bulk(self, object_ids):
"""Retrieve a set of documents by their ids.
:param object_ids: a list or tuple of id's
:rtype: dict of ids as keys and collection-specific
Document subclasses as values.
.. versionadded:: 0.3
"""
doc_map = {}
docs = self._collection.find({'_id': {'$in': [ (not isinstance(id, ObjectId) and ObjectId(id)) or id for id in object_ids]}})
for doc in docs:
doc_map[str(doc['_id'])] = self._document(**dict_keys_to_str(doc))
return doc_map
def count(self):
"""Count the selected elements in the query.
"""
if self._limit == 0:
return 0
return self._cursor.count(with_limit_and_skip=False)
def __len__(self):
return self.count()
def map_reduce(self, map_f, reduce_f, finalize_f=None, limit=None,
scope=None, keep_temp=False):
"""Perform a map/reduce query using the current query spec
and ordering. While ``map_reduce`` respects ``QuerySet`` chaining,
it must be the last call made, as it does not return a malleable
``QuerySet``.
See the :meth:`~mongoengine.tests.QuerySetTest.test_map_reduce`
and :meth:`~mongoengine.tests.QuerySetTest.test_map_advanced`
tests in ``tests.queryset.QuerySetTest`` for usage examples.
:param map_f: map function, as :class:`~pymongo.code.Code` or string
:param reduce_f: reduce function, as
:class:`~pymongo.code.Code` or string
:param finalize_f: finalize function, an optional function that
performs any post-reduction processing.
:param scope: values to insert into map/reduce global scope. Optional.
:param limit: number of objects from current query to provide
to map/reduce method
:param keep_temp: keep the temporary table (boolean, default ``False``)
Returns an iterator yielding
:class:`~mongoengine.document.MapReduceDocument`.
.. note:: Map/Reduce requires server version **>= 1.1.1**. The PyMongo
:meth:`~pymongo.collection.Collection.map_reduce` helper requires
PyMongo version **>= 1.2**.
.. versionadded:: 0.3
"""
#from document import MapReduceDocument
if not hasattr(self._collection, "map_reduce"):
raise NotImplementedError("Requires MongoDB >= 1.1.1")
map_f_scope = {}
if isinstance(map_f, pymongo.code.Code):
map_f_scope = map_f.scope
map_f = unicode(map_f)
# map_f = pymongo.code.Code(self._sub_js_fields(map_f), map_f_scope)
map_f = pymongo.code.Code(map_f, map_f_scope)
reduce_f_scope = {}
if isinstance(reduce_f, pymongo.code.Code):
reduce_f_scope = reduce_f.scope
reduce_f = unicode(reduce_f)
# reduce_f_code = self._sub_js_fields(reduce_f)
reduce_f_code = reduce_f
reduce_f = pymongo.code.Code(reduce_f_code, reduce_f_scope)
mr_args = {'query': self._query, 'keeptemp': keep_temp}
if finalize_f:
finalize_f_scope = {}
if isinstance(finalize_f, pymongo.code.Code):
finalize_f_scope = finalize_f.scope
finalize_f = unicode(finalize_f)
# finalize_f_code = self._sub_js_fields(finalize_f)
finalize_f_code = finalize_f
finalize_f = pymongo.code.Code(finalize_f_code, finalize_f_scope)
mr_args['finalize'] = finalize_f
if scope:
mr_args['scope'] = scope
if limit:
mr_args['limit'] = limit
results = self._collection.map_reduce(map_f, reduce_f, **mr_args)
results = results.find()
if self._ordering:
results = results.sort(self._ordering)
for doc in results:
yield self._document.objects.with_id(doc['value'])
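# Usage sketch (map/reduce bodies are plain JS strings; the reduce value
# must be a document id, since results are re-fetched via with_id()):
#
#   map_f = "function() { emit(this.category, this._id); }"
#   reduce_f = "function(key, vals) { return vals[0]; }"
#   for doc in Person.objects.map_reduce(map_f, reduce_f, limit=10):
#       ...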
def limit(self, n):
"""Limit the number of returned documents to `n`. This may also be
achieved using array-slicing syntax (e.g. ``User.objects[:5]``).
:param n: the maximum number of objects to return
"""
if n == 0:
self._cursor.limit(1)
else:
self._cursor.limit(n)
self._limit = n
# Return self to allow chaining
return self
def skip(self, n):
"""Skip `n` documents before returning the results. This may also be
achieved using array-slicing syntax (e.g. ``User.objects[5:]``).
:param n: the number of objects to skip before returning results
"""
self._cursor.skip(n)
self._skip = n
return self
def __getitem__(self, key):
"""Support skip and limit using getitem and slicing syntax.
"""
# Slice provided
if isinstance(key, slice):
try:
self._cursor_obj = self._cursor[key]
self._skip, self._limit = key.start, key.stop
except IndexError, err:
# PyMongo raises an error if key.start == key.stop, catch it,
# bin it, kill it.
start = key.start or 0
if start >= 0 and key.stop >= 0 and key.step is None:
if start == key.stop:
self.limit(0)
self._skip, self._limit = key.start, key.stop - start
return self
raise err
# Allow further QuerySet modifications to be performed
return self
# Integer index provided
elif isinstance(key, int):
return self._document(**dict_keys_to_str(self._cursor[key]))
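# Slicing sketch: ``qs[:5]`` applies a limit of 5, ``qs[5:]`` a skip of 5,
# and an integer index such as ``qs[0]`` materialises a single document.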
def only(self, *fields):
"""Load only a subset of this document's fields. ::
post = BlogPost.objects(...).only("title")
:param fields: fields to include
.. versionadded:: 0.3
"""
self._loaded_fields = []
for field in fields:
if '.' in field:
raise InvalidQueryError('Subfields cannot be used as '
'arguments to QuerySet.only')
# Translate field name
field = QuerySet._lookup_field(self._document, field)[-1].db_field
self._loaded_fields.append(field)
# _cls is needed for polymorphism
if self._document._meta.get('allow_inheritance'):
self._loaded_fields += ['_cls']
return self
def order_by(self, *args):
"""Order the :class:`~mongoengine.queryset.QuerySet` by the keys. The
order may be specified by prepending each of the keys by a + or a -.
Ascending order is assumed.
:param keys: fields to order the query results by; keys may be
prefixed with **+** or **-** to determine the ordering direction
"""
self._ordering = []
for col in args:
self._ordering.append(( (col.startswith("-") and col[1:]) or col, (col.startswith("-") and -1) or 1 ))
self._cursor.sort(self._ordering)
return self
def explain(self, format=False):
"""Return an explain plan record for the
:class:`~mongoengine.queryset.QuerySet`\ 's cursor.
:param format: format the plan before returning it
"""
plan = self._cursor.explain()
if format:
import pprint
plan = pprint.pformat(plan)
return plan
def delete(self, safe=False):
"""Delete the documents matched by the query.
:param safe: check if the operation succeeded before returning
"""
self._collection.remove(self._query, safe=safe)
@classmethod
def _transform_update(cls, _doc_cls=None, **update):
"""Transform an update spec from Django-style format to Mongo format.
"""
operators = ['set', 'unset', 'inc', 'dec', 'push', 'push_all', 'pull',
'pull_all']
mongo_update = {}
for key, value in update.items():
parts = key.split('__')
# Check for an operator and transform to mongo-style if there is
op = None
if parts[0] in operators:
op = parts.pop(0)
# Convert Pythonic names to Mongo equivalents
if op in ('push_all', 'pull_all'):
op = op.replace('_all', 'All')
elif op == 'dec':
# Support decrement by flipping a positive value's sign
# and using 'inc'
op = 'inc'
if value > 0:
value = -value
if _doc_cls:
# Switch field names to proper names [set in Field(name='foo')]
fields = QuerySet._lookup_field(_doc_cls, parts)
parts = [field.db_field for field in fields]
# Convert value to proper value
field = fields[-1]
if op in (None, 'set', 'unset', 'push', 'pull'):
value = field.prepare_query_value(op, value)
elif op in ('pushAll', 'pullAll'):
value = [field.prepare_query_value(op, v) for v in value]
key = '.'.join(parts)
if op:
value = {key: value}
key = '$' + op
if op is None or key not in mongo_update:
mongo_update[key] = value
elif key in mongo_update and isinstance(mongo_update[key], dict):
mongo_update[key].update(value)
return mongo_update
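# Transformation sketch (hypothetical ``Person`` document; illustrative only):
#
#   QuerySet._transform_update(_doc_cls=Person, set__name='Test', inc__age=1)
#   # -> {'$set': {'name': 'Test'}, '$inc': {'age': 1}}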
def update(self, safe_update=True, upsert=False, **update):
"""Perform an atomic update on the fields matched by the query.
:param safe: check if the operation succeeded before returning
:param update: Django-style update keyword arguments
.. versionadded:: 0.2
"""
if pymongo.version < '1.1.1':
raise OperationError('update() method requires PyMongo 1.1.1+')
update = QuerySet._transform_update(self._document, **update)
try:
self._collection.update(self._query, update, safe=safe_update,
upsert=upsert, multi=True)
except pymongo.errors.OperationFailure, err:
if unicode(err) == u'multi not coded yet':
message = u'update() method requires MongoDB 1.1.3+'
raise OperationError(message)
raise OperationError(u'Update failed (%s)' % unicode(err))
def update_one(self, safe_update=True, upsert=False, **update):
"""Perform an atomic update on first field matched by the query.
:param safe_update: check if the operation succeeded before returning
:param update: Django-style update keyword arguments
.. versionadded:: 0.2
"""
update = QuerySet._transform_update(self._document, **update)
try:
# Explicitly provide 'multi=False' to newer versions of PyMongo
# as the default may change to 'True'
if pymongo.version >= '1.1.1':
self._collection.update(self._query, update, safe=safe_update,
upsert=upsert, multi=False)
else:
# Older versions of PyMongo don't support 'multi'
self._collection.update(self._query, update, safe=safe_update)
except pymongo.errors.OperationFailure, e:
raise OperationError(u'Update failed [%s]' % unicode(e))
def __iter__(self, *args, **kwargs):
for obj in self._cursor:
data = dict_keys_to_str(obj)
if '_id' in data:
data['id'] = data.pop('_id')
yield self._document(**data)
def _sub_js_fields(self, code):
"""When fields are specified with [~fieldname] syntax, where
*fieldname* is the Python name of a field, *fieldname* will be
substituted for the MongoDB name of the field (specified using the
:attr:`name` keyword argument in a field's constructor).
"""
def field_sub(match):
# Extract just the field name, and look up the field objects
field_name = match.group(1).split('.')
fields = QuerySet._lookup_field(self._document, field_name)
# Substitute the correct name for the field into the javascript
return u'["%s"]' % fields[-1].db_field
return re.sub(ur'\[\s*~([A-Za-z_][A-Za-z_0-9.]+?)\s*\]', field_sub, code)
def exec_js(self, code, *fields, **options):
"""
Execute a Javascript function on the server. A list of fields may be
provided, which will be translated to their correct names and supplied
as the arguments to the function. A few extra variables are added to
the function's scope: ``collection``, which is the name of the
collection in use; ``query``, which is an object representing the
current query; and ``options``, which is an object containing any
options specified as keyword arguments.
As fields in MongoEngine may use different names in the database (set
using the :attr:`db_field` keyword argument to a :class:`Field`
constructor), a mechanism exists for replacing MongoEngine field names
with the database field names in Javascript code. When accessing a
field, use square-bracket notation, and prefix the MongoEngine field
name with a tilde (~).
:param code: a string of Javascript code to execute
:param fields: fields that you will be using in your function, which
will be passed in to your function as arguments
:param options: options that you want available to the function
(accessed in Javascript through the ``options`` object)
"""
# code = self._sub_js_fields(code)
fields = [QuerySet._translate_field_name(self._document, f) for f in fields]
collection = self._collection
scope = {
'collection': collection.name,
'options': options or {},
}
query = self._query
if self._where_clause:
query['$where'] = self._where_clause
scope['query'] = query
code = pymongo.code.Code(code, scope=scope)
return collection.database.eval(code, *fields)
def sum(self, field):
"""Sum over the values of the specified field.
:param field: the field to sum over; use dot-notation to refer to
embedded document fields
"""
sum_func = """
function(sumField) {
var total = 0.0;
db[collection].find(query).forEach(function(doc) {
total += (doc[sumField] || 0.0);
});
return total;
}
"""
return self.exec_js(sum_func, field)
def average(self, field):
"""Average over the values of the specified field.
:param field: the field to average over; use dot-notation to refer to
embedded document fields
"""
average_func = """
function(averageField) {
var total = 0.0;
var num = 0;
db[collection].find(query).forEach(function(doc) {
if (doc[averageField]) {
total += doc[averageField];
num += 1;
}
});
return total / num;
}
"""
return self.exec_js(average_func, field)
def item_frequencies(self, list_field, normalize=False):
"""Returns a dictionary of all items present in a list field across
the whole queried set of documents, and their corresponding frequency.
This is useful for generating tag clouds, or searching documents.
:param list_field: the list field to use
:param normalize: normalize the results so they add to 1.0
"""
freq_func = """
function(listField) {
if (options.normalize) {
var total = 0.0;
db[collection].find(query).forEach(function(doc) {
total += doc[listField].length;
});
}
var frequencies = {};
var inc = 1.0;
if (options.normalize) {
inc /= total;
}
db[collection].find(query).forEach(function(doc) {
doc[listField].forEach(function(item) {
frequencies[item] = inc + (frequencies[item] || 0);
});
});
return frequencies;
}
"""
return self.exec_js(freq_func, list_field, normalize=normalize)
def __repr__(self):
limit = REPR_OUTPUT_SIZE + 1
if self._limit is not None and self._limit < limit:
limit = self._limit
data = list(self[self._skip:limit])
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = "...(remaining elements truncated)..."
return repr(data)
def _clone(self):
return self
class Manager(DJManager):
def __init__(self, manager_func=None):
super(Manager, self).__init__()
self._manager_func = manager_func
self._collection = None
def contribute_to_class(self, model, name):
# TODO: Use weakref because of possible memory leak / circular reference.
self.model = model
# setattr(model, name, ManagerDescriptor(self))
if model._meta.abstract or (self._inherited and not self.model._meta.proxy):
model._meta.abstract_managers.append((self.creation_counter, name,
self))
else:
model._meta.concrete_managers.append((self.creation_counter, name,
self))
def __get__(self, instance, owner):
"""Descriptor for instantiating a new QuerySet object when
Document.objects is accessed.
"""
self.model = owner  # we need to set the model to get the db
if instance is not None:
# Document object being used rather than a document class
return self
if self._collection is None:
self._collection = connections[self.db].db_connection[owner._meta.db_table]
# owner is the document that contains the QuerySetManager
queryset = QuerySet(owner, self._collection)
if self._manager_func:
if self._manager_func.func_code.co_argcount == 1:
queryset = self._manager_func(queryset)
else:
queryset = self._manager_func(owner, queryset)
return queryset
def queryset_manager(func):
"""Decorator that allows you to define custom QuerySet managers on
:class:`~mongoengine.Document` classes. The manager must be a function that
accepts a :class:`~mongoengine.Document` class as its first argument, and a
:class:`~mongoengine.queryset.QuerySet` as its second argument. The
function should return a :class:`~mongoengine.queryset.QuerySet`, usually
the one that was passed in, modified in some way.
"""
if func.func_code.co_argcount == 1:
import warnings
msg = 'Methods decorated with queryset_manager should take 2 arguments'
warnings.warn(msg, DeprecationWarning)
return QuerySetManager(func)
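# A hedged usage sketch (the 'BlogPost' document and its 'published' field
# are hypothetical; the queryset is assumed callable with query keywords,
# as in early MongoEngine):
#
#     class BlogPost(Document):
#         published = BooleanField()
#
#         @queryset_manager
#         def live_posts(doc_cls, queryset):
#             return queryset(published=True)
#
#     BlogPost.live_posts  # a QuerySet restricted to published posts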
|
{
"content_hash": "8cf50610d535017bbf30db3a23d58478",
"timestamp": "",
"source": "github",
"line_count": 905,
"max_line_length": 180,
"avg_line_length": 38.37237569060773,
"alnum_prop": 0.5530566993981628,
"repo_name": "aparo/django-elasticsearch",
"id": "78d2e13e2a371a9e7f6e5bf367303c45006f8e78",
"size": "34727",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_elasticsearch/manager.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "94880"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class OperationDisplay(Model):
"""The object that represents the operation.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar provider: Service provider: Microsoft.Billing.
:vartype provider: str
:ivar resource: Resource on which the operation is performed: Invoice,
etc.
:vartype resource: str
:ivar operation: Operation type: Read, write, delete, etc.
:vartype operation: str
"""
_validation = {
'provider': {'readonly': True},
'resource': {'readonly': True},
'operation': {'readonly': True},
}
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
}
def __init__(self):
super(OperationDisplay, self).__init__()
self.provider = None
self.resource = None
self.operation = None
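# A minimal deserialization sketch (payload values are illustrative; in
# practice msrest populates these read-only attributes from a server
# response):
#
#     from msrest.serialization import Deserializer
#     deserialize = Deserializer({'OperationDisplay': OperationDisplay})
#     display = deserialize('OperationDisplay', {
#         'provider': 'Microsoft.Billing',
#         'resource': 'Invoice',
#         'operation': 'Read',
#     })
#     assert display.provider == 'Microsoft.Billing'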
|
{
"content_hash": "559e35714f8aae2d4a26cc90b2a9e631",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 74,
"avg_line_length": 29.257142857142856,
"alnum_prop": 0.60546875,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "0dcad2d20d02e381f150e5c62b3ac354602314cd",
"size": "1498",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-billing/azure/mgmt/billing/models/operation_display.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
}
|
import json
import pkgutil
import unittest2
import openerp.modules.registry
import openerp
from openerp.tests import common
from openerp.tools.misc import mute_logger
def message(msg, type='error', from_=0, to_=0, record=0, field='value', **kwargs):
return dict(kwargs,
type=type, rows={'from': from_, 'to': to_}, record=record,
field=field, message=msg)
def moreaction(**kwargs):
return dict(kwargs,
type='ir.actions.act_window',
target='new',
view_mode='tree,form',
view_type='form',
views=[(False, 'tree'), (False, 'form')],
help=u"See all possible values")
def values(seq, field='value'):
return [item[field] for item in seq]
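# For illustration, message(u"oops") evaluates to:
#   {'type': 'error', 'rows': {'from': 0, 'to': 0}, 'record': 0,
#    'field': 'value', 'message': u"oops"}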
class ImporterCase(common.TransactionCase):
model_name = False
def __init__(self, *args, **kwargs):
super(ImporterCase, self).__init__(*args, **kwargs)
self.model = None
def setUp(self):
super(ImporterCase, self).setUp()
self.model = self.registry(self.model_name)
self.registry('ir.model.data').clear_caches()
def import_(self, fields, rows, context=None):
return self.model.load(
self.cr, openerp.SUPERUSER_ID, fields, rows, context=context)
def read(self, fields=('value',), domain=(), context=None):
return self.model.read(
self.cr, openerp.SUPERUSER_ID,
self.model.search(self.cr, openerp.SUPERUSER_ID, domain, context=context),
fields=fields, context=context)
def browse(self, domain=(), context=None):
return self.model.browse(
self.cr, openerp.SUPERUSER_ID,
self.model.search(self.cr, openerp.SUPERUSER_ID, domain, context=context),
context=context)
def xid(self, record):
ModelData = self.registry('ir.model.data')
ids = ModelData.search(
self.cr, openerp.SUPERUSER_ID,
[('model', '=', record._table_name), ('res_id', '=', record.id)])
if ids:
d = ModelData.read(
self.cr, openerp.SUPERUSER_ID, ids, ['name', 'module'])[0]
if d['module']:
return '%s.%s' % (d['module'], d['name'])
return d['name']
name = dict(record.name_get())[record.id]
# fix dotted name_get results, otherwise xid lookups blow up
name = name.replace('.', '-')
ModelData.create(self.cr, openerp.SUPERUSER_ID, {
'name': name,
'model': record._table_name,
'res_id': record.id,
'module': '__test__'
})
return '__test__.' + name
def add_translations(self, name, type, code, *tnx):
Lang = self.registry('res.lang')
if not Lang.search(self.cr, openerp.SUPERUSER_ID, [('code', '=', code)]):
Lang.create(self.cr, openerp.SUPERUSER_ID, {
'name': code,
'code': code,
'translatable': True,
'date_format': '%d.%m.%Y',
'decimal_point': ',',
})
Translations = self.registry('ir.translation')
for source, value in tnx:
Translations.create(self.cr, openerp.SUPERUSER_ID, {
'name': name,
'lang': code,
'type': type,
'src': source,
'value': value,
'state': 'translated',
})
class test_ids_stuff(ImporterCase):
model_name = 'export.integer'
def test_create_with_id(self):
result = self.import_(['.id', 'value'], [['42', '36']])
self.assertIs(result['ids'], False)
self.assertEqual(result['messages'], [{
'type': 'error',
'rows': {'from': 0, 'to': 0},
'record': 0,
'field': '.id',
'message': u"Unknown database identifier '42'",
}])
def test_create_with_xid(self):
result = self.import_(['id', 'value'], [['somexmlid', '42']])
self.assertEqual(len(result['ids']), 1)
self.assertFalse(result['messages'])
self.assertEqual(
'somexmlid',
self.xid(self.browse()[0]))
def test_update_with_id(self):
id = self.model.create(self.cr, openerp.SUPERUSER_ID, {'value': 36})
self.assertEqual(
36,
self.model.browse(self.cr, openerp.SUPERUSER_ID, id).value)
result = self.import_(['.id', 'value'], [[str(id), '42']])
self.assertEqual(len(result['ids']), 1)
self.assertFalse(result['messages'])
self.assertEqual(
[42], # updated value to imported
values(self.read()))
def test_update_with_xid(self):
self.import_(['id', 'value'], [['somexmlid', '36']])
self.assertEqual([36], values(self.read()))
self.import_(['id', 'value'], [['somexmlid', '1234567']])
self.assertEqual([1234567], values(self.read()))
class test_boolean_field(ImporterCase):
model_name = 'export.boolean'
def test_empty(self):
self.assertEqual(
self.import_(['value'], []),
{'ids': [], 'messages': []})
def test_exported(self):
result = self.import_(['value'], [['False'], ['True'], ])
self.assertEqual(len(result['ids']), 2)
self.assertFalse(result['messages'])
records = self.read()
self.assertEqual([
False,
True,
], values(records))
def test_falses(self):
for lang, source, value in [('fr_FR', 'no', u'non'),
('de_DE', 'no', u'nein'),
('ru_RU', 'no', u'нет'),
('nl_BE', 'false', u'vals'),
('lt_LT', 'false', u'klaidingas')]:
self.add_translations('test_import.py', 'code', lang, (source, value))
falses = [[u'0'], [u'no'], [u'false'], [u'FALSE'], [u''],
[u'non'], # no, fr
[u'nein'], # no, de
[u'нет'], # no, ru
[u'vals'], # false, nl
[u'klaidingas'], # false, lt,
]
result = self.import_(['value'], falses)
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), len(falses))
self.assertEqual([False] * len(falses), values(self.read()))
def test_trues(self):
trues = [['None'], ['nil'], ['()'], ['f'], ['#f'],
# Problem: OpenOffice (and probably excel) output localized booleans
['VRAI'], ['ok'], ['true'], ['yes'], ['1'], ]
result = self.import_(['value'], trues)
self.assertEqual(len(result['ids']), 10)
self.assertEqual(result['messages'], [
message(u"Unknown value '%s' for boolean field 'unknown', assuming 'yes'" % v[0],
moreinfo=u"Use '1' for yes and '0' for no",
type='warning', from_=i, to_=i, record=i)
for i, v in enumerate(trues)
if v[0] not in ('true', 'yes', '1')
])
self.assertEqual(
[True] * 10,
values(self.read()))
class test_integer_field(ImporterCase):
model_name = 'export.integer'
def test_none(self):
self.assertEqual(
self.import_(['value'], []),
{'ids': [], 'messages': []})
def test_empty(self):
result = self.import_(['value'], [['']])
self.assertEqual(len(result['ids']), 1)
self.assertFalse(result['messages'])
self.assertEqual(
[False],
values(self.read()))
def test_zero(self):
result = self.import_(['value'], [['0']])
self.assertEqual(len(result['ids']), 1)
self.assertFalse(result['messages'])
result = self.import_(['value'], [['-0']])
self.assertEqual(len(result['ids']), 1)
self.assertFalse(result['messages'])
self.assertEqual([False, False], values(self.read()))
def test_positives(self):
result = self.import_(['value'], [
['1'],
['42'],
[str(2**31-1)],
['12345678']
])
self.assertEqual(len(result['ids']), 4)
self.assertFalse(result['messages'])
self.assertEqual([
1, 42, 2**31-1, 12345678
], values(self.read()))
def test_negatives(self):
result = self.import_(['value'], [
['-1'],
['-42'],
[str(-(2**31 - 1))],
[str(-(2**31))],
['-12345678']
])
self.assertEqual(len(result['ids']), 5)
self.assertFalse(result['messages'])
self.assertEqual([
-1, -42, -(2**31 - 1), -(2**31), -12345678
], values(self.read()))
@mute_logger('openerp.sql_db', 'openerp.osv.orm')
def test_out_of_range(self):
result = self.import_(['value'], [[str(2**31)]])
self.assertIs(result['ids'], False)
self.assertEqual(result['messages'], [{
'type': 'error',
'rows': {'from': 0, 'to': 0},
'record': 0,
'message': "integer out of range\n"
}])
result = self.import_(['value'], [[str(-2**32)]])
self.assertIs(result['ids'], False)
self.assertEqual(result['messages'], [{
'type': 'error',
'rows': {'from': 0, 'to': 0},
'record': 0,
'message': "integer out of range\n"
}])
def test_nonsense(self):
result = self.import_(['value'], [['zorglub']])
self.assertIs(result['ids'], False)
self.assertEqual(result['messages'], [{
'type': 'error',
'rows': {'from': 0, 'to': 0},
'record': 0,
'field': 'value',
'message': u"'zorglub' does not seem to be an integer for field 'unknown'",
}])
class test_float_field(ImporterCase):
model_name = 'export.float'
def test_none(self):
self.assertEqual(
self.import_(['value'], []),
{'ids': [], 'messages': []})
def test_empty(self):
result = self.import_(['value'], [['']])
self.assertEqual(len(result['ids']), 1)
self.assertFalse(result['messages'])
self.assertEqual(
[False],
values(self.read()))
def test_zero(self):
result = self.import_(['value'], [['0']])
self.assertEqual(len(result['ids']), 1)
self.assertFalse(result['messages'])
result = self.import_(['value'], [['-0']])
self.assertEqual(len(result['ids']), 1)
self.assertFalse(result['messages'])
self.assertEqual([False, False], values(self.read()))
def test_positives(self):
result = self.import_(['value'], [
['1'],
['42'],
[str(2**31-1)],
['12345678'],
[str(2**33)],
['0.000001'],
])
self.assertEqual(len(result['ids']), 6)
self.assertFalse(result['messages'])
self.assertEqual([
1, 42, 2**31-1, 12345678, 2.0**33, .000001
], values(self.read()))
def test_negatives(self):
result = self.import_(['value'], [
['-1'],
['-42'],
[str(-2**31 + 1)],
[str(-2**31)],
['-12345678'],
[str(-2**33)],
['-0.000001'],
])
self.assertEqual(len(result['ids']), 7)
self.assertFalse(result['messages'])
self.assertEqual([
-1, -42, -(2**31 - 1), -(2**31), -12345678, -2.0**33, -.000001
], values(self.read()))
def test_nonsense(self):
result = self.import_(['value'], [['foobar']])
self.assertIs(result['ids'], False)
self.assertEqual(result['messages'], [
message(u"'foobar' does not seem to be a number for field 'unknown'")])
class test_string_field(ImporterCase):
model_name = 'export.string.bounded'
def test_empty(self):
result = self.import_(['value'], [['']])
self.assertEqual(len(result['ids']), 1)
self.assertFalse(result['messages'])
self.assertEqual([False], values(self.read()))
def test_imported(self):
result = self.import_(['value'], [
[u'foobar'],
[u'foobarbaz'],
[u'Með suð í eyrum við spilum endalaust'],
[u"People 'get' types. They use them all the time. Telling "
u"someone he can't pound a nail with a banana doesn't much "
u"surprise him."]
])
self.assertEqual(len(result['ids']), 4)
self.assertFalse(result['messages'])
self.assertEqual([
u"foobar",
u"foobarbaz",
u"Með suð í eyrum ",
u"People 'get' typ",
], values(self.read()))
class test_unbound_string_field(ImporterCase):
model_name = 'export.string'
def test_imported(self):
result = self.import_(['value'], [
[u'í dag viðrar vel til loftárása'],
# ackbar.jpg
[u"If they ask you about fun, you tell them – fun is a filthy"
u" parasite"]
])
self.assertEqual(len(result['ids']), 2)
self.assertFalse(result['messages'])
self.assertEqual([
u"í dag viðrar vel til loftárása",
u"If they ask you about fun, you tell them – fun is a filthy parasite"
], values(self.read()))
class test_required_string_field(ImporterCase):
model_name = 'export.string.required'
@mute_logger('openerp.sql_db', 'openerp.osv.orm')
def test_empty(self):
result = self.import_(['value'], [[]])
self.assertEqual(result['messages'], [message(
u"Missing required value for the field 'value'. This might be "
u"'unknown' in the current model, or a field of the same name in "
u"an o2m.")])
self.assertIs(result['ids'], False)
@mute_logger('openerp.sql_db', 'openerp.osv.orm')
def test_not_provided(self):
result = self.import_(['const'], [['12']])
self.assertEqual(result['messages'], [message(
u"Missing required value for the field 'value'. This might be "
u"'unknown' in the current model, or a field of the same name in "
u"an o2m.")])
self.assertIs(result['ids'], False)
class test_text(ImporterCase):
model_name = 'export.text'
def test_empty(self):
result = self.import_(['value'], [['']])
self.assertEqual(len(result['ids']), 1)
self.assertFalse(result['messages'])
self.assertEqual([False], values(self.read()))
def test_imported(self):
s = (u"Breiðskífa er notað um útgefna hljómplötu sem inniheldur "
u"stúdíóupptökur frá einum flytjanda. Breiðskífur eru oftast "
u"milli 25-80 mínútur og er lengd þeirra oft miðuð við 33⅓ "
u"snúninga 12 tommu vínylplötur (sem geta verið allt að 30 mín "
u"hvor hlið).\n\nBreiðskífur eru stundum tvöfaldar og eru þær þá"
u" gefnar út á tveimur geisladiskum eða tveimur vínylplötum.")
result = self.import_(['value'], [[s]])
self.assertEqual(len(result['ids']), 1)
self.assertFalse(result['messages'])
self.assertEqual([s], values(self.read()))
class test_selection(ImporterCase):
model_name = 'export.selection'
translations_fr = [
("Foo", "tete"),
("Bar", "titi"),
("Qux", "toto"),
]
def test_imported(self):
result = self.import_(['value'], [
['Qux'],
['Bar'],
['Foo'],
['2'],
])
self.assertEqual(len(result['ids']), 4)
self.assertFalse(result['messages'])
self.assertEqual([3, 2, 1, 2], values(self.read()))
def test_imported_translated(self):
self.add_translations(
'export.selection,value', 'selection', 'fr_FR', *self.translations_fr)
result = self.import_(['value'], [
['toto'],
['tete'],
['titi'],
], context={'lang': 'fr_FR'})
self.assertEqual(len(result['ids']), 3)
self.assertFalse(result['messages'])
self.assertEqual([3, 1, 2], values(self.read()))
result = self.import_(['value'], [['Foo']], context={'lang': 'fr_FR'})
self.assertEqual(len(result['ids']), 1)
self.assertFalse(result['messages'])
def test_invalid(self):
result = self.import_(['value'], [['Baz']])
self.assertIs(result['ids'], False)
self.assertEqual(result['messages'], [message(
u"Value 'Baz' not found in selection field 'unknown'",
moreinfo="Foo Bar Qux 4".split())])
result = self.import_(['value'], [[42]])
self.assertIs(result['ids'], False)
self.assertEqual(result['messages'], [message(
u"Value '42' not found in selection field 'unknown'",
moreinfo="Foo Bar Qux 4".split())])
class test_selection_with_default(ImporterCase):
model_name = 'export.selection.withdefault'
def test_empty(self):
""" Empty cells should set corresponding field to False
"""
result = self.import_(['value'], [['']])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
self.assertEqual(
values(self.read()),
[False])
def test_default(self):
""" Non-provided cells should set corresponding field to default
"""
result = self.import_(['const'], [['42']])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
self.assertEqual(
values(self.read()),
[2])
class test_selection_function(ImporterCase):
model_name = 'export.selection.function'
translations_fr = [
("Corge", "toto"),
("Grault", "titi"),
("Wheee", "tete"),
("Moog", "tutu"),
]
def test_imported(self):
""" import uses fields_get, so translates import label (may or may not
be good news) *and* serializes the selection function to reverse it:
import does not actually know that the selection field uses a function
"""
# NOTE: conflict between a value and a label => pick first
result = self.import_(['value'], [
['3'],
["Grault"],
])
self.assertEqual(len(result['ids']), 2)
self.assertFalse(result['messages'])
self.assertEqual(
['3', '1'],
values(self.read()))
def test_translated(self):
""" Expects output of selection function returns translated labels
"""
self.add_translations(
'export.selection,value', 'selection', 'fr_FR', *self.translations_fr)
result = self.import_(['value'], [
['titi'],
['tete'],
], context={'lang': 'fr_FR'})
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 2)
self.assertEqual(values(self.read()), ['1', '2'])
result = self.import_(['value'], [['Wheee']], context={'lang': 'fr_FR'})
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
class test_m2o(ImporterCase):
model_name = 'export.many2one'
def test_by_name(self):
# create integer objects
integer_id1 = self.registry('export.integer').create(
self.cr, openerp.SUPERUSER_ID, {'value': 42})
integer_id2 = self.registry('export.integer').create(
self.cr, openerp.SUPERUSER_ID, {'value': 36})
# get its name
name1 = dict(self.registry('export.integer').name_get(
self.cr, openerp.SUPERUSER_ID,[integer_id1]))[integer_id1]
name2 = dict(self.registry('export.integer').name_get(
self.cr, openerp.SUPERUSER_ID,[integer_id2]))[integer_id2]
result = self.import_(['value'], [
# import by name_get
[name1],
[name1],
[name2],
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 3)
# correct ids assigned to corresponding records
self.assertEqual([
(integer_id1, name1),
(integer_id1, name1),
(integer_id2, name2),],
values(self.read()))
def test_by_xid(self):
ExportInteger = self.registry('export.integer')
integer_id = ExportInteger.create(
self.cr, openerp.SUPERUSER_ID, {'value': 42})
xid = self.xid(ExportInteger.browse(
self.cr, openerp.SUPERUSER_ID, [integer_id])[0])
result = self.import_(['value/id'], [[xid]])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
b = self.browse()
self.assertEqual(42, b[0].value.value)
def test_by_id(self):
integer_id = self.registry('export.integer').create(
self.cr, openerp.SUPERUSER_ID, {'value': 42})
result = self.import_(['value/.id'], [[integer_id]])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
b = self.browse()
self.assertEqual(42, b[0].value.value)
def test_by_names(self):
integer_id1 = self.registry('export.integer').create(
self.cr, openerp.SUPERUSER_ID, {'value': 42})
integer_id2 = self.registry('export.integer').create(
self.cr, openerp.SUPERUSER_ID, {'value': 42})
name1 = dict(self.registry('export.integer').name_get(
self.cr, openerp.SUPERUSER_ID,[integer_id1]))[integer_id1]
name2 = dict(self.registry('export.integer').name_get(
self.cr, openerp.SUPERUSER_ID,[integer_id2]))[integer_id2]
# names should be the same
self.assertEqual(name1, name2)
result = self.import_(['value'], [[name2]])
self.assertEqual(
result['messages'],
[message(u"Found multiple matches for field 'unknown' (2 matches)",
type='warning')])
self.assertEqual(len(result['ids']), 1)
self.assertEqual([
(integer_id1, name1)
], values(self.read()))
def test_fail_by_implicit_id(self):
""" Can't implicitly import records by id
"""
# create integer objects
integer_id1 = self.registry('export.integer').create(
self.cr, openerp.SUPERUSER_ID, {'value': 42})
integer_id2 = self.registry('export.integer').create(
self.cr, openerp.SUPERUSER_ID, {'value': 36})
# name_search is applied to every value, with no fallback to id lookup
result = self.import_(['value'], [
# import by id, without specifying it
[integer_id1],
[integer_id2],
[integer_id1],
])
self.assertEqual(result['messages'], [
message(u"No matching record found for name '%s' in field 'unknown'" % id,
from_=index, to_=index, record=index,
moreinfo=moreaction(res_model='export.integer'))
for index, id in enumerate([integer_id1, integer_id2, integer_id1])])
self.assertIs(result['ids'], False)
@mute_logger('openerp.sql_db')
def test_fail_id_mistype(self):
result = self.import_(['value/.id'], [["foo"]])
self.assertEqual(result['messages'], [
message(u"Invalid database id 'foo' for the field 'unknown'",
moreinfo=moreaction(res_model='ir.model.data',
domain=[('model','=','export.integer')]))
])
self.assertIs(result['ids'], False)
def test_sub_field(self):
""" Does not implicitly create the record, does not warn that you can't
import m2o subfields (at all)...
"""
result = self.import_(['value/value'], [['42']])
self.assertEqual(result['messages'], [
message(u"Can not create Many-To-One records indirectly, import "
u"the field separately")])
self.assertIs(result['ids'], False)
def test_fail_noids(self):
result = self.import_(['value'], [['nameisnoexist:3']])
self.assertEqual(result['messages'], [message(
u"No matching record found for name 'nameisnoexist:3' "
u"in field 'unknown'", moreinfo=moreaction(
res_model='export.integer'))])
self.assertIs(result['ids'], False)
result = self.import_(['value/id'], [['noxidhere']])
self.assertEqual(result['messages'], [message(
u"No matching record found for external id 'noxidhere' "
u"in field 'unknown'", moreinfo=moreaction(
res_model='ir.model.data', domain=[('model','=','export.integer')]))])
self.assertIs(result['ids'], False)
result = self.import_(['value/.id'], [['66']])
self.assertEqual(result['messages'], [message(
u"No matching record found for database id '66' "
u"in field 'unknown'", moreinfo=moreaction(
res_model='ir.model.data', domain=[('model','=','export.integer')]))])
self.assertIs(result['ids'], False)
def test_fail_multiple(self):
result = self.import_(
['value', 'value/id'],
[['somename', 'somexid']])
self.assertEqual(result['messages'], [message(
u"Ambiguous specification for field 'unknown', only provide one of "
u"name, external id or database id")])
self.assertIs(result['ids'], False)
class test_m2m(ImporterCase):
model_name = 'export.many2many'
# The only input format that works is a csv_internal_sep-separated list
# of database ids, external ids, or names (for m2m/.id, m2m/id and
# m2m[/anything else] respectively)
def test_ids(self):
id1 = self.registry('export.many2many.other').create(
self.cr, openerp.SUPERUSER_ID, {'value': 3, 'str': 'record0'})
id2 = self.registry('export.many2many.other').create(
self.cr, openerp.SUPERUSER_ID, {'value': 44, 'str': 'record1'})
id3 = self.registry('export.many2many.other').create(
self.cr, openerp.SUPERUSER_ID, {'value': 84, 'str': 'record2'})
id4 = self.registry('export.many2many.other').create(
self.cr, openerp.SUPERUSER_ID, {'value': 9, 'str': 'record3'})
id5 = self.registry('export.many2many.other').create(
self.cr, openerp.SUPERUSER_ID, {'value': 99, 'str': 'record4'})
result = self.import_(['value/.id'], [
['%d,%d' % (id1, id2)],
['%d,%d,%d' % (id1, id3, id4)],
['%d,%d,%d' % (id1, id2, id3)],
['%d' % id5]
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 4)
ids = lambda records: [record.id for record in records]
b = self.browse()
self.assertEqual(ids(b[0].value), [id1, id2])
self.assertEqual(values(b[0].value), [3, 44])
self.assertEqual(ids(b[2].value), [id1, id2, id3])
self.assertEqual(values(b[2].value), [3, 44, 84])
def test_noids(self):
result = self.import_(['value/.id'], [['42']])
self.assertEqual(result['messages'], [message(
u"No matching record found for database id '42' in field "
u"'unknown'", moreinfo=moreaction(
res_model='ir.model.data', domain=[('model','=','export.many2many.other')]))])
self.assertIs(result['ids'], False)
def test_xids(self):
M2O_o = self.registry('export.many2many.other')
id1 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 3, 'str': 'record0'})
id2 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 44, 'str': 'record1'})
id3 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 84, 'str': 'record2'})
id4 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 9, 'str': 'record3'})
records = M2O_o.browse(self.cr, openerp.SUPERUSER_ID, [id1, id2, id3, id4])
result = self.import_(['value/id'], [
['%s,%s' % (self.xid(records[0]), self.xid(records[1]))],
['%s' % self.xid(records[3])],
['%s,%s' % (self.xid(records[2]), self.xid(records[1]))],
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 3)
b = self.browse()
self.assertEqual(values(b[0].value), [3, 44])
self.assertEqual(values(b[2].value), [44, 84])
def test_noxids(self):
result = self.import_(['value/id'], [['noxidforthat']])
self.assertEqual(result['messages'], [message(
u"No matching record found for external id 'noxidforthat' in field"
u" 'unknown'", moreinfo=moreaction(
res_model='ir.model.data', domain=[('model','=','export.many2many.other')]))])
self.assertIs(result['ids'], False)
def test_names(self):
M2O_o = self.registry('export.many2many.other')
id1 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 3, 'str': 'record0'})
id2 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 44, 'str': 'record1'})
id3 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 84, 'str': 'record2'})
id4 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 9, 'str': 'record3'})
records = M2O_o.browse(self.cr, openerp.SUPERUSER_ID, [id1, id2, id3, id4])
name = lambda record: dict(record.name_get())[record.id]
result = self.import_(['value'], [
['%s,%s' % (name(records[1]), name(records[2]))],
['%s,%s,%s' % (name(records[0]), name(records[1]), name(records[2]))],
['%s,%s' % (name(records[0]), name(records[3]))],
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 3)
b = self.browse()
self.assertEqual(values(b[1].value), [3, 44, 84])
self.assertEqual(values(b[2].value), [3, 9])
def test_nonames(self):
result = self.import_(['value'], [['wherethem2mhavenonames']])
self.assertEqual(result['messages'], [message(
u"No matching record found for name 'wherethem2mhavenonames' in "
u"field 'unknown'", moreinfo=moreaction(
res_model='export.many2many.other'))])
self.assertIs(result['ids'], False)
def test_import_to_existing(self):
M2O_o = self.registry('export.many2many.other')
id1 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 3, 'str': 'record0'})
id2 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 44, 'str': 'record1'})
id3 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 84, 'str': 'record2'})
id4 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 9, 'str': 'record3'})
xid = 'myxid'
result = self.import_(['id', 'value/.id'], [[xid, '%d,%d' % (id1, id2)]])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
result = self.import_(['id', 'value/.id'], [[xid, '%d,%d' % (id3, id4)]])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
b = self.browse()
self.assertEqual(len(b), 1)
# TODO: replacement of existing m2m values is correct?
self.assertEqual(values(b[0].value), [84, 9])
class test_o2m(ImporterCase):
model_name = 'export.one2many'
def test_name_get(self):
s = u'Java is a DSL for taking large XML files and converting them ' \
u'to stack traces'
result = self.import_(
['const', 'value'],
[['5', s]])
self.assertEqual(result['messages'], [message(
u"No matching record found for name '%s' in field 'unknown'" % s,
moreinfo=moreaction(res_model='export.one2many.child'))])
self.assertIs(result['ids'], False)
def test_single(self):
result = self.import_(['const', 'value/value'], [
['5', '63']
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
(b,) = self.browse()
self.assertEqual(b.const, 5)
self.assertEqual(values(b.value), [63])
def test_multicore(self):
result = self.import_(['const', 'value/value'], [
['5', '63'],
['6', '64'],
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 2)
b1, b2 = self.browse()
self.assertEqual(b1.const, 5)
self.assertEqual(values(b1.value), [63])
self.assertEqual(b2.const, 6)
self.assertEqual(values(b2.value), [64])
def test_multisub(self):
result = self.import_(['const', 'value/value'], [
['5', '63'],
['', '64'],
['', '65'],
['', '66'],
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
(b,) = self.browse()
self.assertEqual(values(b.value), [63, 64, 65, 66])
def test_multi_subfields(self):
result = self.import_(['value/str', 'const', 'value/value'], [
['this', '5', '63'],
['is', '', '64'],
['the', '', '65'],
['rhythm', '', '66'],
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
(b,) = self.browse()
self.assertEqual(values(b.value), [63, 64, 65, 66])
self.assertEqual(
values(b.value, 'str'),
'this is the rhythm'.split())
def test_link_inline(self):
""" m2m-style specification for o2ms
"""
id1 = self.registry('export.one2many.child').create(self.cr, openerp.SUPERUSER_ID, {
'str': 'Bf', 'value': 109
})
id2 = self.registry('export.one2many.child').create(self.cr, openerp.SUPERUSER_ID, {
'str': 'Me', 'value': 262
})
result = self.import_(['const', 'value/.id'], [
['42', '%d,%d' % (id1, id2)]
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
[b] = self.browse()
self.assertEqual(b.const, 42)
# automatically forces link between core record and o2ms
self.assertEqual(values(b.value), [109, 262])
self.assertEqual(values(b.value, field='parent_id'), [b, b])
def test_link(self):
""" O2M relating to an existing record (update) force a LINK_TO as well
"""
O2M = self.registry('export.one2many.child')
id1 = O2M.create(self.cr, openerp.SUPERUSER_ID, {
'str': 'Bf', 'value': 109
})
id2 = O2M.create(self.cr, openerp.SUPERUSER_ID, {
'str': 'Me', 'value': 262
})
result = self.import_(['const', 'value/.id'], [
['42', str(id1)],
['', str(id2)],
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
[b] = self.browse()
self.assertEqual(b.const, 42)
# automatically forces link between core record and o2ms
self.assertEqual(values(b.value), [109, 262])
self.assertEqual(values(b.value, field='parent_id'), [b, b])
def test_link_2(self):
O2M_c = self.registry('export.one2many.child')
id1 = O2M_c.create(self.cr, openerp.SUPERUSER_ID, {
'str': 'Bf', 'value': 109
})
id2 = O2M_c.create(self.cr, openerp.SUPERUSER_ID, {
'str': 'Me', 'value': 262
})
result = self.import_(['const', 'value/.id', 'value/value'], [
['42', str(id1), '1'],
['', str(id2), '2'],
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
[b] = self.browse()
self.assertEqual(b.const, 42)
self.assertEqual(values(b.value), [1, 2])
self.assertEqual(values(b.value, field='parent_id'), [b, b])
class test_o2m_multiple(ImporterCase):
model_name = 'export.one2many.multiple'
def test_multi_mixed(self):
result = self.import_(['const', 'child1/value', 'child2/value'], [
['5', '11', '21'],
['', '12', '22'],
['', '13', '23'],
['', '14', ''],
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
[b] = self.browse()
self.assertEqual(values(b.child1), [11, 12, 13, 14])
self.assertEqual(values(b.child2), [21, 22, 23])
def test_multi(self):
result = self.import_(['const', 'child1/value', 'child2/value'], [
['5', '11', '21'],
['', '12', ''],
['', '13', ''],
['', '14', ''],
['', '', '22'],
['', '', '23'],
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
[b] = self.browse()
self.assertEqual(values(b.child1), [11, 12, 13, 14])
self.assertEqual(values(b.child2), [21, 22, 23])
def test_multi_fullsplit(self):
result = self.import_(['const', 'child1/value', 'child2/value'], [
['5', '11', ''],
['', '12', ''],
['', '13', ''],
['', '14', ''],
['', '', '21'],
['', '', '22'],
['', '', '23'],
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
[b] = self.browse()
self.assertEqual(b.const, 5)
self.assertEqual(values(b.child1), [11, 12, 13, 14])
self.assertEqual(values(b.child2), [21, 22, 23])
class test_realworld(common.TransactionCase):
def test_bigfile(self):
data = json.loads(pkgutil.get_data(self.__module__, 'contacts_big.json'))
result = self.registry('res.partner').load(
self.cr, openerp.SUPERUSER_ID,
['name', 'mobile', 'email', 'image'],
data)
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), len(data))
def test_backlink(self):
data = json.loads(pkgutil.get_data(self.__module__, 'contacts.json'))
result = self.registry('res.partner').load(
self.cr, openerp.SUPERUSER_ID,
["name", "type", "street", "city", "country_id", "category_id",
"supplier", "customer", "is_company", "parent_id"],
data)
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), len(data))
def test_recursive_o2m(self):
""" The content of the o2m field's dict needs to go through conversion
as it may be composed of convertables or other relational fields
"""
self.registry('ir.model.data').clear_caches()
Model = self.registry('export.one2many.recursive')
result = Model.load(self.cr, openerp.SUPERUSER_ID,
['value', 'child/const', 'child/child1/str', 'child/child2/value'],
[
['4', '42', 'foo', '55'],
['', '43', 'bar', '56'],
['', '', 'baz', ''],
['', '55', 'qux', '57'],
['5', '99', 'wheee', ''],
['', '98', '', '12'],
],
context=None)
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 2)
b = Model.browse(self.cr, openerp.SUPERUSER_ID, result['ids'], context=None)
self.assertEqual((b[0].value, b[1].value), (4, 5))
self.assertEqual([child.str for child in b[0].child[1].child1],
['bar', 'baz'])
self.assertFalse(len(b[1].child[1].child1))
self.assertEqual([child.value for child in b[1].child[1].child2],
[12])
class test_date(ImporterCase):
model_name = 'export.date'
def test_empty(self):
self.assertEqual(
self.import_(['value'], []),
{'ids': [], 'messages': []})
def test_basic(self):
result = self.import_(['value'], [['2012-02-03']])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
def test_invalid(self):
result = self.import_(['value'], [['not really a date']])
self.assertEqual(result['messages'], [
message(u"'not really a date' does not seem to be a valid date "
u"for field 'unknown'",
moreinfo=u"Use the format '2012-12-31'")])
self.assertIs(result['ids'], False)
class test_datetime(ImporterCase):
model_name = 'export.datetime'
def test_empty(self):
self.assertEqual(
self.import_(['value'], []),
{'ids': [], 'messages': []})
def test_basic(self):
result = self.import_(['value'], [['2012-02-03 11:11:11']])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
def test_invalid(self):
result = self.import_(['value'], [['not really a datetime']])
self.assertEqual(result['messages'], [
message(u"'not really a datetime' does not seem to be a valid "
u"datetime for field 'unknown'",
moreinfo=u"Use the format '2012-12-31 23:59:59'")])
self.assertIs(result['ids'], False)
def test_checktz1(self):
""" Imported date should be interpreted as being in the tz provided by
the context
"""
# write dummy tz in user (Asia/Hovd UTC+0700), should be superseded by
# context
self.registry('res.users').write(
self.cr, openerp.SUPERUSER_ID, [openerp.SUPERUSER_ID],
{'tz': 'Asia/Hovd'})
# UTC+1400
result = self.import_(
['value'], [['2012-02-03 11:11:11']], {'tz': 'Pacific/Kiritimati'})
self.assertFalse(result['messages'])
self.assertEqual(
values(self.read(domain=[('id', 'in', result['ids'])])),
['2012-02-02 21:11:11'])
# UTC-0930
result = self.import_(
['value'], [['2012-02-03 11:11:11']], {'tz': 'Pacific/Marquesas'})
self.assertFalse(result['messages'])
self.assertEqual(
values(self.read(domain=[('id', 'in', result['ids'])])),
['2012-02-03 20:41:11'])
def test_usertz(self):
""" If the context does not hold a timezone, the importing user's tz
should be used
"""
# UTC +1000
self.registry('res.users').write(
self.cr, openerp.SUPERUSER_ID, [openerp.SUPERUSER_ID],
{'tz': 'Asia/Yakutsk'})
result = self.import_(
['value'], [['2012-02-03 11:11:11']])
self.assertFalse(result['messages'])
self.assertEqual(
values(self.read(domain=[('id', 'in', result['ids'])])),
['2012-02-03 01:11:11'])
def test_notz(self):
""" If there is no tz either in the context or on the user, falls back
to UTC
"""
self.registry('res.users').write(
self.cr, openerp.SUPERUSER_ID, [openerp.SUPERUSER_ID],
{'tz': False})
result = self.import_(['value'], [['2012-02-03 11:11:11']])
self.assertFalse(result['messages'])
self.assertEqual(
values(self.read(domain=[('id', 'in', result['ids'])])),
['2012-02-03 11:11:11'])
class test_unique(ImporterCase):
model_name = 'export.unique'
@mute_logger('openerp.sql_db')
def test_unique(self):
result = self.import_(['value'], [
['1'],
['1'],
['2'],
['3'],
['3'],
])
self.assertFalse(result['ids'])
self.assertEqual(result['messages'], [
dict(message=u"The value for the field 'value' already exists. "
u"This might be 'unknown' in the current model, "
u"or a field of the same name in an o2m.",
type='error', rows={'from': 1, 'to': 1},
record=1, field='value'),
dict(message=u"The value for the field 'value' already exists. "
u"This might be 'unknown' in the current model, "
u"or a field of the same name in an o2m.",
type='error', rows={'from': 4, 'to': 4},
record=4, field='value'),
])
|
{
"content_hash": "9d268a84c602dbb1e2dc80ec9bdf69bc",
"timestamp": "",
"source": "github",
"line_count": 1176,
"max_line_length": 94,
"avg_line_length": 38.00765306122449,
"alnum_prop": 0.5299460813924872,
"repo_name": "chjw8016/GreenOdoo7-haibao",
"id": "0afbf6f49c3fe319aaab2971d49dfdc10dacbf40",
"size": "44786",
"binary": false,
"copies": "48",
"ref": "refs/heads/master",
"path": "openerp/tests/addons/test_impex/tests/test_load.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "90846"
},
{
"name": "CSS",
"bytes": "384369"
},
{
"name": "JavaScript",
"bytes": "1730589"
},
{
"name": "PHP",
"bytes": "14033"
},
{
"name": "Python",
"bytes": "9394626"
},
{
"name": "Shell",
"bytes": "5172"
},
{
"name": "XSLT",
"bytes": "156761"
}
],
"symlink_target": ""
}
|
import pytest
from unittest import TestCase
import bigdl.nano.automl as nano_automl
class TestGlobalConfig(TestCase):
def test_disable_automl(self):
nano_automl.hpo_config.disable_hpo_pytorch()
pass
def test_enable_automl(self):
nano_automl.hpo_config.enable_hpo_pytorch()
pass
if __name__ == '__main__':
pytest.main([__file__])
|
{
"content_hash": "d8e960ebd49a965dcee19e1dba5fcd91",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 52,
"avg_line_length": 19.94736842105263,
"alnum_prop": 0.6596306068601583,
"repo_name": "intel-analytics/BigDL",
"id": "47f74025ead7ee5701714132c8b9d2ce05a04ce8",
"size": "967",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "python/nano/test/automl/pytorch/test_global_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5342"
},
{
"name": "Dockerfile",
"bytes": "139304"
},
{
"name": "Java",
"bytes": "1321348"
},
{
"name": "Jupyter Notebook",
"bytes": "54112822"
},
{
"name": "Lua",
"bytes": "1904"
},
{
"name": "Makefile",
"bytes": "19253"
},
{
"name": "PowerShell",
"bytes": "1137"
},
{
"name": "PureBasic",
"bytes": "593"
},
{
"name": "Python",
"bytes": "8825782"
},
{
"name": "RobotFramework",
"bytes": "16117"
},
{
"name": "Scala",
"bytes": "13216148"
},
{
"name": "Shell",
"bytes": "848241"
}
],
"symlink_target": ""
}
|
from helper import TestHelper
class TestJumpPrevOffsetIndent(TestHelper):
def command(self):
return 'jump_prev_indent'
def test_positive_indent_offset(self):
lines = [
' Lorem ipsum dolor sit amet',
'',
'Lorem ipsum dolor sit amet',
'Lorem ipsum dolor sit amet'
]
starting_selection = [57, 57]
ending_selection = [2, 2]
self.check_command(lines, starting_selection, ending_selection, indent_offset = 1)
def test_negative_indent_offset(self):
lines = [
' Lorem ipsum dolor sit amet',
'',
'Lorem ipsum dolor sit amet',
' Lorem ipsum dolor sit amet'
]
starting_selection = [59, 59]
ending_selection = [30, 30]
self.check_command(lines, starting_selection, ending_selection, indent_offset = -1)
def test_block_skip(self):
lines = [
'Lorem ipsum dolor sit amet',
'Lorem ipsum dolor sit amet',
' Lorem ipsum dolor sit amet'
]
starting_selection = [56, 56]
ending_selection = [27, 27]
self.check_command(lines, starting_selection, ending_selection, indent_offset = -1)
def test_ignore_if_no_match(self):
lines = [
' Lorem ipsum dolor sit amet',
'Lorem ipsum dolor sit amet',
'Lorem ipsum dolor sit amet'
]
starting_selection = [58, 58]
ending_selection = [58, 58]
self.check_command(lines, starting_selection, ending_selection, indent_offset = 1)
|
{
"content_hash": "24e43d612ef6fefd743b0045d1432b1e",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 87,
"avg_line_length": 28.11764705882353,
"alnum_prop": 0.6366806136680614,
"repo_name": "mwean/sublime_jump_along_indent",
"id": "f453363aad094c4529cc3fca4c7eac98dad2b57c",
"size": "1434",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_jump_prev_offset_indent.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20528"
}
],
"symlink_target": ""
}
|
import datetime
from oslo.config import cfg
from oslo.serialization import jsonutils
import webob
from nova.api.openstack.compute import plugins
from nova.api.openstack.compute.plugins.v3 import servers as servers_v21
from nova.api.openstack.compute import servers as servers_v2
from nova.api.openstack import extensions
from nova.compute import api as compute_api
from nova.compute import flavors
from nova import db
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
from nova.tests.unit.image import fake
CONF = cfg.CONF
class ConfigDriveTestV21(test.TestCase):
base_url = '/v2/fake/servers/'
def _setup_wsgi(self):
self.app = fakes.wsgi_app_v21(init_only=('servers', 'os-config-drive'))
def setUp(self):
super(ConfigDriveTestV21, self).setUp()
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
fake.stub_out_image_service(self.stubs)
self._setup_wsgi()
def test_show(self):
self.stubs.Set(db, 'instance_get',
fakes.fake_instance_get())
self.stubs.Set(db, 'instance_get_by_uuid',
fakes.fake_instance_get())
req = webob.Request.blank(self.base_url + '1')
req.headers['Content-Type'] = 'application/json'
response = req.get_response(self.app)
self.assertEqual(response.status_int, 200)
res_dict = jsonutils.loads(response.body)
self.assertIn('config_drive', res_dict['server'])
def test_detail_servers(self):
# Sort is disabled in v2 without an extension so stub out
# the non-sorted DB get
self.stubs.Set(db, 'instance_get_all_by_filters',
fakes.fake_instance_get_all_by_filters())
# But it is enabled in v3 so stub out the sorted function
self.stubs.Set(db, 'instance_get_all_by_filters_sort',
fakes.fake_instance_get_all_by_filters())
req = fakes.HTTPRequest.blank(self.base_url + 'detail')
res = req.get_response(self.app)
server_dicts = jsonutils.loads(res.body)['servers']
self.assertNotEqual(len(server_dicts), 0)
for server_dict in server_dicts:
self.assertIn('config_drive', server_dict)
class ConfigDriveTestV2(ConfigDriveTestV21):
def _setup_wsgi(self):
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Config_drive'])
self.app = fakes.wsgi_app(init_only=('servers',))
class ServersControllerCreateTestV21(test.TestCase):
base_url = '/v2/fake/'
bad_request = exception.ValidationError
def _set_up_controller(self):
ext_info = plugins.LoadedExtensionInfo()
self.controller = servers_v21.ServersController(
extension_info=ext_info)
CONF.set_override('extensions_blacklist',
'os-config-drive',
'osapi_v3')
self.no_config_drive_controller = servers_v21.ServersController(
extension_info=ext_info)
def _verify_config_drive(self, **kwargs):
self.assertNotIn('config_drive', kwargs)
def _initialize_extension(self):
pass
def setUp(self):
"""Shared implementation for tests below that create instance."""
super(ServersControllerCreateTestV21, self).setUp()
self.instance_cache_num = 0
self._set_up_controller()
def instance_create(context, inst):
inst_type = flavors.get_flavor_by_flavor_id(3)
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
def_image_ref = 'http://localhost/images/%s' % image_uuid
self.instance_cache_num += 1
instance = fake_instance.fake_db_instance(**{
'id': self.instance_cache_num,
'display_name': inst['display_name'] or 'test',
'uuid': fakes.FAKE_UUID,
'instance_type': inst_type,
'access_ip_v4': '1.2.3.4',
'access_ip_v6': 'fead::1234',
'image_ref': inst.get('image_ref', def_image_ref),
'user_id': 'fake',
'project_id': 'fake',
'reservation_id': inst['reservation_id'],
"created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
"updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
"progress": 0,
"fixed_ips": [],
"task_state": "",
"vm_state": "",
"root_device_name": inst.get('root_device_name', 'vda'),
})
return instance
fake.stub_out_image_service(self.stubs)
self.stubs.Set(db, 'instance_create', instance_create)
def _test_create_extra(self, params, override_controller):
image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
server.update(params)
body = dict(server=server)
req = fakes.HTTPRequest.blank(self.base_url + 'servers')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
if override_controller is not None:
server = override_controller.create(req, body=body).obj['server']
else:
server = self.controller.create(req, body=body).obj['server']
def test_create_instance_with_config_drive_disabled(self):
params = {'config_drive': "False"}
old_create = compute_api.API.create
def create(*args, **kwargs):
self._verify_config_drive(**kwargs)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self._test_create_extra(params,
override_controller=self.no_config_drive_controller)
def _create_instance_body_of_config_drive(self, param):
self._initialize_extension()
def create(*args, **kwargs):
self.assertIn('config_drive', kwargs)
return old_create(*args, **kwargs)
old_create = compute_api.API.create
self.stubs.Set(compute_api.API, 'create', create)
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = ('http://localhost' + self.base_url + 'flavors/3')
body = {
'server': {
'name': 'config_drive_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
'config_drive': param,
},
}
req = fakes.HTTPRequest.blank(self.base_url + 'servers')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
return req, body
def test_create_instance_with_config_drive(self):
param = True
req, body = self._create_instance_body_of_config_drive(param)
res = self.controller.create(req, body=body).obj
server = res['server']
self.assertEqual(fakes.FAKE_UUID, server['id'])
def test_create_instance_with_config_drive_as_boolean_string(self):
param = 'false'
req, body = self._create_instance_body_of_config_drive(param)
res = self.controller.create(req, body=body).obj
server = res['server']
self.assertEqual(fakes.FAKE_UUID, server['id'])
def test_create_instance_with_bad_config_drive(self):
param = 12345
req, body = self._create_instance_body_of_config_drive(param)
self.assertRaises(self.bad_request,
self.controller.create, req, body=body)
def test_create_instance_without_config_drive(self):
param = True
req, body = self._create_instance_body_of_config_drive(param)
del body['server']['config_drive']
res = self.controller.create(req, body=body).obj
server = res['server']
self.assertEqual(fakes.FAKE_UUID, server['id'])
def test_create_instance_with_empty_config_drive(self):
param = ''
req, body = self._create_instance_body_of_config_drive(param)
self.assertRaises(exception.ValidationError,
self.controller.create, req, body=body)
class ServersControllerCreateTestV2(ServersControllerCreateTestV21):
bad_request = webob.exc.HTTPBadRequest
def _set_up_controller(self):
self.ext_mgr = extensions.ExtensionManager()
self.ext_mgr.extensions = {}
self.controller = servers_v2.Controller(self.ext_mgr)
self.no_config_drive_controller = None
def _verify_config_drive(self, **kwargs):
self.assertIsNone(kwargs['config_drive'])
def _initialize_extension(self):
self.ext_mgr.extensions = {'os-config-drive': 'fake'}
def test_create_instance_with_empty_config_drive(self):
param = ''
req, body = self._create_instance_body_of_config_drive(param)
res = self.controller.create(req, body=body).obj
server = res['server']
self.assertEqual(fakes.FAKE_UUID, server['id'])
|
{
"content_hash": "d64d1f8ebdc1aad18bd397e687994581",
"timestamp": "",
"source": "github",
"line_count": 239,
"max_line_length": 79,
"avg_line_length": 38.66945606694561,
"alnum_prop": 0.6118805453365073,
"repo_name": "shakamunyi/nova",
"id": "5c2a42526fb84a38f94447200ac46752e3beb9ec",
"size": "9878",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/tests/unit/api/openstack/compute/contrib/test_config_drive.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "15322211"
},
{
"name": "Shell",
"bytes": "17730"
},
{
"name": "Smarty",
"bytes": "489682"
}
],
"symlink_target": ""
}
|
from dataclasses import dataclass
from typing import Tuple
from pants.backend.python.lint.flake8.subsystem import Flake8, Flake8FieldSet
from pants.backend.python.util_rules import pex
from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
from pants.backend.python.util_rules.pex import PexRequest, VenvPex, VenvPexProcess
from pants.core.goals.lint import REPORT_DIR, LintRequest, LintResult, LintResults
from pants.core.util_rules.config_files import ConfigFiles, ConfigFilesRequest
from pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest
from pants.engine.fs import CreateDigest, Digest, Directory, MergeDigests, RemovePrefix
from pants.engine.process import FallibleProcessResult
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.unions import UnionRule
from pants.python.python_setup import PythonSetup
from pants.util.logging import LogLevel
from pants.util.strutil import pluralize
class Flake8Request(LintRequest):
field_set_type = Flake8FieldSet
@dataclass(frozen=True)
class Flake8Partition:
field_sets: Tuple[Flake8FieldSet, ...]
interpreter_constraints: InterpreterConstraints
def generate_argv(source_files: SourceFiles, flake8: Flake8) -> Tuple[str, ...]:
args = []
if flake8.config:
args.append(f"--config={flake8.config}")
args.extend(flake8.args)
args.extend(source_files.files)
return tuple(args)
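# For illustration: with a resolved config file ".flake8", user args
# ("--max-line-length=100",) and sources ("a.py", "b.py"), this returns
# ("--config=.flake8", "--max-line-length=100", "a.py", "b.py").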
@rule(level=LogLevel.DEBUG)
async def flake8_lint_partition(partition: Flake8Partition, flake8: Flake8) -> LintResult:
flake8_pex_get = Get(
VenvPex,
PexRequest(
output_filename="flake8.pex",
internal_only=True,
requirements=flake8.pex_requirements(),
interpreter_constraints=partition.interpreter_constraints,
main=flake8.main,
),
)
config_files_get = Get(ConfigFiles, ConfigFilesRequest, flake8.config_request)
source_files_get = Get(
SourceFiles, SourceFilesRequest(field_set.sources for field_set in partition.field_sets)
)
# Ensure that the empty report dir exists.
report_directory_digest_get = Get(Digest, CreateDigest([Directory(REPORT_DIR)]))
flake8_pex, config_files, report_directory, source_files = await MultiGet(
flake8_pex_get, config_files_get, report_directory_digest_get, source_files_get
)
input_digest = await Get(
Digest,
MergeDigests(
(source_files.snapshot.digest, config_files.snapshot.digest, report_directory)
),
)
result = await Get(
FallibleProcessResult,
VenvPexProcess(
flake8_pex,
argv=generate_argv(source_files, flake8),
input_digest=input_digest,
output_directories=(REPORT_DIR,),
description=f"Run Flake8 on {pluralize(len(partition.field_sets), 'file')}.",
level=LogLevel.DEBUG,
),
)
report = await Get(Digest, RemovePrefix(result.output_digest, REPORT_DIR))
return LintResult.from_fallible_process_result(
result,
partition_description=str(sorted(str(c) for c in partition.interpreter_constraints)),
report=report,
)
@rule(desc="Lint with Flake8", level=LogLevel.DEBUG)
async def flake8_lint(
request: Flake8Request, flake8: Flake8, python_setup: PythonSetup
) -> LintResults:
if flake8.skip:
return LintResults([], linter_name="Flake8")
# NB: Flake8 output depends upon which Python interpreter version it's run with
# (http://flake8.pycqa.org/en/latest/user/invocation.html). We batch targets by their
# constraints to ensure, for example, that all Python 2 targets run together and all Python 3
# targets run together.
constraints_to_field_sets = InterpreterConstraints.group_field_sets_by_constraints(
request.field_sets, python_setup
)
partitioned_results = await MultiGet(
Get(LintResult, Flake8Partition(partition_field_sets, partition_compatibility))
for partition_compatibility, partition_field_sets in constraints_to_field_sets.items()
)
return LintResults(partitioned_results, linter_name="Flake8")
def rules():
return [*collect_rules(), UnionRule(LintRequest, Flake8Request), *pex.rules()]
|
{
"content_hash": "cb3f77408ba674cd1b7070e4f311e586",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 97,
"avg_line_length": 39.48623853211009,
"alnum_prop": 0.7184014869888475,
"repo_name": "patricklaw/pants",
"id": "ad757e2e3056f4697082c02b2a1f25e2b67429f6",
"size": "4436",
"binary": false,
"copies": "1",
"ref": "refs/heads/scala",
"path": "src/python/pants/backend/python/lint/flake8/rules.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
from settings.base import rel
CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.request',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [rel('templates'),
# rel('product/templates'),
],
'APP_DIRS': True,
'OPTIONS': {'context_processors': CONTEXT_PROCESSORS},
}
]
|
{
"content_hash": "5e85adbeb1e40ccd290a2921564d097e",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 69,
"avg_line_length": 28.88,
"alnum_prop": 0.6412742382271468,
"repo_name": "skylifewww/pangolinland",
"id": "499a5d7d6433f7447ef43d6d0a7e49769f1968cd",
"size": "722",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "settings/templates.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "179721"
},
{
"name": "HTML",
"bytes": "195624"
},
{
"name": "JavaScript",
"bytes": "262432"
},
{
"name": "Makefile",
"bytes": "1485"
},
{
"name": "Nginx",
"bytes": "646"
},
{
"name": "Python",
"bytes": "116827"
}
],
"symlink_target": ""
}
|
"""race dataset."""
import tensorflow_datasets.public_api as tfds
from tensorflow_datasets.text.race import race
class RaceTest(tfds.testing.DatasetBuilderTestCase):
"""Tests for race dataset."""
DATASET_CLASS = race.Race
BUILDER_CONFIG_NAMES_TO_TEST = ["high"]
  SPLITS = {
      "train": 1,  # Number of fake train examples
      "test": 1,  # Number of fake test examples
      "dev": 1,  # Number of fake dev examples
  }
if __name__ == "__main__":
tfds.testing.test_main()
|
{
"content_hash": "58ab92242bee4a550ea072291baf4980",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 52,
"avg_line_length": 25.68421052631579,
"alnum_prop": 0.6598360655737705,
"repo_name": "tensorflow/datasets",
"id": "2b538cdf0bdfdc830b7703b2f26f333fcbfc11da",
"size": "1100",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow_datasets/text/race/race_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Gherkin",
"bytes": "728"
},
{
"name": "JavaScript",
"bytes": "13369"
},
{
"name": "NewLisp",
"bytes": "13940"
},
{
"name": "Perl",
"bytes": "520"
},
{
"name": "Python",
"bytes": "5398856"
},
{
"name": "Roff",
"bytes": "22095"
},
{
"name": "Ruby",
"bytes": "25669"
},
{
"name": "Shell",
"bytes": "3895"
},
{
"name": "Smalltalk",
"bytes": "20604"
},
{
"name": "TeX",
"bytes": "759"
}
],
"symlink_target": ""
}
|
from cel import cli
def test_main_has_all_commands_registered():
for command in ['build', 'build_runner', 'start', 'run', 'templates']:
assert command in cli.main.commands
|
{
"content_hash": "3ade4ce6c560811c0d22a694cea9334d",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 74,
"avg_line_length": 31,
"alnum_prop": 0.6774193548387096,
"repo_name": "a-musing-moose/cel",
"id": "70ea728fd72e12d2381b5c5db6ada6eb565ad2d5",
"size": "186",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21915"
}
],
"symlink_target": ""
}
|
import os, sys, mobilesdk
import shutil, time, json
import xml.dom.minidom
class PluginsTest(mobilesdk.MobileSDKTest):
# Tests TIMOB-4903, and "compile" for external build plugins
def testPluginFunctions(self):
self.createProject("pluginTest", "android")
testPlugin = os.path.join(os.path.dirname(os.path.abspath(__file__)), "testResources", "testPlugin.py")
pluginDir = os.path.join(self.projectDir, "plugins", "testPlugin")
os.makedirs(pluginDir)
tiappPath = os.path.join(self.projectDir, "tiapp.xml")
tiapp = xml.dom.minidom.parse(tiappPath)
pluginsEl = tiapp.createElement("plugins")
pluginEl = tiapp.createElement("plugin")
pluginEl.setAttribute("version", "0.1")
pluginName = tiapp.createTextNode("testPlugin")
pluginEl.appendChild(pluginName)
pluginsEl.appendChild(pluginEl)
tiapp.documentElement.appendChild(pluginsEl)
f = open(tiappPath, "w")
tiapp.writexml(f)
f.close()
shutil.copy(testPlugin, os.path.join(pluginDir, "plugin.py"))
self.buildAndroidProject()
pluginCompile = os.path.join(self.projectDir, "plugin_compile.json")
self.assertTrue(os.path.exists(pluginCompile))
compileData = json.loads(open(pluginCompile, "r").read())
for key in ("platform", "tiapp", "project_dir", "titanium_dir",
"appid", "template_dir", "project_name", "command",
"build_dir", "app_name", "android_builder", "deploy_type",
"dist_dir", "logger"):
self.assertTrue(key in compileData)
self.assertEqual(compileData["project_dir"], self.projectDir)
pluginPostBuild = os.path.join(self.projectDir, "plugin_postbuild.txt")
self.assertTrue(os.path.exists(pluginPostBuild))
|
{
"content_hash": "b7b6269703a698b3a6f48ea56929b703",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 105,
"avg_line_length": 37.95348837209303,
"alnum_prop": 0.7340686274509803,
"repo_name": "hieupham007/Titanium_Mobile",
"id": "2a1312686e6e1305aca9a988646db3dbabed3e8d",
"size": "1654",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "drillbit/sdk_tests/android/plugins.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AppleScript",
"bytes": "11166"
},
{
"name": "C",
"bytes": "231409"
},
{
"name": "C++",
"bytes": "211563"
},
{
"name": "CSS",
"bytes": "14427"
},
{
"name": "Java",
"bytes": "2302640"
},
{
"name": "JavaScript",
"bytes": "3449846"
},
{
"name": "Objective-C",
"bytes": "3264737"
},
{
"name": "PHP",
"bytes": "17988"
},
{
"name": "Perl",
"bytes": "12759"
},
{
"name": "Python",
"bytes": "1778903"
},
{
"name": "Shell",
"bytes": "28068"
}
],
"symlink_target": ""
}
|
import pytest
def test_sub_container():
import pyctrl
import pyctrl.block as block
from pyctrl.block.container import Container, Input, Output, ContainerException
from pyctrl.block.system import Gain, DTTF
container = Container()
container.add_signals('s1', 's2', 's3')
# add subcontainer
container1 = Container()
container.add_filter('container1',
container1,
['s1'], ['s2','s3'])
container.add_signals('container1/s1', 'container1/s2')
container.add_source('container1/input1',
Input(),
['s1'])
container.add_filter('container1/gain1',
Gain(gain = 3),
['s1'],['s2'])
container.add_sink('container1/output1',
Output(),
['s2'])
# add subsubcontainer
container.add_sink('container1/output2',
Output(),
['s3'])
container2 = Container()
container.add_filter('container1/container2',
container2,
['s1'], ['s3'])
container.add_signals('container1/container2/s1', 'container1/container2/s2')
container.add_source('container1/container2/input1',
Input(),
['s1'])
container.add_filter('container1/container2/gain1',
Gain(gain = 5),
['s1'],['s2'])
container.add_sink('container1/container2/output1',
Output(),
['s2'])
print(container.info('all'))
from pyctrl.flask import JSONEncoder, JSONDecoder
json1 = JSONEncoder(sort_keys = True, indent = 4).encode(container)
obj = JSONDecoder().decode(json1)
json2 = JSONEncoder(sort_keys = True, indent = 4).encode(obj)
assert json1 == json2
print('json = \n{}'.format(json1))
def _test_mip_balance():
import numpy as np
from pyctrl import Controller
from pyctrl.block.container import Container, Input, Output
from pyctrl.block.system import System, Subtract, Differentiator, Sum, Gain
from pyctrl.block.nl import ControlledCombination, Product
from pyctrl.block import Fade, Printer
from pyctrl.system.ss import DTSS
from pyctrl.block.logic import CompareAbsWithHysterisis, SetFilter, State
GRN_LED = 61
PAUSE_BTN = 62
# create mip
mip = Controller()
# add signals
mip.add_signals('theta','theta_dot','encoder1','encoder2','pwm1','pwm2')
# phi is the average of the encoders
mip.add_signal('phi')
mip.add_filter('phi',
Sum(gain=0.5),
['encoder1','encoder2'],
['phi'])
# phi dot
mip.add_signal('phi_dot')
mip.add_filter('phi_dot',
Differentiator(),
['clock','phi'],
['phi_dot'])
# phi dot and steer reference
mip.add_signals('phi_dot_reference', 'phi_dot_reference_fade')
mip.add_signals('steer_reference', 'steer_reference_fade')
# add fade in filter
mip.add_filter('fade',
Fade(target = [0, 0.5], period = 5),
['clock','phi_dot_reference','steer_reference'],
['phi_dot_reference_fade','steer_reference_fade'])
# state-space matrices
A = np.array([[0.913134, 0.0363383],[-0.0692862, 0.994003]])
B = np.array([[0.00284353, -0.000539063], [0.00162443, -0.00128745]])
C = np.array([[-383.009, 303.07]])
D = np.array([[-1.22015, 0]])
B = 2*np.pi*(100/7.4)*np.hstack((-B, B[:,1:]))
D = 2*np.pi*(100/7.4)*np.hstack((-D, D[:,1:]))
ssctrl = DTSS(A,B,C,D)
# state-space controller
mip.add_signals('pwm')
mip.add_filter('controller',
System(model = ssctrl),
['theta_dot','phi_dot','phi_dot_reference_fade'],
['pwm'])
# enable pwm only if about small_angle
mip.add_signals('small_angle', 'small_angle_pwm')
mip.add_filter('small_angle_pwm',
Product(),
['small_angle', 'pwm'],
['small_angle_pwm'])
# steering biasing
mip.add_filter('steer',
ControlledCombination(),
['steer_reference_fade',
'small_angle_pwm','small_angle_pwm'],
['pwm1','pwm2'])
# set references
mip.set_signal('phi_dot_reference',0)
mip.set_signal('steer_reference',0.5)
# add supervisor actions on a timer
# actions are inside a container so that they are executed all at once
mip.add_timer('supervisor',
Container(),
['theta'],
['small_angle','is_running'],
period = 0.5, repeat = True)
mip.add_signals('timer/supervisor/theta',
'timer/supervisor/small_angle',
'timer/supervisor/is_running')
mip.add_source('timer/supervisor/theta',
Input(),
['theta'])
mip.add_sink('timer/supervisor/small_angle',
Output(),
['small_angle'])
mip.add_sink('timer/supervisor/is_running',
Output(),
['is_running'])
# add small angle sensor
mip.add_filter('timer/supervisor/is_angle_small',
CompareAbsWithHysterisis(threshold = 0.11,
hysterisis = 0.09,
offset = -0.07,
state = (State.LOW,)),
['theta'],
['small_angle'])
# reset controller and fade
mip.add_sink('timer/supervisor/reset_controller',
SetFilter(label = ['/controller','/fade'],
on_rise = {'reset': True}),
['small_angle'])
# add pause button on a timer
mip.add_source('timer/supervisor/pause_button',
('pyctrl.block', 'Constant'),
['is_running'],
kwargs = {'value': 0},
enable = True)
from pyctrl.flask import JSONEncoder, JSONDecoder
json1 = JSONEncoder(sort_keys = True, indent = 4).encode(mip)
obj = JSONDecoder().decode(json1)
json2 = JSONEncoder(sort_keys = True, indent = 4).encode(obj)
assert json1 == json2
print('json = \n{}'.format(json1))
|
{
"content_hash": "7fa4c08f8c88665ca3a18cba32e629fc",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 83,
"avg_line_length": 31.43127962085308,
"alnum_prop": 0.5149276236429433,
"repo_name": "mcdeoliveira/pyctrl",
"id": "4520788d59989fb5223f7f124620afa79d5e647e",
"size": "6632",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "test/test_json.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "407"
},
{
"name": "HTML",
"bytes": "12986"
},
{
"name": "JavaScript",
"bytes": "6740"
},
{
"name": "Python",
"bytes": "580777"
}
],
"symlink_target": ""
}
|
from urllib import unquote
from amgut.lib.mail import send_email
from amgut.handlers.base_handlers import BaseHandler
from amgut.connections import ag_data
from amgut import text_locale
class ChangePassVerifyHandler(BaseHandler):
def get(self):
email = self.get_argument('email', None)
if email is not None:
email = unquote(email)
kitid = self.get_argument('kitid', None)
passcode = self.get_argument('passcode', None)
new_password = self.get_argument('new_password', None)
confirm_password = self.get_argument('confirm_password', None)
if self.is_valid(email, kitid, passcode):
result = 'valid'
else:
result = 'notvalid'
latlongs = ag_data.getMapMarkers()
self.render('change_pass_verify.html', email=email, kitid=kitid,
passcode=passcode, new_password=new_password,
confirm_password=confirm_password,
result=result, message=None, latlongs_db=latlongs,
loginerror='')
def post(self):
email = self.get_argument('email', None)
kit_id = self.get_argument('kitid', None)
if email is not None:
email = unquote(email)
new_password = self.get_argument('new_password', None)
confirm_password = self.get_argument('confirm_password', None)
self.reset_pass_and_email(new_password, confirm_password, email,
kit_id)
def is_valid(self, email, kitid, passcode):
return ag_data.ag_verify_kit_password_change_code(email, kitid,
passcode)
def reset_pass_and_email(self, new_password, confirm_password, email,
supplied_kit_id):
ag_data.ag_update_kit_password(supplied_kit_id, new_password)
latlongs = ag_data.getMapMarkers()
tl = text_locale['handlers']
MESSAGE = tl['CHANGE_PASS_BODY'] % supplied_kit_id
try:
send_email(MESSAGE, tl['CHANGE_PASS_SUBJECT'], email)
            self.render('change_pass_verify.html', email='', kitid='',
                        passcode='', new_password='',
                        confirm_password='', result=4, message='',
                        latlongs_db=latlongs, loginerror='')
        except Exception:
            self.render('change_pass_verify.html', email='', kitid='',
                        passcode='', new_password='',
                        confirm_password='', result=5, message='',
                        latlongs_db=latlongs, loginerror='')
|
{
"content_hash": "976b948239bf75f5773e633cc78cf4b9",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 73,
"avg_line_length": 43.09836065573771,
"alnum_prop": 0.5701787751996957,
"repo_name": "mortonjt/american-gut-web",
"id": "a3a20ac7db9fae58107c68a7a799f7f4db4c178d",
"size": "2629",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "amgut/handlers/change_pass_verify.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "11959"
},
{
"name": "HTML",
"bytes": "302205"
},
{
"name": "JavaScript",
"bytes": "44832"
},
{
"name": "Python",
"bytes": "420507"
}
],
"symlink_target": ""
}
|
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
from . import checks # NOQA
class WagtailAdminAppConfig(AppConfig):
name = 'wagtail.admin'
label = 'wagtailadmin'
verbose_name = _("Wagtail admin")
|
{
"content_hash": "56fb7dcac1483569d86b40cf2c005f66",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 55,
"avg_line_length": 25.4,
"alnum_prop": 0.7322834645669292,
"repo_name": "nealtodd/wagtail",
"id": "a5536a9b5d4e046560b24686152504e826e6f8cf",
"size": "254",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "wagtail/admin/apps.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "190511"
},
{
"name": "Dockerfile",
"bytes": "703"
},
{
"name": "HTML",
"bytes": "371011"
},
{
"name": "JavaScript",
"bytes": "262163"
},
{
"name": "Makefile",
"bytes": "992"
},
{
"name": "Python",
"bytes": "3564287"
},
{
"name": "Shell",
"bytes": "8289"
}
],
"symlink_target": ""
}
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Churn_Modelling.csv')
X = dataset.iloc[:, 3:13].values
y = dataset.iloc[:, 13].values
# Encoding categorical data
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_X_1 = LabelEncoder()
X[:, 1] = labelencoder_X_1.fit_transform(X[:, 1])
labelencoder_X_2 = LabelEncoder()
X[:, 2] = labelencoder_X_2.fit_transform(X[:, 2])
onehotencoder = OneHotEncoder(categorical_features = [1])
X = onehotencoder.fit_transform(X).toarray()
X = X[:, 1:]
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Part 2 - Now let's make the ANN!
# Importing the Keras libraries and packages
import keras
from keras.models import Sequential
from keras.layers import Dense
# Initialising the ANN
classifier = Sequential()
# Adding the input layer and the first hidden layer
classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu', input_dim = 11))
# Adding the second hidden layer
classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu'))
# Adding the output layer
classifier.add(Dense(units = 1, kernel_initializer = 'uniform', activation = 'sigmoid'))
# Compiling the ANN
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
# Fitting the ANN to the Training set
classifier.fit(X_train, y_train, batch_size = 10, epochs = 100)
# Part 3 - Making the predictions and evaluating the model
# Predicting the Test set results
y_pred = classifier.predict(X_test)
y_pred = (y_pred > 0.5)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
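# A quick sanity check on top of the confusion matrix (an illustrative
# addition, not part of the original tutorial): overall test accuracy.
accuracy = float(cm[0, 0] + cm[1, 1]) / cm.sum()
print(accuracy)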
|
{
"content_hash": "ca68b288feb21581ac728569ed255e5a",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 101,
"avg_line_length": 32.22222222222222,
"alnum_prop": 0.741871921182266,
"repo_name": "balazssimon/ml-playground",
"id": "57a3d13ae9b990c361bf66228d27405f012041ea",
"size": "2561",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "udemy/Machine Learning A-Z/Part 8 - Deep Learning/Section 39 - Artificial Neural Networks (ANN)/ann.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "468040"
},
{
"name": "Python",
"bytes": "446476"
},
{
"name": "R",
"bytes": "60424"
}
],
"symlink_target": ""
}
|
import math, random, re
from collections import defaultdict, Counter, deque
from linear_algebra import dot, get_row, get_column, make_matrix, magnitude, scalar_multiply, shape, distance
from functools import partial
users = [
{ "id": 0, "name": "Hero" },
{ "id": 1, "name": "Dunn" },
{ "id": 2, "name": "Sue" },
{ "id": 3, "name": "Chi" },
{ "id": 4, "name": "Thor" },
{ "id": 5, "name": "Clive" },
{ "id": 6, "name": "Hicks" },
{ "id": 7, "name": "Devin" },
{ "id": 8, "name": "Kate" },
{ "id": 9, "name": "Klein" }
]
friendships = [(0, 1), (0, 2), (1, 2), (1, 3), (2, 3), (3, 4),
(4, 5), (5, 6), (5, 7), (6, 8), (7, 8), (8, 9)]
# give each user a friends list
for user in users:
user["friends"] = []
# and populate it
for i, j in friendships:
# this works because users[i] is the user whose id is i
    users[i]["friends"].append(users[j]) # add j as a friend of i
    users[j]["friends"].append(users[i]) # add i as a friend of j
#
# Betweenness Centrality
#
def shortest_paths_from(from_user):
# a dictionary from "user_id" to *all* shortest paths to that user
shortest_paths_to = { from_user["id"] : [[]] }
# a queue of (previous user, next user) that we need to check.
# starts out with all pairs (from_user, friend_of_from_user)
frontier = deque((from_user, friend)
for friend in from_user["friends"])
# keep going until we empty the queue
while frontier:
prev_user, user = frontier.popleft() # take from the beginning
user_id = user["id"]
# the fact that we're pulling from our queue means that
# necessarily we already know a shortest path to prev_user
paths_to_prev = shortest_paths_to[prev_user["id"]]
paths_via_prev = [path + [user_id] for path in paths_to_prev]
# it's possible we already know a shortest path to here as well
old_paths_to_here = shortest_paths_to.get(user_id, [])
# what's the shortest path to here that we've seen so far?
if old_paths_to_here:
min_path_length = len(old_paths_to_here[0])
else:
min_path_length = float('inf')
# any new paths to here that aren't too long
new_paths_to_here = [path_via_prev
for path_via_prev in paths_via_prev
if len(path_via_prev) <= min_path_length
and path_via_prev not in old_paths_to_here]
shortest_paths_to[user_id] = old_paths_to_here + new_paths_to_here
# add new neighbors to the frontier
frontier.extend((user, friend)
for friend in user["friends"]
if friend["id"] not in shortest_paths_to)
return shortest_paths_to
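# for example, shortest_paths_from(users[0])[3] == [[1, 3], [2, 3]]:
# the two equally short routes from Hero (id 0) to Chi (id 3)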
for user in users:
user["shortest_paths"] = shortest_paths_from(user)
for user in users:
user["betweenness_centrality"] = 0.0
for source in users:
source_id = source["id"]
for target_id, paths in source["shortest_paths"].items():
if source_id < target_id: # don't double count
num_paths = len(paths) # how many shortest paths?
contrib = 1 / num_paths # contribution to centrality
for path in paths:
for id in path:
if id not in [source_id, target_id]:
users[id]["betweenness_centrality"] += contrib
#
# closeness centrality
#
def farness(user):
"""the sum of the lengths of the shortest paths to each other user"""
return sum(len(paths[0])
for paths in user["shortest_paths"].values())
for user in users:
user["closeness_centrality"] = 1 / farness(user)
#
# matrix multiplication
#
def matrix_product_entry(A, B, i, j):
return dot(get_row(A, i), get_column(B, j))
def matrix_multiply(A, B):
n1, k1 = shape(A)
n2, k2 = shape(B)
if k1 != n2:
raise ArithmeticError("incompatible shapes!")
return make_matrix(n1, k2, partial(matrix_product_entry, A, B))
def vector_as_matrix(v):
"""returns the vector v (represented as a list) as a n x 1 matrix"""
return [[v_i] for v_i in v]
def vector_from_matrix(v_as_matrix):
"""returns the n x 1 matrix as a list of values"""
return [row[0] for row in v_as_matrix]
def matrix_operate(A, v):
v_as_matrix = vector_as_matrix(v)
product = matrix_multiply(A, v_as_matrix)
return vector_from_matrix(product)
def find_eigenvector(A, tolerance=0.00001):
guess = [1 for __ in A]
while True:
result = matrix_operate(A, guess)
length = magnitude(result)
next_guess = scalar_multiply(1/length, result)
if distance(guess, next_guess) < tolerance:
return next_guess, length # eigenvector, eigenvalue
guess = next_guess
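# quick sanity check (illustrative): power iteration on diag(2, 1)
# converges to the dominant eigenpair,
#   v, l = find_eigenvector([[2, 0], [0, 1]])   # v ~ [1, 0], l ~ 2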
#
# eigenvector centrality
#
def entry_fn(i, j):
return 1 if (i, j) in friendships or (j, i) in friendships else 0
n = len(users)
adjacency_matrix = make_matrix(n, n, entry_fn)
eigenvector_centralities, _ = find_eigenvector(adjacency_matrix)
#
# directed graphs
#
endorsements = [(0, 1), (1, 0), (0, 2), (2, 0), (1, 2), (2, 1), (1, 3),
(2, 3), (3, 4), (5, 4), (5, 6), (7, 5), (6, 8), (8, 7), (8, 9)]
for user in users:
user["endorses"] = [] # add one list to track outgoing endorsements
    user["endorsed_by"] = [] # and another to track incoming endorsements
for source_id, target_id in endorsements:
users[source_id]["endorses"].append(users[target_id])
users[target_id]["endorsed_by"].append(users[source_id])
endorsements_by_id = [(user["id"], len(user["endorsed_by"]))
for user in users]
sorted(endorsements_by_id,
key=lambda pair: pair[1],
reverse=True)
def page_rank(users, damping = 0.85, num_iters = 100):
# initially distribute PageRank evenly
num_users = len(users)
pr = { user["id"] : 1 / num_users for user in users }
# this is the small fraction of PageRank
# that each node gets each iteration
base_pr = (1 - damping) / num_users
for __ in range(num_iters):
next_pr = { user["id"] : base_pr for user in users }
for user in users:
# distribute PageRank to outgoing links
links_pr = pr[user["id"]] * damping
for endorsee in user["endorses"]:
next_pr[endorsee["id"]] += links_pr / len(user["endorses"])
pr = next_pr
return pr
if __name__ == "__main__":
print("Betweenness Centrality")
for user in users:
print(user["id"], user["betweenness_centrality"])
print()
print("Closeness Centrality")
for user in users:
print(user["id"], user["closeness_centrality"])
print()
print("Eigenvector Centrality")
for user_id, centrality in enumerate(eigenvector_centralities):
print(user_id, centrality)
print()
print("PageRank")
for user_id, pr in page_rank(users).items():
print(user_id, pr)
|
{
"content_hash": "ff32d03d5333d481b0f0a3e7b5142c85",
"timestamp": "",
"source": "github",
"line_count": 228,
"max_line_length": 109,
"avg_line_length": 30.69298245614035,
"alnum_prop": 0.5885967419262647,
"repo_name": "joelgrus/data-science-from-scratch",
"id": "f9c7142e4f4f8cd4906c25697722cc504d424693",
"size": "6998",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "first-edition/code-python3/network_analysis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "545277"
}
],
"symlink_target": ""
}
|
"""
CP FireEye Dynamic analysis endpoint
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from flask_jsonschema import validate
from app.api.analysis.fireeye import process_add_fireeye_url_analysis, process_get_fireeye_analysis, \
process_get_fireeye_report, process_add_fireeye_analysis, process_get_fireeye_environments
from app.cp import cp
@cp.route('/analysis/fireeye/report/<string:sha256>/<int:rid>', methods=['GET'])
def get_cp_fireeye_report(sha256, rid):
return process_get_fireeye_report(sha256, rid, only_current_user=True)
@cp.route('/analysis/fireeye/<string:sha256>/<int:sid>', methods=['GET'])
def get_cp_fireeye_analysis(sha256, sid):
return process_get_fireeye_analysis(sha256, sid, only_current_user=True)
@cp.route('/analysis/fireeye', methods=['POST', 'PUT'])
def add_cp_fireeye_analysis():
return process_add_fireeye_analysis()
@cp.route('/analysis/fireeye-url', methods=['POST', 'PUT'])
@validate('analysis', 'add_fireeye_url_analysis')
def add_cp_fireeye_url_analysis():
return process_add_fireeye_url_analysis()
@cp.route('/analysis/fireeye/environments')
def get_cp_fireeye_environments():
return process_get_fireeye_environments()
|
{
"content_hash": "349fd899f0b5baf985209ab667e254eb",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 102,
"avg_line_length": 33.166666666666664,
"alnum_prop": 0.7110552763819096,
"repo_name": "certeu/do-portal",
"id": "58a63830369dbf29d7cff77348a7f9b5dc4f74ba",
"size": "1194",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/cp/analysis/fireeye.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "31516"
},
{
"name": "HTML",
"bytes": "241648"
},
{
"name": "JavaScript",
"bytes": "84093"
},
{
"name": "Makefile",
"bytes": "3016"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "480459"
}
],
"symlink_target": ""
}
|
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-oidc-provider'
copyright = u'2016, Juan Ignacio Fiorentino'
author = u'Juan Ignacio Fiorentino'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.5'
# The full version, including alpha/beta/rc tags.
release = u'0.5.x'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-oidc-providerdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'django-oidc-provider.tex', u'django-oidc-provider Documentation',
u'Juan Ignacio Fiorentino', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'django-oidc-provider', u'django-oidc-provider Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'django-oidc-provider', u'django-oidc-provider Documentation',
author, 'django-oidc-provider', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and epub
# output is usually not wise. This defaults to 'epub', a theme designed to save
# visual space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
|
{
"content_hash": "0cb60c52cb0ab08578df8188b39572ad",
"timestamp": "",
"source": "github",
"line_count": 338,
"max_line_length": 83,
"avg_line_length": 32.218934911242606,
"alnum_prop": 0.7081726354453627,
"repo_name": "torreco/django-oidc-provider",
"id": "1d0828f02f3a2bf35bb457c3c320cb1c6c81e9af",
"size": "11323",
"binary": false,
"copies": "3",
"ref": "refs/heads/v0.5.x",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "8706"
},
{
"name": "Python",
"bytes": "157098"
}
],
"symlink_target": ""
}
|
"""This module defines handlers for InvalidUsage exceptions defined in common package
Note that to use the error handlers, you must import this module.
"""
from flask import jsonify
from servmon.api import api_blueprint
from servmon.common import invalid_usage
@api_blueprint.errorhandler(invalid_usage.InvalidUsage)
def handle_invalid_usage(error):
"""Return JSON response upon InvalidUsage exception
:param error: the exception object raised
:type error: InvalidUsage
:rtype: json
"""
response = jsonify(error.to_dict())
response.status_code = error.status_code
return response
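# Illustrative use from an API view (assumes InvalidUsage follows the common
# Flask pattern of (message, status_code); check servmon.common.invalid_usage
# for the actual signature):
#   raise invalid_usage.InvalidUsage('unknown host id', status_code=404)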
|
{
"content_hash": "7fa3b9fdb414120089ae454b80d0c3c7",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 85,
"avg_line_length": 28.40909090909091,
"alnum_prop": 0.7376,
"repo_name": "hpsuenaa/servmon",
"id": "f6607c1bb066bc47f3c2fc85e5b67082e370e27c",
"size": "625",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "servmon/common/errorhandler/invalid_usage_handler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31716"
}
],
"symlink_target": ""
}
|
from wallace.db.base.sql.model import SqlModel
class PostgresModel(SqlModel):
pass
|
{
"content_hash": "b482442cf1742143e688e38236b12dd2",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 46,
"avg_line_length": 17.8,
"alnum_prop": 0.7752808988764045,
"repo_name": "csira/wallace",
"id": "77f2cd176fb4e4c6cdb19c01f2a8054da7a2c365",
"size": "89",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wallace/db/pg/model.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "73299"
},
{
"name": "Shell",
"bytes": "92"
}
],
"symlink_target": ""
}
|
from .homogeneous import Translation, UniformScale, Rotation, Similarity
def scale_about_centre(obj, scale):
r"""
Return a Homogeneous Transform that implements scaling an object about
its centre. The given object must be transformable and must implement
a method to provide the object centre.
Parameters
----------
obj : :map:`Transformable`
A transformable object that has the ``centre`` method.
scale : `float` or ``(n_dims,)`` `ndarray`
The scale factor as defined in the :map:`Scale` documentation.
Returns
-------
transform : :map:`Homogeneous`
A homogeneous transform that implements the scaling.
"""
rescale = Similarity.init_identity(obj.n_dims)
s = UniformScale(scale, obj.n_dims, skip_checks=True)
t = Translation(-obj.centre(), skip_checks=True)
# Translate to origin, scale, then translate back
rescale.compose_before_inplace(t)
rescale.compose_before_inplace(s)
rescale.compose_before_inplace(t.pseudoinverse())
return rescale
def rotate_ccw_about_centre(obj, theta, degrees=True):
r"""
Return a Homogeneous Transform that implements rotating an object
counter-clockwise about its centre. The given object must be transformable
and must implement a method to provide the object centre.
Parameters
----------
obj : :map:`Transformable`
A transformable object that has the ``centre`` method.
theta : `float`
        The angle of rotation counter-clockwise about the origin.
degrees : `bool`, optional
If ``True`` theta is interpreted as degrees. If ``False``, theta is
interpreted as radians.
Returns
-------
transform : :map:`Homogeneous`
A homogeneous transform that implements the rotation.
"""
rotate_ccw = Similarity.init_identity(obj.n_dims)
r = Rotation.init_from_2d_ccw_angle(theta, degrees=degrees)
t = Translation(-obj.centre(), skip_checks=True)
# Translate to origin, rotate counter-clockwise, then translate back
rotate_ccw.compose_before_inplace(t)
rotate_ccw.compose_before_inplace(r)
rotate_ccw.compose_before_inplace(t.pseudoinverse())
return rotate_ccw
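# A minimal usage sketch (illustrative; assumes a menpo PointCloud, which is
# Transformable and exposes centre()):
#   import numpy as np
#   from menpo.shape import PointCloud
#   pc = PointCloud(np.random.rand(10, 2))
#   pc = scale_about_centre(pc, 2.0).apply(pc)       # double the size
#   pc = rotate_ccw_about_centre(pc, 90).apply(pc)   # quarter turn ccw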
|
{
"content_hash": "65d334ce6022dbdc2552ea6dfbb15416",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 78,
"avg_line_length": 35.564516129032256,
"alnum_prop": 0.6839002267573696,
"repo_name": "mozata/menpo",
"id": "3843226cb6651205a208a71932dd0167d3124b4c",
"size": "2205",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "menpo/transform/compositions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "155"
},
{
"name": "C",
"bytes": "70100"
},
{
"name": "C++",
"bytes": "44577"
},
{
"name": "Makefile",
"bytes": "263"
},
{
"name": "Python",
"bytes": "1728478"
},
{
"name": "Shell",
"bytes": "280"
}
],
"symlink_target": ""
}
|
import smtplib
from email.mime.text import MIMEText
class PrintAction:
def execute(self, content):
print(content)
class EmailAction:
"""Send an email when a rule is matched"""
from_email = "alerts@stocks.com"
def __init__(self, to):
self.to_email = to
def execute(self, content):
message = MIMEText(content)
message["Subject"] = "New Stock Alert"
        message["From"] = self.from_email
message["To"] = self.to_email
smtp = smtplib.SMTP("email.stocks.com")
try:
smtp.send_message(message)
finally:
smtp.quit()
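# Example wiring (illustrative; the addresses are placeholders):
#   action = EmailAction(to="trader@example.com")
#   action.execute("GOOG crossed above its 10-day moving average")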
|
{
"content_hash": "e000809d5a864eef448262e598f15986",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 47,
"avg_line_length": 24.23076923076923,
"alnum_prop": 0.5920634920634921,
"repo_name": "DanielFrank/test_driven_python",
"id": "bb436ceccdb1979a22a8d5c7052f8e6ead08b461",
"size": "630",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "stock_alerter/action.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33177"
}
],
"symlink_target": ""
}
|
import sys
from random import choice as randchoice
from itertools import cycle
from board import Loc, Board
size = 3
blank = '.'
players = 'XO'
class TictactoeBoard(Board):
def filled(self):
return not any(tile==blank for tile in self)
def random_blank(self):
blank_locs = [loc for loc in self.locations() if self[loc] == blank]
return randchoice(blank_locs)
def completed(self, line, item):
return all(self[loc] == item for loc in line)
class Tictactoe(object):
winmsg = "\n %s is the winner!"
drawmsg = "\n It's a draw!"
def make_win_lines(self):
"""Create a list of winning lines -- when a player fills any one of them, he wins."""
winlines, diag1, diag2 = [], [], []
for n in range(size):
winlines.append( [Loc(m, n) for m in range(size)] )
winlines.append( [Loc(n, m) for m in range(size)] )
diag1.append(Loc(n, n))
diag2.append(Loc(size-n-1, n))
return winlines + [diag1, diag2]
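    # For size == 3 this yields 8 winning lines: three rows, three columns
    # and the diagonals [Loc(0,0), Loc(1,1), Loc(2,2)] and
    # [Loc(2,0), Loc(1,1), Loc(0,2)].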
def check_end(self, player):
"""Check if `player` has won the game; check for a draw."""
for line in self.win_lines:
if board.completed(line, player):
self.game_won(player)
if board.filled():
self.game_won(None)
def game_won(self, player):
print(self.winmsg % player if player else self.drawmsg)
sys.exit()
def run(self):
"""Main loop."""
self.win_lines = self.make_win_lines()
for player in cycle(players):
board[ board.random_blank() ] = player
board.draw()
self.check_end(player)
if __name__ == "__main__":
board = TictactoeBoard(size, blank)
Tictactoe().run()
|
{
"content_hash": "14645ce0742e7e6fd8b5fa947ca53e4c",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 93,
"avg_line_length": 26.818181818181817,
"alnum_prop": 0.5745762711864407,
"repo_name": "akulakov/simple-games",
"id": "1e2f86931c5cb678bafc96bb5d4adb9be9201ced",
"size": "1820",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tictactoe.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "85454"
}
],
"symlink_target": ""
}
|
import numpy
import math
def sky_median_sig_clip(input_arr, sig_fract, percent_fract, max_iter=100):
	"""Estimate the sky level by iterative sigma clipping about the median.
	@type input_arr: numpy array
	@param input_arr: image data array
	@type sig_fract: float
	@param sig_fract: fraction of sigma used for clipping
	@type percent_fract: float
	@param percent_fract: convergence fraction
	@type max_iter: int
	@param max_iter: maximum number of iterations
	@rtype: tuple
	@return: (sky value, number of iterations)
	"""
work_arr = numpy.ravel(input_arr)
old_sky = numpy.median(work_arr)
sig = work_arr.std()
upper_limit = old_sky + sig_fract * sig
lower_limit = old_sky - sig_fract * sig
indices = numpy.where((work_arr < upper_limit) & (work_arr > lower_limit))
work_arr = work_arr[indices]
new_sky = numpy.median(work_arr)
iteration = 0
while ((math.fabs(old_sky - new_sky)/new_sky) > percent_fract) and (iteration < max_iter) :
iteration += 1
old_sky = new_sky
sig = work_arr.std()
upper_limit = old_sky + sig_fract * sig
lower_limit = old_sky - sig_fract * sig
indices = numpy.where((work_arr < upper_limit) & (work_arr > lower_limit))
work_arr = work_arr[indices]
new_sky = numpy.median(work_arr)
return (new_sky, iteration)
def sky_mean_sig_clip(input_arr, sig_fract, percent_fract, max_iter=100):
	"""Estimate the sky level by iterative sigma clipping about the mean.
	@type input_arr: numpy array
	@param input_arr: image data array
	@type sig_fract: float
	@param sig_fract: fraction of sigma used for clipping
	@type percent_fract: float
	@param percent_fract: convergence fraction
	@type max_iter: int
	@param max_iter: maximum number of iterations
	@rtype: tuple
	@return: (sky value, number of iterations)
	"""
work_arr = numpy.ravel(input_arr)
old_sky = numpy.mean(work_arr)
sig = work_arr.std()
upper_limit = old_sky + sig_fract * sig
lower_limit = old_sky - sig_fract * sig
indices = numpy.where((work_arr < upper_limit) & (work_arr > lower_limit))
work_arr = work_arr[indices]
new_sky = numpy.mean(work_arr)
iteration = 0
while ((math.fabs(old_sky - new_sky)/new_sky) > percent_fract) and (iteration < max_iter) :
iteration += 1
old_sky = new_sky
sig = work_arr.std()
upper_limit = old_sky + sig_fract * sig
lower_limit = old_sky - sig_fract * sig
indices = numpy.where((work_arr < upper_limit) & (work_arr > lower_limit))
work_arr = work_arr[indices]
new_sky = numpy.mean(work_arr)
return (new_sky, iteration)
def linear(inputArray, scale_min=None, scale_max=None):
"""Performs linear scaling of the input numpy array.
@type inputArray: numpy array
@param inputArray: image data array
@type scale_min: float
@param scale_min: minimum data value
@type scale_max: float
@param scale_max: maximum data value
@rtype: numpy array
@return: image data array
"""
print "img_scale : linear"
imageData=numpy.array(inputArray, copy=True)
	if scale_min is None:
		scale_min = imageData.min()
	if scale_max is None:
		scale_max = imageData.max()
imageData = imageData.clip(min=scale_min, max=scale_max)
imageData = (imageData -scale_min) / (scale_max - scale_min)
indices = numpy.where(imageData < 0)
imageData[indices] = 0.0
indices = numpy.where(imageData > 1)
imageData[indices] = 1.0
return imageData
def sqrt(inputArray, scale_min=None, scale_max=None):
"""Performs sqrt scaling of the input numpy array.
@type inputArray: numpy array
@param inputArray: image data array
@type scale_min: float
@param scale_min: minimum data value
@type scale_max: float
@param scale_max: maximum data value
@rtype: numpy array
@return: image data array
"""
print "img_scale : sqrt"
imageData=numpy.array(inputArray, copy=True)
	if scale_min is None:
		scale_min = imageData.min()
	if scale_max is None:
		scale_max = imageData.max()
imageData = imageData.clip(min=scale_min, max=scale_max)
imageData = imageData - scale_min
indices = numpy.where(imageData < 0)
imageData[indices] = 0.0
imageData = numpy.sqrt(imageData)
imageData = imageData / math.sqrt(scale_max - scale_min)
return imageData
def log(inputArray, scale_min=None, scale_max=None):
"""Performs log10 scaling of the input numpy array.
@type inputArray: numpy array
@param inputArray: image data array
@type scale_min: float
@param scale_min: minimum data value
@type scale_max: float
@param scale_max: maximum data value
@rtype: numpy array
@return: image data array
"""
print "img_scale : log"
imageData=numpy.array(inputArray, copy=True)
	if scale_min is None:
		scale_min = imageData.min()
	if scale_max is None:
		scale_max = imageData.max()
factor = math.log10(scale_max - scale_min)
indices0 = numpy.where(imageData < scale_min)
indices1 = numpy.where((imageData >= scale_min) & (imageData <= scale_max))
indices2 = numpy.where(imageData > scale_max)
imageData[indices0] = 0.0
imageData[indices2] = 1.0
	try:
		imageData[indices1] = numpy.log10(imageData[indices1])/factor
	except:
		print "Error on numpy.log10 for image values ", imageData[indices1]
return imageData
def asinh(inputArray, scale_min=None, scale_max=None, non_linear=2.0):
"""Performs asinh scaling of the input numpy array.
@type inputArray: numpy array
@param inputArray: image data array
@type scale_min: float
@param scale_min: minimum data value
@type scale_max: float
@param scale_max: maximum data value
@type non_linear: float
@param non_linear: non-linearity factor
@rtype: numpy array
@return: image data array
"""
print "img_scale : asinh"
imageData=numpy.array(inputArray, copy=True)
	if scale_min is None:
		scale_min = imageData.min()
	if scale_max is None:
		scale_max = imageData.max()
factor = numpy.arcsinh((scale_max - scale_min)/non_linear)
indices0 = numpy.where(imageData < scale_min)
indices1 = numpy.where((imageData >= scale_min) & (imageData <= scale_max))
indices2 = numpy.where(imageData > scale_max)
imageData[indices0] = 0.0
imageData[indices2] = 1.0
imageData[indices1] = numpy.arcsinh((imageData[indices1] - \
scale_min)/non_linear)/factor
return imageData
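# Typical pipeline (illustrative sketch; img is a 2D numpy image array):
#   sky, n_iter = sky_median_sig_clip(img, 3.0, 0.01)
#   scaled = asinh(img, scale_min=sky, scale_max=img.max())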
|
{
"content_hash": "964d3e8591d6263ac3bc71187ef5bd80",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 92,
"avg_line_length": 29.179611650485437,
"alnum_prop": 0.7038762269173182,
"repo_name": "bamford/astrobamf",
"id": "738e7ea9c528e9b0e08a03aa3a0abc946f4c1db4",
"size": "6135",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "img_scale.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1597994"
},
{
"name": "C++",
"bytes": "81341"
},
{
"name": "FORTRAN",
"bytes": "147106"
},
{
"name": "PHP",
"bytes": "2236"
},
{
"name": "Perl",
"bytes": "942"
},
{
"name": "Python",
"bytes": "280936"
},
{
"name": "Shell",
"bytes": "34020"
}
],
"symlink_target": ""
}
|
from core import Tile
import json
from random import randrange
from ds import WUF, isValidSequence
# import pdb
def loadTiles():
f = open('data/tiles.json')
tiles = json.load(f)
tileArr = []
# print tiles['freq']
for (l, f) in tiles['freq'].iteritems():
score = tiles['scores'][l]
for x in range(f):
tileArr.append(Tile(l, score))
return {
'tiles': tileArr,
'letter_score': tiles['scores']
}
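# loadTiles expects data/tiles.json shaped like (illustrative values):
#   {"freq": {"A": 9, "B": 2, ...}, "scores": {"A": 1, "B": 3, ...}}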
class Game:
def __init__(self, board):
self.board = board
self.loadTiles()
self.isFirstTurn = True
def loadTiles(self):
res = loadTiles()
self.tiles = res['tiles']
self.letter_scores = res['letter_score']
def fillRack(self, rack, default=None):
if default is not None:
existing = len(rack.tiles)
remaining = rack.size - existing
default = list(default[:remaining])
tiles = []
for ch in default:
                # next() raises StopIteration if the letter isn't in the bag
tile = next(t for t in self.tiles if t.letter == ch)
tiles.append(tile)
self.tiles.remove(tile)
rack.tiles.extend(tiles)
# TODO: write code for case when the default < remaining
return
existing = len(rack.tiles)
remaining = rack.size - existing
self.shuffle()
i = remaining
while i > 0:
rack.tiles.append(self.tiles.pop())
i = i - 1
def shuffle(self):
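        # in-place Fisher-Yates shuffle of the remaining tile bag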
for i in range(len(self.tiles) - 1, 0, -1):
j = randrange(i + 1)
self.tiles[i], self.tiles[j] = self.tiles[j], self.tiles[i]
# def putWord(self, pos, word, orientation='horizontal'):
# for ch in word:
def setPlayer(self, player):
self.player = player
self._utilized = []
def playLetter(self, ch):
assert self.player is not None
assert self.position is not None
assert self.orientation is not None
idx = self.player.rack.find(ch)
if idx == -1:
# check for blank tile
idx = self.player.rack.find('_')
if idx == -1:
return {
'result' : False,
'msg' : 'Invalid tile:' + ch
}
else:
#set the blank tile
tile = self.player.rack.tiles[idx]
tile.setSubstituteLetter(ch)
pos = self.position
if pos[0] < self.board.size and pos[1] < self.board.size:
cell = self.board.getCell(pos)
#push into queue
if cell.tile is not None:
return {
'result': False,
                    'msg': 'Cannot accept: ' + ch + '. There is already a tile at ' + str(pos)
}
self._utilized.append((self.player.rack.tiles[idx], pos))
if self.orientation == 'horizontal':
self.position = (pos[0], pos[1] + 1)
# #if this cell has some tile increment it
# if self.board.getCell(self.position).hasTile():
# self.position = (self.position[0], self.position[1] + 1)
else:
self.position = (pos[0] + 1, pos[1])
else:
return {
'result': False,
                'msg': 'Cannot accept letter: ' + ch + '. Row/column is at its end'
}
# remove tile from rack at that index
del self.player.rack.tiles[idx]
return {
'result' : True,
'msg' : 'Accepted'
}
def setOrientation(self, orientation):
self.orientation = orientation
def setPosition(self, pos):
self.position = pos
def playWord(self, word):
assert self.orientation is not None
assert self.position is not None
for letter in word:
res = self.playLetter(letter)
if res['result'] == False:
return res
return {
'result' : True,
'msg' : 'Accepted'
}
def joinNeighbours(self, pos, vcells):
        size = self.board.size
        # Collect the orthogonal neighbours of pos that lie on the board.
        # pos[0] indexes the row and pos[1] the column (see playLetter above).
        candidates = [
            (pos[0] - 1, pos[1]),  # up
            (pos[0] + 1, pos[1]),  # down
            (pos[0], pos[1] - 1),  # left
            (pos[0], pos[1] + 1),  # right
        ]
        nodes = [(r, c) for (r, c) in candidates
                 if 0 <= r < size and 0 <= c < size]
s = self.board._getIndex(pos)
# vcells = list(self.board.virtual_cells)
uf = WUF(vcells)
# for idx in nodes:
# if self.board.getCell(idx).hasTile():
# uf.join(s, idx);
#
# self._ucells = vcells
for n in nodes:
cell = self.board.getCell(n);
if cell.hasTile():
uf.join(s, cell.id)
# return uf
def endTurn(self):
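        # index of the centre (star) cell on an odd-sized square board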
        midCellIndex = self.board.size ** 2 / 2
def _sort_(a):
if self.orientation == 'horizontal':
return a[1]
else:
return a[0]
        # sort the placed tiles into play order
        self._utilized.sort(key=_sort_)
vcells = list(self.board.virtual_cells)
for item in self._utilized:
self.joinNeighbours(item[1], vcells)
uf = WUF(vcells)
# join the entered
for i in range(1, len(self._utilized)):
prev = self._utilized[i - 1]
curr = self._utilized[i]
pidx = self.board._getIndex(prev[1])
cidx = self.board._getIndex(curr[1])
uf.join(pidx, cidx)
#first index pos of placed word
_ , p = self._utilized[0]
idx = self.board._getIndex(p)
        if uf.isConnected(idx, midCellIndex) == False:
            if self.isFirstTurn:
                return { 'result' : False, 'msg' : 'The first word must pass through the star tile' }
            else:
                return { 'result' : False, 'msg' : 'The placed word must connect to words already played on the board' }
        # TODO: Must commit and compute score here
        # TODO: Fill rack
        self.isFirstTurn = False
        return { 'result' : True, 'msg' : 'Accepted' }
def getCurrentScore(self):
assert self.orientation is not None
assert len(self._utilized) > 0
# 1. Check if all letters put
# belong to one row or column
# 2. identify gap. Check gap with
# board
# 3. retrieve all words (horizontal and vertical)
# 4. compute score
"""
General strategy
================
1- If only 1 letter in queue
1.1- if is first turn
1.1.1- throw error
1.2- check for neighboring joins and make word(s)
1.3- If there are words - compute score
"""
#clone utilized queue
utilized = self._utilized[:]
if len(utilized) == 1:
            # we have to check neighbouring tiles on the board
            if self.isFirstTurn:
                return { 'result': False, 'msg': 'A single letter is not a valid word on the first turn'}
# we'd only be interested
# in a case where this isn't
# the first turn
if self.isFirstTurn:
# we don't really care for neighbours
# at this point
# join each letter
# and try to compute
positions = map(lambda x: x[1], utilized)
seqRet = isValidSequence(positions)
if seqRet['result'] == True:
if seqRet['direction'] == 'horizontal':
                    utilized.sort(key=lambda x: x[1][1]) # sort by column
_, fpos = utilized[0]
lqueue = []
npos = -1
gap = False
for tile, pos in utilized:
if npos == -1:
npos = pos[1]
elif npos == pos[1]:
gap = False
else:
gap = True
npos = npos + 1
if gap:
# check against the board
cell = self.board.getCell(pos)
if not cell.hasTile():
                                return Result(False, 'invalid word formation in row')
lqueue.append((cell.tile, pos))
else:
# add tile to lqueue
lqueue.append((tile, pos))
else:
                    utilized.sort(key=lambda x: x[1][0]) # sort by row
lqueue = []
npos = -1
gap = False
for tile, pos in utilized:
if npos == -1:
npos = pos[0]
elif npos == pos[0]:
gap = False
                        else:
                            gap = True
                        npos = npos + 1
if gap:
# check against the board
cell = self.board.getCell(pos)
if not cell.hasTile():
                                return Result(False, 'invalid word formation in column')
lqueue.append((cell.tile, pos))
else:
# add tile to lqueue
lqueue.append((tile, pos))
else:
                return Result(False, 'invalid tile placement: tiles must all be placed in one row or one column')
assert lqueue is not None
# pdb.set_trace()
score = self._computeQueue(lqueue)
return Result(True, 'computed score', { 'score': score })
def _computeQueue(self, queue):
score = 0
hasWordScoreBonus = False
for tile, pos in queue:
cell = self.board.getCell(pos)
ls = tile.score
if cell.hasBonus():
if cell.bonus == 'TL':
ls = ls * 3
elif cell.bonus == 'DL':
ls = ls * 2
else:
hasWordScoreBonus = True
wsType = cell.bonus
score = score + ls
if hasWordScoreBonus:
factor = 3 if wsType == 'TW' else 2
score = score * factor
return score
def Result(res, msg, other=None):
ret = {}
ret['result'] = res
ret['msg'] = msg
if other is not None:
for key in other:
ret[key] = other[key]
return ret
|
{
"content_hash": "74cb06d6759e57b994639247192174e3",
"timestamp": "",
"source": "github",
"line_count": 420,
"max_line_length": 118,
"avg_line_length": 32.16428571428571,
"alnum_prop": 0.4641350210970464,
"repo_name": "deostroll/sengine",
"id": "702c74c94478bb82021d361ca44cd30a205421b8",
"size": "13509",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "engine/game.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31529"
}
],
"symlink_target": ""
}
|
"""Parse an EXPRESS file and extract basic information on all
entities and data types contained"""
import sys
import re
from collections import OrderedDict
re_match_entity = re.compile(r"""
ENTITY\s+(\w+)\s* # 'ENTITY foo'
.*? # skip SUPERTYPE-of
(?:SUBTYPE\s+OF\s+\((\w+)\))?; # 'SUBTYPE OF (bar);' or simply ';'
(.*?) # 'a : atype;' (0 or more lines like this)
(?:(?:INVERSE|UNIQUE|WHERE)\s*$.*?)? # skip the INVERSE, UNIQUE, WHERE clauses and everything behind
END_ENTITY;
""",re.VERBOSE|re.DOTALL|re.MULTILINE)
re_match_type = re.compile(r"""
TYPE\s+(\w+?)\s*=\s*((?:LIST|SET)\s*\[\d+:[\d?]+\]\s*OF)?(?:\s*UNIQUE)?\s*(\w+) # TYPE foo = LIST[1:2] of blub
(?:(?<=ENUMERATION)\s*OF\s*\((.*?)\))?
.*? # skip the WHERE clause
END_TYPE;
""",re.VERBOSE|re.DOTALL)
re_match_field = re.compile(r"""
\s+(\w+?)\s*:\s*(OPTIONAL)?\s*((?:LIST|SET)\s*\[\d+:[\d?]+\]\s*OF)?(?:\s*UNIQUE)?\s*(\w+?);
""",re.VERBOSE|re.DOTALL)
class Schema:
def __init__(self):
self.entities = OrderedDict()
self.types = OrderedDict()
class Entity:
def __init__(self,name,parent,members):
self.name = name
self.parent = parent
self.members = members
class Field:
def __init__(self,name,type,optional,collection):
self.name = name
self.type = type
self.optional = optional
self.collection = collection
self.fullspec = (self.collection+' ' if self.collection else '') + self.type
class Type:
def __init__(self,name,aggregate,equals,enums):
self.name = name
self.aggregate = aggregate
self.equals = equals
self.enums = enums
def read(filename, silent=False):
schema = Schema()
print( "Try to read EXPRESS schema file" + filename)
with open(filename,'rt') as inp:
contents = inp.read()
types = re.findall(re_match_type,contents)
for name,aggregate,equals,enums in types:
schema.types[name] = Type(name,aggregate,equals,enums)
entities = re.findall(re_match_entity,contents)
for name,parent,fields_raw in entities:
print('process entity {0}, parent is {1}'.format(name,parent)) if not silent else None
fields = re.findall(re_match_field,fields_raw)
members = [Field(name,type,opt,coll) for name, opt, coll, type in fields]
print(' got {0} fields'.format(len(members))) if not silent else None
schema.entities[name] = Entity(name,parent,members)
return schema
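def _demo():
    """Tiny illustrative sketch (inline fragment, not a real schema file):
    apply the TYPE regex directly to a string."""
    sample = "TYPE length = REAL; END_TYPE;"
    return re.findall(re_match_type, sample)  # [('length', '', 'REAL', '')]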
if __name__ == "__main__":
sys.exit(read(sys.argv[1] if len(sys.argv)>1 else 'schema.exp'))
|
{
"content_hash": "6af701d2c40ca61d11e846ce0926b569",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 118,
"avg_line_length": 35.97530864197531,
"alnum_prop": 0.5425531914893617,
"repo_name": "google/filament",
"id": "c2a39e70b812a4510fcba07b33f63129c1efcc5d",
"size": "4865",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "third_party/libassimp/scripts/StepImporter/ExpressReader.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "2833995"
},
{
"name": "Batchfile",
"bytes": "4607"
},
{
"name": "C",
"bytes": "2796377"
},
{
"name": "C#",
"bytes": "1099"
},
{
"name": "C++",
"bytes": "7044879"
},
{
"name": "CMake",
"bytes": "209759"
},
{
"name": "CSS",
"bytes": "17232"
},
{
"name": "Dockerfile",
"bytes": "2404"
},
{
"name": "F#",
"bytes": "710"
},
{
"name": "GLSL",
"bytes": "223763"
},
{
"name": "Go",
"bytes": "61019"
},
{
"name": "Groovy",
"bytes": "11811"
},
{
"name": "HTML",
"bytes": "80095"
},
{
"name": "Java",
"bytes": "780083"
},
{
"name": "JavaScript",
"bytes": "90947"
},
{
"name": "Kotlin",
"bytes": "345783"
},
{
"name": "Objective-C",
"bytes": "55990"
},
{
"name": "Objective-C++",
"bytes": "314291"
},
{
"name": "Python",
"bytes": "76565"
},
{
"name": "RenderScript",
"bytes": "1769"
},
{
"name": "Ruby",
"bytes": "4436"
},
{
"name": "Shell",
"bytes": "76965"
},
{
"name": "TypeScript",
"bytes": "3293"
}
],
"symlink_target": ""
}
|
from collections import OrderedDict
import inspect
from typing import List
from torchmetrics.metric import Metric
import pytorch_lightning as pl
from torch import nn, Tensor, fx
from torch.nn.modules.loss import _Loss
from torch.optim import Optimizer
from torch.optim.lr_scheduler import _LRScheduler
from bigdl.nano.utils.log4Error import invalidInputError
class LightningModule(pl.LightningModule):
"""
    A wrapper LightningModule for common PyTorch models.
    This class implements common methods of LightningModule, so that a classic
    PyTorch supervised-learning model only needs to supply a module, loss and
    optimizer to create a LightningModule.
"""
def __init__(self, model: nn.Module, loss: _Loss = None, optimizer: Optimizer = None,
scheduler: _LRScheduler = None,
metrics: List[Metric] = None):
"""
        Create a LightningModule that integrates PyTorch modules, loss and optimizer.
:param model: Pytorch model to be converted.
:param loss: A torch loss function.
:param optimizer: A torch optimizer.
:param scheduler: A torch scheduler.
:param metrics: A list of metrics to calculate accuracy of the model.
"""
super().__init__()
self.model = model
self.loss = loss
self.optimizer = optimizer
self.scheduler = scheduler
self.metrics = metrics
self._forward_args_ = None
@property
def forward_args(self): # noqa
if self._forward_args_ is None:
self._forward_args_ = inspect.getfullargspec(self.model.forward).args[1:]
return self._forward_args_
@forward_args.setter
def forward_args(self, forward_args):
self._forward_args_ = forward_args
@property
def _nargs(self):
return len(self.forward_args)
def compile(self,
loss: _Loss = None, optimizer: Optimizer = None,
scheduler: _LRScheduler = None,
metrics: List[Metric] = None):
"""
        Compile a LightningModule with loss, optimizer, metrics and schedulers.
:param loss: A torch loss function.
:param optimizer: A torch optimizer.
:param scheduler: A torch scheduler.
:param metrics: A list of metrics to calculate accuracy of the model.
"""
if loss is not None:
self.loss = loss
if optimizer is not None:
self.optimizer = optimizer
if scheduler is not None:
self.scheduler = scheduler
if metrics is not None:
self.metrics = metrics
def forward(self, *args):
"""Same as torch.nn.Module.forward()."""
if isinstance(args, fx.Proxy):
args = [args[i] for i in range(self._nargs)]
else:
args = args[:self._nargs]
return self.model(*args)
def on_train_start(self) -> None:
"""Called at the beginning of training after sanity check."""
invalidInputError(self.loss, "Loss must not be None for training.")
return super().on_train_start()
def training_step(self, batch, batch_idx):
"""Define a single training step, return a loss tensor."""
y_hat = self(*batch[:self._nargs])
loss = self.loss(y_hat, batch[-1]) # use last output as target
self.log("train/loss", loss, on_step=True, logger=True)
return loss
def validation_step(self, batch, batch_idx):
"""Define a single validation step."""
y_hat = self(*batch[:self._nargs])
if self.loss:
loss = self.loss(y_hat, batch[-1]) # use last output as target
self.log("val/loss", loss, on_epoch=True,
prog_bar=True, logger=True)
if self.metrics:
acc = {"val/{}_{}".format(type(metric).__name__, i): metric(y_hat, batch[-1])
for i, metric in enumerate(self.metrics)}
self.log_dict(acc, on_epoch=True, prog_bar=True, logger=True)
def test_step(self, batch, batch_idx):
"""Define a single test step."""
y_hat = self(*batch[:self._nargs])
if self.metrics:
acc = {"test/{}_{}".format(type(metric).__name__, i): metric(y_hat, batch[-1])
for i, metric in enumerate(self.metrics)}
self.log_dict(acc, on_epoch=True, prog_bar=True, logger=True)
def predict_step(self, batch, batch_idx, dataloader_idx=0):
"""Define a single predict step."""
return self(*batch[:self._nargs])
def configure_optimizers(self):
"""Setup the optimizers for this module, and return optimizers and schedulers."""
optimizers = [self.optimizer]
schedulers = []
if self.scheduler:
schedulers.append(self.scheduler)
return optimizers, schedulers
def load_state_dict(self, state_dict: 'OrderedDict[str, Tensor]', # type: ignore
strict: bool = True):
"""Same as LightningModule.load_state_dict, execept falling back to pytorch."""
try:
super().load_state_dict(state_dict)
except RuntimeError:
self.model.load_state_dict(state_dict)
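if __name__ == "__main__":
    # Minimal usage sketch (illustrative toy model, not from BigDL docs):
    # wrap a plain nn.Module and run a forward pass outside any Trainer.
    import torch
    net = nn.Linear(4, 1)
    module = LightningModule(net)
    module.compile(loss=nn.MSELoss(),
                   optimizer=torch.optim.SGD(net.parameters(), lr=0.1))
    x = torch.randn(8, 4)
    print(module(x).shape)  # torch.Size([8, 1])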
|
{
"content_hash": "acc88120dcd37fff386fda0f8eb3ca71",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 90,
"avg_line_length": 38.49264705882353,
"alnum_prop": 0.606494746895893,
"repo_name": "yangw1234/BigDL",
"id": "362c0d29318a7df490ab783f943a15e105006a67",
"size": "5821",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "python/nano/src/bigdl/nano/pytorch/lightning.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5342"
},
{
"name": "Dockerfile",
"bytes": "138760"
},
{
"name": "Java",
"bytes": "1321348"
},
{
"name": "Jupyter Notebook",
"bytes": "54063856"
},
{
"name": "Lua",
"bytes": "1904"
},
{
"name": "Makefile",
"bytes": "19253"
},
{
"name": "PowerShell",
"bytes": "1137"
},
{
"name": "PureBasic",
"bytes": "593"
},
{
"name": "Python",
"bytes": "8762180"
},
{
"name": "RobotFramework",
"bytes": "16117"
},
{
"name": "Scala",
"bytes": "13216038"
},
{
"name": "Shell",
"bytes": "844916"
}
],
"symlink_target": ""
}
|
from twilio.rest import TwilioRestClient
# Credentials owner: yklal95@gmail.com
# Find these values at https://twilio.com/user/account
account_sid = "ACe464c41a9b742a67a494ae0b08fd6a7c"
auth_token = "8473ba13daed9e9f7a3d8fe2cb20941d"
client = TwilioRestClient(account_sid, auth_token)
message = client.messages.create(to="+18779545971", from_="+12024992521",
body="define wikipedia")
print message
|
{
"content_hash": "e8bf7b0da3e55ed185333504abafcf91",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 73,
"avg_line_length": 39.72727272727273,
"alnum_prop": 0.7345537757437071,
"repo_name": "jiteshjha/hello_friend",
"id": "5188c05d5d24e28b1883db988d2acc6f963fd6b7",
"size": "512",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Utilities/send_sms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12487"
},
{
"name": "Shell",
"bytes": "203"
}
],
"symlink_target": ""
}
|
"""Interact with Stackdriver Error Reporting via Logging API.
It's possible to report Stackdriver Error Reporting errors by formatting
structured log messages in Stackdriver Logging in a given format. This
client provides a mechanism to report errors using that technique.
"""
import google.cloud.logging.client
class _ErrorReportingLoggingAPI(object):
"""Report to Stackdriver Error Reporting via Logging API
:type project: str
:param project: the project which the client acts on behalf of. If not
passed falls back to the default inferred from the
environment.
:type credentials: :class:`oauth2client.client.OAuth2Credentials` or
:class:`NoneType`
:param credentials: The OAuth2 Credentials to use for the connection
owned by this client. If not passed (and if no
``_http`` object is passed), falls back to the default
inferred from the environment.
:type _http: :class:`httplib2.Http` or class that defines ``request()``.
:param _http: An optional HTTP object to make requests. If not passed, an
``_http`` object is created that is bound to the
``credentials`` for the current object.
This parameter should be considered private, and could
change in the future.
"""
def __init__(self, project, credentials=None, _http=None):
self.logging_client = google.cloud.logging.client.Client(
project, credentials, _http=_http)
def report_error_event(self, error_report):
"""Report error payload.
:type error_report: dict
        :param error_report:
dict payload of the error report formatted according to
https://cloud.google.com/error-reporting/docs/formatting-error-messages
This object should be built using
        :meth:`~google.cloud.error_reporting.client._build_error_report`
"""
logger = self.logging_client.logger('errors')
logger.log_struct(error_report)
|
{
"content_hash": "0cd115508ce835d4babe2ebc76513730",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 83,
"avg_line_length": 44.145833333333336,
"alnum_prop": 0.6460594620103822,
"repo_name": "ammarkhann/FinalSeniorCode",
"id": "d8bd7a12a4771ecae42a01a83b9e0ad6cfc5e4c3",
"size": "2716",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/google/cloud/error_reporting/_logging.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "229289"
},
{
"name": "C++",
"bytes": "171536"
},
{
"name": "CSS",
"bytes": "928345"
},
{
"name": "Fortran",
"bytes": "14107"
},
{
"name": "HTML",
"bytes": "853239"
},
{
"name": "JavaScript",
"bytes": "4838516"
},
{
"name": "Jupyter Notebook",
"bytes": "518186"
},
{
"name": "Makefile",
"bytes": "214"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "81804894"
},
{
"name": "Roff",
"bytes": "6673"
},
{
"name": "Shell",
"bytes": "3409"
},
{
"name": "Smarty",
"bytes": "28408"
},
{
"name": "TeX",
"bytes": "1527"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
}
|
import os
import unittest2 as unittest
from mock import patch, ANY
import liquipy
from liquipy.executor import Executor as LiquibaseExecutor
class LiquipySampleTest(unittest.TestCase):
def setUp(self):
self.pathToChangelog = os.path.realpath(os.path.join(
os.path.dirname(__file__),
"../../sample/changelog.yml"))
@patch("liquipy.db.LiquibaseExecutor", autospec=LiquibaseExecutor)
def testSample(self, LiquibaseExecutorMock):
""" Simple unit test demonstrating common use, mirroring the bundled sample
"""
db = liquipy.Database(
host="localhost",
database="test_liquipy",
username="root",
tempDir=".")
db.initialize(self.pathToChangelog)
db.update()
# Basic assertions on use of liquipy.executor.Executor
self.assertTrue(LiquibaseExecutorMock.called)
LiquibaseExecutorMock.assert_called_once_with(
"localhost", "test_liquipy", "root", ANY)
self.assertTrue(LiquibaseExecutorMock.return_value.run.called)
LiquibaseExecutorMock.return_value.run.assert_called_once_with(
ANY, "update")
|
{
"content_hash": "555242f1965dd3ddc55651a505266071",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 80,
"avg_line_length": 30.027027027027028,
"alnum_prop": 0.7047704770477048,
"repo_name": "oxtopus/liquipy",
"id": "e14114684e9f4b64625202a97a6988b5e8f58224",
"size": "1111",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/sample_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6684"
},
{
"name": "Shell",
"bytes": "118"
}
],
"symlink_target": ""
}
|
"""
This module provides code to work with the enzyme.dat file from
Enzyme.
http://www.expasy.ch/enzyme/
Tested with the release of 03-Mar-2009.
Functions:
read Reads a file containing one ENZYME entry
parse Reads a file containing multiple ENZYME entries
Classes:
Record Holds ENZYME data.
"""
def parse(handle):
"""Parse ENZYME records.
This function is for parsing ENZYME files containing multiple
records.
handle - handle to the file."""
while True:
record = __read(handle)
if not record:
break
yield record
def read(handle):
"""Read one ENZYME record.
This function is for parsing ENZYME files containing
exactly one record.
handle - handle to the file."""
record = __read(handle)
# We should have reached the end of the record by now
remainder = handle.read()
if remainder:
raise ValueError("More than one ENZYME record found")
return record
class Record(dict):
"""\
Holds information from an ExPASy ENZYME record as a Python dictionary.
Each record contains the following keys:
ID: EC number
DE: Recommended name
AN: Alternative names (if any)
CA: Catalytic activity
CF: Cofactors (if any)
    PR: Pointers to the Prosite documentation entry(ies) that
correspond to the enzyme (if any)
    DR: Pointers to the Swiss-Prot protein sequence entry(ies)
that correspond to the enzyme (if any)
CC: Comments
"""
def __init__(self):
dict.__init__(self)
self["ID"] = ''
self["DE"] = ''
self["AN"] = []
self["CA"] = ''
self["CF"] = ''
self["CC"] = [] # one comment per line
self["PR"] = []
self["DR"] = []
def __repr__(self):
if self["ID"]:
if self["DE"]:
return "%s (%s, %s)" % (self.__class__.__name__,
self["ID"], self["DE"])
else:
return "%s (%s)" % (self.__class__.__name__,
self["ID"])
else:
return "%s ( )" % (self.__class__.__name__)
def __str__(self):
output = "ID: " + self["ID"]
output += " DE: " + self["DE"]
output += " AN: " + repr(self["AN"])
output += " CA: '" + self["CA"] + "'"
output += " CF: " + self["CF"]
output += " CC: " + repr(self["CC"])
output += " PR: " + repr(self["PR"])
output += " DR: %d Records" % len(self["DR"])
return output
# Everything below is private
def __read(handle):
record = None
for line in handle:
key, value = line[:2], line[5:].rstrip()
if key=="ID":
record = Record()
record["ID"] = value
elif key=="DE":
record["DE"]+=value
elif key=="AN":
if record["AN"] and not record["AN"][-1].endswith("."):
record["AN"][-1] += " " + value
else:
record["AN"].append(value)
elif key=="CA":
record["CA"] += value
elif key=="DR":
pair_data = value.rstrip(";").split(';')
for pair in pair_data:
t1, t2 = pair.split(',')
row = [t1.strip(), t2.strip()]
record["DR"].append(row)
elif key=="CF":
if record["CF"]:
record["CF"] += " " + value
else:
record["CF"] = value
elif key=="PR":
assert value.startswith("PROSITE; ")
value = value[9:].rstrip(";")
record["PR"].append(value)
elif key=='CC':
if value.startswith("-!- "):
record["CC"].append(value[4:])
elif value.startswith(" ") and record["CC"]:
record["CC"][-1] += value[3:]
# copyright notice is silently skipped
elif key=="//":
if record:
return record
else: # This was the copyright notice
continue
if record:
raise ValueError("Unexpected end of stream")
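if __name__ == "__main__":
    # Minimal sketch (synthetic two-line record, not real ExPASy data).
    try:
        from StringIO import StringIO  # Python 2
    except ImportError:
        from io import StringIO       # Python 3
    handle = StringIO("ID   1.1.1.1\nDE   Alcohol dehydrogenase.\n//\n")
    print(read(handle))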
|
{
"content_hash": "f6f044928370980a961e9034b324586f",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 70,
"avg_line_length": 29.260563380281692,
"alnum_prop": 0.49458483754512633,
"repo_name": "LyonsLab/coge",
"id": "cdd0f7616544b10bee233697c0f602a9f166661c",
"size": "4440",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "bin/last_wrapper/Bio/ExPASy/Enzyme.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "1469592"
},
{
"name": "C++",
"bytes": "156708"
},
{
"name": "CSS",
"bytes": "65405"
},
{
"name": "HTML",
"bytes": "105418"
},
{
"name": "Haxe",
"bytes": "111359"
},
{
"name": "Java",
"bytes": "55110"
},
{
"name": "JavaScript",
"bytes": "762653"
},
{
"name": "Makefile",
"bytes": "7838"
},
{
"name": "Perl",
"bytes": "5054463"
},
{
"name": "Python",
"bytes": "4394136"
},
{
"name": "Raku",
"bytes": "15140"
},
{
"name": "RobotFramework",
"bytes": "15841"
},
{
"name": "Roff",
"bytes": "3514"
},
{
"name": "Rust",
"bytes": "1507"
},
{
"name": "Shell",
"bytes": "13600"
},
{
"name": "TSQL",
"bytes": "24440"
}
],
"symlink_target": ""
}
|
import rdtest
import renderdoc as rd
class VK_Buffer_Truncation(rdtest.Buffer_Truncation):
demos_test_name = 'VK_Buffer_Truncation'
internal = False
|
{
"content_hash": "d96d307e55a9a103f092e0e623eea6b6",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 53,
"avg_line_length": 22.571428571428573,
"alnum_prop": 0.759493670886076,
"repo_name": "Zorro666/renderdoc",
"id": "a1613e61e69f58e4f7a42ad075ebdcce0df1a151",
"size": "158",
"binary": false,
"copies": "3",
"ref": "refs/heads/v1.x",
"path": "util/test/tests/Vulkan/VK_Buffer_Truncation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "7657767"
},
{
"name": "C++",
"bytes": "33782979"
},
{
"name": "CMake",
"bytes": "101295"
},
{
"name": "CSS",
"bytes": "1642"
},
{
"name": "Dockerfile",
"bytes": "119"
},
{
"name": "GLSL",
"bytes": "58063"
},
{
"name": "HLSL",
"bytes": "80557"
},
{
"name": "Java",
"bytes": "2241"
},
{
"name": "JavaScript",
"bytes": "10593"
},
{
"name": "Objective-C",
"bytes": "53867"
},
{
"name": "Objective-C++",
"bytes": "156322"
},
{
"name": "Python",
"bytes": "924182"
},
{
"name": "QMake",
"bytes": "15225"
},
{
"name": "SWIG",
"bytes": "54789"
},
{
"name": "Shell",
"bytes": "51606"
}
],
"symlink_target": ""
}
|
import generateData, configureDS, schema, config, build, deploy, starter, interactive, sys
# Configuration dictionary
d = config.getConfigDir()
# MAIN PROGRAM
d = interactive.collectConfigs(d)
if not d[config.IS_CONFIG_OK]:
    print 'Configuration is invalid, exiting OX Platform setup!'
sys.exit()
# generate ldap schema
if d['generateSchema'] == 'true':
    print 'Starting to generate schema...'
    schema.createSchemaFile()
    schema.createUserSchema()
    print 'Finished generating schema.'
# generate Ldap Data
if d['generateLdapDataLdif'] == 'true':
    print 'Starting to generate ldap data...'
    generateData.generateDataLdif()
    print 'Finished generating ldap data.'
# configure DS with schema and data
if d['configureDS'] == 'true':
    print 'Starting to configure Directory Server...'
    configureDS.configureDirectoryServer(d)
    print 'Finished configuring Directory Server.'
if d['buildOX'] == 'true':
    print 'Starting to build OX...'
    build.buildOx(d)
    print 'Finished building OX.'
if d['deployOX'] == 'true':
    print 'Starting to deploy OX...'
    deploy.deployOx(d)
    print 'Finished deploying OX.'
if d['startContainer'] == 'true':
    print 'Starting Application Container...'
    starter.start(d)
    print 'Finished starting Application Container.'
|
{
"content_hash": "3517c6ca620d312db6cabcd35d8afc18",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 90,
"avg_line_length": 27.53191489361702,
"alnum_prop": 0.6939721792890263,
"repo_name": "GluuFederation/install",
"id": "34e02f6881868bf8ed26815b522d5e38ccd18c25",
"size": "1317",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "88204"
}
],
"symlink_target": ""
}
|
from .xkcd_rgb import xkcd_rgb # noqa: F401
from .crayons import crayons # noqa: F401
|
{
"content_hash": "20442aae8ecd8d947a6eb55eb7127909",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 44,
"avg_line_length": 44,
"alnum_prop": 0.7272727272727273,
"repo_name": "arokem/seaborn",
"id": "3d0bf1d56bdc5c0e724c8eeb95200297884337cc",
"size": "88",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "seaborn/colors/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "357"
},
{
"name": "Python",
"bytes": "893858"
},
{
"name": "Shell",
"bytes": "143"
}
],
"symlink_target": ""
}
|
from cyder.api.v1.endpoints.dns import api
from cyder.cydns.txt.models import TXT
class TXTSerializer(api.CommonDNSSerializer, api.LabelDomainMixin):
class Meta(api.CommonDNSMeta):
model = TXT
class TXTViewSet(api.CommonDNSViewSet):
model = TXT
serializer_class = TXTSerializer
|
{
"content_hash": "dccf66cd92a244e5ff50679f22988083",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 67,
"avg_line_length": 25.166666666666668,
"alnum_prop": 0.7582781456953642,
"repo_name": "akeym/cyder",
"id": "114ca22d7d87d279d941dfa157ad50680c296b4d",
"size": "302",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "cyder/api/v1/endpoints/dns/txt/api.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "22868"
},
{
"name": "HTML",
"bytes": "54595"
},
{
"name": "JavaScript",
"bytes": "214688"
},
{
"name": "Makefile",
"bytes": "2375"
},
{
"name": "Puppet",
"bytes": "6422"
},
{
"name": "Python",
"bytes": "1955522"
},
{
"name": "Shell",
"bytes": "9416"
}
],
"symlink_target": ""
}
|
import unittest
import os
from unittest import TextTestResult
from django.test.runner import DiscoverRunner
from django.db import NotSupportedError
from djangae.utils import find_project_root
from google.appengine.ext import testbed
# Many Django tests require saving instances with a PK
# of zero. App Engine doesn't allow this (it treats the key
# as incomplete in this case) so we skip those tests here
DJANGO_TESTS_WHICH_REQUIRE_ZERO_PKS = {
'model_forms.tests.ModelMultipleChoiceFieldTests.test_model_multiple_choice_required_false',
'model_forms.tests.ModelChoiceFieldTests.test_modelchoicefield',
'custom_pk.tests.CustomPKTests.test_zero_non_autoincrement_pk',
'bulk_create.tests.BulkCreateTests.test_zero_as_autoval'
}
# These tests only work if you haven't changed AUTH_USER_MODEL
# This is probably a bug in Django (the tests should use skipIfCustomUser)
# but I haven't had a chance to see if it's fixed in master (and it's not fixed in
# 1.7, so this needs to exist either way)
DJANGO_TESTS_WHICH_REQUIRE_AUTH_USER = {
'proxy_models.tests.ProxyModelAdminTests.test_cascade_delete_proxy_model_admin_warning',
'proxy_models.tests.ProxyModelAdminTests.test_delete_str_in_model_admin',
'proxy_models.tests.ProxyModelTests.test_permissions_created' # Requires permissions created
}
DJANGO_TESTS_WHICH_HAVE_BUGS = {
'one_to_one.tests.OneToOneTests.test_foreign_key', # Uses the wrong IDs, fixed in 1.8+
}
# This is potentially fixable by us. sql_with_params returns a tuple of
# our Select/Insert/UpdateCommand, and an empty list (because the params
# are stored in the where tree. Some tests assume that we'll be returning the
# params separately, and so they fail. We could fix this by actually returning the
# values that went into the where, but that's for another day.
DJANGO_TESTS_WHICH_EXPECT_SQL_PARAMS = {
'model_forms.tests.ModelMultipleChoiceFieldTests.test_clean_does_deduplicate_values',
'ordering.tests.OrderingTests.test_order_by_f_expression_duplicates'
}
# Django 1.8 removed the supports_select_related flag, so we have to manually skip
# tests which depend on it
DJANGO_TESTS_WHICH_USE_SELECT_RELATED = {
'defer.tests.DeferTests.test_defer_with_select_related',
'defer.tests.DeferTests.test_defer_select_related_raises_invalid_query',
'defer.tests.DeferTests.test_only_select_related_raises_invalid_query',
'defer.tests.DeferTests.test_only_with_select_related',
'model_inheritance.tests.ModelInheritanceDataTests.test_select_related_works_on_parent_model_fields'
}
DJANGO_TESTS_TO_SKIP = DJANGO_TESTS_WHICH_REQUIRE_ZERO_PKS.union(
DJANGO_TESTS_WHICH_REQUIRE_AUTH_USER).union(
DJANGO_TESTS_WHICH_HAVE_BUGS).union(
DJANGO_TESTS_WHICH_EXPECT_SQL_PARAMS).union(
DJANGO_TESTS_WHICH_USE_SELECT_RELATED
)
def init_testbed():
# We don't initialize the datastore stub here, that needs to be done by Django's create_test_db and destroy_test_db.
IGNORED_STUBS = [ "init_datastore_v3_stub" ]
stub_kwargs = {
"init_taskqueue_stub": {
"root_path": find_project_root()
}
}
bed = testbed.Testbed()
bed.activate()
for init_name in testbed.INIT_STUB_METHOD_NAMES.values():
if init_name in IGNORED_STUBS:
continue
getattr(bed, init_name)(**stub_kwargs.get(init_name, {}))
return bed
def bed_wrap(test):
def _wrapped(*args, **kwargs):
bed = None
try:
# Init test stubs
bed = init_testbed()
return test(*args, **kwargs)
finally:
if bed:
bed.deactivate()
bed = None
return _wrapped
class SkipUnsupportedTestResult(TextTestResult):
def addError(self, test, err):
        skip = os.environ.get("SKIP_UNSUPPORTED", "true").lower() not in ("0", "false", "no")
# If the error is a NotSupportedError and the test is a Django test (where we expect some
# functionality to be unsupported) rather than a Djangae test (where our tests should be
# written to explicitly state which things are and aren't supported) then skip it
if skip and err[0] in (NotSupportedError,) and test.__module__.split(".")[0] != "djangae":
self.addExpectedFailure(test, err)
else:
super(SkipUnsupportedTestResult, self).addError(test, err)
class DjangaeTestSuiteRunner(DiscoverRunner):
def _discover_additional_tests(self):
"""
Django's DiscoverRunner only detects apps that are below
manage.py, which isn't particularly useful if you have other apps
on the path that need testing (arguably all INSTALLED_APPS should be tested
as they all form part of your project and a bug in them could bring your site down).
This method looks for a setting called DJANGAE_ADDITIONAL_TEST_APPS in
and will add extra test cases found in those apps. By default this adds the
djangae tests to your app, but you can of course override that.
"""
from django.conf import settings
from importlib import import_module
ADDITIONAL_APPS = getattr(settings, "DJANGAE_ADDITIONAL_TEST_APPS", ("djangae",))
extra_tests = []
for app in ADDITIONAL_APPS:
mod = import_module(app)
if mod:
folder = mod.__path__[0]
new_tests = self.test_loader.discover(start_dir=folder, top_level_dir=os.path.dirname(folder))
extra_tests.extend(new_tests._tests)
self.test_loader._top_level_dir = None
return extra_tests
def build_suite(self, *args, **kwargs):
extra_tests = self._discover_additional_tests()
args = list(args)
args[1] = extra_tests
suite = super(DjangaeTestSuiteRunner, self).build_suite(*args, **kwargs)
new_tests = []
# Django's DiscoveryRunner can create duplicate tests when passing
# extra_tests argument. Getting rid of that:
suite._tests = list(set(suite._tests))
for i, test in enumerate(suite._tests):
# https://docs.djangoproject.com/en/1.7/topics/testing/advanced/#django.test.TransactionTestCase.available_apps
# available_apis is part of an internal API that allows to speed up
# internal Django test, but that breaks the integration with
# Djangae models and tests, so we are disabling it here
if hasattr(test, 'available_apps'):
test.available_apps = None
if args[0] and not any([test.id().startswith(x) for x in args[0]]):
continue
if test.id() in DJANGO_TESTS_TO_SKIP:
continue #FIXME: It would be better to wrap this in skipTest or something
new_tests.append(bed_wrap(test))
suite._tests[:] = new_tests
return suite
class SkipUnsupportedRunner(DjangaeTestSuiteRunner):
def run_suite(self, suite, **kwargs):
return unittest.TextTestRunner(
verbosity=self.verbosity,
failfast=self.failfast,
resultclass=SkipUnsupportedTestResult
).run(suite)
|
{
"content_hash": "d9519950340617fe626d301816b4ba9d",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 123,
"avg_line_length": 39.42857142857143,
"alnum_prop": 0.6782329988851727,
"repo_name": "asendecka/djangae",
"id": "cdd4897537feca1f095b2f3585ce1b04dba270ba",
"size": "7176",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "djangae/test_runner.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "277"
},
{
"name": "Python",
"bytes": "684586"
},
{
"name": "Shell",
"bytes": "368"
}
],
"symlink_target": ""
}
|
"""
Tests whether a file (in .csv format), with each line consisting of data about a single
student, is anonymous with respect to a particular level of k.
This program will take a set of fields in each line (hard coded, at the moment) and will check to
ensure that there are at least k other lines with the same values for those fields. The fields
selected should be all those that can be used to re-identify a student. The program will ask for the
data file to test and the level of k to test.
The program can be run in either full or summary mode. In summary mode, the program will print the
number of lines that would violate the level of k specified. In full mode, all of sets of properties
that would violate the level of k are printed out, so that one can see what properties might need
to be smeared.
Created on May 28, 2014
@author: waldo
"""
import operator
import csv, sys
import utils
def buildKey(ids, dataLine):
"""
Concatenate a set of fields together to build an overall key
This is a simple approach to determining k-anonymity, in which all
of the fields of interest are concatenated as a single key. The
ids coming in should be a list of indexes into the fields in the dataLine.
These will be concatenated in order to form a new key. Note that this
currently assumes that all of the data fields are strings.
"""
retKey = ''
for i in ids:
retKey += dataLine[i]
return retKey
def makeDict(ids, infile):
"""
Create and return a dictionary keyed by a concatenation of fields with value the number
of entries containing all and only those fields.
Taking a list of indexes into a line of a (csv) file and an open csv.reader(), build a
dictionary that is keyed by the string concatenation of the fields in the index with
value the number of times a line containing just those fields in those indexes occurs. Return
the dictionary to the caller.
"""
retDict = {}
for line in infile:
keyAnon = buildKey(ids, line)
if keyAnon in retDict:
retDict[keyAnon] += 1
else:
retDict[keyAnon] = 1
return retDict
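def _demo():
    """Tiny illustrative sketch (synthetic rows, not real course data):
    count how many records share each combination of fields 0 and 1."""
    rows = [['us', 'm'], ['us', 'm'], ['fr', 'f']]
    return makeDict([0, 1], rows)  # {'usm': 2, 'frf': 1}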
if __name__ == '__main__':
"""
When run stand-alone, this script will query for a filename and a level of anonymity
to check for the externally-connected data fields in the .csv file. The user will also
be prompted for either a summary of the anonymity level (in which case only the number
of records that fail to be at least anonymous to the level indicated) will be printed, or
a full report, in which case the concatenation of fields that allow identification finer
than the level entered will be printed. Note that the indexes of the fields that can be
linked to external properties are hard-coded at the moment; it would be good to have a more
flexible mechanism for this but finding one that is not error prone is difficult.
The id fields that could connect to the outside are 0 -> course_id, 6 -> final_cc_cname,
7 -> LoE, 8 -> YoB, 9 -> gender, and 17 -> nforum_posts.
"""
idFields = [0,6,7,8,9,17]
if len(sys.argv) < 4:
fname = utils.getFileName('data file to test')
kanon = utils.getIntVal('Enter value of k to test : ')
full = utils.getStringVal('Enter s for summary, f for full report : ', ['s', 'f'])
else:
fname = sys.argv[1]
kanon = int(sys.argv[2])
full = sys.argv[3]
fin = open(fname, 'rU')
fread = csv.reader(fin)
totals = []
for i in range(0,kanon):
totals.append(0)
fread.next()
anonDict = makeDict(idFields, fread)
sortedDict = sorted(anonDict.iteritems(), key=operator.itemgetter(1))
for k,v in sortedDict:
if v < kanon:
totals[v-1] += 1
if full == 'f':
print v, k
for i in range(0,kanon-1):
print 'Number of buckets with', i+1, 'entries is', totals[i]
|
{
"content_hash": "378f209fe6386ce199ebc01904c3ea0b",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 100,
"avg_line_length": 38.77669902912621,
"alnum_prop": 0.670756134201302,
"repo_name": "jimwaldo/HarvardX-Tools",
"id": "66482d616c2c3705115b41e4e9d6d556b5e3bb62",
"size": "4017",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/main/python/checkData/testKAnon.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "254545"
},
{
"name": "Scala",
"bytes": "63362"
},
{
"name": "Shell",
"bytes": "3551"
}
],
"symlink_target": ""
}
|
import random
class ZobristHashing(object):
def __init__(self, n_positions, n_pieces):
size = n_positions * n_pieces
self.table = [random.getrandbits(32) for i in range(size)]
self.n_positions = n_positions
self.n_pieces = n_pieces
def __call__(self, board):
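        # `board` is assumed to be a flat sequence of length n_positions where
        # an empty square is ' ' and an occupied square holds an integer piece
        # id in range(n_pieces).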
result = 0
for i in range(self.n_positions):
if board[i] != ' ':
result ^= self.table[i * self.n_pieces + board[i]]
return result
|
{
"content_hash": "6d18e418a014defd6d855ebe8263260a",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 66,
"avg_line_length": 28.529411764705884,
"alnum_prop": 0.5608247422680412,
"repo_name": "davidrobles/mlnd-capstone-code",
"id": "89becad5fb308fb97228e3d53e2a1697fc407d63",
"size": "485",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "capstone/game/utils/zobrist.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "266"
},
{
"name": "Python",
"bytes": "150850"
}
],
"symlink_target": ""
}
|
def makeBar(progress):
    # Halve the progress so the bar is 50 characters wide
shortProgress = progress/2
# Convert progress to a string while we are at it
progressString = str(progress)
    # Number of filled slots (the bar is 50 characters wide, so each slot is 2%)
doneProgress = int(shortProgress)
    # Number of empty slots remaining
    undoneProgress = 50 - int(shortProgress)
# We fill the percentage done with # characters
doneString = '#'*doneProgress
# The rest with whitespaces
undoneString = ' '*undoneProgress
# Build our progress bar and return it
return '[{}{}] {}%'.format(doneString, undoneString, progressString.rjust(5))
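if __name__ == '__main__':
    # Quick illustrative check (not part of the original module).
    print(makeBar(50))
    print(makeBar(100))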
|
{
"content_hash": "c43a105833290f345a05d95c4de6880c",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 81,
"avg_line_length": 37.888888888888886,
"alnum_prop": 0.6891495601173021,
"repo_name": "StarbotDiscord/Starbot",
"id": "6d8f3bcc639a56a123fd3b64b98a67c8b798434c",
"size": "1303",
"binary": false,
"copies": "2",
"ref": "refs/heads/stable",
"path": "libs/progressBar.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "189"
},
{
"name": "Python",
"bytes": "83709"
},
{
"name": "Shell",
"bytes": "52"
}
],
"symlink_target": ""
}
|
"""
__Trigger01ExprPart1_Complete_MDL.py_____________________________________________________
Automatically generated AToM3 Model File (Do not modify directly)
Author: gehan
Modified: Sun Mar 1 20:50:52 2015
_________________________________________________________________________________________
"""
from stickylink import *
from widthXfillXdecoration import *
from MT_pre__Trigger_T import *
from LHS import *
from graph_MT_pre__Trigger_T import *
from graph_LHS import *
from ATOM3Enum import *
from ATOM3String import *
from ATOM3BottomType import *
from ATOM3Constraint import *
from ATOM3Attribute import *
from ATOM3Float import *
from ATOM3List import *
from ATOM3Link import *
from ATOM3Connection import *
from ATOM3Boolean import *
from ATOM3Appearance import *
from ATOM3Text import *
from ATOM3Action import *
from ATOM3Integer import *
from ATOM3Port import *
from ATOM3MSEnum import *
def Trigger01ExprPart1_Complete_MDL(self, rootNode, MoTifRuleRootNode=None, MT_pre__UMLRT2Kiltera_MMRootNode=None):
# --- Generating attributes code for ASG MoTifRule ---
if( MoTifRuleRootNode ):
# author
MoTifRuleRootNode.author.setValue('Annonymous')
# description
MoTifRuleRootNode.description.setValue('\n')
MoTifRuleRootNode.description.setHeight(15)
# name
MoTifRuleRootNode.name.setValue('Trigger01ExprPart1_Complete')
# --- ASG attributes over ---
# --- Generating attributes code for ASG MT_pre__UMLRT2Kiltera_MM ---
if( MT_pre__UMLRT2Kiltera_MMRootNode ):
# author
MT_pre__UMLRT2Kiltera_MMRootNode.author.setValue('Annonymous')
# description
MT_pre__UMLRT2Kiltera_MMRootNode.description.setValue('\n')
MT_pre__UMLRT2Kiltera_MMRootNode.description.setHeight(15)
# name
MT_pre__UMLRT2Kiltera_MMRootNode.name.setValue('')
MT_pre__UMLRT2Kiltera_MMRootNode.name.setNone()
# --- ASG attributes over ---
self.obj121=MT_pre__Trigger_T(self)
self.obj121.isGraphObjectVisual = True
if(hasattr(self.obj121, '_setHierarchicalLink')):
self.obj121._setHierarchicalLink(False)
# MT_label__
self.obj121.MT_label__.setValue('1')
# MT_pivotOut__
self.obj121.MT_pivotOut__.setValue('element1')
# MT_subtypeMatching__
self.obj121.MT_subtypeMatching__.setValue(('True', 1))
self.obj121.MT_subtypeMatching__.config = 0
# MT_pre__classtype
self.obj121.MT_pre__classtype.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj121.MT_pre__classtype.setHeight(15)
# MT_pre__cardinality
self.obj121.MT_pre__cardinality.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj121.MT_pre__cardinality.setHeight(15)
# MT_pre__name
self.obj121.MT_pre__name.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj121.MT_pre__name.setHeight(15)
# MT_pivotIn__
self.obj121.MT_pivotIn__.setValue('')
self.obj121.MT_pivotIn__.setNone()
self.obj121.graphClass_= graph_MT_pre__Trigger_T
if self.genGraphics:
new_obj = graph_MT_pre__Trigger_T(123.0,160.0,self.obj121)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("MT_pre__Trigger_T", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj121.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj121)
self.globalAndLocalPostcondition(self.obj121, rootNode)
self.obj121.postAction( rootNode.CREATE )
self.obj120=LHS(self)
self.obj120.isGraphObjectVisual = True
if(hasattr(self.obj120, '_setHierarchicalLink')):
self.obj120._setHierarchicalLink(False)
# constraint
self.obj120.constraint.setValue('#===============================================================================\n# This code is executed after the nodes in the LHS have been matched.\n# You can access a matched node labelled n by: PreNode(\'n\').\n# To access attribute x of node n, use: PreNode(\'n\')[\'x\'].\n# The given constraint must evaluate to a boolean expression:\n# returning True enables the rule to be applied,\n# returning False forbids the rule from being applied.\n#===============================================================================\n\nreturn True\n')
self.obj120.constraint.setHeight(15)
self.obj120.graphClass_= graph_LHS
if self.genGraphics:
new_obj = graph_LHS(100.0,40.0,self.obj120)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("LHS", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj120.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj120)
self.globalAndLocalPostcondition(self.obj120, rootNode)
self.obj120.postAction( rootNode.CREATE )
# Connections for obj121 (graphObject_: Obj1) of type MT_pre__Trigger_T
self.drawConnections(
)
# Connections for obj120 (graphObject_: Obj0) of type LHS
self.drawConnections(
)
newfunction = Trigger01ExprPart1_Complete_MDL
loadedMMName = ['MoTifRule_META', 'MT_pre__UMLRT2Kiltera_MM_META']
atom3version = '0.3'
|
{
"content_hash": "3d7bc66ddbfd1307b2703dc21f4aa499",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 626,
"avg_line_length": 47.657534246575345,
"alnum_prop": 0.6325093417648749,
"repo_name": "levilucio/SyVOLT",
"id": "41f389a3f6e6873e3435a015344848104de4813f",
"size": "6958",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "UMLRT2Kiltera_MM/Properties/Multiplicity/models/Trigger01ExprPart1_Complete_MDL.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "166159"
},
{
"name": "Python",
"bytes": "34207588"
},
{
"name": "Shell",
"bytes": "1118"
}
],
"symlink_target": ""
}
|
from html import escape
from wptserve.utils import isomorphic_decode
def main(request, response):
label = request.GET.first(b'label')
return u"""<!doctype html><meta charset="%s">""" % escape(isomorphic_decode(label))
|
{
"content_hash": "53fcc43d3da17394250cea9e195d18de",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 87,
"avg_line_length": 32.57142857142857,
"alnum_prop": 0.7192982456140351,
"repo_name": "chromium/chromium",
"id": "15edff7061f96b6f40fe42f6c4c09d3cdc07b643",
"size": "228",
"binary": false,
"copies": "21",
"ref": "refs/heads/main",
"path": "third_party/blink/web_tests/external/wpt/dom/nodes/encoding.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import argparse
import fileinput
import io
import shutil
import subprocess
GIT = shutil.which("git")
def update_file(file, lineno, old, new):
"""
Replace all occurrences of the old substring by the new substring.
:param file: The file to update.
:param lineno: The line number to update.
:param old: The old substring.
:param new: The new substring.
"""
print("\tUpdating file in place")
with fileinput.FileInput(file, inplace=True) as f:
for line in f:
if f.lineno() == lineno and old in line:
print(line.replace(old, new), end="")
else:
print(line, end="")
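# Illustrative call (hypothetical path and version, not taken from real history):
# update_file("core/src/main/java/jenkins/Example.java", 42,
#             "@since TODO", "@since 2.400")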
def analyze_file(file, lineno, commits_and_tags, dry_run=False):
"""
Analyze the given file.
:param file: The file to analyze.
:param lineno: The line number to analyze.
:param commits_and_tags: The output dictionary mapping commits to release tags.
:param dry_run: Whether or not this is a dry run.
"""
print(f"Analyzing {file}:{lineno}")
line_sha = (
subprocess.check_output(
[GIT, "blame", "--porcelain", "-L", f"{lineno},{lineno}", file], text=True
)
.split("\n", 1)[0]
.split(" ", 1)[0]
)
print(f"\tfirst sha: {line_sha}")
first_tag = subprocess.check_output(
[GIT, "tag", "--sort=creatordate", "--contains", line_sha, "jenkins-*"],
text=True,
).split("\n", 1)[0]
if first_tag:
print(f"\tfirst tag was {first_tag}")
commits_and_tags[line_sha] = first_tag
if not dry_run:
since_version = first_tag.replace("jenkins-", "")
update_file(
file,
int(lineno),
"@since TODO",
f"@since {since_version}",
)
update_file(
file,
int(lineno),
'@Deprecated(since = "TODO")',
f'@Deprecated(since = "{since_version}")',
)
update_file(
file,
int(lineno),
'@RestrictedSince("TODO")',
f'@RestrictedSince("{since_version}")',
)
else:
print(
"\tNot updating file, no tag found. "
"Normal if the associated PR/commit is not merged and released yet; "
"otherwise make sure to fetch tags from jenkinsci/jenkins"
)
def analyze_files(commits_and_tags, dry_run=False):
"""
Analyze all files in the repository.
:param commits_and_tags: The output dictionary mapping commits to release tags.
:param dry_run: Whether or not this is a dry run.
"""
cmd = [
GIT,
"grep",
"--line-number",
"-E",
'@since TODO|@Deprecated\\(since = "TODO"\\)|@RestrictedSince\\("TODO"\\)',
"--",
"*.java",
"*.jelly",
"*.js",
]
with subprocess.Popen(cmd, stdout=subprocess.PIPE) as proc:
for line in io.TextIOWrapper(proc.stdout):
parts = line.rstrip().split(":", 2)
analyze_file(parts[0], parts[1], commits_and_tags, dry_run=dry_run)
retcode = proc.wait()
if retcode:
raise subprocess.CalledProcessError(retcode, cmd)
print()
def display_results(commits_and_tags):
"""
Display the results of the analysis.
:param commits_and_tags: The output dictionary mapping commits to release tags.
"""
print("List of commits introducing new API and the first release they went in:")
releases = {release for release in commits_and_tags.values()}
for release in sorted(releases):
print(f"* https://github.com/jenkinsci/jenkins/releases/tag/{release}")
for commit, first_release in commits_and_tags.items():
if release == first_release:
print(f" - https://github.com/jenkinsci/jenkins/commit/{commit}")
def main():
"""
Update '@since TODO', '@Deprecated(since = "TODO")', and '@RestrictedSince("TODO")' entries
with actual Jenkins release versions.
This script is a developer tool, to be used by maintainers.
"""
parser = argparse.ArgumentParser(
description="Update '@since TODO', '@Deprecated(since = \"TODO\")', and '@RestrictedSince(\"TODO\")' entries "
"with actual Jenkins release versions. "
)
parser.add_argument("-n", "--dry-run", help="Dry run", action="store_true")
args = parser.parse_args()
commits_and_tags = {}
analyze_files(commits_and_tags, dry_run=args.dry_run)
if commits_and_tags:
display_results(commits_and_tags)
if __name__ == "__main__":
main()
|
{
"content_hash": "eefcaf2cfca3478dbfa6999aa2dfeea8",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 118,
"avg_line_length": 32.08904109589041,
"alnum_prop": 0.568196371398079,
"repo_name": "MarkEWaite/jenkins",
"id": "044d441a935e1ac7204ea4616fac2cafe2d6e6d3",
"size": "4709",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "update-since-todo.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ANTLR",
"bytes": "4633"
},
{
"name": "Batchfile",
"bytes": "1023"
},
{
"name": "C",
"bytes": "2091"
},
{
"name": "CSS",
"bytes": "215870"
},
{
"name": "Dockerfile",
"bytes": "210"
},
{
"name": "Groovy",
"bytes": "75924"
},
{
"name": "HTML",
"bytes": "938754"
},
{
"name": "Handlebars",
"bytes": "15221"
},
{
"name": "Java",
"bytes": "11621704"
},
{
"name": "JavaScript",
"bytes": "392506"
},
{
"name": "Less",
"bytes": "192071"
},
{
"name": "Perl",
"bytes": "16145"
},
{
"name": "Python",
"bytes": "4709"
},
{
"name": "Ruby",
"bytes": "17290"
},
{
"name": "Shell",
"bytes": "3635"
}
],
"symlink_target": ""
}
|
from werkzeug import FileStorage
from wtforms import FileField as _FileField
from wtforms import ValidationError
class FileField(_FileField):
"""
Werkzeug-aware subclass of **wtforms.FileField**
Provides a `has_file()` method to check if its data is a FileStorage
instance with an actual file.
"""
@property
def file(self):
"""
:deprecated: synonym for **data**
"""
return self.data
def has_file(self):
'''Return True iff self.data is a FileStorage with file data'''
if not isinstance(self.data, FileStorage):
return False
# filename == None => the field was present but no file was entered
# filename == '<fdopen>' is for a werkzeug hack:
# large file uploads will get stored in a temporary file on disk and
# show up as an extra FileStorage with name '<fdopen>'
return self.data.filename not in [None, '', '<fdopen>']
class FileRequired(object):
"""
Validates that field has a file.
:param message: error message
You can also use the synonym **file_required**.
"""
def __init__(self, message=None):
self.message = message
def __call__(self, form, field):
if not field.has_file():
raise ValidationError(self.message)
file_required = FileRequired
class FileAllowed(object):
"""
Validates that the uploaded file is allowed by the given
Flask-Uploads UploadSet.
    :param upload_set: A list/tuple of extension names or an instance
of ``flask.ext.uploads.UploadSet``
:param message: error message
You can also use the synonym **file_allowed**.
"""
def __init__(self, upload_set, message=None):
self.upload_set = upload_set
self.message = message
def __call__(self, form, field):
if not field.has_file():
return
if isinstance(self.upload_set, (tuple, list)):
ext = field.data.filename.rsplit('.', 1)[-1]
if ext.lower() in self.upload_set:
return
raise ValidationError(self.message)
if not self.upload_set.file_allowed(field.data, field.data.filename):
raise ValidationError(self.message)
file_allowed = FileAllowed
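# ---------------------------------------------------------------------
# Hedged usage sketch (illustrative, not part of this module): combining
# the field and validators above in a form. The form base class, field
# label, and extension list are assumptions.
#
# from flask.ext.wtf import Form
#
# class PhotoForm(Form):
#     photo = FileField('Photo', validators=[
#         file_required(message='A photo is required.'),
#         file_allowed(['jpg', 'png'], message='Images only.')])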
|
{
"content_hash": "10fc128c3f8e1ea46618a148f0f4a438",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 78,
"avg_line_length": 29.050632911392405,
"alnum_prop": 0.6200435729847494,
"repo_name": "midma101/AndIWasJustGoingToBed",
"id": "a61cacb658eb909d41a1147a8bb24b102c957f74",
"size": "2295",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": ".venv/lib/python2.7/site-packages/flask_wtf/file.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "29472"
},
{
"name": "JavaScript",
"bytes": "3277638"
},
{
"name": "PHP",
"bytes": "4548"
},
{
"name": "Python",
"bytes": "15564"
}
],
"symlink_target": ""
}
|
"""Tests for learn.estimators.tensor_signature."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.estimators import tensor_signature
class TensorSignatureTest(tf.test.TestCase):
def testTensorSignatureCompatible(self):
placeholder_a = tf.placeholder(name='test',
shape=[None, 100],
dtype=tf.int32)
placeholder_b = tf.placeholder(name='another',
shape=[256, 100],
dtype=tf.int32)
placeholder_c = tf.placeholder(name='mismatch',
shape=[256, 100],
dtype=tf.float32)
placeholder_d = tf.placeholder(name='mismatch',
shape=[128, 100],
dtype=tf.int32)
signatures = tensor_signature.create_signatures(placeholder_a)
self.assertTrue(tensor_signature.tensors_compatible(placeholder_a,
signatures))
self.assertTrue(tensor_signature.tensors_compatible(placeholder_b,
signatures))
self.assertFalse(tensor_signature.tensors_compatible(placeholder_c,
signatures))
self.assertTrue(tensor_signature.tensors_compatible(placeholder_d,
signatures))
inputs = {'a': placeholder_a}
signatures = tensor_signature.create_signatures(inputs)
self.assertTrue(tensor_signature.tensors_compatible(inputs, signatures))
self.assertFalse(tensor_signature.tensors_compatible(placeholder_a,
signatures))
self.assertFalse(tensor_signature.tensors_compatible(placeholder_b,
signatures))
self.assertFalse(tensor_signature.tensors_compatible(
{'b': placeholder_b}, signatures))
self.assertTrue(tensor_signature.tensors_compatible(
{'a': placeholder_b,
'c': placeholder_c}, signatures))
self.assertFalse(tensor_signature.tensors_compatible(
{'a': placeholder_c}, signatures))
def testSparseTensorCompatible(self):
t = tf.SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], shape=[3, 4])
signatures = tensor_signature.create_signatures(t)
self.assertTrue(tensor_signature.tensors_compatible(t, signatures))
def testTensorSignaturePlaceholders(self):
placeholder_a = tf.placeholder(name='test',
shape=[None, 100],
dtype=tf.int32)
signatures = tensor_signature.create_signatures(placeholder_a)
placeholder_out = tensor_signature.create_placeholders_from_signatures(
signatures)
self.assertEqual(placeholder_out.dtype, placeholder_a.dtype)
self.assertEqual(placeholder_out.get_shape(), placeholder_a.get_shape())
self.assertTrue(tensor_signature.tensors_compatible(placeholder_out,
signatures))
inputs = {'a': placeholder_a}
signatures = tensor_signature.create_signatures(inputs)
placeholders_out = tensor_signature.create_placeholders_from_signatures(
signatures)
self.assertEqual(placeholders_out['a'].dtype, placeholder_a.dtype)
self.assertEqual(placeholders_out['a'].get_shape(),
placeholder_a.get_shape())
self.assertTrue(tensor_signature.tensors_compatible(placeholders_out,
signatures))
def testSparseTensorSignaturePlaceholders(self):
tensor = tf.SparseTensor(values=[1.0, 2.0], indices=[[0, 2], [0, 3]],
shape=[5, 5])
signature = tensor_signature.create_signatures(tensor)
placeholder = tensor_signature.create_placeholders_from_signatures(
signature)
self.assertTrue(isinstance(placeholder, tf.SparseTensor))
self.assertEqual(placeholder.values.dtype, tensor.values.dtype)
if __name__ == '__main__':
tf.test.main()
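# ---------------------------------------------------------------------
# Hedged usage sketch (illustrative, not part of the test file): the
# pattern exercised above - capture signatures from the first input
# batch, then reject later batches whose dtype or shape drifts.
# `first_batch` and `next_batch` are assumed tensors/placeholders.
#
# signatures = tensor_signature.create_signatures(first_batch)
# if not tensor_signature.tensors_compatible(next_batch, signatures):
#     raise ValueError("input tensors changed dtype or shape between calls")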
|
{
"content_hash": "aebe01d45c302777d331d3e485db2537",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 78,
"avg_line_length": 47.175824175824175,
"alnum_prop": 0.5949219659911483,
"repo_name": "ninotoshi/tensorflow",
"id": "bd1e18bd8d96d996e47afe845222cf11008e702d",
"size": "4896",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/learn/python/learn/estimators/tensor_signature_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "151630"
},
{
"name": "C++",
"bytes": "6579490"
},
{
"name": "CMake",
"bytes": "29325"
},
{
"name": "CSS",
"bytes": "107"
},
{
"name": "HTML",
"bytes": "657597"
},
{
"name": "Java",
"bytes": "50361"
},
{
"name": "JavaScript",
"bytes": "16175"
},
{
"name": "Jupyter Notebook",
"bytes": "777942"
},
{
"name": "Objective-C",
"bytes": "1288"
},
{
"name": "Protocol Buffer",
"bytes": "66333"
},
{
"name": "Python",
"bytes": "3809695"
},
{
"name": "Shell",
"bytes": "66697"
},
{
"name": "TypeScript",
"bytes": "329009"
}
],
"symlink_target": ""
}
|
"""log_this_plus
Revision ID: 525c854f0005
Revises: e46f2d27a08e
Create Date: 2016-12-13 16:19:02.239322
"""
# revision identifiers, used by Alembic.
revision = '525c854f0005'
down_revision = 'e46f2d27a08e'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('logs', sa.Column('duration_ms', sa.Integer(), nullable=True))
op.add_column('logs', sa.Column('referrer', sa.String(length=1024), nullable=True))
def downgrade():
op.drop_column('logs', 'referrer')
op.drop_column('logs', 'duration_ms')
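# ---------------------------------------------------------------------
# Illustrative commands (not part of the migration): applying and
# reverting this revision with the Alembic CLI.
#
#   alembic upgrade 525c854f0005    # adds logs.duration_ms / logs.referrer
#   alembic downgrade e46f2d27a08e  # drops them again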
|
{
"content_hash": "8b090e47ab02ce0eaf3b212ec9036abe",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 87,
"avg_line_length": 22.666666666666668,
"alnum_prop": 0.7058823529411765,
"repo_name": "dmigo/incubator-superset",
"id": "1db3f5c76006a7f8ca22d5bd39f36faa9dec116d",
"size": "568",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "superset/migrations/versions/525c854f0005_log_this_plus.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "99154"
},
{
"name": "HTML",
"bytes": "100560"
},
{
"name": "JavaScript",
"bytes": "1557840"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "1075386"
},
{
"name": "Shell",
"bytes": "1557"
},
{
"name": "Smarty",
"bytes": "1048"
}
],
"symlink_target": ""
}
|
import sys
from bs4 import BeautifulSoup as bs4
content_file = sys.argv[1]
# Read the menu fragment once up front; reusing an open file handle
# inside the loop would yield an empty fragment from the second
# '.static-menu' container onwards.
menu_content = open(sys.argv[2]).read()
content = bs4(open(content_file), 'lxml')
for menu_container in content.select('.static-menu'):
    menu_container.clear()
    menu_container.append(bs4(menu_content, 'html.parser'))
open(content_file, 'w').write(content.prettify().encode('utf-8'))
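# ---------------------------------------------------------------------
# Illustrative invocation (not part of the original script; file names
# are assumptions): inject the shared menu fragment into a generated
# page in place.
#
#   python menus-write.py output/page.html fragments/menu.html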
|
{
"content_hash": "0deef5aeec16bb19448fdb7fe24b90e6",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 65,
"avg_line_length": 27,
"alnum_prop": 0.7116402116402116,
"repo_name": "emkael/pzbs-ranking",
"id": "e30c702d19f33b3cc48d51ecebee5281da36751e",
"size": "378",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/menus-write.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "2317"
},
{
"name": "HTML",
"bytes": "24468"
},
{
"name": "JavaScript",
"bytes": "21345"
},
{
"name": "Makefile",
"bytes": "1597"
},
{
"name": "Python",
"bytes": "12510"
},
{
"name": "Shell",
"bytes": "5098"
}
],
"symlink_target": ""
}
|
__all__ = ['register', 'sharedPackages',
'reloadSharedPackage', 'reloadSharedPackages']
from panda3d._core import Filename, VirtualFileSystem, VirtualFileMountSystem, OFileStream, copyStream
import sys
import marshal
import imp
import types
import __builtin__
# The sharedPackages dictionary lists all of the "shared packages",
# special Python packages that automatically span multiple directories
# via magic in the VFSImporter. You can make a package "shared"
# simply by adding its name into this dictionary (and then calling
# reloadSharedPackages() if it's already been imported).
# When a package name is in this dictionary at import time, *all*
# instances of the package are located along sys.path, and merged into
# a single Python module with a __path__ setting that represents the
# union. Thus, you can have a direct.showbase.foo in your own
# application, and loading it won't shadow the system
# direct.showbase.ShowBase which is in a different directory on disk.
sharedPackages = {}
vfs = VirtualFileSystem.getGlobalPtr()
# Possible file types.
FTPythonSource = 0
FTPythonCompiled = 1
FTExtensionModule = 2
FTFrozenModule = 3
compiledExtensions = [ 'pyc', 'pyo' ]
if not __debug__:
# In optimized mode, we prefer loading .pyo files over .pyc files.
# We implement that by reversing the extension names.
compiledExtensions = [ 'pyo', 'pyc' ]
class VFSImporter:
""" This class serves as a Python importer to support loading
Python .py and .pyc/.pyo files from Panda's Virtual File System,
which allows loading Python source files from mounted .mf files
(among other places). """
def __init__(self, path):
self.dir_path = Filename.fromOsSpecific(path)
def find_module(self, fullname, path = None):
if path is None:
dir_path = self.dir_path
else:
dir_path = path
#print >>sys.stderr, "find_module(%s), dir_path = %s" % (fullname, dir_path)
basename = fullname.split('.')[-1]
path = Filename(dir_path, basename)
# First, look for Python files.
filename = Filename(path)
filename.setExtension('py')
vfile = vfs.getFile(filename, True)
if vfile:
return VFSLoader(dir_path, vfile, filename, FTPythonSource)
# If there's no .py file, but there's a .pyc file, load that
# anyway.
for ext in compiledExtensions:
filename = Filename(path)
filename.setExtension(ext)
vfile = vfs.getFile(filename, True)
if vfile:
return VFSLoader(dir_path, vfile, filename, FTPythonCompiled)
# Look for a C/C++ extension module.
for desc in imp.get_suffixes():
if desc[2] != imp.C_EXTENSION:
continue
filename = Filename(path + desc[0])
vfile = vfs.getFile(filename, True)
if vfile:
return VFSLoader(dir_path, vfile, filename, FTExtensionModule,
desc = desc)
# Finally, consider a package, i.e. a directory containing
# __init__.py.
filename = Filename(path, '__init__.py')
vfile = vfs.getFile(filename, True)
if vfile:
return VFSLoader(dir_path, vfile, filename, FTPythonSource,
packagePath = path)
for ext in compiledExtensions:
filename = Filename(path, '__init__.' + ext)
vfile = vfs.getFile(filename, True)
if vfile:
return VFSLoader(dir_path, vfile, filename, FTPythonCompiled,
packagePath = path)
#print >>sys.stderr, "not found."
return None
class VFSLoader:
""" The second part of VFSImporter, this is created for a
particular .py file or directory. """
def __init__(self, dir_path, vfile, filename, fileType,
desc = None, packagePath = None):
self.dir_path = dir_path
self.timestamp = None
if vfile:
self.timestamp = vfile.getTimestamp()
self.filename = filename
self.fileType = fileType
self.desc = desc
self.packagePath = packagePath
def load_module(self, fullname, loadingShared = False):
#print >>sys.stderr, "load_module(%s), dir_path = %s, filename = %s" % (fullname, self.dir_path, self.filename)
if self.fileType == FTFrozenModule:
return self._import_frozen_module(fullname)
if self.fileType == FTExtensionModule:
return self._import_extension_module(fullname)
# Check if this is a child of a shared package.
if not loadingShared and self.packagePath and '.' in fullname:
parentname = fullname.rsplit('.', 1)[0]
if parentname in sharedPackages:
# It is. That means it's a shared package too.
parent = sys.modules[parentname]
path = getattr(parent, '__path__', None)
importer = VFSSharedImporter()
sharedPackages[fullname] = True
loader = importer.find_module(fullname, path = path)
assert loader
return loader.load_module(fullname)
code = self._read_code()
if not code:
raise ImportError, 'No Python code in %s' % (fullname)
mod = sys.modules.setdefault(fullname, imp.new_module(fullname))
mod.__file__ = self.filename.toOsSpecific()
mod.__loader__ = self
if self.packagePath:
mod.__path__ = [self.packagePath.toOsSpecific()]
#print >> sys.stderr, "loaded %s, path = %s" % (fullname, mod.__path__)
exec(code, mod.__dict__)
return sys.modules[fullname]
def getdata(self, path):
path = Filename(self.dir_path, Filename.fromOsSpecific(path))
vfile = vfs.getFile(path)
if not vfile:
raise IOError
return vfile.readFile(True)
def is_package(self, fullname):
return bool(self.packagePath)
def get_code(self, fullname):
return self._read_code()
def get_source(self, fullname):
return self._read_source()
def get_filename(self, fullname):
return self.filename.toOsSpecific()
def _read_source(self):
""" Returns the Python source for this file, if it is
available, or None if it is not. May raise IOError. """
if self.fileType == FTPythonCompiled or \
self.fileType == FTExtensionModule:
return None
filename = Filename(self.filename)
filename.setExtension('py')
filename.setText()
vfile = vfs.getFile(filename)
if not vfile:
raise IOError
return vfile.readFile(True)
def _import_extension_module(self, fullname):
""" Loads the binary shared object as a Python module, and
returns it. """
vfile = vfs.getFile(self.filename, False)
# We can only import an extension module if it already exists on
# disk. This means if it's a truly virtual file that has no
# on-disk equivalent, we have to write it to a temporary file
# first.
if hasattr(vfile, 'getMount') and \
isinstance(vfile.getMount(), VirtualFileMountSystem):
# It's a real file.
filename = self.filename
elif self.filename.exists():
# It's a virtual file, but it's shadowing a real file in
# the same directory. Assume they're the same, and load
# the real one.
filename = self.filename
else:
# It's a virtual file with no real-world existence. Dump
# it to disk. TODO: clean up this filename.
filename = Filename.temporary('', self.filename.getBasenameWoExtension(),
'.' + self.filename.getExtension(),
type = Filename.TDso)
filename.setExtension(self.filename.getExtension())
filename.setBinary()
sin = vfile.openReadFile(True)
sout = OFileStream()
if not filename.openWrite(sout):
raise IOError
if not copyStream(sin, sout):
raise IOError
vfile.closeReadFile(sin)
del sout
module = imp.load_module(fullname, None, filename.toOsSpecific(),
self.desc)
module.__file__ = self.filename.toOsSpecific()
return module
def _import_frozen_module(self, fullname):
""" Imports the frozen module without messing around with
searching any more. """
#print >>sys.stderr, "importing frozen %s" % (fullname)
module = imp.load_module(fullname, None, fullname,
('', '', imp.PY_FROZEN))
return module
def _read_code(self):
""" Returns the Python compiled code object for this file, if
it is available, or None if it is not. May raise IOError,
ValueError, SyntaxError, or a number of other errors generated
by the low-level system. """
if self.fileType == FTPythonCompiled:
# It's a pyc file; just read it directly.
pycVfile = vfs.getFile(self.filename, False)
if pycVfile:
return self._loadPyc(pycVfile, None)
raise IOError, 'Could not read %s' % (self.filename)
elif self.fileType == FTExtensionModule:
return None
# It's a .py file (or an __init__.py file; same thing). Read
# the .pyc file if it is available and current; otherwise read
# the .py file and compile it.
t_pyc = None
for ext in compiledExtensions:
pycFilename = Filename(self.filename)
pycFilename.setExtension(ext)
pycVfile = vfs.getFile(pycFilename, False)
if pycVfile:
t_pyc = pycVfile.getTimestamp()
break
code = None
if t_pyc and t_pyc >= self.timestamp:
try:
code = self._loadPyc(pycVfile, self.timestamp)
except ValueError:
code = None
if not code:
source = self._read_source()
filename = Filename(self.filename)
filename.setExtension('py')
code = self._compile(filename, source)
return code
def _loadPyc(self, vfile, timestamp):
""" Reads and returns the marshal data from a .pyc file.
Raises ValueError if there is a problem. """
code = None
data = vfile.readFile(True)
if data[:4] == imp.get_magic():
t = ord(data[4]) + (ord(data[5]) << 8) + \
(ord(data[6]) << 16) + (ord(data[7]) << 24)
if not timestamp or t == timestamp:
code = marshal.loads(data[8:])
else:
raise ValueError, 'Timestamp wrong on %s' % (vfile)
else:
raise ValueError, 'Bad magic number in %s' % (vfile)
return code
def _compile(self, filename, source):
""" Compiles the Python source code to a code object and
attempts to write it to an appropriate .pyc file. May raise
SyntaxError or other errors generated by the compiler. """
if source and source[-1] != '\n':
source = source + '\n'
code = __builtin__.compile(source, filename.toOsSpecific(), 'exec')
# try to cache the compiled code
pycFilename = Filename(filename)
pycFilename.setExtension(compiledExtensions[0])
try:
f = open(pycFilename.toOsSpecific(), 'wb')
except IOError:
pass
else:
f.write('\0\0\0\0')
f.write(chr(self.timestamp & 0xff) +
chr((self.timestamp >> 8) & 0xff) +
chr((self.timestamp >> 16) & 0xff) +
chr((self.timestamp >> 24) & 0xff))
f.write(marshal.dumps(code))
f.flush()
f.seek(0, 0)
f.write(imp.get_magic())
f.close()
return code
class VFSSharedImporter:
""" This is a special importer that is added onto the meta_path
list, so that it is called before sys.path is traversed. It uses
special logic to load one of the "shared" packages, by searching
the entire sys.path for all instances of this shared package, and
merging them. """
def __init__(self):
pass
def find_module(self, fullname, path = None, reload = False):
#print >>sys.stderr, "shared find_module(%s), path = %s" % (fullname, path)
if fullname not in sharedPackages:
# Not a shared package; fall back to normal import.
return None
if path is None:
path = sys.path
excludePaths = []
if reload:
# If reload is true, we are simply reloading the module,
# looking for new paths to add.
mod = sys.modules[fullname]
excludePaths = getattr(mod, '_vfs_shared_path', None)
if excludePaths is None:
# If there isn't a _vfs_shared_path symbol already,
# the module must have been loaded through
# conventional means. Try to guess which path it was
# found on.
d = self.getLoadedDirname(mod)
excludePaths = [d]
loaders = []
for dir in path:
if dir in excludePaths:
continue
importer = sys.path_importer_cache.get(dir, None)
if importer is None:
try:
importer = VFSImporter(dir)
except ImportError:
continue
sys.path_importer_cache[dir] = importer
try:
loader = importer.find_module(fullname)
if not loader:
continue
except ImportError:
continue
loaders.append(loader)
if not loaders:
return None
return VFSSharedLoader(loaders, reload = reload)
def getLoadedDirname(self, mod):
""" Returns the directory name that the indicated
conventionally-loaded module must have been loaded from. """
        if not hasattr(mod, '__file__') or mod.__file__ is None:
return None
fullname = mod.__name__
dirname = Filename.fromOsSpecific(mod.__file__).getDirname()
parentname = None
basename = fullname
if '.' in fullname:
parentname, basename = fullname.rsplit('.', 1)
path = None
if parentname:
parent = sys.modules[parentname]
path = parent.__path__
if path is None:
path = sys.path
for dir in path:
pdir = str(Filename.fromOsSpecific(dir))
if pdir + '/' + basename == dirname:
# We found it!
return dir
# Couldn't figure it out.
return None
class VFSSharedLoader:
""" The second part of VFSSharedImporter, this imports a list of
packages and combines them. """
def __init__(self, loaders, reload):
self.loaders = loaders
self.reload = reload
def load_module(self, fullname):
#print >>sys.stderr, "shared load_module(%s), loaders = %s" % (fullname, map(lambda l: l.dir_path, self.loaders))
mod = None
message = None
path = []
vfs_shared_path = []
if self.reload:
mod = sys.modules[fullname]
path = mod.__path__ or []
vfs_shared_path = getattr(mod, '_vfs_shared_path', [])
for loader in self.loaders:
try:
mod = loader.load_module(fullname, loadingShared = True)
except ImportError:
etype, evalue, etraceback = sys.exc_info()
print "%s on %s: %s" % (etype.__name__, fullname, evalue)
if not message:
message = '%s: %s' % (fullname, evalue)
continue
for dir in getattr(mod, '__path__', []):
if dir not in path:
path.append(dir)
if mod is None:
# If all of them failed to load, raise ImportError.
raise ImportError, message
# If at least one of them loaded successfully, return the
# union of loaded modules.
mod.__path__ = path
# Also set this special symbol, which records that this is a
# shared package, and also lists the paths we have already
# loaded.
mod._vfs_shared_path = vfs_shared_path + [l.dir_path for l in self.loaders]
return mod
_registered = False
def register():
""" Register the VFSImporter on the path_hooks, if it has not
already been registered, so that future Python import statements
will vector through here (and therefore will take advantage of
Panda's virtual file system). """
global _registered
if not _registered:
_registered = True
sys.path_hooks.insert(0, VFSImporter)
sys.meta_path.insert(0, VFSSharedImporter())
# Blow away the importer cache, so we'll come back through the
# VFSImporter for every folder in the future, even those
# folders that previously were loaded directly.
sys.path_importer_cache = {}
def reloadSharedPackage(mod):
""" Reloads the specific module as a shared package, adding any
new directories that might have appeared on the search path. """
fullname = mod.__name__
path = None
if '.' in fullname:
parentname = fullname.rsplit('.', 1)[0]
parent = sys.modules[parentname]
path = parent.__path__
importer = VFSSharedImporter()
loader = importer.find_module(fullname, path = path, reload = True)
if loader:
loader.load_module(fullname)
# Also force any child packages to become shared packages, if
# they aren't already.
for basename, child in mod.__dict__.items():
if isinstance(child, types.ModuleType):
childname = child.__name__
if childname == fullname + '.' + basename and \
hasattr(child, '__path__') and \
childname not in sharedPackages:
sharedPackages[childname] = True
reloadSharedPackage(child)
def reloadSharedPackages():
""" Walks through the sharedPackages list, and forces a reload of
any modules on that list that have already been loaded. This
allows new directories to be added to the search path. """
#print >> sys.stderr, "reloadSharedPackages, path = %s, sharedPackages = %s" % (sys.path, sharedPackages.keys())
for fullname in sharedPackages.keys():
mod = sys.modules.get(fullname, None)
if not mod:
continue
reloadSharedPackage(mod)
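# ---------------------------------------------------------------------
# Hedged usage sketch (illustrative, not part of this module): mounting
# a multifile and importing Python code packed inside it through the
# importer above. The multifile name, mount flags and package name are
# assumptions following the usual Panda3D pattern.
#
# vfs.mount(Filename("game.mf"), ".", VirtualFileSystem.MFReadOnly)
# register()                  # install VFSImporter on sys.path_hooks
# import mylib                # now resolved through the virtual file system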
|
{
"content_hash": "cba398bd7fedeeb3b248bc2443049a69",
"timestamp": "",
"source": "github",
"line_count": 524,
"max_line_length": 121,
"avg_line_length": 36.38358778625954,
"alnum_prop": 0.5791765014424338,
"repo_name": "matthiascy/panda3d",
"id": "16b1ac4becc2e4417ef33dd79898ee0af369a06b",
"size": "19065",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "direct/src/showbase/VFSImporter.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4004"
},
{
"name": "C",
"bytes": "6621371"
},
{
"name": "C++",
"bytes": "31443135"
},
{
"name": "Emacs Lisp",
"bytes": "166274"
},
{
"name": "Groff",
"bytes": "8017"
},
{
"name": "HTML",
"bytes": "8081"
},
{
"name": "Java",
"bytes": "3777"
},
{
"name": "JavaScript",
"bytes": "7003"
},
{
"name": "Logos",
"bytes": "5504"
},
{
"name": "NSIS",
"bytes": "91955"
},
{
"name": "Nemerle",
"bytes": "1461"
},
{
"name": "Objective-C",
"bytes": "15068"
},
{
"name": "Objective-C++",
"bytes": "298226"
},
{
"name": "Pascal",
"bytes": "467818"
},
{
"name": "Perl",
"bytes": "206982"
},
{
"name": "Perl6",
"bytes": "30636"
},
{
"name": "Puppet",
"bytes": "337716"
},
{
"name": "Python",
"bytes": "5840481"
},
{
"name": "Rebol",
"bytes": "421"
},
{
"name": "Shell",
"bytes": "55940"
},
{
"name": "Visual Basic",
"bytes": "136"
}
],
"symlink_target": ""
}
|
import optproblems.cec2005
import numpy as np
import time
from POA import *
import os
if __name__ == "__main__":
dim = 10
repeats = 10
evaluations = 10000*dim
parties = 6
members = 5
candidates = 2
if not os.path.exists('results'):
os.makedirs('results')
if not os.path.exists('convergence'):
os.makedirs('convergence')
np.random.seed(10)
f2 = optproblems.cec2005.F2(dim)
time1 = time.time()
results = np.array([POA(f2, dim=dim, max_eval=evaluations, nparties=parties,
nmembers=members, ncandidates=candidates, lower_bound=-100,
upper_bound=100) for _ in range(repeats)])
total_time = time.time() - time1
means = results.mean(axis=0)
solutions = results[:,-1]
mean_best = means[-1]
min_sol = np.min(solutions)
max_sol = np.max(solutions)
marks = means[0:-1]
with open("results/POA-results-10-2.txt", "w") as file:
print("F2: Shifted Schwefel's Problem 1.2", file=file)
print("Min\t Max\t Mean\t Mean time", file=file)
print("_______________________________________________", file=file)
print("{} {} {} {}".format(min_sol, max_sol, mean_best, total_time / repeats), file=file)
with open("convergence/POA-convergence-10-2.csv", "w") as file:
for i in range(len(marks)):
print("{},{}".format(10000*i, marks[i]), file=file)
np.random.seed(10)
f7 = optproblems.cec2005.F7(dim)
time1 = time.time()
results = np.array([POA(f7, dim=dim, max_eval=evaluations, nparties=parties,
nmembers=members, ncandidates=candidates, lower_bound=-999999,
upper_bound=999999, initial_population_lower_bound=0,
initial_population_upper_bound=600) for _ in range(repeats)])
total_time = time.time() - time1
means = results.mean(axis=0)
solutions = results[:,-1]
mean_best = means[-1]
min_sol = np.min(solutions)
max_sol = np.max(solutions)
marks = means[0:-1]
with open("results/POA-results-10-7.txt", "w") as file:
print("F7: Shifted Rotated Griewank's Function without Bounds", file=file)
print("Min\t Max\t Mean\t Mean time", file=file)
print("_______________________________________________", file=file)
print("{} {} {} {}".format(min_sol, max_sol, mean_best, total_time / repeats), file=file)
with open("convergence/POA-convergence-10-7.csv", "w") as file:
for i in range(len(marks)):
print("{},{}".format(10000*i, marks[i]), file=file)
np.random.seed(10)
f13 = optproblems.cec2005.F13(dim)
time1 = time.time()
results = np.array([POA(f13, dim=dim, max_eval=evaluations, nparties=parties,
nmembers=members, ncandidates=candidates, lower_bound=-3,
upper_bound=1) for _ in range(repeats)])
total_time = time.time() - time1
means = results.mean(axis=0)
solutions = results[:,-1]
mean_best = means[-1]
min_sol = np.min(solutions)
max_sol = np.max(solutions)
marks = means[0:-1]
with open("results/POA-results-10-13.txt", "w") as file:
print("F13: Expanded Extended Griewank's plus Rosenbrock's Function (F8F2)", file=file)
print("Min\t Max\t Mean\t Mean time", file=file)
print("_______________________________________________", file=file)
print("{} {} {} {}".format(min_sol, max_sol, mean_best, total_time / repeats), file=file)
with open("convergence/POA-convergence-10-13.csv", "w") as file:
for i in range(len(marks)):
print("{},{}".format(10000*i, marks[i]), file=file)
np.random.seed(10)
f17 = optproblems.cec2005.F17(dim)
time1 = time.time()
results = np.array([POA(f17, dim=dim, max_eval=evaluations, nparties=parties,
nmembers=members, ncandidates=candidates, lower_bound=-5,
upper_bound=5) for _ in range(repeats)])
total_time = time.time() - time1
means = results.mean(axis=0)
solutions = results[:,-1]
mean_best = means[-1]
min_sol = np.min(solutions)
max_sol = np.max(solutions)
marks = means[0:-1]
with open("results/POA-results-10-17.txt", "w") as file:
print("F17: Rotated Hybrid Composition Function with Noise in Fitness", file=file)
print("Min\t Max\t Mean\t Mean time", file=file)
print("_______________________________________________", file=file)
print("{} {} {} {}".format(min_sol, max_sol, mean_best, total_time / repeats), file=file)
with open("convergence/POA-convergence-10-17.csv", "w") as file:
for i in range(len(marks)):
print("{},{}".format(10000*i, marks[i]), file=file)
np.random.seed(10)
f18 = optproblems.cec2005.F18(dim)
time1 = time.time()
results = np.array([POA(f18, dim=dim, max_eval=evaluations, nparties=parties,
nmembers=members, ncandidates=candidates, lower_bound=-5,
upper_bound=5) for _ in range(repeats)])
total_time = time.time() - time1
means = results.mean(axis=0)
solutions = results[:,-1]
mean_best = means[-1]
min_sol = np.min(solutions)
max_sol = np.max(solutions)
marks = means[0:-1]
with open("results/POA-results-10-18.txt", "w") as file:
print("F18: Rotated Hybrid Composition Function", file=file)
print("Min\t Max\t Mean\t Mean time", file=file)
print("_______________________________________________", file=file)
print("{} {} {} {}".format(min_sol, max_sol, mean_best, total_time / repeats), file=file)
with open("convergence/POA-convergence-10-18.csv", "w") as file:
for i in range(len(marks)):
print("{},{}".format(10000*i, marks[i]), file=file)
|
{
"content_hash": "21d4bc4b59796bac4e9163837216bae8",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 97,
"avg_line_length": 35.76100628930818,
"alnum_prop": 0.5861765740415055,
"repo_name": "JJSrra/Research-SocioinspiredAlgorithms",
"id": "f7a7a1526d4a6a08c76e3cd615dc58c4d266b5ef",
"size": "5686",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "POA/POAbenchmark10-2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "357836"
},
{
"name": "Shell",
"bytes": "23920"
}
],
"symlink_target": ""
}
|
from datapackage_pipelines_knesset.common.processors.base_processor import BaseProcessor
import logging
class DumpFields(BaseProcessor):
def __init__(self, *args, **kwargs):
super(DumpFields, self).__init__(*args, **kwargs)
self._schema = self._parameters.get("schema")
def _process(self, datapackage, resources):
return self._process_filter(datapackage, resources)
def _filter_row(self, row, **kwargs):
fields = {}
skip = False
for field in self._schema["fields"]:
value = row[field["from"]] if "from" in field else field["const"]
if not value and "default" in field:
value = field["default"]
if not value and "required" in field:
skip = True
fields[field["name"]] = value
if not skip:
yield fields
if __name__ == "__main__":
DumpFields.main()
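# ---------------------------------------------------------------------
# Illustrative schema (field names are assumptions) matching the keys
# _filter_row reads: "from" copies an input column, "const" injects a
# fixed value, "default" fills falsy values, and "required" skips the
# row when the value is still missing.
#
# schema = {"fields": [
#     {"name": "kns_id", "from": "id", "required": True},
#     {"name": "source", "const": "knesset"},
#     {"name": "email", "from": "email", "default": ""},
# ]}
# For the row {"id": 7, "email": None} this yields
# {"kns_id": 7, "source": "knesset", "email": ""}.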
|
{
"content_hash": "244b4515fd3b0959502d71aced49884c",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 88,
"avg_line_length": 27.848484848484848,
"alnum_prop": 0.5854189336235038,
"repo_name": "hasadna/knesset-data-pipelines",
"id": "caff143f1f967b5929f1be2f836edab2e11f21b4",
"size": "919",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "datapackage_pipelines_knesset/common/processors/dump_fields.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1469"
},
{
"name": "Jupyter Notebook",
"bytes": "4163927"
},
{
"name": "Python",
"bytes": "294483"
},
{
"name": "Shell",
"bytes": "1601"
}
],
"symlink_target": ""
}
|
"""
Sales: Hardtree module definition
"""
PROPERTIES = {
'title': 'Sales & Stock',
'details': 'Sales and Client Relationship Management',
'url': '/sales/',
'system': False,
'type': 'major'
}
URL_PATTERNS = [
'^/sales/',
]
# Temporarily disabled cron due to failing .currency setting
# from treeio.sales.cron import subscription_check
# CRON = [subscription_check]
|
{
"content_hash": "ed47f0cf4ce1bd55f217b6014fc26287",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 60,
"avg_line_length": 20.57894736842105,
"alnum_prop": 0.649616368286445,
"repo_name": "thiagof/treeio",
"id": "eb88abb6ac63bac07550f64a9ff40246fbcbd3ac",
"size": "504",
"binary": false,
"copies": "3",
"ref": "refs/heads/2.0",
"path": "treeio/sales/hmodule.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "400811"
},
{
"name": "HTML",
"bytes": "1508469"
},
{
"name": "JavaScript",
"bytes": "2137383"
},
{
"name": "Nginx",
"bytes": "2335"
},
{
"name": "PHP",
"bytes": "25856"
},
{
"name": "Python",
"bytes": "2936580"
},
{
"name": "Shell",
"bytes": "17020"
}
],
"symlink_target": ""
}
|
__author__ = 'jh'
__copyright__ = 'www.codeh.de'
from django.views.generic.list import ListView
from django.shortcuts import redirect
from .models import AlertMessage
class AlertsView(ListView):
template_name = 'alert-list.html'
    def get_queryset(self):
        # ListView expects this method to return the queryset; keep a
        # reference for get_context_data() below as well.
        self.alerts = AlertMessage.objects.all().order_by('-id')
        return self.alerts
def get_context_data(self, **kwargs):
context = super(AlertsView, self).get_context_data(**kwargs)
context['alerts'] = self.alerts
return context
def AlertView(request, alert_id):
alert = AlertMessage.objects.get(id=alert_id)
redirect_url = alert.url
alert.delete()
return redirect(redirect_url)
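# ---------------------------------------------------------------------
# Hedged wiring sketch (illustrative; URL shapes and names are
# assumptions): hooking the views above into a urls.py.
#
# from django.conf.urls import url
# urlpatterns = [
#     url(r'^alerts/$', AlertsView.as_view(), name='alert-list'),
#     url(r'^alerts/(?P<alert_id>\d+)/$', AlertView, name='alert-view'),
# ]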
|
{
"content_hash": "39ecda30b33c7f6db54a435611c8be31",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 68,
"avg_line_length": 24.392857142857142,
"alnum_prop": 0.6749633967789166,
"repo_name": "jhcodeh/my-doku",
"id": "57da5af98acbab6eb08526ba50d143174e2e299b",
"size": "707",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "my_doku_application/alert_system/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "24760"
},
{
"name": "JavaScript",
"bytes": "948"
},
{
"name": "Python",
"bytes": "19428"
}
],
"symlink_target": ""
}
|
"""
Views for managing Quantum networks.
"""
import logging
import warnings
from django import shortcuts
from django import template
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.utils.translation import ugettext as _
from horizon import api
from horizon.dashboards.nova.networks.forms import (CreateNetwork,
DeleteNetwork, RenameNetwork, AttachPort, CreatePort, DeletePort,
DetachPort, TogglePort)
LOG = logging.getLogger(__name__)
def index(request):
tenant_id = request.user.tenant_id
delete_form, delete_handled = DeleteNetwork.maybe_handle(request)
networks = []
instances = []
try:
networks_list = api.quantum_list_networks(request)
details = []
for network in networks_list['networks']:
net_stats = _calc_network_stats(request, network['id'])
# Get network details like name and id
details = api.quantum_network_details(request, network['id'])
networks.append({
'name': details['network']['name'],
'id': network['id'],
'total': net_stats['total'],
'available': net_stats['available'],
'used': net_stats['used'],
'tenant': tenant_id})
except Exception, e:
LOG.exception("Unable to get network list.")
messages.error(request,
_('Unable to get network list: %s') % e.message)
return shortcuts.render(request,
'nova/networks/index.html', {
'networks': networks,
'delete_form': delete_form})
def create(request):
network_form, handled = CreateNetwork.maybe_handle(request)
if handled:
return shortcuts.redirect('horizon:nova:networks:index')
return shortcuts.render(request,
'nova/networks/create.html',
{'network_form': network_form})
def detail(request, network_id):
tenant_id = request.user.tenant_id
delete_port_form, delete_handled = DeletePort.maybe_handle(request,
initial={"network": network_id})
detach_port_form, detach_handled = DetachPort.maybe_handle(request,
initial={"network": network_id})
toggle_port_form, port_toggle_handled = TogglePort.maybe_handle(request,
initial={"network": network_id})
network = {}
network['id'] = network_id
try:
network_details = api.quantum_network_details(request, network_id)
network['name'] = network_details['network']['name']
network['ports'] = _get_port_states(request, network_id)
except Exception, e:
LOG.exception("Unable to get network details.")
messages.error(request,
_('Unable to get network details: %s') % e.message)
return shortcuts.redirect("horizon:nova:networks:index")
return shortcuts.render(request,
'nova/networks/detail.html',
{'network': network,
'tenant': tenant_id,
'delete_port_form': delete_port_form,
'detach_port_form': detach_port_form,
'toggle_port_form': toggle_port_form})
def rename(request, network_id):
network_details = api.quantum_network_details(request, network_id)
network = network_details['network']
rename_form, handled = RenameNetwork.maybe_handle(request, initial={
'network': network['id'],
'new_name': network['name']})
if handled:
return shortcuts.redirect('horizon:nova:networks:index')
return shortcuts.render(request,
'nova/networks/rename.html', {
'network': network,
'rename_form': rename_form})
def _get_port_states(request, network_id):
"""
Helper method to find port states for a network
"""
network_ports = []
# Get all vifs for comparison with port attachments
vifs = api.get_vif_ids(request)
# Get all ports on this network
ports = api.quantum_list_ports(request, network_id)
for port in ports['ports']:
port_details = api.quantum_port_details(request,
network_id, port['id'])
# Get port attachments
port_attachment = api.quantum_port_attachment(request,
network_id, port['id'])
# Find instance the attachment belongs to
connected_instance = None
if port_attachment['attachment']:
for vif in vifs:
if str(vif['id']) == str(port_attachment['attachment']['id']):
connected_instance = vif['id']
break
network_ports.append({
'id': port_details['port']['id'],
'state': port_details['port']['state'],
'attachment': port_attachment['attachment'],
'instance': connected_instance})
return network_ports
def _calc_network_stats(request, network_id):
"""
Helper method to calculate statistics for a network
"""
# Get all ports statistics for the network
total = 0
available = 0
used = 0
ports = api.quantum_list_ports(request, network_id)
for port in ports['ports']:
total += 1
# Get port attachment
port_attachment = api.quantum_port_attachment(request,
network_id, port['id'])
if port_attachment['attachment']:
used += 1
else:
available += 1
return {'total': total, 'used': used, 'available': available}
def port_create(request, network_id):
create_form, handled = CreatePort.maybe_handle(request, initial={
"network": network_id})
if handled:
return shortcuts.redirect('horizon:nova:networks:detail',
network_id=network_id)
return shortcuts.render(request,
'nova/ports/create.html', {
'network_id': network_id,
'create_form': create_form})
def port_attach(request, network_id, port_id):
attach_form, handled = AttachPort.maybe_handle(request, initial={
"network": network_id,
"port": port_id})
if handled:
return shortcuts.redirect('horizon:nova:networks:detail',
network_id=network_id)
return shortcuts.render(request,
'nova/ports/attach.html', {
'network': network_id,
'port': port_id,
'attach_form': attach_form})
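# ---------------------------------------------------------------------
# Hedged wiring sketch (illustrative; the patterns are assumptions
# mirroring the 'horizon:nova:networks' redirect names used above):
#
# from django.conf.urls.defaults import patterns, url
# urlpatterns = patterns('horizon.dashboards.nova.networks.views',
#     url(r'^$', 'index', name='index'),
#     url(r'^create/$', 'create', name='create'),
#     url(r'^(?P<network_id>[^/]+)/$', 'detail', name='detail'),
#     url(r'^(?P<network_id>[^/]+)/rename/$', 'rename', name='rename'),
#     url(r'^(?P<network_id>[^/]+)/ports/create/$', 'port_create',
#         name='port_create'),
# )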
|
{
"content_hash": "c33ce6a0ee45a6e68efb065b1cae6ed5",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 78,
"avg_line_length": 37.18556701030928,
"alnum_prop": 0.5381203215968949,
"repo_name": "usc-isi/horizon-old",
"id": "747511a85dd8ccec75ce8083af3773acd3db151e",
"size": "8023",
"binary": false,
"copies": "1",
"ref": "refs/heads/hpc-horizon",
"path": "horizon/horizon/dashboards/nova/networks/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "42576"
},
{
"name": "Python",
"bytes": "228535"
},
{
"name": "Shell",
"bytes": "422"
}
],
"symlink_target": ""
}
|
"""Tests for tensorflow.ops.gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
import numpy as np
from tensorflow.contrib.compiler import jit
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.framework.constant_op import constant
from tensorflow.python.ops import array_grad # pylint: disable=unused-import
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_grad # pylint: disable=unused-import
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_grad # pylint: disable=unused-import
from tensorflow.python.ops import data_flow_ops # pylint: disable=unused-import
from tensorflow.python.ops import functional_ops # pylint: disable=unused-import
from tensorflow.python.ops import gradients
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_grad # pylint: disable=unused-import
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad # pylint: disable=unused-import
from tensorflow.python.ops import state_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops.nn_ops import bias_add
from tensorflow.python.platform import googletest
def _OpsBetween(graph, to_ops, from_ops):
"""Build the list of operations between two lists of Operations.
Args:
graph: a Graph.
to_ops: list of Operations.
from_ops: list of Operations.
Returns:
The list of operations between "from_ops" and "to_ops", sorted by
decreasing operation id. This list contains all elements of to_ops.
TODO(touts): Think about returning an empty list if from_ops are not
reachable from to_ops. Presently it returns to_ops in that case.
"""
# List of booleans, indexed by operation id, indicating if
# an op is reached from the output of "input_ops".
reached_ops = [False] * (graph._last_id + 1)
# We only care to reach up to "output_ops" so we mark the
# output ops as reached to avoid recursing past them.
for op in to_ops:
reached_ops[op._id] = True
gradients_impl._MarkReachedOps(from_ops, reached_ops)
between_ops = gradients_impl._GatherInputs(to_ops, reached_ops)
between_ops.sort(key=lambda x: -x._id)
return between_ops
class GradientsTest(test_util.TensorFlowTestCase):
def _OpNames(self, op_list):
return ["%s/%d" % (str(op.name), op._id) for op in op_list]
def _assertOpListEqual(self, ops1, ops2):
self.assertEquals(self._OpNames(ops1), self._OpNames(ops2))
def testOpsBetweenSimple(self):
with ops.Graph().as_default() as g:
t1 = constant(1.0)
t2 = constant(2.0)
t3 = array_ops.stack([t1, t2])
# Full graph
self._assertOpListEqual([t3.op, t2.op, t1.op],
_OpsBetween(g, [t3.op], [t1.op, t2.op]))
# Only t1, t3.
self._assertOpListEqual([t3.op, t1.op], _OpsBetween(g, [t3.op], [t1.op]))
def testOpsBetweenUnreachable(self):
with ops.Graph().as_default() as g:
t1 = constant(1.0)
t2 = constant(2.0)
_ = array_ops.stack([t1, t2])
t4 = constant(1.0)
t5 = constant(2.0)
t6 = array_ops.stack([t4, t5])
# Elements of to_ops are always listed.
self._assertOpListEqual([t6.op], _OpsBetween(g, [t6.op], [t1.op]))
def testOpsBetweenCut(self):
with ops.Graph().as_default() as g:
t1 = constant(1.0)
t2 = constant(2.0)
t3 = array_ops.stack([t1, t2])
t4 = constant([1.0])
t5 = array_ops.concat([t4, t3], 0)
t6 = constant([2.0])
t7 = array_ops.concat([t5, t6], 0)
self._assertOpListEqual([t7.op, t5.op, t4.op],
_OpsBetween(g, [t7.op], [t4.op]))
def testOpsBetweenCycle(self):
with ops.Graph().as_default() as g:
t1 = constant(1.0)
t2 = constant(2.0)
t3 = array_ops.stack([t1, t2])
t4 = array_ops.concat([t3, t3, t3], 0)
t5 = constant([1.0])
t6 = array_ops.concat([t4, t5], 0)
t7 = array_ops.concat([t6, t3], 0)
self._assertOpListEqual([t6.op, t4.op, t3.op],
_OpsBetween(g, [t6.op], [t3.op]))
self._assertOpListEqual([t7.op, t6.op, t5.op, t4.op, t3.op, t1.op],
_OpsBetween(g, [t7.op], [t1.op, t5.op]))
self._assertOpListEqual([t6.op, t5.op, t4.op, t3.op, t2.op],
_OpsBetween(g, [t6.op], [t2.op, t5.op]))
def testGradients(self):
with ops.Graph().as_default():
inp = constant(1.0, shape=[32, 100], name="in")
w = constant(1.0, shape=[100, 10], name="w")
b = constant(1.0, shape=[10], name="b")
xw = math_ops.matmul(inp, w, name="xw")
h = bias_add(xw, b, name="h")
w_grad = gradients.gradients(h, w)[0]
self.assertEquals("MatMul", w_grad.op.type)
self.assertEquals(w_grad.op._original_op, xw.op)
self.assertTrue(w_grad.op.get_attr("transpose_a"))
self.assertFalse(w_grad.op.get_attr("transpose_b"))
def testUnusedOutput(self):
with ops.Graph().as_default():
w = constant(1.0, shape=[2, 2])
x = constant(1.0, shape=[2, 2])
wx = math_ops.matmul(w, x)
split_wx = array_ops.split(value=wx, num_or_size_splits=2, axis=0)
c = math_ops.reduce_sum(split_wx[1])
gw = gradients.gradients(c, [w])[0]
self.assertEquals("MatMul", gw.op.type)
def testColocateGradients(self):
with ops.Graph().as_default() as g:
w = constant(1.0, shape=[1, 1])
x = constant(1.0, shape=[1, 2])
with g.device("/gpu:0"):
wx = math_ops.matmul(w, x)
gw = gradients.gradients(wx, [w], colocate_gradients_with_ops=True)[0]
self.assertEqual(gw.op.colocation_groups(), wx.op.colocation_groups())
def testColocateGradientsWithAggregation(self):
with ops.Graph().as_default() as g:
with g.device("/gpu:1"):
w = constant(1.0, shape=[1, 1])
x = constant(1.0, shape=[1, 2])
y = constant(1.0, shape=[1, 2])
wx = math_ops.matmul(w, x)
wy = math_ops.matmul(w, y)
with g.device("/gpu:0"):
z = wx + wy
gw1 = gradients.gradients(z, [w], colocate_gradients_with_ops=True)[0]
self.assertEqual(gw1.op.colocation_groups(), wx.op.colocation_groups())
gw2 = gradients.gradients(z, [w], colocate_gradients_with_ops=False)[0]
self.assertTrue(wx.op.colocation_groups() != gw2.op.colocation_groups())
def testColocateGradientsWithAggregationInMultipleDevices(self):
with ops.Graph().as_default() as g:
with g.device("/gpu:1"):
w = constant(1.0, shape=[1, 1])
x = constant(1.0, shape=[1, 2])
y = constant(1.0, shape=[1, 2])
with g.device("/task:1"):
wx = math_ops.matmul(w, x)
with g.device("/task:2"):
wy = math_ops.matmul(w, y)
with g.device("/gpu:0"):
z = wx + wy
gw1 = gradients.gradients(z, [w], colocate_gradients_with_ops=True)[0]
self.assertEqual(gw1.op.colocation_groups(), w.op.colocation_groups())
gw2 = gradients.gradients(z, [w], colocate_gradients_with_ops=False)[0]
self.assertTrue(w.op.colocation_groups() != gw2.op.colocation_groups())
def testBoundaryStop(self):
# Test that we don't differentiate 'x'. The gradient function for 'x' is
# set explicitly to None so we will get an exception if the gradient code
# tries to differentiate 'x'.
with ops.Graph().as_default():
c = constant(1.0)
x = array_ops.identity(c)
y = x + 1.0
z = y + 1
grads = gradients.gradients(z, [x])
self.assertTrue(all(x is not None for x in grads))
def testBoundaryContinue(self):
# Test that we differentiate both 'x' and 'y' correctly when x is a
# predecessor of y.
with self.test_session():
x = constant(1.0)
y = x * 2.0
z = y * 3.0
grads = gradients.gradients(z, [x, y])
self.assertTrue(all(x is not None for x in grads))
self.assertEqual(6.0, grads[0].eval())
def testAggregationMethodAccumulateN(self):
with self.test_session():
x = constant(1.0)
y = x * 2.0
z = y + y + y + y + y + y + y + y + y + y
grads = gradients.gradients(
z, [x, y],
aggregation_method=gradients.AggregationMethod.
EXPERIMENTAL_ACCUMULATE_N)
self.assertTrue(all(x is not None for x in grads))
self.assertEqual(20.0, grads[0].eval())
self.assertEqual(10.0, grads[1].eval())
def testAggregationMethodAddN(self):
with self.test_session():
x = constant(1.0)
y = x * 2.0
z = y + y + y + y + y + y + y + y + y + y
grads = gradients.gradients(
z, [x, y], aggregation_method=gradients.AggregationMethod.ADD_N)
self.assertTrue(all(x is not None for x in grads))
self.assertEqual(20.0, grads[0].eval())
self.assertEqual(10.0, grads[1].eval())
def testAggregationMethodTree(self):
with self.test_session():
x = constant(1.0)
y = x * 2.0
z = y + y + y + y + y + y + y + y + y + y
grads = gradients.gradients(
z, [x, y],
aggregation_method=gradients.AggregationMethod.EXPERIMENTAL_TREE)
self.assertTrue(all(x is not None for x in grads))
self.assertEqual(20.0, grads[0].eval())
self.assertEqual(10.0, grads[1].eval())
def testNoGradientForStringOutputs(self):
with ops.Graph().as_default():
def _TestOpGrad(_, float_grad, string_grad):
"""Gradient function for TestStringOutput."""
self.assertEquals(float_grad.dtype, dtypes.float32)
self.assertFalse(string_grad)
return float_grad
ops.RegisterGradient("TestStringOutput")(_TestOpGrad)
c = constant(1.0)
x, _ = test_ops.test_string_output(c)
z = x * 2.0
w = z * 3.0
grads = gradients.gradients(z, [c])
self.assertTrue(isinstance(grads[0], ops.Tensor))
grads = gradients.gradients(w, [c])
self.assertTrue(isinstance(grads[0], ops.Tensor))
def testSingletonIndexedSlices(self):
with ops.Graph().as_default():
x = array_ops.placeholder(dtypes.float32)
y = array_ops.identity(x)
dy = ops.IndexedSlices(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.int32))
dx, = gradients.gradients(y, x, grad_ys=dy)
# The gradient of tf.identity should pass the value through unchanged.
# A previous version of the code did this only for tf.Tensor, not
# tf.IndexedSlices.
self.assertEqual(dx, dy)
def testNonDifferentiableSwitchInWhileLoop(self):
with ops.Graph().as_default():
v = array_ops.placeholder(dtypes.float32, [])
def _Step(i, a, ta):
a += math_ops.cast(v, dtypes.int32)
return (i + 1, a, ta.write(i, a))
n = 4
i, _, ta = control_flow_ops.while_loop(
lambda i, *_: i < n,
_Step, [0, 0, tensor_array_ops.TensorArray(
dtypes.int32, size=n)])
target = ta.read(i - 1)
grad, = gradients.gradients(target, v)
self.assertIsNone(grad)
class FunctionGradientsTest(test_util.TensorFlowTestCase):
@classmethod
def XSquarePlusB(cls, x, b):
return x * x + b
@classmethod
def XSquarePlusBGradient(cls, x, b, g):
# Perturb gradients (multiply by 2), so we can test that this was called.
g *= 2.0
return g * 2.0 * x, g
@classmethod
def _PythonGradient(cls, op, grad):
# Perturb gradients (multiply by 3), so we can test that this was called.
grad *= 3.0
return grad * op.inputs[0] * 2.0, grad
@classmethod
def _GetFunc(cls, **kwargs):
return function.Defun(dtypes.float32, dtypes.float32, **
kwargs)(cls.XSquarePlusB)
def _GetFuncGradients(self, f, x_value, b_value):
x = constant_op.constant(x_value, name="x")
b = constant_op.constant(b_value, name="b")
y = f(x, b)
grads = gradients.gradients(y, [x, b])
with self.test_session() as sess:
return sess.run(grads)
def testFunctionGradientsBasic(self):
g = ops.Graph()
with g.as_default():
f = self._GetFunc()
# Get gradients (should add SymbolicGradient node for function).
grads = self._GetFuncGradients(f, [2.0], [1.0])
self.assertAllEqual([4.0], grads[0])
self.assertAllEqual([1.0], grads[1])
def testFunctionGradientsComposition(self):
with ops.Graph().as_default():
f = self._GetFunc()
x = constant_op.constant([2.0], name="x")
b1 = constant_op.constant([1.0], name="b1")
b2 = constant_op.constant([1.0], name="b2")
y = f(f(x, b1), b2)
# Build gradient graph (should add SymbolicGradient node for function).
grads = gradients.gradients(y, [x, b1])
with self.test_session() as sess:
self.assertAllEqual([40.0], sess.run(grads)[0])
self.assertAllEqual([10.0], sess.run(grads)[1])
def testFunctionGradientsWithGradFunc(self):
g = ops.Graph()
with g.as_default():
grad_func = function.Defun(dtypes.float32, dtypes.float32,
dtypes.float32)(self.XSquarePlusBGradient)
f = self._GetFunc(grad_func=grad_func)
# Get gradients (should add SymbolicGradient node for function, which
# uses the grad_func above, which multiplies all gradients by 2).
grads = self._GetFuncGradients(f, [2.0], [1.0])
self.assertAllEqual([4.0 * 2], grads[0])
self.assertAllEqual([1.0 * 2], grads[1])
def testFunctionGradientWithRegistration(self):
g = ops.Graph()
with g.as_default():
f = self._GetFunc(python_grad_func=self._PythonGradient)
# Get gradients, using the python gradient function. It multiplies the
# gradients by 3.
grads = self._GetFuncGradients(f, [2.0], [1.0])
self.assertAllEqual([4.0 * 3], grads[0])
self.assertAllEqual([1.0 * 3], grads[1])
def testFunctionGradientWithGradFuncAndRegistration(self):
g = ops.Graph()
with g.as_default():
grad_func = function.Defun(dtypes.float32, dtypes.float32,
dtypes.float32)(self.XSquarePlusBGradient)
with self.assertRaisesRegexp(ValueError, "Gradient defined twice"):
f = self._GetFunc(
grad_func=grad_func, python_grad_func=self._PythonGradient)
f.add_to_graph(ops.Graph())
class StopGradientTest(test_util.TensorFlowTestCase):
def testStopGradient(self):
with ops.Graph().as_default():
inp = constant(1.0, shape=[100, 32], name="in")
out = array_ops.stop_gradient(inp)
igrad = gradients.gradients(out, inp)[0]
assert igrad is None
class PreventGradientTest(test_util.TensorFlowTestCase):
def testPreventGradient(self):
with ops.Graph().as_default():
inp = constant(1.0, shape=[100, 32], name="in")
out = array_ops.prevent_gradient(inp)
with self.assertRaisesRegexp(LookupError, "No gradient defined"):
_ = gradients.gradients(out, inp)
class HessianVectorProductTest(test_util.TensorFlowTestCase):
def testHessianVectorProduct(self):
# Manually compute the Hessian explicitly for a low-dimensional problem
# and check that HessianVectorProduct matches multiplication by the
# explicit Hessian.
# Specifically, the Hessian of f(x) = x^T A x is
# H = A + A^T.
# We expect HessianVectorProduct(f(x), x, v) to be H v.
m = 4
rng = np.random.RandomState([1, 2, 3])
mat_value = rng.randn(m, m).astype("float32")
v_value = rng.randn(m, 1).astype("float32")
x_value = rng.randn(m, 1).astype("float32")
hess_value = mat_value + mat_value.T
hess_v_value = np.dot(hess_value, v_value)
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
mat = constant_op.constant(mat_value)
v = constant_op.constant(v_value)
x = constant_op.constant(x_value)
mat_x = math_ops.matmul(mat, x, name="Ax")
x_mat_x = math_ops.matmul(array_ops.transpose(x), mat_x, name="xAx")
hess_v = gradients_impl._hessian_vector_product(x_mat_x, [x], [v])[0]
hess_v_actual = hess_v.eval()
self.assertAllClose(hess_v_value, hess_v_actual)
class HessianTest(test_util.TensorFlowTestCase):
def testHessian1D(self):
# Manually compute the Hessian explicitly for a low-dimensional problem
# and check that `hessian` matches. Specifically, the Hessian of
# f(x) = x^T A x is H = A + A^T.
m = 4
rng = np.random.RandomState([1, 2, 3])
mat_value = rng.randn(m, m).astype("float32")
x_value = rng.randn(m).astype("float32")
hess_value = mat_value + mat_value.T
with self.test_session(use_gpu=True):
mat = constant_op.constant(mat_value)
x = constant_op.constant(x_value)
x_mat_x = math_ops.reduce_sum(x[:, None] * mat * x[None, :])
hess = gradients.hessians(x_mat_x, x)[0]
hess_actual = hess.eval()
self.assertAllClose(hess_value, hess_actual)
def testHessian1D_multi(self):
# Test the computation of the hessian with respect to multiple tensors
m = 4
n = 3
rng = np.random.RandomState([1, 2, 3])
mat_values = [rng.randn(m, m).astype("float32") for _ in range(n)]
x_values = [rng.randn(m).astype("float32") for _ in range(n)]
hess_values = [mat_value + mat_value.T for mat_value in mat_values]
with self.test_session(use_gpu=True):
mats = [constant_op.constant(mat_value) for mat_value in mat_values]
xs = [constant_op.constant(x_value) for x_value in x_values]
xs_mats_xs = [
math_ops.reduce_sum(x[:, None] * mat * x[None, :])
for x, mat in zip(xs, mats)
]
hessians = gradients.hessians(xs_mats_xs, xs)
hessians_actual = [hess.eval() for hess in hessians]
for hess_value, hess_actual in zip(hess_values, hessians_actual):
self.assertAllClose(hess_value, hess_actual)
def testHessianInvalidDimension(self):
for shape in [(10, 10), None]:
with self.test_session(use_gpu=True):
x = array_ops.placeholder(dtypes.float32, shape)
# Expect a ValueError because the dimensions are wrong
with self.assertRaises(ValueError):
gradients.hessians(x, x)
class IndexedSlicesToTensorTest(test_util.TensorFlowTestCase):
def testIndexedSlicesToTensor(self):
with self.test_session():
np_val = np.random.rand(4, 4, 4, 4).astype(np.float32)
c = constant_op.constant(np_val)
c_sparse = math_ops._as_indexed_slices(c)
self.assertAllEqual(np_val.shape, c_sparse.dense_shape.eval())
c_dense = math_ops.multiply(c_sparse, 1.0)
self.assertAllClose(np_val, c_dense.eval())
def testIndexedSlicesToTensorList(self):
with self.test_session():
numpy_list = []
dense_list = []
sparse_list = []
for _ in range(3):
np_val = np.random.rand(4, 4, 4, 4).astype(np.float32)
c = constant_op.constant(np_val)
c_sparse = math_ops._as_indexed_slices(c)
numpy_list.append(np_val)
dense_list.append(c)
sparse_list.append(c_sparse)
packed_dense = array_ops.stack(dense_list)
packed_sparse = array_ops.stack(sparse_list)
self.assertAllClose(packed_dense.eval(), packed_sparse.eval())
def testInt64Indices(self):
with self.test_session():
np_val = np.random.rand(4, 4, 4, 4).astype(np.float32)
c = constant_op.constant(np_val)
c_sparse = math_ops._as_indexed_slices(c)
c_sparse = ops.IndexedSlices(
c_sparse.values,
math_ops.cast(c_sparse.indices, dtypes.int64), c_sparse.dense_shape)
self.assertAllEqual(np_val.shape, c_sparse.dense_shape.eval())
c_dense = math_ops.multiply(c_sparse, 1.0)
self.assertAllClose(np_val, c_dense.eval())
def testWarnings(self):
# Smaller than the threshold: no warning.
c_sparse = ops.IndexedSlices(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.int32), constant([4, 4, 4, 4]))
with warnings.catch_warnings(record=True) as w:
math_ops.multiply(c_sparse, 1.0)
self.assertEqual(0, len(w))
# Greater than or equal to the threshold: warning.
c_sparse = ops.IndexedSlices(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.int32), constant([100, 100, 100, 100]))
with warnings.catch_warnings(record=True) as w:
math_ops.multiply(c_sparse, 1.0)
self.assertEqual(1, len(w))
self.assertTrue(
"with 100000000 elements. This may consume a large amount of memory." in
str(w[0].message))
# Unknown dense shape: warning.
c_sparse = ops.IndexedSlices(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.int32),
array_ops.placeholder(dtypes.int32))
with warnings.catch_warnings(record=True) as w:
math_ops.multiply(c_sparse, 1.0)
self.assertEqual(1, len(w))
self.assertTrue(
"of unknown shape. This may consume a large amount of memory." in
str(w[0].message))
class OnlyRealGradientsTest(test_util.TensorFlowTestCase):
def testRealOnly(self):
x = constant_op.constant(7+3j, dtype=dtypes.complex64)
y = math_ops.square(x)
with self.assertRaisesRegexp(
TypeError,
r"Gradients of complex tensors must set grad_ys "
r"\(y\.dtype = tf\.complex64\)"):
gradients.gradients(y, x)
class CompilationEnabledInGradientTest(test_util.TensorFlowTestCase):
def testCompilationInGradient(self):
with self.test_session():
x = constant_op.constant(3)
y_nc = math_ops.add(x, x, name="not_compiled")
with jit.experimental_jit_scope():
y_c = math_ops.add(y_nc, y_nc, name="compiled")
x_grads = gradients.gradients([y_c], [x])[0]
operations = x_grads.graph.get_operations()
c_grad_ops = [
op for op in operations if "gradients/compiled" in op.name]
nc_grad_ops = [
op for op in operations if "gradients/not_compiled" in op.name]
self.assertGreater(len(c_grad_ops), 0)
self.assertGreater(len(nc_grad_ops), 0)
for cg in c_grad_ops:
self.assertEqual(True, cg.get_attr("_XlaCompile"))
for ncg in nc_grad_ops:
with self.assertRaisesRegexp(ValueError, "No attr named"):
ncg.get_attr("_XlaCompile")
# d/dx (4 * x)
self.assertAllClose(4, x_grads.eval())
if __name__ == "__main__":
googletest.main()
|
{
"content_hash": "45567b6cd8ee5ea1abe6fa8e8c0c0ee4",
"timestamp": "",
"source": "github",
"line_count": 598,
"max_line_length": 84,
"avg_line_length": 38.10200668896321,
"alnum_prop": 0.6381391266183893,
"repo_name": "handroissuazo/tensorflow",
"id": "453313b4ac3223666d4277d99a909c05a7547cfc",
"size": "23474",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/gradients_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "6963"
},
{
"name": "C",
"bytes": "177136"
},
{
"name": "C++",
"bytes": "20579965"
},
{
"name": "CMake",
"bytes": "120039"
},
{
"name": "CSS",
"bytes": "7005"
},
{
"name": "Go",
"bytes": "103991"
},
{
"name": "HTML",
"bytes": "582790"
},
{
"name": "Java",
"bytes": "278667"
},
{
"name": "JavaScript",
"bytes": "21416"
},
{
"name": "Jupyter Notebook",
"bytes": "399586"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "32007"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "64592"
},
{
"name": "Protocol Buffer",
"bytes": "187378"
},
{
"name": "Python",
"bytes": "20517015"
},
{
"name": "Shell",
"bytes": "329427"
},
{
"name": "TypeScript",
"bytes": "765149"
}
],
"symlink_target": ""
}
|
import dbus.service
from gi.repository import GObject as gobject
from oacids.helpers.dbus_props import GPropSync, Manager, WithProperties
from ifaces import BUS, IFACE, PATH, INTROSPECTABLE_IFACE, TRIGGER_IFACE, OPENAPS_IFACE
from oacids.schedules import utils
import datetime
from dateutil import parser, rrule, tz
import recurrent
from collections import deque, defaultdict
import hashlib
import threading
class Trigger (GPropSync):
OWN_IFACE = IFACE + '.Trigger'
PROP_SIGS = {
'name': 's'
, 'obj': 's'
, 'expected': 's'
, 'phases': 's'
, 'rrule': 's'
, 'id': 's'
, 'Status': 's'
, 'countdown': 'd'
}
name = gobject.property(type=str)
expected = gobject.property(type=str)
obj = gobject.property(type=str)
phases = gobject.property(type=str)
rrule = gobject.property(type=str)
trigger = gobject.property(type=str)
_states = ['Armed', 'Running', 'Success', 'Error', 'Done', 'Gone' ]
_error = [ ]
@gobject.property(type=float)
def countdown (self):
return (self.when - datetime.datetime.now( )).total_seconds( )
@gobject.property(type=str)
def id (self):
return self.armed.hashed
@gobject.property(type=str)
def Status (self):
print "STATUS", self.id, self._states[self._status]
return self._states[self._status]
def __init__ (self, path, manager=None, props=None, armed=None):
self.manager = manager
bus = manager.bus
self.bus = bus or dbus.SessionBus( )
self.path = path
self.when = armed.when
self.armed = armed
self._status = 0
GPropSync.__init__(self, self.bus.get_connection( ), path)
# WithProperties.__init__(self, self.bus.get_connection( ), path)
self.attrs = props
self.attrs['trigger'] = self.armed.hashed
if props:
for key in props:
self.set_property(key, props[key])
self.sync_all_props( )
@dbus.service.signal(dbus_interface=OWN_IFACE,
signature='')
def Armed (self):
pass
@dbus.service.signal(dbus_interface=OWN_IFACE,
signature='')
def Running (self):
self._status = 1
pass
@dbus.service.signal(dbus_interface=OWN_IFACE,
signature='')
def Fire (self):
now = datetime.datetime.now( )
old = self.Status
self._status = 1
new = self.Status
self.PropertiesChanged(self.OWN_IFACE, dict(Status=new), dict(Status=old))
print "FIRED", now.isoformat( ), self.when.isoformat( ), self.name, self.path
self.manager.Trigger("Queue", self.path)
self.manager.master.background.Do(self.attrs, ack=self.on_success, error=self.on_error)
# self._status += 1
def on_error (self):
print "PHASED ON ERROR"
# self.Done( )
self.Error( )
pass
def on_success (self, results):
print "RESULTS SUCCESS PHASE", results
self._status = 2
# self.Success( )
@dbus.service.signal(dbus_interface=OWN_IFACE, signature='')
def Success (self):
        old_status = self.Status
        self._status = 2
        self.PropertiesChanged(self.OWN_IFACE, dict(Status='Success'), dict(Status=old_status))
# self.Done( )
pass
@dbus.service.signal(dbus_interface=OWN_IFACE, signature='')
def Error (self):
self._status = 3
self.PropertiesChanged(self.OWN_IFACE, dict(Status='Error'), dict(Status='Fired'))
# self.Done( )
pass
@dbus.service.signal(dbus_interface=OWN_IFACE, signature='')
def Done (self):
old_status = self.Status
# self._status += 1
self._status = 4
self.PropertiesChanged(self.OWN_IFACE, dict(Status='Done'), dict(Status=old_status))
# self.Finish( )
pass
@dbus.service.signal(dbus_interface=OWN_IFACE, signature='')
def Finish (self):
self.PropertiesChanged(self.OWN_IFACE, dict(Status='Finish'), dict(Status='Done'))
# self.Remove( )
pass
def phase (self, phase):
phases = {
'Running': self.Running
, 'Done': self.Done
, 'Error': self.Error
, 'Finish': self.Finish
, 'Success': self.Success
, 'Remove': self.Remove
}
func = phases.get(phase, None)
print "PHASE", phase, func, self.id
if func:
func( )
@dbus.service.signal(dbus_interface=OWN_IFACE, signature='')
def Remove (self):
self._status = 5
self.PropertiesChanged(self.OWN_IFACE, dict(Status='Remove'), dict())
print "GOT REMOVE SIGNAL", self
pass
class Armable (object):
def __init__ (self, when, remote):
self.remote = remote
self.when = when
self.hashed = hashlib.sha224(remote.path + remote.item.name + when.isoformat( )).hexdigest( )
def __hash__ (self):
return hash((self.when.isoformat( ), self.remote.item.name, self.remote.path))
# return hash(self.hashed)
def __eq__ (self, other):
return other.hashed == self.hashed
def __cmp__ (self, other):
if other.hashed == self.hashed:
return 0
a = ''.join([self.when.isoformat( ), self.remote.item.name, self.remote.path])
b = ''.join([other.when.isoformat( ), other.remote.item.name, other.remote.path])
if a > b:
return 1
else:
return -1
def cleanup (self):
print "cleaning up"
self.manager.schedules.pop(self)
if self.trigger:
def cleaned ( ):
self.manager.Trigger("Cleanup", self.trigger.path)
self.manager.InterfacesRemoved(self.trigger.path, { Trigger.OWN_IFACE: self.props })
self.remote.bus.remove_signal_receiver(self.cleanup, "Remove", dbus_interface=Trigger.OWN_IFACE, bus_name=BUS, path=self.trigger.path)
self.trigger.remove_from_connection( )
gobject.timeout_add(500, cleaned)
def update_phase (self, signal, props):
if props['expected'] == self.props['expected']:
print "UPDATE PHASE", signal, props
def arm (self, manager):
self.manager = manager
props = dict(obj=self.remote.path, name=self.remote.item.name, expected=self.when.isoformat( ), **self.remote.item.fields)
self.props = props
new_path = PATH + '/Scheduler/Armed/' + self.hashed
delay_ms = (self.when - datetime.datetime.now( )).total_seconds( ) * 1000
self.remote.bus.add_signal_receiver(self.cleanup, "Remove", dbus_interface=Trigger.OWN_IFACE, bus_name=BUS, path=new_path)
# manager.bus.add_signal_receiver(self.attrs, ack=self.on_success, error=self.on_error)
trigger = None
try:
trigger = Trigger(new_path, manager, props, self)
if trigger:
trigger.Armed( )
self.manager.Trigger("Arming", trigger.path)
self.trigger = trigger
print "DELAYING", delay_ms
gobject.timeout_add(delay_ms, trigger.Fire)
manager.InterfacesAdded(trigger.path, { Trigger.OWN_IFACE: props })
self.manager.Trigger("Armed", trigger.path)
except:
print "already exited?"
raise
finally:
pass
return trigger
class Scheduler (GPropSync, Manager):
OWN_IFACE = IFACE + '.Scheduler'
PROP_SIGS = {
'TaskWithin': 'd'
, 'MaxTasksAhead': 'u'
}
TaskWithin = gobject.property(type=float)
MaxTasksAhead = gobject.property(type=int, default=5)
def __init__ (self, bus, ctrl):
self.bus = bus
self.path = PATH + '/Scheduler'
self.master = ctrl
Manager.__init__(self, self.path, bus)
self.TaskWithin = (self.master.heartbeat.interval * self.MaxTasksAhead) / 1000
self.init_managed( )
@dbus.service.signal(dbus_interface=OWN_IFACE,
signature='so')
def Trigger (self, status, path):
pass
def Scan (self):
candidates = self.Poll(within_seconds=self.TaskWithin)
found = 0
added = 0
for candidate in candidates:
# self.enqueue(candidate)
# print candidate
# print self.schedules
is_armed = False
other = self.schedules.get(candidate, None)
if other is None:
try:
self.schedules[candidate] = candidate.arm(self)
added += 1
is_armed = True
except Exception, e:
print "what happened?", e
pass
else:
found += 1
pass
# print "already scheduled", candidate
txt = { True: "ARMED", False: "skipped" }
# print txt.get(is_armed), candidate.when, candidate.remote.item.name, candidate.remote.path
summary = """{dt}: tracking {num_schedules} managed, added {added} new, skipped {found} upcoming duplicates"""
print summary.format(thread=threading.currentThread( ).ident, dt=datetime.datetime.now( ).isoformat( ), num_schedules=len(self.schedules), added=added, found=found)
return
def Poll (self, within_seconds=None):
# print "poll within", within_seconds
results = [ ]
candidates = self.master.openaps.things.get('schedules', [ ])
now = datetime.datetime.now( )
# print "polling schedules", len(candidates), now.isoformat( ), 'for', self.MaxTasksAhead, 'MaxTasksAhead'
for configured in candidates:
# print "SCHEDULE", configured.item.fields
# spec = recurrent.parse(configured.item.fields['rrule'], now=self.since)
spec = configured.item.fields['rrule']
rr = rrule.rrulestr(spec, dtstart=self.since)
# print configured.item.fields['rrule'], spec
upcoming = rr.after(now)
# print "next", upcoming.isoformat( )
            # XXX: bug in the making: need to fill out all events before within_seconds as well.
# if (upcoming - now).total_seconds( ) <= within_seconds:
for upcoming in iter_triggers(upcoming, rr, within_seconds):
# print "ARM THING", configured.path
# print "ATTEMPT ARM", configured.item.name, configured.path, spec
# self.enqueue(upcoming, configured)
trigger = Armable(upcoming, configured)
# exists = self.schedules[(upcoming, configured.item.name)]
results.append(trigger)
return results
pass
def GetTriggerById (self, hashed):
for key, trigger in self.schedules.items( ):
if trigger.id == hashed:
return trigger
def enqueue (self, upcoming, event):
name = event.item.name
obj_path = event.path
isostr = upcoming.isoformat( )
path = "%s/Trigger%s%s" % (self.path, name_title, len(self.schedules))
def init_managed (self):
self.since = utils.datetime.datetime.fromtimestamp(self.master.heartbeat.started_at)
# self.add_signal_handler("heartbeat", self.Scan, dbus_interface=OPENAPS_IFACE + ".Heartbeat")
print "SUBSCRIBING to Heartbeat"
self.bus.add_signal_receiver(self.Scan, "Heartbeat", dbus_interface=OPENAPS_IFACE + ".Heartbeat", bus_name=BUS, path=self.master.heartbeat.path)
# self.schedules = defaultdict(dict)
self.schedules = { }
def get_all_managed (self):
paths = dict( )
for thing in self.schedules:
# print thing, thing.trigger.OWN_IFACE
# print thing.trigger.OWN_IFACE, thing.trigger
spec = { thing.trigger.OWN_IFACE: dict(**thing.trigger.GetAll(thing.trigger.OWN_IFACE)) }
paths[thing.trigger.path] = spec
return paths
@dbus.service.method(dbus_interface=OWN_IFACE,
in_signature='a{sv}', out_signature='')
# , async_callbacks=('ack', 'error'))
# def Create (self, props, ack=None, error=None):
def Create (self, props):
path = "%s/Trigger%s" % (self.path, len(self.schedules))
        # Trigger expects the manager (not the bus) as its second argument,
        # and self.schedules is a dict, not a list.
        new_schedule = Trigger(path, self, props)
        print "NEW SCHEDULE", new_schedule
        self.schedules[path] = new_schedule
self.InterfacesAdded(path, { TRIGGER_IFACE: props })
def iter_triggers (upcoming, rule, within_seconds):
end = upcoming + datetime.timedelta(seconds=within_seconds)
while upcoming < end:
yield upcoming
upcoming = rule.after(upcoming)
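# Illustrative sketch of how iter_triggers walks a recurrence rule; the
# rule string and window below are hypothetical:
#
#     from dateutil import rrule
#     import datetime
#     now = datetime.datetime.now( )
#     rr = rrule.rrulestr('FREQ=MINUTELY', dtstart=now)
#     first = rr.after(now)
#     for when in iter_triggers(first, rr, within_seconds=180):
#         print when.isoformat( )   # every firing in the next ~3 minutes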
|
{
"content_hash": "f32f081e1f4bafd4d6a6bc2573dbece3",
"timestamp": "",
"source": "github",
"line_count": 325,
"max_line_length": 168,
"avg_line_length": 35.91692307692308,
"alnum_prop": 0.6497901139381479,
"repo_name": "openaps/oacids",
"id": "80a0a845f4c1891eba4454a2d81d1fff2f6a1066",
"size": "11674",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oacids/exported/scheduler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "60"
},
{
"name": "Python",
"bytes": "54599"
}
],
"symlink_target": ""
}
|
"""
Plugins to add behavior to mpld3 charts
=======================================
Plugins are means of adding additional javascript features to D3-rendered
matplotlib plots. A number of plugins are defined here; it is also possible
to create nearly any imaginable behavior by defining your own custom plugin.
"""
__all__ = ['connect', 'clear', 'get_plugins', 'PluginBase',
'Reset', 'Zoom', 'BoxZoom',
'PointLabelTooltip', 'PointHTMLTooltip', 'LineLabelTooltip',
'MousePosition']
import collections
import json
import uuid
import matplotlib
from .utils import get_id
def get_plugins(fig):
"""Get the list of plugins in the figure"""
connect(fig)
return fig.mpld3_plugins
def connect(fig, *plugins):
"""Connect one or more plugins to a figure
Parameters
----------
fig : matplotlib Figure instance
The figure to which the plugins will be connected
*plugins :
Additional arguments should be plugins which will be connected
to the figure.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from mpld3 import plugins
>>> fig, ax = plt.subplots()
>>> lines = ax.plot(range(10), '-k')
>>> plugins.connect(fig, plugins.LineLabelTooltip(lines[0]))
"""
if not isinstance(fig, matplotlib.figure.Figure):
raise ValueError("plugins.connect: first argument must be a figure")
if not hasattr(fig, 'mpld3_plugins'):
fig.mpld3_plugins = DEFAULT_PLUGINS[:]
for plugin in plugins:
fig.mpld3_plugins.append(plugin)
def clear(fig):
"""Clear all plugins from the figure, including defaults"""
fig.mpld3_plugins = []
class PluginBase(object):
def get_dict(self):
return self.dict_
def javascript(self):
if hasattr(self, "JAVASCRIPT"):
if hasattr(self, "js_args_"):
return self.JAVASCRIPT.render(self.js_args_)
else:
return self.JAVASCRIPT
else:
return ""
def css(self):
if hasattr(self, "css_"):
return self.css_
else:
return ""
class Reset(PluginBase):
"""A Plugin to add a reset button"""
dict_ = {"type": "reset"}
class MousePosition(PluginBase):
"""A Plugin to display coordinates for the current mouse position
Example
-------
>>> import matplotlib.pyplot as plt
>>> from mpld3 import fig_to_html, plugins
>>> fig, ax = plt.subplots()
>>> points = ax.plot(range(10), 'o')
>>> plugins.connect(fig, plugins.MousePosition())
>>> fig_to_html(fig)
"""
def __init__(self, fontsize=12, fmt=".3g"):
self.dict_ = {"type": "mouseposition",
"fontsize": fontsize,
"fmt": fmt}
class Zoom(PluginBase):
"""A Plugin to add zoom behavior to the plot
Parameters
----------
button : boolean, optional
if True (default), then add a button to enable/disable zoom behavior
enabled : boolean, optional
specify whether the zoom should be enabled by default. By default,
zoom is enabled if button == False, and disabled if button == True.
Notes
-----
Even if ``enabled`` is specified, other plugins may modify this state.
"""
def __init__(self, button=True, enabled=None):
if enabled is None:
enabled = not button
self.dict_ = {"type": "zoom",
"button": button,
"enabled": enabled}
class BoxZoom(PluginBase):
"""A Plugin to add box-zoom behavior to the plot
Parameters
----------
button : boolean, optional
if True (default), then add a button to enable/disable zoom behavior
enabled : boolean, optional
specify whether the zoom should be enabled by default. By default,
zoom is enabled if button == False, and disabled if button == True.
Notes
-----
Even if ``enabled`` is specified, other plugins may modify this state.
"""
def __init__(self, button=True, enabled=None):
if enabled is None:
enabled = not button
self.dict_ = {"type": "boxzoom",
"button": button,
"enabled": enabled}
class PointLabelTooltip(PluginBase):
"""A Plugin to enable a tooltip: text which hovers over points.
Parameters
----------
points : matplotlib Collection or Line2D object
The figure element to apply the tooltip to
labels : array or None
If supplied, specify the labels for each point in points. If not
supplied, the (x, y) values will be used.
hoffset, voffset : integer
The number of pixels to offset the tooltip text. Default is
hoffset = 0, voffset = 10
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from mpld3 import fig_to_html, plugins
>>> fig, ax = plt.subplots()
>>> points = ax.plot(range(10), 'o')
>>> plugins.connect(fig, PointLabelTooltip(points[0]))
>>> fig_to_html(fig)
"""
def __init__(self, points, labels=None,
hoffset=0, voffset=10, location="mouse"):
if location not in ["bottom left", "top left", "bottom right",
"top right", "mouse"]:
raise ValueError("invalid location: {0}".format(location))
if isinstance(points, matplotlib.lines.Line2D):
suffix = "pts"
else:
suffix = None
self.dict_ = {"type": "tooltip",
"id": get_id(points, suffix),
"labels": labels,
"hoffset": hoffset,
"voffset": voffset,
"location": location}
class LineLabelTooltip(PluginBase):
"""A Plugin to enable a tooltip: text which hovers over a line.
Parameters
----------
line : matplotlib Line2D object
The figure element to apply the tooltip to
    label : string
        If supplied, specifies the label for the line. If not
        supplied, the (x, y) values will be used.
hoffset, voffset : integer
The number of pixels to offset the tooltip text. Default is
hoffset = 0, voffset = 10
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from mpld3 import fig_to_html, plugins
>>> fig, ax = plt.subplots()
>>> lines = ax.plot(range(10), 'o')
>>> plugins.connect(fig, LineLabelTooltip(lines[0]))
>>> fig_to_html(fig)
"""
def __init__(self, points, label=None,
hoffset=0, voffset=10, location="mouse"):
if location not in ["bottom left", "top left", "bottom right",
"top right", "mouse"]:
raise ValueError("invalid location: {0}".format(location))
self.dict_ = {"type": "tooltip",
"id": get_id(points),
"labels": label if label is None else [label],
"hoffset": hoffset,
"voffset": voffset,
"location": location}
class LinkedBrush(PluginBase):
"""A Plugin to enable linked brushing between plots
Parameters
----------
points : matplotlib Collection or Line2D object
A representative of the scatter plot elements to brush.
button : boolean, optional
if True (default), then add a button to enable/disable zoom behavior
enabled : boolean, optional
specify whether the zoom should be enabled by default. default=True.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from mpld3 import fig_to_html, plugins
>>> X = np.random.random((3, 100))
>>> fig, ax = plt.subplots(3, 3)
>>> for i in range(2):
... for j in range(2):
... points = ax[i, j].scatter(X[i], X[j])
>>> plugins.connect(fig, LinkedBrush(points))
>>> fig_to_html(fig)
Notes
-----
Notice that in the above example, only one of the four sets of points is
passed to the plugin. This is all that is needed: for the sake of efficient
data storage, mpld3 keeps track of which plot objects draw from the same
data.
Also note that for the linked brushing to work correctly, the data must
not contain any NaNs. The presence of NaNs makes the different data views
have different sizes, so that mpld3 is unable to link the related points.
"""
def __init__(self, points, button=True, enabled=True):
if isinstance(points, matplotlib.lines.Line2D):
suffix = "pts"
else:
suffix = None
self.dict_ = {"type": "linkedbrush",
"button": button,
"enabled": enabled,
"id": get_id(points, suffix)}
class PointHTMLTooltip(PluginBase):
"""A Plugin to enable an HTML tooltip:
formated text which hovers over points.
Parameters
----------
points : matplotlib Collection or Line2D object
The figure element to apply the tooltip to
labels : list
The labels for each point in points, as strings of unescaped HTML.
hoffset, voffset : integer, optional
The number of pixels to offset the tooltip text. Default is
hoffset = 0, voffset = 10
css : str, optional
css to be included, for styling the label html if desired
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from mpld3 import fig_to_html, plugins
>>> fig, ax = plt.subplots()
>>> points = ax.plot(range(10), 'o')
>>> labels = ['<h1>{title}</h1>'.format(title=i) for i in range(10)]
>>> plugins.connect(fig, PointHTMLTooltip(points[0], labels))
>>> fig_to_html(fig)
"""
JAVASCRIPT = """
mpld3.register_plugin("htmltooltip", HtmlTooltipPlugin);
HtmlTooltipPlugin.prototype = Object.create(mpld3.Plugin.prototype);
HtmlTooltipPlugin.prototype.constructor = HtmlTooltipPlugin;
HtmlTooltipPlugin.prototype.requiredProps = ["id"];
HtmlTooltipPlugin.prototype.defaultProps = {labels:null,
hoffset:0,
voffset:10};
function HtmlTooltipPlugin(fig, props){
mpld3.Plugin.call(this, fig, props);
};
HtmlTooltipPlugin.prototype.draw = function(){
var obj = mpld3.get_element(this.props.id);
var labels = this.props.labels;
var tooltip = d3.select("body").append("div")
.attr("class", "mpld3-tooltip")
.style("position", "absolute")
.style("z-index", "10")
.style("visibility", "hidden");
obj.elements()
.on("mouseover", function(d, i){
tooltip.html(labels[i])
.style("visibility", "visible");})
.on("mousemove", function(d, i){
tooltip
.style("top", d3.event.pageY + this.props.voffset + "px")
.style("left",d3.event.pageX + this.props.hoffset + "px");
}.bind(this))
.on("mouseout", function(d, i){
tooltip.style("visibility", "hidden");});
};
"""
def __init__(self, points, labels=None,
hoffset=0, voffset=10, css=None):
self.points = points
self.labels = labels
self.voffset = voffset
self.hoffset = hoffset
self.css_ = css or ""
if isinstance(points, matplotlib.lines.Line2D):
suffix = "pts"
else:
suffix = None
self.dict_ = {"type": "htmltooltip",
"id": get_id(points, suffix),
"labels": labels,
"hoffset": hoffset,
"voffset": voffset}
class LineHTMLTooltip(PluginBase):
"""A Plugin to enable an HTML tooltip:
formated text which hovers over points.
Parameters
----------
points : matplotlib Line2D object
The figure element to apply the tooltip to
label : string
The label for the line, as strings of unescaped HTML.
hoffset, voffset : integer, optional
The number of pixels to offset the tooltip text. Default is
hoffset = 0, voffset = 10
css : str, optional
css to be included, for styling the label html if desired
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from mpld3 import fig_to_html, plugins
>>> fig, ax = plt.subplots()
>>> lines = ax.plot(range(10))
>>> label = '<h1>line {title}</h1>'.format(title='A')
>>> plugins.connect(fig, LineHTMLTooltip(lines[0], label))
>>> fig_to_html(fig)
"""
JAVASCRIPT = """
mpld3.register_plugin("linehtmltooltip", LineHTMLTooltip);
LineHTMLTooltip.prototype = Object.create(mpld3.Plugin.prototype);
LineHTMLTooltip.prototype.constructor = LineHTMLTooltip;
LineHTMLTooltip.prototype.requiredProps = ["id"];
LineHTMLTooltip.prototype.defaultProps = {label:null,
hoffset:0,
voffset:10};
function LineHTMLTooltip(fig, props){
mpld3.Plugin.call(this, fig, props);
};
LineHTMLTooltip.prototype.draw = function(){
var obj = mpld3.get_element(this.props.id, this.fig);
var label = this.props.label
var tooltip = d3.select("body").append("div")
.attr("class", "mpld3-tooltip")
.style("position", "absolute")
.style("z-index", "10")
.style("visibility", "hidden");
obj.elements()
.on("mouseover", function(d, i){
tooltip.html(label)
.style("visibility", "visible");
})
.on("mousemove", function(d, i){
tooltip
.style("top", d3.event.pageY + this.props.voffset + "px")
.style("left",d3.event.pageX + this.props.hoffset + "px");
}.bind(this))
.on("mouseout", function(d, i){
tooltip.style("visibility", "hidden");})
};
"""
def __init__(self, line, label=None,
hoffset=0, voffset=10,
css=None):
self.line = line
self.label = label
self.voffset = voffset
self.hoffset = hoffset
self.css_ = css or ""
self.dict_ = {"type": "linehtmltooltip",
"id": get_id(line),
"label": label,
"hoffset": hoffset,
"voffset": voffset}
class InteractiveLegendPlugin(PluginBase):
"""A plugin for an interactive legends.
Inspired by http://bl.ocks.org/simzou/6439398
Parameters
----------
plot_elements : iterable of matplotlib elements
the elements to associate with a given legend items
labels : iterable of strings
The labels for each legend element
ax : matplotlib axes instance, optional
the ax to which the legend belongs. Default is the first
axes. The legend will be plotted to the right of the specified
axes
    alpha_unsel : float, optional
        the multiplier applied to the alpha of the associated plot
        element(s) when the legend item is unselected.
        Default is 0.2
    alpha_over : float, optional
        the multiplier applied to the alpha of the associated plot
        element(s) when the legend item is hovered over.
        Default is 1 (no effect); 1.5 works nicely!
    start_visible : boolean, optional (could be a list of booleans)
        defines whether objects should start selected or not.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from mpld3 import fig_to_html, plugins
>>> N_paths = 5
>>> N_steps = 100
>>> x = np.linspace(0, 10, 100)
>>> y = 0.1 * (np.random.random((N_paths, N_steps)) - 0.5)
>>> y = y.cumsum(1)
>>> fig, ax = plt.subplots()
>>> labels = ["a", "b", "c", "d", "e"]
>>> line_collections = ax.plot(x, y.T, lw=4, alpha=0.6)
>>> interactive_legend = plugins.InteractiveLegendPlugin(line_collections,
... labels,
... alpha_unsel=0.2,
... alpha_over=1.5,
... start_visible=True)
>>> plugins.connect(fig, interactive_legend)
>>> fig_to_html(fig)
"""
JAVASCRIPT = """
mpld3.register_plugin("interactive_legend", InteractiveLegend);
InteractiveLegend.prototype = Object.create(mpld3.Plugin.prototype);
InteractiveLegend.prototype.constructor = InteractiveLegend;
InteractiveLegend.prototype.requiredProps = ["element_ids", "labels"];
InteractiveLegend.prototype.defaultProps = {"ax":null,
"alpha_unsel":0.2,
"alpha_over":1.0,
"start_visible":true}
function InteractiveLegend(fig, props){
mpld3.Plugin.call(this, fig, props);
};
InteractiveLegend.prototype.draw = function(){
var alpha_unsel = this.props.alpha_unsel;
var alpha_over = this.props.alpha_over;
var legendItems = new Array();
for(var i=0; i<this.props.labels.length; i++){
var obj = {};
obj.label = this.props.labels[i];
var element_id = this.props.element_ids[i];
mpld3_elements = [];
for(var j=0; j<element_id.length; j++){
var mpld3_element = mpld3.get_element(element_id[j], this.fig);
            // mpld3_element might be null in the case of Line2D instances,
            // since we pass the id for both the line and the markers; either
            // one might not exist on the D3 side
if(mpld3_element){
mpld3_elements.push(mpld3_element);
}
}
obj.mpld3_elements = mpld3_elements;
        obj.visible = this.props.start_visible[i]; // should become settable from the Python side
legendItems.push(obj);
set_alphas(obj, false);
}
// determine the axes with which this legend is associated
var ax = this.props.ax
if(!ax){
ax = this.fig.axes[0];
} else{
ax = mpld3.get_element(ax, this.fig);
}
// add a legend group to the canvas of the figure
var legend = this.fig.canvas.append("svg:g")
.attr("class", "legend");
// add the rectangles
legend.selectAll("rect")
.data(legendItems)
.enter().append("rect")
.attr("height", 10)
.attr("width", 25)
.attr("x", ax.width + ax.position[0] + 25)
.attr("y",function(d,i) {
return ax.position[1] + i * 25 + 10;})
.attr("stroke", get_color)
.attr("class", "legend-box")
.style("fill", function(d, i) {
return d.visible ? get_color(d) : "white";})
.on("click", click).on('mouseover', over).on('mouseout', out);
// add the labels
legend.selectAll("text")
.data(legendItems)
.enter().append("text")
.attr("x", function (d) {
return ax.width + ax.position[0] + 25 + 40;})
.attr("y", function(d,i) {
return ax.position[1] + i * 25 + 10 + 10 - 1;})
.text(function(d) { return d.label });
// specify the action on click
function click(d,i){
d.visible = !d.visible;
d3.select(this)
.style("fill",function(d, i) {
return d.visible ? get_color(d) : "white";
})
set_alphas(d, false);
};
// specify the action on legend overlay
function over(d,i){
set_alphas(d, true);
};
    // specify the action on legend mouse-out
function out(d,i){
set_alphas(d, false);
};
// helper function for setting alphas
function set_alphas(d, is_over){
for(var i=0; i<d.mpld3_elements.length; i++){
var type = d.mpld3_elements[i].constructor.name;
if(type =="mpld3_Line"){
var current_alpha = d.mpld3_elements[i].props.alpha;
var current_alpha_unsel = current_alpha * alpha_unsel;
var current_alpha_over = current_alpha * alpha_over;
d3.select(d.mpld3_elements[i].path[0][0])
.style("stroke-opacity", is_over ? current_alpha_over :
(d.visible ? current_alpha : current_alpha_unsel))
.style("stroke-width", is_over ?
alpha_over * d.mpld3_elements[i].props.edgewidth : d.mpld3_elements[i].props.edgewidth);
} else if((type=="mpld3_PathCollection")||
(type=="mpld3_Markers")){
var current_alpha = d.mpld3_elements[i].props.alphas[0];
var current_alpha_unsel = current_alpha * alpha_unsel;
var current_alpha_over = current_alpha * alpha_over;
d3.selectAll(d.mpld3_elements[i].pathsobj[0])
.style("stroke-opacity", is_over ? current_alpha_over :
(d.visible ? current_alpha : current_alpha_unsel))
.style("fill-opacity", is_over ? current_alpha_over :
(d.visible ? current_alpha : current_alpha_unsel));
} else{
console.log(type + " not yet supported");
}
}
};
// helper function for determining the color of the rectangles
function get_color(d){
var type = d.mpld3_elements[0].constructor.name;
var color = "black";
if(type =="mpld3_Line"){
color = d.mpld3_elements[0].props.edgecolor;
} else if((type=="mpld3_PathCollection")||
(type=="mpld3_Markers")){
color = d.mpld3_elements[0].props.facecolors[0];
} else{
console.log(type + " not yet supported");
}
return color;
};
};
"""
css_ = """
.legend-box {
cursor: pointer;
}
"""
def __init__(self, plot_elements, labels, ax=None,
alpha_unsel=0.2, alpha_over=1., start_visible=True):
self.ax = ax
if ax:
ax = get_id(ax)
# start_visible could be a list
if isinstance(start_visible, bool):
start_visible = [start_visible] * len(labels)
elif not len(start_visible) == len(labels):
raise ValueError("{} out of {} visible params has been set"
.format(len(start_visible), len(labels)))
mpld3_element_ids = self._determine_mpld3ids(plot_elements)
self.mpld3_element_ids = mpld3_element_ids
self.dict_ = {"type": "interactive_legend",
"element_ids": mpld3_element_ids,
"labels": labels,
"ax": ax,
"alpha_unsel": alpha_unsel,
"alpha_over": alpha_over,
"start_visible": start_visible}
def _determine_mpld3ids(self, plot_elements):
"""
Helper function to get the mpld3_id for each
of the specified elements.
"""
mpld3_element_ids = []
# There are two things being done here. First,
# we make sure that we have a list of lists, where
# each inner list is associated with a single legend
# item. Second, in case of Line2D object we pass
# the id for both the marker and the line.
# on the javascript side we filter out the nulls in
# case either the line or the marker has no equivalent
# D3 representation.
for entry in plot_elements:
ids = []
if isinstance(entry, collections.Iterable):
for element in entry:
mpld3_id = get_id(element)
ids.append(mpld3_id)
if isinstance(element, matplotlib.lines.Line2D):
mpld3_id = get_id(element, 'pts')
ids.append(mpld3_id)
else:
ids.append(get_id(entry))
if isinstance(entry, matplotlib.lines.Line2D):
mpld3_id = get_id(entry, 'pts')
ids.append(mpld3_id)
mpld3_element_ids.append(ids)
return mpld3_element_ids
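    # Shape sketch for _determine_mpld3ids (ids are hypothetical): one inner
    # list per legend item, with an extra "...pts" id appended for Line2D
    # entries, e.g. [["el240", "el240pts"], ["el241", "el242"]].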
class PointClickableHTMLTooltip(PluginBase):
"""A plugin for pop-up windows with data with rich HTML
Parameters
----------
points : matplotlib Collection object
The figure element to apply the tooltip to
labels : list
The labels for each point in points, as strings of unescaped HTML.
targets : list
The target data or rich HTML to be displayed when each collection element is clicked
hoffset, voffset : integer, optional
The number of pixels to offset the tooltip text. Default is
hoffset = 0, voffset = 10
css : str, optional
css to be included, for styling the label html and target data/tables, if desired
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from mpld3 import plugins
>>> fig, ax = plt.subplots(1,1)
>>> xx = yy = range(10)
>>> scat = ax.scatter(xx, range(10))
    >>> targets = ["<marquee>It works!<br><h1>{}, {}</h1></marquee>".format(x, y)
    ...            for x, y in zip(xx, yy)]
    >>> labels = ["{}, {}".format(x, y) for x, y in zip(xx, yy)]
>>> from mpld3.plugins import PointClickableHTMLTooltip
>>> plugins.connect(fig, PointClickableHTMLTooltip(scat, labels=labels, targets=targets))
"""
JAVASCRIPT="""
mpld3.register_plugin("clickablehtmltooltip", PointClickableHTMLTooltip);
PointClickableHTMLTooltip.prototype = Object.create(mpld3.Plugin.prototype);
PointClickableHTMLTooltip.prototype.constructor = PointClickableHTMLTooltip;
PointClickableHTMLTooltip.prototype.requiredProps = ["id"];
PointClickableHTMLTooltip.prototype.defaultProps = {labels:null,
targets:null,
hoffset:0,
voffset:10};
function PointClickableHTMLTooltip(fig, props){
mpld3.Plugin.call(this, fig, props);
};
PointClickableHTMLTooltip.prototype.draw = function(){
var obj = mpld3.get_element(this.props.id);
var labels = this.props.labels;
var targets = this.props.targets;
var tooltip = d3.select("body").append("div")
.attr("class", "mpld3-tooltip")
.style("position", "absolute")
.style("z-index", "10")
.style("visibility", "hidden");
obj.elements()
.on("mouseover", function(d, i){
if ($(obj.elements()[0][0]).css( "fill-opacity" ) > 0 || $(obj.elements()[0][0]).css( "stroke-opacity" ) > 0) {
tooltip.html(labels[i])
.style("visibility", "visible");
} })
.on("mousedown", function(d, i){
window.open().document.write(targets[i]);
})
.on("mousemove", function(d, i){
tooltip
.style("top", d3.event.pageY + this.props.voffset + "px")
.style("left",d3.event.pageX + this.props.hoffset + "px");
}.bind(this))
.on("mouseout", function(d, i){
tooltip.style("visibility", "hidden");});
};
"""
def __init__(self, points, labels=None, targets=None,
hoffset=2, voffset=-6, css=None):
self.points = points
self.labels = labels
self.targets = targets
self.voffset = voffset
self.hoffset = hoffset
self.css_ = css or ""
if targets is not None:
styled_targets = map(lambda x: self.css_ + x, targets)
else:
styled_targets = None
if isinstance(points, matplotlib.lines.Line2D):
suffix = "pts"
else:
suffix = None
self.dict_ = {"type": "clickablehtmltooltip",
"id": get_id(points, suffix),
"labels": labels,
"targets": styled_targets,
"hoffset": hoffset,
"voffset": voffset}
class MouseXPosition(PluginBase):
"""Like MousePosition, but only show the X coordinate"""
JAVASCRIPT="""
mpld3.register_plugin("mousexposition", MouseXPositionPlugin);
MouseXPositionPlugin.prototype = Object.create(mpld3.Plugin.prototype);
MouseXPositionPlugin.prototype.constructor = MouseXPositionPlugin;
MouseXPositionPlugin.prototype.requiredProps = [];
MouseXPositionPlugin.prototype.defaultProps = {
fontsize: 12,
fmt: "0d"
};
function MouseXPositionPlugin(fig, props) {
mpld3.Plugin.call(this, fig, props);
}
MouseXPositionPlugin.prototype.draw = function() {
var fig = this.fig;
var fmt = d3.format(this.props.fmt);
var coords = fig.canvas.append("text").attr("class", "mpld3-coordinates").style("text-anchor", "end").style("font-size", this.props.fontsize).attr("x", this.fig.width - 5).attr("y", this.fig.height - 5);
for (var i = 0; i < this.fig.axes.length; i++) {
var update_coords = function() {
var ax = fig.axes[i];
return function() {
var pos = d3.mouse(this), x = ax.x.invert(pos[0]), y = ax.y.invert(pos[1]);
coords.text(fmt(x));
};
}();
fig.axes[i].baseaxes.on("mousemove", update_coords).on("mouseout", function() {
coords.text("");
});
}
};"""
"""A Plugin to display coordinates for the current mouse position
Example
-------
>>> import matplotlib.pyplot as plt
>>> from mpld3 import fig_to_html, plugins
>>> fig, ax = plt.subplots()
>>> points = ax.plot(range(10), 'o')
>>> plugins.connect(fig, plugins.MouseXPosition())
>>> fig_to_html(fig)
"""
def __init__(self, fontsize=12, fmt="8.0f"):
self.dict_ = {"type": "mousexposition",
"fontsize": fontsize,
"fmt": fmt}
DEFAULT_PLUGINS = [Reset(), Zoom(), BoxZoom()]
|
{
"content_hash": "ec2f3d5063a57238572551887434074c",
"timestamp": "",
"source": "github",
"line_count": 841,
"max_line_length": 207,
"avg_line_length": 37.28537455410226,
"alnum_prop": 0.5414739930478043,
"repo_name": "etgalloway/mpld3",
"id": "6bffeef53ee47e5c1f7314e8c378d27ec1b54fec",
"size": "31357",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "mpld3/plugins.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "150289"
},
{
"name": "Jupyter Notebook",
"bytes": "1911739"
},
{
"name": "Makefile",
"bytes": "943"
},
{
"name": "Python",
"bytes": "122758"
}
],
"symlink_target": ""
}
|
import os
import logging
from typing import Callable, Dict, Type
from filelock import FileLock
import ray
from ray import tune
from ray.tune.resources import Resources
from ray.tune.trainable import TrainableUtil
from ray.tune.result import RESULT_DUPLICATE
from ray.tune.logger import NoopLogger
from ray.tune.function_runner import wrap_function
from horovod.ray import RayExecutor
logger = logging.getLogger(__name__)
def get_rank() -> str:
return os.environ["HOROVOD_RANK"]
def logger_creator(log_config: Dict, logdir: str) -> NoopLogger:
"""Simple NOOP logger for worker trainables."""
index = get_rank()
worker_dir = os.path.join(logdir, "worker_{}".format(index))
os.makedirs(worker_dir, exist_ok=True)
return NoopLogger(log_config, worker_dir)
class _HorovodTrainable(tune.Trainable):
"""Abstract Trainable class for Horovod."""
# Callable function for training.
_function = None
# Number of hosts (nodes) to allocate per trial
_num_hosts: int = 1
# Number of workers (slots) to place on each host.
_num_slots: int = 1
# Number of CPU resources to reserve for each worker.
_num_cpus_per_slot: int = 1
# Whether to reserve and pass GPU resources through.
_use_gpu: bool = False
    # bool: Whether the function has completed training
_finished: bool = False
# Horovod settings
_ssh_str: str = None
_ssh_identity_file: str = None
_timeout_s: int = 30
@property
def num_workers(self):
return self._num_hosts * self._num_slots
def setup(self, config: Dict):
trainable = wrap_function(self.__class__._function)
# We use a filelock here to ensure that the file-writing
# process is safe across different trainables.
if self._ssh_identity_file:
with FileLock(self._ssh_identity_file + ".lock"):
settings = RayExecutor.create_settings(
self._timeout_s, self._ssh_identity_file, self._ssh_str)
else:
settings = RayExecutor.create_settings(
self._timeout_s, self._ssh_identity_file, self._ssh_str)
self.executor = RayExecutor(
settings,
cpus_per_slot=self._num_cpus_per_slot,
use_gpu=self._use_gpu,
num_hosts=self._num_hosts,
num_slots=self._num_slots)
# We can't put `self` in the lambda closure, so we
# resolve the variable ahead of time.
logdir_ = str(self.logdir)
# Starts the workers as specified by the resources above.
self.executor.start(
executable_cls=trainable,
executable_kwargs={
"config": config,
"logger_creator": lambda cfg: logger_creator(cfg, logdir_)
})
def step(self) -> Dict:
if self._finished:
raise RuntimeError("Training has already finished.")
result = self.executor.execute(lambda w: w.step())[0]
if RESULT_DUPLICATE in result:
self._finished = True
return result
def save_checkpoint(self, checkpoint_dir: str) -> str:
# TODO: optimize if colocated
save_obj = self.executor.execute_single(lambda w: w.save_to_object())
checkpoint_path = TrainableUtil.create_from_pickle(
save_obj, checkpoint_dir)
return checkpoint_path
def load_checkpoint(self, checkpoint_dir: str):
checkpoint_obj = TrainableUtil.checkpoint_to_object(checkpoint_dir)
x_id = ray.put(checkpoint_obj)
return self.executor.execute(lambda w: w.restore_from_object(x_id))
def stop(self):
self.executor.execute(lambda w: w.stop())
self.executor.shutdown()
def DistributedTrainableCreator(
func: Callable,
use_gpu: bool = False,
num_hosts: int = 1,
num_slots: int = 1,
num_cpus_per_slot: int = 1,
timeout_s: int = 30,
replicate_pem: bool = False) -> Type[_HorovodTrainable]:
"""Converts Horovod functions to be executable by Tune.
Requires horovod > 0.19 to work.
This function wraps and sets the resources for a given Horovod
function to be used with Tune. It generates a Horovod Trainable (trial)
which can itself be a distributed training job. One basic assumption of
this implementation is that all sub-workers
of a trial will be placed evenly across different machines.
It is recommended that if `num_hosts` per trial > 1, you set
num_slots == the size (or number of GPUs) of a single host.
If num_hosts == 1, then you can set num_slots to be <=
the size (number of GPUs) of a single host.
    The above assumption can be relaxed; please file a feature request
on Github to inform the maintainers.
Another assumption is that this API requires gloo as the underlying
communication primitive. You will need to install Horovod with
`HOROVOD_WITH_GLOO` enabled.
*Fault Tolerance:* The trial workers themselves are not fault tolerant.
When a host of a trial fails, all workers of a trial are expected to
die, and the trial is expected to restart. This currently does not
support function checkpointing.
Args:
func (Callable[[dict], None]): A training function that takes in
a config dict for hyperparameters and should initialize
horovod via horovod.init.
        use_gpu (bool): Whether to allocate a GPU per worker.
num_cpus_per_slot (int): Number of CPUs to request
from Ray per worker.
num_hosts (int): Number of hosts that each trial is expected
to use.
num_slots (int): Number of slots (workers) to start on each host.
timeout_s (int): Seconds for Horovod rendezvous to timeout.
replicate_pem (bool): THIS MAY BE INSECURE. If true, this will
replicate the underlying Ray cluster ssh key across all hosts.
This may be useful if using the Ray Autoscaler.
Returns:
Trainable class that can be passed into `tune.run`.
Example:
.. code-block:: python
def train(config):
horovod.init()
horovod.allreduce()
from ray.tune.integration.horovod import DistributedTrainableCreator
trainable_cls = DistributedTrainableCreator(
train, num_hosts=1, num_slots=2, use_gpu=True)
tune.run(trainable_cls)
.. versionadded:: 1.0.0
"""
ssh_identity_file = None
sshkeystr = None
if replicate_pem:
from ray.tune.cluster_info import get_ssh_key
ssh_identity_file = get_ssh_key()
if os.path.exists(ssh_identity_file):
# For now, we assume that you're on a Ray cluster.
with open(ssh_identity_file) as f:
sshkeystr = f.read()
class WrappedHorovodTrainable(_HorovodTrainable):
_function = func
_num_hosts = num_hosts
_num_slots = num_slots
_num_cpus_per_slot = num_cpus_per_slot
_use_gpu = use_gpu
_ssh_identity_file = ssh_identity_file
_ssh_str = sshkeystr
_timeout_s = timeout_s
@classmethod
def default_resource_request(cls, config: Dict):
extra_gpu = int(num_hosts * num_slots) * int(use_gpu)
extra_cpu = int(num_hosts * num_slots * num_cpus_per_slot)
return Resources(
cpu=0,
gpu=0,
extra_cpu=extra_cpu,
extra_gpu=extra_gpu,
)
return WrappedHorovodTrainable
# pytest presents a bunch of serialization problems
# that force us to include mocks as part of the module.
def _train_simple(config: Dict):
import horovod.torch as hvd
hvd.init()
from ray import tune
for i in range(config.get("epochs", 2)):
import time
time.sleep(1)
tune.report(test=1, rank=hvd.rank())
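# Usage sketch for the mock above (assumes a running Ray cluster and Horovod
# built with HOROVOD_WITH_GLOO; resource numbers are illustrative):
#
#     trainable_cls = DistributedTrainableCreator(
#         _train_simple, num_hosts=1, num_slots=2, use_gpu=False)
#     tune.run(trainable_cls, config={"epochs": 2}, num_samples=1)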
|
{
"content_hash": "9948ef630655b16fa6fe9a95b3867f22",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 77,
"avg_line_length": 34.519650655021834,
"alnum_prop": 0.6376976597090449,
"repo_name": "robertnishihara/ray",
"id": "e3603de2b6e7bb941c32f6f714f659e203610759",
"size": "7905",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ray/tune/integration/horovod.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "82909"
},
{
"name": "C++",
"bytes": "3971373"
},
{
"name": "CSS",
"bytes": "8025"
},
{
"name": "Cython",
"bytes": "179979"
},
{
"name": "Dockerfile",
"bytes": "6468"
},
{
"name": "Go",
"bytes": "23139"
},
{
"name": "HTML",
"bytes": "30414"
},
{
"name": "Java",
"bytes": "1248954"
},
{
"name": "JavaScript",
"bytes": "444"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "2205"
},
{
"name": "Python",
"bytes": "6567694"
},
{
"name": "Shell",
"bytes": "102477"
},
{
"name": "Starlark",
"bytes": "231513"
},
{
"name": "TypeScript",
"bytes": "147793"
}
],
"symlink_target": ""
}
|
from oslo.config import cfg
from designate.central import rpcapi
cfg.CONF.register_group(cfg.OptGroup(
name='service:api', title="Configuration for API Service"
))
cfg.CONF.register_opts([
cfg.IntOpt('workers', default=None,
help='Number of worker processes to spawn'),
cfg.StrOpt('api-base-uri', default='http://127.0.0.1:9001/'),
cfg.StrOpt('api_host', default='0.0.0.0',
help='API Host'),
cfg.IntOpt('api_port', default=9001,
help='API Port Number'),
cfg.StrOpt('api_paste_config', default='api-paste.ini',
help='File name for the paste.deploy config for designate-api'),
cfg.StrOpt('auth_strategy', default='noauth',
help='The strategy to use for auth. Supports noauth or '
'keystone'),
cfg.BoolOpt('enable-api-v1', default=True),
cfg.BoolOpt('enable-api-v2', default=False),
], group='service:api')
CENTRAL_API = None
def get_central_api():
"""
    The rpc.get_client() call made during API object initialization
    raises an assertion error if designate.rpc.TRANSPORT hasn't been
    set up by rpc.init() beforehand.
    This fixes that by creating the rpcapi on demand.
"""
global CENTRAL_API
if not CENTRAL_API:
CENTRAL_API = rpcapi.CentralAPI()
return CENTRAL_API
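# Usage sketch (illustrative): rpc.init() must run before the first
# get_central_api() call, which is why client creation is deferred.
#
#     from designate import rpc
#     rpc.init(cfg.CONF)                   # sets up designate.rpc.TRANSPORT
#     central = get_central_api()          # builds the CentralAPI client once
#     assert central is get_central_api()  # later calls reuse the instance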
|
{
"content_hash": "92d9df1aff640585af5a627cf32796d9",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 79,
"avg_line_length": 33.7,
"alnum_prop": 0.6439169139465876,
"repo_name": "richm/designate",
"id": "93f20b6a05dababe3fa41a16ebbda0cf4cbd9fc7",
"size": "1974",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "designate/api/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1272656"
},
{
"name": "Shell",
"bytes": "3809"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import os
import subprocess
from pip.vcs import subversion, git, bazaar, mercurial
from pip.backwardcompat import urlretrieve
from tests.lib import path_to_url
if hasattr(subprocess, "check_call"):
subprocess_call = subprocess.check_call
else:
subprocess_call = subprocess.call
def _create_initools_repository(directory):
subprocess_call('svnadmin create INITools'.split(), cwd=directory)
def _dump_initools_repository(directory):
filename, _ = urlretrieve(
'http://bitbucket.org/hltbra/pip-initools-dump/raw/8b55c908a320/'
'INITools_modified.dump'
)
initools_folder = os.path.join(directory, 'INITools')
devnull = open(os.devnull, 'w')
dump = open(filename)
subprocess_call(
['svnadmin', 'load', initools_folder],
stdin=dump,
stdout=devnull,
)
dump.close()
devnull.close()
os.remove(filename)
def _create_svn_repository_for_initools(directory):
if not os.path.exists(os.path.join(directory, 'INITools')):
_create_initools_repository(directory)
_dump_initools_repository(directory)
def _get_vcs_and_checkout_url(remote_repository, directory):
vcs_classes = {'svn': subversion.Subversion,
'git': git.Git,
'bzr': bazaar.Bazaar,
'hg': mercurial.Mercurial}
default_vcs = 'svn'
if '+' not in remote_repository:
remote_repository = '%s+%s' % (default_vcs, remote_repository)
vcs, repository_path = remote_repository.split('+', 1)
vcs_class = vcs_classes[vcs]
branch = ''
if vcs == 'svn':
branch = os.path.basename(remote_repository)
# remove the slash
repository_name = os.path.basename(
remote_repository[:-len(branch) - 1]
)
else:
repository_name = os.path.basename(remote_repository)
destination_path = os.path.join(directory, repository_name)
if not os.path.exists(destination_path):
vcs_class(remote_repository).obtain(destination_path)
return '%s+%s' % (
vcs,
path_to_url('/'.join([directory, repository_name, branch])),
)
def local_checkout(remote_repo, directory):
if not os.path.exists(directory):
os.mkdir(directory)
#os.makedirs(directory)
if remote_repo.startswith('svn'):
_create_svn_repository_for_initools(directory)
return _get_vcs_and_checkout_url(remote_repo, directory)
def local_repo(remote_repo, directory):
return local_checkout(remote_repo, directory).split('+', 1)[1]
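# Example (hypothetical paths/URLs): mirror a remote repo locally and get a
# pip-style URL pointing at the checkout.
#
#     url = local_checkout('git+https://github.com/pypa/pip.git', '/tmp/repos')
#     # -> 'git+file:///tmp/repos/pip.git/'
#     path = local_repo('git+https://github.com/pypa/pip.git', '/tmp/repos')
#     # -> 'file:///tmp/repos/pip.git/'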
|
{
"content_hash": "64bbfa42761421f910f2892546595b57",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 73,
"avg_line_length": 30.75,
"alnum_prop": 0.6531165311653117,
"repo_name": "Ivoz/pip",
"id": "a64d456b3d773955c50330f91898ca9862161b9f",
"size": "2583",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/lib/local_repos.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2223441"
},
{
"name": "Shell",
"bytes": "4677"
}
],
"symlink_target": ""
}
|
import os
import ssl
from oslo.config import cfg
from messager.common.gettextutils import _
ssl_opts = [
cfg.StrOpt('ca_file',
default=None,
help="CA certificate file to use to verify "
"connecting clients"),
cfg.StrOpt('cert_file',
default=None,
help="Certificate file to use when starting "
"the server securely"),
cfg.StrOpt('key_file',
default=None,
help="Private key file to use when starting "
"the server securely"),
]
CONF = cfg.CONF
CONF.register_opts(ssl_opts, "ssl")
def is_enabled():
cert_file = CONF.ssl.cert_file
key_file = CONF.ssl.key_file
ca_file = CONF.ssl.ca_file
use_ssl = cert_file or key_file
if cert_file and not os.path.exists(cert_file):
raise RuntimeError(_("Unable to find cert_file : %s") % cert_file)
if ca_file and not os.path.exists(ca_file):
raise RuntimeError(_("Unable to find ca_file : %s") % ca_file)
if key_file and not os.path.exists(key_file):
raise RuntimeError(_("Unable to find key_file : %s") % key_file)
if use_ssl and (not cert_file or not key_file):
raise RuntimeError(_("When running server in SSL mode, you must "
"specify both a cert_file and key_file "
"option value in your configuration file"))
return use_ssl
def wrap(sock):
ssl_kwargs = {
'server_side': True,
'certfile': CONF.ssl.cert_file,
'keyfile': CONF.ssl.key_file,
'cert_reqs': ssl.CERT_NONE,
}
if CONF.ssl.ca_file:
ssl_kwargs['ca_certs'] = CONF.ssl.ca_file
ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED
return ssl.wrap_socket(sock, **ssl_kwargs)
_SSL_PROTOCOLS = {
"tlsv1": ssl.PROTOCOL_TLSv1,
"sslv23": ssl.PROTOCOL_SSLv23,
"sslv3": ssl.PROTOCOL_SSLv3
}
try:
_SSL_PROTOCOLS["sslv2"] = ssl.PROTOCOL_SSLv2
except AttributeError:
pass
def validate_ssl_version(version):
key = version.lower()
try:
return _SSL_PROTOCOLS[key]
except KeyError:
raise RuntimeError(_("Invalid SSL version : %s") % version)
|
{
"content_hash": "f3169a49e7c78bfc4c092f0fac66485b",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 74,
"avg_line_length": 26.55952380952381,
"alnum_prop": 0.5889735544598834,
"repo_name": "snowflying/messager",
"id": "ee931912783c054fa12571474a6c69daa7146446",
"size": "2833",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "messager/common/sslutils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "249353"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import json
import re
import numpy as np
from common import ENV_ACTIONS, ENV_BOARD, ENV_ENTITIES, \
ENV_BOARD_SHAPE, ENV_AGENT_NAMES
from MalmoPython import MissionSpec
from malmopy.environment.malmo import MalmoEnvironment, MalmoStateBuilder
import os
class PigChaseSymbolicStateBuilder(MalmoStateBuilder):
"""
This class build a symbolic representation of the current environment.
Generated states consist of a string array, with the name of the block/entities on the given block.
"""
def __init__(self, entities_override=True):
self._entities_override = bool(entities_override)
def build(self, environment):
"""
Return a symbolic view of the board
:param environment Reference to the pig chase environment
        :return (board, entities) where board is an array of shape (9, 9)
            with the block type / entity names for each coordinate, and
            entities is a list of the current entities
"""
assert isinstance(environment,
PigChaseEnvironment), 'environment is not a Pig Chase Environment instance'
world_obs = environment.world_observations
if world_obs is None or ENV_BOARD not in world_obs:
return None
# Generate symbolic view
board = np.array(world_obs[ENV_BOARD], dtype=object).reshape(
ENV_BOARD_SHAPE)
entities = world_obs[ENV_ENTITIES]
if self._entities_override:
for entity in entities:
board[int(entity['z'] + 1), int(entity['x'])] += '/' + entity[
'name']
return (board, entities)
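# Illustration (values hypothetical): each board cell holds the block type,
# with any entities appended after a slash, e.g.
#
#     board, entities = PigChaseSymbolicStateBuilder().build(env)
#     board[3, 4]          # -> 'grass/Pig'
#     entities[0]['name']  # -> 'Agent_1'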
class PigChaseTopDownStateBuilder(MalmoStateBuilder):
"""
Generate low-res RGB top-down view (equivalent to the symbolic view)
"""
RGB_PALETTE = {
'sand': [255, 225, 150],
'grass': [44, 176, 55],
'lapis_block': [190, 190, 255],
'Agent_1': [255, 0, 0],
'Agent_2': [0, 0, 255],
'Pig': [185, 126, 131]
}
GRAY_PALETTE = {
'sand': 255,
'grass': 200,
'lapis_block': 150,
'Agent_1': 100,
'Agent_2': 50,
'Pig': 0
}
def __init__(self, gray=True):
self._gray = bool(gray)
def build(self, environment):
world_obs = environment.world_observations
if world_obs is None or ENV_BOARD not in world_obs:
return None
# Generate symbolic view
board, entities = environment._internal_symbolic_builder.build(
environment)
palette = self.GRAY_PALETTE if self._gray else self.RGB_PALETTE
buffer_shape = (board.shape[0] * 2, board.shape[1] * 2)
if not self._gray:
buffer_shape = buffer_shape + (3,)
buffer = np.zeros(buffer_shape, dtype=np.float32)
it = np.nditer(board, flags=['multi_index', 'refs_ok'])
while not it.finished:
            entities_on_cell = str(board[it.multi_index]).split('/')
mapped_value = palette[entities_on_cell[0]]
# draw 4 pixels per block
buffer[it.multi_index[0] * 2:it.multi_index[0] * 2 + 2,
it.multi_index[1] * 2:it.multi_index[1] * 2 + 2] = mapped_value
it.iternext()
for agent in entities:
agent_x = int(agent['x'])
agent_z = int(agent['z']) + 1
agent_pattern = buffer[agent_z * 2:agent_z * 2 + 2,
agent_x * 2:agent_x * 2 + 2]
# convert Minecraft yaw to 0=north, 1=west etc.
agent_direction = ((((int(agent['yaw']) - 45) % 360) // 90) - 1) % 4
if agent_direction == 0:
# facing north
agent_pattern[1, 0:2] = palette[agent['name']]
agent_pattern[0, 0:2] += palette[agent['name']]
agent_pattern[0, 0:2] /= 2.
elif agent_direction == 1:
# west
agent_pattern[0:2, 1] = palette[agent['name']]
agent_pattern[0:2, 0] += palette[agent['name']]
agent_pattern[0:2, 0] /= 2.
elif agent_direction == 2:
# south
agent_pattern[0, 0:2] = palette[agent['name']]
agent_pattern[1, 0:2] += palette[agent['name']]
agent_pattern[1, 0:2] /= 2.
else:
# east
agent_pattern[0:2, 0] = palette[agent['name']]
agent_pattern[0:2, 1] += palette[agent['name']]
agent_pattern[0:2, 1] /= 2.
buffer[agent_z * 2:agent_z * 2 + 2,
agent_x * 2:agent_x * 2 + 2] = agent_pattern
return buffer / 255.
class PigChaseEnvironment(MalmoEnvironment):
"""
Represent the Pig chase with two agents and a pig. Agents can try to catch
the pig (high reward), or give up by leaving the pig pen (low reward).
"""
AGENT_TYPE_0 = 0
AGENT_TYPE_1 = 1
AGENT_TYPE_2 = 2
AGENT_TYPE_3 = 3
VALID_START_POSITIONS = [
(2.5, 1.5), (3.5, 1.5), (4.5, 1.5), (5.5, 1.5), (6.5, 1.5),
(2.5, 2.5), (4.5, 2.5), (6.5, 2.5),
(2.5, 3.5), (3.5, 3.5), (4.5, 3.5), (5.5, 3.5), (6.5, 3.5),
(2.5, 4.5), (4.5, 4.5), (6.5, 4.5),
(2.5, 5.5), (3.5, 5.5), (4.5, 5.5), (5.5, 5.5), (6.5, 5.5)
]
VALID_YAW = [0, 90, 180, 270]
def __init__(self, remotes,
state_builder,
actions=ENV_ACTIONS,
role=0, exp_name="",
human_speed=False, randomize_positions=False):
assert isinstance(state_builder, MalmoStateBuilder)
dir = os.path.dirname(__file__)
filename = os.path.join(dir, 'pig_chase.xml')
print(filename)
        with open(filename, 'r') as mission_file:
            self._mission_xml = mission_file.read()
# override tics per ms to play at human speed
if human_speed:
print('Setting mission to run at human speed')
            self._mission_xml = re.sub(r'<MsPerTick>\d+</MsPerTick>',
'<MsPerTick>50</MsPerTick>',
self._mission_xml)
super(PigChaseEnvironment, self).__init__(self._mission_xml, actions,
remotes, role, exp_name, True)
self._internal_symbolic_builder = PigChaseSymbolicStateBuilder()
self._user_defined_builder = state_builder
self._randomize_positions = randomize_positions
self._agent_type = None
@property
def state(self):
return self._user_defined_builder.build(self)
@property
def done(self):
"""
Done if we have caught the pig
"""
return super(PigChaseEnvironment, self).done
def _construct_mission(self):
# set agent helmet
original_helmet = "iron_helmet"
if self._role == 0:
original_helmet = "diamond_helmet"
new_helmet = original_helmet
if self._agent_type == PigChaseEnvironment.AGENT_TYPE_0:
new_helmet = "iron_helmet"
elif self._agent_type == PigChaseEnvironment.AGENT_TYPE_1:
new_helmet = "golden_helmet"
elif self._agent_type == PigChaseEnvironment.AGENT_TYPE_2:
new_helmet = "diamond_helmet"
elif self._agent_type == PigChaseEnvironment.AGENT_TYPE_3:
new_helmet = "leather_helmet"
xml = re.sub(r'type="%s"' % original_helmet,
r'type="%s"' % new_helmet, self._mission_xml)
# set agent starting pos
if self._randomize_positions and self._role == 0:
pos = [PigChaseEnvironment.VALID_START_POSITIONS[i]
for i in np.random.choice(
range(len(PigChaseEnvironment.VALID_START_POSITIONS)),
3, replace=False)]
while not (self._get_pos_dist(pos[0], pos[1]) > 1.1 and
self._get_pos_dist(pos[1], pos[2]) > 1.1 and
self._get_pos_dist(pos[0], pos[2]) > 1.1):
pos = [PigChaseEnvironment.VALID_START_POSITIONS[i]
for i in np.random.choice(
range(len(PigChaseEnvironment.VALID_START_POSITIONS)),
3, replace=False)]
xml = re.sub(r'<DrawEntity[^>]+>',
r'<DrawEntity x="%g" y="4" z="%g" type="Pig"/>' % pos[
0], xml)
xml = re.sub(
r'(<Name>%s</Name>\s*<AgentStart>\s*)<Placement[^>]+>' %
ENV_AGENT_NAMES[0],
r'\1<Placement x="%g" y="4" z="%g" pitch="30" yaw="%g"/>' %
(pos[1][0], pos[1][1],
np.random.choice(PigChaseEnvironment.VALID_YAW)), xml)
xml = re.sub(
r'(<Name>%s</Name>\s*<AgentStart>\s*)<Placement[^>]+>' %
ENV_AGENT_NAMES[1],
r'\1<Placement x="%g" y="4" z="%g" pitch="30" yaw="%g"/>' %
(pos[2][0], pos[2][1],
np.random.choice(PigChaseEnvironment.VALID_YAW)), xml)
return MissionSpec(xml, True)
def _get_pos_dist(self, pos1, pos2):
return np.sqrt((pos1[0] - pos2[0]) ** 2 + (pos1[1] - pos2[1]) ** 2)
def reset(self, agent_type=None, agent_positions=None):
""" Overrides reset() to allow changes in agent appearance between
missions."""
        if (agent_type and agent_type != self._agent_type) or \
                self._randomize_positions:
self._agent_type = agent_type
self._mission = self._construct_mission()
return super(PigChaseEnvironment, self).reset()
def do(self, action):
"""
        Perform the given action and return (state, reward, done)
"""
state, reward, done = super(PigChaseEnvironment, self).do(action)
return state, reward, self.done
def is_valid(self, world_state):
""" Pig Chase Environment is valid if the the board and entities are present """
if super(PigChaseEnvironment, self).is_valid(world_state):
obs = json.loads(world_state.observations[-1].text)
# Check we have entities
return (ENV_ENTITIES in obs) and (ENV_BOARD in obs)
return False
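# Illustrative wiring (a sketch, not part of the original file): build the
# environment with the symbolic state builder and take a few random actions.
# The client endpoint below is an assumption made for the example.
if __name__ == '__main__':
    import random
    builder = PigChaseSymbolicStateBuilder()
    env = PigChaseEnvironment([('127.0.0.1', 10000)], builder, role=0)
    obs = env.reset()
    for _ in range(5):
        obs, reward, done = env.do(random.randint(0, len(ENV_ACTIONS) - 1))
        if done:
            obs = env.reset()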
|
{
"content_hash": "5969b80d3aaf8f6387f2b9bf50d006d9",
"timestamp": "",
"source": "github",
"line_count": 272,
"max_line_length": 173,
"avg_line_length": 37.74264705882353,
"alnum_prop": 0.5349698032339762,
"repo_name": "village-people/flying-pig",
"id": "33484a02820ee9b1a6bcac1aa6dd08dc7a2daf4c",
"size": "11444",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ai_challenge/pig_chase/environment.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "370890"
},
{
"name": "Shell",
"bytes": "67"
},
{
"name": "XSLT",
"bytes": "372375"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import os
import sys
import unittest
import coverage
testfolder = os.path.abspath(os.path.dirname(__file__))
package_root = os.path.abspath(os.path.join(testfolder, "..", ".."))
sys.path.append(package_root)
# needs to be called before importing the modules
cov = coverage.coverage(branch = True, omit = os.path.join(package_root, 'pywinauto', '*tests', '*.py'))
cov.start()
import pywinauto
pywinauto.actionlogger.enable()
# increase timings for AppVeyor
#pywinauto.timings.Timings.app_start_timeout = 40
#pywinauto.timings.Timings.window_find_timeout = 50
modules_to_test = [pywinauto]
def run_tests():
    excludes = []  # e.g. ['test_sendkeys']
suite = unittest.TestSuite()
sys.path.append(testfolder)
    for root, dirs, files in os.walk(testfolder):
        test_modules = [
            f.replace('.py', '') for f in files if
            f.startswith('test_') and
            f.endswith('.py')]
        test_modules = [mod for mod in test_modules if mod.lower() not in excludes]
for mod in test_modules:
#globals().update(__import__(mod, globals(), locals()).__dict__)
# import it
imported_mod = __import__(mod, globals(), locals())
suite.addTests(
unittest.defaultTestLoader.loadTestsFromModule(imported_mod))
#unittest.main()#testRunner = runner)
#runner = unittest.TextTestRunner(verbosity = 2)
unittest.TextTestRunner(verbosity=1).run(suite)
cov.stop()
#print(cov.analysis())
print(cov.report())
cov.html_report(
directory = os.path.join(package_root, "Coverage_report"),
omit = [os.path.join(package_root, 'pywinauto', '*tests', '*.py'),
os.path.join(package_root, 'pywinauto', 'six.py'), ]
)
if __name__ == '__main__':
run_tests()
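    # Sketch (not part of the original script, assuming the standard
    # coverage.py API): also emit an XML coverage report for CI consumers,
    # alongside the HTML report already produced in run_tests().
    cov.xml_report(outfile=os.path.join(package_root, 'coverage.xml'),
                   omit=[os.path.join(package_root, 'pywinauto', '*tests', '*.py')])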
|
{
"content_hash": "69ba4cc803da08c504898981d985c8c8",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 104,
"avg_line_length": 29.196969696969695,
"alnum_prop": 0.6097560975609756,
"repo_name": "MagazinnikIvan/pywinauto",
"id": "5736c8d382b149f4566d273a86cdbede6cccf4c0",
"size": "3674",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pywinauto/unittests/testall.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "2360"
},
{
"name": "PowerShell",
"bytes": "5311"
},
{
"name": "Python",
"bytes": "1638638"
},
{
"name": "XSLT",
"bytes": "3485"
}
],
"symlink_target": ""
}
|
class CacheMixin(object):
'''
Tools to analyse direction of change for a given conditional.
'''
def store(self, field=None, nodes=None):
'''
        Store the cache for change analysis upon the next iteration.
'''
field = field or getattr(self, 'field')
nodes = nodes or getattr(self, 'get_nodes')()
if field is None or nodes is None:
return False
        __cache = self._get_cache()
        if field not in __cache:
            __cache[field] = []
        cnt = 0
        for node in nodes:
            written = self._write_cache(node, field)
            if written:
                cnt += 1
        self._set_cache(__cache)
        return len(nodes) == cnt
def _get_cache(self):
        '''
        Return a dict for use as a cache object.
        A new self.__cache = {} will be created if it
        does not exist.
        '''
        if not hasattr(self, '__cache'):
            self.__dict__['__cache'] = {}
        __cache = getattr(self, '__cache')
        return __cache
def _has_cache(self):
return hasattr(self, '__cache')
def _write_cache(self, node, field):
__cache = self._get_cache()
        try:
            # original value
            v = getattr(node, field)
        except AttributeError:
            return False
# original length
l = len(__cache[field])
# push value
__cache[field].append(v)
ll = len(__cache[field])
return (ll - 1) == l
def _set_cache(self, __cache):
'''
        Write an object as the cache dict.
'''
setattr(self, '__cache', __cache)
    def store_cache(self, key, level=-1):
        '''
        Return the cached values stored for the provided key.
        By default the most recent entry is returned (`level=-1`).
        '''
        __cache = self._get_cache()
        v = __cache.get(key)
        if v is not None:
            return v[level]
        return False
class Compare(object):
def __init__(self, condition=None):
self.condition = condition
def match(self, a, b):
return a == b
class CacheCompare(Compare, CacheMixin):
'''
A cache value is stored to compare against
'''
def match(self, a, b):
v = super(CacheCompare, self).match(a, b)
self.store(a)
return v
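if __name__ == '__main__':
    # Minimal demonstration (a sketch, not part of the original module).
    # store() pulls node values from get_nodes(), so the demo supplies a
    # tiny node object and a matching get_nodes() implementation.
    class Node(object):
        def __init__(self, value):
            self.state = value
    class DemoCompare(CacheCompare):
        def get_nodes(self):
            return [Node('on')]
    dc = DemoCompare()
    print(dc.match('state', 'state'))  # True; node values cached under 'state'
    print(dc.store_cache('state'))     # 'on' -- the most recent cached value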
|
{
"content_hash": "858bc33f3db8a0923408fb9fd57956b5",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 65,
"avg_line_length": 25.21875,
"alnum_prop": 0.5138372573316812,
"repo_name": "Strangemother/python-state-machine",
"id": "2a72520c32579d1594f6ffe0c2bd68e5c82a3d1e",
"size": "2421",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scratch/machine_2/core/compares/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "6992"
},
{
"name": "CSS",
"bytes": "723"
},
{
"name": "Makefile",
"bytes": "7422"
},
{
"name": "Python",
"bytes": "277923"
}
],
"symlink_target": ""
}
|
"""SCons.Tool.tlib
XXX
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/tlib.py 2014/09/27 12:51:43 garyo"
import SCons.Tool
import SCons.Tool.bcc32
import SCons.Util
def generate(env):
    """Add Builders and construction variables for tlib to an Environment."""
    SCons.Tool.bcc32.findIt('tlib', env)
    SCons.Tool.createStaticLibBuilder(env)
env['AR'] = 'tlib'
env['ARFLAGS'] = SCons.Util.CLVar('')
env['ARCOM'] = '$AR $TARGET $ARFLAGS /a $SOURCES'
env['LIBPREFIX'] = ''
env['LIBSUFFIX'] = '.lib'
def exists(env):
return SCons.Tool.bcc32.findIt('tlib', env)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
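# Illustrative usage (a sketch, not part of the original module): applying
# the tool to an Environment configures tlib as the static library archiver.
if __name__ == '__main__':
    from SCons.Environment import Environment
    env = Environment(tools=['tlib'])
    print(env['AR'], env['ARCOM'])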
|
{
"content_hash": "492cacd6e284ae9de87d494f8405e9df",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 75,
"avg_line_length": 34.54716981132076,
"alnum_prop": 0.72200983069361,
"repo_name": "unix1986/scons",
"id": "7eedee8649572677e39b5d49221c5a361d2b98f5",
"size": "1831",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "engine/SCons/Tool/tlib.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Groff",
"bytes": "537068"
},
{
"name": "Python",
"bytes": "1939355"
},
{
"name": "Shell",
"bytes": "1502"
}
],
"symlink_target": ""
}
|
import os
import sys
from ftplib import FTP
from tempfile import mkdtemp
from shutil import rmtree
from http.client import HTTPConnection
from urllib.parse import urlsplit
def get_temp_dir(work_dir,prefix='temp'):
'''
This function returns a temporary directory
'''
try:
temp_dir=mkdtemp(prefix=prefix,dir=work_dir)
return temp_dir
except Exception as e:
print('Error: %s' % e)
def clean_temp_dir(temp_dir):
    '''
    This function deletes a directory and all of its contents
    '''
if os.path.isdir(temp_dir):
try :
rmtree(temp_dir)
except Exception as e:
print('couldn\'t remove %s' % temp_dir)
else:
print('removed %s' % temp_dir)
def get_ftp_index(ftp_url='ftp.debian.org', dir='debian',index='README'):
    '''
    This function connects to an FTP server and retrieves a file
    '''
    with FTP(ftp_url) as ftp:
        ftp.login()
        ftp.cwd(dir)
        with open(index, 'wb') as fp:
            ftp.retrbinary('RETR ' + index, fp.write)
        ftp.quit()
def read_index_file(infile, f_header=[]):
    '''
    Read an index file and an optional list of fields to keep.
    Returns a list of dictionaries, one per data row.
    '''
    if len(f_header) == 0:
        f_header=['EXPERIMENT_ID','FILE_TYPE','SAMPLE_NAME','EXPERIMENT_TYPE','FILE','LIBRARY_STRATEGY']
    infile=os.path.abspath(infile)
    if not os.path.exists(infile):
        print('%s not found' % infile)
        sys.exit(2)
    with open(infile, 'r') as f:
        header=[]
        file_list=[]
        for i in f:
            row=i.rstrip('\n').split("\t")
            if header:
                filtered_dict=dict((k,v) for k,v in zip(header,row) if k in f_header)
                file_list.append(filtered_dict)
            else:
                header=row
        return file_list
def check_ftp_url(full_url):
    '''
    This function checks whether a URL is reachable by issuing an HTTP HEAD
    request and returns the HTTP response status code
    '''
url=urlsplit(full_url)
conn = HTTPConnection(url.netloc)
conn.request("HEAD",url.path)
res = conn.getresponse()
return res.status
if __name__=='__main__':
url='http://ftp.ebi.ac.uk/pub/databases/blueprint/data/homo_sapiens/GRCh38/Cell_Line/BL-2/Sporadic_Burkitt_lymphoma/ChIP-Seq/NCMLS/BL-2_c01.ERX297411.H3K4me1.bwa.GRCh38.20150528.bw'
code=check_ftp_url(url)
print(code)
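    # Additional sketch (not part of the original demo): exercise
    # read_index_file() on a tiny made-up TSV index written locally.
    sample='demo_index.tsv'
    with open(sample, 'w') as f:
        f.write('EXPERIMENT_ID\tFILE\n')
        f.write('EXP001\tdata/file1.bw\n')
    print(read_index_file(sample, f_header=['EXPERIMENT_ID','FILE']))
    os.remove(sample)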
|
{
"content_hash": "f0157b33205d50e2fdafbbd4b17af881",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 183,
"avg_line_length": 26.253012048192772,
"alnum_prop": 0.656264341441028,
"repo_name": "avikdatta/python_scripts",
"id": "8867f9cf124d784b9f64c9f2f903f4f0e4f074e5",
"size": "2203",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/ftp_index_method.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "1934"
},
{
"name": "Python",
"bytes": "60384"
}
],
"symlink_target": ""
}
|
tests = r"""
>>> from django.forms import *
>>> from django.forms.widgets import RadioFieldRenderer
>>> from django.utils.safestring import mark_safe
>>> from django.utils import formats
>>> import datetime
>>> import time
>>> import re
>>> try:
... from decimal import Decimal
... except ImportError:
... from django.utils._decimal import Decimal
>>> from django.utils.translation import activate, deactivate
>>> from django.conf import settings
###########
# Widgets #
###########
Each Widget class corresponds to an HTML form widget. A Widget knows how to
render itself, given a field name and some data. Widgets don't perform
validation.
# TextInput Widget ############################################################
>>> w = TextInput()
>>> w.render('email', '')
u'<input type="text" name="email" />'
>>> w.render('email', None)
u'<input type="text" name="email" />'
>>> w.render('email', 'test@example.com')
u'<input type="text" name="email" value="test@example.com" />'
>>> w.render('email', 'some "quoted" & ampersanded value')
u'<input type="text" name="email" value="some "quoted" & ampersanded value" />'
>>> w.render('email', 'test@example.com', attrs={'class': 'fun'})
u'<input type="text" name="email" value="test@example.com" class="fun" />'
# Note that doctest in Python 2.4 (and maybe 2.5?) doesn't support non-ascii
# characters in output, so we're displaying the repr() here.
>>> w.render('email', 'ŠĐĆŽćžšđ', attrs={'class': 'fun'})
u'<input type="text" name="email" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" class="fun" />'
You can also pass 'attrs' to the constructor:
>>> w = TextInput(attrs={'class': 'fun'})
>>> w.render('email', '')
u'<input type="text" class="fun" name="email" />'
>>> w.render('email', 'foo@example.com')
u'<input type="text" class="fun" value="foo@example.com" name="email" />'
'attrs' passed to render() get precedence over those passed to the constructor:
>>> w = TextInput(attrs={'class': 'pretty'})
>>> w.render('email', '', attrs={'class': 'special'})
u'<input type="text" class="special" name="email" />'
'attrs' can be safe-strings if needed
>>> w = TextInput(attrs={'onBlur': mark_safe("function('foo')")})
>>> print w.render('email', '')
<input onBlur="function('foo')" type="text" name="email" />
# PasswordInput Widget ############################################################
>>> w = PasswordInput()
>>> w.render('email', '')
u'<input type="password" name="email" />'
>>> w.render('email', None)
u'<input type="password" name="email" />'
>>> w.render('email', 'test@example.com')
u'<input type="password" name="email" value="test@example.com" />'
>>> w.render('email', 'some "quoted" & ampersanded value')
u'<input type="password" name="email" value="some "quoted" & ampersanded value" />'
>>> w.render('email', 'test@example.com', attrs={'class': 'fun'})
u'<input type="password" name="email" value="test@example.com" class="fun" />'
You can also pass 'attrs' to the constructor:
>>> w = PasswordInput(attrs={'class': 'fun'})
>>> w.render('email', '')
u'<input type="password" class="fun" name="email" />'
>>> w.render('email', 'foo@example.com')
u'<input type="password" class="fun" value="foo@example.com" name="email" />'
'attrs' passed to render() get precedence over those passed to the constructor:
>>> w = PasswordInput(attrs={'class': 'pretty'})
>>> w.render('email', '', attrs={'class': 'special'})
u'<input type="password" class="special" name="email" />'
>>> w.render('email', 'ŠĐĆŽćžšđ', attrs={'class': 'fun'})
u'<input type="password" class="fun" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" name="email" />'
The render_value argument lets you specify whether the widget should render
its value. You may want to do this for security reasons.
>>> w = PasswordInput(render_value=True)
>>> w.render('email', 'secret')
u'<input type="password" name="email" value="secret" />'
>>> w = PasswordInput(render_value=False)
>>> w.render('email', '')
u'<input type="password" name="email" />'
>>> w.render('email', None)
u'<input type="password" name="email" />'
>>> w.render('email', 'secret')
u'<input type="password" name="email" />'
>>> w = PasswordInput(attrs={'class': 'fun'}, render_value=False)
>>> w.render('email', 'secret')
u'<input type="password" class="fun" name="email" />'
# HiddenInput Widget ############################################################
>>> w = HiddenInput()
>>> w.render('email', '')
u'<input type="hidden" name="email" />'
>>> w.render('email', None)
u'<input type="hidden" name="email" />'
>>> w.render('email', 'test@example.com')
u'<input type="hidden" name="email" value="test@example.com" />'
>>> w.render('email', 'some "quoted" & ampersanded value')
u'<input type="hidden" name="email" value="some "quoted" & ampersanded value" />'
>>> w.render('email', 'test@example.com', attrs={'class': 'fun'})
u'<input type="hidden" name="email" value="test@example.com" class="fun" />'
You can also pass 'attrs' to the constructor:
>>> w = HiddenInput(attrs={'class': 'fun'})
>>> w.render('email', '')
u'<input type="hidden" class="fun" name="email" />'
>>> w.render('email', 'foo@example.com')
u'<input type="hidden" class="fun" value="foo@example.com" name="email" />'
'attrs' passed to render() get precedence over those passed to the constructor:
>>> w = HiddenInput(attrs={'class': 'pretty'})
>>> w.render('email', '', attrs={'class': 'special'})
u'<input type="hidden" class="special" name="email" />'
>>> w.render('email', 'ŠĐĆŽćžšđ', attrs={'class': 'fun'})
u'<input type="hidden" class="fun" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" name="email" />'
'attrs' passed to render() get precedence over those passed to the constructor:
>>> w = HiddenInput(attrs={'class': 'pretty'})
>>> w.render('email', '', attrs={'class': 'special'})
u'<input type="hidden" class="special" name="email" />'
Boolean values are rendered to their string forms ("True" and "False").
>>> w = HiddenInput()
>>> w.render('get_spam', False)
u'<input type="hidden" name="get_spam" value="False" />'
>>> w.render('get_spam', True)
u'<input type="hidden" name="get_spam" value="True" />'
# MultipleHiddenInput Widget ##################################################
>>> w = MultipleHiddenInput()
>>> w.render('email', [])
u''
>>> w.render('email', None)
u''
>>> w.render('email', ['test@example.com'])
u'<input type="hidden" name="email" value="test@example.com" />'
>>> w.render('email', ['some "quoted" & ampersanded value'])
u'<input type="hidden" name="email" value="some "quoted" & ampersanded value" />'
>>> w.render('email', ['test@example.com', 'foo@example.com'])
u'<input type="hidden" name="email" value="test@example.com" />\n<input type="hidden" name="email" value="foo@example.com" />'
>>> w.render('email', ['test@example.com'], attrs={'class': 'fun'})
u'<input type="hidden" name="email" value="test@example.com" class="fun" />'
>>> w.render('email', ['test@example.com', 'foo@example.com'], attrs={'class': 'fun'})
u'<input type="hidden" name="email" value="test@example.com" class="fun" />\n<input type="hidden" name="email" value="foo@example.com" class="fun" />'
You can also pass 'attrs' to the constructor:
>>> w = MultipleHiddenInput(attrs={'class': 'fun'})
>>> w.render('email', [])
u''
>>> w.render('email', ['foo@example.com'])
u'<input type="hidden" class="fun" value="foo@example.com" name="email" />'
>>> w.render('email', ['foo@example.com', 'test@example.com'])
u'<input type="hidden" class="fun" value="foo@example.com" name="email" />\n<input type="hidden" class="fun" value="test@example.com" name="email" />'
'attrs' passed to render() get precedence over those passed to the constructor:
>>> w = MultipleHiddenInput(attrs={'class': 'pretty'})
>>> w.render('email', ['foo@example.com'], attrs={'class': 'special'})
u'<input type="hidden" class="special" value="foo@example.com" name="email" />'
>>> w.render('email', ['ŠĐĆŽćžšđ'], attrs={'class': 'fun'})
u'<input type="hidden" class="fun" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" name="email" />'
'attrs' passed to render() get precedence over those passed to the constructor:
>>> w = MultipleHiddenInput(attrs={'class': 'pretty'})
>>> w.render('email', ['foo@example.com'], attrs={'class': 'special'})
u'<input type="hidden" class="special" value="foo@example.com" name="email" />'
Each input gets a separate ID.
>>> w = MultipleHiddenInput()
>>> w.render('letters', list('abc'), attrs={'id': 'hideme'})
u'<input type="hidden" name="letters" value="a" id="hideme_0" />\n<input type="hidden" name="letters" value="b" id="hideme_1" />\n<input type="hidden" name="letters" value="c" id="hideme_2" />'
# FileInput Widget ############################################################
FileInput widgets don't ever show the value, because the old value is of no use
if you are updating the form or if the provided file generated an error.
>>> w = FileInput()
>>> w.render('email', '')
u'<input type="file" name="email" />'
>>> w.render('email', None)
u'<input type="file" name="email" />'
>>> w.render('email', 'test@example.com')
u'<input type="file" name="email" />'
>>> w.render('email', 'some "quoted" & ampersanded value')
u'<input type="file" name="email" />'
>>> w.render('email', 'test@example.com', attrs={'class': 'fun'})
u'<input type="file" name="email" class="fun" />'
You can also pass 'attrs' to the constructor:
>>> w = FileInput(attrs={'class': 'fun'})
>>> w.render('email', '')
u'<input type="file" class="fun" name="email" />'
>>> w.render('email', 'foo@example.com')
u'<input type="file" class="fun" name="email" />'
>>> w.render('email', 'ŠĐĆŽćžšđ', attrs={'class': 'fun'})
u'<input type="file" class="fun" name="email" />'
Test for the behavior of _has_changed for FileInput. The value of data will
more than likely come from request.FILES. The value of initial data will
likely be a filename stored in the database. Since its value is of no use to
a FileInput it is ignored.
>>> w = FileInput()
# No file was uploaded and no initial data.
>>> w._has_changed(u'', None)
False
# A file was uploaded and no initial data.
>>> w._has_changed(u'', {'filename': 'resume.txt', 'content': 'My resume'})
True
# A file was not uploaded, but there is initial data
>>> w._has_changed(u'resume.txt', None)
False
# A file was uploaded and there is initial data (file identity is not dealt
# with here)
>>> w._has_changed('resume.txt', {'filename': 'resume.txt', 'content': 'My resume'})
True
# Textarea Widget #############################################################
>>> w = Textarea()
>>> w.render('msg', '')
u'<textarea rows="10" cols="40" name="msg"></textarea>'
>>> w.render('msg', None)
u'<textarea rows="10" cols="40" name="msg"></textarea>'
>>> w.render('msg', 'value')
u'<textarea rows="10" cols="40" name="msg">value</textarea>'
>>> w.render('msg', 'some "quoted" & ampersanded value')
u'<textarea rows="10" cols="40" name="msg">some "quoted" & ampersanded value</textarea>'
>>> w.render('msg', mark_safe('pre "quoted" value'))
u'<textarea rows="10" cols="40" name="msg">pre "quoted" value</textarea>'
>>> w.render('msg', 'value', attrs={'class': 'pretty', 'rows': 20})
u'<textarea class="pretty" rows="20" cols="40" name="msg">value</textarea>'
You can also pass 'attrs' to the constructor:
>>> w = Textarea(attrs={'class': 'pretty'})
>>> w.render('msg', '')
u'<textarea rows="10" cols="40" name="msg" class="pretty"></textarea>'
>>> w.render('msg', 'example')
u'<textarea rows="10" cols="40" name="msg" class="pretty">example</textarea>'
'attrs' passed to render() get precedence over those passed to the constructor:
>>> w = Textarea(attrs={'class': 'pretty'})
>>> w.render('msg', '', attrs={'class': 'special'})
u'<textarea rows="10" cols="40" name="msg" class="special"></textarea>'
>>> w.render('msg', 'ŠĐĆŽćžšđ', attrs={'class': 'fun'})
u'<textarea rows="10" cols="40" name="msg" class="fun">\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111</textarea>'
# CheckboxInput Widget ########################################################
>>> w = CheckboxInput()
>>> w.render('is_cool', '')
u'<input type="checkbox" name="is_cool" />'
>>> w.render('is_cool', None)
u'<input type="checkbox" name="is_cool" />'
>>> w.render('is_cool', False)
u'<input type="checkbox" name="is_cool" />'
>>> w.render('is_cool', True)
u'<input checked="checked" type="checkbox" name="is_cool" />'
Using any value that's not in ('', None, False, True) will check the checkbox
and set the 'value' attribute.
>>> w.render('is_cool', 'foo')
u'<input checked="checked" type="checkbox" name="is_cool" value="foo" />'
>>> w.render('is_cool', False, attrs={'class': 'pretty'})
u'<input type="checkbox" name="is_cool" class="pretty" />'
You can also pass 'attrs' to the constructor:
>>> w = CheckboxInput(attrs={'class': 'pretty'})
>>> w.render('is_cool', '')
u'<input type="checkbox" class="pretty" name="is_cool" />'
'attrs' passed to render() get precedence over those passed to the constructor:
>>> w = CheckboxInput(attrs={'class': 'pretty'})
>>> w.render('is_cool', '', attrs={'class': 'special'})
u'<input type="checkbox" class="special" name="is_cool" />'
You can pass 'check_test' to the constructor. This is a callable that takes the
value and returns True if the box should be checked.
>>> w = CheckboxInput(check_test=lambda value: value.startswith('hello'))
>>> w.render('greeting', '')
u'<input type="checkbox" name="greeting" />'
>>> w.render('greeting', 'hello')
u'<input checked="checked" type="checkbox" name="greeting" value="hello" />'
>>> w.render('greeting', 'hello there')
u'<input checked="checked" type="checkbox" name="greeting" value="hello there" />'
>>> w.render('greeting', 'hello & goodbye')
u'<input checked="checked" type="checkbox" name="greeting" value="hello & goodbye" />'
A subtlety: If the 'check_test' argument cannot handle a value and raises any
exception during its __call__, then the exception will be swallowed and the box
will not be checked. In this example, the 'check_test' assumes the value has a
startswith() method, which fails for the values True, False and None.
>>> w.render('greeting', True)
u'<input type="checkbox" name="greeting" />'
>>> w.render('greeting', False)
u'<input type="checkbox" name="greeting" />'
>>> w.render('greeting', None)
u'<input type="checkbox" name="greeting" />'
The CheckboxInput widget will return False if the key is not found in the data
dictionary (because HTML form submission doesn't send any result for unchecked
checkboxes).
>>> w.value_from_datadict({}, {}, 'testing')
False
>>> w._has_changed(None, None)
False
>>> w._has_changed(None, u'')
False
>>> w._has_changed(u'', None)
False
>>> w._has_changed(u'', u'')
False
>>> w._has_changed(False, u'on')
True
>>> w._has_changed(True, u'on')
False
>>> w._has_changed(True, u'')
True
# Select Widget ###############################################################
>>> w = Select()
>>> print w.render('beatle', 'J', choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo')))
<select name="beatle">
<option value="J" selected="selected">John</option>
<option value="P">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>
If the value is None, none of the options are selected:
>>> print w.render('beatle', None, choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo')))
<select name="beatle">
<option value="J">John</option>
<option value="P">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>
If the value corresponds to a label (but not to an option value), none of the options are selected:
>>> print w.render('beatle', 'John', choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo')))
<select name="beatle">
<option value="J">John</option>
<option value="P">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>
The value is compared to its str():
>>> print w.render('num', 2, choices=[('1', '1'), ('2', '2'), ('3', '3')])
<select name="num">
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
</select>
>>> print w.render('num', '2', choices=[(1, 1), (2, 2), (3, 3)])
<select name="num">
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
</select>
>>> print w.render('num', 2, choices=[(1, 1), (2, 2), (3, 3)])
<select name="num">
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
</select>
The 'choices' argument can be any iterable:
>>> from itertools import chain
>>> def get_choices():
... for i in range(5):
... yield (i, i)
>>> print w.render('num', 2, choices=get_choices())
<select name="num">
<option value="0">0</option>
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
<option value="4">4</option>
</select>
>>> things = ({'id': 1, 'name': 'And Boom'}, {'id': 2, 'name': 'One More Thing!'})
>>> class SomeForm(Form):
... somechoice = ChoiceField(choices=chain((('', '-'*9),), [(thing['id'], thing['name']) for thing in things]))
>>> f = SomeForm()
>>> f.as_table()
u'<tr><th><label for="id_somechoice">Somechoice:</label></th><td><select name="somechoice" id="id_somechoice">\n<option value="" selected="selected">---------</option>\n<option value="1">And Boom</option>\n<option value="2">One More Thing!</option>\n</select></td></tr>'
>>> f.as_table()
u'<tr><th><label for="id_somechoice">Somechoice:</label></th><td><select name="somechoice" id="id_somechoice">\n<option value="" selected="selected">---------</option>\n<option value="1">And Boom</option>\n<option value="2">One More Thing!</option>\n</select></td></tr>'
>>> f = SomeForm({'somechoice': 2})
>>> f.as_table()
u'<tr><th><label for="id_somechoice">Somechoice:</label></th><td><select name="somechoice" id="id_somechoice">\n<option value="">---------</option>\n<option value="1">And Boom</option>\n<option value="2" selected="selected">One More Thing!</option>\n</select></td></tr>'
You can also pass 'choices' to the constructor:
>>> w = Select(choices=[(1, 1), (2, 2), (3, 3)])
>>> print w.render('num', 2)
<select name="num">
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
</select>
If 'choices' is passed to both the constructor and render(), then they'll both be in the output:
>>> print w.render('num', 2, choices=[(4, 4), (5, 5)])
<select name="num">
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
</select>
# Choices are escaped correctly
>>> print w.render('escape', None, choices=(('bad', 'you & me'), ('good', mark_safe('you > me'))))
<select name="escape">
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="bad">you & me</option>
<option value="good">you > me</option>
</select>
# Unicode choices are correctly rendered as HTML
>>> w.render('email', 'ŠĐĆŽćžšđ', choices=[('ŠĐĆŽćžšđ', 'ŠĐabcĆŽćžšđ'), ('ćžšđ', 'abcćžšđ')])
u'<select name="email">\n<option value="1">1</option>\n<option value="2">2</option>\n<option value="3">3</option>\n<option value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" selected="selected">\u0160\u0110abc\u0106\u017d\u0107\u017e\u0161\u0111</option>\n<option value="\u0107\u017e\u0161\u0111">abc\u0107\u017e\u0161\u0111</option>\n</select>'
If choices is passed to the constructor and is a generator, it can be iterated
over multiple times without getting consumed:
>>> w = Select(choices=get_choices())
>>> print w.render('num', 2)
<select name="num">
<option value="0">0</option>
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
<option value="4">4</option>
</select>
>>> print w.render('num', 3)
<select name="num">
<option value="0">0</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3" selected="selected">3</option>
<option value="4">4</option>
</select>
Choices can be nested one level in order to create HTML optgroups:
>>> w.choices=(('outer1', 'Outer 1'), ('Group "1"', (('inner1', 'Inner 1'), ('inner2', 'Inner 2'))))
>>> print w.render('nestchoice', None)
<select name="nestchoice">
<option value="outer1">Outer 1</option>
<optgroup label="Group "1"">
<option value="inner1">Inner 1</option>
<option value="inner2">Inner 2</option>
</optgroup>
</select>
>>> print w.render('nestchoice', 'outer1')
<select name="nestchoice">
<option value="outer1" selected="selected">Outer 1</option>
<optgroup label="Group "1"">
<option value="inner1">Inner 1</option>
<option value="inner2">Inner 2</option>
</optgroup>
</select>
>>> print w.render('nestchoice', 'inner1')
<select name="nestchoice">
<option value="outer1">Outer 1</option>
<optgroup label="Group "1"">
<option value="inner1" selected="selected">Inner 1</option>
<option value="inner2">Inner 2</option>
</optgroup>
</select>
# NullBooleanSelect Widget ####################################################
>>> w = NullBooleanSelect()
>>> print w.render('is_cool', True)
<select name="is_cool">
<option value="1">Unknown</option>
<option value="2" selected="selected">Yes</option>
<option value="3">No</option>
</select>
>>> print w.render('is_cool', False)
<select name="is_cool">
<option value="1">Unknown</option>
<option value="2">Yes</option>
<option value="3" selected="selected">No</option>
</select>
>>> print w.render('is_cool', None)
<select name="is_cool">
<option value="1" selected="selected">Unknown</option>
<option value="2">Yes</option>
<option value="3">No</option>
</select>
>>> print w.render('is_cool', '2')
<select name="is_cool">
<option value="1">Unknown</option>
<option value="2" selected="selected">Yes</option>
<option value="3">No</option>
</select>
>>> print w.render('is_cool', '3')
<select name="is_cool">
<option value="1">Unknown</option>
<option value="2">Yes</option>
<option value="3" selected="selected">No</option>
</select>
>>> w._has_changed(False, None)
True
>>> w._has_changed(None, False)
True
>>> w._has_changed(None, None)
False
>>> w._has_changed(False, False)
False
>>> w._has_changed(True, False)
True
>>> w._has_changed(True, None)
True
>>> w._has_changed(True, True)
False
""" + \
r""" # [This concatenation is to keep the string below the jython's 32K limit].
# SelectMultiple Widget #######################################################
>>> w = SelectMultiple()
>>> print w.render('beatles', ['J'], choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo')))
<select multiple="multiple" name="beatles">
<option value="J" selected="selected">John</option>
<option value="P">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>
>>> print w.render('beatles', ['J', 'P'], choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo')))
<select multiple="multiple" name="beatles">
<option value="J" selected="selected">John</option>
<option value="P" selected="selected">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>
>>> print w.render('beatles', ['J', 'P', 'R'], choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo')))
<select multiple="multiple" name="beatles">
<option value="J" selected="selected">John</option>
<option value="P" selected="selected">Paul</option>
<option value="G">George</option>
<option value="R" selected="selected">Ringo</option>
</select>
If the value is None, none of the options are selected:
>>> print w.render('beatles', None, choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo')))
<select multiple="multiple" name="beatles">
<option value="J">John</option>
<option value="P">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>
If the value corresponds to a label (but not to an option value), none of the options are selected:
>>> print w.render('beatles', ['John'], choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo')))
<select multiple="multiple" name="beatles">
<option value="J">John</option>
<option value="P">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>
If multiple values are given, but some of them are not valid, the valid ones are selected:
>>> print w.render('beatles', ['J', 'G', 'foo'], choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo')))
<select multiple="multiple" name="beatles">
<option value="J" selected="selected">John</option>
<option value="P">Paul</option>
<option value="G" selected="selected">George</option>
<option value="R">Ringo</option>
</select>
The value is compared to its str():
>>> print w.render('nums', [2], choices=[('1', '1'), ('2', '2'), ('3', '3')])
<select multiple="multiple" name="nums">
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
</select>
>>> print w.render('nums', ['2'], choices=[(1, 1), (2, 2), (3, 3)])
<select multiple="multiple" name="nums">
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
</select>
>>> print w.render('nums', [2], choices=[(1, 1), (2, 2), (3, 3)])
<select multiple="multiple" name="nums">
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
</select>
The 'choices' argument can be any iterable:
>>> def get_choices():
... for i in range(5):
... yield (i, i)
>>> print w.render('nums', [2], choices=get_choices())
<select multiple="multiple" name="nums">
<option value="0">0</option>
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
<option value="4">4</option>
</select>
You can also pass 'choices' to the constructor:
>>> w = SelectMultiple(choices=[(1, 1), (2, 2), (3, 3)])
>>> print w.render('nums', [2])
<select multiple="multiple" name="nums">
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
</select>
If 'choices' is passed to both the constructor and render(), then they'll both be in the output:
>>> print w.render('nums', [2], choices=[(4, 4), (5, 5)])
<select multiple="multiple" name="nums">
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
</select>
# Choices are escaped correctly
>>> print w.render('escape', None, choices=(('bad', 'you & me'), ('good', mark_safe('you > me'))))
<select multiple="multiple" name="escape">
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="bad">you & me</option>
<option value="good">you > me</option>
</select>
# Unicode choices are correctly rendered as HTML
>>> w.render('nums', ['ŠĐĆŽćžšđ'], choices=[('ŠĐĆŽćžšđ', 'ŠĐabcĆŽćžšđ'), ('ćžšđ', 'abcćžšđ')])
u'<select multiple="multiple" name="nums">\n<option value="1">1</option>\n<option value="2">2</option>\n<option value="3">3</option>\n<option value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" selected="selected">\u0160\u0110abc\u0106\u017d\u0107\u017e\u0161\u0111</option>\n<option value="\u0107\u017e\u0161\u0111">abc\u0107\u017e\u0161\u0111</option>\n</select>'
# Test the usage of _has_changed
>>> w._has_changed(None, None)
False
>>> w._has_changed([], None)
False
>>> w._has_changed(None, [u'1'])
True
>>> w._has_changed([1, 2], [u'1', u'2'])
False
>>> w._has_changed([1, 2], [u'1'])
True
>>> w._has_changed([1, 2], [u'1', u'3'])
True
# Choices can be nested one level in order to create HTML optgroups:
>>> w.choices = (('outer1', 'Outer 1'), ('Group "1"', (('inner1', 'Inner 1'), ('inner2', 'Inner 2'))))
>>> print w.render('nestchoice', None)
<select multiple="multiple" name="nestchoice">
<option value="outer1">Outer 1</option>
<optgroup label="Group "1"">
<option value="inner1">Inner 1</option>
<option value="inner2">Inner 2</option>
</optgroup>
</select>
>>> print w.render('nestchoice', ['outer1'])
<select multiple="multiple" name="nestchoice">
<option value="outer1" selected="selected">Outer 1</option>
<optgroup label="Group "1"">
<option value="inner1">Inner 1</option>
<option value="inner2">Inner 2</option>
</optgroup>
</select>
>>> print w.render('nestchoice', ['inner1'])
<select multiple="multiple" name="nestchoice">
<option value="outer1">Outer 1</option>
<optgroup label="Group "1"">
<option value="inner1" selected="selected">Inner 1</option>
<option value="inner2">Inner 2</option>
</optgroup>
</select>
>>> print w.render('nestchoice', ['outer1', 'inner2'])
<select multiple="multiple" name="nestchoice">
<option value="outer1" selected="selected">Outer 1</option>
<optgroup label="Group "1"">
<option value="inner1">Inner 1</option>
<option value="inner2" selected="selected">Inner 2</option>
</optgroup>
</select>
# RadioSelect Widget ##########################################################
>>> w = RadioSelect()
>>> print w.render('beatle', 'J', choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo')))
<ul>
<li><label><input checked="checked" type="radio" name="beatle" value="J" /> John</label></li>
<li><label><input type="radio" name="beatle" value="P" /> Paul</label></li>
<li><label><input type="radio" name="beatle" value="G" /> George</label></li>
<li><label><input type="radio" name="beatle" value="R" /> Ringo</label></li>
</ul>
If the value is None, none of the options are checked:
>>> print w.render('beatle', None, choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo')))
<ul>
<li><label><input type="radio" name="beatle" value="J" /> John</label></li>
<li><label><input type="radio" name="beatle" value="P" /> Paul</label></li>
<li><label><input type="radio" name="beatle" value="G" /> George</label></li>
<li><label><input type="radio" name="beatle" value="R" /> Ringo</label></li>
</ul>
If the value corresponds to a label (but not to an option value), none of the options are checked:
>>> print w.render('beatle', 'John', choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo')))
<ul>
<li><label><input type="radio" name="beatle" value="J" /> John</label></li>
<li><label><input type="radio" name="beatle" value="P" /> Paul</label></li>
<li><label><input type="radio" name="beatle" value="G" /> George</label></li>
<li><label><input type="radio" name="beatle" value="R" /> Ringo</label></li>
</ul>
The value is compared to its str():
>>> print w.render('num', 2, choices=[('1', '1'), ('2', '2'), ('3', '3')])
<ul>
<li><label><input type="radio" name="num" value="1" /> 1</label></li>
<li><label><input checked="checked" type="radio" name="num" value="2" /> 2</label></li>
<li><label><input type="radio" name="num" value="3" /> 3</label></li>
</ul>
>>> print w.render('num', '2', choices=[(1, 1), (2, 2), (3, 3)])
<ul>
<li><label><input type="radio" name="num" value="1" /> 1</label></li>
<li><label><input checked="checked" type="radio" name="num" value="2" /> 2</label></li>
<li><label><input type="radio" name="num" value="3" /> 3</label></li>
</ul>
>>> print w.render('num', 2, choices=[(1, 1), (2, 2), (3, 3)])
<ul>
<li><label><input type="radio" name="num" value="1" /> 1</label></li>
<li><label><input checked="checked" type="radio" name="num" value="2" /> 2</label></li>
<li><label><input type="radio" name="num" value="3" /> 3</label></li>
</ul>
The 'choices' argument can be any iterable:
>>> def get_choices():
... for i in range(5):
... yield (i, i)
>>> print w.render('num', 2, choices=get_choices())
<ul>
<li><label><input type="radio" name="num" value="0" /> 0</label></li>
<li><label><input type="radio" name="num" value="1" /> 1</label></li>
<li><label><input checked="checked" type="radio" name="num" value="2" /> 2</label></li>
<li><label><input type="radio" name="num" value="3" /> 3</label></li>
<li><label><input type="radio" name="num" value="4" /> 4</label></li>
</ul>
You can also pass 'choices' to the constructor:
>>> w = RadioSelect(choices=[(1, 1), (2, 2), (3, 3)])
>>> print w.render('num', 2)
<ul>
<li><label><input type="radio" name="num" value="1" /> 1</label></li>
<li><label><input checked="checked" type="radio" name="num" value="2" /> 2</label></li>
<li><label><input type="radio" name="num" value="3" /> 3</label></li>
</ul>
If 'choices' is passed to both the constructor and render(), then they'll both be in the output:
>>> print w.render('num', 2, choices=[(4, 4), (5, 5)])
<ul>
<li><label><input type="radio" name="num" value="1" /> 1</label></li>
<li><label><input checked="checked" type="radio" name="num" value="2" /> 2</label></li>
<li><label><input type="radio" name="num" value="3" /> 3</label></li>
<li><label><input type="radio" name="num" value="4" /> 4</label></li>
<li><label><input type="radio" name="num" value="5" /> 5</label></li>
</ul>
RadioSelect uses a RadioFieldRenderer to render the individual radio inputs.
You can manipulate that object directly to customize the way the RadioSelect
is rendered.
>>> w = RadioSelect()
>>> r = w.get_renderer('beatle', 'J', choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo')))
>>> for inp in r:
... print inp
<label><input checked="checked" type="radio" name="beatle" value="J" /> John</label>
<label><input type="radio" name="beatle" value="P" /> Paul</label>
<label><input type="radio" name="beatle" value="G" /> George</label>
<label><input type="radio" name="beatle" value="R" /> Ringo</label>
>>> for inp in r:
... print '%s<br />' % inp
<label><input checked="checked" type="radio" name="beatle" value="J" /> John</label><br />
<label><input type="radio" name="beatle" value="P" /> Paul</label><br />
<label><input type="radio" name="beatle" value="G" /> George</label><br />
<label><input type="radio" name="beatle" value="R" /> Ringo</label><br />
>>> for inp in r:
... print '<p>%s %s</p>' % (inp.tag(), inp.choice_label)
<p><input checked="checked" type="radio" name="beatle" value="J" /> John</p>
<p><input type="radio" name="beatle" value="P" /> Paul</p>
<p><input type="radio" name="beatle" value="G" /> George</p>
<p><input type="radio" name="beatle" value="R" /> Ringo</p>
>>> for inp in r:
... print '%s %s %s %s %s' % (inp.name, inp.value, inp.choice_value, inp.choice_label, inp.is_checked())
beatle J J John True
beatle J P Paul False
beatle J G George False
beatle J R Ringo False
You can create your own custom renderers for RadioSelect to use.
>>> class MyRenderer(RadioFieldRenderer):
... def render(self):
... return u'<br />\n'.join([unicode(choice) for choice in self])
>>> w = RadioSelect(renderer=MyRenderer)
>>> print w.render('beatle', 'G', choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo')))
<label><input type="radio" name="beatle" value="J" /> John</label><br />
<label><input type="radio" name="beatle" value="P" /> Paul</label><br />
<label><input checked="checked" type="radio" name="beatle" value="G" /> George</label><br />
<label><input type="radio" name="beatle" value="R" /> Ringo</label>
Or you can use custom RadioSelect fields that use your custom renderer.
>>> class CustomRadioSelect(RadioSelect):
... renderer = MyRenderer
>>> w = CustomRadioSelect()
>>> print w.render('beatle', 'G', choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo')))
<label><input type="radio" name="beatle" value="J" /> John</label><br />
<label><input type="radio" name="beatle" value="P" /> Paul</label><br />
<label><input checked="checked" type="radio" name="beatle" value="G" /> George</label><br />
<label><input type="radio" name="beatle" value="R" /> Ringo</label>
A RadioFieldRenderer object also allows index access to individual RadioInput
objects.
>>> w = RadioSelect()
>>> r = w.get_renderer('beatle', 'J', choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo')))
>>> print r[1]
<label><input type="radio" name="beatle" value="P" /> Paul</label>
>>> print r[0]
<label><input checked="checked" type="radio" name="beatle" value="J" /> John</label>
>>> r[0].is_checked()
True
>>> r[1].is_checked()
False
>>> r[1].name, r[1].value, r[1].choice_value, r[1].choice_label
('beatle', u'J', u'P', u'Paul')
>>> r[10] # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
IndexError: list index out of range
# Choices are escaped correctly
>>> w = RadioSelect()
>>> print w.render('escape', None, choices=(('bad', 'you & me'), ('good', mark_safe('you > me'))))
<ul>
<li><label><input type="radio" name="escape" value="bad" /> you & me</label></li>
<li><label><input type="radio" name="escape" value="good" /> you > me</label></li>
</ul>
# Unicode choices are correctly rendered as HTML
>>> w = RadioSelect()
>>> unicode(w.render('email', 'ŠĐĆŽćžšđ', choices=[('ŠĐĆŽćžšđ', 'ŠĐabcĆŽćžšđ'), ('ćžšđ', 'abcćžšđ')]))
u'<ul>\n<li><label><input checked="checked" type="radio" name="email" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" /> \u0160\u0110abc\u0106\u017d\u0107\u017e\u0161\u0111</label></li>\n<li><label><input type="radio" name="email" value="\u0107\u017e\u0161\u0111" /> abc\u0107\u017e\u0161\u0111</label></li>\n</ul>'
# Attributes provided at instantiation are passed to the constituent inputs
>>> w = RadioSelect(attrs={'id':'foo'})
>>> print w.render('beatle', 'J', choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo')))
<ul>
<li><label for="foo_0"><input checked="checked" type="radio" id="foo_0" value="J" name="beatle" /> John</label></li>
<li><label for="foo_1"><input type="radio" id="foo_1" value="P" name="beatle" /> Paul</label></li>
<li><label for="foo_2"><input type="radio" id="foo_2" value="G" name="beatle" /> George</label></li>
<li><label for="foo_3"><input type="radio" id="foo_3" value="R" name="beatle" /> Ringo</label></li>
</ul>
# Attributes provided at render-time are passed to the constituent inputs
>>> w = RadioSelect()
>>> print w.render('beatle', 'J', choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo')), attrs={'id':'bar'})
<ul>
<li><label for="bar_0"><input checked="checked" type="radio" id="bar_0" value="J" name="beatle" /> John</label></li>
<li><label for="bar_1"><input type="radio" id="bar_1" value="P" name="beatle" /> Paul</label></li>
<li><label for="bar_2"><input type="radio" id="bar_2" value="G" name="beatle" /> George</label></li>
<li><label for="bar_3"><input type="radio" id="bar_3" value="R" name="beatle" /> Ringo</label></li>
</ul>
# CheckboxSelectMultiple Widget ###############################################
>>> w = CheckboxSelectMultiple()
>>> print w.render('beatles', ['J'], choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo')))
<ul>
<li><label><input checked="checked" type="checkbox" name="beatles" value="J" /> John</label></li>
<li><label><input type="checkbox" name="beatles" value="P" /> Paul</label></li>
<li><label><input type="checkbox" name="beatles" value="G" /> George</label></li>
<li><label><input type="checkbox" name="beatles" value="R" /> Ringo</label></li>
</ul>
>>> print w.render('beatles', ['J', 'P'], choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo')))
<ul>
<li><label><input checked="checked" type="checkbox" name="beatles" value="J" /> John</label></li>
<li><label><input checked="checked" type="checkbox" name="beatles" value="P" /> Paul</label></li>
<li><label><input type="checkbox" name="beatles" value="G" /> George</label></li>
<li><label><input type="checkbox" name="beatles" value="R" /> Ringo</label></li>
</ul>
>>> print w.render('beatles', ['J', 'P', 'R'], choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo')))
<ul>
<li><label><input checked="checked" type="checkbox" name="beatles" value="J" /> John</label></li>
<li><label><input checked="checked" type="checkbox" name="beatles" value="P" /> Paul</label></li>
<li><label><input type="checkbox" name="beatles" value="G" /> George</label></li>
<li><label><input checked="checked" type="checkbox" name="beatles" value="R" /> Ringo</label></li>
</ul>
If the value is None, none of the options are selected:
>>> print w.render('beatles', None, choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo')))
<ul>
<li><label><input type="checkbox" name="beatles" value="J" /> John</label></li>
<li><label><input type="checkbox" name="beatles" value="P" /> Paul</label></li>
<li><label><input type="checkbox" name="beatles" value="G" /> George</label></li>
<li><label><input type="checkbox" name="beatles" value="R" /> Ringo</label></li>
</ul>
If the value corresponds to a label (but not to an option value), none of the options are selected:
>>> print w.render('beatles', ['John'], choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo')))
<ul>
<li><label><input type="checkbox" name="beatles" value="J" /> John</label></li>
<li><label><input type="checkbox" name="beatles" value="P" /> Paul</label></li>
<li><label><input type="checkbox" name="beatles" value="G" /> George</label></li>
<li><label><input type="checkbox" name="beatles" value="R" /> Ringo</label></li>
</ul>
If multiple values are given, but some of them are not valid, the valid ones are selected:
>>> print w.render('beatles', ['J', 'G', 'foo'], choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo')))
<ul>
<li><label><input checked="checked" type="checkbox" name="beatles" value="J" /> John</label></li>
<li><label><input type="checkbox" name="beatles" value="P" /> Paul</label></li>
<li><label><input checked="checked" type="checkbox" name="beatles" value="G" /> George</label></li>
<li><label><input type="checkbox" name="beatles" value="R" /> Ringo</label></li>
</ul>
The value is compared to its str():
>>> print w.render('nums', [2], choices=[('1', '1'), ('2', '2'), ('3', '3')])
<ul>
<li><label><input type="checkbox" name="nums" value="1" /> 1</label></li>
<li><label><input checked="checked" type="checkbox" name="nums" value="2" /> 2</label></li>
<li><label><input type="checkbox" name="nums" value="3" /> 3</label></li>
</ul>
>>> print w.render('nums', ['2'], choices=[(1, 1), (2, 2), (3, 3)])
<ul>
<li><label><input type="checkbox" name="nums" value="1" /> 1</label></li>
<li><label><input checked="checked" type="checkbox" name="nums" value="2" /> 2</label></li>
<li><label><input type="checkbox" name="nums" value="3" /> 3</label></li>
</ul>
>>> print w.render('nums', [2], choices=[(1, 1), (2, 2), (3, 3)])
<ul>
<li><label><input type="checkbox" name="nums" value="1" /> 1</label></li>
<li><label><input checked="checked" type="checkbox" name="nums" value="2" /> 2</label></li>
<li><label><input type="checkbox" name="nums" value="3" /> 3</label></li>
</ul>
The 'choices' argument can be any iterable:
>>> def get_choices():
... for i in range(5):
... yield (i, i)
>>> print w.render('nums', [2], choices=get_choices())
<ul>
<li><label><input type="checkbox" name="nums" value="0" /> 0</label></li>
<li><label><input type="checkbox" name="nums" value="1" /> 1</label></li>
<li><label><input checked="checked" type="checkbox" name="nums" value="2" /> 2</label></li>
<li><label><input type="checkbox" name="nums" value="3" /> 3</label></li>
<li><label><input type="checkbox" name="nums" value="4" /> 4</label></li>
</ul>
You can also pass 'choices' to the constructor:
>>> w = CheckboxSelectMultiple(choices=[(1, 1), (2, 2), (3, 3)])
>>> print w.render('nums', [2])
<ul>
<li><label><input type="checkbox" name="nums" value="1" /> 1</label></li>
<li><label><input checked="checked" type="checkbox" name="nums" value="2" /> 2</label></li>
<li><label><input type="checkbox" name="nums" value="3" /> 3</label></li>
</ul>
If 'choices' is passed to both the constructor and render(), then they'll both be in the output:
>>> print w.render('nums', [2], choices=[(4, 4), (5, 5)])
<ul>
<li><label><input type="checkbox" name="nums" value="1" /> 1</label></li>
<li><label><input checked="checked" type="checkbox" name="nums" value="2" /> 2</label></li>
<li><label><input type="checkbox" name="nums" value="3" /> 3</label></li>
<li><label><input type="checkbox" name="nums" value="4" /> 4</label></li>
<li><label><input type="checkbox" name="nums" value="5" /> 5</label></li>
</ul>
# Choices are escaped correctly
>>> print w.render('escape', None, choices=(('bad', 'you & me'), ('good', mark_safe('you > me'))))
<ul>
<li><label><input type="checkbox" name="escape" value="1" /> 1</label></li>
<li><label><input type="checkbox" name="escape" value="2" /> 2</label></li>
<li><label><input type="checkbox" name="escape" value="3" /> 3</label></li>
<li><label><input type="checkbox" name="escape" value="bad" /> you & me</label></li>
<li><label><input type="checkbox" name="escape" value="good" /> you > me</label></li>
</ul>
# Test the usage of _has_changed
>>> w._has_changed(None, None)
False
>>> w._has_changed([], None)
False
>>> w._has_changed(None, [u'1'])
True
>>> w._has_changed([1, 2], [u'1', u'2'])
False
>>> w._has_changed([1, 2], [u'1'])
True
>>> w._has_changed([1, 2], [u'1', u'3'])
True
# Unicode choices are correctly rendered as HTML
>>> w.render('nums', ['ŠĐĆŽćžšđ'], choices=[('ŠĐĆŽćžšđ', 'ŠĐabcĆŽćžšđ'), ('ćžšđ', 'abcćžšđ')])
u'<ul>\n<li><label><input type="checkbox" name="nums" value="1" /> 1</label></li>\n<li><label><input type="checkbox" name="nums" value="2" /> 2</label></li>\n<li><label><input type="checkbox" name="nums" value="3" /> 3</label></li>\n<li><label><input checked="checked" type="checkbox" name="nums" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" /> \u0160\u0110abc\u0106\u017d\u0107\u017e\u0161\u0111</label></li>\n<li><label><input type="checkbox" name="nums" value="\u0107\u017e\u0161\u0111" /> abc\u0107\u017e\u0161\u0111</label></li>\n</ul>'
# Each input gets a separate ID
>>> print CheckboxSelectMultiple().render('letters', list('ac'), choices=zip(list('abc'), list('ABC')), attrs={'id': 'abc'})
<ul>
<li><label for="abc_0"><input checked="checked" type="checkbox" name="letters" value="a" id="abc_0" /> A</label></li>
<li><label for="abc_1"><input type="checkbox" name="letters" value="b" id="abc_1" /> B</label></li>
<li><label for="abc_2"><input checked="checked" type="checkbox" name="letters" value="c" id="abc_2" /> C</label></li>
</ul>
# MultiWidget #################################################################
>>> class MyMultiWidget(MultiWidget):
... def decompress(self, value):
... if value:
... return value.split('__')
... return ['', '']
... def format_output(self, rendered_widgets):
... return u'<br />'.join(rendered_widgets)
>>> w = MyMultiWidget(widgets=(TextInput(attrs={'class': 'big'}), TextInput(attrs={'class': 'small'})))
>>> w.render('name', ['john', 'lennon'])
u'<input type="text" class="big" value="john" name="name_0" /><br /><input type="text" class="small" value="lennon" name="name_1" />'
>>> w.render('name', 'john__lennon')
u'<input type="text" class="big" value="john" name="name_0" /><br /><input type="text" class="small" value="lennon" name="name_1" />'
>>> w.render('name', 'john__lennon', attrs={'id':'foo'})
u'<input id="foo_0" type="text" class="big" value="john" name="name_0" /><br /><input id="foo_1" type="text" class="small" value="lennon" name="name_1" />'
>>> w = MyMultiWidget(widgets=(TextInput(attrs={'class': 'big'}), TextInput(attrs={'class': 'small'})), attrs={'id': 'bar'})
>>> w.render('name', ['john', 'lennon'])
u'<input id="bar_0" type="text" class="big" value="john" name="name_0" /><br /><input id="bar_1" type="text" class="small" value="lennon" name="name_1" />'
>>> w = MyMultiWidget(widgets=(TextInput(), TextInput()))
# test with no initial data
>>> w._has_changed(None, [u'john', u'lennon'])
True
# test when the data is the same as initial
>>> w._has_changed(u'john__lennon', [u'john', u'lennon'])
False
# test when the first widget's data has changed
>>> w._has_changed(u'john__lennon', [u'alfred', u'lennon'])
True
# test when the last widget's data has changed. This ensures that it is not
# short-circuiting while testing the widgets.
>>> w._has_changed(u'john__lennon', [u'john', u'denver'])
True
# SplitDateTimeWidget #########################################################
>>> w = SplitDateTimeWidget()
>>> w.render('date', '')
u'<input type="text" name="date_0" /><input type="text" name="date_1" />'
>>> w.render('date', None)
u'<input type="text" name="date_0" /><input type="text" name="date_1" />'
>>> w.render('date', datetime.datetime(2006, 1, 10, 7, 30))
u'<input type="text" name="date_0" value="2006-01-10" /><input type="text" name="date_1" value="07:30:00" />'
>>> w.render('date', [datetime.date(2006, 1, 10), datetime.time(7, 30)])
u'<input type="text" name="date_0" value="2006-01-10" /><input type="text" name="date_1" value="07:30:00" />'
You can also pass 'attrs' to the constructor. In this case, the attrs will be
included on both widgets.
>>> w = SplitDateTimeWidget(attrs={'class': 'pretty'})
>>> w.render('date', datetime.datetime(2006, 1, 10, 7, 30))
u'<input type="text" class="pretty" value="2006-01-10" name="date_0" /><input type="text" class="pretty" value="07:30:00" name="date_1" />'
Use 'date_format' and 'time_format' to change the way a value is displayed.
>>> w = SplitDateTimeWidget(date_format='%d/%m/%Y', time_format='%H:%M')
>>> w.render('date', datetime.datetime(2006, 1, 10, 7, 30))
u'<input type="text" name="date_0" value="10/01/2006" /><input type="text" name="date_1" value="07:30" />'
>>> w._has_changed(datetime.datetime(2008, 5, 6, 12, 40, 00), [u'2008-05-06', u'12:40:00'])
True
>>> w._has_changed(datetime.datetime(2008, 5, 6, 12, 40, 00), [u'06/05/2008', u'12:40'])
False
>>> w._has_changed(datetime.datetime(2008, 5, 6, 12, 40, 00), [u'06/05/2008', u'12:41'])
True
>>> activate('de-at')
>>> settings.USE_L10N = True
>>> w._has_changed(datetime.datetime(2008, 5, 6, 12, 40, 00), [u'06.05.2008', u'12:41'])
True
>>> deactivate()
>>> settings.USE_L10N = False
# DateTimeInput ###############################################################
>>> w = DateTimeInput()
>>> w.render('date', None)
u'<input type="text" name="date" />'
>>> d = datetime.datetime(2007, 9, 17, 12, 51, 34, 482548)
>>> print d
2007-09-17 12:51:34.482548
The microseconds are trimmed on display, by default.
>>> w.render('date', d)
u'<input type="text" name="date" value="2007-09-17 12:51:34" />'
>>> w.render('date', datetime.datetime(2007, 9, 17, 12, 51, 34))
u'<input type="text" name="date" value="2007-09-17 12:51:34" />'
>>> w.render('date', datetime.datetime(2007, 9, 17, 12, 51))
u'<input type="text" name="date" value="2007-09-17 12:51:00" />'
>>> activate('de-at')
>>> settings.USE_L10N = True
>>> w.render('date', d)
u'<input type="text" name="date" value="17.09.2007 12:51:34" />'
>>> deactivate()
>>> settings.USE_L10N = False
Use 'format' to change the way a value is displayed.
>>> w = DateTimeInput(format='%d/%m/%Y %H:%M')
>>> w.render('date', d)
u'<input type="text" name="date" value="17/09/2007 12:51" />'
>>> w._has_changed(d, '17/09/2007 12:51')
False
Make sure a custom format works with _has_changed. The hidden input will use
format.localize_input to display the initial value.
>>> data = datetime.datetime(2010, 3, 6, 12, 0, 0)
>>> custom_format = '%d.%m.%Y %H:%M'
>>> w = DateTimeInput(format=custom_format)
>>> w._has_changed(formats.localize_input(data), data.strftime(custom_format))
False
# DateInput ###################################################################
>>> w = DateInput()
>>> w.render('date', None)
u'<input type="text" name="date" />'
>>> d = datetime.date(2007, 9, 17)
>>> print d
2007-09-17
>>> w.render('date', d)
u'<input type="text" name="date" value="2007-09-17" />'
>>> w.render('date', datetime.date(2007, 9, 17))
u'<input type="text" name="date" value="2007-09-17" />'
We should be able to initialize from a unicode value.
>>> w.render('date', u'2007-09-17')
u'<input type="text" name="date" value="2007-09-17" />'
>>> activate('de-at')
>>> settings.USE_L10N = True
>>> w.render('date', d)
u'<input type="text" name="date" value="17.09.2007" />'
>>> deactivate()
>>> settings.USE_L10N = False
Use 'format' to change the way a value is displayed.
>>> w = DateInput(format='%d/%m/%Y')
>>> w.render('date', d)
u'<input type="text" name="date" value="17/09/2007" />'
>>> w._has_changed(d, '17/09/2007')
False
Make sure a custom format works with _has_changed. The hidden input will use
format.localize_input to display the initial value.
>>> data = datetime.date(2010, 3, 6)
>>> custom_format = '%d.%m.%Y'
>>> w = DateInput(format=custom_format)
>>> w._has_changed(formats.localize_input(data), data.strftime(custom_format))
False
# TimeInput ###################################################################
>>> w = TimeInput()
>>> w.render('time', None)
u'<input type="text" name="time" />'
>>> t = datetime.time(12, 51, 34, 482548)
>>> print t
12:51:34.482548
The microseconds are trimmed on display, by default.
>>> w.render('time', t)
u'<input type="text" name="time" value="12:51:34" />'
>>> w.render('time', datetime.time(12, 51, 34))
u'<input type="text" name="time" value="12:51:34" />'
>>> w.render('time', datetime.time(12, 51))
u'<input type="text" name="time" value="12:51:00" />'
We should be able to initialize from a unicode value.
>>> w.render('time', u'13:12:11')
u'<input type="text" name="time" value="13:12:11" />'
>>> activate('de-at')
>>> settings.USE_L10N = True
>>> w.render('date', d)
u'<input type="text" name="date" value="17.09.2007" />'
>>> deactivate()
>>> settings.USE_L10N = False
Use 'format' to change the way a value is displayed.
>>> w = TimeInput(format='%H:%M')
>>> w.render('time', t)
u'<input type="text" name="time" value="12:51" />'
>>> w._has_changed(t, '12:51')
False
Make sure a custom format works with _has_changed. The hidden input will use
format.localize_input to display the initial value.
>>> data = datetime.time(13, 0)
>>> custom_format = '%I:%M %p'
>>> w = TimeInput(format=custom_format)
>>> w._has_changed(formats.localize_input(data), data.strftime(custom_format))
False
# SplitHiddenDateTimeWidget ###################################################
>>> from django.forms.widgets import SplitHiddenDateTimeWidget
>>> w = SplitHiddenDateTimeWidget()
>>> w.render('date', '')
u'<input type="hidden" name="date_0" /><input type="hidden" name="date_1" />'
>>> d = datetime.datetime(2007, 9, 17, 12, 51, 34, 482548)
>>> print d
2007-09-17 12:51:34.482548
>>> w.render('date', d)
u'<input type="hidden" name="date_0" value="2007-09-17" /><input type="hidden" name="date_1" value="12:51:34" />'
>>> w.render('date', datetime.datetime(2007, 9, 17, 12, 51, 34))
u'<input type="hidden" name="date_0" value="2007-09-17" /><input type="hidden" name="date_1" value="12:51:34" />'
>>> w.render('date', datetime.datetime(2007, 9, 17, 12, 51))
u'<input type="hidden" name="date_0" value="2007-09-17" /><input type="hidden" name="date_1" value="12:51:00" />'
>>> activate('de-at')
>>> settings.USE_L10N = True
>>> w.render('date', datetime.datetime(2007, 9, 17, 12, 51))
u'<input type="hidden" name="date_0" value="17.09.2007" /><input type="hidden" name="date_1" value="12:51:00" />'
>>> deactivate()
>>> settings.USE_L10N = False
"""
from django.utils import copycompat as copy
from unittest import TestCase
from django import forms
class SelectAndTextWidget(forms.MultiWidget):
"""
MultiWidget subclass
"""
def __init__(self, choices=[]):
widgets = [
forms.RadioSelect(choices=choices),
forms.TextInput
]
super(SelectAndTextWidget, self).__init__(widgets)
def _set_choices(self, choices):
"""
When choices are set for this widget, we want to pass those along to the Select widget
"""
self.widgets[0].choices = choices
def _get_choices(self):
"""
The choices for this widget are the Select widget's choices
"""
return self.widgets[0].choices
choices = property(_get_choices, _set_choices)
class WidgetTests(TestCase):
def test_12048(self):
# See ticket #12048.
w1 = SelectAndTextWidget(choices=[1,2,3])
w2 = copy.deepcopy(w1)
w2.choices = [4,5,6]
# w2 ought to be independent of w1, since MultiWidget ought
# to make a copy of its sub-widgets when it is copied.
self.assertEqual(w1.choices, [1,2,3])
|
{
"content_hash": "222defdd0a6d982a43f1667bc3d5dd93",
"timestamp": "",
"source": "github",
"line_count": 1310,
"max_line_length": 553,
"avg_line_length": 43.264885496183204,
"alnum_prop": 0.6282619051819962,
"repo_name": "t11e/django",
"id": "cc83a888cf12ba2d2276730a66a80cc90d4dac67",
"size": "56877",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/regressiontests/forms/widgets.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "164056"
},
{
"name": "Python",
"bytes": "5708871"
},
{
"name": "Shell",
"bytes": "3459"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import requests
import time
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
class DruidHook(BaseHook):
"""
Connection to Druid
:param druid_ingest_conn_id: The connection id to the Druid overlord machine which accepts index jobs
:type druid_ingest_conn_id: string
:param timeout: The interval between polling the Druid job for the status of the ingestion job
:type timeout: int
:param max_ingestion_time: The maximum ingestion time before assuming the job failed
:type max_ingestion_time: int
"""
def __init__(
self,
druid_ingest_conn_id='druid_ingest_default',
timeout=1,
max_ingestion_time=18000):
self.druid_ingest_conn_id = druid_ingest_conn_id
self.timeout = timeout
self.max_ingestion_time = max_ingestion_time
self.header = {'content-type': 'application/json'}
def get_conn_url(self):
conn = self.get_connection(self.druid_ingest_conn_id)
host = conn.host
port = conn.port
schema = conn.extra_dejson.get('schema', 'http')
endpoint = conn.extra_dejson.get('endpoint', '')
return "http://{host}:{port}/{endpoint}".format(**locals())
def submit_indexing_job(self, json_index_spec):
url = self.get_conn_url()
req_index = requests.post(url, data=json_index_spec, headers=self.header)
if (req_index.status_code != 200):
raise AirflowException("Did not get 200 when submitting the Druid job to {}".format(url))
req_json = req_index.json()
# Wait until the job is completed
druid_task_id = req_json['task']
running = True
sec = 0
while running:
req_status = requests.get("{0}/{1}/status".format(url, druid_task_id))
self.log.info("Job still running for %s seconds...", sec)
sec = sec + 1
if sec > self.max_ingestion_time:
# ensure that the job gets killed if the max ingestion time is exceeded
requests.post("{0}/{1}/shutdown".format(url, druid_task_id))
                raise AirflowException('Druid ingestion took more than %s seconds' % self.max_ingestion_time)
time.sleep(self.timeout)
status = req_status.json()['status']['status']
if status == 'RUNNING':
running = True
elif status == 'SUCCESS':
running = False # Great success!
elif status == 'FAILED':
raise AirflowException('Druid indexing job failed, check console for more info')
else:
                raise AirflowException('Could not get status of the job, got %s' % status)
self.log.info('Successful index')
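# A minimal usage sketch, not part of the original module. It assumes an
# Airflow connection named 'druid_ingest_default' pointing at a Druid
# overlord, and `index_spec_json`, a JSON index-spec string; both names are
# hypothetical and only illustrate the call sequence:
#
#     hook = DruidHook(druid_ingest_conn_id='druid_ingest_default',
#                      timeout=5, max_ingestion_time=3600)
#     hook.submit_indexing_job(index_spec_json)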
|
{
"content_hash": "c58a27569119ab0053fa141fd08bff71",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 108,
"avg_line_length": 36.3974358974359,
"alnum_prop": 0.6114829165199014,
"repo_name": "cjqian/incubator-airflow",
"id": "655f66688d16bc4cce799ecd1bd2b9ce720c9c13",
"size": "3406",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "airflow/hooks/druid_hook.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "57054"
},
{
"name": "HTML",
"bytes": "152247"
},
{
"name": "JavaScript",
"bytes": "1364571"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "2609609"
},
{
"name": "Shell",
"bytes": "21139"
}
],
"symlink_target": ""
}
|
from datetime import date, datetime
from typing import Dict, List, Set, Tuple, Union
try:
from typing import TypedDict
except ImportError:
from typing_extensions import TypedDict
from robot.api.deco import not_keyword
not_keyword(TypedDict)
class Number:
pass
def string_to_int(value: str) -> int:
try:
return ['zero', 'one', 'two', 'three', 'four'].index(value.lower())
except ValueError:
raise ValueError(f"Don't know number {value!r}.")
def parse_bool(value: Union[str, int, bool]):
if isinstance(value, str):
value = value.lower()
return value not in ['false', '', 'epätosi', '\u2639', False, 0]
class UsDate(date):
@classmethod
def from_string(cls, value) -> date:
if not isinstance(value, str):
raise TypeError("Only strings accepted!")
try:
return cls.fromordinal(datetime.strptime(value, '%m/%d/%Y').toordinal())
except ValueError:
raise ValueError("Value does not match '%m/%d/%Y'.")
class FiDate(date):
@classmethod
def from_string(cls, value: str, ign1=None, *ign2, ign3=None, **ign4):
try:
return cls.fromordinal(datetime.strptime(value, '%d.%m.%Y').toordinal())
except ValueError:
raise RuntimeError("Value does not match '%d.%m.%Y'.")
class ClassAsConverter:
def __init__(self, name):
self.greeting = f'Hello, {name}!'
class ClassWithHintsAsConverter:
name: str
def __init__(self, value: Union[int, str]):
self.value = value
class AcceptSubscriptedGenerics:
def __init__(self, numbers: List[int]):
self.sum = sum(numbers)
class Strict:
pass
class Invalid:
pass
class TooFewArgs:
pass
class TooManyArgs:
def __init__(self, one, two):
pass
class NoPositionalArg:
def __init__(self, *varargs):
pass
class KwOnlyNotOk:
def __init__(self, arg, *, kwo, another):
pass
ROBOT_LIBRARY_CONVERTERS = {Number: string_to_int,
bool: parse_bool,
UsDate: UsDate.from_string,
FiDate: FiDate.from_string,
ClassAsConverter: ClassAsConverter,
ClassWithHintsAsConverter: ClassWithHintsAsConverter,
AcceptSubscriptedGenerics: AcceptSubscriptedGenerics,
Strict: None,
Invalid: 666,
TooFewArgs: TooFewArgs,
TooManyArgs: TooManyArgs,
NoPositionalArg: NoPositionalArg,
KwOnlyNotOk: KwOnlyNotOk,
'Bad': int}
def number(argument: Number, expected: int = 0):
if argument != expected:
raise AssertionError(f'Expected value to be {expected!r}, got {argument!r}.')
def true(argument: bool):
assert argument is True
def false(argument: bool):
assert argument is False
def us_date(argument: UsDate, expected: date = None):
assert argument == expected
def fi_date(argument: FiDate, expected: date = None):
assert argument == expected
def dates(us: 'UsDate', fi: 'FiDate'):
assert us == fi
def class_as_converter(argument: ClassAsConverter, expected):
assert argument.greeting == expected
def class_with_hints_as_converter(argument: ClassWithHintsAsConverter, expected=None):
assert argument.value == expected
def accept_subscripted_generics(argument: AcceptSubscriptedGenerics, expected):
assert argument.sum == expected
def with_generics(a: List[Number], b: Tuple[FiDate, UsDate], c: Dict[Number, FiDate], d: Set[Number]):
expected_date = date(2022, 9, 28)
assert a == [1, 2, 3], a
assert b == (expected_date, expected_date), b
assert c == {1: expected_date}, c
assert d == {1, 2, 3}, d
def typeddict(dates: TypedDict('Dates', {'fi': FiDate, 'us': UsDate})):
fi, us = dates['fi'], dates['us']
exp = date(2022, 9, 29)
assert isinstance(fi, FiDate) and isinstance(us, UsDate) and fi == us == exp
def number_or_int(number: Union[Number, int]):
assert number == 1
def int_or_number(number: Union[int, Number]):
assert number == 1
def strict(argument: Strict):
assert isinstance(argument, Strict)
def invalid(a: Invalid, b: TooFewArgs, c: TooManyArgs, d: KwOnlyNotOk):
assert (a, b, c, d) == ('a', 'b', 'c', 'd')
def non_type_annotation(arg1: 'Hello, world!', arg2: 2 = 2):
assert arg1 == arg2
|
{
"content_hash": "503ed42ce6d3d48181684356ee5a56af",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 102,
"avg_line_length": 25.424581005586592,
"alnum_prop": 0.6060206548011426,
"repo_name": "robotframework/robotframework",
"id": "3102d98cf2909fc7da71afca9856a6e34c7a2f40",
"size": "4552",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "atest/testdata/keywords/type_conversion/CustomConverters.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "44632"
},
{
"name": "HTML",
"bytes": "86871"
},
{
"name": "JavaScript",
"bytes": "162950"
},
{
"name": "Python",
"bytes": "2764220"
},
{
"name": "RobotFramework",
"bytes": "1260097"
}
],
"symlink_target": ""
}
|
import hues
from django.core.management.base import BaseCommand
from elasticsearch_dsl.connections import connections
from elasticsearch_flex.indexes import registered_indices
class Command(BaseCommand):
help = 'Sync search indices, templates, and scripts.'
def add_arguments(self, parser):
parser.add_argument(
'--delete',
action='store_true',
dest='delete',
default=False,
help='Delete existing index',
)
def handle(self, delete, *args, **options):
indices = registered_indices()
connection = connections.get_connection()
hues.info('Using connection', connection)
if len(indices):
hues.info('Discovered', len(indices), 'Indexes')
else:
hues.warn('No search index found')
for i, index in enumerate(indices, 1):
hues.info('==> Initializing', index.__name__)
with index().ensure_closed_and_reopened() as ix:
if delete:
hues.warn('Deleting existing index.')
ix.delete_index()
ix.init()
hues.success('--> Done {0}/{1}'.format(i, len(indices)))
|
{
"content_hash": "b4478f48c55861a31661c5e1e2dee3b0",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 68,
"avg_line_length": 32.026315789473685,
"alnum_prop": 0.580115036976171,
"repo_name": "prashnts/dj-elasticsearch-flex",
"id": "44321cff440771f28fe8c2f692ea7f141cb78cc9",
"size": "1233",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "elasticsearch_flex/management/commands/flex_sync.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2887"
},
{
"name": "Makefile",
"bytes": "1605"
},
{
"name": "Python",
"bytes": "30547"
}
],
"symlink_target": ""
}
|
import logging
import time
import numpy as np
from basil.dut import Dut
chip = Dut("bdaq53_eth.yaml")
chip.init()
chip['CONTROL']['EN'] = 0
chip['CONTROL'].write()
logging.info("Starting data test ...")
chip['CONTROL']['EN'] = 1
chip['CONTROL'].write()
start = 0
for i in range(10):
time.sleep(1)
fifo_data = chip['FIFO'].get_data()
data_size = len(fifo_data)
data_gen = np.linspace(start, data_size - 1 + start, data_size, dtype=np.int32)
comp = (fifo_data == data_gen)
logging.info("%s: %.2f Mbits checked. OK?: %s" % (i, float(32 * data_size) / pow(10, 6), comp.all()))
start += data_size
chip['CONTROL']['EN'] = 0 # stop data source
chip['CONTROL'].write()
logging.info("Starting speed test ...")
testduration = 10
total_len = 0
tick = 0
tick_old = 0
start_time = time.time()
chip['CONTROL']['EN'] = 1
chip['CONTROL'].write()
while time.time() - start_time < testduration:
data = chip['FIFO'].get_data()
total_len += len(data) * 4 * 8
time.sleep(0.01)
tick = int(time.time() - start_time)
if tick != tick_old:
logging.info("Time: %f s" % (time.time() - start_time))
tick_old = tick
chip['CONTROL']['EN'] = 0x0 # stop data source
chip['CONTROL'].write()
logging.info("Bytes received: %s, average data rate: %s Mbit/s" % (total_len, round((total_len / 1e6 / testduration), 2)))
|
{
"content_hash": "90e530169d6122fa014d738dc3f3ed36",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 122,
"avg_line_length": 23.11864406779661,
"alnum_prop": 0.6151026392961877,
"repo_name": "SiLab-Bonn/basil",
"id": "340ec901b2cb71ddbd1ef43bbfae064469822615",
"size": "1665",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/bdaq/bdaq53_eth.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "17821"
},
{
"name": "Python",
"bytes": "502781"
},
{
"name": "SystemVerilog",
"bytes": "2358"
},
{
"name": "Verilog",
"bytes": "428771"
}
],
"symlink_target": ""
}
|
import os
import sys
import json
import traceback
import random
import math
import matplotlib
import matplotlib.pyplot as plt
import pprint
colors = {'red': '#cd7058', 'blue': '#599ad3', 'orange': '#f9a65a', 'green': '#66cc66', 'black': '#000000', 'purple': '#990066'}
numbering_subplots = ['a', 'b', 'c', 'd', 'e', 'f']
def compute_average(datapoints, has_shift):
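    # `datapoints` maps an occurrence value to its frequency. When `has_shift`
    # is true, the smallest occurrence is taken as a baseline and subtracted
    # from the mean, so the returned mean is relative to that minimum.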
if len(datapoints) == 0:
return 0, 0, 0
num_freq = 0
sum_metric = 0
if not has_shift:
minimum = 0
else:
minimum = None
for key, value in datapoints.iteritems():
occurrence = float(key)
frequency = float(value)
num_freq += frequency
sum_metric += frequency * occurrence
if has_shift:
if minimum is None or occurrence < minimum:
minimum = occurrence
if num_freq <= 1:
return 0, 0, 0
mean = float(sum_metric) / float(num_freq)
sum_metric_squared = 0
for key, value in datapoints.iteritems():
occurrence = float(key)
frequency = float(value)
sum_metric_squared += frequency * (occurrence - mean) * (occurrence - mean)
variance = float(sum_metric_squared) / float(num_freq - 1)
standard_deviation = math.sqrt(variance)
return mean - minimum, variance, standard_deviation
def compute_median(datapoints, has_shift):
# TODO: very inefficient, could optimize this method
if len(datapoints) == 0:
return 0, 0, 0
values = []
minimum = None
for key, value in datapoints.iteritems():
occurrence = float(key)
frequency = float(value)
for i in range(int(frequency)):
values.append(occurrence)
if has_shift:
if minimum is None or occurrence < minimum:
minimum = occurrence
if not has_shift: minimum = 0
values = sorted(values)
median = values[len(values) / 2] - minimum
perc95 = values[int(float(len(values)) * .95)] - minimum
maximum = values[-1] - minimum
return median, perc95, maximum
def aggregate_datapoints(dirpath_data, testcases, algorithms, shifts):
print testcases, algorithms, shifts
aggregate = {}
for dirname, dirnames, filenames in os.walk(dirpath_data):
for filename in filenames:
basename, ext = os.path.splitext(filename)
if ext.lower() != '.json': continue
if '50000000' in filename: continue
if testcases != 'all' and not any(filename.startswith(testcase) for testcase in testcases.split(',')):
print 'skipping ' + filename
continue
if algorithms != 'all' and not any(algorithm in filename for algorithm in algorithms.split(',')):
print 'skipping ' + filename
continue
try:
filepath = os.path.join(dirname, filename)
print "Reading file [%s]" % (filepath,)
f = open(filepath, 'r')
text = f.read()
data_items = json.loads(text)
f.close()
has_shift = shifts and any(shift in filename for shift in shifts.split(','))
if not isinstance(data_items, list):
data_items = [data_items]
for data in data_items:
average, variance, stddev = compute_average(data['datapoints'], has_shift)
median, perc95, maximum = compute_median(data['datapoints'], has_shift)
ia = data['algorithm']
im = data['metric']
ib = data['parameters_hashmap_string']
ia = '%s-%s' % (ia, ib)
ii = data['instance']
ic = data['cycle']
it = data['testcase']
ip = data['parameters_testcase_string']
if '75' in ip:
print "before", ip
ip = ip.replace('lfm0.75', 'lfm0.80')
print "after", ip
it = '%s-%s' % (it, ip)
if im not in aggregate:
aggregate[im] = {}
if it not in aggregate[im]:
aggregate[im][it] = {}
if ia not in aggregate[im][it]:
aggregate[im][it][ia] = {}
if ic not in aggregate[im][it][ia]:
aggregate[im][it][ia][ic] = {}
for m in ['mean', 'median', 'perc95', 'standard_deviation', 'variance', 'maximum']:
if m not in aggregate[im][it][ia][ic]:
aggregate[im][it][ia][ic][m] = []
aggregate[im][it][ia][ic]['mean'].append(average)
aggregate[im][it][ia][ic]['standard_deviation'].append(stddev)
aggregate[im][it][ia][ic]['variance'].append(variance)
aggregate[im][it][ia][ic]['median'].append(median)
aggregate[im][it][ia][ic]['perc95'].append(perc95)
aggregate[im][it][ia][ic]['maximum'].append(maximum)
except:
print 'Crashed at file: [%s/%s]' % (dirname, filename)
print traceback.print_exc()
sys.exit(1)
return aggregate
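# The function below is a sign-flip randomization test on the paired
# differences: each of the 10240 resamples randomly negates the per-pair
# differences, and the p-value is the fraction of resampled means at least as
# extreme as the observed mean difference.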
def randomized_paired_sample_t_test(reference, candidate, details):
num_items = len(reference)
random.seed(None)
population = []
print 'ref cand', reference, candidate
diff = []
for i in range(num_items):
diff.append(reference[i] - candidate[i])
num_population = 10240
for k in range(num_population):
diff_new = []
for i in range(num_items):
sign = -1 if random.random() < 0.5 else 1
diff_new.append(diff[i] * sign)
mean_new = float(sum(diff_new)) / float(num_items)
population.append(mean_new)
count_passed = 0
mean = sum(diff) / num_items
population = sorted(population)
for mean_current in population:
        if (mean > 0 and mean <= mean_current) or (mean < 0 and mean < mean_current):
break
count_passed += 1
if mean > 0:
count_passed = num_population - count_passed
if False and details:
print "*" * 64
print "*" * 64
print "details"
print "population", population[0], population[1], population[-2], population[-1]
print "mean", mean
print "count_passed: %f" % (float(count_passed),)
print "num_pop %f" % (float(num_population), )
p_value = float(count_passed) / float(num_population)
print "passed: %f" % (p_value,)
return p_value
def add_curve_to_plot(ax, aggregates, im, it, index_testcase, statistic, algorithms_ordering, filters, numbering_subplot, includes):
names = []
lines = []
font = {'family' : 'normal',
'weight' : 'normal',
'size' : 14}
matplotlib.rc('font', **font)
algorithms = [None] * 5
for ia in aggregates[im][it].keys():
for pattern in algorithms_ordering.keys():
if pattern in ia:
order = algorithms_ordering[pattern]['order']
algorithms[order] = ia
for ia in algorithms:
if ia is None: continue
print "Generating curve for: stats:%s | metric:%s | testcase:%s | algorithm:%s" % (statistic, im, it, ia)
xs = []
ys = []
for cycle, stats in sorted(aggregates[im][it][ia].items()):
if 'loading' in it:
xs.append((cycle * 2.0) / 100.0)
else:
xs.append(cycle)
ys.append(sum(stats[statistic]) / len(stats[statistic]))
name = '[ERROR: unknown algorithm]'
color = '#000000'
linewidth = 3
        zorder = 1
        style = '-'  # default so the plot call below never hits an undefined style
for k, v in filters.iteritems():
if k in ia:
name = filters[k]['name']
color = filters[k]['color']
linewidth = filters[k]['linewidth']
style = '-'
zorder = filters[k]['zorder']
break
if not any(pattern in ia for pattern in includes):
continue
line_current, = ax.plot(xs, ys, style, color=color, linewidth=linewidth, zorder=zorder)
names.append(name)
lines.append(line_current)
if 'loading' in it:
ax.set_xlabel('(%s) Load factor' % numbering_subplot)
else:
ax.set_xlabel('(%s) Iterations' % numbering_subplot)
if statistic == 'mean':
ax.set_ylabel('Mean %s' % im)
if True or 'loading' not in it:
x1,x2,y1,y2 = plt.axis()
plt.axis((x1,x2,0,100))
elif statistic == 'variance':
ax.set_ylabel('Variance of %s' % im)
if True or 'loading' not in it:
x1,x2,y1,y2 = plt.axis()
plt.axis((x1,x2,0,600))
elif statistic == 'standard_deviation':
ax.set_ylabel('Standard deviation of %s' % im)
elif statistic == 'median':
ax.set_ylabel('Median of %s' % im)
if True or 'loading' not in it:
x1,x2,y1,y2 = plt.axis()
plt.axis((x1,x2,0,100))
elif statistic == 'perc95':
ax.set_ylabel('95th percentile of %s' % im)
if True or 'loading' not in it:
x1,x2,y1,y2 = plt.axis()
plt.axis((x1,x2,0,100))
elif statistic == 'maximum':
ax.set_ylabel('Maximum %s' % im)
if True or 'loading' not in it:
x1,x2,y1,y2 = plt.axis()
plt.axis((x1,x2,0,180))
plt.title('Test case: %s' % (it.strip('-')))
ax.grid(True)
if any(metric in im for metric in ['blocks', 'aligned']) and statistic != 'variance':
labels=['16 B', '32 B', '64 B', '128 B', '256 B', '512 B', '1 KB', '2 KB', '4 KB', '8 KB', '16 KB', '32 KB', '64 KB', '128 KB']
plt.axis((x1,x2,4,4+len(labels)))
ax.set_yticks(range(4,4+len(labels)))
ax.set_yticklabels(labels)
plt.legend(lines, names).set_visible(False)
return names, lines
def plot_algorithms(aggregates):
for index_stat, statistic in enumerate(['mean', 'median', 'perc95', 'maximum', 'variance']):
for index_metric, im in enumerate(aggregates.keys()):
fig = plt.figure((index_stat+1) * 10000 + (index_metric+1) * 100 + 1)
legend = None
for index_testcase, it in enumerate(sorted(aggregates[im].keys())):
ax = fig.add_subplot(2, 2, index_testcase+1)
lines = []
names = []
names_temp, lines_temp = add_curve_to_plot(
ax=ax,
aggregates=aggregates,
im=im,
it=it,
index_testcase=index_testcase,
statistic=statistic,
algorithms_ordering = {
'linear': {'order': 0},
'backshift': {'order': 1},
'tombstone': {'order': 2},
'shadow': {'order': 3},
'bitmap': {'order': 4},
},
filters = {
'linear': { 'color': colors['blue'], 'name': 'Linear probing', 'linewidth': 8, 'zorder': 1 },
'backshift': { 'color': colors['orange'], 'name': 'Robin Hood (backward shift)', 'linewidth': 6, 'zorder': 2 },
'tombstone': { 'color': colors['red'], 'name': 'Robin Hood (tombstone)', 'linewidth': 4.5, 'zorder': 3 },
'shadow': { 'color': colors['green'], 'name': 'Hopscotch (shadow)', 'linewidth': 3, 'zorder': 4 },
'bitmap': { 'color': colors['black'], 'name': 'Hopscotch (bitmap)', 'linewidth': 1.75, 'zorder': 5 },
},
numbering_subplot=numbering_subplots[index_testcase],
includes=['10000-'],
)
names.extend(names_temp)
lines.extend(lines_temp)
legend = plt.legend(lines, names, prop={'size':12}, bbox_to_anchor=(0.2, -0.3))
if not os.path.isdir('plots/algorithms'):
os.mkdir('plots/algorithms')
fig.set_size_inches(10, 7.5)
plt.tight_layout()
plt.savefig('plots/algorithms/%s_%s.png' % (im.lower(), statistic), dpi=72, bbox_extra_artists=(legend,), bbox_inches='tight')
def plot_robinhood(aggregates):
for index_metric, im in enumerate(aggregates.keys()):
fig = plt.figure((index_metric+1) * 100 + 1)
for index_stat, statistic in enumerate(['mean', 'median', 'perc95', 'maximum', 'variance']):
ax = fig.add_subplot(3, 2, index_stat+1)
lines = []
names = []
for index_testcase, it in enumerate(sorted(aggregates[im].keys())):
names_temp, lines_temp = add_curve_to_plot(
ax=ax,
aggregates=aggregates,
im=im,
it=it,
index_testcase=index_testcase,
statistic=statistic,
algorithms_ordering = {
'10000-': {'order': 0},
'100000-': {'order': 1},
'1000000-': {'order': 2},
'10000000-': {'order': 3},
'50000000-': {'order': 4},
},
filters = {
'10000-': { 'color': colors['blue'], 'name': 'Robin Hood (backward shift, 10k)', 'linewidth': 8, 'zorder': 1 },
'100000-': { 'color': colors['orange'], 'name': 'Robin Hood (backward shift, 100k)', 'linewidth': 6, 'zorder': 2 },
'1000000-': { 'color': colors['red'], 'name': 'Robin Hood (backward shift, 1M)', 'linewidth': 4.5, 'zorder': 3 },
'10000000-': { 'color': colors['green'], 'name': 'Robin Hood (backward shift, 10M)', 'linewidth': 3, 'zorder': 4 },
'50000000-': { 'color': colors['black'], 'name': 'Robin Hood (backward shift, 50M)', 'linewidth': 1.75, 'zorder': 5 },
'100000000-': { 'color': colors['black'], 'name': 'Robin Hood (backward shift, 100M)', 'linewidth': 1.75, 'zorder': 5 },
},
numbering_subplot=numbering_subplots[index_stat],
includes=['backshift'],
)
names.extend(names_temp)
lines.extend(lines_temp)
legend = plt.legend(lines, names, prop={'size':12}, bbox_to_anchor=(2.10, 0.75))
fig.set_size_inches(10, 11.25)
plt.tight_layout()
if not os.path.isdir('plots/robinhood-backshift'):
os.mkdir('plots/robinhood-backshift')
plt.savefig('plots/robinhood-backshift/%s.png' % (im.lower()), dpi=72, bbox_extra_artists=(legend,), bbox_inches='tight')
if __name__=="__main__":
shifts = ""
if len(sys.argv) == 5:
shifts = sys.argv[4]
agg = aggregate_datapoints(dirpath_data=sys.argv[1],
testcases=sys.argv[2],
algorithms=sys.argv[3],
shifts=shifts)
plot_algorithms(agg)
plot_robinhood(agg)
|
{
"content_hash": "272236a6da89b40018b54b77f1786f0e",
"timestamp": "",
"source": "github",
"line_count": 399,
"max_line_length": 169,
"avg_line_length": 41.56641604010025,
"alnum_prop": 0.47223394633705157,
"repo_name": "amyvmiwei/hashmap",
"id": "2b9d11e9359d266f38578cc0c998a5c97f219430",
"size": "16651",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "plot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1264"
},
{
"name": "C++",
"bytes": "111722"
},
{
"name": "Makefile",
"bytes": "496"
},
{
"name": "Python",
"bytes": "16651"
}
],
"symlink_target": ""
}
|
from waflib.TaskGen import extension
from waflib import Task,Utils
def add_lua(self,node):
tsk=self.create_task('luac',node,node.change_ext('.luac'))
inst_to=getattr(self,'install_path',self.env.LUADIR and'${LUADIR}'or None)
if inst_to:
self.bld.install_files(inst_to,tsk.outputs)
return tsk
class luac(Task.Task):
run_str='${LUAC} -s -o ${TGT} ${SRC}'
color='PINK'
def configure(conf):
conf.find_program('luac',var='LUAC')
extension('.lua')(add_lua)
|
{
"content_hash": "04eb00a5722cd2abaf95800aa7a7ff1d",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 75,
"avg_line_length": 30.666666666666668,
"alnum_prop": 0.7130434782608696,
"repo_name": "yagince/text_ux",
"id": "0d48d4f8977badd58e3cc901bd9421b86d27587b",
"size": "605",
"binary": false,
"copies": "14",
"ref": "refs/heads/master",
"path": "vendor/ux-trie/ux-0.1.9/.waf-1.6.8-3e3391c5f23fbabad81e6d17c63a1b1e/waflib/Tools/lua.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "6250"
},
{
"name": "Ruby",
"bytes": "5951"
}
],
"symlink_target": ""
}
|
"""
mlab: a simple scripting interface to Mayavi2 for 3D plotting.
Can be used inside Mayavi2 itself, in "ipython -wthread", or in any
application with the WxWidget mainloop running.
"""
# Author: Prabhu Ramachandran <prabhu_r@users.sf.net>
# Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Copyright (c) 2007-2015, Enthought, Inc.
# License: BSD Style.
# Try forcing the use of wx 2.8 before any other import.
import sys
if not 'wx' in sys.modules:
try:
from traits.etsconfig.api import ETSConfig
if ETSConfig.toolkit in ('wx', ''):
import wxversion
wxversion.ensureMinimal('2.8')
except ImportError:
""" wxversion not installed """
# Mayavi imports
from mayavi.tools.camera import view, roll, yaw, pitch, move
from mayavi.tools.figure import figure, clf, gcf, savefig, \
draw, sync_camera, close, screenshot
from mayavi.tools.engine_manager import get_engine, show_pipeline, \
options, set_engine
from mayavi.tools.show import show
from mayavi.tools.animator import animate
def show_engine():
""" This function is deprecated, please use show_pipeline.
"""
import warnings
warnings.warn('The show_engine function is deprecated, please use'
'show_pipeline', stacklevel=2)
return show_pipeline()
from .tools.helper_functions import contour3d, test_contour3d, \
quiver3d, test_quiver3d, test_quiver3d_2d_data, \
points3d, test_points3d, test_molecule, \
flow, test_flow, \
imshow, test_imshow, \
surf, test_surf, mesh, test_mesh, test_simple_surf, \
test_mesh_sphere, test_fancy_mesh,\
contour_surf, test_contour_surf, \
plot3d, test_plot3d, \
test_plot3d_anim, test_points3d_anim, test_contour3d_anim,\
test_simple_surf_anim, test_flow_anim, test_mesh_sphere_anim, \
triangular_mesh, test_triangular_mesh, barchart, \
test_barchart, test_mesh_mask_custom_colors
from .tools.decorations import colorbar, scalarbar, vectorbar, \
outline, axes, xlabel, ylabel, zlabel, text, title, \
orientation_axes, text3d
from .tools import pipeline
from .tools.tools import start_recording, stop_recording
if __name__ == "__main__":
import numpy
n_mer, n_long = 6, 11
pi = numpy.pi
dphi = pi/1000.0
phi = numpy.arange(0.0, 2*pi + 0.5*dphi, dphi, 'd')
mu = phi*n_mer
x = numpy.cos(mu)*(1+numpy.cos(n_long*mu/n_mer)*0.5)
y = numpy.sin(mu)*(1+numpy.cos(n_long*mu/n_mer)*0.5)
z = numpy.sin(n_long*mu/n_mer)*0.5
pl = plot3d(x, y, z, numpy.sin(mu), tube_radius=0.05, colormap='Spectral')
colorbar(orientation='vertical')
t = numpy.linspace(0, 4*numpy.pi, 100)
cos = numpy.cos
sin = numpy.sin
x = sin(2*t)
y = cos(t)
z = sin(2*t)
s = sin(t)
pts = points3d(x, y, z, s, colormap="YlGnBu", scale_factor=0.1,
extent=(-0.3,0.3, -0.3, 0.3, -0.2,0.2))
axes(xlabel='X', ylabel='Y', zlabel='Z')
outline(pl)
title('Mayavi rocks', height=0.85)
|
{
"content_hash": "13ecde971d1afa8eb2d3d523c63c7ee6",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 78,
"avg_line_length": 31.082474226804123,
"alnum_prop": 0.6560530679933665,
"repo_name": "dmsurti/mayavi",
"id": "8a01d5fb89716f89aaaf36a2a27997806592096b",
"size": "3015",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mayavi/mlab.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1054"
},
{
"name": "GAP",
"bytes": "34817"
},
{
"name": "Python",
"bytes": "2494055"
},
{
"name": "Shell",
"bytes": "147"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import re
from pants.subsystem.subsystem import Subsystem
from pants.util.memo import memoized_property
from pants.contrib.go.subsystems.archive_retriever import ArchiveRetriever
from pants.contrib.go.subsystems.fetch_error import FetchError
from pants.contrib.go.subsystems.fetcher import ArchiveFetcher, CloningFetcher
from pants.contrib.go.subsystems.go_import_meta_tag_reader import GoImportMetaTagReader
logger = logging.getLogger(__name__)
class FetcherFactory(Subsystem):
"""A fetcher that retrieves and unpacks remote libraries from archive files."""
options_scope = 'go-fetchers'
@classmethod
def subsystem_dependencies(cls):
return (super(FetcherFactory, cls).subsystem_dependencies() +
(ArchiveRetriever, GoImportMetaTagReader))
_DEFAULT_MATCHERS = {
# TODO: Add launchpad.net?
r'bitbucket\.org/(?P<user>[^/]+)/(?P<repo>[^/]+)':
ArchiveFetcher.UrlInfo(
url_format='https://bitbucket.org/\g<user>/\g<repo>/get/{rev}.tar.gz',
default_rev='tip',
strip_level=1),
r'github\.com/(?P<user>[^/]+)/(?P<repo>[^/]+)':
ArchiveFetcher.UrlInfo(
url_format='https://github.com/\g<user>/\g<repo>/archive/{rev}.tar.gz',
default_rev='master',
strip_level=1),
}
@classmethod
def register_options(cls, register):
super(FetcherFactory, cls).register_options(register)
register('--disallow-cloning-fetcher', type=bool, default=False, advanced=True,
help="If True, we only fetch archives explicitly matched by --matchers."
"Otherwise we fall back to cloning the remote repos, using Go's standard "
"remote dependency resolution protocol.")
register('--matchers', metavar='<mapping>', type=dict,
default=cls._DEFAULT_MATCHERS, advanced=True,
help="A mapping from a remote import path matching regex to an UrlInfo struct "
"describing how to fetch and unpack an archive of that remote import path. "
"The regex must match the beginning of the remote import path (no '^' anchor is "
"needed, it is assumed) until the first path element that is contained in the "
"archive. (e.g. for 'bazil.org/fuse/fs', which lives in the archive of "
"'bazil.org/fuse', it must match 'bazil.org/fuse'.)\n"
"\n"
"The UrlInfo struct is a 3-tuple with the following slots:\n"
"0. An url format string that is supplied to the regex match\'s `.expand` "
"method and then formatted with the remote import path\'s `rev`, "
"`import_prefix`, and `pkg`.\n"
"1. The default revision string to use when no `rev` is supplied; ie 'HEAD' or "
"'master' for git.\n"
"2. An integer indicating the number of leading path components to strip from "
"files upacked from the archive.")
def get_fetcher(self, import_path):
for matcher, unexpanded_url_info in self._matchers:
      # Note that the url_formats are filled in in two stages: we expand them via
      # the regex match here, and the ArchiveFetcher applies .format() later, when
      # it knows the rev.
match = matcher.match(import_path)
if match:
expanded_url_info = ArchiveFetcher.UrlInfo(match.expand(unexpanded_url_info.url_format),
unexpanded_url_info.default_rev,
unexpanded_url_info.strip_level)
return ArchiveFetcher(import_path, match.group(0), expanded_url_info,
ArchiveRetriever.global_instance())
if self.get_options().disallow_cloning_fetcher:
raise FetchError('Cannot fetch {}. No archive match, and remote repo cloning '
'disallowed.'.format(import_path))
return CloningFetcher(import_path, GoImportMetaTagReader.global_instance())
@memoized_property
def _matchers(self):
matchers = []
for regex, info in self.get_options().matchers.items():
matcher = re.compile(regex)
unexpanded_url_info = ArchiveFetcher.UrlInfo(*info)
matchers.append((matcher, unexpanded_url_info))
return matchers
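# A sketch of one custom --matchers entry (the host and regex are
# hypothetical; the UrlInfo slots are url_format, default_rev and
# strip_level, as described in the option help above):
#
#   {r'git\.example\.org/(?P<repo>[^/]+)':
#        ArchiveFetcher.UrlInfo(
#            url_format='https://git.example.org/\g<repo>/archive/{rev}.tar.gz',
#            default_rev='master',
#            strip_level=1)}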
|
{
"content_hash": "f39577509052d65a089f85a959a2710c",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 99,
"avg_line_length": 49.05494505494506,
"alnum_prop": 0.6366487455197133,
"repo_name": "peiyuwang/pants",
"id": "800ad2d0a75201536a1cfb3e02f51f2fc3184c4d",
"size": "4611",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "contrib/go/src/python/pants/contrib/go/subsystems/fetcher_factory.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "781"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "GAP",
"bytes": "1283"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "1746"
},
{
"name": "HTML",
"bytes": "78744"
},
{
"name": "Java",
"bytes": "463179"
},
{
"name": "JavaScript",
"bytes": "30784"
},
{
"name": "Protocol Buffer",
"bytes": "4749"
},
{
"name": "Python",
"bytes": "5586816"
},
{
"name": "Rust",
"bytes": "168825"
},
{
"name": "Scala",
"bytes": "79707"
},
{
"name": "Shell",
"bytes": "64292"
},
{
"name": "Thrift",
"bytes": "2183"
}
],
"symlink_target": ""
}
|
'''The app module, containing the app factory function.'''
from flask import Flask, render_template
from statusmap.settings import ProdConfig
from statusmap.assets import assets
from statusmap.extensions import (
bcrypt,
cache,
db,
login_manager,
migrate,
debug_toolbar,
)
from statusmap import public, user
def create_app(config_object=ProdConfig):
'''An application factory, as explained here:
http://flask.pocoo.org/docs/patterns/appfactories/
:param config_object: The configuration object to use.
'''
app = Flask(__name__)
app.config.from_object(config_object)
register_extensions(app)
register_blueprints(app)
register_errorhandlers(app)
return app
def register_extensions(app):
assets.init_app(app)
bcrypt.init_app(app)
cache.init_app(app)
db.init_app(app)
login_manager.init_app(app)
debug_toolbar.init_app(app)
migrate.init_app(app, db)
return None
def register_blueprints(app):
app.register_blueprint(public.views.blueprint)
app.register_blueprint(user.views.blueprint)
return None
def register_errorhandlers(app):
def render_error(error):
# If a HTTPException, pull the `code` attribute; default to 500
error_code = getattr(error, 'code', 500)
return render_template("{0}.html".format(error_code)), error_code
for errcode in [401, 404, 500]:
app.errorhandler(errcode)(render_error)
return None
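# A minimal usage sketch; `create_app()` falls back to ProdConfig, imported
# above, when no config object is passed:
#
#     from statusmap.app import create_app
#     app = create_app()
#     app.run()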
|
{
"content_hash": "d324a26b79b59ee8d0922f5a4b4c38af",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 73,
"avg_line_length": 26.745454545454546,
"alnum_prop": 0.6947654656696125,
"repo_name": "evolvedlight/statusmap",
"id": "f434ef75388b1307766ca7556aff4ffc8ee3cd83",
"size": "1495",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "statusmap/app.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1170"
},
{
"name": "HTML",
"bytes": "8686"
},
{
"name": "JavaScript",
"bytes": "240956"
},
{
"name": "Python",
"bytes": "24287"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('formidable', '0002_remove_access_display'),
]
operations = [
migrations.AlterField(
model_name='item',
name='label',
field=models.TextField(),
),
]
|
{
"content_hash": "254a5ee26be45d3b1fbaa787465c6a09",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 53,
"avg_line_length": 20.4375,
"alnum_prop": 0.5626911314984709,
"repo_name": "novafloss/django-formidable",
"id": "3448e8d49b3df8f1a3a2e6d2f97632681d2077d0",
"size": "327",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "formidable/migrations/0003_item_label_no_size_limit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3427"
},
{
"name": "Python",
"bytes": "108345"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function, unicode_literals
from .common import BaseTest
class TestBatchComputeEnvironment(BaseTest):
def test_batch_compute_update(self):
session_factory = self.replay_flight_data("test_batch_compute_update")
p = self.load_policy(
{
"name": "batch-compute",
"resource": "batch-compute",
"filters": [{"computeResources.desiredvCpus": 0}, {"state": "ENABLED"}],
"actions": [{"type": "update-environment", "state": "DISABLED"}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = session_factory().client("batch")
envs = client.describe_compute_environments(
computeEnvironments=[resources[0]["computeEnvironmentName"]]
)[
"computeEnvironments"
]
self.assertEqual(envs[0]["state"], "DISABLED")
def test_batch_compute_delete(self):
session_factory = self.replay_flight_data("test_batch_compute_delete")
p = self.load_policy(
{
"name": "batch-compute",
"resource": "batch-compute",
"filters": [{"computeResources.desiredvCpus": 0}],
"actions": [{"type": "delete"}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = session_factory().client("batch")
envs = client.describe_compute_environments(
computeEnvironments=[resources[0]['computeEnvironmentName']]
)['computeEnvironments']
self.assertEqual(envs[0]['status'], 'DELETING')
class TestBatchDefinition(BaseTest):
def test_definition_deregister(self):
def_name = 'c7n_batch'
session_factory = self.replay_flight_data(
'test_batch_definition_deregister')
p = self.load_policy({
'name': 'batch-definition',
'resource': 'batch-definition',
'filters': [
{'containerProperties.image': 'amazonlinux'}],
'actions': [{'type': 'deregister'}]
}, session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['jobDefinitionName'], 'c7n_batch')
client = session_factory(region='us-east-1').client('batch')
defs = client.describe_job_definitions(
jobDefinitionName=def_name)['jobDefinitions']
self.assertEqual(defs[0]['status'], 'INACTIVE')
|
{
"content_hash": "82e4577f512e48ae9cc27853e362ac72",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 88,
"avg_line_length": 39.25,
"alnum_prop": 0.5818658673660547,
"repo_name": "ewbankkit/cloud-custodian",
"id": "29d0312f56991ea19bf054ca21e88ed61e19ad84",
"size": "3254",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tests/test_batch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "7986"
},
{
"name": "Go",
"bytes": "145643"
},
{
"name": "HTML",
"bytes": "31"
},
{
"name": "Makefile",
"bytes": "9857"
},
{
"name": "PowerShell",
"bytes": "1749"
},
{
"name": "Python",
"bytes": "4913354"
},
{
"name": "Shell",
"bytes": "7277"
}
],
"symlink_target": ""
}
|
from baseparser import BaseParser
from bs4 import BeautifulSoup
import re
import datetime
DATE_FORMAT = '%A, %B %e %Y, %l:%M %p'
class WashPoParser(BaseParser):
SUFFIX = '?print=true'
domains = ['www.washingtonpost.com']
feeder_pat = '^http://www.washingtonpost.com/.*_story.html'
feeder_pages = ['http://www.washingtonpost.com/']
def _printableurl(self):
return re.sub('_story.html.*', '_print.html', self.url)
def _parse(self, html):
soup = BeautifulSoup(html)
self.meta = soup.findAll('meta')
elt = soup.find('h1', property="dc.title")
if elt is None:
self.real_article = False
return
self.title = elt.getText().strip()
elt = soup.find('h3', property="dc.creator")
if elt is None:
self.byline = ''
else:
self.byline = elt.getText().strip()
elt = soup.find('span', datetitle="published")
if elt is None:
self.date = ''
else:
date = datetime.datetime.fromtimestamp(float(elt['epochtime'])/1000)
self.date = date.strftime(DATE_FORMAT)
div = soup.find('div', id='content')
if div is None:
self.real_article = False
return
self.body = '\n'+'\n\n'.join([x.getText().strip() for x in div.findAll('p')])
|
{
"content_hash": "f3cc6b6c4d8148cf7a95a370dee5c1f7",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 85,
"avg_line_length": 30.931818181818183,
"alnum_prop": 0.5657604702424688,
"repo_name": "bjowi/newsdiffs",
"id": "06c7bfa868ca536000c5e303fc4630b0dd40ea83",
"size": "1361",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "parsers/washpo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "340"
},
{
"name": "CSS",
"bytes": "2853"
},
{
"name": "HTML",
"bytes": "56882"
},
{
"name": "JavaScript",
"bytes": "192921"
},
{
"name": "Python",
"bytes": "146728"
}
],
"symlink_target": ""
}
|
import sys
try:
import unittest2 as unittest
except ImportError:
import unittest
import logging
from config_manager import ConfigManager
try:
from mock import patch
except ImportError:
if sys.version_info < (3, 0, 0):
print("[-] The mock module is needed to create mock objects,"
"\ninstall it from https://pypi.org/project/mock/"
"\nor run `pip install mock`.")
raise
else:
sys.exit("""
The mock module is needed to create mock objects,
install it from https://pypi.org/project/mock/
or run `pip install mock`.
""")
from testfixtures import log_capture
from config_logger import Logger
class LoggerTest(unittest.TestCase):
config0 = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'basic': {
'format': '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'level': 'DEBUG',
'formatter': 'basic',
'stream': 'ext://sys.stdout'
},
},
'loggers': {
'sut_logger': {
'level': 'DEBUG',
'handlers': ['console']
},
},
'root': {
'handlers': ['console'],
'level': 'WARNING'
}
}
config1 = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'basic': {
'format': '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
}
},
'filters': {
'info_special': {
'()': 'config_logger.filters.SameLevelFilter',
'level': 'logging.INFO',
},
'critical_special': {
'()': 'config_logger.filters.LessEqualLevelFilter',
'level': 'logging.CRITICAL',
}
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'basic',
'level': 'DEBUG',
'stream': 'ext://sys.stdout'
},
'critical_file_handler': {
'class': 'logging.StreamHandler',
'formatter': 'basic',
'level': 'CRITICAL',
'filters': ['critical_special'],
'stream': 'ext://sys.stdout'
},
'error_file_handler': {
'class': 'logging.StreamHandler',
'formatter': 'basic',
'level': 'ERROR',
'stream': 'ext://sys.stdout'
},
'info_file_handler': {
'class': 'logging.StreamHandler',
'formatter': 'basic',
'level': 'INFO',
'filters': ['info_special'],
'stream': 'ext://sys.stdout'
},
'warning_file_handler': {
'class': 'logging.StreamHandler',
'formatter': 'basic',
'level': 'WARNING',
'stream': 'ext://sys.stdout'
}
},
'loggers': {
'test_error_logger': {
'handlers': ['error_file_handler'],
'level': 'ERROR',
'propagate': False
}
},
'root': {
'handlers': ['console', 'info_file_handler', 'error_file_handler'],
'level': 'INFO'
}
}
config2 = {
'version': 1,
'disable_existing_loggers': False,
'root':
{
'level': 'WARNING'
}
}
config3 = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'basic': {
'format': '%(asctime)s - %(passed_argument_1)s - %(passed_argument_2)s - %(levelname)s - %(message)s'
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'level': 'DEBUG',
'formatter': 'basic',
'stream': 'ext://sys.stdout'
},
},
'loggers': {
'sut_logger': {
'level': 'DEBUG',
'handlers': ['console']
},
}
}
@patch.object(ConfigManager, 'load_config')
def test_logger_constructor(self, mocked_config):
# Given
mocked_config.return_value = None
name = 'test_error_logger'
# When
logger = Logger(name=name, default_conf=self.config1)
# Then
self.assertEqual(logger.logger.name, 'test_error_logger')
self.assertEqual(logging.getLevelName(logger.logger.level), 'ERROR')
self.assertEqual(logging.getLevelName(logger.logger.root.level), 'INFO')
@patch.object(ConfigManager, 'load_config')
def test_logger_constructor_logging_level(self, mocked_config):
# Given
mocked_config.return_value = None
name = 'root_logger'
default_level = logging.ERROR
# When
logger = Logger(name=name, default_level=default_level, default_conf=self.config2)
# Then
self.assertEqual(logger.logger.name, 'root_logger')
self.assertEqual(logging.getLevelName(logger.logger.level), 'NOTSET')
@patch.object(ConfigManager, 'load_config')
def test_logger_constructor_logging_level_root_and_logger(self, mocked_config):
# Given
mocked_config.return_value = None
name = 'sut_logger'
# When
logger = Logger(name=name, default_conf=self.config0)
# Then
self.assertEqual(logger.logger.name, 'sut_logger')
self.assertEqual(logging.getLevelName(logger.logger.level), 'DEBUG')
self.assertEqual(logging.getLevelName(logger.logger.root.level), 'WARNING')
def test_logger_constructor_logging_level_when_incorrect_conf_file(self):
# Given
name = 'sut_logger'
cfg_path = 'wrong/path.to/log_conf.yaml'
# When
logger = Logger(name=name, cfg_path=cfg_path)
# Then
self.assertEqual(logger.logger.name, 'sut_logger')
self.assertEqual(logging.getLevelName(logger.logger.level), 'INFO')
    def test_logger_logging_level_and_root_level_when_no_conf_file_specified(self):
# Given
name = 'sut_logger'
# When
logger = Logger(name=name, default_conf=self.config0)
# Then
self.assertEqual(logger.logger.name, 'sut_logger')
self.assertEqual(logging.getLevelName(logger.logger.level), 'DEBUG')
self.assertEqual(logging.getLevelName(logger.logger.root.level), 'WARNING')
@patch.object(ConfigManager, 'load_config')
@log_capture('sut_logger')
def test_debug_logger(self, l, mocked_config):
# Given
mocked_config.return_value = None
logger = Logger(name='sut_logger', default_conf=self.config1)
# When
logger.debug('a debug message')
# Then
l.check(('sut_logger', 'DEBUG', 'a debug message'))
@patch.object(ConfigManager, 'load_config')
@log_capture('root_logger')
def test_info_logger(self, l, mocked_config):
# Given
mocked_config.return_value = None
logger = Logger(name='root_logger', default_conf=self.config1)
# When
logger.info('a message')
# Then
l.check(('root_logger', 'INFO', 'a message'))
@patch.object(ConfigManager, 'load_config')
@log_capture('root_logger')
def test_warning_logger(self, l, mocked_config):
# Given
mocked_config.return_value = None
logger = Logger(name='root_logger', default_conf=self.config1)
# When
logger.warning('a warning')
# Then
l.check(('root_logger', 'WARNING', 'a warning'))
@patch.object(ConfigManager, 'load_config')
@log_capture('root_logger')
def test_error_logger(self, l, mocked_config):
# Given
mocked_config.return_value = None
logger = Logger(name='root_logger', default_conf=self.config1)
# When
logger.error('an error')
# Then
l.check(('root_logger', 'ERROR', 'an error'))
@patch.object(ConfigManager, 'load_config')
@log_capture('root_logger')
def test_critical_logger(self, l, mocked_config):
# Given
mocked_config.return_value = None
logger = Logger(name='root_logger', default_conf=self.config1)
# When
logger.critical('a critical error')
# Then
l.check(('root_logger', 'CRITICAL', 'a critical error'))
@patch.object(ConfigManager, 'load_config')
def test_extra_parameters_passed(self, mocked_config):
# Given
mocked_config.return_value = None
name = 'sut_logger'
# When
logger = Logger(name=name, default_conf=self.config3, extra={'passed_argument_1': 'A contextual info',
'passed_argument_2': 'Another contextual info'})
# Then
self.assertEqual(logger.logger.logger.handlers[0].formatter._fmt,
'%(asctime)s - %(passed_argument_1)s - %(passed_argument_2)s - %(levelname)s - %(message)s')
self.assertEqual(logger.logger.logger.name, 'sut_logger')
self.assertEqual(logging.getLevelName(logger.logger.logger.level), 'DEBUG')
|
{
"content_hash": "b431c108e12e9c7816f185b60bd79dcd",
"timestamp": "",
"source": "github",
"line_count": 281,
"max_line_length": 117,
"avg_line_length": 34.09964412811388,
"alnum_prop": 0.5245251513254018,
"repo_name": "afxentios/config-logger",
"id": "814510db5f9fc3169be7ab772a996db7db530b11",
"size": "9582",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/logger_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16294"
}
],
"symlink_target": ""
}
|
"""This file sets up the django environment."""
import os
import django
from django.conf import settings
import logging
from grr.lib import config_lib
from grr.lib import registry
class DjangoInit(registry.InitHook):
"""Initialize the Django environment."""
def RunOnce(self):
"""Configure the Django environment."""
if django.VERSION[0] == 1 and django.VERSION[1] < 5:
msg = ("The installed Django version is too old. We need 1.5+. You can "
"install a new version with 'sudo easy_install Django'.")
logging.error(msg)
raise RuntimeError(msg)
base_app_path = os.path.normpath(os.path.dirname(__file__))
# Note that Django settings are immutable once set.
django_settings = {
"DEBUG": config_lib.CONFIG["AdminUI.django_debug"],
"TEMPLATE_DEBUG": config_lib.CONFIG["AdminUI.django_debug"],
"SECRET_KEY": config_lib.CONFIG["AdminUI.django_secret_key"],
# Set to default as we don't supply an HTTPS server.
# "CSRF_COOKIE_SECURE": not FLAGS.django_debug, # Only send over HTTPS.
# Where to find url mappings.
"ROOT_URLCONF": "grr.gui.urls",
"TEMPLATE_DIRS": ("%s/templates" % base_app_path,),
# Don't use the database for sessions, use a file.
"SESSION_ENGINE": "django.contrib.sessions.backends.file",
"ALLOWED_HOSTS": config_lib.CONFIG["AdminUI.django_allowed_hosts"],
"USE_I18N": False,
}
# The below will use conf/global_settings/py from Django, we need to
# override every variable we need to set.
settings.configure(**django_settings)
try:
# This is necessary for Django >= 1.7 but fails for 1.6 and below.
django.setup()
except AttributeError:
pass
if settings.SECRET_KEY == "CHANGE_ME":
msg = "Please change the secret key in the settings module."
logging.error(msg)
class GuiPluginsInit(registry.InitHook):
"""Initialize the GUI plugins once Django is initialized."""
pre = ["DjangoInit"]
def RunOnce(self):
"""Import the plugins once only."""
# pylint: disable=unused-variable,g-import-not-at-top
from grr.gui import gui_plugins
# pylint: enable=unused-variable,g-import-not-at-top
def GetWSGIHandler():
  """Return a WSGI handler wrapping the Django admin UI application."""
  from django.core.handlers import wsgi  # pylint: disable=g-import-not-at-top
return wsgi.WSGIHandler()
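# A minimal usage sketch (not part of GRR itself; assumes the init hooks above
# have already run so Django settings are configured):
#
#   from wsgiref.simple_server import make_server
#   server = make_server("127.0.0.1", 8000, GetWSGIHandler())
#   server.serve_forever()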
|
{
"content_hash": "62982936b3acbd1976a6a344c6347586",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 80,
"avg_line_length": 32.888888888888886,
"alnum_prop": 0.6672297297297297,
"repo_name": "pchaigno/grr",
"id": "c5921f2724772e1e5a65c2f953fe22573060b8c5",
"size": "2390",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "gui/django_lib.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "Batchfile",
"bytes": "14896"
},
{
"name": "C",
"bytes": "10598"
},
{
"name": "C++",
"bytes": "276081"
},
{
"name": "CMake",
"bytes": "3044"
},
{
"name": "CSS",
"bytes": "12677"
},
{
"name": "Groff",
"bytes": "444"
},
{
"name": "HTML",
"bytes": "71587"
},
{
"name": "JavaScript",
"bytes": "228300"
},
{
"name": "Makefile",
"bytes": "6232"
},
{
"name": "Protocol Buffer",
"bytes": "197889"
},
{
"name": "Python",
"bytes": "5172085"
},
{
"name": "Ruby",
"bytes": "5103"
},
{
"name": "Shell",
"bytes": "43112"
},
{
"name": "Standard ML",
"bytes": "8172"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from pyqtgraph import Vector, SRTTransform3D
class Frame(object):
"""One or more frames of imaging data, including meta information.
Expects *info* to be a dictionary with some minimal information:
* 'frameTransform' maps from the image coordinates (0,0 at top-left) to
the coordinate system of the imaging device.
    * 'deviceTransform' maps from the coordinate system of the imaging device to
global coordinates.
"""
def __init__(self, data, info):
object.__init__(self)
self._data = data
self._info = info
## Complete transform maps from image coordinates to global.
if 'transform' not in info:
info['transform'] = SRTTransform3D(self.deviceTransform() * self.frameTransform())
def asarray(self):
"""Assuming this frame object represents multiple frames, return an array with one Frame per frame
"""
return [Frame(frame, self.info().copy()) for frame in self.data()]
def data(self):
"""Return raw imaging data.
"""
return self._data
def info(self):
"""Return the meta info dict for this frame.
"""
return self._info
def getImage(self):
"""Return processed image data.
By default, this method just returns self.data().
"""
return self._data
def deviceTransform(self):
"""Return the transform that maps from imager device coordinates to global."""
return SRTTransform3D(self._info['deviceTransform'])
def frameTransform(self):
"""Return the transform that maps from this frame's image coordinates
to its imager device coordinates. This transform takes into account
the camera's region and binning settings.
"""
return SRTTransform3D(self._info['frameTransform'])
def globalTransform(self):
"""Return the transform that maps this frame's image coordinates
to global coordinates. This is equivalent to (deviceTransform * frameTransform).
"""
return SRTTransform3D(self._info['transform'])
def mapFromFrameToGlobal(self, obj):
"""Map *obj* from the frame's data coordinates to global coordinates.
"""
return self.globalTransform().map(obj)
def saveImage(self, dh, filename):
"""Save this frame data to *filename* inside DirHandle *dh*.
        The file name must end with ".ma" (for MetaArray) or any supported image file extension.
"""
data = self.getImage()
info = self.info()
if filename.endswith('.ma'):
return dh.writeFile(data, filename, info, fileType="MetaArray", autoIncrement=True)
else:
return dh.writeFile(data, filename, info, fileType="ImageFile", autoIncrement=True)
|
{
"content_hash": "43c1a8b22b5cc0cb58980535a5d4f57f",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 106,
"avg_line_length": 36.49367088607595,
"alnum_prop": 0.63579604578564,
"repo_name": "campagnola/acq4",
"id": "598f0960411e70bad1e3ced121a2ff5b823e8627",
"size": "2883",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "acq4/util/imaging/frame.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AMPL",
"bytes": "3037"
},
{
"name": "Batchfile",
"bytes": "335"
},
{
"name": "C",
"bytes": "1301111"
},
{
"name": "C++",
"bytes": "340035"
},
{
"name": "CSS",
"bytes": "716"
},
{
"name": "Inno Setup",
"bytes": "1606"
},
{
"name": "Makefile",
"bytes": "30"
},
{
"name": "Processing",
"bytes": "13403"
},
{
"name": "Python",
"bytes": "3484434"
},
{
"name": "Shell",
"bytes": "70"
}
],
"symlink_target": ""
}
|
def readImg(imgf, dim=32, augment=True):
'''
This function loads in an image and computes dim reduction.
---
    I: imgf=image file path, dim=downsampled image size (default 32)
O: vector (dim-reduced) representation of the image
---
For testing this function, we can use:
imgf = 'Data/images_train/100008.jpg'
'''
import numpy as np
import cv2
    from scipy.cluster.vq import whiten
# Read data file (0 = greyscale, otherwise = rgb)
img = cv2.imread(imgf)
# Scale data by dividing by 255
img = img / float(255)
# Crop images to 200x200
img = img[112:312, 112:312]
    # Downsample images to dim x dim
img = cv2.resize(img, (dim, dim), interpolation=cv2.INTER_CUBIC)
# Data augmentation
if augment:
from augmentImg import augmentImg
img = augmentImg(img, dim)
# Flatten data [each col represents the r/g/b color]
#img = np.reshape(img, (dim*dim,3))
img = np.reshape( img, (dim*dim,img.shape[2]) )
# Whiten data
img = whiten(img)
# Convert images to vector
img = np.reshape(img, np.prod(img.shape))
return img
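# A minimal usage sketch (the image path is illustrative):
#
#   vec = readImg('Data/images_train/100008.jpg', dim=32, augment=False)
#   print(vec.shape)  # (dim * dim * channels,), e.g. (3072,) for a 32x32 RGB crop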
|
{
"content_hash": "584bb100966eab8c092e1d77afeb5e8b",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 68,
"avg_line_length": 27.441860465116278,
"alnum_prop": 0.6203389830508474,
"repo_name": "mattdelhey/kaggle-galaxy",
"id": "3a07cf1c99d23476c05aad719094cdcf4001a864",
"size": "1180",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "readImg.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "62279"
},
{
"name": "R",
"bytes": "1064"
},
{
"name": "TeX",
"bytes": "739"
}
],
"symlink_target": ""
}
|
"""Utilities for collecting objects based on "is" comparison."""
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import weakref
from tensorflow.python.util.compat import collections_abc
class _ObjectIdentityWrapper(object):
"""Wraps an object, mapping __eq__ on wrapper to "is" on wrapped.
Since __eq__ is based on object identity, it's safe to also define __hash__
based on object ids. This lets us add unhashable types like trackable
_ListWrapper objects to object-identity collections.
"""
__slots__ = ["_wrapped"]
def __init__(self, wrapped):
self._wrapped = wrapped
@property
def unwrapped(self):
return self._wrapped
def _assert_type(self, other):
if not isinstance(other, _ObjectIdentityWrapper):
raise TypeError("Cannot compare wrapped object with unwrapped object")
def __lt__(self, other):
self._assert_type(other)
return id(self._wrapped) < id(other._wrapped) # pylint: disable=protected-access
def __gt__(self, other):
self._assert_type(other)
return id(self._wrapped) > id(other._wrapped) # pylint: disable=protected-access
def __eq__(self, other):
if other is None:
return False
self._assert_type(other)
return self._wrapped is other._wrapped # pylint: disable=protected-access
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
# Wrapper id() is also fine for weakrefs. In fact, we rely on
# id(weakref.ref(a)) == id(weakref.ref(a)) and weakref.ref(a) is
# weakref.ref(a) in _WeakObjectIdentityWrapper.
return id(self._wrapped)
def __repr__(self):
return "<{} wrapping {!r}>".format(type(self).__name__, self._wrapped)
class _WeakObjectIdentityWrapper(_ObjectIdentityWrapper):
def __init__(self, wrapped):
super(_WeakObjectIdentityWrapper, self).__init__(weakref.ref(wrapped))
@property
def unwrapped(self):
return self._wrapped()
class Reference(_ObjectIdentityWrapper):
"""Reference that refers an object.
```python
x = [1]
y = [1]
x_ref1 = Reference(x)
x_ref2 = Reference(x)
y_ref2 = Reference(y)
print(x_ref1 == x_ref2)
==> True
print(x_ref1 == y)
==> False
```
"""
# Disabling super class' unwrapped field.
unwrapped = property()
def deref(self):
"""Returns the referenced object.
```python
x_ref = Reference(x)
print(x is x_ref.deref())
==> True
```
"""
return self._wrapped
class ObjectIdentityDictionary(collections_abc.MutableMapping):
"""A mutable mapping data structure which compares using "is".
This is necessary because we have trackable objects (_ListWrapper) which
have behavior identical to built-in Python lists (including being unhashable
and comparing based on the equality of their contents by default).
"""
def __init__(self):
self._storage = {}
def _wrap_key(self, key):
return _ObjectIdentityWrapper(key)
def __getitem__(self, key):
return self._storage[self._wrap_key(key)]
def __setitem__(self, key, value):
self._storage[self._wrap_key(key)] = value
def __delitem__(self, key):
del self._storage[self._wrap_key(key)]
def __len__(self):
return len(self._storage)
def __iter__(self):
for key in self._storage:
yield key.unwrapped
def __repr__(self):
return "ObjectIdentityDictionary(%s)" % repr(self._storage)
class ObjectIdentityWeakKeyDictionary(ObjectIdentityDictionary):
"""Like weakref.WeakKeyDictionary, but compares objects with "is"."""
def _wrap_key(self, key):
return _WeakObjectIdentityWrapper(key)
def __len__(self):
# Iterate, discarding old weak refs
return len(list(self._storage))
def __iter__(self):
keys = self._storage.keys()
for key in keys:
unwrapped = key.unwrapped
if unwrapped is None:
del self[key]
else:
yield unwrapped
class ObjectIdentitySet(collections_abc.MutableSet):
"""Like the built-in set, but compares objects with "is"."""
def __init__(self, *args):
self._storage = set([self._wrap_key(obj) for obj in list(*args)])
@staticmethod
def _from_storage(storage):
result = ObjectIdentitySet()
result._storage = storage # pylint: disable=protected-access
return result
def _wrap_key(self, key):
return _ObjectIdentityWrapper(key)
def __contains__(self, key):
return self._wrap_key(key) in self._storage
def discard(self, key):
self._storage.discard(self._wrap_key(key))
def add(self, key):
self._storage.add(self._wrap_key(key))
def update(self, items):
self._storage.update([self._wrap_key(item) for item in items])
def intersection(self, items):
return self._storage.intersection([self._wrap_key(item) for item in items])
def difference(self, items):
return ObjectIdentitySet._from_storage(
self._storage.difference([self._wrap_key(item) for item in items]))
def __len__(self):
return len(self._storage)
def __iter__(self):
keys = list(self._storage)
for key in keys:
yield key.unwrapped
class ObjectIdentityWeakSet(ObjectIdentitySet):
"""Like weakref.WeakSet, but compares objects with "is"."""
def _wrap_key(self, key):
return _WeakObjectIdentityWrapper(key)
def __len__(self):
# Iterate, discarding old weak refs
return len([_ for _ in self])
def __iter__(self):
keys = list(self._storage)
for key in keys:
unwrapped = key.unwrapped
if unwrapped is None:
self.discard(key)
else:
yield unwrapped
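# A minimal usage sketch: identity-based containers can hold unhashable,
# value-equal objects that a regular dict or set could not key at all:
#
#   x, y = [1], [1]        # equal by value, distinct by identity
#   d = ObjectIdentityDictionary()
#   d[x], d[y] = 'first', 'second'
#   assert len(d) == 2
#   s = ObjectIdentitySet([x])
#   assert x in s and y not in s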
|
{
"content_hash": "c3a95850882201161c9849de5d9f573e",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 85,
"avg_line_length": 27.186147186147185,
"alnum_prop": 0.6643312101910828,
"repo_name": "arborh/tensorflow",
"id": "a5ad1e772450f00c1a1eea372caa64008cfe1bf5",
"size": "6280",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/python/util/object_identity.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5003"
},
{
"name": "Batchfile",
"bytes": "45988"
},
{
"name": "C",
"bytes": "773694"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "76730781"
},
{
"name": "CMake",
"bytes": "6545"
},
{
"name": "Dockerfile",
"bytes": "81136"
},
{
"name": "Go",
"bytes": "1679107"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "952944"
},
{
"name": "Jupyter Notebook",
"bytes": "567243"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1299305"
},
{
"name": "Makefile",
"bytes": "61397"
},
{
"name": "Objective-C",
"bytes": "104706"
},
{
"name": "Objective-C++",
"bytes": "297753"
},
{
"name": "PHP",
"bytes": "24055"
},
{
"name": "Pascal",
"bytes": "3752"
},
{
"name": "Pawn",
"bytes": "17546"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "38757009"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "7459"
},
{
"name": "Shell",
"bytes": "643787"
},
{
"name": "Smarty",
"bytes": "34727"
},
{
"name": "Swift",
"bytes": "62814"
}
],
"symlink_target": ""
}
|
"""
Worker that receives input from Piped RDD.
"""
from __future__ import print_function
import os
import sys
import time
import socket
import traceback
from pyspark.accumulators import _accumulatorRegistry
from pyspark.broadcast import Broadcast, _broadcastRegistry
from pyspark.files import SparkFiles
from pyspark.serializers import write_with_length, write_int, read_long, \
write_long, read_int, SpecialLengths, UTF8Deserializer, PickleSerializer, BatchedSerializer
from pyspark import shuffle
pickleSer = PickleSerializer()
utf8_deserializer = UTF8Deserializer()
def report_times(outfile, boot, init, finish):
write_int(SpecialLengths.TIMING_DATA, outfile)
write_long(int(1000 * boot), outfile)
write_long(int(1000 * init), outfile)
write_long(int(1000 * finish), outfile)
def add_path(path):
    # worker can be reused, so do not add the path multiple times
if path not in sys.path:
# overwrite system packages
sys.path.insert(1, path)
def read_command(serializer, file):
command = serializer._read_with_length(file)
if isinstance(command, Broadcast):
command = serializer.loads(command.value)
return command
def chain(f, g):
"""chain two function together """
return lambda *a: g(f(*a))
def wrap_udf(f, return_type):
if return_type.needConversion():
toInternal = return_type.toInternal
return lambda *a: toInternal(f(*a))
else:
return lambda *a: f(*a)
def read_single_udf(pickleSer, infile):
num_arg = read_int(infile)
arg_offsets = [read_int(infile) for i in range(num_arg)]
row_func = None
for i in range(read_int(infile)):
f, return_type = read_command(pickleSer, infile)
if row_func is None:
row_func = f
else:
row_func = chain(row_func, f)
# the last returnType will be the return type of UDF
return arg_offsets, wrap_udf(row_func, return_type)
def read_udfs(pickleSer, infile):
num_udfs = read_int(infile)
if num_udfs == 1:
# fast path for single UDF
_, udf = read_single_udf(pickleSer, infile)
mapper = lambda a: udf(*a)
else:
udfs = {}
call_udf = []
for i in range(num_udfs):
arg_offsets, udf = read_single_udf(pickleSer, infile)
udfs['f%d' % i] = udf
args = ["a[%d]" % o for o in arg_offsets]
call_udf.append("f%d(%s)" % (i, ", ".join(args)))
# Create function like this:
# lambda a: (f0(a0), f1(a1, a2), f2(a3))
mapper_str = "lambda a: (%s)" % (", ".join(call_udf))
mapper = eval(mapper_str, udfs)
func = lambda _, it: map(mapper, it)
ser = BatchedSerializer(PickleSerializer(), 100)
# profiling is not supported for UDF
return func, None, ser, ser
def main(infile, outfile):
try:
boot_time = time.time()
split_index = read_int(infile)
if split_index == -1: # for unit tests
exit(-1)
version = utf8_deserializer.loads(infile)
if version != "%d.%d" % sys.version_info[:2]:
raise Exception(("Python in worker has different version %s than that in " +
"driver %s, PySpark cannot run with different minor versions") %
("%d.%d" % sys.version_info[:2], version))
# initialize global state
shuffle.MemoryBytesSpilled = 0
shuffle.DiskBytesSpilled = 0
_accumulatorRegistry.clear()
# fetch name of workdir
spark_files_dir = utf8_deserializer.loads(infile)
SparkFiles._root_directory = spark_files_dir
SparkFiles._is_running_on_worker = True
# fetch names of includes (*.zip and *.egg files) and construct PYTHONPATH
add_path(spark_files_dir) # *.py files that were added will be copied here
num_python_includes = read_int(infile)
for _ in range(num_python_includes):
filename = utf8_deserializer.loads(infile)
add_path(os.path.join(spark_files_dir, filename))
if sys.version > '3':
import importlib
importlib.invalidate_caches()
# fetch names and values of broadcast variables
num_broadcast_variables = read_int(infile)
for _ in range(num_broadcast_variables):
bid = read_long(infile)
if bid >= 0:
path = utf8_deserializer.loads(infile)
_broadcastRegistry[bid] = Broadcast(path=path)
else:
bid = - bid - 1
_broadcastRegistry.pop(bid)
_accumulatorRegistry.clear()
is_sql_udf = read_int(infile)
if is_sql_udf:
func, profiler, deserializer, serializer = read_udfs(pickleSer, infile)
else:
func, profiler, deserializer, serializer = read_command(pickleSer, infile)
init_time = time.time()
def process():
iterator = deserializer.load_stream(infile)
serializer.dump_stream(func(split_index, iterator), outfile)
if profiler:
profiler.profile(process)
else:
process()
except Exception:
try:
write_int(SpecialLengths.PYTHON_EXCEPTION_THROWN, outfile)
write_with_length(traceback.format_exc().encode("utf-8"), outfile)
except IOError:
# JVM close the socket
pass
except Exception:
# Write the error to stderr if it happened while serializing
print("PySpark worker failed with exception:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
exit(-1)
finish_time = time.time()
report_times(outfile, boot_time, init_time, finish_time)
write_long(shuffle.MemoryBytesSpilled, outfile)
write_long(shuffle.DiskBytesSpilled, outfile)
# Mark the beginning of the accumulators section of the output
write_int(SpecialLengths.END_OF_DATA_SECTION, outfile)
write_int(len(_accumulatorRegistry), outfile)
for (aid, accum) in _accumulatorRegistry.items():
pickleSer._write_with_length((aid, accum._value), outfile)
# check end of stream
if read_int(infile) == SpecialLengths.END_OF_STREAM:
write_int(SpecialLengths.END_OF_STREAM, outfile)
else:
# write a different value to tell JVM to not reuse this worker
write_int(SpecialLengths.END_OF_DATA_SECTION, outfile)
exit(-1)
if __name__ == '__main__':
# Read a local port to connect to from stdin
java_port = int(sys.stdin.readline())
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("127.0.0.1", java_port))
sock_file = sock.makefile("rwb", 65536)
main(sock_file, sock_file)
|
{
"content_hash": "513ef0dfbcca76a2533f4551c8765b13",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 95,
"avg_line_length": 34.855670103092784,
"alnum_prop": 0.6223010943507838,
"repo_name": "DataReplyUK/datareplyuk",
"id": "cf47ab8f96c6d1c70c8a4990138e926394fb6c2c",
"size": "7547",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "GenesAssociation/spark-2.0.0-bin-hadoop2.7/python/pyspark/worker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "28006"
},
{
"name": "CSS",
"bytes": "1179"
},
{
"name": "HTML",
"bytes": "2046818"
},
{
"name": "Java",
"bytes": "372355"
},
{
"name": "Makefile",
"bytes": "16997"
},
{
"name": "Python",
"bytes": "2254516"
},
{
"name": "R",
"bytes": "221857"
},
{
"name": "Scala",
"bytes": "553833"
},
{
"name": "Shell",
"bytes": "57950"
}
],
"symlink_target": ""
}
|
"""Amazon boto3 interface."""
from __future__ import absolute_import, unicode_literals
try:
import boto3
from botocore import exceptions
from botocore.awsrequest import AWSRequest
from botocore.response import get_response
except ImportError:
boto3 = None
class _void(object):
pass
class BotoCoreError(Exception):
pass
exceptions = _void()
exceptions.BotoCoreError = BotoCoreError
AWSRequest = _void()
get_response = _void()
__all__ = (
'exceptions', 'AWSRequest', 'get_response'
)
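# A minimal usage sketch: these names always import, so callers can defer the
# hard dependency check until AWS support is actually needed:
#
#   from kombu.asynchronous.aws.ext import boto3, exceptions
#   if boto3 is None:
#       raise RuntimeError('boto3 is required for the AWS transport')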
|
{
"content_hash": "7a01c2b601a437e29c43277eeb87b650",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 56,
"avg_line_length": 22.04,
"alnum_prop": 0.6733212341197822,
"repo_name": "cloudera/hue",
"id": "8c962df91087de4c48dc09c9881f8a11e4ddae0e",
"size": "575",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/kombu-4.3.0/kombu/asynchronous/aws/ext.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "2347"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "BASIC",
"bytes": "2884"
},
{
"name": "Batchfile",
"bytes": "143575"
},
{
"name": "C",
"bytes": "5129166"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "718011"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "680715"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "Closure Templates",
"bytes": "1072"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "Cython",
"bytes": "1016963"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "13576"
},
{
"name": "EJS",
"bytes": "752"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GAP",
"bytes": "29873"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "641"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "28328425"
},
{
"name": "Haml",
"bytes": "920"
},
{
"name": "Handlebars",
"bytes": "173"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "Java",
"bytes": "457398"
},
{
"name": "JavaScript",
"bytes": "39181239"
},
{
"name": "Jinja",
"bytes": "356"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Less",
"bytes": "396102"
},
{
"name": "Lex",
"bytes": "218764"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1751"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "1025937"
},
{
"name": "Mako",
"bytes": "3644004"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "29403"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "84273"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "6235"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Pug",
"bytes": "584"
},
{
"name": "Python",
"bytes": "92881549"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "484108"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "SCSS",
"bytes": "78508"
},
{
"name": "Sass",
"bytes": "770"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "249165"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "SourcePawn",
"bytes": "948"
},
{
"name": "Stylus",
"bytes": "682"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "341963"
},
{
"name": "Twig",
"bytes": "761"
},
{
"name": "TypeScript",
"bytes": "1241396"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "Vue",
"bytes": "350385"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "522199"
},
{
"name": "Yacc",
"bytes": "1070437"
},
{
"name": "jq",
"bytes": "4"
}
],
"symlink_target": ""
}
|
import pytz
import numbers
from hashlib import md5
from datetime import datetime
from zipline.protocol import DATASOURCE_TYPE
from six import iteritems, b
def hash_args(*args, **kwargs):
"""Define a unique string for any set of representable args."""
arg_string = '_'.join([str(arg) for arg in args])
kwarg_string = '_'.join([str(key) + '=' + str(value)
for key, value in iteritems(kwargs)])
combined = ':'.join([arg_string, kwarg_string])
hasher = md5()
hasher.update(b(combined))
return hasher.hexdigest()
def assert_datasource_protocol(event):
"""Assert that an event meets the protocol for datasource outputs."""
assert event.type in DATASOURCE_TYPE
# Done packets have no dt.
if not event.type == DATASOURCE_TYPE.DONE:
assert isinstance(event.dt, datetime)
assert event.dt.tzinfo == pytz.utc
def assert_trade_protocol(event):
"""Assert that an event meets the protocol for datasource TRADE outputs."""
assert_datasource_protocol(event)
assert event.type == DATASOURCE_TYPE.TRADE
assert isinstance(event.sid, int)
assert isinstance(event.price, numbers.Real)
assert isinstance(event.volume, numbers.Integral)
assert isinstance(event.dt, datetime)
def assert_datasource_unframe_protocol(event):
"""Assert that an event is valid output of zp.DATASOURCE_UNFRAME."""
assert event.type in DATASOURCE_TYPE
def assert_sort_protocol(event):
"""Assert that an event is valid input to zp.FEED_FRAME."""
assert event.type in DATASOURCE_TYPE
def assert_sort_unframe_protocol(event):
"""Same as above."""
assert event.type in DATASOURCE_TYPE
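# A minimal usage sketch: hash_args is deterministic, so the digest can be
# used to key caches of datasource output:
#
#   assert hash_args(1, 'a', freq='daily') == hash_args(1, 'a', freq='daily')
#   assert hash_args(1, 'a') != hash_args(2, 'a')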
|
{
"content_hash": "86a3fb826f981c90c3e3668c60f261a4",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 79,
"avg_line_length": 29.68421052631579,
"alnum_prop": 0.693853427895981,
"repo_name": "lsbardel/zipline",
"id": "9bd8f2c583b432be3705381f546c900334db42d8",
"size": "2276",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zipline/gens/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
"""
Django settings for {{cookiecutter.project_name}} project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
import environ
ROOT_DIR = environ.Path(__file__) - 3 # ({{ cookiecutter.project_slug }}/config/settings/common.py - 3 = {{ cookiecutter.project_slug }}/)
APPS_DIR = ROOT_DIR.path('{{ cookiecutter.project_slug }}')
env = environ.Env()
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'crispy_forms', # Form layouts
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
)
# Apps specific for this project go here.
LOCAL_APPS = (
'{{ cookiecutter.project_slug }}.users', # custom users app
# Your stuff: custom apps go here
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES = (
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
'sites': '{{ cookiecutter.project_slug }}.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool('DJANGO_DEBUG', False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
("""{{cookiecutter.author_name}}""", '{{cookiecutter.email}}'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
'default': env.db('DATABASE_URL', default='postgres://{% if cookiecutter.windows == 'y' %}localhost{% endif %}/{{cookiecutter.project_slug}}'),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = '{{ cookiecutter.timezone }}'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Your stuff: custom template context processors go here
],
},
},
]
# See: http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
str(APPS_DIR.path('static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_ALLOW_REGISTRATION = env.bool('DJANGO_ACCOUNT_ALLOW_REGISTRATION', True)
ACCOUNT_ADAPTER = '{{cookiecutter.project_slug}}.users.adapters.AccountAdapter'
SOCIALACCOUNT_ADAPTER = '{{cookiecutter.project_slug}}.users.adapters.SocialAccountAdapter'
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_URL = 'account_login'
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
{% if cookiecutter.use_celery == 'y' %}
# CELERY CONFIGURATION
# ------------------------------------------------------------------------------
INSTALLED_APPS += ('{{cookiecutter.project_slug}}.taskapp.celery.CeleryConfig',)
# If you use a broker other than the Django database (e.g. RabbitMQ or Redis), you can remove the next line.
INSTALLED_APPS += ('kombu.transport.django',)
BROKER_URL = env('CELERY_BROKER_URL', default='django://')
# END CELERY CONFIGURATION
# ------------------------------------------------------------------------------
{% endif %}
# Location of root django.contrib.admin URL, use {% raw %}{% url 'admin:index' %}{% endraw %}
ADMIN_URL = r'^admin/'
# Your common stuff: Below this line define 3rd party library settings
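# A minimal usage sketch (values are illustrative): django-environ reads these
# settings from the process environment, e.g.
#
#   DJANGO_DEBUG=true                         -> DEBUG = True
#   DATABASE_URL=postgres://user:pass@localhost/{{cookiecutter.project_slug}}
#                                             -> DATABASES['default']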
|
{
"content_hash": "87854bec1073204ab8ad0765dd1ae2db",
"timestamp": "",
"source": "github",
"line_count": 238,
"max_line_length": 147,
"avg_line_length": 38.09243697478992,
"alnum_prop": 0.6169203617913082,
"repo_name": "ingenioustechie/cookiecutter-django-openshift",
"id": "3e11ee4333ab92cbf813a7b2c8f0a266d5a437c1",
"size": "9090",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "{{cookiecutter.project_slug}}/config/settings/common.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "5140"
},
{
"name": "CSS",
"bytes": "1768"
},
{
"name": "HTML",
"bytes": "20949"
},
{
"name": "JavaScript",
"bytes": "3670"
},
{
"name": "Makefile",
"bytes": "5652"
},
{
"name": "Nginx",
"bytes": "1095"
},
{
"name": "Python",
"bytes": "66898"
},
{
"name": "Shell",
"bytes": "4622"
}
],
"symlink_target": ""
}
|
import json,numpy,sys,os,shutil
import theano
import theano.tensor as T
from pythonDnn.utils.utils import dimshuffle
from pythonDnn.io_modules.file_writer import write_dataset
from pythonDnn.io_modules.file_reader import read_dataset
from pythonDnn.io_modules import create_folder_structure_if_not_exists
import logging
logger = logging.getLogger(__name__)
def export_data(data_spec,export_path,out_fn,out_featdim):
logger.info("%s data exporter will be initialized to export to %s",
data_spec['reader_type'],export_path);
exporter = DataExporter.get_instance(data_spec,export_path);
exporter.dump_data(out_fn,out_featdim);
##########################################BASE CLASS##############################################################
'''
DataExporter class focuses on exporting the features which are derived from the neural network model
'''
class DataExporter(object):
    '''Exports the derived features in the same layout as the input data'''
data_spec = None
export_path = None
@staticmethod
def get_instance(data_spec,export_path):
if(data_spec['reader_type']=='T1'):
return T1DataExporter(data_spec,export_path);
elif(data_spec['reader_type']=='T2'):
return T2DataExporter(data_spec,export_path);
elif(data_spec['reader_type']=='NP'):
return NPDataExporter(data_spec,export_path);
else:
logger.critical(" '%s\' reader_type is not defined...",data_spec['reader_type'])
    def dump_data(self,out_fn,out_featdim):
pass
##########################################T1 DataExporter##############################################################
'''
T1DataExporter focuses on exporting the features which are derived from the neural network model where the dataset is \
represented as a T1 Dataset
'''
class T1DataExporter(DataExporter):
def __init__(self,data_spec,export_path):
self.data_spec = data_spec;
self.export_path = export_path;
def dump_data(self,out_fn,out_featdim):
filepath = self.data_spec['base_path'] + os.sep + self.data_spec['filename']
copy_path = create_folder_structure_if_not_exists(self.export_path + os.sep + self.data_spec['filename'])
shutil.copy(filepath,copy_path); #copies the file directly
self.filehandle = open(filepath,'rb')
line = self.filehandle.readline(); # reading file header
header = line.split();
num_classes = int(header[1]);
for idx in xrange(num_classes):
in_child_options = self.data_spec.copy();
in_child_options['filename'] = self.filehandle.readline().strip() #filename of individual classes
in_child_options['reader_type'] = "TD"
in_child_options['label'] = idx;
file_reader = read_dataset(in_child_options,pad_zeros=True) #taking only one reader
out_child_options = in_child_options.copy();
out_child_options['base_path'] = self.export_path;
out_child_options['featdim'] = out_featdim;
out_child_options['writer_type'] = "TD"
file_writer = write_dataset(out_child_options);
batch_size=file_reader.batch_size
while (not file_reader.is_finish()):
for batch_index in xrange(file_reader.nBatches):
s_idx = batch_index*batch_size;
e_idx = s_idx + batch_size
data = out_fn(file_reader.feat[s_idx:e_idx])
label = file_reader.label[s_idx:e_idx];
if ((batch_index == file_reader.nBatches-1) and (not file_reader.num_pad_frames == 0)) :
data=data[:-file_reader.num_pad_frames]
label = label[:-file_reader.num_pad_frames]
file_writer.write_data(data,label);
file_reader.read_next_partition_data(pad_zeros=True);
logger.debug('T1 Dataexporter : data is exported to %s' % self.export_path);
##########################################T2 DataExporter##############################################################
'''
T2DataExporter focuses on exporting the features which are derived from the neural network model where the dataset is \
represented as a T2 Dataset
'''
class T2DataExporter(DataExporter):
def __init__(self,data_spec,export_path):
self.data_spec = data_spec;
self.export_path = export_path;
def dump_data(self,out_fn,out_featdim):
filepath = self.data_spec['base_path'] + os.sep + self.data_spec['filename']
copy_path = create_folder_structure_if_not_exists(self.export_path + os.sep + self.data_spec['filename'])
shutil.copy(filepath,copy_path); #copies the file directly
self.filehandle = open(filepath,'rb')
line = self.filehandle.readline(); # reading file header
header = line.split();
num_classes = int(header[1]);
for idx in xrange(num_classes):
level1_filename = self.filehandle.readline().strip();
level1_filepath = self.data_spec['base_path'] + os.sep + level1_filename #filename of individual classes
copy_path = create_folder_structure_if_not_exists(self.export_path + os.sep + level1_filename)
shutil.copy(level1_filepath,copy_path); #copies the index file directly from the source directly
self.level1FileHandle = open(level1_filepath,'rb');
level2_filepath = self.level1FileHandle.readline().strip();
while level2_filepath.__len__()!= 0:
in_child_options = self.data_spec.copy();
in_child_options['filename'] = level2_filepath #filename of individual classes
in_child_options['reader_type'] = "TD"
in_child_options['label'] = idx;
file_reader = read_dataset(in_child_options,pad_zeros=True) #taking only one reader
out_child_options = in_child_options.copy();
out_child_options['base_path'] = self.export_path; #updating the base_path
out_child_options['featdim'] = out_featdim;
out_child_options['writer_type'] = "TD"
file_writer = write_dataset(out_child_options);
batch_size=file_reader.batch_size
while not file_reader.is_finish():
for batch_index in xrange(file_reader.nBatches):
s_idx = batch_index * batch_size; e_idx = s_idx + batch_size
data = out_fn(file_reader.feat[s_idx:e_idx])
label = file_reader.label[s_idx:e_idx];
if ((batch_index == file_reader.nBatches-1) and (not file_reader.num_pad_frames == 0)) :
data=data[:-file_reader.num_pad_frames]
label = label[:-file_reader.num_pad_frames]
file_writer.write_data(data,label);
file_reader.read_next_partition_data(pad_zeros=True);
level2_filepath = self.level1FileHandle.readline().strip();
logger.debug('T2 Dataexporter : data is exported to %s' % self.export_path);
##########################################NP DataExporter##############################################################
'''
NPDataExporter focuses on exporting the features which are derived from the neural network model where the dataset is \
represented as an NP Dataset
'''
class NPDataExporter(DataExporter):
def __init__(self,data_spec,export_path):
self.data_spec = data_spec;
self.export_path = export_path;
def dump_data(self,out_fn,out_featdim):
file_reader = read_dataset(self.data_spec,pad_zeros=True) #taking only one reader
out_options = self.data_spec.copy();
out_options['base_path'] = self.export_path; #updating the base_path
out_options['featdim'] = out_featdim;
out_options['writer_type'] = "NP"
file_writer = write_dataset(out_options);
batch_size=file_reader.batch_size
while not file_reader.is_finish():
for batch_index in xrange(file_reader.nBatches):
s_idx = batch_index * batch_size; e_idx = s_idx + batch_size
data = out_fn(file_reader.feat[s_idx:e_idx])
label = file_reader.label[s_idx:e_idx];
if ((batch_index == file_reader.nBatches-1) and (not file_reader.num_pad_frames == 0)) :
data=data[:-file_reader.num_pad_frames]
label = label[:-file_reader.num_pad_frames]
file_writer.write_data(data,label);
file_reader.read_next_partition_data(pad_zeros=True);
logger.debug('NP Dataexporter : data is exported to %s' % self.export_path);
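# A minimal usage sketch (the data_spec fields and paths are illustrative;
# out_fn would typically be a compiled Theano function mapping input features
# to the network's hidden representation):
#
#   data_spec = {'reader_type': 'NP', 'base_path': '/data/in',
#                'filename': 'feats.npz'}
#   export_data(data_spec, '/data/exported', out_fn, out_featdim=128)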
|
{
"content_hash": "a21dda05b1ee61006d0b1861ebd29395",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 119,
"avg_line_length": 40.805263157894736,
"alnum_prop": 0.6640010318586353,
"repo_name": "IITM-DONLAB/python-dnn",
"id": "517b449917b08ff9dba6c54efd4fce1dc459b00c",
"size": "7753",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pythonDnn/io_modules/data_exporter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "205626"
},
{
"name": "Shell",
"bytes": "1196"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from functools import partial
from django.contrib.contenttypes.models import ContentType
from django.db import models, migrations
def remove_old_contentype(content_type, apps, schema_editor):
"""If we change model name, we need to remove its ContentType entry."""
ContentType.objects.filter(app_label='workshops', model=content_type) \
.delete()
class Migration(migrations.Migration):
atomic = False
dependencies = [
('workshops', '0010_merge'),
]
operations = [
migrations.RenameModel(
old_name='Skill',
new_name='Lesson',
),
migrations.RenameField(
model_name='qualification',
old_name='skill',
new_name='lesson',
),
migrations.RunPython(partial(remove_old_contentype, 'skill')),
]
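# Note: functools.partial pre-binds content_type='skill', so RunPython still
# invokes the callable with the usual (apps, schema_editor) arguments.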
|
{
"content_hash": "2f1f2e173bb399b7f1d2f0187dc63608",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 75,
"avg_line_length": 27.8125,
"alnum_prop": 0.6235955056179775,
"repo_name": "swcarpentry/amy",
"id": "0bbaa837c5bd0624a804b704e45d3192bd61019d",
"size": "914",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "amy/workshops/migrations/0011_auto_20150612_0803.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2855"
},
{
"name": "HTML",
"bytes": "252906"
},
{
"name": "JavaScript",
"bytes": "22092"
},
{
"name": "Makefile",
"bytes": "2915"
},
{
"name": "Python",
"bytes": "1431147"
}
],
"symlink_target": ""
}
|
from bpy.types import Panel
class MMDMaterialPanel(Panel):
bl_idname = 'MATERIAL_PT_mmd_tools_material'
bl_label = 'MMD Material'
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = 'material'
@classmethod
def poll(cls, context):
material = context.active_object.active_material
return material and material.mmd_material
def draw(self, context):
material = context.active_object.active_material
mmd_material = material.mmd_material
layout = self.layout
col = layout.column(align=True)
col.label('Information:')
c = col.column()
r = c.row()
r.prop(mmd_material, 'name_j')
r = c.row()
r.prop(mmd_material, 'name_e')
r = c.row()
r.prop(mmd_material, 'comment')
col = layout.column(align=True)
col.label('Color:')
c = col.column()
r = c.row()
r.prop(material, 'diffuse_color')
r = c.row()
r.label('Diffuse Alpha:')
r.prop(material, 'alpha')
r = c.row()
r.prop(mmd_material, 'ambient_color')
r = c.row()
r.prop(material, 'specular_color')
r = c.row()
r.label('Specular Alpha:')
r.prop(material, 'specular_alpha')
col = layout.column(align=True)
col.label('Shadow:')
c = col.column()
r = c.row()
r.prop(mmd_material, 'is_double_sided')
r.prop(mmd_material, 'enabled_drop_shadow')
r = c.row()
r.prop(mmd_material, 'enabled_self_shadow_map')
r.prop(mmd_material, 'enabled_self_shadow')
col = layout.column(align=True)
col.label('Edge:')
c = col.column()
r = c.row()
r.prop(mmd_material, 'enabled_toon_edge')
r.prop(mmd_material, 'edge_weight')
r = c.row()
r.prop(mmd_material, 'edge_color')
class MMDTexturePanel(Panel):
bl_idname = 'MATERIAL_PT_mmd_tools_texture'
bl_label = 'MMD Texture'
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = 'material'
@classmethod
def poll(cls, context):
material = context.active_object.active_material
return material and material.mmd_material
def draw(self, context):
material = context.active_object.active_material
mmd_material = material.mmd_material
layout = self.layout
tex_slots = material.texture_slots.values()
col = layout.column(align=True)
row = col.row(align=True)
row.label('Texture:')
r = row.column(align=True)
if tex_slots[0]:
tex = tex_slots[0].texture
if tex.type == 'IMAGE' and tex.image:
r2 = r.row(align=True)
r2.prop(tex.image, 'filepath', text='')
r2.operator('mmd_tools.material_remove_texture', text='', icon='PANEL_CLOSE')
else:
r.operator('mmd_tools.material_remove_texture', text='Remove', icon='PANEL_CLOSE')
col.label('Texture is invalid.', icon='ERROR')
else:
r.operator('mmd_tools.material_open_texture', text='Add', icon='FILESEL')
row = col.row(align=True)
row.label('Sphere Texture:')
r = row.column(align=True)
if tex_slots[1]:
tex = tex_slots[1].texture
if tex.type == 'IMAGE' and tex.image:
r2 = r.row(align=True)
r2.prop(tex.image, 'filepath', text='')
else:
r.operator('mmd_tools.material_remove_sphere_texture', text='Remove', icon='PANEL_CLOSE')
col.label('Sphere Texture is invalid.', icon='ERROR')
else:
r.operator('mmd_tools.material_open_texture', text='Add', icon='FILESEL')
col = layout.column(align=True)
c = col.column()
r = c.row()
r.prop(mmd_material, 'is_shared_toon_texture')
if mmd_material.is_shared_toon_texture:
r.prop(mmd_material, 'shared_toon_texture')
r = c.row()
r.prop(mmd_material, 'toon_texture')
r = c.row()
r.prop(mmd_material, 'sphere_texture_type')
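# --- Illustrative sketch, not part of the original module ---
# However the add-on actually wires these panels up, Blender needs the classes
# registered before the panels appear; a minimal manual registration for the
# 2.7x API would look like this (the register/unregister entry points here are
# assumptions for illustration):
import bpy
def register():
    bpy.utils.register_class(MMDMaterialPanel)
    bpy.utils.register_class(MMDTexturePanel)
def unregister():
    bpy.utils.unregister_class(MMDMaterialPanel)
    bpy.utils.unregister_class(MMDTexturePanel)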
|
{
"content_hash": "91e7813b00f6a5d1ef76b19d46525d78",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 105,
"avg_line_length": 33.25396825396825,
"alnum_prop": 0.5613365155131265,
"repo_name": "sugiany/blender_mmd_tools",
"id": "64712e746acaff8bc2abcf579a83d7c95ec38f93",
"size": "4215",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mmd_tools/panels/prop_material.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "335110"
}
],
"symlink_target": ""
}
|
import xml.etree.ElementTree as ET
import pprint
import re
import codecs
import json
"""
Clean and format the OSM data into JSON for import into MongoDB
"""
# REGEX to check for all lower case characters in a string
lower = re.compile(r'^([a-z]|_)*$')
# REGEX to check for colon values
lower_colon = re.compile(r'^([a-z]|_)*:([a-z]|_)*$')
# REGEX to check for mongodb specific characters
problemchars = re.compile(r'[=\+/&<>;\'"\?%#$@\,\. \t\r\n]')
'''
street_type_re
Regex which scans the last word of a string to determine whether we
have a match for a street name that is shortened or abbreviated; only
the final word at the end of the string is compared.
https://docs.python.org/2/library/re.html
\b assert position at a word boundary (^\w|\w$|\W\w|\w\W)
\S+ match any non-white space character [^\r\n\t\f ]
Quantifier: + Between one and unlimited times, as many times as possible, giving back as needed [greedy]
\.? matches the character . literally
Quantifier: ? Between zero and one time, as many times as possible, giving back as needed [greedy]
$ assert position at end of the string
https://www.regex101.com/#python
'''
street_type_re = re.compile(r'\b\S+\.?$', re.IGNORECASE)
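# Illustrative example (hypothetical street name, not from the dataset):
#   street_type_re.search("West Northern Lights Blvd").group() -> 'Blvd'
# i.e. only the trailing word is captured, and that word is later checked
# against expected_street_names below.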
# The expected street names
expected_street_names = ["Avenue", "Boulevard", "Circle", "Commons", "Court", "Drive", "Highway", "Lane", "Loop", "Parkway", "Place", "Road",
                         "Square", "Street", "Trail"]
'''
map_old_to_new
Create a dictionary to map abbreviations to the full street suffix
'''
map_old_to_new = {
"Ave" : "Avenue",
"Ave." : "Avenue",
"Blvd" : "Boulevard",
"Blvd." : "Boulevard",
"Cir" : "Circle",
"Cmn" : "Commons",
"Crt" : "Court",
"Crt." : "Court",
"Dr" : "Drive",
"Dr." : "Drive",
"Hwy" : "Highway",
"Ln" : "Lane",
"Ln." : "Lane",
"LN" : "Lane",
"Lp" : "Loop",
"PARK":"Park",
"Pk" : "Parkway",
"Pk." : "Parkway",
"Pl" : "Place",
"Pl." : "Place",
"Rd.": "Road",
"Rd" : "Road",
"Sq": "Sqaure",
"Sq.": "Sqaure",
"St": "Street",
"St.": "Street",
"Tr": "Trail",
"Tr.": "Trail",
"Ashwood": "Ashwood Street"
}
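# Illustrative use of the mapping (hypothetical input): feeding
# "Northern Lights Blvd" through update_streetname() below remaps the
# trailing suffix via this dictionary and yields "Northern Lights Boulevard".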
'''
Create the CREATED dictionary to store a node's metadata
'''
CREATED = [ "version", "changeset", "timestamp", "user", "uid"]
'''
Create the POSITION dictionary, which contains the latitude and
the longitude. Lat is in the 0 position, Lon is in the 1 position.
This will be used as a lookup dictionary to determine if a key
exists in an element
'''
POSITION = ["lat","lon"]
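# Example of the resulting ordering (hypothetical coordinates): a node with
# lat="61.19" lon="-149.90" is stored as node['pos'] == [61.19, -149.9]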
def shape_element(element):
'''
    shape_element will perform the following tasks:
- if second level tag "k" value contains problematic characters, it should be ignored
- if second level tag "k" value starts with "addr:", it should be added to a dictionary "address"
- if second level tag "k" value does not start with "addr:", but contains ":", you can process it
same as any other tag.
- if there is a second ":" that separates the type/direction of a street,
      the tag should be ignored
'''
# Create the node dictionary
node = {}
# Add the created object to the node dictionary
node['created'] = {}
# For Lat and Lon we will store these in a 'pos' (position)
# we need lat, lon and in specific order (LAT, LON)
node['pos'] =[0 for i in range(2)]
# Search only through the node and way types
if element.tag == "node" or element.tag == "way" :
# add the type to the node, the tag of the element
node['type'] = element.tag
# Search through the node and way types
# to build the CREATED and POSITION dictionaries
for k,v in element.attrib.iteritems():
# CREATE VALUES {"version", "changeset", "timestamp", "user", "uid"}
if k in CREATED:
node['created'][k] = v
                #TODO: make sure time is formatted from string to date
# Lat is in first position, Lon second position
# In JSON and mongodb we need to represent the Lat and Lon as floats
elif k in POSITION:
if k=="lat":
node['pos'][0]=(float(v))
else: # Lon
node['pos'][1]=(float(v))
# Key was not in the CREATED or POSITION dictionary
# Add a new key value pair
else:
node[k] = v
'''
        Set up processing for the TAGS - addresses and other metadata for the
node and way objects
'''
# Instantiate the address dictionary
address = {}
'''
Search all the subelements and prepare valid tags for processing
Any ignored data will be emitted to the console
'''
for tag in element.iter("tag"):
if is_valid_tag(tag) == True:
# address attributes - create the dictionary object to hold
# the attributes.
# use a slice of the item from beginning for 5 characters
if tag.attrib['k'][:5] == "addr:":
# Set the keyName to the text to the RIGHT of the colon, dropping "addr:"
newKey = tag.attrib['k'][5:]
# if there is a second ":" that separates the
# type/direction of a street ignore it - Per Assignment
if newKey.count(":")> 0:
                        print "found a second colon and it's not a simple address - ignoring it", newKey
else:
# Add new key to the address object, and assign the
# value to the key
address[newKey] = tag.attrib['v']
# Clean the Address
if newKey == "street":
clean_name = update_streetname(tag.attrib['v'], map_old_to_new)
address[newKey] = clean_name
# Clean the postcode
if newKey == "postcode":
clean_zip = update_zipcode(tag.attrib['v'] )
address[newKey] = clean_zip
# Clean the state, assume all states should be AK
if newKey == "state":
if tag.attrib['v'] != "AK":
address[newKey] = "AK"
# clean the city name
if newKey == "city":
clean_city = update_city(tag.attrib['v'])
address[newKey] = clean_city
                # we have a generic tag item with no colon, to be added at the root of the node/way object
elif tag.attrib['k'].count(":") < 1:
plainKey = tag.attrib['k']
#print "Plain KEY", tag.attrib['k'], tag.attrib['v']
node[plainKey] = tag.attrib['v']
# For keys similar to the "addr:" key process these keys like the generic keys
elif tag.attrib['k'].count(":") == 1 and tag.attrib['k'][:5] != "addr:" and tag.attrib['k'][5:] != "created" :
# Get the length to the colon, and get the text from the
# right of colon to the end for the key.
# We are going to strip off the first text to the left of
# the colon, for readability and mongodb
keyIndex = tag.attrib['k'].find(":")
# increment by one so we start at the new key name
keyIndex += 1
# Get the key name and create a dictionary for this key and value
oddKey = tag.attrib['k'][keyIndex:]
node[oddKey] = tag.attrib['v']
else:
                    print "Ignoring tag - tag is invalid", tag.attrib['k'], tag.attrib['v']
# Search for any node_refs in the sub arrays - just for the way tag, per instructions
node_refs = []
if element.tag =="way":
for ndref in element.iter("nd"):
node_refs.append(ndref.attrib['ref'])
# Check to see if we have any node_refs, if we do add the node_refs to the node
if len(node_refs) > 0:
node['node_refs'] = node_refs
# Check to see if we have any addresses, if we have addresses add the addresses to the node
if len(address)>0:
node['address'] = address
return node
else:
return None
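# Usage sketch (hypothetical element, not taken from the actual extract):
#   el = ET.fromstring('<node id="1" lat="61.19" lon="-149.90" version="2" '
#                      'changeset="1" timestamp="2013-08-03T16:43:42Z" '
#                      'user="someone" uid="1">'
#                      '<tag k="addr:street" v="Main St"/></node>')
#   shape_element(el)['address']['street'] -> 'Main Street'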
def is_valid_tag(element):
    '''
    Check for valid tags; return True for valid tags, False for invalid
    '''
    # A key is valid when it contains none of the problematic characters
    return problemchars.search(element.attrib['k']) is None
def process_map(file_in, pretty = False):
'''
Process map reads in the OpenStreet Map file
and writes out to file the JSON data structure
file_in is the path and filename, pretty parameter formats the json
'''
    # Keep the same filename and just append .2.json to it
file_out = "{0}.2.json".format(file_in)
data = []
with codecs.open(file_out, "w") as fo:
# Go element by element to read the file
for _, element in ET.iterparse(file_in):
el = shape_element(element)
# If we have an element add it to the dictionary
# and write the data to a file
if el:
data.append(el)
if pretty:
fo.write(json.dumps(el, indent=2)+"\n")
else:
fo.write(json.dumps(el) + "\n")
return data
def update_streetname(name, map_old_to_new):
'''
Update name compares current name to the map of bad values to good values
and provides the updated name back to the method
'''
    for iName in map_old_to_new.keys():
        # Escape the abbreviation so regex metacharacters such as '.' match
        # literally, and anchor to the end of the string so only the street
        # suffix is compared
        pattern = re.escape(iName) + '$'
        # Check to see if we find a match for a bad value in our map
        match = re.search(pattern, name)
        # if a match is found then remap the old value with the new value
        if match:
            name = re.sub(pattern, map_old_to_new[iName], name)
return name
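# Example (hypothetical input): update_streetname("Tudor Rd", map_old_to_new)
# returns "Tudor Road" because the trailing "Rd" is remapped by the dictionary.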
def update_zipcode(zipcode):
'''
Clean the zip code
These are a few of the errors one might encounter
{ "_id" : "Homer, AK 99603", "count" : 2 }
{ "_id" : "AK", "count" : 1 }
{ "_id" : "Alaska", "count" : 1 }
{ "_id" : "AK 99501-2129", "count" : 1 }
{ "_id" : "AK 99501-2118", "count" : 1 }
'''
    # use regex to remove all non-digit characters from the zipcode,
    # leaving a numeric string which should be 5 or 9 digits long
zipcode_clean = re.sub(r"\D", "", zipcode)
return zipcode_clean
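# Examples mirroring the error cases listed in the docstring above:
#   update_zipcode("AK 99501-2129")   -> "995012129"
#   update_zipcode("Homer, AK 99603") -> "99603"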
def update_city(cityname):
'''
TODO
    Scan the dictionary of city names and
    fix bad spellings; improve the algorithm over time
'''
if cityname == "Anchoage":
cityname = "Anchorage"
return cityname
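# Example taken straight from the rule above: update_city("Anchoage") -> "Anchorage"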
def test():
# NOTE: if you are running this code on your computer, with a larger dataset,
# call the process_map procedure with pretty=False. The pretty=True option adds
# additional spaces to the output, making it significantly larger.
data = process_map('Alaska_Small.xml', False)
pprint.pprint(len(data))
print "DONE"
if __name__ == "__main__":
test()
|
{
"content_hash": "78ff0485033922a38a362d1f52a399a4",
"timestamp": "",
"source": "github",
"line_count": 316,
"max_line_length": 141,
"avg_line_length": 37.0126582278481,
"alnum_prop": 0.544203146374829,
"repo_name": "craignicholson/P2",
"id": "4112ddb3f10e10531e8623a04c0b34749c9a3128",
"size": "11742",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Lesson_6/PreparingForDatabaseV2/data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35345"
}
],
"symlink_target": ""
}
|