prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
"""Provides Django-Admin form field."""
# coding=utf-8
from django.utils.translation import ugettext_lazy as _
from django. | forms.fields import Field, ValidationError
from tempo.django.widgets import RecurrentEventSetWidget
from tempo.recurrenteventset import RecurrentEventSet
class RecurrentEventSetField(Field):
"""Form field, for usage in admin forms.
Represents RecurrentEventSet.""" |
# pylint: disable=no-init
widget = RecurrentEventSetWidget
def clean(self, value):
"""Cleans and validates RecurrentEventSet expression."""
# pylint: disable=no-self-use
if value is None:
return None
if not RecurrentEventSet.validate_json(value):
raise ValidationError(_('Invalid input.'),
code='invalid')
return RecurrentEventSet.from_json(value)
|
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import print_function, unicode_literals, absolute_import
import json
import pytest
import requests
import requests.exceptions
from tests.constants import LOCALHOST_REGISTRY_HTTP, DOCKER0_REGISTRY_HTTP, MOCK, TEST_IMAGE
from tests.util import uuid_value
from osbs.utils import ImageName
from atomic_reactor.core import ContainerTasker
from atomic_reactor.constants import CONTAINER_DOCKERPY_BUILD_METHOD
from atomic_reactor.inner import DockerBuildWorkflow
from tests.constants import MOCK_SOURCE
if MOCK:
from tests.docker_mock import mock_docker
@pytest.fixture()
def temp_image_name():
return ImageName(repo=("atomic-reactor-tests-%s" % uuid_value()))
@pytest.fixture()
def is_registry_running():
"""
is docker registry running (at {docker0,lo}:5000)?
"""
try:
lo_response = requests.get(LOCALHOST_REGISTRY_HTTP)
except requests.exceptions.ConnectionError:
return False
if not lo_response.ok:
return False
try:
lo_response = requests.get(DOCKER0_REGISTRY_HTTP) # leap of faith
except requests.exceptions.ConnectionError:
return False
if not lo_response.ok:
return False
return True
@pytest.fixture(scope="module")
def docker_tasker():
if MOCK:
mock_docker()
ct = ContainerTasker(re | try_times=0)
ct.build_method = CONTAINER_DOCKERPY_BUILD_METHOD
return ct
@pytest.fixture(params=[True, False])
def reactor_config_map(request):
return request.param
@pytest.fixture(params=[True, False])
def inspect_only(request):
return request.param
@pytest.fixture
def user_params(monkeypatch):
"""
Setting default image_tag in the env var USER_PARAMS. Any tests requiring
to create an instance of | :class:`DockerBuildWorkflow` requires this fixture.
"""
monkeypatch.setenv('USER_PARAMS', json.dumps({'image_tag': TEST_IMAGE}))
@pytest.fixture
def workflow(user_params):
return DockerBuildWorkflow(source=MOCK_SOURCE)
@pytest.mark.optionalhook
def pytest_html_results_table_row(report, cells):
if report.passed or report.skipped:
del cells[:]
|
[Line(Point(3, 3), Point(3, 5)), Line(Point(3, 3), Point(5, 3))]
assert Circle(Point(5, 5), 2).tangent_lines(Point(5 - 2*sqrt(2), 5)) == \
[Line(Point(5 - 2*sqrt(2), 5), Point(5 - sqrt(2), 5 - sqrt(2))),
Line(Point(5 - 2*sqrt(2), 5), Point(5 - sqrt(2), 5 + sqrt(2))),]
# Properties
major = 3
minor = 1
e4 = Ellipse(p2, minor, major)
assert e4.focus_distance == sqrt(major**2 - minor**2)
ecc = e4.focus_distance / major
assert e4.eccentricity == ecc
assert e4.periapsis == major*(1 - ecc)
assert e4.apoapsis == major*(1 + ecc)
# independent of orientation
e4 = Ellipse(p2, major, minor)
assert e4.focus_distance == sqrt(major**2 - minor**2)
ecc = e4.focus_distance / major
assert e4.eccentricity == ecc
assert e4.periapsis == major*(1 - ecc)
assert e4.apoapsis == major*(1 + ecc)
# Intersection
l1 = Line(Point(1, -5), Point(1, 5))
l2 = Line(Point(-5, -1), Point(5, -1))
l3 = Line(Point(-1, -1), Point(1, 1))
l4 = Line(Point(-10, 0), Point(0, 10))
pts_c1_l3 = [Point(sqrt(2)/2, sqrt(2)/2), Point(-sqrt(2)/2, -sqrt(2)/2)]
assert intersection(e2, l4) == []
assert intersection(c1, Point(1, 0)) == [Point(1, 0)]
assert intersection(c1, l1) == [Point(1, 0)]
assert intersection(c1, l2) == [Point(0, -1)]
assert intersection(c1, l3) in [pts_c1_l3, [pts_c1_l3[1], pts_c1_l3[0]]]
assert intersection(c1, c2) in [[(1,0), (0,1)],[(0,1),(1,0)]]
assert intersection(c1, c3) == [(sqrt(2)/2, sqrt(2)/2)]
# some special case intersections
csmall = Circle(p1, 3)
cbig = Circle(p1, 5)
cout = Circle(Point(5, 5), 1)
# one circle inside of another
assert csmall.intersection(cbig) == []
# separate circles
assert csmall.intersection(cout) == []
# coincident circles
assert csmall.intersection(csmall) == csmall
v = sqrt(2)
t1 = Triangle(Point(0, v), Point(0, -v), Point(v, 0))
points = intersection(t1, c1)
assert len(points) == 4
assert Point(0, 1) in points
assert Point(0, -1) in points
assert Point(v/2, v/2) in points
assert Point(v/2, -v/2) in points
circ = Circle(Point(0, 0), 5)
elip = Ellipse(Point(0, 0), 5, 20)
assert intersection(circ, elip) in \
[[Point(5, 0), Point(-5, 0)], [Point(-5, 0), Point(5, 0)]]
assert elip.tangent_lines(Point(0, 0)) == []
elip = Ellipse(Point(0, 0), 3, 2)
assert elip.tangent_lines(Point(3, 0)) == [Line(Point(3, 0), Point(3, -12))]
e1 = Ellipse(Point(0, 0), 5, 10)
e2 = Ellipse(Point(2, 1), 4, 8)
a = S(53)/17
c = 2*sqrt(3991)/17
assert e1.intersection(e2) == [Point(a - c/8, a/2 + c), Point(a + c/8, a/2 - c)]
# Combinations of above
assert e3.is_tangent(e3.tangent_lines(p1 + Point(y1, 0))[0])
e = Ellipse((1, 2), 3, 2)
assert e.tangent_lines(Point(10, 0)) == \
[Line(Point(10, 0), Point(1, 0)),
Line(Point(10, 0), Point(S(14)/5, S(18)/5))]
# encloses_point
e = Ellipse((0, 0), 1, 2)
assert e.encloses_point(e.center)
assert e.encloses_point(e.center + Point(0, e.vradius - Rational(1, 10)))
assert e.encloses_point(e.center + Point(e.hradius - Rational(1, 10), 0))
assert e.encloses_point(e.center + Point(e.hradius, 0)) is False
assert e.encloses_point(e.center + Point(e.hradius + Rational(1, 10), 0)) is False
e = Ellipse((0, 0), 2, 1)
assert e.encloses_point(e.center)
assert e.encloses_point(e.center + Point(0, e.vradius - Rational(1, 10)))
assert e.encloses_point(e.center + Point(e.hradius - Rational(1, 10), 0))
assert e.encloses_point(e.center + Point(e.hradius, 0)) is False
assert e.encloses_point(e.center + Point(e.hradius + Rational(1, 10), 0)) is False
def test_ellipse_random_point():
e3 = Ellipse(Point(0, 0), y1, y1)
rx, ry = Symbol('rx'), Symbol('ry')
for ind in xrange(0, 5):
r = e3.random_point()
# substitution should give zero*y1**2
assert e3.equation(rx, ry).subs(zip((rx, ry), r.args)
).n(3).as_coeff_Mul()[0] < 1e-10
def test_polygon():
t = Triangle(Point(0, 0), Point(2, 0), Point(3, 3))
assert Polygon(Point(0, 0), Point(1, 0), Point(2, 0), Point(3, 3)) == t
assert Polygon(Point(1, 0), Point(2, 0), Point(3, 3), Point(0, 0)) == t
assert Polygon(Point(2, 0), Point(3, 3), Point(0, 0), Point(1, 0)) == t
p1 = Polygon(
Point(0, 0), Point(3,-1),
Point(6, 0), Point(4, 5),
Point(2, 3), Point(0, 3))
p2 = Polygon(
Point(6, 0), Point(3,-1),
Point(0, 0), Point(0, 3),
Point(2, 3), Point(4, 5))
p3 = Polygon(
Point(0, 0), Point(3, 0),
Point(5, 2), Point(4, 4))
p4 = Polygon(
Point(0, 0), Point(4, 4),
Point(5, 2), Point(3, 0))
#
# General polygon
#
assert p1 == p2
assert len(p1) == 6
assert len(p1.sides) == 6
assert p1.perimeter == 5+2*sqrt(10)+sqrt(29)+sqrt(8)
assert p1.area == 22
assert not p1.is_convex()
assert p3.is_convex()
assert p4.is_convex() # ensure convex for both CW and CCW point specification
#
# Regular polygon
#
p1 = RegularPolygon(Point(0, 0), 10, 5)
p2 = RegularPolygon(Point(0, 0), 5, 5)
assert p1 != p2
assert p1.interior_angle == 3*pi/5
assert p1.exterior_angle == 2*pi/5
assert p2.apothem == 5*cos(pi/5)
assert p2.circumcircle == Circle(Point(0, 0), 5)
assert p2.incircle == Circle(Point(0, 0), p2.apothem)
assert p1.is_convex()
assert p1.rotation == 0
p1.spin(pi/3)
assert p1.rotation == pi/3
assert p1[0] == Point(5, 5*sqrt(3))
# while spin works in place (notice that rotation is 2pi/3 below)
# rotate returns a new object
p1_old = p1
assert p1.rotate(pi/3) == RegularPolygon(Point(0, 0), 10, 5, 2*pi/3)
assert p1 == p1_old
#
# Angles
#
angles = p4.angles
assert feq(angles[Point(0, 0)].evalf(), Float("0.7853981633974483"))
assert feq(angles[Point(4, 4)].evalf(), Float("1.2490457723982544"))
assert feq(angles[Point(5, 2)].evalf(), Float("1.8925468811915388"))
assert feq(angles[Point(3, 0)].evalf(), Float("2.3561944901923449"))
angles = p3.angles
assert feq(angles[Point(0, 0)].evalf(), Float("0.7853981633974483"))
assert feq(angles[Point(4, 4)].evalf(), Float("1.2490457723982544"))
assert feq(angles[Point(5, 2)].evalf(), Float("1.8925468811915388"))
assert feq(angles[Point(3, 0)].evalf(), Float("2.3561944901923449"))
#
# Triangle
#
p1 = Point(0, 0)
p2 = Point(5, 0)
p3 = Point(0, 5)
t1 = Triangle(p1, p2, p3)
t2 = Triangle(p1, p2, Point(Rational(5,2), sqrt(Rational(75,4))))
t3 = Triangle(p1, Point(x1, 0), Point(0, x1))
s1 = t1.sides
s2 = t2.sides
s3 = t3.sides
# Basic stuff
assert Triangle(p1, p1, p1) == p1
assert Triangle(p2, p2*2, p2*3) == Segment(p2, p2*3)
assert t1.area == R | ational(25,2)
assert t1.is_right()
assert t2.is_right() == False
assert t3.is_right()
assert p1 in t1
assert t1.sides[0] in t1
assert Segment((0, 0), (1, 0)) in t1
assert Point(5, 5) not in t2
assert t1.is_convex()
assert feq(t1.angles[p1].evalf(), pi.evalf()/2)
assert t1.is_equilateral() == False
assert t2.is_equilateral()
assert t3.is_equilateral() == False
assert are_similar(t1, t2) == False
| assert are_similar(t1, t3)
assert are_similar(t2, t3) == False
# Bisectors
bisectors = t1.bisectors()
assert bisectors[p1] == Segment(p1, Point(Rational(5,2), Rational(5,2)))
ic = (250 - 125*sqrt(2)) / 50
assert t1.incenter == Point(ic, ic)
# Inradius
assert t1.inradius == 5 - 5*sqrt(2)/2
assert t2.inradius == 5*sqrt(3)/6
assert t3.inradius == x1**2/((2 + sqrt(2))*Abs(x1))
# Medians + Centroid
m = t1.medians
assert t1.centroid == Point(Rational(5,3), Rational(5,3))
assert m[p1] == Segment(p1, Point(Rational(5,2), Rational(5,2)))
assert t3.medians[p1] == Segment(p1, Point(x1/2, x1/2))
assert intersection(m[p1], m[p2], m[p3]) == [t1.centroid]
# Perpendicular
altitudes = t1.altitud |
cipher, mode)
else:
return _CipherContext(self, cipher, mode, _CipherContext._ENCRYPT)
def create_symmetric_decryption_ctx(self, cipher, mode):
if (isinstance(mode, CTR) and isinstance(cipher, AES)
and not self._evp_cipher_supported(cipher, mode)):
# This is needed to provide support for AES CTR mode in OpenSSL
# 0.9.8. It can be removed when we drop 0.9.8 support (RHEL 5
# extended life ends 2020).
return _AESCTRCipherContext(self, cipher, mode)
else:
return _CipherContext(self, cipher, mode, _CipherContext._DECRYPT)
def pbkdf2_hmac_supported(self, algorithm):
if self._lib.Cryptography_HAS_PBKDF2_HMAC:
return self.hmac_supported(algorithm)
else:
# OpenSSL < 1.0.0 has an explicit PBKDF2-HMAC-SHA1 function,
# so if the PBKDF2_HMAC function is missing we only support
# SHA1 via PBKDF2_HMAC_SHA1.
return isinstance(algorithm, hashes.SHA1)
def derive_pbkdf2_hmac(self, algorithm, length, salt, iterations,
key_material):
buf = self._ffi.new("char[]", length)
if self._lib.Cryptography_HAS_PBKDF2_HMAC:
evp_md = self._lib.EVP_get_digestbyname(
algorithm.name.encode("ascii"))
assert evp_md != self._ffi.NULL
res = self._lib.PKCS5_PBKDF2_HMAC(
key_material,
len(key_material),
salt,
len(salt),
iterations,
evp_md,
length,
buf
)
assert res == 1
else:
if not isinstance(algorithm, hashes.SHA1):
raise UnsupportedAlgorithm(
"This version of OpenSSL only supports PBKDF2HMAC with "
"SHA1.",
_Reasons.UNSUPPORTED_HASH
)
res = self._lib.PKCS5_PBKDF2_HMAC_SHA1(
key_material,
len(key_material),
salt,
len(salt),
iterations,
length,
buf
)
assert res == 1
return self._ffi.buffer(buf)[:]
def _err_string(self, code):
err_buf = self._ffi.new("char[]", 256)
self._lib.ERR_error_string_n(code, err_buf, 256)
return self._ffi.string(err_buf, 256)[:]
def _consume_errors(self):
errors = []
while True:
code = self._lib.ERR_get_error()
if code == 0:
break
lib = self._lib.ERR_GET_LIB(code)
func = self._lib.ERR_GET_FUNC(code)
reason = self._lib.ERR_GET_REASON(code)
errors.append(_OpenSSLError(code, lib, func, reason))
return errors
def _unknown_error(self, error):
return InternalError(
"Unknown error code {0} from OpenSSL, "
"you should probably file a bug. {1}.".format(
error.code, self._err_string(error.code)
)
)
def _bn_to_int(self, bn):
if six.PY3:
# Python 3 has constant time from_bytes, so use that.
bn_num_bytes = (self._lib.BN_num_bits(bn) + 7) // 8
bin_ptr = self._ffi.new("unsigned char[]", bn_num_bytes)
bin_len = self._lib.BN_bn2bin(bn, bin_ptr)
assert bin_len > 0
assert bin_ptr != self._ffi.NULL
return int.from_bytes(self._ffi.buffer(bin_ptr)[:bin_len], "big")
else:
# Under Python 2 the best we can do is hex()
hex_cdata = self._lib.BN_bn2hex(bn)
assert hex_cdata != self._ffi.NULL
hex_str = self._ffi.string(hex_cdata)
self._lib.OPENSSL_free(hex_cdata)
return int(hex_str, 16)
def _int_to_bn(self, num, bn=None):
"""
Converts a python integer to a BIGNUM. The returned BIGNUM will not
be garbage collected (to support adding them to structs that take
ownership of the object). Be sure to register it for GC if it will
be discarded after use.
"""
if bn is None:
bn = self._ffi.NULL
if six.PY3:
# Python 3 has constant time to_bytes, so use that.
binary = num.to_bytes(int(num.bit_length() / 8.0 + 1), "big")
bn_ptr = self._lib.BN_bin2bn(binary, len(binary), bn)
assert bn_ptr != self._ffi.NULL
return bn_ptr
else:
# Under Python 2 the best we can do is hex()
hex_num = hex(num).rstrip("L").lstrip("0x").encode("ascii") or b"0"
bn_ptr = self._ffi.new("BIGNUM **")
bn_ptr[0] = bn
res = self._lib.BN_hex2bn(bn_ptr, hex_num)
assert res != 0
assert bn_ptr[0] != self._ffi.NULL
return bn_ptr[0]
def generate_rsa_private_key(self, public_exponent, key_size):
rsa._verify_rsa_parameters(public_exponent, key_size)
rsa_cdata = self._lib.RSA_new()
assert rsa_cdata != self._ffi.NULL
rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free)
bn = self._int_to_bn(public_exponent)
bn = self._ffi.gc(bn, self._lib.BN_free)
res = self._lib.RSA_generate_key_ex(
rsa_cdata, | key_size, bn, self._ffi.NULL
)
assert res == 1
return _RSAPrivateKey(self, rsa_cdata)
def generate_rsa_parameters_supported(self, public_exponent, key_size):
return (public_exponent >= 3 and public_exponent & 1 != 0 and
key_size >= 512)
def load_rsa_private_numbers(self, numbers):
rsa._check_private_key_components(
numbers.p,
numbers.q,
numbers.d,
| numbers.dmp1,
numbers.dmq1,
numbers.iqmp,
numbers.public_numbers.e,
numbers.public_numbers.n
)
rsa_cdata = self._lib.RSA_new()
assert rsa_cdata != self._ffi.NULL
rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free)
rsa_cdata.p = self._int_to_bn(numbers.p)
rsa_cdata.q = self._int_to_bn(numbers.q)
rsa_cdata.d = self._int_to_bn(numbers.d)
rsa_cdata.dmp1 = self._int_to_bn(numbers.dmp1)
rsa_cdata.dmq1 = self._int_to_bn(numbers.dmq1)
rsa_cdata.iqmp = self._int_to_bn(numbers.iqmp)
rsa_cdata.e = self._int_to_bn(numbers.public_numbers.e)
rsa_cdata.n = self._int_to_bn(numbers.public_numbers.n)
res = self._lib.RSA_blinding_on(rsa_cdata, self._ffi.NULL)
assert res == 1
return _RSAPrivateKey(self, rsa_cdata)
def load_rsa_public_numbers(self, numbers):
rsa._check_public_key_components(numbers.e, numbers.n)
rsa_cdata = self._lib.RSA_new()
assert rsa_cdata != self._ffi.NULL
rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free)
rsa_cdata.e = self._int_to_bn(numbers.e)
rsa_cdata.n = self._int_to_bn(numbers.n)
res = self._lib.RSA_blinding_on(rsa_cdata, self._ffi.NULL)
assert res == 1
return _RSAPublicKey(self, rsa_cdata)
def _bytes_to_bio(self, data):
"""
Return a _MemoryBIO namedtuple of (BIO, char*).
The char* is the storage for the BIO and it must stay alive until the
BIO is finished with.
"""
data_char_p = self._ffi.new("char[]", data)
bio = self._lib.BIO_new_mem_buf(
data_char_p, len(data)
)
assert bio != self._ffi.NULL
return _MemoryBIO(self._ffi.gc(bio, self._lib.BIO_free), data_char_p)
def _create_mem_bio(self):
"""
Creates an empty memory BIO.
"""
bio_method = self._lib.BIO_s_mem()
assert bio_method != self._ffi.NULL
bio = self._lib.BIO_new(bio_method)
assert bio != self._ffi.NULL
bio = self._ffi.gc(bio, self._lib.BIO_free)
return bio
def _read_mem_bio(self, bio):
"""
Reads a memory BIO. This only works on memory BIOs.
" |
person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy,
# modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# KLUDGE to allow tests to work.
import sys
sys.path.insert(0, '../../build/swig/python')
import cueify
import struct
import unittest
# Create a binary track descriptor from a full TOC.
def TRACK_DESCRIPTOR(session, adr, ctrl, track,
abs_min, abs_sec, abs_frm, min, sec, frm):
return [session, (((adr & 0xF) << 4) | (ctrl & 0xF)), 0, track,
abs_min, abs_sec, abs_ | frm, 0, min, sec, frm]
serialized_mock_full_toc = [(((13 + 2 * 3) * 11 + 2) >> 8),
(((13 + 2 * 3) * 11 + 2) & 0xFF), 1, 2]
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(1, 1, 4, 0xA0, 0, 0, 0, 1, cueify.SESSION_MODE_1, 0))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(1, 1, 4, 0xA1, 0, 0, 0, 12, 0, 0))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(1, 1, 4, 0xA2, 0, 0, 0, 51, 44, 26))
serialized_mock_full_toc.extend(
T | RACK_DESCRIPTOR(1, 1, 4, 1, 0, 0, 0, 0, 2, 0))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(1, 1, 4, 2, 0, 0, 0, 4, 47, 70))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(1, 1, 4, 3, 0, 0, 0, 7, 42, 57))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(1, 1, 4, 4, 0, 0, 0, 13, 47, 28))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(1, 1, 4, 5, 0, 0, 0, 18, 28, 50))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(1, 1, 4, 6, 0, 0, 0, 21, 56, 70))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(1, 1, 4, 7, 0, 0, 0, 24, 56, 74))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(1, 1, 4, 8, 0, 0, 0, 30, 10, 55))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(1, 1, 4, 9, 0, 0, 0, 34, 17, 20))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(1, 1, 4, 10, 0, 0, 0, 39, 18, 66))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(1, 1, 4, 11, 0, 0, 0, 43, 16, 40))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(1, 1, 4, 12, 0, 0, 0, 47, 27, 61))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(2, 1, 6, 0xA0, 0, 0, 0, 13, cueify.SESSION_MODE_2, 0))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(2, 1, 6, 0xA1, 0, 0, 0, 13, 0, 0))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(2, 1, 6, 0xA2, 0, 0, 0, 57, 35, 13))
serialized_mock_full_toc.extend(
TRACK_DESCRIPTOR(2, 1, 6, 13, 1, 2, 3, 54, 16, 26))
class TestFullTOCFunctions(unittest.TestCase):
def test_serialization(self):
# Test both deserialization and serialization (since, unlike
# in the C code, the Python library does not support directly
# specifying the mock TOC.
full_toc = cueify.FullTOC()
self.assertTrue(
full_toc.deserialize(
struct.pack(
"B" * len(serialized_mock_full_toc),
*serialized_mock_full_toc)))
s = full_toc.serialize()
self.assertEqual(full_toc.errorCode, cueify.OK)
self.assertEqual(len(s), len(serialized_mock_full_toc))
self.assertEqual(
s,
struct.pack(
"B" * len(serialized_mock_full_toc),
*serialized_mock_full_toc))
def test_getters(self):
full_toc = cueify.FullTOC()
self.assertTrue(
full_toc.deserialize(
struct.pack(
"B" * len(serialized_mock_full_toc),
*serialized_mock_full_toc)))
self.assertEqual(full_toc.firstSession, 1)
self.assertEqual(full_toc.lastSession, 2)
self.assertEqual(len(full_toc.tracks), 13)
self.assertEqual(full_toc.tracks[0].session, 1)
self.assertEqual(full_toc.tracks[12].session, 2)
self.assertEqual(full_toc.tracks[0].controlFlags, 4)
self.assertEqual(full_toc.tracks[12].controlFlags, 6)
self.assertEqual(full_toc.tracks[0].subQChannelFormat, 1)
self.assertEqual(full_toc.tracks[12].subQChannelFormat, 1)
self.assertEqual(len(full_toc.sessions), 2)
self.assertEqual(len(full_toc.sessions[0].pseudotracks), 3)
self.assertEqual(full_toc.sessions[0].pseudotracks[cueify.FULL_TOC_FIRST_TRACK_PSEUDOTRACK].controlFlags, 4)
self.assertEqual(full_toc.sessions[0].pseudotracks[cueify.FULL_TOC_LAST_TRACK_PSEUDOTRACK].controlFlags, 4)
self.assertEqual(full_toc.sessions[0].pseudotracks[cueify.FULL_TOC_LEAD_OUT_TRACK].controlFlags, 4)
self.assertEqual(full_toc.sessions[1].pseudotracks[cueify.FULL_TOC_LEAD_OUT_TRACK].controlFlags, 6)
self.assertEqual(full_toc.sessions[0].pseudotracks[cueify.FULL_TOC_FIRST_TRACK_PSEUDOTRACK].subQChannelFormat, 1)
self.assertEqual(full_toc.sessions[0].pseudotracks[cueify.FULL_TOC_LAST_TRACK_PSEUDOTRACK].subQChannelFormat, 1)
self.assertEqual(full_toc.sessions[0].pseudotracks[cueify.FULL_TOC_LEAD_OUT_TRACK].subQChannelFormat, 1)
self.assertEqual(full_toc.sessions[1].pseudotracks[cueify.FULL_TOC_LEAD_OUT_TRACK].subQChannelFormat, 1)
self.assertEqual(full_toc.tracks[0].pointAddress.min, 0)
self.assertEqual(full_toc.tracks[0].pointAddress.sec, 0)
self.assertEqual(full_toc.tracks[0].pointAddress.frm, 0)
self.assertEqual(full_toc.sessions[1].pseudotracks[cueify.FULL_TOC_LEAD_OUT_TRACK].pointAddress.min, 0)
self.assertEqual(full_toc.sessions[1].pseudotracks[cueify.FULL_TOC_LEAD_OUT_TRACK].pointAddress.sec, 0)
self.assertEqual(full_toc.sessions[1].pseudotracks[cueify.FULL_TOC_LEAD_OUT_TRACK].pointAddress.frm, 0)
self.assertEqual(full_toc.tracks[12].pointAddress.min, 1)
self.assertEqual(full_toc.tracks[12].pointAddress.sec, 2)
self.assertEqual(full_toc.tracks[12].pointAddress.frm, 3)
self.assertEqual(full_toc.tracks[0].address.min, 0)
self.assertEqual(full_toc.tracks[0].address.sec, 2)
self.assertEqual(full_toc.tracks[0].address.frm, 0)
self.assertEqual(full_toc.tracks[12].address.min, 54)
self.assertEqual(full_toc.tracks[12].address.sec, 16)
self.assertEqual(full_toc.tracks[12].address.frm, 26)
self.assertEqual(full_toc.sessions[0].firstTrack, 1)
self.assertEqual(full_toc.sessions[1].firstTrack, 13)
self.assertEqual(full_toc.sessions[0].lastTrack, 12)
self.assertEqual(full_toc.sessions[1].lastTrack, 13)
self.assertEqual(full_toc.firstTrack, 1)
self.assertEqual(full_toc.lastTrack, 13)
self.assertEqual(full_toc.sessions[0].type, cueify.SESSION_MODE_1)
self.assertEqual(full_toc.sessions[1].type, cueify.SESSION_MODE_2)
self.assertEqual(full_toc.sessions[1].leadoutAddress.min, 57)
self.assertEqual(full_toc.sessions[1].leadoutAddress.sec, 35)
self.assertEqual(full_toc.sessions[1].leadoutAddress.frm, 13)
self.assertEqual(full_toc.discLength.min, 57)
self.assertEqual(full_toc.discLength.sec, 35)
self.assertEqual(full_toc.discLength.frm, 13)
self.assertEqual(full_toc.tracks[11].length.min, 4)
self.assertEqual(full_toc.tracks[11].length.sec, 16)
self.assertEqual(full_toc.tracks[11].length.frm, 40)
self.assertEqual(full_toc.sessions[1].length.min, 3)
self.assertEqual(full_toc.sessions[1].length.sec, 18)
sel |
# -*- coding: utf-8 -*-
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("wagtailcore", "0016_change_page_url_path_to_text_field"),
]
operations = [
migrations.AlterField(
model_name="grouppagepermission",
name="permission_type",
field=models.CharField(
choices=[
("add", "Add/edit pages you own"),
("edit", "Edit any page"),
("publish", "Publish any page"),
("lock", "Lock/unlock any page"),
],
max_length=20,
| verbose_name="Permission type",
),
preserve_default=True,
),
]
| |
from digitalio import DigitalInOut, Direction, Pull
import board
import time
import neopixel
led = DigitalInOut(board.D13)
led.direction = Direction.OUTPUT
pixelPin = board.D2
pixelNumber = 8
strip = neopixel.NeoPixel(pixelPin, pixelNumber, brightness=1, auto_write=False)
switch = DigitalInOut(board.D1)
switch.direction = Di | rection.INPUT
switch.pull = Pull.UP
def wheel(pos):
if (pos < 0) or (pos > 255):
return (0, 0, 0)
if (pos < 85):
return (int(pos * 3), int(255 - (pos * 3)), 0)
elif (pos < 170):
pos -= 85
return (int(255 - pos * 3), 0, int(pos * 3))
else:
pos -= 170
return (0, int(pos * 3), int(255 - pos * 3))
def rainbow_cycle(wait):
for outer in range(255):
for inner in range(len(strip)):
| index = int((inner * 256 / len(strip)) + outer)
strip[inner] = wheel(index & 255)
strip.write()
time.sleep(wait)
while True:
if switch.value:
led.value = False
strip.fill((0, 0, 0))
strip.write()
else:
led.value = True
# strip.fill((255, 0, 0))
rainbow_cycle(0.001)
# time.sleep(0.01) |
from pandac.PandaModules import *
from toontown.toonbase import TTLocalizer
from toontown.toonbase import ToontownGlobals
ENDLESS_GAME = config.GetBool('endless-ring-game', 0)
NUM_RING_GROUPS = 16
MAX_TOONXZ = 15.0
MAX_LAT = 5
MAX_FIELD_SPAN = 135
CollisionRadius = 1.5
CollideMask = ToontownGlobals.CatchGameBitmask
TARGET_RADIUS = MAX_TOONXZ / 3.0 * 0.9
targetColors = ((TTLocalizer.ColorRed, VBase4(1.0, 0.4, 0.2, 1.0)),
(TTLocalizer.ColorGreen, VBase4(0.0, 0.9, 0.2, 1.0)),
(TTLocalizer.ColorOrange, VBase4(1.0, 0.5, 0.25, 1.0)),
(TTLocalizer.ColorPurple, VBase4(1.0, 0.0, 1.0, 1.0)),
(TTLocalizer.ColorWhite, VBase4(1.0, 1.0, 1.0, 1.0)),
(TTLocalizer.ColorBlack, VBase4(0.0, 0.0, 0.0, 1.0)),
(TTLocalizer.ColorYellow, VBase4(1.0, 1.0, 0.2, 1.0)))
ENVIRON_LENGTH = 300
ENVIRON_WIDTH = 150.0
ringColorSelection = [(0, 1, 2),
3,
4,
5,
6]
colorRed = {}
colorRed['Red'] = 1.0
colorRed['Green'] = 0.0
colorRed['Blue'] = 0.0
colorRed['Alpha'] = 0.5
colorBlue = {}
colorBlue['Red'] = 0.0
colorBlue['Green'] = 0.0
colorBlue['Blue'] = 1.0
colorBlue['Alpha'] = 0.5
colorGreen = {}
colorGreen['Red'] = 0.0
colorGreen['Green'] = 1.0
colorGreen['Blue'] = 0.0
colorGreen['Alpha'] = 0.5
colorYellow = {}
colorYellow['Red'] = 1.0
colorYellow['Green'] = 1.0
colorYellow['Blue'] = 0.0
colorYellow['Alpha'] = 0.5
colorPurple = {}
colorPurple['Red'] = 0.75
colorPurple['Green'] = 0.0
colorPurple['Blue'] = 1.0
colorPurple['Alpha'] = 0.5
colorOrange = {}
colorOrange['Red'] = 1.0
colorOrange['Green'] = 0.6
colorOrange['Blue'] = 0.0
colorOrange['Alpha'] = 0.5
colorBlack = {}
colorBlack['Red'] = 0.0
colorBlack['Green'] = 0.0
colorBlack['Blue'] = 0.0
colorBlack['Alpha'] = 1.0
colorWhite = {}
colorWhite['Red'] = 1.0
colorWhite['Green'] = 1.0
colorWhite['Blue'] = 1.0
colorWhite['Alpha'] = 1.0
difficultyPatterns = {ToontownGlobals.ToontownCentral: [[8,
4,
2,
0],
[10,
16,
21,
28],
[31,
15,
7,
3.5],
[colorRed,
colorGreen,
colorBlue,
colorYellow],
[2,
2,
2,
1],
10,
2],
ToontownGlobals.DonaldsDock: [[7,
4,
2,
0],
[11,
17,
| 23,
32],
[29,
13,
6.5,
3.2],
| [colorRed,
colorGreen,
colorBlue,
colorYellow],
[2,
2,
2,
1],
9,
2],
ToontownGlobals.DaisyGardens: [[6,
4,
2,
0],
[11,
18,
25,
34],
[29,
13,
6.5,
3.1],
[colorRed,
colorGreen,
colorBlue,
colorYellow],
[2,
2,
2,
1],
8,
2],
ToontownGlobals.MinniesMelodyland: [[6,
4,
2,
0],
[12,
19,
27,
37],
[28,
12,
6,
3.0],
[colorGreen,
colorBlue,
colorYellow,
colorPurple],
[2,
2,
2,
1],
8,
2],
ToontownGlobals.TheBrrrgh: [[5,
4,
2,
0],
[12,
20,
29,
40],
[25,
12,
5.5,
2.5],
[colorGreen,
colorBlue,
colorYellow,
colorPurple],
[2,
2,
2,
1],
7,
2],
ToontownGlobals.DonaldsDreamland: [[4,
3,
1,
0],
[12,
21,
31,
42],
[20,
10,
4.5,
2.0],
[colorBlue,
colorYellow,
colorPurple,
colorOrange],
[2,
2,
2,
1],
7,
2]}
|
import lorun
import os
import codecs
import random
import subprocess
import config
import sys
RESULT_MAP = [
2, 10, 5, 4, 3, 6, 11, 7, 12
]
class Runner:
def __init__(self):
return
def compile(self, judger, srcPath, outPath):
cmd = config.langCompile[judger.lang] % {'root': sys.path[0], 'src': srcPath, 'target': outPath}
p = subprocess.Popen(cmd, shell = True,
stdout = subprocess.PIPE, stdin = subprocess.PIPE, stderr = subprocess.STDOUT)
retval = p.wait()
return (retval, p.stdout.read())
def judge(self, judger, srcPath, outPath, inFile, ansFile, memlimit, timelimit):
cmd = config.langRun[judger.lang] % {'src': srcPath, 'target': outPath}
fout_path = "".join([sys.path[0], "/", "%s/%d.out" % (config.dataPath["tempPath"], random.randint(0, 65536))])
if os. | path.exists(fout_path):
os.remove(fout_path)
|
fin = open(inFile, 'rU')
fout = open(fout_path, 'w')
runcfg = {
'args': cmd.split(" "),
'fd_in': fin.fileno(),
'fd_out': fout.fileno(),
'timelimit': int(timelimit),
'memorylimit': int(memlimit)
}
rst = lorun.run(runcfg)
fin.close()
fout.close()
if rst['result'] == 0:
fans = open(ansFile, 'rU')
fout = open(fout_path, 'rU')
crst = lorun.check(fans.fileno(), fout.fileno())
fout.close()
fans.close()
return (RESULT_MAP[crst], int(rst['memoryused']), int(rst['timeused']))
return (RESULT_MAP[rst['result']], 0, 0)
|
__author__ = 'Dr. Masroor Ehsan'
__email__ = 'masroore@gmail.com'
__copyright__ = 'Copyright 2013, Dr. Masroor Ehsan'
__license__ = 'BSD'
__version__ = '0.1.1'
from datetime import datetime
try:
from lxml import etree
except ImportError:
try:
# Python 2.5
import xml.etree.cElementTree as etree
except ImportError:
try:
# Python 2.5
import xml.etree.ElementTree as etree
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree
except ImportError:
# normal ElementTree install
import elementtree.ElementTree as etree
__all__ = ['parse_single_article', 'parse_story_set', 'parse_article_set']
def _find_and_set(key, rootNode, dict_obj, cb=None):
node = rootNode.find(key)
if node is not None:
dict_obj[key] = cb(node.text) if cb is not None else node.text
def _parse_datetime(input):
return datetime.strptime(input, "%Y-%m-%d %H:%M:%S")
def _parse_category_set(rootNode, tagName='category'):
categories = []
categoriesNode = rootNode.find('categories_set')
for categoryNode in categoriesNode.findall(tagName):
category = {}
_find_and_set('name', categoryNode, category)
_find_and_set('dashed_name', categoryNode, category)
if len(category) > 0:
categories.append(category)
return categories
def parse_category_set(content):
rootNode = etree.fromstring(content)
return _parse_category_set(rootNode)
def parse_single_article(content):
rootNode = etree.fromstring(content)
return _parse_single_article(rootNode)
def _parse_single_topic(rootNode):
topic = {}
_find_and_set('name', rootNode, topic)
_find_and_set('topic_group', rootNode, topic)
_find_and_set('topic_subclassification', rootNode, topic)
_find_and_set('score', rootNode, topic, float)
_find_and_set('image_url', rootNode, topic)
_find_and_set('link', rootNode, topic)
_find_and_set('guid', rootNode, topic)
_find_and_set('topic_classification', rootNode, topic)
_find_and_set('description', rootNode, topic)
return topic if len(topic) > 0 else None
def _parse_topic_set(rootNode):
topicSetNode = rootNode.find('topic_set')
topic_set = []
if topicSetNode is not None:
for node in topicSetNode.findall('topic'):
topic = _parse_single_topic(node)
if topic is not None:
topic_set.append(topic)
return topic_set if len(topic_set) > 0 else None
def _parse_thumbnail(rootNode, dict_obj):
thumbNode = rootNode.find('thumbnail')
if thumbNode is not None:
thumb = {}
_find_and_set('original_image', thumbNode, thumb)
_find_and_set('link', thumbNode, thumb)
if len(thumb) > 0:
dict_obj['thumbnail'] = thumb
def _parse_single_article(rootNode):
    """Build a full article dict from an <article> element.

    Robustness fix: missing <category>, <source> or <media_type> children
    previously raised AttributeError (``None.find(...)`` / ``None.text``);
    they are now treated as optional.
    """
    article = {}
    _find_and_set('description', rootNode, article)
    _find_and_set('title', rootNode, article)
    _find_and_set('created_at', rootNode, article, _parse_datetime)
    _find_and_set('published_at', rootNode, article, _parse_datetime)
    _find_and_set('score', rootNode, article, float)
    _find_and_set('link', rootNode, article)
    _find_and_set('guid', rootNode, article)
    catNode = rootNode.find('category')
    if catNode is not None:  # guard: was an unconditional .find(...).text
        article['category'] = {
            'name': catNode.find('name').text,
            'dashed_name': catNode.find('dashed_name').text}
    authorSetNode = rootNode.find('author_set')
    if authorSetNode is not None:
        article['author_set'] = []
        for authorNode in authorSetNode.findall('author'):
            author = {
                'guid': authorNode.find('guid').text,
                'first_name': authorNode.find('first_name').text,
                'last_name': authorNode.find('last_name').text,
            }
            article['author_set'].append(author)
    topic_set = _parse_topic_set(rootNode)
    if topic_set:
        article['topic_set'] = topic_set
    srcNode = rootNode.find('source')
    if srcNode is not None:  # guard: a missing <source> used to crash
        source_dict = {}
        _find_and_set('website', srcNode, source_dict)
        _find_and_set('name', srcNode, source_dict)
        _find_and_set('circulation', srcNode, source_dict, int)
        _find_and_set('country', srcNode, source_dict)
        _find_and_set('company_type', srcNode, source_dict)
        _find_and_set('founded', srcNode, source_dict)
        _find_and_set('staff_authors', srcNode, source_dict, int)
        _find_and_set('frequency', srcNode, source_dict)
        _find_and_set('owner', srcNode, source_dict)
        _find_and_set('guid', srcNode, source_dict)
        # NOTE(review): bool("false") is True — any non-empty text makes
        # is_blog truthy; confirm the feed encoding before changing this.
        _find_and_set('is_blog', srcNode, source_dict, bool)
        _find_and_set('thumbnail', srcNode, source_dict)
        _find_and_set('description', srcNode, source_dict)
        mediaNode = srcNode.find('media_type')
        if mediaNode is not None:  # guard: a missing <media_type> used to crash
            media_dict = {}
            _find_and_set('name', mediaNode, media_dict)
            _find_and_set('dashed_name', mediaNode, media_dict)
            if len(media_dict) > 0:
                source_dict['media_type'] = media_dict
        if len(source_dict) > 0:
            article['source'] = source_dict
    return article
def _parse_author_set(rootNode):
    """Return a list of author dicts from <author_set>, or None when empty/absent."""
    authorSetNode = rootNode.find('author_set')
    if authorSetNode is None:
        return None
    authors = []
    for node in authorSetNode.findall('author'):
        author = {}
        _find_and_set('guid', node, author)
        _find_and_set('name', node, author)
        if author:
            authors.append(author)
    return authors or None
def _parse_story_set_article(rootNode):
    """Build a lightweight article dict for story-set listings."""
    article = {}
    _find_and_set('description', rootNode, article)
    _find_and_set('title', rootNode, article)
    _find_and_set('published_at', rootNode, article, _parse_datetime)
    _find_and_set('link', rootNode, article)
    _find_and_set('guid', rootNode, article)
    # NOTE(review): tagName='categories' differs from the default 'category';
    # presumably the story-set feed nests <categories> elements — confirm.
    categories = _parse_category_set(rootNode, tagName='categories')
    # BUG FIX: _parse_category_set returns a (possibly empty) list, never
    # None, so the old `is not None` test always passed and stored empty
    # lists; follow the file's "omit empty collections" convention instead.
    if categories:
        article['categories_set'] = categories
    sourceNode = rootNode.find('source')
    if sourceNode is not None:
        source_dict = {}
        _find_and_set('name', sourceNode, source_dict)
        _find_and_set('guid', sourceNode, source_dict)
        if len(source_dict) > 0:
            article['source'] = source_dict
    author_set = _parse_author_set(rootNode)
    if author_set is not None:
        article['author_set'] = author_set
    return article
def _parse_story_node(rootNode):
    """Build a story dict (guid, num_articles, article_set) from a <story> element.

    Robustness fix: a missing <article_set> previously raised
    AttributeError on ``None.findall``.
    """
    story = {}
    _find_and_set('num_articles', rootNode, story, int)
    _find_and_set('guid', rootNode, story)
    articles = []
    articleSetNode = rootNode.find('article_set')
    if articleSetNode is not None:
        for articleNode in articleSetNode.findall('article'):
            article = _parse_story_set_article(articleNode)
            if article is not None:
                articles.append(article)
    if len(articles) > 0:
        story['article_set'] = articles
    return story
def parse_story_set(content):
    """Parse a story-set XML document into a list of story dicts."""
    rootNode = etree.fromstring(content)
    return [_parse_story_node(node) for node in rootNode.findall('story')]
def parse_article_set(content):
    """Parse an article-set XML document into a list of article dicts."""
    # Document root looks like: <article_set num_found="197218">
    rootNode = etree.fromstring(content)
    return [_parse_single_article(node) for node in rootNode.findall('article')]
import tak
from . import tps
import attr
import re
@attr.s
class PTN(object):
    """A parsed Portable Tak Notation (PTN) game record."""

    tags = attr.ib()   # header tags: {name: value} from the [Name "Value"] lines
    moves = attr.ib()  # list of tak.Move objects parsed from the movetext

    @classmethod
    def parse(cls, text):
        """Parse PTN *text* (header block, blank line, movetext) into a PTN."""
        # Header and movetext are separated by the first blank line.
        head, tail = text.split("\n\n", 1)
        tags_ = re.findall(r'^\[(\w+) "([^"]+)"\]$', head, re.M)
        tags = dict(tags_)
        # Strip {...} comments from the movetext before tokenizing.
        tail = re.sub(r'{[^}]+}', ' ', tail)
        moves = []
        tokens = re.split(r'\s+', tail)
        for t in tokens:
            if t == '--':
                continue
            # Skip game-result tokens such as 1-0, R-0, 1/2-1/2.
            if re.search(r'\A(0|R|F|1|1/2)-(0|R|F|1|1/2)\Z', t):
                continue
            # Skip move-number prefixes like "12.".
            if re.match(r'\A\d+\.\Z', t):
                continue
            if t == '':
                continue
            # Drop trailing annotation marks (', !, ?).
            t = re.sub(r"['!?]+$", '', t)
            m = parse_move(t)
            moves.append(m)
        return cls(tags = tags, moves = moves)

    def initial_position(self):
        """Return the starting tak.Position (from the TPS tag when present)."""
        if 'TPS' in self.tags:
            return tps.parse_tps(self.tags['TPS'])
        return tak.Position.from_config(
            tak.Config(size = int(self.tags['Size'])))
# PTN direction glyph -> slide move type.
slide_map = {
    '-': tak.MoveType.SLIDE_DOWN,
    '+': tak.MoveType.SLIDE_UP,
    '<': tak.MoveType.SLIDE_LEFT,
    '>': tak.MoveType.SLIDE_RIGHT,
}
# Inverse of slide_map: slide move type -> glyph (used by format_move).
slide_rmap = dict((v, k) for (k, v) in slide_map.items())
# PTN stone prefix -> placement move type ('' and 'F' both mean a flat stone).
place_map = {
    '': tak.MoveType.PLACE_FLAT,
    'S': tak.MoveType.PLACE_STANDING,
    'C': tak.MoveType.PLACE_CAPSTONE,
    'F': tak.MoveType.PLACE_FLAT,
}
# Inverse for formatting; flats render with no prefix.
place_rmap = {
    tak.MoveType.PLACE_FLAT: '',
    tak.MoveType.PLACE_STANDING: 'S',
    tak.MoveType.PLACE_CAPSTONE: 'C',
}
def parse_move(move):
    """Parse a single PTN move token into a tak.Move.

    Handles placements (e.g. "a1", "Sc3") and slides (e.g. "3c3>111").
    Raises BadMove on malformed or inconsistent input.
    """
    # Groups: stone prefix, pickup count, file, rank, direction, drop counts.
    m = re.search(r'\A([CFS]?)([1-8]?)([a-h])([1-8])([<>+-]?)([1-8]*)[CFS]?\Z', move)
    if not m:
        raise BadMove(move, "malformed move")
    stone, pickup, file, rank, dir, drops = m.groups()
    # Board coordinates are zero-based: a1 -> (0, 0).
    x = ord(file) - ord('a')
    y = ord(rank) - ord('1')
    if pickup and not dir:
        raise BadMove(move, "pick up but no direction")
    typ = None
    if dir:
        typ = slide_map[dir]
    else:
        typ = place_map[stone]
    slides = None
    if drops:
        slides = tuple(ord(c) - ord('0') for c in drops)
    if (drops or pickup) and not dir:
        raise BadMove(move, "pickup/drop without a direction")
    # A bare slide like "c3>" implicitly picks up and drops a single stone.
    if dir and not pickup and not slides:
        pickup = '1'
    if pickup and not slides:
        slides = (int(pickup),)
    # The number picked up must equal the total dropped.
    if pickup and int(pickup) != sum(slides):
        raise BadMove(move, "inconsistent pickup and drop: {0} v {1}".format(pickup, drops))
    return tak.Move(x, y, typ, slides)
def format_move(move):
    """Render a tak.Move back into PTN notation (inverse of parse_move)."""
    out = [place_rmap.get(move.type, '')]
    is_slide = move.type.is_slide()
    if is_slide:
        total_picked_up = sum(move.slides)
        # A pickup count of 1 is implicit in PTN and omitted.
        if total_picked_up != 1:
            out.append(total_picked_up)
    out.append(chr(move.x + ord('a')))
    out.append(chr(move.y + ord('1')))
    if is_slide:
        out.append(slide_rmap[move.type])
        if len(move.slides) > 1:
            out.extend(chr(d + ord('0')) for d in move.slides)
    return ''.join(map(str, out))
class BadMove(Exception):
    """Raised when a PTN move token cannot be parsed or is inconsistent."""

    def __init__(self, move, error):
        super().__init__("{0}: {1}".format(error, move))
        self.move = move    # the offending token
        self.error = error  # human-readable reason
|
# -*- coding: utf-8 -*-
from __future__ import unicode_ | literals
# do this when > 1.6!!!
# from django.db import migrations, models
from gazetteer.models import GazSource,GazSourceConfig,LocationTypeField,CodeFieldConfig,NameFieldCon | fig
from skosxl.models import Concept, Scheme, MapRelation
from gazetteer.settings import TARGET_NAMESPACE_FT
def load_base_ft():
    """Ensure the base gazetteer feature-type scheme and seed concept exist."""
    (sch,created) = Scheme.objects.get_or_create(uri=TARGET_NAMESPACE_FT[:-1], defaults = { 'pref_label' :"Gaz Feature types" })
    try:
        (ft,created) = Concept.objects.get_or_create(term="ADMIN", defaults = { 'pref_label' :"Populated Place", 'definition':"Populated place"} , scheme = sch)
    except Exception:
        # Narrowed from a bare `except:` (which also swallowed SystemExit and
        # KeyboardInterrupt); concept creation remains best-effort by design.
        pass
# now set up cross references from NGA feature types namespace
# now set up harvest config
def load_ft_mappings() :
    """Placeholder: cross references from the NGA feature-type namespace are not yet implemented."""
    pass
def load_config():
    """(Re)create the TM_WorldBoundaries gazetteer source configuration."""
    try:
        GazSourceConfig.objects.filter(name="TM_WorldBoundaries").delete()
    except Exception:
        # Narrowed from a bare `except:`; deletion is best-effort (e.g. the
        # table may not exist on a first run).
        pass
    config=GazSourceConfig.objects.create(lat_field="lat", name="TM_WorldBoundaries", long_field="lon")
    NameFieldConfig.objects.create(config=config,language="en", as_default=True, languageNamespace="", field="name", languageField="")
    LocationTypeField.objects.create(field='"ADMIN"',namespace=TARGET_NAMESPACE_FT, config=config)
    # Country-code cross references, one namespace per coding system.
    CodeFieldConfig.objects.create(config=config,field="iso3",namespace="http://mapstory.org/id/countries/iso3")
    CodeFieldConfig.objects.create(config=config,field="iso2",namespace="http://mapstory.org/id/countries/iso2")
    CodeFieldConfig.objects.create(config=config,field="un",namespace="http://mapstory.org/id/countries/un")
    CodeFieldConfig.objects.create(config=config,field="fips",namespace="http://mapstory.org/id/countries/fips")
    (s,created) = GazSource.objects.get_or_create(source="tm_world_borders", config=config, source_type="mapstory")
    print (s,created)
"""
class Migration(migrations.Migration):
initial = True
dependencies = [
#('yourappname', '0001_initial'),
]
operations = [
migrations.RunPython(load_ft_mappings),
migrations.RunPython(load_config),
]
"""
|
a = {"abc": | "d<caret> | ef"} |
# Make your image, region, and location changes then change the from-import
# to match.
from configurables_akeeton_desktop import *
import hashlib
import java.awt.Toolkit
import json
import os
import shutil
import time
# Sikuli global settings: verbose logging plus a generous auto-wait timeout.
Settings.ActionLogs = True
Settings.InfoLogs = True
Settings.DebugLogs = True
Settings.LogTime = True
Settings.AutoWaitTimeout = AUTO_WAIT_TIMEOUT_SECONDS

# BUG FIX: tempfile is used below but was never imported here (NameError
# unless the star import above happened to provide it).
import tempfile

# Per-run scratch directory, timestamped so runs never collide.
TEMP_DIR_PREFIX = time.strftime("MTGO-scry-bug_%Y-%m-%d_%H-%M-%S", time.gmtime())
TEMP_PATH = tempfile.mkdtemp(prefix=TEMP_DIR_PREFIX)

# Number of times main() has been started (one per attempt directory).
attempts = 0
def main():
    """Run one MTGO session, recording scry-bug hits and misses.

    Drives the MTGO client through Sikuli: starts a game, mulligans down,
    sends the scried card to the bottom of the library, then screenshots
    both that card and the card actually drawn.  Matching screenshot hashes
    count as a "hit".  Captures and JSON stats go under a per-attempt
    directory.  Loops forever; FindFailed propagates to the caller when the
    UI stops matching.
    """
    global attempts
    attempts += 1
    ATTEMPT_NUM_PATH = get_attempt_number_path(attempts)
    HITS_PATH = os.path.join(ATTEMPT_NUM_PATH, HITS_DIR)
    MISSES_PATH = os.path.join(ATTEMPT_NUM_PATH, MISSES_DIR)
    print "TEMP_PATH:", TEMP_PATH
    print "ATTEMPT_NUM_PATH", ATTEMPT_NUM_PATH
    print "HITS_PATH:", HITS_PATH
    print "MISSES_PATH:", MISSES_PATH
    os.mkdir(ATTEMPT_NUM_PATH)
    os.mkdir(HITS_PATH)
    os.mkdir(MISSES_PATH)
    iterations = 0
    hits = 0
    # [label, dict] pairs so each table keeps its name when json.dump'd.
    card_hash_to_times_card_sent_to_bottom = ['card_hash_to_times_card_sent_to_bottom', ZeroValueDict()]
    card_hash_to_times_card_sent_to_bottom_and_drawn = ['card_hash_to_times_card_sent_to_bottom_and_drawn', ZeroValueDict()]
    card_hash_to_times_card_drawn = ['card_hash_to_times_card_drawn', ZeroValueDict()]
    card_hash_to_capture = ['card_hash_to_capture', {}]
    while True:
        # Start a game.
        REGION_PLAY.wait("play.png")
        time.sleep(0.5)
        REGION_PLAY.click(LOCATION_PLAY)
        time.sleep(1.0)
        # Mulligan repeatedly to reach the scry decision.
        REGION_MULLIGAN_KEEP.wait("mulligan_keep.png")
        for i in range(0, 7):
            REGION_MULLIGAN_KEEP.wait("mulligan_highlighted_keep.png")
            time.sleep(2.0)  # I swear if I have to keep incrementing this value...
            REGION_MULLIGAN_KEEP.click(LOCATION_MULLIGAN)
            time.sleep(1.0)
        # Send the scried card to the bottom of the library.
        REGION_TEMPORARY_ZONE.wait("temporary_zone.png")
        time.sleep(0.5)
        click(LOCATION_TEMPORARY_ZONE_CARD)
        time.sleep(0.5)
        REGION_PUT_ON_THE_BOTTOM_OF_YOUR_LIBRARY.click(LOCATION_PUT_ON_THE_BOTTOM_OF_YOUR_LIBRARY)
        time.sleep(0.1)
        REGION_CHAT_PUT_A_CARD_ON_THE_BOTTOM_OF_THE_LIBRARY.wait("chat_put_a_card_on_the_bottom_of_the_library.png")
        time.sleep(0.1)
        # Screenshot the bottomed card, then the card actually drawn.
        card_sent_to_bottom_capture = capture(REGION_CARD_PREVIEW_CAPTURE)
        hover(LOCATION_FIRST_CARD_IN_HAND)  # Update the preview with the drawn card.
        time.sleep(0.5)
        card_drawn_capture = capture(REGION_CARD_PREVIEW_CAPTURE)
        copy_path = ""
        # Identical screenshots (equal MD5s) mean the bottomed card was drawn.
        card_sent_to_bottom_hash = hash_file(card_sent_to_bottom_capture)
        card_drawn_hash = hash_file(card_drawn_capture)
        card_hash_to_times_card_sent_to_bottom[1][card_sent_to_bottom_hash] += 1
        card_hash_to_times_card_drawn[1][card_drawn_hash] += 1
        if card_sent_to_bottom_hash == card_drawn_hash:
            hits += 1
            card_hash_to_times_card_sent_to_bottom_and_drawn[1][card_sent_to_bottom_hash] += 1
            copy_path = HITS_PATH
        else:
            copy_path = MISSES_PATH
        iterations += 1
        print "{0}/{1}".format(hits, iterations)
        # Archive both captures under hits/ or misses/.
        card_sent_to_bottom_capture_dest_path = os.path.join(copy_path, str(iterations) + "_bottom.png")
        card_drawn_capture_dest_path = os.path.join(copy_path, str(iterations) + "_drawn.png")
        shutil.move(card_sent_to_bottom_capture, card_sent_to_bottom_capture_dest_path)
        shutil.move(card_drawn_capture, card_drawn_capture_dest_path)
        card_hash_to_capture[1][card_sent_to_bottom_hash] = card_sent_to_bottom_capture_dest_path
        card_hash_to_capture[1][card_drawn_hash] = card_drawn_capture_dest_path
        # Rewrite the running stats file every iteration so a crash loses nothing.
        with open(os.path.join(ATTEMPT_NUM_PATH, 'stats.json'), 'w') as stats_file:
            json.dump(card_hash_to_times_card_sent_to_bottom_and_drawn, stats_file, sort_keys=True, indent=4)
            stats_file.write('\n')
            json.dump(card_hash_to_times_card_sent_to_bottom, stats_file, sort_keys=True, indent=4)
            stats_file.write('\n')
            json.dump(card_hash_to_times_card_drawn, stats_file, sort_keys=True, indent=4)
            stats_file.write('\n')
            json.dump(card_hash_to_capture, stats_file, sort_keys=True, indent=4)
            stats_file.write('\n')
            stats_file.write('{0}/{1}'.format(hits, iterations))
        # Concede and close so the next iteration starts fresh.
        click(LOCATION_X_CLOSE)
        REGION_CONCEDE_MATCH_BUTTON.wait("concede_match.png")
        time.sleep(0.1)
        type('\n')
class ZeroValueDict(dict):
    """dict that yields 0 for absent keys without inserting them."""

    def __missing__(self, absent_key):
        # Invoked by dict.__getitem__ on a miss; deliberately does not store.
        return 0
def hash_file(file_path):
    """Return the hex MD5 digest of the file at *file_path*.

    Reads in fixed-size chunks so large screenshot files do not have to fit
    in memory at once (the original slurped the whole file in one read).
    """
    hasher = hashlib.md5()
    with open(file_path, 'rb') as opened_file:
        for chunk in iter(lambda: opened_file.read(65536), b''):
            hasher.update(chunk)
    return hasher.hexdigest()
def get_attempt_number_path(attempts):
    """Return the per-attempt subdirectory inside the run's TEMP_PATH."""
    subdir = 'attempt_{0}'.format(attempts)
    return os.path.join(TEMP_PATH, subdir)
if __name__ == '__main__':
    # Retry forever; each FindFailed is beeped, logged, then re-raised.
    while True:
        try:
            main()
        except FindFailed as e:
            # NOTE(review): the constant name contains a lowercase 'l'
            # (...FAIlED) — presumably matching the definition in the
            # configurables module; confirm before renaming either side.
            for i in range(0, TIMES_TO_BEEP_ON_FIND_FAIlED):
                java.awt.Toolkit.getDefaultToolkit().beep()
                time.sleep(1.0)
            print e
            with open(os.path.join(get_attempt_number_path(attempts), 'error.log'), 'w') as errorlog:
                errorlog.write(str(e))
            raise e  # Replace this with a way to reset MTGO to a starting state so we can try again.
|
import pyaudio
import struct
from threading import Thread, Condition
import time
from logging import thread
import socket
CHUNK = 2**12            # frames per buffer (4096)
FORMAT = pyaudio.paInt16  # 16-bit signed samples
CHANNELS = 2             # stereo capture
RATE = 44100             # sample rate in Hz
class AudioReader(Thread):
    """Background thread that captures audio and fans it out to listeners.

    Reads either from the local microphone (PyAudio) or from a remote TCP
    stream, depending on *remote*.  Each registered listener callable
    receives every chunk, either as raw bytes (raw=True) or as a sequence
    of unpacked 16-bit samples.
    """
    def __init__(self, raw = False, remote = False, host = 'localhost', port = 9999):
        Thread.__init__(self)
        self.active = False           # True while data should be read and dispatched
        self.listeners = []           # callables invoked with each chunk
        self.condition = Condition()  # wakes the reader thread when play() is called
        self.quit = False             # set by stop(); terminates run()'s loop
        self.raw = raw                # dispatch raw bytes instead of unpacked shorts
        self.remote = remote          # read from a TCP socket instead of PyAudio
        self.host = host              # remote host (remote=True only)
        self.port = port              # remote port (remote=True only)
    def pause(self):
        """Stop dispatching; the capture loop exits and waits to be resumed."""
        self.active = False
    def play(self):
        """Start (or resume) capturing: wake the thread blocked in the condition wait."""
        self.active = True
        self.condition.acquire()
        self.condition.notify()
        self.condition.release()
    def stop(self):
        """Shut the thread down for good."""
        # If currently paused, wake the thread first so it can observe self.quit.
        if not self.active:
            self.play()
        self.active = False
        self.quit = True
    def readData(self):
        """Run one play/pause cycle reading from the local microphone."""
        # Block until play() notifies us.
        self.condition.acquire()
        self.condition.wait()
        self.condition.release()
        self.stream = pyaudio.PyAudio().open(format=FORMAT,
                                             channels=CHANNELS,
                                             rate=RATE,
                                             input=True,
                                             frames_per_buffer=CHUNK)
        while self.active:
            data = self.stream.read(CHUNK)
            if not self.raw:
                # NOTE(review): len(data) / 2 is float division on Python 3 and
                # would break struct.unpack — this code presumably targets
                # Python 2; confirm before porting.
                count = len(data) / 2
                fmt = "%dh" % (count)
                shorts = struct.unpack(fmt, data)
            else:
                shorts = data
            for l in self.listeners:
                l(shorts)
        self.stream.close()
    def readRemoteData(self):
        """Run one play/pause cycle reading from a remote TCP audio stream."""
        self.condition.acquire()
        self.condition.wait()
        self.condition.release()
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.connect((self.host, self.port))
        buf = []
        while self.active:
            # Top the buffer up toward CHUNK*2 samples (2 bytes each) per dispatch.
            data = self.socket.recv((CHUNK*2-len(buf))*2)
            if not self.raw:
                count = len(data) / 2
                fmt = "%dh" % (count)
                shorts = struct.unpack(fmt, data)
                buf.extend(shorts)
                if len(buf)>=CHUNK*2:
                    for l in self.listeners:
                        l(buf)
                    buf=[]
            else:
                for l in self.listeners:
                    l(data)
        self.socket.close()
    def run(self):
        """Thread main loop: alternate capture cycles until stop() is called."""
        while not self.quit:
            if not self.remote:
                self.readData()
            else:
                self.readRemoteData()
|
# Tweepy
# Copyright 2009-2022 Joshua Roesslein
# See LICENSE for details.
from collections.abc import Mapping
class EqualityComparableID:
    """Mixin: same-class instances compare equal iff their ``id``s match."""

    __slots__ = ()

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            # Let the other operand (or default identity) handle it.
            return NotImplemented
        return self.id == other.id
class HashableID(EqualityComparableID):
    """Mixin adding hashing by ``id`` on top of id-based equality."""

    __slots__ = ()

    def __hash__(self):
        # Hash directly by id (presumably a numeric Twitter id — __hash__
        # must return an int).
        return self.id
class DataMapping(Mapping):
    """Mixin exposing an object's ``data`` dict through both the mapping
    protocol and attribute access."""

    __slots__ = ()

    def __contains__(self, item):
        return item in self.data

    def __getattr__(self, name):
        # Only invoked when normal attribute lookup fails.
        payload = self.data
        if name in payload:
            return payload[name]
        raise AttributeError from None

    def __getitem__(self, key):
        # Route through getattr so real attributes also satisfy item access.
        try:
            return getattr(self, key)
        except AttributeError:
            raise KeyError from None

    def __iter__(self):
        return iter(self.data)

    def __len__(self):
        return len(self.data)
|
import sys, os
def timeSplit( ETR ):
    """Split a duration in seconds into integer (hours, minutes, seconds).

    BUG FIX: the original `int(ETR - 3600*h)/60` applied int() before the
    division, which floors correctly only under Python 2's integer division;
    on Python 3 it returned floats.  Floor division fixes both.
    """
    h = int(ETR // 3600)
    m = int((ETR % 3600) // 60)
    s = int(ETR % 60)
    return h, m, s
def printProgress( current, total, deltaIter, deltaTime ):
    """Render a one-line progress bar with an ETR estimate to stdout.

    current/total: progress counters; deltaIter/deltaTime: iterations done
    and wall time spent, used to extrapolate the estimated time remaining.
    """
    terminalString = "\rProgress: "
    if total == 0:
        total += 1  # avoid division by zero
    percent = 100.*current/total
    nDots = int(percent/5)
    dotsString = "[" + nDots*"." + (20-nDots)*" " + "]"
    percentString = "{0:.0f}%".format(percent)
    ETR = deltaTime*(total - current)/float(deltaIter)
    # BUG FIX: use floor division so h/m/s stay ints on Python 3 as well;
    # `int(ETR - 3600*hours)/60` produced floats there and garbled the
    # zero-padded format fields.
    hours = int(ETR // 3600)
    minutes = int((ETR - 3600*hours) // 60)
    seconds = int(ETR - 3600*hours - 60*minutes)
    ETRstring = " ETR= {0}:{1:02}:{2:02} ".format(hours, minutes, seconds)
    if deltaTime < 0.0001:
        ETRstring = " ETR= "  # no timing data yet
    terminalString += dotsString + percentString + ETRstring
    sys.stdout.write(terminalString)
    sys.stdout.flush()
|
def printProgressTime( current, total, deltaTime ):
    """Render a one-line progress bar, estimating ETR from elapsed time only.

    Unlike printProgress, the rate is inferred from deltaTime/current.
    """
    terminalString = "\rProgress: "
    if total == 0:
        total += 1  # avoid division by zero
    percent = 100.*current/total
    nDots = int(percent/5)
    dotsString = "[" + nDots*"." + (20-nDots)*" " + "]"
    percentString = "{0:.0f}%".format(percent)
    if current != 0:
        ETR = (deltaTime*(total - current))/float(current)
        # BUG FIX: floor division keeps h/m/s ints on Python 3 (the original
        # `int(ETR - 3600*hours)/60` yielded floats there).
        hours = int(ETR // 3600)
        minutes = int((ETR - 3600*hours) // 60)
        seconds = int(ETR - 3600*hours - 60*minutes)
        ETRstring = " ETR= {0}:{1:02}:{2:02} ".format(hours, minutes, seconds)
    else:
        ETRstring = " ETR= "  # nothing done yet; no basis for an estimate
    if deltaTime < 0.0001:
        ETRstring = " ETR= "
    terminalString += dotsString + percentString + ETRstring
    sys.stdout.write(terminalString)
    sys.stdout.flush()
def ensureDirectory( dirName ):
    """Create dirName (including parents) if it does not already exist.

    Uses EAFP rather than exists()+makedirs() to avoid the race where
    another process creates the directory between the check and the call.
    """
    try:
        os.makedirs(dirName)
    except OSError:
        # Benign if the directory now exists; re-raise real failures.
        if not os.path.isdir(dirName):
            raise
#
# This file is part of opsd.
#
# opsd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# opsd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with opsd. If not, see <http://www.gnu.org/licenses/>.
"""Interface to allow the dome controller to operate an Astrohaven dome via domed"""
from warwick.observatory.dome import (
CommandStatus as DomeCommandStatus,
DomeShutterStatus,
DomeHeartbeatStatus)
from warwick.observatory.operations.constants import DomeStatus
from warwick.observatory.common import daemons, validation
# JSON-schema-style specification for the dome section of the ops config,
# enforced by validate_config below.
CONFIG_SCHEMA = {
    'type': 'object',
    # NOTE(review): JSON Schema defines `additionalProperties` as a boolean or
    # a schema, not a list — presumably the warwick validation layer assigns
    # this list form its own meaning; confirm before changing.
    'additionalProperties': ['module'],
    'required': [
        'daemon', 'movement_timeout', 'heartbeat_timeout'
    ],
    'properties': {
        'daemon': {
            'type': 'string',
            # Custom validator flag: value must name a known daemon
            # (see the daemon_name validator wired up in validate_config).
            'daemon_name': True
        },
        'movement_timeout': {
            'type': 'number',
            'minimum': 0
        },
        'heartbeat_timeout': {
            'type': 'number',
            'minimum': 0
        }
    }
}
def validate_config(config_json):
    """Return schema-validation errors for a dome configuration block."""
    custom_validators = {
        'daemon_name': validation.daemon_name_validator,
    }
    return validation.validation_errors(config_json, CONFIG_SCHEMA, custom_validators)
class DomeInterface:
    """Interface to allow the dome controller to operate an Astrohaven dome via domed"""
    def __init__(self, dome_config_json):
        self._daemon = getattr(daemons, dome_config_json['daemon'])
        # Communications timeout when opening or closing the dome (takes up to ~80 seconds for the onemetre dome)
        self._movement_timeout = dome_config_json['movement_timeout']
        # Timeout period (seconds) for the dome controller
        # The dome heartbeat is pinged once per LOOP_DELAY when the dome is under
        # automatic control and is fully open or fully closed. This timeout should
        # be large enough to account for the time it takes to open and close the dome
        self._heartbeat_timeout = dome_config_json['heartbeat_timeout']

    def query_status(self):
        """Map domed's shutter/heartbeat state onto a DomeStatus value."""
        with self._daemon.connect() as dome:
            status = dome.status()
            if status['heartbeat_status'] in [DomeHeartbeatStatus.TrippedClosing,
                                              DomeHeartbeatStatus.TrippedIdle]:
                return DomeStatus.Timeout
            if status['shutter_a'] == DomeShutterStatus.Closed and \
                    status['shutter_b'] == DomeShutterStatus.Closed:
                return DomeStatus.Closed
            if status['shutter_a'] in [DomeShutterStatus.Opening, DomeShutterStatus.Closing] or \
                    status['shutter_b'] in [DomeShutterStatus.Opening, DomeShutterStatus.Closing]:
                return DomeStatus.Moving
            return DomeStatus.Open

    def ping_heartbeat(self):
        """Re-arm the dome watchdog timer; returns True on success."""
        print('dome: sending heartbeat ping')
        with self._daemon.connect() as dome:
            ret = dome.set_heartbeat_timer(self._heartbeat_timeout)
            return ret == DomeCommandStatus.Succeeded

    def disable_heartbeat(self):
        """Disable the dome watchdog timer; returns True on success.

        BUG FIX: this previously re-armed the timer with
        self._heartbeat_timeout — byte-for-byte identical to
        ping_heartbeat — so the heartbeat was never actually disabled.
        A timeout of 0 disables it (confirm against the domed API).
        """
        print('dome: disabling heartbeat')
        with self._daemon.connect() as dome:
            ret = dome.set_heartbeat_timer(0)
            return ret == DomeCommandStatus.Succeeded

    def close(self):
        """Close both shutters (b then a); returns True on success."""
        print('dome: sending heartbeat ping before closing')
        with self._daemon.connect() as dome:
            dome.set_heartbeat_timer(self._heartbeat_timeout)
        print('dome: closing')
        # Longer timeout: shutter travel can take ~80 s.
        with self._daemon.connect(timeout=self._movement_timeout) as dome:
            ret = dome.close_shutters('ba')
            return ret == DomeCommandStatus.Succeeded

    def open(self):
        """Open both shutters (a then b); returns True on success."""
        print('dome: sending heartbeat ping before opening')
        with self._daemon.connect() as dome:
            dome.set_heartbeat_timer(self._heartbeat_timeout)
        print('dome: opening')
        with self._daemon.connect(timeout=self._movement_timeout) as dome:
            ret = dome.open_shutters('ab')
            return ret == DomeCommandStatus.Succeeded
|
"""Twitter crawler script"""
import tweepy
from database import MongoDB
class Twitter(object): # pylint: disable=too-few-public-methods
    """Thin tweepy wrapper: read the authenticated user's timeline and
    optionally persist the tweets to MongoDB."""
    def __init__(self):
        # SECURITY(review): live OAuth credentials are hard-coded in source
        # control — rotate them and load from the environment or a config
        # file instead; anyone with repo access can use this account.
        self.consumer_key = "40GvlhlFPNbVGkZnPncPH8DgB"
        self.consumer_secret = "G595ceskX8iVH34rsuLSqpFROL0brp8ezzZR2dGvTKvcpPsKPw"
        self.access_token = "397905190-LXMFC0clhtDxx5cITBWVFqVUKNQBKuqM06Ls4k5n"
        self.access_token_secret = "nPzoHy5UwzOPUZVZO3JhBFRL3WgdM0jJKignxIzQ6nAS1"
        self.auth = tweepy.OAuthHandler(self.consumer_key, self.consumer_secret)
        self.auth.set_access_token(self.access_token, self.access_token_secret)
        self.api = tweepy.API(self.auth)
    # Print the *count* most recent timeline tweets (Python 2 print statement).
    def print_tweets(self, count=1):
        tweets = self._user_timeline(count)
        for tweet in tweets:
            print tweet.encode('utf-8')
    # Insert the *count* most recent timeline tweets into the
    # "verificacion" database's "tweets" collection.
    def save_tweets(self, count=1):
        database = MongoDB("verificacion")
        coll = database.collection("tweets")
        tweets = self._user_timeline(count)
        for tweet in tweets:
            coll.insert({"tweet": tweet})
    # Return the text of the *count* most recent tweets from the
    # authenticated user's timeline.
    def _user_timeline(self, count=200):
        tweets = []
        public_tweets = self.api.user_timeline(id=self.auth.get_username(), count=count)
        for tweet in public_tweets:
            text = tweet.text
            tweets.append(text)
        return tweets
if __name__ == '__main__':
    # Smoke test: print and persist the 10 most recent timeline tweets.
    twepp = Twitter()
    twepp.print_tweets(10)
    twepp.save_tweets(10)
# -*- coding: utf-8 -*-
"""
Kay preparse management command.
:Copyright: (c) 2009 Accense Technology, Inc.
Takashi Matsuo <tmatsuo@candit.jp>,
All rights reserved.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
from os import listdir, path, mkdir
from werkzeug.utils import import_string
import kay
import kay.app
from kay.utils import local
from kay.utils.jinja2utils.compiler import compile_dir
from kay.management.utils import print_status
# Directory names to skip while searching for template directories.
IGNORE_FILENAMES = {
    'kay': ('debug', 'app_template'),
    # BUG FIX: ('kay') was just the string 'kay', so membership tests became
    # substring checks (e.g. a dir named 'k' or 'ka' would be skipped).
    # A one-element tuple needs the trailing comma.
    'app': ('kay',),
}
def find_template_dir(target_path, ignore_filenames):
    """Recursively collect every directory literally named "templates".

    Hidden (dot-prefixed) directories and names present in
    *ignore_filenames* are pruned from the search; plain files are ignored.
    """
    found = []
    for entry in listdir(target_path):
        full = path.join(target_path, entry)
        if not path.isdir(full):
            continue
        if entry.startswith(".") or entry in ignore_filenames:
            continue
        if entry == "templates":
            found.append(full)
        else:
            found += find_template_dir(full, ignore_filenames)
    return found
def do_preparse_bundle():
    """
    Pre compile all the jinja2 templates in Kay itself.
    """
    print_status("Compiling bundled templates...")
    env = kay.app.get_application().app.jinja2_env
    for template_dir in find_template_dir(kay.KAY_DIR, ('debug','app_template')):
        dest_dir = prepare_destdir(template_dir)
        print_status("Now compiling templates in %s to %s." % (template_dir, dest_dir))
        compile_dir(env, template_dir, dest_dir)
    print_status("Finished compiling bundled templates...")
def do_preparse_apps():
    """
    Pre compile all the jinja2 templates in your applications.
    """
    from kay.conf import LazySettings
    print_status("Compiling templates...")
    application = kay.app.get_application()
    applications = [application]
    settings_treated = []
    # Build one application object per distinct per-domain settings module
    # (iteritems: this codebase targets Python 2).
    for key, settings_name in \
        application.app.app_settings.PER_DOMAIN_SETTINGS.iteritems():
        if not settings_name in settings_treated:
            applications.append(kay.app.get_application(
                settings=LazySettings(settings_module=settings_name)))
            settings_treated.append(settings_name)
    for app in applications:
        compile_app_templates(app.app)  # pass KayApp instance
        # Also compile templates for any mounted sub-applications.
        for key, submount_app in app.mounts.iteritems():
            if isinstance(submount_app, kay.app.KayApp):
                compile_app_templates(submount_app)
    print_status("Finished compiling templates...")
def prepare_destdir(dir):
    """Compute the compiled-templates directory for *dir*, create it if
    needed, and prune compiled files whose source no longer exists."""
    # Derive the destination name from the source name.
    if 'templates' in dir:
        dest = dir.replace('templates', 'templates_compiled')
    else:
        dest = dir + '_compiled'
    if not path.isdir(dest):
        mkdir(dest)
        return dest
    # Destination already exists: remove stale compiled files.
    for d, subdirs, files in os.walk(dest):
        for f in files:
            compiled_filename = "%s/%s" % (d, f)
            orig_filename = compiled_filename.replace(dest, dir)
            if not path.isfile(orig_filename):
                os.unlink(compiled_filename)
                print_status("%s does not exist. So, '%s' is removed." % (
                    orig_filename, compiled_filename))
    return dest
def compi | le_app_templates(app):
env = app.jinja2_env
target_dirs = [dir for dir in app.app_settings.TEMPLATE_DIRS\
if os.path.isdir(dir)]
for app in app.app_settings.INSTALLED_APPS:
if app.startswith("kay."):
continue
mod = import_string(app)
target_dirs.extend(find_templ | ate_dir(os.path.dirname(mod.__file__),
('kay')))
for dir in target_dirs:
dest = prepare_destdir(dir)
print_status("Now compiling templates in %s to %s." % (dir, dest))
compile_dir(env, dir, dest)
|
from linked_list import LinkedList
class Stack(object):
def __ini | t__(self, iterable=None):
self._list = LinkedList(iterable)
def push(self, val):
self._list.insert( | val)
def pop(self):
return self._list.pop()
|
#!/usr/bin/env python
# Dependencies.py - discover, read, and write dependencies file for make.
# The format like the output from "g++ -MM" which produces a
# list of header (.h) files used by source files (.cxx).
# As a module, provides
# FindPathToHeader(header, includePath) -> path
# FindHeadersInFile(filePath) -> [headers]
# FindHeadersInFileRecursive(filePath, includePath, renames) -> [paths]
# FindDependencies(sourceGlobs, includePath, objExt, startDirectory, renames) -> [dependencies]
# ExtractDependencies(input) -> [dependencies]
# TextFromDependencies(dependencies)
# WriteDependencies(output, dependencies)
# UpdateDependencies(filepath, dependencies)
# PathStem(p) -> stem
# InsertSynonym(dependencies, current, additional) -> [dependencies]
# If run as a script reads from stdin and writes to stdout.
# Only tested with ASCII file names.
# Copyright 2019 by Neil Hodgson <neilh@scintilla.org>
# The License.txt file describes the conditions under which this software may be distributed.
# Requires Python 2.7 or later
import codecs, glob, os, sys
if __name__ == "__main__":
import FileGenerator
else:
from . import FileGenerator
continuationLineEnd = " \\"
def FindPathToHeader(header, includePath):
    """Return the first existing path to *header* among *includePath* dirs, or ""."""
    for directory in includePath:
        candidate = os.path.join(directory, header)
        if os.path.exists(candidate):
            return candidate
    return ""
fhifCache = {}  # filePath -> [headers]; memoizes include scans. ~5x speed up.
def FindHeadersInFile(filePath):
    """Return the quoted #include targets of *filePath*, cached.

    System includes (<...>) are excluded; results are memoized in fhifCache.
    """
    try:
        return fhifCache[filePath]
    except KeyError:
        pass
    headers = []
    with codecs.open(filePath, "r", "utf-8") as f:
        for line in f:
            if not line.strip().startswith("#include"):
                continue
            parts = line.split()
            if len(parts) > 1 and not parts[1].startswith('<'):
                headers.append(parts[1].strip('"'))
    fhifCache[filePath] = headers
    return headers
def FindHeadersInFileRecursive(filePath, includePath, renames):
    """Transitively resolve the quoted includes of *filePath* into paths.

    Depth-first, duplicate-free; *renames* maps old header names to new ones
    before resolution.
    """
    found = []
    for header in FindHeadersInFile(filePath):
        header = renames.get(header, header)
        headerPath = FindPathToHeader(header, includePath)
        if not headerPath or headerPath in found:
            continue
        found.append(headerPath)
        for sub in FindHeadersInFileRecursive(headerPath, includePath, renames):
            if sub not in found:
                found.append(sub)
    return found
def RemoveStart(relPath, start):
    """Strip *start* from the front of *relPath* when present."""
    return relPath[len(start):] if relPath.startswith(start) else relPath
def ciKey(f):
    """Case-insensitive sort key for file names."""
    return f.lower()
def FindDependencies(sourceGlobs, includePath, objExt, startDirectory, renames={}):
    """Build [objName, [dependency paths]] entries for each globbed source.

    Paths are normalized to forward slashes and made relative to
    *startDirectory*.
    """
    deps = []
    for sourceGlob in sourceGlobs:
        # Sorting the files minimizes deltas as order returned by OS may be arbitrary
        for sourceName in sorted(glob.glob(sourceGlob), key=ciKey):
            objName = os.path.splitext(os.path.basename(sourceName))[0] + objExt
            headerPaths = FindHeadersInFileRecursive(sourceName, includePath, renames)
            normalized = [RemoveStart(fn.replace("\\", "/"), startDirectory)
                          for fn in [sourceName] + headerPaths]
            deps.append([objName, normalized])
    return deps
def PathStem(p):
    """ Return the stem of a filename: "CallTip.o" -> "CallTip" """
    baseName = os.path.basename(p)
    return os.path.splitext(baseName)[0]
def InsertSynonym(dependencies, current, additional):
    """ Insert a copy of one object file with dependencies under a different name.
    Used when one source file is used to create two object files with different
    preprocessor definitions. """
    result = []
    for dep in dependencies:
        result.append(dep)
        if dep[0] == current:
            # Duplicate entry under the new name, sharing the dependency list.
            result.append([additional, dep[1]])
    return result
def ExtractDependencies(input):
    """ Create a list of dependencies from input list of lines
    Each element contains the name of the object and a list of
    files that it depends on.
    Dependencies that contain "/usr/" are removed as they are system headers. """
    deps = []
    for line in input:
        # Continuation lines (headers) start with whitespace.
        headersLine = line.startswith(" ") or line.startswith("\t")
        line = line.strip()
        # (Removed the unused `isContinued` flag the original computed here.)
        line = line.rstrip("\\ ")
        fileNames = line.strip().split(" ")
        if not headersLine:
            # It's a source file line; there may be headers too.
            sourceLine = fileNames[0].rstrip(":")
            fileNames = fileNames[1:]
            deps.append([sourceLine, []])
        deps[-1][1].extend(header for header in fileNames if "/usr/" not in header)
    return deps
def TextFromDependencies(dependencies):
    """ Convert a list of dependencies to text. """
    indentHeaders = "\t"
    joinHeaders = continuationLineEnd + os.linesep + indentHeaders
    pieces = []
    for objName, headers in dependencies:
        pieces.append(objName + ":")
        for header in headers:
            pieces.append(joinHeaders)
            pieces.append(header)
        if headers:
            pieces.append(os.linesep)
    return "".join(pieces)
def UpdateDependencies(filepath, dependencies, comment=""):
    """ Write a dependencies file if different from dependencies. """
    content = comment.rstrip() + os.linesep + TextFromDependencies(dependencies)
    FileGenerator.UpdateFile(os.path.abspath(filepath), content)
def WriteDependencies(output, dependencies):
    """ Write a list of dependencies out to a stream. """
    text = TextFromDependencies(dependencies)
    output.write(text)
if __name__ == "__main__":
    """ Act as a filter that reformats input dependencies to one per line. """
    # Read make-style dependency rules from stdin and emit them normalized.
    inputLines = sys.stdin.readlines()
    deps = ExtractDependencies(inputLines)
    WriteDependencies(sys.stdout, deps)
|
"""
Module for Image annotations using annotator.
"""
from lxml import etree
from pkg_resources import resource_string
from xmodule.x_module import XModule
from xmodule.raw_module import RawDescriptor
from xblock.core import Scope, String
from xmodule.annotator_mixin import get_instructions, html_to_text
from xmodule.annotator_token import retrieve_token
from xblock.fragment import Fragment
import textwrap
# Make '_' a no-op so we can scrape strings
_ = lambda text: text
class AnnotatableFields(object):
    """ Fields for `ImageModule` and `ImageDescriptor`. """
    # Raw <annotatable> XML; the <json> element configures the
    # OpenSeadragon image viewer (see ImageAnnotationModule.__init__).
    data = String(help=_("XML data for the annotation"),
                  scope=Scope.content,
                  default=textwrap.dedent("""\
        <annotatable>
            <instructions>
                <p>
                    Add the instructions to the assignment here.
                </p>
            </instructions>
            <p>
                Lorem ipsum dolor sit amet, at amet animal petentium nec. Id augue nemore postulant mea. Ex eam dicant noluisse expetenda, alia admodum abhorreant qui et. An ceteros expetenda mea, tale natum ipsum quo no, ut pro paulo alienum noluisse.
            </p>
            <json>
                navigatorSizeRatio: 0.25,
                wrapHorizontal:     false,
                showNavigator: true,
                navigatorPosition: "BOTTOM_LEFT",
                showNavigationControl: true,
                tileSources: [{"profile": "http://library.stanford.edu/iiif/image-api/1.1/compliance.html#level2", "scale_factors": [1, 2, 4, 8, 16, 32, 64], "tile_height": 1024, "height": 3466, "width": 113793, "tile_width": 1024, "qualities": ["native", "bitonal", "grey", "color"], "formats": ["jpg", "png", "gif"], "@context": "http://library.stanford.edu/iiif/image-api/1.1/context.json", "@id": "http://54.187.32.48/loris/suzhou_orig.jp2"}],
            </json>
        </annotatable>
        """))
    display_name = String(
        display_name=_("Display Name"),
        help=_("Display name for this module"),
        scope=Scope.settings,
        default=_('Image Annotation'),
    )
    # Comma-separated tag:color pairs used to auto-highlight annotations.
    instructor_tags = String(
        display_name=_("Tags for Assignments"),
        help=_("Add tags that automatically highlight in a certain color using the comma-separated form, i.e. imagery:red,parallelism:blue"),
        scope=Scope.settings,
        default='professor:green,teachingAssistant:blue',
    )
    # Base URL of the external annotation-storage service.
    annotation_storage_url = String(
        help=_("Location of Annotation backend"),
        scope=Scope.settings,
        default="http://your_annotation_storage.com",
        display_name=_("Url for Annotation Storage")
    )
    # Shared secret used by retrieve_token() to sign storage tokens.
    annotation_token_secret = String(
        help=_("Secret string for annotation storage"),
        scope=Scope.settings,
        default="xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
        display_name=_("Secret Token String for Annotation")
    )
    default_tab = String(
        display_name=_("Default Annotations Tab"),
        help=_("Select which tab will be the default in the annotations table: myNotes, Instructor, or Public."),
        scope=Scope.settings,
        default="myNotes",
    )
    # currently only supports one instructor, will build functionality for multiple later
    instructor_email = String(
        display_name=_("Email for 'Instructor' Annotations"),
        help=_("Email of the user that will be attached to all annotations that will be found in 'Instructor' tab."),
        scope=Scope.settings,
        default="",
    )
    annotation_mode = String(
        display_name=_("Mode for Annotation Tool"),
        help=_("Type in number corresponding to following modes: 'instructor' or 'everyone'"),
        scope=Scope.settings,
        default="everyone",
    )
class ImageAnnotationModule(AnnotatableFields, XModule):
    '''Image Annotation Module'''
    # Client-side assets bundled with the module.
    js = {
        'coffee': [
            resource_string(__name__, 'js/src/javascript_loader.coffee'),
            resource_string(__name__, 'js/src/html/display.coffee'),
            resource_string(__name__, 'js/src/annotatable/display.coffee'),
        ],
        'js': [
            resource_string(__name__, 'js/src/collapsible.js'),
        ]
    }
    css = {'scss': [resource_string(__name__, 'css/annotatable/display.scss')]}
    icon_class = 'imageannotation'
    def __init__(self, *args, **kwargs):
        """Parse self.data (XML) into instructions and OpenSeadragon
        config, and resolve the current user's email/staff status."""
        super(ImageAnnotationModule, self).__init__(*args, **kwargs)
        xmltree = etree.fromstring(self.data)
        self.instructions = self._extract_instructions(xmltree)
        # <json> holds the OpenSeadragon viewer configuration as text.
        self.openseadragonjson = html_to_text(etree.tostring(xmltree.find('json'), encoding='unicode'))
        self.user_email = ""
        self.is_course_staff = False
        if self.runtime.get_user_role() in ['instructor', 'staff']:
            self.is_course_staff = True
        # get_real_user is None in Studio, where there is no anonymous id.
        if self.runtime.get_real_user is not None:
            try:
                self.user_email = self.runtime.get_real_user(self.runtime.anonymous_student_id).email
            except Exception:  # pylint: disable=broad-except
                self.user_email = _("No email address found.")
    def _extract_instructions(self, xmltree):
        """ Removes <instructions> from the xmltree and returns them as a string, otherwise None. """
        return get_instructions(xmltree)
    def student_view(self, context):
        """ Renders parameters to template. """
        context = {
            'display_name': self.display_name_with_default,
            'instructions_html': self.instructions,
            'token': retrieve_token(self.user_email, self.annotation_token_secret),
            'tag': self.instructor_tags,
            'openseadragonjson': self.openseadragonjson,
            'annotation_storage': self.annotation_storage_url,
            'default_tab': self.default_tab,
            'instructor_email': self.instructor_email,
            'annotation_mode': self.annotation_mode,
            'is_course_staff': self.is_course_staff,
        }
        fragment = Fragment(self.system.render_template('imageannotation.html', context))
        # TinyMCE already exists in Studio so we should not load the files again
        # get_real_user always returns "None" in Studio since its runtimes contains no anonymous ids
        if self.runtime.get_real_user is not None:
            fragment.add_javascript_url(self.runtime.STATIC_URL + "js/vendor/tinymce/js/tinymce/tinymce.full.min.js")
            fragment.add_javascript_url(self.runtime.STATIC_URL + "js/vendor/tinymce/js/tinymce/jquery.tinymce.min.js")
        return fragment
class ImageAnnotationDescriptor(AnnotatableFields, RawDescriptor):  # pylint: disable=abstract-method
    ''' Image annotation descriptor '''
    module_class = ImageAnnotationModule
    mako_template = "widgets/raw-edit.html"
    @property
    def non_editable_metadata_fields(self):
        """Hide the storage URL and token secret from the Studio
        settings editor, in addition to the base class's fields."""
        non_editable_fields = super(ImageAnnotationDescriptor, self).non_editable_metadata_fields
        non_editable_fields.extend([
            ImageAnnotationDescriptor.annotation_storage_url,
            ImageAnnotationDescriptor.annotation_token_secret,
        ])
        return non_editable_fields
|
#task_H
def dijkstra(start, graph):
    """Compute shortest-path distances from *start* to every vertex.

    *graph* maps vertex -> {neighbour: [parallel edge weights]}; the
    cheapest parallel edge is used.  Returns a list D where D[v] is the
    distance from start to v, or None if v is unreachable.

    NOTE: despite the name, this is a label-correcting search over a
    plain FIFO list (no priority queue), not classic Dijkstra.
    """
    D = [None] * len(graph)
    D[start] = 0
    queue = [start]
    index = 0
    while index < len(queue):
        v = queue[index]
        index += 1
        for u in graph[v]:
            w = min(graph[v][u])  # hoisted: cheapest of the parallel edges
            # 'is None' instead of '== None'; re-queue on improvement
            if D[u] is None or D[v] + w < D[u]:
                D[u] = D[v] + w
                queue.append(u)
    return D
def reverse(graph):
    """Return a new graph with every edge direction flipped.

    *graph* maps vertex -> {neighbour: [weights]}.  Bug fix: the
    original built the reversed adjacency dict but never returned it,
    so callers (path()) received None and crashed.
    """
    n = len(graph)
    graph_reversed = {v: {} for v in range(n)}
    for i in range(n):
        for v in graph[i]:
            for w in graph[i][v]:
                # inline edge insertion (parallel edges preserved) so this
                # helper does not depend on add()
                graph_reversed[v].setdefault(i, []).append(w)
    return graph_reversed
def add(graph, a, b, w):
    """Record a directed edge a -> b with weight w; parallel edges are
    kept as a list of weights."""
    if b in graph[a]:
        graph[a][b].append(w)  # bug fix: was 'grph' (NameError at runtime)
    else:
        graph[a][b] = [w]
def min_vertex(x, D, graph):
    """Return the neighbour v of x that minimises D[v] + cheapest edge
    weight from x to v (used to walk a shortest path backwards through
    the reversed graph).

    Bug fix: the original called ``graph[x].keys`` and ``A.items``
    without parentheses, iterating/listing bound methods and raising
    TypeError before any comparison happened.
    """
    candidates = [u for u in graph[x] if D[u] is not None]
    best = candidates[0]
    best_cost = D[best] + min(graph[x][best])
    for v in candidates[1:]:
        cost = D[v] + min(graph[x][v])
        if cost < best_cost:  # strict '<' keeps the first minimum, as before
            best_cost = cost
            best = v
    return best
def path(graph, D, s, f):
    """Reconstruct one shortest s -> f path from the distance table D.

    Walks backwards from f through the reversed graph, at each step
    moving to the predecessor chosen by min_vertex, then returns the
    vertices in forward (s -> f) order.
    """
    reversed_graph = reverse(graph)
    current = f
    route = [f]
    while current != s:
        current = min_vertex(current, D, reversed_graph)
        route.append(current)
    route.reverse()
    return route
# Read "n m s f" (vertex count, edge count, source, target), then m
# undirected weighted edges "a b w", and print one shortest s->f path.
n, m, s, f = tuple(map(int, input().split()))
# NOTE(review): zip(range(n), range(n)) pairs each x with itself and only
# x is used — this just builds {0: {}, ..., n-1: {}}.
graph = {x: {} for x, y in zip(range(n), range(n))}
for i in range(m):
    a, b, w = tuple(map(int, input().split()))
    # store each edge in both directions (undirected graph)
    add(graph, a, b, w)
    add(graph, b, a, w)
D = dijkstra(s, graph)
print(*path(graph, D, s, f))
from . import test_attachment
# Modules whose tests are quick enough for the fast test pass.
fast_suite = [test_attachment,
              ]
|
from .forms import SetupForm
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.shortcuts import redirect
from splunkdj.decorators.render import render_to
from splunkdj.setup import create_setup_view_context
@login_required
def home(request):
    """Send the logged-in user to the app's default landing page."""
    # Redirect to the default view, which happens to be a non-framework view
    return redirect('/en-us/app/twitter2/twitter_general')
@render_to('twitter2:setup.html')
@login_required
def setup(request):
    """Render and process the app's setup page.

    Form handling is delegated to splunkdj's create_setup_view_context;
    afterwards the app is reloaded via splunkd so other views observe
    the new configuration state.
    """
    result = create_setup_view_context(
        request,
        SetupForm,
        reverse('twitter2:home'))
    # HACK: Workaround DVPL-4647 (Splunk 6.1 and below):
    #       Refresh current app's state so that non-framework views
    #       observe when the app becomes configured.
    service = request.service
    app_name = service.namespace['app']
    service.apps[app_name].post('_reload')
    return result
|
gnals to nose that this function is or is not a test
Parameters
----------
tf : bool
If True specifies this is a test, not a test otherwise
e.g
>>> from numpy.testing.decorators import setastest
>>> @setastest(False)
... def func_with_test_in_name(arg1, arg2): pass
...
>>>
This decorator cannot use the nose namespace, because it can be
called from a non-test module. See also istest and nottest in
nose.tools
'''
def set_test(t):
t.__test__ = tf
return t
return set_test
def skipif(skip_condition, msg=None):
    ''' Make function raise SkipTest exception if skip_condition is true
    Parameters
    ----------
    skip_condition : bool or callable.
        Flag to determine whether to skip test. If the condition is a
        callable, it is used at runtime to dynamically make the decision. This
        is useful for tests that may require costly imports, to delay the cost
        until the test suite is actually executed.
    msg : string
        Message to give on raising a SkipTest exception
    Returns
    -------
    decorator : function
        Decorator, which, when applied to a function, causes SkipTest
        to be raised when the skip_condition was True, and the function
        to be called normally otherwise.
    Notes
    -----
    You will see from the code that we had to further decorate the
    decorator with the nose.tools.make_decorator function in order to
    transmit function name, and various other metadata.
    '''
    def skip_decorator(f):
        # Local import to avoid a hard nose dependency and only incur the
        # import time overhead at actual test-time.
        import nose
        # Allow for both boolean or callable skip conditions.
        # Wrapping both cases in a lambda defers evaluation to call time.
        if callable(skip_condition):
            skip_val = lambda : skip_condition()
        else:
            skip_val = lambda : skip_condition
        def get_msg(func,msg=None):
            """Skip message with information about function being skipped."""
            if msg is None:
                out = 'Test skipped due to test condition'
            else:
                out = '\n'+msg
            return "Skipping test: %s%s" % (func.__name__,out)
        # We need to define *two* skippers because Python doesn't allow both
        # return with value and yield inside the same function.
        def skipper_func(*args, **kwargs):
            """Skipper for normal test functions."""
            if skip_val():
                raise nose.SkipTest(get_msg(f,msg))
            else:
                return f(*args, **kwargs)
        def skipper_gen(*args, **kwargs):
            """Skipper for test generators."""
            if skip_val():
                raise nose.SkipTest(get_msg(f,msg))
            else:
                for x in f(*args, **kwargs):
                    yield x
        # Choose the right skipper to use when building the actual decorator.
        if nose.util.isgenerator(f):
            skipper = skipper_gen
        else:
            skipper = skipper_func
        # make_decorator copies name/docstring/metadata from f onto skipper.
        return nose.tools.make_decorator(f)(skipper)
    return skip_decorator
def knownfailureif(fail_condition, msg=None):
    ''' Make function raise KnownFailureTest exception if fail_condition is true
    Parameters
    ----------
    fail_condition : bool or callable.
        Flag to determine whether to mark test as known failure (True)
        or not (False). If the condition is a callable, it is used at
        runtime to dynamically make the decision. This is useful for
        tests that may require costly imports, to delay the cost
        until the test suite is actually executed.
    msg : string
        Message to give on raising a KnownFailureTest exception
    Returns
    -------
    decorator : function
        Decorator, which, when applied to a function, causes SkipTest
        to be raised when the skip_condition was True, and the function
        to be called normally otherwise.
    Notes
    -----
    You will see from the code that we had to further decorate the
    decorator with the nose.tools.make_decorator function in order to
    transmit function name, and various other metadata.
    '''
    if msg is None:
        msg = 'Test skipped due to known failure'
    # Allow for both boolean or callable known failure conditions.
    if callable(fail_condition):
        fail_val = lambda : fail_condition()
    else:
        fail_val = lambda : fail_condition
    def knownfail_decorator(f):
        # Local import to avoid a hard nose dependency and only incur the
        # import time overhead at actual test-time.
        import nose
        from noseclasses import KnownFailureTest
        def knownfailer(*args, **kwargs):
            if fail_val():
                # Call-style raise works on both Python 2 and 3; the old
                # 'raise KnownFailureTest, msg' form was Python-2-only syntax.
                raise KnownFailureTest(msg)
            else:
                return f(*args, **kwargs)
        return nose.tools.make_decorator(f)(knownfailer)
    return knownfail_decorator
# The following two classes are copied from python 2.6 warnings module (context
# manager)
class WarningMessage(object):
    """Holds the result of a single showwarning() call."""
    _WARNING_DETAILS = ("message", "category", "filename", "lineno", "file",
                        "line")
    def __init__(self, message, category, filename, lineno, file=None,
                 line=None):
        # Assign each reported detail explicitly (the original looped over
        # _WARNING_DETAILS pulling values out of locals()).
        self.message = message
        self.category = category
        self.filename = filename
        self.lineno = lineno
        self.file = file
        self.line = line
        # Remember the category's name (the class itself may be gc'd later).
        self._category_name = category.__name__ if category else None
    def __str__(self):
        return ("{message : %r, category : %r, filename : %r, lineno : %s, "
                "line : %r}" % (self.message, self._category_name,
                                self.filename, self.lineno, self.line))
class WarningManager:
    """Save/restore the warnings filter state (copy of the Python 2.6
    ``warnings.catch_warnings`` context manager).

    When *record* is True, ``__enter__`` returns a list that collects a
    WarningMessage for every warning shown while the manager is active;
    otherwise it returns None.  *module* defaults to the ``warnings``
    module and exists for testing.
    """
    def __init__(self, record=False, module=None):
        self._record = record
        if module is None:
            self._module = sys.modules['warnings']
        else:
            self._module = module
        self._entered = False
    def __enter__(self):
        if self._entered:
            raise RuntimeError("Cannot enter %r twice" % self)
        self._entered = True
        # Snapshot the filter list and showwarning hook, then give the
        # module a private copy of the filters to mutate.
        self._filters = self._module.filters
        self._module.filters = self._filters[:]
        self._showwarning = self._module.showwarning
        if self._record:
            log = []
            def showwarning(*args, **kwargs):
                log.append(WarningMessage(*args, **kwargs))
            self._module.showwarning = showwarning
            return log
        else:
            return None
    def __exit__(self, *exc_info):
        # Bug fix: accept the (type, value, traceback) triple so this class
        # works with the 'with' statement; the original took no arguments
        # and raised TypeError when used as a context manager.  Explicit
        # ctx.__exit__() calls with no arguments still work.
        if not self._entered:
            raise RuntimeError("Cannot exit %r without entering first" % self)
        self._module.filters = self._filters
        self._module.showwarning = self._showwarning
def deprecated(conditional=True):
"""This decorator can be used to filter Deprecation Warning, to avoid
printing them during the test suite run, while checking that the test
actually raises a DeprecationWarning.
Parameters
----------
conditional : bool or callable.
Flag to determine whether to mark test as deprecated or not. If the
condition is a callable, it is used at runtime to dynamically make the
decision.
Returns
-------
decorator : function
Decorator, which, when applied to a function, causes SkipTest
to be raised when the skip_condition was True, and the function
to be called normally otherwise.
Notes
-----
.. versionadded:: 1.4.0
"""
def deprecate_decorator(f):
# Local import to avoid a hard nose dependency and only incur the
# import time overhead at actual test-time.
import nose
from noseclasses import KnownFailureTest
def _deprecated_imp(*args, **kwargs):
# Poor man's replacement for the with statement
ctx = WarningManager(record=True)
l = ctx.__enter__()
warnings.simplefilter('always')
try:
|
"""Test the Advantage Air Sensor Platform."""
from datetime import timedelta
from json import loads
from homeassistant.components.advantage_air.const import DOMAIN as ADVANTAGE_AIR_DOMAIN
from homeassistant.components.advantage_air.sensor import (
ADVANTAGE_AIR_SERVICE_SET_TIME_TO,
ADVANTAGE_AIR_SET_COUNTDOWN_VALUE,
)
from homeassistant.config_entries import RELOAD_AFTER_UPDATE_DELAY
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.helpers import entity_registry as er
from homeassistant.util import dt
from tests.common import async_fire_time_changed
from tests.components.advantage_air import (
TEST_SET_RESPONSE,
TEST_SET_URL,
TEST_SYSTEM_DATA,
TEST_SYSTEM_URL,
add_mock_config,
)
async def test_sensor_platform(hass, aioclient_mock):
    """Test sensor platform.

    Mocks the Advantage Air HTTP API, loads the integration, and checks
    each sensor's state/unique_id plus the set-time-to service calls.
    The mock_calls[-2]/[-1] pattern asserts that a service call first
    hits /setAircon and then refreshes via /getSystemData.
    """
    aioclient_mock.get(
        TEST_SYSTEM_URL,
        text=TEST_SYSTEM_DATA,
    )
    aioclient_mock.get(
        TEST_SET_URL,
        text=TEST_SET_RESPONSE,
    )
    await add_mock_config(hass)
    registry = er.async_get(hass)
    # Setup performs exactly one fetch of the system data.
    assert len(aioclient_mock.mock_calls) == 1
    # Test First TimeToOn Sensor
    entity_id = "sensor.ac_one_time_to_on"
    state = hass.states.get(entity_id)
    assert state
    assert int(state.state) == 0
    entry = registry.async_get(entity_id)
    assert entry
    assert entry.unique_id == "uniqueid-ac1-timetoOn"
    value = 20
    await hass.services.async_call(
        ADVANTAGE_AIR_DOMAIN,
        ADVANTAGE_AIR_SERVICE_SET_TIME_TO,
        {ATTR_ENTITY_ID: [entity_id], ADVANTAGE_AIR_SET_COUNTDOWN_VALUE: value},
        blocking=True,
    )
    assert len(aioclient_mock.mock_calls) == 3
    assert aioclient_mock.mock_calls[-2][0] == "GET"
    assert aioclient_mock.mock_calls[-2][1].path == "/setAircon"
    data = loads(aioclient_mock.mock_calls[-2][1].query["json"])
    assert data["ac1"]["info"]["countDownToOn"] == value
    assert aioclient_mock.mock_calls[-1][0] == "GET"
    assert aioclient_mock.mock_calls[-1][1].path == "/getSystemData"
    # Test First TimeToOff Sensor
    entity_id = "sensor.ac_one_time_to_off"
    state = hass.states.get(entity_id)
    assert state
    assert int(state.state) == 10
    entry = registry.async_get(entity_id)
    assert entry
    assert entry.unique_id == "uniqueid-ac1-timetoOff"
    value = 0
    await hass.services.async_call(
        ADVANTAGE_AIR_DOMAIN,
        ADVANTAGE_AIR_SERVICE_SET_TIME_TO,
        {ATTR_ENTITY_ID: [entity_id], ADVANTAGE_AIR_SET_COUNTDOWN_VALUE: value},
        blocking=True,
    )
    assert len(aioclient_mock.mock_calls) == 5
    assert aioclient_mock.mock_calls[-2][0] == "GET"
    assert aioclient_mock.mock_calls[-2][1].path == "/setAircon"
    data = loads(aioclient_mock.mock_calls[-2][1].query["json"])
    assert data["ac1"]["info"]["countDownToOff"] == value
    assert aioclient_mock.mock_calls[-1][0] == "GET"
    assert aioclient_mock.mock_calls[-1][1].path == "/getSystemData"
    # Test First Zone Vent Sensor
    entity_id = "sensor.zone_open_with_sensor_vent"
    state = hass.states.get(entity_id)
    assert state
    assert int(state.state) == 100
    entry = registry.async_get(entity_id)
    assert entry
    assert entry.unique_id == "uniqueid-ac1-z01-vent"
    # Test Second Zone Vent Sensor
    entity_id = "sensor.zone_closed_with_sensor_vent"
    state = hass.states.get(entity_id)
    assert state
    assert int(state.state) == 0
    entry = registry.async_get(entity_id)
    assert entry
    assert entry.unique_id == "uniqueid-ac1-z02-vent"
    # Test First Zone Signal Sensor
    entity_id = "sensor.zone_open_with_sensor_signal"
    state = hass.states.get(entity_id)
    assert state
    assert int(state.state) == 40
    entry = registry.async_get(entity_id)
    assert entry
    assert entry.unique_id == "uniqueid-ac1-z01-signal"
    # Test Second Zone Signal Sensor
    entity_id = "sensor.zone_closed_with_sensor_signal"
    state = hass.states.get(entity_id)
    assert state
    assert int(state.state) == 10
    entry = registry.async_get(entity_id)
    assert entry
    assert entry.unique_id == "uniqueid-ac1-z02-signal"
    # Test First Zone Temp Sensor (disabled by default)
    entity_id = "sensor.zone_open_with_sensor_temperature"
    assert not hass.states.get(entity_id)
    # Re-enable the entity and fire the reload delay so it gets added.
    registry.async_update_entity(entity_id=entity_id, disabled_by=None)
    await hass.async_block_till_done()
    async_fire_time_changed(
        hass,
        dt.utcnow() + timedelta(seconds=RELOAD_AFTER_UPDATE_DELAY + 1),
    )
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state
    assert int(state.state) == 25
    entry = registry.async_get(entity_id)
    assert entry
    assert entry.unique_id == "uniqueid-ac1-z01-temp"
|
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version | 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for | more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import survey_update_wizard
|
import subprocess
from typing import List, Optional
from approvaltests import ensure_file_exists
from approvaltests.command import Command
from approvaltests.core.reporter import Reporter
from approvaltests.utils import to_json
PROGRAM_FILES = "{ProgramFiles}"
class GenericDiffReporterConfig:
    """Name / executable path / extra-arguments bundle describing one
    external diff tool."""
    def __init__(self, name: str, path: str, extra_args: Optional[List[str]] = None):
        self.name = name
        self.path = path
        self.extra_args = extra_args or []
    def serialize(self):
        """Return the list form [name, path] or [name, path, extra_args]."""
        if self.extra_args:
            return [self.name, self.path, self.extra_args]
        return [self.name, self.path]
def create_config(config) -> GenericDiffReporterConfig:
    """Build a GenericDiffReporterConfig from its serialized list form
    [name, path] or [name, path, extra_args]."""
    return GenericDiffReporterConfig(*config)
class GenericDiffReporter(Reporter):
    """
    A reporter that launches
    an external diff tool given by config.
    """
    @staticmethod
    def create(diff_tool_path: str) -> "GenericDiffReporter":
        """Build a reporter for an arbitrary diff executable path."""
        return GenericDiffReporter(create_config(["custom", diff_tool_path]))
    def __init__(self, config: GenericDiffReporterConfig) -> None:
        self.name = config.name
        # Expand a leading "{ProgramFiles}" placeholder on Windows.
        self.path = self.expand_program_files(config.path)
        self.extra_args = config.extra_args
    def __str__(self) -> str:
        # JSON description; extra_args included only when present.
        if self.extra_args:
            config = {
                "name": self.name,
                "path": self.path,
                "arguments": self.extra_args,
            }
        else:
            config = {"name": self.name, "path": self.path}
        return to_json(config)
    @staticmethod
    def run_command(command_array):
        # Fire-and-forget: the diff tool runs detached from the test run.
        subprocess.Popen(command_array)
    def get_command(self, received: str, approved: str) -> List[str]:
        """Return the argv used to launch the diff tool on the two files."""
        return [self.path] + self.extra_args + [received, approved]
    def report(self, received_path: str, approved_path: str) -> bool:
        """Open the diff tool on received vs approved; returns False when
        the tool is not available on this machine."""
        if not self.is_working():
            return False
        # Create an empty approved file if missing so the tool can open it.
        ensure_file_exists(approved_path)
        command_array = self.get_command(received_path, approved_path)
        self.run_command(command_array)
        return True
    def is_working(self) -> bool:
        """Locate the executable; caches the resolved path on success."""
        found = Command(self.path).locate()
        if not found:
            return False
        else:
            self.path = found
            return True
    @staticmethod
    def expand_program_files(path: str) -> str:
        """Replace "{ProgramFiles}" with the first Windows Program Files
        directory that actually contains the executable; falls back to
        "C:/Program Files"."""
        if PROGRAM_FILES not in path:
            return path
        for candidate in [
            r"C:/Program Files",
            r"C:/Program Files (x86)",
            r"C:/ProgramW6432",
        ]:
            possible = path.replace(PROGRAM_FILES, candidate)
            if Command.executable(possible):
                return possible
        return path.replace(PROGRAM_FILES, "C:/Program Files")
es have no head commit
parent_commits = list()
# END handle parent commits
else:
for p in parent_commits:
if not isinstance(p, cls):
raise ValueError("Parent commit '%r' must be of type %s" % (p, cls))
# end check parent commit types
# END if parent commits are unset
# retrieve all additional information, create a commit object, and
# serialize it
# Generally:
# * Environment variables override configuration values
# * Sensible defaults are set according to the git documentation
# COMMITER AND AUTHOR INFO
cr = repo.config_reader()
env = os.environ
committer = committer or Actor.committer(cr)
author = author or Actor.author(cr)
# PARSE THE DATES
unix_time = int(time())
is_dst = daylight and localtime().tm_isdst > 0
offset = altzone if is_dst else timezone
author_date_str = env.get(cls.env_author_date, '')
if author_date:
author_time, author_offset = parse_date(author_date)
elif author_date_str:
author_time, author_offset = parse_date(author_date_str)
else:
author_time, author_offset = unix_time, offset
# END set author time
committer_date_str = env.get(cls.env_committer_date, '')
if commit_date:
committer_time, committer_offset = parse_date(commit_date)
elif committer_date_str:
committer_time, committer_offset = parse_date(committer_date_str)
else:
committer_time, committer_offset = unix_time, offset
# END set committer time
# assume utf8 encoding
enc_section, enc_option = cls.conf_encoding.split('.')
conf_encoding = cr.get_value(enc_section, enc_option, cls.default_encoding)
# if the tree is no object, make sure we create one - otherwise
# the created commit object is invalid
if isinstance(tree, str):
tree = repo.tree(tree)
# END tree conversion
# CREATE NEW COMMIT
new_commit = cls(repo, cls.NULL_BIN_SHA, tree,
author, author_time, author_offset,
committer, committer_time, committer_offset,
message, parent_commits, conf_encoding)
stream = BytesIO()
new_commit._serialize(stream)
streamlen = stream.tell()
stream.seek(0)
istream = repo.odb.store(IStream(cls.type, streamlen, stream))
new_commit.binsha = istream.binsha
if head:
# need late import here, importing git at the very beginning throws
# as well ...
import git.refs
try:
repo.head.set_commit(new_commit, logmsg=message)
except ValueError:
# head is not yet set to the ref our HEAD points to
# Happens on first commit
master = git.refs.Head.create(repo, repo.head.ref, new_commit, logmsg="commit (initial): %s" % message)
repo.head.set_reference(master, logmsg='commit: Switching to %s' % master)
# END handle empty repositories
# END advance head handling
return new_commit
#{ Serializable Implementation
    def _serialize(self, stream):
        """Write this commit to *stream* in git's canonical commit object
        format: tree/parent/author/committer headers, optional encoding
        and gpgsig headers, a blank line, then the message bytes."""
        write = stream.write
        write(("tree %s\n" % self.tree).encode('ascii'))
        for p in self.parents:
            write(("parent %s\n" % p).encode('ascii'))
        a = self.author
        aname = a.name
        c = self.committer
        fmt = "%s %s <%s> %s %s\n"
        # author/committer lines use the commit's own encoding since
        # names may contain non-ASCII characters
        write((fmt % ("author", aname, a.email,
                      self.authored_date,
                      altz_to_utctz_str(self.author_tz_offset))).encode(self.encoding))
        # encode committer
        aname = c.name
        write((fmt % ("committer", aname, c.email,
                      self.committed_date,
                      altz_to_utctz_str(self.committer_tz_offset))).encode(self.encoding))
        # encoding header only appears when it differs from the default
        if self.encoding != self.default_encoding:
            write(("encoding %s\n" % self.encoding).encode('ascii'))
        try:
            if self.__getattribute__('gpgsig') is not None:
                write(b"gpgsig")
                # each signature line is written space-prefixed (header
                # continuation lines in the git object format)
                for sigline in self.gpgsig.rstrip("\n").split("\n"):
                    write((" " + sigline + "\n").encode('ascii'))
        except AttributeError:
            pass
        # blank line separates headers from the commit message
        write(b"\n")
        # write plain bytes, be sure its encoded according to our encoding
        if isinstance(self.message, text_type):
            write(self.message.encode(self.encoding))
        else:
            write(self.message)
        # END handle encoding
        return self
def _deserialize(self, stream):
""":param from_rev_list: if true, the stream format is coming from the rev-list command
Otherwise it is assumed to be a plain data stream from our object"""
readline = stream.readline
self.tree = Tree(self.repo, hex_to_bin(readline().split()[1]), Tree.tree_id << 12, '')
self.parents = list()
next_line = None
while True:
parent_line = readline()
if not parent_line.startswith(b'parent'):
next_line = parent_line
break
# END abort reading parents
self.parents.append(type(self)(self.repo, hex_to_bin(parent_line | .split()[-1].decode('ascii'))))
# END for each parent line
self.parents = tuple(self.parents)
# we don't know actual author encoding before we have parsed it, so keep the lines around
author_line = next_line
committer_line = readline()
# we might run into one or more mergetag blocks, skip those for now
next_line = readline() |
while next_line.startswith(b'mergetag '):
next_line = readline()
while next_line.startswith(b' '):
next_line = readline()
# end skip mergetags
# now we can have the encoding line, or an empty line followed by the optional
# message.
self.encoding = self.default_encoding
self.gpgsig = None
# read headers
enc = next_line
buf = enc.strip()
while buf:
if buf[0:10] == b"encoding ":
self.encoding = buf[buf.find(' ') + 1:].decode('ascii')
elif buf[0:7] == b"gpgsig ":
sig = buf[buf.find(b' ') + 1:] + b"\n"
is_next_header = False
while True:
sigbuf = readline()
if not sigbuf:
break
if sigbuf[0:1] != b" ":
buf = sigbuf.strip()
is_next_header = True
break
sig += sigbuf[1:]
# end read all signature
self.gpgsig = sig.rstrip(b"\n").decode('ascii')
if is_next_header:
continue
buf = readline().strip()
# decode the authors name
try:
self.author, self.authored_date, self.author_tz_offset = \
parse_actor_and_date(author_line.decode(self.encoding, 'replace'))
except UnicodeDecodeError:
log.error("Failed to decode author line '%s' using encoding %s", author_line, self.encoding,
exc_info=True)
try:
self.committer, self.committed_date, self.committer_tz_offset = \
parse_actor_and_date(committer_line.decode(self.encoding, 'replace'))
except UnicodeDecodeError:
log.error("Failed to decode committer line '%s' using encoding %s", committer_line, self.encoding,
exc_info=True)
# END handle author's encoding
# a stream from our data simply gives us the plain message
# The end of our message stream is marked with a newline that we strip
self.message = stream.read()
try:
self.message = self.message.decode(self.encoding, 'repla |
# -*- coding: utf-8 -*-
# NOTE: Chinese text alignment cannot use Python's built-in padding
# functions; spaces must be added/removed manually based on the display
# width of the Chinese characters.
# Python 2.7.12 & matplotlib 2.0.0
import re
from urllib2 import *
import matplotlib.pyplot as plt
#Get a set of records from nba.hupu.com due to given team
#Get a set of records from nba.hupu.com due to given team
def getDataSet(team):
    """Fetch the schedule page for *team* from nba.hupu.com and return a
    list of raw record tuples (home, away, score, result, datetime)
    extracted with a regular expression."""
    # Spoof a desktop browser User-Agent so the site serves the page.
    statUserAgent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.115 Safari/537.36'
    statHeaders = {'User-Agent': statUserAgent}
    statRequest = Request('https://nba.hupu.com/schedule/' + team, headers=statHeaders)
    statResponse = urlopen(statRequest, timeout = 10)
    statHtml = statResponse.read()
    #Load Game information using regular expression
    # Each <tr> yields: two team names, the score cell, the result cell,
    # and the date/time cell.
    statPattern = re.compile(
        '''<tr.*?<a.*?>(.*?)</a>.*?>(.*?)</a>.*?<td.*?>(.*?)</td>.*?<td.*?>(.*?)</td>.*?<td.*?>(.*?)</td>.*?</tr>''',
        re.S)
    #store all the records that corresponds to the RE rule
    statDataSet = re.findall(statPattern, statHtml)
    return statDataSet
class Record:
    """One scheduled or played game for a team, parsed from a raw tuple
    produced by getDataSet()."""
    # initialization
    def __init__(self, team):
        self.team = team            #Name of the team in Chinese
        self.num = 0                #Number of the game
        self.place = ''             #Home/Road
        self.done = True            #True is the game is finished, else False
        self.result = ''            #The result of the game in Chinese
        self.opp = ''               #Opppnent of the game in Chinese
        self.score = ''             #The score of 2 teams in string like " XX:XX"
        self.scoreSelf = ''         #The score of team
        self.scoreOpp = ''          #The score of opponent
        self.scoreDiff = 0          #The difference in scores (positive if win/negative if lose)
        self.dateTime = ''          #The date and time of this game (Beijing Time) in string
        self.date = ''              #The date of this game
        self.time = ''              #The time of this game
    # Load seperated data from a list generated by regular expression decomposing
    def Load(self, statRecord, count):
        """Populate this record from one raw tuple.

        statRecord is (home_team, away_team, score_cell, result_cell,
        datetime_cell); count is the 1-based game number.
        """
        #Get the number of this game record
        self.num = count
        #if this games is unfinished
        if statRecord[3].strip() == '-':
            self.done = False
            #To find out if it is a Home Game or a Road Game
            # (team listed first is the away side on this site)
            if statRecord[0] == self.team:
                self.place = '客'
                self.opp = statRecord[1]
            else:
                self.place = '主'
                self.opp = statRecord[0]
        #if the game is finished
        else:
            #Get the result of this game
            if statRecord[3].strip() == '胜':
                self.result = '胜'
            else:
                self.result = '负'
            if statRecord[0] == self.team:
                self.place = '客'
                self.opp = statRecord[1]
                #Get the score of this game
                self.scoreSelf = re.findall(r'^\d+', statRecord[2].strip())[0]
                self.scoreOpp = re.findall(r'\d+$', statRecord[2].strip())[0]
                self.score = self.scoreSelf + '-' + self.scoreOpp
            else:
                self.place = '主'
                self.opp = statRecord[0]
                self.scoreSelf = re.findall(r'\d+$', statRecord[2].strip())[0]
                self.scoreOpp = re.findall(r'^\d+', statRecord[2].strip())[0]
                self.score = self.scoreOpp + '-' + self.scoreSelf
            self.scoreDiff = eval(self.scoreSelf) - eval(self.scoreOpp)
        #Get the date and time of this game
        self.dateTime = statRecord[4]
        self.date = self.dateTime.split()[0]
        self.time = self.dateTime.split()[1]
    # Print game message
    def Print(self):
        """Print one aligned line describing the game (away team first)."""
        #Trick here to fill in suitable spaces to align Chinese
        # (Chinese glyphs are wider than ASCII, so padding depends on the
        # byte length of the name)
        spaceExtraSelf = ' '
        spaceExtraOpp = ' '
        if len(self.team) == 9: spaceExtraSelf = '  '
        if len(self.team) == 5: spaceExtraSelf = '      '
        if len(self.opp) == 9: spaceExtraOpp = '  '
        if len(self.opp) == 5: spaceExtraOpp = '      '
        if self.done == True:
            if self.place == '客':
                print ('Game %2s  %s%10s  VS  %-10s%s  %3s : %3s  %+4d  %s' % (
                    self.num, spaceExtraSelf, self.team, self.opp, spaceExtraOpp, self.scoreSelf, self.scoreOpp,
                    self.scoreDiff, self.dateTime))
            if self.place == '主':
                print ('Game %2s  %s%10s  VS  %-10s%s  %3s : %3s  %+4d  %s' % (
                    self.num, spaceExtraOpp, self.opp, self.team, spaceExtraSelf, self.scoreOpp, self.scoreSelf,
                    self.scoreDiff, self.dateTime))
        else:
            if self.place == '客':
                print ('Game %2s  %s%10s  VS  %-10s%s  %s' % (
                    self.num, spaceExtraSelf, self.team, self.opp, spaceExtraOpp, self.dateTime))
            if self.place == '主':
                print ('Game %2s  %s%10s  VS  %-10s%s  %s' % (
                    self.num, spaceExtraOpp, self.opp, self.team, spaceExtraSelf, self.dateTime))
if __name__ == "__main__":
    #Dictionary of team's English and Chinese names
    #We need english names to implement url and Chinese name to print in Console
    teams = {'spurs': '马刺', 'rockets': '火箭', 'grizzlies': '灰熊', 'pelicans':'鹈鹕', 'mavericks':'小牛',
             'warriors': '勇士', 'clippers':'快船', 'kings': '国王', 'lakers': '湖人', 'suns': '太阳',
             'jazz': '爵士', 'thunder': '雷霆', 'blazers': '开拓者', 'nuggets': '掘金', 'timberwolves': '森林狼',
             'celtics': '凯尔特人', 'raptors': '猛龙', 'knicks': '尼克斯', '76ers': '76人', 'nets': '篮网',
             'wizards': '奇才', 'hawks': '老鹰', 'heat': '热火', 'hornets': '黄蜂', 'magic': '魔术',
             'cavaliers': '骑士', 'bucks':'雄鹿', 'bulls': '公牛', 'pacers': '步行者', 'pistons': '活塞'}
    for team in teams:
        #Comment this if and unindent codes below to get all 30 team's results
        if team == 'rockets':
            statDataSet = getDataSet(team)
            countGame = 0
            countWin = 0
            countLose = 0
            streak = ''
            streakCount = 0
            results = []
            #Count Wins and Loses and print every record
            for statRecord in statDataSet:
                countGame += 1
                record = Record(teams[team])
                record.Load(statRecord, countGame)
                if record.done == True:
                    results.append(record.scoreDiff)
                    if record.result == '胜':
                        countWin += 1
                    else:
                        countLose += 1
                record.Print()
            #Reverse the records to check the Win/Lose streak
            # (walk from the most recent game backwards until the result flips)
            statDataSet.reverse()
            for statRecord in statDataSet:
                record = Record(teams[team])
                record.Load(statRecord, countGame)
                if streak == '':
                    streak = record.result
                    streakCount = 1
                    continue
                if record.result == streak:
                    streakCount += 1
                else:
                    break
            #Plot results one by one
            # (x axis: game index; y axis: point differential per game)
            x = range(0, len(results))
            figure = plt.figure()
            plt.plot(x, results, 'r-', alpha = 0.6, label = 'dot')
            plt.plot(x, results, 'ro', label = 'line')
            plt.title(team.upper() + ' +/- Overall' )
            plt.xlabel('Game NO.')
            plt.ylabel('+/-')
            plt.grid(True)
            figure.set_size_inches(12,4)
            plt.legend(loc = 'upper right')
            plt.show()
            print('Total : %d W / %d L   %d 连%s中' % (countWin, countLose, streakCount, streak))
|
import threading
import upnp
import nupnp
class DiscoveryThread(threading.Thread):
    """Run UPnP and N-UPnP bridge discovery concurrently.

    Both child threads record the bridges they find in the shared
    ``bridges`` collection supplied by the caller.
    """

    def __init__(self, bridges):
        super(DiscoveryThread, self).__init__()
        self.bridges = bridges
        self.upnp_thread = upnp.UPnPDiscoveryThread(self.bridges)
        self.nupnp_thread = nupnp.NUPnPDiscoveryThread(self.bridges)

    def run(self):
        """Start both discovery threads and wait until both finish."""
        workers = (self.upnp_thread, self.nupnp_thread)
        for worker in workers:
            worker.start()
        for worker in workers:
            worker.join()
def discover():
    """Run a blocking bridge discovery and return the set of bridges found."""
    found = set()
    worker = DiscoveryThread(found)
    worker.start()
    worker.join()
    return found
|
# Copyrig | ht 2014-2017 The ODL contributors
#
# This file is part of ODL.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
from __future__ import absolute_import
# Re-export the public names of each submodule as this package's API.
__all__ = ()
from .nonlocalmeans_functionals import *
__all__ += nonlocalmeans_functionals.__all__
|
__autho | r__ = 'Ahmed Hani Ibrahim'
from LearningAlgorithm import *
class RPROP(LearningAlgorithm):
    """Resilient backpropagation (RPROP) learning algorithm.

    NOTE(review): currently an unimplemented stub -- ``learn`` does nothing.
    """
    def learn(self, learningRate, input, output, network):
        """Train *network* on one (input, output) example.

        :param learningRate: step-size parameter (unused in this stub)
        :param input: training input sample
        :param output: expected output for *input*
        :param network: network whose weights would be updated
        :return: None (not implemented yet)
        """
        pass
tes and sr:
sd, ed = dates
PromotedLinkRoadblock.add(sr, sd, ed)
jquery.refresh()
    @validatedForm(VSponsorAdmin(),
                   VModhash(),
                   dates=VDateRange(['startdate', 'enddate'],
                                    reference_date=promote.promo_datetime_now),
                   sr=VSubmitSR('sr', promotion=True))
    def POST_rm_roadblock(self, form, jquery, dates, sr):
        """Remove a promotion roadblock on *sr* for the given date range.

        Admin-only (VSponsorAdmin). Silently does nothing when either the
        date range or the subreddit failed validation.
        """
        if dates and sr:
            sd, ed = dates
            PromotedLinkRoadblock.remove(sr, sd, ed)
            # Reload the page so the roadblock list reflects the removal.
            jquery.refresh()
    @validatedForm(VSponsor('link_id36'),
                   VModhash(),
                   dates=VDateRange(['startdate', 'enddate'],
                                    earliest=timedelta(days=g.min_promote_future),
                                    latest=timedelta(days=g.max_promote_future),
                                    reference_date=promote.promo_datetime_now,
                                    business_days=True,
                                    sponsor_override=True),
                   link=VLink('link_id36'),
                   bid=VBid('bid', min=0, max=g.max_promote_bid,
                            coerce=False, error=errors.BAD_BID),
                   sr=VSubmitSR('sr', promotion=True),
                   campaign_id36=nop("campaign_id36"),
                   targeting=VLength("targeting", 10),
                   priority=VPriority("priority"),
                   location=VLocation())
    def POST_edit_campaign(self, form, jquery, link, campaign_id36,
                           dates, bid, sr, targeting, priority, location):
        """Create or edit a PromoCampaign for a promoted link.

        When *campaign_id36* resolves to an existing campaign it is edited,
        otherwise a new campaign is created. Validates dates, bid, targeting
        and inventory before touching anything, and reports failures through
        ``form`` / ``c.errors``.
        """
        if not link:
            return
        start, end = dates or (None, None)
        # Base CPM comes from the author's self-serve rate; geotargeting
        # adds a surcharge.
        author = Account._byID(link.author_id, data=True)
        cpm = author.cpm_selfserve_pennies
        if location:
            cpm += g.cpm_selfserve_geotarget.pennies
        if (form.has_errors('startdate', errors.BAD_DATE,
                            errors.DATE_TOO_EARLY, errors.DATE_TOO_LATE) or
            form.has_errors('enddate', errors.BAD_DATE, errors.DATE_TOO_EARLY,
                            errors.DATE_TOO_LATE, errors.BAD_DATE_RANGE)):
            return
        # Limit the number of PromoCampaigns a Link can have
        # Note that the front end should prevent the user from getting
        # this far
        existing_campaigns = list(PromoCampaign._by_link(link._id))
        if len(existing_campaigns) > g.MAX_CAMPAIGNS_PER_LINK:
            c.errors.add(errors.TOO_MANY_CAMPAIGNS,
                         msg_params={'count': g.MAX_CAMPAIGNS_PER_LINK},
                         field='title')
            form.has_errors('title', errors.TOO_MANY_CAMPAIGNS)
            return
        campaign = None
        if campaign_id36:
            try:
                campaign = PromoCampaign._byID36(campaign_id36)
            except NotFound:
                pass
        # CPM-priced priorities require a valid bid; other priorities use a
        # dummy bid of 0 below.
        if priority.cpm:
            if form.has_errors('bid', errors.BAD_BID):
                return
            # you cannot edit the bid of a live ad unless it's a freebie
            if (campaign and bid != campaign.bid and
                promote.is_live_promo(link, campaign) and
                not campaign.is_freebie()):
                c.errors.add(errors.BID_LIVE, field='bid')
                form.has_errors('bid', errors.BID_LIVE)
                return
            min_bid = 0 if c.user_is_sponsor else g.min_promote_bid
            if bid is None or bid < min_bid:
                c.errors.add(errors.BAD_BID, field='bid',
                             msg_params={'min': min_bid,
                                         'max': g.max_promote_bid})
                form.has_errors('bid', errors.BAD_BID)
                return
        else:
            bid = 0. # Set bid to 0 as dummy value
        if targeting == 'one':
            if form.has_errors('sr', errors.SUBREDDIT_NOEXIST,
                               errors.SUBREDDIT_NOTALLOWED,
                               errors.SUBREDDIT_REQUIRED):
                # checking to get the error set in the form, but we can't
                # check for rate-limiting if there's no subreddit
                return
            roadblock = PromotedLinkRoadblock.is_roadblocked(sr, start, end)
            if roadblock and not c.user_is_sponsor:
                msg_params = {"start": roadblock[0].strftime('%m/%d/%Y'),
                              "end": roadblock[1].strftime('%m/%d/%Y')}
                c.errors.add(errors.OVERSOLD, field='sr',
                             msg_params=msg_params)
                form.has_errors('sr', errors.OVERSOLD)
                return
        elif targeting == 'none':
            sr = None
        # Check inventory
        campaign = campaign if campaign_id36 else None
        if not priority.inventory_override:
            oversold = has_oversold_error(form, campaign, start, end, bid, cpm,
                                          sr, location)
            if oversold:
                return
        if campaign:
            promote.edit_campaign(link, campaign, dates, bid, cpm, sr, priority,
                                  location)
        else:
            campaign = promote.new_campaign(link, dates, bid, cpm, sr, priority,
                                            location)
        rc = RenderableCampaign.from_campaigns(link, campaign)
        jquery.update_campaign(campaign._fullname, rc.render_html())
    @validatedForm(VSponsor('link_id36'),
                   VModhash(),
                   l=VLink('link_id36'),
                   campaign=VPromoCampaign("campaign_id36"))
    def POST_delete_campaign(self, form, jquery, l, campaign):
        """Delete *campaign* from promoted link *l* (no-op if either is invalid)."""
        if l and campaign:
            promote.delete_campaign(l, campaign)
    @validatedForm(VSponsorAdmin(),
                   VModhash(),
                   link=VLink('link_id36'),
                   campaign=VPromoCampaign("campaign_id36"))
    def POST_terminate_campaign(self, form, jquery, link, campaign):
        """Admin action: stop a running campaign and refresh its row in the UI."""
        if link and campaign:
            promote.terminate_campaign(link, campaign)
            rc = RenderableCampaign.from_campaigns(link, campaign)
            jquery.update_campaign(campaign._fullname, rc.render_html())
    @validatedForm(VSponsor('link'),
                   VModhash(),
                   link=VByName("link"),
                   campaign=VPromoCampaign("campaign"),
                   customer_id=VInt("customer_id", min=0),
                   pay_id=VInt("account", min=0),
                   edit=VBoolean("edit"),
                   address=ValidAddress(
                       ["firstName", "lastName", "company", "address",
                        "city", "state", "zip", "country", "phoneNumber"]),
                   creditcard=ValidCard(["cardNumber", "expirationDate",
                                         "cardCode"]))
    def POST_update_pay(self, form, jquery, link, campaign, customer_id, pay_id,
                        edit, address, creditcard):
        """Validate billing details and authorize payment for *campaign*."""
        # Check inventory
        if campaign_has_oversold_error(form, campaign):
            return
        # A new/updated payment profile is needed when no existing profile
        # was selected, or the user explicitly asked to edit one.
        address_modified = not pay_id or edit
        form_has_errors = False
        if address_modified:
            if (form.has_errors(["firstName", "lastName", "company", "address",
                                 "city", "state", "zip",
                                 "country", "phoneNumber"],
                                errors.BAD_ADDRESS) or
                form.has_errors(["cardNumber", "expirationDate", "cardCode"],
                                errors.BAD_CARD)):
                form_has_errors = True
            elif g.authorizenetapi:
                pay_id = edit_profile(c.user, address, creditcard, pay_id)
            else:
                # No payment gateway configured (dev/test): fake a profile id.
                pay_id = 1
        # if link is in use or finished, don't make a change
        if pay_id and not form_has_errors:
            # valid bid and created or existing bid id.
            # check if already a transaction
            if g.authorizenetapi:
                success, reason = promote.auth_campaign(link, campaign, c.user,
                                                        pay_id)
            else:
                success = True
            # NOTE(review): `success` is not consumed in this chunk -- the
            # method appears truncated here; verify against upstream source.
|
from django.conf.urls import patterns, url
urlpatterns | = patterns(
'mtr.utils.views',
url(r'^model/(?P<name>.+)/pk/(?P<pk>\d+)$',
| 'model_label', name='model_label')
)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Exercise submission check script (Python 2).
To run it, from the shell:
    $ python check.py login_github
Clones the student's GitHub repository, verifies the expected file list,
and runs pep8 over the student's files. User-facing messages are in Spanish.
"""
import os
import random
import sys
# Exercise/repository name expected on GitHub.
ejercicio = 'X-Serv-14.5-Sumador-Simple'
# Files the student must provide.
student_files = [
    'servidor-sumador.py'
]
# Boilerplate files expected in every exercise repository.
repo_files = [
    'check.py',
    'README.md',
    'LICENSE',
    '.gitignore',
    '.git'
]
files = student_files + repo_files
if len(sys.argv) != 2:
    print
    sys.exit("Usage: $ python check.py login_github")
repo_git = "http://github.com/" + sys.argv[1] + "/" + ejercicio
# Random directory name under /tmp to clone into.
aleatorio = str(int(random.random() * 1000000))
error = 0
print
print "Clonando el repositorio " + repo_git + "\n"
os.system('git clone ' + repo_git + ' /tmp/' + aleatorio + ' > /dev/null 2>&1')
try:
    github_file_list = os.listdir('/tmp/' + aleatorio)
except OSError:
    # Clone failed: nothing to check.
    error = 1
    print "Error: No se ha podido acceder al repositorio " + repo_git + "."
    print
    sys.exit()
if len(github_file_list) != len(files):
    error = 1
    print "Error: número de ficheros en el repositorio incorrecto"
for filename in files:
    if filename not in github_file_list:
        error = 1
        print "\tError: " + filename + " no encontrado en el repositorio."
if not error:
    print "Parece que la entrega se ha realizado bien."
print
print "La salida de pep8 es: (si todo va bien, no ha de mostrar nada)"
print
# Style-check only the student's files.
for filename in student_files:
    if filename in github_file_list:
        os.system('pep8 --repeat --show-source --statistics /tmp/'
                  + aleatorio + '/' + filename)
    else:
        print "Fichero " + filename + " no encontrado en el repositorio."
print
|
import os
import re
import sys
from setuptools import setup, find_packages
PY3 = sys.version_info[0] == 3

here = os.path.abspath(os.path.dirname(__file__))
name = 'pyramid_kvs'


def _read(*parts):
    """Return the text content of a file located under *here*."""
    with open(os.path.join(here, *parts)) as fp:
        return fp.read()


# Long description is README + changelog; version is parsed out of the
# package's __init__.py rather than importing it.
README = _read('README.rst')
CHANGES = _read('CHANGES.rst')
version = re.compile(r".*__version__ = '(.*?)'",
                     re.S).match(_read(name, '__init__.py')).group(1)

# Memcached client differs between Python 2 and 3.
requires = ['pyramid', 'redis >= 3.0']
requires.append('python3-memcached' if PY3 else 'python-memcached')

tests_require = ['nose', 'coverage']
if sys.version_info < (2, 7):
    # unittest2 backports the modern unittest API to Python 2.6.
    tests_require.append('unittest2')
extras_require = {'test': tests_require}

setup(name=name.replace('_', '-'),
      version=version,
      description='Session and cache for Pyramid',
      long_description=README + '\n\n' + CHANGES,
      classifiers=[
          "Programming Language :: Python",
          "Framework :: Pyramid",
          "Topic :: Internet :: WWW/HTTP",
          "Topic :: Internet :: WWW/HTTP :: WSGI",
          "License :: OSI Approved :: GNU General Public License v2 (GPLv2)",
      ],
      author='Gandi',
      author_email='feedback@gandi.net',
      url='https://github.com/Gandi/pyramid_kvs',
      keywords='web pyramid pylons',
      packages=find_packages(),
      include_package_data=True,
      zip_safe=False,
      test_suite='{0}.tests'.format(name),
      install_requires=requires,
      tests_require=tests_require,
      extras_require=extras_require
      )
|
from django.core.urlresolvers import reverse
from kishore.models import Artist, Song, Release
from base import KishoreTestCase
class ArtistTestCase(KishoreTestCase):
    """Smoke tests for the artist index and detail views."""

    def test_index(self):
        response = self.client.get(reverse('kishore_artists_index'))
        self.assertEqual(response.status_code, 200)

    def test_detail(self):
        artist = Artist.objects.get(pk=1)
        response = self.client.get(artist.get_absolute_url())
        self.assertEqual(response.status_code, 200)
class SongTestCase(KishoreTestCase):
    """Smoke tests for song pages, the audio player and download links."""

    def test_index(self):
        response = self.client.get(reverse('kishore_songs_index'))
        self.assertEqual(response.status_code, 200)

    def test_detail(self):
        song = Song.objects.get(pk=1)
        response = self.client.get(song.get_absolute_url())
        self.assertEqual(response.status_code, 200)

    def test_player_html(self):
        with self.settings(KISHORE_AUDIO_PLAYER="kishore.models.SoundcloudPlayer"):
            streamable = Song.objects.get(pk=1)
            self.assertTrue(streamable.get_player_html())
            # A song that cannot be streamed produces no player markup.
            unstreamable = Song.objects.get(pk=2)
            self.assertFalse(unstreamable.get_player_html())

    def test_download_link(self):
        downloadable = Song.objects.get(pk=1)
        self.assertTrue(downloadable.download_link())
        # A song that cannot be downloaded produces no link.
        undownloadable = Song.objects.get(pk=2)
        self.assertFalse(undownloadable.download_link())
class ReleaseTestCase(KishoreTestCase):
    """Smoke tests for release pages and the release audio player."""

    def test_index(self):
        response = self.client.get(reverse('kishore_releases_index'))
        self.assertEqual(response.status_code, 200)

    def test_detail(self):
        release = Release.objects.get(pk=1)
        response = self.client.get(release.get_absolute_url())
        self.assertEqual(response.status_code, 200)

    def test_player_html(self):
        with self.settings(KISHORE_AUDIO_PLAYER="kishore.models.SoundcloudPlayer"):
            streamable = Release.objects.get(pk=1)
            self.assertTrue(streamable.get_player_html())
            # A release that cannot be streamed produces no player markup.
            unstreamable = Release.objects.get(pk=2)
            self.assertFalse(unstreamable.get_player_html())
|
""" |
LMS specific monitoring helpers.
" | ""
|
#-----------------------------------------------------------------------------
# Copyright (c) 2005-2020, PyInstaller Development Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: A | pache-2.0
#-----------------------------------------------------------------------------
import os
import sys
# On Mac OS X tell enchant library where to look for enchant backends (aspell, myspell, ...).
# Enchant is looking for backends in directory 'PREFIX/lib/enchant'
# Note: env. var. ENCHANT_PREFIX_DIR is implemented only in the development version:
# https://github.com/AbiWord/enchant
# https://github.com/AbiWord/enchant/pull/2
# TODO Test this rthook.
if sys.platform.startswith('darwin'):
    # sys._MEIPASS is the PyInstaller bundle extraction directory; it exists
    # only inside a frozen app, which is the only context this rthook runs in.
    os.environ['ENCHANT_PREFIX_DIR'] = os.path.join(sys._MEIPASS, 'enchant')
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from zoo.orca.automl.model.base_pytorch_model import PytorchModelBu | ilder
from zoo.orca.automl.auto_estimator import AutoEstimator
from zoo.chronos.model.Seq2Seq_pytorch import model_creator
from .base_automodel import BasePytorchAutomodel
class AutoSeq2Seq(BasePytorchAutomodel):
    """Automated hyperparameter search wrapper for the Seq2Seq model."""
    def __init__(self,
                 input_feature_num,
                 output_target_num,
                 past_seq_len,
                 future_seq_len,
                 optimizer,
                 loss,
                 metric,
                 lr=0.001,
                 lstm_hidden_dim=128,
                 lstm_layer_num=2,
                 dropout=0.25,
                 teacher_forcing=False,
                 backend="torch",
                 logs_dir="/tmp/auto_seq2seq",
                 cpus_per_trial=1,
                 name="auto_seq2seq",
                 remote_dir=None,
                 ):
        """
        Create an AutoSeq2Seq.
        :param input_feature_num: Int. The number of features in the input
        :param output_target_num: Int. The number of targets in the output
        :param past_seq_len: Int. The number of historical steps used for forecasting.
        :param future_seq_len: Int. The number of future steps to forecast.
        :param optimizer: String or pyTorch optimizer creator function or
               tf.keras optimizer instance.
        :param loss: String or pytorch/tf.keras loss instance or pytorch loss creator function.
        :param metric: String. The evaluation metric name to optimize. e.g. "mse"
        :param lr: float or hp sampling function from a float space. Learning rate.
               e.g. hp.choice([0.001, 0.003, 0.01])
        :param lstm_hidden_dim: LSTM hidden channel for decoder and encoder.
               hp.grid_search([32, 64, 128])
        :param lstm_layer_num: LSTM layer number for decoder and encoder.
               e.g. hp.randint(1, 4)
        :param dropout: float or hp sampling function from a float space. Learning rate. Dropout
               rate. e.g. hp.uniform(0.1, 0.3)
        :param teacher_forcing: If use teacher forcing in training. e.g. hp.choice([True, False])
        :param backend: The backend of the Seq2Seq model. We only support backend as "torch"
               for now.
        :param logs_dir: Local directory to save logs and results. It defaults to
               "/tmp/auto_seq2seq"
        :param cpus_per_trial: Int. Number of cpus for each trial. It defaults to 1.
        :param name: name of the AutoSeq2Seq. It defaults to "auto_seq2seq"
        :param remote_dir: String. Remote directory to sync training results and checkpoints. It
               defaults to None and doesn't take effects while running in local. While running in
               cluster, it defaults to "hdfs:///tmp/{name}".
        """
        super().__init__()
        # todo: support search for past_seq_len.
        # todo: add input check.
        if backend != "torch":
            raise ValueError(f"We only support backend as torch. Got {backend}")
        # Fixed dimensions and hp sampling functions share one search-space
        # dict that is handed to the model creator on every trial.
        self.search_space = dict(
            input_feature_num=input_feature_num,
            output_feature_num=output_target_num,
            past_seq_len=past_seq_len,
            future_seq_len=future_seq_len,
            lstm_hidden_dim=lstm_hidden_dim,
            lstm_layer_num=lstm_layer_num,
            lr=lr,
            dropout=dropout,
            teacher_forcing=teacher_forcing
        )
        self.metric = metric
        model_builder = PytorchModelBuilder(model_creator=model_creator,
                                            optimizer_creator=optimizer,
                                            loss_creator=loss,
                                            )
        self.auto_est = AutoEstimator(model_builder=model_builder,
                                      logs_dir=logs_dir,
                                      resources_per_trial={"cpu": cpus_per_trial},
                                      remote_dir=remote_dir,
                                      name=name)
|
import netifaces
from netaddr import *
# Probe every network interface and remember the configuration of the last
# one with a complete IPv4 setup; the module-level names assigned here
# (local_ip, broadcast, netmask, mac, gateway, interface, ips) are read by
# the get_* helper functions below.
for inter in netifaces.interfaces():
    addrs = netifaces.ifaddresses(inter)
    try:
        print(addrs)
        print(addrs[netifaces.AF_INET][0]["addr"])
        print(addrs[netifaces.AF_INET][0]["broadcast"])
        print(addrs[netifaces.AF_INET][0]["netmask"])
        local_ip = addrs[netifaces.AF_INET][0]["addr"]
        broadcast = addrs[netifaces.AF_INET][0]["broadcast"]
        netmask = addrs[netifaces.AF_INET][0]["netmask"]
        mac = addrs[netifaces.AF_LINK][0]["addr"]
        gws = netifaces.gateways()
        gateway = gws['default'][netifaces.AF_INET][0]
        interface = inter
        # Enumerate every host address in the interface's subnet.
        ips = [str(ip)
               for ip in IPNetwork(broadcast + '/' +
                                   str(IPNetwork('0.0.0.0/' + netmask).prefixlen)).iter_hosts()]
    except (KeyError, IndexError, ValueError):
        # Was a bare ``except:``, which also swallowed KeyboardInterrupt and
        # SystemExit. Interfaces without an IPv4/link-layer entry raise
        # KeyError/IndexError; malformed addresses raise netaddr's
        # AddrFormatError (a ValueError). Skip them and keep scanning.
        print("Error")
def ge | t_lan_ip():
global local_ip
return local_ip
def get_broadcast_ip():
global broadcast
return broadcast
def get_all_ips():
global ips
return ips
def | get_gateway():
global gateway
return gateway
def get_mac():
global mac
return mac |
__author__ = 'sibirrer'
# This file is meant to be a shell script to be run on the Monch cluster:
# it loads a pickled problem description, runs the MCMC sampling, and has
# the MPI master process dump the resulting samples.
# set up the scene
from cosmoHammer.util.MpiUtil import MpiPool
import time
import sys
import pickle
import dill
start_time = time.time()
#path2load = '/mnt/lnec/sibirrer/input.txt'
path2load = str(sys.argv[1])
# ``with`` guarantees the input file is closed even if unpickling fails
# (the original opened/closed the handle manually and leaked it on error).
with open(path2load, 'rb') as f:
    [lensDES, walkerRatio, n_burn, n_run, mean_start, sigma_start, lowerLimit,
     upperLimit, path2dump] = dill.load(f)
end_time = time.time()
#print end_time - start_time, 'time used for initialisation'
# run the computation
from easylens.Fitting.mcmc import MCMC_sampler
sampler = MCMC_sampler(lensDES, fix_center=False)
samples = sampler.mcmc_CH(walkerRatio, n_run, n_burn, mean_start, sigma_start, lowerLimit, upperLimit, threadCount=1, init_pos=None, mpi_monch=True)
# save the output; only the MPI master process writes the result file
pool = MpiPool(None)
if pool.isMaster():
    with open(path2dump, 'wb') as f:
        pickle.dump(samples, f)
    end_time = time.time()
    print(end_time - start_time, 'total time needed for computation')
    print('Result saved in:', path2dump)
    print('============ CONGRATULATION, YOUR JOB WAS SUCCESSFUL ================ ')
|
#! /usr/bin/env python
"""
This program plots the average electronic energy during a NAMD simulatons
averaged over several initial conditions.
It plots both the SH and SE population based energies.
Example:
plot_average_energy.py -p . -nstates 26 -nconds 6
Note that the number of states is the same as given in the pyxaid output.
It must include the ground state as well.
"""
import numpy as np
import os
import matplotlib.pyplot as plt
import argparse
def plot_stuff(outs, pops):
    """Plot SH- and SE-based average electronic energy versus time.

    The figure is written to ``Average_Energy.png`` and shown interactively.

    :param outs: 1D array of SH-population-based energies per timeframe.
    :param pops: 1D array of SE-population-based energies per timeframe.
    """
    dim_x = np.arange(outs.shape[0])
    plot = np.column_stack((outs, pops))
    plt.xlabel('Time (fs)')
    plt.ylabel('Energy (eV)')
    plt.plot(dim_x, plot[:, 0:])
    fileName = "Average_Energy.png"
    # Save BEFORE show(): plt.show() blocks and tears down the interactive
    # figure, so calling savefig() afterwards writes an empty canvas on many
    # backends (the original did show() first and saved a blank image).
    plt.savefig(fileName, format='png', dpi=300)
    plt.show()
def read_energies(path, fn, nstates, nconds):
    """Read per-state energies from the ``fn0 .. fn<nconds-1>`` files.

    :param path: directory containing the files.
    :param fn: file-name prefix; the initial-condition index is appended.
    :param nstates: number of states (energy columns 5, 7, 9, ...).
    :param nconds: number of initial conditions (one file each).
    :returns: array with axes (timeframes, states, initial conditions).
    """
    inpfile = os.path.join(path, fn)
    cols = tuple(range(5, nstates * 2 + 5, 2))
    # np.stack requires a real sequence of arrays; passing a generator was
    # deprecated in NumPy 1.16 and raises TypeError on modern releases.
    xs = np.stack([np.loadtxt(f'{inpfile}{j}', usecols=cols)
                   for j in range(nconds)]).transpose()
    # Rows = timeframes ; Columns = states ; tensor = initial conditions
    xs = xs.swapaxes(0, 1)
    return xs
def read_pops(path, fn, nstates, nconds):
    """Read per-state populations from the ``fn0 .. fn<nconds-1>`` files.

    :param path: directory containing the files.
    :param fn: file-name prefix; the initial-condition index is appended.
    :param nstates: number of states (population columns 3, 5, 7, ...).
    :param nconds: number of initial conditions (one file each).
    :returns: array with axes (timeframes, states, initial conditions).
    """
    inpfile = os.path.join(path, fn)
    cols = tuple(range(3, nstates * 2 + 3, 2))
    # np.stack requires a real sequence of arrays; passing a generator was
    # deprecated in NumPy 1.16 and raises TypeError on modern releases.
    xs = np.stack([np.loadtxt(f'{inpfile}{j}', usecols=cols)
                   for j in range(nconds)]).transpose()
    # Rows = timeframes ; Columns = states ; tensor = initial conditions
    xs = xs.swapaxes(0, 1)
    return xs
def main(path_output, nstates, nconds):
    """Compute and plot ensemble-averaged electronic energies.

    :param path_output: directory with the pyxaid 'out', 'me_pop' and
        'me_energies' files.
    :param nstates: number of states, including the ground state.
    :param nconds: number of initial conditions (one file suffix each).
    """
    # NOTE(review): the 'out' files are parsed with read_pops' column layout
    # (columns 3, 5, ...) -- confirm that matches the pyxaid 'out' format.
    outs = read_pops(path_output, 'out', nstates, nconds)
    pops = read_pops(path_output, 'me_pop', nstates, nconds)
    energies = read_energies(path_output, 'me_energies', nstates, nconds)
    # Weighted state energy for a given SH or SH population at time t
    eav_outs = energies * outs
    eav_pops = energies * pops
    # Ensemble average over initial conditions of the electronic energy
    # as a function of time
    el_ene_outs = np.average(np.sum(eav_outs, axis=1), axis=1)
    el_ene_pops = np.average(np.sum(eav_pops, axis=1), axis=1)
    # Ensemble average scaled to the lowest excitation energy.
    # This way the cooling converges to 0.
    lowest_hl_gap = np.average(np.amin(energies[:, 1:, :], axis=1), axis=1)
    ene_outs_ref0 = el_ene_outs - lowest_hl_gap
    ene_pops_ref0 = el_ene_pops - lowest_hl_gap
    plot_stuff(ene_outs_ref0, ene_pops_ref0)
def read_cmd_line(parser):
    """Parse the command line and return ``[p, nstates, nconds]``."""
    parsed = parser.parse_args()
    return [getattr(parsed, option) for option in ('p', 'nstates', 'nconds')]
# ============<>===============
if __name__ == "__main__":
    # Build the CLI parser and run the analysis with its parsed options.
    msg = "plot_states_pops -p <path/to/output>\
    -nstates <number of states computed>\
    -nconds <number of initial conditions>"
    parser = argparse.ArgumentParser(description=msg)
    parser.add_argument('-p', required=True,
                        help='path to the Hamiltonian files in Pyxaid format')
    parser.add_argument('-nstates', type=int, required=True,
                        help='Number of states')
    parser.add_argument('-nconds', type=int, required=True,
                        help='Number of initial conditions')
    main(*read_cmd_line(parser))
|
"""Detail firewall."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import firewall
from SoftLayer.CLI import formatting |
from SoftLayer import utils
@click.command()
@click.argument('identifier')
@environment.pass_env
def cli(env, identifier):
    """Detail firewall."""
    # NOTE: the docstring above is surfaced verbatim as the command's help
    # text by click, so it is kept short deliberately.
    mgr = SoftLayer.FirewallManager(env.client)
    # IDENTIFIER encodes '<type>:<id>'; 'vlan' selects the dedicated
    # firewall rule lookup, anything else the standard one.
    firewall_type, firewall_id = firewall.parse_id(identifier)
    if firewall_type == 'vlan':
        rules = mgr.get_dedicated_fwl_rules(firewall_id)
    else:
        rules = mgr.get_standard_fwl_rules(firewall_id)
    env.fout(get_rules_table(rules))
def get_rules_table(rules):
    """Build a display table from a list of firewall rules.

    :param list rules: firewall rule dictionaries from the API
    :returns: a ``formatting.Table`` of the rules, sorted by order value
    """
    table = formatting.Table(['#', 'action', 'protocol', 'src_ip', 'src_mask',
                              'dest', 'dest_mask'])
    table.sortby = '#'
    for rule in rules:
        # Destination is rendered as 'ip:port_start-port_end'.
        destination = '%s:%s-%s' % (rule['destinationIpAddress'],
                                    rule['destinationPortRangeStart'],
                                    rule['destinationPortRangeEnd'])
        row = [
            rule['orderValue'],
            rule['action'],
            rule['protocol'],
            rule['sourceIpAddress'],
            utils.lookup(rule, 'sourceIpSubnetMask'),
            destination,
            utils.lookup(rule, 'destinationIpSubnetMask'),
        ]
        table.add_row(row)
    return table
|
#!/usr/bin/python
# Python modules imports
from optparse import OptionParser, make_option
import pyupm_grove as g
import os, sys, socket, uuid, dbus, dbus.service
import dbus.mainloop.glib
#import gardening_system
try:
from gi.repository import GObject
except ImportError:
import gobject as GObject
# Set up constants
BUS_NAME = 'org.bluez'
AGENT_INTERFACE = 'org.bluez.Agent1'
PROFILE_INTERFACE = 'org.bluez.Profile1'
# Trusted device function
def set_trusted(path):
    """Mark the device at D-Bus object *path* as trusted.

    NOTE(review): relies on the module-level ``bus`` created in the
    ``__main__`` block below; calling this earlier raises NameError.
    """
    props = dbus.Interface(bus.get_object("org.bluez", path), "org.freedesktop.DBus.Properties")
    props.Set("org.bluez.Device1", "Trusted", True)
# Agent class
class Agent(dbus.service.Object):
    """BlueZ pairing agent handling numeric-comparison confirmation."""
    @dbus.service.method(AGENT_INTERFACE, in_signature="ou", out_signature="")
    def RequestConfirmation(self, device, passkey):
        """Show the passkey for the user to compare, then trust the device."""
        print("\nEnsure this passkey matches with the one in your device: %06d\nPress [ENTER] to continue" % passkey)
        set_trusted(device)
        return
#Profile class
class Profile(dbus.service.Object):
    """BlueZ serial profile: serves one RFCOMM connection at a time."""
    # File descriptor of the current connection; -1 while disconnected.
    fd = -1
    @dbus.service.method(PROFILE_INTERFACE, in_signature="oha{sv}", out_signature="")
    def NewConnection(self, path, fd, properties):
        """Serve a newly connected device over the received socket fd.

        NOTE(review): this method calls ``gardening_system``, but its import
        at the top of the file is commented out, so these calls raise
        NameError at runtime -- restore the import or guard the calls.
        """
        self.fd = fd.take()
        device_path = os.path.basename(path)
        print("\nConnected to %s\nPress [ENTER] to continue" % device_path)
        server_sock = socket.fromfd(self.fd, socket.AF_UNIX, socket.SOCK_STREAM)
        # 1-second timeout so the loop can run periodic work between reads.
        server_sock.settimeout(1)
        server_sock.send("Hello, this is Edison!")
        try:
            while True:
                try:
                    data = server_sock.recv(1024)
                    gardening_system.function(data)
                    if data == 'b':
                        server_sock.send(gardening_system.requestData())
                except socket.timeout:
                    # No data this second; fall through to the periodic task.
                    pass
                gardening_system.myProgram()
        except IOError:
            # Peer disconnected; leave the serve loop.
            pass
        server_sock.close()
        print("\nYour device is now disconnected\nPress [ENTER] to continue")
def bluetoothConnection():
    """Register the serial profile with BlueZ and serve connections forever.

    NOTE(review): ``bus`` here is a local variable, distinct from the
    module-level ``bus`` created in ``__main__`` and used by set_trusted().
    """
    dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
    bus = dbus.SystemBus()
    obj = bus.get_object(BUS_NAME, "/org/bluez");
    profile_manager = dbus.Interface(obj, "org.bluez.ProfileManager1")
    profile_path = "/foo/bar/profile"
    auto_connect = {"AutoConnect": False}
    # "1101" is the standard Serial Port Profile (SPP) service UUID.
    profile_uuid = "1101"
    profile = Profile(bus, profile_path)
    profile_manager.RegisterProfile(profile_path, profile_uuid, auto_connect)
    # Blocks forever serving D-Bus callbacks.
    mainloop = GObject.MainLoop()
    mainloop.run()
if __name__ == '__main__':
    # Generic dbus config
    dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
    bus = dbus.SystemBus()
    obj = bus.get_object(BUS_NAME, "/org/bluez");
    # Agent config
    # "KeyboardDisplay" capability enables numeric-comparison pairing.
    agent_capability = "KeyboardDisplay"
    agent_path = "/test/agent"
    agent = Agent(bus, agent_path)
    agent_manager = dbus.Interface(obj, "org.bluez.AgentManager1")
    agent_manager.RegisterAgent(agent_path, agent_capability)
    agent_manager.RequestDefaultAgent(agent_path)
    # Mainloop
    # Blocks forever handling pairing requests via the registered agent.
    mainloop = GObject.MainLoop()
    mainloop.run()
|
#!../venv/bin/python
# Echo the command-line arguments (excluding the script name) as a list.
import sys
print (sys.argv[1:])
|
FORE
response = self.client.get(reverse('search.advanced'), qs)
results = json.loads(response.content)['results']
eq_([q1.get_absolute_url()], [r['url'] for r in results])
qs['created'] = constants.INTERVAL_AFTER
response = self.client.get(reverse('search.advanced'), qs)
results = json.loads(response.content)['results']
eq_([q2.get_absolute_url()], [r['url'] for r in results])
    def test_sortby_invalid(self):
        """Invalid sortby is ignored."""
        # (docstring fixed: it previously said "created_date" by copy-paste.)
        qs = {'a': 1, 'w': 4, 'format': 'json', 'sortby': ''}
        response = self.client.get(reverse('search.advanced'), qs)
        eq_(200, response.status_code)
    def test_created_date_invalid(self):
        """Invalid created_date is ignored."""
        thread1 = thread(save=True)
        post(thread=thread1, save=True)
        self.refresh()
        qs = {'a': 1, 'w': 4, 'format': 'json',
              'created': constants.INTERVAL_AFTER,
              'created_date': 'invalid'}
        response = self.client.get(reverse('search.advanced'), qs)
        # The bogus date must not filter the one indexed thread out.
        eq_(1, json.loads(response.content)['total'])
    def test_created_date_nonexistent(self):
        """created is set while created_date is left out of the query."""
        qs = {'a': 1, 'w': 2, 'format': 'json', 'created': 1}
        response = self.client.get(reverse('search.advanced'), qs)
        eq_(200, response.status_code)
    def test_updated_invalid(self):
        """Invalid updated_date is ignored."""
        thread1 = thread(save=True)
        post(thread=thread1, save=True)
        self.refresh()
        qs = {'a': 1, 'w': 4, 'format': 'json',
              'updated': 1, 'updated_date': 'invalid'}
        response = self.client.get(reverse('search.advanced'), qs)
        eq_(1, json.loads(response.content)['total'])
    def test_updated_nonexistent(self):
        """updated is set while updated_date is left out of the query."""
        thread1 = thread(save=True)
        post(thread=thread1, save=True)
        self.refresh()
        qs = {'a': 1, 'w': 2, 'format': 'json', 'updated': 1}
        response = self.client.get(reverse('search.advanced'), qs)
        eq_(response.status_code, 200)
    def test_asked_by(self):
        """Check several author values, including test for (anon)"""
        author_vals = (
            ('DoesNotExist', 0),
            ('jsocol', 2),
            ('pcraciunoiu', 2),
        )
        # Set up all the question data---creates users, creates the
        # questions, shove it all in the index, then query it and see
        # what happens.
        for name, number in author_vals:
            u = user(username=name, save=True)
            for i in range(number):
                ques = question(title=u'audio', creator=u, save=True)
                ques.tags.add(u'desktop')
                ans = answer(question=ques, save=True)
                answervote(answer=ans, helpful=True, save=True)
        self.refresh()
        qs = {'a': 1, 'w': 2, 'format': 'json'}
        for author, total in author_vals:
            qs.update({'asked_by': author})
            response = self.client.get(reverse('search.advanced'), qs)
            eq_(total, json.loads(response.content)['total'])
    def test_question_topics(self):
        """Search questions for topics."""
        p = product(save=True)
        t1 = topic(slug='doesnotexist', product=p, save=True)
        t2 = topic(slug='cookies', product=p, save=True)
        t3 = topic(slug='sync', product=p, save=True)
        question(topic=t2, save=True)
        question(topic=t2, save=True)
        question(topic=t3, save=True)
        self.refresh()
        # Each pair is (topic slug, expected number of matching questions).
        topic_vals = (
            (t1.slug, 0),
            (t2.slug, 2),
            (t3.slug, 1),
        )
        qs = {'a': 1, 'w': 2, 'format': 'json'}
        for topics, number in topic_vals:
            qs.update({'topics': topics})
            response = self.client.get(reverse('search.advanced'), qs)
            eq_(number, json.loads(response.content)['total'])
    def test_wiki_topics(self):
        """Search wiki for topics, includes multiple."""
        t1 = topic(slug='doesnotexist', save=True)
        t2 = topic(slug='extant', save=True)
        t3 = topic(slug='tagged', save=True)
        doc = document(locale=u'en-US', category=10, save=True)
        doc.topics.add(t2)
        revision(document=doc, is_approved=True, save=True)
        doc = document(locale=u'en-US', category=10, save=True)
        doc.topics.add(t2)
        doc.topics.add(t3)
        revision(document=doc, is_approved=True, save=True)
        self.refresh()
        # Each pair is (topic slug(s), expected match count); the last entry
        # requires BOTH topics, matching only the doubly-tagged document.
        topic_vals = (
            (t1.slug, 0),
            (t2.slug, 2),
            (t3.slug, 1),
            ([t2.slug, t3.slug], 1),
        )
        qs = {'a': 1, 'w': 1, 'format': 'json'}
        for topics, number in topic_vals:
            qs.update({'topics': topics})
            response = self.client.get(reverse('search.advanced'), qs)
            eq_(number, json.loads(response.content)['total'])
    def test_wiki_topics_inherit(self):
        """Translations inherit topics from their parents."""
        doc = document(locale=u'en-US', category=10, save=True)
        doc.topics.add(topic(slug='extant', save=True))
        revision(document=doc, is_approved=True, save=True)
        translated = document(locale=u'es', parent=doc, category=10,
                              save=True)
        revision(document=translated, is_approved=True, save=True)
        self.refresh()
        # The 'es' translation has no topics of its own but must still match.
        qs = {'a': 1, 'w': 1, 'format': 'json', 'topics': 'extant'}
        response = self.client.get(reverse('search.advanced', locale='es'), qs)
        eq_(1, json.loads(response.content)['total'])
    def test_question_products(self):
        """Search questions for products."""
        p1 = product(slug='b2g', save=True)
        p2 = product(slug='mobile', save=True)
        p3 = product(slug='desktop', save=True)
        question(product=p2, save=True)
        question(product=p2, save=True)
        question(product=p3, save=True)
        self.refresh()
        product_vals = (
            (p1.slug, 0),
            (p2.slug, 2),
            (p3.slug, 1),
        )
        qs = {'a': 1, 'w': 2, 'format': 'json'}
        for products, number in product_vals:
            qs.update({'product': products})
            response = self.client.get(reverse('search.advanced'), qs)
            eq_(number, json.loads(response.content)['total'])
    def test_wiki_products(self):
        """Search wiki for products."""
        # (product, number of approved documents to create and expect).
        prod_vals = (
            (product(slug='b2g', save=True), 0),
            (product(slug='mobile', save=True), 1),
            (product(slug='desktop', save=True), 2),
        )
        for prod, total in prod_vals:
            for i in range(total):
                doc = document(locale=u'en-US', category=10, save=True)
                doc.products.add(prod)
                revision(document=doc, is_approved=True, save=True)
        # Re-index the newly created documents.
        self.refresh()
        qs = {'a': 1, 'w': 1, 'format': 'json'}
        for prod, total in prod_vals:
            qs.update({'product': prod.slug})
            response = self.client.get(reverse('search.advanced'), qs)
            eq_(total, json.loads(response.content)['total'])
    def test_wiki_products_inherit(self):
        """Translations inherit products from their parents."""
        # English parent document tagged with the desktop product...
        doc = document(locale=u'en-US', category=10, save=True)
        p = product(title=u'Firefox', slug=u'desktop', save=True)
        doc.products.add(p)
        revision(document=doc, is_approved=True, save=True)
        # ...French translation with no product of its own.
        translated = document(locale=u'fr', parent=doc, category=10,
                              save=True)
        revision(document=translated, is_approved=True, save=True)
        self.refresh()
        # Filtering the fr wiki by the parent's product finds the translation.
        qs = {'a': 1, 'w': 1, 'format': 'json', 'product': p.slug}
        response = self.client.get(reverse('search.advanced', locale='fr'), qs)
        eq_(1, json.loads(response.content)['total'])
def test_discussion_filter_author(self):
"""Filter by author in discussion forums."""
author_vals = (
('DoesN |
# ********************************************************************** <====
from artmgr.transport.basew import BaseWTransport
# ********************************************************************** ====>
import os
import sys
import errno
import stat
import re
# chunksize for reading/writing local files (bytes per read()/write() call)
CHUNK = 8192
# ---------------------------------------------------------------------
def mkpath_recursive(path):
    """Ensure *path* exists as a directory, creating missing ancestors.

    Existing directories are accepted as-is.  If an existing path component
    is not a directory, InvalidArgumentError is raised; any OSError other
    than ENOENT propagates unchanged.
    """
    try:
        st_mode = os.stat( path ).st_mode
    except OSError as err:
        if err.errno != errno.ENOENT:
            raise
        # Path is missing: build the parent chain first, then this leaf.
        parent = os.path.split( path )[0]
        if parent:
            mkpath_recursive( parent )
        os.mkdir( path )
    else:
        if not stat.S_ISDIR(st_mode):
            raise InvalidArgumentError("parent path '"+str(path)+"' not a dir")
# ---------------------------------------------------------------------
class LocalTransport( BaseWTransport ):
    """
    A full R/W transport instance that uses a locally visible directory to
    store and read all artifact data.

    All file/folder names passed to the public methods are interpreted
    relative to the repository base directory (basedir/subrepo).
    """

    def __init__( self, basedir, subrepo ):
        """
        Constructor
        @param basedir (str): local folder to use
        @param subrepo (str): name of the repository we are dealing with
        """
        # NOTE(review): InvalidArgumentError is not imported in this module;
        # presumably it comes from the artmgr package -- confirm.
        if not basedir:
            raise InvalidArgumentError("Empty basedir in local transport")
        if not subrepo:
            raise InvalidArgumentError("Empty subrepo in local transport")
        self._basedir = os.path.join(basedir, subrepo)
        super(LocalTransport, self).__init__()

    def init_base( self ):
        """Ensure the base path for the repository exists"""
        mkpath_recursive( self._basedir )

    def get( self, sourcename, dest ):
        """
        Read a file into a file-like destination.
        @param sourcename (str): name of the file in remote repo
        @param dest (file): an object with a write() method
        @return (bool): \c True if ok, \c False if the file does not exist
        """
        name = os.path.join(self._basedir, sourcename)
        try:
            with open(name, 'rb') as f:
                # Stream in CHUNK-sized pieces to bound memory usage.
                # ('chunk' instead of 'bytes': don't shadow the builtin.)
                while True:
                    chunk = f.read(CHUNK)
                    if not chunk:
                        break
                    dest.write(chunk)
            return True
        except IOError as e:
            if e.errno == errno.ENOENT:
                return False
            raise

    def otype( self, path ):
        """
        Given the path of an object, return:
         * 'F' for a file,
         * 'D' for a directory,
         * \c None if the path does not exist
        """
        oldname = os.path.join(self._basedir, path)
        try:
            mode = os.stat( oldname ).st_mode
        except OSError as e:
            if e.errno == errno.ENOENT:
                return None
            raise
        # '?' covers special files (sockets, fifos, devices, ...)
        return 'D' if stat.S_ISDIR(mode) else 'F' if stat.S_ISREG(mode) else '?'

    def put( self, source, destname ):
        """
        Store a file. If a file with the same name existed, it is overwritten
        @param source (file): an object with a read() method
        @param destname (str): name of the destination file,
          relative to repo base directory
        """
        name = os.path.join(self._basedir, destname)
        with open(name, 'wb') as f:
            while True:
                chunk = source.read(CHUNK)
                if not chunk:
                    break
                f.write(chunk)

    def delete( self, filename ):
        """
        Delete a file (raises OSError if it does not exist)
        """
        name = os.path.join(self._basedir, filename)
        os.unlink( name )

    def rename( self, oldname, newname ):
        """
        Rename a file into a new name and/or folder
        """
        oldname = os.path.join(self._basedir, oldname)
        newname = os.path.join(self._basedir, newname)
        os.rename( oldname, newname )

    def folder_create( self, path ):
        """
        Make a folder in the repository, assuming all parent folders exist
        """
        os.mkdir( os.path.join(self._basedir, path) )

    def folder_list( self, path ):
        """
        Return the list of all components (files & folders) in a folder
        *This method is optional*
        """
        return os.listdir( os.path.join(self._basedir, path) )
|
#author :haiyfu
#date:April 14
#description:
#contact:haiyangfu512@gmail.com
"""
This little part is to check how many different values in
a column and store the unqiue values in a list.
For FCBF initially.
The last column is the class .
"""
from sys import argv
#only count the target file and return
#a list structure which contains the detail
#information,like [23, [[1,23],[11,233]], 34 ]
#Here is the correspond meanings
#[attribure_number,[ [first-column-different-values] [2nd-df-val] ],line_num]
def rc_gn(sn):
    """Scan CSV file *sn* and collect the distinct values of every column.

    Returns [column_count, per_column_sorted_value_lists, line_count]
    where line_count includes the header row.  Scanning stops at the
    first row with fewer fields than the first line.
    """
    src = open(sn)
    n_cols = len(src.readline().split(","))
    # Rewind and re-read the first line to seed each column's value list.
    src.seek(0, 0)
    header = src.readline().strip("\r \n ").split(",")
    columns = [[value] for value in header]
    rows_seen = 0
    for raw in src:
        fields = raw.strip("\n \r").split(",")
        if len(fields) < n_cols:
            break
        col_idx = 0
        for value in fields:
            bucket = columns[col_idx]
            if value not in bucket:
                bucket.append(value)
                bucket.sort()
            col_idx = col_idx + 1
        rows_seen = rows_seen + 1
    src.close()
    # +1 accounts for the header line.
    return [n_cols, columns, rows_seen + 1]
def wrt_rc(rc, tn):
    """Write the summary produced by rc_gn() to file *tn*.

    Layout: class line ("<count>,<values>."), the attribute count, then one
    "A<i> - <values>." line per non-class column.
    """
    out = open(tn, "w")
    # The last column holds the class labels.
    class_values = rc[1][-1]
    out.write(str(len(class_values)) + "," + ",".join(class_values) + ".\n")
    attr_count = rc[0] - 1
    out.write(str(attr_count) + "\n")
    for pos in range(attr_count):
        out.write("A" + str(pos + 1) + " - " + ",".join(rc[1][pos]) + ".\n")
    out.close()
if __name__=="__main__":
    # usage: python <script> <source_csv> <output_file>
    script_nm,src_file,out_file=argv
    wrt_rc(rc_gn(src_file),out_file)
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Lint as: python3
"""Simple scrambling test generator."""
import copy
import random
from typing import List, Text, Optional
from lit_nlp.api import components as lit_components
from lit_nlp.api import dataset as lit_dataset
from lit_nlp.api import model as lit_model
from lit_nlp.api import types
from lit_nlp.lib import utils
# Shorthand alias for LIT's JSON-style example dicts.
JsonDict = types.JsonDict
# Config key for the multi-select of which input fields to scramble.
FIELDS_TO_SCRAMBLE_KEY = 'Fields to scramble'
class Scrambler(lit_components.Generator):
  """Scramble all words in an example to generate a new example."""

  @staticmethod
  def scramble(val: Text) -> Text:
    """Return *val* with its space-delimited words in shuffled order."""
    words = val.split(' ')
    random.shuffle(words)
    return ' '.join(words)

  def config_spec(self) -> types.Spec:
    # Let the user choose which TextSegment inputs are scrambled; all of
    # them are selected by default.
    return {
        FIELDS_TO_SCRAMBLE_KEY:
            types.MultiFieldMatcher(
                spec='input',
                types=['TextSegment'],
                select_all=True),
    }

  def generate(self,
               example: JsonDict,
               model: lit_model.Model,
               dataset: lit_dataset.Dataset,
               config: Optional[JsonDict] = None) -> List[JsonDict]:
    """Naively scramble all words in an example.

    Note: Even if more than one field is to be scrambled, only a single
    example will be produced, unlike other generators which will produce
    multiple examples, one per field.

    Args:
      example: the example used for basis of generated examples.
      model: the model.
      dataset: the dataset.
      config: user-provided config properties.

    Returns:
      examples: a list of generated examples.
    """
    del model  # Unused.
    settings = config or {}
    # If config key is missing, generate no examples.
    chosen_fields = list(settings.get(FIELDS_TO_SCRAMBLE_KEY, []))
    if not chosen_fields:
      return []
    # TODO(lit-dev): move this to generate_all(), so we read the spec once
    # instead of on every example.
    segment_keys = utils.find_spec_keys(dataset.spec(), types.TextSegment)
    if not segment_keys:
      return []
    targets = [key for key in segment_keys if key in chosen_fields]
    scrambled = copy.deepcopy(example)
    for key in targets:
      scrambled[key] = self.scramble(example[key])
    return [scrambled]
|
import re
import html
# The regular string.split() only takes a max number of splits,
# but it won't unpack if there aren | 't enough values.
# This function ensures that we always get the wanted
# number of returned values, even if the | string doesn't include
# as many splits values as we want, simply by filling in extra
# empty strings at the end.
#
# Some examples:
# split("a b c d", " ", 3) = ["a", "b", "c d"]
# split("a b c" , " ", 3) = ["a", "b", "c"]
# split("a b", " ", 3) = ["a", "b", ""]
def split(s, sep, count):
    """Split *s* on *sep* into exactly *count* pieces.

    Unlike str.split, the result always has *count* elements: missing
    trailing pieces come back as empty strings.
    """
    # Pad with enough separators that count-1 splits always succeed
    # (sep * negative is '' when s already has enough separators).
    missing = count - 1 - s.count(sep)
    padded = s + sep * missing
    return padded.split(sep, count - 1)
# Sanitize a string by removing all new lines and extra spaces
def sanitize_string(s):
    """Collapse every whitespace run in *s* to a single space, trimmed.

    str.split() with no arguments already discards leading/trailing
    whitespace, so the previous trailing .strip() was redundant.
    """
    return " ".join(s.split())
# Unescape HTML/XML entities
def unescape_entities(text):
    """Replace HTML/XML entities (named, decimal, hex) with their characters.

    Unknown or malformed entities are left in place unchanged.
    """
    def _decode(match):
        entity = match.group(1)
        try:
            named = html.entities.name2codepoint
            if entity in named:
                return chr(named[entity])
            if entity.lower().startswith("#x"):
                # Hexadecimal character reference, e.g. &#x41;
                return chr(int(entity[2:], 16))
            if entity.startswith("#"):
                # Decimal character reference, e.g. &#65;
                return chr(int(entity[1:]))
        except (ValueError, KeyError):
            pass  # Fall through to default return
        return match.group(0)
    return re.sub(r"&([#a-zA-Z0-9]+);", _decode, text)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import io
import os
import sys
import re
from setuptools import setup
# Shortcut: "python setup.py publish" uploads a source dist and exits.
if sys.argv[-1] == "publish":
    os.system("python setup.py sdist upload")
    sys.exit()

packages = [
    "the_big_username_blacklist"
]

# Handle requirements
install_requires = []
tests_requires = [
    "pytest==3.0.5",
]

# Convert markdown to rst for PyPI; fall back to an empty description when
# pypandoc (or pandoc itself) is unavailable or conversion fails.
try:
    from pypandoc import convert
    long_description = convert("README.md", "rst")
except Exception:  # was a bare except: don't swallow SystemExit/KeyboardInterrupt
    long_description = ""

# Read the package version from __init__.py without importing the package.
version = ''
with io.open('the_big_username_blacklist/__init__.py', 'r', encoding='utf-8') as fd:
    version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
                        fd.read(), re.MULTILINE).group(1)

setup(
    name="the_big_username_blacklist",
    version=version,
    description="Validate usernames against a blacklist",  # NOQA
    long_description=long_description,
    author="Martin Sandström",
    author_email="martin@marteinn.se",
    url="https://github.com/marteinn/the-big-username-blacklist-python",
    packages=packages,
    package_data={"": ["LICENSE", ], "the_big_username_blacklist": ["*.txt"]},
    package_dir={"the_big_username_blacklist": "the_big_username_blacklist"},
    include_package_data=True,
    install_requires=install_requires,
    license="MIT",
    zip_safe=False,
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "Natural Language :: English",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.2",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: Implementation :: PyPy"
    ],
)
|
#SBaaS
from .stage02_physiology_graphData_io import stage02_physiology_graphData_io
from SBaaS_models.models_COBRA_execute import models_COBRA_execute
from .stage02_physiology_analysis_query import stage02_physiology_analysis_query
#System
import copy
class stage02_physiology_graphData_execute(stage02_physiology_graphData_io):
    """Execution methods for stage02 physiology graph analyses."""
    def execute_findShortestPaths(self,
                analysis_id_I,
                algorithms_params_I,
                nodes_startAndStop_I,
                exclusion_list_I=[],
                weights_I=[],
                ):
        '''
        compute the shortest paths
        INPUT:
        analysis_id_I = analysis id; its simulations and models are queried
        algorithms_params_I = list of {'algorithm': ..., 'params': ...} dicts
        nodes_startAndStop_I = start/stop nodes for the path search
        exclusion_list_I = graph components to exclude from the search
        weights_I = [] for an unweighted graph, or the name of the query
            class used to import per-simulation graph weights
        OUTPUT:
        rows written to data_stage02_physiology_graphData_shortestPathStats
        and data_stage02_physiology_graphData_shortestPaths
        '''
        exCOBRA01 = models_COBRA_execute(self.session,self.engine,self.settings);
        exCOBRA01.initialize_supportedTables();
        physiology_analysis_query = stage02_physiology_analysis_query(self.session,self.engine,self.settings);
        physiology_analysis_query.initialize_supportedTables();
        data_O=[];
        data_graphs_O=[];
        rows = physiology_analysis_query.getJoin_analysisID_dataStage02PhysiologyAnalysisAndSimulation(analysis_id_I);
        for row in rows:
            # resolve the weights source for this simulation
            weights = [];
            if type(weights_I)==type([]):
                weights = weights_I;
                weights_str = '[]';
            elif type(weights_I)==type(''):
                if weights_I == 'stage02_physiology_sampledData_query':
                    weights = self.import_graphWeights_sampledData(row['simulation_id']);
                    weights_str = 'stage02_physiology_sampledData_query';
                elif weights_I == 'stage02_physiology_simulatedData_query':
                    weights = self.import_graphWeights_simulatedData(row['simulation_id']);
                    weights_str = 'stage02_physiology_simulatedData_query';
                else:
                    print('weights source not recognized');
            # run the analysis for different algorithms/params
            for ap in algorithms_params_I:
                shortestPaths = exCOBRA01.execute_findShortestPath_nodes(
                    row['model_id'],
                    nodes_startAndStop_I = nodes_startAndStop_I,
                    algorithm_I=ap['algorithm'],
                    exclusion_list_I=exclusion_list_I,
                    params_I=ap['params'],
                    weights_I=weights
                    )
                for sp in shortestPaths:
                    # fields shared by the stats row and every path row
                    tmp = {};
                    tmp['analysis_id']=analysis_id_I
                    tmp['simulation_id']=row['simulation_id']
                    tmp['weights']=weights_str;
                    tmp['used_']=True;
                    tmp['comment_']=None;
                    tmp['params']=sp['params']
                    tmp['path_start']=sp['start']
                    tmp['algorithm']=sp['algorithm']
                    tmp['path_stop']=sp['stop']
                    # one summary-statistics row per start/stop pair
                    tmp1 = copy.copy(tmp);
                    tmp1['path_n']=sp['path_n']
                    tmp1['path_iq_1']=sp['path_iq_1']
                    tmp1['path_var']=sp['path_var']
                    tmp1['path_ci_lb']=sp['path_ci_lb']
                    tmp1['path_cv']=sp['path_cv']
                    tmp1['path_iq_3']=sp['path_iq_3']
                    tmp1['path_ci_ub']=sp['path_ci_ub']
                    tmp1['path_average']=sp['path_average']
                    tmp1['path_max']=sp['path_max']
                    tmp1['path_median']=sp['path_median']
                    tmp1['path_ci_level']=sp['path_ci_level']
                    tmp1['path_min']=sp['path_min']
                    data_O.append(tmp1);
                    # one row per individual path found
                    for path in sp['all_paths']:
                        tmp2 = copy.copy(tmp);
                        tmp2['paths']=path;
                        data_graphs_O.append(tmp2);
        self.add_rows_table('data_stage02_physiology_graphData_shortestPathStats',data_O);
        self.add_rows_table('data_stage02_physiology_graphData_shortestPaths',data_graphs_O);
from __future__ import absolute_import, division, print_function
from dynd._pydynd import w_type, \
make_var_dim, make_strided_dim, make_fixed_dim, make_cfixed_dim
__all__ = ['var', 'strided', 'fixed', 'cfixed']
class _Dim(object):
    """Base class for dimension factories.

    Dimensions compose with ``*`` (against types, type names, or other
    dimensions) and repeat with ``**``.
    """
    __slots__ = []

    def __mul__(self, rhs):
        if isinstance(rhs, w_type):
            # Wrap the element type with each dimension, innermost first.
            result = rhs
            for dim in reversed(self.dims):
                result = dim.create(result)
            return result
        elif isinstance(rhs, (str, type)):
            # Allow:
            #  ndt.strided * 'int32'
            #  ndt.strided * int
            result = w_type(rhs)
            for dim in reversed(self.dims):
                result = dim.create(result)
            return result
        elif isinstance(rhs, _Dim):
            # Concatenate the two dimension fragments.
            return _DimFragment(self.dims + rhs.dims)
        else:
            raise TypeError('Expected a dynd dimension or type, not %r' % rhs)

    def __pow__(self, count):
        # Repeat this fragment *count* times.
        return _DimFragment(self.dims * count)
class _DimFragment(_Dim):
    """A sequence of dimensions accumulated from ``*`` / ``**`` composition."""
    __slots__ = ['dims']

    def __init__(self, dims):
        self.dims = dims

    def __repr__(self):
        return ' * '.join(map(repr, self.dims))
class _Var(_Dim):
    """
    Creates a var dimension when combined with other types.

    Examples
    --------
    >>> ndt.var * ndt.int32
    ndt.type('var * int32')
    >>> ndt.fixed[5] * ndt.var * ndt.float64
    ndt.type('5 * var * float64')
    """
    __slots__ = []

    def __repr__(self):
        return 'ndt.var'

    def create(self, eltype):
        # Wrap the element type in a var dimension.
        return make_var_dim(eltype)

    @property
    def dims(self):
        # A bare var dimension is itself a one-element fragment.
        return (self,)
class _Strided(_Dim):
    """
    Creates a strided dimension when combined with other types.

    Examples
    --------
    >>> ndt.strided * ndt.int32
    ndt.type('strided * int32')
    >>> ndt.fixed[5] * ndt.strided * ndt.float64
    ndt.type('5 * strided * float64')
    """
    __slots__ = []

    def __repr__(self):
        return 'ndt.strided'

    def create(self, eltype):
        # Wrap the element type in a strided dimension.
        return make_strided_dim(eltype)

    @property
    def dims(self):
        # A bare strided dimension is itself a one-element fragment.
        return (self,)
class _Fixed(_Dim):
    """
    Creates a fixed dimension when combined with other types; the size
    is supplied via indexing, e.g. ``ndt.fixed[5]``.

    Examples
    --------
    >>> ndt.fixed[3] * ndt.int32
    ndt.type('3 * int32')
    >>> ndt.fixed[5] * ndt.var * ndt.float64
    ndt.type('5 * var * float64')
    """
    __slots__ = ['dim_size']

    def __init__(self, dim_size=None):
        self.dim_size = dim_size

    def __getitem__(self, dim_size):
        # ndt.fixed[n] produces a sized instance.
        return _Fixed(dim_size)

    def __repr__(self):
        if self.dim_size is None:
            return 'ndt.fixed'
        return 'ndt.fixed[%d]' % self.dim_size

    def create(self, eltype):
        return make_fixed_dim(self.dim_size, eltype)

    @property
    def dims(self):
        # Only a sized fixed dimension can participate in a type.
        if self.dim_size is None:
            raise TypeError('Need to specify ndt.fixed[dim_size],' +
                            ' not just ndt.fixed')
        return (self,)
class _CFixed(_Dim):
    """
    Creates a cfixed dimension when combined with other types; the size
    is supplied via indexing, e.g. ``ndt.cfixed[2]``.

    Examples
    --------
    >>> ndt.cfixed[3] * ndt.int32
    ndt.type('cfixed[3] * int32')
    >>> ndt.fixed[5] * ndt.cfixed[2] * ndt.float64
    ndt.type('5 * cfixed[2] * float64')
    """
    __slots__ = ['dim_size']

    def __init__(self, dim_size=None):
        self.dim_size = dim_size

    def __getitem__(self, dim_size):
        # ndt.cfixed[n] produces a sized instance.
        return _CFixed(dim_size)

    def __repr__(self):
        if self.dim_size is None:
            return 'ndt.cfixed'
        return 'ndt.cfixed[%d]' % self.dim_size

    def create(self, eltype):
        return make_cfixed_dim(self.dim_size, eltype)

    @property
    def dims(self):
        # Only a sized cfixed dimension can participate in a type.
        if self.dim_size is None:
            raise TypeError('Need to specify ndt.cfixed[dim_size],' +
                            ' not just ndt.cfixed')
        return (self,)
# Singleton dimension factories exported as the public ndt-style names.
var = _Var()
strided = _Strided()
fixed = _Fixed()
cfixed = _CFixed()
|
from django.contrib.contenttypes.fields import GenericRelation
from django.db import models
from django.urls import reverse
from django.utils.encoding import python_2_unicode_compatible
from extras.models import CustomFieldModel, CustomFieldValue
from utilities.models import CreatedUpdatedModel
from utilities.utils import csv_format
@python_2_unicode_compatible
class TenantGroup(models.Model):
    """
    An arbitrary collection of Tenants.
    """
    # Human-readable group name; also the default ordering key.
    name = models.CharField(max_length=50, unique=True)
    # URL-friendly identifier used in filter query strings.
    slug = models.SlugField(unique=True)
    class Meta:
        ordering = ['name']
    def __str__(self):
        return self.name
    def get_absolute_url(self):
        # No group detail view exists; link to the tenant list filtered
        # down to this group.
        return "{}?group={}".format(reverse('tenancy:tenant_list'), self.slug)
@python_2_unicode_compatible
class Tenant(CreatedUpdatedModel, CustomFieldModel):
    """
    A Tenant represents an organization served by the NetBox owner. This is typically a customer or an internal
    department.
    """
    name = models.CharField(max_length=30, unique=True)
    slug = models.SlugField(unique=True)
    # Optional grouping; the tenant is kept (group set NULL) when the
    # group is deleted.
    group = models.ForeignKey('TenantGroup', related_name='tenants', blank=True, null=True, on_delete=models.SET_NULL)
    description = models.CharField(max_length=100, blank=True, help_text="Long-form name (optional)")
    comments = models.TextField(blank=True)
    # Generic link to any custom field values attached to this tenant.
    custom_field_values = GenericRelation(CustomFieldValue, content_type_field='obj_type', object_id_field='obj_id')
    class Meta:
        ordering = ['group', 'name']
    def __str__(self):
        return self.name
    def get_absolute_url(self):
        return reverse('tenancy:tenant', args=[self.slug])
    def to_csv(self):
        # Column order matches the tenant CSV import/export format.
        return csv_format([
            self.name,
            self.slug,
            self.group.name if self.group else None,
            self.description,
        ])
|
# -*- encoding: utf-8 -*-
from __future__ import absolute_import
import requests
import json
import logging
from bs4 import BeautifulSoup as htmldoc
def carrier_lookup():
    """Module-level placeholder; always returns None (see CarrierLookup)."""
    return None
class CarrierLookup(object):
    """Look up carrier information for a phone number by scraping
    twilio.com's public lookup demo (fetch CSRF token, then POST the number).
    """

    def __init__(self, number, logname=None):
        """
        @param number: phone number to look up
        @param logname: optional logger name (defaults to the '' logger)
        """
        self.number = number
        self._logname = logname if logname else ''
        self.log = logging.getLogger(self._logname)

    def lookup(self):
        """Fetch and return the decoded JSON lookup result for self.number.

        Raises ValueError when the lookup page is inaccessible or does not
        contain the expected CSRF token.
        """
        log = self.log
        domain = 'www.twilio.com'
        host = 'https://{0}'.format(domain)
        lookup = '{0}/lookup'.format(host)
        # masquerade as OS-X Firefox
        s = requests.Session()
        s.headers['user-agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:37.0) Gecko/20100101 Firefox/37.0'
        s.headers['x-requested-with'] = 'XMLHttpRequest'
        s.headers['accept-language'] = 'en-US,en;q=0.5'
        s.headers['cache-control'] = 'no-cache'
        s.headers['content-type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
        s.headers['host'] = domain
        s.headers['DNT'] = '1'
        s.headers['connection'] = 'close'
        # fetch the base page to set the cookies and get csrf and sid values
        r = s.get(lookup)
        # .items() instead of .iteritems(): works on both Python 2 and 3.
        hdrs = {k: v for k, v in s.headers.items()}
        cookies = [{c.name: c.value} for c in s.cookies]
        # NOTE(review): jsonify is not defined in this module; presumably a
        # project helper -- confirm it is in scope at runtime.
        log.debug('\nsession headers: {0}\n'.format(jsonify(hdrs)))
        log.debug('\nsession cookies: {0}\n'.format(jsonify(cookies)))
        if not cookies:
            log.error('unknown error accessing base page: {0}'.format(lookup))
            log.error('ERROR: {0}'.format(r.status_code))
            log.error(r.text)
            raise ValueError()
        # extract the csrf and sid
        page = htmldoc(r.text)
        token = page.find('meta', attrs={'name': 'csrfToken'})
        if token is None:
            # Previously this only logged and then crashed with a TypeError
            # on token['content']; fail explicitly instead.
            log.debug(r.text)
            raise ValueError('csrfToken not found on {0}'.format(lookup))
        csrf = token['content']
        log.debug('NAME={0} CONTENT={1}'.format(token['name'], csrf))
        sid_attrs = {'type': 'hidden', 'role': 'visitorSid'}
        role = page.find('input', attrs=sid_attrs)
        sid = role['value']
        log.debug('ROLE={0} VALUE={1}'.format(role['role'], sid))
        # retrieve the phone number information
        s.headers['referer'] = lookup
        params = {
            'Type': 'lookup',
            'PhoneNumber': "{0}".format(self.number),
            'VisitorSid': sid,
            'CSRF': csrf,
        }
        log.debug('\nparams: {0}\n'.format(jsonify(params)))
        url = '{0}/functional-demos'.format(host)
        r = s.post(url, params=params)
        info = json.loads(r.content)
        return info
|
#!/usr/bin/env python
#
# Copyright 2014 Simone Campagna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Configment interface
>>> class TestCfg(Configment):
... CONFIGSPEC_SOURCE = '''
... [abc]
... x = integer(default=3)
... '''
>>> cfg = TestCfg()
>>> cfg["abc"]["x"]
3
>>>
"""
import os
import validate
import six
from .configobj_wrap import ConfigObjWrap
from .meta_configment import MetaConfigment
from .configment_validator import ConfigmentValidator
from .pathname import Pathname
from .environment import load_configspec
__author__ = "Simone Campagna"
__all__ = [
'create_configment_class',
'Configment',
'ConfigmentValidateError',
]
class ConfigmentValidateError(validate.ValidateError):
    """Raised when configment validation fails; args[0] holds the result."""

    def __str__(self):
        return 'validation failed: {}'.format(self.args[0])
class BaseConfigment(ConfigObjWrap):
    """ConfigObj wrapper driven by the class-level CONFIGSPEC.

    When constructed with a filename the file is loaded and validated
    (raising on errors); otherwise the instance is initialized from the
    spec defaults.
    """
    # Subclasses supply the parsed configspec here.
    CONFIGSPEC = None
    # "hide" omits defaulted values from output; "show" writes them out.
    DEFAULT_MODE_HIDE = "hide"
    DEFAULT_MODE_SHOW = "show"
    DEFAULT_MODES = [DEFAULT_MODE_HIDE, DEFAULT_MODE_SHOW]
    DEFAULT_MODE = DEFAULT_MODE_HIDE
    def __init__(self, filename=None, default_mode=None):
        super(BaseConfigment, self).__init__(
            infile=None,
            configspec=self.__class__.CONFIGSPEC,
            unrepr=True,
            interpolation=False,
            indent_type="    ",
            stringify=True,
        )
        if default_mode is None:
            default_mode = self.DEFAULT_MODE
        self.default_mode = default_mode
        self.set_filename(filename)
        if self.filename is not None:
            self.load_file(filename, throw_on_errors=True)
        else:
            self.initialize(throw_on_errors=False)
    def set_filename(self, filename=None):
        """Set the backing file; relative paths resolve against its dir."""
        super(BaseConfigment, self).set_filename(filename)
        if self.filename is None:
            self._base_dir = os.getcwd()
        else:
            self._base_dir = os.path.dirname(os.path.abspath(filename))
    def do_validation(self, base_dir=None, reset=False, throw_on_errors=False):
        """Validate against the configspec and return the wrapped result.

        Raises ConfigmentValidateError when *throw_on_errors* is set and
        the filtered validation result is non-empty (i.e. errors remain).
        """
        if base_dir is None:
            base_dir = self._base_dir
        validator = ConfigmentValidator()
        # "show" mode copies spec defaults into the output document.
        copy = self.default_mode == self.DEFAULT_MODE_SHOW
        result = super(BaseConfigment, self).validate(validator, preserve_errors=True, copy=copy)
        result = self.filter_validation_result(result)
        self.set_paths(base_dir, reset=reset)
        if throw_on_errors and result:
            raise ConfigmentValidateError(result)
        c_result = ConfigObjWrap(
            infile=result,
            stringify=True,
            unrepr=True,
            indent_type='    ',
        )
        return c_result
@six.add_metaclass(MetaConfigment)
class Configment(BaseConfigment):
    """User-facing configment base class.

    MetaConfigment compiles the CONFIGSPEC_SOURCE defined on subclasses
    into the CONFIGSPEC used for validation.
    """
    def __init__(self, filename=None, default_mode=None):
        super(Configment, self).__init__(
            filename=filename,
            default_mode=default_mode,
        )
    def impl_initialize(self, throw_on_errors=False):
        """Validate the in-memory defaults; return False on any failure."""
        try:
            return self.do_validation(reset=False, throw_on_errors=throw_on_errors)
        except:  # pylint: disable=bare-except
            # NOTE(review): bare except converts *any* error -- including
            # programming mistakes -- into False; consider narrowing.
            return False
    def impl_load_file(self, filename, throw_on_errors=False):
        """Reload from *filename*, validating with paths resolved against
        the file's directory."""
        default_base_dir = Pathname.get_default_base_dir()
        Pathname.set_default_base_dir(self._base_dir)
        self.set_filename(filename)
        self.reload()
        try:
            result = self.do_validation(base_dir=self._base_dir, reset=True, throw_on_errors=throw_on_errors)
        finally:
            # Always restore the process-wide default base dir.
            Pathname.set_default_base_dir(default_base_dir)
        return result
    def impl_dump_s(self, stream=None, filename=None, throw_on_errors=False):
        """Validate and write the config to *stream*, resolving paths against
        *filename*'s directory when given (else the current base dir)."""
        default_base_dir = Pathname.get_default_base_dir()
        try:
            if filename is not None:
                base_dir = os.path.dirname(os.path.normpath(os.path.abspath(filename)))
            else:
                base_dir = self._base_dir
            Pathname.set_default_base_dir(base_dir)
            self.do_validation(base_dir=base_dir, reset=False, throw_on_errors=throw_on_errors)
            self.write(stream)
        finally:
            Pathname.set_default_base_dir(default_base_dir)
def create_configment_class(configspec_filename, class_name=None, dir_list=None):
    """Build a new Configment subclass from a configspec file.

    The class name defaults to the configspec filename's basename with its
    extension removed; *dir_list* is forwarded to load_configspec().
    """
    if class_name is None:
        basename = os.path.basename(configspec_filename)
        class_name = os.path.splitext(basename)[0]
    spec_source = load_configspec(configspec_filename, dir_list=dir_list)
    return MetaConfigment(class_name, (Configment, ),
                          {'CONFIGSPEC_SOURCE': spec_source})
|
"""
compact_group.py - Part of millennium-compact-groups package
Defines CompactGroup object to handle information about a single
compact group.
Copyright(C) 2016 by
Trey Wenger; tvwenger@gmail.com
Chris Wiens; cdw9bf@virginia.edu
Kelsey Johnson; kej7a@virginia.edu
GNU General Public License v3 (GNU GPLv3)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation, either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
14 Mar 2016 - TVW Finalized version 1.0
"""
# Package/program identification constants.
_PACK_NAME = 'millennium-compact-groups'
_PROG_NAME = 'compact_group.py'
_VERSION = 'v1.0'
# System utilities
import sys
import os
# Numerical utilities
import numpy as np
import pandas
class CompactGroup:
    """
    Compact Group Object

    Holds the member galaxies of one compact group (a pandas DataFrame)
    plus derived statistics filled in by the find_*/calc_* methods.
    """
    def __init__(self, label, members):
        """
        Initialize CompactGroup Object

        label   : group identifier
        members : pandas DataFrame of member galaxies; the methods below
                  read the columns x, y, z, velX, velY, velZ, mvir and
                  stellarMass
        """
        self.label = label
        self.members = members
        self.median_vel = 0.0
        self.mediod = None
        self.radius = 0.0
        self.avg_mvir = 0.0
        self.avg_stellarmass = 0.0
        self.num_nearby_galaxies = 0
        # NOTE(review): expected to be replaced externally with a DataFrame
        # of neighbor galaxies before calc_annular_mass_ratio() is called.
        self.neighbors = []
        self.annular_mass_ratio = 0.0
        self.secondtwo_mass_ratio = 0.0
    def find_dwarfs(self,dwarf_limit):
        """
        Find galaxies that have a stellar mass less than dwarf_limit
        """
        # add a is_dwarf column to members
        self.members['is_dwarf'] = np.zeros(len(self.members),dtype=bool)
        # assign dwarfs (.loc: the .ix indexer was removed in pandas >= 1.0)
        ind = self.members['stellarMass'] < dwarf_limit
        self.members.loc[ind,'is_dwarf'] = True
    def calc_median_velocity(self):
        """
        Calculate the median velocity of galaxies in this group,
        excluding dwarfs (run find_dwarfs() first)
        """
        good = (~self.members['is_dwarf'])
        vels = (self.members['velX']*self.members['velX'] +
                self.members['velY']*self.members['velY'] +
                self.members['velZ']*self.members['velZ'])**0.5
        # add a velocity column to members
        self.members['vel'] = vels
        self.median_vel = np.median(vels[good])
    def find_flybys(self,crit_velocity):
        """
        Find galaxies that are travelling crit_velocity faster or
        slower than median velocity of group. These are "fly-bys"
        """
        # add a is_flyby column to members
        self.members['is_flyby'] = np.zeros(len(self.members),dtype=bool)
        # assign flybys (.loc instead of the removed .ix indexer)
        ind = np.abs(self.members['vel'] - self.median_vel) > crit_velocity
        self.members.loc[ind,'is_flyby'] = True
    def calc_mediod(self):
        """
        Calculate the mediod center of this group, excluding
        dwarfs and flybys
        """
        good = ((~self.members['is_dwarf'])&(~self.members['is_flyby']))
        x_med = np.median(self.members['x'][good])
        y_med = np.median(self.members['y'][good])
        z_med = np.median(self.members['z'][good])
        self.mediod = np.array([x_med,y_med,z_med])
    def calc_radius(self):
        """
        Calculate the radius of this group, defined as the
        maximum galaxy distance from the mediod, excluding
        dwarfs and flybys (run calc_mediod() first)
        """
        good = ((~self.members['is_dwarf'])&(~self.members['is_flyby']))
        xdist = self.members['x'][good]-self.mediod[0]
        ydist = self.members['y'][good]-self.mediod[1]
        zdist = self.members['z'][good]-self.mediod[2]
        dists = (xdist*xdist + ydist*ydist + zdist*zdist)**0.5
        self.radius = np.max(dists)
    def calc_avg_mvir(self):
        """
        Calculate the average virial mass of galaxies in this group
        excluding dwarfs and flybys (NaN when none remain)
        """
        good = ((~self.members['is_dwarf'])&(~self.members['is_flyby']))
        if np.sum(good) == 0:
            self.avg_mvir = np.nan
        else:
            self.avg_mvir = np.mean(self.members['mvir'][good])
    def calc_avg_stellarmass(self):
        """
        Calculate the average stellar mass of galaxies in this group
        excluding dwarfs and flybys (NaN when none remain)
        """
        good = ((~self.members['is_dwarf'])&(~self.members['is_flyby']))
        if np.sum(good) == 0:
            self.avg_stellarmass = np.nan
        else:
            self.avg_stellarmass = np.mean(self.members['stellarMass'][good])
    def calc_annular_mass_ratio(self,radius):
        """
        Calculate the virial mass ratio
        of neighboring galaxies within the surrounding annulus to the
        total virial mass of all galaxies within the sphere
        (self.neighbors must have been populated with a 'mvir' column)
        """
        # mass density of the cluster sphere
        sphere_mass = np.sum(self.members['mvir'])
        sphere_mass = sphere_mass / (4.*np.pi/3. * self.radius**3.)
        # mass density in the annulus between self.radius and radius
        annulus_mass = np.sum(self.neighbors['mvir'])
        annulus_mass = annulus_mass/(4.*np.pi/3. * (radius**3. - self.radius**3.))
        self.annular_mass_ratio = annulus_mass/sphere_mass
    def calc_secondtwo_mass_ratio(self):
        """
        Calculate the ratio of the virial masses of the second largest
        members to the virial mass of the largest member
        (requires at least three members)
        """
        sorted_masses = np.sort(self.members['mvir'])
        self.secondtwo_mass_ratio = (sorted_masses[-2]+sorted_masses[-3])/sorted_masses[-1]
|
from .test_base_class import ZhihuClientC | lassTest
# Slug of the fixture user whose badges the tests below inspect.
PEOPLE_SLUG = 'giantchen'
class TestPeopleBadgeNumber(ZhihuClientClassTest):
    """Badge-related assertions for the fixture user PEOPLE_SLUG."""

    def _badge(self):
        # Shorthand: badge object of the fixture user.
        return self.client.people(PEOPLE_SLUG).badge

    def test_badge_topics_number(self):
        self.assertEqual(len(list(self._badge().topics)), 2)

    def test_people_has_badge(self):
        self.assertTrue(self._badge().has_badge)

    def test_people_has_identity(self):
        self.assertFalse(self._badge().has_identity)

    def test_people_is_best_answerer_or_not(self):
        self.assertTrue(self._badge().is_best_answerer)

    def test_people_identify_information(self):
        self.assertIsNone(self._badge().identity)
|
#! /bin/python
import xbh as xbhpkg
# Connect to the XBH harness, run the checksum benchmark, print results.
xbh = xbhpkg.Xbh()
#xbh.switch_to_app()
xbh.calc_checksum()
print(xbh.get_results())
|
class FieldRegistry(object):
    """Class-level registry mapping a model to the fields added for it.

    The mapping lives on the class, so every instance shares (and may
    register into) the same registry.
    """
    _registry = {}

    def add_field(self, model, field):
        """Append *field* to the registry entry for *model*."""
        registry = type(self)._registry
        if model not in registry:
            registry[model] = []
        registry[model].append(field)

    def get_fields(self, model):
        """Return the fields registered for *model* ([] when unknown)."""
        return type(self)._registry.get(model, [])

    def __contains__(self, model):
        return model in type(self)._registry
"""
Generates 40 random numbers and writes them
to a file. No number is repeated.
~ Created by Elijah Wilson 2014 ~
"""
# used for generating random integers
from random import randint
# open the output file -> "in.data"
f = open("in.data", "w")
# create an empty list
succ = []
# loops throu | gh 40 times for generating numbers
for x in xrange(0,40):
# generate random int between 1111 & 9999
randNum = randint(1111, 9999)
# check to see if it was already generated
if randNum not in succ:
# put the random number in the list
succ.append(str(randNum))
else:
# while the randNum has already been generated
| # generate a new one
while randNum in succ:
randNum = randint(1111, 9999)
# put the random number in the list
succ.append(str(randNum))
# loops through 40 times for writing to file
for x in xrange(0,40):
# makes sure it isn't the last line to be written
# to write a new line char
if x != 39:
f.write(succ[x] + "\n")
else:
# if it is the last line to be written
# don't write a new line char
f.write(succ[x])
#close the file
f.close() |
# -*- coding: utf-8 -*-
###############################################################################
#
# ZipFile
# Creates a zipped version of the specified Box file and returns a link to the new compressed file.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class ZipFile(Choreography):
    """Choreo wrapper for the Box "zip file" endpoint."""

    def __init__(self, temboo_session):
        """
        Bind this ZipFile Choreo to *temboo_session*, a TembooSession
        holding a valid set of Temboo credentials.
        """
        super(ZipFile, self).__init__(temboo_session, '/Library/Box/Files/ZipFile')

    def new_input_set(self):
        """Return a fresh, empty input set for this Choreo."""
        return ZipFileInputSet()

    def _make_result_set(self, result, path):
        result_set = ZipFileResultSet(result, path)
        return result_set

    def _make_execution(self, session, exec_id, path):
        execution = ZipFileChoreographyExecution(session, exec_id, path)
        return execution
class ZipFileInputSet(InputSet):
    """
    Input parameters for the ZipFile Choreo.  Each setter records one
    named input value that is sent when the Choreo executes.
    """

    def set_AccessToken(self, value):
        """(required, string) The access token retrieved during the OAuth2 process."""
        self._set_input('AccessToken', value)

    def set_AsUser(self, value):
        """(optional, string) The ID of the user. Only used for enterprise administrators to make API calls for their managed users."""
        self._set_input('AsUser', value)

    def set_FileID(self, value):
        """(required, string) The id of the file to zip."""
        self._set_input('FileID', value)

    def set_SharedLink(self, value):
        """(conditional, json) A JSON object representing the item's shared link and associated permissions. See documentation for formatting examples."""
        self._set_input('SharedLink', value)

    def set_ZipFileLocation(self, value):
        """(conditional, string) The id of the folder to put the new zip file in. When not specified, the zip file will be put in the root folder."""
        self._set_input('ZipFileLocation', value)

    def set_ZipFileName(self, value):
        """(required, string) The name of the zip file that will be created."""
        self._set_input('ZipFileName', value)
class ZipFileResultSet(ResultSet):
    """
    Results returned by a ZipFile Choreo execution.
    """

    def getJSONFromString(self, str):
        # NOTE: the parameter name shadows the builtin ``str``; kept as-is
        # for API compatibility with the generated Temboo bindings.
        """Deserialize a JSON document string into Python objects."""
        return json.loads(str)

    def get_Response(self):
        """(string) The response from Box: metadata of the newly created zip file."""
        return self._output.get('Response')

    def get_URL(self):
        """(string) The url for the newly created zip file."""
        return self._output.get('URL')
class ZipFileChoreographyExecution(ChoreographyExecution):
    """Execution handle for an asynchronously running ZipFile Choreo."""

    def _make_result_set(self, response, path):
        # Wrap the raw response in the Choreo-specific result set.
        return ZipFileResultSet(response, path)
|
import os
import platform
from setuptools import setup

# "import" __version__ without importing the module itself (which would
# require the CFFI bindings to already be built).
# FIX: the original left the file handle open; use a context manager.
__version__ = 'unknown'
with open('sounddevice.py') as module_file:
    for line in module_file:
        if line.startswith('__version__'):
            exec(line)
            break

MACOSX_VERSIONS = '.'.join([
    'macosx_10_6_x86_64',  # for compatibility with pip < v21
    'macosx_10_6_universal2',
])

# environment variables for cross-platform package creation
system = os.environ.get('PYTHON_SOUNDDEVICE_PLATFORM', platform.system())
architecture0 = os.environ.get('PYTHON_SOUNDDEVICE_ARCHITECTURE',
                               platform.architecture()[0])

# Pick the bundled PortAudio binary matching the target platform.
if system == 'Darwin':
    libname = 'libportaudio.dylib'
elif system == 'Windows':
    libname = 'libportaudio' + architecture0 + '.dll'
else:
    # On other systems, PortAudio is expected to be installed system-wide.
    libname = None

if libname and os.path.isdir('_sounddevice_data/portaudio-binaries'):
    packages = ['_sounddevice_data']
    package_data = {'_sounddevice_data': ['portaudio-binaries/' + libname,
                                          'portaudio-binaries/README.md']}
    zip_safe = False
else:
    packages = None
    package_data = None
    zip_safe = True
try:
    from wheel.bdist_wheel import bdist_wheel
except ImportError:
    # "wheel" is not installed -> no custom bdist_wheel command.
    cmdclass = {}
else:
    class bdist_wheel_half_pure(bdist_wheel):
        """Create OS-dependent, but Python-independent wheels."""

        def get_tag(self):
            # Map the build platform onto the wheel platform tag; the
            # Python and ABI tags stay generic ("py3", "none").
            if system == 'Darwin':
                platform_tags = MACOSX_VERSIONS
            elif system == 'Windows':
                platform_tags = 'win32' if architecture0 == '32bit' else 'win_amd64'
            else:
                platform_tags = 'any'
            return 'py3', 'none', platform_tags

    cmdclass = {'bdist_wheel': bdist_wheel_half_pure}
# Read the long description up front so the file handle is closed
# deterministically (the original inline open().read() leaked the handle).
with open('README.rst') as readme_file:
    long_description = readme_file.read()

setup(
    name='sounddevice',
    version=__version__,
    py_modules=['sounddevice'],
    packages=packages,
    package_data=package_data,
    zip_safe=zip_safe,
    python_requires='>=3',
    setup_requires=['CFFI>=1.0'],
    install_requires=['CFFI>=1.0'],
    extras_require={'NumPy': ['NumPy']},
    cffi_modules=['sounddevice_build.py:ffibuilder'],
    author='Matthias Geier',
    author_email='Matthias.Geier@gmail.com',
    description='Play and Record Sound with Python',
    long_description=long_description,
    license='MIT',
    keywords='sound audio PortAudio play record playrec'.split(),
    url='http://python-sounddevice.readthedocs.io/',
    platforms='any',
    classifiers=[
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Topic :: Multimedia :: Sound/Audio',
    ],
    cmdclass=cmdclass,
)
|
#!/usr/bin/env python
"""The MySQL database methods for foreman rule handling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from grr_response_core.lib import rdfvalue
from grr_response_server import foreman_rules
from grr_response_server.databases import mysql_utils
class MySQLDBForemanRulesMixin(object):
  """MySQLDB mixin for foreman rules related functions."""

  @mysql_utils.WithTransaction()
  def WriteForemanRule(self, rule, cursor=None):
    """Writes a foreman rule to the database (insert or update by hunt_id)."""
    query = ("INSERT INTO foreman_rules "
             "            (hunt_id, expiration_time, rule) "
             "VALUES (%s, FROM_UNIXTIME(%s), %s) "
             "ON DUPLICATE KEY UPDATE "
             "            expiration_time=FROM_UNIXTIME(%s), rule=%s")
    # BUG FIX: the original line ended with a stray trailing comma, which
    # turned exp_str into a 1-tuple and passed a malformed parameter to
    # MySQL instead of the timestamp.
    exp_str = mysql_utils.RDFDatetimeToTimestamp(rule.expiration_time)
    rule_str = rule.SerializeToBytes()
    cursor.execute(query, [rule.hunt_id, exp_str, rule_str, exp_str, rule_str])

  @mysql_utils.WithTransaction()
  def RemoveForemanRule(self, hunt_id, cursor=None):
    """Removes the foreman rule for the given hunt, if any."""
    query = "DELETE FROM foreman_rules WHERE hunt_id=%s"
    cursor.execute(query, [hunt_id])

  @mysql_utils.WithTransaction(readonly=True)
  def ReadAllForemanRules(self, cursor=None):
    """Returns all stored rules deserialized as ForemanCondition objects."""
    cursor.execute("SELECT rule FROM foreman_rules")
    res = []
    for rule, in cursor.fetchall():
      res.append(foreman_rules.ForemanCondition.FromSerializedBytes(rule))
    return res

  @mysql_utils.WithTransaction()
  def RemoveExpiredForemanRules(self, cursor=None):
    """Deletes every rule whose expiration time is in the past."""
    now = rdfvalue.RDFDatetime.Now()
    cursor.execute(
        "DELETE FROM foreman_rules WHERE expiration_time < FROM_UNIXTIME(%s)",
        [mysql_utils.RDFDatetimeToTimestamp(now)])
|
import traceback
from vstruct2.compat import int2bytes, bytes2int
# This routine was coppied from vivisect to allow vstruct
# to be free from dependencies
MAX_WORD = 16

def initmask(bits):
    '''Return an integer mask with the low *bits* bits set.'''
    return (1 << bits) - 1

# Precomputed masks for every bit width up to MAX_WORD bytes.
bitmasks = [initmask(width) for width in range(MAX_WORD * 8)]

def bitmask(value, bits):
    '''Truncate *value* to its low *bits* bits.'''
    return value & bitmasks[bits]
class v_base:
    '''
    Base class for all VStruct types
    '''
    def __init__(self):
        self._vs_onset = []
        self._vs_isprim = True

    def __len__(self):
        return self.vsSize()

    def __bytes__(self):
        return self.vsEmit()

    def vsOnset(self, callback, *args, **kwargs):
        '''
        Register *callback* to fire whenever this field's value changes.
        NOTE: it also fires during parse(), not only on explicit updates.
        Returns self so registrations can be chained.
        '''
        self._vs_onset.append((callback, args, kwargs))
        return self

    def _fire_onset(self):
        # A broken callback must never abort parsing: report and continue.
        for callback, args, kwargs in self._vs_onset:
            try:
                callback(*args, **kwargs)
            except Exception:
                traceback.print_exc()
class v_prim(v_base):
    '''
    Base class for all vstruct primitive types.

    Values are decoded lazily: vsParse/vsLoad only record the backing
    buffer/fd and offset, and _prim_getval decodes on first access.
    Subclasses implement _prim_norm / _prim_emit / _prim_parse.
    '''
    def __init__(self, size=None, valu=None):
        v_base.__init__(self)
        self._vs_size = size
        # FIX: the documented default size=None used to crash on size * 8.
        self._vs_bits = None if size is None else size * 8
        self._vs_value = self._prim_norm(valu)
        self._vs_parent = None

        # on-demand field parsing
        self._vs_backfd = None
        self._vs_backoff = None
        self._vs_backbytes = None
        self._vs_writeback = False

    def __repr__(self):
        return repr(self._prim_getval())

    def vsGetTypeName(self):
        '''Return this primitive's type name (the class name).'''
        return self.__class__.__name__

    def vsParse(self, bytez, offset=0, writeback=False):
        '''
        Byte parsing method for VStruct primitives.

        Records *bytez*/*offset* for lazy decoding and returns the offset
        just past this field.  With writeback=True, later value updates
        are written back into *bytez* (which must then be mutable).
        '''
        self._vs_value = None
        self._vs_backoff = offset
        self._vs_backbytes = bytez
        self._vs_writeback = writeback
        retval = offset + self.vsSize()
        self._fire_onset()
        return retval

    def vsLoad(self, fd, offset=0, writeback=False):
        '''Like vsParse, but backed by a seekable file-like object.'''
        self._vs_value = None
        self._vs_backfd = fd
        self._vs_backoff = offset
        self._vs_writeback = writeback
        retval = offset + self.vsSize()
        self._fire_onset()
        return retval

    def vsSize(self):
        '''
        Return the size of the field.
        '''
        return self._vs_size

    def vsResize(self, size):
        '''
        Resizing callback which can dynamically change the size
        of a primitive.
        '''
        self._vs_size = size

    def _prim_setval(self, newval):
        valu = self._prim_norm(newval)
        self._vs_value = valu

        # if requested, write changes back to bytearray / fd
        if self._vs_writeback:
            byts = self._prim_emit(valu)
            # FIX: identity comparison with None (was "!= None").
            if self._vs_backbytes is not None:
                self._vs_backbytes[self._vs_backoff:self._vs_backoff + len(byts)] = byts
            if self._vs_backfd is not None:
                self._vs_backfd.seek(self._vs_backoff)
                self._vs_backfd.write(byts)

        self._fire_onset()

    def _prim_getval(self):
        # trigger on-demand parsing if needed
        # FIX: identity comparison with None (was "== None").
        if self._vs_value is None:
            if self._vs_backfd:
                self._vs_value = self._prim_load(self._vs_backfd, self._vs_backoff)
            elif self._vs_backbytes:
                self._vs_value = self._prim_parse(self._vs_backbytes, self._vs_backoff)
        return self._vs_value

    def _prim_load(self, fd, offset):
        # easy base case: seek, read the field's bytes, parse at offset 0
        fd.seek(offset)
        byts = fd.read(self._vs_size)
        return self._prim_parse(byts, 0)

    def vsEmit(self):
        '''Serialize the current value back into bytes.'''
        return self._prim_emit(self._prim_getval())

    def _prim_norm(self, x):
        raise Exception('Implement Me')

    def _prim_emit(self, x):
        raise Exception('Implement Me')

    def _prim_parse(self, bytez, offset):
        raise Exception('Implement Me')
class v_int(v_prim):
    '''Integer primitive with configurable size, endianness and sign.'''

    def __init__(self, valu=0, size=4, endian='little', signed=False, enum=None):
        v_prim.__init__(self, valu=valu, size=size)
        self._vs_enum = enum        # optional enum used for symbolic repr()
        self._vs_endian = endian
        self._vs_signed = signed

    def __int__(self):
        return self._prim_getval()

    def __repr__(self):
        valu = self._prim_getval()
        # Prefer the symbolic enum name when one is known for this value.
        # FIX: identity comparisons with None (were "!= None").
        if self._vs_enum is not None:
            enum = self._vs_enum[valu]
            if enum is not None:
                return enum
        return repr(valu)

    def vsResize(self, size):
        # Keep the truncation mask in sync with the new byte width.
        self._vs_bits = size * 8
        return v_prim.vsResize(self, size)

    def _prim_emit(self, x):
        return int2bytes(x, self._vs_size, byteorder=self._vs_endian, signed=self._vs_signed)

    def _prim_norm(self, x):
        # Truncate the value to the field's bit width.
        return bitmask(x, self._vs_bits)

    def _prim_parse(self, byts, offset):
        return bytes2int(byts, self._vs_size, byteorder=self._vs_endian, signed=self._vs_signed, off=offset)
|
import pytest
| import tensorflow as tf
import numpy as np
import tfs.core.layer.ops as ops
from tfs.core.layer.dropout import Dropout
from tfs.network import Network
net = Network()

@pytest.fixture
def l():
    """Dropout layer fixture with keep_prob=1.0 (pass-through dropout)."""
    return Dropout(
        net,
        keep_prob=1.0,
    )
class TestDropout:
    def test_build_inverse(self, l):
        """Building the dropout layer must preserve the input shape."""
        input_tensor = tf.zeros([1, 10, 10, 4])
        output_tensor = l.build(input_tensor)
        assert output_tensor.get_shape().as_list() == [1, 10, 10, 4]
# coding = utf-8
__author__ = 'Forec'
import xlwt
import re
# Convert "<id>" : "<name>" pairs from city.txt into an Excel sheet.
book = xlwt.Workbook(encoding = 'utf-8', style_compression=0)
sheet = book.add_sheet('student',cell_overwrite_ok = True)

# One capture group for the numeric id, one for the quoted value.
info = re.compile(r'\"(\d+)\" : \"(.*?)\"')

with open('city.txt',"r") as f:
    data = f.read()

# Write one spreadsheet row per match, one column per captured group.
for row, record in enumerate(info.findall(data)):
    for col, cell in enumerate(record):
        sheet.write(row, col, cell)

book.save('city.xls')
# Copyright (C) 2014 Optiv, Inc. (brad.spe | ngler@optiv.com)
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
from lib.cuckoo.common.abstracts import Signature
class InjectionRWX(Signature):
    name = "injection_rwx"
    description = "Creates RWX memory"
    severity = 2
    confidence = 50
    categories = ["injection"]
    authors = ["Optiv"]
    minimum = "1.2"
    evented = True

    def __init__(self, *args, **kwargs):
        Signature.__init__(self, *args, **kwargs)

    filter_apinames = set(["NtAllocateVirtualMemory","NtProtectVirtualMemory","VirtualProtectEx"])
    filter_analysistypes = set(["file"])

    def on_call(self, call, process):
        """Flag any memory call that requests PAGE_EXECUTE_READWRITE."""
        api = call["api"]
        # The argument holding the protection flags differs per API.
        if api in ("NtAllocateVirtualMemory", "VirtualProtectEx"):
            protection_arg = "Protection"
        elif api == "NtProtectVirtualMemory":
            protection_arg = "NewAccessProtection"
        else:
            return None
        # PAGE_EXECUTE_READWRITE
        if self.get_argument(call, protection_arg) == "0x00000040":
            return True
|
# -*- mode: python; coding: utf-8; -*-
import os
APP_NAME = "SLog"
VERSION = "0.9.4"
WEBSITE = "http://vialinx.org"
LICENSE = """
SLog is a PyGTK-based GUI for the LightLang SL dictionary.
Copyright 2007 Nasyrov Renat <renatn@gmail.com>
This file is part of SLog.
SLog is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
SLog is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
along with SLog; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
INSTALL_PREFIX = "@prefix@"
PIXMAP_DIR = | os.path.join(INSTALL_PREFIX, "s | hare", "pixmaps")
LOCALE_DIR = os.path.join(INSTALL_PREFIX, "share", "locale")
DATA_DIR = os.path.join(INSTALL_PREFIX, "share", "slog")
LOGO_ICON = "slog.png"
LOGO_ICON_SPY = "slog_spy.png"
#FTP_LL_URL = "ftp://ftp.lightlang.org.ru/dicts"
FTP_LL_URL = "ftp://etc.edu.ru/pub/soft/for_linux/lightlang"
FTP_DICTS_URL = FTP_LL_URL + "/dicts"
FTP_REPO_URL = FTP_DICTS_URL + "/repodata/primary.xml"
REPO_FILE = os.path.expanduser("~/.config/slog/primary.xml")
SL_TMP_DIR = "/tmp/sl"
def get_icon(filename):
return os.path.join(PIXMAP_DIR, filename)
|
var='<project>',
help='Default project (name or ID)',
)
common.add_project_domain_option_to_parser(parser)
parser.add_argument(
'--password',
metavar='<password>',
help='Set user password',
)
parser.add_argument(
'--password-prompt',
dest="password_prompt",
action="store_true",
help='Prompt interactively for password',
)
parser.add_argument(
'--email',
metavar='<email-address>',
help='Set user email address',
)
parser.add_argument(
'--description',
metavar='<description>',
help='User description',
)
enable_group = parser.add_mutually_exclusive_group()
enable_group.add_argument(
'--enable',
action='store_true',
h | elp='Enable user (default)',
)
enable_group.add_argument(
'--disable',
action='store_true',
| help='Disable user',
)
parser.add_argument(
'--or-show',
action='store_true',
help=_('Return existing user'),
)
return parser
    def take_action(self, parsed_args):
        """Create the user; with --or-show, return an existing one on conflict."""
        identity_client = self.app.client_manager.identity

        # Resolve the default project, if one was given, to its ID.
        project_id = None
        if parsed_args.project:
            project_id = common.find_project(identity_client,
                                             parsed_args.project,
                                             parsed_args.project_domain).id

        # Resolve the owning domain, if one was given, to its ID.
        domain_id = None
        if parsed_args.domain:
            domain_id = common.find_domain(identity_client,
                                           parsed_args.domain).id

        enabled = True
        if parsed_args.disable:
            enabled = False
        # --password-prompt overrides any --password value.
        if parsed_args.password_prompt:
            parsed_args.password = utils.get_password(self.app.stdin)

        try:
            user = identity_client.users.create(
                name=parsed_args.name,
                domain=domain_id,
                default_project=project_id,
                password=parsed_args.password,
                email=parsed_args.email,
                description=parsed_args.description,
                enabled=enabled
            )
        except ks_exc.Conflict as e:
            # Name already taken: either return the existing user
            # (--or-show) or propagate the conflict to the caller.
            if parsed_args.or_show:
                user = utils.find_resource(identity_client.users,
                                           parsed_args.name,
                                           domain_id=domain_id)
                self.log.info('Returning existing user %s', user.name)
            else:
                raise e

        # "links" is API bookkeeping, not useful display output.
        user._info.pop('links')
        return zip(*sorted(six.iteritems(user._info)))
class DeleteUser(command.Command):
    """Delete user(s)"""

    def get_parser(self, prog_name):
        parser = super(DeleteUser, self).get_parser(prog_name)
        parser.add_argument(
            'users',
            metavar='<user>',
            nargs="+",
            help='User(s) to delete (name or ID)',
        )
        parser.add_argument(
            '--domain',
            metavar='<domain>',
            help='Domain owning <user> (name or ID)',
        )
        return parser

    def take_action(self, parsed_args):
        identity_client = self.app.client_manager.identity

        # Resolve the owning domain once; reuse it for every lookup below.
        domain = None
        if parsed_args.domain:
            domain = common.find_domain(identity_client, parsed_args.domain)

        for user in parsed_args.users:
            if domain is None:
                user_obj = utils.find_resource(identity_client.users, user)
            else:
                user_obj = utils.find_resource(identity_client.users,
                                               user,
                                               domain_id=domain.id)
            identity_client.users.delete(user_obj.id)
class ListUser(command.Lister):
    """List users"""

    def get_parser(self, prog_name):
        parser = super(ListUser, self).get_parser(prog_name)
        parser.add_argument(
            '--domain',
            metavar='<domain>',
            help='Filter users by <domain> (name or ID)',
        )
        # --group and --project are mutually exclusive filters.
        project_or_group = parser.add_mutually_exclusive_group()
        project_or_group.add_argument(
            '--group',
            metavar='<group>',
            help='Filter users by <group> membership (name or ID)',
        )
        project_or_group.add_argument(
            '--project',
            metavar='<project>',
            help='Filter users by <project> (name or ID)',
        )
        parser.add_argument(
            '--long',
            action='store_true',
            default=False,
            help='List additional fields in output',
        )
        return parser

    def take_action(self, parsed_args):
        """List users, optionally filtered by domain, group or project."""
        identity_client = self.app.client_manager.identity

        domain = None
        if parsed_args.domain:
            domain = common.find_domain(identity_client,
                                        parsed_args.domain).id

        group = None
        if parsed_args.group:
            group = common.find_group(identity_client,
                                      parsed_args.group,
                                      parsed_args.domain).id

        if parsed_args.project:
            # Project filtering goes through role assignments: any user
            # holding a role on the project is included.
            if domain is not None:
                project = utils.find_resource(
                    identity_client.projects,
                    parsed_args.project,
                    domain_id=domain
                ).id
            else:
                project = utils.find_resource(
                    identity_client.projects,
                    parsed_args.project,
                ).id

            assignments = identity_client.role_assignments.list(
                project=project)

            # NOTE(stevemar): If a user has more than one role on a project
            # then they will have two entries in the returned data. Since we
            # are looking for any role, let's just track unique user IDs.
            user_ids = set()
            for assignment in assignments:
                if hasattr(assignment, 'user'):
                    user_ids.add(assignment.user['id'])

            # NOTE(stevemar): Call find_resource once we have unique IDs, so
            # it's fewer trips to the Identity API, then collect the data.
            data = []
            for user_id in user_ids:
                user = utils.find_resource(identity_client.users, user_id)
                data.append(user)
        else:
            data = identity_client.users.list(
                domain=domain,
                group=group,
            )

        # Column handling: headers rename the *_Id columns for display.
        if parsed_args.long:
            columns = ['ID', 'Name', 'Default Project Id', 'Domain Id',
                       'Description', 'Email', 'Enabled']
            column_headers = copy.deepcopy(columns)
            column_headers[2] = 'Project'
            column_headers[3] = 'Domain'
        else:
            columns = ['ID', 'Name']
            column_headers = columns

        return (
            column_headers,
            (utils.get_item_properties(
                s, columns,
                formatters={},
            ) for s in data)
        )
class SetUser(command.Command):
"""Set user properties"""
def get_parser(self, prog_name):
parser = super(SetUser, self).get_parser(prog_name)
parser.add_argument(
'user',
metavar='<user>',
help='User to change (name or ID)',
)
parser.add_argument(
'--name',
metavar='<name>',
help='Set user name',
)
parser.add_argument(
'--project',
metavar='<project>',
help='Set default project (name or ID)',
)
common.add_project_domain_option_to_parser(parser)
parser.add_argument(
'--password',
metavar='<password>',
help='Set user password',
)
|
from temboo.Library.Zendesk.Search.SearchA | ll import SearchAll, SearchAllInputSet | , SearchAllResultSet, SearchAllChoreographyExecution
from temboo.Library.Zendesk.Search.SearchAnonymous import SearchAnonymous, SearchAnonymousInputSet, SearchAnonymousResultSet, SearchAnonymousChoreographyExecution
|
import random
## Course texture colors ##
###########################
class Course(object):
    """A race course: randomized road geometry plus texture colors
    loaded from ``res/stage/<num>.dat``."""

    # Keys whose value in the .dat file is an "r,g,b" triple.
    _COLOR_KEYS = ("fog", "light_road", "dark_road", "light_offroad",
                   "dark_offroad", "light_wall", "dark_wall",
                   "light_rumble", "dark_rumble", "ceiling")

    def __init__(self, num):
        ## Default colors, fall back to these.
        ## FIX: the original had no default for "ceiling", so a .dat file
        ## without a ceiling entry raised NameError.
        colors = dict((key, [0, 0, 0]) for key in self._COLOR_KEYS)

        ## Course road geometry: 8 slots, the first 7 randomized below.
        self.geometry = [0,0,0,0,0,0,0,0]
        last_seg = 0
        ## Start with a straightaway by default
        for i in range(7):
            ## Add a segment that's different from the previous one
            self.geometry[i] = last_seg
            last_seg += random.choice((-1,1))
            if last_seg < 1:
                last_seg = 3
            elif last_seg > 3:
                last_seg = 1

        ## Length of each segment, larger number = longer strip
        self.strip = 3 ## Wall
        self.road = 2  ## Road

        ## Load texture colors to overwrite defaults.
        ## FIX: the original used line.strip("fog = ") etc., which strips a
        ## *character set* from both ends, not a prefix -- it only worked by
        ## accident because digits never appear in the stripped sets.  Parse
        ## explicit "key = value" pairs instead, and close the file via with.
        with open("res/stage/%d.dat" % num, "r") as stage_file:
            for line in stage_file:
                if " = " not in line:
                    continue
                key, _, value = line.partition(" = ")
                key = key.strip()
                value = value.strip()
                if key in colors:
                    parts = value.split(",")
                    colors[key] = [int(parts[0]), int(parts[1]), int(parts[2])]
                elif key == "strip":   ## Length of wall segment
                    self.strip = int(value)
                elif key == "road":    ## Length of road segment
                    self.road = int(value)

        fog = colors["fog"]
        ceiling = colors["ceiling"]
        light_wall = colors["light_wall"]
        dark_wall = colors["dark_wall"]

        ## Define start line, finish line, and dark and light strip lists
        white = [200,200,200]
        self.start = [white, white, light_wall, white, fog, white]
        self.finish = [white, white, dark_wall, white, fog, white]
        self.dark_colors = [colors["dark_road"], colors["dark_offroad"],
                            dark_wall, colors["dark_rumble"], fog, None, ceiling]
        self.light_colors = [colors["light_road"], colors["light_offroad"],
                             light_wall, colors["light_rumble"], fog, white, ceiling]
|
"""
End to end test of the internal reporting user table loading task.
"""
import os
import logging
import datetime
import pandas
from luigi.date_interval import Date
from edx.analytics.tasks.tests.acceptance import AcceptanceTestCase
from edx.analytics.tasks.url import url_path_join
log = logging.getLogger(__name__)


class InternalReportingUserLoadAcceptanceTest(AcceptanceTestCase):
    """End-to-end test of the workflow to load the internal reporting warehouse's user table."""

    # Fixture tracking log uploaded in setUp.
    INPUT_FILE = 'location_by_course_tracking.log'
    # Single-day interval matching the fixture data's date.
    DATE_INTERVAL = Date(2014, 7, 21)

    def setUp(self):
        """Load SQL fixtures and the tracking log the workflow reads from."""
        super(InternalReportingUserLoadAcceptanceTest, self).setUp()

        # Set up the mock LMS databases.
        self.execute_sql_fixture_file('load_auth_user_for_internal_reporting_user.sql')
        self.execute_sql_fixture_file('load_auth_userprofile.sql')

        # Put up the mock tracking log for user locations.
        self.upload_tracking_log(self.INPUT_FILE, datetime.datetime(2014, 7, 21))

    def test_internal_reporting_user(self):
        """Tests the workflow for the internal reporting user table, end to end."""
        self.task.launch([
            'LoadInternalReportingUserToWarehouse',
            '--interval', self.DATE_INTERVAL.to_string(),
            '--user-country-output', url_path_join(self.test_out, 'user'),
            '--n-reduce-tasks', str(self.NUM_REDUCERS),
        ])
        self.validate_output()

    def validate_output(self):
        """Validates the output, comparing it to a csv of all the expected output from this workflow."""
        with self.vertica.cursor() as cursor:
            expected_output_csv = os.path.join(self.data_dir, 'output', 'acceptance_expected_d_user.csv')
            expected = pandas.read_csv(expected_output_csv, parse_dates=True)

            # Pull the whole warehouse table and mirror its column layout.
            cursor.execute("SELECT * FROM {schema}.d_user".format(schema=self.vertica.schema_name))
            response = cursor.fetchall()
            d_user = pandas.DataFrame(response, columns=['user_id', 'user_year_of_birth', 'user_level_of_education',
                                                         'user_gender', 'user_email', 'user_username',
                                                         'user_account_creation_time',
                                                         'user_last_location_country_code'])

            try:  # A ValueError will be thrown if the column names don't match or the two data frames are not square.
                self.assertTrue(all(d_user == expected))
            except ValueError:
                self.fail("Expected and returned data frames have different shapes or labels.")
|
# -*- coding: utf-8 -*-
# ERPNext - web based ERP (http://erpnext.com)
# Copyright (C) 2012 Web Notes Technologies Pvt Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public | License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# default settings that can be made for a user.
from __future__ import unicode_literals
import frappe
# product_name = "ERPNext"
product_name = "let | zERP"
user_defaults = {
"Company": "company",
"Territory": "territory"
}
|
__author__ = 'Sulantha'
import logging
class PipelineLogger:
    """Thin level-dispatch wrapper around the stdlib logging module."""

    # Kept for backward compatibility (and as the set of valid levels):
    # level name -> root-logger convenience function.
    logFunctions={'info':logging.info,
                  'debug':logging.debug,
                  'warning':logging.warning,
                  'error':logging.error,
                  'critical':logging.critical,
                  'exception':logging.exception}

    @staticmethod
    def log(moduleName, level, message):
        """Log *message* at *level* (case-insensitive) on the logger
        named *moduleName*.

        BUG FIX: the original fetched the module logger but then logged
        through the root logger (logging.info etc.), so records never
        carried the module name and per-module handlers never fired.
        """
        level = level.lower()
        if level not in PipelineLogger.logFunctions:
            # Same failure mode as the original dict lookup.
            raise KeyError(level)
        # Level names double as logging.Logger method names.
        getattr(logging.getLogger(moduleName), level)(message)
| |
notator import model
from rpython.rtyper.llannotation import SomePtr
from rpython.annotator.signature import SignatureError
from rpython.translator.translator import TranslationContext, graphof
from rpython.rtyper.lltypesystem import rstr
from rpython.rtyper.annlowlevel import LowLevelAnnotatorPolicy
def annotate_at(f, policy=None):
    """Annotate *f* in a fresh TranslationContext and return the annotator."""
    t = TranslationContext()
    t.config.translation.check_str_without_nul = True
    a = t.buildannotator(policy=policy)
    # Seed every argument with s_ImpossibleValue so only the @signature
    # declarations (not call sites) determine the argument annotations.
    a.annotate_helper(f, [model.s_ImpossibleValue]*f.func_code.co_argcount, policy=policy)
    return a

def sigof(a, f):
    # returns [param1, param2, ..., ret]
    g = graphof(a.translator, f)
    return [a.binding(v) for v in g.startblock.inputargs] + [a.binding(g.getreturnvar())]

def getsig(f, policy=None):
    """Annotate *f* and return its [param..., ret] annotation list."""
    a = annotate_at(f, policy=policy)
    return sigof(a, f)

def check_annotator_fails(caller):
    """Assert that annotating *caller* raises AnnotatorError naming it."""
    exc = py.test.raises(model.AnnotatorError, annotate_at, caller).value
    assert caller.func_name in str(exc)
def test_bookkeeping():
    # @signature must record its spec yet leave the function usable as-is.
    @signature('x', 'y', returns='z')
    def f(a, b):
        return a + len(b)
    f.foo = 'foo'
    assert f._signature_ == (('x', 'y'), 'z')
    assert f.func_name == 'f'
    assert f.foo == 'foo'
    assert f(1, 'hello') == 6

def test_basic():
    # int/str/char type specs map onto the corresponding annotations.
    @signature(types.int(), types.str(), returns=types.char())
    def f(a, b):
        return b[a]
    assert getsig(f) == [model.SomeInteger(), model.SomeString(), model.SomeChar()]
def test_arg_errors():
    # Calls violating the declared argument types must fail at the call site.
    @signature(types.int(), types.str(), returns=types.int())
    def f(a, b):
        return a + len(b)
    @check_annotator_fails
    def ok_for_body(): # would give no error without signature
        f(2.0, 'b')
    @check_annotator_fails
    def bad_for_body(): # would give error inside 'f' body, instead errors at call
        f('a', 'b')

def test_return():
    # The declared return type shows up both standalone and through callers.
    @signature(returns=types.str())
    def f():
        return 'a'
    assert getsig(f) == [model.SomeString()]

    @signature(types.str(), returns=types.str())
    def f(x):
        return x
    def g():
        return f('a')
    a = annotate_at(g)
    assert sigof(a, f) == [model.SomeString(), model.SomeString()]
def test_return_errors():
    # Returning a value outside the declared return type is an error.
    @check_annotator_fails
    @signature(returns=types.int())
    def int_not_char():
        return 'a'
    @check_annotator_fails
    @signature(types.str(), returns=types.int())
    def str_to_int(s):
        return s

    @signature(returns=types.str())
    def str_not_None():
        return None
    @check_annotator_fails
    def caller_of_str_not_None():
        return str_not_None()

@py.test.mark.xfail
def test_return_errors_xfail():
    # Known gap: returning None where str is declared only fails via callers.
    @check_annotator_fails
    @signature(returns=types.str())
    def str_not_None():
        return None

def test_none():
    @signature(returns=types.none())
    def f():
        pass
    assert getsig(f) == [model.s_None]

def test_float():
    @signature(types.longfloat(), types.singlefloat(), returns=types.float())
    def f(a, b):
        return 3.0
    assert getsig(f) == [model.SomeLongFloat(), model.SomeSingleFloat(), model.SomeFloat()]

def test_unicode():
    @signature(types.unicode(), returns=types.int())
    def f(u):
        return len(u)
    assert getsig(f) == [model.SomeUnicodeString(), model.SomeInteger()]

def test_str0():
    # str0/unicode0 are the no-NUL-byte variants of str/unicode.
    @signature(types.unicode0(), returns=types.str0())
    def f(u):
        return 'str'
    assert getsig(f) == [model.SomeUnicodeString(no_nul=True),
                         model.SomeString(no_nul=True)]
def test_ptr():
    """Low-level pointer signatures require the low-level annotator policy."""
    policy = LowLevelAnnotatorPolicy()
    @signature(types.ptr(rstr.STR), returns=types.none())
    def f(buf):
        pass
    argtype = getsig(f, policy=policy)[0]
    assert isinstance(argtype, SomePtr)
    assert argtype.ll_ptrtype.TO == rstr.STR
    def g():
        f(rstr.mallocstr(10))
    getsig(g, policy=policy)  # only checks that the call annotates cleanly
def test_list():
    """types.list() gives a resized list; its item type is enforced strictly."""
    @signature(types.list(types.int()), returns=types.int())
    def f(a):
        return len(a)
    argtype = getsig(f)[0]
    assert isinstance(argtype, model.SomeList)
    item = argtype.listdef.listitem
    assert item.s_value == model.SomeInteger()
    assert item.resized == True
    # wrong item type and wrong container type both fail at the call
    @check_annotator_fails
    def ok_for_body():
        f(['a'])
    @check_annotator_fails
    def bad_for_body():
        f('a')
    @signature(returns=types.list(types.char()))
    def ff():
        return ['a']
    # mutating the returned list to an incompatible item type fails
    @check_annotator_fails
    def mutate_broader():
        ff()[0] = 'abc'
    @check_annotator_fails
    def mutate_unrelated():
        ff()[0] = 1
    @check_annotator_fails
    @signature(types.list(types.char()), returns=types.int())
    def mutate_in_body(l):
        l[0] = 'abc'
        return len(l)
    # appending a compatible item is allowed (the list is resized)
    def can_append():
        l = ff()
        l.append('b')
    getsig(can_append)
def test_array():
    """types.array() gives a non-resized list: append must fail to annotate."""
    @signature(returns=types.array(types.int()))
    def f():
        return [1]
    rettype = getsig(f)[0]
    assert isinstance(rettype, model.SomeList)
    item = rettype.listdef.listitem
    assert item.s_value == model.SomeInteger()
    assert item.resized == False
    def try_append():
        l = f()
        l.append(2)
    check_annotator_fails(try_append)
def test_dict():
    """types.dict() annotates key and value types on the dictdef."""
    @signature(returns=types.dict(types.str(), types.int()))
    def f():
        return {'a': 1, 'b': 2}
    rettype = getsig(f)[0]
    assert isinstance(rettype, model.SomeDict)
    assert rettype.dictdef.dictkey.s_value == model.SomeString()
    assert rettype.dictdef.dictvalue.s_value == model.SomeInteger()
def test_instance():
    """types.instance() pins arguments/returns to an exact class level.

    Passing a superclass instance or None (can_be_None defaults to False)
    must make annotation fail at the call site.
    """
    class C1(object):
        pass
    class C2(C1):
        pass
    class C3(C2):
        pass
    @signature(types.instance(C3), returns=types.instance(C2))
    def f(x):
        assert isinstance(x, C2)
        return x
    argtype, rettype = getsig(f)
    assert isinstance(argtype, model.SomeInstance)
    assert argtype.classdef.classdesc.pyobj == C3
    assert isinstance(rettype, model.SomeInstance)
    assert rettype.classdef.classdesc.pyobj == C2
    @check_annotator_fails
    def ok_for_body():  # would give no error without the signature
        f(C2())
    @check_annotator_fails
    def bad_for_body():
        f(C1())
    # renamed from a duplicate 'ok_for_body' definition that shadowed the
    # first one above (the decorator still ran, but the name clash was a
    # latent bug)
    @check_annotator_fails
    def none_not_ok_for_body():
        f(None)
def test_instance_or_none():
    """can_be_None=True admits None while still enforcing the class level."""
    class C1(object):
        pass
    class C2(C1):
        pass
    class C3(C2):
        pass
    @signature(types.instance(C3, can_be_None=True), returns=types.instance(C2, can_be_None=True))
    def f(x):
        assert isinstance(x, C2) or x is None
        return x
    argtype, rettype = getsig(f)
    assert isinstance(argtype, model.SomeInstance)
    assert argtype.classdef.classdesc.pyobj == C3
    assert argtype.can_be_None
    assert isinstance(rettype, model.SomeInstance)
    assert rettype.classdef.classdesc.pyobj == C2
    assert rettype.can_be_None
    @check_annotator_fails
    def ok_for_body():
        f(C2())
    @check_annotator_fails
    def bad_for_body():
        f(C1())
def test_self():
    """types.self() in a @finishsigs class unifies to the defining class."""
    @finishsigs
    class C(object):
        @signature(types.self(), types.self(), returns=types.none())
        def f(self, other):
            pass
    class D1(C):
        pass
    class D2(C):
        pass
    def g():
        D1().f(D2())
    a = annotate_at(g)
    # both self and other annotate as instances of C, not the subclasses
    argtype = sigof(a, C.__dict__['f'])[0]
    assert isinstance(argtype, model.SomeInstance)
    assert argtype.classdef.classdesc.pyobj == C
def test_self_error():
    """types.self() without @finishsigs raises a SignatureError naming both."""
    class C(object):
        @signature(types.self(), returns=types.none())
        def incomplete_sig_meth(self):
            pass
    exc = py.test.raises(SignatureError, annotate_at, C.incomplete_sig_meth).value
    assert 'incomplete_sig_meth' in str(exc)
    assert 'finishsigs' in str(exc)
def test_any_as_argument():
@signature(types.any(), types.int(), returns=types.float())
def f(x, y):
return x + y
@signature(types.int(), returns=types.float())
def g(x):
return f(x, x)
sig = getsig(g)
assert sig == [model.SomeInteger(), model.SomeFloat()]
@signature(types.float(), returns=types.float())
def g(x):
return f(x, 4)
sig = getsig(g)
assert sig == [model.SomeFloat(), model.SomeFloat()]
@signature(types.str(), returns=types.int())
def cannot_add_s |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# sin título.py
#
# Copyright 2012 Jesús Hómez <jesus@soneview>
#
# This program | is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT | ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
from modelo import Model
# Smoke test of the Model sale-id helpers (Python 2 script).
# The commented lines below are one-off schema setup steps kept for reference:
#Model().conectar()
#Model().borrar_tablas_para_inventario()
#~ Model().crear_tablas_para_inventario(2011,12)
#~ Model().crear_tabla_entradas()
# Highest existing sale id vs. the next id that would be assigned.
venta_id = Model().venta_id_max()
new_venta_id = Model().nuevo_venta_id()
print venta_id
print new_venta_id
|
"""Command line tool for creating audioMD metadata."""
from __future__ import unicode_literals, print_function
import os
import sys
import click
import six
import audiomd
from siptools.mdcreator import MetsSectionCreator
from siptools.utils import fix_missing_metadata, scrape_file
click.disable_unicode_literals_warning = True
FILEDATA_KEYS = ['audio_data_encoding', 'bits_per_sample',
'data_rate', 'data_rate_mode', 'sampling_frequency']
AUDIOINFO_KEYS = ['duration', 'num_channels']
ALLOW_UNAV = ['audio_data_encoding', 'codec_creator_app',
'codec_creator_app_version', 'codec_name',
'duration', 'num_channels']
ALLOW_ZERO = ['bits_per_sample', 'data_rate', 'sampling_frequency']
@click.command()
@click.argument(
    'filename', type=str)
@click.option(
    '--workspace', type=click.Path(exists=True),
    default='./workspace/',
    metavar='<WORKSPACE PATH>',
    help="Workspace directory for the metadata files. "
         "Defaults to ./workspace/")
@click.option(
    '--base_path', type=click.Path(exists=True), default='.',
    metavar='<BASE PATH>',
    help="Source base path of digital objects. If used, give path to "
         "the file in relation to this base path.")
def main(filename, workspace, base_path):
    """Write audioMD metadata for an audio file or streams.
    FILENAME: Relative path to the file from current directory or from
    --base_path.
    """
    create_audiomd(filename, workspace, base_path)
    # click discards the return value; kept for conventional CLI symmetry
    return 0
def create_audiomd(filename, workspace="./workspace/", base_path="."):
    """
    Write audioMD metadata for an audio file or its streams.

    :filename: Audio file path relative to base path
    :workspace: Workspace path
    :base_path: Base path
    """
    relative_path = os.path.normpath(filename)
    absolute_path = os.path.normpath(os.path.join(base_path, filename))
    md_creator = AudiomdCreator(workspace)
    md_creator.add_audiomd_md(absolute_path, relative_path)
    md_creator.write()
class AudiomdCreator(MetsSectionCreator):
    """
    Subclass of MetsSectionCreator, which generates audioMD metadata for audio
    files.
    """
    def add_audiomd_md(self, filepath, filerel=None):
        """Create audioMD metadata for a audio file and append it
        to self.md_elements.
        If a file is not a video container, then the audio stream metadata is
        processed in file level. Video container includes streams which need
        to be processed separately one at a time.
        :filepath: Audio file path
        :filerel: Audio file path relative to base path
        """
        # Create audioMD metadata
        audiomd_dict = create_audiomd_metadata(
            filepath, filerel, self.workspace
        )
        # A single stream keyed '0' means a plain audio file: reference the
        # metadata by file only; otherwise reference each stream separately.
        if '0' in audiomd_dict and len(audiomd_dict) == 1:
            self.add_md(metadata=audiomd_dict['0'],
                        filename=(filerel if filerel else filepath))
        else:
            for index, audio in six.iteritems(audiomd_dict):
                self.add_md(metadata=audio,
                            filename=(filerel if filerel else filepath),
                            stream=index)
    # pylint: disable=too-many-arguments
    def write(self, mdtype="OTHER", mdtypeversion="2.0",
              othermdtype="AudioMD", section=None, stdout=False,
              file_metadata_dict=None,
              ref_file="create-audiomd-md-references.jsonl"):
        """
        Write AudioMD metadata.
        The extra parameters (section, stdout, file_metadata_dict) keep the
        signature compatible with MetsSectionCreator.write and are
        intentionally not forwarded.
        """
        super(AudiomdCreator, self).write(
            mdtype=mdtype, mdtypeversion=mdtypeversion,
            othermdtype=othermdtype, ref_file=ref_file
        )
def create_audiomd_metadata(filename, filerel=None, workspace=None,
                            streams=None):
    """Creates and returns audioMD XML sections.
    :filename: Audio file path
    :filerel: Audio file path relative to base path
    :workspace: Workspace path
    :streams: Metadata dict of streams. Will be created if None.
    :returns: Dict of audioMD XML sections keyed by stream index (as a
              string), or None if the file has no audio streams.
    """
    if streams is None:
        # Scrape technical metadata from the file itself; well-formedness
        # is assumed to have been checked earlier in the workflow.
        (streams, _, _) = scrape_file(filepath=filename,
                                      filerel=filerel,
                                      workspace=workspace,
                                      skip_well_check=True)
    fix_missing_metadata(streams, filename, ALLOW_UNAV, ALLOW_ZERO)
    audiomd_dict = {}
    for index, stream_md in six.iteritems(streams):
        # non-audio streams (e.g. video in a container) are skipped
        if stream_md['stream_type'] != 'audio':
            continue
        stream_md = _fix_data_rate(stream_md)
        file_data_elem = _get_file_data(stream_md)
        audio_info_elem = _get_audio_info(stream_md)
        audiomd_elem = audiomd.create_audiomd(
            file_data=file_data_elem,
            audio_info=audio_info_elem
        )
        audiomd_dict[six.text_type(index)] = audiomd_elem
    if not audiomd_dict:
        print('The file has no audio streams. No AudioMD metadata created.')
        return None
    return audiomd_dict
def _fix_data_rate(stream_dict):
    """Normalize 'data_rate' to an integer string by rounding.

    The original implementation scanned every key of the dict to find
    'data_rate'; a direct lookup is equivalent and clearer.  Non-numeric
    values (e.g. '(:unav)' placeholders) are left untouched, as are falsy
    values (missing key, None, empty string).

    :stream_dict: Metadata dict of a stream
    :returns: The same dict, mutated in place
    """
    data_rate = stream_dict.get('data_rate')
    if data_rate:
        try:
            rounded = int(round(float(data_rate)))
        except ValueError:
            # not parseable as a number: keep the original value
            pass
        else:
            stream_dict['data_rate'] = six.text_type(rounded)
    return stream_dict
def _get_file_data(stream_dict):
    """
    Create and return the audioMD fileData XML element.

    :stream_dict: Metadata dict of a stream
    :returns: AudioMD fileData element
    """
    def _to_camel(name):
        # 'bits_per_sample' -> 'bitsPerSample'
        head = name.split('_')[0]
        tail = name.split('_')[1:]
        return head + ''.join(part.title() for part in tail)

    params = {_to_camel(key): stream_dict[key] for key in FILEDATA_KEYS}
    params['compression'] = audiomd.amd_compression(
        stream_dict['codec_creator_app'],
        stream_dict['codec_creator_app_version'],
        stream_dict['codec_name'],
        stream_dict['codec_quality'])
    return audiomd.amd_file_data(params)
def _get_audio_info(stream_dict):
    """Creates and returns the audioInfo XML element.
    :stream_dict: Metadata dict of a stream
    :returns: AudioMD audioInfo element
    """
    # duration/num_channels may hold unavailable-data placeholders
    # (they are listed in ALLOW_UNAV); they are passed through verbatim
    return audiomd.amd_audio_info(
        duration=stream_dict['duration'],
        num_channels=stream_dict['num_channels'])
if __name__ == '__main__':
    # click supplies the CLI arguments at runtime; pylint cannot see that
    RETVAL = main()  # pylint: disable=no-value-for-parameter
    sys.exit(RETVAL)
|
deDocs=True):
def streamResult(resp):
CHUNK_SIZE=1024
data = resp.read(CHUNK_SIZE)
while len(data) > 0:
yield data
data = resp.read(CHUNK_SIZE)
try:
| resp = self._getView(urlBase,startKey=startKey,endKey=endKey,includeDocs=includeDocs)
return streamResult(resp)
except HTTPError as ex:
abort(404, "not found")
def _orderParmaByView(self,params,view):
def makeEndKey(key):
from copy import deepcopy
newkey = deepcopy(key)
#if complex key
if isinstance(newkey, list):
# get last element | in key
last = newkey[-1]
# if the last element is a list, just append an empty object to the last element's list
if isinstance(last, list):
last.append({})
# if the last element in an object, it becomes a bit tricky
# *** note that the key parameter MUST have been originally json parsed with the object_pairs_hook=collections.OrderedDict otherwise
# key order won't be guaranteed to be the same as what CouchDB will use!!!!
elif isinstance(last, dict):
lastkey = last.keys()[-1]
# since there's no easy way to increment a float accurately, instead append a new key that 'should' sort after the previous key.
if (isinstance(last[lastkey], float)):
last[lastkey+u'\ud7af'] = None
# if it's something else... this thing should recurse and keep going.
else:
last[lastkey] = makeEndKey(last[lastkey])
# if we got here, it's nothing really special, so we'll just append a {} to newkey
else:
newkey.append({})
# this if to handle the odd case where we have string as either the key or the value of an object in a complex key.
elif isinstance(newkey, (str, unicode, basestring)):
newkey=newkey+u'\ud7af'
# integer... so just increment 1.
elif isinstance(newkey, int):
newkey += 1
# if we skipped everything else - we don't have a strategy to deal with it... so don't
return newkey
def makeStartsWithEndKey(key):
from copy import deepcopy
newkey = deepcopy(key)
# this the base case for keys that are just strings, append the funky unicode char so that it grabs everything from
# "foo" to "foo\ud7af", which is technically the only way we know how to deal with starts with.
if isinstance(newkey, (str, unicode, basestring)):
newkey=newkey+u'\ud7af'
# if this is a complex key, then get the last element and recurse
elif isinstance(newkey, list):
newkey[-1] = makeStartsWithEndKey(newkey[-1])
# if the last element in an object, it becomes a bit tricky, because you must modify the last key, which implies
# order of keys was maintained when the value was originally parsed.
# *** IMPORTANT: The key parameter MUST have been originally json parsed with the object_pairs_hook=collections.OrderedDict otherwise
# *** key order won't be guaranteed to be the same as what CouchDB will use!!!!
elif isinstance(last, dict):
lastkey = last.keys()[-1]
#take the value from the last key and recurse.
last[lastkey] = makeEndKey(last[lastkey])
# if we skipped everything else - we don't have a strategy to deal with it as a Starts With key, so just return
else:
newkey = key
return newkey
def hasParamFor(funcName):
if funcName == 'ts' and ('from' in params or 'until' in params):
return True
elif funcName == 'discriminator' and ('discriminator' in params or 'discriminator-starts-with' in params):
return True
elif funcName == 'resource' and ('resource' in params or 'resource-starts-with' in params):
return True
else:
return False
def populateTs(startKey, endKey, pos, isLast):
if 'from' in params:
startKey.append(self._convertDateTime(params['from']))
elif pos == 1:
startKey.append(self._convertDateTime(datetime.min.isoformat() + "Z"))
if 'until' in params:
endKey.append(self._convertDateTime(params['until']))
elif pos == 1:
endKey.append(self._convertDateTime(datetime.utcnow().isoformat()+"Z"))
return startKey, endKey
def populateDiscriminator(startKey, endKey, pos, isLast):
if 'discriminator' in params:
# preserve key order!!!
try:
discriminator = json.loads(params['discriminator'], object_pairs_hook=collections.OrderedDict)
except:
log.error(sys.exc_info()[0])
discriminator = params['discriminator']
startKey.append(discriminator)
endKey.append(discriminator)
elif 'discriminator-starts-with' in params:
# preserve key order!!!
try:
discriminator = json.loads(params['discriminator-starts-with'], object_pairs_hook=collections.OrderedDict)
except:
log.error(sys.exc_info()[0])
discriminator = params['discriminator-starts-with']
startKey.append(discriminator)
endKey.append(discriminator)
endKey = makeStartsWithEndKey(endKey)
return startKey, endKey
# else:
# startKey.append('')
# endKey.append(u'\ud7af')
def populateResource(startKey, endKey, pos, isLast):
if 'resource' in params:
startKey.append(params['resource'])
endKey.append(params['resource'])
elif 'resource-starts-with' in params:
startKey.append(params['resource-starts-with'])
endKey.append(params['resource-starts-with']+u'\ud7af')
return startKey, endKey
# else:
# startKey.append('')
# endKey.append(u'\ud7af')
startKey=[]
endKey=[]
includeDocs = True
if "ids_only" in params:
includeDocs = not params
funcs = {
"discriminator":populateDiscriminator,
'resource':populateResource,
'ts':populateTs
}
queryOrderParts = view.split('-by-')
aggregate = queryOrderParts[0]
queryParams= queryOrderParts[1].split('-')
# if we don't have explicit params for this, then omit.
if hasParamFor(aggregate):
queryParams.append(aggregate)
log.error("added aggregate")
for pos, q in enumerate(queryParams,start=1):
startkey, endKey = funcs[q](startKey, endKey, pos, len(queryParams)==pos)
if len(endKey) > 0 and 'resource-starts-with' not in params and 'discriminator-starts-with' not in params:
log.error("making endkey")
endKey = makeEndKey(endKey)
# startkey, endKey = funcs[aggregate](startKey, endKey, len(queryParams)+1, True)
return startKey if len(startKey) > 0 else None, endKey if len(endKey) > 0 else None, includeDocs
def get(self, dataservice="",view='',list=''):
"""GET /extract/id: Show a specific intem"""
try:
db_url = '/'.join([appConfig['couchdb.url'],appConfig['couchdb.db.resourcedata']])
db = couchdb.Database(db_url)
dsDocument = db['_design/'+dataservice]
if "dataservices" not in dsDocument:
abort(406, "I |
# -*- coding: utf-8 -*-
__all__ = ("clear_tags", "get_tex | t_from_html", "clear_text")
def clear_tags(obj):
    """
    Remove not used blocks, such as table of contents, advertisements.

    Right-aligned <div> and <table> elements are treated as layout chrome
    and emptied; the <title> element is emptied as well.
    """
    SEARCH_TAG = ["div", "table"]
    for tag_name in SEARCH_TAG:
        for row in obj.soup(tag_name):
            # .get() avoids a KeyError for tags without an align attribute
            # (the original indexed row["align"] and crashed on such tags)
            if row.get("align") == "right":
                row.clear()
    res = obj.soup("title")
    if len(res):
        res[0].clear()
def join_rows(obj):
    """
    Join formatted rows into paragraphs.
    Need check splitted words.
    Skipped in this solution
    """
    # Intentional no-op stub (also not exported via __all__).
    pass
def get_text_from_html(obj):
    """
    Return text without html tags
    """
    # delegates to the soup object's get_text(); result stored on obj.text
    obj.text = obj.soup.get_text()
def clear_text(obj):
    """
    Remove special/not used symbols.

    Currently this only strips TAB characters from obj.text.
    """
    obj.text = "".join(obj.text.split("\t"))
|
distinct groups for the given query grouped by the
fields in group_by.
"""
sql, params = self._get_num_groups_sql(query, group_by)
cursor = readonly_connection.connection().cursor()
cursor.execute(sql, params)
return self._cursor_rowcount(cursor)
class Machine(dbmodels.Model):
    '''
    A machine used to run a test
    '''
    #: A numeric and automatic integer that uniquely identifies a given
    #: machine. This is the primary key for the resulting table created
    #: from this model.
    machine_idx = dbmodels.AutoField(primary_key=True)
    #: The name, such as a FQDN, of the machine that run the test. Must be
    #: unique.
    hostname = dbmodels.CharField(unique=True, max_length=255)
    #: the machine group
    machine_group = dbmodels.CharField(blank=True, max_length=240)
    #: the machine owner
    owner = dbmodels.CharField(blank=True, max_length=240)
    class Meta:
        # legacy TKO table name, predates Django naming conventions
        db_table = 'tko_machines'
class Kernel(dbmodels.Model):
    '''
    The Linux Kernel used during a test
    '''
    #: A numeric and automatic integer that uniquely identifies a given
    #: kernel. This is the primary key for the resulting table created
    #: from this model.
    kernel_idx = dbmodels.AutoField(primary_key=True)
    #: the kernel hash
    kernel_hash = dbmodels.CharField(max_length=105, editable=False)
    #: the base kernel version string
    base = dbmodels.CharField(max_length=90)
    #: printable, human-readable kernel description
    printable = dbmodels.CharField(max_length=300)
    class Meta:
        # legacy TKO table name
        db_table = 'tko_kernels'
class Patch(dbmodels.Model):
    '''
    A Patch applied to a Linux Kernel source during the build process
    '''
    #: A reference to a :class:`Kernel`
    kernel = dbmodels.ForeignKey(Kernel, db_column='kernel_idx')
    #: A descriptive name for the patch
    name = dbmodels.CharField(blank=True, max_length=240)
    #: The URL where the patch was fetched from
    url = dbmodels.CharField(blank=True, max_length=900)
    #: hash of the patch ('hash' column; renamed to avoid the builtin)
    the_hash = dbmodels.CharField(blank=True, max_length=105, db_column='hash')
    class Meta:
        # legacy TKO table name
        db_table = 'tko_patches'
class Status(dbmodels.Model):
    '''
    The possible results of a test
    These objects are populated automatically from a
    :ref:`fixture file <django:initial-data-via-fixtures>`
    '''
    #: A numeric and automatic integer that uniquely identifies a given
    #: status. This is the primary key for the resulting table created
    #: from this model.
    status_idx = dbmodels.AutoField(primary_key=True)
    #: A short descriptive name for the status. This exact name is searched for
    #: while the TKO parser is reading and parsing status files
    word = dbmodels.CharField(max_length=30)
    class Meta:
        # legacy TKO table name
        db_table = 'tko_status'
class Job(dbmodels.Model, model_logic.ModelExtensions):
    """
    A test job, having one or many tests and their results
    """
    #: primary key
    job_idx = dbmodels.AutoField(primary_key=True)
    #: unique job tag
    tag = dbmodels.CharField(unique=True, max_length=100)
    #: descriptive label for the job
    label = dbmodels.CharField(max_length=300)
    #: name of the user who submitted the job
    username = dbmodels.CharField(max_length=240)
    #: the :class:`Machine` the job ran on
    machine = dbmodels.ForeignKey(Machine, db_column='machine_idx')
    #: job lifecycle timestamps (all optional)
    queued_time = dbmodels.DateTimeField(null=True, blank=True)
    started_time = dbmodels.DateTimeField(null=True, blank=True)
    finished_time = dbmodels.DateTimeField(null=True, blank=True)
    #: If this job was scheduled through the AFE application, this points
    #: to the related :class:`autotest.frontend.afe.models.Job` object
    afe_job_id = dbmodels.IntegerField(null=True, default=None)
    objects = model_logic.ExtendedManager()
    class Meta:
        # legacy TKO table name
        db_table = 'tko_jobs'
class JobKeyval(dbmodels.Model):
    """Arbitrary key/value metadata attached to a :class:`Job`."""
    job = dbmodels.ForeignKey(Job)
    key = dbmodels.CharField(max_length=90)
    value = dbmodels.CharField(blank=True, max_length=300)
    class Meta:
        db_table = 'tko_job_keyvals'
class Test(dbmodels.Model, model_logic.ModelExtensions,
           model_logic.ModelWithAttributes):
    """A single test execution within a :class:`Job`, with its result."""
    test_idx = dbmodels.AutoField(primary_key=True)
    job = dbmodels.ForeignKey(Job, db_column='job_idx')
    #: test name
    test = dbmodels.CharField(max_length=300)
    #: results subdirectory for this test
    subdir = dbmodels.CharField(blank=True, max_length=300)
    kernel = dbmodels.ForeignKey(Kernel, db_column='kernel_idx')
    status = dbmodels.ForeignKey(Status, db_column='status')
    #: free-form reason accompanying the status (e.g. failure message)
    reason = dbmodels.CharField(blank=True, max_length=3072)
    machine = dbmodels.ForeignKey(Machine, db_column='machine_idx')
    finished_time = dbmodels.DateTimeField(null=True, blank=True)
    started_time = dbmodels.DateTimeField(null=True, blank=True)
    objects = model_logic.ExtendedManager()
    def _get_attribute_model_and_args(self, attribute):
        # hook for ModelWithAttributes: attributes set through this API are
        # always flagged as user-created
        return TestAttribute, dict(test=self, attribute=attribute,
                                   user_created=True)
    def set_attribute(self, attribute, value):
        # ensure non-user-created attributes remain immutable
        try:
            TestAttribute.objects.get(test=self, attribute=attribute,
                                      user_created=False)
            raise ValueError('Attribute %s already exists for test %s and is '
                             'immutable' % (attribute, self.test_idx))
        except TestAttribute.DoesNotExist:
            # no parser-created attribute of that name: safe to (over)write
            super(Test, self).set_attribute(attribute, value)
    class Meta:
        db_table = 'tko_tests'
class TestAttribute(dbmodels.Model, model_logic.ModelExtensions):
    """Key/value attribute attached to a :class:`Test`."""
    test = dbmodels.ForeignKey(Test, db_column='test_idx')
    attribute = dbmodels.CharField(max_length=90)
    value = dbmodels.CharField(blank=True, max_length=300)
    #: False for parser-created attributes, which Test.set_attribute
    #: treats as immutable
    user_created = dbmodels.BooleanField(default=False)
    objects = model_logic.ExtendedManager()
    class Meta:
        db_table = 'tko_test_attributes'
class IterationAttribute(dbmodels.Model, model_logic.ModelExtensions):
    """Key/value attribute for one iteration of a :class:`Test`."""
    # this isn't really a primary key, but it's necessary to appease Django
    # and is harmless as long as we're careful
    test = dbmodels.ForeignKey(Test, db_column='test_idx', primary_key=True)
    iteration = dbmodels.IntegerField()
    attribute = dbmodels.CharField(max_length=90)
    value = dbmodels.CharField(blank=True, max_length=300)
    objects = model_logic.ExtendedManager()
    class Meta:
        db_table = 'tko_iteration_attributes'
class IterationResult(dbmodels.Model, model_logic.ModelExtensions):
    """Numeric (float) result value for one iteration of a :class:`Test`."""
    # see comment on IterationAttribute regarding primary_key=True
    test = dbmodels.ForeignKey(Test, db_column='test_idx', primary_key=True)
    iteration = dbmodels.IntegerField()
    attribute = dbmodels.CharField(max_length=90)
    value = dbmodels.FloatField(null=True, blank=True)
    objects = model_logic.ExtendedManager()
    class Meta:
        db_table = 'tko_iteration_result'
class TestLabel(dbmodels.Model, model_logic.ModelExtensions):
    """A named label that can be applied to any number of tests."""
    name = dbmodels.CharField(max_length=80, unique=True)
    description = dbmodels.TextField(blank=True)
    tests = dbmodels.ManyToManyField(Test, blank=True,
                                     db_table='tko_test_labels_tests')
    # field used by ModelExtensions for name-based lookups
    name_field = 'name'
    objects = model_logic.ExtendedManager()
    class Meta:
        db_table = 'tko_test_labels'
class SavedQuery(dbmodels.Model, model_logic.ModelExtensions):
    """A saved results query, identified by owner and name."""
    # TODO: change this to foreign key once DBs are merged
    owner = dbmodels.CharField(max_length=80)
    name = dbmodels.CharField(max_length=100)
    #: serialized query state restored when the saved query is opened
    url_token = dbmodels.TextField()
    class Meta:
        db_table = 'tko_saved_queries'
class EmbeddedGraphingQuery(dbmodels.Model, model_logic.ModelExtensions):
url_token = dbmodels.TextField(null=False, blank=False)
graph_type = dbmodels.CharField(max_length=16, null=False, blank=False)
params = dbmodels.TextField(null=False, blank=False)
last_updated = dbmodels.DateTimeField(null=False, blank=False,
editable=False)
# refresh_time shows the time at which a thread is updating the cached
# image, or NULL if no one is updating the image. This is used so that only
# one thread is updating the cached image at a time (see
# graphing_utils.handle_plot_request)
refresh_time = |
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# tvalacarta - XBMC Plugin
# Canal para ecuador tv
# http://blog.tvalacarta.info/plugin-xbmc/tvalacarta/
#------------------------------------------------------------
import os
import sys
import urlparse,re
import urllib
import datetime
from core import logger
from core import scrapertools
from core.item import Item
import youtube_channel
# Channel identifier used by the tvalacarta plugin framework.
__channel__ = "ecuadortv"
DEBUG = True
# YouTube account whose playlists form this channel's content.
YOUTUBE_CHANNEL_ID = "RTVEcuador"
def isGeneric():
    """Channel-API hook: this channel uses the generic interface."""
    return True
def mainlist(item):
    """Build the root menu from the YouTube channel's playlists."""
    logger.info("tvalacarta.channels.ecuadortv mainlist")
    return youtube_channel.playlists(item,YOUTUBE_CHANNEL_ID)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging, os
logging.basicConfig(level=logging.INFO)
from deepy.networks import RecursiveAutoEncoder
from deepy.trainers import SGD | Trainer, LearningRateAnnealer
from util import get_data, VECTOR_SIZE
model_path = os.path.join(os.path.dirname(__file__), "models", "rae1.gz")
if __name__ == '__main__':
    # Train a recursive auto-encoder (10-dim internal representation) with
    # plain SGD plus learning-rate annealing, then persist the parameters.
    model = RecursiveAutoEncoder(input_dim=VECTOR_SIZE, rep_dim=10)
    trainer = SGDTrainer(model)
    annealer = LearningRateAnnealer()
    trainer.run(get_data(), epoch_controllers=[annealer])
    model.save_params(model_path)
|
def migrate(cr, version):
    """Re-point crm_event_compassion.zip_id from better_zip to city_zip.

    The old better_zip id of each row was preserved by OpenUpgrade in the
    res_city_zip column ``openupgrade_legacy_12_0_better_zip_id``; use it to
    translate the foreign key, after dropping the old constraint.

    :param cr: database cursor
    :param version: installed module version; falsy on a fresh install,
        in which case there is nothing to migrate
    """
    if not version:
        return
    query = """
        ALTER TABLE crm_event_compassion
        DROP CONSTRAINT crm_event_compassion_zip_id_fkey;
        UPDATE crm_event_compassion e
        SET zip_id = (
            SELECT id FROM res_city_zip
            WHERE openupgrade_legacy_12_0_better_zip_id = e.zip_id)
    """
    cr.execute(query)
|
# Copyright 2017 Huawei Technologies Co.,LTD.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from six.moves import http_client
from cyborg.api.controllers.v1.deployables import Deployable
from cyborg.tests.unit.api.controllers.v1 import base as v1_test
from cyborg.tests.unit import fake_deployable
from cyborg.agent.rpcapi import AgentAPI
class TestFPGAProgramController(v1_test.APITestV1):
    """Tests for the deployable FPGA 'program' API endpoint."""
    def setUp(self):
        super(TestFPGAProgramController, self).setUp()
        self.headers = self.gen_headers(self.context)
        # fixed deployable UUID reused by the tests
        self.deployable_uuids = ['0acbf8d6-e02a-4394-aae3-57557d209498']
    @mock.patch('cyborg.objects.Deployable.get')
    @mock.patch('cyborg.agent.rpcapi.AgentAPI.program_fpga_with_bitstream')
    def test_program(self, mock_program, mock_get_dep):
        """A PATCH to .../program returns 200 and echoes the deployable UUID."""
        # programming requires the admin role; body carries the bitstream
        # image UUID inside a JSON-patch 'replace' operation
        self.headers['X-Roles'] = 'admin'
        self.headers['Content-Type'] = 'application/json'
        dep_uuid = self.deployable_uuids[0]
        fake_dep = fake_deployable.fake_deployable_obj(self.context,
                                                       uuid=dep_uuid)
        mock_get_dep.return_value = fake_dep
        mock_program.return_value = None
        body = [{"image_uuid": "9a17439a-85d0-4c53-a3d3-0f68a2eac896"}]
        response = self.\
            patch_json('/accelerators/deployables/%s/program' % dep_uuid,
                       [{'path': '/program', 'value': body,
                         'op': 'replace'}],
                       headers=self.headers)
        self.assertEqual(http_client.OK, response.status_code)
        data = response.json_body
        self.assertEqual(dep_uuid, data['uuid'])
|
'''
Created on Oct 11, 2014
@author: mshepher
'''
from globals import Globals
class Order(object):
    """One feeding order parsed from a CSV line.

    The class constants below are column indices into the comma-separated
    record.  The Dutch names translate roughly as: EIGENAAR=owner,
    DIER=animal, GESLACHT=sex, GECASTREERD=neutered, AKTIEF=active,
    OVERGANGS=transitional, GEWICHT=weight, PAKKETKG=package weight (kg),
    SOORT=species/kind, PUP=puppy, RAS=breed.
    """
    EIGENAAR = 0
    DIER = 1
    GESLACHT = 2
    GECASTREERD = 3
    AKTIEF = 4
    OVERGANGS = 5
    GEWICHT = 6 #numerical
    PAKKETKG = 7 #float
    SOORT = 8
    PUP = 9
    RAS = 10
    def __init__(self,order):
        '''order = line from csv file, unparsed'''
        rec = order.strip().split(',')
        # the base columns are kept verbatim for round-tripping (get_base)
        self.base = rec[:self.RAS+1]
        self.owner, self.animal = rec[:self.GESLACHT]
        self.weight = float(rec[self.GEWICHT])
        self.package = float(rec[self.PAKKETKG])
        self.kind = rec[self.SOORT].upper()
        self.ras = rec[self.RAS].upper()
        # columns after RAS: allergies, optionally followed by a '|'
        # element and then preferences
        rest = rec[self.RAS+1:]
        if '|' in rest:
            splitter = rest.index('|')
            self.donts = [i.upper() for i in rest[:splitter]]
            self.prefers = [i.upper() for i in rest[splitter+1:]]
        else:
            self.donts = [i.upper() for i in rest]
            self.prefers = []
        self.factor = 1.0       # meal-size multiplier (see get_meal_size)
        self.result = None      # computed result, set via set_result
        self.portie = 'beide'   # portion choice; Dutch 'beide' = 'both'
    def get_prefers(self):
        return self.prefers
    def set_prefers(self, value):
        self.prefers = value
    def get_base(self):
        return ','.join(self.base)
    def is_allergic(self,stuff):
        '''true if animal is allergic to stuff'''
        return stuff in self.donts
    def get_donts(self):
        return self.donts
    def set_donts(self, donts):
        self.donts = donts
    def get_days(self):
        '''Days the package lasts at the breed-scaled daily ration.'''
        return round(self.package / (self.weight * Globals.FACTOR[self.ras]))
    def get_meal_size(self):
        '''Size of a single meal; the /2 presumably assumes two meals a
        day -- TODO confirm.'''
        return self.weight * self.factor * Globals.FACTOR[self.ras] / 2
    def get_weight(self):
        return self.weight
    def get_package(self):
        return self.package
    def get_kind(self):
        return self.kind
    def get_owner(self):
        return self.owner
    def get_animal(self):
        return self.animal
    def get_ras(self):
        return self.ras
    def set_result(self, result):
        self.result = result
    def get_result(self):
        return self.result
    def get_factor(self):
        return self.factor
    def set_factor(self, factor):
        self.factor = factor
    def get_portie(self):
        return self.portie
    def set_portie(self, portie):
        self.portie = portie
from djang | o.db import models
from django.contrib.auth.models import User
class Post(models.Model):
    """A user-authored post with a title and body text."""
    title = models.CharField(max_length=255)
    body = models.TextField()
    # Explicit on_delete matches the implicit pre-Django-2.0 default
    # (CASCADE) and is mandatory from Django 2.0 onwards.
    user = models.ForeignKey(User, on_delete=models.CASCADE)
|
import csv
from util.helper import *
import util.plo | t_defaults
from matplotlib.ticker import MaxNLocator
from pylab import figure
parser = argparse.ArgumentParser()
parser.add_argument('--file', '-f',
                    help="data file directory",
                    required=True,
                    action="store",
                    dest="file")
parser.add_argument('-o',
                    help="Output directory",
                    required=True,
                    action="store",
                    dest="dir")
args = parser.parse_args()

# One figure per burst interval, with one throughput curve per TCP variant.
# (Removed the unused 'to_plot' and 'graphfiles' lists.)
cong = ['reno', 'cubic', 'vegas']
bursts = ['0.03', '0.05', '0.07', '0.09']
for burst in bursts:
    for tcptype in cong:
        data = read_list(args.file + '/' + tcptype + '-' + burst + '-raw_data.txt')
        xs = col(0, data)
        ys = col(1, data)
        plt.plot(xs, ys, label=tcptype)
    plt.title('Shrew-attack TCP throughput. Burst = ' + burst)
    plt.legend(loc='upper left')
    plt.xlabel('seconds')
    # fixed axis-label typo ('thoroughput')
    plt.ylabel("% throughput")
    plt.grid(True)
    plt.savefig("{0}/{1}-result.png".format(args.dir, burst))
    plt.close()
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
import unittest
try:
from unittest import mock
except ImportError:
import mock
import virttest
from virttest.utils_zchannels import ChannelPaths, SubchannelPaths
OUT_OK = ["Device Subchan. DevType CU Type Use PIM PAM POM CHPIDs ",
"----------------------------------------------------------------------",
"0.0.0600 0.0.0000 1732/01 1731/01 yes 80 80 ff 17000000 00000000",
"0.0.0601 0.0.0001 1732/01 1731/01 yes 80 80 ff 17000000 00000000",
"0.0.0602 0.0.0002 1732/01 1731/01 yes 80 80 ff 17000000 00000000",
"0.0.540c 0.0.24ac 3390/0c 3990/e9 yes f0 f0 ff 01021112 00000000",
"0.0.540d 0.0.24ad 3390/0c 3990/e9 yes f0 f0 ff 01021112 00000000",
"none 0.0.26aa f0 f0 ff 11122122 00000000",
"none 0.0.26ab f0 f0 ff 11122122 00000000",
"0.0.570c 0.0.27ac 3390/0c 3990/e9 yes f0 f0 ff 12212231 00000000"]
class TestSubchannelPaths(unittest.TestCase):
    """Tests for SubchannelPaths against canned `lscss` output."""

    @staticmethod
    def _patch_cmd_output(lines):
        # Route the module's command runner to canned output with rc == 0.
        virttest.utils_zchannels.cmd_status_output = mock.Mock(
            return_value=(0, "\n".join(lines)))

    def _paths_for(self, lines):
        # Build a SubchannelPaths instance populated from `lines`.
        self._patch_cmd_output(lines)
        paths = SubchannelPaths()
        paths.get_info()
        return paths

    def test_get_info(self):
        paths = self._paths_for(OUT_OK)
        self.assertEqual(8, len(paths.devices))

    def test_get_first_unused_and_safely_removable(self):
        device = self._paths_for(OUT_OK).get_first_unused_and_safely_removable()
        self.assertIsNotNone(device)
        self.assertEqual("0.0.26aa", device[1])

    def test_get_first_unused_and_safely_removable_not_safe(self):
        # Make a used device share all CHPIDs with the unused one, so
        # removing the unused one would not be safe.
        lines = OUT_OK.copy()
        lines[6] = lines[6].replace("01021112", "11122122")
        device = self._paths_for(lines).get_first_unused_and_safely_removable()
        self.assertIsNone(device)

    def test_get_first_unused_and_safely_removable_not_first(self):
        # Make the first unused device unsafe; the second must be picked.
        lines = OUT_OK.copy()
        lines[7] = lines[7].replace("11122122", "01021112")
        device = self._paths_for(lines).get_first_unused_and_safely_removable()
        self.assertIsNotNone(device)
        self.assertEqual("0.0.26ab", device[1])
class TestChannelPaths(unittest.TestCase):
    """Tests for ChannelPaths helpers."""

    def test__split(self):
        # An 8-digit CHPID string holds four two-digit channel-path ids.
        ids = ChannelPaths._split("12345678")
        self.assertEqual(4, len(ids))
        self.assertEqual("0.12", ids[0])
        self.assertEqual("0.78", ids[3])
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
import os
from pants.util.contextutil import temporary_dir
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class EclipseIntegrationTest(PantsRunIntegrationTest):
  """Integration tests that run `goal eclipse` on example targets and
  sanity-check the generated project files."""

  def _eclipse_test(self, specs, project_dir=os.path.join('.pants.d', 'tmp', 'test-eclipse'),
                    project_name='project'):
    """Helper method that tests eclipse generation on the input spec list.

    Returns the generated .classpath contents so callers can assert on them.
    """
    if not os.path.exists(project_dir):
      os.makedirs(project_dir)
    with temporary_dir(root_dir=project_dir) as path:
      pants_run = self.run_pants(['goal', 'eclipse',] + specs
                                 + ['--no-pantsrc', '--eclipse-project-dir={dir}'.format(dir=path)])
      # assertEqual: assertEquals is a deprecated unittest alias.
      self.assertEqual(pants_run.returncode, self.PANTS_SUCCESS_CODE,
                       "goal eclipse expected success, got {0}\n"
                       "got stderr:\n{1}\n"
                       "got stdout:\n{2}\n".format(pants_run.returncode,
                                                   pants_run.stderr_data,
                                                   pants_run.stdout_data))
      expected_files = ('.classpath', '.project',)
      workdir = os.path.join(path, project_name)
      self.assertTrue(os.path.exists(workdir),
                      'Failed to find project_dir at {dir}.'.format(dir=workdir))
      self.assertTrue(all(os.path.exists(os.path.join(workdir, name))
                          for name in expected_files))
      # Return contents of .classpath so we can verify it.
      with open(os.path.join(workdir, '.classpath')) as classpath_f:
        classpath = classpath_f.read()
      # Should be at least one input; if not we may have the wrong target path.
      self.assertIn('<classpathentry kind="src"', classpath)
      return classpath

  # Test Eclipse generation on example targets; ideally should test that the build "works"
  def test_eclipse_on_protobuf(self):
    self._eclipse_test(['examples/src/java/com/pants/examples/protobuf::'])

  def test_eclipse_on_jaxb(self):
    self._eclipse_test(['examples/src/java/com/pants/examples/jaxb/main'])

  def test_eclipse_on_unicode(self):
    self._eclipse_test(['testprojects/src/java/com/pants/testproject/unicode::'])

  def test_eclipse_on_hello(self):
    self._eclipse_test(['examples/src/java/com/pants/examples/hello::'])

  def test_eclipse_on_annotations(self):
    self._eclipse_test(['examples/src/java/com/pants/examples/annotation::'])

  def test_eclipse_on_all_examples(self):
    self._eclipse_test(['examples/src/java/com/pants/examples::'])

  def test_eclipse_on_java_sources(self):
    classpath = self._eclipse_test(['testprojects/src/scala/com/pants/testproject/javasources::'])
    self.assertIn('path="testprojects.src.java"', classpath)

  def test_eclipse_on_thriftdeptest(self):
    self._eclipse_test(['testprojects/src/java/com/pants/testproject/thriftdeptest::'])

  def test_eclipse_on_scaladepsonboth(self):
    classpath = self._eclipse_test(['testprojects/src/scala/com/pants/testproject/scaladepsonboth::'])
    # Previously Java dependencies didn't get included
    self.assertIn('path="testprojects.src.java"', classpath)
|
#!/usr/bin/env python
# coding=utf-8
from __future__ import division, print_function, unicode_literals
import re
import unicodedata
# Wiki-style heading patterns.  All patterns are raw strings; the original
# ")=*)\s*$" was a non-raw literal, making "\s" an invalid escape sequence
# (DeprecationWarning today, a SyntaxError in future Python versions).
h1_start = re.compile(r"^\s*=(?P<title>[^=]+)=*[ \t]*")
valid_title = re.compile(r"[^=]+")
# Level 2-6 heading on its own line: "== title ==" etc. (MULTILINE anchors).
general_heading = re.compile(r"^\s*(={2,6}(?P<title>" + valid_title.pattern +
                             r")=*)\s*$", flags=re.MULTILINE)
# Anything that is not a word character, dash, underscore or whitespace.
invalid_symbols = re.compile(r"[^\w\-_\s]+")
def strip_accents(s):
    """Return *s* with accents removed: NFD-decompose, then drop all
    combining-mark ('Mn') code points."""
    decomposed = unicodedata.normalize('NFD', s)
    return ''.join(ch for ch in decomposed
                   if unicodedata.category(ch) != 'Mn')
# Transliteration table (code point -> ASCII digraph) for German umlauts and
# sharp s, built for use with str.translate().
REPLACEMENTS = str.maketrans({
    'ä': 'ae',
    'ö': 'oe',
    'ü': 'ue',
    'ß': 'ss',
    'Ä': 'Ae',
    'Ö': 'Oe',
    'Ü': 'Ue',
    'ẞ': 'SS',
})
def substitute_umlauts(s):
    """Transliterate German umlauts and sharp s in *s* to ASCII digraphs
    using the module-level REPLACEMENTS table."""
    return s.translate(REPLACEMENTS)
def remove_unallowed_chars(s):
    """Strip every character that is not a word character, dash,
    underscore or whitespace."""
    return invalid_symbols.sub('', s)
def remove_and_compress_whitespaces(s):
    """Collapse each whitespace run in *s* to a single underscore and trim
    underscores from both ends."""
    words = s.split()
    return '_'.join(words).strip('_')
def turn_into_valid_short_title(title, short_title_set=(), max_length=20):
    """Derive a unique, URL-safe short title from *title*.

    Transliterates umlauts, strips accents and disallowed characters,
    compresses whitespace to underscores, trims leading digits/dashes/
    underscores and truncates to *max_length*.  An empty result falls back
    to 'sub'.  If the candidate is already in *short_title_set*, a numeric
    suffix is appended (shrinking the stem so the total length stays within
    *max_length*) until a free name is found.
    """
    candidate = substitute_umlauts(title)
    candidate = strip_accents(candidate)
    candidate = remove_unallowed_chars(candidate)
    candidate = remove_and_compress_whitespaces(candidate)
    candidate = candidate.lstrip('1234567890-_')[:max_length]
    if not candidate:
        candidate = 'sub'
    if candidate not in short_title_set:
        return candidate
    counter = 1
    while True:
        suffix = str(counter)
        attempt = candidate[:max_length - len(suffix)] + suffix
        if attempt not in short_title_set:
            return attempt
        counter += 1
def get_heading_matcher(level=0):
    """Build a MULTILINE regex matching wiki headings.

    level 1-6 matches exactly that heading depth; level 0 matches any depth
    (1-6).  The optional '§ short title' suffix is captured as
    'short_title'.  Raises ValueError for any other level.

    Bug fix: the any-level quantifier was rendered as "{1, 6}"; the space
    makes Python's re treat the braces as LITERAL characters instead of a
    quantifier, so level=0 never matched real headings.
    """
    if 0 < level < 7:
        quantifier = "%d" % level
    elif level == 0:
        # "{m,n}" must contain no whitespace to count as a quantifier.
        quantifier = "1,6"
    else:
        raise ValueError(
            "level must be between 1 and 6 or 0, but was %d." % level)
    pattern = r"^\s*={%s}(?P<title>[^=§]+)" \
              r"(?:§\s*(?P<short_title>[^=§\s][^=§]*))?=*\s*$"
    return re.compile(pattern % quantifier, flags=re.MULTILINE)
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from Structure import Structure;
class GIF_IMAGE(Structure):
  # One image inside a GIF stream: image descriptor, optional local color
  # table, the LZW minimum-code-size byte, and the compressed pixel data.
  type_name = 'GIF_IMAGE';
  def __init__(self, stream, offset, max_size, parent, name):
    # Parses the image sub-structure found at `offset` in `stream`.
    import C;
    from GIF_BLOCK import GIF_BLOCK;
    from GIF_COLORTABLE import GIF_COLORTABLE;
    from GIF_IMAGE_DESCRIPTOR import GIF_IMAGE_DESCRIPTOR;
    from LZW_compressed_data import LZW_compressed_data;
    Structure.__init__(self, stream, offset, max_size, parent, name);
    self._descriptor = self.Member(GIF_IMAGE_DESCRIPTOR, 'descriptor');
    flags = self._descriptor._Flags;
    self._has_local_color_table = flags._LocalColorTable.value == 1;
    if self._has_local_color_table:
      # The table size field stores the exponent: entries = 2 ** (field + 1).
      self._local_color_table_entries = \
          2 ** (flags._SizeLocalColorTable.value + 1);
      self._local_color_table_sorted = flags._Sort.value == 1;
      self._local_color_table = self.Member(GIF_COLORTABLE, \
          'local_color_table', self._local_color_table_entries, \
          self._local_color_table_sorted);
    else:
      self._local_color_table = None;
    self._lzw_minimum_code_size = self.Member(C.BYTE, 'LZW_minimum_code_size');
    if self._lzw_minimum_code_size.value == 0:
      # Zero is not a usable LZW code size; record a warning rather than fail.
      self._lzw_minimum_code_size.warnings.append('expected value > 0');
    self._compressed_pixel_data_container = self.Member(GIF_BLOCK, 'pixel_data');
    # Unwrap the GIF data blocks, then LZW-decompress them into the raw
    # pixel string (Width * Height bytes — presumably one byte per pixel;
    # TODO confirm against LZW_compressed_data).
    self._pixel_data_container = \
        self._compressed_pixel_data_container.ContainMember( \
            LZW_compressed_data, 'pixel_data', \
            self._lzw_minimum_code_size.value);
    self._pixel_data = self._pixel_data_container.ContainMember( \
        C.STRING, 'pixel_data', \
        self._descriptor._Width.value * self._descriptor._Height.value);
|
import psycopg2
from db.enums import *
# Module-level connection and cursor shared by every query helper below.
# NOTE(review): credentials are hard-coded in source; move them to
# configuration or environment variables.
base = psycopg2.connect("dbname='cardkeepersample' user='andrew' host='localhost' password='1234'")
cursor = base.cursor()
# Wrapped queries in alphabetic order
def active_packs(user_id, start=0, count=10):
    """Return up to *count* (pack_id, name) rows of the user's active packs,
    ordered by pack id and offset by *start* for paging."""
    sql = """SELECT packs.pack_id, packs.name FROM user_packs, packs WHERE packs.pack_id = user_packs.pack_id
    AND user_packs.status = %s AND user_id = %s ORDER BY pack_id
    OFFSET %s LIMIT %s;"""
    params = (CardStatusType.ACTIVE.value, user_id, start, count)
    cursor.execute(sql, params)
    return cursor.fetchall()
def add_pack(user_id, pack_id):
    """Subscribe *user_id* to *pack_id* and activate every card in the pack."""
    cursor.execute(
        """INSERT INTO user_packs (pack_id, user_id, status) VALUES (%s, %s, 'Active');""",
        (pack_id, user_id))
    cursor.execute("""SELECT card_id FROM cards WHERE cards.pack_id = %s""",
                   (pack_id,))
    card_sql = """INSERT INTO user_cards (user_id, card_id, times_reviewed, correct_answers, status) VALUES (%s, %s, 0, 0, 'Active');"""
    # fetchall() materializes the card ids before the inserts reuse the cursor.
    for (card_id,) in cursor.fetchall():
        cursor.execute(card_sql, (user_id, card_id))
    base.commit()
def add_user(user):
    """Insert a new users row; *user* is an iterable matching the column
    order (user_id, name, general_goal, weekly_goal, notifications_learn,
    notifications_stats) — `joined` is set to the current date."""
    sql = """INSERT INTO users (user_id, name, general_goal, weekly_goal, notifications_learn, notifications_stats, joined)
    VALUES (%s, %s, %s, %s, %s, %s, current_date);"""
    cursor.execute(sql, tuple(user))
    base.commit()
def available_packs(user_id):
    """List public packs as (pack_id, name) rows, capped at 105.

    *user_id* is currently unused; kept for interface compatibility.
    """
    sql = """SELECT packs.pack_id, packs.name FROM packs
    WHERE packs.privacy = 'public' LIMIT 105;"""
    cursor.execute(sql)
    return cursor.fetchall()
def available_groups(user_id, rights=RightsType.USER, include_higher=False):
    """Return (group_id, name) rows of groups where the user holds *rights*
    (or any rank up to it when *include_higher* is true)."""
    if isinstance(rights, RightsType):
        # Pass the raw enum value — psycopg2 cannot adapt Enum members.
        # Matches the conversions done in new_pack/update_pack_privacy.
        rights = rights.value
    comparison = "<=" if include_higher else "="
    query = """SELECT groups.group_id, groups.name FROM groups, user_groups
    WHERE groups.group_id = user_groups.group_id
    AND user_groups.user_id = %s
    AND user_groups.rights """ + comparison + " %s;"
    cursor.execute(query, (user_id, rights))
    return cursor.fetchall()
def delete_pack(pack_id):
    """Delete *pack_id* and every dependent row, in FK-safe order:
    user_cards -> cards -> user_packs -> packs."""
    # Raises if the pack does not exist (get_pack unpacks the fetched row).
    owner_id = get_pack(pack_id)['owner_id']
    statements = (
        ('''
    DELETE FROM user_cards
    USING cards
    WHERE
        user_cards.card_id = cards.card_id AND
        cards.pack_id = %s;
    ''', (pack_id,)),
        ('DELETE FROM cards WHERE pack_id = %s;', (pack_id,)),
        ('DELETE FROM user_packs WHERE pack_id = %s;', (pack_id,)),
        ('DELETE FROM packs WHERE pack_id = %s;', (pack_id,)),
    )
    for sql, params in statements:
        cursor.execute(sql, params)
    base.commit()
def get_all_cards_in_pack(pack_id):
    """Return every card of *pack_id* as a list of dicts with keys
    card_id/front/back/comment/type."""
    cursor.execute('''
        SELECT card_id, front, back, comment, type
        FROM cards
        WHERE pack_id = %s;
    ''', (pack_id,))
    keys = ('card_id', 'front', 'back', 'comment', 'type')
    return [dict(zip(keys, row)) for row in cursor.fetchall()]
def get_pack(pack_id, user_id=None):
    """Fetch pack metadata as a dict; when *user_id* is given the user's
    subscription status is included, otherwise status is None."""
    cursor.execute(
        'SELECT name, owner_id, privacy FROM packs WHERE pack_id = %s;',
        (pack_id,)
    )
    name, owner_id, privacy = cursor.fetchone()
    if user_id is None:
        status = None
    else:
        cursor.execute('''
            SELECT status FROM user_packs
            WHERE user_id = %s AND pack_id = %s;
        ''', (user_id, pack_id))
        status = cursor.fetchone()[0]
    return {'pack_id': pack_id,
            'name': name,
            'owner_id': owner_id,
            'privacy': privacy,
            'status': status}
def if_added(user_id, pack_id):
    """Return the user_packs rows linking the user to the pack
    (an empty list when the pack was never added)."""
    sql = "SELECT * FROM user_packs WHERE user_id = %s AND pack_id = %s;"
    cursor.execute(sql, (user_id, pack_id))
    return list(cursor.fetchall())
# TODO: Take permissions lists into account
def has_pack_read_access(pack_id, user_id):
    """True when *user_id* owns the pack or the pack is public."""
    info = get_pack(pack_id)
    if user_id == info['owner_id']:
        return True
    return info['privacy'] == 'public'
def if_registered(user_id):
    """True if a users row exists for *user_id*."""
    cursor.execute("SELECT * FROM users WHERE users.user_id = %s;", (user_id,))
    return bool(cursor.fetchall())
def cards_for_learning(user_id):
    """Return (front, back, comment) rows of the user's short-type cards."""
    query = """SELECT cards.front, cards.back, cards.comment FROM user_cards, cards
    WHERE user_cards.card_id = cards.card_id AND
    user_id = %s AND cards.type = %s"""
    # Pass the enum's value — psycopg2 cannot adapt Enum members directly
    # (consistent with CardType.SHORT.value used in new_pack).
    cursor.execute(query, (user_id, CardType.SHORT.value))
    return cursor.fetchall()
def new_card(front, back):
    """Insert a bare card (not linked to any pack) with the given faces."""
    cursor.execute("INSERT INTO cards (front, back) VALUES (%s, %s);",
                   (front, back))
    base.commit()
def new_group(name, owner, privacy="public"):
    """Create a group named *name*, owned by *owner*."""
    sql = "INSERT INTO groups (name, privacy, owner_id) VALUES (%s, %s, %s);"
    cursor.execute(sql, (name, privacy, owner))
    base.commit()
def new_pack(name, owner, privacy=PrivacyType.PUBLIC, status=CardStatusType.ACTIVE, cards=None):
    """Create a pack with its cards, subscribe *owner* to it and return the
    new pack_id.

    *cards* is an iterable of dicts with 'front'/'back'/'comment' keys.
    The default is None instead of a shared mutable list.
    """
    if cards is None:
        cards = []
    if isinstance(privacy, PrivacyType):
        privacy = privacy.value
    if isinstance(status, CardStatusType):
        status = status.value
    # RETURNING pack_id avoids re-selecting by (name, owner), which was racy
    # and ambiguous when an owner reuses a pack name.
    query = "INSERT INTO packs (name, owner_id, privacy) VALUES (%s, %s, %s) RETURNING pack_id;"
    cursor.execute(query, (name, owner, privacy))
    pack_id = cursor.fetchone()[0]
    query = "INSERT INTO user_packs (user_id, pack_id, status) VALUES (%s, %s, %s);"
    cursor.execute(query, (owner, pack_id, status))
    insert_query = "INSERT INTO cards (pack_id, front, back, comment, type) VALUES (%s, %s, %s, %s, %s) RETURNING card_id;"
    insert2_query = "INSERT INTO user_cards (user_id, card_id, times_reviewed, correct_answers, status) VALUES (%s, %s, 0, 0, 'Active');"
    for card in cards:
        cursor.execute(insert_query, (pack_id, card['front'], card['back'],
                                      card['comment'], CardType.SHORT.value))
        card_id = cursor.fetchone()[0]
        cursor.execute(insert2_query, (owner, card_id))
    base.commit()
    return pack_id
def select_cards(user_id, pack_id):
    """Return (card_id, front, back, comment) rows of the user's ACTIVE
    cards in *pack_id*.  (Removed a leftover debug print of the arguments.)"""
    query = """SELECT cards.card_id, cards.front, cards.back, cards.comment
    FROM cards, user_cards
    WHERE cards.card_id = user_cards.card_id
    AND user_cards.status = %s
    AND cards.pack_id = %s
    AND user_cards.user_id = %s"""
    cursor.execute(query, (CardStatusType.ACTIVE.value, pack_id, user_id))
    return cursor.fetchall()
def update_card_data(user_id, card_id, answer):
    """Record one review of a card: bump times_reviewed and add *answer*
    (0 or 1 — TODO confirm against callers) to correct_answers."""
    sql = """UPDATE user_cards SET times_reviewed = times_reviewed+1, correct_answers = correct_answers+%s
    WHERE user_id = %s AND card_id = %s"""
    cursor.execute(sql, (answer, user_id, card_id))
    base.commit()
def update_card_status(user_id, card_id, status):
    """Set the user's status for one card."""
    sql = """UPDATE user_cards SET status = %s
    WHERE user_id = %s AND card_id = %s"""
    cursor.execute(sql, (status, user_id, card_id))
    base.commit()
def update_pack_name(pack_id, new_name):
    """Rename a pack."""
    cursor.execute('UPDATE packs SET name = %s WHERE pack_id = %s;',
                   (new_name, pack_id))
    base.commit()
def update_pack_privacy(pack_id, new_privacy):
    """Change a pack's privacy; accepts a PrivacyType or its raw value."""
    if isinstance(new_privacy, PrivacyType):
        new_privacy = new_privacy.value
    cursor.execute('UPDATE packs SET privacy = %s WHERE pack_id = %s;',
                   (new_privacy, pack_id))
    base.commit()
def update_pack_status(user_id, pack_id, status):
    """Set the user's subscription status for a pack.

    Bug fix: the previous version updated user_cards keyed by card_id while
    being handed a pack_id, so it silently touched the wrong (or no) rows.
    The pack subscription status lives in user_packs (see add_pack).
    """
    query = """UPDATE user_packs SET status = %s
    WHERE user_id = %s AND pack_id = %s"""
    cursor.execute(query, (status, user_id, pack_id))
    base.commit()
|
'max_length': '100'}),
'packagesum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'public': ('django.db.models.fields.CharField', [], {'default': "'no'", 'max_length': '3'})
},
'deploy.packagecondition': {
'Meta': {'object_name': 'packagecondition'},
'depends': ('django.db.models.fields.CharField', [], {'default': "'installed'", 'max_length': '12'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'softwarename': ('django.db.models.fields.CharField', [], {'default': "'undefined'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'softwareversion': ('django.db.models.fields.CharField', [], {'default': "'undefined'", 'max_length': '500', 'null': 'True', 'blank': 'True'})
},
'deploy.packageprofile': {
'Meta': {'object_name': 'packageprofile'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'packages': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['deploy.package']", 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['deploy.packageprofile']"})
},
'deploy.timeprofile': {
'Meta': {'object_name': 'timeprofile'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'end_time': ('django.db.models.fields.TimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'start_time': ('django.db.models.fields.TimeField', [], {})
},
'inventory.entity': {
'Meta': {'object_name': 'entity'},
'description': ('django.db.models.fields.TextField', [], {'max_length': '1000'}),
'force_packageprofile': ('django.db.models.fields.CharField', [], {'default': "'no'", 'max_length': '3'}),
'force_timeprofile': ('django.db.models.fields.CharField', [], {'default': "'no'", 'max_length': '3'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'old_packageprofile': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'old_packageprofile'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['deploy.packageprofile']"}),
'old_timeprofile': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'old_timeprofile'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['deploy.timeprofile']"}),
'packageprofile': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['deploy.packageprofile']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'parent': ('django.db.models.fields.relat | ed.ForeignKey', [], {'blank': 'True', 'related_name': "'child'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['inventory.entity']"}),
'timeprofile': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['deploy.timeprofile']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'})
},
'inventory.machine': {
'Meta': {'object_name': 'machine'},
'domain' | : ('django.db.models.fields.CharField', [], {'default': "'undefined'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'entity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['inventory.entity']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'undefined'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'lastsave': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'manualy_created': ('django.db.models.fields.CharField', [], {'default': "'yes'", 'max_length': '3'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'netsum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'ossum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'packageprofile': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['deploy.packageprofile']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'packages': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['deploy.package']", 'null': 'True', 'blank': 'True'}),
'product': ('django.db.models.fields.CharField', [], {'default': "'undefined'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'serial': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'softsum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'timeprofile': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['deploy.timeprofile']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'typemachine': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['inventory.typemachine']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'undefined'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'vendor': ('django.db.models.fields.CharField', [], {'default': "'undefined'", 'max_length': '100', 'null': 'True', 'blank': 'True'})
},
'inventory.net': {
'Meta': {'object_name': 'net'},
'host': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['inventory.machine']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'mac': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'manualy_created': ('django.db.models.fields.CharField', [], {'default': "'yes'", 'max_length': '3'}),
'mask': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'inventory.osdistribution': {
'Meta': {'object_name': 'osdistribution'},
'arch': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'host': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['inventory.machine']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'manualy_created': ('django.db.models.fields.CharField', [], {'default': "'yes'", 'max_length': '3'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'systemdrive': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'default': "'undefined'", 'max_length': '100', 'null': 'True', 'blank': 'True'})
},
'inventory.software': {
'Meta': {'object_name': 'software'},
'host': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['inventory.machi |
from django.contrib import admin
from .models import BackgroundImages, Widget
class WidgetAdmin(admin.ModelAdmin):
    """Admin options for Widget: show key columns, newest entries first."""
    ordering = ('-id',)
    list_display = ('name', 'link', 'is_featured')
class BackgroundAdmin(admin.ModelAdmin):
    """Admin options for BackgroundImages: name/creation date, newest first."""
    ordering = ('-id',)
    list_display = ('name', 'created_at')
admin.site.register(Widget, WidgetAd | min)
admin.site.register(BackgroundImages, BackgroundAdmin)
|
# Copyright (c) 2017. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Commandline arguments related to error handling
"""
from __future__ import print_function, division, absolute_import
def add_error_args(arg_parser):
    """Register error-handling options on *arg_parser*.

    Adds an "Errors" argument group containing --skip-variant-errors
    (a store_true flag, default False) and returns the group.
    """
    group = arg_parser.add_argument_group(
        title="Errors",
        description="Options for error handling")
    group.add_argument(
        "--skip-variant-errors",
        default=False,
        action="store_true",
        help="Skip variants which cause runtime errors of any kind")
    return group
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.