# meerk40t/device/moshi/moshiblob.py (aniziorodrigues/meerk40t @ ca1180b690a2f25748fe04cb7fdbf270520deab9, MIT)

swizzle_table = [
[
b"\x00",
b"\x01",
b"\x40",
b"\x03",
b"\x10",
b"\x21",
b"\x50",
b"\x23",
b"\x04",
b"\x09",
b"\x44",
b"\x0b",
b"\x14",
b"\x29",
b"\x54",
b"\x2b",
],
[
b"\x08",
b"\x11",
b"\x48",
b"\x13",
b"\x18",
b"\x31",
b"\x58",
b"\x33",
b"\x0c",
b"\x19",
b"\x4c",
b"\x1b",
b"\x1c",
b"\x39",
b"\x5c",
b"\x3b",
],
[
b"\x80",
b"\x05",
b"\xc0",
b"\x07",
b"\x90",
b"\x25",
b"\xd0",
b"\x27",
b"\x84",
b"\x0d",
b"\xc4",
b"\x0f",
b"\x94",
b"\x2d",
b"\xd4",
b"\x2f",
],
[
b"\x88",
b"\x15",
b"\xc8",
b"\x17",
b"\x98",
b"\x35",
b"\xd8",
b"\x37",
b"\x8c",
b"\x1d",
b"\xcc",
b"\x1f",
b"\x9c",
b"\x3d",
b"\xdc",
b"\x3f",
],
[
b"\x02",
b"\x41",
b"\x42",
b"\x43",
b"\x12",
b"\x61",
b"\x52",
b"\x63",
b"\x06",
b"\x49",
b"\x46",
b"\x4b",
b"\x16",
b"\x69",
b"\x56",
b"\x6b",
],
[
b"\x0a",
b"\x51",
b"\x4a",
b"\x53",
b"\x1a",
b"\x71",
b"\x5a",
b"\x73",
b"\x0e",
b"\x59",
b"\x4e",
b"\x5b",
b"\x1e",
b"\x79",
b"\x5e",
b"\x7b",
],
[
b"\x82",
b"\x45",
b"\xc2",
b"\x47",
b"\x92",
b"\x65",
b"\xd2",
b"\x67",
b"\x86",
b"\x4d",
b"\xc6",
b"\x4f",
b"\x96",
b"\x6d",
b"\xd6",
b"\x6f",
],
[
b"\x8a",
b"\x55",
b"\xca",
b"\x57",
b"\x9a",
b"\x75",
b"\xda",
b"\x77",
b"\x8e",
b"\x5d",
b"\xce",
b"\x5f",
b"\x9e",
b"\x7d",
b"\xde",
b"\x7f",
],
[
b"\x20",
b"\x81",
b"\x60",
b"\x83",
b"\x30",
b"\xa1",
b"\x70",
b"\xa3",
b"\x24",
b"\x89",
b"\x64",
b"\x8b",
b"\x34",
b"\xa9",
b"\x74",
b"\xab",
],
[
b"\x28",
b"\x91",
b"\x68",
b"\x93",
b"\x38",
b"\xb1",
b"\x78",
b"\xb3",
b"\x2c",
b"\x99",
b"\x6c",
b"\x9b",
b"\x3c",
b"\xb9",
b"\x7c",
b"\xbb",
],
[
b"\xa0",
b"\x85",
b"\xe0",
b"\x87",
b"\xb0",
b"\xa5",
b"\xf0",
b"\xa7",
b"\xa4",
b"\x8d",
b"\xe4",
b"\x8f",
b"\xb4",
b"\xad",
b"\xf4",
b"\xaf",
],
[
b"\xa8",
b"\x95",
b"\xe8",
b"\x97",
b"\xb8",
b"\xb5",
b"\xf8",
b"\xb7",
b"\xac",
b"\x9d",
b"\xec",
b"\x9f",
b"\xbc",
b"\xbd",
b"\xfc",
b"\xbf",
],
[
b"\x22",
b"\xc1",
b"\x62",
b"\xc3",
b"\x32",
b"\xe1",
b"\x72",
b"\xe3",
b"\x26",
b"\xc9",
b"\x66",
b"\xcb",
b"\x36",
b"\xe9",
b"\x76",
b"\xeb",
],
[
b"\x2a",
b"\xd1",
b"\x6a",
b"\xd3",
b"\x3a",
b"\xf1",
b"\x7a",
b"\xf3",
b"\x2e",
b"\xd9",
b"\x6e",
b"\xdb",
b"\x3e",
b"\xf9",
b"\x7e",
b"\xfb",
],
[
b"\xa2",
b"\xc5",
b"\xe2",
b"\xc7",
b"\xb2",
b"\xe5",
b"\xf2",
b"\xe7",
b"\xa6",
b"\xcd",
b"\xe6",
b"\xcf",
b"\xb6",
b"\xed",
b"\xf6",
b"\xef",
],
[
b"\xaa",
b"\xd5",
b"\xea",
b"\xd7",
b"\xba",
b"\xf5",
b"\xfa",
b"\xf7",
b"\xae",
b"\xdd",
b"\xee",
b"\xdf",
b"\xbe",
b"\xfd",
b"\xfe",
b"\xff",
],
]
MOSHI_SET_OFFSET = 0
MOSHI_TERMINATION = 2
MOSHI_VECTOR_SPEED = 5
MOSHI_RASTER_SPEED = 4
MOSHI_CUT_ABS = 15
MOSHI_CUT_HORIZ = 14
MOSHI_CUT_VERT = 11
MOSHI_MOVE_ABS = 7
MOSHI_MOVE_HORIZ = 6
MOSHI_MOVE_VERT = 3
MOSHI_FREEMOTOR = 1
MOSHI_ESTOP = 1
MOSHI_EPILOGUE = 2
MOSHI_PROLOGUE = 6
# 6 also seen at laser startup.
MOSHI_LASER = 7
MOSHI_READ = 14
# 14 is also sometimes done as a keepalive each 3.4 seconds.
class MoshiBlob:
"""
MoshiBlobs are datablobs of Moshi types. These are series of commands which should be executed as a program within
the Moshicontroller.
"""
def __init__(self, channel=None):
        self.data = bytearray()  # Queued program commands.
self.channel = channel
self.last_x = 0
self.last_y = 0
self.offset_x = 0
self.offset_y = 0
self._stage = 0
def __len__(self):
return len(self.data)
def pipe_int8(self, value):
"""
        Write an 8-bit int to the current program.
"""
v = bytes(
bytearray(
[
value & 0xFF,
]
)
)
self.write(v)
def pipe_int16le(self, value):
"""
        Write a 16-bit little-endian value to the current program.
"""
v = bytes(
bytearray(
[
(value >> 0) & 0xFF,
(value >> 8) & 0xFF,
]
)
)
self.write(v)
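    # Illustrative note (not in the original source): pipe_int16le(0x1234) appends
    # b"\x34\x12", low byte first, which is the little-endian framing the command
    # writers below rely on.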
def write(self, bytes_to_write):
"""
        Write data to the queue; a thread later moves it into the buffer in a
        thread-safe manner.
:param bytes_to_write: data to write to the queue.
:return:
"""
self.data += bytes_to_write
return self
def vector_speed(self, speed_mms, normal_speed_mms):
"""
        Vector speed byte (0x00 position), followed by two int8 values: the jog (cut)
        speed and the normal speed. Both values are clamped to integers in the range
        1 to 256.
:return:
"""
assert self._stage == 0
self._stage = 1
if self.channel:
self.channel(
"Vector Cut Speed: %d mm/s Normal Speed: %d mm/s"
% (int(speed_mms), int(normal_speed_mms))
)
self.write(swizzle_table[MOSHI_VECTOR_SPEED][0])
        if speed_mms > 256:
            speed_mms = 256
        if speed_mms < 1:
            speed_mms = 1
        # Clamp the normal speed to the same documented 1..256 range so the int8
        # written below cannot wrap for out-of-range inputs.
        if normal_speed_mms > 256:
            normal_speed_mms = 256
        if normal_speed_mms < 1:
            normal_speed_mms = 1
        self.pipe_int8(speed_mms - 1)
        self.pipe_int8(normal_speed_mms - 1)
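    # Illustrative note (not in the original source): vector_speed(20, 40) emits the
    # swizzled vector-speed opcode followed by bytes([19, 39]), i.e. each speed minus one.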
def raster_speed(self, speed_mms):
"""
Write speed for raster programs.
"""
assert self._stage == 0
self._stage = 1
if self.channel:
self.channel("Raster Header Speed: %d cm/s" % int(speed_mms))
self.write(swizzle_table[MOSHI_RASTER_SPEED][0])
speed_cms = int(round(speed_mms / 10))
if speed_cms == 0:
speed_cms = 1
self.pipe_int8(speed_cms - 1)
def set_offset(self, z, x, y):
"""
        Second command for a jump (0x03 position), followed by three int16le values
        (z, x, y). Advances to stage 2.
:return:
"""
assert self._stage == 1
self._stage = 2
self.offset_x = x
self.offset_y = y
if self.channel:
self.channel("Set Location z: %d, x: %d, y: %d" % (int(z), int(x), int(y)))
self.write(swizzle_table[MOSHI_SET_OFFSET][0])
self.pipe_int16le(z) # Unknown, always zero.
self.pipe_int16le(x) # x
self.pipe_int16le(y) # y
def termination(self):
"""
        Termination commands for a jump/program (the last 7 bytes). Advances to stage 4.
:return:
"""
# assert self._stage == 3
self._stage = 4
if self.channel:
self.channel("Termination.")
for i in range(7):
self.write(swizzle_table[MOSHI_TERMINATION][0])
def cut_abs(self, x, y):
"""
Write an absolute position cut value.
        Laser will cut to this position from the head position currently stored on
        the Moshiboard.
"""
assert 2 <= self._stage <= 3
self._stage = 3
if x < 0:
x = 0
if y < 0:
y = 0
self.last_x = x
self.last_y = y
x -= self.offset_x
y -= self.offset_y
if self.channel:
self.channel("Cut x: %d y: %d" % (int(x), int(y)))
self.write(swizzle_table[MOSHI_CUT_ABS][1])
self.pipe_int16le(int(x))
self.pipe_int16le(int(y))
def move_abs(self, x, y):
"""
Write an absolute position move value.
        Laser will move, without cutting, to this position from the head position
        currently stored on the Moshiboard.
"""
assert 2 <= self._stage <= 3
self._stage = 3
if x < 0:
x = 0
if y < 0:
y = 0
self.last_x = x
self.last_y = y
x -= self.offset_x
y -= self.offset_y
if self.channel:
self.channel("Move x: %d y: %d" % (int(x), int(y)))
self.write(swizzle_table[MOSHI_MOVE_ABS][0])
self.pipe_int16le(int(x))
self.pipe_int16le(int(y))
def move_vertical_abs(self, y):
"""
Write an absolute position vertical move.
        Laser will move, without cutting, to the new y position from the head
        position stored in the Moshiboard.
"""
assert 2 <= self._stage <= 3
self._stage = 3
self.last_y = y
y -= self.offset_y
if self.channel:
self.channel("Move Vertical y: %d" % int(y))
self.write(swizzle_table[MOSHI_MOVE_VERT][0])
self.pipe_int16le(int(y))
def move_horizontal_abs(self, x):
"""
Write an absolute position horizontal move.
        Laser will move, without cutting, to the new x position from the head
        position stored in the Moshiboard.
"""
assert 2 <= self._stage <= 3
self._stage = 3
self.last_x = x
x -= self.offset_x
if self.channel:
self.channel("Move Horizontal x: %d" % int(x))
self.write(swizzle_table[MOSHI_MOVE_HORIZ][0])
self.pipe_int16le(int(x))
def cut_horizontal_abs(self, x):
"""
Write an absolute position horizontal cut.
        Laser will cut, with the laser firing, to the new x position from the head
        position stored in the Moshiboard.
"""
assert 2 <= self._stage <= 3
self._stage = 3
self.last_x = x
x -= self.offset_x
if self.channel:
self.channel("Cut Horizontal x: %d" % int(x))
self.write(swizzle_table[MOSHI_CUT_HORIZ][0])
self.pipe_int16le(int(x))
def cut_vertical_abs(self, y):
"""
Write an absolute position vertical cut.
        Laser will cut, with the laser firing, to the new y position from the head
        position stored in the Moshiboard.
"""
assert 2 <= self._stage <= 3
self._stage = 3
self.last_y = y
y -= self.offset_y
if self.channel:
self.channel("Cut Vertical y: %d" % int(y))
self.write(swizzle_table[MOSHI_CUT_VERT][0])
self.pipe_int16le(int(y))
@staticmethod
def _swizzle(b, p7, p6, p5, p4, p3, p2, p1, p0):
return (
((b >> 0) & 1) << p0
| ((b >> 1) & 1) << p1
| ((b >> 2) & 1) << p2
| ((b >> 3) & 1) << p3
| ((b >> 4) & 1) << p4
| ((b >> 5) & 1) << p5
| ((b >> 6) & 1) << p6
| ((b >> 7) & 1) << p7
)
@staticmethod
def convert(q):
"""
        Translate a Moshiboard swizzled byte into the correct Moshi command code.
        The Moshiboard's command codes have 16 values, each with 16 different swizzled
        encodings; two different swizzles are used depending on the parity of the
        particular code. Moshi's native software uses these encodings seemingly at
        random, but the board reads them all the same.
"""
if q & 1:
return MoshiBlob._swizzle(q, 7, 6, 2, 4, 3, 5, 1, 0)
else:
return MoshiBlob._swizzle(q, 5, 1, 7, 2, 4, 3, 6, 0)
@staticmethod
def reconvert(q):
"""
        Counter-translate a command code back into a swizzled value: the inverse of
        convert(), obtained by applying convert() five times (convert() returns every
        byte to itself after six applications).
"""
for m in range(5):
q = MoshiBlob.convert(q)
return q
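

# A minimal self-check and usage sketch, not part of the original module. It assumes
# the module is run directly and treats print() as a stand-in debug channel.
if __name__ == "__main__":
    # reconvert() applies convert() five times; convert() returns every byte to
    # itself after six applications, so reconvert() is its exact inverse.
    assert all(MoshiBlob.convert(MoshiBlob.reconvert(i)) == i for i in range(256))
    # Row r of swizzle_table holds the 16 swizzled encodings of command r;
    # convert() recovers the plain byte (r << 4) | column from each of them.
    for code, row in enumerate(swizzle_table):
        for column, swizzled in enumerate(row):
            assert MoshiBlob.convert(swizzled[0]) == (code << 4) | column
    # Assemble a tiny vector program, following the stage order the class asserts:
    # speed header (1), offset (2), cuts/moves (3), termination (4).
    blob = MoshiBlob(channel=print)
    blob.vector_speed(20, 40)
    blob.set_offset(0, 100, 100)
    blob.move_abs(500, 500)
    blob.cut_abs(1000, 500)
    blob.termination()
    print("program length: %d bytes" % len(blob))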


# backend/projects/views.py (MParvin/falco @ 18665a89829bb4c9b5e28b98499cca378301ccf6, MIT)

from core.models import User
from django.http import HttpResponse, JsonResponse
from django.shortcuts import get_object_or_404
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from rest_framework import permissions, status
from rest_framework.decorators import api_view, permission_classes
from rest_framework.parsers import JSONParser
from requests.exceptions import ConnectionError
from projects.models import (
Page,
Project,
ProjectMemberRole,
ProjectAuditParameters,
AvailableAuditParameters,
Script,
)
from projects.serializers import (
PageSerializer,
ProjectSerializer,
ProjectMemberRoleSerializer,
ProjectAuditParametersSerializer,
AvailableAuditParameterSerializer,
ScriptSerializer,
)
from projects.permissions import (
check_if_member_of_project,
check_if_admin_of_project,
is_admin_of_project,
)
from audits.tasks import get_wpt_audit_configurations
def get_user_projects(user_id):
return Project.objects.filter(members__id=user_id, is_active=True)
@swagger_auto_schema(
methods=["get"],
responses={
200: openapi.Response(
"Returns a list of all the user’s projects", ProjectSerializer(many=True)
)
},
tags=["Projects"],
)
@swagger_auto_schema(
methods=["post"],
request_body=ProjectSerializer,
responses={201: openapi.Response("Returns the created project", ProjectSerializer)},
tags=["Projects"],
)
@api_view(["GET", "POST"])
@permission_classes([permissions.IsAuthenticated])
def project_list(request):
if request.method == "GET":
projects = get_user_projects(request.user.id)
serializer = ProjectSerializer(
projects, many=True, context={"user_id": request.user.id}
)
return JsonResponse(serializer.data, safe=False)
elif request.method == "POST":
data = JSONParser().parse(request)
serializer = ProjectSerializer(data=data, context={"user_id": request.user.id})
if serializer.is_valid():
project = Project.objects.create(**serializer.validated_data)
project.save()
return JsonResponse(
{"uuid": project.uuid, **serializer.data},
status=status.HTTP_201_CREATED,
)
return JsonResponse(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@swagger_auto_schema(
methods=["get"],
responses={200: openapi.Response("", ProjectSerializer)},
tags=["Projects"],
)
@api_view(["GET"])
@permission_classes([permissions.IsAuthenticated])
def first_project(request):
"""Returns the first project of the user.
    This is used to speed up the loading of the first project page."""
projects = get_user_projects(request.user.id)
serializer = ProjectSerializer(
projects.first(), context={"user_id": request.user.id}
)
return JsonResponse(serializer.data)
@swagger_auto_schema(
methods=["get"],
responses={
200: openapi.Response("Returns details of a project.", ProjectSerializer)
},
tags=["Projects"],
)
@swagger_auto_schema(
methods=["put"],
request_body=ProjectSerializer,
responses={
200: openapi.Response(
"Updates a project. Allows for partial updates.", ProjectSerializer
)
},
tags=["Projects"],
)
@swagger_auto_schema(
methods=["delete"], responses={204: "No content"}, tags=["Projects"]
)
@api_view(["GET", "PUT", "DELETE"])
@permission_classes([permissions.IsAuthenticated])
def project_detail(request, project_uuid):
project = get_object_or_404(Project, pk=project_uuid)
check_if_member_of_project(request.user.id, project.uuid)
if request.method == "GET":
if is_admin_of_project(request.user.id, project.uuid):
serializer = ProjectSerializer(
project, context={"user_id": request.user.id}
)
return JsonResponse(serializer.data)
serializer = ProjectSerializer(
project,
fields=(
"uuid",
"name",
"project_members",
"pages",
"scripts",
"audit_parameters_list",
"screenshot_url",
"latest_audit_at",
"has_siblings",
),
context={"user_id": request.user.id},
)
return JsonResponse(serializer.data)
elif request.method == "PUT":
check_if_admin_of_project(request.user.id, project.uuid)
data = JSONParser().parse(request)
serializer = ProjectSerializer(
project, data=data, partial=True, context={"user_id": request.user.id}
)
if serializer.is_valid():
serializer.save()
return JsonResponse(serializer.data)
return JsonResponse(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
elif request.method == "DELETE":
check_if_admin_of_project(request.user.id, project.uuid)
project.delete()
return JsonResponse({}, status=status.HTTP_204_NO_CONTENT)
@swagger_auto_schema(
methods=["get"],
responses={
200: openapi.Response(
"Returns a list of all pages in the project", PageSerializer(many=True)
)
},
tags=["Pages"],
)
@swagger_auto_schema(
methods=["post"],
request_body=PageSerializer,
responses={201: openapi.Response("Returns the created page", PageSerializer)},
tags=["Pages"],
)
@api_view(["GET", "POST"])
@permission_classes([permissions.IsAuthenticated])
def project_page_list(request, project_uuid):
    project = get_object_or_404(Project, uuid=project_uuid)
check_if_member_of_project(request.user.id, project.uuid)
if request.method == "GET":
pages = project.pages.all()
serializer = PageSerializer(pages, many=True)
return JsonResponse(serializer.data, safe=False)
elif request.method == "POST":
check_if_admin_of_project(request.user.id, project.uuid)
data = JSONParser().parse(request)
serializer = PageSerializer(data=data)
if serializer.is_valid():
page = Page.objects.create(project=project, **serializer.validated_data)
page.save()
return JsonResponse(
{"uuid": page.uuid, **serializer.data}, status=status.HTTP_201_CREATED
)
return JsonResponse(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@swagger_auto_schema(
methods=["get"],
responses={200: openapi.Response("Returns details of a page.", PageSerializer)},
tags=["Pages"],
)
@swagger_auto_schema(
methods=["put"],
request_body=PageSerializer,
responses={
200: openapi.Response(
"Updates a page. Allows for partial updates.", PageSerializer
)
},
tags=["Pages"],
)
@swagger_auto_schema(methods=["delete"], responses={204: "No content"}, tags=["Pages"])
@api_view(["GET", "PUT", "DELETE"])
@permission_classes([permissions.IsAuthenticated])
def project_page_detail(request, project_uuid, page_uuid):
project = get_object_or_404(Project, pk=project_uuid)
page = get_object_or_404(Page, pk=page_uuid)
check_if_member_of_project(request.user.id, project.uuid)
if page.project != project:
return JsonResponse({}, status=status.HTTP_400_BAD_REQUEST)
if request.method == "GET":
serializer = PageSerializer(page)
return JsonResponse(serializer.data)
elif request.method == "PUT":
check_if_admin_of_project(request.user.id, project.uuid)
data = JSONParser().parse(request)
serializer = PageSerializer(page, data=data)
if serializer.is_valid():
serializer.save()
return JsonResponse(serializer.data)
return JsonResponse(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
elif request.method == "DELETE":
check_if_admin_of_project(request.user.id, project.uuid)
page.delete()
return JsonResponse({}, status=status.HTTP_204_NO_CONTENT)
@swagger_auto_schema(
methods=["post"],
request_body=ProjectAuditParametersSerializer,
responses={
201: openapi.Response(
"Returns the created project audit parameter",
ProjectAuditParametersSerializer,
)
},
tags=["Project Audit Parameters"],
)
@api_view(["POST"])
@permission_classes([permissions.IsAuthenticated])
def project_audit_parameter_list(request, project_uuid):
    project = get_object_or_404(Project, uuid=project_uuid)
check_if_admin_of_project(request.user.id, project.uuid)
data = JSONParser().parse(request)
serializer = ProjectAuditParametersSerializer(data=data)
if serializer.is_valid():
audit_parameter = ProjectAuditParameters.objects.create(
project=project, **serializer.validated_data
)
audit_parameter.save()
serializer = ProjectAuditParametersSerializer(audit_parameter)
return JsonResponse(serializer.data, status=status.HTTP_201_CREATED)
return JsonResponse(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@swagger_auto_schema(
methods=["get"],
responses={
200: openapi.Response(
"Returns the details of a project audit parameter.",
ProjectAuditParametersSerializer,
)
},
tags=["Project Audit Parameters"],
)
@swagger_auto_schema(
methods=["put"],
request_body=ProjectAuditParametersSerializer,
responses={
200: openapi.Response(
"Updates a project audit parameter. Allows for partial updates.",
ProjectAuditParametersSerializer,
)
},
tags=["Project Audit Parameters"],
)
@swagger_auto_schema(
methods=["delete"], responses={204: "No content"}, tags=["Project Audit Parameters"]
)
@api_view(["GET", "PUT", "DELETE"])
@permission_classes([permissions.IsAuthenticated])
def project_audit_parameters_detail(request, project_uuid, audit_parameters_uuid):
project = get_object_or_404(Project, pk=project_uuid)
audit_parameters = get_object_or_404(
ProjectAuditParameters, pk=audit_parameters_uuid
)
check_if_member_of_project(request.user.id, project.uuid)
if audit_parameters.project != project:
return JsonResponse({}, status=status.HTTP_400_BAD_REQUEST)
if request.method == "GET":
serializer = ProjectAuditParametersSerializer(audit_parameters)
return JsonResponse(serializer.data)
elif request.method == "PUT":
check_if_admin_of_project(request.user.id, project.uuid)
data = JSONParser().parse(request)
serializer = ProjectAuditParametersSerializer(audit_parameters, data=data)
if serializer.is_valid():
serializer.save()
return JsonResponse(serializer.data)
return JsonResponse(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
elif request.method == "DELETE":
check_if_admin_of_project(request.user.id, project.uuid)
audit_parameters.delete()
return JsonResponse({}, status=status.HTTP_204_NO_CONTENT)
@swagger_auto_schema(
methods=["put"],
request_body=ProjectMemberRoleSerializer,
responses={
200: openapi.Response(
"Updates a project member. Allows for partial updates.",
ProjectMemberRoleSerializer,
)
},
tags=["Project Members"],
)
@swagger_auto_schema(
methods=["delete"], responses={204: "No content"}, tags=["Project Members"]
)
@api_view(["PUT", "DELETE"])
@permission_classes([permissions.IsAuthenticated])
def project_member_detail(request, project_uuid, user_id):
project = get_object_or_404(Project, pk=project_uuid)
check_if_admin_of_project(request.user.id, project.uuid)
project_member = ProjectMemberRole.objects.filter(
project_id=project_uuid, user_id=user_id
)
if not project_member:
return HttpResponse(
"No project member was found", status=status.HTTP_404_NOT_FOUND
)
if request.method == "PUT":
data = JSONParser().parse(request)
if "is_admin" in data and type(data["is_admin"]) is bool:
project_member.update(is_admin=data["is_admin"])
serializer = ProjectMemberRoleSerializer(project_member.first())
return JsonResponse(serializer.data)
return HttpResponse(
"Please provide a valid 'is_admin' value.",
status=status.HTTP_400_BAD_REQUEST,
)
elif request.method == "DELETE":
project_member.delete()
return JsonResponse({}, status=status.HTTP_204_NO_CONTENT)
@swagger_auto_schema(
methods=["post"],
request_body=openapi.Schema(
type="object", properties={"user_id": openapi.Schema(type="string")}
),
responses={
201: openapi.Response(
"Returns the updated project with the new member.", ProjectSerializer
)
},
tags=["Project Members"],
)
@api_view(["POST"])
@permission_classes([permissions.IsAuthenticated])
def project_members(request, project_uuid):
project = get_object_or_404(Project, pk=project_uuid)
check_if_admin_of_project(request.user.id, project.uuid)
data = JSONParser().parse(request)
if "user_id" in data:
if not ProjectMemberRole.objects.filter(
project_id=project_uuid, user_id=data["user_id"]
):
user = User.objects.filter(id=data["user_id"])
if not user:
return HttpResponse(
"No user found with this id", status=status.HTTP_404_NOT_FOUND
)
project = Project.objects.filter(uuid=project_uuid).first()
project.members.add(user.first(), through_defaults={"is_admin": False})
serializer = ProjectSerializer(project)
return JsonResponse(serializer.data)
return HttpResponse(
"The user is already a member of the project",
status=status.HTTP_400_BAD_REQUEST,
)
return HttpResponse(
"You must provide a user_id", status=status.HTTP_400_BAD_REQUEST
)
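# Illustrative behavior (the endpoint path is not shown in this file): POSTing
# {"user_id": "42"} adds user 42 as a non-admin member and returns the updated
# project; an unknown user yields 404 and an existing membership yields 400.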
@swagger_auto_schema(
methods=["post"],
request_body=openapi.Schema(
type="object", properties={"wpt_instance_url": openapi.Schema(type="string")}
),
responses={
201: openapi.Response(
"Returns discovered available audit parameters for the WPT instance URL passed in parameter",
AvailableAuditParameterSerializer,
)
},
tags=["Project Audit Parameters"],
)
@api_view(["POST"])
def discover_available_audit_parameters(request):
data = JSONParser().parse(request)
if "wpt_instance_url" in data:
try:
get_wpt_audit_configurations(data["wpt_instance_url"])
except ConnectionError:
return JsonResponse(
{
"error": "UNREACHABLE",
"details": "The WPT instance is not reachable, please check the URL",
},
status=status.HTTP_400_BAD_REQUEST,
)
available_audit_parameters = AvailableAuditParameters.objects.filter(
is_active=True
)
serializer = AvailableAuditParameterSerializer(
available_audit_parameters, many=True
)
return JsonResponse(serializer.data, safe=False)
return JsonResponse(
{
"error": "MISSING_PARAMETER",
"details": "You must provide a wpt_instance_url in the request body",
},
status=status.HTTP_400_BAD_REQUEST,
)
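# Illustrative request body (the URL is a placeholder): {"wpt_instance_url":
# "https://wpt.example.com"}. On success, every active AvailableAuditParameters
# entry discovered on that WebPageTest instance is returned.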
@swagger_auto_schema(
methods=["get"],
responses={
200: openapi.Response(
"Returns all WebPageTest available audit parameters",
AvailableAuditParameterSerializer,
)
},
tags=["Project Audit Parameters"],
)
@api_view(["GET"])
@permission_classes([permissions.IsAuthenticated])
def available_audit_parameters(request):
available_audit_parameters = AvailableAuditParameters.objects.filter(is_active=True)
serializer = AvailableAuditParameterSerializer(
available_audit_parameters, many=True
)
return JsonResponse(serializer.data, safe=False)
@swagger_auto_schema(
methods=["post"],
request_body=ScriptSerializer,
responses={201: openapi.Response("Returns the created script", ScriptSerializer)},
tags=["Scripts"],
)
@api_view(["POST"])
@permission_classes([permissions.IsAuthenticated])
def project_scripts(request, project_uuid):
    project = get_object_or_404(Project, uuid=project_uuid)
check_if_admin_of_project(request.user.id, project.uuid)
data = JSONParser().parse(request)
serializer = ScriptSerializer(data=data)
if serializer.is_valid():
script = Script.objects.create(project=project, **serializer.validated_data)
script.save()
return JsonResponse(
{"uuid": script.uuid, **serializer.data}, status=status.HTTP_201_CREATED
)
return JsonResponse(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@swagger_auto_schema(
methods=["put"],
request_body=ScriptSerializer,
responses={
200: openapi.Response(
"Updates a script. Allows for partial updates.", ScriptSerializer
)
},
tags=["Scripts"],
)
@swagger_auto_schema(
methods=["delete"], responses={204: "No content"}, tags=["Scripts"]
)
@api_view(["PUT", "DELETE"])
@permission_classes([permissions.IsAuthenticated])
def project_script_detail(request, project_uuid, script_uuid):
project = get_object_or_404(Project, pk=project_uuid)
script = get_object_or_404(Script, pk=script_uuid)
check_if_member_of_project(request.user.id, project.uuid)
if script.project != project:
return JsonResponse({}, status=status.HTTP_400_BAD_REQUEST)
elif request.method == "PUT":
check_if_admin_of_project(request.user.id, project.uuid)
data = JSONParser().parse(request)
serializer = ScriptSerializer(script, data=data)
if serializer.is_valid():
serializer.save()
return JsonResponse(serializer.data)
return JsonResponse(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
elif request.method == "DELETE":
check_if_admin_of_project(request.user.id, project.uuid)
script.delete()
return JsonResponse({}, status=status.HTTP_204_NO_CONTENT)
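

# A hypothetical urls.py sketch (path strings and converters are assumptions, not
# taken from this repository) showing one way these views could be routed:
from django.urls import path

from projects import views

urlpatterns = [
    path("projects/", views.project_list),
    path("projects/first/", views.first_project),
    path("projects/<uuid:project_uuid>/", views.project_detail),
    path("projects/<uuid:project_uuid>/pages/", views.project_page_list),
    path("projects/<uuid:project_uuid>/pages/<uuid:page_uuid>/", views.project_page_detail),
    path("projects/<uuid:project_uuid>/members/", views.project_members),
    path("projects/<uuid:project_uuid>/members/<int:user_id>/", views.project_member_detail),
    path("available-audit-parameters/", views.available_audit_parameters),
]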


# praw/objector.py (nwithan8/praw @ 579a6685332aed9c310e34fffbf7a3ebeccd1c1d, BSD-2-Clause)

"""Provides the Objector class."""
from json import loads
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
from .exceptions import ClientException, RedditAPIException
from .models.reddit.base import RedditBase
from .util import snake_case_keys
if TYPE_CHECKING: # pragma: no cover
from ... import praw
class Objector:
"""The objector builds :class:`.RedditBase` objects."""
@classmethod
def parse_error(
cls, data: Union[List[Any], Dict[str, Dict[str, str]]]
) -> Optional[RedditAPIException]:
"""Convert JSON response into an error object.
:param data: The dict to be converted.
:returns: An instance of :class:`~.RedditAPIException`, or ``None`` if ``data``
doesn't fit this model.
"""
if isinstance(data, list):
# Fetching a Submission returns a list (of two items). Although it's handled
# manually in `Submission._fetch()`, assume it's a possibility here.
return None
errors = data.get("json", {}).get("errors")
if errors is None:
return None
if len(errors) < 1:
# See `Collection._fetch()`.
raise ClientException("successful error response", data)
return RedditAPIException(errors)
@classmethod
def check_error(cls, data: Union[List[Any], Dict[str, Dict[str, str]]]):
"""Raise an error if the argument resolves to an error object."""
error = cls.parse_error(data)
if error:
raise error
def __init__(self, reddit: "praw.Reddit", parsers: Optional[Dict[str, Any]] = None):
"""Initialize an Objector instance.
:param reddit: An instance of :class:`~.Reddit`.
"""
self.parsers = {} if parsers is None else parsers
self._reddit = reddit
def _objectify_dict(self, data):
"""Create RedditBase objects from dicts.
:param data: The structured data, assumed to be a dict.
:returns: An instance of :class:`~.RedditBase`.
"""
if {"conversation", "messages", "modActions"}.issubset(data):
parser = self.parsers["ModmailConversation"]
elif {"actionTypeId", "author", "date"}.issubset(data):
# Modmail mod action
data = snake_case_keys(data)
parser = self.parsers["ModmailAction"]
elif {"bodyMarkdown", "isInternal"}.issubset(data):
# Modmail message
data = snake_case_keys(data)
parser = self.parsers["ModmailMessage"]
elif {"kind", "short_name", "violation_reason"}.issubset(data):
# This is a Rule
parser = self.parsers["rule"]
elif {"isAdmin", "isDeleted"}.issubset(data):
# Modmail author
data = snake_case_keys(data)
# Prevent clobbering base-36 id
del data["id"]
data["is_subreddit_mod"] = data.pop("is_mod")
parser = self.parsers[self._reddit.config.kinds["redditor"]]
elif {"banStatus", "muteStatus", "recentComments"}.issubset(data):
# Modmail user
data = snake_case_keys(data)
data["created_string"] = data.pop("created")
parser = self.parsers[self._reddit.config.kinds["redditor"]]
elif {"displayName", "id", "type"}.issubset(data):
# Modmail subreddit
data = snake_case_keys(data)
parser = self.parsers[self._reddit.config.kinds[data["type"]]]
elif {"date", "id", "name"}.issubset(data) or {
"id",
"name",
"permissions",
}.issubset(data):
parser = self.parsers[self._reddit.config.kinds["redditor"]]
elif {"text", "url"}.issubset(data):
if "color" in data or "linkUrl" in data:
parser = self.parsers["Button"]
else:
parser = self.parsers["MenuLink"]
elif {"children", "text"}.issubset(data):
parser = self.parsers["Submenu"]
elif {"height", "url", "width"}.issubset(data):
parser = self.parsers["Image"]
elif {"isSubscribed", "name", "subscribers"}.issubset(data):
# discards icon and subscribed information
return self._reddit.subreddit(data["name"])
elif {"authorFlairType", "name"}.issubset(data):
# discards flair information
return self._reddit.redditor(data["name"])
elif {"parent_id"}.issubset(data):
parser = self.parsers[self._reddit.config.kinds["comment"]]
elif "collection_id" in data.keys():
parser = self.parsers["Collection"]
elif {"moderators", "moderatorIds", "allUsersLoaded", "subredditId"}.issubset(
data
):
data = snake_case_keys(data)
moderators = []
for mod_id in data["moderator_ids"]:
mod = snake_case_keys(data["moderators"][mod_id])
mod["mod_permissions"] = list(mod["mod_permissions"].keys())
moderators.append(mod)
data["moderators"] = moderators
parser = self.parsers["moderator-list"]
elif "username" in data.keys():
data["name"] = data.pop("username")
parser = self.parsers[self._reddit.config.kinds["redditor"]]
else:
if "user" in data:
parser = self.parsers[self._reddit.config.kinds["redditor"]]
data["user"] = parser.parse({"name": data["user"]}, self._reddit)
return data
return parser.parse(data, self._reddit)
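    # Illustration (example dict assumed, not taken from the test suite): a payload
    # such as {"text": "Docs", "url": "https://example.com", "color": "#ff0000"}
    # satisfies the {"text", "url"} branch above and, because "color" is present,
    # is dispatched to the "Button" parser rather than "MenuLink".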
def objectify(
self, data: Optional[Union[Dict[str, Any], List[Any]]]
) -> Optional[Union[RedditBase, Dict[str, Any], List[Any]]]:
"""Create RedditBase objects from data.
:param data: The structured data.
:returns: An instance of :class:`~.RedditBase`, or ``None`` if given ``data`` is
``None``.
"""
# pylint: disable=too-many-return-statements
if data is None: # 204 no content
return None
if isinstance(data, list):
return [self.objectify(item) for item in data]
if "json" in data and "errors" in data["json"]:
errors = data["json"]["errors"]
if len(errors) > 0:
raise RedditAPIException(errors)
if "kind" in data and (
"shortName" in data or data["kind"] in ("menu", "moderators")
):
# This is a widget
parser = self.parsers.get(data["kind"], self.parsers["widget"])
return parser.parse(data, self._reddit)
if {"kind", "data"}.issubset(data) and data["kind"] in self.parsers:
parser = self.parsers[data["kind"]]
return parser.parse(data["data"], self._reddit)
if "json" in data and "data" in data["json"]:
if "websocket_url" in data["json"]["data"]:
return data
if "things" in data["json"]["data"]: # Submission.reply
return self.objectify(data["json"]["data"]["things"])
if "rules" in data["json"]["data"]:
return self.objectify(loads(data["json"]["data"]["rules"]))
if "url" in data["json"]["data"]: # Subreddit.submit
                # The returned URL points to the new submission itself, so it's removed.
del data["json"]["data"]["url"]
parser = self.parsers[self._reddit.config.kinds["submission"]]
if data["json"]["data"]["id"].startswith(
f"{self._reddit.config.kinds['submission']}_"
):
                    # With polls, Reddit returns a fullname but calls it an "id".
                    # Coerce the fullname into a plain id to compensate.
data["json"]["data"]["id"] = data["json"]["data"]["id"].split(
"_", 1
)[1]
else:
parser = self.parsers["LiveUpdateEvent"]
return parser.parse(data["json"]["data"], self._reddit)
if "rules" in data:
return self.objectify(data["rules"])
elif isinstance(data, dict):
return self._objectify_dict(data)
return data
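

# A small usage sketch, not part of the original module. Because of the relative
# imports above it only runs inside an installed praw package, and the error triple
# is a representative example of Reddit's error envelope, not a captured response.
if __name__ == "__main__":
    payload = {"json": {"errors": [["SUBREDDIT_NOEXIST", "that subreddit doesn't exist", "sr"]]}}
    print(Objector.parse_error(payload))  # RedditAPIException wrapping the triple
    assert Objector.parse_error({"json": {"errors": None}}) is None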
return self.objectify(loads(data["json"]["data"]["rules"]))
if "url" in data["json"]["data"]: del data["json"]["data"]["url"]
parser = self.parsers[self._reddit.config.kinds["submission"]]
if data["json"]["data"]["id"].startswith(
f"{self._reddit.config.kinds['submission']}_"
):
                # With polls, Reddit returns a fullname but calls it an "id".
                # Work around this by coercing the fullname into an id.
data["json"]["data"]["id"] = data["json"]["data"]["id"].split(
"_", 1
)[1]
else:
parser = self.parsers["LiveUpdateEvent"]
return parser.parse(data["json"]["data"], self._reddit)
if "rules" in data:
return self.objectify(data["rules"])
elif isinstance(data, dict):
return self._objectify_dict(data)
return data
| true
| true
|
f70346a442266f05839df1291c42a74b655fecd0
| 109
|
py
|
Python
|
altair/vega/v2/schema/__init__.py
|
zjffdu/altair
|
cd34b03ce011f16616f7c6c59a3c60436b679302
|
[
"BSD-3-Clause"
] | 1
|
2021-03-10T00:36:53.000Z
|
2021-03-10T00:36:53.000Z
|
altair/vega/v2/schema/__init__.py
|
zjffdu/altair
|
cd34b03ce011f16616f7c6c59a3c60436b679302
|
[
"BSD-3-Clause"
] | null | null | null |
altair/vega/v2/schema/__init__.py
|
zjffdu/altair
|
cd34b03ce011f16616f7c6c59a3c60436b679302
|
[
"BSD-3-Clause"
] | 1
|
2017-08-23T17:52:59.000Z
|
2017-08-23T17:52:59.000Z
|
from .core import *
SCHEMA_VERSION = 'v2.6.5'
SCHEMA_URL = 'https://vega.github.io/schema/vega/v2.6.5.json'
| 21.8
| 61
| 0.706422
|
from .core import *
SCHEMA_VERSION = 'v2.6.5'
SCHEMA_URL = 'https://vega.github.io/schema/vega/v2.6.5.json'
| true
| true
|
f70347a71417cd66381de736a77b57d65bdf9ef1
| 1,982
|
py
|
Python
|
ai_inspector/io_utils.py
|
Giskard-AI/ai-inspector
|
91ccad002b5c705df8f0d0f0f633e1086591528a
|
[
"Apache-2.0"
] | 12
|
2021-09-24T15:23:16.000Z
|
2022-03-27T20:17:33.000Z
|
ai_inspector/io_utils.py
|
Giskard-AI/ai-inspector
|
91ccad002b5c705df8f0d0f0f633e1086591528a
|
[
"Apache-2.0"
] | 7
|
2021-09-24T10:30:45.000Z
|
2022-03-28T16:14:42.000Z
|
ai_inspector/io_utils.py
|
Giskard-AI/ai-inspector
|
91ccad002b5c705df8f0d0f0f633e1086591528a
|
[
"Apache-2.0"
] | 1
|
2022-03-04T14:42:29.000Z
|
2022-03-04T14:42:29.000Z
|
"""Various input/output utility functions"""
from typing import Any, Optional
import os
import re
from io import BytesIO
import cloudpickle
import pandas as pd
from zstandard import ZstdCompressor, ZstdDecompressor
COMPRESSION_MAX_OUTPUT_SIZE = 10 ** 9 # 1GB
def pickle_dumps(variable: object) -> bytes:
pickle: bytes = cloudpickle.dumps(variable)
return pickle
def pickle_loads(dumped_pickle: bytes) -> Any:
return cloudpickle.loads(dumped_pickle)
def save_df(df: pd.DataFrame, format: str = "csv") -> bytes:
pandas_version: int = int(re.sub("[^0-9]", "", pd.__version__))
if format == "csv":
csv_buffer = BytesIO()
if pandas_version >= 120:
df.to_csv(csv_buffer, index=False)
else:
csv_buffer.write(df.to_csv(index=False).encode("utf-8"))
csv_buffer.seek(0)
return csv_buffer.getvalue()
else:
raise ValueError("Invalid method: {method}. Choose 'csv'.")
def compress(data: bytes, method: Optional[str] = "zstd") -> bytes:
if method == "zstd":
compressor = ZstdCompressor(level=3, write_checksum=True)
compressed_data = compressor.compress(data)
elif method is None:
compressed_data = data
# elif compression == "lz4":
# import lz4.frame
# data = lz4.frame.compress(data, compression_level=3, content_checksum=True)
else:
raise ValueError("Invalid compression method: {method}. Choose 'zstd' or None.")
return compressed_data
def decompress(
data: bytes, method: Optional[str] = "zstd", max_output_size: int = COMPRESSION_MAX_OUTPUT_SIZE
) -> bytes:
if method == "zstd":
decompressor = ZstdDecompressor()
decompressed_data = decompressor.decompress(data, max_output_size=max_output_size)
elif method is None:
decompressed_data = data
else:
raise ValueError("Invalid compression method: {method}. Choose 'zstd' or None.")
return decompressed_data
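# A quick round-trip sanity check of the helpers above (illustration only;
# executes when this module is run directly).
if __name__ == "__main__":
    payload = b"hello world " * 1024
    blob = compress(payload, method="zstd")
    assert decompress(blob, method="zstd") == payload
    # method=None is a pass-through in both directions.
    assert decompress(compress(payload, method=None), method=None) == payload
    print("compress/decompress round-trip OK")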
| 30.492308
| 99
| 0.676085
|
from typing import Any, Optional
import os
import re
from io import BytesIO
import cloudpickle
import pandas as pd
from zstandard import ZstdCompressor, ZstdDecompressor
COMPRESSION_MAX_OUTPUT_SIZE = 10 ** 9
def pickle_dumps(variable: object) -> bytes:
pickle: bytes = cloudpickle.dumps(variable)
return pickle
def pickle_loads(dumped_pickle: bytes) -> Any:
return cloudpickle.loads(dumped_pickle)
def save_df(df: pd.DataFrame, format: str = "csv") -> bytes:
pandas_version: int = int(re.sub("[^0-9]", "", pd.__version__))
if format == "csv":
csv_buffer = BytesIO()
if pandas_version >= 120:
df.to_csv(csv_buffer, index=False)
else:
csv_buffer.write(df.to_csv(index=False).encode("utf-8"))
csv_buffer.seek(0)
return csv_buffer.getvalue()
else:
raise ValueError("Invalid method: {method}. Choose 'csv'.")
def compress(data: bytes, method: Optional[str] = "zstd") -> bytes:
if method == "zstd":
compressor = ZstdCompressor(level=3, write_checksum=True)
compressed_data = compressor.compress(data)
elif method is None:
compressed_data = data
else:
raise ValueError("Invalid compression method: {method}. Choose 'zstd' or None.")
return compressed_data
def decompress(
data: bytes, method: Optional[str] = "zstd", max_output_size: int = COMPRESSION_MAX_OUTPUT_SIZE
) -> bytes:
if method == "zstd":
decompressor = ZstdDecompressor()
decompressed_data = decompressor.decompress(data, max_output_size=max_output_size)
elif method is None:
decompressed_data = data
else:
raise ValueError("Invalid compression method: {method}. Choose 'zstd' or None.")
return decompressed_data
| true
| true
|
f703489e32317c8fd5bf42962ae54884a961d1d3
| 5,450
|
py
|
Python
|
spacy/tests/conftest.py
|
himkt/spaCy
|
3f2e3cbd2784ebd2c15aafd5f02830a9369ada84
|
[
"MIT"
] | null | null | null |
spacy/tests/conftest.py
|
himkt/spaCy
|
3f2e3cbd2784ebd2c15aafd5f02830a9369ada84
|
[
"MIT"
] | null | null | null |
spacy/tests/conftest.py
|
himkt/spaCy
|
3f2e3cbd2784ebd2c15aafd5f02830a9369ada84
|
[
"MIT"
] | 1
|
2019-09-30T00:57:01.000Z
|
2019-09-30T00:57:01.000Z
|
# coding: utf-8
from __future__ import unicode_literals
from io import StringIO, BytesIO
from pathlib import Path
import pytest
from .util import load_test_model
from ..tokens import Doc
from ..strings import StringStore
from .. import util
# These languages are used for generic tokenizer tests – only add a language
# here if it's using spaCy's tokenizer (not a different library)
# TODO: re-implement generic tokenizer tests
_languages = ['bn', 'da', 'de', 'en', 'es', 'fi', 'fr', 'ga', 'he', 'hu', 'id',
'it', 'nb', 'nl', 'pl', 'pt', 'ro', 'ru', 'sv', 'tr', 'ar', 'xx']
_models = {'en': ['en_core_web_sm'],
'de': ['de_core_news_sm'],
'fr': ['fr_core_news_sm'],
'xx': ['xx_ent_web_sm'],
'en_core_web_md': ['en_core_web_md'],
'es_core_news_md': ['es_core_news_md']}
# only used for tests that require loading the models
# in all other cases, use specific instances
@pytest.fixture(params=_models['en'])
def EN(request):
return load_test_model(request.param)
@pytest.fixture(params=_models['de'])
def DE(request):
return load_test_model(request.param)
@pytest.fixture(params=_models['fr'])
def FR(request):
return load_test_model(request.param)
@pytest.fixture()
def RU(request):
pymorphy = pytest.importorskip('pymorphy2')
return util.get_lang_class('ru')()
#@pytest.fixture(params=_languages)
#def tokenizer(request):
#lang = util.get_lang_class(request.param)
#return lang.Defaults.create_tokenizer()
@pytest.fixture
def tokenizer():
return util.get_lang_class('xx').Defaults.create_tokenizer()
@pytest.fixture
def en_tokenizer():
return util.get_lang_class('en').Defaults.create_tokenizer()
@pytest.fixture
def en_vocab():
return util.get_lang_class('en').Defaults.create_vocab()
@pytest.fixture
def en_parser(en_vocab):
nlp = util.get_lang_class('en')(en_vocab)
return nlp.create_pipe('parser')
@pytest.fixture
def es_tokenizer():
return util.get_lang_class('es').Defaults.create_tokenizer()
@pytest.fixture
def de_tokenizer():
return util.get_lang_class('de').Defaults.create_tokenizer()
@pytest.fixture
def fr_tokenizer():
return util.get_lang_class('fr').Defaults.create_tokenizer()
@pytest.fixture
def hu_tokenizer():
return util.get_lang_class('hu').Defaults.create_tokenizer()
@pytest.fixture
def fi_tokenizer():
return util.get_lang_class('fi').Defaults.create_tokenizer()
@pytest.fixture
def ro_tokenizer():
return util.get_lang_class('ro').Defaults.create_tokenizer()
@pytest.fixture
def id_tokenizer():
return util.get_lang_class('id').Defaults.create_tokenizer()
@pytest.fixture
def sv_tokenizer():
return util.get_lang_class('sv').Defaults.create_tokenizer()
@pytest.fixture
def bn_tokenizer():
return util.get_lang_class('bn').Defaults.create_tokenizer()
@pytest.fixture
def ga_tokenizer():
return util.get_lang_class('ga').Defaults.create_tokenizer()
@pytest.fixture
def he_tokenizer():
return util.get_lang_class('he').Defaults.create_tokenizer()
@pytest.fixture
def nb_tokenizer():
return util.get_lang_class('nb').Defaults.create_tokenizer()
@pytest.fixture
def da_tokenizer():
return util.get_lang_class('da').Defaults.create_tokenizer()
@pytest.fixture
def ja_tokenizer():
janome = pytest.importorskip("MeCab")
return util.get_lang_class('ja').Defaults.create_tokenizer()
@pytest.fixture
def th_tokenizer():
pythainlp = pytest.importorskip("pythainlp")
return util.get_lang_class('th').Defaults.create_tokenizer()
@pytest.fixture
def tr_tokenizer():
return util.get_lang_class('tr').Defaults.create_tokenizer()
@pytest.fixture
def ar_tokenizer():
return util.get_lang_class('ar').Defaults.create_tokenizer()
@pytest.fixture
def ru_tokenizer():
pymorphy = pytest.importorskip('pymorphy2')
return util.get_lang_class('ru').Defaults.create_tokenizer()
@pytest.fixture
def stringstore():
return StringStore()
@pytest.fixture
def en_entityrecognizer():
return util.get_lang_class('en').Defaults.create_entity()
@pytest.fixture
def text_file():
return StringIO()
@pytest.fixture
def text_file_b():
return BytesIO()
def pytest_addoption(parser):
parser.addoption("--models", action="store_true",
help="include tests that require full models")
parser.addoption("--vectors", action="store_true",
help="include word vectors tests")
parser.addoption("--slow", action="store_true",
help="include slow tests")
for lang in _languages + ['all']:
parser.addoption("--%s" % lang, action="store_true", help="Use %s models" % lang)
for model in _models:
if model not in _languages:
parser.addoption("--%s" % model, action="store_true", help="Use %s model" % model)
def pytest_runtest_setup(item):
for opt in ['models', 'vectors', 'slow']:
if opt in item.keywords and not item.config.getoption("--%s" % opt):
pytest.skip("need --%s option to run" % opt)
# Check if test is marked with models and has arguments set, i.e. specific
# language. If so, skip test if flag not set.
if item.get_marker('models'):
for arg in item.get_marker('models').args:
if not item.config.getoption("--%s" % arg) and not item.config.getoption("--all"):
pytest.skip("need --%s or --all option to run" % arg)
| 25.707547
| 94
| 0.696881
|
from __future__ import unicode_literals
from io import StringIO, BytesIO
from pathlib import Path
import pytest
from .util import load_test_model
from ..tokens import Doc
from ..strings import StringStore
from .. import util
_languages = ['bn', 'da', 'de', 'en', 'es', 'fi', 'fr', 'ga', 'he', 'hu', 'id',
'it', 'nb', 'nl', 'pl', 'pt', 'ro', 'ru', 'sv', 'tr', 'ar', 'xx']
_models = {'en': ['en_core_web_sm'],
'de': ['de_core_news_sm'],
'fr': ['fr_core_news_sm'],
'xx': ['xx_ent_web_sm'],
'en_core_web_md': ['en_core_web_md'],
'es_core_news_md': ['es_core_news_md']}
@pytest.fixture(params=_models['en'])
def EN(request):
return load_test_model(request.param)
@pytest.fixture(params=_models['de'])
def DE(request):
return load_test_model(request.param)
@pytest.fixture(params=_models['fr'])
def FR(request):
return load_test_model(request.param)
@pytest.fixture()
def RU(request):
pymorphy = pytest.importorskip('pymorphy2')
return util.get_lang_class('ru')()
@pytest.fixture
def tokenizer():
return util.get_lang_class('xx').Defaults.create_tokenizer()
@pytest.fixture
def en_tokenizer():
return util.get_lang_class('en').Defaults.create_tokenizer()
@pytest.fixture
def en_vocab():
return util.get_lang_class('en').Defaults.create_vocab()
@pytest.fixture
def en_parser(en_vocab):
nlp = util.get_lang_class('en')(en_vocab)
return nlp.create_pipe('parser')
@pytest.fixture
def es_tokenizer():
return util.get_lang_class('es').Defaults.create_tokenizer()
@pytest.fixture
def de_tokenizer():
return util.get_lang_class('de').Defaults.create_tokenizer()
@pytest.fixture
def fr_tokenizer():
return util.get_lang_class('fr').Defaults.create_tokenizer()
@pytest.fixture
def hu_tokenizer():
return util.get_lang_class('hu').Defaults.create_tokenizer()
@pytest.fixture
def fi_tokenizer():
return util.get_lang_class('fi').Defaults.create_tokenizer()
@pytest.fixture
def ro_tokenizer():
return util.get_lang_class('ro').Defaults.create_tokenizer()
@pytest.fixture
def id_tokenizer():
return util.get_lang_class('id').Defaults.create_tokenizer()
@pytest.fixture
def sv_tokenizer():
return util.get_lang_class('sv').Defaults.create_tokenizer()
@pytest.fixture
def bn_tokenizer():
return util.get_lang_class('bn').Defaults.create_tokenizer()
@pytest.fixture
def ga_tokenizer():
return util.get_lang_class('ga').Defaults.create_tokenizer()
@pytest.fixture
def he_tokenizer():
return util.get_lang_class('he').Defaults.create_tokenizer()
@pytest.fixture
def nb_tokenizer():
return util.get_lang_class('nb').Defaults.create_tokenizer()
@pytest.fixture
def da_tokenizer():
return util.get_lang_class('da').Defaults.create_tokenizer()
@pytest.fixture
def ja_tokenizer():
janome = pytest.importorskip("MeCab")
return util.get_lang_class('ja').Defaults.create_tokenizer()
@pytest.fixture
def th_tokenizer():
pythainlp = pytest.importorskip("pythainlp")
return util.get_lang_class('th').Defaults.create_tokenizer()
@pytest.fixture
def tr_tokenizer():
return util.get_lang_class('tr').Defaults.create_tokenizer()
@pytest.fixture
def ar_tokenizer():
return util.get_lang_class('ar').Defaults.create_tokenizer()
@pytest.fixture
def ru_tokenizer():
pymorphy = pytest.importorskip('pymorphy2')
return util.get_lang_class('ru').Defaults.create_tokenizer()
@pytest.fixture
def stringstore():
return StringStore()
@pytest.fixture
def en_entityrecognizer():
return util.get_lang_class('en').Defaults.create_entity()
@pytest.fixture
def text_file():
return StringIO()
@pytest.fixture
def text_file_b():
return BytesIO()
def pytest_addoption(parser):
parser.addoption("--models", action="store_true",
help="include tests that require full models")
parser.addoption("--vectors", action="store_true",
help="include word vectors tests")
parser.addoption("--slow", action="store_true",
help="include slow tests")
for lang in _languages + ['all']:
parser.addoption("--%s" % lang, action="store_true", help="Use %s models" % lang)
for model in _models:
if model not in _languages:
parser.addoption("--%s" % model, action="store_true", help="Use %s model" % model)
def pytest_runtest_setup(item):
for opt in ['models', 'vectors', 'slow']:
if opt in item.keywords and not item.config.getoption("--%s" % opt):
pytest.skip("need --%s option to run" % opt)
if item.get_marker('models'):
for arg in item.get_marker('models').args:
if not item.config.getoption("--%s" % arg) and not item.config.getoption("--all"):
pytest.skip("need --%s or --all option to run" % arg)
| true
| true
|
f7034933752d9c12e8bad83bf1178250a44ed9a7
| 810
|
py
|
Python
|
loss/__init__.py
|
lartpang/OpticalFlowBasedVOS
|
cd4856644b77dd963133797c543aeafb4f402217
|
[
"MIT"
] | 6
|
2020-11-19T04:20:52.000Z
|
2021-09-24T02:40:11.000Z
|
loss/__init__.py
|
lartpang/OpticalFlowBasedVOS
|
cd4856644b77dd963133797c543aeafb4f402217
|
[
"MIT"
] | null | null | null |
loss/__init__.py
|
lartpang/OpticalFlowBasedVOS
|
cd4856644b77dd963133797c543aeafb4f402217
|
[
"MIT"
] | 2
|
2020-11-22T02:02:18.000Z
|
2021-11-14T08:56:15.000Z
|
from loss.BCELoss import cal_bce_loss
from loss.HEL import cal_hel_loss
from loss.IOULoss import cal_iou_loss, cal_weighted_iou_loss
from loss.L12Loss import cal_mae_loss, cal_mse_loss
from loss.SSIM import cal_ssim_loss
supported_loss = dict(
bce=cal_bce_loss,
hel=cal_hel_loss,
iou=cal_iou_loss,
weighted_iou=cal_weighted_iou_loss,
mae=cal_mae_loss,
mse=cal_mse_loss,
ssim=cal_ssim_loss,
)
def get_loss_combination_with_cfg(loss_cfg: dict) -> dict:
loss_combination = {}
for loss_name, with_loss in loss_cfg.items():
if with_loss:
if loss_func := supported_loss.get(loss_name):
loss_combination[loss_name] = loss_func
else:
raise Exception(f"{loss_name} is not be supported!")
return loss_combination
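# Example (illustration only): building a loss combination from a config dict.
#
#     loss_cfg = dict(bce=True, iou=True, ssim=False)
#     combo = get_loss_combination_with_cfg(loss_cfg)
#     # combo == {"bce": cal_bce_loss, "iou": cal_iou_loss}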
| 30
| 68
| 0.719753
|
from loss.BCELoss import cal_bce_loss
from loss.HEL import cal_hel_loss
from loss.IOULoss import cal_iou_loss, cal_weighted_iou_loss
from loss.L12Loss import cal_mae_loss, cal_mse_loss
from loss.SSIM import cal_ssim_loss
supported_loss = dict(
bce=cal_bce_loss,
hel=cal_hel_loss,
iou=cal_iou_loss,
weighted_iou=cal_weighted_iou_loss,
mae=cal_mae_loss,
mse=cal_mse_loss,
ssim=cal_ssim_loss,
)
def get_loss_combination_with_cfg(loss_cfg: dict) -> dict:
loss_combination = {}
for loss_name, with_loss in loss_cfg.items():
if with_loss:
if loss_func := supported_loss.get(loss_name):
loss_combination[loss_name] = loss_func
else:
raise Exception(f"{loss_name} is not be supported!")
return loss_combination
| true
| true
|
f7034a4570f202c8e19ef1cdd9a7c331fc3d1523
| 644
|
py
|
Python
|
bnu/team_201002/b.py
|
yofn/pyacm
|
e573f8fdeea77513711f00c42f128795cbba65a6
|
[
"Apache-2.0"
] | null | null | null |
bnu/team_201002/b.py
|
yofn/pyacm
|
e573f8fdeea77513711f00c42f128795cbba65a6
|
[
"Apache-2.0"
] | null | null | null |
bnu/team_201002/b.py
|
yofn/pyacm
|
e573f8fdeea77513711f00c42f128795cbba65a6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
#https://codeforces.com/group/H9K9zY8tcT/contest/297258/problem/B
#heap?
from queue import PriorityQueue
n = int(input())
g = {}
c = {str(i):0 for i in range(1,n+1)} #children count
for i in range(1,n+1):
k = str(i)
g[k] = input().split() # l[0]=weight; l[1]=no use; l[2:] parents;
for p in g[k][2:]:
c[p] += 1
q = PriorityQueue()
[q.put((int(g[k][0]),k)) for k in c if c[k]==0]
m = 0
i = n-1
while not q.empty():
w,k = q.get()
l = i + w
i -= 1
if l>m:
m = l
for p in g[k][2:]:
c[p] -= 1
if c[p]==0:
q.put((int(g[p][0]),p))
print(m)
| 20.774194
| 69
| 0.495342
|
from queue import PriorityQueue
n = int(input())
g = {}
c = {str(i):0 for i in range(1,n+1)}
for i in range(1,n+1):
k = str(i)
    g[k] = input().split()
    for p in g[k][2:]:
c[p] += 1
q = PriorityQueue()
[q.put((int(g[k][0]),k)) for k in c if c[k]==0]
m = 0
i = n-1
while not q.empty():
w,k = q.get()
l = i + w
i -= 1
if l>m:
m = l
for p in g[k][2:]:
c[p] -= 1
if c[p]==0:
q.put((int(g[p][0]),p))
print(m)
| true
| true
|
f7034b599f605a346b03f2b00d09af42a2e9041c
| 191
|
py
|
Python
|
brats_3D_viewer/forms.py
|
MartinMa28/BraTS-2018-Demo
|
f2508a56be51fe3a1ec6c4efc345c300891fba93
|
[
"MIT"
] | null | null | null |
brats_3D_viewer/forms.py
|
MartinMa28/BraTS-2018-Demo
|
f2508a56be51fe3a1ec6c4efc345c300891fba93
|
[
"MIT"
] | 5
|
2021-03-18T22:22:18.000Z
|
2021-09-22T18:18:27.000Z
|
brats_3D_viewer/forms.py
|
MartinMa28/BraTS-2018-Demo
|
f2508a56be51fe3a1ec6c4efc345c300891fba93
|
[
"MIT"
] | null | null | null |
from django.forms import ModelForm
from .models import MRIScan
class MRIScanForm(ModelForm):
class Meta:
model = MRIScan
fields = ['case_id', 't1', 't1ce', 't2', 'flair']
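# Example (illustration only; the view, URL name and template are hypothetical
# and would need django.shortcuts.render/redirect imported):
#
#     def upload_scan(request):
#         form = MRIScanForm(request.POST or None, request.FILES or None)
#         if request.method == 'POST' and form.is_valid():
#             form.save()
#             return redirect('scan-list')
#         return render(request, 'upload.html', {'form': form})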
| 27.285714
| 57
| 0.65445
|
from django.forms import ModelForm
from .models import MRIScan
class MRIScanForm(ModelForm):
class Meta:
model = MRIScan
fields = ['case_id', 't1', 't1ce', 't2', 'flair']
| true
| true
|
f7034c0fb535b297167117fcf8d9e848802a3ac6
| 1,368
|
py
|
Python
|
task_set/tasks/fixed/fixed_text_rnn_classification_test.py
|
egonrian/google-research
|
8177adbe9ca0d7e5a9463b54581fe6dd27be0974
|
[
"Apache-2.0"
] | 7
|
2020-03-15T12:14:07.000Z
|
2021-12-01T07:01:09.000Z
|
task_set/tasks/fixed/fixed_text_rnn_classification_test.py
|
JustinDurham/google-research
|
9049acf9246c1b75170f0c6757e62a8f619a9db6
|
[
"Apache-2.0"
] | 25
|
2020-07-25T08:53:09.000Z
|
2022-03-12T00:43:02.000Z
|
task_set/tasks/fixed/fixed_text_rnn_classification_test.py
|
JustinDurham/google-research
|
9049acf9246c1b75170f0c6757e62a8f619a9db6
|
[
"Apache-2.0"
] | 4
|
2021-02-08T10:25:45.000Z
|
2021-04-17T14:46:26.000Z
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for task_set.tasks.fixed_text_rnn_classification."""
from absl.testing import parameterized
from task_set import registry
from task_set.tasks import family_test_utils
from task_set.tasks.fixed import fixed_text_rnn_classification # pylint: disable=unused-import
import tensorflow.compat.v1 as tf
class FixedTextRNNClassificationTest(family_test_utils.SingleTaskTestCase):
def test_right_number_of_tasks(self):
task_names = registry.task_registry.get_all_fixed_config_names()
self.assertLen(task_names, 12)
@parameterized.parameters(registry.task_registry.get_all_fixed_config_names())
def test_tasks(self, task_name):
self.task_test(registry.task_registry.get_instance(task_name))
if __name__ == "__main__":
tf.test.main()
| 36
| 95
| 0.794591
|
from absl.testing import parameterized
from task_set import registry
from task_set.tasks import family_test_utils
from task_set.tasks.fixed import fixed_text_rnn_classification
import tensorflow.compat.v1 as tf
class FixedTextRNNClassificationTest(family_test_utils.SingleTaskTestCase):
def test_right_number_of_tasks(self):
task_names = registry.task_registry.get_all_fixed_config_names()
self.assertLen(task_names, 12)
@parameterized.parameters(registry.task_registry.get_all_fixed_config_names())
def test_tasks(self, task_name):
self.task_test(registry.task_registry.get_instance(task_name))
if __name__ == "__main__":
tf.test.main()
| true
| true
|
f7034c7732426e51ff6c5d5e1bc007df9ad1278f
| 2,954
|
py
|
Python
|
Air Canvas (only)/Different Canvas Codes/detect1.py
|
shanky1947/Air-Draw
|
ab370f96384414ba5c4e369f5465cd8e28b4f3f0
|
[
"Apache-2.0"
] | null | null | null |
Air Canvas (only)/Different Canvas Codes/detect1.py
|
shanky1947/Air-Draw
|
ab370f96384414ba5c4e369f5465cd8e28b4f3f0
|
[
"Apache-2.0"
] | null | null | null |
Air Canvas (only)/Different Canvas Codes/detect1.py
|
shanky1947/Air-Draw
|
ab370f96384414ba5c4e369f5465cd8e28b4f3f0
|
[
"Apache-2.0"
] | null | null | null |
# get hsv values using trackbar
import cv2
import numpy as np
import time
# A required callback method that goes into the trackbar function.
def nothing(x):
pass
# Initializing the webcam feed.
cap = cv2.VideoCapture(0)
cap.set(3,1280)
cap.set(4,720)
# Create a window named trackbars.
cv2.namedWindow("Trackbars")
# Now create 6 trackbars that will control the lower and upper range of
# H,S and V channels. The Arguments are like this: Name of trackbar,
# window name, range, callback function. For Hue the range is 0-179 and
# for S,V its 0-255.
cv2.createTrackbar("L - H", "Trackbars", 0, 179, nothing)
cv2.createTrackbar("L - S", "Trackbars", 0, 255, nothing)
cv2.createTrackbar("L - V", "Trackbars", 0, 255, nothing)
cv2.createTrackbar("U - H", "Trackbars", 179, 179, nothing)
cv2.createTrackbar("U - S", "Trackbars", 255, 255, nothing)
cv2.createTrackbar("U - V", "Trackbars", 255, 255, nothing)
while True:
# Start reading the webcam feed frame by frame.
ret, frame = cap.read()
if not ret:
break
# Flip the frame horizontally (Not required)
frame = cv2.flip( frame, 1 )
# Convert the BGR image to HSV image.
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# Get the new values of the trackbar in real time as the user changes
# them
l_h = cv2.getTrackbarPos("L - H", "Trackbars")
l_s = cv2.getTrackbarPos("L - S", "Trackbars")
l_v = cv2.getTrackbarPos("L - V", "Trackbars")
u_h = cv2.getTrackbarPos("U - H", "Trackbars")
u_s = cv2.getTrackbarPos("U - S", "Trackbars")
u_v = cv2.getTrackbarPos("U - V", "Trackbars")
# Set the lower and upper HSV range according to the value selected
# by the trackbar
lower_range = np.array([l_h, l_s, l_v])
upper_range = np.array([u_h, u_s, u_v])
# Filter the image and get the binary mask, where white represents
# your target color
mask = cv2.inRange(hsv, lower_range, upper_range)
# You can also visualize the real part of the target color (Optional)
res = cv2.bitwise_and(frame, frame, mask=mask)
# Converting the binary mask to 3 channel image, this is just so
# we can stack it with the others
mask_3 = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
    # Stack the mask, original frame and the filtered result
stacked = np.hstack((mask_3,frame,res))
# Show this stacked frame at 40% of the size.
cv2.imshow('Trackbars',cv2.resize(stacked,None,fx=0.4,fy=0.4))
# If the user presses ESC then exit the program
key = cv2.waitKey(1)
if key == 27:
break
# If the user presses `s` then print this array.
if key == ord('s'):
thearray = [[l_h,l_s,l_v],[u_h, u_s, u_v]]
print(thearray)
# Also save this array as penval.npy
np.save('penval',thearray)
break
# Release the camera & destroy the windows.
cap.release()
cv2.destroyAllWindows()
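# Example (illustration only): consuming the saved range later. 'penval.npy'
# is written by the loop above; 'sample.jpg' is a hypothetical test image.
#
#     penval = np.load('penval.npy')      # [[l_h, l_s, l_v], [u_h, u_s, u_v]]
#     lower, upper = np.array(penval[0]), np.array(penval[1])
#     img = cv2.imread('sample.jpg')
#     hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
#     mask = cv2.inRange(hsv, lower, upper)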
| 32.822222
| 74
| 0.655044
|
import cv2
import numpy as np
import time
def nothing(x):
pass
cap = cv2.VideoCapture(0)
cap.set(3,1280)
cap.set(4,720)
cv2.namedWindow("Trackbars")
cv2.createTrackbar("L - H", "Trackbars", 0, 179, nothing)
cv2.createTrackbar("L - S", "Trackbars", 0, 255, nothing)
cv2.createTrackbar("L - V", "Trackbars", 0, 255, nothing)
cv2.createTrackbar("U - H", "Trackbars", 179, 179, nothing)
cv2.createTrackbar("U - S", "Trackbars", 255, 255, nothing)
cv2.createTrackbar("U - V", "Trackbars", 255, 255, nothing)
while True:
ret, frame = cap.read()
if not ret:
break
frame = cv2.flip( frame, 1 )
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
l_h = cv2.getTrackbarPos("L - H", "Trackbars")
l_s = cv2.getTrackbarPos("L - S", "Trackbars")
l_v = cv2.getTrackbarPos("L - V", "Trackbars")
u_h = cv2.getTrackbarPos("U - H", "Trackbars")
u_s = cv2.getTrackbarPos("U - S", "Trackbars")
u_v = cv2.getTrackbarPos("U - V", "Trackbars")
lower_range = np.array([l_h, l_s, l_v])
upper_range = np.array([u_h, u_s, u_v])
mask = cv2.inRange(hsv, lower_range, upper_range)
res = cv2.bitwise_and(frame, frame, mask=mask)
mask_3 = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
stacked = np.hstack((mask_3,frame,res))
cv2.imshow('Trackbars',cv2.resize(stacked,None,fx=0.4,fy=0.4))
key = cv2.waitKey(1)
if key == 27:
break
if key == ord('s'):
thearray = [[l_h,l_s,l_v],[u_h, u_s, u_v]]
print(thearray)
np.save('penval',thearray)
break
cap.release()
cv2.destroyAllWindows()
| true
| true
|
f7034de21b1549f3d551d2ff41f194de21b6c390
| 12,342
|
py
|
Python
|
etl/parsers/etw/Microsoft_Windows_Direct3D10_1.py
|
IMULMUL/etl-parser
|
76b7c046866ce0469cd129ee3f7bb3799b34e271
|
[
"Apache-2.0"
] | 104
|
2020-03-04T14:31:31.000Z
|
2022-03-28T02:59:36.000Z
|
etl/parsers/etw/Microsoft_Windows_Direct3D10_1.py
|
IMULMUL/etl-parser
|
76b7c046866ce0469cd129ee3f7bb3799b34e271
|
[
"Apache-2.0"
] | 7
|
2020-04-20T09:18:39.000Z
|
2022-03-19T17:06:19.000Z
|
etl/parsers/etw/Microsoft_Windows_Direct3D10_1.py
|
IMULMUL/etl-parser
|
76b7c046866ce0469cd129ee3f7bb3799b34e271
|
[
"Apache-2.0"
] | 16
|
2020-03-05T18:55:59.000Z
|
2022-03-01T10:19:28.000Z
|
# -*- coding: utf-8 -*-
"""
Microsoft-Windows-Direct3D10_1
GUID : 9b7e4c8f-342c-4106-a19f-4f2704f689f0
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=1, version=0)
class Microsoft_Windows_Direct3D10_1_1_0(Etw):
pattern = Struct(
"pObject" / Int64ul,
"CchOldDebugObjectName" / Int32ul,
"OldDebugObjectName" / Bytes(lambda this: this.CchOldDebugObjectName),
"CchNewDebugObjectName" / Int32ul,
"NewDebugObjectName" / Bytes(lambda this: this.CchNewDebugObjectName)
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=2, version=0)
class Microsoft_Windows_Direct3D10_1_2_0(Etw):
pattern = Struct(
"pObject" / Int64ul,
"CchDebugObjectName" / Int32ul,
"DebugObjectName" / Bytes(lambda this: this.CchDebugObjectName)
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=3, version=0)
class Microsoft_Windows_Direct3D10_1_3_0(Etw):
pattern = Struct(
"pID3D10_1Device" / Int64ul,
"pIDXGIDevice" / Int64ul,
"pIDXGIAdapter" / Int64ul,
"CreationFlags" / Int32ul,
"FeatureLevel" / Int32ul,
"hKMAdapter" / Int32ul,
"hUMAdapter" / Int64ul,
"UMAdapterVersion" / Int64ul,
"hKMDevice" / Int32ul,
"hUMDevice" / Int64ul,
"UMDeviceVersion" / Int64ul,
"UMDeviceFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=4, version=0)
class Microsoft_Windows_Direct3D10_1_4_0(Etw):
pattern = Struct(
"pID3D10_1Device" / Int64ul,
"pIDXGIDevice" / Int64ul,
"pIDXGIAdapter" / Int64ul,
"CreationFlags" / Int32ul,
"FeatureLevel" / Int32ul,
"hKMAdapter" / Int32ul,
"hUMAdapter" / Int64ul,
"UMAdapterVersion" / Int64ul,
"hKMDevice" / Int32ul,
"hUMDevice" / Int64ul,
"UMDeviceVersion" / Int64ul,
"UMDeviceFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=5, version=0)
class Microsoft_Windows_Direct3D10_1_5_0(Etw):
pattern = Struct(
"pID3D10_1Device" / Int64ul,
"pIDXGIDevice" / Int64ul,
"pIDXGIAdapter" / Int64ul,
"CreationFlags" / Int32ul,
"FeatureLevel" / Int32ul,
"hKMAdapter" / Int32ul,
"hUMAdapter" / Int64ul,
"UMAdapterVersion" / Int64ul,
"hKMDevice" / Int32ul,
"hUMDevice" / Int64ul,
"UMDeviceVersion" / Int64ul,
"UMDeviceFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=6, version=0)
class Microsoft_Windows_Direct3D10_1_6_0(Etw):
pattern = Struct(
"pID3D10Resource" / Int64ul,
"pIDXGISurface" / Int64ul,
"pID3D10_1Device" / Int64ul,
"Dimension" / Int32ul,
"Usage" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"Depth" / Int32ul,
"MipLevels" / Int32ul,
"ArraySize" / Int32ul,
"Format" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"BindFlags" / Int32ul,
"CPUAccessFlags" / Int32ul,
"MiscFlags" / Int32ul,
"hKMResource" / Int32ul,
"hUMResource" / Int64ul,
"UMResourceMiscFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=7, version=0)
class Microsoft_Windows_Direct3D10_1_7_0(Etw):
pattern = Struct(
"pID3D10Resource" / Int64ul,
"pIDXGISurface" / Int64ul,
"pID3D10_1Device" / Int64ul,
"Dimension" / Int32ul,
"Usage" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"Depth" / Int32ul,
"MipLevels" / Int32ul,
"ArraySize" / Int32ul,
"Format" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"BindFlags" / Int32ul,
"CPUAccessFlags" / Int32ul,
"MiscFlags" / Int32ul,
"hKMResource" / Int32ul,
"hUMResource" / Int64ul,
"UMResourceMiscFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=8, version=0)
class Microsoft_Windows_Direct3D10_1_8_0(Etw):
pattern = Struct(
"pID3D10Resource" / Int64ul,
"pIDXGISurface" / Int64ul,
"pID3D10_1Device" / Int64ul,
"Dimension" / Int32ul,
"Usage" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"Depth" / Int32ul,
"MipLevels" / Int32ul,
"ArraySize" / Int32ul,
"Format" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"BindFlags" / Int32ul,
"CPUAccessFlags" / Int32ul,
"MiscFlags" / Int32ul,
"hKMResource" / Int32ul,
"hUMResource" / Int64ul,
"UMResourceMiscFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=9, version=0)
class Microsoft_Windows_Direct3D10_1_9_0(Etw):
pattern = Struct(
"pID3D10Resource" / Int64ul,
"pIDXGISurface" / Int64ul,
"pID3D10_1Device" / Int64ul,
"Dimension" / Int32ul,
"Usage" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"Depth" / Int32ul,
"MipLevels" / Int32ul,
"ArraySize" / Int32ul,
"Format" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"BindFlags" / Int32ul,
"CPUAccessFlags" / Int32ul,
"MiscFlags" / Int32ul,
"hKMResource" / Int32ul,
"hUMResource" / Int64ul,
"UMResourceMiscFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=10, version=0)
class Microsoft_Windows_Direct3D10_1_10_0(Etw):
pattern = Struct(
"pID3D10Resource" / Int64ul,
"pIDXGISurface" / Int64ul,
"pID3D10_1Device" / Int64ul,
"Dimension" / Int32ul,
"Usage" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"Depth" / Int32ul,
"MipLevels" / Int32ul,
"ArraySize" / Int32ul,
"Format" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"BindFlags" / Int32ul,
"CPUAccessFlags" / Int32ul,
"MiscFlags" / Int32ul,
"hKMResource" / Int32ul,
"hUMResource" / Int64ul,
"UMResourceMiscFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=11, version=0)
class Microsoft_Windows_Direct3D10_1_11_0(Etw):
pattern = Struct(
"pID3D10Resource" / Int64ul,
"pIDXGISurface" / Int64ul,
"pID3D10_1Device" / Int64ul,
"Dimension" / Int32ul,
"Usage" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"Depth" / Int32ul,
"MipLevels" / Int32ul,
"ArraySize" / Int32ul,
"Format" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"BindFlags" / Int32ul,
"CPUAccessFlags" / Int32ul,
"MiscFlags" / Int32ul,
"hKMResource" / Int32ul,
"hUMResource" / Int64ul,
"UMResourceMiscFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=12, version=0)
class Microsoft_Windows_Direct3D10_1_12_0(Etw):
pattern = Struct(
"pID3D10Resource" / Int64ul,
"pIDXGISurface" / Int64ul,
"pID3D10_1Device" / Int64ul,
"Dimension" / Int32ul,
"Usage" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"Depth" / Int32ul,
"MipLevels" / Int32ul,
"ArraySize" / Int32ul,
"Format" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"BindFlags" / Int32ul,
"CPUAccessFlags" / Int32ul,
"MiscFlags" / Int32ul,
"hKMResource" / Int32ul,
"hUMResource" / Int64ul,
"UMResourceMiscFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=13, version=0)
class Microsoft_Windows_Direct3D10_1_13_0(Etw):
pattern = Struct(
"pID3D10Resource" / Int64ul,
"pIDXGISurface" / Int64ul,
"pID3D10_1Device" / Int64ul,
"Dimension" / Int32ul,
"Usage" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"Depth" / Int32ul,
"MipLevels" / Int32ul,
"ArraySize" / Int32ul,
"Format" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"BindFlags" / Int32ul,
"CPUAccessFlags" / Int32ul,
"MiscFlags" / Int32ul,
"hKMResource" / Int32ul,
"hUMResource" / Int64ul,
"UMResourceMiscFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=14, version=0)
class Microsoft_Windows_Direct3D10_1_14_0(Etw):
pattern = Struct(
"pID3D10Resource" / Int64ul,
"pIDXGISurface" / Int64ul,
"pID3D10_1Device" / Int64ul,
"Dimension" / Int32ul,
"Usage" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"Depth" / Int32ul,
"MipLevels" / Int32ul,
"ArraySize" / Int32ul,
"Format" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"BindFlags" / Int32ul,
"CPUAccessFlags" / Int32ul,
"MiscFlags" / Int32ul,
"hKMResource" / Int32ul,
"hUMResource" / Int64ul,
"UMResourceMiscFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=15, version=0)
class Microsoft_Windows_Direct3D10_1_15_0(Etw):
pattern = Struct(
"pID3D10Resource" / Int64ul,
"pIDXGISurface" / Int64ul,
"pID3D10_1Device" / Int64ul,
"Dimension" / Int32ul,
"Usage" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"Depth" / Int32ul,
"MipLevels" / Int32ul,
"ArraySize" / Int32ul,
"Format" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"BindFlags" / Int32ul,
"CPUAccessFlags" / Int32ul,
"MiscFlags" / Int32ul,
"hKMResource" / Int32ul,
"hUMResource" / Int64ul,
"UMResourceMiscFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=16, version=0)
class Microsoft_Windows_Direct3D10_1_16_0(Etw):
pattern = Struct(
"pID3D10Resource" / Int64ul,
"pIDXGISurface" / Int64ul,
"pID3D10_1Device" / Int64ul,
"Dimension" / Int32ul,
"Usage" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"Depth" / Int32ul,
"MipLevels" / Int32ul,
"ArraySize" / Int32ul,
"Format" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"BindFlags" / Int32ul,
"CPUAccessFlags" / Int32ul,
"MiscFlags" / Int32ul,
"hKMResource" / Int32ul,
"hUMResource" / Int64ul,
"UMResourceMiscFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=17, version=0)
class Microsoft_Windows_Direct3D10_1_17_0(Etw):
pattern = Struct(
"pID3D10Resource" / Int64ul,
"pIDXGISurface" / Int64ul,
"pID3D10_1Device" / Int64ul,
"Dimension" / Int32ul,
"Usage" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"Depth" / Int32ul,
"MipLevels" / Int32ul,
"ArraySize" / Int32ul,
"Format" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"BindFlags" / Int32ul,
"CPUAccessFlags" / Int32ul,
"MiscFlags" / Int32ul,
"hKMResource" / Int32ul,
"hUMResource" / Int64ul,
"UMResourceMiscFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=18, version=0)
class Microsoft_Windows_Direct3D10_1_18_0(Etw):
pattern = Struct(
"Resources" / Int32ul,
"pIDXGISurfaces" / Int64ul,
"hNewKMResources" / Int32ul
)
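# Example (illustration only): the ``construct`` patterns above parse raw
# little-endian bytes directly; here, the 16-byte layout of event 18.
if __name__ == "__main__":
    raw = (2).to_bytes(4, "little") + (0).to_bytes(8, "little") + (7).to_bytes(4, "little")
    parsed = Microsoft_Windows_Direct3D10_1_18_0.pattern.parse(raw)
    assert parsed.Resources == 2 and parsed.hNewKMResources == 7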
| 31.324873
| 123
| 0.59893
|
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=1, version=0)
class Microsoft_Windows_Direct3D10_1_1_0(Etw):
pattern = Struct(
"pObject" / Int64ul,
"CchOldDebugObjectName" / Int32ul,
"OldDebugObjectName" / Bytes(lambda this: this.CchOldDebugObjectName),
"CchNewDebugObjectName" / Int32ul,
"NewDebugObjectName" / Bytes(lambda this: this.CchNewDebugObjectName)
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=2, version=0)
class Microsoft_Windows_Direct3D10_1_2_0(Etw):
pattern = Struct(
"pObject" / Int64ul,
"CchDebugObjectName" / Int32ul,
"DebugObjectName" / Bytes(lambda this: this.CchDebugObjectName)
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=3, version=0)
class Microsoft_Windows_Direct3D10_1_3_0(Etw):
pattern = Struct(
"pID3D10_1Device" / Int64ul,
"pIDXGIDevice" / Int64ul,
"pIDXGIAdapter" / Int64ul,
"CreationFlags" / Int32ul,
"FeatureLevel" / Int32ul,
"hKMAdapter" / Int32ul,
"hUMAdapter" / Int64ul,
"UMAdapterVersion" / Int64ul,
"hKMDevice" / Int32ul,
"hUMDevice" / Int64ul,
"UMDeviceVersion" / Int64ul,
"UMDeviceFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=4, version=0)
class Microsoft_Windows_Direct3D10_1_4_0(Etw):
pattern = Struct(
"pID3D10_1Device" / Int64ul,
"pIDXGIDevice" / Int64ul,
"pIDXGIAdapter" / Int64ul,
"CreationFlags" / Int32ul,
"FeatureLevel" / Int32ul,
"hKMAdapter" / Int32ul,
"hUMAdapter" / Int64ul,
"UMAdapterVersion" / Int64ul,
"hKMDevice" / Int32ul,
"hUMDevice" / Int64ul,
"UMDeviceVersion" / Int64ul,
"UMDeviceFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=5, version=0)
class Microsoft_Windows_Direct3D10_1_5_0(Etw):
pattern = Struct(
"pID3D10_1Device" / Int64ul,
"pIDXGIDevice" / Int64ul,
"pIDXGIAdapter" / Int64ul,
"CreationFlags" / Int32ul,
"FeatureLevel" / Int32ul,
"hKMAdapter" / Int32ul,
"hUMAdapter" / Int64ul,
"UMAdapterVersion" / Int64ul,
"hKMDevice" / Int32ul,
"hUMDevice" / Int64ul,
"UMDeviceVersion" / Int64ul,
"UMDeviceFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=6, version=0)
class Microsoft_Windows_Direct3D10_1_6_0(Etw):
pattern = Struct(
"pID3D10Resource" / Int64ul,
"pIDXGISurface" / Int64ul,
"pID3D10_1Device" / Int64ul,
"Dimension" / Int32ul,
"Usage" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"Depth" / Int32ul,
"MipLevels" / Int32ul,
"ArraySize" / Int32ul,
"Format" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"BindFlags" / Int32ul,
"CPUAccessFlags" / Int32ul,
"MiscFlags" / Int32ul,
"hKMResource" / Int32ul,
"hUMResource" / Int64ul,
"UMResourceMiscFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=7, version=0)
class Microsoft_Windows_Direct3D10_1_7_0(Etw):
pattern = Struct(
"pID3D10Resource" / Int64ul,
"pIDXGISurface" / Int64ul,
"pID3D10_1Device" / Int64ul,
"Dimension" / Int32ul,
"Usage" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"Depth" / Int32ul,
"MipLevels" / Int32ul,
"ArraySize" / Int32ul,
"Format" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"BindFlags" / Int32ul,
"CPUAccessFlags" / Int32ul,
"MiscFlags" / Int32ul,
"hKMResource" / Int32ul,
"hUMResource" / Int64ul,
"UMResourceMiscFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=8, version=0)
class Microsoft_Windows_Direct3D10_1_8_0(Etw):
pattern = Struct(
"pID3D10Resource" / Int64ul,
"pIDXGISurface" / Int64ul,
"pID3D10_1Device" / Int64ul,
"Dimension" / Int32ul,
"Usage" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"Depth" / Int32ul,
"MipLevels" / Int32ul,
"ArraySize" / Int32ul,
"Format" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"BindFlags" / Int32ul,
"CPUAccessFlags" / Int32ul,
"MiscFlags" / Int32ul,
"hKMResource" / Int32ul,
"hUMResource" / Int64ul,
"UMResourceMiscFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=9, version=0)
class Microsoft_Windows_Direct3D10_1_9_0(Etw):
pattern = Struct(
"pID3D10Resource" / Int64ul,
"pIDXGISurface" / Int64ul,
"pID3D10_1Device" / Int64ul,
"Dimension" / Int32ul,
"Usage" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"Depth" / Int32ul,
"MipLevels" / Int32ul,
"ArraySize" / Int32ul,
"Format" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"BindFlags" / Int32ul,
"CPUAccessFlags" / Int32ul,
"MiscFlags" / Int32ul,
"hKMResource" / Int32ul,
"hUMResource" / Int64ul,
"UMResourceMiscFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=10, version=0)
class Microsoft_Windows_Direct3D10_1_10_0(Etw):
pattern = Struct(
"pID3D10Resource" / Int64ul,
"pIDXGISurface" / Int64ul,
"pID3D10_1Device" / Int64ul,
"Dimension" / Int32ul,
"Usage" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"Depth" / Int32ul,
"MipLevels" / Int32ul,
"ArraySize" / Int32ul,
"Format" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"BindFlags" / Int32ul,
"CPUAccessFlags" / Int32ul,
"MiscFlags" / Int32ul,
"hKMResource" / Int32ul,
"hUMResource" / Int64ul,
"UMResourceMiscFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=11, version=0)
class Microsoft_Windows_Direct3D10_1_11_0(Etw):
pattern = Struct(
"pID3D10Resource" / Int64ul,
"pIDXGISurface" / Int64ul,
"pID3D10_1Device" / Int64ul,
"Dimension" / Int32ul,
"Usage" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"Depth" / Int32ul,
"MipLevels" / Int32ul,
"ArraySize" / Int32ul,
"Format" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"BindFlags" / Int32ul,
"CPUAccessFlags" / Int32ul,
"MiscFlags" / Int32ul,
"hKMResource" / Int32ul,
"hUMResource" / Int64ul,
"UMResourceMiscFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=12, version=0)
class Microsoft_Windows_Direct3D10_1_12_0(Etw):
pattern = Struct(
"pID3D10Resource" / Int64ul,
"pIDXGISurface" / Int64ul,
"pID3D10_1Device" / Int64ul,
"Dimension" / Int32ul,
"Usage" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"Depth" / Int32ul,
"MipLevels" / Int32ul,
"ArraySize" / Int32ul,
"Format" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"BindFlags" / Int32ul,
"CPUAccessFlags" / Int32ul,
"MiscFlags" / Int32ul,
"hKMResource" / Int32ul,
"hUMResource" / Int64ul,
"UMResourceMiscFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=13, version=0)
class Microsoft_Windows_Direct3D10_1_13_0(Etw):
pattern = Struct(
"pID3D10Resource" / Int64ul,
"pIDXGISurface" / Int64ul,
"pID3D10_1Device" / Int64ul,
"Dimension" / Int32ul,
"Usage" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"Depth" / Int32ul,
"MipLevels" / Int32ul,
"ArraySize" / Int32ul,
"Format" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"BindFlags" / Int32ul,
"CPUAccessFlags" / Int32ul,
"MiscFlags" / Int32ul,
"hKMResource" / Int32ul,
"hUMResource" / Int64ul,
"UMResourceMiscFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=14, version=0)
class Microsoft_Windows_Direct3D10_1_14_0(Etw):
pattern = Struct(
"pID3D10Resource" / Int64ul,
"pIDXGISurface" / Int64ul,
"pID3D10_1Device" / Int64ul,
"Dimension" / Int32ul,
"Usage" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"Depth" / Int32ul,
"MipLevels" / Int32ul,
"ArraySize" / Int32ul,
"Format" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"BindFlags" / Int32ul,
"CPUAccessFlags" / Int32ul,
"MiscFlags" / Int32ul,
"hKMResource" / Int32ul,
"hUMResource" / Int64ul,
"UMResourceMiscFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=15, version=0)
class Microsoft_Windows_Direct3D10_1_15_0(Etw):
pattern = Struct(
"pID3D10Resource" / Int64ul,
"pIDXGISurface" / Int64ul,
"pID3D10_1Device" / Int64ul,
"Dimension" / Int32ul,
"Usage" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"Depth" / Int32ul,
"MipLevels" / Int32ul,
"ArraySize" / Int32ul,
"Format" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"BindFlags" / Int32ul,
"CPUAccessFlags" / Int32ul,
"MiscFlags" / Int32ul,
"hKMResource" / Int32ul,
"hUMResource" / Int64ul,
"UMResourceMiscFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=16, version=0)
class Microsoft_Windows_Direct3D10_1_16_0(Etw):
pattern = Struct(
"pID3D10Resource" / Int64ul,
"pIDXGISurface" / Int64ul,
"pID3D10_1Device" / Int64ul,
"Dimension" / Int32ul,
"Usage" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"Depth" / Int32ul,
"MipLevels" / Int32ul,
"ArraySize" / Int32ul,
"Format" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"BindFlags" / Int32ul,
"CPUAccessFlags" / Int32ul,
"MiscFlags" / Int32ul,
"hKMResource" / Int32ul,
"hUMResource" / Int64ul,
"UMResourceMiscFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=17, version=0)
class Microsoft_Windows_Direct3D10_1_17_0(Etw):
pattern = Struct(
"pID3D10Resource" / Int64ul,
"pIDXGISurface" / Int64ul,
"pID3D10_1Device" / Int64ul,
"Dimension" / Int32ul,
"Usage" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"Depth" / Int32ul,
"MipLevels" / Int32ul,
"ArraySize" / Int32ul,
"Format" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"BindFlags" / Int32ul,
"CPUAccessFlags" / Int32ul,
"MiscFlags" / Int32ul,
"hKMResource" / Int32ul,
"hUMResource" / Int64ul,
"UMResourceMiscFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=18, version=0)
class Microsoft_Windows_Direct3D10_1_18_0(Etw):
pattern = Struct(
"Resources" / Int32ul,
"pIDXGISurfaces" / Int64ul,
"hNewKMResources" / Int32ul
)
| true
| true
|
f7034fac37342e4fdf12c3f5cf813e8c085a1bd0
| 661
|
py
|
Python
|
jassen/project/blog/migrations/0002_auto_20180424_0154.py
|
cabilangan112/intern-blog-plus
|
444b01835967fd5a57cde28823cd5718ff5d6ae4
|
[
"MIT"
] | null | null | null |
jassen/project/blog/migrations/0002_auto_20180424_0154.py
|
cabilangan112/intern-blog-plus
|
444b01835967fd5a57cde28823cd5718ff5d6ae4
|
[
"MIT"
] | null | null | null |
jassen/project/blog/migrations/0002_auto_20180424_0154.py
|
cabilangan112/intern-blog-plus
|
444b01835967fd5a57cde28823cd5718ff5d6ae4
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.4 on 2018-04-24 01:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='post',
old_name='sub_Title',
new_name='sub_title',
),
migrations.RemoveField(
model_name='comment',
name='vote',
),
migrations.AddField(
model_name='comment',
name='text',
field=models.CharField(default=1, max_length=150),
preserve_default=False,
),
]
| 22.793103
| 62
| 0.543116
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='post',
old_name='sub_Title',
new_name='sub_title',
),
migrations.RemoveField(
model_name='comment',
name='vote',
),
migrations.AddField(
model_name='comment',
name='text',
field=models.CharField(default=1, max_length=150),
preserve_default=False,
),
]
| true
| true
|
f7035180b6f7bf910aba924e67a73260d56aec53
| 1,508
|
py
|
Python
|
tests/sentry/interfaces/test_template.py
|
ChadKillingsworth/sentry
|
ffcb9007a95a83ee267935fe605f8ee8f03a85a5
|
[
"BSD-3-Clause"
] | null | null | null |
tests/sentry/interfaces/test_template.py
|
ChadKillingsworth/sentry
|
ffcb9007a95a83ee267935fe605f8ee8f03a85a5
|
[
"BSD-3-Clause"
] | null | null | null |
tests/sentry/interfaces/test_template.py
|
ChadKillingsworth/sentry
|
ffcb9007a95a83ee267935fe605f8ee8f03a85a5
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import mock
from exam import fixture
from sentry.interfaces.template import Template
from sentry.models import Event
from sentry.testutils import TestCase
class TemplateTest(TestCase):
@fixture
def interface(self):
return Template.to_python(dict(
filename='foo.html',
context_line='hello world',
lineno=1,
))
def test_serialize(self):
result = self.interface.to_json()
self.assertEquals(result['filename'], 'foo.html')
self.assertEquals(result['context_line'], 'hello world')
self.assertEquals(result['lineno'], 1)
def test_get_hash(self):
result = self.interface.get_hash()
self.assertEquals(result, ['foo.html', 'hello world'])
@mock.patch('sentry.interfaces.template.get_context')
@mock.patch('sentry.interfaces.template.Template.get_traceback')
def test_to_string_returns_traceback(self, get_traceback, get_context):
get_traceback.return_value = 'traceback'
event = mock.Mock(spec=Event)
result = self.interface.to_string(event)
get_traceback.assert_called_once_with(event, get_context.return_value)
self.assertEquals(result, 'Stacktrace (most recent call last):\n\ntraceback')
def test_serialize_unserialize_behavior(self):
result = type(self.interface).to_python(self.interface.to_json())
assert result.to_json() == self.interface.to_json()
| 33.511111
| 85
| 0.69496
|
from __future__ import absolute_import
import mock
from exam import fixture
from sentry.interfaces.template import Template
from sentry.models import Event
from sentry.testutils import TestCase
class TemplateTest(TestCase):
@fixture
def interface(self):
return Template.to_python(dict(
filename='foo.html',
context_line='hello world',
lineno=1,
))
def test_serialize(self):
result = self.interface.to_json()
self.assertEquals(result['filename'], 'foo.html')
self.assertEquals(result['context_line'], 'hello world')
self.assertEquals(result['lineno'], 1)
def test_get_hash(self):
result = self.interface.get_hash()
self.assertEquals(result, ['foo.html', 'hello world'])
@mock.patch('sentry.interfaces.template.get_context')
@mock.patch('sentry.interfaces.template.Template.get_traceback')
def test_to_string_returns_traceback(self, get_traceback, get_context):
get_traceback.return_value = 'traceback'
event = mock.Mock(spec=Event)
result = self.interface.to_string(event)
get_traceback.assert_called_once_with(event, get_context.return_value)
self.assertEquals(result, 'Stacktrace (most recent call last):\n\ntraceback')
def test_serialize_unserialize_behavior(self):
result = type(self.interface).to_python(self.interface.to_json())
assert result.to_json() == self.interface.to_json()
| true
| true
|
f70351c925858ba94b95ff42be6da2e8a34b5975
| 118
|
py
|
Python
|
dpaste/contribute.py
|
mozilla-it/dpaste-1
|
bfb5015b542dfd074a65588c29038dcb5bfeecea
|
[
"MIT"
] | null | null | null |
dpaste/contribute.py
|
mozilla-it/dpaste-1
|
bfb5015b542dfd074a65588c29038dcb5bfeecea
|
[
"MIT"
] | 3
|
2021-07-20T21:57:32.000Z
|
2021-07-30T15:51:49.000Z
|
dpaste/contribute.py
|
mozilla-it/pastebin
|
bfb5015b542dfd074a65588c29038dcb5bfeecea
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
def contrib_file(request):
return render(request, "dpaste/contribute.html")
| 19.666667
| 52
| 0.779661
|
from django.shortcuts import render
def contrib_file(request):
return render(request, "dpaste/contribute.html")
| true
| true
|
f7035214ae296532a472484c63d4fbdfda9f742d
| 7,045
|
py
|
Python
|
src/ClientEdit.py
|
wendellbrito/ICS
|
fee28c21ac21d11a89b25efa158e1baaa9a5c54d
|
[
"MIT"
] | 2
|
2020-09-12T13:14:01.000Z
|
2020-09-22T12:34:31.000Z
|
src/ClientEdit.py
|
wendellbrito/ICS
|
fee28c21ac21d11a89b25efa158e1baaa9a5c54d
|
[
"MIT"
] | 2
|
2020-10-10T20:07:29.000Z
|
2022-03-19T23:51:42.000Z
|
src/ClientEdit.py
|
wendellbrito/ICS
|
fee28c21ac21d11a89b25efa158e1baaa9a5c54d
|
[
"MIT"
] | null | null | null |
from tkinter import messagebox
from ClientInsert import *
class ClientEdit(ClientInsert):
def __init__(self, db, id_cliente, master):
super().__init__(db, master)
self.title('Editar Cliente')
self.__id_cliente = id_cliente
self.__list = master
table_cliente = db.select("CLIENTE", ["*"],
['id_cliente'], [str(id_cliente)])[0]
table_municipio = db.select("MUNICIPIO",
["id_uf_municipio", "nome_municipio"],
["id_municipio"],
[str(table_cliente["id_municipio_cliente"])])[0]
table_uf = db.select("UF",
["nome_uf"],
["id_uf"],
[str(table_municipio["id_uf_municipio"])])[0]
table_telefone = db.select("TELEFONE",
["numero_telefone", "ddd_telefone"],
["id_cliente_telefone"],
[str(id_cliente)])
telefones = ""
for telefone in table_telefone:
if (telefone['ddd_telefone'] != 0 and
telefone['numero_telefone'] != 0):
telefones += str(telefone['ddd_telefone'])
telefones += str(telefone['numero_telefone'])
self._ClientForm__str_rsocial.set(
table_cliente['rsocial_cliente'])
self._ClientForm__str_nfantasia.set(
table_cliente['nfantasia_cliente'])
self._ClientForm__tracer_cnpj.set(
str(table_cliente['cnpj_cliente']))
self._ClientForm__tracer_iestadual.set(
str(table_cliente['iestadual_cliente']))
self._ClientForm__tracer_imunicipal.set(
str(table_cliente['imunicipal_cliente']))
self._ClientForm__str_logradouro.set(
table_cliente['logradouro_cliente'])
self._ClientForm__str_complemento.set(
table_cliente['complemento_cliente'])
self._ClientForm__tracer_cep.set(
str(table_cliente['cep_cliente']))
self._ClientForm__tracer_telefone.set(
telefones)
celular = str(table_cliente['ddd_cel_cliente'])
celular += str(table_cliente['ncel_cliente'])
self._ClientForm__tracer_ncel.set(
celular)
self._ClientForm__str_bairro.set(
table_cliente['bairro_cliente'])
self._ClientForm__str_email.set(
table_cliente['email_cliente'])
self._ClientForm__str_url.set(
table_cliente['url_cliente'])
self._ClientForm__str_municipio.set(table_municipio["nome_municipio"])
self._ClientForm__str_uf.set(table_uf["nome_uf"])
self._ClientForm__int_whatsapp.set(table_cliente["whatsapp_cliente"])
self._ClientForm__button_salvar.config(
command=self.__button_salvar_action)
for i in range(0, len(self._ClientInsert__list_ufs)):
if self._ClientInsert__list_ufs[i] == table_uf['nome_uf']:
self._ClientForm__combo_uf.current(i)
for i in range(0, len(self._ClientInsert__list_municipios)):
if self._ClientInsert__list_municipios[i] == table_municipio['nome_municipio']:
self._ClientForm__combo_municipio.current(i)
def __button_salvar_action(self):
data = self._ClientInsert__data_validation()
        if data is None:
return
else:
self.__database_update(data)
def __database_update(self, data):
rsocial = data[0]
nfantasia = data[1]
cnpj = data[2]
iestadual = data[3]
imunicipal = data[4]
logradouro = data[5]
complemento = data[6]
bairro = data[7]
municipio = data[8]
uf = data[9]
cep = data[10]
telefone = data[11]
ncel = data[12]
whatsapp = data[13]
email = data[14]
url = data[15]
uf_id = str(self._ClientInsert__db.select("UF",
['id_uf'], ['nome_uf'], [uf])[0]['id_uf'])
municipio_id = self._ClientInsert__db.select("MUNICIPIO",
['id_municipio'],
['nome_municipio', 'id_uf_municipio'],
[municipio, uf_id])
if len(municipio_id) == 0:
self._ClientInsert__db.insert("MUNICIPIO",
['nome_municipio', 'id_uf_municipio'],
[municipio, uf_id])
municipio_id = str(
self._ClientInsert__db.last_insert_id()[0]['LAST_INSERT_ID()'])
else:
municipio_id = str(municipio_id[0]['id_municipio'])
self._ClientInsert__db.update("CLIENTE",
['bairro_cliente',
'cep_cliente',
'rsocial_cliente',
'ncel_cliente',
'ddd_cel_cliente',
'nfantasia_cliente',
'whatsapp_cliente',
'cnpj_cliente',
'iestadual_cliente',
'imunicipal_cliente',
'logradouro_cliente',
'email_cliente',
'complemento_cliente',
'url_cliente',
'id_municipio_cliente'],
[bairro,
cep,
rsocial,
ncel[2:],
ncel[:2],
nfantasia,
whatsapp,
cnpj,
iestadual,
imunicipal,
logradouro,
email,
complemento,
url,
municipio_id],
['id_cliente'],
[str(self.__id_cliente)])
table_telefone_id = self._ClientInsert__db.select("TELEFONE",
['id_telefone'],
['id_cliente_telefone'],
[str(self.__id_cliente)])
self._ClientInsert__db.update("TELEFONE",
['ddd_telefone',
'numero_telefone'],
[self._ClientInsert__ddd_telefone,
self._ClientInsert__number_telefone],
['id_telefone'],
[str(table_telefone_id[0]['id_telefone'])])
self._ClientInsert__db.update("TELEFONE",
['ddd_telefone',
'numero_telefone'],
[self._ClientInsert__ddd_telefone2,
self._ClientInsert__number_telefone2],
['id_telefone'],
[str(table_telefone_id[1]['id_telefone'])])
self._ClientInsert__db.update("TELEFONE",
['ddd_telefone',
'numero_telefone'],
[self._ClientInsert__ddd_telefone3,
self._ClientInsert__number_telefone3],
['id_telefone'],
[str(table_telefone_id[2]['id_telefone'])])
messagebox.showinfo("Informação", "Dados alterados!", parent=self)
self.destroy()
self.__list.filter_client()
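The repeated `self._ClientForm__str_rsocial`-style lookups above rely on Python name mangling: an attribute written as `__x` inside class `C` is stored as `_C__x`, so code outside `C`'s own body, including subclasses like this one, must spell out the mangled name. A minimal sketch of the mechanism, with hypothetical class names:
class Base:
    def __init__(self):
        self.__secret = 1  # stored on the instance as _Base__secret
class Child(Base):
    def read(self):
        # inside Child, self.__secret would mangle to _Child__secret,
        # so the Base-mangled name must be used explicitly
        return self._Base__secret
assert Child().read() == 1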
| 36.128205
| 91
| 0.538964
|
from tkinter import messagebox
from ClientInsert import *
class ClientEdit(ClientInsert):
def __init__(self, db, id_cliente, master):
super().__init__(db, master)
self.title('Editar Cliente')
self.__id_cliente = id_cliente
self.__list = master
table_cliente = db.select("CLIENTE", ["*"],
['id_cliente'], [str(id_cliente)])[0]
table_municipio = db.select("MUNICIPIO",
["id_uf_municipio", "nome_municipio"],
["id_municipio"],
[str(table_cliente["id_municipio_cliente"])])[0]
table_uf = db.select("UF",
["nome_uf"],
["id_uf"],
[str(table_municipio["id_uf_municipio"])])[0]
table_telefone = db.select("TELEFONE",
["numero_telefone", "ddd_telefone"],
["id_cliente_telefone"],
[str(id_cliente)])
telefones = ""
for telefone in table_telefone:
if (telefone['ddd_telefone'] != 0 and
telefone['numero_telefone'] != 0):
telefones += str(telefone['ddd_telefone'])
telefones += str(telefone['numero_telefone'])
self._ClientForm__str_rsocial.set(
table_cliente['rsocial_cliente'])
self._ClientForm__str_nfantasia.set(
table_cliente['nfantasia_cliente'])
self._ClientForm__tracer_cnpj.set(
str(table_cliente['cnpj_cliente']))
self._ClientForm__tracer_iestadual.set(
str(table_cliente['iestadual_cliente']))
self._ClientForm__tracer_imunicipal.set(
str(table_cliente['imunicipal_cliente']))
self._ClientForm__str_logradouro.set(
table_cliente['logradouro_cliente'])
self._ClientForm__str_complemento.set(
table_cliente['complemento_cliente'])
self._ClientForm__tracer_cep.set(
str(table_cliente['cep_cliente']))
self._ClientForm__tracer_telefone.set(
telefones)
celular = str(table_cliente['ddd_cel_cliente'])
celular += str(table_cliente['ncel_cliente'])
self._ClientForm__tracer_ncel.set(
celular)
self._ClientForm__str_bairro.set(
table_cliente['bairro_cliente'])
self._ClientForm__str_email.set(
table_cliente['email_cliente'])
self._ClientForm__str_url.set(
table_cliente['url_cliente'])
self._ClientForm__str_municipio.set(table_municipio["nome_municipio"])
self._ClientForm__str_uf.set(table_uf["nome_uf"])
self._ClientForm__int_whatsapp.set(table_cliente["whatsapp_cliente"])
self._ClientForm__button_salvar.config(
command=self.__button_salvar_action)
for i in range(0, len(self._ClientInsert__list_ufs)):
if self._ClientInsert__list_ufs[i] == table_uf['nome_uf']:
self._ClientForm__combo_uf.current(i)
for i in range(0, len(self._ClientInsert__list_municipios)):
if self._ClientInsert__list_municipios[i] == table_municipio['nome_municipio']:
self._ClientForm__combo_municipio.current(i)
def __button_salvar_action(self):
data = self._ClientInsert__data_validation()
        if data is None:
return
else:
self.__database_update(data)
def __database_update(self, data):
rsocial = data[0]
nfantasia = data[1]
cnpj = data[2]
iestadual = data[3]
imunicipal = data[4]
logradouro = data[5]
complemento = data[6]
bairro = data[7]
municipio = data[8]
uf = data[9]
cep = data[10]
telefone = data[11]
ncel = data[12]
whatsapp = data[13]
email = data[14]
url = data[15]
uf_id = str(self._ClientInsert__db.select("UF",
['id_uf'], ['nome_uf'], [uf])[0]['id_uf'])
municipio_id = self._ClientInsert__db.select("MUNICIPIO",
['id_municipio'],
['nome_municipio', 'id_uf_municipio'],
[municipio, uf_id])
if len(municipio_id) == 0:
self._ClientInsert__db.insert("MUNICIPIO",
['nome_municipio', 'id_uf_municipio'],
[municipio, uf_id])
municipio_id = str(
self._ClientInsert__db.last_insert_id()[0]['LAST_INSERT_ID()'])
else:
municipio_id = str(municipio_id[0]['id_municipio'])
self._ClientInsert__db.update("CLIENTE",
['bairro_cliente',
'cep_cliente',
'rsocial_cliente',
'ncel_cliente',
'ddd_cel_cliente',
'nfantasia_cliente',
'whatsapp_cliente',
'cnpj_cliente',
'iestadual_cliente',
'imunicipal_cliente',
'logradouro_cliente',
'email_cliente',
'complemento_cliente',
'url_cliente',
'id_municipio_cliente'],
[bairro,
cep,
rsocial,
ncel[2:],
ncel[:2],
nfantasia,
whatsapp,
cnpj,
iestadual,
imunicipal,
logradouro,
email,
complemento,
url,
municipio_id],
['id_cliente'],
[str(self.__id_cliente)])
table_telefone_id = self._ClientInsert__db.select("TELEFONE",
['id_telefone'],
['id_cliente_telefone'],
[str(self.__id_cliente)])
self._ClientInsert__db.update("TELEFONE",
['ddd_telefone',
'numero_telefone'],
[self._ClientInsert__ddd_telefone,
self._ClientInsert__number_telefone],
['id_telefone'],
[str(table_telefone_id[0]['id_telefone'])])
self._ClientInsert__db.update("TELEFONE",
['ddd_telefone',
'numero_telefone'],
[self._ClientInsert__ddd_telefone2,
self._ClientInsert__number_telefone2],
['id_telefone'],
[str(table_telefone_id[1]['id_telefone'])])
self._ClientInsert__db.update("TELEFONE",
['ddd_telefone',
'numero_telefone'],
[self._ClientInsert__ddd_telefone3,
self._ClientInsert__number_telefone3],
['id_telefone'],
[str(table_telefone_id[2]['id_telefone'])])
messagebox.showinfo("Informação", "Dados alterados!", parent=self)
self.destroy()
self.__list.filter_client()
| true
| true
|
f703531b591af3d5317bed220eaa477c0403e4d5
| 2,576
|
py
|
Python
|
stock_predictions/web/template.py
|
abakhru/stock_prediction
|
bfb4483ac888bc67e2a8928fdf037d23acbf48f9
|
[
"MIT"
] | 1
|
2020-07-14T09:05:56.000Z
|
2020-07-14T09:05:56.000Z
|
stock_predictions/web/template.py
|
abakhru/stock_prediction
|
bfb4483ac888bc67e2a8928fdf037d23acbf48f9
|
[
"MIT"
] | null | null | null |
stock_predictions/web/template.py
|
abakhru/stock_prediction
|
bfb4483ac888bc67e2a8928fdf037d23acbf48f9
|
[
"MIT"
] | null | null | null |
template = """<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>Title of the document</title>
<script type="text/javascript" src="https://s3.tradingview.com/tv.js"></script>
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/milligram/1.3.0/milligram.min.css">
<style>
.tradingview-widget-container {{
position: sticky;
top: 20px;
}}
.stocks-view {{
display: flex;
flex-wrap: nowrap;
}}
.stocks-listing {{
width: 780px;
flex-wrap: nowrap;
padding: 20px;
}}
.stocks-graph {{
flex-wrap: nowrap;
padding: 20px;
}}
th.sticky-header {{
position: sticky;
top: 0;
z-index: 10;
background-color: white;
}}
.positive-movement {{
color: green;
font-weight: bold;
}}
.negative-movement {{
color: red;
font-weight: bold;
}}
.blue-category {{
background-color: lightsteelblue;
}}
</style>
</head>
<body>
{}
<div class="stocks-view">
<div class="stocks-listing">
<table>
<thead>
<tr>
<th class="sticky-header">Symbol</th>
<th class="sticky-header">April 1 2019</th>
<th class="sticky-header">Dec 2 2019</th>
<th class="sticky-header">Today</th>
<th class="sticky-header">Movement since April 1 2019</th>
<th class="sticky-header">Movement since Dec 2 2019</th>
<th class="sticky-header">Bankruptcy probability</th>
</tr>
</thead>
<tbody>
{}
</tbody>
</table>
</div>
<div class="stocks-graph"
<!-- TradingView Widget BEGIN -->
<div class="tradingview-widget-container">
<div id="tradingview_63a66"></div>
<div class="tradingview-widget-copyright"><a href="https://www.tradingview.com/symbols/AAPL/" rel="noopener" target="_blank"><span class="blue-text">AAPL Chart</span></a> by TradingView</div>
</div>
<!-- TradingView Widget END -->
</div>
</div>
<script type="text/javascript">
function renderChart(symbol) {{
new TradingView.widget(
{{
"width": 750,
"height": 500,
"symbol": symbol,
"interval": "180",
"timezone": "Etc/UTC",
"theme": "light",
"style": "1",
"locale": "en",
"toolbar_bg": "#f1f3f6",
"enable_publishing": false,
"allow_symbol_change": true,
"container_id": "tradingview_63a66"
}}
);
}}
document.addEventListener('DOMContentLoaded', function(){{
renderChart('BA');
}}, false);
</script>
</body>
</html>"""
| 24.533333
| 195
| 0.572593
|
template = """<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>Title of the document</title>
<script type="text/javascript" src="https://s3.tradingview.com/tv.js"></script>
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/milligram/1.3.0/milligram.min.css">
<style>
.tradingview-widget-container {{
position: sticky;
top: 20px;
}}
.stocks-view {{
display: flex;
flex-wrap: nowrap;
}}
.stocks-listing {{
width: 780px;
flex-wrap: nowrap;
padding: 20px;
}}
.stocks-graph {{
flex-wrap: nowrap;
padding: 20px;
}}
th.sticky-header {{
position: sticky;
top: 0;
z-index: 10;
background-color: white;
}}
.positive-movement {{
color: green;
font-weight: bold;
}}
.negative-movement {{
color: red;
font-weight: bold;
}}
.blue-category {{
background-color: lightsteelblue;
}}
</style>
</head>
<body>
{}
<div class="stocks-view">
<div class="stocks-listing">
<table>
<thead>
<tr>
<th class="sticky-header">Symbol</th>
<th class="sticky-header">April 1 2019</th>
<th class="sticky-header">Dec 2 2019</th>
<th class="sticky-header">Today</th>
<th class="sticky-header">Movement since April 1 2019</th>
<th class="sticky-header">Movement since Dec 2 2019</th>
<th class="sticky-header">Bankruptcy probability</th>
</tr>
</thead>
<tbody>
{}
</tbody>
</table>
</div>
<div class="stocks-graph"
<!-- TradingView Widget BEGIN -->
<div class="tradingview-widget-container">
<div id="tradingview_63a66"></div>
<div class="tradingview-widget-copyright"><a href="https://www.tradingview.com/symbols/AAPL/" rel="noopener" target="_blank"><span class="blue-text">AAPL Chart</span></a> by TradingView</div>
</div>
<!-- TradingView Widget END -->
</div>
</div>
<script type="text/javascript">
function renderChart(symbol) {{
new TradingView.widget(
{{
"width": 750,
"height": 500,
"symbol": symbol,
"interval": "180",
"timezone": "Etc/UTC",
"theme": "light",
"style": "1",
"locale": "en",
"toolbar_bg": "#f1f3f6",
"enable_publishing": false,
"allow_symbol_change": true,
"container_id": "tradingview_63a66"
}}
);
}}
document.addEventListener('DOMContentLoaded', function(){{
renderChart('BA');
}}, false);
</script>
</body>
</html>"""
| true
| true
|
f7035407860da263626ea5a54711bf08f2144cde
| 2,342
|
py
|
Python
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/steps/update.py
|
viewdy/phantomjs
|
eddb0db1d253fd0c546060a4555554c8ee08c13c
|
[
"BSD-3-Clause"
] | 1
|
2015-05-27T13:52:20.000Z
|
2015-05-27T13:52:20.000Z
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/steps/update.py
|
mrampersad/phantomjs
|
dca6f77a36699eb4e1c46f7600cca618f01b0ac3
|
[
"BSD-3-Clause"
] | null | null | null |
src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/steps/update.py
|
mrampersad/phantomjs
|
dca6f77a36699eb4e1c46f7600cca618f01b0ac3
|
[
"BSD-3-Clause"
] | 1
|
2017-03-19T13:03:23.000Z
|
2017-03-19T13:03:23.000Z
|
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
from webkitpy.tool.steps.abstractstep import AbstractStep
from webkitpy.tool.steps.options import Options
_log = logging.getLogger(__name__)
class Update(AbstractStep):
@classmethod
def options(cls):
return AbstractStep.options() + [
Options.non_interactive,
Options.update,
Options.quiet,
]
def run(self, state):
if not self._options.update:
return
_log.info("Updating working directory")
self._tool.executive.run_and_throw_if_fail(self._update_command(), quiet=self._options.quiet, cwd=self._tool.scm().checkout_root)
def _update_command(self):
update_command = self._tool.deprecated_port().update_webkit_command(self._options.non_interactive)
return update_command
| 42.581818
| 137
| 0.751921
|
import logging
from webkitpy.tool.steps.abstractstep import AbstractStep
from webkitpy.tool.steps.options import Options
_log = logging.getLogger(__name__)
class Update(AbstractStep):
@classmethod
def options(cls):
return AbstractStep.options() + [
Options.non_interactive,
Options.update,
Options.quiet,
]
def run(self, state):
if not self._options.update:
return
_log.info("Updating working directory")
self._tool.executive.run_and_throw_if_fail(self._update_command(), quiet=self._options.quiet, cwd=self._tool.scm().checkout_root)
def _update_command(self):
update_command = self._tool.deprecated_port().update_webkit_command(self._options.non_interactive)
return update_command
| true
| true
|
f70354f7311915d757b4bd5757b16caa8a12167e
| 255
|
py
|
Python
|
labs/Topic05-data_structures/lab5_02_summer_months.py
|
danielmccallion/pands-problems
|
738c45072f22b2511a5d8fefba8fce2482a1a8cb
|
[
"MIT"
] | null | null | null |
labs/Topic05-data_structures/lab5_02_summer_months.py
|
danielmccallion/pands-problems
|
738c45072f22b2511a5d8fefba8fce2482a1a8cb
|
[
"MIT"
] | null | null | null |
labs/Topic05-data_structures/lab5_02_summer_months.py
|
danielmccallion/pands-problems
|
738c45072f22b2511a5d8fefba8fce2482a1a8cb
|
[
"MIT"
] | null | null | null |
# Daniel Mc Callion
# This program prints the summer months
months = ("January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December")
summer = months[4:7]
for month in summer:
print(month)
| 12.142857
| 39
| 0.654902
|
months = ("January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December")
summer = months[4:7]
for month in summer:
print(month)
| true
| true
|
f7035500220f9f8a16c236d0967d581dfd9a08f8
| 1,277
|
py
|
Python
|
jupiter_orm/jupiter_orm/Field.py
|
dianbaer/fast
|
362bd0ca9f6039ea70cd665438dd8731e4c144a5
|
[
"MIT"
] | 226
|
2017-10-26T12:05:26.000Z
|
2021-12-06T15:14:54.000Z
|
jupiter_orm/jupiter_orm/Field.py
|
dianbaer/fast
|
362bd0ca9f6039ea70cd665438dd8731e4c144a5
|
[
"MIT"
] | null | null | null |
jupiter_orm/jupiter_orm/Field.py
|
dianbaer/fast
|
362bd0ca9f6039ea70cd665438dd8731e4c144a5
|
[
"MIT"
] | 73
|
2017-10-27T12:38:59.000Z
|
2019-12-23T07:29:48.000Z
|
class FieldC():
def __init__(self, name, column_type, primary_key, default):
self.name = name
self.column_type = column_type
self.primary_key = primary_key
self.default = default
def __str__(self):
return '<%s, %s:%s>' % (self.__class__.__name__, self.column_type, self.name)
class StringFieldC(FieldC):
    def __init__(self, name=None, primary_key=False, default=None, ddl='varchar(255)'):
super().__init__(name, ddl, primary_key, default)
class TinyIntFieldC(FieldC):
def __init__(self, name=None, default=0):
super().__init__(name, 'tinyint', False, default)
class IntFieldC(FieldC):
def __init__(self, name=None, primary_key=False, default=0):
super().__init__(name, 'int', primary_key, default)
class BigIntFieldC(FieldC):
def __init__(self, name=None, primary_key=False, default=0):
super().__init__(name, 'bigint', primary_key, default)
class DoubleFieldC(FieldC):
def __init__(self, name=None, primary_key=False, default=0.0):
super().__init__(name, 'double', primary_key, default)
class TextFieldC(FieldC):
def __init__(self, name=None, default=None):
super().__init__(name, 'text', False, default)
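These classes only carry column metadata (name, SQL type, primary-key flag, default); a hypothetical consumer might assemble DDL from them as below (the `fields` mapping and the emitted string are illustrative assumptions, not part of this file):
fields = {
    'id': BigIntFieldC(primary_key=True),
    'name': StringFieldC(),
}
columns = ', '.join(
    '%s %s%s' % (n, f.column_type, ' PRIMARY KEY' if f.primary_key else '')
    for n, f in fields.items()
)
# -> "id bigint PRIMARY KEY, name varchar(255)"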
| 31.925
| 88
| 0.657009
|
class FieldC():
def __init__(self, name, column_type, primary_key, default):
self.name = name
self.column_type = column_type
self.primary_key = primary_key
self.default = default
def __str__(self):
return '<%s, %s:%s>' % (self.__class__.__name__, self.column_type, self.name)
class StringFieldC(FieldC):
    def __init__(self, name=None, primary_key=False, default=None, ddl='varchar(255)'):
super().__init__(name, ddl, primary_key, default)
class TinyIntFieldC(FieldC):
def __init__(self, name=None, default=0):
super().__init__(name, 'tinyint', False, default)
class IntFieldC(FieldC):
def __init__(self, name=None, primary_key=False, default=0):
super().__init__(name, 'int', primary_key, default)
class BigIntFieldC(FieldC):
def __init__(self, name=None, primary_key=False, default=0):
super().__init__(name, 'bigint', primary_key, default)
class DoubleFieldC(FieldC):
def __init__(self, name=None, primary_key=False, default=0.0):
super().__init__(name, 'double', primary_key, default)
class TextFieldC(FieldC):
def __init__(self, name=None, default=None):
super().__init__(name, 'text', False, default)
| true
| true
|
f7035501b1ba8b994d2ab016cc823cd9eb4351cf
| 1,937
|
py
|
Python
|
api/tests/test_user.py
|
josayko/SoftDeskAPI
|
a168abb09b09c5cb5f8f4957d028ebde02f3d128
|
[
"MIT"
] | null | null | null |
api/tests/test_user.py
|
josayko/SoftDeskAPI
|
a168abb09b09c5cb5f8f4957d028ebde02f3d128
|
[
"MIT"
] | null | null | null |
api/tests/test_user.py
|
josayko/SoftDeskAPI
|
a168abb09b09c5cb5f8f4957d028ebde02f3d128
|
[
"MIT"
] | null | null | null |
from django.contrib.auth import get_user_model
from django.test import TestCase
class UsersManagersTests(TestCase):
"""
Test user creation manager
"""
def test_create_user(self):
"""
Creates a new user with email as primary identifier instead of username
"""
User = get_user_model()
user = User.objects.create_user(email="normal@user.com", password="foo")
self.assertEqual(user.email, "normal@user.com")
self.assertTrue(user.is_active)
self.assertFalse(user.is_staff)
self.assertFalse(user.is_superuser)
try:
# username is None for the AbstractUser option
# username does not exist for the AbstractBaseUser option
self.assertIsNone(user.username)
except AttributeError:
pass
with self.assertRaises(TypeError):
User.objects.create_user()
with self.assertRaises(TypeError):
User.objects.create_user(email="")
with self.assertRaises(ValueError):
User.objects.create_user(email="", password="foo")
def test_create_superuser(self):
"""
Creates a superuser with the custom user model
"""
User = get_user_model()
admin_user = User.objects.create_superuser(email="super@user.com", password="foo")
self.assertEqual(admin_user.email, "super@user.com")
self.assertTrue(admin_user.is_active)
self.assertTrue(admin_user.is_staff)
self.assertTrue(admin_user.is_superuser)
try:
# username is None for the AbstractUser option
# username does not exist for the AbstractBaseUser option
self.assertIsNone(admin_user.username)
except AttributeError:
pass
with self.assertRaises(ValueError):
User.objects.create_superuser(email="super@user.com", password="foo", is_superuser=False)
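These tests exercise a custom user model keyed on email; a minimal sketch of the kind of manager they assume (names and defaults are assumptions, not taken from this repository):
from django.contrib.auth.base_user import BaseUserManager
class CustomUserManager(BaseUserManager):
    def create_user(self, email, password, **extra_fields):
        if not email:
            raise ValueError('The email must be set')  # the ValueError case tested above
        user = self.model(email=self.normalize_email(email), **extra_fields)
        user.set_password(password)
        user.save()
        return user
    def create_superuser(self, email, password, **extra_fields):
        extra_fields.setdefault('is_staff', True)
        extra_fields.setdefault('is_superuser', True)
        if extra_fields.get('is_superuser') is not True:
            raise ValueError('Superuser must have is_superuser=True.')  # the final assertion above
        return self.create_user(email, password, **extra_fields)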
| 37.980392
| 101
| 0.649974
|
from django.contrib.auth import get_user_model
from django.test import TestCase
class UsersManagersTests(TestCase):
def test_create_user(self):
User = get_user_model()
user = User.objects.create_user(email="normal@user.com", password="foo")
self.assertEqual(user.email, "normal@user.com")
self.assertTrue(user.is_active)
self.assertFalse(user.is_staff)
self.assertFalse(user.is_superuser)
try:
self.assertIsNone(user.username)
except AttributeError:
pass
with self.assertRaises(TypeError):
User.objects.create_user()
with self.assertRaises(TypeError):
User.objects.create_user(email="")
with self.assertRaises(ValueError):
User.objects.create_user(email="", password="foo")
def test_create_superuser(self):
User = get_user_model()
admin_user = User.objects.create_superuser(email="super@user.com", password="foo")
self.assertEqual(admin_user.email, "super@user.com")
self.assertTrue(admin_user.is_active)
self.assertTrue(admin_user.is_staff)
self.assertTrue(admin_user.is_superuser)
try:
self.assertIsNone(admin_user.username)
except AttributeError:
pass
with self.assertRaises(ValueError):
User.objects.create_superuser(email="super@user.com", password="foo", is_superuser=False)
| true
| true
|
f7035541b25714f015c602c461c950ce0dc803b9
| 4,098
|
py
|
Python
|
src/transfer_rna_identification.py
|
simoncchu/xRNAGlass
|
603f4a7909e307f510365172ab9a8f912e80591d
|
[
"MIT"
] | null | null | null |
src/transfer_rna_identification.py
|
simoncchu/xRNAGlass
|
603f4a7909e307f510365172ab9a8f912e80591d
|
[
"MIT"
] | null | null | null |
src/transfer_rna_identification.py
|
simoncchu/xRNAGlass
|
603f4a7909e307f510365172ab9a8f912e80591d
|
[
"MIT"
] | null | null | null |
import pysam
from optparse import OptionParser
from x_gene_annotation import *
class mRNA_Transfer():
def call_transfer_mut(self, sf_rna, sf_dna_up, sf_dna_bottom, sf_candidate):
m_rna_vars = self.load_variants(sf_rna)
m_DNA_RNA_ovlp_vars = self.get_overlap_variants(sf_dna_bottom, m_rna_vars)
m_candidates = self.get_mut_exclusive_var(sf_dna_up, m_DNA_RNA_ovlp_vars)
self.get_sub_var(sf_rna, m_candidates, sf_candidate)
def load_variants(self, sf_vcfFile):
vcf = pysam.VariantFile(sf_vcfFile)
m_variants = {}
for unique_id, var in enumerate(vcf.fetch()):
chrm = var.chrom
start = var.pos
s_id = chrm + "~" + str(start) + "~" + str(var.ref) + "_" + str(var.alts[0])
m_variants[s_id] = 1
return m_variants
def get_overlap_variants(self, sf_vcfFile, m_existing_vars):
vcf = pysam.VariantFile(sf_vcfFile)
m_variants = {}
for unique_id, var in enumerate(vcf.fetch()):
chrm = var.chrom
start = var.pos
s_id = chrm + "~" + str(start) + "~" + str(var.ref) + "_" + str(var.alts[0])
if s_id in m_existing_vars:
m_variants[s_id] = 1
return m_variants
def get_mut_exclusive_var(self, sf_vcfFile, m_existing_vars):
vcf = pysam.VariantFile(sf_vcfFile)
m_variants = {}
for unique_id, var in enumerate(vcf.fetch()):
chrm = var.chrom
start = var.pos
s_id = chrm + "~" + str(start) + "~" + str(var.ref) + "_" + str(var.alts[0])
# if s_id not in m_existing_vars:
m_variants[s_id] = 1
m_mut_exc_var = {}
for s_id in m_existing_vars:
if s_id not in m_variants:
m_mut_exc_var[s_id] = 1
return m_mut_exc_var
def get_sub_var(self, sf_vcfFile, m_existing_vars, sf_sub_vcf):
vcf = pysam.VariantFile(sf_vcfFile)
vcf_out = pysam.VariantFile(sf_sub_vcf, 'w', header=vcf.header)
for unique_id, var in enumerate(vcf.fetch()):
chrm = var.chrom
start = var.pos
s_id = chrm + "~" + str(start) + "~" + str(var.ref) + "_" + str(var.alts[0])
if s_id in m_existing_vars:
vcf_out.write(var)
####
####
##parse the options:
def parse_option():
parser = OptionParser()
parser.add_option("-p", "--path", dest="wfolder", type="string",
help="Working folder")
parser.add_option("--gene", dest="gene", default="",
help="Gene Annotation file", metavar="FILE")
parser.add_option("--rna", dest="rna",
help="RNA mutation vcf file ", metavar="FILE")
parser.add_option("--phase", dest="phase",
help="Mutation phasing", metavar="FILE")
parser.add_option("--dna_up", dest="dna_up",
help="DNA mutation file of scion", metavar="FILE")
parser.add_option("--dna_bottom", dest="dna_bottom",
help="DNA mutation file of root ", metavar="FILE")
parser.add_option("-c", dest="cutoff", type="int", default=0,
help="cutoff of minimum supporting reads")
parser.add_option("-o", "--output", dest="output",
help="candidate mutation file", metavar="FILE")
(options, args) = parser.parse_args()
return (options, args)
####
if __name__ == '__main__':
(options, args) = parse_option()
sf_rna_mut=options.rna
sf_dna_up=options.dna_up
sf_dna_bottom=options.dna_bottom
sf_candidates=options.output
rna_transfer=mRNA_Transfer()
rna_transfer.call_transfer_mut(sf_rna_mut, sf_dna_up, sf_dna_bottom, sf_candidates)
sf_gene_annotation = options.gene
UP_DOWN_GENE=1500
if sf_gene_annotation !="":
gff = GFF3(sf_gene_annotation)
iextnd = UP_DOWN_GENE
gff.load_gene_annotation_with_extnd(iextnd)
gff.index_gene_annotation_interval_tree()
gff.annotate_results(sf_candidates, sf_candidates+".with_gene_annotation")
####
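Variants are keyed as plain 'chrom~pos~ref_alt' strings throughout, so membership checks across VCFs reduce to dict lookups; e.g. a record at chr1:12345 with REF 'A' and ALT 'G' (hypothetical values) becomes:
s_id = "chr1" + "~" + str(12345) + "~" + "A" + "_" + "G"  # -> 'chr1~12345~A_G'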
| 39.786408
| 88
| 0.60859
|
import pysam
from optparse import OptionParser
from x_gene_annotation import *
class mRNA_Transfer():
def call_transfer_mut(self, sf_rna, sf_dna_up, sf_dna_bottom, sf_candidate):
m_rna_vars = self.load_variants(sf_rna)
m_DNA_RNA_ovlp_vars = self.get_overlap_variants(sf_dna_bottom, m_rna_vars)
m_candidates = self.get_mut_exclusive_var(sf_dna_up, m_DNA_RNA_ovlp_vars)
self.get_sub_var(sf_rna, m_candidates, sf_candidate)
def load_variants(self, sf_vcfFile):
vcf = pysam.VariantFile(sf_vcfFile)
m_variants = {}
for unique_id, var in enumerate(vcf.fetch()):
chrm = var.chrom
start = var.pos
s_id = chrm + "~" + str(start) + "~" + str(var.ref) + "_" + str(var.alts[0])
m_variants[s_id] = 1
return m_variants
def get_overlap_variants(self, sf_vcfFile, m_existing_vars):
vcf = pysam.VariantFile(sf_vcfFile)
m_variants = {}
for unique_id, var in enumerate(vcf.fetch()):
chrm = var.chrom
start = var.pos
s_id = chrm + "~" + str(start) + "~" + str(var.ref) + "_" + str(var.alts[0])
if s_id in m_existing_vars:
m_variants[s_id] = 1
return m_variants
def get_mut_exclusive_var(self, sf_vcfFile, m_existing_vars):
vcf = pysam.VariantFile(sf_vcfFile)
m_variants = {}
for unique_id, var in enumerate(vcf.fetch()):
chrm = var.chrom
start = var.pos
s_id = chrm + "~" + str(start) + "~" + str(var.ref) + "_" + str(var.alts[0])
m_variants[s_id] = 1
m_mut_exc_var = {}
for s_id in m_existing_vars:
if s_id not in m_variants:
m_mut_exc_var[s_id] = 1
return m_mut_exc_var
def get_sub_var(self, sf_vcfFile, m_existing_vars, sf_sub_vcf):
vcf = pysam.VariantFile(sf_vcfFile)
vcf_out = pysam.VariantFile(sf_sub_vcf, 'w', header=vcf.header)
for unique_id, var in enumerate(vcf.fetch()):
chrm = var.chrom
start = var.pos
s_id = chrm + "~" + str(start) + "~" + str(var.ref) + "_" + str(var.alts[0])
if s_id in m_existing_vars:
vcf_out.write(var)
def parse_option():
parser = OptionParser()
parser.add_option("-p", "--path", dest="wfolder", type="string",
help="Working folder")
parser.add_option("--gene", dest="gene", default="",
help="Gene Annotation file", metavar="FILE")
parser.add_option("--rna", dest="rna",
help="RNA mutation vcf file ", metavar="FILE")
parser.add_option("--phase", dest="phase",
help="Mutation phasing", metavar="FILE")
parser.add_option("--dna_up", dest="dna_up",
help="DNA mutation file of scion", metavar="FILE")
parser.add_option("--dna_bottom", dest="dna_bottom",
help="DNA mutation file of root ", metavar="FILE")
parser.add_option("-c", dest="cutoff", type="int", default=0,
help="cutoff of minimum supporting reads")
parser.add_option("-o", "--output", dest="output",
help="candidate mutation file", metavar="FILE")
(options, args) = parser.parse_args()
return (options, args)
if __name__ == '__main__':
(options, args) = parse_option()
sf_rna_mut=options.rna
sf_dna_up=options.dna_up
sf_dna_bottom=options.dna_bottom
sf_candidates=options.output
rna_transfer=mRNA_Transfer()
rna_transfer.call_transfer_mut(sf_rna_mut, sf_dna_up, sf_dna_bottom, sf_candidates)
sf_gene_annotation = options.gene
UP_DOWN_GENE=1500
if sf_gene_annotation !="":
gff = GFF3(sf_gene_annotation)
iextnd = UP_DOWN_GENE
gff.load_gene_annotation_with_extnd(iextnd)
gff.index_gene_annotation_interval_tree()
gff.annotate_results(sf_candidates, sf_candidates+".with_gene_annotation")
| true
| true
|
f703555c16bc4c3f133e0bfe1c68b46fa0864538
| 2,049
|
py
|
Python
|
nipype/interfaces/niftyfit/tests/test_asl.py
|
abelalez/nipype
|
878271bd906768f11c4cabd04e5d1895551ce8a7
|
[
"Apache-2.0"
] | 8
|
2019-05-29T09:38:30.000Z
|
2021-01-20T03:36:59.000Z
|
nipype/interfaces/niftyfit/tests/test_asl.py
|
abelalez/nipype
|
878271bd906768f11c4cabd04e5d1895551ce8a7
|
[
"Apache-2.0"
] | 12
|
2021-03-09T03:01:16.000Z
|
2022-03-11T23:59:36.000Z
|
nipype/interfaces/niftyfit/tests/test_asl.py
|
abelalez/nipype
|
878271bd906768f11c4cabd04e5d1895551ce8a7
|
[
"Apache-2.0"
] | 2
|
2017-09-23T16:22:00.000Z
|
2019-08-01T14:18:52.000Z
|
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import pytest
from ....testing import example_data
from ...niftyreg import get_custom_path
from ..asl import FitAsl
from ...niftyreg.tests.test_regutils import no_nifty_tool
@pytest.mark.skipif(
no_nifty_tool(cmd='fit_asl'), reason="niftyfit is not installed")
def test_fit_asl():
""" Testing FitAsl interface."""
# Create the test node
fit_asl = FitAsl()
# Check if the command is properly defined
cmd = get_custom_path('fit_asl', env_dir='NIFTYFIT_DIR')
assert fit_asl.cmd == cmd
# test raising error with mandatory args absent
with pytest.raises(ValueError):
fit_asl.run()
# Tests on the interface:
# Runs cbf fitting assuming all tissue is GM!
in_file = example_data('asl.nii.gz')
fit_asl.inputs.source_file = in_file
cmd_tmp = '{cmd} -source {in_file} -cbf {cbf} -error {error} -syn {syn}'
expected_cmd = cmd_tmp.format(
cmd=cmd,
in_file=in_file,
cbf='asl_cbf.nii.gz',
error='asl_error.nii.gz',
syn='asl_syn.nii.gz',
)
assert fit_asl.cmdline == expected_cmd
# Runs cbf fitting using IR/SR T1 data to estimate the local T1 and uses
# the segmentation data to fit tissue specific blood flow parameters
# (lambda,transit times,T1)
fit_asl2 = FitAsl(sig=True)
in_file = example_data('asl.nii.gz')
t1map = example_data('T1map.nii.gz')
seg = example_data('segmentation0.nii.gz')
fit_asl2.inputs.source_file = in_file
fit_asl2.inputs.t1map = t1map
fit_asl2.inputs.seg = seg
cmd_tmp = '{cmd} -source {in_file} -cbf {cbf} -error {error} \
-seg {seg} -sig -syn {syn} -t1map {t1map}'
expected_cmd = cmd_tmp.format(
cmd=cmd,
in_file=in_file,
t1map=t1map,
seg=seg,
cbf='asl_cbf.nii.gz',
error='asl_error.nii.gz',
syn='asl_syn.nii.gz',
)
assert fit_asl2.cmdline == expected_cmd
| 29.271429
| 76
| 0.652513
|
import pytest
from ....testing import example_data
from ...niftyreg import get_custom_path
from ..asl import FitAsl
from ...niftyreg.tests.test_regutils import no_nifty_tool
@pytest.mark.skipif(
no_nifty_tool(cmd='fit_asl'), reason="niftyfit is not installed")
def test_fit_asl():
fit_asl = FitAsl()
cmd = get_custom_path('fit_asl', env_dir='NIFTYFIT_DIR')
assert fit_asl.cmd == cmd
with pytest.raises(ValueError):
fit_asl.run()
in_file = example_data('asl.nii.gz')
fit_asl.inputs.source_file = in_file
cmd_tmp = '{cmd} -source {in_file} -cbf {cbf} -error {error} -syn {syn}'
expected_cmd = cmd_tmp.format(
cmd=cmd,
in_file=in_file,
cbf='asl_cbf.nii.gz',
error='asl_error.nii.gz',
syn='asl_syn.nii.gz',
)
assert fit_asl.cmdline == expected_cmd
fit_asl2 = FitAsl(sig=True)
in_file = example_data('asl.nii.gz')
t1map = example_data('T1map.nii.gz')
seg = example_data('segmentation0.nii.gz')
fit_asl2.inputs.source_file = in_file
fit_asl2.inputs.t1map = t1map
fit_asl2.inputs.seg = seg
cmd_tmp = '{cmd} -source {in_file} -cbf {cbf} -error {error} \
-seg {seg} -sig -syn {syn} -t1map {t1map}'
expected_cmd = cmd_tmp.format(
cmd=cmd,
in_file=in_file,
t1map=t1map,
seg=seg,
cbf='asl_cbf.nii.gz',
error='asl_error.nii.gz',
syn='asl_syn.nii.gz',
)
assert fit_asl2.cmdline == expected_cmd
| true
| true
|
f70355a4ebb7124ef003a7bb016d5179dd150016
| 3,341
|
py
|
Python
|
tflite-onnx/onnx_tflite/tflite/DimensionMetadata.py
|
jwj04ok/ONNX_Convertor
|
067a17e16dfc8aa80e36f44c4523959daf7359f5
|
[
"MIT"
] | 193
|
2017-12-20T16:46:20.000Z
|
2022-03-29T07:40:54.000Z
|
tinyengine/tflite/DimensionMetadata.py
|
liuyy3364/mcunet
|
f53f9e20e8e912bdb111b4c32da75e71e9a59597
|
[
"Apache-2.0"
] | 141
|
2017-12-21T08:00:20.000Z
|
2021-06-15T14:53:03.000Z
|
tinyengine/tflite/DimensionMetadata.py
|
liuyy3364/mcunet
|
f53f9e20e8e912bdb111b4c32da75e71e9a59597
|
[
"Apache-2.0"
] | 55
|
2017-12-22T18:40:13.000Z
|
2022-01-17T05:43:51.000Z
|
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: tflite
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class DimensionMetadata(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsDimensionMetadata(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = DimensionMetadata()
x.Init(buf, n + offset)
return x
@classmethod
def DimensionMetadataBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
# DimensionMetadata
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# DimensionMetadata
def Format(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
return 0
# DimensionMetadata
def DenseSize(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
# DimensionMetadata
def ArraySegmentsType(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
return 0
# DimensionMetadata
def ArraySegments(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
from flatbuffers.table import Table
obj = Table(bytearray(), 0)
self._tab.Union(obj, o)
return obj
return None
# DimensionMetadata
def ArrayIndicesType(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
return 0
# DimensionMetadata
def ArrayIndices(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
from flatbuffers.table import Table
obj = Table(bytearray(), 0)
self._tab.Union(obj, o)
return obj
return None
def DimensionMetadataStart(builder): builder.StartObject(6)
def DimensionMetadataAddFormat(builder, format): builder.PrependInt8Slot(0, format, 0)
def DimensionMetadataAddDenseSize(builder, denseSize): builder.PrependInt32Slot(1, denseSize, 0)
def DimensionMetadataAddArraySegmentsType(builder, arraySegmentsType): builder.PrependUint8Slot(2, arraySegmentsType, 0)
def DimensionMetadataAddArraySegments(builder, arraySegments): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(arraySegments), 0)
def DimensionMetadataAddArrayIndicesType(builder, arrayIndicesType): builder.PrependUint8Slot(4, arrayIndicesType, 0)
def DimensionMetadataAddArrayIndices(builder, arrayIndices): builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(arrayIndices), 0)
def DimensionMetadataEnd(builder): return builder.EndObject()
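The generated accessors read lazily from the underlying buffer instead of materialising an object graph; a minimal consumption sketch, assuming `buf` already holds a serialized table of this type:
meta = DimensionMetadata.GetRootAsDimensionMetadata(buf, 0)
fmt = meta.Format()      # DimensionType enum value from the TFLite schema
size = meta.DenseSize()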
| 40.253012
| 167
| 0.70877
|
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class DimensionMetadata(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsDimensionMetadata(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = DimensionMetadata()
x.Init(buf, n + offset)
return x
@classmethod
def DimensionMetadataBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
def Format(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
return 0
def DenseSize(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
def ArraySegmentsType(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
return 0
def ArraySegments(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
from flatbuffers.table import Table
obj = Table(bytearray(), 0)
self._tab.Union(obj, o)
return obj
return None
def ArrayIndicesType(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
return 0
def ArrayIndices(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
from flatbuffers.table import Table
obj = Table(bytearray(), 0)
self._tab.Union(obj, o)
return obj
return None
def DimensionMetadataStart(builder): builder.StartObject(6)
def DimensionMetadataAddFormat(builder, format): builder.PrependInt8Slot(0, format, 0)
def DimensionMetadataAddDenseSize(builder, denseSize): builder.PrependInt32Slot(1, denseSize, 0)
def DimensionMetadataAddArraySegmentsType(builder, arraySegmentsType): builder.PrependUint8Slot(2, arraySegmentsType, 0)
def DimensionMetadataAddArraySegments(builder, arraySegments): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(arraySegments), 0)
def DimensionMetadataAddArrayIndicesType(builder, arrayIndicesType): builder.PrependUint8Slot(4, arrayIndicesType, 0)
def DimensionMetadataAddArrayIndices(builder, arrayIndices): builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(arrayIndices), 0)
def DimensionMetadataEnd(builder): return builder.EndObject()
| true
| true
|
f70356c78dbf90e925826ccdd072b16ddefaaec6
| 529
|
py
|
Python
|
openslides/agenda/personal_info.py
|
DerPate/OpenSlides
|
2733a47d315fec9b8f3cb746fd5f3739be225d65
|
[
"MIT"
] | 1
|
2015-03-22T02:07:23.000Z
|
2015-03-22T02:07:23.000Z
|
openslides/agenda/personal_info.py
|
frauenknecht/OpenSlides
|
6521d6b095bca33dc0c5f09f59067551800ea1e3
|
[
"MIT"
] | null | null | null |
openslides/agenda/personal_info.py
|
frauenknecht/OpenSlides
|
6521d6b095bca33dc0c5f09f59067551800ea1e3
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy
from openslides.utils.personal_info import PersonalInfo
from .models import Item
class AgendaPersonalInfo(PersonalInfo):
"""
Class for personal info block for the agenda app.
"""
headline = ugettext_lazy('I am on the list of speakers of the following items')
default_weight = 10
def get_queryset(self):
return Item.objects.filter(
speaker__person=self.request.user,
speaker__begin_time=None)
| 25.190476
| 83
| 0.705104
|
from django.utils.translation import ugettext_lazy
from openslides.utils.personal_info import PersonalInfo
from .models import Item
class AgendaPersonalInfo(PersonalInfo):
headline = ugettext_lazy('I am on the list of speakers of the following items')
default_weight = 10
def get_queryset(self):
return Item.objects.filter(
speaker__person=self.request.user,
speaker__begin_time=None)
| true
| true
|
f703583cefb84f5df202bae943b7c97370e8d38a
| 1,104
|
py
|
Python
|
exercises/test_06_01.py
|
jp2321/dl4cv_dev
|
c8d7dd74d4b591681101770ff2c3603637974e72
|
[
"MIT"
] | 1
|
2022-03-28T12:37:04.000Z
|
2022-03-28T12:37:04.000Z
|
exercises/test_06_01.py
|
jp2321/dl4cv_dev
|
c8d7dd74d4b591681101770ff2c3603637974e72
|
[
"MIT"
] | null | null | null |
exercises/test_06_01.py
|
jp2321/dl4cv_dev
|
c8d7dd74d4b591681101770ff2c3603637974e72
|
[
"MIT"
] | 1
|
2020-04-25T04:40:34.000Z
|
2020-04-25T04:40:34.000Z
|
def test():
from tensorflow.keras import datasets
assert model.get_layer("class_prediction").get_config()["units"]==43, "Check the number of output classes"
assert model.get_layer("class_prediction").get_config()["activation"]=="softmax", "Check your activation function"
assert model.output[0].name== 'class_prediction/Identity:0', "How does the output look like?"
assert model.output[2].name== 'y1_prediction/Identity:0', "How does the output look like?"
assert model.output[3].name== 'x2_prediction/Identity:0', "How does the output look like?"
assert model.output[4].name== 'y2_prediction/Identity:0', "How does the output look like?"
assert model.get_layer("y1_prediction").get_config()["units"]==1, "Check the number of outputs"
assert model.get_layer("x2_prediction").get_config()["units"]==1, "Check the number of outputs"
assert model.get_layer("y2_prediction").get_config()["units"]==1, "Check the number of outputs"
assert model.get_layer("x1_prediction").get_config()["units"]==1, "Check the number of outputs"
__msg__.good("WELL DONE!")
| 69
| 118
| 0.719203
|
def test():
from tensorflow.keras import datasets
assert model.get_layer("class_prediction").get_config()["units"]==43, "Check the number of output classes"
assert model.get_layer("class_prediction").get_config()["activation"]=="softmax", "Check your activation function"
assert model.output[0].name== 'class_prediction/Identity:0', "How does the output look like?"
assert model.output[2].name== 'y1_prediction/Identity:0', "How does the output look like?"
assert model.output[3].name== 'x2_prediction/Identity:0', "How does the output look like?"
assert model.output[4].name== 'y2_prediction/Identity:0', "How does the output look like?"
assert model.get_layer("y1_prediction").get_config()["units"]==1, "Check the number of outputs"
assert model.get_layer("x2_prediction").get_config()["units"]==1, "Check the number of outputs"
assert model.get_layer("y2_prediction").get_config()["units"]==1, "Check the number of outputs"
assert model.get_layer("x1_prediction").get_config()["units"]==1, "Check the number of outputs"
__msg__.good("WELL DONE!")
| true
| true
|
f7035a76ad270c53ba05886eaab94d613aec6fa7
| 699
|
py
|
Python
|
backend/authentication/serializers.py
|
Zoki92/Calories-Counter
|
4b10c5661869c5a0645e59abb434ca26fd497f37
|
[
"MIT"
] | null | null | null |
backend/authentication/serializers.py
|
Zoki92/Calories-Counter
|
4b10c5661869c5a0645e59abb434ca26fd497f37
|
[
"MIT"
] | 6
|
2021-03-10T17:12:50.000Z
|
2022-03-02T08:30:05.000Z
|
backend/authentication/serializers.py
|
Zoki92/Calories-Counter
|
4b10c5661869c5a0645e59abb434ca26fd497f37
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from django.contrib.auth import get_user_model
from .models import CustomUser
User = get_user_model()
class TokenSerializer(serializers.Serializer):
"""
This serializer serializes the token data
"""
access = serializers.CharField(max_length=255)
refresh = serializers.CharField(max_length=255)
class UserSerializer(serializers.ModelSerializer):
"""
Serializes User class
"""
class Meta:
model = User
fields = "__all__"
def create(self, validated_data):
user = User(email=validated_data["email"])
user.set_password(validated_data['password'])
user.save()
return user
| 24.103448
| 53
| 0.695279
|
from rest_framework import serializers
from django.contrib.auth import get_user_model
from .models import CustomUser
User = get_user_model()
class TokenSerializer(serializers.Serializer):
access = serializers.CharField(max_length=255)
refresh = serializers.CharField(max_length=255)
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = "__all__"
def create(self, validated_data):
user = User(email=validated_data["email"])
user.set_password(validated_data['password'])
user.save()
return user
| true
| true
|
f7035accd560e6660402bbd8023959cb49a7b842
| 2,512
|
py
|
Python
|
setup.py
|
ccthien/jaeger-client-python
|
51d9027c3e0b87f6721dd14e7bc2b3303ce682c2
|
[
"Apache-2.0"
] | 2
|
2020-05-24T11:39:13.000Z
|
2021-02-17T19:52:41.000Z
|
setup.py
|
ccthien/jaeger-client-python
|
51d9027c3e0b87f6721dd14e7bc2b3303ce682c2
|
[
"Apache-2.0"
] | 4
|
2020-12-03T16:06:36.000Z
|
2020-12-06T02:15:42.000Z
|
setup.py
|
ccthien/jaeger-client-python
|
51d9027c3e0b87f6721dd14e7bc2b3303ce682c2
|
[
"Apache-2.0"
] | 3
|
2018-10-23T23:04:20.000Z
|
2019-03-15T15:50:17.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
from setuptools import setup, find_packages
version = None
with open('jaeger_client/__init__.py', 'r') as f:
for line in f:
m = re.match(r'^__version__\s*=\s*(["\'])([^"\']+)\1', line)
if m:
version = m.group(2)
break
assert version is not None, \
'Could not determine version number from jaeger_client/__init__.py'
setup(
name='jaeger-client',
version=version,
url='https://github.com/jaegertracing/jaeger-client-python',
description='Jaeger Python OpenTracing Tracer implementation',
author='Yuri Shkuro',
author_email='ys@uber.com',
packages=find_packages(exclude=['crossdock', 'tests', 'example', 'tests.*']),
include_package_data=True,
license='Apache License 2.0',
zip_safe=False,
keywords='jaeger, tracing, opentracing',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
install_requires=[
'threadloop>=1,<2',
'thrift',
'tornado>=4.3',
'opentracing>=2.1,<3.0',
],
# Uncomment below if need to test with unreleased version of opentracing
# dependency_links=[
# 'git+ssh://git@github.com/opentracing/opentracing-python.git@BRANCHNAME#egg=opentracing',
# ],
test_suite='tests',
extras_require={
':python_version<"3"': [
'futures',
],
'tests': [
'mock==1.0.1',
'pycurl>=7.43,<8',
# pinned to avoid RemovedInPytest4Warning
'pytest>=3.7.0,<3.8.0',
'pytest-cov==2.5.1',
'coverage<4.4', # can remove after https://bitbucket.org/ned/coveragepy/issues/581/44b1-44-breaking-in-ci
'pytest-timeout==1.3.1',
'pytest-tornado',
# pin <3.2 as otherwise it requires pytest>=3.8
'pytest-benchmark[histogram]>=3.0.0rc1,<3.2',
'pytest-localserver',
'flake8',
'flake8-quotes',
'codecov',
'tchannel>=0.27;python_version=="2.7"', # This is only used in python 2
'opentracing_instrumentation>=3,<4',
'prometheus_client==0.3.1',
]
},
)
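The version is scraped with a regex rather than imported, so `setup.py` works before the package's dependencies are installed; the pattern matches a line such as the hypothetical one below, with the `\1` backreference forcing the closing quote to match the opening one:
import re
m = re.match(r'^__version__\s*=\s*(["\'])([^"\']+)\1', "__version__ = '4.0.0'")
assert m.group(2) == '4.0.0'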
| 33.493333
| 118
| 0.574841
|
import re
from setuptools import setup, find_packages
version = None
with open('jaeger_client/__init__.py', 'r') as f:
for line in f:
m = re.match(r'^__version__\s*=\s*(["\'])([^"\']+)\1', line)
if m:
version = m.group(2)
break
assert version is not None, \
'Could not determine version number from jaeger_client/__init__.py'
setup(
name='jaeger-client',
version=version,
url='https://github.com/jaegertracing/jaeger-client-python',
description='Jaeger Python OpenTracing Tracer implementation',
author='Yuri Shkuro',
author_email='ys@uber.com',
packages=find_packages(exclude=['crossdock', 'tests', 'example', 'tests.*']),
include_package_data=True,
license='Apache License 2.0',
zip_safe=False,
keywords='jaeger, tracing, opentracing',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
install_requires=[
'threadloop>=1,<2',
'thrift',
'tornado>=4.3',
'opentracing>=2.1,<3.0',
],
test_suite='tests',
extras_require={
':python_version<"3"': [
'futures',
],
'tests': [
'mock==1.0.1',
'pycurl>=7.43,<8',
'pytest>=3.7.0,<3.8.0',
'pytest-cov==2.5.1',
'coverage<4.4', 'pytest-timeout==1.3.1',
'pytest-tornado',
'pytest-benchmark[histogram]>=3.0.0rc1,<3.2',
'pytest-localserver',
'flake8',
'flake8-quotes',
'codecov',
'tchannel>=0.27;python_version=="2.7"', 'opentracing_instrumentation>=3,<4',
'prometheus_client==0.3.1',
]
},
)
| true
| true
|
f7035af3b56b3d69f5dea99514b6a6f709912518
| 118,762
|
py
|
Python
|
nova/virt/libvirt/config.py
|
Pushparajkvp/nova
|
c80d0be3f4ff34d2a5b4ee60379cc8481dc7a0b7
|
[
"Apache-2.0"
] | 1
|
2021-06-10T17:08:14.000Z
|
2021-06-10T17:08:14.000Z
|
nova/virt/libvirt/config.py
|
Pushparajkvp/nova
|
c80d0be3f4ff34d2a5b4ee60379cc8481dc7a0b7
|
[
"Apache-2.0"
] | null | null | null |
nova/virt/libvirt/config.py
|
Pushparajkvp/nova
|
c80d0be3f4ff34d2a5b4ee60379cc8481dc7a0b7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2012-2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Configuration for libvirt objects.
Classes to represent the configuration of various libvirt objects
and support conversion to/from XML. These classes are solely concerned
by providing direct Object <-> XML document conversions. No policy or
operational decisions should be made by code in these classes. Such
policy belongs in the 'designer.py' module which provides simplified
helpers for populating up config object instances.
"""
import time
from collections import OrderedDict
from lxml import etree
from oslo_utils import strutils
from oslo_utils import units
from nova import exception
from nova.i18n import _
from nova.pci import utils as pci_utils
from nova.virt import hardware
# Namespace to use for Nova specific metadata items in XML
NOVA_NS = "http://openstack.org/xmlns/libvirt/nova/1.0"
class LibvirtConfigObject(object):
def __init__(self, **kwargs):
super(LibvirtConfigObject, self).__init__()
self.root_name = kwargs.get("root_name")
self.ns_prefix = kwargs.get('ns_prefix')
self.ns_uri = kwargs.get('ns_uri')
def _new_node(self, node_name, **kwargs):
if self.ns_uri is None:
return etree.Element(node_name, **kwargs)
else:
return etree.Element("{" + self.ns_uri + "}" + node_name,
nsmap={self.ns_prefix: self.ns_uri},
**kwargs)
def _text_node(self, node_name, value, **kwargs):
child = self._new_node(node_name, **kwargs)
child.text = str(value)
return child
def format_dom(self):
return self._new_node(self.root_name)
def parse_str(self, xmlstr):
self.parse_dom(etree.fromstring(xmlstr))
def parse_dom(self, xmldoc):
if self.root_name != xmldoc.tag:
msg = (_("Root element name should be '%(name)s' not '%(tag)s'") %
{'name': self.root_name, 'tag': xmldoc.tag})
raise exception.InvalidInput(msg)
def to_xml(self, pretty_print=True):
root = self.format_dom()
xml_str = etree.tostring(root, encoding='unicode',
pretty_print=pretty_print)
return xml_str
class LibvirtConfigCaps(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCaps, self).__init__(root_name="capabilities",
**kwargs)
self.host = None
self.guests = []
def parse_dom(self, xmldoc):
super(LibvirtConfigCaps, self).parse_dom(xmldoc)
for c in xmldoc:
if c.tag == "host":
host = LibvirtConfigCapsHost()
host.parse_dom(c)
self.host = host
elif c.tag == "guest":
guest = LibvirtConfigCapsGuest()
guest.parse_dom(c)
self.guests.append(guest)
def format_dom(self):
caps = super(LibvirtConfigCaps, self).format_dom()
if self.host:
caps.append(self.host.format_dom())
for g in self.guests:
caps.append(g.format_dom())
return caps
class LibvirtConfigDomainCaps(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigDomainCaps, self).__init__(
root_name="domainCapabilities", **kwargs)
self._features = None
self._machine = None
self._alias = None
self._devices = None
def parse_dom(self, xmldoc):
super(LibvirtConfigDomainCaps, self).parse_dom(xmldoc)
for c in xmldoc:
if c.tag == "features":
features = LibvirtConfigDomainCapsFeatures()
features.parse_dom(c)
self._features = features
elif c.tag == "machine":
self._machine = c.text
elif c.tag == "devices":
devices = LibvirtConfigDomainCapsDevices()
devices.parse_dom(c)
self._devices = devices
@property
def features(self):
if self._features is None:
return []
return self._features.features
@property
def machine_type(self):
if self._machine is None:
return ""
return self._machine
@property
def machine_type_alias(self):
if self._alias is None:
return self._machine
return self._alias
@machine_type_alias.setter
def machine_type_alias(self, alias):
self._alias = alias
@property
def devices(self):
if self._devices is None:
return []
return self._devices
class LibvirtConfigDomainCapsVideoModels(LibvirtConfigObject):
def __init__(self, **kwargs):
super().__init__(root_name='video', **kwargs)
self.supported = False
self.models = set()
def parse_dom(self, xmldoc):
super().parse_dom(xmldoc)
if xmldoc.get('supported') == 'yes':
self.supported = True
self.models = {str(node) for node in
xmldoc.xpath("//enum[@name='modelType']/value/text()")}
class LibvirtConfigDomainCapsDiskBuses(LibvirtConfigObject):
def __init__(self, **kwargs):
super().__init__(root_name='disk', **kwargs)
self.supported = False
self.buses = set()
def parse_dom(self, xmldoc):
super(LibvirtConfigDomainCapsDiskBuses, self).parse_dom(xmldoc)
if xmldoc.get('supported') == 'yes':
self.supported = True
self.buses = {str(node) for node in
xmldoc.xpath("//enum[@name='bus']/value/text()")}
class LibvirtConfigDomainCapsDevices(LibvirtConfigObject):
DEVICE_PARSERS = {
'video': LibvirtConfigDomainCapsVideoModels,
'disk': LibvirtConfigDomainCapsDiskBuses,
}
def __init__(self, **kwargs):
super().__init__(root_name='devices', **kwargs)
self.devices = set()
def parse_dom(self, xmldoc):
super().parse_dom(xmldoc)
for c in list(xmldoc):
device = self.DEVICE_PARSERS.get(c.tag)
if device:
device = device()
device.parse_dom(c)
self.devices.add(device)
def _get_device(self, device_type):
for device in self.devices:
if type(device) == self.DEVICE_PARSERS.get(device_type):
return device
return None
@property
def disk(self):
return self._get_device('disk')
@property
def video(self):
return self._get_device('video')
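# Example (illustrative): parsing a <devices> fragment of the shape found
# in domainCapabilities XML. The disk/video properties then expose the
# per-device capability objects:
#
#   devices = LibvirtConfigDomainCapsDevices()
#   devices.parse_dom(etree.fromstring(
#       "<devices>"
#       "<disk supported='yes'>"
#       "<enum name='bus'><value>virtio</value></enum></disk>"
#       "<video supported='yes'>"
#       "<enum name='modelType'><value>vga</value></enum></video>"
#       "</devices>"))
#   devices.disk.buses        # -> {'virtio'}
#   devices.video.models      # -> {'vga'}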
class LibvirtConfigDomainCapsFeatures(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigDomainCapsFeatures, self).__init__(
root_name="features", **kwargs)
self.features = []
def parse_dom(self, xmldoc):
super(LibvirtConfigDomainCapsFeatures, self).parse_dom(xmldoc)
for c in xmldoc:
feature = None
if c.tag == "sev":
feature = LibvirtConfigDomainCapsFeatureSev()
if feature:
feature.parse_dom(c)
self.features.append(feature)
# There are many other features and domain capabilities,
# but we don't need to regenerate the XML (it's read-only
# data provided by libvirtd), so there's no point parsing
# them until we actually need their values.
# For the same reason, we do not need a format_dom() method, but
# it's a bug if this ever gets called and we inherited one from
# the base class, so override that to watch out for accidental
# calls.
def format_dom(self):
raise RuntimeError(_('BUG: tried to generate domainCapabilities XML'))
class LibvirtConfigDomainCapsFeatureSev(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigDomainCapsFeatureSev, self).__init__(
root_name='sev', **kwargs)
self.supported = False
self.cbitpos = None
self.reduced_phys_bits = None
def parse_dom(self, xmldoc):
super(LibvirtConfigDomainCapsFeatureSev, self).parse_dom(xmldoc)
if xmldoc.get('supported') == 'yes':
self.supported = True
for c in list(xmldoc):
if c.tag == 'reducedPhysBits':
self.reduced_phys_bits = int(c.text)
elif c.tag == 'cbitpos':
self.cbitpos = int(c.text)
class LibvirtConfigCapsNUMATopology(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCapsNUMATopology, self).__init__(
root_name="topology",
**kwargs)
self.cells = []
def parse_dom(self, xmldoc):
super(LibvirtConfigCapsNUMATopology, self).parse_dom(xmldoc)
xmlcells = xmldoc[0]
for xmlcell in xmlcells:
cell = LibvirtConfigCapsNUMACell()
cell.parse_dom(xmlcell)
self.cells.append(cell)
def format_dom(self):
topo = super(LibvirtConfigCapsNUMATopology, self).format_dom()
cells = etree.Element("cells")
cells.set("num", str(len(self.cells)))
topo.append(cells)
for cell in self.cells:
cells.append(cell.format_dom())
return topo
class LibvirtConfigCapsNUMACell(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCapsNUMACell, self).__init__(root_name="cell",
**kwargs)
self.id = None
self.memory = 0
self.mempages = []
self.cpus = []
def parse_dom(self, xmldoc):
super(LibvirtConfigCapsNUMACell, self).parse_dom(xmldoc)
self.id = int(xmldoc.get("id"))
for c in xmldoc:
if c.tag == "memory":
self.memory = int(c.text)
elif c.tag == "pages":
pages = LibvirtConfigCapsNUMAPages()
pages.parse_dom(c)
self.mempages.append(pages)
elif c.tag == "cpus":
for c2 in c:
cpu = LibvirtConfigCapsNUMACPU()
cpu.parse_dom(c2)
self.cpus.append(cpu)
def format_dom(self):
cell = super(LibvirtConfigCapsNUMACell, self).format_dom()
cell.set("id", str(self.id))
mem = etree.Element("memory")
mem.set("unit", "KiB")
mem.text = str(self.memory)
cell.append(mem)
for pages in self.mempages:
cell.append(pages.format_dom())
cpus = etree.Element("cpus")
cpus.set("num", str(len(self.cpus)))
for cpu in self.cpus:
cpus.append(cpu.format_dom())
cell.append(cpus)
return cell
class LibvirtConfigCapsNUMACPU(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCapsNUMACPU, self).__init__(root_name="cpu",
**kwargs)
self.id = None
self.socket_id = None
self.core_id = None
self.siblings = None
def parse_dom(self, xmldoc):
super(LibvirtConfigCapsNUMACPU, self).parse_dom(xmldoc)
self.id = int(xmldoc.get("id"))
if xmldoc.get("socket_id") is not None:
self.socket_id = int(xmldoc.get("socket_id"))
if xmldoc.get("core_id") is not None:
self.core_id = int(xmldoc.get("core_id"))
if xmldoc.get("siblings") is not None:
self.siblings = hardware.parse_cpu_spec(
xmldoc.get("siblings"))
def format_dom(self):
cpu = super(LibvirtConfigCapsNUMACPU, self).format_dom()
cpu.set("id", str(self.id))
if self.socket_id is not None:
cpu.set("socket_id", str(self.socket_id))
if self.core_id is not None:
cpu.set("core_id", str(self.core_id))
if self.siblings is not None:
cpu.set("siblings",
hardware.format_cpu_spec(self.siblings))
return cpu
class LibvirtConfigCapsNUMAPages(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCapsNUMAPages, self).__init__(
root_name="pages", **kwargs)
self.size = None
self.total = None
def parse_dom(self, xmldoc):
super(LibvirtConfigCapsNUMAPages, self).parse_dom(xmldoc)
self.size = int(xmldoc.get("size"))
self.total = int(xmldoc.text)
def format_dom(self):
pages = super(LibvirtConfigCapsNUMAPages, self).format_dom()
pages.text = str(self.total)
pages.set("size", str(self.size))
pages.set("unit", "KiB")
return pages
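# Example (illustrative): parsing a single NUMA <cell> element of the
# shape found under <topology><cells> in capabilities XML:
#
#   cell = LibvirtConfigCapsNUMACell()
#   cell.parse_dom(etree.fromstring(
#       "<cell id='0'>"
#       "<memory unit='KiB'>1048576</memory>"
#       "<pages unit='KiB' size='4'>262144</pages>"
#       "<cpus num='2'>"
#       "<cpu id='0' socket_id='0' core_id='0' siblings='0-1'/>"
#       "<cpu id='1' socket_id='0' core_id='1' siblings='0-1'/>"
#       "</cpus></cell>"))
#   cell.memory                # -> 1048576
#   cell.mempages[0].size      # -> 4
#   [c.id for c in cell.cpus]  # -> [0, 1]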
class LibvirtConfigCapsHost(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCapsHost, self).__init__(root_name="host",
**kwargs)
self.cpu = None
self.uuid = None
self.topology = None
def parse_dom(self, xmldoc):
super(LibvirtConfigCapsHost, self).parse_dom(xmldoc)
for c in xmldoc:
if c.tag == "cpu":
cpu = LibvirtConfigCPU()
cpu.parse_dom(c)
self.cpu = cpu
elif c.tag == "uuid":
self.uuid = c.text
elif c.tag == "topology":
self.topology = LibvirtConfigCapsNUMATopology()
self.topology.parse_dom(c)
def format_dom(self):
caps = super(LibvirtConfigCapsHost, self).format_dom()
if self.uuid:
caps.append(self._text_node("uuid", self.uuid))
if self.cpu:
caps.append(self.cpu.format_dom())
if self.topology:
caps.append(self.topology.format_dom())
return caps
class LibvirtConfigCapsGuest(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCapsGuest, self).__init__(root_name="guest",
**kwargs)
self.arch = None
self.ostype = None
# Map domain types such as 'qemu' and 'kvm' to
# LibvirtConfigCapsGuestDomain instances.
self.domains = OrderedDict()
self.default_domain = None
def parse_dom(self, xmldoc):
super(LibvirtConfigCapsGuest, self).parse_dom(xmldoc)
for child in xmldoc:
if child.tag == "os_type":
self.ostype = child.text
elif child.tag == "arch":
self.parse_arch(child)
def parse_arch(self, xmldoc):
self.arch = xmldoc.get("name")
# NOTE(aspiers): The data relating to each <domain> element
# under <arch> (such as <emulator> and many <machine>
# elements) is structured in a slightly odd way. There is one
# "default" domain such as
#
# <domain type='qemu'/>
#
# which has no child elements, and all its data is provided in
# sibling elements. Then others such as
#
# <domain type='kvm'>
#
# will have their <emulator> and <machine> elements as
# children. So we need to handle the two cases separately.
self.default_domain = LibvirtConfigCapsGuestDomain()
for child in xmldoc:
if child.tag == "domain":
if list(child):
# This domain has children, so create a new instance,
# parse it, and register it in the dict of domains.
domain = LibvirtConfigCapsGuestDomain()
domain.parse_dom(child)
self.domains[domain.domtype] = domain
else:
# This is the childless <domain/> element for the
# default domain
self.default_domain.parse_domain(child)
self.domains[self.default_domain.domtype] = \
self.default_domain
else:
# Sibling element of the default domain
self.default_domain.parse_child(child)
def format_dom(self):
caps = super(LibvirtConfigCapsGuest, self).format_dom()
if self.ostype is not None:
caps.append(self._text_node("os_type", self.ostype))
if self.arch:
arch = self.format_arch()
caps.append(arch)
return caps
def format_arch(self):
arch = etree.Element("arch", name=self.arch)
for c in self.default_domain.format_dom():
arch.append(c)
arch.append(self._new_node("domain", type=self.default_domain.domtype))
for domtype, domain in self.domains.items():
if domtype == self.default_domain.domtype:
# We've already added this domain at the top level
continue
arch.append(domain.format_dom())
return arch
class LibvirtConfigCapsGuestDomain(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCapsGuestDomain, self).__init__(
root_name="domain", **kwargs)
self.domtype = None
# Track <emulator> values, which we need in order to be able
# to call virConnectGetDomainCapabilities() - typically
# something like '/usr/bin/qemu-system-i386'.
self.emulator = None
self.machines = {}
self.aliases = {}
def parse_dom(self, xmldoc):
super(LibvirtConfigCapsGuestDomain, self).parse_dom(xmldoc)
self.parse_domain(xmldoc)
for c in xmldoc:
self.parse_child(c)
def parse_child(self, xmldoc):
if xmldoc.tag == "emulator":
self.emulator = xmldoc.text
elif xmldoc.tag == "machine":
self.parse_machine(xmldoc)
def parse_domain(self, xmldoc):
self.domtype = xmldoc.get("type")
if self.domtype is None:
            raise exception.InvalidInput(
                _("Didn't find domain type in %s") % xmldoc.tag)
def parse_machine(self, xmldoc):
if 'canonical' in xmldoc.attrib:
self.aliases[xmldoc.text] = xmldoc.attrib
else:
self.machines[xmldoc.text] = xmldoc.attrib
def format_dom(self):
domain = super(LibvirtConfigCapsGuestDomain, self).format_dom()
if self.domtype is not None:
domain.set("type", self.domtype)
if self.emulator is not None:
domain.append(self._text_node("emulator", self.emulator))
for mach_type, machine in self.machines.items():
domain.append(self._text_node("machine", mach_type, **machine))
for alias, machine in self.aliases.items():
domain.append(self._text_node("machine", alias, **machine))
return domain
class LibvirtConfigGuestTimer(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestTimer, self).__init__(root_name="timer",
**kwargs)
self.name = "platform"
self.track = None
self.tickpolicy = None
self.present = None
def format_dom(self):
tm = super(LibvirtConfigGuestTimer, self).format_dom()
tm.set("name", self.name)
if self.track is not None:
tm.set("track", self.track)
if self.tickpolicy is not None:
tm.set("tickpolicy", self.tickpolicy)
if self.present is not None:
if self.present:
tm.set("present", "yes")
else:
tm.set("present", "no")
return tm
class LibvirtConfigGuestClock(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestClock, self).__init__(root_name="clock",
**kwargs)
self.offset = "utc"
self.adjustment = None
self.timezone = None
self.timers = []
def format_dom(self):
clk = super(LibvirtConfigGuestClock, self).format_dom()
clk.set("offset", self.offset)
if self.adjustment:
clk.set("adjustment", self.adjustment)
elif self.timezone:
clk.set("timezone", self.timezone)
for tm in self.timers:
clk.append(tm.format_dom())
return clk
def add_timer(self, tm):
self.timers.append(tm)
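# Example (illustrative): building a guest <clock> with one timer:
#
#   clk = LibvirtConfigGuestClock()
#   tm = LibvirtConfigGuestTimer()
#   tm.name = "rtc"
#   tm.tickpolicy = "catchup"
#   clk.add_timer(tm)
#   clk.to_xml()
#   # -> <clock offset="utc">
#   #      <timer name="rtc" tickpolicy="catchup"/>
#   #    </clock>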
class LibvirtConfigCPUFeature(LibvirtConfigObject):
def __init__(self, name=None, **kwargs):
super(LibvirtConfigCPUFeature, self).__init__(root_name='feature',
**kwargs)
self.name = name
self.policy = "require"
def parse_dom(self, xmldoc):
super(LibvirtConfigCPUFeature, self).parse_dom(xmldoc)
self.name = xmldoc.get("name")
self.policy = xmldoc.get("policy", "require")
def format_dom(self):
ft = super(LibvirtConfigCPUFeature, self).format_dom()
ft.set("name", self.name)
return ft
def __eq__(self, obj):
return obj.name == self.name
def __ne__(self, obj):
return obj.name != self.name
def __hash__(self):
return hash(self.name)
class LibvirtConfigCPU(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCPU, self).__init__(root_name='cpu',
**kwargs)
self.arch = None
self.vendor = None
self.model = None
self.sockets = None
self.cores = None
self.threads = None
self.features = set()
def parse_dom(self, xmldoc):
super(LibvirtConfigCPU, self).parse_dom(xmldoc)
for c in xmldoc:
if c.tag == "arch":
self.arch = c.text
elif c.tag == "model":
self.model = c.text
elif c.tag == "vendor":
self.vendor = c.text
elif c.tag == "topology":
self.sockets = int(c.get("sockets"))
self.cores = int(c.get("cores"))
self.threads = int(c.get("threads"))
elif c.tag == "feature":
f = LibvirtConfigCPUFeature()
f.parse_dom(c)
if f.policy != "disable":
self.add_feature(f)
def format_dom(self):
cpu = super(LibvirtConfigCPU, self).format_dom()
if self.arch is not None:
cpu.append(self._text_node("arch", self.arch))
if self.model is not None:
cpu.append(self._text_node("model", self.model))
if self.vendor is not None:
cpu.append(self._text_node("vendor", self.vendor))
if (self.sockets is not None and
self.cores is not None and
self.threads is not None):
top = etree.Element("topology")
top.set("sockets", str(self.sockets))
top.set("cores", str(self.cores))
top.set("threads", str(self.threads))
cpu.append(top)
# sorting the features to allow more predictable tests
for f in sorted(self.features, key=lambda x: x.name):
if f.policy != "disable":
cpu.append(f.format_dom())
return cpu
def add_feature(self, feat):
self.features.add(feat)
class LibvirtConfigGuestCPUFeature(LibvirtConfigCPUFeature):
def __init__(self, name=None, **kwargs):
super(LibvirtConfigGuestCPUFeature, self).__init__(name, **kwargs)
self.policy = "require"
def format_dom(self):
ft = super(LibvirtConfigGuestCPUFeature, self).format_dom()
ft.set("policy", self.policy)
return ft
class LibvirtConfigGuestCPUNUMACell(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestCPUNUMACell, self).__init__(root_name="cell",
**kwargs)
self.id = None
self.cpus = None
self.memory = None
self.memAccess = None
def parse_dom(self, xmldoc):
if xmldoc.get("id") is not None:
self.id = int(xmldoc.get("id"))
if xmldoc.get("memory") is not None:
self.memory = int(xmldoc.get("memory"))
if xmldoc.get("cpus") is not None:
self.cpus = hardware.parse_cpu_spec(xmldoc.get("cpus"))
self.memAccess = xmldoc.get("memAccess")
def format_dom(self):
cell = super(LibvirtConfigGuestCPUNUMACell, self).format_dom()
if self.id is not None:
cell.set("id", str(self.id))
if self.cpus is not None:
cell.set("cpus",
hardware.format_cpu_spec(self.cpus))
if self.memory is not None:
cell.set("memory", str(self.memory))
if self.memAccess is not None:
cell.set("memAccess", self.memAccess)
return cell
class LibvirtConfigGuestCPUNUMA(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestCPUNUMA, self).__init__(root_name="numa",
**kwargs)
self.cells = []
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestCPUNUMA, self).parse_dom(xmldoc)
for child in xmldoc:
if child.tag == "cell":
cell = LibvirtConfigGuestCPUNUMACell()
cell.parse_dom(child)
self.cells.append(cell)
def format_dom(self):
numa = super(LibvirtConfigGuestCPUNUMA, self).format_dom()
for cell in self.cells:
numa.append(cell.format_dom())
return numa
class LibvirtConfigGuestCPU(LibvirtConfigCPU):
def __init__(self, **kwargs):
super(LibvirtConfigGuestCPU, self).__init__(**kwargs)
self.mode = None
self.match = "exact"
self.numa = None
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestCPU, self).parse_dom(xmldoc)
self.mode = xmldoc.get('mode')
self.match = xmldoc.get('match')
for child in xmldoc:
if child.tag == "numa":
numa = LibvirtConfigGuestCPUNUMA()
numa.parse_dom(child)
self.numa = numa
def format_dom(self):
cpu = super(LibvirtConfigGuestCPU, self).format_dom()
if self.mode:
cpu.set("mode", self.mode)
cpu.set("match", self.match)
if self.numa is not None:
cpu.append(self.numa.format_dom())
return cpu
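# Example (illustrative): a guest <cpu> element with a topology and one
# extra feature. Features are emitted sorted by name, and the guest
# subclass adds the policy attribute:
#
#   cpu = LibvirtConfigGuestCPU()
#   cpu.mode = "host-model"
#   cpu.sockets, cpu.cores, cpu.threads = 1, 2, 2
#   cpu.add_feature(LibvirtConfigGuestCPUFeature("pdpe1gb"))
#   cpu.to_xml()
#   # -> <cpu mode="host-model" match="exact">
#   #      <topology sockets="1" cores="2" threads="2"/>
#   #      <feature name="pdpe1gb" policy="require"/>
#   #    </cpu>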
class LibvirtConfigGuestSMBIOS(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestSMBIOS, self).__init__(root_name="smbios",
**kwargs)
self.mode = "sysinfo"
def format_dom(self):
smbios = super(LibvirtConfigGuestSMBIOS, self).format_dom()
smbios.set("mode", self.mode)
return smbios
class LibvirtConfigGuestSysinfo(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestSysinfo, self).__init__(root_name="sysinfo",
**kwargs)
self.type = "smbios"
self.bios_vendor = None
self.bios_version = None
self.system_manufacturer = None
self.system_product = None
self.system_version = None
self.system_serial = None
self.system_uuid = None
self.system_family = None
def format_dom(self):
sysinfo = super(LibvirtConfigGuestSysinfo, self).format_dom()
sysinfo.set("type", self.type)
bios = etree.Element("bios")
system = etree.Element("system")
if self.bios_vendor is not None:
bios.append(self._text_node("entry", self.bios_vendor,
name="vendor"))
if self.bios_version is not None:
bios.append(self._text_node("entry", self.bios_version,
name="version"))
if self.system_manufacturer is not None:
system.append(self._text_node("entry", self.system_manufacturer,
name="manufacturer"))
if self.system_product is not None:
system.append(self._text_node("entry", self.system_product,
name="product"))
if self.system_version is not None:
system.append(self._text_node("entry", self.system_version,
name="version"))
if self.system_serial is not None:
system.append(self._text_node("entry", self.system_serial,
name="serial"))
if self.system_uuid is not None:
system.append(self._text_node("entry", self.system_uuid,
name="uuid"))
if self.system_family is not None:
system.append(self._text_node("entry", self.system_family,
name="family"))
if len(list(bios)) > 0:
sysinfo.append(bios)
if len(list(system)) > 0:
sysinfo.append(system)
return sysinfo
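# Example (illustrative): SMBIOS system information serialised by
# LibvirtConfigGuestSysinfo. Only the entries that were set end up in
# the output, and the empty <bios> element is omitted entirely:
#
#   sysinfo = LibvirtConfigGuestSysinfo()
#   sysinfo.system_manufacturer = "OpenStack Foundation"
#   sysinfo.system_product = "OpenStack Nova"
#   sysinfo.to_xml()
#   # -> <sysinfo type="smbios">
#   #      <system>
#   #        <entry name="manufacturer">OpenStack Foundation</entry>
#   #        <entry name="product">OpenStack Nova</entry>
#   #      </system>
#   #    </sysinfo>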
class LibvirtConfigGuestDevice(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestDevice, self).__init__(**kwargs)
@property
def uses_virtio(self):
return False
class LibvirtConfigGuestVTPM(LibvirtConfigGuestDevice):
def __init__(self, vtpm_config, vtpm_secret_uuid, **kwargs):
super(LibvirtConfigGuestVTPM, self).__init__(root_name="tpm", **kwargs)
self.version = vtpm_config.version
self.model = vtpm_config.model
self.secret_uuid = vtpm_secret_uuid
def format_dom(self):
# <tpm model='$model'>
dev = super(LibvirtConfigGuestVTPM, self).format_dom()
dev.set("model", self.model)
# <backend type='emulator' version='$version'>
back = etree.Element("backend")
back.set("type", "emulator")
back.set("version", self.version)
# <encryption secret='$secret_uuid'/>
enc = etree.Element("encryption")
enc.set("secret", self.secret_uuid)
back.append(enc)
dev.append(back)
return dev
class LibvirtConfigGuestDisk(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestDisk, self).__init__(root_name="disk",
**kwargs)
self.source_type = "file"
self.source_device = "disk"
self.driver_name = None
self.driver_format = None
self.driver_cache = None
self.driver_discard = None
self.driver_io = None
self.driver_iommu = False
self.source_path = None
self.source_protocol = None
self.source_name = None
self.source_hosts = []
self.source_ports = []
self.target_dev = None
self.target_path = None
self.target_bus = None
self.auth_username = None
self.auth_secret_type = None
self.auth_secret_uuid = None
self.serial = None
self.disk_read_bytes_sec = None
self.disk_read_iops_sec = None
self.disk_write_bytes_sec = None
self.disk_write_iops_sec = None
self.disk_total_bytes_sec = None
self.disk_total_iops_sec = None
self.disk_read_bytes_sec_max = None
self.disk_write_bytes_sec_max = None
self.disk_total_bytes_sec_max = None
self.disk_read_iops_sec_max = None
self.disk_write_iops_sec_max = None
self.disk_total_iops_sec_max = None
self.disk_size_iops_sec = None
self.logical_block_size = None
self.physical_block_size = None
self.readonly = False
self.shareable = False
self.snapshot = None
self.backing_store = None
self.device_addr = None
self.boot_order = None
self.mirror = None
self.encryption = None
def _format_iotune(self, dev):
iotune = etree.Element("iotune")
if self.disk_read_bytes_sec is not None:
iotune.append(self._text_node("read_bytes_sec",
self.disk_read_bytes_sec))
if self.disk_read_iops_sec is not None:
iotune.append(self._text_node("read_iops_sec",
self.disk_read_iops_sec))
if self.disk_write_bytes_sec is not None:
iotune.append(self._text_node("write_bytes_sec",
self.disk_write_bytes_sec))
if self.disk_write_iops_sec is not None:
iotune.append(self._text_node("write_iops_sec",
self.disk_write_iops_sec))
if self.disk_total_bytes_sec is not None:
iotune.append(self._text_node("total_bytes_sec",
self.disk_total_bytes_sec))
if self.disk_total_iops_sec is not None:
iotune.append(self._text_node("total_iops_sec",
self.disk_total_iops_sec))
if self.disk_read_bytes_sec_max is not None:
iotune.append(self._text_node("read_bytes_sec_max",
self.disk_read_bytes_sec_max))
if self.disk_write_bytes_sec_max is not None:
iotune.append(self._text_node("write_bytes_sec_max",
self.disk_write_bytes_sec_max))
if self.disk_total_bytes_sec_max is not None:
iotune.append(self._text_node("total_bytes_sec_max",
self.disk_total_bytes_sec_max))
if self.disk_read_iops_sec_max is not None:
iotune.append(self._text_node("read_iops_sec_max",
self.disk_read_iops_sec_max))
if self.disk_write_iops_sec_max is not None:
iotune.append(self._text_node("write_iops_sec_max",
self.disk_write_iops_sec_max))
if self.disk_total_iops_sec_max is not None:
iotune.append(self._text_node("total_iops_sec_max",
self.disk_total_iops_sec_max))
if self.disk_size_iops_sec is not None:
iotune.append(self._text_node("size_iops_sec",
self.disk_size_iops_sec))
if len(iotune) > 0:
dev.append(iotune)
@property
def uses_virtio(self):
return 'virtio' == self.target_bus
def format_dom(self):
dev = super(LibvirtConfigGuestDisk, self).format_dom()
dev.set("type", self.source_type)
dev.set("device", self.source_device)
if any((self.driver_name, self.driver_format, self.driver_cache,
self.driver_discard, self.driver_iommu)):
drv = etree.Element("driver")
if self.driver_name is not None:
drv.set("name", self.driver_name)
if self.driver_format is not None:
drv.set("type", self.driver_format)
if self.driver_cache is not None:
drv.set("cache", self.driver_cache)
if self.driver_discard is not None:
drv.set("discard", self.driver_discard)
if self.driver_io is not None:
drv.set("io", self.driver_io)
if self.driver_iommu:
drv.set("iommu", "on")
dev.append(drv)
if self.source_type == "file":
dev.append(etree.Element("source", file=self.source_path))
elif self.source_type == "block":
dev.append(etree.Element("source", dev=self.source_path))
elif self.source_type == "mount":
dev.append(etree.Element("source", dir=self.source_path))
elif self.source_type == "network" and self.source_protocol:
source = etree.Element("source", protocol=self.source_protocol)
if self.source_name is not None:
source.set('name', self.source_name)
hosts_info = zip(self.source_hosts, self.source_ports)
for name, port in hosts_info:
host = etree.Element('host', name=name)
if port is not None:
host.set('port', port)
source.append(host)
dev.append(source)
if self.auth_secret_type is not None:
auth = etree.Element("auth")
auth.set("username", self.auth_username)
auth.append(etree.Element("secret", type=self.auth_secret_type,
uuid=self.auth_secret_uuid))
dev.append(auth)
if self.source_type == "mount":
dev.append(etree.Element("target", dir=self.target_path))
else:
dev.append(etree.Element("target", dev=self.target_dev,
bus=self.target_bus))
if self.serial is not None:
dev.append(self._text_node("serial", self.serial))
self._format_iotune(dev)
# Block size tuning
if (self.logical_block_size is not None or
self.physical_block_size is not None):
blockio = etree.Element("blockio")
if self.logical_block_size is not None:
blockio.set('logical_block_size', self.logical_block_size)
if self.physical_block_size is not None:
blockio.set('physical_block_size', self.physical_block_size)
dev.append(blockio)
if self.readonly:
dev.append(etree.Element("readonly"))
if self.shareable:
dev.append(etree.Element("shareable"))
if self.boot_order:
dev.append(etree.Element("boot", order=self.boot_order))
if self.device_addr:
dev.append(self.device_addr.format_dom())
if self.encryption:
dev.append(self.encryption.format_dom())
return dev
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestDisk, self).parse_dom(xmldoc)
self.source_type = xmldoc.get('type')
self.snapshot = xmldoc.get('snapshot')
for c in xmldoc:
if c.tag == 'driver':
self.driver_name = c.get('name')
self.driver_format = c.get('type')
self.driver_cache = c.get('cache')
self.driver_discard = c.get('discard')
self.driver_io = c.get('io')
self.driver_iommu = c.get('iommu', '') == "on"
elif c.tag == 'source':
if self.source_type == 'file':
self.source_path = c.get('file')
elif self.source_type == 'block':
self.source_path = c.get('dev')
elif self.source_type == 'mount':
self.source_path = c.get('dir')
elif self.source_type == 'network':
self.source_protocol = c.get('protocol')
self.source_name = c.get('name')
for sub in c:
if sub.tag == 'host':
self.source_hosts.append(sub.get('name'))
self.source_ports.append(sub.get('port'))
elif c.tag == 'serial':
self.serial = c.text
elif c.tag == 'target':
if self.source_type == 'mount':
self.target_path = c.get('dir')
else:
self.target_dev = c.get('dev')
self.target_bus = c.get('bus', None)
elif c.tag == 'backingStore':
b = LibvirtConfigGuestDiskBackingStore()
b.parse_dom(c)
self.backing_store = b
elif c.tag == 'readonly':
self.readonly = True
elif c.tag == 'shareable':
self.shareable = True
elif c.tag == 'address':
obj = LibvirtConfigGuestDeviceAddress.parse_dom(c)
self.device_addr = obj
elif c.tag == 'boot':
self.boot_order = c.get('order')
elif c.tag == 'mirror':
m = LibvirtConfigGuestDiskMirror()
m.parse_dom(c)
self.mirror = m
elif c.tag == 'encryption':
e = LibvirtConfigGuestDiskEncryption()
e.parse_dom(c)
self.encryption = e
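# Example (illustrative): a file-backed virtio disk built with
# LibvirtConfigGuestDisk; the source path below is hypothetical.
# driver/source/target are the minimum needed for a local disk:
#
#   disk = LibvirtConfigGuestDisk()
#   disk.source_path = "/var/lib/nova/instances/demo/disk"
#   disk.driver_name, disk.driver_format = "qemu", "qcow2"
#   disk.target_dev, disk.target_bus = "vda", "virtio"
#   disk.to_xml()
#   # -> <disk type="file" device="disk">
#   #      <driver name="qemu" type="qcow2"/>
#   #      <source file="/var/lib/nova/instances/demo/disk"/>
#   #      <target dev="vda" bus="virtio"/>
#   #    </disk>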
class LibvirtConfigGuestDiskBackingStore(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestDiskBackingStore, self).__init__(
root_name="backingStore", **kwargs)
self.index = None
self.source_type = None
self.source_file = None
self.source_protocol = None
self.source_name = None
self.source_hosts = []
self.source_ports = []
self.driver_name = None
self.driver_format = None
self.backing_store = None
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestDiskBackingStore, self).parse_dom(xmldoc)
self.source_type = xmldoc.get('type')
self.index = xmldoc.get('index')
for c in xmldoc:
if c.tag == 'driver':
self.driver_name = c.get('name')
self.driver_format = c.get('type')
elif c.tag == 'source':
self.source_file = c.get('file')
self.source_protocol = c.get('protocol')
self.source_name = c.get('name')
for d in c:
if d.tag == 'host':
self.source_hosts.append(d.get('name'))
self.source_ports.append(d.get('port'))
elif c.tag == 'backingStore':
if len(c):
self.backing_store = LibvirtConfigGuestDiskBackingStore()
self.backing_store.parse_dom(c)
class LibvirtConfigGuestSnapshotDisk(LibvirtConfigObject):
"""Disk class for handling disk information in snapshots.
Similar to LibvirtConfigGuestDisk, but used to represent
disk entities in <domainsnapshot> structures rather than
real devices. These typically have fewer members, and
different expectations for which fields are required.
"""
def __init__(self, **kwargs):
super(LibvirtConfigGuestSnapshotDisk, self).__init__(root_name="disk",
**kwargs)
self.source_type = None
self.source_device = None
self.name = None
self.snapshot = None
self.driver_name = None
self.driver_format = None
self.driver_cache = None
self.source_path = None
self.source_protocol = None
self.source_name = None
self.source_hosts = []
self.source_ports = []
self.target_dev = None
self.target_path = None
self.target_bus = None
self.auth_username = None
self.auth_secret_type = None
self.auth_secret_uuid = None
self.serial = None
def format_dom(self):
dev = super(LibvirtConfigGuestSnapshotDisk, self).format_dom()
if self.name:
dev.attrib['name'] = self.name
if self.snapshot:
dev.attrib['snapshot'] = self.snapshot
if self.source_type:
dev.set("type", self.source_type)
if self.source_device:
dev.set("device", self.source_device)
if (self.driver_name is not None or
self.driver_format is not None or
self.driver_cache is not None):
drv = etree.Element("driver")
if self.driver_name is not None:
drv.set("name", self.driver_name)
if self.driver_format is not None:
drv.set("type", self.driver_format)
if self.driver_cache is not None:
drv.set("cache", self.driver_cache)
dev.append(drv)
if self.source_type == "file":
dev.append(etree.Element("source", file=self.source_path))
elif self.source_type == "block":
dev.append(etree.Element("source", dev=self.source_path))
elif self.source_type == "mount":
dev.append(etree.Element("source", dir=self.source_path))
elif self.source_type == "network":
source = etree.Element("source", protocol=self.source_protocol)
if self.source_name is not None:
source.set('name', self.source_name)
hosts_info = zip(self.source_hosts, self.source_ports)
for name, port in hosts_info:
host = etree.Element('host', name=name)
if port is not None:
host.set('port', port)
source.append(host)
dev.append(source)
if self.auth_secret_type is not None:
auth = etree.Element("auth")
auth.set("username", self.auth_username)
auth.append(etree.Element("secret", type=self.auth_secret_type,
uuid=self.auth_secret_uuid))
dev.append(auth)
if self.source_type == "mount":
dev.append(etree.Element("target", dir=self.target_path))
else:
if self.target_bus and self.target_dev:
dev.append(etree.Element("target", dev=self.target_dev,
bus=self.target_bus))
return dev
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestSnapshotDisk, self).parse_dom(xmldoc)
self.source_type = xmldoc.get('type')
self.snapshot = xmldoc.get('snapshot')
for c in xmldoc:
if c.tag == 'driver':
self.driver_name = c.get('name')
self.driver_format = c.get('type')
self.driver_cache = c.get('cache')
elif c.tag == 'source':
if self.source_type == 'file':
self.source_path = c.get('file')
elif self.source_type == 'block':
self.source_path = c.get('dev')
elif self.source_type == 'mount':
self.source_path = c.get('dir')
elif self.source_type == 'network':
self.source_protocol = c.get('protocol')
self.source_name = c.get('name')
for sub in c:
if sub.tag == 'host':
self.source_hosts.append(sub.get('name'))
self.source_ports.append(sub.get('port'))
elif c.tag == 'serial':
self.serial = c.text
for c in xmldoc:
if c.tag == 'target':
if self.source_type == 'mount':
self.target_path = c.get('dir')
else:
self.target_dev = c.get('dev')
self.target_bus = c.get('bus', None)
class LibvirtConfigGuestFilesys(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestFilesys, self).__init__(root_name="filesystem",
**kwargs)
self.source_type = "mount"
self.source_dir = None
self.source_file = None
self.source_dev = None
self.target_dir = "/"
self.driver_type = "loop"
self.driver_format = "raw"
def format_dom(self):
dev = super(LibvirtConfigGuestFilesys, self).format_dom()
dev.set("type", self.source_type)
if self.source_type == "file":
dev.append(etree.Element("driver", type = self.driver_type,
format = self.driver_format))
dev.append(etree.Element("source", file=self.source_file))
elif self.source_type == "block":
dev.append(etree.Element("source", dev=self.source_dev))
else:
dev.append(etree.Element("source", dir=self.source_dir))
dev.append(etree.Element("target", dir=self.target_dir))
return dev
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestFilesys, self).parse_dom(xmldoc)
self.source_type = xmldoc.get('type')
for c in xmldoc:
if c.tag == 'driver':
if self.source_type == 'file':
self.driver_type = c.get('type')
self.driver_format = c.get('format')
elif c.tag == 'source':
if self.source_type == 'file':
self.source_file = c.get('file')
elif self.source_type == 'block':
self.source_dev = c.get('dev')
else:
self.source_dir = c.get('dir')
elif c.tag == 'target':
self.target_dir = c.get('dir')
class LibvirtConfigGuestDiskEncryptionSecret(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestDiskEncryptionSecret, self).__init__(**kwargs)
self.type = None
self.uuid = None
def parse_dom(self, xmldoc):
self.type = xmldoc.get('type')
self.uuid = xmldoc.get('uuid')
def format_dom(self):
obj = etree.Element("secret")
obj.set("type", self.type)
obj.set("uuid", self.uuid)
return obj
class LibvirtConfigGuestDiskEncryption(LibvirtConfigObject):
"""https://libvirt.org/formatstorageencryption.html
"""
def __init__(self, **kwargs):
super(LibvirtConfigGuestDiskEncryption, self).__init__(**kwargs)
self.format = None
self.secret = None
def parse_dom(self, xmldoc):
self.format = xmldoc.get('format')
for c in xmldoc:
if c.tag == 'secret':
m = LibvirtConfigGuestDiskEncryptionSecret()
m.parse_dom(c)
self.secret = m
def format_dom(self):
obj = etree.Element("encryption")
obj.set("format", self.format)
obj.append(self.secret.format_dom())
return obj
class LibvirtConfigGuestDiskMirror(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestDiskMirror, self).__init__(**kwargs)
self.ready = None
def parse_dom(self, xmldoc):
self.ready = xmldoc.get('ready')
class LibvirtConfigGuestIDMap(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestIDMap, self).__init__(**kwargs)
self.start = 0
self.target = 0
self.count = 10000
def parse_dom(self, xmldoc):
self.start = int(xmldoc.get('start'))
self.target = int(xmldoc.get('target'))
self.count = int(xmldoc.get('count'))
def format_dom(self):
obj = super(LibvirtConfigGuestIDMap, self).format_dom()
obj.set("start", str(self.start))
obj.set("target", str(self.target))
obj.set("count", str(self.count))
return obj
class LibvirtConfigGuestUIDMap(LibvirtConfigGuestIDMap):
def __init__(self, **kwargs):
super(LibvirtConfigGuestUIDMap, self).__init__(root_name="uid",
**kwargs)
class LibvirtConfigGuestGIDMap(LibvirtConfigGuestIDMap):
def __init__(self, **kwargs):
super(LibvirtConfigGuestGIDMap, self).__init__(root_name="gid",
**kwargs)
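# Example (illustrative): a UID mapping for user-namespaced (LXC-style)
# guests; GID maps work identically with the "gid" root element:
#
#   uid = LibvirtConfigGuestUIDMap()
#   uid.start, uid.target, uid.count = 0, 1000, 100
#   uid.to_xml()   # -> <uid start="0" target="1000" count="100"/>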
class LibvirtConfigGuestDeviceAddress(LibvirtConfigObject):
def __init__(self, type=None, **kwargs):
super(LibvirtConfigGuestDeviceAddress, self).__init__(
root_name='address', **kwargs)
self.type = type
def format_dom(self):
xml = super(LibvirtConfigGuestDeviceAddress, self).format_dom()
xml.set("type", self.type)
return xml
@staticmethod
def parse_dom(xmldoc):
addr_type = xmldoc.get('type')
if addr_type == 'pci':
obj = LibvirtConfigGuestDeviceAddressPCI()
elif addr_type == 'drive':
obj = LibvirtConfigGuestDeviceAddressDrive()
else:
return None
obj.parse_dom(xmldoc)
return obj
class LibvirtConfigGuestDeviceAddressDrive(LibvirtConfigGuestDeviceAddress):
def __init__(self, **kwargs):
super(LibvirtConfigGuestDeviceAddressDrive, self).\
__init__(type='drive', **kwargs)
self.controller = None
self.bus = None
self.target = None
self.unit = None
def format_dom(self):
xml = super(LibvirtConfigGuestDeviceAddressDrive, self).format_dom()
if self.controller is not None:
xml.set("controller", str(self.controller))
if self.bus is not None:
xml.set("bus", str(self.bus))
if self.target is not None:
xml.set("target", str(self.target))
if self.unit is not None:
xml.set("unit", str(self.unit))
return xml
def parse_dom(self, xmldoc):
self.controller = xmldoc.get('controller')
self.bus = xmldoc.get('bus')
self.target = xmldoc.get('target')
self.unit = xmldoc.get('unit')
def format_address(self):
return None
class LibvirtConfigGuestDeviceAddressPCI(LibvirtConfigGuestDeviceAddress):
def __init__(self, **kwargs):
super(LibvirtConfigGuestDeviceAddressPCI, self).\
__init__(type='pci', **kwargs)
self.domain = None
self.bus = None
self.slot = None
self.function = None
def format_dom(self):
xml = super(LibvirtConfigGuestDeviceAddressPCI, self).format_dom()
if self.domain is not None:
xml.set("domain", str(self.domain))
if self.bus is not None:
xml.set("bus", str(self.bus))
if self.slot is not None:
xml.set("slot", str(self.slot))
if self.function is not None:
xml.set("function", str(self.function))
return xml
def parse_dom(self, xmldoc):
self.domain = xmldoc.get('domain')
self.bus = xmldoc.get('bus')
self.slot = xmldoc.get('slot')
self.function = xmldoc.get('function')
def format_address(self):
if self.domain is not None:
return pci_utils.get_pci_address(self.domain[2:],
self.bus[2:],
self.slot[2:],
self.function[2:])
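# Example (illustrative): parse_dom() dispatches on the address type, and
# the PCI variant can render a plain "dddd:bb:ss.f" address by stripping
# the 0x prefixes that libvirt uses:
#
#   addr = LibvirtConfigGuestDeviceAddress.parse_dom(etree.fromstring(
#       "<address type='pci' domain='0x0000' bus='0x00'"
#       " slot='0x01' function='0x2'/>"))
#   isinstance(addr, LibvirtConfigGuestDeviceAddressPCI)  # -> True
#   addr.format_address()                                 # -> '0000:00:01.2'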
class LibvirtConfigGuestInterface(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestInterface, self).__init__(
root_name="interface",
**kwargs)
self.net_type = None
self.target_dev = None
self.model = None
self.mac_addr = None
self.script = None
self.source_dev = None
self.source_mode = "private"
self.vporttype = None
self.vportparams = []
self.filtername = None
self.filterparams = []
self.driver_name = None
self.driver_iommu = False
self.vhostuser_mode = None
self.vhostuser_path = None
self.vhostuser_type = None
self.vhost_queues = None
self.vhost_rx_queue_size = None
self.vhost_tx_queue_size = None
self.vif_inbound_peak = None
self.vif_inbound_burst = None
self.vif_inbound_average = None
self.vif_outbound_peak = None
self.vif_outbound_burst = None
self.vif_outbound_average = None
self.vlan = None
self.device_addr = None
self.mtu = None
def __eq__(self, other):
if not isinstance(other, LibvirtConfigGuestInterface):
return False
# NOTE(arches) Skip checking target_dev for vhostuser
# vif type; target_dev is not a valid value for vhostuser.
        # NOTE(gibi): For macvtap cases the domain has a target_dev
        # generated by libvirt. It is not set by the vif driver code,
        # so it is not in the config returned by the vif driver, and
        # therefore we should not match on it.
return (
self.mac_addr == other.mac_addr and
self.net_type == other.net_type and
self.source_dev == other.source_dev and
(self.net_type == 'vhostuser' or not self.target_dev or
self.target_dev == other.target_dev) and
self.vhostuser_path == other.vhostuser_path)
@property
def uses_virtio(self):
return 'virtio' == self.model
def format_dom(self):
dev = super(LibvirtConfigGuestInterface, self).format_dom()
dev.set("type", self.net_type)
if self.net_type == "hostdev":
dev.set("managed", "yes")
dev.append(etree.Element("mac", address=self.mac_addr))
if self.model:
dev.append(etree.Element("model", type=self.model))
drv_elem = None
if (self.driver_name or
self.driver_iommu or
self.net_type == "vhostuser"):
drv_elem = etree.Element("driver")
if self.driver_name and self.net_type != "vhostuser":
# For vhostuser interface we should not set the driver name.
drv_elem.set("name", self.driver_name)
if self.driver_iommu:
drv_elem.set("iommu", "on")
if drv_elem is not None:
if self.vhost_queues is not None:
drv_elem.set('queues', str(self.vhost_queues))
if self.vhost_rx_queue_size is not None:
drv_elem.set('rx_queue_size', str(self.vhost_rx_queue_size))
if self.vhost_tx_queue_size is not None:
drv_elem.set('tx_queue_size', str(self.vhost_tx_queue_size))
if (drv_elem.get('name') or drv_elem.get('queues') or
drv_elem.get('rx_queue_size') or
drv_elem.get('tx_queue_size') or
drv_elem.get('iommu')):
# Append the driver element into the dom only if name
# or queues or tx/rx or iommu attributes are set.
dev.append(drv_elem)
if self.net_type == "ethernet":
if self.script is not None:
dev.append(etree.Element("script", path=self.script))
if self.mtu is not None:
dev.append(etree.Element("mtu", size=str(self.mtu)))
elif self.net_type == "direct":
dev.append(etree.Element("source", dev=self.source_dev,
mode=self.source_mode))
elif self.net_type == "hostdev":
source_elem = etree.Element("source")
domain, bus, slot, func = \
pci_utils.get_pci_address_fields(self.source_dev)
addr_elem = etree.Element("address", type='pci')
addr_elem.set("domain", "0x%s" % (domain))
addr_elem.set("bus", "0x%s" % (bus))
addr_elem.set("slot", "0x%s" % (slot))
addr_elem.set("function", "0x%s" % (func))
source_elem.append(addr_elem)
dev.append(source_elem)
elif self.net_type == "vhostuser":
dev.append(etree.Element("source", type=self.vhostuser_type,
mode=self.vhostuser_mode,
path=self.vhostuser_path))
elif self.net_type == "bridge":
dev.append(etree.Element("source", bridge=self.source_dev))
if self.script is not None:
dev.append(etree.Element("script", path=self.script))
if self.mtu is not None:
dev.append(etree.Element("mtu", size=str(self.mtu)))
else:
dev.append(etree.Element("source", bridge=self.source_dev))
if self.vlan and self.net_type in ("direct", "hostdev"):
vlan_elem = etree.Element("vlan")
tag_elem = etree.Element("tag", id=str(self.vlan))
vlan_elem.append(tag_elem)
dev.append(vlan_elem)
if self.target_dev is not None:
dev.append(etree.Element("target", dev=self.target_dev))
if self.vporttype is not None:
vport = etree.Element("virtualport", type=self.vporttype)
for p in self.vportparams:
param = etree.Element("parameters")
param.set(p['key'], p['value'])
vport.append(param)
dev.append(vport)
        if self.filtername is not None:
            filterref = etree.Element("filterref", filter=self.filtername)
            for p in self.filterparams:
                filterref.append(etree.Element("parameter",
                                               name=p['key'],
                                               value=p['value']))
            dev.append(filterref)
if self.vif_inbound_average or self.vif_outbound_average:
bandwidth = etree.Element("bandwidth")
if self.vif_inbound_average is not None:
vif_inbound = etree.Element("inbound",
average=str(self.vif_inbound_average))
if self.vif_inbound_peak is not None:
vif_inbound.set("peak", str(self.vif_inbound_peak))
if self.vif_inbound_burst is not None:
vif_inbound.set("burst", str(self.vif_inbound_burst))
bandwidth.append(vif_inbound)
if self.vif_outbound_average is not None:
vif_outbound = etree.Element("outbound",
average=str(self.vif_outbound_average))
if self.vif_outbound_peak is not None:
vif_outbound.set("peak", str(self.vif_outbound_peak))
if self.vif_outbound_burst is not None:
vif_outbound.set("burst", str(self.vif_outbound_burst))
bandwidth.append(vif_outbound)
dev.append(bandwidth)
return dev
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestInterface, self).parse_dom(xmldoc)
self.net_type = xmldoc.get('type')
for c in xmldoc:
if c.tag == 'mac':
self.mac_addr = c.get('address')
elif c.tag == 'model':
self.model = c.get('type')
elif c.tag == 'driver':
self.driver_name = c.get('name')
self.driver_iommu = (c.get('iommu', '') == 'on')
self.vhost_queues = c.get('queues')
self.vhost_rx_queue_size = c.get('rx_queue_size')
self.vhost_tx_queue_size = c.get('tx_queue_size')
elif c.tag == 'source':
if self.net_type == 'direct':
self.source_dev = c.get('dev')
self.source_mode = c.get('mode', 'private')
elif self.net_type == 'vhostuser':
self.vhostuser_type = c.get('type')
self.vhostuser_mode = c.get('mode')
self.vhostuser_path = c.get('path')
elif self.net_type == 'hostdev':
for sub in c:
if sub.tag == 'address' and sub.get('type') == 'pci':
# strip the 0x prefix on each attribute since
# format_dom puts them back on - note that
# LibvirtConfigGuestHostdevPCI does not do this...
self.source_dev = (
pci_utils.get_pci_address(
sub.get('domain')[2:],
sub.get('bus')[2:],
sub.get('slot')[2:],
sub.get('function')[2:]
)
)
else:
self.source_dev = c.get('bridge')
elif c.tag == 'target':
self.target_dev = c.get('dev')
elif c.tag == 'script':
self.script = c.get('path')
elif c.tag == 'vlan':
# NOTE(mriedem): The vlan element can have multiple tag
# sub-elements but we're currently only storing a single tag
# id in the vlan attribute.
for sub in c:
if sub.tag == 'tag' and sub.get('id'):
self.vlan = int(sub.get('id'))
break
elif c.tag == 'virtualport':
self.vporttype = c.get('type')
for sub in c:
if sub.tag == 'parameters':
for k, v in dict(sub.attrib).items():
self.add_vport_param(k, v)
elif c.tag == 'filterref':
self.filtername = c.get('filter')
for sub in c:
if sub.tag == 'parameter':
self.add_filter_param(sub.get('name'),
sub.get('value'))
elif c.tag == 'bandwidth':
for sub in c:
# Note that only average is mandatory, burst and peak are
# optional (and all are ints).
if sub.tag == 'inbound':
self.vif_inbound_average = int(sub.get('average'))
if sub.get('burst'):
self.vif_inbound_burst = int(sub.get('burst'))
if sub.get('peak'):
self.vif_inbound_peak = int(sub.get('peak'))
elif sub.tag == 'outbound':
self.vif_outbound_average = int(sub.get('average'))
if sub.get('burst'):
self.vif_outbound_burst = int(sub.get('burst'))
if sub.get('peak'):
self.vif_outbound_peak = int(sub.get('peak'))
elif c.tag == 'address':
obj = LibvirtConfigGuestDeviceAddress.parse_dom(c)
self.device_addr = obj
elif c.tag == 'mtu':
self.mtu = int(c.get('size'))
def add_filter_param(self, key, value):
self.filterparams.append({'key': key, 'value': value})
def add_vport_param(self, key, value):
self.vportparams.append({'key': key, 'value': value})
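# Example (illustrative): a minimal bridge-backed virtio interface; the
# bridge name and MAC address below are hypothetical:
#
#   iface = LibvirtConfigGuestInterface()
#   iface.net_type = "bridge"
#   iface.source_dev = "br0"
#   iface.mac_addr = "52:54:00:aa:bb:cc"
#   iface.model = "virtio"
#   iface.to_xml()
#   # -> <interface type="bridge">
#   #      <mac address="52:54:00:aa:bb:cc"/>
#   #      <model type="virtio"/>
#   #      <source bridge="br0"/>
#   #    </interface>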
class LibvirtConfigGuestInput(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestInput, self).__init__(root_name="input",
**kwargs)
self.type = "tablet"
self.bus = "usb"
self.driver_iommu = False
def format_dom(self):
dev = super(LibvirtConfigGuestInput, self).format_dom()
dev.set("type", self.type)
dev.set("bus", self.bus)
if self.driver_iommu:
dev.append(etree.Element('driver', iommu="on"))
return dev
class LibvirtConfigGuestGraphics(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestGraphics, self).__init__(root_name="graphics",
**kwargs)
self.type = "vnc"
self.autoport = True
self.keymap = None
self.listen = None
def format_dom(self):
dev = super(LibvirtConfigGuestGraphics, self).format_dom()
dev.set("type", self.type)
if self.autoport:
dev.set("autoport", "yes")
else:
dev.set("autoport", "no")
if self.keymap:
dev.set("keymap", self.keymap)
if self.listen:
dev.set("listen", self.listen)
return dev
class LibvirtConfigSeclabel(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigSeclabel, self).__init__(root_name="seclabel",
**kwargs)
self.type = 'dynamic'
self.baselabel = None
def format_dom(self):
seclabel = super(LibvirtConfigSeclabel, self).format_dom()
seclabel.set('type', self.type)
if self.baselabel:
seclabel.append(self._text_node("baselabel", self.baselabel))
return seclabel
class LibvirtConfigGuestVideo(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestVideo, self).__init__(root_name="video",
**kwargs)
self.type = 'cirrus'
self.vram = None
self.heads = None
self.driver_iommu = False
@property
def uses_virtio(self):
return 'virtio' == self.type
def format_dom(self):
dev = super(LibvirtConfigGuestVideo, self).format_dom()
model = etree.Element("model")
model.set("type", self.type)
if self.vram:
model.set("vram", str(self.vram))
if self.heads:
model.set("heads", str(self.heads))
dev.append(model)
if self.driver_iommu:
dev.append(etree.Element("driver", iommu="on"))
return dev
class LibvirtConfigMemoryBalloon(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigMemoryBalloon, self).__init__(
root_name='memballoon',
**kwargs)
self.model = None
self.period = None
self.driver_iommu = False
@property
def uses_virtio(self):
return 'virtio' == self.model
def format_dom(self):
dev = super(LibvirtConfigMemoryBalloon, self).format_dom()
dev.set('model', str(self.model))
if self.period is not None:
dev.append(etree.Element('stats', period=str(self.period)))
if self.driver_iommu:
dev.append(etree.Element('driver', iommu='on'))
return dev
class LibvirtConfigGuestController(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestController,
self).__init__(root_name="controller", **kwargs)
self.type = None
self.index = None
self.model = None
self.driver_iommu = False
@property
def uses_virtio(self):
model_is_virtio = 'virtio-scsi' == self.model
type_is_virtio = 'virtio-serial' == self.type
return model_is_virtio or type_is_virtio
def format_dom(self):
controller = super(LibvirtConfigGuestController, self).format_dom()
controller.set("type", self.type)
if self.index is not None:
controller.set("index", str(self.index))
if self.model:
controller.set("model", str(self.model))
if self.driver_iommu:
controller.append(etree.Element("driver", iommu="on"))
return controller
class LibvirtConfigGuestUSBHostController(LibvirtConfigGuestController):
def __init__(self, **kwargs):
super(LibvirtConfigGuestUSBHostController, self).__init__(**kwargs)
self.type = 'usb'
class LibvirtConfigGuestPCIeRootController(LibvirtConfigGuestController):
def __init__(self, **kwargs):
super(LibvirtConfigGuestPCIeRootController, self).\
__init__(**kwargs)
self.type = 'pci'
self.model = 'pcie-root'
class LibvirtConfigGuestPCIeRootPortController(LibvirtConfigGuestController):
def __init__(self, **kwargs):
super(LibvirtConfigGuestPCIeRootPortController, self).\
__init__(**kwargs)
self.type = 'pci'
self.model = 'pcie-root-port'
class LibvirtConfigGuestHostdev(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestHostdev, self).\
__init__(root_name="hostdev", **kwargs)
self.mode = kwargs.get('mode')
self.type = kwargs.get('type')
        # The managed attribute is only used by PCI devices, but mediated
        # devices need to say managed='no'
self.managed = kwargs.get('managed', 'yes')
def format_dom(self):
dev = super(LibvirtConfigGuestHostdev, self).format_dom()
dev.set("mode", self.mode)
dev.set("type", self.type)
dev.set("managed", self.managed)
return dev
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestHostdev, self).parse_dom(xmldoc)
self.mode = xmldoc.get('mode')
self.type = xmldoc.get('type')
self.managed = xmldoc.get('managed')
return list(xmldoc)
class LibvirtConfigGuestHostdevPCI(LibvirtConfigGuestHostdev):
def __init__(self, **kwargs):
super(LibvirtConfigGuestHostdevPCI, self).\
__init__(mode='subsystem', type='pci',
**kwargs)
        # These are returned from libvirt as hexadecimal strings with a 0x
        # prefix, even though their meaningful ranges differ: domain 16 bit,
        # bus 8 bit, slot 5 bit, and function 3 bit. On the other hand, nova
        # generates these values without the 0x prefix.
self.domain = None
self.bus = None
self.slot = None
self.function = None
def __eq__(self, other):
if not isinstance(other, LibvirtConfigGuestHostdevPCI):
return False
        # NOTE(gibi): nova generates hex strings without the 0x prefix but
        # libvirt uses that prefix when it returns the config, so we need
        # to normalize the strings before comparison
return (
int(self.domain, 16) == int(other.domain, 16) and
int(self.bus, 16) == int(other.bus, 16) and
int(self.slot, 16) == int(other.slot, 16) and
int(self.function, 16) == int(other.function, 16))
def format_dom(self):
dev = super(LibvirtConfigGuestHostdevPCI, self).format_dom()
address = etree.Element(
"address",
domain=self.domain if self.domain.startswith('0x')
else '0x' + self.domain,
bus=self.bus if self.bus.startswith('0x') else '0x' + self.bus,
slot=self.slot if self.slot.startswith('0x') else '0x' + self.slot,
function=self.function if self.function.startswith('0x')
else '0x' + self.function)
source = etree.Element("source")
source.append(address)
dev.append(source)
return dev
def parse_dom(self, xmldoc):
childs = super(LibvirtConfigGuestHostdevPCI, self).parse_dom(xmldoc)
for c in childs:
if c.tag == "source":
for sub in c:
if sub.tag == 'address':
self.domain = sub.get('domain')
self.bus = sub.get('bus')
self.slot = sub.get('slot')
self.function = sub.get('function')
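# Example (illustrative): format_dom() re-adds the 0x prefix that nova
# omits when generating PCI addresses:
#
#   hostdev = LibvirtConfigGuestHostdevPCI()
#   hostdev.domain, hostdev.bus = "0000", "81"
#   hostdev.slot, hostdev.function = "00", "1"
#   hostdev.to_xml()
#   # -> <hostdev mode="subsystem" type="pci" managed="yes">
#   #      <source>
#   #        <address domain="0x0000" bus="0x81" slot="0x00" function="0x1"/>
#   #      </source>
#   #    </hostdev>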
class LibvirtConfigGuestHostdevMDEV(LibvirtConfigGuestHostdev):
def __init__(self, **kwargs):
super(LibvirtConfigGuestHostdevMDEV, self).__init__(
mode='subsystem', type='mdev', managed='no', **kwargs)
# model attribute is only supported by mediated devices
self.model = kwargs.get('model', 'vfio-pci')
self.uuid = None
def format_dom(self):
dev = super(LibvirtConfigGuestHostdevMDEV, self).format_dom()
if self.model:
dev.set("model", self.model)
address = etree.Element("address", uuid=self.uuid)
source = etree.Element("source")
source.append(address)
dev.append(source)
return dev
def parse_dom(self, xmldoc):
children = super(LibvirtConfigGuestHostdevMDEV, self).parse_dom(xmldoc)
if xmldoc.get('model'):
self.model = xmldoc.get('model')
for c in children:
if c.tag == "source":
for sub in c:
if sub.tag == 'address':
self.uuid = sub.get('uuid')
return
class LibvirtConfigGuestCharBase(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestCharBase, self).__init__(**kwargs)
self.type = "pty"
self.source_path = None
self.listen_port = None
self.listen_host = None
self.log = None
def format_dom(self):
dev = super(LibvirtConfigGuestCharBase, self).format_dom()
dev.set("type", self.type)
if self.type == "file":
dev.append(etree.Element("source", path=self.source_path))
elif self.type == "unix":
dev.append(etree.Element("source", mode="bind",
path=self.source_path))
elif self.type == "tcp":
dev.append(etree.Element("source", mode="bind",
host=self.listen_host,
service=str(self.listen_port)))
if self.log:
dev.append(self.log.format_dom())
return dev
class LibvirtConfigGuestChar(LibvirtConfigGuestCharBase):
def __init__(self, **kwargs):
super(LibvirtConfigGuestChar, self).__init__(**kwargs)
self.target_port = None
self.target_type = None
def format_dom(self):
dev = super(LibvirtConfigGuestChar, self).format_dom()
if self.target_port is not None or self.target_type is not None:
target = etree.Element("target")
if self.target_port is not None:
target.set("port", str(self.target_port))
if self.target_type is not None:
target.set("type", self.target_type)
dev.append(target)
return dev
class LibvirtConfigGuestCharDeviceLog(LibvirtConfigObject):
"""Represents a sub-element to a character device."""
def __init__(self, **kwargs):
super(LibvirtConfigGuestCharDeviceLog, self).__init__(root_name="log",
**kwargs)
self.file = None
self.append = "off"
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestCharDeviceLog, self).parse_dom(xmldoc)
self.file = xmldoc.get("file")
self.append = xmldoc.get("append")
def format_dom(self):
log = super(LibvirtConfigGuestCharDeviceLog, self).format_dom()
log.set("file", self.file)
log.set("append", self.append)
return log
class LibvirtConfigGuestSerial(LibvirtConfigGuestChar):
def __init__(self, **kwargs):
super(LibvirtConfigGuestSerial, self).__init__(root_name="serial",
**kwargs)
class LibvirtConfigGuestConsole(LibvirtConfigGuestChar):
def __init__(self, **kwargs):
super(LibvirtConfigGuestConsole, self).__init__(root_name="console",
**kwargs)
class LibvirtConfigGuestChannel(LibvirtConfigGuestCharBase):
def __init__(self, **kwargs):
super(LibvirtConfigGuestChannel, self).__init__(root_name="channel",
**kwargs)
self.target_type = "virtio"
self.target_name = None
def format_dom(self):
dev = super(LibvirtConfigGuestChannel, self).format_dom()
target = etree.Element("target", type=self.target_type)
if self.target_name is not None:
target.set("name", self.target_name)
dev.append(target)
return dev
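# Example (illustrative): a virtio channel for the QEMU guest agent; the
# socket path below is hypothetical:
#
#   ch = LibvirtConfigGuestChannel()
#   ch.type = "unix"
#   ch.source_path = "/var/lib/libvirt/qemu/demo.agent.sock"
#   ch.target_name = "org.qemu.guest_agent.0"
#   ch.to_xml()
#   # -> <channel type="unix">
#   #      <source mode="bind" path="/var/lib/libvirt/qemu/demo.agent.sock"/>
#   #      <target type="virtio" name="org.qemu.guest_agent.0"/>
#   #    </channel>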
class LibvirtConfigGuestWatchdog(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestWatchdog, self).__init__(root_name="watchdog",
**kwargs)
self.model = 'i6300esb'
self.action = 'reset'
def format_dom(self):
dev = super(LibvirtConfigGuestWatchdog, self).format_dom()
dev.set('model', self.model)
dev.set('action', self.action)
return dev
class LibvirtConfigGuestCPUTuneVCPUPin(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestCPUTuneVCPUPin, self).__init__(
root_name="vcpupin",
**kwargs)
self.id = None
self.cpuset = None
def format_dom(self):
root = super(LibvirtConfigGuestCPUTuneVCPUPin, self).format_dom()
root.set("vcpu", str(self.id))
if self.cpuset is not None:
root.set("cpuset",
hardware.format_cpu_spec(self.cpuset))
return root
class LibvirtConfigGuestCPUTuneEmulatorPin(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestCPUTuneEmulatorPin, self).__init__(
root_name="emulatorpin",
**kwargs)
self.cpuset = None
def format_dom(self):
root = super(LibvirtConfigGuestCPUTuneEmulatorPin, self).format_dom()
if self.cpuset is not None:
root.set("cpuset",
hardware.format_cpu_spec(self.cpuset))
return root
class LibvirtConfigGuestCPUTuneVCPUSched(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestCPUTuneVCPUSched, self).__init__(
root_name="vcpusched",
**kwargs)
self.vcpus = None
self.scheduler = None
self.priority = None
def format_dom(self):
root = super(LibvirtConfigGuestCPUTuneVCPUSched, self).format_dom()
if self.vcpus is not None:
root.set("vcpus",
hardware.format_cpu_spec(self.vcpus))
if self.scheduler is not None:
root.set("scheduler", self.scheduler)
if self.priority is not None:
root.set("priority", str(self.priority))
return root
class LibvirtConfigGuestCPUTune(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestCPUTune, self).__init__(root_name="cputune",
**kwargs)
self.shares = None
self.quota = None
self.period = None
self.vcpupin = []
self.emulatorpin = None
self.vcpusched = []
def format_dom(self):
root = super(LibvirtConfigGuestCPUTune, self).format_dom()
if self.shares is not None:
root.append(self._text_node("shares", str(self.shares)))
if self.quota is not None:
root.append(self._text_node("quota", str(self.quota)))
if self.period is not None:
root.append(self._text_node("period", str(self.period)))
if self.emulatorpin is not None:
root.append(self.emulatorpin.format_dom())
for vcpu in self.vcpupin:
root.append(vcpu.format_dom())
for sched in self.vcpusched:
root.append(sched.format_dom())
return root
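# Illustrative sketch: composing a <cputune> element from the helper
# classes above (values are examples only).
#
#     tune = LibvirtConfigGuestCPUTune()
#     tune.shares = 2048
#     pin = LibvirtConfigGuestCPUTuneVCPUPin()
#     pin.id = 0
#     pin.cpuset = set([4, 5])     # host pCPUs to pin vCPU 0 to
#     tune.vcpupin.append(pin)
#     # format_dom() then yields roughly:
#     #     <cputune><shares>2048</shares>
#     #              <vcpupin vcpu="0" cpuset="4-5"/></cputune>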
class LibvirtConfigGuestMemoryBacking(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestMemoryBacking, self).__init__(
root_name="memoryBacking", **kwargs)
self.hugepages = []
self.sharedpages = True
self.locked = False
self.filesource = False
self.sharedaccess = False
self.allocateimmediate = False
self.discard = False
def format_dom(self):
root = super(LibvirtConfigGuestMemoryBacking, self).format_dom()
if self.hugepages:
hugepages = etree.Element("hugepages")
for item in self.hugepages:
hugepages.append(item.format_dom())
root.append(hugepages)
if not self.sharedpages:
root.append(etree.Element("nosharepages"))
if self.locked:
root.append(etree.Element("locked"))
if self.filesource:
root.append(etree.Element("source", type="file"))
if self.sharedaccess:
root.append(etree.Element("access", mode="shared"))
if self.allocateimmediate:
root.append(etree.Element("allocation", mode="immediate"))
if self.discard:
root.append(etree.Element("discard"))
return root
class LibvirtConfigGuestMemoryBackingPage(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestMemoryBackingPage, self).__init__(
root_name="page", **kwargs)
self.size_kb = None
self.nodeset = None
def format_dom(self):
page = super(LibvirtConfigGuestMemoryBackingPage, self).format_dom()
page.set("size", str(self.size_kb))
page.set("nodeset", hardware.format_cpu_spec(self.nodeset))
page.set("unit", "KiB")
return page
class LibvirtConfigGuestMemoryTune(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestMemoryTune, self).__init__(
root_name="memtune", **kwargs)
self.hard_limit = None
self.soft_limit = None
self.swap_hard_limit = None
self.min_guarantee = None
def format_dom(self):
root = super(LibvirtConfigGuestMemoryTune, self).format_dom()
if self.hard_limit is not None:
root.append(self._text_node("hard_limit",
str(self.hard_limit),
unit="KiB"))
if self.soft_limit is not None:
root.append(self._text_node("soft_limit",
str(self.soft_limit),
unit="KiB"))
if self.swap_hard_limit is not None:
root.append(self._text_node("swap_hard_limit",
str(self.swap_hard_limit),
unit="KiB"))
if self.min_guarantee is not None:
root.append(self._text_node("min_guarantee",
str(self.min_guarantee),
unit="KiB"))
return root
class LibvirtConfigGuestNUMATuneMemory(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestNUMATuneMemory, self).__init__(
root_name="memory", **kwargs)
self.mode = "strict"
self.nodeset = []
def format_dom(self):
root = super(LibvirtConfigGuestNUMATuneMemory, self).format_dom()
root.set("mode", self.mode)
root.set("nodeset", hardware.format_cpu_spec(self.nodeset))
return root
class LibvirtConfigGuestNUMATuneMemNode(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestNUMATuneMemNode, self).__init__(
root_name="memnode", **kwargs)
self.cellid = 0
self.mode = "strict"
self.nodeset = []
def format_dom(self):
root = super(LibvirtConfigGuestNUMATuneMemNode, self).format_dom()
root.set("cellid", str(self.cellid))
root.set("mode", self.mode)
root.set("nodeset", hardware.format_cpu_spec(self.nodeset))
return root
class LibvirtConfigGuestNUMATune(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestNUMATune, self).__init__(
root_name="numatune", **kwargs)
self.memory = None
self.memnodes = []
def format_dom(self):
root = super(LibvirtConfigGuestNUMATune, self).format_dom()
if self.memory is not None:
root.append(self.memory.format_dom())
for node in self.memnodes:
root.append(node.format_dom())
return root
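# Illustrative sketch: a <numatune> tree pinning guest memory to host
# NUMA node 0 (example values).
#
#     nt = LibvirtConfigGuestNUMATune()
#     nt.memory = LibvirtConfigGuestNUMATuneMemory()
#     nt.memory.nodeset = [0]
#     node = LibvirtConfigGuestNUMATuneMemNode()
#     node.nodeset = [0]
#     nt.memnodes.append(node)
#     # -> <numatune><memory mode="strict" nodeset="0"/>
#     #              <memnode cellid="0" mode="strict" nodeset="0"/></numatune>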
class LibvirtConfigGuestFeature(LibvirtConfigObject):
def __init__(self, name, **kwargs):
super(LibvirtConfigGuestFeature, self).__init__(root_name=name,
**kwargs)
class LibvirtConfigGuestFeatureACPI(LibvirtConfigGuestFeature):
def __init__(self, **kwargs):
super(LibvirtConfigGuestFeatureACPI, self).__init__("acpi",
**kwargs)
class LibvirtConfigGuestFeatureAPIC(LibvirtConfigGuestFeature):
def __init__(self, **kwargs):
super(LibvirtConfigGuestFeatureAPIC, self).__init__("apic",
**kwargs)
class LibvirtConfigGuestFeaturePAE(LibvirtConfigGuestFeature):
def __init__(self, **kwargs):
super(LibvirtConfigGuestFeaturePAE, self).__init__("pae",
**kwargs)
class LibvirtConfigGuestFeatureKvmHidden(LibvirtConfigGuestFeature):
def __init__(self, **kwargs):
super(LibvirtConfigGuestFeatureKvmHidden, self).__init__("kvm",
**kwargs)
def format_dom(self):
root = super(LibvirtConfigGuestFeatureKvmHidden, self).format_dom()
root.append(etree.Element("hidden", state="on"))
return root
class LibvirtConfigGuestFeaturePMU(LibvirtConfigGuestFeature):
def __init__(self, state, **kwargs):
super(LibvirtConfigGuestFeaturePMU, self).__init__("pmu", **kwargs)
# NOTE(sean-k-mooney): bool_from_string is needed to handle the raw
        # flavor extra_spec value. bool_from_string internally checks if the
# value is already a bool and returns it. As such it's safe to use
# with the image metadata property too, so we call it unconditionally.
self.state = strutils.bool_from_string(state)
def format_dom(self):
root = super(LibvirtConfigGuestFeaturePMU, self).format_dom()
root.attrib['state'] = "on" if self.state else "off"
return root
class LibvirtConfigGuestFeatureHyperV(LibvirtConfigGuestFeature):
# QEMU requires at least this value to be set
MIN_SPINLOCK_RETRIES = 4095
# The spoofed vendor_id can be any alphanumeric string
SPOOFED_VENDOR_ID = "1234567890ab"
def __init__(self, **kwargs):
super(LibvirtConfigGuestFeatureHyperV, self).__init__("hyperv",
**kwargs)
self.relaxed = False
self.vapic = False
self.spinlocks = False
self.spinlock_retries = self.MIN_SPINLOCK_RETRIES
self.vendorid_spoof = False
self.vendorid = self.SPOOFED_VENDOR_ID
def format_dom(self):
root = super(LibvirtConfigGuestFeatureHyperV, self).format_dom()
if self.relaxed:
root.append(etree.Element("relaxed", state="on"))
if self.vapic:
root.append(etree.Element("vapic", state="on"))
if self.spinlocks:
root.append(etree.Element("spinlocks", state="on",
retries=str(self.spinlock_retries)))
if self.vendorid_spoof:
root.append(etree.Element("vendor_id", state="on",
value=self.vendorid))
return root
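# Illustrative sketch: enabling the common Hyper-V enlightenments.
#
#     hv = LibvirtConfigGuestFeatureHyperV()
#     hv.relaxed = hv.vapic = hv.spinlocks = True
#     # -> <hyperv><relaxed state="on"/><vapic state="on"/>
#     #            <spinlocks state="on" retries="4095"/></hyperv>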
class LibvirtConfigGuestSEVLaunchSecurity(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestSEVLaunchSecurity, self).__init__(
root_name='launchSecurity', **kwargs)
self.cbitpos = None
self.reduced_phys_bits = None
def format_dom(self):
root = super(LibvirtConfigGuestSEVLaunchSecurity, self).format_dom()
root.set('type', 'sev')
policy = etree.Element('policy')
policy.text = '0x0033' # hardcoded default according to the spec
root.append(policy)
cbitpos = etree.Element('cbitpos')
cbitpos.text = str(self.cbitpos)
root.append(cbitpos)
reducedPhysBits = etree.Element('reducedPhysBits')
reducedPhysBits.text = str(self.reduced_phys_bits)
root.append(reducedPhysBits)
return root
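# Illustrative sketch: cbitpos and reduced_phys_bits normally come from
# the host's domainCapabilities; the values below are placeholders.
#
#     sev = LibvirtConfigGuestSEVLaunchSecurity()
#     sev.cbitpos = 47
#     sev.reduced_phys_bits = 1
#     # -> <launchSecurity type="sev"><policy>0x0033</policy>
#     #    <cbitpos>47</cbitpos><reducedPhysBits>1</reducedPhysBits>
#     #    </launchSecurity>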
class LibvirtConfigGuest(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuest, self).__init__(root_name="domain",
**kwargs)
self.virt_type = None
self.uuid = None
self.name = None
self.memory = 500 * units.Mi
self.max_memory_size = None
self.max_memory_slots = 0
self.membacking = None
self.memtune = None
self.numatune = None
self.vcpus = 1
self.cpuset = None
self.cpu = None
self.cputune = None
self.features = []
self.clock = None
self.sysinfo = None
self.os_type = None
self.os_loader = None
self.os_loader_type = None
self.os_kernel = None
self.os_initrd = None
self.os_cmdline = None
self.os_init_env = {}
self.os_root = None
self.os_init_path = None
self.os_boot_dev = []
self.os_smbios = None
self.os_mach_type = None
self.os_bootmenu = False
self.devices = []
self.metadata = []
self.idmaps = []
self.perf_events = []
self.launch_security = None
def _format_basic_props(self, root):
root.append(self._text_node("uuid", self.uuid))
root.append(self._text_node("name", self.name))
root.append(self._text_node("memory", self.memory))
if self.max_memory_size is not None:
max_memory = self._text_node("maxMemory", self.max_memory_size)
max_memory.set("slots", str(self.max_memory_slots))
root.append(max_memory)
if self.membacking is not None:
root.append(self.membacking.format_dom())
if self.memtune is not None:
root.append(self.memtune.format_dom())
if self.numatune is not None:
root.append(self.numatune.format_dom())
if self.cpuset is not None:
vcpu = self._text_node("vcpu", self.vcpus)
vcpu.set("cpuset", hardware.format_cpu_spec(self.cpuset))
root.append(vcpu)
else:
root.append(self._text_node("vcpu", self.vcpus))
if len(self.metadata) > 0:
metadata = etree.Element("metadata")
for m in self.metadata:
metadata.append(m.format_dom())
root.append(metadata)
def _format_os(self, root):
os = etree.Element("os")
type_node = self._text_node("type", self.os_type)
if self.os_mach_type is not None:
type_node.set("machine", self.os_mach_type)
os.append(type_node)
if self.os_kernel is not None:
os.append(self._text_node("kernel", self.os_kernel))
if self.os_loader is not None:
# Generate XML nodes for UEFI boot.
if self.os_loader_type == "pflash":
loader = self._text_node("loader", self.os_loader)
loader.set("type", "pflash")
loader.set("readonly", "yes")
os.append(loader)
else:
os.append(self._text_node("loader", self.os_loader))
if self.os_initrd is not None:
os.append(self._text_node("initrd", self.os_initrd))
if self.os_cmdline is not None:
os.append(self._text_node("cmdline", self.os_cmdline))
if self.os_root is not None:
os.append(self._text_node("root", self.os_root))
if self.os_init_path is not None:
os.append(self._text_node("init", self.os_init_path))
for name, value in self.os_init_env.items():
initenv = self._text_node("initenv", value)
initenv.set("name", name)
os.append(initenv)
for boot_dev in self.os_boot_dev:
os.append(etree.Element("boot", dev=boot_dev))
if self.os_smbios is not None:
os.append(self.os_smbios.format_dom())
if self.os_bootmenu:
os.append(etree.Element("bootmenu", enable="yes"))
root.append(os)
def _format_features(self, root):
if len(self.features) > 0:
features = etree.Element("features")
for feat in self.features:
features.append(feat.format_dom())
root.append(features)
def _format_devices(self, root):
if len(self.devices) == 0:
return
devices = etree.Element("devices")
for dev in self.devices:
devices.append(dev.format_dom())
root.append(devices)
def _format_idmaps(self, root):
if len(self.idmaps) == 0:
return
idmaps = etree.Element("idmap")
for idmap in self.idmaps:
idmaps.append(idmap.format_dom())
root.append(idmaps)
def _format_perf_events(self, root):
if len(self.perf_events) == 0:
return
perfs = etree.Element("perf")
for pe in self.perf_events:
event = etree.Element("event", name=pe, enabled="yes")
perfs.append(event)
root.append(perfs)
def _format_sev(self, root):
if self.launch_security is not None:
root.append(self.launch_security.format_dom())
def format_dom(self):
root = super(LibvirtConfigGuest, self).format_dom()
root.set("type", self.virt_type)
self._format_basic_props(root)
if self.sysinfo is not None:
root.append(self.sysinfo.format_dom())
self._format_os(root)
self._format_features(root)
if self.cputune is not None:
root.append(self.cputune.format_dom())
if self.clock is not None:
root.append(self.clock.format_dom())
if self.cpu is not None:
root.append(self.cpu.format_dom())
self._format_devices(root)
self._format_idmaps(root)
self._format_perf_events(root)
self._format_sev(root)
return root
def _parse_basic_props(self, xmldoc):
        # membacking, memtune, numatune and metadata are skipped simply
        # because the corresponding config types do not implement a
        # parse_dom method.
if xmldoc.tag == 'uuid':
self.uuid = xmldoc.text
elif xmldoc.tag == 'name':
self.name = xmldoc.text
elif xmldoc.tag == 'memory':
self.memory = int(xmldoc.text)
elif xmldoc.tag == 'vcpu':
self.vcpus = int(xmldoc.text)
if xmldoc.get('cpuset') is not None:
self.cpuset = hardware.parse_cpu_spec(xmldoc.get('cpuset'))
def _parse_os(self, xmldoc):
# smbios is skipped just because LibvirtConfigGuestSMBIOS
# does not implement parse_dom method
for c in xmldoc:
if c.tag == 'type':
self.os_type = c.text
self.os_mach_type = c.get('machine')
elif c.tag == 'kernel':
self.os_kernel = c.text
elif c.tag == 'loader':
self.os_loader = c.text
if c.get('type') == 'pflash':
self.os_loader_type = 'pflash'
elif c.tag == 'initrd':
self.os_initrd = c.text
elif c.tag == 'cmdline':
self.os_cmdline = c.text
elif c.tag == 'root':
self.os_root = c.text
elif c.tag == 'init':
self.os_init_path = c.text
elif c.tag == 'boot':
self.os_boot_dev.append(c.get('dev'))
elif c.tag == 'bootmenu':
if c.get('enable') == 'yes':
self.os_bootmenu = True
elif c.tag == 'initenv':
self.os_init_env[c.get('name')] = c.text
def parse_dom(self, xmldoc):
self.virt_type = xmldoc.get('type')
        # Note: this covers only: LibvirtConfigGuestDisk
# LibvirtConfigGuestFilesys
# LibvirtConfigGuestHostdevPCI
# LibvirtConfigGuestHostdevMDEV
# LibvirtConfigGuestInterface
# LibvirtConfigGuestUidMap
# LibvirtConfigGuestGidMap
# LibvirtConfigGuestCPU
# LibvirtConfigGuestVPMEM
for c in xmldoc:
if c.tag == 'devices':
for d in c:
if d.tag == 'disk':
obj = LibvirtConfigGuestDisk()
obj.parse_dom(d)
self.devices.append(obj)
elif d.tag == 'filesystem':
obj = LibvirtConfigGuestFilesys()
obj.parse_dom(d)
self.devices.append(obj)
elif d.tag == 'hostdev' and d.get('type') == 'pci':
obj = LibvirtConfigGuestHostdevPCI()
obj.parse_dom(d)
self.devices.append(obj)
elif d.tag == 'hostdev' and d.get('type') == 'mdev':
obj = LibvirtConfigGuestHostdevMDEV()
obj.parse_dom(d)
self.devices.append(obj)
elif d.tag == 'interface':
obj = LibvirtConfigGuestInterface()
obj.parse_dom(d)
self.devices.append(obj)
elif d.tag == 'memory' and d.get('model') == 'nvdimm':
obj = LibvirtConfigGuestVPMEM()
obj.parse_dom(d)
self.devices.append(obj)
if c.tag == 'idmap':
for idmap in c:
obj = None
if idmap.tag == 'uid':
obj = LibvirtConfigGuestUIDMap()
elif idmap.tag == 'gid':
obj = LibvirtConfigGuestGIDMap()
if obj:
obj.parse_dom(idmap)
self.idmaps.append(obj)
elif c.tag == 'cpu':
obj = LibvirtConfigGuestCPU()
obj.parse_dom(c)
self.cpu = obj
elif c.tag == 'perf':
for p in c:
if p.get('enabled') and p.get('enabled') == 'yes':
self.add_perf_event(p.get('name'))
elif c.tag == 'os':
self._parse_os(c)
else:
self._parse_basic_props(c)
def add_device(self, dev):
self.devices.append(dev)
def add_perf_event(self, event):
self.perf_events.append(event)
def set_clock(self, clk):
self.clock = clk
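# Illustrative sketch: the minimal attributes a guest needs before
# to_xml() is useful -- virt_type, uuid, name and os_type have no
# usable defaults (identifiers below are placeholders).
#
#     guest = LibvirtConfigGuest()
#     guest.virt_type = "kvm"
#     guest.uuid = "00000000-0000-0000-0000-000000000001"
#     guest.name = "instance-00000001"
#     guest.os_type = "hvm"
#     guest.add_device(LibvirtConfigGuestWatchdog())
#     xml = guest.to_xml()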
class LibvirtConfigGuestSnapshot(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestSnapshot, self).__init__(
root_name="domainsnapshot",
**kwargs)
self.name = None
self.disks = []
def format_dom(self):
ss = super(LibvirtConfigGuestSnapshot, self).format_dom()
if self.name:
ss.append(self._text_node("name", self.name))
disks = etree.Element('disks')
for disk in self.disks:
disks.append(disk.format_dom())
ss.append(disks)
return ss
def add_disk(self, disk):
self.disks.append(disk)
class LibvirtConfigNodeDevice(LibvirtConfigObject):
"""Libvirt Node Devices parser."""
def __init__(self, **kwargs):
super(LibvirtConfigNodeDevice, self).__init__(root_name="device",
**kwargs)
self.name = None
self.parent = None
self.pci_capability = None
self.mdev_information = None
def parse_dom(self, xmldoc):
super(LibvirtConfigNodeDevice, self).parse_dom(xmldoc)
for c in xmldoc:
if c.tag == "name":
self.name = c.text
elif c.tag == "parent":
self.parent = c.text
elif c.tag == "capability" and c.get("type") in ['pci', 'net']:
pcicap = LibvirtConfigNodeDevicePciCap()
pcicap.parse_dom(c)
self.pci_capability = pcicap
elif c.tag == "capability" and c.get("type") in ['mdev']:
mdev_info = LibvirtConfigNodeDeviceMdevInformation()
mdev_info.parse_dom(c)
self.mdev_information = mdev_info
class LibvirtConfigNodeDevicePciCap(LibvirtConfigObject):
"""Libvirt Node Devices pci capability parser."""
def __init__(self, **kwargs):
super(LibvirtConfigNodeDevicePciCap, self).__init__(
root_name="capability", **kwargs)
self.domain = None
self.bus = None
self.slot = None
self.function = None
self.product = None
self.product_id = None
self.vendor = None
self.vendor_id = None
self.numa_node = None
self.fun_capability = []
self.mdev_capability = []
self.interface = None
self.address = None
self.link_state = None
self.features = []
def parse_dom(self, xmldoc):
super(LibvirtConfigNodeDevicePciCap, self).parse_dom(xmldoc)
for c in xmldoc:
if c.tag == "domain":
self.domain = int(c.text)
elif c.tag == "slot":
self.slot = int(c.text)
elif c.tag == "bus":
self.bus = int(c.text)
elif c.tag == "function":
self.function = int(c.text)
elif c.tag == "product":
self.product = c.text
self.product_id = int(c.get('id'), 16)
elif c.tag == "vendor":
self.vendor = c.text
self.vendor_id = int(c.get('id'), 16)
elif c.tag == "numa":
self.numa_node = int(c.get('node'))
elif c.tag == "interface":
self.interface = c.text
elif c.tag == "address":
self.address = c.text
elif c.tag == "link":
self.link_state = c.get('state')
elif c.tag == "feature":
self.features.append(c.get('name'))
elif c.tag == "capability" and c.get('type') in \
('virt_functions', 'phys_function'):
funcap = LibvirtConfigNodeDevicePciSubFunctionCap()
funcap.parse_dom(c)
self.fun_capability.append(funcap)
elif c.tag == "capability" and c.get('type') in ('mdev_types',):
mdevcap = LibvirtConfigNodeDeviceMdevCapableSubFunctionCap()
mdevcap.parse_dom(c)
self.mdev_capability.append(mdevcap)
class LibvirtConfigNodeDevicePciSubFunctionCap(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigNodeDevicePciSubFunctionCap, self).__init__(
root_name="capability", **kwargs)
self.type = None
self.device_addrs = list() # list of tuple (domain,bus,slot,function)
def parse_dom(self, xmldoc):
super(LibvirtConfigNodeDevicePciSubFunctionCap, self).parse_dom(xmldoc)
self.type = xmldoc.get("type")
for c in xmldoc:
if c.tag == "address":
self.device_addrs.append((int(c.get('domain'), 16),
int(c.get('bus'), 16),
int(c.get('slot'), 16),
int(c.get('function'), 16)))
class LibvirtConfigNodeDeviceMdevCapableSubFunctionCap(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigNodeDeviceMdevCapableSubFunctionCap, self).__init__(
root_name="capability", **kwargs)
# mdev_types is a list of dictionaries where each item looks like:
# {'type': 'nvidia-11', 'name': 'GRID M60-0B', 'deviceAPI': 'vfio-pci',
# 'availableInstances': 16}
self.mdev_types = list()
def parse_dom(self, xmldoc):
super(LibvirtConfigNodeDeviceMdevCapableSubFunctionCap,
self).parse_dom(xmldoc)
for c in xmldoc:
if c.tag == "type":
mdev_type = {'type': c.get('id')}
for e in c:
mdev_type[e.tag] = (int(e.text)
if e.tag == 'availableInstances'
else e.text)
self.mdev_types.append(mdev_type)
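# Illustrative sketch: after parse_dom(), mdev_types holds entries of the
# shape documented above, e.g.
#     [{'type': 'nvidia-11', 'name': 'GRID M60-0B',
#       'deviceAPI': 'vfio-pci', 'availableInstances': 16}]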
class LibvirtConfigNodeDeviceMdevInformation(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigNodeDeviceMdevInformation, self).__init__(
root_name="capability", **kwargs)
self.type = None
self.iommu_group = None
def parse_dom(self, xmldoc):
super(LibvirtConfigNodeDeviceMdevInformation,
self).parse_dom(xmldoc)
for c in xmldoc:
if c.tag == "type":
self.type = c.get('id')
if c.tag == "iommuGroup":
self.iommu_group = int(c.get('number'))
class LibvirtConfigGuestRng(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestRng, self).__init__(root_name="rng",
**kwargs)
self.device_model = 'virtio'
self.model = 'random'
self.backend = None
self.rate_period = None
self.rate_bytes = None
self.driver_iommu = False
@property
def uses_virtio(self):
return 'virtio' == self.device_model
def format_dom(self):
dev = super(LibvirtConfigGuestRng, self).format_dom()
dev.set('model', self.device_model)
backend = etree.Element("backend")
backend.set("model", self.model)
backend.text = self.backend
if self.rate_period and self.rate_bytes:
rate = etree.Element("rate")
rate.set("period", str(self.rate_period))
rate.set("bytes", str(self.rate_bytes))
dev.append(rate)
dev.append(backend)
if self.driver_iommu:
dev.append(etree.Element('driver', iommu="on"))
return dev
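# Illustrative sketch: a virtio-rng device backed by the host's
# /dev/urandom, with an optional rate limit (example values).
#
#     rng = LibvirtConfigGuestRng()
#     rng.backend = "/dev/urandom"
#     rng.rate_period = 1000       # milliseconds
#     rng.rate_bytes = 1024
#     # -> <rng model="virtio"><rate period="1000" bytes="1024"/>
#     #    <backend model="random">/dev/urandom</backend></rng>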
class LibvirtConfigGuestMetaNovaInstance(LibvirtConfigObject):
def __init__(self):
super(LibvirtConfigGuestMetaNovaInstance,
self).__init__(root_name="instance",
ns_prefix="nova",
ns_uri=NOVA_NS)
self.package = None
self.flavor = None
self.name = None
self.creationTime = None
self.owner = None
self.roottype = None
self.rootid = None
def format_dom(self):
meta = super(LibvirtConfigGuestMetaNovaInstance, self).format_dom()
pkg = self._new_node("package")
pkg.set("version", self.package)
meta.append(pkg)
if self.name is not None:
meta.append(self._text_node("name", self.name))
if self.creationTime is not None:
timestr = time.strftime("%Y-%m-%d %H:%M:%S",
time.gmtime(self.creationTime))
meta.append(self._text_node("creationTime", timestr))
if self.flavor is not None:
meta.append(self.flavor.format_dom())
if self.owner is not None:
meta.append(self.owner.format_dom())
if self.roottype is not None and self.rootid is not None:
root = self._new_node("root")
root.set("type", self.roottype)
root.set("uuid", str(self.rootid))
meta.append(root)
return meta
class LibvirtConfigGuestMetaNovaFlavor(LibvirtConfigObject):
def __init__(self):
super(LibvirtConfigGuestMetaNovaFlavor,
self).__init__(root_name="flavor",
ns_prefix="nova",
ns_uri=NOVA_NS)
self.name = None
self.memory = None
self.disk = None
self.swap = None
self.ephemeral = None
self.vcpus = None
def format_dom(self):
meta = super(LibvirtConfigGuestMetaNovaFlavor, self).format_dom()
meta.set("name", self.name)
if self.memory is not None:
meta.append(self._text_node("memory", str(self.memory)))
if self.disk is not None:
meta.append(self._text_node("disk", str(self.disk)))
if self.swap is not None:
meta.append(self._text_node("swap", str(self.swap)))
if self.ephemeral is not None:
meta.append(self._text_node("ephemeral", str(self.ephemeral)))
if self.vcpus is not None:
meta.append(self._text_node("vcpus", str(self.vcpus)))
return meta
class LibvirtConfigGuestMetaNovaOwner(LibvirtConfigObject):
def __init__(self):
super(LibvirtConfigGuestMetaNovaOwner,
self).__init__(root_name="owner",
ns_prefix="nova",
ns_uri=NOVA_NS)
self.userid = None
self.username = None
self.projectid = None
self.projectname = None
def format_dom(self):
meta = super(LibvirtConfigGuestMetaNovaOwner, self).format_dom()
if self.userid is not None and self.username is not None:
user = self._text_node("user", self.username)
user.set("uuid", self.userid)
meta.append(user)
if self.projectid is not None and self.projectname is not None:
project = self._text_node("project", self.projectname)
project.set("uuid", self.projectid)
meta.append(project)
return meta
class LibvirtConfigSecret(LibvirtConfigObject):
def __init__(self):
super(LibvirtConfigSecret,
self).__init__(root_name="secret")
self.ephemeral = False
self.private = False
self.description = None
self.uuid = None
self.usage_type = None
self.usage_id = None
def get_yes_no_str(self, value):
if value:
return 'yes'
return 'no'
def format_dom(self):
root = super(LibvirtConfigSecret, self).format_dom()
root.set("ephemeral", self.get_yes_no_str(self.ephemeral))
root.set("private", self.get_yes_no_str(self.private))
if self.description is not None:
root.append(self._text_node("description", str(self.description)))
if self.uuid is not None:
root.append(self._text_node("uuid", str(self.uuid)))
usage = self._new_node("usage")
usage.set("type", self.usage_type)
if self.usage_type in ('ceph', 'vtpm'):
usage.append(self._text_node('name', str(self.usage_id)))
elif self.usage_type == 'iscsi':
usage.append(self._text_node('target', str(self.usage_id)))
elif self.usage_type == 'volume':
usage.append(self._text_node('volume', str(self.usage_id)))
root.append(usage)
return root
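# Illustrative sketch: a Ceph auth secret; usage_type selects which child
# element carries usage_id (<name> for ceph/vtpm, <target> for iscsi,
# <volume> for volume). The usage name is a placeholder.
#
#     sec = LibvirtConfigSecret()
#     sec.ephemeral = True
#     sec.usage_type = "ceph"
#     sec.usage_id = "client.cinder secret"
#     # -> <secret ephemeral="yes" private="no">
#     #    <usage type="ceph"><name>client.cinder secret</name></usage></secret>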
class LibvirtConfigGuestVPMEM(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestVPMEM, self).__init__(
root_name="memory", **kwargs)
self.model = "nvdimm"
self.access = "shared"
self.source_path = kwargs.get("devpath", "")
self.align_size = kwargs.get("align_kb", 0)
self.pmem = True
self.target_size = kwargs.get("size_kb", 0)
self.target_node = 0
self.label_size = 2 * units.Ki
def format_dom(self):
memory = super(LibvirtConfigGuestVPMEM, self).format_dom()
memory.set("model", self.model)
memory.set("access", self.access)
source = etree.Element("source")
source.append(self._text_node("path", self.source_path))
source.append(self._text_node("alignsize", self.align_size))
if self.pmem is True:
source.append(etree.Element("pmem"))
target = etree.Element("target")
target.append(self._text_node("size", self.target_size))
target.append(self._text_node("node", self.target_node))
label = etree.Element("label")
label.append(self._text_node("size", self.label_size))
target.append(label)
memory.append(source)
memory.append(target)
return memory
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestVPMEM, self).parse_dom(xmldoc)
self.model = xmldoc.get("model")
self.access = xmldoc.get("access")
for c in list(xmldoc):
if c.tag == "source":
for sub in list(c):
if sub.tag == "path":
self.source_path = sub.text
if sub.tag == "alignsize":
self.align_size = sub.text
elif c.tag == "target":
for sub in list(c):
if sub.tag == "size":
self.target_size = sub.text
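# Illustrative sketch: constructing an nvdimm device from a host pmem
# namespace. The kwargs are the ones read in __init__ above; the device
# path is a placeholder.
#
#     vpmem = LibvirtConfigGuestVPMEM(devpath="/dev/dax0.0",
#                                     size_kb=4 * units.Mi, align_kb=2048)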
class LibvirtConfigGuestSound(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestSound, self).__init__(root_name="sound",
**kwargs)
self.model = "ich6"
self.codec_type = "micro"
#self.address_type = "pci"
#self.address_domain = "0x0000"
#self.address_bus = "0x00"
#self.address_slot = "0x04"
#self.address_function = "0x0"
def format_dom(self):
dev = super(LibvirtConfigGuestSound, self).format_dom()
dev.set("model", self.model)
drv_codec = etree.Element("codec")
drv_codec.set("type", self.codec_type)
#drv_address = etree.Element("address")
#drv_address.set("type", self.address_type)
#drv_address.set("domain", self.address_domain)
#drv_address.set("bus", self.address_bus)
#drv_address.set("slot", self.address_slot)
#drv_address.set("function", self.address_function)
dev.append(drv_codec)
return dev
import time
from collections import OrderedDict
from lxml import etree
from oslo_utils import strutils
from oslo_utils import units
from nova import exception
from nova.i18n import _
from nova.pci import utils as pci_utils
from nova.virt import hardware
NOVA_NS = "http://openstack.org/xmlns/libvirt/nova/1.0"
class LibvirtConfigObject(object):
def __init__(self, **kwargs):
super(LibvirtConfigObject, self).__init__()
self.root_name = kwargs.get("root_name")
self.ns_prefix = kwargs.get('ns_prefix')
self.ns_uri = kwargs.get('ns_uri')
def _new_node(self, node_name, **kwargs):
if self.ns_uri is None:
return etree.Element(node_name, **kwargs)
else:
return etree.Element("{" + self.ns_uri + "}" + node_name,
nsmap={self.ns_prefix: self.ns_uri},
**kwargs)
def _text_node(self, node_name, value, **kwargs):
child = self._new_node(node_name, **kwargs)
child.text = str(value)
return child
def format_dom(self):
return self._new_node(self.root_name)
def parse_str(self, xmlstr):
self.parse_dom(etree.fromstring(xmlstr))
def parse_dom(self, xmldoc):
if self.root_name != xmldoc.tag:
msg = (_("Root element name should be '%(name)s' not '%(tag)s'") %
{'name': self.root_name, 'tag': xmldoc.tag})
raise exception.InvalidInput(msg)
def to_xml(self, pretty_print=True):
root = self.format_dom()
xml_str = etree.tostring(root, encoding='unicode',
pretty_print=pretty_print)
return xml_str
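# Illustrative sketch: every config class follows the same round trip --
# parse_str()/parse_dom() consume XML and format_dom()/to_xml() emit it.
# The root name below is hypothetical.
#
#     obj = LibvirtConfigObject(root_name="example")
#     obj.parse_str("<example/>")    # tag must match root_name
#     obj.to_xml()                   # -> "<example/>\n"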
class LibvirtConfigCaps(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCaps, self).__init__(root_name="capabilities",
**kwargs)
self.host = None
self.guests = []
def parse_dom(self, xmldoc):
super(LibvirtConfigCaps, self).parse_dom(xmldoc)
for c in xmldoc:
if c.tag == "host":
host = LibvirtConfigCapsHost()
host.parse_dom(c)
self.host = host
elif c.tag == "guest":
guest = LibvirtConfigCapsGuest()
guest.parse_dom(c)
self.guests.append(guest)
def format_dom(self):
caps = super(LibvirtConfigCaps, self).format_dom()
if self.host:
caps.append(self.host.format_dom())
for g in self.guests:
caps.append(g.format_dom())
return caps
class LibvirtConfigDomainCaps(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigDomainCaps, self).__init__(
root_name="domainCapabilities", **kwargs)
self._features = None
self._machine = None
self._alias = None
self._devices = None
def parse_dom(self, xmldoc):
super(LibvirtConfigDomainCaps, self).parse_dom(xmldoc)
for c in xmldoc:
if c.tag == "features":
features = LibvirtConfigDomainCapsFeatures()
features.parse_dom(c)
self._features = features
elif c.tag == "machine":
self._machine = c.text
elif c.tag == "devices":
devices = LibvirtConfigDomainCapsDevices()
devices.parse_dom(c)
self._devices = devices
@property
def features(self):
if self._features is None:
return []
return self._features.features
@property
def machine_type(self):
if self._machine is None:
return ""
return self._machine
@property
def machine_type_alias(self):
if self._alias is None:
return self._machine
return self._alias
@machine_type_alias.setter
def machine_type_alias(self, alias):
self._alias = alias
@property
def devices(self):
if self._devices is None:
return []
return self._devices
class LibvirtConfigDomainCapsVideoModels(LibvirtConfigObject):
def __init__(self, **kwargs):
super().__init__(root_name='video', **kwargs)
self.supported = False
self.models = set()
def parse_dom(self, xmldoc):
super().parse_dom(xmldoc)
if xmldoc.get('supported') == 'yes':
self.supported = True
self.models = {str(node) for node in
xmldoc.xpath("//enum[@name='modelType']/value/text()")}
class LibvirtConfigDomainCapsDiskBuses(LibvirtConfigObject):
def __init__(self, **kwargs):
super().__init__(root_name='disk', **kwargs)
self.supported = False
self.buses = set()
def parse_dom(self, xmldoc):
super(LibvirtConfigDomainCapsDiskBuses, self).parse_dom(xmldoc)
if xmldoc.get('supported') == 'yes':
self.supported = True
self.buses = {str(node) for node in
xmldoc.xpath("//enum[@name='bus']/value/text()")}
class LibvirtConfigDomainCapsDevices(LibvirtConfigObject):
DEVICE_PARSERS = {
'video': LibvirtConfigDomainCapsVideoModels,
'disk': LibvirtConfigDomainCapsDiskBuses,
}
def __init__(self, **kwargs):
super().__init__(root_name='devices', **kwargs)
self.devices = set()
def parse_dom(self, xmldoc):
super().parse_dom(xmldoc)
for c in list(xmldoc):
device = self.DEVICE_PARSERS.get(c.tag)
if device:
device = device()
device.parse_dom(c)
self.devices.add(device)
def _get_device(self, device_type):
for device in self.devices:
if type(device) == self.DEVICE_PARSERS.get(device_type):
return device
return None
@property
def disk(self):
return self._get_device('disk')
@property
def video(self):
return self._get_device('video')
class LibvirtConfigDomainCapsFeatures(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigDomainCapsFeatures, self).__init__(
root_name="features", **kwargs)
self.features = []
def parse_dom(self, xmldoc):
super(LibvirtConfigDomainCapsFeatures, self).parse_dom(xmldoc)
for c in xmldoc:
feature = None
if c.tag == "sev":
feature = LibvirtConfigDomainCapsFeatureSev()
if feature:
feature.parse_dom(c)
self.features.append(feature)
        # There are many other features and domain capabilities, but we
        # don't need to parse them until we actually need their values.
    # For the same reason, we do not need a format_dom() method, but
    # it's a bug if this ever gets called and we inherited one from
    # the base class, so this ensures we don't inherit one:
    def format_dom(self):
        raise RuntimeError(_('BUG: tried to generate domainCapabilities XML'))
class LibvirtConfigDomainCapsFeatureSev(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigDomainCapsFeatureSev, self).__init__(
root_name='sev', **kwargs)
self.supported = False
self.cbitpos = None
self.reduced_phys_bits = None
def parse_dom(self, xmldoc):
super(LibvirtConfigDomainCapsFeatureSev, self).parse_dom(xmldoc)
if xmldoc.get('supported') == 'yes':
self.supported = True
for c in list(xmldoc):
if c.tag == 'reducedPhysBits':
self.reduced_phys_bits = int(c.text)
elif c.tag == 'cbitpos':
self.cbitpos = int(c.text)
class LibvirtConfigCapsNUMATopology(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCapsNUMATopology, self).__init__(
root_name="topology",
**kwargs)
self.cells = []
def parse_dom(self, xmldoc):
super(LibvirtConfigCapsNUMATopology, self).parse_dom(xmldoc)
xmlcells = xmldoc[0]
for xmlcell in xmlcells:
cell = LibvirtConfigCapsNUMACell()
cell.parse_dom(xmlcell)
self.cells.append(cell)
def format_dom(self):
topo = super(LibvirtConfigCapsNUMATopology, self).format_dom()
cells = etree.Element("cells")
cells.set("num", str(len(self.cells)))
topo.append(cells)
for cell in self.cells:
cells.append(cell.format_dom())
return topo
class LibvirtConfigCapsNUMACell(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCapsNUMACell, self).__init__(root_name="cell",
**kwargs)
self.id = None
self.memory = 0
self.mempages = []
self.cpus = []
def parse_dom(self, xmldoc):
super(LibvirtConfigCapsNUMACell, self).parse_dom(xmldoc)
self.id = int(xmldoc.get("id"))
for c in xmldoc:
if c.tag == "memory":
self.memory = int(c.text)
elif c.tag == "pages":
pages = LibvirtConfigCapsNUMAPages()
pages.parse_dom(c)
self.mempages.append(pages)
elif c.tag == "cpus":
for c2 in c:
cpu = LibvirtConfigCapsNUMACPU()
cpu.parse_dom(c2)
self.cpus.append(cpu)
def format_dom(self):
cell = super(LibvirtConfigCapsNUMACell, self).format_dom()
cell.set("id", str(self.id))
mem = etree.Element("memory")
mem.set("unit", "KiB")
mem.text = str(self.memory)
cell.append(mem)
for pages in self.mempages:
cell.append(pages.format_dom())
cpus = etree.Element("cpus")
cpus.set("num", str(len(self.cpus)))
for cpu in self.cpus:
cpus.append(cpu.format_dom())
cell.append(cpus)
return cell
class LibvirtConfigCapsNUMACPU(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCapsNUMACPU, self).__init__(root_name="cpu",
**kwargs)
self.id = None
self.socket_id = None
self.core_id = None
self.siblings = None
def parse_dom(self, xmldoc):
super(LibvirtConfigCapsNUMACPU, self).parse_dom(xmldoc)
self.id = int(xmldoc.get("id"))
if xmldoc.get("socket_id") is not None:
self.socket_id = int(xmldoc.get("socket_id"))
if xmldoc.get("core_id") is not None:
self.core_id = int(xmldoc.get("core_id"))
if xmldoc.get("siblings") is not None:
self.siblings = hardware.parse_cpu_spec(
xmldoc.get("siblings"))
def format_dom(self):
cpu = super(LibvirtConfigCapsNUMACPU, self).format_dom()
cpu.set("id", str(self.id))
if self.socket_id is not None:
cpu.set("socket_id", str(self.socket_id))
if self.core_id is not None:
cpu.set("core_id", str(self.core_id))
if self.siblings is not None:
cpu.set("siblings",
hardware.format_cpu_spec(self.siblings))
return cpu
class LibvirtConfigCapsNUMAPages(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCapsNUMAPages, self).__init__(
root_name="pages", **kwargs)
self.size = None
self.total = None
def parse_dom(self, xmldoc):
super(LibvirtConfigCapsNUMAPages, self).parse_dom(xmldoc)
self.size = int(xmldoc.get("size"))
self.total = int(xmldoc.text)
def format_dom(self):
pages = super(LibvirtConfigCapsNUMAPages, self).format_dom()
pages.text = str(self.total)
pages.set("size", str(self.size))
pages.set("unit", "KiB")
return pages
class LibvirtConfigCapsHost(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCapsHost, self).__init__(root_name="host",
**kwargs)
self.cpu = None
self.uuid = None
self.topology = None
def parse_dom(self, xmldoc):
super(LibvirtConfigCapsHost, self).parse_dom(xmldoc)
for c in xmldoc:
if c.tag == "cpu":
cpu = LibvirtConfigCPU()
cpu.parse_dom(c)
self.cpu = cpu
elif c.tag == "uuid":
self.uuid = c.text
elif c.tag == "topology":
self.topology = LibvirtConfigCapsNUMATopology()
self.topology.parse_dom(c)
def format_dom(self):
caps = super(LibvirtConfigCapsHost, self).format_dom()
if self.uuid:
caps.append(self._text_node("uuid", self.uuid))
if self.cpu:
caps.append(self.cpu.format_dom())
if self.topology:
caps.append(self.topology.format_dom())
return caps
class LibvirtConfigCapsGuest(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCapsGuest, self).__init__(root_name="guest",
**kwargs)
self.arch = None
self.ostype = None
self.domains = OrderedDict()
self.default_domain = None
def parse_dom(self, xmldoc):
super(LibvirtConfigCapsGuest, self).parse_dom(xmldoc)
for child in xmldoc:
if child.tag == "os_type":
self.ostype = child.text
elif child.tag == "arch":
self.parse_arch(child)
def parse_arch(self, xmldoc):
self.arch = xmldoc.get("name")
self.default_domain = LibvirtConfigCapsGuestDomain()
for child in xmldoc:
if child.tag == "domain":
if list(child):
domain = LibvirtConfigCapsGuestDomain()
domain.parse_dom(child)
self.domains[domain.domtype] = domain
else:
self.default_domain.parse_domain(child)
self.domains[self.default_domain.domtype] = \
self.default_domain
else:
self.default_domain.parse_child(child)
def format_dom(self):
caps = super(LibvirtConfigCapsGuest, self).format_dom()
if self.ostype is not None:
caps.append(self._text_node("os_type", self.ostype))
if self.arch:
arch = self.format_arch()
caps.append(arch)
return caps
def format_arch(self):
arch = etree.Element("arch", name=self.arch)
for c in self.default_domain.format_dom():
arch.append(c)
arch.append(self._new_node("domain", type=self.default_domain.domtype))
for domtype, domain in self.domains.items():
if domtype == self.default_domain.domtype:
continue
arch.append(domain.format_dom())
return arch
class LibvirtConfigCapsGuestDomain(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCapsGuestDomain, self).__init__(
root_name="domain", **kwargs)
self.domtype = None
# Track <emulator> values, which we need in order to be able
# to call virConnectGetDomainCapabilities() - typically
# something like '/usr/bin/qemu-system-i386'.
self.emulator = None
self.machines = {}
self.aliases = {}
def parse_dom(self, xmldoc):
super(LibvirtConfigCapsGuestDomain, self).parse_dom(xmldoc)
self.parse_domain(xmldoc)
for c in xmldoc:
self.parse_child(c)
def parse_child(self, xmldoc):
if xmldoc.tag == "emulator":
self.emulator = xmldoc.text
elif xmldoc.tag == "machine":
self.parse_machine(xmldoc)
def parse_domain(self, xmldoc):
self.domtype = xmldoc.get("type")
if self.domtype is None:
raise exception.InvalidInput(
"Didn't find domain type in %s", xmldoc)
def parse_machine(self, xmldoc):
if 'canonical' in xmldoc.attrib:
self.aliases[xmldoc.text] = xmldoc.attrib
else:
self.machines[xmldoc.text] = xmldoc.attrib
def format_dom(self):
domain = super(LibvirtConfigCapsGuestDomain, self).format_dom()
if self.domtype is not None:
domain.set("type", self.domtype)
if self.emulator is not None:
domain.append(self._text_node("emulator", self.emulator))
for mach_type, machine in self.machines.items():
domain.append(self._text_node("machine", mach_type, **machine))
for alias, machine in self.aliases.items():
domain.append(self._text_node("machine", alias, **machine))
return domain
class LibvirtConfigGuestTimer(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestTimer, self).__init__(root_name="timer",
**kwargs)
self.name = "platform"
self.track = None
self.tickpolicy = None
self.present = None
def format_dom(self):
tm = super(LibvirtConfigGuestTimer, self).format_dom()
tm.set("name", self.name)
if self.track is not None:
tm.set("track", self.track)
if self.tickpolicy is not None:
tm.set("tickpolicy", self.tickpolicy)
if self.present is not None:
if self.present:
tm.set("present", "yes")
else:
tm.set("present", "no")
return tm
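# Illustrative sketch: timers hang off LibvirtConfigGuestClock (defined
# next) rather than being emitted standalone.
#
#     clk = LibvirtConfigGuestClock()
#     tm = LibvirtConfigGuestTimer()
#     tm.name = "pit"
#     tm.tickpolicy = "delay"
#     clk.add_timer(tm)
#     # -> <clock offset="utc"><timer name="pit" tickpolicy="delay"/></clock>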
class LibvirtConfigGuestClock(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestClock, self).__init__(root_name="clock",
**kwargs)
self.offset = "utc"
self.adjustment = None
self.timezone = None
self.timers = []
def format_dom(self):
clk = super(LibvirtConfigGuestClock, self).format_dom()
clk.set("offset", self.offset)
if self.adjustment:
clk.set("adjustment", self.adjustment)
elif self.timezone:
clk.set("timezone", self.timezone)
for tm in self.timers:
clk.append(tm.format_dom())
return clk
def add_timer(self, tm):
self.timers.append(tm)
class LibvirtConfigCPUFeature(LibvirtConfigObject):
def __init__(self, name=None, **kwargs):
super(LibvirtConfigCPUFeature, self).__init__(root_name='feature',
**kwargs)
self.name = name
self.policy = "require"
def parse_dom(self, xmldoc):
super(LibvirtConfigCPUFeature, self).parse_dom(xmldoc)
self.name = xmldoc.get("name")
self.policy = xmldoc.get("policy", "require")
def format_dom(self):
ft = super(LibvirtConfigCPUFeature, self).format_dom()
ft.set("name", self.name)
return ft
def __eq__(self, obj):
return obj.name == self.name
def __ne__(self, obj):
return obj.name != self.name
def __hash__(self):
return hash(self.name)
class LibvirtConfigCPU(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigCPU, self).__init__(root_name='cpu',
**kwargs)
self.arch = None
self.vendor = None
self.model = None
self.sockets = None
self.cores = None
self.threads = None
self.features = set()
def parse_dom(self, xmldoc):
super(LibvirtConfigCPU, self).parse_dom(xmldoc)
for c in xmldoc:
if c.tag == "arch":
self.arch = c.text
elif c.tag == "model":
self.model = c.text
elif c.tag == "vendor":
self.vendor = c.text
elif c.tag == "topology":
self.sockets = int(c.get("sockets"))
self.cores = int(c.get("cores"))
self.threads = int(c.get("threads"))
elif c.tag == "feature":
f = LibvirtConfigCPUFeature()
f.parse_dom(c)
if f.policy != "disable":
self.add_feature(f)
def format_dom(self):
cpu = super(LibvirtConfigCPU, self).format_dom()
if self.arch is not None:
cpu.append(self._text_node("arch", self.arch))
if self.model is not None:
cpu.append(self._text_node("model", self.model))
if self.vendor is not None:
cpu.append(self._text_node("vendor", self.vendor))
if (self.sockets is not None and
self.cores is not None and
self.threads is not None):
top = etree.Element("topology")
top.set("sockets", str(self.sockets))
top.set("cores", str(self.cores))
top.set("threads", str(self.threads))
cpu.append(top)
for f in sorted(self.features, key=lambda x: x.name):
if f.policy != "disable":
cpu.append(f.format_dom())
return cpu
def add_feature(self, feat):
self.features.add(feat)
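# Illustrative sketch: parsing a host-capabilities <cpu> block and reading
# the topology back (example XML).
#
#     cpu = LibvirtConfigCPU()
#     cpu.parse_str("<cpu><arch>x86_64</arch><model>Skylake-Client</model>"
#                   "<topology sockets='1' cores='4' threads='2'/></cpu>")
#     (cpu.sockets, cpu.cores, cpu.threads)   # -> (1, 4, 2)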
class LibvirtConfigGuestCPUFeature(LibvirtConfigCPUFeature):
def __init__(self, name=None, **kwargs):
super(LibvirtConfigGuestCPUFeature, self).__init__(name, **kwargs)
self.policy = "require"
def format_dom(self):
ft = super(LibvirtConfigGuestCPUFeature, self).format_dom()
ft.set("policy", self.policy)
return ft
class LibvirtConfigGuestCPUNUMACell(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestCPUNUMACell, self).__init__(root_name="cell",
**kwargs)
self.id = None
self.cpus = None
self.memory = None
self.memAccess = None
def parse_dom(self, xmldoc):
if xmldoc.get("id") is not None:
self.id = int(xmldoc.get("id"))
if xmldoc.get("memory") is not None:
self.memory = int(xmldoc.get("memory"))
if xmldoc.get("cpus") is not None:
self.cpus = hardware.parse_cpu_spec(xmldoc.get("cpus"))
self.memAccess = xmldoc.get("memAccess")
def format_dom(self):
cell = super(LibvirtConfigGuestCPUNUMACell, self).format_dom()
if self.id is not None:
cell.set("id", str(self.id))
if self.cpus is not None:
cell.set("cpus",
hardware.format_cpu_spec(self.cpus))
if self.memory is not None:
cell.set("memory", str(self.memory))
if self.memAccess is not None:
cell.set("memAccess", self.memAccess)
return cell
class LibvirtConfigGuestCPUNUMA(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestCPUNUMA, self).__init__(root_name="numa",
**kwargs)
self.cells = []
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestCPUNUMA, self).parse_dom(xmldoc)
for child in xmldoc:
if child.tag == "cell":
cell = LibvirtConfigGuestCPUNUMACell()
cell.parse_dom(child)
self.cells.append(cell)
def format_dom(self):
numa = super(LibvirtConfigGuestCPUNUMA, self).format_dom()
for cell in self.cells:
numa.append(cell.format_dom())
return numa
class LibvirtConfigGuestCPU(LibvirtConfigCPU):
def __init__(self, **kwargs):
super(LibvirtConfigGuestCPU, self).__init__(**kwargs)
self.mode = None
self.match = "exact"
self.numa = None
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestCPU, self).parse_dom(xmldoc)
self.mode = xmldoc.get('mode')
self.match = xmldoc.get('match')
for child in xmldoc:
if child.tag == "numa":
numa = LibvirtConfigGuestCPUNUMA()
numa.parse_dom(child)
self.numa = numa
def format_dom(self):
cpu = super(LibvirtConfigGuestCPU, self).format_dom()
if self.mode:
cpu.set("mode", self.mode)
cpu.set("match", self.match)
if self.numa is not None:
cpu.append(self.numa.format_dom())
return cpu
class LibvirtConfigGuestSMBIOS(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestSMBIOS, self).__init__(root_name="smbios",
**kwargs)
self.mode = "sysinfo"
def format_dom(self):
smbios = super(LibvirtConfigGuestSMBIOS, self).format_dom()
smbios.set("mode", self.mode)
return smbios
class LibvirtConfigGuestSysinfo(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestSysinfo, self).__init__(root_name="sysinfo",
**kwargs)
self.type = "smbios"
self.bios_vendor = None
self.bios_version = None
self.system_manufacturer = None
self.system_product = None
self.system_version = None
self.system_serial = None
self.system_uuid = None
self.system_family = None
def format_dom(self):
sysinfo = super(LibvirtConfigGuestSysinfo, self).format_dom()
sysinfo.set("type", self.type)
bios = etree.Element("bios")
system = etree.Element("system")
if self.bios_vendor is not None:
bios.append(self._text_node("entry", self.bios_vendor,
name="vendor"))
if self.bios_version is not None:
bios.append(self._text_node("entry", self.bios_version,
name="version"))
if self.system_manufacturer is not None:
system.append(self._text_node("entry", self.system_manufacturer,
name="manufacturer"))
if self.system_product is not None:
system.append(self._text_node("entry", self.system_product,
name="product"))
if self.system_version is not None:
system.append(self._text_node("entry", self.system_version,
name="version"))
if self.system_serial is not None:
system.append(self._text_node("entry", self.system_serial,
name="serial"))
if self.system_uuid is not None:
system.append(self._text_node("entry", self.system_uuid,
name="uuid"))
if self.system_family is not None:
system.append(self._text_node("entry", self.system_family,
name="family"))
if len(list(bios)) > 0:
sysinfo.append(bios)
if len(list(system)) > 0:
sysinfo.append(system)
return sysinfo
class LibvirtConfigGuestDevice(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestDevice, self).__init__(**kwargs)
@property
def uses_virtio(self):
return False
class LibvirtConfigGuestVTPM(LibvirtConfigGuestDevice):
def __init__(self, vtpm_config, vtpm_secret_uuid, **kwargs):
super(LibvirtConfigGuestVTPM, self).__init__(root_name="tpm", **kwargs)
self.version = vtpm_config.version
self.model = vtpm_config.model
self.secret_uuid = vtpm_secret_uuid
def format_dom(self):
dev = super(LibvirtConfigGuestVTPM, self).format_dom()
dev.set("model", self.model)
back = etree.Element("backend")
back.set("type", "emulator")
back.set("version", self.version)
enc = etree.Element("encryption")
enc.set("secret", self.secret_uuid)
back.append(enc)
dev.append(back)
return dev
class LibvirtConfigGuestDisk(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestDisk, self).__init__(root_name="disk",
**kwargs)
self.source_type = "file"
self.source_device = "disk"
self.driver_name = None
self.driver_format = None
self.driver_cache = None
self.driver_discard = None
self.driver_io = None
self.driver_iommu = False
self.source_path = None
self.source_protocol = None
self.source_name = None
self.source_hosts = []
self.source_ports = []
self.target_dev = None
self.target_path = None
self.target_bus = None
self.auth_username = None
self.auth_secret_type = None
self.auth_secret_uuid = None
self.serial = None
self.disk_read_bytes_sec = None
self.disk_read_iops_sec = None
self.disk_write_bytes_sec = None
self.disk_write_iops_sec = None
self.disk_total_bytes_sec = None
self.disk_total_iops_sec = None
self.disk_read_bytes_sec_max = None
self.disk_write_bytes_sec_max = None
self.disk_total_bytes_sec_max = None
self.disk_read_iops_sec_max = None
self.disk_write_iops_sec_max = None
self.disk_total_iops_sec_max = None
self.disk_size_iops_sec = None
self.logical_block_size = None
self.physical_block_size = None
self.readonly = False
self.shareable = False
self.snapshot = None
self.backing_store = None
self.device_addr = None
self.boot_order = None
self.mirror = None
self.encryption = None
def _format_iotune(self, dev):
iotune = etree.Element("iotune")
if self.disk_read_bytes_sec is not None:
iotune.append(self._text_node("read_bytes_sec",
self.disk_read_bytes_sec))
if self.disk_read_iops_sec is not None:
iotune.append(self._text_node("read_iops_sec",
self.disk_read_iops_sec))
if self.disk_write_bytes_sec is not None:
iotune.append(self._text_node("write_bytes_sec",
self.disk_write_bytes_sec))
if self.disk_write_iops_sec is not None:
iotune.append(self._text_node("write_iops_sec",
self.disk_write_iops_sec))
if self.disk_total_bytes_sec is not None:
iotune.append(self._text_node("total_bytes_sec",
self.disk_total_bytes_sec))
if self.disk_total_iops_sec is not None:
iotune.append(self._text_node("total_iops_sec",
self.disk_total_iops_sec))
if self.disk_read_bytes_sec_max is not None:
iotune.append(self._text_node("read_bytes_sec_max",
self.disk_read_bytes_sec_max))
if self.disk_write_bytes_sec_max is not None:
iotune.append(self._text_node("write_bytes_sec_max",
self.disk_write_bytes_sec_max))
if self.disk_total_bytes_sec_max is not None:
iotune.append(self._text_node("total_bytes_sec_max",
self.disk_total_bytes_sec_max))
if self.disk_read_iops_sec_max is not None:
iotune.append(self._text_node("read_iops_sec_max",
self.disk_read_iops_sec_max))
if self.disk_write_iops_sec_max is not None:
iotune.append(self._text_node("write_iops_sec_max",
self.disk_write_iops_sec_max))
if self.disk_total_iops_sec_max is not None:
iotune.append(self._text_node("total_iops_sec_max",
self.disk_total_iops_sec_max))
if self.disk_size_iops_sec is not None:
iotune.append(self._text_node("size_iops_sec",
self.disk_size_iops_sec))
if len(iotune) > 0:
dev.append(iotune)
@property
def uses_virtio(self):
return 'virtio' == self.target_bus
def format_dom(self):
dev = super(LibvirtConfigGuestDisk, self).format_dom()
dev.set("type", self.source_type)
dev.set("device", self.source_device)
if any((self.driver_name, self.driver_format, self.driver_cache,
self.driver_discard, self.driver_iommu)):
drv = etree.Element("driver")
if self.driver_name is not None:
drv.set("name", self.driver_name)
if self.driver_format is not None:
drv.set("type", self.driver_format)
if self.driver_cache is not None:
drv.set("cache", self.driver_cache)
if self.driver_discard is not None:
drv.set("discard", self.driver_discard)
if self.driver_io is not None:
drv.set("io", self.driver_io)
if self.driver_iommu:
drv.set("iommu", "on")
dev.append(drv)
if self.source_type == "file":
dev.append(etree.Element("source", file=self.source_path))
elif self.source_type == "block":
dev.append(etree.Element("source", dev=self.source_path))
elif self.source_type == "mount":
dev.append(etree.Element("source", dir=self.source_path))
elif self.source_type == "network" and self.source_protocol:
source = etree.Element("source", protocol=self.source_protocol)
if self.source_name is not None:
source.set('name', self.source_name)
hosts_info = zip(self.source_hosts, self.source_ports)
for name, port in hosts_info:
host = etree.Element('host', name=name)
if port is not None:
host.set('port', port)
source.append(host)
dev.append(source)
if self.auth_secret_type is not None:
auth = etree.Element("auth")
auth.set("username", self.auth_username)
auth.append(etree.Element("secret", type=self.auth_secret_type,
uuid=self.auth_secret_uuid))
dev.append(auth)
if self.source_type == "mount":
dev.append(etree.Element("target", dir=self.target_path))
else:
dev.append(etree.Element("target", dev=self.target_dev,
bus=self.target_bus))
if self.serial is not None:
dev.append(self._text_node("serial", self.serial))
self._format_iotune(dev)
if (self.logical_block_size is not None or
self.physical_block_size is not None):
blockio = etree.Element("blockio")
if self.logical_block_size is not None:
blockio.set('logical_block_size', self.logical_block_size)
if self.physical_block_size is not None:
blockio.set('physical_block_size', self.physical_block_size)
dev.append(blockio)
if self.readonly:
dev.append(etree.Element("readonly"))
if self.shareable:
dev.append(etree.Element("shareable"))
if self.boot_order:
dev.append(etree.Element("boot", order=self.boot_order))
if self.device_addr:
dev.append(self.device_addr.format_dom())
if self.encryption:
dev.append(self.encryption.format_dom())
return dev
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestDisk, self).parse_dom(xmldoc)
self.source_type = xmldoc.get('type')
self.snapshot = xmldoc.get('snapshot')
for c in xmldoc:
if c.tag == 'driver':
self.driver_name = c.get('name')
self.driver_format = c.get('type')
self.driver_cache = c.get('cache')
self.driver_discard = c.get('discard')
self.driver_io = c.get('io')
self.driver_iommu = c.get('iommu', '') == "on"
elif c.tag == 'source':
if self.source_type == 'file':
self.source_path = c.get('file')
elif self.source_type == 'block':
self.source_path = c.get('dev')
elif self.source_type == 'mount':
self.source_path = c.get('dir')
elif self.source_type == 'network':
self.source_protocol = c.get('protocol')
self.source_name = c.get('name')
for sub in c:
if sub.tag == 'host':
self.source_hosts.append(sub.get('name'))
self.source_ports.append(sub.get('port'))
elif c.tag == 'serial':
self.serial = c.text
elif c.tag == 'target':
if self.source_type == 'mount':
self.target_path = c.get('dir')
else:
self.target_dev = c.get('dev')
self.target_bus = c.get('bus', None)
elif c.tag == 'backingStore':
b = LibvirtConfigGuestDiskBackingStore()
b.parse_dom(c)
self.backing_store = b
elif c.tag == 'readonly':
self.readonly = True
elif c.tag == 'shareable':
self.shareable = True
elif c.tag == 'address':
obj = LibvirtConfigGuestDeviceAddress.parse_dom(c)
self.device_addr = obj
elif c.tag == 'boot':
self.boot_order = c.get('order')
elif c.tag == 'mirror':
m = LibvirtConfigGuestDiskMirror()
m.parse_dom(c)
self.mirror = m
elif c.tag == 'encryption':
e = LibvirtConfigGuestDiskEncryption()
e.parse_dom(c)
self.encryption = e
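# Illustrative sketch: a local qcow2 disk attached as virtio; the source
# path is a placeholder.
#
#     disk = LibvirtConfigGuestDisk()
#     disk.source_path = "/var/lib/nova/instances/<uuid>/disk"
#     disk.driver_name = "qemu"
#     disk.driver_format = "qcow2"
#     disk.target_dev = "vda"
#     disk.target_bus = "virtio"
#     # -> <disk type="file" device="disk"><driver name="qemu" type="qcow2"/>
#     #    <source file="..."/><target dev="vda" bus="virtio"/></disk>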
class LibvirtConfigGuestDiskBackingStore(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestDiskBackingStore, self).__init__(
root_name="backingStore", **kwargs)
self.index = None
self.source_type = None
self.source_file = None
self.source_protocol = None
self.source_name = None
self.source_hosts = []
self.source_ports = []
self.driver_name = None
self.driver_format = None
self.backing_store = None
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestDiskBackingStore, self).parse_dom(xmldoc)
self.source_type = xmldoc.get('type')
self.index = xmldoc.get('index')
for c in xmldoc:
if c.tag == 'driver':
self.driver_name = c.get('name')
self.driver_format = c.get('type')
elif c.tag == 'source':
self.source_file = c.get('file')
self.source_protocol = c.get('protocol')
self.source_name = c.get('name')
for d in c:
if d.tag == 'host':
self.source_hosts.append(d.get('name'))
self.source_ports.append(d.get('port'))
elif c.tag == 'backingStore':
if len(c):
self.backing_store = LibvirtConfigGuestDiskBackingStore()
self.backing_store.parse_dom(c)
class LibvirtConfigGuestSnapshotDisk(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestSnapshotDisk, self).__init__(root_name="disk",
**kwargs)
self.source_type = None
self.source_device = None
self.name = None
self.snapshot = None
self.driver_name = None
self.driver_format = None
self.driver_cache = None
self.source_path = None
self.source_protocol = None
self.source_name = None
self.source_hosts = []
self.source_ports = []
self.target_dev = None
self.target_path = None
self.target_bus = None
self.auth_username = None
self.auth_secret_type = None
self.auth_secret_uuid = None
self.serial = None
def format_dom(self):
dev = super(LibvirtConfigGuestSnapshotDisk, self).format_dom()
if self.name:
dev.attrib['name'] = self.name
if self.snapshot:
dev.attrib['snapshot'] = self.snapshot
if self.source_type:
dev.set("type", self.source_type)
if self.source_device:
dev.set("device", self.source_device)
if (self.driver_name is not None or
self.driver_format is not None or
self.driver_cache is not None):
drv = etree.Element("driver")
if self.driver_name is not None:
drv.set("name", self.driver_name)
if self.driver_format is not None:
drv.set("type", self.driver_format)
if self.driver_cache is not None:
drv.set("cache", self.driver_cache)
dev.append(drv)
if self.source_type == "file":
dev.append(etree.Element("source", file=self.source_path))
elif self.source_type == "block":
dev.append(etree.Element("source", dev=self.source_path))
elif self.source_type == "mount":
dev.append(etree.Element("source", dir=self.source_path))
elif self.source_type == "network":
source = etree.Element("source", protocol=self.source_protocol)
if self.source_name is not None:
source.set('name', self.source_name)
hosts_info = zip(self.source_hosts, self.source_ports)
for name, port in hosts_info:
host = etree.Element('host', name=name)
if port is not None:
host.set('port', port)
source.append(host)
dev.append(source)
if self.auth_secret_type is not None:
auth = etree.Element("auth")
auth.set("username", self.auth_username)
auth.append(etree.Element("secret", type=self.auth_secret_type,
uuid=self.auth_secret_uuid))
dev.append(auth)
if self.source_type == "mount":
dev.append(etree.Element("target", dir=self.target_path))
else:
if self.target_bus and self.target_dev:
dev.append(etree.Element("target", dev=self.target_dev,
bus=self.target_bus))
return dev
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestSnapshotDisk, self).parse_dom(xmldoc)
self.source_type = xmldoc.get('type')
self.snapshot = xmldoc.get('snapshot')
for c in xmldoc:
if c.tag == 'driver':
self.driver_name = c.get('name')
self.driver_format = c.get('type')
self.driver_cache = c.get('cache')
elif c.tag == 'source':
if self.source_type == 'file':
self.source_path = c.get('file')
elif self.source_type == 'block':
self.source_path = c.get('dev')
elif self.source_type == 'mount':
self.source_path = c.get('dir')
elif self.source_type == 'network':
self.source_protocol = c.get('protocol')
self.source_name = c.get('name')
for sub in c:
if sub.tag == 'host':
self.source_hosts.append(sub.get('name'))
self.source_ports.append(sub.get('port'))
elif c.tag == 'serial':
self.serial = c.text
for c in xmldoc:
if c.tag == 'target':
if self.source_type == 'mount':
self.target_path = c.get('dir')
else:
self.target_dev = c.get('dev')
self.target_bus = c.get('bus', None)
class LibvirtConfigGuestFilesys(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestFilesys, self).__init__(root_name="filesystem",
**kwargs)
self.source_type = "mount"
self.source_dir = None
self.source_file = None
self.source_dev = None
self.target_dir = "/"
self.driver_type = "loop"
self.driver_format = "raw"
def format_dom(self):
dev = super(LibvirtConfigGuestFilesys, self).format_dom()
dev.set("type", self.source_type)
if self.source_type == "file":
dev.append(etree.Element("driver", type = self.driver_type,
format = self.driver_format))
dev.append(etree.Element("source", file=self.source_file))
elif self.source_type == "block":
dev.append(etree.Element("source", dev=self.source_dev))
else:
dev.append(etree.Element("source", dir=self.source_dir))
dev.append(etree.Element("target", dir=self.target_dir))
return dev
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestFilesys, self).parse_dom(xmldoc)
self.source_type = xmldoc.get('type')
for c in xmldoc:
if c.tag == 'driver':
if self.source_type == 'file':
self.driver_type = c.get('type')
self.driver_format = c.get('format')
elif c.tag == 'source':
if self.source_type == 'file':
self.source_file = c.get('file')
elif self.source_type == 'block':
self.source_dev = c.get('dev')
else:
self.source_dir = c.get('dir')
elif c.tag == 'target':
self.target_dir = c.get('dir')
class LibvirtConfigGuestDiskEncryptionSecret(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestDiskEncryptionSecret, self).__init__(**kwargs)
self.type = None
self.uuid = None
def parse_dom(self, xmldoc):
self.type = xmldoc.get('type')
self.uuid = xmldoc.get('uuid')
def format_dom(self):
obj = etree.Element("secret")
obj.set("type", self.type)
obj.set("uuid", self.uuid)
return obj
class LibvirtConfigGuestDiskEncryption(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestDiskEncryption, self).__init__(**kwargs)
self.format = None
self.secret = None
def parse_dom(self, xmldoc):
self.format = xmldoc.get('format')
for c in xmldoc:
if c.tag == 'secret':
m = LibvirtConfigGuestDiskEncryptionSecret()
m.parse_dom(c)
self.secret = m
def format_dom(self):
obj = etree.Element("encryption")
obj.set("format", self.format)
obj.append(self.secret.format_dom())
return obj
class LibvirtConfigGuestDiskMirror(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestDiskMirror, self).__init__(**kwargs)
self.ready = None
def parse_dom(self, xmldoc):
self.ready = xmldoc.get('ready')
class LibvirtConfigGuestIDMap(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestIDMap, self).__init__(**kwargs)
self.start = 0
self.target = 0
self.count = 10000
def parse_dom(self, xmldoc):
self.start = int(xmldoc.get('start'))
self.target = int(xmldoc.get('target'))
self.count = int(xmldoc.get('count'))
def format_dom(self):
obj = super(LibvirtConfigGuestIDMap, self).format_dom()
obj.set("start", str(self.start))
obj.set("target", str(self.target))
obj.set("count", str(self.count))
return obj
class LibvirtConfigGuestUIDMap(LibvirtConfigGuestIDMap):
def __init__(self, **kwargs):
super(LibvirtConfigGuestUIDMap, self).__init__(root_name="uid",
**kwargs)
class LibvirtConfigGuestGIDMap(LibvirtConfigGuestIDMap):
def __init__(self, **kwargs):
super(LibvirtConfigGuestGIDMap, self).__init__(root_name="gid",
**kwargs)
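def _example_idmaps():
    # Illustrative sketch, not part of the original module: uid/gid maps as
    # used for user-namespaced guests. The ranges are hypothetical.
    uid = LibvirtConfigGuestUIDMap()
    uid.start, uid.target, uid.count = 0, 1000, 10000
    gid = LibvirtConfigGuestGIDMap()
    gid.start, gid.target, gid.count = 0, 1000, 10000
    # Each renders as e.g. <uid start="0" target="1000" count="10000"/>
    return [etree.tostring(m.format_dom()) for m in (uid, gid)]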
class LibvirtConfigGuestDeviceAddress(LibvirtConfigObject):
def __init__(self, type=None, **kwargs):
super(LibvirtConfigGuestDeviceAddress, self).__init__(
root_name='address', **kwargs)
self.type = type
def format_dom(self):
xml = super(LibvirtConfigGuestDeviceAddress, self).format_dom()
xml.set("type", self.type)
return xml
@staticmethod
def parse_dom(xmldoc):
addr_type = xmldoc.get('type')
if addr_type == 'pci':
obj = LibvirtConfigGuestDeviceAddressPCI()
elif addr_type == 'drive':
obj = LibvirtConfigGuestDeviceAddressDrive()
else:
return None
obj.parse_dom(xmldoc)
return obj
class LibvirtConfigGuestDeviceAddressDrive(LibvirtConfigGuestDeviceAddress):
def __init__(self, **kwargs):
super(LibvirtConfigGuestDeviceAddressDrive, self).\
__init__(type='drive', **kwargs)
self.controller = None
self.bus = None
self.target = None
self.unit = None
def format_dom(self):
xml = super(LibvirtConfigGuestDeviceAddressDrive, self).format_dom()
if self.controller is not None:
xml.set("controller", str(self.controller))
if self.bus is not None:
xml.set("bus", str(self.bus))
if self.target is not None:
xml.set("target", str(self.target))
if self.unit is not None:
xml.set("unit", str(self.unit))
return xml
def parse_dom(self, xmldoc):
self.controller = xmldoc.get('controller')
self.bus = xmldoc.get('bus')
self.target = xmldoc.get('target')
self.unit = xmldoc.get('unit')
def format_address(self):
return None
class LibvirtConfigGuestDeviceAddressPCI(LibvirtConfigGuestDeviceAddress):
def __init__(self, **kwargs):
super(LibvirtConfigGuestDeviceAddressPCI, self).\
__init__(type='pci', **kwargs)
self.domain = None
self.bus = None
self.slot = None
self.function = None
def format_dom(self):
xml = super(LibvirtConfigGuestDeviceAddressPCI, self).format_dom()
if self.domain is not None:
xml.set("domain", str(self.domain))
if self.bus is not None:
xml.set("bus", str(self.bus))
if self.slot is not None:
xml.set("slot", str(self.slot))
if self.function is not None:
xml.set("function", str(self.function))
return xml
def parse_dom(self, xmldoc):
self.domain = xmldoc.get('domain')
self.bus = xmldoc.get('bus')
self.slot = xmldoc.get('slot')
self.function = xmldoc.get('function')
def format_address(self):
if self.domain is not None:
return pci_utils.get_pci_address(self.domain[2:],
self.bus[2:],
self.slot[2:],
self.function[2:])
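def _example_pci_address_roundtrip():
    # Illustrative sketch, not part of the original module: parse a PCI
    # <address> element and format it back as a plain address string. The
    # hex values are hypothetical; format_address() strips the '0x'
    # prefixes and delegates to pci_utils.get_pci_address(), which is
    # assumed to join the fields in the conventional DDDD:BB:SS.F form.
    xml = etree.fromstring(
        '<address type="pci" domain="0x0000" bus="0x00" '
        'slot="0x1f" function="0x2"/>')
    addr = LibvirtConfigGuestDeviceAddress.parse_dom(xml)
    return addr.format_address()  # e.g. "0000:00:1f.2"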
class LibvirtConfigGuestInterface(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestInterface, self).__init__(
root_name="interface",
**kwargs)
self.net_type = None
self.target_dev = None
self.model = None
self.mac_addr = None
self.script = None
self.source_dev = None
self.source_mode = "private"
self.vporttype = None
self.vportparams = []
self.filtername = None
self.filterparams = []
self.driver_name = None
self.driver_iommu = False
self.vhostuser_mode = None
self.vhostuser_path = None
self.vhostuser_type = None
self.vhost_queues = None
self.vhost_rx_queue_size = None
self.vhost_tx_queue_size = None
self.vif_inbound_peak = None
self.vif_inbound_burst = None
self.vif_inbound_average = None
self.vif_outbound_peak = None
self.vif_outbound_burst = None
self.vif_outbound_average = None
self.vlan = None
self.device_addr = None
self.mtu = None
def __eq__(self, other):
if not isinstance(other, LibvirtConfigGuestInterface):
return False
return (
self.mac_addr == other.mac_addr and
self.net_type == other.net_type and
self.source_dev == other.source_dev and
(self.net_type == 'vhostuser' or not self.target_dev or
self.target_dev == other.target_dev) and
self.vhostuser_path == other.vhostuser_path)
@property
def uses_virtio(self):
return 'virtio' == self.model
def format_dom(self):
dev = super(LibvirtConfigGuestInterface, self).format_dom()
dev.set("type", self.net_type)
if self.net_type == "hostdev":
dev.set("managed", "yes")
dev.append(etree.Element("mac", address=self.mac_addr))
if self.model:
dev.append(etree.Element("model", type=self.model))
drv_elem = None
if (self.driver_name or
self.driver_iommu or
self.net_type == "vhostuser"):
drv_elem = etree.Element("driver")
if self.driver_name and self.net_type != "vhostuser":
drv_elem.set("name", self.driver_name)
if self.driver_iommu:
drv_elem.set("iommu", "on")
if drv_elem is not None:
if self.vhost_queues is not None:
drv_elem.set('queues', str(self.vhost_queues))
if self.vhost_rx_queue_size is not None:
drv_elem.set('rx_queue_size', str(self.vhost_rx_queue_size))
if self.vhost_tx_queue_size is not None:
drv_elem.set('tx_queue_size', str(self.vhost_tx_queue_size))
if (drv_elem.get('name') or drv_elem.get('queues') or
drv_elem.get('rx_queue_size') or
drv_elem.get('tx_queue_size') or
drv_elem.get('iommu')):
dev.append(drv_elem)
if self.net_type == "ethernet":
if self.script is not None:
dev.append(etree.Element("script", path=self.script))
if self.mtu is not None:
dev.append(etree.Element("mtu", size=str(self.mtu)))
elif self.net_type == "direct":
dev.append(etree.Element("source", dev=self.source_dev,
mode=self.source_mode))
elif self.net_type == "hostdev":
source_elem = etree.Element("source")
domain, bus, slot, func = \
pci_utils.get_pci_address_fields(self.source_dev)
addr_elem = etree.Element("address", type='pci')
addr_elem.set("domain", "0x%s" % (domain))
addr_elem.set("bus", "0x%s" % (bus))
addr_elem.set("slot", "0x%s" % (slot))
addr_elem.set("function", "0x%s" % (func))
source_elem.append(addr_elem)
dev.append(source_elem)
elif self.net_type == "vhostuser":
dev.append(etree.Element("source", type=self.vhostuser_type,
mode=self.vhostuser_mode,
path=self.vhostuser_path))
elif self.net_type == "bridge":
dev.append(etree.Element("source", bridge=self.source_dev))
if self.script is not None:
dev.append(etree.Element("script", path=self.script))
if self.mtu is not None:
dev.append(etree.Element("mtu", size=str(self.mtu)))
else:
dev.append(etree.Element("source", bridge=self.source_dev))
if self.vlan and self.net_type in ("direct", "hostdev"):
vlan_elem = etree.Element("vlan")
tag_elem = etree.Element("tag", id=str(self.vlan))
vlan_elem.append(tag_elem)
dev.append(vlan_elem)
if self.target_dev is not None:
dev.append(etree.Element("target", dev=self.target_dev))
if self.vporttype is not None:
vport = etree.Element("virtualport", type=self.vporttype)
for p in self.vportparams:
param = etree.Element("parameters")
param.set(p['key'], p['value'])
vport.append(param)
dev.append(vport)
if self.filtername is not None:
filter = etree.Element("filterref", filter=self.filtername)
for p in self.filterparams:
filter.append(etree.Element("parameter",
name=p['key'],
value=p['value']))
dev.append(filter)
if self.vif_inbound_average or self.vif_outbound_average:
bandwidth = etree.Element("bandwidth")
if self.vif_inbound_average is not None:
vif_inbound = etree.Element("inbound",
average=str(self.vif_inbound_average))
if self.vif_inbound_peak is not None:
vif_inbound.set("peak", str(self.vif_inbound_peak))
if self.vif_inbound_burst is not None:
vif_inbound.set("burst", str(self.vif_inbound_burst))
bandwidth.append(vif_inbound)
if self.vif_outbound_average is not None:
vif_outbound = etree.Element("outbound",
average=str(self.vif_outbound_average))
if self.vif_outbound_peak is not None:
vif_outbound.set("peak", str(self.vif_outbound_peak))
if self.vif_outbound_burst is not None:
vif_outbound.set("burst", str(self.vif_outbound_burst))
bandwidth.append(vif_outbound)
dev.append(bandwidth)
return dev
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestInterface, self).parse_dom(xmldoc)
self.net_type = xmldoc.get('type')
for c in xmldoc:
if c.tag == 'mac':
self.mac_addr = c.get('address')
elif c.tag == 'model':
self.model = c.get('type')
elif c.tag == 'driver':
self.driver_name = c.get('name')
self.driver_iommu = (c.get('iommu', '') == 'on')
self.vhost_queues = c.get('queues')
self.vhost_rx_queue_size = c.get('rx_queue_size')
self.vhost_tx_queue_size = c.get('tx_queue_size')
elif c.tag == 'source':
if self.net_type == 'direct':
self.source_dev = c.get('dev')
self.source_mode = c.get('mode', 'private')
elif self.net_type == 'vhostuser':
self.vhostuser_type = c.get('type')
self.vhostuser_mode = c.get('mode')
self.vhostuser_path = c.get('path')
elif self.net_type == 'hostdev':
for sub in c:
if sub.tag == 'address' and sub.get('type') == 'pci':
self.source_dev = (
pci_utils.get_pci_address(
sub.get('domain')[2:],
sub.get('bus')[2:],
sub.get('slot')[2:],
sub.get('function')[2:]
)
)
else:
self.source_dev = c.get('bridge')
elif c.tag == 'target':
self.target_dev = c.get('dev')
elif c.tag == 'script':
self.script = c.get('path')
elif c.tag == 'vlan':
                # Take the id from the first <tag> child of the <vlan>
                # element; libvirt allows multiple tags but only the first
                # id is recorded here.
for sub in c:
if sub.tag == 'tag' and sub.get('id'):
self.vlan = int(sub.get('id'))
break
elif c.tag == 'virtualport':
self.vporttype = c.get('type')
for sub in c:
if sub.tag == 'parameters':
for k, v in dict(sub.attrib).items():
self.add_vport_param(k, v)
elif c.tag == 'filterref':
self.filtername = c.get('filter')
for sub in c:
if sub.tag == 'parameter':
self.add_filter_param(sub.get('name'),
sub.get('value'))
elif c.tag == 'bandwidth':
for sub in c:
# Note that only average is mandatory, burst and peak are
# optional (and all are ints).
if sub.tag == 'inbound':
self.vif_inbound_average = int(sub.get('average'))
if sub.get('burst'):
self.vif_inbound_burst = int(sub.get('burst'))
if sub.get('peak'):
self.vif_inbound_peak = int(sub.get('peak'))
elif sub.tag == 'outbound':
self.vif_outbound_average = int(sub.get('average'))
if sub.get('burst'):
self.vif_outbound_burst = int(sub.get('burst'))
if sub.get('peak'):
self.vif_outbound_peak = int(sub.get('peak'))
elif c.tag == 'address':
obj = LibvirtConfigGuestDeviceAddress.parse_dom(c)
self.device_addr = obj
elif c.tag == 'mtu':
self.mtu = int(c.get('size'))
def add_filter_param(self, key, value):
self.filterparams.append({'key': key, 'value': value})
def add_vport_param(self, key, value):
self.vportparams.append({'key': key, 'value': value})
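def _example_bridge_interface():
    # Illustrative sketch, not part of the original module: a virtio NIC on
    # a Linux bridge. The MAC, bridge and tap names are hypothetical.
    iface = LibvirtConfigGuestInterface()
    iface.net_type = "bridge"
    iface.source_dev = "br-int"
    iface.mac_addr = "52:54:00:12:34:56"
    iface.model = "virtio"
    iface.target_dev = "tap-example"
    iface.mtu = 1450
    # Roughly: <interface type="bridge"><mac .../><model type="virtio"/>
    # <source bridge="br-int"/><mtu size="1450"/><target dev="tap-example"/>
    # </interface>
    return etree.tostring(iface.format_dom())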
class LibvirtConfigGuestInput(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestInput, self).__init__(root_name="input",
**kwargs)
self.type = "tablet"
self.bus = "usb"
self.driver_iommu = False
def format_dom(self):
dev = super(LibvirtConfigGuestInput, self).format_dom()
dev.set("type", self.type)
dev.set("bus", self.bus)
if self.driver_iommu:
dev.append(etree.Element('driver', iommu="on"))
return dev
class LibvirtConfigGuestGraphics(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestGraphics, self).__init__(root_name="graphics",
**kwargs)
self.type = "vnc"
self.autoport = True
self.keymap = None
self.listen = None
def format_dom(self):
dev = super(LibvirtConfigGuestGraphics, self).format_dom()
dev.set("type", self.type)
if self.autoport:
dev.set("autoport", "yes")
else:
dev.set("autoport", "no")
if self.keymap:
dev.set("keymap", self.keymap)
if self.listen:
dev.set("listen", self.listen)
return dev
class LibvirtConfigSeclabel(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigSeclabel, self).__init__(root_name="seclabel",
**kwargs)
self.type = 'dynamic'
self.baselabel = None
def format_dom(self):
seclabel = super(LibvirtConfigSeclabel, self).format_dom()
seclabel.set('type', self.type)
if self.baselabel:
seclabel.append(self._text_node("baselabel", self.baselabel))
return seclabel
class LibvirtConfigGuestVideo(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestVideo, self).__init__(root_name="video",
**kwargs)
self.type = 'cirrus'
self.vram = None
self.heads = None
self.driver_iommu = False
@property
def uses_virtio(self):
return 'virtio' == self.type
def format_dom(self):
dev = super(LibvirtConfigGuestVideo, self).format_dom()
model = etree.Element("model")
model.set("type", self.type)
if self.vram:
model.set("vram", str(self.vram))
if self.heads:
model.set("heads", str(self.heads))
dev.append(model)
if self.driver_iommu:
dev.append(etree.Element("driver", iommu="on"))
return dev
class LibvirtConfigMemoryBalloon(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigMemoryBalloon, self).__init__(
root_name='memballoon',
**kwargs)
self.model = None
self.period = None
self.driver_iommu = False
@property
def uses_virtio(self):
return 'virtio' == self.model
def format_dom(self):
dev = super(LibvirtConfigMemoryBalloon, self).format_dom()
dev.set('model', str(self.model))
if self.period is not None:
dev.append(etree.Element('stats', period=str(self.period)))
if self.driver_iommu:
dev.append(etree.Element('driver', iommu='on'))
return dev
class LibvirtConfigGuestController(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestController,
self).__init__(root_name="controller", **kwargs)
self.type = None
self.index = None
self.model = None
self.driver_iommu = False
@property
def uses_virtio(self):
model_is_virtio = 'virtio-scsi' == self.model
type_is_virtio = 'virtio-serial' == self.type
return model_is_virtio or type_is_virtio
def format_dom(self):
controller = super(LibvirtConfigGuestController, self).format_dom()
controller.set("type", self.type)
if self.index is not None:
controller.set("index", str(self.index))
if self.model:
controller.set("model", str(self.model))
if self.driver_iommu:
controller.append(etree.Element("driver", iommu="on"))
return controller
class LibvirtConfigGuestUSBHostController(LibvirtConfigGuestController):
def __init__(self, **kwargs):
super(LibvirtConfigGuestUSBHostController, self).__init__(**kwargs)
self.type = 'usb'
class LibvirtConfigGuestPCIeRootController(LibvirtConfigGuestController):
def __init__(self, **kwargs):
super(LibvirtConfigGuestPCIeRootController, self).\
__init__(**kwargs)
self.type = 'pci'
self.model = 'pcie-root'
class LibvirtConfigGuestPCIeRootPortController(LibvirtConfigGuestController):
def __init__(self, **kwargs):
super(LibvirtConfigGuestPCIeRootPortController, self).\
__init__(**kwargs)
self.type = 'pci'
self.model = 'pcie-root-port'
class LibvirtConfigGuestHostdev(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestHostdev, self).\
__init__(root_name="hostdev", **kwargs)
self.mode = kwargs.get('mode')
self.type = kwargs.get('type')
        # The managed attribute is only used by PCI devices, but mediated
        # devices need to declare managed='no'.
self.managed = kwargs.get('managed', 'yes')
def format_dom(self):
dev = super(LibvirtConfigGuestHostdev, self).format_dom()
dev.set("mode", self.mode)
dev.set("type", self.type)
dev.set("managed", self.managed)
return dev
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestHostdev, self).parse_dom(xmldoc)
self.mode = xmldoc.get('mode')
self.type = xmldoc.get('type')
self.managed = xmldoc.get('managed')
return list(xmldoc)
class LibvirtConfigGuestHostdevPCI(LibvirtConfigGuestHostdev):
def __init__(self, **kwargs):
super(LibvirtConfigGuestHostdevPCI, self).\
__init__(mode='subsystem', type='pci',
**kwargs)
        # These are returned from libvirt as hexadecimal strings with a 0x
        # prefix, even though each field has a different meaningful width:
        # domain 16 bit, bus 8 bit, slot 5 bit, and function 3 bit. Nova, on
        # the other hand, generates these values without the 0x prefix.
self.domain = None
self.bus = None
self.slot = None
self.function = None
def __eq__(self, other):
if not isinstance(other, LibvirtConfigGuestHostdevPCI):
return False
        # NOTE(gibi): nova generates hex strings without the 0x prefix but
        # libvirt uses that prefix when returning the config, so we need to
        # normalize the strings before comparison.
return (
int(self.domain, 16) == int(other.domain, 16) and
int(self.bus, 16) == int(other.bus, 16) and
int(self.slot, 16) == int(other.slot, 16) and
int(self.function, 16) == int(other.function, 16))
def format_dom(self):
dev = super(LibvirtConfigGuestHostdevPCI, self).format_dom()
address = etree.Element(
"address",
domain=self.domain if self.domain.startswith('0x')
else '0x' + self.domain,
bus=self.bus if self.bus.startswith('0x') else '0x' + self.bus,
slot=self.slot if self.slot.startswith('0x') else '0x' + self.slot,
function=self.function if self.function.startswith('0x')
else '0x' + self.function)
source = etree.Element("source")
source.append(address)
dev.append(source)
return dev
def parse_dom(self, xmldoc):
childs = super(LibvirtConfigGuestHostdevPCI, self).parse_dom(xmldoc)
for c in childs:
if c.tag == "source":
for sub in c:
if sub.tag == 'address':
self.domain = sub.get('domain')
self.bus = sub.get('bus')
self.slot = sub.get('slot')
self.function = sub.get('function')
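def _example_hostdev_pci_equality():
    # Illustrative sketch, not part of the original module: __eq__ above
    # normalizes the hex strings, so a nova-generated address without the
    # 0x prefix compares equal to the libvirt-returned form.
    ours = LibvirtConfigGuestHostdevPCI()
    ours.domain, ours.bus, ours.slot, ours.function = '0000', '81', '00', '1'
    theirs = LibvirtConfigGuestHostdevPCI()
    theirs.domain, theirs.bus, theirs.slot, theirs.function = (
        '0x0000', '0x81', '0x00', '0x1')
    return ours == theirs  # True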
class LibvirtConfigGuestHostdevMDEV(LibvirtConfigGuestHostdev):
def __init__(self, **kwargs):
super(LibvirtConfigGuestHostdevMDEV, self).__init__(
mode='subsystem', type='mdev', managed='no', **kwargs)
# model attribute is only supported by mediated devices
self.model = kwargs.get('model', 'vfio-pci')
self.uuid = None
def format_dom(self):
dev = super(LibvirtConfigGuestHostdevMDEV, self).format_dom()
if self.model:
dev.set("model", self.model)
address = etree.Element("address", uuid=self.uuid)
source = etree.Element("source")
source.append(address)
dev.append(source)
return dev
def parse_dom(self, xmldoc):
children = super(LibvirtConfigGuestHostdevMDEV, self).parse_dom(xmldoc)
if xmldoc.get('model'):
self.model = xmldoc.get('model')
for c in children:
if c.tag == "source":
for sub in c:
if sub.tag == 'address':
self.uuid = sub.get('uuid')
return
class LibvirtConfigGuestCharBase(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestCharBase, self).__init__(**kwargs)
self.type = "pty"
self.source_path = None
self.listen_port = None
self.listen_host = None
self.log = None
def format_dom(self):
dev = super(LibvirtConfigGuestCharBase, self).format_dom()
dev.set("type", self.type)
if self.type == "file":
dev.append(etree.Element("source", path=self.source_path))
elif self.type == "unix":
dev.append(etree.Element("source", mode="bind",
path=self.source_path))
elif self.type == "tcp":
dev.append(etree.Element("source", mode="bind",
host=self.listen_host,
service=str(self.listen_port)))
if self.log:
dev.append(self.log.format_dom())
return dev
class LibvirtConfigGuestChar(LibvirtConfigGuestCharBase):
def __init__(self, **kwargs):
super(LibvirtConfigGuestChar, self).__init__(**kwargs)
self.target_port = None
self.target_type = None
def format_dom(self):
dev = super(LibvirtConfigGuestChar, self).format_dom()
if self.target_port is not None or self.target_type is not None:
target = etree.Element("target")
if self.target_port is not None:
target.set("port", str(self.target_port))
if self.target_type is not None:
target.set("type", self.target_type)
dev.append(target)
return dev
class LibvirtConfigGuestCharDeviceLog(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestCharDeviceLog, self).__init__(root_name="log",
**kwargs)
self.file = None
self.append = "off"
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestCharDeviceLog, self).parse_dom(xmldoc)
self.file = xmldoc.get("file")
self.append = xmldoc.get("append")
def format_dom(self):
log = super(LibvirtConfigGuestCharDeviceLog, self).format_dom()
log.set("file", self.file)
log.set("append", self.append)
return log
class LibvirtConfigGuestSerial(LibvirtConfigGuestChar):
def __init__(self, **kwargs):
super(LibvirtConfigGuestSerial, self).__init__(root_name="serial",
**kwargs)
class LibvirtConfigGuestConsole(LibvirtConfigGuestChar):
def __init__(self, **kwargs):
super(LibvirtConfigGuestConsole, self).__init__(root_name="console",
**kwargs)
class LibvirtConfigGuestChannel(LibvirtConfigGuestCharBase):
def __init__(self, **kwargs):
super(LibvirtConfigGuestChannel, self).__init__(root_name="channel",
**kwargs)
self.target_type = "virtio"
self.target_name = None
def format_dom(self):
dev = super(LibvirtConfigGuestChannel, self).format_dom()
target = etree.Element("target", type=self.target_type)
if self.target_name is not None:
target.set("name", self.target_name)
dev.append(target)
return dev
class LibvirtConfigGuestWatchdog(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestWatchdog, self).__init__(root_name="watchdog",
**kwargs)
self.model = 'i6300esb'
self.action = 'reset'
def format_dom(self):
dev = super(LibvirtConfigGuestWatchdog, self).format_dom()
dev.set('model', self.model)
dev.set('action', self.action)
return dev
class LibvirtConfigGuestCPUTuneVCPUPin(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestCPUTuneVCPUPin, self).__init__(
root_name="vcpupin",
**kwargs)
self.id = None
self.cpuset = None
def format_dom(self):
root = super(LibvirtConfigGuestCPUTuneVCPUPin, self).format_dom()
root.set("vcpu", str(self.id))
if self.cpuset is not None:
root.set("cpuset",
hardware.format_cpu_spec(self.cpuset))
return root
class LibvirtConfigGuestCPUTuneEmulatorPin(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestCPUTuneEmulatorPin, self).__init__(
root_name="emulatorpin",
**kwargs)
self.cpuset = None
def format_dom(self):
root = super(LibvirtConfigGuestCPUTuneEmulatorPin, self).format_dom()
if self.cpuset is not None:
root.set("cpuset",
hardware.format_cpu_spec(self.cpuset))
return root
class LibvirtConfigGuestCPUTuneVCPUSched(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestCPUTuneVCPUSched, self).__init__(
root_name="vcpusched",
**kwargs)
self.vcpus = None
self.scheduler = None
self.priority = None
def format_dom(self):
root = super(LibvirtConfigGuestCPUTuneVCPUSched, self).format_dom()
if self.vcpus is not None:
root.set("vcpus",
hardware.format_cpu_spec(self.vcpus))
if self.scheduler is not None:
root.set("scheduler", self.scheduler)
if self.priority is not None:
root.set("priority", str(self.priority))
return root
class LibvirtConfigGuestCPUTune(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestCPUTune, self).__init__(root_name="cputune",
**kwargs)
self.shares = None
self.quota = None
self.period = None
self.vcpupin = []
self.emulatorpin = None
self.vcpusched = []
def format_dom(self):
root = super(LibvirtConfigGuestCPUTune, self).format_dom()
if self.shares is not None:
root.append(self._text_node("shares", str(self.shares)))
if self.quota is not None:
root.append(self._text_node("quota", str(self.quota)))
if self.period is not None:
root.append(self._text_node("period", str(self.period)))
if self.emulatorpin is not None:
root.append(self.emulatorpin.format_dom())
for vcpu in self.vcpupin:
root.append(vcpu.format_dom())
for sched in self.vcpusched:
root.append(sched.format_dom())
return root
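def _example_cputune():
    # Illustrative sketch, not part of the original module: give the guest
    # a CFS quota and pin vCPU 0 to host CPUs 0-1. Values are hypothetical.
    tune = LibvirtConfigGuestCPUTune()
    tune.shares = 2048
    tune.period = 100000
    tune.quota = 50000
    pin = LibvirtConfigGuestCPUTuneVCPUPin()
    pin.id = 0
    pin.cpuset = set([0, 1])
    tune.vcpupin.append(pin)
    # Roughly: <cputune><shares>2048</shares><quota>50000</quota>
    # <period>100000</period><vcpupin vcpu="0" cpuset="0-1"/></cputune>
    return etree.tostring(tune.format_dom())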
class LibvirtConfigGuestMemoryBacking(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestMemoryBacking, self).__init__(
root_name="memoryBacking", **kwargs)
self.hugepages = []
self.sharedpages = True
self.locked = False
self.filesource = False
self.sharedaccess = False
self.allocateimmediate = False
self.discard = False
def format_dom(self):
root = super(LibvirtConfigGuestMemoryBacking, self).format_dom()
if self.hugepages:
hugepages = etree.Element("hugepages")
for item in self.hugepages:
hugepages.append(item.format_dom())
root.append(hugepages)
if not self.sharedpages:
root.append(etree.Element("nosharepages"))
if self.locked:
root.append(etree.Element("locked"))
if self.filesource:
root.append(etree.Element("source", type="file"))
if self.sharedaccess:
root.append(etree.Element("access", mode="shared"))
if self.allocateimmediate:
root.append(etree.Element("allocation", mode="immediate"))
if self.discard:
root.append(etree.Element("discard"))
return root
class LibvirtConfigGuestMemoryBackingPage(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestMemoryBackingPage, self).__init__(
root_name="page", **kwargs)
self.size_kb = None
self.nodeset = None
def format_dom(self):
page = super(LibvirtConfigGuestMemoryBackingPage, self).format_dom()
page.set("size", str(self.size_kb))
page.set("nodeset", hardware.format_cpu_spec(self.nodeset))
page.set("unit", "KiB")
return page
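def _example_hugepage_backing():
    # Illustrative sketch, not part of the original module: back guest RAM
    # with 2 MiB huge pages drawn from NUMA node 0. Values are hypothetical.
    page = LibvirtConfigGuestMemoryBackingPage()
    page.size_kb = 2048
    page.nodeset = [0]
    backing = LibvirtConfigGuestMemoryBacking()
    backing.hugepages.append(page)
    # Roughly: <memoryBacking><hugepages>
    # <page size="2048" unit="KiB" nodeset="0"/></hugepages></memoryBacking>
    return etree.tostring(backing.format_dom())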
class LibvirtConfigGuestMemoryTune(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestMemoryTune, self).__init__(
root_name="memtune", **kwargs)
self.hard_limit = None
self.soft_limit = None
self.swap_hard_limit = None
self.min_guarantee = None
def format_dom(self):
root = super(LibvirtConfigGuestMemoryTune, self).format_dom()
if self.hard_limit is not None:
root.append(self._text_node("hard_limit",
str(self.hard_limit),
unit="KiB"))
if self.soft_limit is not None:
root.append(self._text_node("soft_limit",
str(self.soft_limit),
unit="KiB"))
if self.swap_hard_limit is not None:
root.append(self._text_node("swap_hard_limit",
str(self.swap_hard_limit),
unit="KiB"))
if self.min_guarantee is not None:
root.append(self._text_node("min_guarantee",
str(self.min_guarantee),
unit="KiB"))
return root
class LibvirtConfigGuestNUMATuneMemory(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestNUMATuneMemory, self).__init__(
root_name="memory", **kwargs)
self.mode = "strict"
self.nodeset = []
def format_dom(self):
root = super(LibvirtConfigGuestNUMATuneMemory, self).format_dom()
root.set("mode", self.mode)
root.set("nodeset", hardware.format_cpu_spec(self.nodeset))
return root
class LibvirtConfigGuestNUMATuneMemNode(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestNUMATuneMemNode, self).__init__(
root_name="memnode", **kwargs)
self.cellid = 0
self.mode = "strict"
self.nodeset = []
def format_dom(self):
root = super(LibvirtConfigGuestNUMATuneMemNode, self).format_dom()
root.set("cellid", str(self.cellid))
root.set("mode", self.mode)
root.set("nodeset", hardware.format_cpu_spec(self.nodeset))
return root
class LibvirtConfigGuestNUMATune(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestNUMATune, self).__init__(
root_name="numatune", **kwargs)
self.memory = None
self.memnodes = []
def format_dom(self):
root = super(LibvirtConfigGuestNUMATune, self).format_dom()
if self.memory is not None:
root.append(self.memory.format_dom())
for node in self.memnodes:
root.append(node.format_dom())
return root
class LibvirtConfigGuestFeature(LibvirtConfigObject):
def __init__(self, name, **kwargs):
super(LibvirtConfigGuestFeature, self).__init__(root_name=name,
**kwargs)
class LibvirtConfigGuestFeatureACPI(LibvirtConfigGuestFeature):
def __init__(self, **kwargs):
super(LibvirtConfigGuestFeatureACPI, self).__init__("acpi",
**kwargs)
class LibvirtConfigGuestFeatureAPIC(LibvirtConfigGuestFeature):
def __init__(self, **kwargs):
super(LibvirtConfigGuestFeatureAPIC, self).__init__("apic",
**kwargs)
class LibvirtConfigGuestFeaturePAE(LibvirtConfigGuestFeature):
def __init__(self, **kwargs):
super(LibvirtConfigGuestFeaturePAE, self).__init__("pae",
**kwargs)
class LibvirtConfigGuestFeatureKvmHidden(LibvirtConfigGuestFeature):
def __init__(self, **kwargs):
super(LibvirtConfigGuestFeatureKvmHidden, self).__init__("kvm",
**kwargs)
def format_dom(self):
root = super(LibvirtConfigGuestFeatureKvmHidden, self).format_dom()
root.append(etree.Element("hidden", state="on"))
return root
class LibvirtConfigGuestFeaturePMU(LibvirtConfigGuestFeature):
def __init__(self, state, **kwargs):
super(LibvirtConfigGuestFeaturePMU, self).__init__("pmu", **kwargs)
        # NOTE(sean-k-mooney): bool_from_string is needed to handle the raw
        # flavor extra_spec value. bool_from_string internally checks if the
        # value is already a bool and returns it, so it is safe to use here.
        self.state = strutils.bool_from_string(state)
def format_dom(self):
root = super(LibvirtConfigGuestFeaturePMU, self).format_dom()
root.attrib['state'] = "on" if self.state else "off"
return root
class LibvirtConfigGuestFeatureHyperV(LibvirtConfigGuestFeature):
MIN_SPINLOCK_RETRIES = 4095
SPOOFED_VENDOR_ID = "1234567890ab"
def __init__(self, **kwargs):
super(LibvirtConfigGuestFeatureHyperV, self).__init__("hyperv",
**kwargs)
self.relaxed = False
self.vapic = False
self.spinlocks = False
self.spinlock_retries = self.MIN_SPINLOCK_RETRIES
self.vendorid_spoof = False
self.vendorid = self.SPOOFED_VENDOR_ID
def format_dom(self):
root = super(LibvirtConfigGuestFeatureHyperV, self).format_dom()
if self.relaxed:
root.append(etree.Element("relaxed", state="on"))
if self.vapic:
root.append(etree.Element("vapic", state="on"))
if self.spinlocks:
root.append(etree.Element("spinlocks", state="on",
retries=str(self.spinlock_retries)))
if self.vendorid_spoof:
root.append(etree.Element("vendor_id", state="on",
value=self.vendorid))
return root
class LibvirtConfigGuestSEVLaunchSecurity(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestSEVLaunchSecurity, self).__init__(
root_name='launchSecurity', **kwargs)
self.cbitpos = None
self.reduced_phys_bits = None
def format_dom(self):
root = super(LibvirtConfigGuestSEVLaunchSecurity, self).format_dom()
root.set('type', 'sev')
policy = etree.Element('policy')
        policy.text = '0x0033'
        root.append(policy)
cbitpos = etree.Element('cbitpos')
cbitpos.text = str(self.cbitpos)
root.append(cbitpos)
reducedPhysBits = etree.Element('reducedPhysBits')
reducedPhysBits.text = str(self.reduced_phys_bits)
root.append(reducedPhysBits)
return root
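def _example_sev_launch_security():
    # Illustrative sketch, not part of the original module: an AMD SEV
    # launchSecurity element. cbitpos and reducedPhysBits come from the
    # host's SEV capabilities; the values below are hypothetical.
    sev = LibvirtConfigGuestSEVLaunchSecurity()
    sev.cbitpos = 47
    sev.reduced_phys_bits = 1
    # Roughly: <launchSecurity type="sev"><policy>0x0033</policy>
    # <cbitpos>47</cbitpos><reducedPhysBits>1</reducedPhysBits>
    # </launchSecurity>
    return etree.tostring(sev.format_dom())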
class LibvirtConfigGuest(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuest, self).__init__(root_name="domain",
**kwargs)
self.virt_type = None
self.uuid = None
self.name = None
self.memory = 500 * units.Mi
self.max_memory_size = None
self.max_memory_slots = 0
self.membacking = None
self.memtune = None
self.numatune = None
self.vcpus = 1
self.cpuset = None
self.cpu = None
self.cputune = None
self.features = []
self.clock = None
self.sysinfo = None
self.os_type = None
self.os_loader = None
self.os_loader_type = None
self.os_kernel = None
self.os_initrd = None
self.os_cmdline = None
self.os_init_env = {}
self.os_root = None
self.os_init_path = None
self.os_boot_dev = []
self.os_smbios = None
self.os_mach_type = None
self.os_bootmenu = False
self.devices = []
self.metadata = []
self.idmaps = []
self.perf_events = []
self.launch_security = None
def _format_basic_props(self, root):
root.append(self._text_node("uuid", self.uuid))
root.append(self._text_node("name", self.name))
root.append(self._text_node("memory", self.memory))
if self.max_memory_size is not None:
max_memory = self._text_node("maxMemory", self.max_memory_size)
max_memory.set("slots", str(self.max_memory_slots))
root.append(max_memory)
if self.membacking is not None:
root.append(self.membacking.format_dom())
if self.memtune is not None:
root.append(self.memtune.format_dom())
if self.numatune is not None:
root.append(self.numatune.format_dom())
if self.cpuset is not None:
vcpu = self._text_node("vcpu", self.vcpus)
vcpu.set("cpuset", hardware.format_cpu_spec(self.cpuset))
root.append(vcpu)
else:
root.append(self._text_node("vcpu", self.vcpus))
if len(self.metadata) > 0:
metadata = etree.Element("metadata")
for m in self.metadata:
metadata.append(m.format_dom())
root.append(metadata)
def _format_os(self, root):
os = etree.Element("os")
type_node = self._text_node("type", self.os_type)
if self.os_mach_type is not None:
type_node.set("machine", self.os_mach_type)
os.append(type_node)
if self.os_kernel is not None:
os.append(self._text_node("kernel", self.os_kernel))
if self.os_loader is not None:
if self.os_loader_type == "pflash":
loader = self._text_node("loader", self.os_loader)
loader.set("type", "pflash")
loader.set("readonly", "yes")
os.append(loader)
else:
os.append(self._text_node("loader", self.os_loader))
if self.os_initrd is not None:
os.append(self._text_node("initrd", self.os_initrd))
if self.os_cmdline is not None:
os.append(self._text_node("cmdline", self.os_cmdline))
if self.os_root is not None:
os.append(self._text_node("root", self.os_root))
if self.os_init_path is not None:
os.append(self._text_node("init", self.os_init_path))
for name, value in self.os_init_env.items():
initenv = self._text_node("initenv", value)
initenv.set("name", name)
os.append(initenv)
for boot_dev in self.os_boot_dev:
os.append(etree.Element("boot", dev=boot_dev))
if self.os_smbios is not None:
os.append(self.os_smbios.format_dom())
if self.os_bootmenu:
os.append(etree.Element("bootmenu", enable="yes"))
root.append(os)
def _format_features(self, root):
if len(self.features) > 0:
features = etree.Element("features")
for feat in self.features:
features.append(feat.format_dom())
root.append(features)
def _format_devices(self, root):
if len(self.devices) == 0:
return
devices = etree.Element("devices")
for dev in self.devices:
devices.append(dev.format_dom())
root.append(devices)
def _format_idmaps(self, root):
if len(self.idmaps) == 0:
return
idmaps = etree.Element("idmap")
for idmap in self.idmaps:
idmaps.append(idmap.format_dom())
root.append(idmaps)
def _format_perf_events(self, root):
if len(self.perf_events) == 0:
return
perfs = etree.Element("perf")
for pe in self.perf_events:
event = etree.Element("event", name=pe, enabled="yes")
perfs.append(event)
root.append(perfs)
def _format_sev(self, root):
if self.launch_security is not None:
root.append(self.launch_security.format_dom())
def format_dom(self):
root = super(LibvirtConfigGuest, self).format_dom()
root.set("type", self.virt_type)
self._format_basic_props(root)
if self.sysinfo is not None:
root.append(self.sysinfo.format_dom())
self._format_os(root)
self._format_features(root)
if self.cputune is not None:
root.append(self.cputune.format_dom())
if self.clock is not None:
root.append(self.clock.format_dom())
if self.cpu is not None:
root.append(self.cpu.format_dom())
self._format_devices(root)
self._format_idmaps(root)
self._format_perf_events(root)
self._format_sev(root)
return root
def _parse_basic_props(self, xmldoc):
if xmldoc.tag == 'uuid':
self.uuid = xmldoc.text
elif xmldoc.tag == 'name':
self.name = xmldoc.text
elif xmldoc.tag == 'memory':
self.memory = int(xmldoc.text)
elif xmldoc.tag == 'vcpu':
self.vcpus = int(xmldoc.text)
if xmldoc.get('cpuset') is not None:
self.cpuset = hardware.parse_cpu_spec(xmldoc.get('cpuset'))
def _parse_os(self, xmldoc):
for c in xmldoc:
if c.tag == 'type':
self.os_type = c.text
self.os_mach_type = c.get('machine')
elif c.tag == 'kernel':
self.os_kernel = c.text
elif c.tag == 'loader':
self.os_loader = c.text
if c.get('type') == 'pflash':
self.os_loader_type = 'pflash'
elif c.tag == 'initrd':
self.os_initrd = c.text
elif c.tag == 'cmdline':
self.os_cmdline = c.text
elif c.tag == 'root':
self.os_root = c.text
elif c.tag == 'init':
self.os_init_path = c.text
elif c.tag == 'boot':
self.os_boot_dev.append(c.get('dev'))
elif c.tag == 'bootmenu':
if c.get('enable') == 'yes':
self.os_bootmenu = True
elif c.tag == 'initenv':
self.os_init_env[c.get('name')] = c.text
def parse_dom(self, xmldoc):
self.virt_type = xmldoc.get('type')
for c in xmldoc:
if c.tag == 'devices':
for d in c:
if d.tag == 'disk':
obj = LibvirtConfigGuestDisk()
obj.parse_dom(d)
self.devices.append(obj)
elif d.tag == 'filesystem':
obj = LibvirtConfigGuestFilesys()
obj.parse_dom(d)
self.devices.append(obj)
elif d.tag == 'hostdev' and d.get('type') == 'pci':
obj = LibvirtConfigGuestHostdevPCI()
obj.parse_dom(d)
self.devices.append(obj)
elif d.tag == 'hostdev' and d.get('type') == 'mdev':
obj = LibvirtConfigGuestHostdevMDEV()
obj.parse_dom(d)
self.devices.append(obj)
elif d.tag == 'interface':
obj = LibvirtConfigGuestInterface()
obj.parse_dom(d)
self.devices.append(obj)
elif d.tag == 'memory' and d.get('model') == 'nvdimm':
obj = LibvirtConfigGuestVPMEM()
obj.parse_dom(d)
self.devices.append(obj)
if c.tag == 'idmap':
for idmap in c:
obj = None
if idmap.tag == 'uid':
obj = LibvirtConfigGuestUIDMap()
elif idmap.tag == 'gid':
obj = LibvirtConfigGuestGIDMap()
if obj:
obj.parse_dom(idmap)
self.idmaps.append(obj)
elif c.tag == 'cpu':
obj = LibvirtConfigGuestCPU()
obj.parse_dom(c)
self.cpu = obj
elif c.tag == 'perf':
for p in c:
                    if p.get('enabled') == 'yes':
self.add_perf_event(p.get('name'))
elif c.tag == 'os':
self._parse_os(c)
else:
self._parse_basic_props(c)
def add_device(self, dev):
self.devices.append(dev)
def add_perf_event(self, event):
self.perf_events.append(event)
def set_clock(self, clk):
self.clock = clk
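def _example_minimal_guest():
    # Illustrative sketch, not part of the original module: the smallest
    # useful domain this class renders, with one disk attached. The UUID,
    # name and path are hypothetical.
    guest = LibvirtConfigGuest()
    guest.virt_type = "kvm"
    guest.uuid = "11111111-2222-3333-4444-555555555555"
    guest.name = "example-instance"
    guest.memory = 512 * units.Mi
    guest.vcpus = 2
    guest.os_type = "hvm"
    disk = LibvirtConfigGuestDisk()
    disk.source_type = "file"
    disk.source_path = "/var/lib/nova/instances/example/disk"
    disk.target_dev = "vda"
    disk.target_bus = "virtio"
    guest.add_device(disk)
    return etree.tostring(guest.format_dom())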
class LibvirtConfigGuestSnapshot(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigGuestSnapshot, self).__init__(
root_name="domainsnapshot",
**kwargs)
self.name = None
self.disks = []
def format_dom(self):
ss = super(LibvirtConfigGuestSnapshot, self).format_dom()
if self.name:
ss.append(self._text_node("name", self.name))
disks = etree.Element('disks')
for disk in self.disks:
disks.append(disk.format_dom())
ss.append(disks)
return ss
def add_disk(self, disk):
self.disks.append(disk)
class LibvirtConfigNodeDevice(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigNodeDevice, self).__init__(root_name="device",
**kwargs)
self.name = None
self.parent = None
self.pci_capability = None
self.mdev_information = None
def parse_dom(self, xmldoc):
super(LibvirtConfigNodeDevice, self).parse_dom(xmldoc)
for c in xmldoc:
if c.tag == "name":
self.name = c.text
elif c.tag == "parent":
self.parent = c.text
elif c.tag == "capability" and c.get("type") in ['pci', 'net']:
pcicap = LibvirtConfigNodeDevicePciCap()
pcicap.parse_dom(c)
self.pci_capability = pcicap
elif c.tag == "capability" and c.get("type") in ['mdev']:
mdev_info = LibvirtConfigNodeDeviceMdevInformation()
mdev_info.parse_dom(c)
self.mdev_information = mdev_info
class LibvirtConfigNodeDevicePciCap(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigNodeDevicePciCap, self).__init__(
root_name="capability", **kwargs)
self.domain = None
self.bus = None
self.slot = None
self.function = None
self.product = None
self.product_id = None
self.vendor = None
self.vendor_id = None
self.numa_node = None
self.fun_capability = []
self.mdev_capability = []
self.interface = None
self.address = None
self.link_state = None
self.features = []
def parse_dom(self, xmldoc):
super(LibvirtConfigNodeDevicePciCap, self).parse_dom(xmldoc)
for c in xmldoc:
if c.tag == "domain":
self.domain = int(c.text)
elif c.tag == "slot":
self.slot = int(c.text)
elif c.tag == "bus":
self.bus = int(c.text)
elif c.tag == "function":
self.function = int(c.text)
elif c.tag == "product":
self.product = c.text
self.product_id = int(c.get('id'), 16)
elif c.tag == "vendor":
self.vendor = c.text
self.vendor_id = int(c.get('id'), 16)
elif c.tag == "numa":
self.numa_node = int(c.get('node'))
elif c.tag == "interface":
self.interface = c.text
elif c.tag == "address":
self.address = c.text
elif c.tag == "link":
self.link_state = c.get('state')
elif c.tag == "feature":
self.features.append(c.get('name'))
elif c.tag == "capability" and c.get('type') in \
('virt_functions', 'phys_function'):
funcap = LibvirtConfigNodeDevicePciSubFunctionCap()
funcap.parse_dom(c)
self.fun_capability.append(funcap)
elif c.tag == "capability" and c.get('type') in ('mdev_types',):
mdevcap = LibvirtConfigNodeDeviceMdevCapableSubFunctionCap()
mdevcap.parse_dom(c)
self.mdev_capability.append(mdevcap)
class LibvirtConfigNodeDevicePciSubFunctionCap(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigNodeDevicePciSubFunctionCap, self).__init__(
root_name="capability", **kwargs)
self.type = None
self.device_addrs = list()
def parse_dom(self, xmldoc):
super(LibvirtConfigNodeDevicePciSubFunctionCap, self).parse_dom(xmldoc)
self.type = xmldoc.get("type")
for c in xmldoc:
if c.tag == "address":
self.device_addrs.append((int(c.get('domain'), 16),
int(c.get('bus'), 16),
int(c.get('slot'), 16),
int(c.get('function'), 16)))
class LibvirtConfigNodeDeviceMdevCapableSubFunctionCap(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigNodeDeviceMdevCapableSubFunctionCap, self).__init__(
root_name="capability", **kwargs)
self.mdev_types = list()
def parse_dom(self, xmldoc):
super(LibvirtConfigNodeDeviceMdevCapableSubFunctionCap,
self).parse_dom(xmldoc)
for c in xmldoc:
if c.tag == "type":
mdev_type = {'type': c.get('id')}
for e in c:
mdev_type[e.tag] = (int(e.text)
if e.tag == 'availableInstances'
else e.text)
self.mdev_types.append(mdev_type)
class LibvirtConfigNodeDeviceMdevInformation(LibvirtConfigObject):
def __init__(self, **kwargs):
super(LibvirtConfigNodeDeviceMdevInformation, self).__init__(
root_name="capability", **kwargs)
self.type = None
self.iommu_group = None
def parse_dom(self, xmldoc):
super(LibvirtConfigNodeDeviceMdevInformation,
self).parse_dom(xmldoc)
for c in xmldoc:
if c.tag == "type":
self.type = c.get('id')
if c.tag == "iommuGroup":
self.iommu_group = int(c.get('number'))
class LibvirtConfigGuestRng(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestRng, self).__init__(root_name="rng",
**kwargs)
self.device_model = 'virtio'
self.model = 'random'
self.backend = None
self.rate_period = None
self.rate_bytes = None
self.driver_iommu = False
@property
def uses_virtio(self):
return 'virtio' == self.device_model
def format_dom(self):
dev = super(LibvirtConfigGuestRng, self).format_dom()
dev.set('model', self.device_model)
backend = etree.Element("backend")
backend.set("model", self.model)
backend.text = self.backend
if self.rate_period and self.rate_bytes:
rate = etree.Element("rate")
rate.set("period", str(self.rate_period))
rate.set("bytes", str(self.rate_bytes))
dev.append(rate)
dev.append(backend)
if self.driver_iommu:
dev.append(etree.Element('driver', iommu="on"))
return dev
class LibvirtConfigGuestMetaNovaInstance(LibvirtConfigObject):
def __init__(self):
super(LibvirtConfigGuestMetaNovaInstance,
self).__init__(root_name="instance",
ns_prefix="nova",
ns_uri=NOVA_NS)
self.package = None
self.flavor = None
self.name = None
self.creationTime = None
self.owner = None
self.roottype = None
self.rootid = None
def format_dom(self):
meta = super(LibvirtConfigGuestMetaNovaInstance, self).format_dom()
pkg = self._new_node("package")
pkg.set("version", self.package)
meta.append(pkg)
if self.name is not None:
meta.append(self._text_node("name", self.name))
if self.creationTime is not None:
timestr = time.strftime("%Y-%m-%d %H:%M:%S",
time.gmtime(self.creationTime))
meta.append(self._text_node("creationTime", timestr))
if self.flavor is not None:
meta.append(self.flavor.format_dom())
if self.owner is not None:
meta.append(self.owner.format_dom())
if self.roottype is not None and self.rootid is not None:
root = self._new_node("root")
root.set("type", self.roottype)
root.set("uuid", str(self.rootid))
meta.append(root)
return meta
class LibvirtConfigGuestMetaNovaFlavor(LibvirtConfigObject):
def __init__(self):
super(LibvirtConfigGuestMetaNovaFlavor,
self).__init__(root_name="flavor",
ns_prefix="nova",
ns_uri=NOVA_NS)
self.name = None
self.memory = None
self.disk = None
self.swap = None
self.ephemeral = None
self.vcpus = None
def format_dom(self):
meta = super(LibvirtConfigGuestMetaNovaFlavor, self).format_dom()
meta.set("name", self.name)
if self.memory is not None:
meta.append(self._text_node("memory", str(self.memory)))
if self.disk is not None:
meta.append(self._text_node("disk", str(self.disk)))
if self.swap is not None:
meta.append(self._text_node("swap", str(self.swap)))
if self.ephemeral is not None:
meta.append(self._text_node("ephemeral", str(self.ephemeral)))
if self.vcpus is not None:
meta.append(self._text_node("vcpus", str(self.vcpus)))
return meta
class LibvirtConfigGuestMetaNovaOwner(LibvirtConfigObject):
def __init__(self):
super(LibvirtConfigGuestMetaNovaOwner,
self).__init__(root_name="owner",
ns_prefix="nova",
ns_uri=NOVA_NS)
self.userid = None
self.username = None
self.projectid = None
self.projectname = None
def format_dom(self):
meta = super(LibvirtConfigGuestMetaNovaOwner, self).format_dom()
if self.userid is not None and self.username is not None:
user = self._text_node("user", self.username)
user.set("uuid", self.userid)
meta.append(user)
if self.projectid is not None and self.projectname is not None:
project = self._text_node("project", self.projectname)
project.set("uuid", self.projectid)
meta.append(project)
return meta
class LibvirtConfigSecret(LibvirtConfigObject):
def __init__(self):
super(LibvirtConfigSecret,
self).__init__(root_name="secret")
self.ephemeral = False
self.private = False
self.description = None
self.uuid = None
self.usage_type = None
self.usage_id = None
def get_yes_no_str(self, value):
if value:
return 'yes'
return 'no'
def format_dom(self):
root = super(LibvirtConfigSecret, self).format_dom()
root.set("ephemeral", self.get_yes_no_str(self.ephemeral))
root.set("private", self.get_yes_no_str(self.private))
if self.description is not None:
root.append(self._text_node("description", str(self.description)))
if self.uuid is not None:
root.append(self._text_node("uuid", str(self.uuid)))
usage = self._new_node("usage")
usage.set("type", self.usage_type)
if self.usage_type in ('ceph', 'vtpm'):
usage.append(self._text_node('name', str(self.usage_id)))
elif self.usage_type == 'iscsi':
usage.append(self._text_node('target', str(self.usage_id)))
elif self.usage_type == 'volume':
usage.append(self._text_node('volume', str(self.usage_id)))
root.append(usage)
return root
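def _example_ceph_secret():
    # Illustrative sketch, not part of the original module: a libvirt
    # secret for a ceph client. UUID and usage name are hypothetical.
    secret = LibvirtConfigSecret()
    secret.uuid = "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"
    secret.usage_type = "ceph"
    secret.usage_id = "client.cinder secret"
    # Roughly: <secret ephemeral="no" private="no"><uuid>...</uuid>
    # <usage type="ceph"><name>client.cinder secret</name></usage></secret>
    return etree.tostring(secret.format_dom())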
class LibvirtConfigGuestVPMEM(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestVPMEM, self).__init__(
root_name="memory", **kwargs)
self.model = "nvdimm"
self.access = "shared"
self.source_path = kwargs.get("devpath", "")
self.align_size = kwargs.get("align_kb", 0)
self.pmem = True
self.target_size = kwargs.get("size_kb", 0)
self.target_node = 0
self.label_size = 2 * units.Ki
def format_dom(self):
memory = super(LibvirtConfigGuestVPMEM, self).format_dom()
memory.set("model", self.model)
memory.set("access", self.access)
source = etree.Element("source")
source.append(self._text_node("path", self.source_path))
source.append(self._text_node("alignsize", self.align_size))
if self.pmem is True:
source.append(etree.Element("pmem"))
target = etree.Element("target")
target.append(self._text_node("size", self.target_size))
target.append(self._text_node("node", self.target_node))
label = etree.Element("label")
label.append(self._text_node("size", self.label_size))
target.append(label)
memory.append(source)
memory.append(target)
return memory
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestVPMEM, self).parse_dom(xmldoc)
self.model = xmldoc.get("model")
self.access = xmldoc.get("access")
for c in list(xmldoc):
if c.tag == "source":
for sub in list(c):
if sub.tag == "path":
self.source_path = sub.text
if sub.tag == "alignsize":
self.align_size = sub.text
elif c.tag == "target":
for sub in list(c):
if sub.tag == "size":
self.target_size = sub.text
class LibvirtConfigGuestSound(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
super(LibvirtConfigGuestSound, self).__init__(root_name="sound",
**kwargs)
self.model = "ich6"
self.codec_type = "micro"
def format_dom(self):
dev = super(LibvirtConfigGuestSound, self).format_dom()
dev.set("model", self.model)
drv_codec = etree.Element("codec")
drv_codec.set("type", self.codec_type)
dev.append(drv_codec)
return dev
| true
| true
|
f7035baa2430208a8619aaa2de77421f4f732be8
| 4,889
|
py
|
Python
|
king_phisher/server/plugins.py
|
padfoot999/king-phisher
|
d09ca94900c32bc5b292685e0ec84b9ad093301f
|
[
"BSD-3-Clause"
] | 1
|
2016-10-20T14:58:09.000Z
|
2016-10-20T14:58:09.000Z
|
king_phisher/server/plugins.py
|
padfoot999/king-phisher
|
d09ca94900c32bc5b292685e0ec84b9ad093301f
|
[
"BSD-3-Clause"
] | null | null | null |
king_phisher/server/plugins.py
|
padfoot999/king-phisher
|
d09ca94900c32bc5b292685e0ec84b9ad093301f
|
[
"BSD-3-Clause"
] | 1
|
2019-12-17T21:33:24.000Z
|
2019-12-17T21:33:24.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/server/plugins.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os
from king_phisher import errors
from king_phisher import find
from king_phisher import plugins
class ServerPlugin(plugins.PluginBase):
"""
The base object to be inherited by plugins that are loaded into the King
Phisher server. This provides a convenient interface for interacting with
the runtime.
"""
_logging_prefix = 'KingPhisher.Plugins.Server.'
def __init__(self, root_config):
self.root_config = root_config
"""A reference to the main server instance :py:attr:`~king_phisher.server.server.KingPhisherServer.config`."""
self.server = None
"""A reference to the :py:class:`~king_phisher.server.server.KingPhisherServer` instance. Only available if the instance has been created."""
super(ServerPlugin, self).__init__()
for option in self.options:
if self.config[option.name] is None:
raise errors.KingPhisherPluginError(self.name, 'missing required option: ' + option.name)
@property
def config(self):
"""
		A dictionary that can be used by this plugin to access its
		configuration. Any changes to this configuration will be lost when the
		server restarts.
"""
config = self.root_config.get('server.plugins').get(self.name)
if config is None:
config = {}
self.root_config.get('server.plugins')[self.name] = config
return config
class ServerPluginManager(plugins.PluginManagerBase):
"""
The manager for plugins loaded into the King Phisher server application.
"""
_plugin_klass = ServerPlugin
def __init__(self, config):
self.config = config
path = self._get_path()
self._server = None
super(ServerPluginManager, self).__init__(path, (config,))
for plugin in config.get_if_exists('server.plugins', {}).keys():
# load the plugin
try:
self.load(plugin)
except Exception:
self.logger.critical('failed to load plugin: ' + plugin, exc_info=True)
raise errors.KingPhisherPluginError(plugin, 'failed to load')
# check compatibility
klass = self[plugin]
for req_type, req_value, req_met in klass.compatibility:
req_type = req_type.lower()
if req_met:
self.logger.debug("plugin {0} requirement {1} ({2}) met".format(plugin, req_type, req_value))
continue
self.logger.warning("plugin {0} unmet requirement {1} ({2})".format(plugin, req_type, req_value))
raise errors.KingPhisherPluginError(plugin, 'failed to meet requirement: ' + req_type)
# enable the plugin
try:
self.enable(plugin)
except errors.KingPhisherPluginError as error:
raise error
except Exception:
self.logger.critical('failed to enable plugin: ' + plugin, exc_info=True)
raise errors.KingPhisherPluginError(plugin, 'failed to enable')
def _get_path(self):
path = [find.find_data_directory('plugins')]
extra_dirs = self.config.get_if_exists('server.plugin_directories', [])
if isinstance(extra_dirs, str):
extra_dirs = [extra_dirs]
elif not isinstance(extra_dirs, list):
raise errors.KingPhisherInputValidationError('configuration setting server.plugin_directories must be a list')
for directory in extra_dirs:
if not os.path.isdir(directory):
continue
path.append(directory)
return path
@property
def server(self):
return self._server
@server.setter
def server(self, value):
self._server = value
for _, plugin in self:
plugin.server = value
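# For orientation, a minimal hypothetical plugin built on the base class
# above. The class name, metadata values, and the "greeting" config key are
# invented for illustration, and the initialize() hook is assumed from
# king-phisher's PluginBase conventions; none of this is part of this module.
# The per-plugin settings would live under server.plugins.hello in the server
# configuration, which is what self.config resolves to.
class HelloPlugin(ServerPlugin):
    title = 'Hello'
    description = 'Logs a configurable greeting when the plugin is enabled.'
    version = '1.0'
    def initialize(self):
        # self.config is the per-plugin dict stored under server.plugins.hello
        self.logger.info(self.config.get('greeting', 'hello world'))
        return True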
| 38.801587
| 143
| 0.746574
|
import os
from king_phisher import errors
from king_phisher import find
from king_phisher import plugins
class ServerPlugin(plugins.PluginBase):
_logging_prefix = 'KingPhisher.Plugins.Server.'
def __init__(self, root_config):
self.root_config = root_config
self.server = None
super(ServerPlugin, self).__init__()
for option in self.options:
if self.config[option.name] is None:
raise errors.KingPhisherPluginError(self.name, 'missing required option: ' + option.name)
@property
def config(self):
config = self.root_config.get('server.plugins').get(self.name)
if config is None:
config = {}
self.root_config.get('server.plugins')[self.name] = config
return config
class ServerPluginManager(plugins.PluginManagerBase):
_plugin_klass = ServerPlugin
def __init__(self, config):
self.config = config
path = self._get_path()
self._server = None
super(ServerPluginManager, self).__init__(path, (config,))
for plugin in config.get_if_exists('server.plugins', {}).keys():
try:
self.load(plugin)
except Exception:
self.logger.critical('failed to load plugin: ' + plugin, exc_info=True)
raise errors.KingPhisherPluginError(plugin, 'failed to load')
klass = self[plugin]
for req_type, req_value, req_met in klass.compatibility:
req_type = req_type.lower()
if req_met:
self.logger.debug("plugin {0} requirement {1} ({2}) met".format(plugin, req_type, req_value))
continue
self.logger.warning("plugin {0} unmet requirement {1} ({2})".format(plugin, req_type, req_value))
raise errors.KingPhisherPluginError(plugin, 'failed to meet requirement: ' + req_type)
try:
self.enable(plugin)
except errors.KingPhisherPluginError as error:
raise error
except Exception:
self.logger.critical('failed to enable plugin: ' + plugin, exc_info=True)
raise errors.KingPhisherPluginError(plugin, 'failed to enable')
def _get_path(self):
path = [find.find_data_directory('plugins')]
extra_dirs = self.config.get_if_exists('server.plugin_directories', [])
if isinstance(extra_dirs, str):
extra_dirs = [extra_dirs]
elif not isinstance(extra_dirs, list):
raise errors.KingPhisherInputValidationError('configuration setting server.plugin_directories must be a list')
for directory in extra_dirs:
if not os.path.isdir(directory):
continue
path.append(directory)
return path
@property
def server(self):
return self._server
@server.setter
def server(self, value):
self._server = value
for _, plugin in self:
plugin.server = value
| true
| true
|
f7035bae9ef133b15cd82ef673ee423ea88deb57
| 1,155
|
py
|
Python
|
plasma/child_chain/create_initial_make_orders.py
|
kevjue/plasma-mvp
|
510934766a8d9b28969658216566a74b2d450384
|
[
"Apache-2.0"
] | 1
|
2018-07-24T14:19:15.000Z
|
2018-07-24T14:19:15.000Z
|
plasma/child_chain/create_initial_make_orders.py
|
kevjue/plasma-dex
|
510934766a8d9b28969658216566a74b2d450384
|
[
"Apache-2.0"
] | null | null | null |
plasma/child_chain/create_initial_make_orders.py
|
kevjue/plasma-dex
|
510934766a8d9b28969658216566a74b2d450384
|
[
"Apache-2.0"
] | null | null | null |
import click
import sys
from web3 import Web3
from plasma.client.client import Client
from plasma.utils import utils
@click.command()
@click.option('--token_address', help="The ethereum address of the pdex token smart contract", required=True)
@click.option('--root_chain_address', help="The ethereum address of the root chain smart contract", required=True)
def main(token_address, root_chain_address):
client = Client(root_chain_address)
maker_address = '0x0af467F2f6c20e3543B8a2a453e70DF034714aEB'
make_order_hex = client.get_makeorder_txn(maker_address, token_address, Web3.toWei(10, 'ether'), Web3.toWei(1, 'ether'))
    if make_order_hex is None:
print("No valid utxos to create make order txn")
sys.exit(0)
make_order_hash = utils.hashPersonalMessage(make_order_hex)
signature = utils.sign(make_order_hash, bytes(bytearray.fromhex('46155f862a2249f0ee6d69122ead4ec56cf12a71049a3105a90b9708d7103f77')))
client.submit_signed_makeorder_txn(maker_address, token_address, Web3.toWei(10, 'ether'), Web3.toWei(1, 'ether'), make_order_hex, signature.hex())
if __name__ == '__main__':
main()
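# Hedged illustration (not part of this repo): the hash-then-sign step above
# follows Ethereum's "personal message" scheme. With the standard eth_account
# library the equivalent signing looks roughly like this; the function name
# and arguments below are placeholders.
from eth_account import Account
from eth_account.messages import encode_defunct

def sign_personal(message_hex, private_key_hex):
    # encode_defunct prefixes the payload with "\x19Ethereum Signed Message:\n<len>"
    message = encode_defunct(hexstr=message_hex)
    return Account.sign_message(message, private_key=private_key_hex).signature.hex()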
| 42.777778
| 150
| 0.758442
|
import click
import sys
from web3 import Web3
from plasma.client.client import Client
from plasma.utils import utils
@click.command()
@click.option('--token_address', help="The ethereum address of the pdex token smart contract", required=True)
@click.option('--root_chain_address', help="The ethereum address of the root chain smart contract", required=True)
def main(token_address, root_chain_address):
client = Client(root_chain_address)
maker_address = '0x0af467F2f6c20e3543B8a2a453e70DF034714aEB'
make_order_hex = client.get_makeorder_txn(maker_address, token_address, Web3.toWei(10, 'ether'), Web3.toWei(1, 'ether'))
    if make_order_hex is None:
print("No valid utxos to create make order txn")
sys.exit(0)
make_order_hash = utils.hashPersonalMessage(make_order_hex)
signature = utils.sign(make_order_hash, bytes(bytearray.fromhex('46155f862a2249f0ee6d69122ead4ec56cf12a71049a3105a90b9708d7103f77')))
client.submit_signed_makeorder_txn(maker_address, token_address, Web3.toWei(10, 'ether'), Web3.toWei(1, 'ether'), make_order_hex, signature.hex())
if __name__ == '__main__':
main()
| true
| true
|
f7035c581a4ed9b8309dd60d8abb56daf29c23f9
| 1,763
|
py
|
Python
|
integreat_cms/cms/models/feedback/search_result_feedback.py
|
Carlosbogo/integreat-cms
|
066f188b138e105e72f5420bc36d25709f25402d
|
[
"Apache-2.0"
] | 1
|
2022-01-16T01:15:21.000Z
|
2022-01-16T01:15:21.000Z
|
integreat_cms/cms/models/feedback/search_result_feedback.py
|
Carlosbogo/integreat-cms
|
066f188b138e105e72f5420bc36d25709f25402d
|
[
"Apache-2.0"
] | null | null | null |
integreat_cms/cms/models/feedback/search_result_feedback.py
|
Carlosbogo/integreat-cms
|
066f188b138e105e72f5420bc36d25709f25402d
|
[
"Apache-2.0"
] | null | null | null |
from django.db import models
from django.utils.translation import ugettext_lazy as _
from .feedback import Feedback
class SearchResultFeedback(Feedback):
"""
Database model representing feedback about search results (e.g. empty results).
"""
search_query = models.CharField(max_length=1000, verbose_name=_("search term"))
@property
def object_name(self):
"""
This property returns the name of the object this feedback comments on.
:return: The name of the object this feedback refers to
:rtype: str
"""
return _("Search results for {}").format(self.search_query)
@property
def object_url(self):
"""
This property returns the url to the object this feedback comments on.
:return: The url to the referred object
:rtype: str
"""
return ""
@property
def related_feedback(self):
"""
This property returns all feedback entries which relate to the same object and have the same is_technical value.
:return: The queryset of related feedback
:rtype: ~django.db.models.query.QuerySet [ ~integreat_cms.cms.models.feedback.search_result_feedback.SearchResultFeedback ]
"""
return SearchResultFeedback.objects.filter(
region=self.region,
language=self.language,
search_query=self.search_query,
is_technical=self.is_technical,
)
class Meta:
#: The verbose name of the model
verbose_name = _("search result feedback")
#: The plural verbose name of the model
verbose_name_plural = _("search result feedback")
#: The default permissions for this model
default_permissions = ()
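# A hedged usage sketch (not part of this model): counting feedback entries
# per search term with the standard Django ORM; the region and language
# values are placeholders supplied by the caller.
from django.db.models import Count

def top_search_terms(region, language, limit=10):
    return (
        SearchResultFeedback.objects.filter(region=region, language=language)
        .values("search_query")
        .annotate(entries=Count("id"))
        .order_by("-entries")[:limit]
    )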
| 31.482143
| 131
| 0.653432
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from .feedback import Feedback
class SearchResultFeedback(Feedback):
search_query = models.CharField(max_length=1000, verbose_name=_("search term"))
@property
def object_name(self):
return _("Search results for {}").format(self.search_query)
@property
def object_url(self):
return ""
@property
def related_feedback(self):
return SearchResultFeedback.objects.filter(
region=self.region,
language=self.language,
search_query=self.search_query,
is_technical=self.is_technical,
)
class Meta:
verbose_name = _("search result feedback")
verbose_name_plural = _("search result feedback")
default_permissions = ()
| true
| true
|
f7035c68a7fc1602faabfc647ce3667f17f3a9f2
| 8,429
|
py
|
Python
|
src/pca/todoJunto.py
|
123972/PCA-nutricion
|
aff3c51a71c887c3fa367dbf9d599be5915c80cc
|
[
"MIT"
] | null | null | null |
src/pca/todoJunto.py
|
123972/PCA-nutricion
|
aff3c51a71c887c3fa367dbf9d599be5915c80cc
|
[
"MIT"
] | 2
|
2021-05-11T16:00:55.000Z
|
2021-08-23T20:45:22.000Z
|
src/pca/todoJunto.py
|
123972/PCA-nutricion
|
aff3c51a71c887c3fa367dbf9d599be5915c80cc
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
import codecs
import sys
import sklearn as sk
import pandas as pd
import numpy as np
import math
from sklearn import preprocessing
from sklearn.decomposition import PCA
from src.pca.algoritmo_QR import eigenvectores_eigenvalores_QR_vf
from src.pca.metodo_potencia_deflation import power_iteration
from src.pca.metodo_potencia_deflation import power_deflation
def PCA_from_sklearn(X):
"""
componentes_principales(X): Función que devuelve las componentes principales.
Parámetros
----------
n_components: número de componentes.
svd_solver: str {‘auto’, ‘full’, ‘arpack’, ‘randomized’}
Se elige 'full', lo que significa que se ejecuta completamente SVD llamando al
solucionador estándar LAPACK a través de scipy.linalg.svd y se seleccionan los componentes mediante postprocessing.
Atributos
---------
varianza_explicada: porcentaje de varianza explicada por cada componente.
valores_singulares: valores singulares correspondientes a cada componente.
pca.components_: ejes principales que representan las direcciones de máxima varianza en los datos.
eigenvalues: son los valores propios utilizando la matriz de covarianza.
Método
---------
fit_transform: ajusta el modelo a los datos y aplica la reducción de dimensionalidad en los datos.
"""
X = pd.DataFrame(X)
n_components = len(X.columns)
pca_1 = PCA(n_components, svd_solver='full')
componentesprincipales_1 = pca_1.fit_transform(X)
pca_1.components_
var_exp = pca_1.explained_variance_ratio_
    # The number of components is chosen from the cumulative explained variance, which must reach 80%.
var_acumulada = var_exp.cumsum()
conteo = (var_acumulada) < 0.8
n_componentes = conteo.sum() + 1
pca = PCA(n_componentes, svd_solver='full')
componentesprincipales = pca.fit_transform(X)
pca.components_
varianza_explicada = pca.explained_variance_ratio_
eigenvalues = pca.explained_variance_
val_sing = pca.singular_values_
return pca, varianza_explicada, componentesprincipales, val_sing, pca.components_, eigenvalues
def PCA_from_SVD(A):
"""
Función para PCA a partir de la SVD de numpy
params: A matriz de datos
num_componentes número de componentes deseados
return: valores_singulares Los valores singulares de la descomposición SVD
componentes Los coeficientes para calcular los componentes principales
Z Los datos transformados (componentes principales)
varianza_explicada La varianza explicada por cada componente principal
"""
    # Center the data
    A = np.array(A)  # convert the data to a numpy array in case it comes from a DataFrame
    A_centered = A - A.mean(axis=0)
    # Compute the SVD
    U, S, Vt = np.linalg.svd(A_centered, full_matrices=False)
    # The singular values
    valores_singulares = S
    # The components (coefficients)
    componentes = Vt
    # The transformed data (principal components)
    Z = A_centered @ np.transpose(Vt)
    # The explained variance
    varianza_explicada = S**2 / np.sum(S**2)
    # Choose the number of components automatically from the explained variance
    # Threshold: 80%
    n = A.shape[1]  # number of columns
    varianza_acumulada = varianza_explicada.cumsum()
    conteo = (varianza_acumulada) < 0.8
    num_componentes = conteo.sum() + 1
    # return 4 objects
return valores_singulares[:num_componentes], componentes[:num_componentes], Z[:,:num_componentes], varianza_explicada[:num_componentes]
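# Hedged sanity check (illustrative only, not part of the original module):
# on random data the SVD route above should reproduce scikit-learn's
# explained-variance ratios for the retained components, up to sign flips of
# the component vectors. Shapes and the seed are arbitrary.
def _check_pca_from_svd():
    rng = np.random.default_rng(0)
    X_demo = rng.normal(size=(100, 5))
    _, _, _, var_exp = PCA_from_SVD(X_demo)
    ref = PCA(svd_solver='full').fit(X_demo).explained_variance_ratio_
    assert np.allclose(var_exp, ref[:len(var_exp)])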
def PCA_from_SVD_jacobi(A):
"""
Función para PCA a partir de la SVD
params: A matriz de datos
num_componentes número de componentes deseados
return: valores_singulares Los valores singulares de la descomposición SVD
componentes Los coeficientes para calcular los componentes principales
Z Los datos transformados (componentes principales)
varianza_explicada La varianza explicada por cada componente principal
"""
    # Center the data
    A = np.array(A)  # convert the data to a numpy array in case it comes from a DataFrame
    A_centered = A - A.mean(axis=0)
    # Compute the SVD by calling the team's Jacobi implementation
    # (svd_jacobi_aprox must be supplied by the surrounding environment)
    U, S, Vt = svd_jacobi_aprox(A_centered, 1e-12, 500)
    # The singular values
    valores_singulares = S
    # The components (coefficients)
    componentes = Vt
    # The transformed data (principal components)
    Z = A_centered @ np.transpose(Vt)
    # The explained variance
    varianza_explicada = S**2 / np.sum(S**2)
    # Choose the number of components automatically from the explained variance
    # Threshold: 80%
    n = A.shape[1]  # number of columns
    varianza_acumulada = varianza_explicada.cumsum()
    conteo = (varianza_acumulada) < 0.8
    num_componentes = conteo.sum() + 1
    # return 4 objects
return valores_singulares[:(num_componentes)], componentes[:(num_componentes)], Z[:,:(num_componentes)], varianza_explicada[:(num_componentes)]
def PCA_from_QR_vf(data,niter = 450):
"""
Función para PCA a partir de los eigenvectores
params: data: matriz de datos
niter: número de iteraciones máximas
return: componentes Los coeficientes para calcular los componentes principales (eigenvectores de la matriz de covarianzas)
Z Los datos transformados (componentes principales)
varianza_explicada La varianza explicada por cada componente principal
Depende de la función: eigenvectores_QR
"""
    # convert to array
    A = np.array(data)
    # Center the data
    mean_vec = np.mean(A, axis=0)
    datos_centrados = (A - mean_vec)
    # Covariance matrix
    #C = (datos_centrados.T@datos_centrados)/(datos_centrados.shape[0]-1)
    C = (A - mean_vec).T.dot((A - mean_vec)) / (A.shape[0]-1)
    # Run the QR algorithm
    E, Q = eigenvectores_eigenvalores_QR_vf(C, niter)
    # The components (coefficients)
    componentes = Q.T
    # The transformed data (principal components)
    # Filtering used to fail here because Z was not recognised as a numpy array
    Z = datos_centrados @ Q
    # The explained variance
    varianza_explicada = E / np.sum(E)
    # Choose the number of components automatically from the explained variance
    # Threshold: 80%
    n = data.shape[1]  # number of columns
    varianza_acumulada = varianza_explicada.cumsum()
    conteo = (varianza_acumulada) < 0.8
    num_componentes = conteo.sum() + 1
    # return 4 objects
    return E[:num_componentes], componentes[:num_componentes], Z[:,:num_componentes], varianza_explicada[:num_componentes]  #, varianza_acumulada, num_componentes
def PCA_from_potencia(X):
"""
Función que calcula PCA a partir del método de la potencia y deflation de Hotteling
params: A: matriz de datos
return: eigenvalues Numpy array con los eigenvectores de A
eigenvectors Numpy array con los correspondientes eigenvectores de A
"""
prop = 0 # Proporción de varianza explicada
comp = 1
cur_var = 0
comp_vecs = np.zeros([X.shape[1], X.shape[1]])
# convertir a array
A = np.array(X)
# Centrar los datos
mean_vec = np.mean(A, axis=0)
datos_centrados = (A - mean_vec)
#Calculamos la matriz de covarianzas
cov = np.dot(X.T, X)/X.shape[0]
#Aplicamos el método de la potencia
evalues_pow, evectors_pow = power_deflation(cov,2000)
# La varianza explicada
varianza_explicada = evalues_pow/np.sum(evalues_pow)
# Los datos transformados (componentes principales)
Z = datos_centrados@evectors_pow
# Calcula número de componentes de manera automatica de acuerdo a la variana explicada
# Threshold de 80%
n = X.shape[1] #numero de columnas
varianza_acumulada = varianza_explicada.cumsum()
conteo = (varianza_acumulada) < 0.8
num_componentes = conteo.sum() + 1
return evalues_pow[:num_componentes], evectors_pow.T[:num_componentes], Z[:,:num_componentes], varianza_explicada[:num_componentes]
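# Hedged usage example (not part of the original module): running the
# power-iteration variant end to end on a small random matrix; the shapes
# and seed below are arbitrary.
def _demo_pca_from_potencia():
    rng = np.random.default_rng(1)
    X_demo = rng.normal(size=(50, 4))
    evals, comps, Z, var_exp = PCA_from_potencia(X_demo)
    print("components kept:", Z.shape[1], "explained variance:", var_exp.round(3))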
| 35.267782
| 161
| 0.696643
|
import codecs
import sys
import sklearn as sk
import pandas as pd
import numpy as np
import math
from sklearn import preprocessing
from sklearn.decomposition import PCA
from src.pca.algoritmo_QR import eigenvectores_eigenvalores_QR_vf
from src.pca.metodo_potencia_deflation import power_iteration
from src.pca.metodo_potencia_deflation import power_deflation
def PCA_from_sklearn(X):
X = pd.DataFrame(X)
n_components = len(X.columns)
pca_1 = PCA(n_components, svd_solver='full')
componentesprincipales_1 = pca_1.fit_transform(X)
pca_1.components_
var_exp = pca_1.explained_variance_ratio_
var_acumulada = var_exp.cumsum()
conteo = (var_acumulada) < 0.8
n_componentes = conteo.sum() + 1
pca = PCA(n_componentes, svd_solver='full')
componentesprincipales = pca.fit_transform(X)
pca.components_
varianza_explicada = pca.explained_variance_ratio_
eigenvalues = pca.explained_variance_
val_sing = pca.singular_values_
return pca, varianza_explicada, componentesprincipales, val_sing, pca.components_, eigenvalues
def PCA_from_SVD(A):
    A = np.array(A)
    A_centered = A - A.mean(axis=0)
U, S, Vt = np.linalg.svd(A_centered, full_matrices=False)
valores_singulares = S
componentes = ((Vt))
Z = A_centered@np.transpose(Vt)
varianza_explicada = S**2/np.sum(S**2)
    n = A.shape[1]
    varianza_acumulada = varianza_explicada.cumsum()
conteo = (varianza_acumulada) < 0.8
num_componentes = conteo.sum() + 1
return valores_singulares[:num_componentes], componentes[:num_componentes], Z[:,:num_componentes], varianza_explicada[:num_componentes]
def PCA_from_SVD_jacobi(A):
    A = np.array(A)
    A_centered = A - A.mean(axis=0)
U, S, Vt = svd_jacobi_aprox(A_centered,1e-12,500)
valores_singulares = S
componentes = ((Vt))
Z = A_centered@np.transpose(Vt)
varianza_explicada = S**2/np.sum(S**2)
    n = A.shape[1]
    varianza_acumulada = varianza_explicada.cumsum()
conteo = (varianza_acumulada) < 0.8
num_componentes = conteo.sum() + 1
return valores_singulares[:(num_componentes)], componentes[:(num_componentes)], Z[:,:(num_componentes)], varianza_explicada[:(num_componentes)]
def PCA_from_QR_vf(data,niter = 450):
A = np.array(data)
mean_vec = np.mean(A, axis=0)
datos_centrados = (A - mean_vec)
C = (A - mean_vec).T.dot((A - mean_vec)) / (A.shape[0]-1)
E, Q = eigenvectores_eigenvalores_QR_vf(C,niter)
componentes = Q.T
Z = datos_centrados@Q
varianza_explicada = E/np.sum(E)
    n = data.shape[1]
    varianza_acumulada = varianza_explicada.cumsum()
conteo = (varianza_acumulada) < 0.8
num_componentes = conteo.sum() + 1
return E[:num_componentes], componentes[:num_componentes], Z[:,:num_componentes], varianza_explicada[:num_componentes]
def PCA_from_potencia(X):
    prop = 0
    comp = 1
cur_var = 0
comp_vecs = np.zeros([X.shape[1], X.shape[1]])
A = np.array(X)
mean_vec = np.mean(A, axis=0)
datos_centrados = (A - mean_vec)
    cov = np.dot(datos_centrados.T, datos_centrados) / X.shape[0]
evalues_pow, evectors_pow = power_deflation(cov,2000)
varianza_explicada = evalues_pow/np.sum(evalues_pow)
Z = datos_centrados@evectors_pow
    n = X.shape[1]
    varianza_acumulada = varianza_explicada.cumsum()
conteo = (varianza_acumulada) < 0.8
num_componentes = conteo.sum() + 1
return evalues_pow[:num_componentes], evectors_pow.T[:num_componentes], Z[:,:num_componentes], varianza_explicada[:num_componentes]
| true
| true
|
f7035c8f97025b538d6a14199b2f82b3876c816c
| 12,847
|
py
|
Python
|
pythonProject/resturant_management/resturant.py
|
slowy07/pythonApps
|
22f9766291dbccd8185035745950c5ee4ebd6a3e
|
[
"MIT"
] | 10
|
2020-10-09T11:05:18.000Z
|
2022-02-13T03:22:10.000Z
|
pythonProject/resturant_management/resturant.py
|
khairanabila/pythonApps
|
f90b8823f939b98f7bf1dea7ed35fe6e22e2f730
|
[
"MIT"
] | null | null | null |
pythonProject/resturant_management/resturant.py
|
khairanabila/pythonApps
|
f90b8823f939b98f7bf1dea7ed35fe6e22e2f730
|
[
"MIT"
] | 6
|
2020-11-26T12:49:43.000Z
|
2022-03-06T06:46:43.000Z
|
from tkinter import *
import random
import time
root = Tk()
root.geometry("1600x700+0+0")
root.title("Restaurant Management System")
Tops = Frame(root,bg="white",width = 1600,height=50,relief=SUNKEN)
Tops.pack(side=TOP)
f1 = Frame(root,width = 900,height=700,relief=SUNKEN)
f1.pack(side=LEFT)
f2 = Frame(root ,width = 400,height=700,relief=SUNKEN)
f2.pack(side=RIGHT)
#------------------TIME--------------
localtime=time.asctime(time.localtime(time.time()))
#-----------------INFO TOP------------
lblinfo = Label(Tops, font=( 'aria' ,30, 'bold' ),text="Restaurant Management System",fg="steel blue",bd=10,anchor='w')
lblinfo.grid(row=0,column=0)
lblinfo = Label(Tops, font=( 'aria' ,20, ),text=localtime,fg="steel blue",anchor=W)
lblinfo.grid(row=1,column=0)
#---------------Calculator------------------
text_Input=StringVar()
operator =""
txtdisplay = Entry(f2,font=('ariel' ,20,'bold'), textvariable=text_Input , bd=5 ,insertwidth=7 ,bg="white",justify='right')
txtdisplay.grid(columnspan=4)
def btnclick(numbers):
global operator
operator=operator + str(numbers)
text_Input.set(operator)
def clrdisplay():
global operator
operator=""
text_Input.set("")
def eqals():
    global operator
    try:
        sumup = str(eval(operator))
    except (SyntaxError, ZeroDivisionError):
        sumup = "Error"
    text_Input.set(sumup)
    operator = ""
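# Hedged alternative (not wired into the UI): eval() on raw input can execute
# arbitrary Python, so a restricted arithmetic evaluator built on the ast
# module is safer. This sketch accepts only +, -, *, / and plain numbers.
import ast
import operator as _op

_OPS = {ast.Add: _op.add, ast.Sub: _op.sub, ast.Mult: _op.mul, ast.Div: _op.truediv}

def safe_eval(expr):
    def walk(node):
        if isinstance(node, ast.Expression):
            return walk(node.body)
        if isinstance(node, ast.BinOp) and type(node.op) in _OPS:
            return _OPS[type(node.op)](walk(node.left), walk(node.right))
        if isinstance(node, ast.UnaryOp) and isinstance(node.op, ast.USub):
            return -walk(node.operand)
        if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
            return node.value
        raise ValueError("unsupported expression")
    return walk(ast.parse(expr, mode="eval"))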
def Ref():
x=random.randint(12980, 50876)
randomRef = str(x)
rand.set(randomRef)
cof =float(Fries.get())
colfries= float(Largefries.get())
cob= float(Burger.get())
cofi= float(Filet.get())
cochee= float(Cheese_burger.get())
codr= float(Drinks.get())
costoffries = cof*25
costoflargefries = colfries*40
costofburger = cob*35
costoffilet = cofi*50
costofcheeseburger = cochee*50
costofdrinks = codr*35
    costofmeal = "Rp. " + str('%.2f' % (costoffries + costoflargefries + costofburger + costoffilet + costofcheeseburger + costofdrinks))
    PayTax = ((costoffries + costoflargefries + costofburger + costoffilet + costofcheeseburger + costofdrinks) * 0.33)
    Totalcost = (costoffries + costoflargefries + costofburger + costoffilet + costofcheeseburger + costofdrinks)
    Ser_Charge = ((costoffries + costoflargefries + costofburger + costoffilet + costofcheeseburger + costofdrinks) / 99)
    Service = "Rp. " + str('%.2f' % Ser_Charge)
    OverAllCost = "Rp. " + str('%.2f' % (PayTax + Totalcost + Ser_Charge))
    PaidTax = "Rp. " + str('%.2f' % PayTax)
Service_Charge.set(Service)
cost.set(costofmeal)
Tax.set(PaidTax)
Subtotal.set(costofmeal)
Total.set(OverAllCost)
def qexit():
root.destroy()
def reset():
rand.set("")
Fries.set("")
Largefries.set("")
Burger.set("")
Filet.set("")
Subtotal.set("")
Total.set("")
Service_Charge.set("")
Drinks.set("")
Tax.set("")
cost.set("")
Cheese_burger.set("")
btn7=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="7",bg="powder blue", command=lambda: btnclick(7) )
btn7.grid(row=2,column=0)
btn8=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="8",bg="powder blue", command=lambda: btnclick(8) )
btn8.grid(row=2,column=1)
btn9=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="9",bg="powder blue", command=lambda: btnclick(9) )
btn9.grid(row=2,column=2)
Addition=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="+",bg="powder blue", command=lambda: btnclick("+") )
Addition.grid(row=2,column=3)
#---------------------------------------------------------------------------------------------
btn4=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="4",bg="powder blue", command=lambda: btnclick(4) )
btn4.grid(row=3,column=0)
btn5=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="5",bg="powder blue", command=lambda: btnclick(5) )
btn5.grid(row=3,column=1)
btn6=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="6",bg="powder blue", command=lambda: btnclick(6) )
btn6.grid(row=3,column=2)
Substraction=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="-",bg="powder blue", command=lambda: btnclick("-") )
Substraction.grid(row=3,column=3)
#-----------------------------------------------------------------------------------------------
btn1=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="1",bg="powder blue", command=lambda: btnclick(1) )
btn1.grid(row=4,column=0)
btn2=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="2",bg="powder blue", command=lambda: btnclick(2) )
btn2.grid(row=4,column=1)
btn3=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="3",bg="powder blue", command=lambda: btnclick(3) )
btn3.grid(row=4,column=2)
multiply=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="*",bg="powder blue", command=lambda: btnclick("*") )
multiply.grid(row=4,column=3)
#------------------------------------------------------------------------------------------------
btn0=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="0",bg="powder blue", command=lambda: btnclick(0) )
btn0.grid(row=5,column=0)
btnc=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="c",bg="powder blue", command=clrdisplay)
btnc.grid(row=5,column=1)
btnequal=Button(f2,padx=16,pady=16,bd=4,width = 16, fg="black", font=('ariel', 20 ,'bold'),text="=",bg="powder blue",command=eqals)
btnequal.grid(columnspan=4)
Decimal=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text=".",bg="powder blue", command=lambda: btnclick(".") )
Decimal.grid(row=5,column=2)
Division=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="/",bg="powder blue", command=lambda: btnclick("/") )
Division.grid(row=5,column=3)
status = Label(f2,font=('aria', 15, 'bold'),width = 16, text="clifter restaurant",bd=2,relief=SUNKEN)
status.grid(row=7,columnspan=3)
#---------------------------------------------------------------------------------------
rand = StringVar()
Fries = StringVar()
Largefries = StringVar()
Burger = StringVar()
Filet = StringVar()
Subtotal = StringVar()
Total = StringVar()
Service_Charge = StringVar()
Drinks = StringVar()
Tax = StringVar()
cost = StringVar()
Cheese_burger = StringVar()
lblreference = Label(f1, font=( 'aria' ,16, 'bold' ),text="Order No.",fg="steel blue",bd=10,anchor='w')
lblreference.grid(row=0,column=0)
txtreference = Entry(f1,font=('ariel' ,16,'bold'), textvariable=rand , bd=6,insertwidth=4,bg="powder blue" ,justify='right')
txtreference.grid(row=0,column=1)
lblfries = Label(f1, font=( 'aria' ,16, 'bold' ),text="Fries Meal",fg="steel blue",bd=10,anchor='w')
lblfries.grid(row=1,column=0)
txtfries = Entry(f1,font=('ariel' ,16,'bold'), textvariable=Fries , bd=6,insertwidth=4,bg="powder blue" ,justify='right')
txtfries.grid(row=1,column=1)
lblLargefries = Label(f1, font=( 'aria' ,16, 'bold' ),text="Lunch Meal",fg="steel blue",bd=10,anchor='w')
lblLargefries.grid(row=2,column=0)
txtLargefries = Entry(f1,font=('ariel' ,16,'bold'), textvariable=Largefries , bd=6,insertwidth=4,bg="powder blue" ,justify='right')
txtLargefries.grid(row=2,column=1)
lblburger = Label(f1, font=( 'aria' ,16, 'bold' ),text="Burger Meal",fg="steel blue",bd=10,anchor='w')
lblburger.grid(row=3,column=0)
txtburger = Entry(f1,font=('ariel' ,16,'bold'), textvariable=Burger , bd=6,insertwidth=4,bg="powder blue" ,justify='right')
txtburger.grid(row=3,column=1)
lblFilet = Label(f1, font=( 'aria' ,16, 'bold' ),text="Pizza Meal",fg="steel blue",bd=10,anchor='w')
lblFilet.grid(row=4,column=0)
txtFilet = Entry(f1,font=('ariel' ,16,'bold'), textvariable=Filet , bd=6,insertwidth=4,bg="powder blue" ,justify='right')
txtFilet.grid(row=4,column=1)
lblCheese_burger = Label(f1, font=( 'aria' ,16, 'bold' ),text="Cheese burger",fg="steel blue",bd=10,anchor='w')
lblCheese_burger.grid(row=5,column=0)
txtCheese_burger = Entry(f1,font=('ariel' ,16,'bold'), textvariable=Cheese_burger , bd=6,insertwidth=4,bg="powder blue" ,justify='right')
txtCheese_burger.grid(row=5,column=1)
#--------------------------------------------------------------------------------------
lblDrinks = Label(f1, font=( 'aria' ,16, 'bold' ),text="Drinks",fg="steel blue",bd=10,anchor='w')
lblDrinks.grid(row=0,column=2)
txtDrinks = Entry(f1,font=('ariel' ,16,'bold'), textvariable=Drinks , bd=6,insertwidth=4,bg="powder blue" ,justify='right')
txtDrinks.grid(row=0,column=3)
lblcost = Label(f1, font=( 'aria' ,16, 'bold' ),text="cost",fg="steel blue",bd=10,anchor='w')
lblcost.grid(row=1,column=2)
txtcost = Entry(f1,font=('ariel' ,16,'bold'), textvariable=cost , bd=6,insertwidth=4,bg="powder blue" ,justify='right')
txtcost.grid(row=1,column=3)
lblService_Charge = Label(f1, font=( 'aria' ,16, 'bold' ),text="Service Charge",fg="steel blue",bd=10,anchor='w')
lblService_Charge.grid(row=2,column=2)
txtService_Charge = Entry(f1,font=('ariel' ,16,'bold'), textvariable=Service_Charge , bd=6,insertwidth=4,bg="powder blue" ,justify='right')
txtService_Charge.grid(row=2,column=3)
lblTax = Label(f1, font=( 'aria' ,16, 'bold' ),text="Tax",fg="steel blue",bd=10,anchor='w')
lblTax.grid(row=3,column=2)
txtTax = Entry(f1,font=('ariel' ,16,'bold'), textvariable=Tax , bd=6,insertwidth=4,bg="powder blue" ,justify='right')
txtTax.grid(row=3,column=3)
lblSubtotal = Label(f1, font=( 'aria' ,16, 'bold' ),text="Subtotal",fg="steel blue",bd=10,anchor='w')
lblSubtotal.grid(row=4,column=2)
txtSubtotal = Entry(f1,font=('ariel' ,16,'bold'), textvariable=Subtotal , bd=6,insertwidth=4,bg="powder blue" ,justify='right')
txtSubtotal.grid(row=4,column=3)
lblTotal = Label(f1, font=( 'aria' ,16, 'bold' ),text="Total",fg="steel blue",bd=10,anchor='w')
lblTotal.grid(row=5,column=2)
txtTotal = Entry(f1,font=('ariel' ,16,'bold'), textvariable=Total , bd=6,insertwidth=4,bg="powder blue" ,justify='right')
txtTotal.grid(row=5,column=3)
#-----------------------------------------buttons------------------------------------------
lblTotal = Label(f1,text="---------------------",fg="white")
lblTotal.grid(row=6,columnspan=3)
btnTotal=Button(f1,padx=16,pady=8, bd=10 ,fg="black",font=('ariel' ,16,'bold'),width=10, text="TOTAL", bg="powder blue",command=Ref)
btnTotal.grid(row=7, column=1)
btnreset=Button(f1,padx=16,pady=8, bd=10 ,fg="black",font=('ariel' ,16,'bold'),width=10, text="RESET", bg="powder blue",command=reset)
btnreset.grid(row=7, column=2)
btnexit=Button(f1,padx=16,pady=8, bd=10 ,fg="black",font=('ariel' ,16,'bold'),width=10, text="EXIT", bg="powder blue",command=qexit)
btnexit.grid(row=7, column=3)
def price():
roo = Tk()
roo.geometry("600x220+0+0")
roo.title("Price List")
lblinfo = Label(roo, font=('aria', 15, 'bold'), text="ITEM", fg="black", bd=5)
lblinfo.grid(row=0, column=0)
lblinfo = Label(roo, font=('aria', 15,'bold'), text="_____________", fg="white", anchor=W)
lblinfo.grid(row=0, column=2)
lblinfo = Label(roo, font=('aria', 15, 'bold'), text="PRICE", fg="black", anchor=W)
lblinfo.grid(row=0, column=3)
lblinfo = Label(roo, font=('aria', 15, 'bold'), text="Fries Meal", fg="steel blue", anchor=W)
lblinfo.grid(row=1, column=0)
lblinfo = Label(roo, font=('aria', 15, 'bold'), text="25", fg="steel blue", anchor=W)
lblinfo.grid(row=1, column=3)
lblinfo = Label(roo, font=('aria', 15, 'bold'), text="Lunch Meal", fg="steel blue", anchor=W)
lblinfo.grid(row=2, column=0)
lblinfo = Label(roo, font=('aria', 15, 'bold'), text="40", fg="steel blue", anchor=W)
lblinfo.grid(row=2, column=3)
lblinfo = Label(roo, font=('aria', 15, 'bold'), text="Burger Meal", fg="steel blue", anchor=W)
lblinfo.grid(row=3, column=0)
lblinfo = Label(roo, font=('aria', 15, 'bold'), text="35", fg="steel blue", anchor=W)
lblinfo.grid(row=3, column=3)
lblinfo = Label(roo, font=('aria', 15, 'bold'), text="Pizza Meal", fg="steel blue", anchor=W)
lblinfo.grid(row=4, column=0)
lblinfo = Label(roo, font=('aria', 15, 'bold'), text="50", fg="steel blue", anchor=W)
lblinfo.grid(row=4, column=3)
lblinfo = Label(roo, font=('aria', 15, 'bold'), text="Cheese Burger", fg="steel blue", anchor=W)
lblinfo.grid(row=5, column=0)
lblinfo = Label(roo, font=('aria', 15, 'bold'), text="30", fg="steel blue", anchor=W)
lblinfo.grid(row=5, column=3)
lblinfo = Label(roo, font=('aria', 15, 'bold'), text="Drinks", fg="steel blue", anchor=W)
lblinfo.grid(row=6, column=0)
lblinfo = Label(roo, font=('aria', 15, 'bold'), text="35", fg="steel blue", anchor=W)
lblinfo.grid(row=6, column=3)
roo.mainloop()
btnprice=Button(f1,padx=16,pady=8, bd=10 ,fg="black",font=('ariel' ,16,'bold'),width=10, text="PRICE", bg="powder blue",command=price)
btnprice.grid(row=7, column=0)
root.mainloop()
| 45.077193
| 142
| 0.639838
|
from tkinter import *
import random
import time
root = Tk()
root.geometry("1600x700+0+0")
root.title("Restaurant Management System")
Tops = Frame(root,bg="white",width = 1600,height=50,relief=SUNKEN)
Tops.pack(side=TOP)
f1 = Frame(root,width = 900,height=700,relief=SUNKEN)
f1.pack(side=LEFT)
f2 = Frame(root ,width = 400,height=700,relief=SUNKEN)
f2.pack(side=RIGHT)
localtime=time.asctime(time.localtime(time.time()))
lblinfo = Label(Tops, font=( 'aria' ,30, 'bold' ),text="Restaurant Management System",fg="steel blue",bd=10,anchor='w')
lblinfo.grid(row=0,column=0)
lblinfo = Label(Tops, font=( 'aria' ,20, ),text=localtime,fg="steel blue",anchor=W)
lblinfo.grid(row=1,column=0)
text_Input=StringVar()
operator =""
txtdisplay = Entry(f2,font=('ariel' ,20,'bold'), textvariable=text_Input , bd=5 ,insertwidth=7 ,bg="white",justify='right')
txtdisplay.grid(columnspan=4)
def btnclick(numbers):
global operator
operator=operator + str(numbers)
text_Input.set(operator)
def clrdisplay():
global operator
operator=""
text_Input.set("")
def eqals():
    global operator
    try:
        sumup = str(eval(operator))
    except (SyntaxError, ZeroDivisionError):
        sumup = "Error"
    text_Input.set(sumup)
    operator = ""
def Ref():
x=random.randint(12980, 50876)
randomRef = str(x)
rand.set(randomRef)
cof =float(Fries.get())
colfries= float(Largefries.get())
cob= float(Burger.get())
cofi= float(Filet.get())
cochee= float(Cheese_burger.get())
codr= float(Drinks.get())
costoffries = cof*25
costoflargefries = colfries*40
costofburger = cob*35
costoffilet = cofi*50
costofcheeseburger = cochee*50
costofdrinks = codr*35
    costofmeal = "Rp. " + str('%.2f' % (costoffries + costoflargefries + costofburger + costoffilet + costofcheeseburger + costofdrinks))
    PayTax = ((costoffries + costoflargefries + costofburger + costoffilet + costofcheeseburger + costofdrinks) * 0.33)
    Totalcost = (costoffries + costoflargefries + costofburger + costoffilet + costofcheeseburger + costofdrinks)
    Ser_Charge = ((costoffries + costoflargefries + costofburger + costoffilet + costofcheeseburger + costofdrinks) / 99)
    Service = "Rp. " + str('%.2f' % Ser_Charge)
    OverAllCost = "Rp. " + str('%.2f' % (PayTax + Totalcost + Ser_Charge))
    PaidTax = "Rp. " + str('%.2f' % PayTax)
Service_Charge.set(Service)
cost.set(costofmeal)
Tax.set(PaidTax)
Subtotal.set(costofmeal)
Total.set(OverAllCost)
def qexit():
root.destroy()
def reset():
rand.set("")
Fries.set("")
Largefries.set("")
Burger.set("")
Filet.set("")
Subtotal.set("")
Total.set("")
Service_Charge.set("")
Drinks.set("")
Tax.set("")
cost.set("")
Cheese_burger.set("")
btn7=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="7",bg="powder blue", command=lambda: btnclick(7) )
btn7.grid(row=2,column=0)
btn8=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="8",bg="powder blue", command=lambda: btnclick(8) )
btn8.grid(row=2,column=1)
btn9=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="9",bg="powder blue", command=lambda: btnclick(9) )
btn9.grid(row=2,column=2)
Addition=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="+",bg="powder blue", command=lambda: btnclick("+") )
Addition.grid(row=2,column=3)
btn4=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="4",bg="powder blue", command=lambda: btnclick(4) )
btn4.grid(row=3,column=0)
btn5=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="5",bg="powder blue", command=lambda: btnclick(5) )
btn5.grid(row=3,column=1)
btn6=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="6",bg="powder blue", command=lambda: btnclick(6) )
btn6.grid(row=3,column=2)
Substraction=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="-",bg="powder blue", command=lambda: btnclick("-") )
Substraction.grid(row=3,column=3)
btn1=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="1",bg="powder blue", command=lambda: btnclick(1) )
btn1.grid(row=4,column=0)
btn2=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="2",bg="powder blue", command=lambda: btnclick(2) )
btn2.grid(row=4,column=1)
btn3=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="3",bg="powder blue", command=lambda: btnclick(3) )
btn3.grid(row=4,column=2)
multiply=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="*",bg="powder blue", command=lambda: btnclick("*") )
multiply.grid(row=4,column=3)
btn0=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="0",bg="powder blue", command=lambda: btnclick(0) )
btn0.grid(row=5,column=0)
btnc=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="c",bg="powder blue", command=clrdisplay)
btnc.grid(row=5,column=1)
btnequal=Button(f2,padx=16,pady=16,bd=4,width = 16, fg="black", font=('ariel', 20 ,'bold'),text="=",bg="powder blue",command=eqals)
btnequal.grid(columnspan=4)
Decimal=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text=".",bg="powder blue", command=lambda: btnclick(".") )
Decimal.grid(row=5,column=2)
Division=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="/",bg="powder blue", command=lambda: btnclick("/") )
Division.grid(row=5,column=3)
status = Label(f2,font=('aria', 15, 'bold'),width = 16, text="clifter restaurant",bd=2,relief=SUNKEN)
status.grid(row=7,columnspan=3)
rand = StringVar()
Fries = StringVar()
Largefries = StringVar()
Burger = StringVar()
Filet = StringVar()
Subtotal = StringVar()
Total = StringVar()
Service_Charge = StringVar()
Drinks = StringVar()
Tax = StringVar()
cost = StringVar()
Cheese_burger = StringVar()
lblreference = Label(f1, font=( 'aria' ,16, 'bold' ),text="Order No.",fg="steel blue",bd=10,anchor='w')
lblreference.grid(row=0,column=0)
txtreference = Entry(f1,font=('ariel' ,16,'bold'), textvariable=rand , bd=6,insertwidth=4,bg="powder blue" ,justify='right')
txtreference.grid(row=0,column=1)
lblfries = Label(f1, font=( 'aria' ,16, 'bold' ),text="Fries Meal",fg="steel blue",bd=10,anchor='w')
lblfries.grid(row=1,column=0)
txtfries = Entry(f1,font=('ariel' ,16,'bold'), textvariable=Fries , bd=6,insertwidth=4,bg="powder blue" ,justify='right')
txtfries.grid(row=1,column=1)
lblLargefries = Label(f1, font=( 'aria' ,16, 'bold' ),text="Lunch Meal",fg="steel blue",bd=10,anchor='w')
lblLargefries.grid(row=2,column=0)
txtLargefries = Entry(f1,font=('ariel' ,16,'bold'), textvariable=Largefries , bd=6,insertwidth=4,bg="powder blue" ,justify='right')
txtLargefries.grid(row=2,column=1)
lblburger = Label(f1, font=( 'aria' ,16, 'bold' ),text="Burger Meal",fg="steel blue",bd=10,anchor='w')
lblburger.grid(row=3,column=0)
txtburger = Entry(f1,font=('ariel' ,16,'bold'), textvariable=Burger , bd=6,insertwidth=4,bg="powder blue" ,justify='right')
txtburger.grid(row=3,column=1)
lblFilet = Label(f1, font=( 'aria' ,16, 'bold' ),text="Pizza Meal",fg="steel blue",bd=10,anchor='w')
lblFilet.grid(row=4,column=0)
txtFilet = Entry(f1,font=('ariel' ,16,'bold'), textvariable=Filet , bd=6,insertwidth=4,bg="powder blue" ,justify='right')
txtFilet.grid(row=4,column=1)
lblCheese_burger = Label(f1, font=( 'aria' ,16, 'bold' ),text="Cheese burger",fg="steel blue",bd=10,anchor='w')
lblCheese_burger.grid(row=5,column=0)
txtCheese_burger = Entry(f1,font=('ariel' ,16,'bold'), textvariable=Cheese_burger , bd=6,insertwidth=4,bg="powder blue" ,justify='right')
txtCheese_burger.grid(row=5,column=1)
lblDrinks = Label(f1, font=( 'aria' ,16, 'bold' ),text="Drinks",fg="steel blue",bd=10,anchor='w')
lblDrinks.grid(row=0,column=2)
txtDrinks = Entry(f1,font=('ariel' ,16,'bold'), textvariable=Drinks , bd=6,insertwidth=4,bg="powder blue" ,justify='right')
txtDrinks.grid(row=0,column=3)
lblcost = Label(f1, font=( 'aria' ,16, 'bold' ),text="cost",fg="steel blue",bd=10,anchor='w')
lblcost.grid(row=1,column=2)
txtcost = Entry(f1,font=('ariel' ,16,'bold'), textvariable=cost , bd=6,insertwidth=4,bg="powder blue" ,justify='right')
txtcost.grid(row=1,column=3)
lblService_Charge = Label(f1, font=( 'aria' ,16, 'bold' ),text="Service Charge",fg="steel blue",bd=10,anchor='w')
lblService_Charge.grid(row=2,column=2)
txtService_Charge = Entry(f1,font=('ariel' ,16,'bold'), textvariable=Service_Charge , bd=6,insertwidth=4,bg="powder blue" ,justify='right')
txtService_Charge.grid(row=2,column=3)
lblTax = Label(f1, font=( 'aria' ,16, 'bold' ),text="Tax",fg="steel blue",bd=10,anchor='w')
lblTax.grid(row=3,column=2)
txtTax = Entry(f1,font=('ariel' ,16,'bold'), textvariable=Tax , bd=6,insertwidth=4,bg="powder blue" ,justify='right')
txtTax.grid(row=3,column=3)
lblSubtotal = Label(f1, font=( 'aria' ,16, 'bold' ),text="Subtotal",fg="steel blue",bd=10,anchor='w')
lblSubtotal.grid(row=4,column=2)
txtSubtotal = Entry(f1,font=('ariel' ,16,'bold'), textvariable=Subtotal , bd=6,insertwidth=4,bg="powder blue" ,justify='right')
txtSubtotal.grid(row=4,column=3)
lblTotal = Label(f1, font=( 'aria' ,16, 'bold' ),text="Total",fg="steel blue",bd=10,anchor='w')
lblTotal.grid(row=5,column=2)
txtTotal = Entry(f1,font=('ariel' ,16,'bold'), textvariable=Total , bd=6,insertwidth=4,bg="powder blue" ,justify='right')
txtTotal.grid(row=5,column=3)
lblTotal = Label(f1,text="---------------------",fg="white")
lblTotal.grid(row=6,columnspan=3)
btnTotal=Button(f1,padx=16,pady=8, bd=10 ,fg="black",font=('ariel' ,16,'bold'),width=10, text="TOTAL", bg="powder blue",command=Ref)
btnTotal.grid(row=7, column=1)
btnreset=Button(f1,padx=16,pady=8, bd=10 ,fg="black",font=('ariel' ,16,'bold'),width=10, text="RESET", bg="powder blue",command=reset)
btnreset.grid(row=7, column=2)
btnexit=Button(f1,padx=16,pady=8, bd=10 ,fg="black",font=('ariel' ,16,'bold'),width=10, text="EXIT", bg="powder blue",command=qexit)
btnexit.grid(row=7, column=3)
def price():
roo = Tk()
roo.geometry("600x220+0+0")
roo.title("Price List")
lblinfo = Label(roo, font=('aria', 15, 'bold'), text="ITEM", fg="black", bd=5)
lblinfo.grid(row=0, column=0)
lblinfo = Label(roo, font=('aria', 15,'bold'), text="_____________", fg="white", anchor=W)
lblinfo.grid(row=0, column=2)
lblinfo = Label(roo, font=('aria', 15, 'bold'), text="PRICE", fg="black", anchor=W)
lblinfo.grid(row=0, column=3)
lblinfo = Label(roo, font=('aria', 15, 'bold'), text="Fries Meal", fg="steel blue", anchor=W)
lblinfo.grid(row=1, column=0)
lblinfo = Label(roo, font=('aria', 15, 'bold'), text="25", fg="steel blue", anchor=W)
lblinfo.grid(row=1, column=3)
lblinfo = Label(roo, font=('aria', 15, 'bold'), text="Lunch Meal", fg="steel blue", anchor=W)
lblinfo.grid(row=2, column=0)
lblinfo = Label(roo, font=('aria', 15, 'bold'), text="40", fg="steel blue", anchor=W)
lblinfo.grid(row=2, column=3)
lblinfo = Label(roo, font=('aria', 15, 'bold'), text="Burger Meal", fg="steel blue", anchor=W)
lblinfo.grid(row=3, column=0)
lblinfo = Label(roo, font=('aria', 15, 'bold'), text="35", fg="steel blue", anchor=W)
lblinfo.grid(row=3, column=3)
lblinfo = Label(roo, font=('aria', 15, 'bold'), text="Pizza Meal", fg="steel blue", anchor=W)
lblinfo.grid(row=4, column=0)
lblinfo = Label(roo, font=('aria', 15, 'bold'), text="50", fg="steel blue", anchor=W)
lblinfo.grid(row=4, column=3)
lblinfo = Label(roo, font=('aria', 15, 'bold'), text="Cheese Burger", fg="steel blue", anchor=W)
lblinfo.grid(row=5, column=0)
lblinfo = Label(roo, font=('aria', 15, 'bold'), text="30", fg="steel blue", anchor=W)
lblinfo.grid(row=5, column=3)
lblinfo = Label(roo, font=('aria', 15, 'bold'), text="Drinks", fg="steel blue", anchor=W)
lblinfo.grid(row=6, column=0)
lblinfo = Label(roo, font=('aria', 15, 'bold'), text="35", fg="steel blue", anchor=W)
lblinfo.grid(row=6, column=3)
roo.mainloop()
btnprice=Button(f1,padx=16,pady=8, bd=10 ,fg="black",font=('ariel' ,16,'bold'),width=10, text="PRICE", bg="powder blue",command=price)
btnprice.grid(row=7, column=0)
root.mainloop()
| true
| true
|
f7035dd21c738a30b253fd556306a70e7a3eae5a
| 8,589
|
py
|
Python
|
graph_objs/layout/scene/xaxis/title/_font.py
|
wwwidonja/changed_plotly
|
1bda35a438539a97c84a3ab3952e95e8848467bd
|
[
"MIT"
] | null | null | null |
graph_objs/layout/scene/xaxis/title/_font.py
|
wwwidonja/changed_plotly
|
1bda35a438539a97c84a3ab3952e95e8848467bd
|
[
"MIT"
] | null | null | null |
graph_objs/layout/scene/xaxis/title/_font.py
|
wwwidonja/changed_plotly
|
1bda35a438539a97c84a3ab3952e95e8848467bd
|
[
"MIT"
] | null | null | null |
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Font(_BaseLayoutHierarchyType):
# class properties
# --------------------
_parent_path_str = "layout.scene.xaxis.title"
_path_str = "layout.scene.xaxis.title.font"
_valid_props = {"color", "family", "size"}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
        studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
"""
def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
"""
Construct a new Font object
Sets this axis' title font. Note that the title's font used to
be customized by the now deprecated `titlefont` attribute.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`new_plotly.graph_objs.layout.scene.x
axis.title.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
Returns
-------
Font
"""
super(Font, self).__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the new_plotly.graph_objs.layout.scene.xaxis.title.Font
constructor must be a dict or
an instance of :class:`new_plotly.graph_objs.layout.scene.xaxis.title.Font`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
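# A hedged usage sketch: this is the upstream plotly API (this fork renames
# the package to new_plotly, so the import may need adjusting); the text,
# family, size, and color values are placeholders.
def _demo_font():
    import plotly.graph_objects as go
    return go.Figure(layout=go.Layout(
        scene=dict(xaxis=dict(title=dict(
            text="x",
            font=dict(family="Open Sans, Arial", size=14, color="#444"),
        )))
    ))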
| 37.50655
| 84
| 0.567703
|
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Font(_BaseLayoutHierarchyType):
_parent_path_str = "layout.scene.xaxis.title"
_path_str = "layout.scene.xaxis.title.font"
_valid_props = {"color", "family", "size"}
@property
def color(self):
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def size(self):
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
"""
def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
super(Font, self).__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the new_plotly.graph_objs.layout.scene.xaxis.title.Font
constructor must be a dict or
an instance of :class:`new_plotly.graph_objs.layout.scene.xaxis.title.Font`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
| true
| true
|
f7035e5aadcd002fafb902e2e88b8bdb13c19cee
| 2,823
|
py
|
Python
|
qualifier/deploy/cohorte-home/repo/jsonrpclib/utils.py
|
isandlaTech/cohorte-devtools
|
9ba9021369188d2f0ad5c845ef242fd5a7097b57
|
[
"Apache-2.0"
] | 1
|
2017-03-04T14:37:15.000Z
|
2017-03-04T14:37:15.000Z
|
qualifier/deploy/cohorte-home/repo/jsonrpclib/utils.py
|
isandlaTech/cohorte-devtools
|
9ba9021369188d2f0ad5c845ef242fd5a7097b57
|
[
"Apache-2.0"
] | 4
|
2017-08-21T08:17:14.000Z
|
2018-03-02T13:51:43.000Z
|
qualifier/deploy/cohorte-home/repo/jsonrpclib/utils.py
|
isandlaTech/cohorte-devtools
|
9ba9021369188d2f0ad5c845ef242fd5a7097b57
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -- Content-Encoding: UTF-8 --
"""
Utility methods for compatibility between Python versions
:author: Thomas Calmant
:copyright: Copyright 2015, isandlaTech
:license: Apache License 2.0
:version: 0.2.6
..
Copyright 2015 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Module version
__version_info__ = (0, 2, 6)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
import sys
# ------------------------------------------------------------------------------
if sys.version_info[0] < 3:
# Python 2
# pylint: disable=E1101
import types
try:
STRING_TYPES = (
types.StringType,
types.UnicodeType
)
except NameError:
# Python built without unicode support
STRING_TYPES = (types.StringType,)
NUMERIC_TYPES = (
types.IntType,
types.LongType,
types.FloatType
)
def to_bytes(string):
"""
Converts the given string into bytes
"""
# pylint: disable=E0602
if type(string) is unicode:
return str(string)
return string
def from_bytes(data):
"""
Converts the given bytes into a string
"""
if type(data) is str:
return data
return str(data)
else:
# Python 3
# pylint: disable=E1101
STRING_TYPES = (
bytes,
str
)
NUMERIC_TYPES = (
int,
float
)
def to_bytes(string):
"""
Converts the given string into bytes
"""
if type(string) is bytes:
return string
return bytes(string, "UTF-8")
def from_bytes(data):
"""
Converts the given bytes into a string
"""
if type(data) is str:
return data
return str(data, "UTF-8")
# ------------------------------------------------------------------------------
# Common
DictType = dict
ListType = list
TupleType = tuple
ITERABLE_TYPES = (
list,
set, frozenset,
tuple
)
VALUE_TYPES = (
bool,
type(None)
)
PRIMITIVE_TYPES = STRING_TYPES + NUMERIC_TYPES + VALUE_TYPES
| 22.404762
| 80
| 0.561814
|
__version_info__ = (0, 2, 6)
__version__ = ".".join(str(x) for x in __version_info__)
__docformat__ = "restructuredtext en"
import sys
if sys.version_info[0] < 3:
import types
try:
STRING_TYPES = (
types.StringType,
types.UnicodeType
)
except NameError:
STRING_TYPES = (types.StringType,)
NUMERIC_TYPES = (
types.IntType,
types.LongType,
types.FloatType
)
def to_bytes(string):
if type(string) is unicode:
return str(string)
return string
def from_bytes(data):
if type(data) is str:
return data
return str(data)
else:
STRING_TYPES = (
bytes,
str
)
NUMERIC_TYPES = (
int,
float
)
def to_bytes(string):
"""
Converts the given string into bytes
"""
if type(string) is bytes:
return string
return bytes(string, "UTF-8")
def from_bytes(data):
"""
Converts the given bytes into a string
"""
if type(data) is str:
return data
return str(data, "UTF-8")
DictType = dict
ListType = list
TupleType = tuple
ITERABLE_TYPES = (
list,
set, frozenset,
tuple
)
VALUE_TYPES = (
bool,
type(None)
)
PRIMITIVE_TYPES = STRING_TYPES + NUMERIC_TYPES + VALUE_TYPES
| true
| true
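To make the compatibility helpers concrete, a round-trip sketch of the Python 3 branch; the function bodies are copied from the file above so the snippet runs standalone, and the JSON-RPC payload string is illustrative:

def to_bytes(string):
    # bytes pass through untouched, str is encoded as UTF-8
    if type(string) is bytes:
        return string
    return bytes(string, "UTF-8")

def from_bytes(data):
    # str passes through untouched, bytes are decoded as UTF-8
    if type(data) is str:
        return data
    return str(data, "UTF-8")

payload = '{"jsonrpc": "2.0", "method": "ping"}'
raw = to_bytes(payload)             # encoded bytes, ready for a socket
assert from_bytes(raw) == payload   # decoding restores the original str
assert to_bytes(raw) is raw         # already-encoded data is returned as-is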
|
f7035edc99f19ee433a47167a215f08d1d270643
| 10,949
|
py
|
Python
|
openstack_dashboard/test/api_tests/nova_rest_tests.py
|
Hodorable/0602
|
3b1e4cb7458e4f456bfebc52fc2902205c36cc15
|
[
"Apache-2.0"
] | 1
|
2017-12-07T05:21:58.000Z
|
2017-12-07T05:21:58.000Z
|
openstack_dashboard/test/api_tests/nova_rest_tests.py
|
Hodorable/0602
|
3b1e4cb7458e4f456bfebc52fc2902205c36cc15
|
[
"Apache-2.0"
] | null | null | null |
openstack_dashboard/test/api_tests/nova_rest_tests.py
|
Hodorable/0602
|
3b1e4cb7458e4f456bfebc52fc2902205c36cc15
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014, Rackspace, US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from django.conf import settings
from openstack_dashboard.api.rest import nova
from openstack_dashboard.test import helpers as test
class NovaRestTestCase(test.TestCase):
#
# Keypairs
#
@mock.patch.object(nova.api, 'nova')
def test_keypair_get(self, nc):
request = self.mock_rest_request()
nc.keypair_list.return_value = [
mock.Mock(**{'to_dict.return_value': {'id': 'one'}}),
mock.Mock(**{'to_dict.return_value': {'id': 'two'}}),
]
response = nova.Keypairs().get(request)
self.assertStatusCode(response, 200)
self.assertEqual(response.content,
'{"items": [{"id": "one"}, {"id": "two"}]}')
nc.keypair_list.assert_called_once_with(request)
@mock.patch.object(nova.api, 'nova')
def test_keypair_create(self, nc):
request = self.mock_rest_request(body='''{"name": "Ni!"}''')
new = nc.keypair_create.return_value
new.to_dict.return_value = {'name': 'Ni!', 'public_key': 'sekrit'}
new.name = 'Ni!'
with mock.patch.object(settings, 'DEBUG', True):
response = nova.Keypairs().post(request)
self.assertStatusCode(response, 201)
self.assertEqual(response.content,
'{"name": "Ni!", "public_key": "sekrit"}')
self.assertEqual(response['location'], '/api/nova/keypairs/Ni%21')
nc.keypair_create.assert_called_once_with(request, 'Ni!')
@mock.patch.object(nova.api, 'nova')
def test_keypair_import(self, nc):
request = self.mock_rest_request(body='''
{"name": "Ni!", "public_key": "hi"}
''')
new = nc.keypair_import.return_value
new.to_dict.return_value = {'name': 'Ni!', 'public_key': 'hi'}
new.name = 'Ni!'
with mock.patch.object(settings, 'DEBUG', True):
response = nova.Keypairs().post(request)
self.assertStatusCode(response, 201)
self.assertEqual(response.content,
'{"name": "Ni!", "public_key": "hi"}')
self.assertEqual(response['location'], '/api/nova/keypairs/Ni%21')
nc.keypair_import.assert_called_once_with(request, 'Ni!', 'hi')
#
# Availability Zones
#
def test_availzone_get_brief(self):
self._test_availzone_get(False)
def test_availzone_get_detailed(self):
self._test_availzone_get(True)
@mock.patch.object(nova.api, 'nova')
def _test_availzone_get(self, detail, nc):
if detail:
request = self.mock_rest_request(GET={'detailed': 'true'})
else:
request = self.mock_rest_request(GET={})
nc.availability_zone_list.return_value = [
mock.Mock(**{'to_dict.return_value': {'id': 'one'}}),
mock.Mock(**{'to_dict.return_value': {'id': 'two'}}),
]
response = nova.AvailabilityZones().get(request)
self.assertStatusCode(response, 200)
self.assertEqual(response.content,
'{"items": [{"id": "one"}, {"id": "two"}]}')
nc.availability_zone_list.assert_called_once_with(request, detail)
#
# Limits
#
def test_limits_get_not_reserved(self):
self._test_limits_get(False)
def test_limits_get_reserved(self):
self._test_limits_get(True)
@mock.patch.object(nova.api, 'nova')
def _test_limits_get(self, reserved, nc):
if reserved:
request = self.mock_rest_request(GET={'reserved': 'true'})
else:
request = self.mock_rest_request(GET={})
nc.tenant_absolute_limits.return_value = {'id': 'one'}
response = nova.Limits().get(request)
self.assertStatusCode(response, 200)
nc.tenant_absolute_limits.assert_called_once_with(request, reserved)
self.assertEqual(response.content, '{"id": "one"}')
#
# Servers
#
@mock.patch.object(nova.api, 'nova')
def test_server_create_missing(self, nc):
request = self.mock_rest_request(body='''{"name": "hi"}''')
response = nova.Servers().post(request)
self.assertStatusCode(response, 400)
self.assertEqual(response.content,
'"missing required parameter \'source_id\'"')
nc.server_create.assert_not_called()
@mock.patch.object(nova.api, 'nova')
def test_server_create_basic(self, nc):
request = self.mock_rest_request(body='''{"name": "Ni!",
"source_id": "image123", "flavor_id": "flavor123",
"key_name": "sekrit", "user_data": "base64 yes",
"security_groups": [{"name": "root"}]}
''')
new = nc.server_create.return_value
new.to_dict.return_value = {'id': 'server123'}
new.id = 'server123'
response = nova.Servers().post(request)
self.assertStatusCode(response, 201)
self.assertEqual(response.content, '{"id": "server123"}')
self.assertEqual(response['location'], '/api/nova/servers/server123')
nc.server_create.assert_called_once_with(
request, 'Ni!', 'image123', 'flavor123', 'sekrit', 'base64 yes',
[{'name': 'root'}]
)
@mock.patch.object(nova.api, 'nova')
def test_server_get_single(self, nc):
request = self.mock_rest_request()
nc.server_get.return_value.to_dict.return_value = {'name': '1'}
response = nova.Server().get(request, "1")
self.assertStatusCode(response, 200)
nc.server_get.assert_called_once_with(request, "1")
#
# Extensions
#
@mock.patch.object(nova.api, 'nova')
    def test_extension_list(self, nc):
request = self.mock_rest_request()
nc.list_extensions.return_value = [
mock.Mock(**{'to_dict.return_value': {'name': 'foo'}}),
mock.Mock(**{'to_dict.return_value': {'name': 'bar'}}),
]
response = nova.Extensions().get(request)
self.assertStatusCode(response, 200)
self.assertEqual(response.content,
'{"items": [{"name": "foo"}, {"name": "bar"}]}')
nc.list_extensions.assert_called_once_with(request)
#
# Flavors
#
def test_get_extras_no(self):
self._test_flavor_get_single(get_extras=False)
def test_get_extras_yes(self):
self._test_flavor_get_single(get_extras=True)
def test_get_extras_default(self):
self._test_flavor_get_single(get_extras=None)
@mock.patch.object(nova.api, 'nova')
def _test_flavor_get_single(self, nc, get_extras):
if get_extras:
request = self.mock_rest_request(GET={'get_extras': 'tRuE'})
elif get_extras is None:
request = self.mock_rest_request()
get_extras = False
else:
request = self.mock_rest_request(GET={'get_extras': 'fAlsE'})
nc.flavor_get.return_value.to_dict.return_value = {'name': '1'}
response = nova.Flavor().get(request, "1")
self.assertStatusCode(response, 200)
if get_extras:
self.assertEqual(response.content, '{"extras": {}, "name": "1"}')
else:
self.assertEqual(response.content, '{"name": "1"}')
nc.flavor_get.assert_called_once_with(request, "1",
get_extras=get_extras)
@mock.patch.object(nova.api, 'nova')
def _test_flavor_list_public(self, nc, is_public=None):
if is_public:
request = self.mock_rest_request(GET={'is_public': 'tRuE'})
elif is_public is None:
request = self.mock_rest_request(GET={})
else:
request = self.mock_rest_request(GET={'is_public': 'fAlsE'})
nc.flavor_list.return_value = [
mock.Mock(**{'to_dict.return_value': {'id': '1'}}),
mock.Mock(**{'to_dict.return_value': {'id': '2'}}),
]
response = nova.Flavors().get(request)
self.assertStatusCode(response, 200)
self.assertEqual(response.content,
'{"items": [{"id": "1"}, {"id": "2"}]}')
nc.flavor_list.assert_called_once_with(request, is_public=is_public,
get_extras=False)
def test_flavor_list_private(self):
self._test_flavor_list_public(is_public=False)
def test_flavor_list_public(self):
self._test_flavor_list_public(is_public=True)
def test_flavor_list_public_none(self):
self._test_flavor_list_public(is_public=None)
@mock.patch.object(nova.api, 'nova')
def _test_flavor_list_extras(self, nc, get_extras=None):
if get_extras:
request = self.mock_rest_request(GET={'get_extras': 'tRuE'})
elif get_extras is None:
request = self.mock_rest_request(GET={})
get_extras = False
else:
request = self.mock_rest_request(GET={'get_extras': 'fAlsE'})
nc.flavor_list.return_value = [
mock.Mock(**{'extras': {}, 'to_dict.return_value': {'id': '1'}}),
mock.Mock(**{'extras': {}, 'to_dict.return_value': {'id': '2'}}),
]
response = nova.Flavors().get(request)
self.assertStatusCode(response, 200)
if get_extras:
self.assertEqual(response.content,
'{"items": [{"extras": {}, "id": "1"}, '
'{"extras": {}, "id": "2"}]}')
else:
self.assertEqual(response.content,
'{"items": [{"id": "1"}, {"id": "2"}]}')
nc.flavor_list.assert_called_once_with(request, is_public=None,
get_extras=get_extras)
def test_flavor_list_extras_no(self):
self._test_flavor_list_extras(get_extras=False)
def test_flavor_list_extras_yes(self):
self._test_flavor_list_extras(get_extras=True)
def test_flavor_list_extras_absent(self):
self._test_flavor_list_extras(get_extras=None)
@mock.patch.object(nova.api, 'nova')
def test_flavor_extra_specs(self, nc):
request = self.mock_rest_request()
nc.flavor_get_extras.return_value.to_dict.return_value = {'foo': '1'}
response = nova.FlavorExtraSpecs().get(request, "1")
self.assertStatusCode(response, 200)
nc.flavor_get_extras.assert_called_once_with(request, "1", raw=True)
| 40.106227
| 77
| 0.608823
|
import mock
from django.conf import settings
from openstack_dashboard.api.rest import nova
from openstack_dashboard.test import helpers as test
class NovaRestTestCase(test.TestCase):
@mock.patch.object(nova.api, 'nova')
def test_keypair_get(self, nc):
request = self.mock_rest_request()
nc.keypair_list.return_value = [
mock.Mock(**{'to_dict.return_value': {'id': 'one'}}),
mock.Mock(**{'to_dict.return_value': {'id': 'two'}}),
]
response = nova.Keypairs().get(request)
self.assertStatusCode(response, 200)
self.assertEqual(response.content,
'{"items": [{"id": "one"}, {"id": "two"}]}')
nc.keypair_list.assert_called_once_with(request)
@mock.patch.object(nova.api, 'nova')
def test_keypair_create(self, nc):
request = self.mock_rest_request(body='''{"name": "Ni!"}''')
new = nc.keypair_create.return_value
new.to_dict.return_value = {'name': 'Ni!', 'public_key': 'sekrit'}
new.name = 'Ni!'
with mock.patch.object(settings, 'DEBUG', True):
response = nova.Keypairs().post(request)
self.assertStatusCode(response, 201)
self.assertEqual(response.content,
'{"name": "Ni!", "public_key": "sekrit"}')
self.assertEqual(response['location'], '/api/nova/keypairs/Ni%21')
nc.keypair_create.assert_called_once_with(request, 'Ni!')
@mock.patch.object(nova.api, 'nova')
def test_keypair_import(self, nc):
request = self.mock_rest_request(body='''
{"name": "Ni!", "public_key": "hi"}
''')
new = nc.keypair_import.return_value
new.to_dict.return_value = {'name': 'Ni!', 'public_key': 'hi'}
new.name = 'Ni!'
with mock.patch.object(settings, 'DEBUG', True):
response = nova.Keypairs().post(request)
self.assertStatusCode(response, 201)
self.assertEqual(response.content,
'{"name": "Ni!", "public_key": "hi"}')
self.assertEqual(response['location'], '/api/nova/keypairs/Ni%21')
nc.keypair_import.assert_called_once_with(request, 'Ni!', 'hi')
def test_availzone_get_brief(self):
self._test_availzone_get(False)
def test_availzone_get_detailed(self):
self._test_availzone_get(True)
@mock.patch.object(nova.api, 'nova')
def _test_availzone_get(self, detail, nc):
if detail:
request = self.mock_rest_request(GET={'detailed': 'true'})
else:
request = self.mock_rest_request(GET={})
nc.availability_zone_list.return_value = [
mock.Mock(**{'to_dict.return_value': {'id': 'one'}}),
mock.Mock(**{'to_dict.return_value': {'id': 'two'}}),
]
response = nova.AvailabilityZones().get(request)
self.assertStatusCode(response, 200)
self.assertEqual(response.content,
'{"items": [{"id": "one"}, {"id": "two"}]}')
nc.availability_zone_list.assert_called_once_with(request, detail)
def test_limits_get_not_reserved(self):
self._test_limits_get(False)
def test_limits_get_reserved(self):
self._test_limits_get(True)
@mock.patch.object(nova.api, 'nova')
def _test_limits_get(self, reserved, nc):
if reserved:
request = self.mock_rest_request(GET={'reserved': 'true'})
else:
request = self.mock_rest_request(GET={})
nc.tenant_absolute_limits.return_value = {'id': 'one'}
response = nova.Limits().get(request)
self.assertStatusCode(response, 200)
nc.tenant_absolute_limits.assert_called_once_with(request, reserved)
self.assertEqual(response.content, '{"id": "one"}')
@mock.patch.object(nova.api, 'nova')
def test_server_create_missing(self, nc):
request = self.mock_rest_request(body='''{"name": "hi"}''')
response = nova.Servers().post(request)
self.assertStatusCode(response, 400)
self.assertEqual(response.content,
'"missing required parameter \'source_id\'"')
nc.server_create.assert_not_called()
@mock.patch.object(nova.api, 'nova')
def test_server_create_basic(self, nc):
request = self.mock_rest_request(body='''{"name": "Ni!",
"source_id": "image123", "flavor_id": "flavor123",
"key_name": "sekrit", "user_data": "base64 yes",
"security_groups": [{"name": "root"}]}
''')
new = nc.server_create.return_value
new.to_dict.return_value = {'id': 'server123'}
new.id = 'server123'
response = nova.Servers().post(request)
self.assertStatusCode(response, 201)
self.assertEqual(response.content, '{"id": "server123"}')
self.assertEqual(response['location'], '/api/nova/servers/server123')
nc.server_create.assert_called_once_with(
request, 'Ni!', 'image123', 'flavor123', 'sekrit', 'base64 yes',
[{'name': 'root'}]
)
@mock.patch.object(nova.api, 'nova')
def test_server_get_single(self, nc):
request = self.mock_rest_request()
nc.server_get.return_value.to_dict.return_value = {'name': '1'}
response = nova.Server().get(request, "1")
self.assertStatusCode(response, 200)
nc.server_get.assert_called_once_with(request, "1")
@mock.patch.object(nova.api, 'nova')
    def test_extension_list(self, nc):
request = self.mock_rest_request()
nc.list_extensions.return_value = [
mock.Mock(**{'to_dict.return_value': {'name': 'foo'}}),
mock.Mock(**{'to_dict.return_value': {'name': 'bar'}}),
]
response = nova.Extensions().get(request)
self.assertStatusCode(response, 200)
self.assertEqual(response.content,
'{"items": [{"name": "foo"}, {"name": "bar"}]}')
nc.list_extensions.assert_called_once_with(request)
def test_get_extras_no(self):
self._test_flavor_get_single(get_extras=False)
def test_get_extras_yes(self):
self._test_flavor_get_single(get_extras=True)
def test_get_extras_default(self):
self._test_flavor_get_single(get_extras=None)
@mock.patch.object(nova.api, 'nova')
def _test_flavor_get_single(self, nc, get_extras):
if get_extras:
request = self.mock_rest_request(GET={'get_extras': 'tRuE'})
elif get_extras is None:
request = self.mock_rest_request()
get_extras = False
else:
request = self.mock_rest_request(GET={'get_extras': 'fAlsE'})
nc.flavor_get.return_value.to_dict.return_value = {'name': '1'}
response = nova.Flavor().get(request, "1")
self.assertStatusCode(response, 200)
if get_extras:
self.assertEqual(response.content, '{"extras": {}, "name": "1"}')
else:
self.assertEqual(response.content, '{"name": "1"}')
nc.flavor_get.assert_called_once_with(request, "1",
get_extras=get_extras)
@mock.patch.object(nova.api, 'nova')
def _test_flavor_list_public(self, nc, is_public=None):
if is_public:
request = self.mock_rest_request(GET={'is_public': 'tRuE'})
elif is_public is None:
request = self.mock_rest_request(GET={})
else:
request = self.mock_rest_request(GET={'is_public': 'fAlsE'})
nc.flavor_list.return_value = [
mock.Mock(**{'to_dict.return_value': {'id': '1'}}),
mock.Mock(**{'to_dict.return_value': {'id': '2'}}),
]
response = nova.Flavors().get(request)
self.assertStatusCode(response, 200)
self.assertEqual(response.content,
'{"items": [{"id": "1"}, {"id": "2"}]}')
nc.flavor_list.assert_called_once_with(request, is_public=is_public,
get_extras=False)
def test_flavor_list_private(self):
self._test_flavor_list_public(is_public=False)
def test_flavor_list_public(self):
self._test_flavor_list_public(is_public=True)
def test_flavor_list_public_none(self):
self._test_flavor_list_public(is_public=None)
@mock.patch.object(nova.api, 'nova')
def _test_flavor_list_extras(self, nc, get_extras=None):
if get_extras:
request = self.mock_rest_request(GET={'get_extras': 'tRuE'})
elif get_extras is None:
request = self.mock_rest_request(GET={})
get_extras = False
else:
request = self.mock_rest_request(GET={'get_extras': 'fAlsE'})
nc.flavor_list.return_value = [
mock.Mock(**{'extras': {}, 'to_dict.return_value': {'id': '1'}}),
mock.Mock(**{'extras': {}, 'to_dict.return_value': {'id': '2'}}),
]
response = nova.Flavors().get(request)
self.assertStatusCode(response, 200)
if get_extras:
self.assertEqual(response.content,
'{"items": [{"extras": {}, "id": "1"}, '
'{"extras": {}, "id": "2"}]}')
else:
self.assertEqual(response.content,
'{"items": [{"id": "1"}, {"id": "2"}]}')
nc.flavor_list.assert_called_once_with(request, is_public=None,
get_extras=get_extras)
def test_flavor_list_extras_no(self):
self._test_flavor_list_extras(get_extras=False)
def test_flavor_list_extras_yes(self):
self._test_flavor_list_extras(get_extras=True)
def test_flavor_list_extras_absent(self):
self._test_flavor_list_extras(get_extras=None)
@mock.patch.object(nova.api, 'nova')
def test_flavor_extra_specs(self, nc):
request = self.mock_rest_request()
nc.flavor_get_extras.return_value.to_dict.return_value = {'foo': '1'}
response = nova.FlavorExtraSpecs().get(request, "1")
self.assertStatusCode(response, 200)
nc.flavor_get_extras.assert_called_once_with(request, "1", raw=True)
| true
| true
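One subtlety in the test class above: helpers such as _test_availzone_get(self, detail, nc) receive the patched nova mock as their last argument, because mock.patch appends its injected mock after whatever positional arguments the caller passed. A self-contained sketch, with Api and Demo as illustrative stand-ins:

import unittest.mock as mock

class Api:
    nova = "real"

class Demo:
    @mock.patch.object(Api, "nova")
    def helper(self, detail, nc):
        # called as self.helper(True): the mock lands in nc, after detail
        return detail, nc

d = Demo()
detail, nc = d.helper(True)
assert detail is True
assert isinstance(nc, mock.MagicMock)
assert Api.nova == "real"   # the patch is undone once helper returns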
|
f7035f37e23c97b58921b48200d5a78a5a0db7b7
| 1,270
|
py
|
Python
|
GatewayPlacement.py
|
vindem/eeplacement
|
11d6da56156ba29d8b2eaea657ed3df37110f666
|
[
"MIT"
] | null | null | null |
GatewayPlacement.py
|
vindem/eeplacement
|
11d6da56156ba29d8b2eaea657ed3df37110f666
|
[
"MIT"
] | null | null | null |
GatewayPlacement.py
|
vindem/eeplacement
|
11d6da56156ba29d8b2eaea657ed3df37110f666
|
[
"MIT"
] | null | null | null |
import IoTSensor
import LORAGateway
class GatewayPlacement:
def __init__(self, sensor_list):
self._sensor_list = sensor_list
self._gateway_list = []
def add_gateway(self, gateway):
self._gateway_list.append(gateway)
def remove_gateway(self, gateway):
self._gateway_list.remove(gateway)
def sensors_covered(self):
curr_placement_coverage = []
for g in self._gateway_list:
curr_gateway_coverage = g.get_coverage(self._sensor_list)
for s in curr_gateway_coverage:
if not s.get_id() in curr_placement_coverage:
curr_placement_coverage.append(s.get_id())
covers = True
for s in self._sensor_list:
if not s.get_id() in curr_placement_coverage:
covers = False
break
return covers
def energy_consumption(self, time):
energy = 0.0
for s in self._sensor_list:
energy = energy + s.get_total_consumption(time, s.get_closest_gateway(self._gateway_list))
for g in self._gateway_list:
energy = energy + g.get_energy_consumption(time)
return energy
def get_gateways_number(self):
return len(self._gateway_list)
| 28.863636
| 102
| 0.638583
|
import IoTSensor
import LORAGateway
class GatewayPlacement:
def __init__(self, sensor_list):
self._sensor_list = sensor_list
self._gateway_list = []
def add_gateway(self, gateway):
self._gateway_list.append(gateway)
def remove_gateway(self, gateway):
self._gateway_list.remove(gateway)
def sensors_covered(self):
curr_placement_coverage = []
for g in self._gateway_list:
curr_gateway_coverage = g.get_coverage(self._sensor_list)
for s in curr_gateway_coverage:
if not s.get_id() in curr_placement_coverage:
curr_placement_coverage.append(s.get_id())
covers = True
for s in self._sensor_list:
if not s.get_id() in curr_placement_coverage:
covers = False
break
return covers
def energy_consumption(self, time):
energy = 0.0
for s in self._sensor_list:
energy = energy + s.get_total_consumption(time, s.get_closest_gateway(self._gateway_list))
for g in self._gateway_list:
energy = energy + g.get_energy_consumption(time)
return energy
def get_gateways_number(self):
return len(self._gateway_list)
| true
| true
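The IoTSensor and LORAGateway modules imported above are not part of this dump, so here is a hedged sketch of how GatewayPlacement might be exercised, with duck-typed stubs whose names, geometry, and energy figures are all illustrative assumptions (it presumes the GatewayPlacement class body above is available; its two module imports are never referenced by the class logic and can be dropped):

class StubSensor:
    def __init__(self, sid, x):
        self._id, self._x = sid, x
    def get_id(self):
        return self._id
    def get_total_consumption(self, time, gateway):
        return 0.5 * time                     # flat draw, purely for the sketch
    def get_closest_gateway(self, gateways):
        return min(gateways, key=lambda g: abs(g.x - self._x))

class StubGateway:
    def __init__(self, x, radius):
        self.x, self.radius = x, radius
    def get_coverage(self, sensors):
        return [s for s in sensors if abs(s._x - self.x) <= self.radius]
    def get_energy_consumption(self, time):
        return 2.0 * time

sensors = [StubSensor(i, float(i)) for i in range(5)]
placement = GatewayPlacement(sensors)         # class from the file above
placement.add_gateway(StubGateway(x=2.0, radius=3.0))
assert placement.sensors_covered()            # one gateway reaches all five
print(placement.energy_consumption(time=10.0))  # 5 * 5.0 + 20.0 = 45.0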
|
f7035f4a31b4a9cb5b6bc7dac024872f9c49f775
| 7,873
|
py
|
Python
|
tcfl/target_ext_tunnel.py
|
sriiora/tcf
|
e607ce04f97dbb4910d94428c0600a6a7145a825
|
[
"Apache-2.0"
] | null | null | null |
tcfl/target_ext_tunnel.py
|
sriiora/tcf
|
e607ce04f97dbb4910d94428c0600a6a7145a825
|
[
"Apache-2.0"
] | null | null | null |
tcfl/target_ext_tunnel.py
|
sriiora/tcf
|
e607ce04f97dbb4910d94428c0600a6a7145a825
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/python3
#
# Copyright (c) 2017 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
"""
Create and remove network tunnels to the target via the server
--------------------------------------------------------------
"""
from . import tc
from . import ttb_client
class tunnel(tc.target_extension_c):
"""
Extension to :py:class:`tcfl.tc.target_c` to create IP tunnels to
targets with IP connectivity.
Use by indicating a default IP address to use for interconnect
*ic* or explicitly indicating it in the :meth:`add` function:
>>> target.tunnel.ip_addr = target.addr_get(ic, "ipv4")
>>> target.tunnel.add(PORT)
>>> target.tunnel.remove(PORT)
>>> target.tunnel.list()
Note that for tunnels to work, the target has to be acquired and
    IP has to be up on it, which might require it to be connected to
some IP network (it can be a TCF interconnect or any other
network).
"""
def __init__(self, target):
self.target = target
# Tunnels can always be added, even the target is not in an
# interconnect
self.ip_addr = None
def _ip_addr_get(self, ip_addr):
# FIXME: this shall validate the IP address using python-ipaddress
if ip_addr:
return ip_addr
if self.ip_addr:
return self.ip_addr
ip_addr = self.target.rt.get(
'ipv4_addr', self.target.rt.get('ipv6_addr', None))
if ip_addr:
return ip_addr
raise RuntimeError(
"Cannot identify any IPv4 or IPv6 address to use; "
"please set it in "
"`TARGET.tunnel.ip_addr = TARGET.addr_get(ic, \"ipv4\")` "
"or pass it explicitly")
def add(self, port, ip_addr = None, proto = None):
"""
        Set up a TCP/UDP/SCTP v4 or v6 tunnel to the target
        A local port of the given protocol in the server is forwarded
        to the target's port; tear it down with :meth:`remove`.
If the tunnel already exists, it is not recreated, but the
port it uses is returned.
        Redirects the target's TCP4 port 3000 to server_port in the server
that provides ``target`` (target.kws['server']).
>>> server_name = target.rtb.parsed_url.hostname
>>> server_port = target.tunnel.add(3000)
Now connecting to ``server_name:server_port`` takes you to the
target's port 3000.
:param int port: port to redirect to
:param str ip_addr: (optional) target's IP address to use (it
          must be listed in the target's tags *ipv4_address* or
*ipv6_address*).
:param str proto: (optional) Protocol to tunnel:
{udp,sctp,tcp}[{4,6}] (defaults to v4 and to TCP)
:returns int local_port: port in the server where to connect
to in order to access the target.
"""
if proto == None:
proto = 'tcp'
else:
assert isinstance(proto, str)
assert isinstance(port, int)
target = self.target
ip_addr = self._ip_addr_get(ip_addr)
r = target.rtb.rest_tb_target_ip_tunnel_add(
target.rt, ip_addr, port, proto, ticket = target.ticket)
self.target.report_info("%s tunnel added from %s:%d to %s:%d"
% (proto, target.rtb.parsed_url.hostname, r,
ip_addr, port))
return r
def remove(self, port, ip_addr = None, proto = None):
"""
        Tear down a TCP/UDP/SCTP v4 or v6 tunnel to the target
previously created with :meth:`add`.
:param int port: port to redirect to
:param str ip_addr: (optional) target's IP address to use (it
          must be listed in the target's tags *ipv4_address* or
*ipv6_address*).
:param str proto: (optional) Protocol to tunnel:
{udp,sctp,tcp}[{4,6}] (defaults to v4 and to TCP)
"""
if proto == None:
proto = 'tcp'
else:
assert isinstance(proto, str)
assert isinstance(port, int)
ip_addr = self._ip_addr_get(ip_addr)
target = self.target
target.rtb.rest_tb_target_ip_tunnel_remove(
target.rt, ip_addr, port, proto, ticket = target.ticket)
def list(self):
"""
List existing IP tunnels
:returns: list of tuples (protocol, target-ip-address, port,
port-in-server)
"""
target = self.target
return target.rtb.rest_tb_target_ip_tunnel_list(target.rt,
ticket = target.ticket)
# FIXME: work out tcf creating target_c instances, so it is easier to
# automate creating cmdline wrappers
def cmdline_tunnel_add(args):
rtb, rt = ttb_client._rest_target_find_by_id(args.target)
port = rtb.rest_tb_target_ip_tunnel_add(rt, args.ip_addr,
args.port, args.protocol,
ticket = args.ticket)
print("%s:%d" % (rtb.parsed_url.hostname, port))
def cmdline_tunnel_remove(args):
rtb, rt = ttb_client._rest_target_find_by_id(args.target)
rtb.rest_tb_target_ip_tunnel_remove(rt, args.ip_addr,
args.port, args.protocol,
ticket = args.ticket)
def cmdline_tunnel_list(args):
rtb, rt = ttb_client._rest_target_find_by_id(args.target)
tunnels = rtb.rest_tb_target_ip_tunnel_list(rt, ticket = args.ticket)
for tunnel in tunnels:
print("%s %s:%s %s:%s" % (tunnel[0],
rtb.parsed_url.hostname, tunnel[3],
tunnel[1], tunnel[2]))
def cmdline_setup(argsp):
ap = argsp.add_parser("tunnel-add", help = "create an IP tunnel")
ap.add_argument("target", metavar = "TARGET", action = "store", type = str,
default = None, help = "Target's name or URL")
ap.add_argument("port", metavar = "PORT", action = "store", type = int,
help = "Port to tunnel to")
ap.add_argument("protocol", metavar = "PROTOCOL", action = "store",
nargs = "?", default = None, type = str,
help = "Protocol to tunnel {tcp,udp,sctp}[{4,6}] "
"(defaults to tcp and to IPv4)")
ap.add_argument("ip_addr", metavar = "IP-ADDR", action = "store",
nargs = "?", default = None, type = str,
help = "target's IP address to tunnel to "
"(default is the first IP address the target declares)")
ap.set_defaults(func = cmdline_tunnel_add)
ap = argsp.add_parser("tunnel-remove",
help = "remove an existing IP tunnel")
ap.add_argument("target", metavar = "TARGET", action = "store", type = str,
default = None, help = "Target's name or URL")
ap.add_argument("port", metavar = "PORT", action = "store",
help = "Port to tunnel to")
ap.add_argument("protocol", metavar = "PROTOCOL", action = "store",
nargs = "?", default = None,
help = "Protocol to tunnel {tcp,udp,sctp}[{4,6}] "
"(defaults to tcp and to IPv4)")
ap.add_argument("ip_addr", metavar = "IP-ADDR", action = "store",
nargs = "?", default = None,
help = "target's IP address to tunnel to "
"(default is the first IP address the target declares)")
ap.set_defaults(func = cmdline_tunnel_remove)
ap = argsp.add_parser("tunnel-list", help = "List existing IP tunnels")
ap.add_argument("target", metavar = "TARGET", action = "store", type = str,
default = None, help = "Target's name or URL")
ap.set_defaults(func = cmdline_tunnel_list)
| 40.374359
| 79
| 0.579322
|
from . import tc
from . import ttb_client
class tunnel(tc.target_extension_c):
def __init__(self, target):
self.target = target
self.ip_addr = None
def _ip_addr_get(self, ip_addr):
if ip_addr:
return ip_addr
if self.ip_addr:
return self.ip_addr
ip_addr = self.target.rt.get(
'ipv4_addr', self.target.rt.get('ipv6_addr', None))
if ip_addr:
return ip_addr
raise RuntimeError(
"Cannot identify any IPv4 or IPv6 address to use; "
"please set it in "
"`TARGET.tunnel.ip_addr = TARGET.addr_get(ic, \"ipv4\")` "
"or pass it explicitly")
def add(self, port, ip_addr = None, proto = None):
if proto == None:
proto = 'tcp'
else:
assert isinstance(proto, str)
assert isinstance(port, int)
target = self.target
ip_addr = self._ip_addr_get(ip_addr)
r = target.rtb.rest_tb_target_ip_tunnel_add(
target.rt, ip_addr, port, proto, ticket = target.ticket)
self.target.report_info("%s tunnel added from %s:%d to %s:%d"
% (proto, target.rtb.parsed_url.hostname, r,
ip_addr, port))
return r
def remove(self, port, ip_addr = None, proto = None):
if proto == None:
proto = 'tcp'
else:
assert isinstance(proto, str)
assert isinstance(port, int)
ip_addr = self._ip_addr_get(ip_addr)
target = self.target
target.rtb.rest_tb_target_ip_tunnel_remove(
target.rt, ip_addr, port, proto, ticket = target.ticket)
def list(self):
target = self.target
return target.rtb.rest_tb_target_ip_tunnel_list(target.rt,
ticket = target.ticket)
def cmdline_tunnel_add(args):
rtb, rt = ttb_client._rest_target_find_by_id(args.target)
port = rtb.rest_tb_target_ip_tunnel_add(rt, args.ip_addr,
args.port, args.protocol,
ticket = args.ticket)
print("%s:%d" % (rtb.parsed_url.hostname, port))
def cmdline_tunnel_remove(args):
rtb, rt = ttb_client._rest_target_find_by_id(args.target)
rtb.rest_tb_target_ip_tunnel_remove(rt, args.ip_addr,
args.port, args.protocol,
ticket = args.ticket)
def cmdline_tunnel_list(args):
rtb, rt = ttb_client._rest_target_find_by_id(args.target)
tunnels = rtb.rest_tb_target_ip_tunnel_list(rt, ticket = args.ticket)
for tunnel in tunnels:
print("%s %s:%s %s:%s" % (tunnel[0],
rtb.parsed_url.hostname, tunnel[3],
tunnel[1], tunnel[2]))
def cmdline_setup(argsp):
ap = argsp.add_parser("tunnel-add", help = "create an IP tunnel")
ap.add_argument("target", metavar = "TARGET", action = "store", type = str,
default = None, help = "Target's name or URL")
ap.add_argument("port", metavar = "PORT", action = "store", type = int,
help = "Port to tunnel to")
ap.add_argument("protocol", metavar = "PROTOCOL", action = "store",
nargs = "?", default = None, type = str,
help = "Protocol to tunnel {tcp,udp,sctp}[{4,6}] "
"(defaults to tcp and to IPv4)")
ap.add_argument("ip_addr", metavar = "IP-ADDR", action = "store",
nargs = "?", default = None, type = str,
help = "target's IP address to tunnel to "
"(default is the first IP address the target declares)")
ap.set_defaults(func = cmdline_tunnel_add)
ap = argsp.add_parser("tunnel-remove",
help = "remove an existing IP tunnel")
ap.add_argument("target", metavar = "TARGET", action = "store", type = str,
default = None, help = "Target's name or URL")
ap.add_argument("port", metavar = "PORT", action = "store",
help = "Port to tunnel to")
ap.add_argument("protocol", metavar = "PROTOCOL", action = "store",
nargs = "?", default = None,
help = "Protocol to tunnel {tcp,udp,sctp}[{4,6}] "
"(defaults to tcp and to IPv4)")
ap.add_argument("ip_addr", metavar = "IP-ADDR", action = "store",
nargs = "?", default = None,
help = "target's IP address to tunnel to "
"(default is the first IP address the target declares)")
ap.set_defaults(func = cmdline_tunnel_remove)
ap = argsp.add_parser("tunnel-list", help = "List existing IP tunnels")
ap.add_argument("target", metavar = "TARGET", action = "store", type = str,
default = None, help = "Target's name or URL")
ap.set_defaults(func = cmdline_tunnel_list)
| true
| true
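cmdline_setup() expects an argparse sub-parsers object; a hedged sketch of the wiring (the "tcf" program name is illustrative, and the real CLI also defines global options such as --ticket that the handlers read, which this sketch omits):

import argparse

parser = argparse.ArgumentParser(prog="tcf")
subparsers = parser.add_subparsers(dest="command")
cmdline_setup(subparsers)   # registers tunnel-add, tunnel-remove, tunnel-list

args = parser.parse_args(["tunnel-add", "mytarget", "3000", "tcp4"])
print(args.target, args.port, args.protocol)    # mytarget 3000 tcp4
# Each sub-command stored its handler with set_defaults(func=...), so a real
# CLI would now dispatch with args.func(args), reaching cmdline_tunnel_add().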
|
f7035f91612f64318ed215c8124c2133a57e85cd
| 1,574
|
py
|
Python
|
11-Numpy Basic Statistics.py
|
mnabavi84/dcamp-intro-python
|
218b67106061d45cfa18a1b1d46487900f9aa539
|
[
"MIT"
] | null | null | null |
11-Numpy Basic Statistics.py
|
mnabavi84/dcamp-intro-python
|
218b67106061d45cfa18a1b1d46487900f9aa539
|
[
"MIT"
] | null | null | null |
11-Numpy Basic Statistics.py
|
mnabavi84/dcamp-intro-python
|
218b67106061d45cfa18a1b1d46487900f9aa539
|
[
"MIT"
] | null | null | null |
# np_baseball is available
# Import numpy
import numpy as np
# Create np_height_in from np_baseball
np_height_in = np_baseball[:,0]
# Print out the mean of np_height_in
print(np.mean(np_height_in))
# Print out the median of np_height_in
print(np.median(np_height_in))
# np_baseball is available
# Import numpy
import numpy as np
# Print mean height (first column)
avg = np.mean(np_baseball[:,0])
print("Average: " + str(avg))
# Print median height. Replace 'None'
med = np.median(np_baseball[:,0])
print("Median: " + str(med))
# Print out the standard deviation on height. Replace 'None'
stddev = np.std(np_baseball[:,0])
print("Standard Deviation: " + str(stddev))
# Print out correlation between first and second column. Replace 'None'
corr = np.corrcoef(np_baseball[:,0], np_baseball[:,1])
print("Correlation: " + str(corr))
# heights and positions are available as lists
# Import numpy
import numpy as np
# Convert positions and heights to numpy arrays: np_positions, np_heights
np_positions = np.array(positions)
np_heights = np.array(heights)
# Heights of the goalkeepers: gk_heights
gk_heights = np_heights[np_positions == 'GK']
# Heights of the other players: other_heights
other_heights = np_heights[np_positions != 'GK']
# Print out the median height of goalkeepers. Replace 'None'
print("Median height of goalkeepers: " + str(np.median(gk_heights)))
# Print out the median height of other players. Replace 'None'
print("Median height of other players: " + str(np.median(other_heights)))
| 27.137931
| 74
| 0.719187
|
import numpy as np
np_height_in = np_baseball[:,0]
print(np.mean(np_height_in))
print(np.median(np_height_in))
import numpy as np
avg = np.mean(np_baseball[:,0])
print("Average: " + str(avg))
med = np.median(np_baseball[:,0])
print("Median: " + str(med))
stddev = np.std(np_baseball[:,0])
print("Standard Deviation: " + str(stddev))
corr = np.corrcoef(np_baseball[:,0], np_baseball[:,1])
print("Correlation: " + str(corr))
import numpy as np
np_positions = np.array(positions)
np_heights = np.array(heights)
gk_heights = np_heights[np_positions == 'GK']
other_heights = np_heights[np_positions != 'GK']
print("Median height of goalkeepers: " + str(np.median(gk_heights)))
print("Median height of other players: " + str(np.median(other_heights)))
| true
| true
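The exercises above assume a pre-loaded np_baseball array; a runnable stand-in with synthetic heights (column 0, inches) and weights (column 1, pounds), so the same statistics can be reproduced end to end:

import numpy as np

rng = np.random.default_rng(0)
heights = rng.normal(73, 2, size=1000)
weights = 4.2 * heights + rng.normal(0, 10, size=1000)
np_baseball = np.column_stack([heights, weights])

print("Average: " + str(np.mean(np_baseball[:, 0])))
print("Median: " + str(np.median(np_baseball[:, 0])))
print("Standard Deviation: " + str(np.std(np_baseball[:, 0])))
# corrcoef returns the full 2x2 correlation matrix; element [0, 1] is r
print("Correlation: " + str(np.corrcoef(np_baseball[:, 0], np_baseball[:, 1])[0, 1]))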
|
f7035fb6f14b4a4c5b91bd1b9f6eb9aa52b46bd1
| 8,853
|
py
|
Python
|
InvenTree/InvenTree/urls.py
|
carlos-riquelme/InvenTree
|
724dd2a9c82e4c10e14bd6aba8f48553b183fef9
|
[
"MIT"
] | 5
|
2020-08-14T06:18:57.000Z
|
2022-01-11T01:22:56.000Z
|
InvenTree/InvenTree/urls.py
|
carlos-riquelme/InvenTree
|
724dd2a9c82e4c10e14bd6aba8f48553b183fef9
|
[
"MIT"
] | 27
|
2021-04-12T22:05:39.000Z
|
2022-03-13T20:33:54.000Z
|
InvenTree/InvenTree/urls.py
|
carlos-riquelme/InvenTree
|
724dd2a9c82e4c10e14bd6aba8f48553b183fef9
|
[
"MIT"
] | 1
|
2021-12-22T12:22:21.000Z
|
2021-12-22T12:22:21.000Z
|
"""
Top-level URL lookup for InvenTree application.
Passes URL lookup downstream to each app as required.
"""
from django.conf.urls import url, include
from django.urls import path
from django.contrib import admin
from company.urls import company_urls
from company.urls import manufacturer_part_urls
from company.urls import supplier_part_urls
from common.urls import common_urls
from part.urls import part_urls
from stock.urls import stock_urls
from build.urls import build_urls
from order.urls import order_urls
from plugin.urls import get_plugin_urls
from barcodes.api import barcode_api_urls
from common.api import common_api_urls
from part.api import part_api_urls, bom_api_urls
from company.api import company_api_urls
from stock.api import stock_api_urls
from build.api import build_api_urls
from order.api import order_api_urls
from label.api import label_api_urls
from report.api import report_api_urls
from plugin.api import plugin_api_urls
from django.conf import settings
from django.conf.urls.static import static
from django.views.generic.base import RedirectView
from rest_framework.documentation import include_docs_urls
from .views import auth_request
from .views import IndexView, SearchView, DatabaseStatsView
from .views import SettingsView, EditUserView, SetPasswordView, CustomEmailView, CustomConnectionsView, CustomPasswordResetFromKeyView
from .views import CustomSessionDeleteView, CustomSessionDeleteOtherView
from .views import CurrencyRefreshView
from .views import AppearanceSelectView, SettingCategorySelectView
from .views import DynamicJsView
from .api import InfoView, NotFoundView
from .api import ActionPluginView
from users.api import user_urls
admin.site.site_header = "InvenTree Admin"
apipatterns = [
url(r'^barcode/', include(barcode_api_urls)),
url(r'^settings/', include(common_api_urls)),
url(r'^part/', include(part_api_urls)),
url(r'^bom/', include(bom_api_urls)),
url(r'^company/', include(company_api_urls)),
url(r'^stock/', include(stock_api_urls)),
url(r'^build/', include(build_api_urls)),
url(r'^order/', include(order_api_urls)),
url(r'^label/', include(label_api_urls)),
url(r'^report/', include(report_api_urls)),
url(r'^plugin/', include(plugin_api_urls)),
# User URLs
url(r'^user/', include(user_urls)),
# Plugin endpoints
url(r'^action/', ActionPluginView.as_view(), name='api-action-plugin'),
# InvenTree information endpoint
url(r'^$', InfoView.as_view(), name='api-inventree-info'),
# Unknown endpoint
url(r'^.*$', NotFoundView.as_view(), name='api-404'),
]
settings_urls = [
url(r'^i18n/?', include('django.conf.urls.i18n')),
url(r'^appearance/?', AppearanceSelectView.as_view(), name='settings-appearance'),
url(r'^currencies-refresh/', CurrencyRefreshView.as_view(), name='settings-currencies-refresh'),
url(r'^category/', SettingCategorySelectView.as_view(), name='settings-category'),
# Catch any other urls
url(r'^.*$', SettingsView.as_view(template_name='InvenTree/settings/settings.html'), name='settings'),
]
# These javascript files are served "dynamically" - i.e. rendered on demand
dynamic_javascript_urls = [
url(r'^calendar.js', DynamicJsView.as_view(template_name='js/dynamic/calendar.js'), name='calendar.js'),
url(r'^nav.js', DynamicJsView.as_view(template_name='js/dynamic/nav.js'), name='nav.js'),
url(r'^settings.js', DynamicJsView.as_view(template_name='js/dynamic/settings.js'), name='settings.js'),
]
# These javascript files are passed through the Django translation layer
translated_javascript_urls = [
url(r'^api.js', DynamicJsView.as_view(template_name='js/translated/api.js'), name='api.js'),
url(r'^attachment.js', DynamicJsView.as_view(template_name='js/translated/attachment.js'), name='attachment.js'),
url(r'^barcode.js', DynamicJsView.as_view(template_name='js/translated/barcode.js'), name='barcode.js'),
url(r'^bom.js', DynamicJsView.as_view(template_name='js/translated/bom.js'), name='bom.js'),
url(r'^build.js', DynamicJsView.as_view(template_name='js/translated/build.js'), name='build.js'),
url(r'^company.js', DynamicJsView.as_view(template_name='js/translated/company.js'), name='company.js'),
url(r'^filters.js', DynamicJsView.as_view(template_name='js/translated/filters.js'), name='filters.js'),
url(r'^forms.js', DynamicJsView.as_view(template_name='js/translated/forms.js'), name='forms.js'),
url(r'^helpers.js', DynamicJsView.as_view(template_name='js/translated/helpers.js'), name='helpers.js'),
url(r'^label.js', DynamicJsView.as_view(template_name='js/translated/label.js'), name='label.js'),
url(r'^model_renderers.js', DynamicJsView.as_view(template_name='js/translated/model_renderers.js'), name='model_renderers.js'),
url(r'^modals.js', DynamicJsView.as_view(template_name='js/translated/modals.js'), name='modals.js'),
url(r'^order.js', DynamicJsView.as_view(template_name='js/translated/order.js'), name='order.js'),
url(r'^part.js', DynamicJsView.as_view(template_name='js/translated/part.js'), name='part.js'),
url(r'^report.js', DynamicJsView.as_view(template_name='js/translated/report.js'), name='report.js'),
url(r'^stock.js', DynamicJsView.as_view(template_name='js/translated/stock.js'), name='stock.js'),
url(r'^plugin.js', DynamicJsView.as_view(template_name='js/translated/plugin.js'), name='plugin.js'),
url(r'^tables.js', DynamicJsView.as_view(template_name='js/translated/tables.js'), name='tables.js'),
url(r'^table_filters.js', DynamicJsView.as_view(template_name='js/translated/table_filters.js'), name='table_filters.js'),
]
backendpatterns = [
# "Dynamic" javascript files which are rendered using InvenTree templating.
url(r'^js/dynamic/', include(dynamic_javascript_urls)),
url(r'^js/i18n/', include(translated_javascript_urls)),
url(r'^auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^auth/?', auth_request),
url(r'^api/', include(apipatterns)),
url(r'^api-doc/', include_docs_urls(title='InvenTree API')),
# 3rd party endpoints
url(r'^markdownx/', include('markdownx.urls')),
]
frontendpatterns = [
url(r'^part/', include(part_urls)),
url(r'^manufacturer-part/', include(manufacturer_part_urls)),
url(r'^supplier-part/', include(supplier_part_urls)),
url(r'^common/', include(common_urls)),
url(r'^stock/', include(stock_urls)),
url(r'^company/', include(company_urls)),
url(r'^order/', include(order_urls)),
url(r'^build/', include(build_urls)),
url(r'^settings/', include(settings_urls)),
url(r'^edit-user/', EditUserView.as_view(), name='edit-user'),
url(r'^set-password/', SetPasswordView.as_view(), name='set-password'),
url(r'^index/', IndexView.as_view(), name='index'),
url(r'^search/', SearchView.as_view(), name='search'),
url(r'^stats/', DatabaseStatsView.as_view(), name='stats'),
# plugin urls
    get_plugin_urls(),  # appends currently loaded plugin urls
# admin sites
url(r'^admin/error_log/', include('error_report.urls')),
url(r'^admin/shell/', include('django_admin_shell.urls')),
url(r'^admin/', admin.site.urls, name='inventree-admin'),
# DB user sessions
url(r'^accounts/sessions/other/delete/$', view=CustomSessionDeleteOtherView.as_view(), name='session_delete_other', ),
url(r'^accounts/sessions/(?P<pk>\w+)/delete/$', view=CustomSessionDeleteView.as_view(), name='session_delete', ),
# Single Sign On / allauth
# overrides of urlpatterns
url(r'^accounts/email/', CustomEmailView.as_view(), name='account_email'),
url(r'^accounts/social/connections/', CustomConnectionsView.as_view(), name='socialaccount_connections'),
url(r"^accounts/password/reset/key/(?P<uidb36>[0-9A-Za-z]+)-(?P<key>.+)/$", CustomPasswordResetFromKeyView.as_view(), name="account_reset_password_from_key"),
url(r'^accounts/', include('allauth_2fa.urls')), # MFA support
url(r'^accounts/', include('allauth.urls')), # included urlpatterns
]
urlpatterns = [
url('', include(frontendpatterns)),
url('', include(backendpatterns)),
]
# Server running in "DEBUG" mode?
if settings.DEBUG:
# Static file access
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
# Media file access
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# Debug toolbar access (only allowed in DEBUG mode)
if 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [
path('__debug/', include(debug_toolbar.urls)),
] + urlpatterns
# Send any unknown URLs to the index page
urlpatterns += [url(r'^.*$', RedirectView.as_view(url='/index/', permanent=False), name='index')]
| 43.185366
| 162
| 0.726872
|
from django.conf.urls import url, include
from django.urls import path
from django.contrib import admin
from company.urls import company_urls
from company.urls import manufacturer_part_urls
from company.urls import supplier_part_urls
from common.urls import common_urls
from part.urls import part_urls
from stock.urls import stock_urls
from build.urls import build_urls
from order.urls import order_urls
from plugin.urls import get_plugin_urls
from barcodes.api import barcode_api_urls
from common.api import common_api_urls
from part.api import part_api_urls, bom_api_urls
from company.api import company_api_urls
from stock.api import stock_api_urls
from build.api import build_api_urls
from order.api import order_api_urls
from label.api import label_api_urls
from report.api import report_api_urls
from plugin.api import plugin_api_urls
from django.conf import settings
from django.conf.urls.static import static
from django.views.generic.base import RedirectView
from rest_framework.documentation import include_docs_urls
from .views import auth_request
from .views import IndexView, SearchView, DatabaseStatsView
from .views import SettingsView, EditUserView, SetPasswordView, CustomEmailView, CustomConnectionsView, CustomPasswordResetFromKeyView
from .views import CustomSessionDeleteView, CustomSessionDeleteOtherView
from .views import CurrencyRefreshView
from .views import AppearanceSelectView, SettingCategorySelectView
from .views import DynamicJsView
from .api import InfoView, NotFoundView
from .api import ActionPluginView
from users.api import user_urls
admin.site.site_header = "InvenTree Admin"
apipatterns = [
url(r'^barcode/', include(barcode_api_urls)),
url(r'^settings/', include(common_api_urls)),
url(r'^part/', include(part_api_urls)),
url(r'^bom/', include(bom_api_urls)),
url(r'^company/', include(company_api_urls)),
url(r'^stock/', include(stock_api_urls)),
url(r'^build/', include(build_api_urls)),
url(r'^order/', include(order_api_urls)),
url(r'^label/', include(label_api_urls)),
url(r'^report/', include(report_api_urls)),
url(r'^plugin/', include(plugin_api_urls)),
url(r'^user/', include(user_urls)),
url(r'^action/', ActionPluginView.as_view(), name='api-action-plugin'),
url(r'^$', InfoView.as_view(), name='api-inventree-info'),
url(r'^.*$', NotFoundView.as_view(), name='api-404'),
]
settings_urls = [
url(r'^i18n/?', include('django.conf.urls.i18n')),
url(r'^appearance/?', AppearanceSelectView.as_view(), name='settings-appearance'),
url(r'^currencies-refresh/', CurrencyRefreshView.as_view(), name='settings-currencies-refresh'),
url(r'^category/', SettingCategorySelectView.as_view(), name='settings-category'),
url(r'^.*$', SettingsView.as_view(template_name='InvenTree/settings/settings.html'), name='settings'),
]
dynamic_javascript_urls = [
url(r'^calendar.js', DynamicJsView.as_view(template_name='js/dynamic/calendar.js'), name='calendar.js'),
url(r'^nav.js', DynamicJsView.as_view(template_name='js/dynamic/nav.js'), name='nav.js'),
url(r'^settings.js', DynamicJsView.as_view(template_name='js/dynamic/settings.js'), name='settings.js'),
]
translated_javascript_urls = [
url(r'^api.js', DynamicJsView.as_view(template_name='js/translated/api.js'), name='api.js'),
url(r'^attachment.js', DynamicJsView.as_view(template_name='js/translated/attachment.js'), name='attachment.js'),
url(r'^barcode.js', DynamicJsView.as_view(template_name='js/translated/barcode.js'), name='barcode.js'),
url(r'^bom.js', DynamicJsView.as_view(template_name='js/translated/bom.js'), name='bom.js'),
url(r'^build.js', DynamicJsView.as_view(template_name='js/translated/build.js'), name='build.js'),
url(r'^company.js', DynamicJsView.as_view(template_name='js/translated/company.js'), name='company.js'),
url(r'^filters.js', DynamicJsView.as_view(template_name='js/translated/filters.js'), name='filters.js'),
url(r'^forms.js', DynamicJsView.as_view(template_name='js/translated/forms.js'), name='forms.js'),
url(r'^helpers.js', DynamicJsView.as_view(template_name='js/translated/helpers.js'), name='helpers.js'),
url(r'^label.js', DynamicJsView.as_view(template_name='js/translated/label.js'), name='label.js'),
url(r'^model_renderers.js', DynamicJsView.as_view(template_name='js/translated/model_renderers.js'), name='model_renderers.js'),
url(r'^modals.js', DynamicJsView.as_view(template_name='js/translated/modals.js'), name='modals.js'),
url(r'^order.js', DynamicJsView.as_view(template_name='js/translated/order.js'), name='order.js'),
url(r'^part.js', DynamicJsView.as_view(template_name='js/translated/part.js'), name='part.js'),
url(r'^report.js', DynamicJsView.as_view(template_name='js/translated/report.js'), name='report.js'),
url(r'^stock.js', DynamicJsView.as_view(template_name='js/translated/stock.js'), name='stock.js'),
url(r'^plugin.js', DynamicJsView.as_view(template_name='js/translated/plugin.js'), name='plugin.js'),
url(r'^tables.js', DynamicJsView.as_view(template_name='js/translated/tables.js'), name='tables.js'),
url(r'^table_filters.js', DynamicJsView.as_view(template_name='js/translated/table_filters.js'), name='table_filters.js'),
]
backendpatterns = [
url(r'^js/dynamic/', include(dynamic_javascript_urls)),
url(r'^js/i18n/', include(translated_javascript_urls)),
url(r'^auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^auth/?', auth_request),
url(r'^api/', include(apipatterns)),
url(r'^api-doc/', include_docs_urls(title='InvenTree API')),
url(r'^markdownx/', include('markdownx.urls')),
]
frontendpatterns = [
url(r'^part/', include(part_urls)),
url(r'^manufacturer-part/', include(manufacturer_part_urls)),
url(r'^supplier-part/', include(supplier_part_urls)),
url(r'^common/', include(common_urls)),
url(r'^stock/', include(stock_urls)),
url(r'^company/', include(company_urls)),
url(r'^order/', include(order_urls)),
url(r'^build/', include(build_urls)),
url(r'^settings/', include(settings_urls)),
url(r'^edit-user/', EditUserView.as_view(), name='edit-user'),
url(r'^set-password/', SetPasswordView.as_view(), name='set-password'),
url(r'^index/', IndexView.as_view(), name='index'),
url(r'^search/', SearchView.as_view(), name='search'),
url(r'^stats/', DatabaseStatsView.as_view(), name='stats'),
get_plugin_urls(),
url(r'^admin/error_log/', include('error_report.urls')),
url(r'^admin/shell/', include('django_admin_shell.urls')),
url(r'^admin/', admin.site.urls, name='inventree-admin'),
url(r'^accounts/sessions/other/delete/$', view=CustomSessionDeleteOtherView.as_view(), name='session_delete_other', ),
url(r'^accounts/sessions/(?P<pk>\w+)/delete/$', view=CustomSessionDeleteView.as_view(), name='session_delete', ),
url(r'^accounts/email/', CustomEmailView.as_view(), name='account_email'),
url(r'^accounts/social/connections/', CustomConnectionsView.as_view(), name='socialaccount_connections'),
url(r"^accounts/password/reset/key/(?P<uidb36>[0-9A-Za-z]+)-(?P<key>.+)/$", CustomPasswordResetFromKeyView.as_view(), name="account_reset_password_from_key"),
    url(r'^accounts/', include('allauth_2fa.urls')),
    url(r'^accounts/', include('allauth.urls')),
]
urlpatterns = [
url('', include(frontendpatterns)),
url('', include(backendpatterns)),
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [
path('__debug/', include(debug_toolbar.urls)),
] + urlpatterns
urlpatterns += [url(r'^.*$', RedirectView.as_view(url='/index/', permanent=False), name='index')]
| true
| true
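Both settings_urls and apipatterns above end with a catch-all r'^.*$' entry; Django tries urlpatterns top-down and stops at the first match, so a catch-all only works when placed last. A minimal illustration with hypothetical views, assuming it sits inside a configured Django project of the same vintage (django.conf.urls.url was removed in Django 4.0):

from django.conf.urls import url
from django.http import HttpResponse

def category_view(request):        # hypothetical stand-in views
    return HttpResponse("category")

def fallback_view(request):
    return HttpResponse("settings")

urlpatterns = [
    url(r'^category/', category_view),   # /category/... matches here first
    url(r'^.*$', fallback_view),         # anything else falls through to this
]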
|
f7035fc0d709e7e2bbf5842adca7c69a98bf0b3f
| 3,450
|
py
|
Python
|
experiments/mnist_simple/class_imbalance.py
|
kili-technology/active-learning
|
72dce7d91b988264dd7fa1a972d9af45e9648c4c
|
[
"Apache-2.0"
] | 3
|
2020-09-11T07:30:54.000Z
|
2021-04-17T07:45:05.000Z
|
experiments/mnist_simple/class_imbalance.py
|
kili-technology/active-learning
|
72dce7d91b988264dd7fa1a972d9af45e9648c4c
|
[
"Apache-2.0"
] | null | null | null |
experiments/mnist_simple/class_imbalance.py
|
kili-technology/active-learning
|
72dce7d91b988264dd7fa1a972d9af45e9648c4c
|
[
"Apache-2.0"
] | null | null | null |
import os
import logging
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import al
from al.dataset import mnist
from al.model.model_zoo.simple_cnn import ConvModel
from al.model.mnist import MnistLearner
from al.dataset.mnist import MnistDataset
from al.train.active_train import ActiveTrain
from al.helpers.experiment import set_up_experiment, load_config
from al.experiments import set_up_learner
DATASET = 'mnist'
FOLDER_PATH = os.path.dirname(__file__)
OUTPUT_DIR, FIGURE_DIR, logger, logger_name = set_up_experiment(
__file__, FOLDER_PATH, logging_lvl=20)
logger.info('-------------------------')
logger.info('--LAUNCHING EXPERIMENTS--')
logger.info('-------------------------')
config = load_config(FOLDER_PATH, DATASET)
setupper = set_up_learner(DATASET)
config['active_learning']['output_dir'] = OUTPUT_DIR
config['experiment']['logger_name'] = logger_name
model_name = 'simple_cnn'
strategies = ['random_sampling', 'margin_sampling']
repeats = 1
score_data = {}
config['active_learning']['assets_per_query'] = 20
config['active_learning']['n_iter'] = 5
config['active_learning']['init_size'] = 100
config['train_parameters']['batch_size'] = 16
config['train_parameters']['iterations'] = 100
config['experiment']['n_classes'] = 2
raw_dataset, _ = setupper(config, OUTPUT_DIR, logger,
index_train=np.arange(60000))
full_train_dataset = raw_dataset.dataset
first_class = 1
second_class = 2
first_classes = []
second_classes = []
p = 0.1
for i in range(len(full_train_dataset)):
if full_train_dataset[i][1].numpy() == first_class:
first_classes.append(i)
elif full_train_dataset[i][1].numpy() == second_class and np.random.rand() < p:
second_classes.append(i)
train_indices = np.array(first_classes + second_classes)
train_indices = np.random.permutation(train_indices)  # permutation returns a new array; the result must be kept
for i in range(repeats):
logger.info('---------------------------')
logger.info(f'--------ROUND OF TRAININGS NUMBER #{i+1}--------')
logger.info('---------------------------')
for strategy in strategies:
dataset, learner = setupper(
config, OUTPUT_DIR, logger, index_train=train_indices)
logger.info('---------------------------')
logger.info(f'----STRATEGY : {strategy}----')
logger.info('---------------------------')
trainer = ActiveTrain(learner, dataset, strategy, logger_name)
scores = trainer.train(
config['train_parameters'], **config['active_learning'])
score_data[(strategy, i)] = scores
logger.info(f'----DONE----\n')
logger.info('---------------------------')
logger.info(f'--------DONE--------')
logger.info('---------------------------\n\n\n')
# data = []
# for (strategy, experiment_number), scores_experiment in score_data.items():
# for step_result in scores_experiment:
# val_step_result = step_result['val']
# step = step_result['step']
# data.append(
# {'strategy': strategy,
# 'experiment': experiment_number,
# 'step': step,
# **val_step_result})
# df = pd.DataFrame(data)
# plot_dir = os.path.join(os.path.dirname(__file__), 'figures')
# plt.figure(num=0, figsize=(12, 5))
# sns.lineplot(x='step', y='accuracy', hue='strategy', data=df)
# plt.ylabel('Accuracy')
# plt.show()
# plt.savefig(os.path.join(plot_dir, 'accuracy_imbalance.png'))
| 32.242991
| 83
| 0.642319
|
|
f70361664e2ce8762ac750784a5e6dca9c3eba02
| 148,257
|
py
|
Python
|
distributed/worker.py
|
hercules261188/distributed
|
e5eb40c597dba0358221500fda49d653b7cf1f3a
|
[
"BSD-3-Clause"
] | null | null | null |
distributed/worker.py
|
hercules261188/distributed
|
e5eb40c597dba0358221500fda49d653b7cf1f3a
|
[
"BSD-3-Clause"
] | 1
|
2019-04-25T14:13:48.000Z
|
2019-04-25T16:54:54.000Z
|
distributed/worker.py
|
hercules261188/distributed
|
e5eb40c597dba0358221500fda49d653b7cf1f3a
|
[
"BSD-3-Clause"
] | 2
|
2019-04-24T17:06:29.000Z
|
2020-09-02T10:48:07.000Z
|
from __future__ import annotations
import asyncio
import bisect
import builtins
import concurrent.futures
import errno
import heapq
import logging
import os
import random
import sys
import threading
import warnings
import weakref
from collections import defaultdict, deque, namedtuple
from collections.abc import Hashable, Iterable, MutableMapping
from contextlib import suppress
from datetime import timedelta
from inspect import isawaitable
from pickle import PicklingError
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from .client import Client
from tlz import first, keymap, merge, pluck # noqa: F401
from tornado.ioloop import IOLoop, PeriodicCallback
import dask
from dask.core import istask
from dask.system import CPU_COUNT
from dask.utils import (
apply,
format_bytes,
funcname,
parse_bytes,
parse_timedelta,
stringify,
typename,
)
from . import comm, preloading, profile, system, utils
from .batched import BatchedSend
from .comm import connect, get_address_host
from .comm.addressing import address_from_user_args, parse_address
from .comm.utils import OFFLOAD_THRESHOLD
from .core import (
CommClosedError,
Status,
coerce_to_address,
error_message,
pingpong,
send_recv,
)
from .diagnostics import nvml
from .diagnostics.plugin import _get_plugin_name
from .diskutils import WorkSpace
from .http import get_handlers
from .metrics import time
from .node import ServerNode
from .proctitle import setproctitle
from .protocol import pickle, to_serialize
from .pubsub import PubSubWorkerExtension
from .security import Security
from .sizeof import safe_sizeof as sizeof
from .threadpoolexecutor import ThreadPoolExecutor
from .threadpoolexecutor import secede as tpe_secede
from .utils import (
LRU,
TimeoutError,
_maybe_complex,
get_ip,
has_arg,
import_file,
iscoroutinefunction,
json_load_robust,
key_split,
log_errors,
offload,
parse_ports,
silence_logging,
thread_state,
warn_on_duration,
)
from .utils_comm import gather_from_workers, pack_data, retry_operation
from .utils_perf import ThrottledGC, disable_gc_diagnosis, enable_gc_diagnosis
from .versions import get_versions
logger = logging.getLogger(__name__)
LOG_PDB = dask.config.get("distributed.admin.pdb-on-err")
no_value = "--no-value-sentinel--"
IN_PLAY = ("waiting", "ready", "executing", "long-running")
PENDING = ("waiting", "ready", "constrained")
PROCESSING = ("waiting", "ready", "constrained", "executing", "long-running")
READY = ("ready", "constrained")
DEFAULT_EXTENSIONS = [PubSubWorkerExtension]
DEFAULT_METRICS = {}
DEFAULT_STARTUP_INFORMATION = {}
DEFAULT_DATA_SIZE = parse_bytes(
dask.config.get("distributed.scheduler.default-data-size")
)
SerializedTask = namedtuple("SerializedTask", ["function", "args", "kwargs", "task"])
class TaskState:
"""Holds volatile state relating to an individual Dask task
* **dependencies**: ``set(TaskState instances)``
The data needed by this key to run
* **dependents**: ``set(TaskState instances)``
The keys that use this dependency.
* **duration**: ``float``
        Expected duration of the task
* **priority**: ``tuple``
        The priority given to this task by the scheduler. Determines run order.
* **state**: ``str``
The current state of the task. One of ["waiting", "ready", "executing",
"fetch", "memory", "flight", "long-running", "rescheduled", "error"]
* **who_has**: ``set(worker)``
Workers that we believe have this data
* **coming_from**: ``str``
The worker that current task data is coming from if task is in flight
* **waiting_for_data**: ``set(keys of dependencies)``
A dynamic version of dependencies. All dependencies that we still don't
have for a particular key.
* **resource_restrictions**: ``{str: number}``
Abstract resources required to run a task
* **exception**: ``str``
The exception caused by running a task if it erred
    * **traceback**: ``str``
        The traceback from running a task if it erred
* **type**: ``type``
The type of a particular piece of data
* **suspicious_count**: ``int``
The number of times a dependency has not been where we expected it
* **startstops**: ``[{startstop}]``
Log of transfer, load, and compute times for a task
* **start_time**: ``float``
Time at which task begins running
* **stop_time**: ``float``
Time at which task finishes running
* **metadata**: ``dict``
Metadata related to task. Stored metadata should be msgpack
serializable (e.g. int, string, list, dict).
* **nbytes**: ``int``
The size of a particular piece of data
* **annotations**: ``dict``
Task annotations
Parameters
----------
key: str
runspec: SerializedTask
A named tuple containing the ``function``, ``args``, ``kwargs`` and
``task`` associated with this `TaskState` instance. This defaults to
``None`` and can remain empty if it is a dependency that this worker
will receive from another worker.
"""
def __init__(self, key, runspec=None):
assert key is not None
self.key = key
self.runspec = runspec
self.dependencies = set()
self.dependents = set()
self.duration = None
self.priority = None
self.state = "new"
self.who_has = set()
self.coming_from = None
self.waiting_for_data = set()
self.resource_restrictions = None
self.exception = None
self.exception_text = ""
self.traceback = None
self.traceback_text = ""
self.type = None
self.suspicious_count = 0
self.startstops = list()
self.start_time = None
self.stop_time = None
self.metadata = {}
self.nbytes = None
self.annotations = None
self.scheduler_holds_ref = False
def __repr__(self):
return f"<Task {self.key!r} {self.state}>"
def get_nbytes(self) -> int:
nbytes = self.nbytes
return nbytes if nbytes is not None else DEFAULT_DATA_SIZE
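# Illustrative sketch (commented out; not part of the upstream module): the
# minimal lifecycle of a TaskState as described in its docstring, assuming a
# trivial runspec.
#
#     ts = TaskState("double-1", runspec=SerializedTask(lambda x: 2 * x, (21,), {}, None))
#     assert ts.state == "new"                      # all tasks start out "new"
#     assert ts.get_nbytes() == DEFAULT_DATA_SIZE   # size unknown, fall back to default
#     ts.nbytes = 28
#     assert ts.get_nbytes() == 28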
class Worker(ServerNode):
"""Worker node in a Dask distributed cluster
Workers perform two functions:
1. **Serve data** from a local dictionary
2. **Perform computation** on that data and on data from peers
Workers keep the scheduler informed of their data and use that scheduler to
gather data from other workers when necessary to perform a computation.
You can start a worker with the ``dask-worker`` command line application::
$ dask-worker scheduler-ip:port
Use the ``--help`` flag to see more options::
$ dask-worker --help
    The rest of this docstring is about the internal state that the worker uses
to manage and track internal computations.
**State**
**Informational State**
These attributes don't change significantly during execution.
* **nthreads:** ``int``:
        Number of threads used by this worker process
* **executors:** ``Dict[str, concurrent.futures.Executor]``:
Executors used to perform computation. Always contains the default
executor.
* **local_directory:** ``path``:
Path on local machine to store temporary files
* **scheduler:** ``rpc``:
Location of scheduler. See ``.ip/.port`` attributes.
* **name:** ``string``:
Alias
* **services:** ``{str: Server}``:
Auxiliary web servers running on this worker
* **service_ports:** ``{str: port}``:
* **total_out_connections**: ``int``
The maximum number of concurrent outgoing requests for data
* **total_in_connections**: ``int``
The maximum number of concurrent incoming requests for data
* **comm_threshold_bytes**: ``int``
As long as the total number of bytes in flight is below this threshold
        we will not limit the number of outgoing connections for a single task's
dependency fetch.
* **batched_stream**: ``BatchedSend``
A batched stream along which we communicate to the scheduler
* **log**: ``[(message)]``
A structured and queryable log. See ``Worker.story``
**Volatile State**
These attributes track the progress of tasks that this worker is trying to
complete. In the descriptions below a ``key`` is the name of a task that
we want to compute and ``dep`` is the name of a piece of dependent data
that we want to collect from others.
* **tasks**: ``{key: TaskState}``
The tasks currently executing on this worker (and any dependencies of those tasks)
* **data:** ``{key: object}``:
        Prefer using the **host** attribute instead of this, unless
        memory_limit and at least one of memory_target_fraction or
        memory_spill_fraction values are defined; in that case, this attribute
        is a zict.Buffer, from which information on the LRU cache can be queried.
* **data.memory:** ``{key: object}``:
Dictionary mapping keys to actual values stored in memory. Only
available if condition for **data** being a zict.Buffer is met.
* **data.disk:** ``{key: object}``:
Dictionary mapping keys to actual values stored on disk. Only
available if condition for **data** being a zict.Buffer is met.
* **data_needed**: deque(keys)
The keys which still require data in order to execute, arranged in a deque
* **ready**: [keys]
Keys that are ready to run. Stored in a LIFO stack
* **constrained**: [keys]
Keys for which we have the data to run, but are waiting on abstract
resources like GPUs. Stored in a FIFO deque
* **executing_count**: ``int``
A count of tasks currently executing on this worker
* **executed_count**: int
        The number of tasks that this worker has run in its lifetime
* **long_running**: {keys}
A set of keys of tasks that are running and have started their own
long-running clients.
* **has_what**: ``{worker: {deps}}``
The data that we care about that we think a worker has
* **pending_data_per_worker**: ``{worker: [dep]}``
The data on each worker that we still want, prioritized as a deque
* **in_flight_tasks**: ``int``
A count of the number of tasks that are coming to us in current
peer-to-peer connections
* **in_flight_workers**: ``{worker: {task}}``
The workers from which we are currently gathering data and the
dependencies we expect from those connections
* **comm_bytes**: ``int``
The total number of bytes in flight
* **threads**: ``{key: int}``
The ID of the thread on which the task ran
* **active_threads**: ``{int: key}``
The keys currently running on active threads
* **waiting_for_data_count**: ``int``
A count of how many tasks are currently waiting for data
Parameters
----------
scheduler_ip: str
scheduler_port: int
ip: str, optional
data: MutableMapping, type, None
The object to use for storage, builds a disk-backed LRU dict by default
nthreads: int, optional
loop: tornado.ioloop.IOLoop
local_directory: str, optional
Directory where we place local resources
name: str, optional
memory_limit: int, float, string
Number of bytes of memory that this worker should use.
Set to zero for no limit. Set to 'auto' to calculate
as system.MEMORY_LIMIT * min(1, nthreads / total_cores)
Use strings or numbers like 5GB or 5e9
memory_target_fraction: float
Fraction of memory to try to stay beneath
memory_spill_fraction: float
Fraction of memory at which we start spilling to disk
memory_pause_fraction: float
Fraction of memory at which we stop running new tasks
executor: concurrent.futures.Executor, dict[str, concurrent.futures.Executor], str
The executor(s) to use. Depending on the type, it has the following meanings:
- Executor instance: The default executor.
- Dict[str, Executor]: mapping names to Executor instances. If the
"default" key isn't in the dict, a "default" executor will be created
using ``ThreadPoolExecutor(nthreads)``.
            - Str: The string "offload", which refers to the same thread pool used for
offloading communications. This results in the same thread being used
for deserialization and computation.
resources: dict
Resources that this worker has like ``{'GPU': 2}``
nanny: str
Address on which to contact nanny, if it exists
lifetime: str
Amount of time like "1 hour" after which we gracefully shut down the worker.
This defaults to None, meaning no explicit shutdown time.
lifetime_stagger: str
Amount of time like "5 minutes" to stagger the lifetime value
The actual lifetime will be selected uniformly at random between
lifetime +/- lifetime_stagger
lifetime_restart: bool
Whether or not to restart a worker after it has reached its lifetime
Default False
Examples
--------
Use the command line to start a worker::
$ dask-scheduler
Start scheduler at 127.0.0.1:8786
$ dask-worker 127.0.0.1:8786
Start worker at: 127.0.0.1:1234
Registered with scheduler at: 127.0.0.1:8786
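    A worker can also be started programmatically from a running event loop
    (an illustrative sketch; it assumes a scheduler is already listening at
    the given address)::
        import asyncio
        from distributed import Worker
        async def main():
            async with Worker("tcp://127.0.0.1:8786", nthreads=2) as worker:
                await worker.finished()
        asyncio.get_event_loop().run_until_complete(main())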
See Also
--------
distributed.scheduler.Scheduler
distributed.nanny.Nanny
"""
_instances = weakref.WeakSet()
_initialized_clients = weakref.WeakSet()
def __init__(
self,
scheduler_ip=None,
scheduler_port=None,
scheduler_file=None,
ncores=None,
nthreads=None,
loop=None,
local_dir=None,
local_directory=None,
services=None,
service_ports=None,
service_kwargs=None,
name=None,
reconnect=True,
memory_limit="auto",
executor=None,
resources=None,
silence_logs=None,
death_timeout=None,
preload=None,
preload_argv=None,
security=None,
contact_address=None,
memory_monitor_interval="200ms",
extensions=None,
metrics=DEFAULT_METRICS,
startup_information=DEFAULT_STARTUP_INFORMATION,
data=None,
interface=None,
host=None,
port=None,
protocol=None,
dashboard_address=None,
dashboard=False,
http_prefix="/",
nanny=None,
plugins=(),
low_level_profiler=dask.config.get("distributed.worker.profile.low-level"),
validate=None,
profile_cycle_interval=None,
lifetime=None,
lifetime_stagger=None,
lifetime_restart=None,
**kwargs,
):
self.tasks = dict()
self.waiting_for_data_count = 0
self.has_what = defaultdict(set)
self.pending_data_per_worker = defaultdict(deque)
self.nanny = nanny
self._lock = threading.Lock()
self.data_needed = deque() # TODO: replace with heap?
self.in_flight_tasks = 0
self.in_flight_workers = dict()
self.total_out_connections = dask.config.get(
"distributed.worker.connections.outgoing"
)
self.total_in_connections = dask.config.get(
"distributed.worker.connections.incoming"
)
self.comm_threshold_bytes = 10e6
self.comm_nbytes = 0
self._missing_dep_flight = set()
self.threads = dict()
self.active_threads_lock = threading.Lock()
self.active_threads = dict()
self.active_keys = set()
self.profile_keys = defaultdict(profile.create)
self.profile_keys_history = deque(maxlen=3600)
self.profile_recent = profile.create()
self.profile_history = deque(maxlen=3600)
self.generation = 0
self.ready = list()
self.constrained = deque()
self.executing_count = 0
self.executed_count = 0
self.long_running = set()
self.recent_messages_log = deque(
maxlen=dask.config.get("distributed.comm.recent-messages-log-length")
)
self.target_message_size = 50e6 # 50 MB
self.log = deque(maxlen=100000)
if validate is None:
validate = dask.config.get("distributed.scheduler.validate")
self.validate = validate
self._transitions = {
# Basic state transitions
("new", "waiting"): self.transition_new_waiting,
("new", "fetch"): self.transition_new_fetch,
("waiting", "ready"): self.transition_waiting_ready,
("fetch", "flight"): self.transition_fetch_flight,
("ready", "executing"): self.transition_ready_executing,
("executing", "memory"): self.transition_executing_done,
("flight", "memory"): self.transition_flight_memory,
("flight", "fetch"): self.transition_flight_fetch,
# Shouldn't be a valid transition but happens nonetheless
("ready", "memory"): self.transition_ready_memory,
# Scheduler intercession (re-assignment)
("fetch", "waiting"): self.transition_fetch_waiting,
("flight", "waiting"): self.transition_flight_waiting,
# Errors, long-running, constrained
("waiting", "error"): self.transition_waiting_done,
("constrained", "executing"): self.transition_constrained_executing,
("executing", "error"): self.transition_executing_done,
("executing", "rescheduled"): self.transition_executing_done,
("executing", "long-running"): self.transition_executing_long_running,
("long-running", "error"): self.transition_executing_done,
("long-running", "memory"): self.transition_executing_done,
("long-running", "rescheduled"): self.transition_executing_done,
}
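        # `transition` (defined below) dispatches on the (start, finish) pair
        # in this table; a handler may return a different final state, e.g.
        # ("waiting", "ready") lands in "constrained" when the task has
        # resource restrictions.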
self.incoming_transfer_log = deque(maxlen=100000)
self.incoming_count = 0
self.outgoing_transfer_log = deque(maxlen=100000)
self.outgoing_count = 0
self.outgoing_current_count = 0
self.repetitively_busy = 0
self.bandwidth = parse_bytes(dask.config.get("distributed.scheduler.bandwidth"))
self.bandwidth_workers = defaultdict(
lambda: (0, 0)
) # bw/count recent transfers
self.bandwidth_types = defaultdict(lambda: (0, 0)) # bw/count recent transfers
self.latency = 0.001
self._client = None
if profile_cycle_interval is None:
profile_cycle_interval = dask.config.get("distributed.worker.profile.cycle")
profile_cycle_interval = parse_timedelta(profile_cycle_interval, default="ms")
self._setup_logging(logger)
if local_dir is not None:
warnings.warn("The local_dir keyword has moved to local_directory")
local_directory = local_dir
if not local_directory:
local_directory = dask.config.get("temporary-directory") or os.getcwd()
os.makedirs(local_directory, exist_ok=True)
local_directory = os.path.join(local_directory, "dask-worker-space")
with warn_on_duration(
"1s",
"Creating scratch directories is taking a surprisingly long time. "
"This is often due to running workers on a network file system. "
"Consider specifying a local-directory to point workers to write "
"scratch data to a local disk.",
):
self._workspace = WorkSpace(os.path.abspath(local_directory))
self._workdir = self._workspace.new_work_dir(prefix="worker-")
self.local_directory = self._workdir.dir_path
if preload is None:
preload = dask.config.get("distributed.worker.preload")
if preload_argv is None:
preload_argv = dask.config.get("distributed.worker.preload-argv")
self.preloads = preloading.process_preloads(
self, preload, preload_argv, file_dir=self.local_directory
)
if scheduler_file:
cfg = json_load_robust(scheduler_file)
scheduler_addr = cfg["address"]
elif scheduler_ip is None and dask.config.get("scheduler-address", None):
scheduler_addr = dask.config.get("scheduler-address")
elif scheduler_port is None:
scheduler_addr = coerce_to_address(scheduler_ip)
else:
scheduler_addr = coerce_to_address((scheduler_ip, scheduler_port))
self.contact_address = contact_address
if protocol is None:
protocol_address = scheduler_addr.split("://")
if len(protocol_address) == 2:
protocol = protocol_address[0]
self._start_port = port
self._start_host = host
if host:
# Helpful error message if IPv6 specified incorrectly
_, host_address = parse_address(host)
if host_address.count(":") > 1 and not host_address.startswith("["):
raise ValueError(
"Host address with IPv6 must be bracketed like '[::1]'; "
f"got {host_address}"
)
self._interface = interface
self._protocol = protocol
if ncores is not None:
warnings.warn("the ncores= parameter has moved to nthreads=")
nthreads = ncores
self.nthreads = nthreads or CPU_COUNT
if resources is None:
resources = dask.config.get("distributed.worker.resources", None)
self.total_resources = resources or {}
self.available_resources = (resources or {}).copy()
self.death_timeout = parse_timedelta(death_timeout)
self.extensions = dict()
if silence_logs:
silence_logging(level=silence_logs)
if isinstance(security, dict):
security = Security(**security)
self.security = security or Security()
assert isinstance(self.security, Security)
self.connection_args = self.security.get_connection_args("worker")
self.memory_limit = parse_memory_limit(memory_limit, self.nthreads)
self.paused = False
if "memory_target_fraction" in kwargs:
self.memory_target_fraction = kwargs.pop("memory_target_fraction")
else:
self.memory_target_fraction = dask.config.get(
"distributed.worker.memory.target"
)
if "memory_spill_fraction" in kwargs:
self.memory_spill_fraction = kwargs.pop("memory_spill_fraction")
else:
self.memory_spill_fraction = dask.config.get(
"distributed.worker.memory.spill"
)
if "memory_pause_fraction" in kwargs:
self.memory_pause_fraction = kwargs.pop("memory_pause_fraction")
else:
self.memory_pause_fraction = dask.config.get(
"distributed.worker.memory.pause"
)
if isinstance(data, MutableMapping):
self.data = data
elif callable(data):
self.data = data()
elif isinstance(data, tuple):
self.data = data[0](**data[1])
elif self.memory_limit and (
self.memory_target_fraction or self.memory_spill_fraction
):
from .spill import SpillBuffer
self.data = SpillBuffer(
os.path.join(self.local_directory, "storage"),
target=int(
self.memory_limit
* (self.memory_target_fraction or self.memory_spill_fraction)
)
or sys.maxsize,
)
else:
self.data = dict()
self.actors = {}
self.loop = loop or IOLoop.current()
self.reconnect = reconnect
# Common executors always available
self.executors: dict[str, concurrent.futures.Executor] = {
"offload": utils._offload_executor,
"actor": ThreadPoolExecutor(1, thread_name_prefix="Dask-Actor-Threads"),
}
if nvml.device_get_count() > 0:
self.executors["gpu"] = ThreadPoolExecutor(
1, thread_name_prefix="Dask-GPU-Threads"
)
# Find the default executor
if executor == "offload":
self.executors["default"] = self.executors["offload"]
elif isinstance(executor, dict):
self.executors.update(executor)
elif executor is not None:
self.executors["default"] = executor
if "default" not in self.executors:
self.executors["default"] = ThreadPoolExecutor(
self.nthreads, thread_name_prefix="Dask-Default-Threads"
)
self.batched_stream = BatchedSend(interval="2ms", loop=self.loop)
self.name = name
self.scheduler_delay = 0
self.stream_comms = dict()
self.heartbeat_active = False
self._ipython_kernel = None
if self.local_directory not in sys.path:
sys.path.insert(0, self.local_directory)
self.services = {}
self.service_specs = services or {}
self._dashboard_address = dashboard_address
self._dashboard = dashboard
self._http_prefix = http_prefix
self.metrics = dict(metrics) if metrics else {}
self.startup_information = (
dict(startup_information) if startup_information else {}
)
self.low_level_profiler = low_level_profiler
handlers = {
"gather": self.gather,
"run": self.run,
"run_coroutine": self.run_coroutine,
"get_data": self.get_data,
"update_data": self.update_data,
"free_keys": self.handle_free_keys,
"terminate": self.close,
"ping": pingpong,
"upload_file": self.upload_file,
"start_ipython": self.start_ipython,
"call_stack": self.get_call_stack,
"profile": self.get_profile,
"profile_metadata": self.get_profile_metadata,
"get_logs": self.get_logs,
"keys": self.keys,
"versions": self.versions,
"actor_execute": self.actor_execute,
"actor_attribute": self.actor_attribute,
"plugin-add": self.plugin_add,
"plugin-remove": self.plugin_remove,
"get_monitor_info": self.get_monitor_info,
}
stream_handlers = {
"close": self.close,
"compute-task": self.add_task,
"cancel-compute": self.cancel_compute,
"free-keys": self.handle_free_keys,
"superfluous-data": self.handle_superfluous_data,
"steal-request": self.steal_request,
}
super().__init__(
handlers=handlers,
stream_handlers=stream_handlers,
io_loop=self.loop,
connection_args=self.connection_args,
**kwargs,
)
self.scheduler = self.rpc(scheduler_addr)
self.execution_state = {
"scheduler": self.scheduler.address,
"ioloop": self.loop,
"worker": self,
}
pc = PeriodicCallback(self.heartbeat, 1000)
self.periodic_callbacks["heartbeat"] = pc
pc = PeriodicCallback(
lambda: self.batched_stream.send({"op": "keep-alive"}), 60000
)
self.periodic_callbacks["keep-alive"] = pc
self._suspicious_count_limit = 10
self._address = contact_address
self.memory_monitor_interval = parse_timedelta(
memory_monitor_interval, default="ms"
)
if self.memory_limit:
self._memory_monitoring = False
pc = PeriodicCallback(
self.memory_monitor, self.memory_monitor_interval * 1000
)
self.periodic_callbacks["memory"] = pc
if extensions is None:
extensions = DEFAULT_EXTENSIONS
for ext in extensions:
ext(self)
self._throttled_gc = ThrottledGC(logger=logger)
setproctitle("dask-worker [not started]")
profile_trigger_interval = parse_timedelta(
dask.config.get("distributed.worker.profile.interval"), default="ms"
)
pc = PeriodicCallback(self.trigger_profile, profile_trigger_interval * 1000)
self.periodic_callbacks["profile"] = pc
pc = PeriodicCallback(self.cycle_profile, profile_cycle_interval * 1000)
self.periodic_callbacks["profile-cycle"] = pc
self.plugins = {}
self._pending_plugins = plugins
self.lifetime = lifetime or dask.config.get(
"distributed.worker.lifetime.duration"
)
lifetime_stagger = lifetime_stagger or dask.config.get(
"distributed.worker.lifetime.stagger"
)
self.lifetime_restart = lifetime_restart or dask.config.get(
"distributed.worker.lifetime.restart"
)
if isinstance(self.lifetime, str):
self.lifetime = parse_timedelta(self.lifetime)
if isinstance(lifetime_stagger, str):
lifetime_stagger = parse_timedelta(lifetime_stagger)
if self.lifetime:
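            # Stagger uniformly in [lifetime - stagger, lifetime + stagger] so
            # that workers started together do not all shut down at the same
            # instant.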
self.lifetime += (random.random() * 2 - 1) * lifetime_stagger
self.io_loop.call_later(self.lifetime, self.close_gracefully)
Worker._instances.add(self)
##################
# Administrative #
##################
def __repr__(self):
return "<%s: %r, %s, %s, stored: %d, running: %d/%d, ready: %d, comm: %d, waiting: %d>" % (
self.__class__.__name__,
self.address,
self.name,
self.status,
len(self.data),
self.executing_count,
self.nthreads,
len(self.ready),
self.in_flight_tasks,
self.waiting_for_data_count,
)
@property
def logs(self):
return self._deque_handler.deque
def log_event(self, topic, msg):
self.batched_stream.send(
{
"op": "log-event",
"topic": topic,
"msg": msg,
}
)
@property
def worker_address(self):
"""For API compatibility with Nanny"""
return self.address
@property
def local_dir(self):
"""For API compatibility with Nanny"""
warnings.warn(
"The local_dir attribute has moved to local_directory", stacklevel=2
)
return self.local_directory
@property
def executor(self):
return self.executors["default"]
async def get_metrics(self):
out = dict(
executing=self.executing_count,
in_memory=len(self.data),
ready=len(self.ready),
in_flight=self.in_flight_tasks,
bandwidth={
"total": self.bandwidth,
"workers": dict(self.bandwidth_workers),
"types": keymap(typename, self.bandwidth_types),
},
spilled_nbytes=getattr(self.data, "spilled_total", 0),
)
out.update(self.monitor.recent())
for k, metric in self.metrics.items():
try:
result = metric(self)
if isawaitable(result):
result = await result
# In case of collision, prefer core metrics
out.setdefault(k, result)
except Exception: # TODO: log error once
pass
return out
async def get_startup_information(self):
result = {}
for k, f in self.startup_information.items():
try:
v = f(self)
if isawaitable(v):
v = await v
result[k] = v
except Exception: # TODO: log error once
pass
return result
def identity(self, comm=None):
return {
"type": type(self).__name__,
"id": self.id,
"scheduler": self.scheduler.address,
"nthreads": self.nthreads,
"ncores": self.nthreads, # backwards compatibility
"memory_limit": self.memory_limit,
}
#####################
# External Services #
#####################
async def _register_with_scheduler(self):
self.periodic_callbacks["keep-alive"].stop()
self.periodic_callbacks["heartbeat"].stop()
start = time()
if self.contact_address is None:
self.contact_address = self.address
logger.info("-" * 49)
while True:
try:
_start = time()
comm = await connect(self.scheduler.address, **self.connection_args)
comm.name = "Worker->Scheduler"
comm._server = weakref.ref(self)
await comm.write(
dict(
op="register-worker",
reply=False,
address=self.contact_address,
keys=list(self.data),
nthreads=self.nthreads,
name=self.name,
nbytes={
ts.key: ts.get_nbytes()
for ts in self.tasks.values()
                            # Only if the task is in memory is this a
                            # sensible value; otherwise it would just
                            # report the default size
if ts.state == "memory"
},
types={k: typename(v) for k, v in self.data.items()},
now=time(),
resources=self.total_resources,
memory_limit=self.memory_limit,
local_directory=self.local_directory,
services=self.service_ports,
nanny=self.nanny,
pid=os.getpid(),
versions=get_versions(),
metrics=await self.get_metrics(),
extra=await self.get_startup_information(),
),
serializers=["msgpack"],
)
future = comm.read(deserializers=["msgpack"])
response = await future
if response.get("warning"):
logger.warning(response["warning"])
_end = time()
middle = (_start + _end) / 2
self._update_latency(_end - start)
self.scheduler_delay = response["time"] - middle
self.status = Status.running
break
except OSError:
logger.info("Waiting to connect to: %26s", self.scheduler.address)
await asyncio.sleep(0.1)
except TimeoutError:
logger.info("Timed out when connecting to scheduler")
if response["status"] != "OK":
raise ValueError(f"Unexpected response from register: {response!r}")
else:
await asyncio.gather(
*(
self.plugin_add(name=name, plugin=plugin)
for name, plugin in response["worker-plugins"].items()
)
)
logger.info(" Registered to: %26s", self.scheduler.address)
logger.info("-" * 49)
self.batched_stream.start(comm)
self.periodic_callbacks["keep-alive"].start()
self.periodic_callbacks["heartbeat"].start()
self.loop.add_callback(self.handle_scheduler, comm)
def _update_latency(self, latency):
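        # Exponentially weighted moving average: each new sample contributes
        # 5%, so transient latency spikes decay rather than dominating.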
self.latency = latency * 0.05 + self.latency * 0.95
if self.digests is not None:
self.digests["latency"].add(latency)
async def heartbeat(self):
if self.heartbeat_active:
logger.debug("Heartbeat skipped: channel busy")
return
self.heartbeat_active = True
logger.debug("Heartbeat: %s", self.address)
try:
start = time()
response = await retry_operation(
self.scheduler.heartbeat_worker,
address=self.contact_address,
now=start,
metrics=await self.get_metrics(),
executing={
key: start - self.tasks[key].start_time
for key in self.active_keys
if key in self.tasks
},
)
end = time()
middle = (start + end) / 2
self._update_latency(end - start)
if response["status"] == "missing":
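                # The scheduler no longer knows this worker: give a concurrent
                # close up to ~0.5s to change our status; if we are still
                # running after that, re-register from scratch.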
for i in range(10):
if self.status != Status.running:
break
else:
await asyncio.sleep(0.05)
else:
await self._register_with_scheduler()
return
self.scheduler_delay = response["time"] - middle
self.periodic_callbacks["heartbeat"].callback_time = (
response["heartbeat-interval"] * 1000
)
self.bandwidth_workers.clear()
self.bandwidth_types.clear()
except CommClosedError:
logger.warning("Heartbeat to scheduler failed", exc_info=True)
if not self.reconnect:
await self.close(report=False)
except OSError as e:
# Scheduler is gone. Respect distributed.comm.timeouts.connect
if "Timed out trying to connect" in str(e):
await self.close(report=False)
else:
raise e
finally:
self.heartbeat_active = False
async def handle_scheduler(self, comm):
try:
await self.handle_stream(
comm, every_cycle=[self.ensure_communicating, self.ensure_computing]
)
except Exception as e:
logger.exception(e)
raise
finally:
if self.reconnect and self.status == Status.running:
logger.info("Connection to scheduler broken. Reconnecting...")
self.loop.add_callback(self.heartbeat)
else:
await self.close(report=False)
def start_ipython(self, comm):
"""Start an IPython kernel
Returns Jupyter connection info dictionary.
"""
from ._ipython_utils import start_ipython
if self._ipython_kernel is None:
self._ipython_kernel = start_ipython(
ip=self.ip, ns={"worker": self}, log=logger
)
return self._ipython_kernel.get_connection_info()
async def upload_file(self, comm, filename=None, data=None, load=True):
out_filename = os.path.join(self.local_directory, filename)
def func(data):
if isinstance(data, str):
data = data.encode()
with open(out_filename, "wb") as f:
f.write(data)
f.flush()
return data
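        # Write small payloads inline; offload larger ones to a thread so the
        # event loop is not blocked on disk I/O.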
if len(data) < 10000:
data = func(data)
else:
data = await offload(func, data)
if load:
try:
import_file(out_filename)
cache_loads.data.clear()
except Exception as e:
logger.exception(e)
raise e
return {"status": "OK", "nbytes": len(data)}
def keys(self, comm=None):
return list(self.data)
async def gather(self, comm=None, who_has=None):
who_has = {
k: [coerce_to_address(addr) for addr in v]
for k, v in who_has.items()
if k not in self.data
}
result, missing_keys, missing_workers = await gather_from_workers(
who_has, rpc=self.rpc, who=self.address
)
self.update_data(data=result, report=False)
if missing_keys:
logger.warning(
"Could not find data: %s on workers: %s (who_has: %s)",
missing_keys,
missing_workers,
who_has,
)
return {"status": "partial-fail", "keys": missing_keys}
else:
return {"status": "OK"}
def get_monitor_info(self, comm=None, recent=False, start=0):
result = dict(
range_query=(
self.monitor.recent()
if recent
else self.monitor.range_query(start=start)
),
count=self.monitor.count,
last_time=self.monitor.last_time,
)
if nvml.device_get_count() > 0:
result["gpu_name"] = self.monitor.gpu_name
result["gpu_memory_total"] = self.monitor.gpu_memory_total
return result
#############
# Lifecycle #
#############
async def start(self):
if self.status and self.status in (
Status.closed,
Status.closing,
Status.closing_gracefully,
):
return
assert self.status is Status.undefined, self.status
await super().start()
enable_gc_diagnosis()
thread_state.on_event_loop_thread = True
ports = parse_ports(self._start_port)
for port in ports:
start_address = address_from_user_args(
host=self._start_host,
port=port,
interface=self._interface,
protocol=self._protocol,
security=self.security,
)
kwargs = self.security.get_listen_args("worker")
if self._protocol in ("tcp", "tls"):
kwargs = kwargs.copy()
kwargs["default_host"] = get_ip(
get_address_host(self.scheduler.address)
)
try:
await self.listen(start_address, **kwargs)
except OSError as e:
if len(ports) > 1 and e.errno == errno.EADDRINUSE:
continue
else:
raise
else:
self._start_address = start_address
break
else:
raise ValueError(
f"Could not start Worker on host {self._start_host}"
f"with port {self._start_port}"
)
# Start HTTP server associated with this Worker node
routes = get_handlers(
server=self,
modules=dask.config.get("distributed.worker.http.routes"),
prefix=self._http_prefix,
)
self.start_http_server(routes, self._dashboard_address)
if self._dashboard:
try:
import distributed.dashboard.worker
except ImportError:
logger.debug("To start diagnostics web server please install Bokeh")
else:
distributed.dashboard.worker.connect(
self.http_application,
self.http_server,
self,
prefix=self._http_prefix,
)
self.ip = get_address_host(self.address)
if self.name is None:
self.name = self.address
for preload in self.preloads:
await preload.start()
# Services listen on all addresses
# Note Nanny is not a "real" service, just some metadata
# passed in service_ports...
self.start_services(self.ip)
try:
listening_address = "%s%s:%d" % (self.listener.prefix, self.ip, self.port)
except Exception:
listening_address = f"{self.listener.prefix}{self.ip}"
logger.info(" Start worker at: %26s", self.address)
logger.info(" Listening to: %26s", listening_address)
for k, v in self.service_ports.items():
logger.info(" {:>16} at: {:>26}".format(k, self.ip + ":" + str(v)))
logger.info("Waiting to connect to: %26s", self.scheduler.address)
logger.info("-" * 49)
logger.info(" Threads: %26d", self.nthreads)
if self.memory_limit:
logger.info(" Memory: %26s", format_bytes(self.memory_limit))
logger.info(" Local Directory: %26s", self.local_directory)
setproctitle("dask-worker [%s]" % self.address)
await asyncio.gather(
*(self.plugin_add(plugin=plugin) for plugin in self._pending_plugins)
)
self._pending_plugins = ()
await self._register_with_scheduler()
self.start_periodic_callbacks()
return self
def _close(self, *args, **kwargs):
warnings.warn("Worker._close has moved to Worker.close", stacklevel=2)
return self.close(*args, **kwargs)
async def close(
self, report=True, timeout=30, nanny=True, executor_wait=True, safe=False
):
with log_errors():
if self.status in (Status.closed, Status.closing):
await self.finished()
return
self.reconnect = False
disable_gc_diagnosis()
try:
logger.info("Stopping worker at %s", self.address)
except ValueError: # address not available if already closed
logger.info("Stopping worker")
if self.status not in (Status.running, Status.closing_gracefully):
logger.info("Closed worker has not yet started: %s", self.status)
self.status = Status.closing
for preload in self.preloads:
await preload.teardown()
if nanny and self.nanny:
with self.rpc(self.nanny) as r:
await r.close_gracefully()
setproctitle("dask-worker [closing]")
teardowns = [
plugin.teardown(self)
for plugin in self.plugins.values()
if hasattr(plugin, "teardown")
]
await asyncio.gather(*(td for td in teardowns if isawaitable(td)))
for pc in self.periodic_callbacks.values():
pc.stop()
if self._client:
# If this worker is the last one alive, clean up the worker
# initialized clients
if not any(
w
for w in Worker._instances
if w != self and w.status == Status.running
):
for c in Worker._initialized_clients:
                        # Regardless of what the client was initialized with
                        # we'll require the result as a future. This is
                        # necessary since the heuristics for detecting an
                        # asynchronous context are not reliable and we might
                        # deadlock here
c._asynchronous = True
if c.asynchronous:
await c.close()
else:
                            # There is still the chance that even with us
                            # telling the client to be async, it will decide
                            # otherwise
c.close()
with suppress(EnvironmentError, TimeoutError):
if report and self.contact_address is not None:
await asyncio.wait_for(
self.scheduler.unregister(
address=self.contact_address, safe=safe
),
timeout,
)
await self.scheduler.close_rpc()
self._workdir.release()
self.stop_services()
if (
self.batched_stream
and self.batched_stream.comm
and not self.batched_stream.comm.closed()
):
self.batched_stream.send({"op": "close-stream"})
if self.batched_stream:
with suppress(TimeoutError):
await self.batched_stream.close(timedelta(seconds=timeout))
for executor in self.executors.values():
if executor is utils._offload_executor:
continue # Never shutdown the offload executor
if isinstance(executor, ThreadPoolExecutor):
executor._work_queue.queue.clear()
executor.shutdown(wait=executor_wait, timeout=timeout)
else:
executor.shutdown(wait=executor_wait)
self.stop()
await self.rpc.close()
self.status = Status.closed
await super().close()
setproctitle("dask-worker [closed]")
return "OK"
async def close_gracefully(self, restart=None):
"""Gracefully shut down a worker
This first informs the scheduler that we're shutting down, and asks it
to move our data elsewhere. Afterwards, we close as normal
"""
if self.status in (Status.closing, Status.closing_gracefully):
await self.finished()
if self.status == Status.closed:
return
if restart is None:
restart = self.lifetime_restart
logger.info("Closing worker gracefully: %s", self.address)
self.status = Status.closing_gracefully
await self.scheduler.retire_workers(workers=[self.address], remove=False)
await self.close(safe=True, nanny=not restart)
async def terminate(self, comm=None, report=True, **kwargs):
await self.close(report=report, **kwargs)
return "OK"
async def wait_until_closed(self):
warnings.warn("wait_until_closed has moved to finished()")
await self.finished()
assert self.status == Status.closed
################
# Worker Peers #
################
def send_to_worker(self, address, msg):
if address not in self.stream_comms:
bcomm = BatchedSend(interval="1ms", loop=self.loop)
self.stream_comms[address] = bcomm
async def batched_send_connect():
comm = await connect(
address, **self.connection_args # TODO, serialization
)
comm.name = "Worker->Worker"
await comm.write({"op": "connection_stream"})
bcomm.start(comm)
self.loop.add_callback(batched_send_connect)
self.stream_comms[address].send(msg)
async def get_data(
self, comm, keys=None, who=None, serializers=None, max_connections=None
):
start = time()
if max_connections is None:
max_connections = self.total_in_connections
# Allow same-host connections more liberally
if (
max_connections
and comm
and get_address_host(comm.peer_address) == get_address_host(self.address)
):
max_connections = max_connections * 2
if self.paused:
max_connections = 1
throttle_msg = " Throttling outgoing connections because worker is paused."
else:
throttle_msg = ""
if (
max_connections is not False
and self.outgoing_current_count >= max_connections
):
logger.debug(
"Worker %s has too many open connections to respond to data request "
"from %s (%d/%d).%s",
self.address,
who,
self.outgoing_current_count,
max_connections,
throttle_msg,
)
return {"status": "busy"}
self.outgoing_current_count += 1
data = {k: self.data[k] for k in keys if k in self.data}
if len(data) < len(keys):
for k in set(keys) - set(data):
if k in self.actors:
from .actor import Actor
data[k] = Actor(type(self.actors[k]), self.address, k, worker=self)
msg = {"status": "OK", "data": {k: to_serialize(v) for k, v in data.items()}}
nbytes = {k: self.tasks[k].nbytes for k in data if k in self.tasks}
stop = time()
if self.digests is not None:
self.digests["get-data-load-duration"].add(stop - start)
start = time()
try:
compressed = await comm.write(msg, serializers=serializers)
response = await comm.read(deserializers=serializers)
assert response == "OK", response
except OSError:
logger.exception(
"failed during get data with %s -> %s", self.address, who, exc_info=True
)
comm.abort()
raise
finally:
self.outgoing_current_count -= 1
stop = time()
if self.digests is not None:
self.digests["get-data-send-duration"].add(stop - start)
total_bytes = sum(filter(None, nbytes.values()))
self.outgoing_count += 1
        duration = (stop - start) or 0.5  # Windows clock resolution can yield stop == start; avoid a zero duration
self.outgoing_transfer_log.append(
{
"start": start + self.scheduler_delay,
"stop": stop + self.scheduler_delay,
"middle": (start + stop) / 2,
"duration": duration,
"who": who,
"keys": nbytes,
"total": total_bytes,
"compressed": compressed,
"bandwidth": total_bytes / duration,
}
)
return Status.dont_reply
###################
# Local Execution #
###################
def update_data(self, comm=None, data=None, report=True, serializers=None):
for key, value in data.items():
ts = self.tasks.get(key)
if getattr(ts, "state", None) is not None:
self.transition(ts, "memory", value=value)
else:
self.tasks[key] = ts = TaskState(key)
self.put_key_in_memory(ts, value)
ts.priority = None
ts.duration = None
ts.scheduler_holds_ref = True
self.log.append((key, "receive-from-scatter"))
if report:
self.log.append(
("Notifying scheduler about in-memory in update-data", list(data))
)
self.batched_stream.send({"op": "add-keys", "keys": list(data)})
info = {"nbytes": {k: sizeof(v) for k, v in data.items()}, "status": "OK"}
return info
def handle_free_keys(self, comm=None, keys=None, reason=None):
"""
Handler to be called by the scheduler.
        The given keys are no longer referred to or required by the scheduler.
The worker is now allowed to release the key, if applicable.
This does not guarantee that the memory is released since the worker may
still decide to hold on to the data and task since it is required by an
upstream dependency.
"""
self.log.append(("free-keys", keys, reason))
for key in keys:
ts = self.tasks.get(key)
if ts is not None:
ts.scheduler_holds_ref = False
self.release_key(key, report=False, reason=reason)
def handle_superfluous_data(self, keys=(), reason=None):
"""Stream handler notifying the worker that it might be holding unreferenced, superfluous data.
This should not actually happen during ordinary operations and is only
intended to correct any erroneous state. An example where this is
necessary is if a worker fetches data for a downstream task but that
task is released before the data arrives.
In this case, the scheduler will notify the worker that it may be
        holding this unnecessary data, if the worker hasn't already released the data itself.
        This handler guarantees neither that the task nor that the data will
        actually be released; it only asks the worker to release the data on a
        best-effort basis. This protects against race conditions where the
        given keys may already have been rescheduled for compute, in which
        case the compute wins and this handler is ignored.
        For stronger guarantees, see the ``free_keys`` handler.
"""
self.log.append(("Handle superfluous data", keys, reason))
for key in list(keys):
ts = self.tasks.get(key)
if ts and not ts.scheduler_holds_ref:
self.release_key(key, reason=f"delete data: {reason}", report=False)
logger.debug("Worker %s -- Deleted %d keys", self.name, len(keys))
return "OK"
async def set_resources(self, **resources):
for r, quantity in resources.items():
if r in self.total_resources:
self.available_resources[r] += quantity - self.total_resources[r]
else:
self.available_resources[r] = quantity
self.total_resources[r] = quantity
await retry_operation(
self.scheduler.set_resources,
resources=self.total_resources,
worker=self.contact_address,
)
###################
# Task Management #
###################
def cancel_compute(self, key, reason):
"""
Cancel a task on a best effort basis. This is only possible while a task
is in state `waiting` or `ready`.
Nothing will happen otherwise.
"""
ts = self.tasks.get(key)
if ts and ts.state in ("waiting", "ready"):
self.log.append((key, "cancel-compute", reason))
ts.scheduler_holds_ref = False
# All possible dependents of TS should not be in state Processing on
# scheduler side and therefore should not be assigned to a worker,
# yet.
assert not ts.dependents
self.release_key(key, reason=reason, report=False)
def add_task(
self,
key,
function=None,
args=None,
kwargs=None,
task=no_value,
who_has=None,
nbytes=None,
priority=None,
duration=None,
resource_restrictions=None,
actor=False,
annotations=None,
**kwargs2,
):
try:
runspec = SerializedTask(function, args, kwargs, task)
if key in self.tasks:
ts = self.tasks[key]
ts.scheduler_holds_ref = True
if ts.state == "memory":
assert key in self.data or key in self.actors
logger.debug(
"Asked to compute pre-existing result: %s: %s", key, ts.state
)
self.send_task_state_to_scheduler(ts)
return
if ts.state in IN_PLAY:
return
if ts.state == "error":
ts.exception = None
ts.exception_text = ""
ts.traceback = None
ts.traceback_text = ""
else:
# This is a scheduler re-assignment
# Either `fetch` -> `waiting` or `flight` -> `waiting`
self.log.append((ts.key, "re-adding key, new TaskState"))
self.transition(ts, "waiting", runspec=runspec)
else:
self.log.append((key, "new"))
self.tasks[key] = ts = TaskState(
key=key, runspec=SerializedTask(function, args, kwargs, task)
)
self.transition(ts, "waiting")
# TODO: move transition of `ts` to end of `add_task`
# This will require a chained recommendation transition system like
# the scheduler
if priority is not None:
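                # Append a monotonically decreasing generation so that, among
                # tasks with equal scheduler priority, more recently received
                # tasks sort first on the ready heap (the LIFO behaviour
                # documented for `ready` in the Worker docstring).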
priority = tuple(priority) + (self.generation,)
self.generation -= 1
if actor:
self.actors[ts.key] = None
ts.scheduler_holds_ref = True
ts.runspec = runspec
ts.priority = priority
ts.duration = duration
if resource_restrictions:
ts.resource_restrictions = resource_restrictions
ts.annotations = annotations
who_has = who_has or {}
for dependency, workers in who_has.items():
assert workers
if dependency not in self.tasks:
# initial state is "new"
# this dependency does not already exist on worker
self.tasks[dependency] = dep_ts = TaskState(key=dependency)
# link up to child / parents
ts.dependencies.add(dep_ts)
dep_ts.dependents.add(ts)
# check to ensure task wasn't already executed and partially released
# # TODO: make this less bad
state = "fetch" if dependency not in self.data else "memory"
# transition from new -> fetch handles adding dependency
# to waiting_for_data
discarded_self = False
if self.address in workers and state == "fetch":
discarded_self = True
workers = set(workers)
workers.discard(self.address)
who_has[dependency] = tuple(workers)
self.transition(dep_ts, state, who_has=workers)
self.log.append(
(
dependency,
"new-dep",
dep_ts.state,
f"requested by {ts.key}",
discarded_self,
)
)
else:
# task was already present on worker
dep_ts = self.tasks[dependency]
# link up to child / parents
ts.dependencies.add(dep_ts)
dep_ts.dependents.add(ts)
if dep_ts.state not in ("memory",):
ts.waiting_for_data.add(dep_ts.key)
self.update_who_has(who_has=who_has)
if nbytes is not None:
for key, value in nbytes.items():
self.tasks[key].nbytes = value
if ts.waiting_for_data:
self.data_needed.append(ts.key)
else:
self.transition(ts, "ready")
if self.validate:
for worker, keys in self.has_what.items():
for k in keys:
assert worker in self.tasks[k].who_has
if who_has:
assert all(self.tasks[dep] in ts.dependencies for dep in who_has)
assert all(self.tasks[dep.key] for dep in ts.dependencies)
for dependency in ts.dependencies:
self.validate_task(dependency)
self.validate_task(ts)
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition(self, ts, finish, **kwargs):
if ts is None:
return
start = ts.state
if start == finish:
return
func = self._transitions[start, finish]
self.log.append((ts.key, start, finish))
state = func(ts, **kwargs)
if state and finish != state:
self.log.append((ts.key, start, finish, state))
ts.state = state or finish
if self.validate:
self.validate_task(ts)
self._notify_plugins("transition", ts.key, start, state or finish, **kwargs)
def transition_new_waiting(self, ts):
try:
if self.validate:
assert ts.state == "new"
assert ts.runspec is not None
assert not ts.who_has
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_new_fetch(self, ts, who_has):
try:
if self.validate:
assert ts.state == "new"
assert ts.runspec is None
assert who_has
for dependent in ts.dependents:
dependent.waiting_for_data.add(ts.key)
ts.who_has.update(who_has)
for w in who_has:
self.has_what[w].add(ts.key)
self.pending_data_per_worker[w].append(ts.key)
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_fetch_waiting(self, ts, runspec):
"""This is a rescheduling transition that occurs after a worker failure.
A task was available from another worker but that worker died and the
scheduler reassigned the task for computation here.
"""
try:
if self.validate:
assert ts.state == "fetch"
assert ts.runspec is None
assert runspec is not None
ts.runspec = runspec
# remove any stale entries in `has_what`
for worker in self.has_what.keys():
self.has_what[worker].discard(ts.key)
# clear `who_has` of stale info
ts.who_has.clear()
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_flight_waiting(self, ts, runspec):
"""This is a rescheduling transition that occurs after
a worker failure. A task was in flight from another worker to this
worker when that worker died and the scheduler reassigned the task for
computation here.
"""
try:
if self.validate:
assert ts.state == "flight"
assert ts.runspec is None
assert runspec is not None
ts.runspec = runspec
# remove any stale entries in `has_what`
for worker in self.has_what.keys():
self.has_what[worker].discard(ts.key)
# clear `who_has` of stale info
ts.who_has.clear()
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_fetch_flight(self, ts, worker=None):
try:
if self.validate:
assert ts.state == "fetch"
assert ts.dependents
ts.coming_from = worker
self.in_flight_tasks += 1
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_flight_fetch(self, ts):
try:
if self.validate:
assert ts.state == "flight"
self.in_flight_tasks -= 1
ts.coming_from = None
ts.runspec = None
if not ts.who_has:
if ts.key not in self._missing_dep_flight:
self._missing_dep_flight.add(ts.key)
logger.info("Task %s does not know who has", ts)
self.loop.add_callback(self.handle_missing_dep, ts)
for w in ts.who_has:
self.pending_data_per_worker[w].append(ts.key)
for dependent in ts.dependents:
dependent.waiting_for_data.add(ts.key)
if dependent.state == "waiting":
self.data_needed.append(dependent.key)
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_flight_memory(self, ts, value=None):
try:
if self.validate:
assert ts.state == "flight"
self.in_flight_tasks -= 1
ts.coming_from = None
self.put_key_in_memory(ts, value)
for dependent in ts.dependents:
try:
dependent.waiting_for_data.remove(ts.key)
self.waiting_for_data_count -= 1
except KeyError:
pass
self.log.append(("Notifying scheduler about in-memory", ts.key))
self.batched_stream.send({"op": "add-keys", "keys": [ts.key]})
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_waiting_ready(self, ts):
try:
if self.validate:
assert ts.state == "waiting"
assert not ts.waiting_for_data
assert all(
dep.key in self.data or dep.key in self.actors
for dep in ts.dependencies
)
assert all(dep.state == "memory" for dep in ts.dependencies)
assert ts.key not in self.ready
self.has_what[self.address].discard(ts.key)
if ts.resource_restrictions is not None:
self.constrained.append(ts.key)
return "constrained"
else:
heapq.heappush(self.ready, (ts.priority, ts.key))
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_waiting_done(self, ts, value=None):
try:
if self.validate:
assert ts.state == "waiting"
assert ts.key not in self.ready
self.waiting_for_data_count -= len(ts.waiting_for_data)
ts.waiting_for_data.clear()
if value is not None:
self.put_key_in_memory(ts, value)
self.send_task_state_to_scheduler(ts)
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_ready_executing(self, ts):
try:
if self.validate:
assert not ts.waiting_for_data
assert ts.key not in self.data
assert ts.state in READY
assert ts.key not in self.ready
assert all(
dep.key in self.data or dep.key in self.actors
for dep in ts.dependencies
)
self.executing_count += 1
self.loop.add_callback(self.execute, ts.key)
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_ready_error(self, ts):
if self.validate:
assert ts.exception is not None
assert ts.traceback is not None
assert ts.exception_text
assert ts.traceback_text
self.send_task_state_to_scheduler(ts)
def transition_ready_memory(self, ts, value=no_value):
if value is not no_value:
self.put_key_in_memory(ts, value=value)
self.send_task_state_to_scheduler(ts)
def transition_constrained_executing(self, ts):
self.transition_ready_executing(ts)
for resource, quantity in ts.resource_restrictions.items():
self.available_resources[resource] -= quantity
if self.validate:
assert all(v >= 0 for v in self.available_resources.values())
def transition_executing_done(self, ts, value=no_value, report=True):
try:
if self.validate:
assert ts.state == "executing" or ts.key in self.long_running
assert not ts.waiting_for_data
assert ts.key not in self.ready
out = None
if ts.resource_restrictions is not None:
for resource, quantity in ts.resource_restrictions.items():
self.available_resources[resource] += quantity
if ts.state == "executing":
self.executing_count -= 1
self.executed_count += 1
elif ts.state == "long-running":
self.long_running.remove(ts.key)
if value is not no_value:
try:
self.put_key_in_memory(ts, value, transition=False)
except Exception as e:
logger.info("Failed to put key in memory", exc_info=True)
msg = error_message(e)
ts.exception = msg["exception"]
ts.exception_text = msg["exception_text"]
ts.traceback = msg["traceback"]
ts.traceback_text = msg["traceback_text"]
ts.state = "error"
out = "error"
for d in ts.dependents:
d.waiting_for_data.add(ts.key)
if report and self.batched_stream and self.status == Status.running:
self.send_task_state_to_scheduler(ts)
else:
raise CommClosedError
return out
except OSError:
logger.info("Comm closed")
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_executing_long_running(self, ts, compute_duration=None):
try:
if self.validate:
assert ts.state == "executing"
self.executing_count -= 1
self.long_running.add(ts.key)
self.batched_stream.send(
{
"op": "long-running",
"key": ts.key,
"compute_duration": compute_duration,
}
)
self.ensure_computing()
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def maybe_transition_long_running(self, ts, compute_duration=None):
if ts.state == "executing":
self.transition(ts, "long-running", compute_duration=compute_duration)
def stateof(self, key):
ts = self.tasks[key]
return {
"executing": ts.state == "executing",
"waiting_for_data": bool(ts.waiting_for_data),
"heap": key in pluck(1, self.ready),
"data": key in self.data,
}
def story(self, *keys):
keys = [key.key if isinstance(key, TaskState) else key for key in keys]
return [
msg
for msg in self.log
if any(key in msg for key in keys)
or any(
key in c
for key in keys
for c in msg
if isinstance(c, (tuple, list, set))
)
]
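    # Debugging sketch (illustrative, not part of the original module): `story`
    # filters the structured transition log, matching a key either as a direct
    # element of a log entry or nested inside a tuple/list/set member.
    #
    #     worker.log.append(("x-1", "put-in-memory"))
    #     worker.story("x-1")  # -> [("x-1", "put-in-memory")]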
def ensure_communicating(self):
changed = True
try:
while (
changed
and self.data_needed
and len(self.in_flight_workers) < self.total_out_connections
):
changed = False
logger.debug(
"Ensure communicating. Pending: %d. Connections: %d/%d",
len(self.data_needed),
len(self.in_flight_workers),
self.total_out_connections,
)
key = self.data_needed[0]
if key not in self.tasks:
self.data_needed.popleft()
changed = True
continue
ts = self.tasks[key]
if ts.state != "waiting":
self.log.append((key, "communication pass"))
self.data_needed.popleft()
changed = True
continue
dependencies = ts.dependencies
if self.validate:
assert all(dep.key in self.tasks for dep in dependencies)
dependencies_fetch = set()
dependencies_missing = set()
for dependency_ts in dependencies:
if dependency_ts.state == "fetch":
if not dependency_ts.who_has:
dependencies_missing.add(dependency_ts)
else:
dependencies_fetch.add(dependency_ts)
del dependencies, dependency_ts
if dependencies_missing:
missing_deps2 = {
dep
for dep in dependencies_missing
if dep.key not in self._missing_dep_flight
}
for dep in missing_deps2:
self._missing_dep_flight.add(dep.key)
if missing_deps2:
logger.info(
"Can't find dependencies %s for key %s",
missing_deps2.copy(),
key,
)
self.loop.add_callback(self.handle_missing_dep, *missing_deps2)
dependencies_fetch -= dependencies_missing
self.log.append(
("gather-dependencies", key, {d.key for d in dependencies_fetch})
)
in_flight = False
while dependencies_fetch and (
len(self.in_flight_workers) < self.total_out_connections
or self.comm_nbytes < self.comm_threshold_bytes
):
to_gather_ts = dependencies_fetch.pop()
workers = [
w
for w in to_gather_ts.who_has
if w not in self.in_flight_workers
]
if not workers:
in_flight = True
continue
host = get_address_host(self.address)
local = [w for w in workers if get_address_host(w) == host]
if local:
worker = random.choice(local)
else:
worker = random.choice(list(workers))
to_gather, total_nbytes = self.select_keys_for_gather(
worker, to_gather_ts.key
)
self.comm_nbytes += total_nbytes
self.in_flight_workers[worker] = to_gather
for d in to_gather:
dependencies_fetch.discard(self.tasks.get(d))
self.transition(self.tasks[d], "flight", worker=worker)
                    assert worker != self.address
self.loop.add_callback(
self.gather_dep,
worker=worker,
to_gather=to_gather,
total_nbytes=total_nbytes,
cause=ts,
)
changed = True
if not dependencies_fetch and not in_flight:
self.data_needed.popleft()
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def send_task_state_to_scheduler(self, ts):
if ts.key in self.data or self.actors.get(ts.key):
typ = ts.type
if ts.nbytes is None or typ is None:
try:
value = self.data[ts.key]
except KeyError:
value = self.actors[ts.key]
ts.nbytes = sizeof(value)
typ = ts.type = type(value)
del value
try:
typ_serialized = dumps_function(typ)
except PicklingError:
                # Some types fail pickling (e.g. _thread.lock objects);
                # send their name as a best effort.
typ_serialized = pickle.dumps(typ.__name__, protocol=4)
d = {
"op": "task-finished",
"status": "OK",
"key": ts.key,
"nbytes": ts.nbytes,
"thread": self.threads.get(ts.key),
"type": typ_serialized,
"typename": typename(typ),
"metadata": ts.metadata,
}
elif ts.exception is not None:
d = {
"op": "task-erred",
"status": "error",
"key": ts.key,
"thread": self.threads.get(ts.key),
"exception": ts.exception,
"traceback": ts.traceback,
"exception_text": ts.exception_text,
"traceback_text": ts.traceback_text,
}
else:
logger.error("Key not ready to send to worker, %s: %s", ts.key, ts.state)
return
if ts.startstops:
d["startstops"] = ts.startstops
self.batched_stream.send(d)
def put_key_in_memory(self, ts, value, transition=True):
if ts.key in self.data:
ts.state = "memory"
return
if ts.key in self.actors:
self.actors[ts.key] = value
else:
start = time()
self.data[ts.key] = value
ts.state = "memory"
stop = time()
if stop - start > 0.020:
ts.startstops.append(
{"action": "disk-write", "start": start, "stop": stop}
)
if ts.nbytes is None:
ts.nbytes = sizeof(value)
ts.type = type(value)
for dep in ts.dependents:
try:
dep.waiting_for_data.remove(ts.key)
self.waiting_for_data_count -= 1
except KeyError:
pass
if not dep.waiting_for_data:
self.transition(dep, "ready")
self.log.append((ts.key, "put-in-memory"))
def select_keys_for_gather(self, worker, dep):
assert isinstance(dep, str)
deps = {dep}
total_bytes = self.tasks[dep].get_nbytes()
L = self.pending_data_per_worker[worker]
while L:
d = L.popleft()
ts = self.tasks.get(d)
if ts is None or ts.state != "fetch":
continue
if total_bytes + ts.get_nbytes() > self.target_message_size:
break
deps.add(d)
total_bytes += ts.get_nbytes()
return deps, total_bytes
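    # Illustrative walk-through (assumed sizes): with target_message_size = 50e6
    # and many 10 MB keys pending for `worker`, a call starting from key "k0"
    # keeps popping pending keys until adding one more would exceed the target,
    # returning e.g. ({"k0", "k1", "k2", "k3", "k4"}, 50_000_000).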
@property
def total_comm_bytes(self):
warnings.warn(
"The attribute `Worker.total_comm_bytes` has been renamed to `comm_threshold_bytes`. "
"Future versions will only support the new name.",
DeprecationWarning,
)
return self.comm_threshold_bytes
async def gather_dep(
self,
worker: str,
to_gather: Iterable[str],
total_nbytes: int,
cause: TaskState,
):
"""Gather dependencies for a task from a worker who has them
Parameters
----------
worker : str
Address of worker to gather dependencies from
to_gather : list
Keys of dependencies to gather from worker -- this is not
necessarily equivalent to the full list of dependencies of ``dep``
as some dependencies may already be present on this worker.
total_nbytes : int
Total number of bytes for all the dependencies in to_gather combined
cause : TaskState
Task we want to gather dependencies for
"""
if self.validate:
self.validate_state()
if self.status != Status.running:
return
with log_errors():
response = {}
to_gather_keys = set()
try:
if self.validate:
self.validate_state()
for dependency_key in to_gather:
dependency_ts = self.tasks.get(dependency_key)
if dependency_ts and dependency_ts.state == "flight":
to_gather_keys.add(dependency_key)
                # Keep the namespace clean since this function is long and has
                # many dep*/ts* variables
del to_gather, dependency_key, dependency_ts
self.log.append(("request-dep", cause.key, worker, to_gather_keys))
logger.debug(
"Request %d keys for task %s from %s",
len(to_gather_keys),
cause,
worker,
)
start = time()
response = await get_data_from_worker(
self.rpc, to_gather_keys, worker, who=self.address
)
stop = time()
if response["status"] == "busy":
self.log.append(("busy-gather", worker, to_gather_keys))
for key in to_gather_keys:
ts = self.tasks.get(key)
if ts and ts.state == "flight":
self.transition(ts, "fetch")
return
cause.startstops.append(
{
"action": "transfer",
"start": start + self.scheduler_delay,
"stop": stop + self.scheduler_delay,
"source": worker,
}
)
total_bytes = sum(
self.tasks[key].get_nbytes()
for key in response["data"]
if key in self.tasks
)
duration = (stop - start) or 0.010
bandwidth = total_bytes / duration
self.incoming_transfer_log.append(
{
"start": start + self.scheduler_delay,
"stop": stop + self.scheduler_delay,
"middle": (start + stop) / 2.0 + self.scheduler_delay,
"duration": duration,
"keys": {
key: self.tasks[key].nbytes
for key in response["data"]
if key in self.tasks
},
"total": total_bytes,
"bandwidth": bandwidth,
"who": worker,
}
)
if total_bytes > 1000000:
self.bandwidth = self.bandwidth * 0.95 + bandwidth * 0.05
bw, cnt = self.bandwidth_workers[worker]
self.bandwidth_workers[worker] = (bw + bandwidth, cnt + 1)
types = set(map(type, response["data"].values()))
if len(types) == 1:
[typ] = types
bw, cnt = self.bandwidth_types[typ]
self.bandwidth_types[typ] = (bw + bandwidth, cnt + 1)
if self.digests is not None:
self.digests["transfer-bandwidth"].add(total_bytes / duration)
self.digests["transfer-duration"].add(duration)
self.counters["transfer-count"].add(len(response["data"]))
self.incoming_count += 1
self.log.append(("receive-dep", worker, list(response["data"])))
except OSError:
logger.exception("Worker stream died during communication: %s", worker)
has_what = self.has_what.pop(worker)
self.pending_data_per_worker.pop(worker)
self.log.append(("receive-dep-failed", worker, has_what))
for d in has_what:
ts = self.tasks[d]
ts.who_has.remove(worker)
except Exception as e:
logger.exception(e)
if self.batched_stream and LOG_PDB:
import pdb
pdb.set_trace()
raise
finally:
self.comm_nbytes -= total_nbytes
busy = response.get("status", "") == "busy"
data = response.get("data", {})
# FIXME: We should not handle keys which were skipped by this coro. to_gather_keys is only a subset
assert set(to_gather_keys).issubset(
set(self.in_flight_workers.get(worker))
)
for d in self.in_flight_workers.pop(worker):
ts = self.tasks.get(d)
try:
if not busy and d in data:
self.transition(ts, "memory", value=data[d])
elif ts is None or ts.state == "executing":
self.log.append(("already-executing", d))
self.release_key(d, reason="already executing at gather")
elif ts.state == "flight" and not ts.dependents:
self.log.append(("flight no-dependents", d))
self.release_key(
d, reason="In-flight task no longer has dependents."
)
elif (
not busy
and d not in data
and ts.dependents
and ts.state != "memory"
):
ts.who_has.discard(worker)
self.has_what[worker].discard(ts.key)
self.log.append(("missing-dep", d))
self.batched_stream.send(
{
"op": "missing-data",
"errant_worker": worker,
"key": d,
}
)
self.transition(ts, "fetch")
elif ts.state not in ("ready", "memory"):
self.transition(ts, "fetch")
else:
logger.debug(
"Unexpected task state encountered for %r after gather_dep",
ts,
)
except Exception as exc:
emsg = error_message(exc)
assert ts is not None, ts
self.log.append(
(ts.key, "except-gather-dep-result", emsg, time())
)
# FIXME: We currently cannot release this task and its
# dependent safely
logger.debug(
"Exception occured while handling `gather_dep` response for %r",
ts,
exc_info=True,
)
if self.validate:
self.validate_state()
self.ensure_computing()
if not busy:
self.repetitively_busy = 0
self.ensure_communicating()
else:
# Exponential backoff to avoid hammering scheduler/worker
self.repetitively_busy += 1
await asyncio.sleep(0.100 * 1.5 ** self.repetitively_busy)
await self.query_who_has(*to_gather_keys)
self.ensure_communicating()
def bad_dep(self, dep):
        exc = ValueError(
            "Could not find dependency %s. Check worker logs" % str(dep.key)
        )
for ts in list(dep.dependents):
msg = error_message(exc)
ts.exception = msg["exception"]
ts.traceback = msg["traceback"]
ts.exception_text = msg["exception_text"]
ts.traceback_text = msg["traceback_text"]
self.transition(ts, "error")
self.release_key(dep.key, reason="bad dep")
async def handle_missing_dep(self, *deps, **kwargs):
self.log.append(("handle-missing", deps))
try:
deps = {dep for dep in deps if dep.dependents}
if not deps:
return
for dep in list(deps):
if (
self._suspicious_count_limit
and dep.suspicious_count > self._suspicious_count_limit
):
deps.remove(dep)
self.bad_dep(dep)
if not deps:
return
for dep in deps:
                logger.info(
                    "Dependency not found: %s (suspicious count: %s). Asking scheduler",
                    dep.key,
                    dep.suspicious_count,
                )
who_has = await retry_operation(
self.scheduler.who_has, keys=list(dep.key for dep in deps)
)
who_has = {k: v for k, v in who_has.items() if v}
self.update_who_has(who_has)
still_missing = set()
for dep in deps:
dep.suspicious_count += 1
if not who_has.get(dep.key):
logger.info(
"No workers found for %s",
dep.key,
)
self.log.append((dep.key, "no workers found", dep.dependents))
self.release_key(dep.key, reason="Handle missing no workers")
elif self.address in who_has and dep.state != "memory":
still_missing.add(dep)
self.batched_stream.send(
{
"op": "release-worker-data",
"keys": [dep.key],
"worker": self.address,
}
)
else:
logger.debug("New workers found for %s", dep.key)
self.log.append((dep.key, "new workers found"))
for dependent in dep.dependents:
if dep.key in dependent.waiting_for_data:
self.data_needed.append(dependent.key)
if still_missing:
                logger.debug(
                    "Found self-referencing who_has response from scheduler for keys %s.\n"
                    "Retrying handle_missing_dep",
                    deps,
                )
await self.handle_missing_dep(*deps)
except Exception:
logger.error("Handle missing dep failed, retrying", exc_info=True)
retries = kwargs.get("retries", 5)
self.log.append(("handle-missing-failed", retries, deps))
if retries > 0:
await self.handle_missing_dep(*deps, retries=retries - 1)
else:
raise
finally:
try:
for dep in deps:
self._missing_dep_flight.remove(dep.key)
except KeyError:
pass
self.ensure_communicating()
async def query_who_has(self, *deps):
with log_errors():
response = await retry_operation(self.scheduler.who_has, keys=deps)
self.update_who_has(response)
return response
def update_who_has(self, who_has):
try:
for dep, workers in who_has.items():
if not workers:
continue
if dep in self.tasks:
if self.address in workers and self.tasks[dep].state != "memory":
logger.debug(
"Scheduler claims worker %s holds data for task %s which is not true.",
self.name,
dep,
)
# Do not mutate the input dict. That's rude
workers = set(workers) - {self.address}
self.tasks[dep].who_has.update(workers)
for worker in workers:
self.has_what[worker].add(dep)
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def steal_request(self, key):
        # There may be a race condition between stealing and releasing a task.
        # In that case the task has already been removed from self.tasks, and
        # the `None` state will be registered as `already-computing` on the
        # other end.
ts = self.tasks.get(key)
if key in self.tasks:
state = ts.state
else:
state = None
response = {"op": "steal-response", "key": key, "state": state}
self.batched_stream.send(response)
if state in ("ready", "waiting", "constrained"):
# If task is marked as "constrained" we haven't yet assigned it an
# `available_resources` to run on, that happens in
# `transition_constrained_executing`
ts.scheduler_holds_ref = False
self.release_key(ts.key, reason="stolen")
if self.validate:
assert ts.key not in self.tasks
def release_key(
self,
key: Hashable,
cause: TaskState | None = None,
reason: str | None = None,
report: bool = True,
):
try:
if self.validate:
assert not isinstance(key, TaskState)
ts = self.tasks.get(key, None)
            # If the scheduler holds a reference, which is usually the case
            # when it instructed the task to be computed here or when data
            # was scattered, we must not release the task unless the
            # scheduler allows us to. See also handle_delete_data.
if ts is None or ts.scheduler_holds_ref:
return
logger.debug(
"Release key %s", {"key": key, "cause": cause, "reason": reason}
)
if cause:
self.log.append((key, "release-key", {"cause": cause}, reason))
else:
self.log.append((key, "release-key", reason))
if key in self.data:
try:
del self.data[key]
except FileNotFoundError:
logger.error("Tried to delete %s but no file found", exc_info=True)
if key in self.actors:
del self.actors[key]
for worker in ts.who_has:
self.has_what[worker].discard(ts.key)
ts.who_has.clear()
if key in self.threads:
del self.threads[key]
if ts.state == "executing":
self.executing_count -= 1
if ts.resource_restrictions is not None:
if ts.state == "executing":
for resource, quantity in ts.resource_restrictions.items():
self.available_resources[resource] += quantity
for d in ts.dependencies:
d.dependents.discard(ts)
if not d.dependents and d.state in ("flight", "fetch"):
self.release_key(d.key, reason="Dependent released")
if report:
# Inform the scheduler of keys which will have gone missing
# We are releasing them before they have completed
if ts.state in PROCESSING:
# This path is only hit with work stealing
msg = {"op": "release", "key": key, "cause": cause}
else:
# This path is only hit when calling release_key manually
msg = {
"op": "release-worker-data",
"keys": [key],
"worker": self.address,
}
self.batched_stream.send(msg)
self._notify_plugins("release_key", key, ts.state, cause, reason, report)
del self.tasks[key]
except CommClosedError:
pass
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
################
# Execute Task #
################
def run(self, comm, function, args=(), wait=True, kwargs=None):
return run(self, comm, function=function, args=args, kwargs=kwargs, wait=wait)
def run_coroutine(self, comm, function, args=(), kwargs=None, wait=True):
return run(self, comm, function=function, args=args, kwargs=kwargs, wait=wait)
async def plugin_add(self, comm=None, plugin=None, name=None):
with log_errors(pdb=False):
if isinstance(plugin, bytes):
plugin = pickle.loads(plugin)
if name is None:
name = _get_plugin_name(plugin)
assert name
if name in self.plugins:
await self.plugin_remove(comm=comm, name=name)
self.plugins[name] = plugin
logger.info("Starting Worker plugin %s" % name)
if hasattr(plugin, "setup"):
try:
result = plugin.setup(worker=self)
if isawaitable(result):
result = await result
except Exception as e:
msg = error_message(e)
return msg
return {"status": "OK"}
async def plugin_remove(self, comm=None, name=None):
with log_errors(pdb=False):
logger.info(f"Removing Worker plugin {name}")
try:
plugin = self.plugins.pop(name)
if hasattr(plugin, "teardown"):
result = plugin.teardown(worker=self)
if isawaitable(result):
result = await result
except Exception as e:
msg = error_message(e)
return msg
return {"status": "OK"}
async def actor_execute(
self,
comm=None,
actor=None,
function=None,
args=(),
kwargs: dict | None = None,
):
kwargs = kwargs or {}
separate_thread = kwargs.pop("separate_thread", True)
key = actor
actor = self.actors[key]
func = getattr(actor, function)
name = key_split(key) + "." + function
try:
if iscoroutinefunction(func):
result = await func(*args, **kwargs)
elif separate_thread:
result = await self.loop.run_in_executor(
self.executors["actor"],
apply_function_actor,
func,
args,
kwargs,
self.execution_state,
name,
self.active_threads,
self.active_threads_lock,
)
else:
result = func(*args, **kwargs)
return {"status": "OK", "result": to_serialize(result)}
except Exception as ex:
return {"status": "error", "exception": to_serialize(ex)}
def actor_attribute(self, comm=None, actor=None, attribute=None):
try:
value = getattr(self.actors[actor], attribute)
return {"status": "OK", "result": to_serialize(value)}
except Exception as ex:
return {"status": "error", "exception": to_serialize(ex)}
def meets_resource_constraints(self, key: str) -> bool:
ts = self.tasks[key]
if not ts.resource_restrictions:
return True
for resource, needed in ts.resource_restrictions.items():
if self.available_resources[resource] < needed:
return False
return True
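    # Example (hypothetical resource name): a task carrying
    # resource_restrictions={"GPU": 1} is runnable only while
    # self.available_resources["GPU"] >= 1; the quantity is debited in
    # transition_constrained_executing and credited back when the task
    # completes or is released.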
async def _maybe_deserialize_task(self, ts):
if not isinstance(ts.runspec, SerializedTask):
return ts.runspec
try:
start = time()
# Offload deserializing large tasks
if sizeof(ts.runspec) > OFFLOAD_THRESHOLD:
function, args, kwargs = await offload(_deserialize, *ts.runspec)
else:
function, args, kwargs = _deserialize(*ts.runspec)
stop = time()
if stop - start > 0.010:
ts.startstops.append(
{"action": "deserialize", "start": start, "stop": stop}
)
return function, args, kwargs
except Exception:
logger.error("Could not deserialize task", exc_info=True)
self.log.append((ts.key, "deserialize-error"))
raise
def ensure_computing(self):
if self.paused:
return
try:
while self.constrained and self.executing_count < self.nthreads:
key = self.constrained[0]
ts = self.tasks.get(key, None)
if ts is None or ts.state != "constrained":
self.constrained.popleft()
continue
if self.meets_resource_constraints(key):
self.constrained.popleft()
self.transition(ts, "executing")
else:
break
while self.ready and self.executing_count < self.nthreads:
priority, key = heapq.heappop(self.ready)
ts = self.tasks.get(key)
if ts is None:
                    # It is possible for tasks to be released while still
                    # remaining on `ready`. The scheduler might have re-routed
                    # the task to a new worker and told this worker to release
                    # it. If the task has "disappeared", just continue through
                    # the heap.
continue
elif ts.key in self.data:
self.transition(ts, "memory")
elif ts.state in READY:
self.transition(ts, "executing")
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
async def execute(self, key):
if self.status in (Status.closing, Status.closed, Status.closing_gracefully):
return
if key not in self.tasks:
return
ts = self.tasks[key]
if ts.state != "executing":
# This might happen if keys are canceled
logger.debug(
"Trying to execute a task %s which is not in executing state anymore"
% ts
)
return
try:
if self.validate:
assert not ts.waiting_for_data
assert ts.state == "executing"
assert ts.runspec is not None
function, args, kwargs = await self._maybe_deserialize_task(ts)
args2, kwargs2 = self._prepare_args_for_execution(ts, args, kwargs)
if ts.annotations is not None and "executor" in ts.annotations:
executor = ts.annotations["executor"]
else:
executor = "default"
assert executor in self.executors
assert key == ts.key
self.active_keys.add(ts.key)
try:
e = self.executors[executor]
ts.start_time = time()
if iscoroutinefunction(function):
result = await apply_function_async(
function,
args2,
kwargs2,
self.scheduler_delay,
)
elif "ThreadPoolExecutor" in str(type(e)):
result = await self.loop.run_in_executor(
e,
apply_function,
function,
args2,
kwargs2,
self.execution_state,
ts.key,
self.active_threads,
self.active_threads_lock,
self.scheduler_delay,
)
else:
result = await self.loop.run_in_executor(
e,
apply_function_simple,
function,
args2,
kwargs2,
self.scheduler_delay,
)
finally:
self.active_keys.discard(ts.key)
# We'll need to check again for the task state since it may have
# changed since the execution was kicked off. In particular, it may
# have been canceled and released already in which case we'll have
# to drop the result immediately
if ts.key not in self.tasks:
logger.debug(
"Dropping result for %s since task has already been released."
% ts.key
)
return
result["key"] = ts.key
value = result.pop("result", None)
ts.startstops.append(
{"action": "compute", "start": result["start"], "stop": result["stop"]}
)
self.threads[ts.key] = result["thread"]
if result["op"] == "task-finished":
ts.nbytes = result["nbytes"]
ts.type = result["type"]
self.transition(ts, "memory", value=value)
if self.digests is not None:
self.digests["task-duration"].add(result["stop"] - result["start"])
elif isinstance(result.pop("actual-exception"), Reschedule):
self.batched_stream.send({"op": "reschedule", "key": ts.key})
self.transition(ts, "rescheduled", report=False)
self.release_key(ts.key, report=False, reason="Reschedule")
else:
ts.exception = result["exception"]
ts.traceback = result["traceback"]
ts.exception_text = result["exception_text"]
ts.traceback_text = result["traceback_text"]
logger.warning(
"Compute Failed\n"
"Function: %s\n"
"args: %s\n"
"kwargs: %s\n"
"Exception: %r\n",
str(funcname(function))[:1000],
convert_args_to_str(args2, max_len=1000),
convert_kwargs_to_str(kwargs2, max_len=1000),
result["exception"].data,
)
self.transition(ts, "error")
logger.debug("Send compute response to scheduler: %s, %s", ts.key, result)
if self.validate:
assert ts.state != "executing"
assert not ts.waiting_for_data
except Exception as exc:
logger.error(
"Exception during execution of task %s.", ts.key, exc_info=True
)
emsg = error_message(exc)
ts.exception = emsg["exception"]
ts.traceback = emsg["traceback"]
ts.exception_text = emsg["exception_text"]
ts.traceback_text = emsg["traceback_text"]
self.transition(ts, "error")
finally:
self.ensure_computing()
self.ensure_communicating()
def _prepare_args_for_execution(self, ts, args, kwargs):
start = time()
data = {}
for dep in ts.dependencies:
k = dep.key
try:
data[k] = self.data[k]
except KeyError:
from .actor import Actor # TODO: create local actor
data[k] = Actor(type(self.actors[k]), self.address, k, self)
args2 = pack_data(args, data, key_types=(bytes, str))
kwargs2 = pack_data(kwargs, data, key_types=(bytes, str))
stop = time()
if stop - start > 0.005:
ts.startstops.append({"action": "disk-read", "start": start, "stop": stop})
if self.digests is not None:
self.digests["disk-load-duration"].add(stop - start)
return args2, kwargs2
##################
# Administrative #
##################
async def memory_monitor(self):
"""Track this process's memory usage and act accordingly
If we rise above 70% memory use, start dumping data to disk.
If we rise above 80% memory use, stop execution of new tasks
"""
if self._memory_monitoring:
return
self._memory_monitoring = True
total = 0
proc = self.monitor.proc
memory = proc.memory_info().rss
frac = memory / self.memory_limit
def check_pause(memory):
frac = memory / self.memory_limit
# Pause worker threads if above 80% memory use
if self.memory_pause_fraction and frac > self.memory_pause_fraction:
# Try to free some memory while in paused state
self._throttled_gc.collect()
if not self.paused:
logger.warning(
"Worker is at %d%% memory usage. Pausing worker. "
"Process memory: %s -- Worker memory limit: %s",
int(frac * 100),
format_bytes(memory),
format_bytes(self.memory_limit)
if self.memory_limit is not None
else "None",
)
self.paused = True
elif self.paused:
logger.warning(
"Worker is at %d%% memory usage. Resuming worker. "
"Process memory: %s -- Worker memory limit: %s",
int(frac * 100),
format_bytes(memory),
format_bytes(self.memory_limit)
if self.memory_limit is not None
else "None",
)
self.paused = False
self.ensure_computing()
check_pause(memory)
# Dump data to disk if above 70%
if self.memory_spill_fraction and frac > self.memory_spill_fraction:
logger.debug(
"Worker is at %.0f%% memory usage. Start spilling data to disk.",
frac * 100,
)
start = time()
target = self.memory_limit * self.memory_target_fraction
count = 0
need = memory - target
while memory > target:
if not self.data.fast:
logger.warning(
"Unmanaged memory use is high. This may indicate a memory leak "
"or the memory may not be released to the OS; see "
"https://distributed.dask.org/en/latest/worker.html#memtrim "
"for more information. "
"-- Unmanaged memory: %s -- Worker memory limit: %s",
format_bytes(memory),
format_bytes(self.memory_limit),
)
break
k, v, weight = self.data.fast.evict()
del k, v
total += weight
count += 1
                # If the buffer holds many small values, evicting them one at a
                # time is slow, and the worker might generate new data faster
                # than it can evict. Therefore, yield control to the event loop
                # only after at least 0.5s has been spent evicting.
if time() - start > 0.5:
await asyncio.sleep(0)
start = time()
memory = proc.memory_info().rss
if total > need and memory > target:
# Issue a GC to ensure that the evicted data is actually
# freed from memory and taken into account by the monitor
# before trying to evict even more data.
self._throttled_gc.collect()
memory = proc.memory_info().rss
check_pause(memory)
if count:
logger.debug(
"Moved %d tasks worth %s to disk",
count,
format_bytes(total),
)
self._memory_monitoring = False
return total
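    # Configuration sketch: the spill/pause fractions used above come from
    # dask's worker memory settings (the values shown are common defaults,
    # assumed here rather than taken from this file):
    #
    #     distributed:
    #       worker:
    #         memory:
    #           target: 0.60  # memory_target_fraction: eviction aims here
    #           spill: 0.70   # memory_spill_fraction: start spilling to disk
    #           pause: 0.80   # memory_pause_fraction: pause new task execution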
def cycle_profile(self):
now = time() + self.scheduler_delay
prof, self.profile_recent = self.profile_recent, profile.create()
self.profile_history.append((now, prof))
self.profile_keys_history.append((now, dict(self.profile_keys)))
self.profile_keys.clear()
def trigger_profile(self):
"""
Get a frame from all actively computing threads
Merge these frames into existing profile counts
"""
if not self.active_threads: # hope that this is thread-atomic?
return
start = time()
with self.active_threads_lock:
active_threads = self.active_threads.copy()
frames = sys._current_frames()
frames = {ident: frames[ident] for ident in active_threads}
llframes = {}
if self.low_level_profiler:
llframes = {ident: profile.ll_get_stack(ident) for ident in active_threads}
for ident, frame in frames.items():
if frame is not None:
key = key_split(active_threads[ident])
llframe = llframes.get(ident)
state = profile.process(
frame, True, self.profile_recent, stop="distributed/worker.py"
)
profile.llprocess(llframe, None, state)
profile.process(
frame, True, self.profile_keys[key], stop="distributed/worker.py"
)
stop = time()
if self.digests is not None:
self.digests["profile-duration"].add(stop - start)
async def get_profile(
self, comm=None, start=None, stop=None, key=None, server=False
):
now = time() + self.scheduler_delay
if server:
history = self.io_loop.profile
elif key is None:
history = self.profile_history
else:
history = [(t, d[key]) for t, d in self.profile_keys_history if key in d]
if start is None:
istart = 0
else:
istart = bisect.bisect_left(history, (start,))
        if stop is None:
            istop = None
        else:
            istop = bisect.bisect_right(history, (stop,)) + 1
            if istop >= len(history):
                istop = None  # include end
if istart == 0 and istop is None:
history = list(history)
else:
iistop = len(history) if istop is None else istop
history = [history[i] for i in range(istart, iistop)]
prof = profile.merge(*pluck(1, history))
if not history:
return profile.create()
if istop is None and (start is None or start < now):
if key is None:
recent = self.profile_recent
else:
recent = self.profile_keys[key]
prof = profile.merge(prof, recent)
return prof
async def get_profile_metadata(self, comm=None, start=0, stop=None):
add_recent = stop is None
now = time() + self.scheduler_delay
stop = stop or now
start = start or 0
result = {
"counts": [
(t, d["count"]) for t, d in self.profile_history if start < t < stop
],
"keys": [
(t, {k: d["count"] for k, d in v.items()})
for t, v in self.profile_keys_history
if start < t < stop
],
}
if add_recent:
result["counts"].append((now, self.profile_recent["count"]))
result["keys"].append(
(now, {k: v["count"] for k, v in self.profile_keys.items()})
)
return result
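    # Shape of the returned metadata (illustrative):
    #
    #     {
    #         "counts": [(timestamp, n_samples), ...],
    #         "keys": [(timestamp, {"task-prefix": n_samples, ...}), ...],
    #     }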
def get_call_stack(self, comm=None, keys=None):
with self.active_threads_lock:
frames = sys._current_frames()
active_threads = self.active_threads.copy()
frames = {k: frames[ident] for ident, k in active_threads.items()}
if keys is not None:
frames = {k: frame for k, frame in frames.items() if k in keys}
result = {k: profile.call_stack(frame) for k, frame in frames.items()}
return result
def _notify_plugins(self, method_name, *args, **kwargs):
for name, plugin in self.plugins.items():
if hasattr(plugin, method_name):
try:
getattr(plugin, method_name)(*args, **kwargs)
except Exception:
logger.info(
"Plugin '%s' failed with exception" % name, exc_info=True
)
##############
# Validation #
##############
def validate_task_memory(self, ts):
assert ts.key in self.data or ts.key in self.actors
assert isinstance(ts.nbytes, int)
assert not ts.waiting_for_data
assert ts.key not in self.ready
assert ts.state == "memory"
def validate_task_executing(self, ts):
assert ts.state == "executing"
assert ts.runspec is not None
assert ts.key not in self.data
assert not ts.waiting_for_data
assert all(
dep.key in self.data or dep.key in self.actors for dep in ts.dependencies
)
def validate_task_ready(self, ts):
assert ts.key in pluck(1, self.ready)
assert ts.key not in self.data
assert ts.state != "executing"
assert not ts.waiting_for_data
assert all(
dep.key in self.data or dep.key in self.actors for dep in ts.dependencies
)
def validate_task_waiting(self, ts):
assert ts.key not in self.data
assert ts.state == "waiting"
if ts.dependencies and ts.runspec:
assert not all(dep.key in self.data for dep in ts.dependencies)
def validate_task_flight(self, ts):
assert ts.key not in self.data
assert not any(dep.key in self.ready for dep in ts.dependents)
assert ts.coming_from
assert ts.coming_from in self.in_flight_workers
assert ts.key in self.in_flight_workers[ts.coming_from]
def validate_task_fetch(self, ts):
assert ts.runspec is None
assert ts.key not in self.data
assert self.address not in ts.who_has # !!!!!!!!
# FIXME This is currently not an invariant since upon comm failure we
# remove the erroneous worker from all who_has and correct the state
# upon the next ensure_communicate
# if not ts.who_has:
# # If we do not know who_has for a fetch task, it must be logged in
# # the missing dep. There should be a handle_missing_dep running for
# # all of these keys
# assert ts.key in self._missing_dep_flight, (
# ts.key,
# self.story(ts),
# self._missing_dep_flight.copy(),
# self.in_flight_workers.copy(),
# )
assert ts.dependents
for w in ts.who_has:
assert ts.key in self.has_what[w]
def validate_task(self, ts):
try:
if ts.state == "memory":
self.validate_task_memory(ts)
elif ts.state == "waiting":
self.validate_task_waiting(ts)
elif ts.state == "ready":
self.validate_task_ready(ts)
elif ts.state == "executing":
self.validate_task_executing(ts)
elif ts.state == "flight":
self.validate_task_flight(ts)
elif ts.state == "fetch":
self.validate_task_fetch(ts)
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def validate_state(self):
if self.status != Status.running:
return
try:
for ts in self.tasks.values():
assert ts.state is not None
# check that worker has task
for worker in ts.who_has:
assert ts.key in self.has_what[worker]
# check that deps have a set state and that dependency<->dependent links are there
for dep in ts.dependencies:
# self.tasks was just a dict of tasks
# and this check was originally that the key was in `task_state`
# so we may have popped the key out of `self.tasks` but the
# dependency can still be in `memory` before GC grabs it...?
# Might need better bookkeeping
assert dep.state is not None
assert ts in dep.dependents, ts
for key in ts.waiting_for_data:
ts_wait = self.tasks[key]
assert (
ts_wait.state == "flight"
or ts_wait.state == "fetch"
or ts_wait.key in self._missing_dep_flight
or ts_wait.who_has.issubset(self.in_flight_workers)
)
if ts.state == "memory":
assert isinstance(ts.nbytes, int)
assert not ts.waiting_for_data
assert ts.key in self.data or ts.key in self.actors
for worker, keys in self.has_what.items():
for k in keys:
assert worker in self.tasks[k].who_has
for ts in self.tasks.values():
self.validate_task(ts)
except Exception as e:
self.loop.add_callback(self.close)
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
#######################################
# Worker Clients (advanced workloads) #
#######################################
@property
def client(self) -> Client:
with self._lock:
if self._client:
return self._client
else:
return self._get_client()
def _get_client(self, timeout=None) -> Client:
"""Get local client attached to this worker
If no such client exists, create one
See Also
--------
get_client
"""
if timeout is None:
timeout = dask.config.get("distributed.comm.timeouts.connect")
timeout = parse_timedelta(timeout, "s")
try:
from .client import default_client
client = default_client()
except ValueError: # no clients found, need to make a new one
pass
else:
# must be lazy import otherwise cyclic import
from distributed.deploy.cluster import Cluster
if (
client.scheduler
and client.scheduler.address == self.scheduler.address
                # The conditions below should only occur when a second
                # cluster is alive, e.g. if a submitted task spawned its own
                # LocalCluster; see gh4565
or (
isinstance(client._start_arg, str)
and client._start_arg == self.scheduler.address
or isinstance(client._start_arg, Cluster)
and client._start_arg.scheduler_address == self.scheduler.address
)
):
self._client = client
if not self._client:
from .client import Client
asynchronous = self.loop is IOLoop.current()
self._client = Client(
self.scheduler,
loop=self.loop,
security=self.security,
set_as_default=True,
asynchronous=asynchronous,
direct_to_workers=True,
name="worker",
timeout=timeout,
)
Worker._initialized_clients.add(self._client)
if not asynchronous:
assert self._client.status == "running"
return self._client
def get_current_task(self):
"""Get the key of the task we are currently running
This only makes sense to run within a task
Examples
--------
>>> from dask.distributed import get_worker
>>> def f():
... return get_worker().get_current_task()
>>> future = client.submit(f) # doctest: +SKIP
>>> future.result() # doctest: +SKIP
'f-1234'
See Also
--------
get_worker
"""
return self.active_threads[threading.get_ident()]
def get_worker() -> Worker:
"""Get the worker currently running this task
Examples
--------
>>> def f():
... worker = get_worker() # The worker on which this task is running
... return worker.address
>>> future = client.submit(f) # doctest: +SKIP
>>> future.result() # doctest: +SKIP
'tcp://127.0.0.1:47373'
See Also
--------
get_client
worker_client
"""
try:
return thread_state.execution_state["worker"]
except AttributeError:
try:
return first(w for w in Worker._instances if w.status == Status.running)
except StopIteration:
raise ValueError("No workers found")
def get_client(address=None, timeout=None, resolve_address=True) -> Client:
"""Get a client while within a task.
This client connects to the same scheduler to which the worker is connected
Parameters
----------
address : str, optional
The address of the scheduler to connect to. Defaults to the scheduler
the worker is connected to.
timeout : int or str
Timeout (in seconds) for getting the Client. Defaults to the
``distributed.comm.timeouts.connect`` configuration value.
resolve_address : bool, default True
Whether to resolve `address` to its canonical form.
Returns
-------
Client
Examples
--------
>>> def f():
... client = get_client(timeout="10s")
... futures = client.map(lambda x: x + 1, range(10)) # spawn many tasks
... results = client.gather(futures)
... return sum(results)
>>> future = client.submit(f) # doctest: +SKIP
>>> future.result() # doctest: +SKIP
55
See Also
--------
get_worker
worker_client
secede
"""
if timeout is None:
timeout = dask.config.get("distributed.comm.timeouts.connect")
timeout = parse_timedelta(timeout, "s")
if address and resolve_address:
address = comm.resolve_address(address)
try:
worker = get_worker()
except ValueError: # could not find worker
pass
else:
if not address or worker.scheduler.address == address:
return worker._get_client(timeout=timeout)
from .client import Client
try:
client = Client.current() # TODO: assumes the same scheduler
except ValueError:
client = None
if client and (not address or client.scheduler.address == address):
return client
elif address:
return Client(address, timeout=timeout)
else:
raise ValueError("No global client found and no address provided")
def secede():
"""
Have this task secede from the worker's thread pool
This opens up a new scheduling slot and a new thread for a new task. This
enables the client to schedule tasks on this node, which is
especially useful while waiting for other jobs to finish (e.g., with
``client.gather``).
Examples
--------
>>> def mytask(x):
... # do some work
... client = get_client()
... futures = client.map(...) # do some remote work
    ... secede()  # while that work happens, remove ourselves from the pool
... return client.gather(futures) # return gathered results
See Also
--------
get_client
get_worker
"""
worker = get_worker()
tpe_secede() # have this thread secede from the thread pool
duration = time() - thread_state.start_time
worker.loop.add_callback(
worker.maybe_transition_long_running,
worker.tasks[thread_state.key],
compute_duration=duration,
)
class Reschedule(Exception):
"""Reschedule this task
Raising this exception will stop the current execution of the task and ask
the scheduler to reschedule this task, possibly on a different machine.
This does not guarantee that the task will move onto a different machine.
The scheduler will proceed through its normal heuristics to determine the
optimal machine to accept this task. The machine will likely change if the
load across the cluster has significantly changed since first scheduling
the task.
"""
def parse_memory_limit(memory_limit, nthreads, total_cores=CPU_COUNT) -> int | None:
if memory_limit is None:
return None
if memory_limit == "auto":
memory_limit = int(system.MEMORY_LIMIT * min(1, nthreads / total_cores))
with suppress(ValueError, TypeError):
memory_limit = float(memory_limit)
if isinstance(memory_limit, float) and memory_limit <= 1:
memory_limit = int(memory_limit * system.MEMORY_LIMIT)
if isinstance(memory_limit, str):
memory_limit = parse_bytes(memory_limit)
else:
memory_limit = int(memory_limit)
return min(memory_limit, system.MEMORY_LIMIT)
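# Behavior sketch for parse_memory_limit (actual numbers depend on the host):
#
#     parse_memory_limit(None, nthreads=4)    # -> None (no limit)
#     parse_memory_limit("auto", nthreads=4)  # min(1, nthreads / total_cores) of system memory
#     parse_memory_limit(0.5, nthreads=4)     # -> 50% of system.MEMORY_LIMIT
#     parse_memory_limit("4GB", nthreads=4)   # parse_bytes("4GB"), capped at system total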
async def get_data_from_worker(
rpc,
keys,
worker,
who=None,
max_connections=None,
serializers=None,
deserializers=None,
):
"""Get keys from worker
The worker has a two step handshake to acknowledge when data has been fully
delivered. This function implements that handshake.
See Also
--------
Worker.get_data
Worker.gather_deps
utils_comm.gather_data_from_workers
"""
if serializers is None:
serializers = rpc.serializers
if deserializers is None:
deserializers = rpc.deserializers
async def _get_data():
comm = await rpc.connect(worker)
comm.name = "Ephemeral Worker->Worker for gather"
try:
response = await send_recv(
comm,
serializers=serializers,
deserializers=deserializers,
op="get_data",
keys=keys,
who=who,
max_connections=max_connections,
)
try:
status = response["status"]
except KeyError:
raise ValueError("Unexpected response", response)
else:
if status == "OK":
await comm.write("OK")
return response
finally:
rpc.reuse(worker, comm)
return await retry_operation(_get_data, operation="get_data_from_worker")
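# Call sketch (assumes an existing rpc pool and a peer worker address):
#
#     response = await get_data_from_worker(
#         self.rpc, ["key-1", "key-2"], "tcp://10.0.0.2:40331", who=self.address
#     )
#     if response["status"] == "OK":  # may also be "busy"
#         data = response["data"]     # mapping of key -> deserialized value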
job_counter = [0]
cache_loads = LRU(maxsize=100)
def loads_function(bytes_object):
"""Load a function from bytes, cache bytes"""
if len(bytes_object) < 100000:
try:
result = cache_loads[bytes_object]
except KeyError:
result = pickle.loads(bytes_object)
cache_loads[bytes_object] = result
return result
return pickle.loads(bytes_object)
def _deserialize(function=None, args=None, kwargs=None, task=no_value):
"""Deserialize task inputs and regularize to func, args, kwargs"""
if function is not None:
function = loads_function(function)
if args and isinstance(args, bytes):
args = pickle.loads(args)
if kwargs and isinstance(kwargs, bytes):
kwargs = pickle.loads(kwargs)
if task is not no_value:
assert not function and not args and not kwargs
function = execute_task
args = (task,)
return function, args or (), kwargs or {}
def execute_task(task):
"""Evaluate a nested task
>>> inc = lambda x: x + 1
>>> execute_task((inc, 1))
2
>>> execute_task((sum, [1, 2, (inc, 3)]))
7
"""
if istask(task):
func, args = task[0], task[1:]
return func(*map(execute_task, args))
elif isinstance(task, list):
return list(map(execute_task, task))
else:
return task
cache_dumps = LRU(maxsize=100)
_cache_lock = threading.Lock()
def dumps_function(func) -> bytes:
"""Dump a function to bytes, cache functions"""
try:
with _cache_lock:
result = cache_dumps[func]
except KeyError:
result = pickle.dumps(func, protocol=4)
if len(result) < 100000:
with _cache_lock:
cache_dumps[func] = result
except TypeError: # Unhashable function
result = pickle.dumps(func, protocol=4)
return result
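# Round-trip sketch: dumps_function and loads_function pair up through small
# LRU caches, so repeatedly (de)serializing the same callable is cheap.
#
#     payload = dumps_function(max)          # result cached in cache_dumps
#     assert loads_function(payload) is max  # small payloads cached in cache_loads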
def dumps_task(task):
"""Serialize a dask task
Returns a dict of bytestrings that can each be loaded with ``loads``
Examples
--------
Either returns a task as a function, args, kwargs dict
>>> from operator import add
>>> dumps_task((add, 1)) # doctest: +SKIP
    {'function': b'\x80\x04\x95\x00\x8c\t_operator\x94\x8c\x03add\x94\x93\x94.',
     'args': b'\x80\x04\x95\x07\x00\x00\x00K\x01K\x02\x86\x94.'}
Or as a single task blob if it can't easily decompose the result. This
happens either if the task is highly nested, or if it isn't a task at all
>>> dumps_task(1) # doctest: +SKIP
{'task': b'\x80\x04\x95\x03\x00\x00\x00\x00\x00\x00\x00K\x01.'}
"""
if istask(task):
if task[0] is apply and not any(map(_maybe_complex, task[2:])):
d = {"function": dumps_function(task[1]), "args": warn_dumps(task[2])}
if len(task) == 4:
d["kwargs"] = warn_dumps(task[3])
return d
elif not any(map(_maybe_complex, task[1:])):
return {"function": dumps_function(task[0]), "args": warn_dumps(task[1:])}
return to_serialize(task)
_warn_dumps_warned = [False]
def warn_dumps(obj, dumps=pickle.dumps, limit=1e6):
"""Dump an object to bytes, warn if those bytes are large"""
b = dumps(obj, protocol=4)
if not _warn_dumps_warned[0] and len(b) > limit:
_warn_dumps_warned[0] = True
s = str(obj)
if len(s) > 70:
s = s[:50] + " ... " + s[-15:]
warnings.warn(
"Large object of size %s detected in task graph: \n"
" %s\n"
"Consider scattering large objects ahead of time\n"
"with client.scatter to reduce scheduler burden and \n"
"keep data on workers\n\n"
" future = client.submit(func, big_data) # bad\n\n"
" big_future = client.scatter(big_data) # good\n"
" future = client.submit(func, big_future) # good"
% (format_bytes(len(b)), s)
)
return b
def apply_function(
function,
args,
kwargs,
execution_state,
key,
active_threads,
active_threads_lock,
time_delay,
):
"""Run a function, collect information
Returns
-------
msg: dictionary with status, result/error, timings, etc..
"""
ident = threading.get_ident()
with active_threads_lock:
active_threads[ident] = key
thread_state.start_time = time()
thread_state.execution_state = execution_state
thread_state.key = key
msg = apply_function_simple(function, args, kwargs, time_delay)
with active_threads_lock:
del active_threads[ident]
return msg
def apply_function_simple(
function,
args,
kwargs,
time_delay,
):
"""Run a function, collect information
Returns
-------
msg: dictionary with status, result/error, timings, etc..
"""
ident = threading.get_ident()
start = time()
try:
result = function(*args, **kwargs)
except Exception as e:
msg = error_message(e)
msg["op"] = "task-erred"
msg["actual-exception"] = e
else:
msg = {
"op": "task-finished",
"status": "OK",
"result": result,
"nbytes": sizeof(result),
"type": type(result) if result is not None else None,
}
finally:
end = time()
msg["start"] = start + time_delay
msg["stop"] = end + time_delay
msg["thread"] = ident
return msg
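# Shape of the message built above (illustrative success case):
#
#     {
#         "op": "task-finished", "status": "OK",
#         "result": <value>, "nbytes": sizeof(<value>), "type": type(<value>),
#         "start": start + time_delay, "stop": end + time_delay, "thread": ident,
#     }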
async def apply_function_async(
function,
args,
kwargs,
time_delay,
):
"""Run a function, collect information
Returns
-------
msg: dictionary with status, result/error, timings, etc..
"""
ident = threading.get_ident()
start = time()
try:
result = await function(*args, **kwargs)
except Exception as e:
msg = error_message(e)
msg["op"] = "task-erred"
msg["actual-exception"] = e
else:
msg = {
"op": "task-finished",
"status": "OK",
"result": result,
"nbytes": sizeof(result),
"type": type(result) if result is not None else None,
}
finally:
end = time()
msg["start"] = start + time_delay
msg["stop"] = end + time_delay
msg["thread"] = ident
return msg
def apply_function_actor(
function, args, kwargs, execution_state, key, active_threads, active_threads_lock
):
"""Run a function, collect information
Returns
-------
msg: dictionary with status, result/error, timings, etc..
"""
ident = threading.get_ident()
with active_threads_lock:
active_threads[ident] = key
thread_state.execution_state = execution_state
thread_state.key = key
thread_state.actor = True
result = function(*args, **kwargs)
with active_threads_lock:
del active_threads[ident]
return result
def get_msg_safe_str(msg):
"""Make a worker msg, which contains args and kwargs, safe to cast to str:
allowing for some arguments to raise exceptions during conversion and
ignoring them.
"""
class Repr:
def __init__(self, f, val):
self._f = f
self._val = val
def __repr__(self):
return self._f(self._val)
msg = msg.copy()
if "args" in msg:
msg["args"] = Repr(convert_args_to_str, msg["args"])
if "kwargs" in msg:
msg["kwargs"] = Repr(convert_kwargs_to_str, msg["kwargs"])
return msg
def convert_args_to_str(args, max_len: int | None = None) -> str:
"""Convert args to a string, allowing for some arguments to raise
exceptions during conversion and ignoring them.
"""
length = 0
strs = ["" for i in range(len(args))]
for i, arg in enumerate(args):
try:
sarg = repr(arg)
except Exception:
sarg = "< could not convert arg to str >"
strs[i] = sarg
length += len(sarg) + 2
if max_len is not None and length > max_len:
return "({}".format(", ".join(strs[: i + 1]))[:max_len]
else:
return "({})".format(", ".join(strs))
def convert_kwargs_to_str(kwargs: dict, max_len: int | None = None) -> str:
"""Convert kwargs to a string, allowing for some arguments to raise
exceptions during conversion and ignoring them.
"""
length = 0
strs = ["" for i in range(len(kwargs))]
for i, (argname, arg) in enumerate(kwargs.items()):
try:
sarg = repr(arg)
except Exception:
sarg = "< could not convert arg to str >"
skwarg = repr(argname) + ": " + sarg
strs[i] = skwarg
length += len(skwarg) + 2
if max_len is not None and length > max_len:
return "{{{}".format(", ".join(strs[: i + 1]))[:max_len]
else:
return "{{{}}}".format(", ".join(strs))
async def run(server, comm, function, args=(), kwargs=None, is_coro=None, wait=True):
kwargs = kwargs or {}
function = pickle.loads(function)
if is_coro is None:
is_coro = iscoroutinefunction(function)
else:
warnings.warn(
"The is_coro= parameter is deprecated. "
"We now automatically detect coroutines/async functions"
)
assert wait or is_coro, "Combination not supported"
if args:
args = pickle.loads(args)
if kwargs:
kwargs = pickle.loads(kwargs)
if has_arg(function, "dask_worker"):
kwargs["dask_worker"] = server
if has_arg(function, "dask_scheduler"):
kwargs["dask_scheduler"] = server
logger.info("Run out-of-band function %r", funcname(function))
try:
if not is_coro:
result = function(*args, **kwargs)
else:
if wait:
result = await function(*args, **kwargs)
else:
server.loop.add_callback(function, *args, **kwargs)
result = None
except Exception as e:
logger.warning(
"Run Failed\nFunction: %s\nargs: %s\nkwargs: %s\n",
str(funcname(function))[:1000],
convert_args_to_str(args, max_len=1000),
convert_kwargs_to_str(kwargs, max_len=1000),
exc_info=True,
)
response = error_message(e)
else:
response = {"status": "OK", "result": to_serialize(result)}
return response
_global_workers = Worker._instances
try:
if nvml.device_get_count() < 1:
raise RuntimeError
except Exception:
pass
else:
async def gpu_metric(worker):
result = await offload(nvml.real_time)
return result
DEFAULT_METRICS["gpu"] = gpu_metric
def gpu_startup(worker):
return nvml.one_time()
DEFAULT_STARTUP_INFORMATION["gpu"] = gpu_startup
def print(*args, **kwargs):
"""Dask print function
This prints both wherever this function is run, and also in the user's
client session
"""
try:
worker = get_worker()
except ValueError:
pass
else:
msg = {
"args": tuple(stringify(arg) for arg in args),
"kwargs": {k: stringify(v) for k, v in kwargs.items()},
}
worker.log_event("print", msg)
builtins.print(*args, **kwargs)
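# Usage sketch (run inside a task): output appears on the worker's stdout and,
# via the logged "print" event, in the user's client session.
#
#     from distributed.worker import print  # this module; shadows builtins.print
#     def task():
#         print("progress:", 42)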
def warn(*args, **kwargs):
"""Dask warn function
This raises a warning both wherever this function is run, and also
in the user's client session
"""
try:
worker = get_worker()
except ValueError:
pass
else:
worker.log_event("warn", {"args": args, "kwargs": kwargs})
warnings.warn(*args, **kwargs)
from __future__ import annotations
import asyncio
import bisect
import builtins
import concurrent.futures
import errno
import heapq
import logging
import os
import random
import sys
import threading
import warnings
import weakref
from collections import defaultdict, deque, namedtuple
from collections.abc import Hashable, Iterable, MutableMapping
from contextlib import suppress
from datetime import timedelta
from inspect import isawaitable
from pickle import PicklingError
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from .client import Client
from tlz import first, keymap, merge, pluck from tornado.ioloop import IOLoop, PeriodicCallback
import dask
from dask.core import istask
from dask.system import CPU_COUNT
from dask.utils import (
apply,
format_bytes,
funcname,
parse_bytes,
parse_timedelta,
stringify,
typename,
)
from . import comm, preloading, profile, system, utils
from .batched import BatchedSend
from .comm import connect, get_address_host
from .comm.addressing import address_from_user_args, parse_address
from .comm.utils import OFFLOAD_THRESHOLD
from .core import (
CommClosedError,
Status,
coerce_to_address,
error_message,
pingpong,
send_recv,
)
from .diagnostics import nvml
from .diagnostics.plugin import _get_plugin_name
from .diskutils import WorkSpace
from .http import get_handlers
from .metrics import time
from .node import ServerNode
from .proctitle import setproctitle
from .protocol import pickle, to_serialize
from .pubsub import PubSubWorkerExtension
from .security import Security
from .sizeof import safe_sizeof as sizeof
from .threadpoolexecutor import ThreadPoolExecutor
from .threadpoolexecutor import secede as tpe_secede
from .utils import (
LRU,
TimeoutError,
_maybe_complex,
get_ip,
has_arg,
import_file,
iscoroutinefunction,
json_load_robust,
key_split,
log_errors,
offload,
parse_ports,
silence_logging,
thread_state,
warn_on_duration,
)
from .utils_comm import gather_from_workers, pack_data, retry_operation
from .utils_perf import ThrottledGC, disable_gc_diagnosis, enable_gc_diagnosis
from .versions import get_versions
logger = logging.getLogger(__name__)
LOG_PDB = dask.config.get("distributed.admin.pdb-on-err")
no_value = "--no-value-sentinel--"
IN_PLAY = ("waiting", "ready", "executing", "long-running")
PENDING = ("waiting", "ready", "constrained")
PROCESSING = ("waiting", "ready", "constrained", "executing", "long-running")
READY = ("ready", "constrained")
DEFAULT_EXTENSIONS = [PubSubWorkerExtension]
DEFAULT_METRICS = {}
DEFAULT_STARTUP_INFORMATION = {}
DEFAULT_DATA_SIZE = parse_bytes(
dask.config.get("distributed.scheduler.default-data-size")
)
SerializedTask = namedtuple("SerializedTask", ["function", "args", "kwargs", "task"])
class TaskState:
def __init__(self, key, runspec=None):
assert key is not None
self.key = key
self.runspec = runspec
self.dependencies = set()
self.dependents = set()
self.duration = None
self.priority = None
self.state = "new"
self.who_has = set()
self.coming_from = None
self.waiting_for_data = set()
self.resource_restrictions = None
self.exception = None
self.exception_text = ""
self.traceback = None
self.traceback_text = ""
self.type = None
self.suspicious_count = 0
self.startstops = list()
self.start_time = None
self.stop_time = None
self.metadata = {}
self.nbytes = None
self.annotations = None
self.scheduler_holds_ref = False
def __repr__(self):
return f"<Task {self.key!r} {self.state}>"
def get_nbytes(self) -> int:
nbytes = self.nbytes
return nbytes if nbytes is not None else DEFAULT_DATA_SIZE
class Worker(ServerNode):
_instances = weakref.WeakSet()
_initialized_clients = weakref.WeakSet()
def __init__(
self,
scheduler_ip=None,
scheduler_port=None,
scheduler_file=None,
ncores=None,
nthreads=None,
loop=None,
local_dir=None,
local_directory=None,
services=None,
service_ports=None,
service_kwargs=None,
name=None,
reconnect=True,
memory_limit="auto",
executor=None,
resources=None,
silence_logs=None,
death_timeout=None,
preload=None,
preload_argv=None,
security=None,
contact_address=None,
memory_monitor_interval="200ms",
extensions=None,
metrics=DEFAULT_METRICS,
startup_information=DEFAULT_STARTUP_INFORMATION,
data=None,
interface=None,
host=None,
port=None,
protocol=None,
dashboard_address=None,
dashboard=False,
http_prefix="/",
nanny=None,
plugins=(),
low_level_profiler=dask.config.get("distributed.worker.profile.low-level"),
validate=None,
profile_cycle_interval=None,
lifetime=None,
lifetime_stagger=None,
lifetime_restart=None,
**kwargs,
):
self.tasks = dict()
self.waiting_for_data_count = 0
self.has_what = defaultdict(set)
self.pending_data_per_worker = defaultdict(deque)
self.nanny = nanny
self._lock = threading.Lock()
self.data_needed = deque()
self.in_flight_tasks = 0
self.in_flight_workers = dict()
self.total_out_connections = dask.config.get(
"distributed.worker.connections.outgoing"
)
self.total_in_connections = dask.config.get(
"distributed.worker.connections.incoming"
)
self.comm_threshold_bytes = 10e6
self.comm_nbytes = 0
self._missing_dep_flight = set()
self.threads = dict()
self.active_threads_lock = threading.Lock()
self.active_threads = dict()
self.active_keys = set()
self.profile_keys = defaultdict(profile.create)
self.profile_keys_history = deque(maxlen=3600)
self.profile_recent = profile.create()
self.profile_history = deque(maxlen=3600)
self.generation = 0
self.ready = list()
self.constrained = deque()
self.executing_count = 0
self.executed_count = 0
self.long_running = set()
self.recent_messages_log = deque(
maxlen=dask.config.get("distributed.comm.recent-messages-log-length")
)
self.target_message_size = 50e6
self.log = deque(maxlen=100000)
if validate is None:
validate = dask.config.get("distributed.scheduler.validate")
self.validate = validate
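        # Task state machine: map a (start, finish) state pair to the handler
        # that performs the move. Worker.transition() dispatches through this
        # table and logs every transition.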
self._transitions = {
("new", "waiting"): self.transition_new_waiting,
("new", "fetch"): self.transition_new_fetch,
("waiting", "ready"): self.transition_waiting_ready,
("fetch", "flight"): self.transition_fetch_flight,
("ready", "executing"): self.transition_ready_executing,
("executing", "memory"): self.transition_executing_done,
("flight", "memory"): self.transition_flight_memory,
("flight", "fetch"): self.transition_flight_fetch,
("ready", "memory"): self.transition_ready_memory,
# Scheduler intercession (re-assignment)
("fetch", "waiting"): self.transition_fetch_waiting,
("flight", "waiting"): self.transition_flight_waiting,
# Errors, long-running, constrained
("waiting", "error"): self.transition_waiting_done,
("constrained", "executing"): self.transition_constrained_executing,
("executing", "error"): self.transition_executing_done,
("executing", "rescheduled"): self.transition_executing_done,
("executing", "long-running"): self.transition_executing_long_running,
("long-running", "error"): self.transition_executing_done,
("long-running", "memory"): self.transition_executing_done,
("long-running", "rescheduled"): self.transition_executing_done,
}
self.incoming_transfer_log = deque(maxlen=100000)
self.incoming_count = 0
self.outgoing_transfer_log = deque(maxlen=100000)
self.outgoing_count = 0
self.outgoing_current_count = 0
self.repetitively_busy = 0
self.bandwidth = parse_bytes(dask.config.get("distributed.scheduler.bandwidth"))
self.bandwidth_workers = defaultdict(
lambda: (0, 0)
) # bw/count recent transfers
self.bandwidth_types = defaultdict(lambda: (0, 0)) # bw/count recent transfers
self.latency = 0.001
self._client = None
if profile_cycle_interval is None:
profile_cycle_interval = dask.config.get("distributed.worker.profile.cycle")
profile_cycle_interval = parse_timedelta(profile_cycle_interval, default="ms")
self._setup_logging(logger)
if local_dir is not None:
warnings.warn("The local_dir keyword has moved to local_directory")
local_directory = local_dir
if not local_directory:
local_directory = dask.config.get("temporary-directory") or os.getcwd()
os.makedirs(local_directory, exist_ok=True)
local_directory = os.path.join(local_directory, "dask-worker-space")
with warn_on_duration(
"1s",
"Creating scratch directories is taking a surprisingly long time. "
"This is often due to running workers on a network file system. "
"Consider specifying a local-directory to point workers to write "
"scratch data to a local disk.",
):
self._workspace = WorkSpace(os.path.abspath(local_directory))
self._workdir = self._workspace.new_work_dir(prefix="worker-")
self.local_directory = self._workdir.dir_path
if preload is None:
preload = dask.config.get("distributed.worker.preload")
if preload_argv is None:
preload_argv = dask.config.get("distributed.worker.preload-argv")
self.preloads = preloading.process_preloads(
self, preload, preload_argv, file_dir=self.local_directory
)
if scheduler_file:
cfg = json_load_robust(scheduler_file)
scheduler_addr = cfg["address"]
elif scheduler_ip is None and dask.config.get("scheduler-address", None):
scheduler_addr = dask.config.get("scheduler-address")
elif scheduler_port is None:
scheduler_addr = coerce_to_address(scheduler_ip)
else:
scheduler_addr = coerce_to_address((scheduler_ip, scheduler_port))
self.contact_address = contact_address
if protocol is None:
protocol_address = scheduler_addr.split("://")
if len(protocol_address) == 2:
protocol = protocol_address[0]
self._start_port = port
self._start_host = host
if host:
# Helpful error message if IPv6 specified incorrectly
_, host_address = parse_address(host)
if host_address.count(":") > 1 and not host_address.startswith("["):
raise ValueError(
"Host address with IPv6 must be bracketed like '[::1]'; "
f"got {host_address}"
)
self._interface = interface
self._protocol = protocol
if ncores is not None:
warnings.warn("the ncores= parameter has moved to nthreads=")
nthreads = ncores
self.nthreads = nthreads or CPU_COUNT
if resources is None:
resources = dask.config.get("distributed.worker.resources", None)
self.total_resources = resources or {}
self.available_resources = (resources or {}).copy()
self.death_timeout = parse_timedelta(death_timeout)
self.extensions = dict()
if silence_logs:
silence_logging(level=silence_logs)
if isinstance(security, dict):
security = Security(**security)
self.security = security or Security()
assert isinstance(self.security, Security)
self.connection_args = self.security.get_connection_args("worker")
self.memory_limit = parse_memory_limit(memory_limit, self.nthreads)
self.paused = False
if "memory_target_fraction" in kwargs:
self.memory_target_fraction = kwargs.pop("memory_target_fraction")
else:
self.memory_target_fraction = dask.config.get(
"distributed.worker.memory.target"
)
if "memory_spill_fraction" in kwargs:
self.memory_spill_fraction = kwargs.pop("memory_spill_fraction")
else:
self.memory_spill_fraction = dask.config.get(
"distributed.worker.memory.spill"
)
if "memory_pause_fraction" in kwargs:
self.memory_pause_fraction = kwargs.pop("memory_pause_fraction")
else:
self.memory_pause_fraction = dask.config.get(
"distributed.worker.memory.pause"
)
if isinstance(data, MutableMapping):
self.data = data
elif callable(data):
self.data = data()
elif isinstance(data, tuple):
self.data = data[0](**data[1])
elif self.memory_limit and (
self.memory_target_fraction or self.memory_spill_fraction
):
from .spill import SpillBuffer
self.data = SpillBuffer(
os.path.join(self.local_directory, "storage"),
target=int(
self.memory_limit
* (self.memory_target_fraction or self.memory_spill_fraction)
)
or sys.maxsize,
)
else:
self.data = dict()
self.actors = {}
self.loop = loop or IOLoop.current()
self.reconnect = reconnect
# Common executors always available
self.executors: dict[str, concurrent.futures.Executor] = {
"offload": utils._offload_executor,
"actor": ThreadPoolExecutor(1, thread_name_prefix="Dask-Actor-Threads"),
}
if nvml.device_get_count() > 0:
self.executors["gpu"] = ThreadPoolExecutor(
1, thread_name_prefix="Dask-GPU-Threads"
)
# Find the default executor
if executor == "offload":
self.executors["default"] = self.executors["offload"]
elif isinstance(executor, dict):
self.executors.update(executor)
elif executor is not None:
self.executors["default"] = executor
if "default" not in self.executors:
self.executors["default"] = ThreadPoolExecutor(
self.nthreads, thread_name_prefix="Dask-Default-Threads"
)
self.batched_stream = BatchedSend(interval="2ms", loop=self.loop)
self.name = name
self.scheduler_delay = 0
self.stream_comms = dict()
self.heartbeat_active = False
self._ipython_kernel = None
if self.local_directory not in sys.path:
sys.path.insert(0, self.local_directory)
self.services = {}
self.service_specs = services or {}
self._dashboard_address = dashboard_address
self._dashboard = dashboard
self._http_prefix = http_prefix
self.metrics = dict(metrics) if metrics else {}
self.startup_information = (
dict(startup_information) if startup_information else {}
)
self.low_level_profiler = low_level_profiler
handlers = {
"gather": self.gather,
"run": self.run,
"run_coroutine": self.run_coroutine,
"get_data": self.get_data,
"update_data": self.update_data,
"free_keys": self.handle_free_keys,
"terminate": self.close,
"ping": pingpong,
"upload_file": self.upload_file,
"start_ipython": self.start_ipython,
"call_stack": self.get_call_stack,
"profile": self.get_profile,
"profile_metadata": self.get_profile_metadata,
"get_logs": self.get_logs,
"keys": self.keys,
"versions": self.versions,
"actor_execute": self.actor_execute,
"actor_attribute": self.actor_attribute,
"plugin-add": self.plugin_add,
"plugin-remove": self.plugin_remove,
"get_monitor_info": self.get_monitor_info,
}
stream_handlers = {
"close": self.close,
"compute-task": self.add_task,
"cancel-compute": self.cancel_compute,
"free-keys": self.handle_free_keys,
"superfluous-data": self.handle_superfluous_data,
"steal-request": self.steal_request,
}
super().__init__(
handlers=handlers,
stream_handlers=stream_handlers,
io_loop=self.loop,
connection_args=self.connection_args,
**kwargs,
)
self.scheduler = self.rpc(scheduler_addr)
self.execution_state = {
"scheduler": self.scheduler.address,
"ioloop": self.loop,
"worker": self,
}
pc = PeriodicCallback(self.heartbeat, 1000)
self.periodic_callbacks["heartbeat"] = pc
pc = PeriodicCallback(
lambda: self.batched_stream.send({"op": "keep-alive"}), 60000
)
self.periodic_callbacks["keep-alive"] = pc
self._suspicious_count_limit = 10
self._address = contact_address
self.memory_monitor_interval = parse_timedelta(
memory_monitor_interval, default="ms"
)
if self.memory_limit:
self._memory_monitoring = False
pc = PeriodicCallback(
self.memory_monitor, self.memory_monitor_interval * 1000
)
self.periodic_callbacks["memory"] = pc
if extensions is None:
extensions = DEFAULT_EXTENSIONS
for ext in extensions:
ext(self)
self._throttled_gc = ThrottledGC(logger=logger)
setproctitle("dask-worker [not started]")
profile_trigger_interval = parse_timedelta(
dask.config.get("distributed.worker.profile.interval"), default="ms"
)
pc = PeriodicCallback(self.trigger_profile, profile_trigger_interval * 1000)
self.periodic_callbacks["profile"] = pc
pc = PeriodicCallback(self.cycle_profile, profile_cycle_interval * 1000)
self.periodic_callbacks["profile-cycle"] = pc
self.plugins = {}
self._pending_plugins = plugins
self.lifetime = lifetime or dask.config.get(
"distributed.worker.lifetime.duration"
)
lifetime_stagger = lifetime_stagger or dask.config.get(
"distributed.worker.lifetime.stagger"
)
self.lifetime_restart = lifetime_restart or dask.config.get(
"distributed.worker.lifetime.restart"
)
if isinstance(self.lifetime, str):
self.lifetime = parse_timedelta(self.lifetime)
if isinstance(lifetime_stagger, str):
lifetime_stagger = parse_timedelta(lifetime_stagger)
if self.lifetime:
self.lifetime += (random.random() * 2 - 1) * lifetime_stagger
self.io_loop.call_later(self.lifetime, self.close_gracefully)
Worker._instances.add(self)
##################
# Administrative #
##################
def __repr__(self):
return "<%s: %r, %s, %s, stored: %d, running: %d/%d, ready: %d, comm: %d, waiting: %d>" % (
self.__class__.__name__,
self.address,
self.name,
self.status,
len(self.data),
self.executing_count,
self.nthreads,
len(self.ready),
self.in_flight_tasks,
self.waiting_for_data_count,
)
@property
def logs(self):
return self._deque_handler.deque
def log_event(self, topic, msg):
self.batched_stream.send(
{
"op": "log-event",
"topic": topic,
"msg": msg,
}
)
@property
def worker_address(self):
return self.address
@property
def local_dir(self):
warnings.warn(
"The local_dir attribute has moved to local_directory", stacklevel=2
)
return self.local_directory
@property
def executor(self):
return self.executors["default"]
async def get_metrics(self):
out = dict(
executing=self.executing_count,
in_memory=len(self.data),
ready=len(self.ready),
in_flight=self.in_flight_tasks,
bandwidth={
"total": self.bandwidth,
"workers": dict(self.bandwidth_workers),
"types": keymap(typename, self.bandwidth_types),
},
spilled_nbytes=getattr(self.data, "spilled_total", 0),
)
out.update(self.monitor.recent())
for k, metric in self.metrics.items():
try:
result = metric(self)
if isawaitable(result):
result = await result
# In case of collision, prefer core metrics
out.setdefault(k, result)
except Exception: # TODO: log error once
pass
return out
async def get_startup_information(self):
result = {}
for k, f in self.startup_information.items():
try:
v = f(self)
if isawaitable(v):
v = await v
result[k] = v
except Exception: # TODO: log error once
pass
return result
def identity(self, comm=None):
return {
"type": type(self).__name__,
"id": self.id,
"scheduler": self.scheduler.address,
"nthreads": self.nthreads,
"ncores": self.nthreads, # backwards compatibility
"memory_limit": self.memory_limit,
}
#####################
# External Services #
#####################
async def _register_with_scheduler(self):
self.periodic_callbacks["keep-alive"].stop()
self.periodic_callbacks["heartbeat"].stop()
start = time()
if self.contact_address is None:
self.contact_address = self.address
logger.info("-" * 49)
while True:
try:
_start = time()
comm = await connect(self.scheduler.address, **self.connection_args)
comm.name = "Worker->Scheduler"
comm._server = weakref.ref(self)
await comm.write(
dict(
op="register-worker",
reply=False,
address=self.contact_address,
keys=list(self.data),
nthreads=self.nthreads,
name=self.name,
nbytes={
ts.key: ts.get_nbytes()
for ts in self.tasks.values()
                            # Only report nbytes for tasks already in memory;
                            # for anything else we would just be submitting
                            # the default value
if ts.state == "memory"
},
types={k: typename(v) for k, v in self.data.items()},
now=time(),
resources=self.total_resources,
memory_limit=self.memory_limit,
local_directory=self.local_directory,
services=self.service_ports,
nanny=self.nanny,
pid=os.getpid(),
versions=get_versions(),
metrics=await self.get_metrics(),
extra=await self.get_startup_information(),
),
serializers=["msgpack"],
)
future = comm.read(deserializers=["msgpack"])
response = await future
if response.get("warning"):
logger.warning(response["warning"])
_end = time()
middle = (_start + _end) / 2
self._update_latency(_end - start)
self.scheduler_delay = response["time"] - middle
self.status = Status.running
break
except OSError:
logger.info("Waiting to connect to: %26s", self.scheduler.address)
await asyncio.sleep(0.1)
except TimeoutError:
logger.info("Timed out when connecting to scheduler")
if response["status"] != "OK":
raise ValueError(f"Unexpected response from register: {response!r}")
else:
await asyncio.gather(
*(
self.plugin_add(name=name, plugin=plugin)
for name, plugin in response["worker-plugins"].items()
)
)
logger.info(" Registered to: %26s", self.scheduler.address)
logger.info("-" * 49)
self.batched_stream.start(comm)
self.periodic_callbacks["keep-alive"].start()
self.periodic_callbacks["heartbeat"].start()
self.loop.add_callback(self.handle_scheduler, comm)
def _update_latency(self, latency):
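        """Fold a new latency sample into an exponentially weighted moving
        average (weight 0.05 on the new sample)."""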
self.latency = latency * 0.05 + self.latency * 0.95
if self.digests is not None:
self.digests["latency"].add(latency)
async def heartbeat(self):
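        """Send a heartbeat with current metrics and executing tasks to the
        scheduler; if the scheduler reports this worker as missing,
        re-register with it."""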
if self.heartbeat_active:
logger.debug("Heartbeat skipped: channel busy")
return
self.heartbeat_active = True
logger.debug("Heartbeat: %s", self.address)
try:
start = time()
response = await retry_operation(
self.scheduler.heartbeat_worker,
address=self.contact_address,
now=start,
metrics=await self.get_metrics(),
executing={
key: start - self.tasks[key].start_time
for key in self.active_keys
if key in self.tasks
},
)
end = time()
middle = (start + end) / 2
self._update_latency(end - start)
if response["status"] == "missing":
for i in range(10):
if self.status != Status.running:
break
else:
await asyncio.sleep(0.05)
else:
await self._register_with_scheduler()
return
self.scheduler_delay = response["time"] - middle
self.periodic_callbacks["heartbeat"].callback_time = (
response["heartbeat-interval"] * 1000
)
self.bandwidth_workers.clear()
self.bandwidth_types.clear()
except CommClosedError:
logger.warning("Heartbeat to scheduler failed", exc_info=True)
if not self.reconnect:
await self.close(report=False)
except OSError as e:
# Scheduler is gone. Respect distributed.comm.timeouts.connect
if "Timed out trying to connect" in str(e):
await self.close(report=False)
else:
raise e
finally:
self.heartbeat_active = False
async def handle_scheduler(self, comm):
try:
await self.handle_stream(
comm, every_cycle=[self.ensure_communicating, self.ensure_computing]
)
except Exception as e:
logger.exception(e)
raise
finally:
if self.reconnect and self.status == Status.running:
logger.info("Connection to scheduler broken. Reconnecting...")
self.loop.add_callback(self.heartbeat)
else:
await self.close(report=False)
def start_ipython(self, comm):
from ._ipython_utils import start_ipython
if self._ipython_kernel is None:
self._ipython_kernel = start_ipython(
ip=self.ip, ns={"worker": self}, log=logger
)
return self._ipython_kernel.get_connection_info()
async def upload_file(self, comm, filename=None, data=None, load=True):
out_filename = os.path.join(self.local_directory, filename)
def func(data):
if isinstance(data, str):
data = data.encode()
with open(out_filename, "wb") as f:
f.write(data)
f.flush()
return data
if len(data) < 10000:
data = func(data)
else:
data = await offload(func, data)
if load:
try:
import_file(out_filename)
cache_loads.data.clear()
except Exception as e:
logger.exception(e)
raise e
return {"status": "OK", "nbytes": len(data)}
def keys(self, comm=None):
return list(self.data)
async def gather(self, comm=None, who_has=None):
who_has = {
k: [coerce_to_address(addr) for addr in v]
for k, v in who_has.items()
if k not in self.data
}
result, missing_keys, missing_workers = await gather_from_workers(
who_has, rpc=self.rpc, who=self.address
)
self.update_data(data=result, report=False)
if missing_keys:
logger.warning(
"Could not find data: %s on workers: %s (who_has: %s)",
missing_keys,
missing_workers,
who_has,
)
return {"status": "partial-fail", "keys": missing_keys}
else:
return {"status": "OK"}
def get_monitor_info(self, comm=None, recent=False, start=0):
result = dict(
range_query=(
self.monitor.recent()
if recent
else self.monitor.range_query(start=start)
),
count=self.monitor.count,
last_time=self.monitor.last_time,
)
if nvml.device_get_count() > 0:
result["gpu_name"] = self.monitor.gpu_name
result["gpu_memory_total"] = self.monitor.gpu_memory_total
return result
#############
# Lifecycle #
#############
async def start(self):
if self.status and self.status in (
Status.closed,
Status.closing,
Status.closing_gracefully,
):
return
assert self.status is Status.undefined, self.status
await super().start()
enable_gc_diagnosis()
thread_state.on_event_loop_thread = True
ports = parse_ports(self._start_port)
for port in ports:
start_address = address_from_user_args(
host=self._start_host,
port=port,
interface=self._interface,
protocol=self._protocol,
security=self.security,
)
kwargs = self.security.get_listen_args("worker")
if self._protocol in ("tcp", "tls"):
kwargs = kwargs.copy()
kwargs["default_host"] = get_ip(
get_address_host(self.scheduler.address)
)
try:
await self.listen(start_address, **kwargs)
except OSError as e:
if len(ports) > 1 and e.errno == errno.EADDRINUSE:
continue
else:
raise
else:
self._start_address = start_address
break
else:
raise ValueError(
f"Could not start Worker on host {self._start_host}"
f"with port {self._start_port}"
)
# Start HTTP server associated with this Worker node
routes = get_handlers(
server=self,
modules=dask.config.get("distributed.worker.http.routes"),
prefix=self._http_prefix,
)
self.start_http_server(routes, self._dashboard_address)
if self._dashboard:
try:
import distributed.dashboard.worker
except ImportError:
logger.debug("To start diagnostics web server please install Bokeh")
else:
distributed.dashboard.worker.connect(
self.http_application,
self.http_server,
self,
prefix=self._http_prefix,
)
self.ip = get_address_host(self.address)
if self.name is None:
self.name = self.address
for preload in self.preloads:
await preload.start()
# Services listen on all addresses
# Note Nanny is not a "real" service, just some metadata
# passed in service_ports...
self.start_services(self.ip)
try:
listening_address = "%s%s:%d" % (self.listener.prefix, self.ip, self.port)
except Exception:
listening_address = f"{self.listener.prefix}{self.ip}"
logger.info(" Start worker at: %26s", self.address)
logger.info(" Listening to: %26s", listening_address)
for k, v in self.service_ports.items():
logger.info(" {:>16} at: {:>26}".format(k, self.ip + ":" + str(v)))
logger.info("Waiting to connect to: %26s", self.scheduler.address)
logger.info("-" * 49)
logger.info(" Threads: %26d", self.nthreads)
if self.memory_limit:
logger.info(" Memory: %26s", format_bytes(self.memory_limit))
logger.info(" Local Directory: %26s", self.local_directory)
setproctitle("dask-worker [%s]" % self.address)
await asyncio.gather(
*(self.plugin_add(plugin=plugin) for plugin in self._pending_plugins)
)
self._pending_plugins = ()
await self._register_with_scheduler()
self.start_periodic_callbacks()
return self
def _close(self, *args, **kwargs):
warnings.warn("Worker._close has moved to Worker.close", stacklevel=2)
return self.close(*args, **kwargs)
async def close(
self, report=True, timeout=30, nanny=True, executor_wait=True, safe=False
):
with log_errors():
if self.status in (Status.closed, Status.closing):
await self.finished()
return
self.reconnect = False
disable_gc_diagnosis()
try:
logger.info("Stopping worker at %s", self.address)
except ValueError: # address not available if already closed
logger.info("Stopping worker")
if self.status not in (Status.running, Status.closing_gracefully):
logger.info("Closed worker has not yet started: %s", self.status)
self.status = Status.closing
for preload in self.preloads:
await preload.teardown()
if nanny and self.nanny:
with self.rpc(self.nanny) as r:
await r.close_gracefully()
setproctitle("dask-worker [closing]")
teardowns = [
plugin.teardown(self)
for plugin in self.plugins.values()
if hasattr(plugin, "teardown")
]
await asyncio.gather(*(td for td in teardowns if isawaitable(td)))
for pc in self.periodic_callbacks.values():
pc.stop()
if self._client:
# If this worker is the last one alive, clean up the worker
# initialized clients
if not any(
w
for w in Worker._instances
if w != self and w.status == Status.running
):
for c in Worker._initialized_clients:
                        # Regardless of what the client was initialized with,
                        # force the asynchronous close path so the result can
                        # be awaited here.
c._asynchronous = True
if c.asynchronous:
await c.close()
else:
c.close()
with suppress(EnvironmentError, TimeoutError):
if report and self.contact_address is not None:
await asyncio.wait_for(
self.scheduler.unregister(
address=self.contact_address, safe=safe
),
timeout,
)
await self.scheduler.close_rpc()
self._workdir.release()
self.stop_services()
if (
self.batched_stream
and self.batched_stream.comm
and not self.batched_stream.comm.closed()
):
self.batched_stream.send({"op": "close-stream"})
if self.batched_stream:
with suppress(TimeoutError):
await self.batched_stream.close(timedelta(seconds=timeout))
for executor in self.executors.values():
if executor is utils._offload_executor:
                    continue
                if isinstance(executor, ThreadPoolExecutor):
executor._work_queue.queue.clear()
executor.shutdown(wait=executor_wait, timeout=timeout)
else:
executor.shutdown(wait=executor_wait)
self.stop()
await self.rpc.close()
self.status = Status.closed
await super().close()
setproctitle("dask-worker [closed]")
return "OK"
async def close_gracefully(self, restart=None):
if self.status in (Status.closing, Status.closing_gracefully):
await self.finished()
if self.status == Status.closed:
return
if restart is None:
restart = self.lifetime_restart
logger.info("Closing worker gracefully: %s", self.address)
self.status = Status.closing_gracefully
await self.scheduler.retire_workers(workers=[self.address], remove=False)
await self.close(safe=True, nanny=not restart)
async def terminate(self, comm=None, report=True, **kwargs):
await self.close(report=report, **kwargs)
return "OK"
async def wait_until_closed(self):
warnings.warn("wait_until_closed has moved to finished()")
await self.finished()
assert self.status == Status.closed
def send_to_worker(self, address, msg):
if address not in self.stream_comms:
bcomm = BatchedSend(interval="1ms", loop=self.loop)
self.stream_comms[address] = bcomm
async def batched_send_connect():
                comm = await connect(address, **self.connection_args)
comm.name = "Worker->Worker"
await comm.write({"op": "connection_stream"})
bcomm.start(comm)
self.loop.add_callback(batched_send_connect)
self.stream_comms[address].send(msg)
async def get_data(
self, comm, keys=None, who=None, serializers=None, max_connections=None
):
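        """Serve locally held keys to a peer worker over *comm*.

        Replies with ``{"status": "busy"}`` when too many outgoing transfers
        are already in flight (at most one while the worker is paused).
        """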
start = time()
if max_connections is None:
max_connections = self.total_in_connections
if (
max_connections
and comm
and get_address_host(comm.peer_address) == get_address_host(self.address)
):
max_connections = max_connections * 2
if self.paused:
max_connections = 1
throttle_msg = " Throttling outgoing connections because worker is paused."
else:
throttle_msg = ""
if (
max_connections is not False
and self.outgoing_current_count >= max_connections
):
logger.debug(
"Worker %s has too many open connections to respond to data request "
"from %s (%d/%d).%s",
self.address,
who,
self.outgoing_current_count,
max_connections,
throttle_msg,
)
return {"status": "busy"}
self.outgoing_current_count += 1
data = {k: self.data[k] for k in keys if k in self.data}
if len(data) < len(keys):
for k in set(keys) - set(data):
if k in self.actors:
from .actor import Actor
data[k] = Actor(type(self.actors[k]), self.address, k, worker=self)
msg = {"status": "OK", "data": {k: to_serialize(v) for k, v in data.items()}}
nbytes = {k: self.tasks[k].nbytes for k in data if k in self.tasks}
stop = time()
if self.digests is not None:
self.digests["get-data-load-duration"].add(stop - start)
start = time()
try:
compressed = await comm.write(msg, serializers=serializers)
response = await comm.read(deserializers=serializers)
assert response == "OK", response
except OSError:
logger.exception(
"failed during get data with %s -> %s", self.address, who, exc_info=True
)
comm.abort()
raise
finally:
self.outgoing_current_count -= 1
stop = time()
if self.digests is not None:
self.digests["get-data-send-duration"].add(stop - start)
total_bytes = sum(filter(None, nbytes.values()))
self.outgoing_count += 1
        duration = (stop - start) or 0.5
        self.outgoing_transfer_log.append(
{
"start": start + self.scheduler_delay,
"stop": stop + self.scheduler_delay,
"middle": (start + stop) / 2,
"duration": duration,
"who": who,
"keys": nbytes,
"total": total_bytes,
"compressed": compressed,
"bandwidth": total_bytes / duration,
}
)
return Status.dont_reply
def update_data(self, comm=None, data=None, report=True, serializers=None):
for key, value in data.items():
ts = self.tasks.get(key)
if getattr(ts, "state", None) is not None:
self.transition(ts, "memory", value=value)
else:
self.tasks[key] = ts = TaskState(key)
self.put_key_in_memory(ts, value)
ts.priority = None
ts.duration = None
ts.scheduler_holds_ref = True
self.log.append((key, "receive-from-scatter"))
if report:
self.log.append(
("Notifying scheduler about in-memory in update-data", list(data))
)
self.batched_stream.send({"op": "add-keys", "keys": list(data)})
info = {"nbytes": {k: sizeof(v) for k, v in data.items()}, "status": "OK"}
return info
def handle_free_keys(self, comm=None, keys=None, reason=None):
self.log.append(("free-keys", keys, reason))
for key in keys:
ts = self.tasks.get(key)
if ts is not None:
ts.scheduler_holds_ref = False
self.release_key(key, report=False, reason=reason)
def handle_superfluous_data(self, keys=(), reason=None):
self.log.append(("Handle superfluous data", keys, reason))
for key in list(keys):
ts = self.tasks.get(key)
if ts and not ts.scheduler_holds_ref:
self.release_key(key, reason=f"delete data: {reason}", report=False)
logger.debug("Worker %s -- Deleted %d keys", self.name, len(keys))
return "OK"
async def set_resources(self, **resources):
for r, quantity in resources.items():
if r in self.total_resources:
self.available_resources[r] += quantity - self.total_resources[r]
else:
self.available_resources[r] = quantity
self.total_resources[r] = quantity
await retry_operation(
self.scheduler.set_resources,
resources=self.total_resources,
worker=self.contact_address,
)
def cancel_compute(self, key, reason):
ts = self.tasks.get(key)
if ts and ts.state in ("waiting", "ready"):
self.log.append((key, "cancel-compute", reason))
ts.scheduler_holds_ref = False
assert not ts.dependents
self.release_key(key, reason=reason, report=False)
def add_task(
self,
key,
function=None,
args=None,
kwargs=None,
task=no_value,
who_has=None,
nbytes=None,
priority=None,
duration=None,
resource_restrictions=None,
actor=False,
annotations=None,
**kwargs2,
):
try:
runspec = SerializedTask(function, args, kwargs, task)
if key in self.tasks:
ts = self.tasks[key]
ts.scheduler_holds_ref = True
if ts.state == "memory":
assert key in self.data or key in self.actors
logger.debug(
"Asked to compute pre-existing result: %s: %s", key, ts.state
)
self.send_task_state_to_scheduler(ts)
return
if ts.state in IN_PLAY:
return
if ts.state == "error":
ts.exception = None
ts.exception_text = ""
ts.traceback = None
ts.traceback_text = ""
else:
self.log.append((ts.key, "re-adding key, new TaskState"))
self.transition(ts, "waiting", runspec=runspec)
else:
self.log.append((key, "new"))
self.tasks[key] = ts = TaskState(
key=key, runspec=SerializedTask(function, args, kwargs, task)
)
self.transition(ts, "waiting")
if priority is not None:
priority = tuple(priority) + (self.generation,)
self.generation -= 1
if actor:
self.actors[ts.key] = None
ts.scheduler_holds_ref = True
ts.runspec = runspec
ts.priority = priority
ts.duration = duration
if resource_restrictions:
ts.resource_restrictions = resource_restrictions
ts.annotations = annotations
who_has = who_has or {}
for dependency, workers in who_has.items():
assert workers
if dependency not in self.tasks:
self.tasks[dependency] = dep_ts = TaskState(key=dependency)
ts.dependencies.add(dep_ts)
dep_ts.dependents.add(ts)
                    # TODO: make this less bad
state = "fetch" if dependency not in self.data else "memory"
# transition from new -> fetch handles adding dependency
# to waiting_for_data
discarded_self = False
if self.address in workers and state == "fetch":
discarded_self = True
workers = set(workers)
workers.discard(self.address)
who_has[dependency] = tuple(workers)
self.transition(dep_ts, state, who_has=workers)
self.log.append(
(
dependency,
"new-dep",
dep_ts.state,
f"requested by {ts.key}",
discarded_self,
)
)
else:
# task was already present on worker
dep_ts = self.tasks[dependency]
# link up to child / parents
ts.dependencies.add(dep_ts)
dep_ts.dependents.add(ts)
if dep_ts.state not in ("memory",):
ts.waiting_for_data.add(dep_ts.key)
self.update_who_has(who_has=who_has)
if nbytes is not None:
for key, value in nbytes.items():
self.tasks[key].nbytes = value
if ts.waiting_for_data:
self.data_needed.append(ts.key)
else:
self.transition(ts, "ready")
if self.validate:
for worker, keys in self.has_what.items():
for k in keys:
assert worker in self.tasks[k].who_has
if who_has:
assert all(self.tasks[dep] in ts.dependencies for dep in who_has)
assert all(self.tasks[dep.key] for dep in ts.dependencies)
for dependency in ts.dependencies:
self.validate_task(dependency)
self.validate_task(ts)
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition(self, ts, finish, **kwargs):
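        """Move *ts* to the *finish* state via the handler registered in
        ``self._transitions``. A handler may redirect the move by returning
        a different final state."""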
if ts is None:
return
start = ts.state
if start == finish:
return
func = self._transitions[start, finish]
self.log.append((ts.key, start, finish))
state = func(ts, **kwargs)
if state and finish != state:
self.log.append((ts.key, start, finish, state))
ts.state = state or finish
if self.validate:
self.validate_task(ts)
self._notify_plugins("transition", ts.key, start, state or finish, **kwargs)
def transition_new_waiting(self, ts):
try:
if self.validate:
assert ts.state == "new"
assert ts.runspec is not None
assert not ts.who_has
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_new_fetch(self, ts, who_has):
try:
if self.validate:
assert ts.state == "new"
assert ts.runspec is None
assert who_has
for dependent in ts.dependents:
dependent.waiting_for_data.add(ts.key)
ts.who_has.update(who_has)
for w in who_has:
self.has_what[w].add(ts.key)
self.pending_data_per_worker[w].append(ts.key)
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_fetch_waiting(self, ts, runspec):
try:
if self.validate:
assert ts.state == "fetch"
assert ts.runspec is None
assert runspec is not None
ts.runspec = runspec
# remove any stale entries in `has_what`
for worker in self.has_what.keys():
self.has_what[worker].discard(ts.key)
# clear `who_has` of stale info
ts.who_has.clear()
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_flight_waiting(self, ts, runspec):
try:
if self.validate:
assert ts.state == "flight"
assert ts.runspec is None
assert runspec is not None
ts.runspec = runspec
# remove any stale entries in `has_what`
for worker in self.has_what.keys():
self.has_what[worker].discard(ts.key)
# clear `who_has` of stale info
ts.who_has.clear()
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_fetch_flight(self, ts, worker=None):
try:
if self.validate:
assert ts.state == "fetch"
assert ts.dependents
ts.coming_from = worker
self.in_flight_tasks += 1
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_flight_fetch(self, ts):
try:
if self.validate:
assert ts.state == "flight"
self.in_flight_tasks -= 1
ts.coming_from = None
ts.runspec = None
if not ts.who_has:
if ts.key not in self._missing_dep_flight:
self._missing_dep_flight.add(ts.key)
logger.info("Task %s does not know who has", ts)
self.loop.add_callback(self.handle_missing_dep, ts)
for w in ts.who_has:
self.pending_data_per_worker[w].append(ts.key)
for dependent in ts.dependents:
dependent.waiting_for_data.add(ts.key)
if dependent.state == "waiting":
self.data_needed.append(dependent.key)
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_flight_memory(self, ts, value=None):
try:
if self.validate:
assert ts.state == "flight"
self.in_flight_tasks -= 1
ts.coming_from = None
self.put_key_in_memory(ts, value)
for dependent in ts.dependents:
try:
dependent.waiting_for_data.remove(ts.key)
self.waiting_for_data_count -= 1
except KeyError:
pass
self.log.append(("Notifying scheduler about in-memory", ts.key))
self.batched_stream.send({"op": "add-keys", "keys": [ts.key]})
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_waiting_ready(self, ts):
try:
if self.validate:
assert ts.state == "waiting"
assert not ts.waiting_for_data
assert all(
dep.key in self.data or dep.key in self.actors
for dep in ts.dependencies
)
assert all(dep.state == "memory" for dep in ts.dependencies)
assert ts.key not in self.ready
self.has_what[self.address].discard(ts.key)
if ts.resource_restrictions is not None:
self.constrained.append(ts.key)
return "constrained"
else:
heapq.heappush(self.ready, (ts.priority, ts.key))
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_waiting_done(self, ts, value=None):
try:
if self.validate:
assert ts.state == "waiting"
assert ts.key not in self.ready
self.waiting_for_data_count -= len(ts.waiting_for_data)
ts.waiting_for_data.clear()
if value is not None:
self.put_key_in_memory(ts, value)
self.send_task_state_to_scheduler(ts)
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_ready_executing(self, ts):
try:
if self.validate:
assert not ts.waiting_for_data
assert ts.key not in self.data
assert ts.state in READY
assert ts.key not in self.ready
assert all(
dep.key in self.data or dep.key in self.actors
for dep in ts.dependencies
)
self.executing_count += 1
self.loop.add_callback(self.execute, ts.key)
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_ready_error(self, ts):
if self.validate:
assert ts.exception is not None
assert ts.traceback is not None
assert ts.exception_text
assert ts.traceback_text
self.send_task_state_to_scheduler(ts)
def transition_ready_memory(self, ts, value=no_value):
if value is not no_value:
self.put_key_in_memory(ts, value=value)
self.send_task_state_to_scheduler(ts)
def transition_constrained_executing(self, ts):
self.transition_ready_executing(ts)
for resource, quantity in ts.resource_restrictions.items():
self.available_resources[resource] -= quantity
if self.validate:
assert all(v >= 0 for v in self.available_resources.values())
def transition_executing_done(self, ts, value=no_value, report=True):
try:
if self.validate:
assert ts.state == "executing" or ts.key in self.long_running
assert not ts.waiting_for_data
assert ts.key not in self.ready
out = None
if ts.resource_restrictions is not None:
for resource, quantity in ts.resource_restrictions.items():
self.available_resources[resource] += quantity
if ts.state == "executing":
self.executing_count -= 1
self.executed_count += 1
elif ts.state == "long-running":
self.long_running.remove(ts.key)
if value is not no_value:
try:
self.put_key_in_memory(ts, value, transition=False)
except Exception as e:
logger.info("Failed to put key in memory", exc_info=True)
msg = error_message(e)
ts.exception = msg["exception"]
ts.exception_text = msg["exception_text"]
ts.traceback = msg["traceback"]
ts.traceback_text = msg["traceback_text"]
ts.state = "error"
out = "error"
for d in ts.dependents:
d.waiting_for_data.add(ts.key)
if report and self.batched_stream and self.status == Status.running:
self.send_task_state_to_scheduler(ts)
else:
raise CommClosedError
return out
except OSError:
logger.info("Comm closed")
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def transition_executing_long_running(self, ts, compute_duration=None):
try:
if self.validate:
assert ts.state == "executing"
self.executing_count -= 1
self.long_running.add(ts.key)
self.batched_stream.send(
{
"op": "long-running",
"key": ts.key,
"compute_duration": compute_duration,
}
)
self.ensure_computing()
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def maybe_transition_long_running(self, ts, compute_duration=None):
if ts.state == "executing":
self.transition(ts, "long-running", compute_duration=compute_duration)
def stateof(self, key):
ts = self.tasks[key]
return {
"executing": ts.state == "executing",
"waiting_for_data": bool(ts.waiting_for_data),
"heap": key in pluck(1, self.ready),
"data": key in self.data,
}
def story(self, *keys):
keys = [key.key if isinstance(key, TaskState) else key for key in keys]
return [
msg
for msg in self.log
if any(key in msg for key in keys)
or any(
key in c
for key in keys
for c in msg
if isinstance(c, (tuple, list, set))
)
]
def ensure_communicating(self):
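        """Walk ``data_needed`` and schedule ``gather_dep`` calls for missing
        dependencies, bounded by ``total_out_connections`` and
        ``comm_threshold_bytes``."""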
changed = True
try:
while (
changed
and self.data_needed
and len(self.in_flight_workers) < self.total_out_connections
):
changed = False
logger.debug(
"Ensure communicating. Pending: %d. Connections: %d/%d",
len(self.data_needed),
len(self.in_flight_workers),
self.total_out_connections,
)
key = self.data_needed[0]
if key not in self.tasks:
self.data_needed.popleft()
changed = True
continue
ts = self.tasks[key]
if ts.state != "waiting":
self.log.append((key, "communication pass"))
self.data_needed.popleft()
changed = True
continue
dependencies = ts.dependencies
if self.validate:
assert all(dep.key in self.tasks for dep in dependencies)
dependencies_fetch = set()
dependencies_missing = set()
for dependency_ts in dependencies:
if dependency_ts.state == "fetch":
if not dependency_ts.who_has:
dependencies_missing.add(dependency_ts)
else:
dependencies_fetch.add(dependency_ts)
del dependencies, dependency_ts
if dependencies_missing:
missing_deps2 = {
dep
for dep in dependencies_missing
if dep.key not in self._missing_dep_flight
}
for dep in missing_deps2:
self._missing_dep_flight.add(dep.key)
if missing_deps2:
logger.info(
"Can't find dependencies %s for key %s",
missing_deps2.copy(),
key,
)
self.loop.add_callback(self.handle_missing_dep, *missing_deps2)
dependencies_fetch -= dependencies_missing
self.log.append(
("gather-dependencies", key, {d.key for d in dependencies_fetch})
)
in_flight = False
while dependencies_fetch and (
len(self.in_flight_workers) < self.total_out_connections
or self.comm_nbytes < self.comm_threshold_bytes
):
to_gather_ts = dependencies_fetch.pop()
workers = [
w
for w in to_gather_ts.who_has
if w not in self.in_flight_workers
]
if not workers:
in_flight = True
continue
host = get_address_host(self.address)
local = [w for w in workers if get_address_host(w) == host]
if local:
worker = random.choice(local)
else:
worker = random.choice(list(workers))
to_gather, total_nbytes = self.select_keys_for_gather(
worker, to_gather_ts.key
)
self.comm_nbytes += total_nbytes
self.in_flight_workers[worker] = to_gather
for d in to_gather:
dependencies_fetch.discard(self.tasks.get(d))
self.transition(self.tasks[d], "flight", worker=worker)
                    assert worker != self.address
self.loop.add_callback(
self.gather_dep,
worker=worker,
to_gather=to_gather,
total_nbytes=total_nbytes,
cause=ts,
)
changed = True
if not dependencies_fetch and not in_flight:
self.data_needed.popleft()
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def send_task_state_to_scheduler(self, ts):
if ts.key in self.data or self.actors.get(ts.key):
typ = ts.type
if ts.nbytes is None or typ is None:
try:
value = self.data[ts.key]
except KeyError:
value = self.actors[ts.key]
ts.nbytes = sizeof(value)
typ = ts.type = type(value)
del value
try:
typ_serialized = dumps_function(typ)
except PicklingError:
typ_serialized = pickle.dumps(typ.__name__, protocol=4)
d = {
"op": "task-finished",
"status": "OK",
"key": ts.key,
"nbytes": ts.nbytes,
"thread": self.threads.get(ts.key),
"type": typ_serialized,
"typename": typename(typ),
"metadata": ts.metadata,
}
elif ts.exception is not None:
d = {
"op": "task-erred",
"status": "error",
"key": ts.key,
"thread": self.threads.get(ts.key),
"exception": ts.exception,
"traceback": ts.traceback,
"exception_text": ts.exception_text,
"traceback_text": ts.traceback_text,
}
else:
logger.error("Key not ready to send to worker, %s: %s", ts.key, ts.state)
return
if ts.startstops:
d["startstops"] = ts.startstops
self.batched_stream.send(d)
def put_key_in_memory(self, ts, value, transition=True):
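        """Store *value* for ``ts.key`` in ``self.data`` (or ``self.actors``),
        mark the task as in memory, and move any dependents that were only
        waiting on this key to ready."""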
if ts.key in self.data:
ts.state = "memory"
return
if ts.key in self.actors:
self.actors[ts.key] = value
else:
start = time()
self.data[ts.key] = value
ts.state = "memory"
stop = time()
if stop - start > 0.020:
ts.startstops.append(
{"action": "disk-write", "start": start, "stop": stop}
)
if ts.nbytes is None:
ts.nbytes = sizeof(value)
ts.type = type(value)
for dep in ts.dependents:
try:
dep.waiting_for_data.remove(ts.key)
self.waiting_for_data_count -= 1
except KeyError:
pass
if not dep.waiting_for_data:
self.transition(dep, "ready")
self.log.append((ts.key, "put-in-memory"))
def select_keys_for_gather(self, worker, dep):
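        """Starting from *dep*, batch further keys pending on *worker* into
        the same transfer while staying under ``target_message_size``."""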
assert isinstance(dep, str)
deps = {dep}
total_bytes = self.tasks[dep].get_nbytes()
L = self.pending_data_per_worker[worker]
while L:
d = L.popleft()
ts = self.tasks.get(d)
if ts is None or ts.state != "fetch":
continue
if total_bytes + ts.get_nbytes() > self.target_message_size:
break
deps.add(d)
total_bytes += ts.get_nbytes()
return deps, total_bytes
@property
def total_comm_bytes(self):
warnings.warn(
"The attribute `Worker.total_comm_bytes` has been renamed to `comm_threshold_bytes`. "
"Future versions will only support the new name.",
DeprecationWarning,
)
return self.comm_threshold_bytes
async def gather_dep(
self,
worker: str,
to_gather: Iterable[str],
total_nbytes: int,
cause: TaskState,
):
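        """Gather the *to_gather* dependencies of *cause* from one peer
        *worker*, record transfer statistics, and transition each key to
        memory, back to fetch, or release it depending on the outcome."""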
if self.validate:
self.validate_state()
if self.status != Status.running:
return
with log_errors():
response = {}
to_gather_keys = set()
try:
if self.validate:
self.validate_state()
for dependency_key in to_gather:
dependency_ts = self.tasks.get(dependency_key)
if dependency_ts and dependency_ts.state == "flight":
to_gather_keys.add(dependency_key)
del to_gather, dependency_key, dependency_ts
self.log.append(("request-dep", cause.key, worker, to_gather_keys))
logger.debug(
"Request %d keys for task %s from %s",
len(to_gather_keys),
cause,
worker,
)
start = time()
response = await get_data_from_worker(
self.rpc, to_gather_keys, worker, who=self.address
)
stop = time()
if response["status"] == "busy":
self.log.append(("busy-gather", worker, to_gather_keys))
for key in to_gather_keys:
ts = self.tasks.get(key)
if ts and ts.state == "flight":
self.transition(ts, "fetch")
return
cause.startstops.append(
{
"action": "transfer",
"start": start + self.scheduler_delay,
"stop": stop + self.scheduler_delay,
"source": worker,
}
)
total_bytes = sum(
self.tasks[key].get_nbytes()
for key in response["data"]
if key in self.tasks
)
duration = (stop - start) or 0.010
bandwidth = total_bytes / duration
self.incoming_transfer_log.append(
{
"start": start + self.scheduler_delay,
"stop": stop + self.scheduler_delay,
"middle": (start + stop) / 2.0 + self.scheduler_delay,
"duration": duration,
"keys": {
key: self.tasks[key].nbytes
for key in response["data"]
if key in self.tasks
},
"total": total_bytes,
"bandwidth": bandwidth,
"who": worker,
}
)
if total_bytes > 1000000:
self.bandwidth = self.bandwidth * 0.95 + bandwidth * 0.05
bw, cnt = self.bandwidth_workers[worker]
self.bandwidth_workers[worker] = (bw + bandwidth, cnt + 1)
types = set(map(type, response["data"].values()))
if len(types) == 1:
[typ] = types
bw, cnt = self.bandwidth_types[typ]
self.bandwidth_types[typ] = (bw + bandwidth, cnt + 1)
if self.digests is not None:
self.digests["transfer-bandwidth"].add(total_bytes / duration)
self.digests["transfer-duration"].add(duration)
self.counters["transfer-count"].add(len(response["data"]))
self.incoming_count += 1
self.log.append(("receive-dep", worker, list(response["data"])))
except OSError:
logger.exception("Worker stream died during communication: %s", worker)
has_what = self.has_what.pop(worker)
self.pending_data_per_worker.pop(worker)
self.log.append(("receive-dep-failed", worker, has_what))
for d in has_what:
ts = self.tasks[d]
ts.who_has.remove(worker)
except Exception as e:
logger.exception(e)
if self.batched_stream and LOG_PDB:
import pdb
pdb.set_trace()
raise
finally:
self.comm_nbytes -= total_nbytes
busy = response.get("status", "") == "busy"
data = response.get("data", {})
assert set(to_gather_keys).issubset(
set(self.in_flight_workers.get(worker))
)
for d in self.in_flight_workers.pop(worker):
ts = self.tasks.get(d)
try:
if not busy and d in data:
self.transition(ts, "memory", value=data[d])
elif ts is None or ts.state == "executing":
self.log.append(("already-executing", d))
self.release_key(d, reason="already executing at gather")
elif ts.state == "flight" and not ts.dependents:
self.log.append(("flight no-dependents", d))
self.release_key(
d, reason="In-flight task no longer has dependents."
)
elif (
not busy
and d not in data
and ts.dependents
and ts.state != "memory"
):
ts.who_has.discard(worker)
self.has_what[worker].discard(ts.key)
self.log.append(("missing-dep", d))
self.batched_stream.send(
{
"op": "missing-data",
"errant_worker": worker,
"key": d,
}
)
self.transition(ts, "fetch")
elif ts.state not in ("ready", "memory"):
self.transition(ts, "fetch")
else:
logger.debug(
"Unexpected task state encountered for %r after gather_dep",
ts,
)
except Exception as exc:
emsg = error_message(exc)
assert ts is not None, ts
self.log.append(
(ts.key, "except-gather-dep-result", emsg, time())
)
logger.debug(
"Exception occured while handling `gather_dep` response for %r",
ts,
exc_info=True,
)
if self.validate:
self.validate_state()
self.ensure_computing()
if not busy:
self.repetitively_busy = 0
self.ensure_communicating()
else:
self.repetitively_busy += 1
await asyncio.sleep(0.100 * 1.5 ** self.repetitively_busy)
await self.query_who_has(*to_gather_keys)
self.ensure_communicating()
def bad_dep(self, dep):
exc = ValueError(
"Could not find dependent %s. Check worker logs" % str(dep.key)
)
for ts in list(dep.dependents):
msg = error_message(exc)
ts.exception = msg["exception"]
ts.traceback = msg["traceback"]
ts.exception_text = msg["exception_text"]
ts.traceback_text = msg["traceback_text"]
self.transition(ts, "error")
self.release_key(dep.key, reason="bad dep")
async def handle_missing_dep(self, *deps, **kwargs):
self.log.append(("handle-missing", deps))
try:
deps = {dep for dep in deps if dep.dependents}
if not deps:
return
for dep in list(deps):
if (
self._suspicious_count_limit
and dep.suspicious_count > self._suspicious_count_limit
):
deps.remove(dep)
self.bad_dep(dep)
if not deps:
return
for dep in deps:
logger.info(
"Dependent not found: %s %s . Asking scheduler",
dep.key,
dep.suspicious_count,
)
who_has = await retry_operation(
self.scheduler.who_has, keys=list(dep.key for dep in deps)
)
who_has = {k: v for k, v in who_has.items() if v}
self.update_who_has(who_has)
still_missing = set()
for dep in deps:
dep.suspicious_count += 1
if not who_has.get(dep.key):
logger.info(
"No workers found for %s",
dep.key,
)
self.log.append((dep.key, "no workers found", dep.dependents))
self.release_key(dep.key, reason="Handle missing no workers")
elif self.address in who_has and dep.state != "memory":
still_missing.add(dep)
self.batched_stream.send(
{
"op": "release-worker-data",
"keys": [dep.key],
"worker": self.address,
}
)
else:
logger.debug("New workers found for %s", dep.key)
self.log.append((dep.key, "new workers found"))
for dependent in dep.dependents:
if dep.key in dependent.waiting_for_data:
self.data_needed.append(dependent.key)
if still_missing:
logger.debug(
"Found self referencing who has response from scheduler for keys %s.\n"
"Trying again handle_missing",
deps,
)
await self.handle_missing_dep(*deps)
except Exception:
logger.error("Handle missing dep failed, retrying", exc_info=True)
retries = kwargs.get("retries", 5)
self.log.append(("handle-missing-failed", retries, deps))
if retries > 0:
await self.handle_missing_dep(*deps, retries=retries - 1)
else:
raise
finally:
try:
for dep in deps:
self._missing_dep_flight.remove(dep.key)
except KeyError:
pass
self.ensure_communicating()
async def query_who_has(self, *deps):
with log_errors():
response = await retry_operation(self.scheduler.who_has, keys=deps)
self.update_who_has(response)
return response
def update_who_has(self, who_has):
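        """Merge scheduler-provided who_has information into local task
        state, discarding claims that this worker holds data it does not
        actually have in memory."""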
try:
for dep, workers in who_has.items():
if not workers:
continue
if dep in self.tasks:
if self.address in workers and self.tasks[dep].state != "memory":
logger.debug(
"Scheduler claims worker %s holds data for task %s which is not true.",
self.name,
dep,
)
workers = set(workers) - {self.address}
self.tasks[dep].who_has.update(workers)
for worker in workers:
self.has_what[worker].add(dep)
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def steal_request(self, key):
# There may be a race condition between stealing and releasing a task.
# In this case the self.tasks is already cleared. The `None` will be
# registered as `already-computing` on the other end
ts = self.tasks.get(key)
if key in self.tasks:
state = ts.state
else:
state = None
response = {"op": "steal-response", "key": key, "state": state}
self.batched_stream.send(response)
if state in ("ready", "waiting", "constrained"):
            # If the task is "constrained" its resources have not yet been
            # deducted from available_resources (that happens in
            # transition_constrained_executing), so it is safe to release.
ts.scheduler_holds_ref = False
self.release_key(ts.key, reason="stolen")
if self.validate:
assert ts.key not in self.tasks
def release_key(
self,
key: Hashable,
cause: TaskState | None = None,
reason: str | None = None,
report: bool = True,
):
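        """Forget a key entirely: drop its data or actor, clear who_has
        bookkeeping, return reserved resources, recursively release
        dependencies without remaining dependents, and notify the scheduler
        when *report* is set."""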
try:
if self.validate:
assert not isinstance(key, TaskState)
ts = self.tasks.get(key, None)
if ts is None or ts.scheduler_holds_ref:
return
logger.debug(
"Release key %s", {"key": key, "cause": cause, "reason": reason}
)
if cause:
self.log.append((key, "release-key", {"cause": cause}, reason))
else:
self.log.append((key, "release-key", reason))
if key in self.data:
try:
del self.data[key]
except FileNotFoundError:
logger.error("Tried to delete %s but no file found", exc_info=True)
if key in self.actors:
del self.actors[key]
for worker in ts.who_has:
self.has_what[worker].discard(ts.key)
ts.who_has.clear()
if key in self.threads:
del self.threads[key]
if ts.state == "executing":
self.executing_count -= 1
if ts.resource_restrictions is not None:
if ts.state == "executing":
for resource, quantity in ts.resource_restrictions.items():
self.available_resources[resource] += quantity
for d in ts.dependencies:
d.dependents.discard(ts)
if not d.dependents and d.state in ("flight", "fetch"):
self.release_key(d.key, reason="Dependent released")
if report:
if ts.state in PROCESSING:
msg = {"op": "release", "key": key, "cause": cause}
else:
msg = {
"op": "release-worker-data",
"keys": [key],
"worker": self.address,
}
self.batched_stream.send(msg)
self._notify_plugins("release_key", key, ts.state, cause, reason, report)
del self.tasks[key]
except CommClosedError:
pass
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def run(self, comm, function, args=(), wait=True, kwargs=None):
return run(self, comm, function=function, args=args, kwargs=kwargs, wait=wait)
def run_coroutine(self, comm, function, args=(), kwargs=None, wait=True):
return run(self, comm, function=function, args=args, kwargs=kwargs, wait=wait)
async def plugin_add(self, comm=None, plugin=None, name=None):
with log_errors(pdb=False):
if isinstance(plugin, bytes):
plugin = pickle.loads(plugin)
if name is None:
name = _get_plugin_name(plugin)
assert name
if name in self.plugins:
await self.plugin_remove(comm=comm, name=name)
self.plugins[name] = plugin
logger.info("Starting Worker plugin %s" % name)
if hasattr(plugin, "setup"):
try:
result = plugin.setup(worker=self)
if isawaitable(result):
result = await result
except Exception as e:
msg = error_message(e)
return msg
return {"status": "OK"}
async def plugin_remove(self, comm=None, name=None):
with log_errors(pdb=False):
logger.info(f"Removing Worker plugin {name}")
try:
plugin = self.plugins.pop(name)
if hasattr(plugin, "teardown"):
result = plugin.teardown(worker=self)
if isawaitable(result):
result = await result
except Exception as e:
msg = error_message(e)
return msg
return {"status": "OK"}
async def actor_execute(
self,
comm=None,
actor=None,
function=None,
args=(),
kwargs: dict | None = None,
):
kwargs = kwargs or {}
separate_thread = kwargs.pop("separate_thread", True)
key = actor
actor = self.actors[key]
func = getattr(actor, function)
name = key_split(key) + "." + function
try:
if iscoroutinefunction(func):
result = await func(*args, **kwargs)
elif separate_thread:
result = await self.loop.run_in_executor(
self.executors["actor"],
apply_function_actor,
func,
args,
kwargs,
self.execution_state,
name,
self.active_threads,
self.active_threads_lock,
)
else:
result = func(*args, **kwargs)
return {"status": "OK", "result": to_serialize(result)}
except Exception as ex:
return {"status": "error", "exception": to_serialize(ex)}
def actor_attribute(self, comm=None, actor=None, attribute=None):
try:
value = getattr(self.actors[actor], attribute)
return {"status": "OK", "result": to_serialize(value)}
except Exception as ex:
return {"status": "error", "exception": to_serialize(ex)}
def meets_resource_constraints(self, key: str) -> bool:
ts = self.tasks[key]
if not ts.resource_restrictions:
return True
for resource, needed in ts.resource_restrictions.items():
if self.available_resources[resource] < needed:
return False
return True
async def _maybe_deserialize_task(self, ts):
if not isinstance(ts.runspec, SerializedTask):
return ts.runspec
try:
start = time()
if sizeof(ts.runspec) > OFFLOAD_THRESHOLD:
function, args, kwargs = await offload(_deserialize, *ts.runspec)
else:
function, args, kwargs = _deserialize(*ts.runspec)
stop = time()
if stop - start > 0.010:
ts.startstops.append(
{"action": "deserialize", "start": start, "stop": stop}
)
return function, args, kwargs
except Exception:
logger.error("Could not deserialize task", exc_info=True)
self.log.append((ts.key, "deserialize-error"))
raise
def ensure_computing(self):
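        """Move constrained and ready tasks into the executing state while
        thread slots (and declared resources) are available; a no-op while
        the worker is paused."""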
if self.paused:
return
try:
while self.constrained and self.executing_count < self.nthreads:
key = self.constrained[0]
ts = self.tasks.get(key, None)
if ts is None or ts.state != "constrained":
self.constrained.popleft()
continue
if self.meets_resource_constraints(key):
self.constrained.popleft()
self.transition(ts, "executing")
else:
break
while self.ready and self.executing_count < self.nthreads:
priority, key = heapq.heappop(self.ready)
ts = self.tasks.get(key)
if ts is None:
continue
elif ts.key in self.data:
self.transition(ts, "memory")
elif ts.state in READY:
self.transition(ts, "executing")
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
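    # Run a single task end to end: deserialize the runspec, resolve the
    # executor (from task annotations, defaulting to "default"), run the
    # function directly, in an executor, or as a coroutine, then transition
    # the task according to the resulting message.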
async def execute(self, key):
if self.status in (Status.closing, Status.closed, Status.closing_gracefully):
return
if key not in self.tasks:
return
ts = self.tasks[key]
if ts.state != "executing":
logger.debug(
"Trying to execute a task %s which is not in executing state anymore"
% ts
)
return
try:
if self.validate:
assert not ts.waiting_for_data
assert ts.state == "executing"
assert ts.runspec is not None
function, args, kwargs = await self._maybe_deserialize_task(ts)
args2, kwargs2 = self._prepare_args_for_execution(ts, args, kwargs)
if ts.annotations is not None and "executor" in ts.annotations:
executor = ts.annotations["executor"]
else:
executor = "default"
assert executor in self.executors
assert key == ts.key
self.active_keys.add(ts.key)
try:
e = self.executors[executor]
ts.start_time = time()
if iscoroutinefunction(function):
result = await apply_function_async(
function,
args2,
kwargs2,
self.scheduler_delay,
)
elif "ThreadPoolExecutor" in str(type(e)):
result = await self.loop.run_in_executor(
e,
apply_function,
function,
args2,
kwargs2,
self.execution_state,
ts.key,
self.active_threads,
self.active_threads_lock,
self.scheduler_delay,
)
else:
result = await self.loop.run_in_executor(
e,
apply_function_simple,
function,
args2,
kwargs2,
self.scheduler_delay,
)
finally:
self.active_keys.discard(ts.key)
        # The task's state may have changed since the execution was kicked off.
        # In particular, it may have been canceled and released already, in
        # which case the result must be dropped.
if ts.key not in self.tasks:
logger.debug(
"Dropping result for %s since task has already been released."
% ts.key
)
return
result["key"] = ts.key
value = result.pop("result", None)
ts.startstops.append(
{"action": "compute", "start": result["start"], "stop": result["stop"]}
)
self.threads[ts.key] = result["thread"]
if result["op"] == "task-finished":
ts.nbytes = result["nbytes"]
ts.type = result["type"]
self.transition(ts, "memory", value=value)
if self.digests is not None:
self.digests["task-duration"].add(result["stop"] - result["start"])
elif isinstance(result.pop("actual-exception"), Reschedule):
self.batched_stream.send({"op": "reschedule", "key": ts.key})
self.transition(ts, "rescheduled", report=False)
self.release_key(ts.key, report=False, reason="Reschedule")
else:
ts.exception = result["exception"]
ts.traceback = result["traceback"]
ts.exception_text = result["exception_text"]
ts.traceback_text = result["traceback_text"]
logger.warning(
"Compute Failed\n"
"Function: %s\n"
"args: %s\n"
"kwargs: %s\n"
"Exception: %r\n",
str(funcname(function))[:1000],
convert_args_to_str(args2, max_len=1000),
convert_kwargs_to_str(kwargs2, max_len=1000),
result["exception"].data,
)
self.transition(ts, "error")
logger.debug("Send compute response to scheduler: %s, %s", ts.key, result)
if self.validate:
assert ts.state != "executing"
assert not ts.waiting_for_data
except Exception as exc:
logger.error(
"Exception during execution of task %s.", ts.key, exc_info=True
)
emsg = error_message(exc)
ts.exception = emsg["exception"]
ts.traceback = emsg["traceback"]
ts.exception_text = emsg["exception_text"]
ts.traceback_text = emsg["traceback_text"]
self.transition(ts, "error")
finally:
self.ensure_computing()
self.ensure_communicating()
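    # Substitute dependency values (or Actor proxies for actor keys) into the
    # task's args/kwargs; slow reads are recorded as "disk-read" startstops.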
def _prepare_args_for_execution(self, ts, args, kwargs):
start = time()
data = {}
for dep in ts.dependencies:
k = dep.key
try:
data[k] = self.data[k]
except KeyError:
from .actor import Actor
data[k] = Actor(type(self.actors[k]), self.address, k, self)
args2 = pack_data(args, data, key_types=(bytes, str))
kwargs2 = pack_data(kwargs, data, key_types=(bytes, str))
stop = time()
if stop - start > 0.005:
ts.startstops.append({"action": "disk-read", "start": start, "stop": stop})
if self.digests is not None:
self.digests["disk-load-duration"].add(stop - start)
return args2, kwargs2
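    # Periodic check of process RSS against fractions of memory_limit: pause
    # task execution above memory_pause_fraction, and above
    # memory_spill_fraction evict keys from the fast (in-memory) store until
    # usage drops back under memory_target_fraction.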
async def memory_monitor(self):
if self._memory_monitoring:
return
self._memory_monitoring = True
total = 0
proc = self.monitor.proc
memory = proc.memory_info().rss
frac = memory / self.memory_limit
def check_pause(memory):
frac = memory / self.memory_limit
if self.memory_pause_fraction and frac > self.memory_pause_fraction:
self._throttled_gc.collect()
if not self.paused:
logger.warning(
"Worker is at %d%% memory usage. Pausing worker. "
"Process memory: %s -- Worker memory limit: %s",
int(frac * 100),
format_bytes(memory),
format_bytes(self.memory_limit)
if self.memory_limit is not None
else "None",
)
self.paused = True
elif self.paused:
logger.warning(
"Worker is at %d%% memory usage. Resuming worker. "
"Process memory: %s -- Worker memory limit: %s",
int(frac * 100),
format_bytes(memory),
format_bytes(self.memory_limit)
if self.memory_limit is not None
else "None",
)
self.paused = False
self.ensure_computing()
check_pause(memory)
if self.memory_spill_fraction and frac > self.memory_spill_fraction:
logger.debug(
"Worker is at %.0f%% memory usage. Start spilling data to disk.",
frac * 100,
)
start = time()
target = self.memory_limit * self.memory_target_fraction
count = 0
need = memory - target
while memory > target:
if not self.data.fast:
logger.warning(
"Unmanaged memory use is high. This may indicate a memory leak "
"or the memory may not be released to the OS; see "
"https://distributed.dask.org/en/latest/worker.html#memtrim "
"for more information. "
"-- Unmanaged memory: %s -- Worker memory limit: %s",
format_bytes(memory),
format_bytes(self.memory_limit),
)
break
k, v, weight = self.data.fast.evict()
del k, v
total += weight
count += 1
if time() - start > 0.5:
await asyncio.sleep(0)
start = time()
memory = proc.memory_info().rss
if total > need and memory > target:
self._throttled_gc.collect()
memory = proc.memory_info().rss
check_pause(memory)
if count:
logger.debug(
"Moved %d tasks worth %s to disk",
count,
format_bytes(total),
)
self._memory_monitoring = False
return total
def cycle_profile(self):
now = time() + self.scheduler_delay
prof, self.profile_recent = self.profile_recent, profile.create()
self.profile_history.append((now, prof))
self.profile_keys_history.append((now, dict(self.profile_keys)))
self.profile_keys.clear()
def trigger_profile(self):
        if not self.active_threads:
            return
start = time()
with self.active_threads_lock:
active_threads = self.active_threads.copy()
frames = sys._current_frames()
frames = {ident: frames[ident] for ident in active_threads}
llframes = {}
if self.low_level_profiler:
llframes = {ident: profile.ll_get_stack(ident) for ident in active_threads}
for ident, frame in frames.items():
if frame is not None:
key = key_split(active_threads[ident])
llframe = llframes.get(ident)
state = profile.process(
frame, True, self.profile_recent, stop="distributed/worker.py"
)
profile.llprocess(llframe, None, state)
profile.process(
frame, True, self.profile_keys[key], stop="distributed/worker.py"
)
stop = time()
if self.digests is not None:
self.digests["profile-duration"].add(stop - start)
async def get_profile(
self, comm=None, start=None, stop=None, key=None, server=False
):
now = time() + self.scheduler_delay
if server:
history = self.io_loop.profile
elif key is None:
history = self.profile_history
else:
history = [(t, d[key]) for t, d in self.profile_keys_history if key in d]
if start is None:
istart = 0
else:
istart = bisect.bisect_left(history, (start,))
if stop is None:
istop = None
else:
istop = bisect.bisect_right(history, (stop,)) + 1
if istop >= len(history):
istop = None
if istart == 0 and istop is None:
history = list(history)
else:
iistop = len(history) if istop is None else istop
history = [history[i] for i in range(istart, iistop)]
prof = profile.merge(*pluck(1, history))
if not history:
return profile.create()
if istop is None and (start is None or start < now):
if key is None:
recent = self.profile_recent
else:
recent = self.profile_keys[key]
prof = profile.merge(prof, recent)
return prof
async def get_profile_metadata(self, comm=None, start=0, stop=None):
add_recent = stop is None
now = time() + self.scheduler_delay
stop = stop or now
start = start or 0
result = {
"counts": [
(t, d["count"]) for t, d in self.profile_history if start < t < stop
],
"keys": [
(t, {k: d["count"] for k, d in v.items()})
for t, v in self.profile_keys_history
if start < t < stop
],
}
if add_recent:
result["counts"].append((now, self.profile_recent["count"]))
result["keys"].append(
(now, {k: v["count"] for k, v in self.profile_keys.items()})
)
return result
def get_call_stack(self, comm=None, keys=None):
with self.active_threads_lock:
frames = sys._current_frames()
active_threads = self.active_threads.copy()
frames = {k: frames[ident] for ident, k in active_threads.items()}
if keys is not None:
frames = {k: frame for k, frame in frames.items() if k in keys}
result = {k: profile.call_stack(frame) for k, frame in frames.items()}
return result
def _notify_plugins(self, method_name, *args, **kwargs):
for name, plugin in self.plugins.items():
if hasattr(plugin, method_name):
try:
getattr(plugin, method_name)(*args, **kwargs)
except Exception:
logger.info(
"Plugin '%s' failed with exception" % name, exc_info=True
)
def validate_task_memory(self, ts):
assert ts.key in self.data or ts.key in self.actors
assert isinstance(ts.nbytes, int)
assert not ts.waiting_for_data
assert ts.key not in self.ready
assert ts.state == "memory"
def validate_task_executing(self, ts):
assert ts.state == "executing"
assert ts.runspec is not None
assert ts.key not in self.data
assert not ts.waiting_for_data
assert all(
dep.key in self.data or dep.key in self.actors for dep in ts.dependencies
)
def validate_task_ready(self, ts):
assert ts.key in pluck(1, self.ready)
assert ts.key not in self.data
assert ts.state != "executing"
assert not ts.waiting_for_data
assert all(
dep.key in self.data or dep.key in self.actors for dep in ts.dependencies
)
def validate_task_waiting(self, ts):
assert ts.key not in self.data
assert ts.state == "waiting"
if ts.dependencies and ts.runspec:
assert not all(dep.key in self.data for dep in ts.dependencies)
def validate_task_flight(self, ts):
assert ts.key not in self.data
assert not any(dep.key in self.ready for dep in ts.dependents)
assert ts.coming_from
assert ts.coming_from in self.in_flight_workers
assert ts.key in self.in_flight_workers[ts.coming_from]
def validate_task_fetch(self, ts):
assert ts.runspec is None
assert ts.key not in self.data
assert self.address not in ts.who_has
assert ts.dependents
for w in ts.who_has:
assert ts.key in self.has_what[w]
def validate_task(self, ts):
try:
if ts.state == "memory":
self.validate_task_memory(ts)
elif ts.state == "waiting":
self.validate_task_waiting(ts)
elif ts.state == "ready":
self.validate_task_ready(ts)
elif ts.state == "executing":
self.validate_task_executing(ts)
elif ts.state == "flight":
self.validate_task_flight(ts)
elif ts.state == "fetch":
self.validate_task_fetch(ts)
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def validate_state(self):
if self.status != Status.running:
return
try:
for ts in self.tasks.values():
assert ts.state is not None
for worker in ts.who_has:
assert ts.key in self.has_what[worker]
for dep in ts.dependencies:
assert dep.state is not None
assert ts in dep.dependents, ts
for key in ts.waiting_for_data:
ts_wait = self.tasks[key]
assert (
ts_wait.state == "flight"
or ts_wait.state == "fetch"
or ts_wait.key in self._missing_dep_flight
or ts_wait.who_has.issubset(self.in_flight_workers)
)
if ts.state == "memory":
assert isinstance(ts.nbytes, int)
assert not ts.waiting_for_data
assert ts.key in self.data or ts.key in self.actors
for worker, keys in self.has_what.items():
for k in keys:
assert worker in self.tasks[k].who_has
for ts in self.tasks.values():
self.validate_task(ts)
except Exception as e:
self.loop.add_callback(self.close)
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
@property
def client(self) -> Client:
with self._lock:
if self._client:
return self._client
else:
return self._get_client()
def _get_client(self, timeout=None) -> Client:
if timeout is None:
timeout = dask.config.get("distributed.comm.timeouts.connect")
timeout = parse_timedelta(timeout, "s")
try:
from .client import default_client
client = default_client()
        except ValueError:  # no default client has been registered
            pass
else:
from distributed.deploy.cluster import Cluster
if (
client.scheduler
and client.scheduler.address == self.scheduler.address
or (
isinstance(client._start_arg, str)
and client._start_arg == self.scheduler.address
or isinstance(client._start_arg, Cluster)
and client._start_arg.scheduler_address == self.scheduler.address
)
):
self._client = client
if not self._client:
from .client import Client
asynchronous = self.loop is IOLoop.current()
self._client = Client(
self.scheduler,
loop=self.loop,
security=self.security,
set_as_default=True,
asynchronous=asynchronous,
direct_to_workers=True,
name="worker",
timeout=timeout,
)
Worker._initialized_clients.add(self._client)
if not asynchronous:
assert self._client.status == "running"
return self._client
def get_current_task(self):
return self.active_threads[threading.get_ident()]
def get_worker() -> Worker:
try:
return thread_state.execution_state["worker"]
except AttributeError:
try:
return first(w for w in Worker._instances if w.status == Status.running)
except StopIteration:
raise ValueError("No workers found")
def get_client(address=None, timeout=None, resolve_address=True) -> Client:
if timeout is None:
timeout = dask.config.get("distributed.comm.timeouts.connect")
timeout = parse_timedelta(timeout, "s")
if address and resolve_address:
address = comm.resolve_address(address)
try:
worker = get_worker()
    except ValueError:  # not running inside a worker
        pass
else:
if not address or worker.scheduler.address == address:
return worker._get_client(timeout=timeout)
from .client import Client
    try:
        client = Client.current()  # reuse an existing client if one is registered
    except ValueError:
        client = None
if client and (not address or client.scheduler.address == address):
return client
elif address:
return Client(address, timeout=timeout)
else:
raise ValueError("No global client found and no address provided")
def secede():
worker = get_worker()
    tpe_secede()  # have this thread secede from its thread pool
    duration = time() - thread_state.start_time
worker.loop.add_callback(
worker.maybe_transition_long_running,
worker.tasks[thread_state.key],
compute_duration=duration,
)
class Reschedule(Exception):
    """Raising this exception in a task stops its execution and asks the scheduler to reschedule it, possibly on a different worker."""
def parse_memory_limit(memory_limit, nthreads, total_cores=CPU_COUNT) -> int | None:
if memory_limit is None:
return None
if memory_limit == "auto":
memory_limit = int(system.MEMORY_LIMIT * min(1, nthreads / total_cores))
with suppress(ValueError, TypeError):
memory_limit = float(memory_limit)
if isinstance(memory_limit, float) and memory_limit <= 1:
memory_limit = int(memory_limit * system.MEMORY_LIMIT)
if isinstance(memory_limit, str):
memory_limit = parse_bytes(memory_limit)
else:
memory_limit = int(memory_limit)
return min(memory_limit, system.MEMORY_LIMIT)
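# Illustrative values (not from the source): parse_memory_limit("4 GiB", 1)
# parses via parse_bytes -> 4294967296, capped at system.MEMORY_LIMIT;
# parse_memory_limit(0.5, 1) -> half of system memory, since floats <= 1 are
# treated as fractions; parse_memory_limit("auto", 2, total_cores=8) scales
# system memory by 2/8.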
async def get_data_from_worker(
rpc,
keys,
worker,
who=None,
max_connections=None,
serializers=None,
deserializers=None,
):
if serializers is None:
serializers = rpc.serializers
if deserializers is None:
deserializers = rpc.deserializers
async def _get_data():
comm = await rpc.connect(worker)
comm.name = "Ephemeral Worker->Worker for gather"
try:
response = await send_recv(
comm,
serializers=serializers,
deserializers=deserializers,
op="get_data",
keys=keys,
who=who,
max_connections=max_connections,
)
try:
status = response["status"]
except KeyError:
raise ValueError("Unexpected response", response)
else:
if status == "OK":
await comm.write("OK")
return response
finally:
rpc.reuse(worker, comm)
return await retry_operation(_get_data, operation="get_data_from_worker")
job_counter = [0]
cache_loads = LRU(maxsize=100)
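# Function deserialization is memoized: only payloads under 100 kB are cached
# so the LRU cannot pin arbitrarily large byte strings in memory.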
def loads_function(bytes_object):
if len(bytes_object) < 100000:
try:
result = cache_loads[bytes_object]
except KeyError:
result = pickle.loads(bytes_object)
cache_loads[bytes_object] = result
return result
return pickle.loads(bytes_object)
def _deserialize(function=None, args=None, kwargs=None, task=no_value):
if function is not None:
function = loads_function(function)
if args and isinstance(args, bytes):
args = pickle.loads(args)
if kwargs and isinstance(kwargs, bytes):
kwargs = pickle.loads(kwargs)
if task is not no_value:
assert not function and not args and not kwargs
function = execute_task
args = (task,)
return function, args or (), kwargs or {}
def execute_task(task):
if istask(task):
func, args = task[0], task[1:]
return func(*map(execute_task, args))
elif isinstance(task, list):
return list(map(execute_task, task))
else:
return task
cache_dumps = LRU(maxsize=100)
_cache_lock = threading.Lock()
def dumps_function(func) -> bytes:
try:
with _cache_lock:
result = cache_dumps[func]
except KeyError:
result = pickle.dumps(func, protocol=4)
if len(result) < 100000:
with _cache_lock:
cache_dumps[func] = result
    except TypeError:  # the function is unhashable, so it cannot be cached
        result = pickle.dumps(func, protocol=4)
return result
def dumps_task(task):
if istask(task):
if task[0] is apply and not any(map(_maybe_complex, task[2:])):
d = {"function": dumps_function(task[1]), "args": warn_dumps(task[2])}
if len(task) == 4:
d["kwargs"] = warn_dumps(task[3])
return d
elif not any(map(_maybe_complex, task[1:])):
return {"function": dumps_function(task[0]), "args": warn_dumps(task[1:])}
return to_serialize(task)
_warn_dumps_warned = [False]
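# warn_dumps serializes an object and warns once per process when the pickled
# payload exceeds ~1 MB, nudging users toward client.scatter for large data.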
def warn_dumps(obj, dumps=pickle.dumps, limit=1e6):
b = dumps(obj, protocol=4)
if not _warn_dumps_warned[0] and len(b) > limit:
_warn_dumps_warned[0] = True
s = str(obj)
if len(s) > 70:
s = s[:50] + " ... " + s[-15:]
warnings.warn(
"Large object of size %s detected in task graph: \n"
" %s\n"
"Consider scattering large objects ahead of time\n"
"with client.scatter to reduce scheduler burden and \n"
"keep data on workers\n\n"
" future = client.submit(func, big_data) # bad\n\n"
" big_future = client.scatter(big_data) # good\n"
" future = client.submit(func, big_future) # good"
% (format_bytes(len(b)), s)
)
return b
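# The apply_function* helpers below execute the user function and (except for
# the actor variant) wrap the outcome in a "task-finished" or "task-erred"
# message whose start/stop timestamps are shifted by the scheduler clock delay.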
def apply_function(
function,
args,
kwargs,
execution_state,
key,
active_threads,
active_threads_lock,
time_delay,
):
ident = threading.get_ident()
with active_threads_lock:
active_threads[ident] = key
thread_state.start_time = time()
thread_state.execution_state = execution_state
thread_state.key = key
msg = apply_function_simple(function, args, kwargs, time_delay)
with active_threads_lock:
del active_threads[ident]
return msg
def apply_function_simple(
function,
args,
kwargs,
time_delay,
):
ident = threading.get_ident()
start = time()
try:
result = function(*args, **kwargs)
except Exception as e:
msg = error_message(e)
msg["op"] = "task-erred"
msg["actual-exception"] = e
else:
msg = {
"op": "task-finished",
"status": "OK",
"result": result,
"nbytes": sizeof(result),
"type": type(result) if result is not None else None,
}
finally:
end = time()
msg["start"] = start + time_delay
msg["stop"] = end + time_delay
msg["thread"] = ident
return msg
async def apply_function_async(
function,
args,
kwargs,
time_delay,
):
ident = threading.get_ident()
start = time()
try:
result = await function(*args, **kwargs)
except Exception as e:
msg = error_message(e)
msg["op"] = "task-erred"
msg["actual-exception"] = e
else:
msg = {
"op": "task-finished",
"status": "OK",
"result": result,
"nbytes": sizeof(result),
"type": type(result) if result is not None else None,
}
finally:
end = time()
msg["start"] = start + time_delay
msg["stop"] = end + time_delay
msg["thread"] = ident
return msg
def apply_function_actor(
function, args, kwargs, execution_state, key, active_threads, active_threads_lock
):
ident = threading.get_ident()
with active_threads_lock:
active_threads[ident] = key
thread_state.execution_state = execution_state
thread_state.key = key
thread_state.actor = True
result = function(*args, **kwargs)
with active_threads_lock:
del active_threads[ident]
return result
def get_msg_safe_str(msg):
class Repr:
def __init__(self, f, val):
self._f = f
self._val = val
def __repr__(self):
return self._f(self._val)
msg = msg.copy()
if "args" in msg:
msg["args"] = Repr(convert_args_to_str, msg["args"])
if "kwargs" in msg:
msg["kwargs"] = Repr(convert_kwargs_to_str, msg["kwargs"])
return msg
def convert_args_to_str(args, max_len: int | None = None) -> str:
length = 0
strs = ["" for i in range(len(args))]
for i, arg in enumerate(args):
try:
sarg = repr(arg)
except Exception:
sarg = "< could not convert arg to str >"
strs[i] = sarg
length += len(sarg) + 2
if max_len is not None and length > max_len:
return "({}".format(", ".join(strs[: i + 1]))[:max_len]
else:
return "({})".format(", ".join(strs))
def convert_kwargs_to_str(kwargs: dict, max_len: int | None = None) -> str:
length = 0
strs = ["" for i in range(len(kwargs))]
for i, (argname, arg) in enumerate(kwargs.items()):
try:
sarg = repr(arg)
except Exception:
sarg = "< could not convert arg to str >"
skwarg = repr(argname) + ": " + sarg
strs[i] = skwarg
length += len(skwarg) + 2
if max_len is not None and length > max_len:
return "{{{}".format(", ".join(strs[: i + 1]))[:max_len]
else:
return "{{{}}}".format(", ".join(strs))
async def run(server, comm, function, args=(), kwargs=None, is_coro=None, wait=True):
kwargs = kwargs or {}
function = pickle.loads(function)
if is_coro is None:
is_coro = iscoroutinefunction(function)
else:
warnings.warn(
"The is_coro= parameter is deprecated. "
"We now automatically detect coroutines/async functions"
)
assert wait or is_coro, "Combination not supported"
if args:
args = pickle.loads(args)
if kwargs:
kwargs = pickle.loads(kwargs)
if has_arg(function, "dask_worker"):
kwargs["dask_worker"] = server
if has_arg(function, "dask_scheduler"):
kwargs["dask_scheduler"] = server
logger.info("Run out-of-band function %r", funcname(function))
try:
if not is_coro:
result = function(*args, **kwargs)
else:
if wait:
result = await function(*args, **kwargs)
else:
server.loop.add_callback(function, *args, **kwargs)
result = None
except Exception as e:
logger.warning(
"Run Failed\nFunction: %s\nargs: %s\nkwargs: %s\n",
str(funcname(function))[:1000],
convert_args_to_str(args, max_len=1000),
convert_kwargs_to_str(kwargs, max_len=1000),
exc_info=True,
)
response = error_message(e)
else:
response = {"status": "OK", "result": to_serialize(result)}
return response
_global_workers = Worker._instances
try:
if nvml.device_get_count() < 1:
raise RuntimeError
except (Exception, RuntimeError):
pass
else:
async def gpu_metric(worker):
result = await offload(nvml.real_time)
return result
DEFAULT_METRICS["gpu"] = gpu_metric
def gpu_startup(worker):
return nvml.one_time()
DEFAULT_STARTUP_INFORMATION["gpu"] = gpu_startup
def print(*args, **kwargs):
try:
worker = get_worker()
except ValueError:
pass
else:
msg = {
"args": tuple(stringify(arg) for arg in args),
"kwargs": {k: stringify(v) for k, v in kwargs.items()},
}
worker.log_event("print", msg)
builtins.print(*args, **kwargs)
def warn(*args, **kwargs):
try:
worker = get_worker()
except ValueError:
pass
else:
worker.log_event("warn", {"args": args, "kwargs": kwargs})
warnings.warn(*args, **kwargs)
| true
| true
|
f70361fd762b86d8ee7498605605e80662106df7
| 1,692
|
py
|
Python
|
samples/generated_samples/vmmigration_v1_generated_vm_migration_create_utilization_report_async.py
|
renovate-bot/python-vmmigration
|
80a2cf46a21f516899da818a7aec0f2a67222047
|
[
"Apache-2.0"
] | null | null | null |
samples/generated_samples/vmmigration_v1_generated_vm_migration_create_utilization_report_async.py
|
renovate-bot/python-vmmigration
|
80a2cf46a21f516899da818a7aec0f2a67222047
|
[
"Apache-2.0"
] | 10
|
2021-11-18T10:47:48.000Z
|
2022-03-07T15:48:54.000Z
|
samples/generated_samples/vmmigration_v1_generated_vm_migration_create_utilization_report_async.py
|
renovate-bot/python-vmmigration
|
80a2cf46a21f516899da818a7aec0f2a67222047
|
[
"Apache-2.0"
] | 1
|
2022-01-29T08:15:02.000Z
|
2022-01-29T08:15:02.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for CreateUtilizationReport
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-vm-migration
# [START vmmigration_v1_generated_VmMigration_CreateUtilizationReport_async]
from google.cloud import vmmigration_v1
async def sample_create_utilization_report():
# Create a client
client = vmmigration_v1.VmMigrationAsyncClient()
# Initialize request argument(s)
request = vmmigration_v1.CreateUtilizationReportRequest(
parent="parent_value",
utilization_report_id="utilization_report_id_value",
)
# Make the request
operation = client.create_utilization_report(request=request)
print("Waiting for operation to complete...")
response = await operation.result()
# Handle the response
print(response)
# [END vmmigration_v1_generated_VmMigration_CreateUtilizationReport_async]
| 33.176471
| 85
| 0.767139
|
from google.cloud import vmmigration_v1
async def sample_create_utilization_report():
client = vmmigration_v1.VmMigrationAsyncClient()
request = vmmigration_v1.CreateUtilizationReportRequest(
parent="parent_value",
utilization_report_id="utilization_report_id_value",
)
operation = client.create_utilization_report(request=request)
print("Waiting for operation to complete...")
response = await operation.result()
print(response)
| true
| true
|
f7036210c9a2fc427322f417e11fddc8b19b2186
| 12,556
|
py
|
Python
|
gtsfm/common/gtsfm_data.py
|
yuancaimaiyi/gtsfm
|
cc5781c35af23498d45cd96a1818e4786c5cca80
|
[
"Apache-2.0"
] | null | null | null |
gtsfm/common/gtsfm_data.py
|
yuancaimaiyi/gtsfm
|
cc5781c35af23498d45cd96a1818e4786c5cca80
|
[
"Apache-2.0"
] | null | null | null |
gtsfm/common/gtsfm_data.py
|
yuancaimaiyi/gtsfm
|
cc5781c35af23498d45cd96a1818e4786c5cca80
|
[
"Apache-2.0"
] | 1
|
2021-09-23T13:08:49.000Z
|
2021-09-23T13:08:49.000Z
|
"""Class to hold the tracks and cameras of a 3D scene.
This can be the output of either data association or of bundle adjustment.
Authors: Ayush Baid, John Lambert, Xiaolong Wu
"""
import itertools
from typing import Any, Dict, List, Optional, Tuple
import numpy as np
from gtsam import PinholeCameraCal3Bundler, Pose3, SfmTrack
import gtsfm.utils.graph as graph_utils
import gtsfm.utils.logger as logger_utils
import gtsfm.utils.reprojection as reproj_utils
logger = logger_utils.get_logger()
EQUALITY_TOLERANCE = 1e-5
PRINT_NUM_SIG_FIGS = 2
class GtsfmData:
"""Class containing cameras and tracks, essentially describing the complete 3D scene.
This class is needed over GTSAM's SfmData type because GTSAM's type does not allow for non-contiguous cameras.
    The situation of non-contiguous cameras can exist because of failures in the front-end.
"""
def __init__(self, number_images: int) -> None:
"""Initializes the class.
Args:
number_images: number of images/cameras in the scene.
"""
self._cameras: Dict[int, PinholeCameraCal3Bundler] = {}
self._tracks: List[SfmTrack] = []
self._number_images = number_images
def __eq__(self, other: object) -> bool:
"""Checks equality with the other object."""
if not isinstance(other, GtsfmData):
return False
if self._number_images != other.number_images():
return False
for i, cam in self._cameras.items():
other_cam = other.get_camera(i)
if not cam.equals(other_cam, EQUALITY_TOLERANCE):
return False
for j in range(self.number_tracks()):
track = self.get_track(j)
other_track = other.get_track(j)
if track.number_measurements() != other_track.number_measurements():
return False
for k in range(track.number_measurements()):
i, uv = track.measurement(k)
other_i, other_uv = other_track.measurement(k)
if i != other_i:
return False
if not np.allclose(uv, other_uv):
return False
return True
def number_images(self) -> int:
"""Getter for the number of images.
Returns:
Number of images.
"""
return self._number_images
def number_tracks(self) -> int:
"""Getter for the number of tracks.
Returns:
Number of tracks.
"""
return len(self._tracks)
def get_valid_camera_indices(self) -> List[int]:
"""Getter for image indices where there is a valid (not None) camera.
Returns:
List of indices with a valid camera.
"""
return list(self._cameras.keys())
def get_camera(self, index: int) -> Optional[PinholeCameraCal3Bundler]:
"""Getter for camera.
Args:
index: the image index to fetch the camera for.
Returns:
The camera if it is a valid one, None otherwise.
"""
return self._cameras.get(index)
def get_camera_poses(self) -> List[Optional[Pose3]]:
"""Getter for camera poses wTi.
This function returns the pose for all cameras (equal to number_images in GtsfmData), even if they were not
computed by the pipeline.
Returns:
camera poses as a list, each representing wTi
"""
cameras = [self.get_camera(i) for i in range(self.number_images())]
poses = [camera.pose() if camera is not None else None for camera in cameras]
return poses
def get_track(self, index: int) -> SfmTrack:
"""Getter for the track.
Args:
index: track index to fetch.
Returns:
Requested track.
"""
return self._tracks[index]
def add_track(self, track: SfmTrack) -> bool:
"""Add a track, after checking if all the cameras in the track are already added.
Args:
track: track to add.
Returns:
Flag indicating the success of adding operation.
"""
# check if all cameras are already added
for j in range(track.number_measurements()):
i, _ = track.measurement(j)
if i not in self._cameras:
return False
self._tracks.append(track)
return True
def add_camera(self, index: int, camera: PinholeCameraCal3Bundler) -> None:
"""Adds a camera.
Args:
index: the index associated with this camera.
            camera: camera object to add.
Raises:
ValueError: if the camera to be added is not a valid camera object.
"""
if camera is None:
raise ValueError("Camera cannot be None, should be a valid camera")
self._cameras[index] = camera
def get_track_length_statistics(self) -> Tuple[float, float]:
"""Compute mean and median lengths of all the tracks.
Returns:
Mean track length.
Median track length.
"""
if self.number_tracks() == 0:
return 0, 0
track_lengths = self.get_track_lengths()
return np.mean(track_lengths), np.median(track_lengths)
def get_track_lengths(self) -> np.ndarray:
"""Get an array containing the lengths of all tracks.
Returns:
Array containing all track lengths.
"""
if self.number_tracks() == 0:
return np.array([], dtype=np.uint32)
track_lengths = [self.get_track(j).number_measurements() for j in range(self.number_tracks())]
return np.array(track_lengths, dtype=np.uint32)
def select_largest_connected_component(self) -> "GtsfmData":
"""Selects the subset of data belonging to the largest connected component of the graph where the edges are
between cameras which feature in the same track.
Returns:
            New GtsfmData object with the subset of tracks and cameras.
"""
camera_edges = []
for sfm_track in self._tracks:
cameras_in_use = []
for m_idx in range(sfm_track.number_measurements()):
i, _ = sfm_track.measurement(m_idx)
cameras_in_use.append(i)
# Recreate track connectivity from track information
# For example: a track has cameras [0, 2, 5]. In that case we will add pairs (0, 2), (0, 5), (2, 5)
camera_edges += list(itertools.combinations(cameras_in_use, 2))
if len(camera_edges) == 0:
return GtsfmData(self._number_images)
cameras_in_largest_cc = graph_utils.get_nodes_in_largest_connected_component(camera_edges)
logger.info(
"Largest connected component contains {} of {} cameras returned by front-end (of {} total imgs)".format(
len(cameras_in_largest_cc), len(self.get_valid_camera_indices()), self._number_images
)
)
return GtsfmData.from_selected_cameras(self, cameras_in_largest_cc)
@classmethod
def from_selected_cameras(cls, gtsfm_data: "GtsfmData", camera_indices: List[int]) -> "GtsfmData":
"""Selects the cameras in the input list and the tracks associated with those cameras.
Args:
gtsfm_data: data to pick the cameras from.
camera_indices: camera indices to select and keep in the new data.
Returns:
New object with the selected cameras and associated tracks.
"""
new_data = cls(gtsfm_data.number_images())
for i in gtsfm_data.get_valid_camera_indices():
if i in camera_indices:
new_data.add_camera(i, gtsfm_data.get_camera(i))
new_camera_indices = new_data.get_valid_camera_indices()
# add tracks which have all the camera present in new data
for j in range(gtsfm_data.number_tracks()):
track = gtsfm_data.get_track(j)
is_valid = True
for k in range(track.number_measurements()):
i, _ = track.measurement(k)
if i not in new_camera_indices:
is_valid = False
break
if is_valid:
new_data.add_track(track)
return new_data
def get_scene_reprojection_errors(self) -> np.ndarray:
"""Get the scene reprojection errors for all 3D points and all associated measurements.
Returns:
Reprojection errors as a 1D numpy array.
"""
scene_reproj_errors: List[float] = []
for track in self._tracks:
track_errors, _ = reproj_utils.compute_track_reprojection_errors(self._cameras, track)
scene_reproj_errors.extend(track_errors)
return np.array(scene_reproj_errors)
def aggregate_metrics(self) -> Dict[str, Any]:
"""Aggregate metrics about the reprojection errors and 3d track lengths (summary stats).
Returns:
dictionary containing metrics of bundle adjustment result
"""
track_lengths_3d = self.get_track_lengths()
scene_reproj_errors = self.get_scene_reprojection_errors()
convert_to_rounded_float = lambda x: float(np.round(x, 3))
stats_dict = {}
stats_dict["number_tracks"] = self.number_tracks()
stats_dict["3d_track_lengths"] = {
"min": convert_to_rounded_float(track_lengths_3d.min()),
"mean": convert_to_rounded_float(np.mean(track_lengths_3d)),
"median": convert_to_rounded_float(np.median(track_lengths_3d)),
"max": convert_to_rounded_float(track_lengths_3d.max()),
}
stats_dict["reprojection_errors"] = {
"min": convert_to_rounded_float(np.min(scene_reproj_errors)),
"mean": convert_to_rounded_float(np.mean(scene_reproj_errors)),
"median": convert_to_rounded_float(np.median(scene_reproj_errors)),
"max": convert_to_rounded_float(np.max(scene_reproj_errors)),
}
return stats_dict
def get_avg_scene_reprojection_error(self) -> float:
"""Get average reprojection error for all 3d points in the entire scene
Returns:
Average of reprojection errors for every 3d point to its 2d measurements
"""
scene_reproj_errors = self.get_scene_reprojection_errors()
scene_avg_reproj_error = np.mean(scene_reproj_errors)
return scene_avg_reproj_error
def log_scene_reprojection_error_stats(self) -> None:
"""Logs reprojection error stats for all 3d points in the entire scene."""
scene_reproj_errors = self.get_scene_reprojection_errors()
logger.info("Min scene reproj error: %.3f", np.min(scene_reproj_errors))
logger.info("Avg scene reproj error: %.3f", np.mean(scene_reproj_errors))
logger.info("Median scene reproj error: %.3f", np.median(scene_reproj_errors))
logger.info("Max scene reproj error: %.3f", np.max(scene_reproj_errors))
def __validate_track(self, track: SfmTrack, reproj_err_thresh: float) -> bool:
"""Validates a track based on reprojection errors and cheirality checks.
Args:
track: track with 3D landmark and measurements.
reproj_err_thresh: reprojection err threshold for each measurement.
Returns:
validity of the track.
"""
errors, avg_reproj_error = reproj_utils.compute_track_reprojection_errors(self._cameras, track)
        # NaN errors mark cheirality failures (the point lies behind a camera)
cheirality_success = np.all(~np.isnan(errors))
return np.all(errors < reproj_err_thresh) and cheirality_success
def filter_landmarks(self, reproj_err_thresh: float = 5) -> "GtsfmData":
"""Filters out landmarks with high reprojection error
Args:
reproj_err_thresh: reprojection err threshold for each measurement.
"""
# TODO: move this function to utils or GTSAM
filtered_data = GtsfmData(self.number_images())
# add all the cameras
for i in self.get_valid_camera_indices():
filtered_data.add_camera(i, self.get_camera(i))
for j in range(self.number_tracks()):
track = self.get_track(j)
if self.__validate_track(track, reproj_err_thresh):
filtered_data.add_track(track)
return filtered_data
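# Minimal usage sketch (illustrative only; the camera/track values below are
# hypothetical and assume the gtsam Python bindings are available):
#
#     data = GtsfmData(number_images=2)
#     data.add_camera(0, PinholeCameraCal3Bundler())
#     data.add_camera(1, PinholeCameraCal3Bundler())
#     track = SfmTrack(np.array([0.0, 0.0, 1.0]))
#     track.addMeasurement(0, np.array([0.0, 0.0]))
#     track.addMeasurement(1, np.array([0.0, 0.0]))
#     data.add_track(track)  # True, since cameras 0 and 1 are registered
#     filtered = data.filter_landmarks(reproj_err_thresh=3)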
| 36.08046
| 116
| 0.634358
|
import itertools
from typing import Any, Dict, List, Optional, Tuple
import numpy as np
from gtsam import PinholeCameraCal3Bundler, Pose3, SfmTrack
import gtsfm.utils.graph as graph_utils
import gtsfm.utils.logger as logger_utils
import gtsfm.utils.reprojection as reproj_utils
logger = logger_utils.get_logger()
EQUALITY_TOLERANCE = 1e-5
PRINT_NUM_SIG_FIGS = 2
class GtsfmData:
def __init__(self, number_images: int) -> None:
self._cameras: Dict[int, PinholeCameraCal3Bundler] = {}
self._tracks: List[SfmTrack] = []
self._number_images = number_images
def __eq__(self, other: object) -> bool:
if not isinstance(other, GtsfmData):
return False
if self._number_images != other.number_images():
return False
for i, cam in self._cameras.items():
other_cam = other.get_camera(i)
if not cam.equals(other_cam, EQUALITY_TOLERANCE):
return False
for j in range(self.number_tracks()):
track = self.get_track(j)
other_track = other.get_track(j)
if track.number_measurements() != other_track.number_measurements():
return False
for k in range(track.number_measurements()):
i, uv = track.measurement(k)
other_i, other_uv = other_track.measurement(k)
if i != other_i:
return False
if not np.allclose(uv, other_uv):
return False
return True
def number_images(self) -> int:
return self._number_images
def number_tracks(self) -> int:
return len(self._tracks)
def get_valid_camera_indices(self) -> List[int]:
return list(self._cameras.keys())
def get_camera(self, index: int) -> Optional[PinholeCameraCal3Bundler]:
return self._cameras.get(index)
def get_camera_poses(self) -> List[Optional[Pose3]]:
cameras = [self.get_camera(i) for i in range(self.number_images())]
poses = [camera.pose() if camera is not None else None for camera in cameras]
return poses
def get_track(self, index: int) -> SfmTrack:
return self._tracks[index]
def add_track(self, track: SfmTrack) -> bool:
for j in range(track.number_measurements()):
i, _ = track.measurement(j)
if i not in self._cameras:
return False
self._tracks.append(track)
return True
def add_camera(self, index: int, camera: PinholeCameraCal3Bundler) -> None:
if camera is None:
raise ValueError("Camera cannot be None, should be a valid camera")
self._cameras[index] = camera
def get_track_length_statistics(self) -> Tuple[float, float]:
if self.number_tracks() == 0:
return 0, 0
track_lengths = self.get_track_lengths()
return np.mean(track_lengths), np.median(track_lengths)
def get_track_lengths(self) -> np.ndarray:
if self.number_tracks() == 0:
return np.array([], dtype=np.uint32)
track_lengths = [self.get_track(j).number_measurements() for j in range(self.number_tracks())]
return np.array(track_lengths, dtype=np.uint32)
def select_largest_connected_component(self) -> "GtsfmData":
camera_edges = []
for sfm_track in self._tracks:
cameras_in_use = []
for m_idx in range(sfm_track.number_measurements()):
i, _ = sfm_track.measurement(m_idx)
cameras_in_use.append(i)
camera_edges += list(itertools.combinations(cameras_in_use, 2))
if len(camera_edges) == 0:
return GtsfmData(self._number_images)
cameras_in_largest_cc = graph_utils.get_nodes_in_largest_connected_component(camera_edges)
logger.info(
"Largest connected component contains {} of {} cameras returned by front-end (of {} total imgs)".format(
len(cameras_in_largest_cc), len(self.get_valid_camera_indices()), self._number_images
)
)
return GtsfmData.from_selected_cameras(self, cameras_in_largest_cc)
@classmethod
def from_selected_cameras(cls, gtsfm_data: "GtsfmData", camera_indices: List[int]) -> "GtsfmData":
new_data = cls(gtsfm_data.number_images())
for i in gtsfm_data.get_valid_camera_indices():
if i in camera_indices:
new_data.add_camera(i, gtsfm_data.get_camera(i))
new_camera_indices = new_data.get_valid_camera_indices()
for j in range(gtsfm_data.number_tracks()):
track = gtsfm_data.get_track(j)
is_valid = True
for k in range(track.number_measurements()):
i, _ = track.measurement(k)
if i not in new_camera_indices:
is_valid = False
break
if is_valid:
new_data.add_track(track)
return new_data
def get_scene_reprojection_errors(self) -> np.ndarray:
scene_reproj_errors: List[float] = []
for track in self._tracks:
track_errors, _ = reproj_utils.compute_track_reprojection_errors(self._cameras, track)
scene_reproj_errors.extend(track_errors)
return np.array(scene_reproj_errors)
def aggregate_metrics(self) -> Dict[str, Any]:
track_lengths_3d = self.get_track_lengths()
scene_reproj_errors = self.get_scene_reprojection_errors()
convert_to_rounded_float = lambda x: float(np.round(x, 3))
stats_dict = {}
stats_dict["number_tracks"] = self.number_tracks()
stats_dict["3d_track_lengths"] = {
"min": convert_to_rounded_float(track_lengths_3d.min()),
"mean": convert_to_rounded_float(np.mean(track_lengths_3d)),
"median": convert_to_rounded_float(np.median(track_lengths_3d)),
"max": convert_to_rounded_float(track_lengths_3d.max()),
}
stats_dict["reprojection_errors"] = {
"min": convert_to_rounded_float(np.min(scene_reproj_errors)),
"mean": convert_to_rounded_float(np.mean(scene_reproj_errors)),
"median": convert_to_rounded_float(np.median(scene_reproj_errors)),
"max": convert_to_rounded_float(np.max(scene_reproj_errors)),
}
return stats_dict
def get_avg_scene_reprojection_error(self) -> float:
scene_reproj_errors = self.get_scene_reprojection_errors()
scene_avg_reproj_error = np.mean(scene_reproj_errors)
return scene_avg_reproj_error
def log_scene_reprojection_error_stats(self) -> None:
scene_reproj_errors = self.get_scene_reprojection_errors()
logger.info("Min scene reproj error: %.3f", np.min(scene_reproj_errors))
logger.info("Avg scene reproj error: %.3f", np.mean(scene_reproj_errors))
logger.info("Median scene reproj error: %.3f", np.median(scene_reproj_errors))
logger.info("Max scene reproj error: %.3f", np.max(scene_reproj_errors))
def __validate_track(self, track: SfmTrack, reproj_err_thresh: float) -> bool:
errors, avg_reproj_error = reproj_utils.compute_track_reprojection_errors(self._cameras, track)
cheirality_success = np.all(~np.isnan(errors))
return np.all(errors < reproj_err_thresh) and cheirality_success
def filter_landmarks(self, reproj_err_thresh: float = 5) -> "GtsfmData":
filtered_data = GtsfmData(self.number_images())
for i in self.get_valid_camera_indices():
filtered_data.add_camera(i, self.get_camera(i))
for j in range(self.number_tracks()):
track = self.get_track(j)
if self.__validate_track(track, reproj_err_thresh):
filtered_data.add_track(track)
return filtered_data
| true
| true
|
f70362c2929c6b6316a7720a7d8f1029c8e72172
| 514
|
py
|
Python
|
garrafas.py
|
WestenPy/Curso_em_video
|
9f6a9775d27e1b86d54b381aba5da69b2ae21b27
|
[
"MIT"
] | null | null | null |
garrafas.py
|
WestenPy/Curso_em_video
|
9f6a9775d27e1b86d54b381aba5da69b2ae21b27
|
[
"MIT"
] | null | null | null |
garrafas.py
|
WestenPy/Curso_em_video
|
9f6a9775d27e1b86d54b381aba5da69b2ae21b27
|
[
"MIT"
] | null | null | null |
'''n = 99
p = 'garrafas'
while n > 0:
if n == 1:
p = 'garrafa'
print(f'{n} {p} de cerveja no muro!')
print(f'{n} {p} no muro!')
print('Se uma garrafa cair no chão')
print('Quantas restarão?')
n -= 1
print('Fim da canção!')'''
p = 'garrafas'
for c in range(99, 0, -1):
if c == 1:
p = 'garrafa'
print(f'{c} {p} de cerveja no muro!')
print(f'{c} {p} no muro!')
print('Se uma garrafa cair no chão')
print('Quantas restarão?')
print('Fim da canção!')
| 17.724138
| 41
| 0.523346
|
p = 'garrafas'
for c in range(99, 0, -1):
if c == 1:
p = 'garrafa'
print(f'{c} {p} de cerveja no muro!')
print(f'{c} {p} no muro!')
print('Se uma garrafa cair no chão')
print('Quantas restarão?')
print('Fim da canção!')
| true
| true
|
f703638a4adc495ba1f71364ea8f35a506615362
| 681
|
py
|
Python
|
wagtailcommerce/orders/migrations/0015_order_cart.py
|
theplusagency/wagtail-commerce
|
6047170f29199ccaf2778534976ab0970c2877e7
|
[
"BSD-3-Clause"
] | 3
|
2019-04-12T15:38:43.000Z
|
2019-09-22T10:23:20.000Z
|
wagtailcommerce/orders/migrations/0015_order_cart.py
|
wagtailcommerce/wagtailcommerce
|
308ed8348483806c16062d09a7e69ec44d9a2e73
|
[
"BSD-3-Clause"
] | null | null | null |
wagtailcommerce/orders/migrations/0015_order_cart.py
|
wagtailcommerce/wagtailcommerce
|
308ed8348483806c16062d09a7e69ec44d9a2e73
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-02-17 21:34
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wagtailcommerce_carts', '0004_cart_coupon'),
('wagtailcommerce_orders', '0014_auto_20180130_1050'),
]
operations = [
migrations.AddField(
model_name='order',
name='cart',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='orders', to='wagtailcommerce_carts.Cart', verbose_name='cart'),
),
]
| 29.608696
| 182
| 0.674009
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wagtailcommerce_carts', '0004_cart_coupon'),
('wagtailcommerce_orders', '0014_auto_20180130_1050'),
]
operations = [
migrations.AddField(
model_name='order',
name='cart',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='orders', to='wagtailcommerce_carts.Cart', verbose_name='cart'),
),
]
| true
| true
|
f70366774f99f64ea4a2e9819e5d6781e9782677
| 4,249
|
py
|
Python
|
prosodic_daa/sample/pyhlm_sample_murakami.py
|
EmergentSystemLabStudent/Prosodic-DAA
|
068af5db337ed977c059e788353414d3aa9a8ac8
|
[
"MIT"
] | null | null | null |
prosodic_daa/sample/pyhlm_sample_murakami.py
|
EmergentSystemLabStudent/Prosodic-DAA
|
068af5db337ed977c059e788353414d3aa9a8ac8
|
[
"MIT"
] | null | null | null |
prosodic_daa/sample/pyhlm_sample_murakami.py
|
EmergentSystemLabStudent/Prosodic-DAA
|
068af5db337ed977c059e788353414d3aa9a8ac8
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
from pyhlm.model import WeakLimitHDPHLM, WeakLimitHDPHLMPython
from pyhlm.internals.hlm_states import WeakLimitHDPHLMStates
from pyhlm.word_model import LetterHSMM, LetterHSMMPython
import pyhsmm
import warnings
from tqdm import trange
warnings.filterwarnings('ignore')
import time
#%%
def load_datas(dataset_dir):
data = []
names = np.loadtxt(dataset_dir + "files.txt", dtype=str)
files = names
for name in names:
mfcc = np.loadtxt(dataset_dir + "DATA/" + name + ".txt")
delta = np.loadtxt(dataset_dir + "DATA/" + name + "_d.txt")
delta_delta = np.loadtxt(dataset_dir + "DATA/" + name + "_dd.txt")
data.append(np.hstack((mfcc, np.hstack((delta,delta_delta)))))
return data
def unpack_durations(dur):
unpacked = np.zeros(dur.sum())
d = np.cumsum(dur[:-1])
unpacked[d-1] = 1.0
return unpacked
def save_stateseq(model, dataset_dir):
    # Save sampled state sequences.
names = np.loadtxt(dataset_dir + "files.txt", dtype=str)
for i, s in enumerate(model.states_list):
with open("results/" + names[i] + "_s.txt", "a") as f:
np.savetxt(f, s.stateseq)
with open("results/" + names[i] + "_l.txt", "a") as f:
np.savetxt(f, s.letter_stateseq)
with open("results/" + names[i] + "_d.txt", "a") as f:
np.savetxt(f, unpack_durations(s.durations_censored))
def save_params(itr_idx, model):
with open("parameters/ITR_{0:04d}.txt".format(itr_idx), "w") as f:
f.write(str(model.params))
def save_loglikelihood(model):
with open("summary_files/log_likelihood.txt", "a") as f:
f.write(str(model.log_likelihood()) + "\n")
def save_resample_times(resample_time):
with open("summary_files/resample_times.txt", "a") as f:
f.write(str(resample_time) + "\n")
#%%
if not os.path.exists('results'):
os.mkdir('results')
if not os.path.exists('parameters'):
os.mkdir('parameters')
if not os.path.exists('summary_files'):
os.mkdir('summary_files')
#%%
dataset_dir = "murakami_dataset/"
#%%
thread_num = 64
pre_train_iter = 1
train_iter = 100
trunc = 120
obs_dim = 9
letter_upper = 50
word_upper = 50
model_hypparams = {'num_states': word_upper, 'alpha': 10, 'gamma': 10, 'init_state_concentration': 10}
obs_hypparams = {
'mu_0':np.zeros(obs_dim),
'sigma_0':np.identity(obs_dim),
'kappa_0':0.01,
'nu_0':obs_dim+2
}
dur_hypparams = {
'alpha_0':200,
'beta_0':10
}
#%%
letter_obs_distns = [pyhsmm.distributions.Gaussian(**obs_hypparams) for state in range(letter_upper)]
letter_dur_distns = [pyhsmm.distributions.PoissonDuration(**dur_hypparams) for state in range(letter_upper)]
dur_distns = [pyhsmm.distributions.PoissonDuration(lmbda=20) for state in range(word_upper)]
length_distn = pyhsmm.distributions.PoissonDuration(alpha_0=30, beta_0=10, lmbda=3)
#%%
letter_hsmm = LetterHSMM(alpha=10, gamma=10, init_state_concentration=10, obs_distns=letter_obs_distns, dur_distns=letter_dur_distns)
model = WeakLimitHDPHLM(model_hypparams, letter_hsmm, dur_distns, length_distn)
#%%
files = np.loadtxt(dataset_dir + "files.txt", dtype=str)
datas = load_datas(dataset_dir)
#%% Pre training.
for d in datas:
letter_hsmm.add_data(d, trunc=trunc)
for t in trange(pre_train_iter):
letter_hsmm.resample_model(num_procs=1)
letter_hsmm.states_list = []
#%%
print("Add datas...")
for d in datas:
model.add_data(d, trunc=trunc, generate=False)
model.resample_states(num_procs=thread_num)
# # or
# for d in datas:
# model.add_data(d, trunc=trunc, initialize_from_prior=False)
print("Done!")
#%% Save initial params and hyperparams
with open("parameters/hypparams.txt", "w") as f:
f.write(str(model.hypparams))
save_params(0, model)
save_loglikelihood(model)
#%%
for t in trange(train_iter):
st = time.time()
model.resample_model(num_procs=thread_num)
resample_model_time = time.time() - st
save_stateseq(model, dataset_dir)
save_loglikelihood(model)
save_params(t+1, model)
save_resample_times(resample_model_time)
print(model.word_list)
print(model.word_counts())
print("log_likelihood:{}".format(model.log_likelihood()))
print("resample_model:{}".format(resample_model_time))
| 31.014599
| 133
| 0.701106
|
import os
import numpy as np
from pyhlm.model import WeakLimitHDPHLM, WeakLimitHDPHLMPython
from pyhlm.internals.hlm_states import WeakLimitHDPHLMStates
from pyhlm.word_model import LetterHSMM, LetterHSMMPython
import pyhsmm
import warnings
from tqdm import trange
warnings.filterwarnings('ignore')
import time
def load_datas(dataset_dir):
data = []
names = np.loadtxt(dataset_dir + "files.txt", dtype=str)
files = names
for name in names:
mfcc = np.loadtxt(dataset_dir + "DATA/" + name + ".txt")
delta = np.loadtxt(dataset_dir + "DATA/" + name + "_d.txt")
delta_delta = np.loadtxt(dataset_dir + "DATA/" + name + "_dd.txt")
data.append(np.hstack((mfcc, np.hstack((delta,delta_delta)))))
return data
def unpack_durations(dur):
unpacked = np.zeros(dur.sum())
d = np.cumsum(dur[:-1])
unpacked[d-1] = 1.0
return unpacked
def save_stateseq(model, dataset_dir):
names = np.loadtxt(dataset_dir + "files.txt", dtype=str)
for i, s in enumerate(model.states_list):
with open("results/" + names[i] + "_s.txt", "a") as f:
np.savetxt(f, s.stateseq)
with open("results/" + names[i] + "_l.txt", "a") as f:
np.savetxt(f, s.letter_stateseq)
with open("results/" + names[i] + "_d.txt", "a") as f:
np.savetxt(f, unpack_durations(s.durations_censored))
def save_params(itr_idx, model):
with open("parameters/ITR_{0:04d}.txt".format(itr_idx), "w") as f:
f.write(str(model.params))
def save_loglikelihood(model):
with open("summary_files/log_likelihood.txt", "a") as f:
f.write(str(model.log_likelihood()) + "\n")
def save_resample_times(resample_time):
with open("summary_files/resample_times.txt", "a") as f:
f.write(str(resample_time) + "\n")
if not os.path.exists('results'):
os.mkdir('results')
if not os.path.exists('parameters'):
os.mkdir('parameters')
if not os.path.exists('summary_files'):
os.mkdir('summary_files')
dataset_dir = "murakami_dataset/"
thread_num = 64
pre_train_iter = 1
train_iter = 100
trunc = 120
obs_dim = 9
letter_upper = 50
word_upper = 50
model_hypparams = {'num_states': word_upper, 'alpha': 10, 'gamma': 10, 'init_state_concentration': 10}
obs_hypparams = {
'mu_0':np.zeros(obs_dim),
'sigma_0':np.identity(obs_dim),
'kappa_0':0.01,
'nu_0':obs_dim+2
}
dur_hypparams = {
'alpha_0':200,
'beta_0':10
}
letter_obs_distns = [pyhsmm.distributions.Gaussian(**obs_hypparams) for state in range(letter_upper)]
letter_dur_distns = [pyhsmm.distributions.PoissonDuration(**dur_hypparams) for state in range(letter_upper)]
dur_distns = [pyhsmm.distributions.PoissonDuration(lmbda=20) for state in range(word_upper)]
length_distn = pyhsmm.distributions.PoissonDuration(alpha_0=30, beta_0=10, lmbda=3)
letter_hsmm = LetterHSMM(alpha=10, gamma=10, init_state_concentration=10, obs_distns=letter_obs_distns, dur_distns=letter_dur_distns)
model = WeakLimitHDPHLM(model_hypparams, letter_hsmm, dur_distns, length_distn)
files = np.loadtxt(dataset_dir + "files.txt", dtype=str)
datas = load_datas(dataset_dir)
for d in datas:
letter_hsmm.add_data(d, trunc=trunc)
for t in trange(pre_train_iter):
letter_hsmm.resample_model(num_procs=1)
letter_hsmm.states_list = []
print("Add datas...")
for d in datas:
model.add_data(d, trunc=trunc, generate=False)
model.resample_states(num_procs=thread_num)
print("Done!")
with open("parameters/hypparams.txt", "w") as f:
f.write(str(model.hypparams))
save_params(0, model)
save_loglikelihood(model)
for t in trange(train_iter):
st = time.time()
model.resample_model(num_procs=thread_num)
resample_model_time = time.time() - st
save_stateseq(model, dataset_dir)
save_loglikelihood(model)
save_params(t+1, model)
save_resample_times(resample_model_time)
print(model.word_list)
print(model.word_counts())
print("log_likelihood:{}".format(model.log_likelihood()))
print("resample_model:{}".format(resample_model_time))
| true
| true
|
f70367a6041c8fa21742be5df4b0ac208ee79ceb
| 1,324
|
py
|
Python
|
src/client/__init__.py
|
rokups/Launchpad
|
661388f65a59de3842368b6315e121ecf8ccf1bf
|
[
"MIT"
] | 7
|
2018-01-21T04:42:54.000Z
|
2021-07-09T04:53:28.000Z
|
src/client/__init__.py
|
rokups/Launchpad
|
661388f65a59de3842368b6315e121ecf8ccf1bf
|
[
"MIT"
] | 1
|
2017-08-05T13:01:02.000Z
|
2017-08-05T13:01:02.000Z
|
src/client/__init__.py
|
rokups/Launchpad
|
661388f65a59de3842368b6315e121ecf8ccf1bf
|
[
"MIT"
] | 1
|
2019-02-17T11:23:56.000Z
|
2019-02-17T11:23:56.000Z
|
#
# MIT License
#
# Copyright 2017 Launchpad project contributors (see COPYRIGHT.md)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
"""
This is a client implementation. This module is appended to a zip file with a python interpreter and sent together with
the bootloader executable to be executed on a remote machine.
"""
| 47.285714
| 115
| 0.780967
| true
| true
|
|
f7036901558a058bb17bfb56404b309d8c4938f7
| 463
|
py
|
Python
|
dashboard/views.py
|
mahanfarzaneh2000/DjangoEcommerce
|
f844f60fd4eac6c7513196037cd908df3ba01983
|
[
"CC0-1.0"
] | 1
|
2020-11-01T11:35:12.000Z
|
2020-11-01T11:35:12.000Z
|
dashboard/views.py
|
mahanfarzaneh2000/DjangoEcommerce
|
f844f60fd4eac6c7513196037cd908df3ba01983
|
[
"CC0-1.0"
] | null | null | null |
dashboard/views.py
|
mahanfarzaneh2000/DjangoEcommerce
|
f844f60fd4eac6c7513196037cd908df3ba01983
|
[
"CC0-1.0"
] | null | null | null |
from django.shortcuts import render
from django.views.generic import View
from django.core.exceptions import ObjectDoesNotExist
from django.contrib import messages
from cart.models import Order
class Dashboard(View):
def get(self,*args,**kwargs):
order_qs = Order.objects.filter(user=self.request.user,orderd=True)
context = {
'orders':order_qs,
}
return render(self.request,'dashboard/index.html',context=context)
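# A hedged wiring sketch (URL layout assumed, not from this repo); note that
# 'orderd' above presumably matches the field's spelling on the Order model.
#
#   from django.urls import path
#   from .views import Dashboard
#   urlpatterns = [path('dashboard/', Dashboard.as_view(), name='dashboard')]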
| 35.615385
| 75
| 0.725702
|
from django.shortcuts import render
from django.views.generic import View
from django.core.exceptions import ObjectDoesNotExist
from django.contrib import messages
from cart.models import Order
class Dashboard(View):
def get(self,*args,**kwargs):
order_qs = Order.objects.filter(user=self.request.user,orderd=True)
context = {
'orders':order_qs,
}
return render(self.request,'dashboard/index.html',context=context)
| true
| true
|
f7036a98810d92fc6296922c227dede332735048
| 6,263
|
py
|
Python
|
people_admin/tests/test_views.py
|
morden35/openstates.org
|
21f0a78a43f04f835b858fdaf4f755d99920e2d7
|
[
"MIT"
] | 1
|
2022-01-17T11:54:28.000Z
|
2022-01-17T11:54:28.000Z
|
people_admin/tests/test_views.py
|
washabstract/openstates.org
|
dc541ae5cd09dd3b3db623178bf32a03d0246f01
|
[
"MIT"
] | null | null | null |
people_admin/tests/test_views.py
|
washabstract/openstates.org
|
dc541ae5cd09dd3b3db623178bf32a03d0246f01
|
[
"MIT"
] | null | null | null |
import pytest
from testutils.factories import create_test_person
from django.contrib.auth.models import User, Permission
from openstates.data.models import Person, Organization
from people_admin.models import UnmatchedName, NameStatus, DeltaSet
from people_admin.views import MATCHER_PERM, EDIT_PERM, RETIRE_PERM
import json
@pytest.fixture
def admin_user():
u = User.objects.create(username="admin")
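    # Grant the matcher/edit/retire permissions by stripping the app label
    # from each "<app>.<codename>" string and looking the codename up.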
user_permissions = list(
Permission.objects.filter(
codename__in=[
p.split(".")[1] for p in (MATCHER_PERM, EDIT_PERM, RETIRE_PERM)
]
).values_list("id", flat=True)
)
u.user_permissions.set(user_permissions)
return u
@pytest.mark.django_db
def test_apply_match_matches(client, django_assert_num_queries, kansas, admin_user):
p = Person.objects.create(name="Samuel L. Jackson")
    # kansas is a test fixture; it has some fake data attached that we can use
session = kansas.legislative_sessions.get(identifier="2020")
UnmatchedName.objects.create(
id=1, session=session, name="Sam Jackson", sponsorships_count=5, votes_count=5
)
apply_data = {
"match_data": {"unmatchedId": 1, "button": "Match", "matchedId": p.id}
}
client.force_login(admin_user)
with django_assert_num_queries(6):
resp = client.post(
"/admin/people/matcher/update/",
json.dumps(apply_data),
content_type="application/json",
)
assert resp.status_code == 200
assert resp.json() == {"status": "success"}
# get refreshed object from database
matched = UnmatchedName.objects.get()
assert matched.status == NameStatus.MATCHED_PERSON
assert matched.matched_person_id == p.id
@pytest.mark.django_db
def test_apply_match_ignore(client, django_assert_num_queries, kansas, admin_user):
session = kansas.legislative_sessions.get(identifier="2020")
UnmatchedName.objects.create(
id=2, session=session, name="Eva Green", sponsorships_count=16, votes_count=7
)
match_data = {"match_data": {"unmatchedId": 2, "button": "Ignore", "matchedId": ""}}
client.force_login(admin_user)
with django_assert_num_queries(6):
# client can be used to mock GET/POST/etc.
resp = client.post(
"/admin/people/matcher/update/",
json.dumps(match_data),
content_type="application/json",
)
assert resp.status_code == 200
assert resp.json() == {"status": "success"}
# get refreshed object from database
matched = UnmatchedName.objects.get()
assert matched.status == NameStatus.IGNORED
@pytest.mark.django_db
def test_apply_match_source_error(
client, django_assert_num_queries, kansas, admin_user
):
session = kansas.legislative_sessions.get(identifier="2020")
UnmatchedName.objects.create(
id=3,
session=session,
name="David Tennant",
sponsorships_count=10,
votes_count=2,
)
match_data = {
"match_data": {"unmatchedId": 3, "button": "Source Error", "matchedId": ""}
}
client.force_login(admin_user)
with django_assert_num_queries(6):
resp = client.post(
"/admin/people/matcher/update/",
json.dumps(match_data),
content_type="application/json",
)
assert resp.status_code == 200
assert resp.json() == {"status": "success"}
# get refreshed object from database
matched = UnmatchedName.objects.get()
assert matched.status == NameStatus.SOURCE_ERROR
@pytest.mark.django_db
def test_apply_match_404(client, django_assert_num_queries, admin_user):
client.force_login(admin_user)
with django_assert_num_queries(5):
match_data = {
"match_data": {"unmatchedId": 9999, "button": "Match", "matchedId": "1"}
}
resp = client.post(
"/admin/people/matcher/update/",
json.dumps(match_data),
content_type="application/json",
)
assert resp.status_code == 404
@pytest.mark.django_db
def test_people_list(client, django_assert_num_queries, admin_user, kansas):
house = Organization.objects.get(name="Kansas House")
senate = Organization.objects.get(name="Kansas Senate")
sam = create_test_person("Sam Jackson", org=house, party="Democratic", district="1")
sam.identifiers.create(scheme="twitter", identifier="@SamuelLJackson")
sam.contact_details.create(
value="555-555-5555", type="voice", note="Capitol Office"
)
create_test_person("Bosephorous Fogg", org=house, party="Republican", district="2")
create_test_person("Cran Crumble", org=senate, party="Republican", district="A")
client.force_login(admin_user)
with django_assert_num_queries(7):
resp = client.get("/admin/people/ks/")
assert resp.status_code == 200
people = resp.context["context"]["current_people"]
assert len(people) == 3
sam_data = [p for p in people if p["name"] == "Sam Jackson"][0]
assert sam_data["district"] == "1"
assert sam_data["twitter"] == "@SamuelLJackson"
assert sam_data["capitol_voice"] == "555-555-5555"
@pytest.mark.django_db
def test_retire_person(client, django_assert_num_queries, admin_user, kansas):
house = Organization.objects.get(name="Kansas House")
sam = create_test_person("Sam Jackson", org=house, party="Democratic", district="1")
retire_data = {
"id": sam.id,
"name": sam.name,
"reason": "ran for new office",
"retirementDate": "2021-01-01",
"isDead": False,
"vacantSeat": True,
}
client.force_login(admin_user)
with django_assert_num_queries(6):
resp = client.post(
"/admin/people/retire/",
json.dumps(retire_data),
content_type="application/json",
)
assert resp.status_code == 200
ds = DeltaSet.objects.get()
assert "retire Sam Jackson" == ds.name
assert ds.person_retirements.all().count() == 1
retirement = ds.person_retirements.get()
assert retirement.person_id == sam.id
assert retirement.reason == "ran for new office"
assert retirement.date == "2021-01-01"
assert retirement.is_vacant
assert retirement.is_dead is False
| 35.384181
| 88
| 0.669647
|
import pytest
from testutils.factories import create_test_person
from django.contrib.auth.models import User, Permission
from openstates.data.models import Person, Organization
from people_admin.models import UnmatchedName, NameStatus, DeltaSet
from people_admin.views import MATCHER_PERM, EDIT_PERM, RETIRE_PERM
import json
@pytest.fixture
def admin_user():
u = User.objects.create(username="admin")
user_permissions = list(
Permission.objects.filter(
codename__in=[
p.split(".")[1] for p in (MATCHER_PERM, EDIT_PERM, RETIRE_PERM)
]
).values_list("id", flat=True)
)
u.user_permissions.set(user_permissions)
return u
@pytest.mark.django_db
def test_apply_match_matches(client, django_assert_num_queries, kansas, admin_user):
p = Person.objects.create(name="Samuel L. Jackson")
session = kansas.legislative_sessions.get(identifier="2020")
UnmatchedName.objects.create(
id=1, session=session, name="Sam Jackson", sponsorships_count=5, votes_count=5
)
apply_data = {
"match_data": {"unmatchedId": 1, "button": "Match", "matchedId": p.id}
}
client.force_login(admin_user)
with django_assert_num_queries(6):
resp = client.post(
"/admin/people/matcher/update/",
json.dumps(apply_data),
content_type="application/json",
)
assert resp.status_code == 200
assert resp.json() == {"status": "success"}
matched = UnmatchedName.objects.get()
assert matched.status == NameStatus.MATCHED_PERSON
assert matched.matched_person_id == p.id
@pytest.mark.django_db
def test_apply_match_ignore(client, django_assert_num_queries, kansas, admin_user):
session = kansas.legislative_sessions.get(identifier="2020")
UnmatchedName.objects.create(
id=2, session=session, name="Eva Green", sponsorships_count=16, votes_count=7
)
match_data = {"match_data": {"unmatchedId": 2, "button": "Ignore", "matchedId": ""}}
client.force_login(admin_user)
with django_assert_num_queries(6):
resp = client.post(
"/admin/people/matcher/update/",
json.dumps(match_data),
content_type="application/json",
)
assert resp.status_code == 200
assert resp.json() == {"status": "success"}
matched = UnmatchedName.objects.get()
assert matched.status == NameStatus.IGNORED
@pytest.mark.django_db
def test_apply_match_source_error(
client, django_assert_num_queries, kansas, admin_user
):
session = kansas.legislative_sessions.get(identifier="2020")
UnmatchedName.objects.create(
id=3,
session=session,
name="David Tennant",
sponsorships_count=10,
votes_count=2,
)
match_data = {
"match_data": {"unmatchedId": 3, "button": "Source Error", "matchedId": ""}
}
client.force_login(admin_user)
with django_assert_num_queries(6):
resp = client.post(
"/admin/people/matcher/update/",
json.dumps(match_data),
content_type="application/json",
)
assert resp.status_code == 200
assert resp.json() == {"status": "success"}
matched = UnmatchedName.objects.get()
assert matched.status == NameStatus.SOURCE_ERROR
@pytest.mark.django_db
def test_apply_match_404(client, django_assert_num_queries, admin_user):
client.force_login(admin_user)
with django_assert_num_queries(5):
match_data = {
"match_data": {"unmatchedId": 9999, "button": "Match", "matchedId": "1"}
}
resp = client.post(
"/admin/people/matcher/update/",
json.dumps(match_data),
content_type="application/json",
)
assert resp.status_code == 404
@pytest.mark.django_db
def test_people_list(client, django_assert_num_queries, admin_user, kansas):
house = Organization.objects.get(name="Kansas House")
senate = Organization.objects.get(name="Kansas Senate")
sam = create_test_person("Sam Jackson", org=house, party="Democratic", district="1")
sam.identifiers.create(scheme="twitter", identifier="@SamuelLJackson")
sam.contact_details.create(
value="555-555-5555", type="voice", note="Capitol Office"
)
create_test_person("Bosephorous Fogg", org=house, party="Republican", district="2")
create_test_person("Cran Crumble", org=senate, party="Republican", district="A")
client.force_login(admin_user)
with django_assert_num_queries(7):
resp = client.get("/admin/people/ks/")
assert resp.status_code == 200
people = resp.context["context"]["current_people"]
assert len(people) == 3
sam_data = [p for p in people if p["name"] == "Sam Jackson"][0]
assert sam_data["district"] == "1"
assert sam_data["twitter"] == "@SamuelLJackson"
assert sam_data["capitol_voice"] == "555-555-5555"
@pytest.mark.django_db
def test_retire_person(client, django_assert_num_queries, admin_user, kansas):
house = Organization.objects.get(name="Kansas House")
sam = create_test_person("Sam Jackson", org=house, party="Democratic", district="1")
retire_data = {
"id": sam.id,
"name": sam.name,
"reason": "ran for new office",
"retirementDate": "2021-01-01",
"isDead": False,
"vacantSeat": True,
}
client.force_login(admin_user)
with django_assert_num_queries(6):
resp = client.post(
"/admin/people/retire/",
json.dumps(retire_data),
content_type="application/json",
)
assert resp.status_code == 200
ds = DeltaSet.objects.get()
assert "retire Sam Jackson" == ds.name
assert ds.person_retirements.all().count() == 1
retirement = ds.person_retirements.get()
assert retirement.person_id == sam.id
assert retirement.reason == "ran for new office"
assert retirement.date == "2021-01-01"
assert retirement.is_vacant
assert retirement.is_dead is False
| true
| true
|
f7036aa4f3fc1fce05496b25d2d43eb341c65925
| 9,659
|
py
|
Python
|
imageops/imageops/server.py
|
EdgeGallery/toolchain
|
c7b6e99a754e45372155f54ead8a3860f72f78a4
|
[
"Apache-2.0"
] | 18
|
2021-03-12T09:18:41.000Z
|
2021-03-12T10:15:34.000Z
|
imageops/imageops/server.py
|
EdgeGallery/toolchain
|
c7b6e99a754e45372155f54ead8a3860f72f78a4
|
[
"Apache-2.0"
] | null | null | null |
imageops/imageops/server.py
|
EdgeGallery/toolchain
|
c7b6e99a754e45372155f54ead8a3860f72f78a4
|
[
"Apache-2.0"
] | null | null | null |
"""
# Copyright 2021 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
import os
from pathlib import Path
from imageops.utils import Utils
from imageops.logger import Logger
class Server(object):
"""
Backend server for imageops API
    The request_id is the only input param; it is used to identify this request
"""
logger = Logger(__name__).get_logger()
def __init__(self, request_id=None):
"""
Init Server class
"""
if not request_id:
msg = 'Lacking request_id.'
self.logger.error(msg)
raise ValueError(msg)
self.request_id = str(request_id)
if not os.getenv('TMP_PATH'):
msg = 'No TMP_PATH found in env.'
self.logger.error(msg)
raise ValueError(msg)
self.tmp_path = os.getenv('TMP_PATH')
if not os.getenv('IMAGE_PATH'):
msg = 'No IMAGE_PATH found in env.'
self.logger.error(msg)
raise ValueError(msg)
self.image_path = os.getenv('IMAGE_PATH')
self.check_record_file = 'check_info.json'
self.compress_record_file = 'compress_status.log'
self.check_rc = {0: 'Check Completed, the image is (now) consistent',
1: 'Check completed, image is corrupted',
2: 'Check completed, image has leaked clusters, but is not corrupted',
3: 'Check failed',
4: 'Check in Progress',
                         5: 'Check Exiting because this type of image is not supported',
6: 'Check Time Out'}
self.compress_rc = {0: 'Compress Completed',
1: 'Compress In Progress',
2: 'Compress Failed',
                            3: 'Compress Exiting because there is not enough space left',
4: 'Compress Time Out'}
def check_vm_image(self, input_image=None):
"""
        Check the input vm image to get its checksum and basic info such as type and size
"""
self.logger.info('Start to check VM image %s ...', input_image)
if not input_image:
msg = 'No image is given to do the check.'
self.logger.error(msg)
raise ValueError(msg)
image = Path(input_image)
if not image.is_file():
            msg = 'Given image {} does not exist.'.format(input_image)
self.logger.error(msg)
raise ValueError(msg)
try:
check_record_path = os.path.join(self.tmp_path, self.request_id)
os.makedirs(check_record_path)
check_record_file = os.path.join(check_record_path, self.check_record_file)
check_info = {'checkResult': 4}
check_info['imageInfo'] = Utils.check_cmd_exec(input_image, check_record_file)
check_info['checksum'] = Utils.get_md5_checksum(input_image, check_record_file)
Utils.write_json_file(check_record_file, check_info)
status = 0
msg = 'Check In Progress'
except Exception as exception:
status = 1
msg = 'Check Failed'
check_info = {'checkResult': 99}
Utils.write_json_file(check_record_file, check_info)
self.logger.error(exception)
self.logger.info('Check image %s, status: %s, msg: %s', input_image, status, msg)
return status, msg
def get_check_status(self):
"""
Get the status of one check with the request ID
"""
self.logger.info('Start to get check status...')
check_info = {}
try:
check_record_file = os.path.join(self.tmp_path,
self.request_id,
self.check_record_file)
check_info = Utils.read_json_file(check_record_file)
self.logger.debug(check_info)
if not check_info.get('imageInfo'):
return 4, self.check_rc[4], check_info
image_info = check_info.get('imageInfo')
if image_info.get('filename'):
file_name = image_info.get('filename').split('/')[-1]
check_info['imageInfo']['filename'] = file_name
if check_info.get('checkResult') == 4 or not check_info.get('checksum'):
return 4, self.check_rc[4], check_info
if check_info.get('checkResult') == 99:
return 3, self.check_rc[3], check_info
if check_info.get('checkResult') == 100:
return 6, self.check_rc[6], check_info
if check_info.get('checkResult') == 63:
return 5, self.check_rc[5], check_info
if check_info.get('checkResult') == 0:
return 0, self.check_rc[0], check_info
if check_info.get('checkResult') == 2:
return 1, self.check_rc[1], check_info
if check_info.get('checkResult') == 3:
return 2, self.check_rc[2], check_info
return 3, self.check_rc[3], check_info
except IOError as io_exception:
self.logger.exception(io_exception)
return 3, '{}, {}'.format(self.check_rc[3], 'nonexistent request ID'), check_info
except Exception:
return 3, self.check_rc[3], check_info
def compress_vm_image(self, input_image=None, output_image=None):
"""
        Compress the input vm image to get a slim one which is sparsified.
        Can also convert a raw image to a qcow2 one.
"""
self.logger.info('Start to compress vm image %s ...', input_image)
if not input_image:
msg = 'No image is given.'
self.logger.error(msg)
raise ValueError(msg)
if not output_image:
msg = 'No output image path is given.'
self.logger.error(msg)
raise ValueError(msg)
image = Path(input_image)
if not image.is_file():
            msg = 'Image {} does not exist.'.format(input_image)
self.logger.error(msg)
raise ValueError(msg)
try:
compress_record_path = os.path.join(self.tmp_path, self.request_id)
os.makedirs(compress_record_path)
compress_record_file = os.path.join(compress_record_path, self.compress_record_file)
self.logger.info('Start to compress image %s ...', input_image)
if not Utils.check_compress_requires(input_image, self.tmp_path):
self.logger.error('Free disk space under %s is not enough to compress image %s',
self.tmp_path, input_image)
status = 1
msg = '{}'.format(self.compress_rc.get(3))
Utils.append_write_plain_file(compress_record_file, msg)
else:
self.logger.info('Free disk space under %s is enough to compress image %s',
self.tmp_path, input_image)
Utils.compress_cmd_exec(input_image, output_image, compress_record_file)
status = 0
msg = '{}'.format('Compress In Progress')
except Exception as exception:
self.logger.error(exception)
status = 1
msg = '{}'.format(self.compress_rc.get(2))
Utils.append_write_plain_file(compress_record_file, msg)
self.logger.info('Compress image %s with status: %s and msg: %s', input_image, status, msg)
return status, msg
def get_compress_status(self):
"""
Get the status of one compress with the request ID
"""
self.logger.info('Start to get status of compress image ...')
try:
compress_record_file = os.path.join(self.tmp_path,
self.request_id,
self.compress_record_file)
with open(compress_record_file, 'r') as compress_file:
for line in compress_file:
if self.compress_rc[0] in line:
self.logger.info(self.compress_rc[0])
return 0, self.compress_rc[0], 1
for item in [2, 3, 4]:
if self.compress_rc[item] in line:
self.logger.error(self.compress_rc[item])
return item, self.compress_rc[item], 0
except IOError as io_exception:
self.logger.exception(io_exception)
return 2, '{}, {}'.format(self.compress_rc[2], 'nonexistent request ID'), 0
except Exception as exception:
self.logger.exception(exception)
return 2, self.compress_rc[2], 0
try:
compress_rate = Utils.get_compress_rate(compress_record_file)
self.logger.info(self.compress_rc[1])
return 1, self.compress_rc[1], compress_rate
except Exception as exception:
self.logger.exception(exception)
return 2, self.compress_rc[2], 0
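# A hedged usage sketch (paths and request id assumed; TMP_PATH and IMAGE_PATH
# must be set in the environment): a caller creates one Server per request,
# kicks off a check, then polls its status.
#
#   server = Server(request_id='req-0001')
#   status, msg = server.check_vm_image('/tmp/test.qcow2')
#   code, text, info = server.get_check_status()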
| 41.813853
| 99
| 0.575422
|
import os
from pathlib import Path
from imageops.utils import Utils
from imageops.logger import Logger
class Server(object):
logger = Logger(__name__).get_logger()
def __init__(self, request_id=None):
if not request_id:
msg = 'Lacking request_id.'
self.logger.error(msg)
raise ValueError(msg)
self.request_id = str(request_id)
if not os.getenv('TMP_PATH'):
msg = 'No TMP_PATH found in env.'
self.logger.error(msg)
raise ValueError(msg)
self.tmp_path = os.getenv('TMP_PATH')
if not os.getenv('IMAGE_PATH'):
msg = 'No IMAGE_PATH found in env.'
self.logger.error(msg)
raise ValueError(msg)
self.image_path = os.getenv('IMAGE_PATH')
self.check_record_file = 'check_info.json'
self.compress_record_file = 'compress_status.log'
self.check_rc = {0: 'Check Completed, the image is (now) consistent',
1: 'Check completed, image is corrupted',
2: 'Check completed, image has leaked clusters, but is not corrupted',
3: 'Check failed',
4: 'Check in Progress',
                         5: 'Check Exiting because this type of image is not supported',
6: 'Check Time Out'}
self.compress_rc = {0: 'Compress Completed',
1: 'Compress In Progress',
2: 'Compress Failed',
                            3: 'Compress Exiting because there is not enough space left',
4: 'Compress Time Out'}
def check_vm_image(self, input_image=None):
self.logger.info('Start to check VM image %s ...', input_image)
if not input_image:
msg = 'No image is given to do the check.'
self.logger.error(msg)
raise ValueError(msg)
image = Path(input_image)
if not image.is_file():
            msg = 'Given image {} does not exist.'.format(input_image)
self.logger.error(msg)
raise ValueError(msg)
try:
check_record_path = os.path.join(self.tmp_path, self.request_id)
os.makedirs(check_record_path)
check_record_file = os.path.join(check_record_path, self.check_record_file)
check_info = {'checkResult': 4}
check_info['imageInfo'] = Utils.check_cmd_exec(input_image, check_record_file)
check_info['checksum'] = Utils.get_md5_checksum(input_image, check_record_file)
Utils.write_json_file(check_record_file, check_info)
status = 0
msg = 'Check In Progress'
except Exception as exception:
status = 1
msg = 'Check Failed'
check_info = {'checkResult': 99}
Utils.write_json_file(check_record_file, check_info)
self.logger.error(exception)
self.logger.info('Check image %s, status: %s, msg: %s', input_image, status, msg)
return status, msg
def get_check_status(self):
self.logger.info('Start to get check status...')
check_info = {}
try:
check_record_file = os.path.join(self.tmp_path,
self.request_id,
self.check_record_file)
check_info = Utils.read_json_file(check_record_file)
self.logger.debug(check_info)
if not check_info.get('imageInfo'):
return 4, self.check_rc[4], check_info
image_info = check_info.get('imageInfo')
if image_info.get('filename'):
file_name = image_info.get('filename').split('/')[-1]
check_info['imageInfo']['filename'] = file_name
if check_info.get('checkResult') == 4 or not check_info.get('checksum'):
return 4, self.check_rc[4], check_info
if check_info.get('checkResult') == 99:
return 3, self.check_rc[3], check_info
if check_info.get('checkResult') == 100:
return 6, self.check_rc[6], check_info
if check_info.get('checkResult') == 63:
return 5, self.check_rc[5], check_info
if check_info.get('checkResult') == 0:
return 0, self.check_rc[0], check_info
if check_info.get('checkResult') == 2:
return 1, self.check_rc[1], check_info
if check_info.get('checkResult') == 3:
return 2, self.check_rc[2], check_info
return 3, self.check_rc[3], check_info
except IOError as io_exception:
self.logger.exception(io_exception)
return 3, '{}, {}'.format(self.check_rc[3], 'nonexistent request ID'), check_info
except Exception:
return 3, self.check_rc[3], check_info
def compress_vm_image(self, input_image=None, output_image=None):
self.logger.info('Start to compress vm image %s ...', input_image)
if not input_image:
msg = 'No image is given.'
self.logger.error(msg)
raise ValueError(msg)
if not output_image:
msg = 'No output image path is given.'
self.logger.error(msg)
raise ValueError(msg)
image = Path(input_image)
if not image.is_file():
            msg = 'Image {} does not exist.'.format(input_image)
self.logger.error(msg)
raise ValueError(msg)
try:
compress_record_path = os.path.join(self.tmp_path, self.request_id)
os.makedirs(compress_record_path)
compress_record_file = os.path.join(compress_record_path, self.compress_record_file)
self.logger.info('Start to compress image %s ...', input_image)
if not Utils.check_compress_requires(input_image, self.tmp_path):
self.logger.error('Free disk space under %s is not enough to compress image %s',
self.tmp_path, input_image)
status = 1
msg = '{}'.format(self.compress_rc.get(3))
Utils.append_write_plain_file(compress_record_file, msg)
else:
self.logger.info('Free disk space under %s is enough to compress image %s',
self.tmp_path, input_image)
Utils.compress_cmd_exec(input_image, output_image, compress_record_file)
status = 0
msg = '{}'.format('Compress In Progress')
except Exception as exception:
self.logger.error(exception)
status = 1
msg = '{}'.format(self.compress_rc.get(2))
Utils.append_write_plain_file(compress_record_file, msg)
self.logger.info('Compress image %s with status: %s and msg: %s', input_image, status, msg)
return status, msg
def get_compress_status(self):
self.logger.info('Start to get status of compress image ...')
try:
compress_record_file = os.path.join(self.tmp_path,
self.request_id,
self.compress_record_file)
with open(compress_record_file, 'r') as compress_file:
for line in compress_file:
if self.compress_rc[0] in line:
self.logger.info(self.compress_rc[0])
return 0, self.compress_rc[0], 1
for item in [2, 3, 4]:
if self.compress_rc[item] in line:
self.logger.error(self.compress_rc[item])
return item, self.compress_rc[item], 0
except IOError as io_exception:
self.logger.exception(io_exception)
return 2, '{}, {}'.format(self.compress_rc[2], 'nonexistent request ID'), 0
except Exception as exception:
self.logger.exception(exception)
return 2, self.compress_rc[2], 0
try:
compress_rate = Utils.get_compress_rate(compress_record_file)
self.logger.info(self.compress_rc[1])
return 1, self.compress_rc[1], compress_rate
except Exception as exception:
self.logger.exception(exception)
return 2, self.compress_rc[2], 0
| true
| true
|
f7036ad171f84f32479e698abddcec505adae5dc
| 2,307
|
py
|
Python
|
examples/modal_beamforming_open_circular_array.py
|
trojanjay/sfa-numpy
|
bff5737ef429f31228d20a9e1d0ce7d46d3080d3
|
[
"MIT"
] | 39
|
2017-09-22T10:30:22.000Z
|
2022-03-29T15:56:22.000Z
|
examples/modal_beamforming_open_circular_array.py
|
trojanjay/sfa-numpy
|
bff5737ef429f31228d20a9e1d0ce7d46d3080d3
|
[
"MIT"
] | 16
|
2017-11-14T13:02:44.000Z
|
2021-04-01T09:53:47.000Z
|
examples/modal_beamforming_open_circular_array.py
|
trojanjay/sfa-numpy
|
bff5737ef429f31228d20a9e1d0ce7d46d3080d3
|
[
"MIT"
] | 11
|
2017-12-08T23:54:45.000Z
|
2021-01-06T21:06:47.000Z
|
"""
Compute the plane wave decomposition for an incident broadband plane wave
on an open circular array using a modal beamformer of finite order.
"""
import numpy as np
import matplotlib.pyplot as plt
import micarray
from micarray.util import db
Nsf = 50 # order of the incident sound field
N = 30 # order of modal beamformer/microphone array
pw_angle = 1.23 * np.pi # incidence angle of plane wave
pol_pwd = np.linspace(0, 2*np.pi, 180, endpoint=False) # angles for plane wave decomposition
k = np.linspace(0, 20, 100) # wavenumber vector
r = 1 # radius of array
# get uniform grid (microphone positions) of order N
pol, weights = micarray.modal.angular.grid_equal_polar_angle(N)
# pressure on the surface of an open cylinder for an incident plane wave
Bn = micarray.modal.radial.circular_pw(Nsf, k, r, setup='open')
D = micarray.modal.radial.circ_diagonal_mode_mat(Bn)
Psi_p = micarray.modal.angular.cht_matrix(Nsf, pol)
Psi_pw = micarray.modal.angular.cht_matrix(Nsf, pw_angle)
p = np.matmul(np.matmul(Psi_p, D), np.conj(Psi_pw.T))
p = np.squeeze(p)
# incident plane wave exhibiting infinite spatial bandwidth
# p = np.exp(1j * k[:, np.newaxis]*r * np.cos(pol - pw_angle))
# plane wave decomposition using modal beamforming
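# In matrix form (editorial summary): q = Psi_q @ D @ Psi_p^H @ p, i.e. a CHT
# analysis of the array pressure, regularized radial equalization, and a CHT
# synthesis on the plane-wave-decomposition angles.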
Bn = micarray.modal.radial.circular_pw(N, k, r, setup='open')
Dn, _ = micarray.modal.radial.regularize(1/Bn, 3000, 'softclip')
D = micarray.modal.radial.circ_diagonal_mode_mat(Dn)
Psi_p = micarray.modal.angular.cht_matrix(N, pol, weights)
Psi_q = micarray.modal.angular.cht_matrix(N, pol_pwd)
A_pwd = np.matmul(np.matmul(Psi_q, D), np.conj(Psi_p.T))
q_pwd = np.squeeze(np.matmul(A_pwd, np.expand_dims(p, 2)))
q_pwd_t = np.fft.fftshift(np.fft.irfft(q_pwd, axis=0), axes=0)
# visualize plane wave decomposition (aka beampattern)
plt.figure()
plt.pcolormesh(k, pol_pwd/np.pi, db(q_pwd.T), vmin=-40)
plt.colorbar()
plt.xlabel(r'$kr$')
plt.ylabel(r'$\phi / \pi$')
plt.title('Plane wave decomposition by modal beamformer (frequency domain)')
plt.savefig('modal_beamforming_open_circular_array_fd.png')
plt.figure()
plt.pcolormesh(range(2*len(k)-2), pol_pwd/np.pi, db(q_pwd_t.T), vmin=-40)
plt.colorbar()
plt.ylabel(r'$\phi / \pi$')
plt.title('Plane wave decomposition by modal beamformer (time domain)')
plt.savefig('modal_beamforming_open_circular_array_td.png')
| 40.473684
| 93
| 0.748591
|
import numpy as np
import matplotlib.pyplot as plt
import micarray
from micarray.util import db
Nsf = 50
N = 30
pw_angle = 1.23 * np.pi
pol_pwd = np.linspace(0, 2*np.pi, 180, endpoint=False)
k = np.linspace(0, 20, 100)
r = 1
pol, weights = micarray.modal.angular.grid_equal_polar_angle(N)
Bn = micarray.modal.radial.circular_pw(Nsf, k, r, setup='open')
D = micarray.modal.radial.circ_diagonal_mode_mat(Bn)
Psi_p = micarray.modal.angular.cht_matrix(Nsf, pol)
Psi_pw = micarray.modal.angular.cht_matrix(Nsf, pw_angle)
p = np.matmul(np.matmul(Psi_p, D), np.conj(Psi_pw.T))
p = np.squeeze(p)
Bn = micarray.modal.radial.circular_pw(N, k, r, setup='open')
Dn, _ = micarray.modal.radial.regularize(1/Bn, 3000, 'softclip')
D = micarray.modal.radial.circ_diagonal_mode_mat(Dn)
Psi_p = micarray.modal.angular.cht_matrix(N, pol, weights)
Psi_q = micarray.modal.angular.cht_matrix(N, pol_pwd)
A_pwd = np.matmul(np.matmul(Psi_q, D), np.conj(Psi_p.T))
q_pwd = np.squeeze(np.matmul(A_pwd, np.expand_dims(p, 2)))
q_pwd_t = np.fft.fftshift(np.fft.irfft(q_pwd, axis=0), axes=0)
plt.figure()
plt.pcolormesh(k, pol_pwd/np.pi, db(q_pwd.T), vmin=-40)
plt.colorbar()
plt.xlabel(r'$kr$')
plt.ylabel(r'$\phi / \pi$')
plt.title('Plane wave decomposition by modal beamformer (frequency domain)')
plt.savefig('modal_beamforming_open_circular_array_fd.png')
plt.figure()
plt.pcolormesh(range(2*len(k)-2), pol_pwd/np.pi, db(q_pwd_t.T), vmin=-40)
plt.colorbar()
plt.ylabel(r'$\phi / \pi$')
plt.title('Plane wave decomposition by modal beamformer (time domain)')
plt.savefig('modal_beamforming_open_circular_array_td.png')
| true
| true
|
f7036b111cbc0c4e8d9ebaa6a764738d5e9e5c12
| 3,864
|
py
|
Python
|
eulexistdb/manager.py
|
emory-libraries/eulexistdb
|
7b74f4ec9e7486e493060d4012098613c2b9c85e
|
[
"Apache-2.0"
] | 9
|
2015-05-09T07:18:21.000Z
|
2021-02-26T16:27:08.000Z
|
eulexistdb/manager.py
|
emory-libraries/eulexistdb
|
7b74f4ec9e7486e493060d4012098613c2b9c85e
|
[
"Apache-2.0"
] | 14
|
2016-05-20T12:19:09.000Z
|
2019-09-05T20:59:36.000Z
|
eulexistdb/manager.py
|
emory-libraries/eulexistdb
|
7b74f4ec9e7486e493060d4012098613c2b9c85e
|
[
"Apache-2.0"
] | 3
|
2016-05-23T17:20:08.000Z
|
2017-12-20T13:23:06.000Z
|
# file eulexistdb/manager.py
#
# Copyright 2010,2011 Emory University Libraries
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf import settings
from eulexistdb.db import ExistDB
from eulexistdb.query import QuerySet
class Manager(object):
"""
Connect an :class:`~eulexistdb.models.XmlModel` to an
:class:`~eulexistdb.db.ExistDB` for easy querying.
Typically each :class:`~eulexistdb.models.XmlModel` will
have one or more ``Manager`` members. Like Django ``Manager`` objects
these offer a convenient way to access model-based queries. Like Django
``Manager`` objects, developers can `derive a child class`_ and override
:meth:`get_query_set` to modify the default ``QuerySet``. Unlike Django,
this implementation does not currently provide a default ``Manager`` for
every ``XmlModel``.
Developers should consult :class:`eulexistdb.query.QuerySet` for a
complete list of its methods. ``Manager`` directly exposes these
methods, forwarding them to the ``QuerySet`` returned by its own
:meth:`get_query_set`.
.. _derive a child class: http://docs.djangoproject.com/en/1.1/topics/db/managers/#modifying-initial-manager-querysets
"""
def __init__(self, xpath):
self.xpath = xpath
# NOTE: model needs to be patched in to a real XmlModel class after
# the fact. currently this is handled by XmlModelType metaclass
# logic.
self.model = None
def get_query_set(self):
"""
Get the default :class:`eulexistdb.db.QuerySet` returned
by this ``Manager``. Typically this returns a ``QuerySet`` based on
the ``Manager``'s `xpath`, evaluated in the
``settings.EXISTDB_ROOT_COLLECTION`` on a default
:class:`eulexistdb.db.ExistDB`.
This is a convenient point for developers to customize an object's
managers. Deriving a child class from Manager and overriding or
extending this method is a handy way to create custom queries
accessible from an :class:`~eulexistdb.models.XmlModel`.
"""
if hasattr(settings, 'EXISTDB_FULLTEXT_OPTIONS'):
fulltext_opts = settings.EXISTDB_FULLTEXT_OPTIONS
else:
fulltext_opts = {}
return QuerySet(model=self.model, xpath=self.xpath, using=ExistDB(),
collection=settings.EXISTDB_ROOT_COLLECTION,
fulltext_options=fulltext_opts)
#######################
# PROXIES TO QUERYSET #
#######################
def count(self):
return self.get_query_set().count()
def filter(self, *args, **kwargs):
return self.get_query_set().filter(*args, **kwargs)
def or_filter(self, *args, **kwargs):
return self.get_query_set().or_filter(*args, **kwargs)
def order_by(self, *args, **kwargs):
return self.get_query_set().order_by(*args, **kwargs)
def only(self, *args, **kwargs):
return self.get_query_set().only(*args, **kwargs)
def also(self, *args, **kwargs):
return self.get_query_set().also(*args, **kwargs)
def distinct(self):
return self.get_query_set().distinct()
def all(self):
return self.get_query_set().all()
def get(self, *args, **kwargs):
return self.get_query_set().get(*args, **kwargs)
| 36.8
| 122
| 0.667443
|
from django.conf import settings
from eulexistdb.db import ExistDB
from eulexistdb.query import QuerySet
class Manager(object):
def __init__(self, xpath):
self.xpath = xpath
self.model = None
def get_query_set(self):
if hasattr(settings, 'EXISTDB_FULLTEXT_OPTIONS'):
fulltext_opts = settings.EXISTDB_FULLTEXT_OPTIONS
else:
fulltext_opts = {}
return QuerySet(model=self.model, xpath=self.xpath, using=ExistDB(),
collection=settings.EXISTDB_ROOT_COLLECTION,
fulltext_options=fulltext_opts)
def count(self):
return self.get_query_set().count()
def filter(self, *args, **kwargs):
return self.get_query_set().filter(*args, **kwargs)
def or_filter(self, *args, **kwargs):
return self.get_query_set().or_filter(*args, **kwargs)
def order_by(self, *args, **kwargs):
return self.get_query_set().order_by(*args, **kwargs)
def only(self, *args, **kwargs):
return self.get_query_set().only(*args, **kwargs)
def also(self, *args, **kwargs):
return self.get_query_set().also(*args, **kwargs)
def distinct(self):
return self.get_query_set().distinct()
def all(self):
return self.get_query_set().all()
def get(self, *args, **kwargs):
return self.get_query_set().get(*args, **kwargs)
| true
| true
|
f7036c5fbe4bafb0e1af4388a84aaf25e75f6c0c
| 623
|
py
|
Python
|
14-flashcardsContador/1-versaoTerminal/0-versoesAntigas/flashcardsContador2/Soma.py
|
jonasht/Python
|
2affe509ce9619f745ee645ff3a120485bf403bc
|
[
"MIT"
] | null | null | null |
14-flashcardsContador/1-versaoTerminal/0-versoesAntigas/flashcardsContador2/Soma.py
|
jonasht/Python
|
2affe509ce9619f745ee645ff3a120485bf403bc
|
[
"MIT"
] | null | null | null |
14-flashcardsContador/1-versaoTerminal/0-versoesAntigas/flashcardsContador2/Soma.py
|
jonasht/Python
|
2affe509ce9619f745ee645ff3a120485bf403bc
|
[
"MIT"
] | null | null | null |
class Soma:
def __init__(self):
self.numeroDeCartas = list()
def set_numeroDeCartas(self, numero):
if numero == '':
numero = '1'
numero = numero[:]
self.numeroDeCartas.extend(numero)
def get_numeroDeCartas(self):
return self.numeroDeCartas
def ConverterPInt(self, converter):
convertidos = []
for c in converter:
convertidos.append(int(c))
return convertidos
def Somar(self):
return sum(self.ConverterPInt(self.get_numeroDeCartas()))
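# Usage note (editorial): list.extend() splits a string into characters, so
# Somar() returns the digit-wise sum of everything entered:
#
#   s = Soma()
#   s.set_numeroDeCartas('12')
#   s.Somar()  # -> 3 (1 + 2, not 12)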
| 23.074074
| 65
| 0.539326
|
class Soma:
def __init__(self):
self.numeroDeCartas = list()
def set_numeroDeCartas(self, numero):
if numero == '':
numero = '1'
numero = numero[:]
self.numeroDeCartas.extend(numero)
def get_numeroDeCartas(self):
return self.numeroDeCartas
def ConverterPInt(self, converter):
convertidos = []
for c in converter:
convertidos.append(int(c))
return convertidos
def Somar(self):
return sum(self.ConverterPInt(self.get_numeroDeCartas()))
| true
| true
|
f7036c87ca207cfcc12ce67719024edf8b098396
| 4,026
|
py
|
Python
|
ralamb.py
|
uclyyu/over9000
|
42db9fa6ac5a9a2e177f1f9a9a660bee9cd5d587
|
[
"Apache-2.0"
] | 411
|
2019-08-25T12:24:34.000Z
|
2022-03-25T03:31:38.000Z
|
optimizers/ralamb.py
|
AryanRaj315/EAD2020
|
9e2e0aa4be9da941372a21ea627c38a3eb7be617
|
[
"MIT"
] | 23
|
2019-08-26T13:14:45.000Z
|
2021-07-04T19:00:15.000Z
|
optimizers/ralamb.py
|
AryanRaj315/EAD2020
|
9e2e0aa4be9da941372a21ea627c38a3eb7be617
|
[
"MIT"
] | 60
|
2019-08-25T12:31:46.000Z
|
2022-02-10T09:39:20.000Z
|
import torch, math
from torch.optim.optimizer import Optimizer
# RAdam + LARS
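# (Editorial note) RAdam supplies the variance-rectified adaptive step; LARS
# then rescales that step per parameter tensor by the trust ratio
# ||w|| / ||step||, so large layers do not take disproportionately big updates.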
class Ralamb(Optimizer):
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
self.buffer = [[None, None, None] for ind in range(10)]
super(Ralamb, self).__init__(params, defaults)
def __setstate__(self, state):
super(Ralamb, self).__setstate__(state)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError('Ralamb does not support sparse gradients')
p_data_fp32 = p.data.float()
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_data_fp32)
state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
# Decay the first and second moment running average coefficient
# m_t
exp_avg.mul_(beta1).add_(1 - beta1, grad)
# v_t
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
state['step'] += 1
buffered = self.buffer[int(state['step'] % 10)]
if state['step'] == buffered[0]:
N_sma, radam_step_size = buffered[1], buffered[2]
else:
buffered[0] = state['step']
beta2_t = beta2 ** state['step']
N_sma_max = 2 / (1 - beta2) - 1
N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
buffered[1] = N_sma
# more conservative since it's an approximated value
if N_sma >= 5:
radam_step_size = math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
else:
radam_step_size = 1.0 / (1 - beta1 ** state['step'])
buffered[2] = radam_step_size
if group['weight_decay'] != 0:
p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)
# more conservative since it's an approximated value
radam_step = p_data_fp32.clone()
if N_sma >= 5:
denom = exp_avg_sq.sqrt().add_(group['eps'])
radam_step.addcdiv_(-radam_step_size * group['lr'], exp_avg, denom)
else:
radam_step.add_(-radam_step_size * group['lr'], exp_avg)
radam_norm = radam_step.pow(2).sum().sqrt()
weight_norm = p.data.pow(2).sum().sqrt().clamp(0, 10)
if weight_norm == 0 or radam_norm == 0:
trust_ratio = 1
else:
trust_ratio = weight_norm / radam_norm
state['weight_norm'] = weight_norm
state['adam_norm'] = radam_norm
state['trust_ratio'] = trust_ratio
if N_sma >= 5:
p_data_fp32.addcdiv_(-radam_step_size * group['lr'] * trust_ratio, exp_avg, denom)
else:
p_data_fp32.add_(-radam_step_size * group['lr'] * trust_ratio, exp_avg)
p.data.copy_(p_data_fp32)
return loss
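# A hedged usage sketch: Ralamb is a drop-in torch Optimizer (note that the
# positional add_(scalar, tensor) calls above target the pre-1.5 PyTorch API).
#
#   model = torch.nn.Linear(10, 2)
#   opt = Ralamb(model.parameters(), lr=1e-3)
#   opt.zero_grad()
#   model(torch.randn(4, 10)).sum().backward()
#   opt.step()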
| 40.26
| 181
| 0.504223
|
import torch, math
from torch.optim.optimizer import Optimizer
class Ralamb(Optimizer):
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
self.buffer = [[None, None, None] for ind in range(10)]
super(Ralamb, self).__init__(params, defaults)
def __setstate__(self, state):
super(Ralamb, self).__setstate__(state)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError('Ralamb does not support sparse gradients')
p_data_fp32 = p.data.float()
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_data_fp32)
state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
state['step'] += 1
buffered = self.buffer[int(state['step'] % 10)]
if state['step'] == buffered[0]:
N_sma, radam_step_size = buffered[1], buffered[2]
else:
buffered[0] = state['step']
beta2_t = beta2 ** state['step']
N_sma_max = 2 / (1 - beta2) - 1
N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
buffered[1] = N_sma
if N_sma >= 5:
radam_step_size = math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
else:
radam_step_size = 1.0 / (1 - beta1 ** state['step'])
buffered[2] = radam_step_size
if group['weight_decay'] != 0:
p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)
radam_step = p_data_fp32.clone()
if N_sma >= 5:
denom = exp_avg_sq.sqrt().add_(group['eps'])
radam_step.addcdiv_(-radam_step_size * group['lr'], exp_avg, denom)
else:
radam_step.add_(-radam_step_size * group['lr'], exp_avg)
radam_norm = radam_step.pow(2).sum().sqrt()
weight_norm = p.data.pow(2).sum().sqrt().clamp(0, 10)
if weight_norm == 0 or radam_norm == 0:
trust_ratio = 1
else:
trust_ratio = weight_norm / radam_norm
state['weight_norm'] = weight_norm
state['adam_norm'] = radam_norm
state['trust_ratio'] = trust_ratio
if N_sma >= 5:
p_data_fp32.addcdiv_(-radam_step_size * group['lr'] * trust_ratio, exp_avg, denom)
else:
p_data_fp32.add_(-radam_step_size * group['lr'] * trust_ratio, exp_avg)
p.data.copy_(p_data_fp32)
return loss
| true
| true
|
f7036cde771e158378a1bdef64092434538782a5
| 18,321
|
py
|
Python
|
mlir/test/Bindings/Python/ir_operation.py
|
MochalovaAn/llvm
|
528aa5ca4aa9df447dc3497ef19da3b124e88d7d
|
[
"Apache-2.0"
] | null | null | null |
mlir/test/Bindings/Python/ir_operation.py
|
MochalovaAn/llvm
|
528aa5ca4aa9df447dc3497ef19da3b124e88d7d
|
[
"Apache-2.0"
] | null | null | null |
mlir/test/Bindings/Python/ir_operation.py
|
MochalovaAn/llvm
|
528aa5ca4aa9df447dc3497ef19da3b124e88d7d
|
[
"Apache-2.0"
] | null | null | null |
# RUN: %PYTHON %s | FileCheck %s
import gc
import io
import itertools
from mlir.ir import *
def run(f):
print("\nTEST:", f.__name__)
f()
gc.collect()
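  # A zero live count after collection confirms the test did not leak any
  # Context references (editorial note; _get_live_count is a debug helper).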
assert Context._get_live_count() == 0
# Verify iterator based traversal of the op/region/block hierarchy.
# CHECK-LABEL: TEST: testTraverseOpRegionBlockIterators
def testTraverseOpRegionBlockIterators():
ctx = Context()
ctx.allow_unregistered_dialects = True
module = Module.parse(r"""
func @f1(%arg0: i32) -> i32 {
%1 = "custom.addi"(%arg0, %arg0) : (i32, i32) -> i32
return %1 : i32
}
""", ctx)
op = module.operation
assert op.context is ctx
# Get the block using iterators off of the named collections.
regions = list(op.regions)
blocks = list(regions[0].blocks)
# CHECK: MODULE REGIONS=1 BLOCKS=1
print(f"MODULE REGIONS={len(regions)} BLOCKS={len(blocks)}")
# Should verify.
# CHECK: .verify = True
print(f".verify = {module.operation.verify()}")
# Get the regions and blocks from the default collections.
default_regions = list(op)
default_blocks = list(default_regions[0])
# They should compare equal regardless of how obtained.
assert default_regions == regions
assert default_blocks == blocks
# Should be able to get the operations from either the named collection
# or the block.
operations = list(blocks[0].operations)
default_operations = list(blocks[0])
assert default_operations == operations
def walk_operations(indent, op):
for i, region in enumerate(op):
print(f"{indent}REGION {i}:")
for j, block in enumerate(region):
print(f"{indent} BLOCK {j}:")
for k, child_op in enumerate(block):
print(f"{indent} OP {k}: {child_op}")
walk_operations(indent + " ", child_op)
# CHECK: REGION 0:
# CHECK: BLOCK 0:
# CHECK: OP 0: func
# CHECK: REGION 0:
# CHECK: BLOCK 0:
# CHECK: OP 0: %0 = "custom.addi"
# CHECK: OP 1: return
walk_operations("", op)
run(testTraverseOpRegionBlockIterators)
# Verify index based traversal of the op/region/block hierarchy.
# CHECK-LABEL: TEST: testTraverseOpRegionBlockIndices
def testTraverseOpRegionBlockIndices():
ctx = Context()
ctx.allow_unregistered_dialects = True
module = Module.parse(r"""
func @f1(%arg0: i32) -> i32 {
%1 = "custom.addi"(%arg0, %arg0) : (i32, i32) -> i32
return %1 : i32
}
""", ctx)
def walk_operations(indent, op):
for i in range(len(op.regions)):
region = op.regions[i]
print(f"{indent}REGION {i}:")
for j in range(len(region.blocks)):
block = region.blocks[j]
print(f"{indent} BLOCK {j}:")
for k in range(len(block.operations)):
child_op = block.operations[k]
print(f"{indent} OP {k}: {child_op}")
walk_operations(indent + " ", child_op)
# CHECK: REGION 0:
# CHECK: BLOCK 0:
# CHECK: OP 0: func
# CHECK: REGION 0:
# CHECK: BLOCK 0:
# CHECK: OP 0: %0 = "custom.addi"
# CHECK: OP 1: return
walk_operations("", module.operation)
run(testTraverseOpRegionBlockIndices)
# CHECK-LABEL: TEST: testBlockArgumentList
def testBlockArgumentList():
with Context() as ctx:
module = Module.parse(r"""
func @f1(%arg0: i32, %arg1: f64, %arg2: index) {
return
}
""", ctx)
func = module.body.operations[0]
entry_block = func.regions[0].blocks[0]
assert len(entry_block.arguments) == 3
# CHECK: Argument 0, type i32
# CHECK: Argument 1, type f64
# CHECK: Argument 2, type index
for arg in entry_block.arguments:
print(f"Argument {arg.arg_number}, type {arg.type}")
new_type = IntegerType.get_signless(8 * (arg.arg_number + 1))
arg.set_type(new_type)
# CHECK: Argument 0, type i8
# CHECK: Argument 1, type i16
# CHECK: Argument 2, type i24
for arg in entry_block.arguments:
print(f"Argument {arg.arg_number}, type {arg.type}")
run(testBlockArgumentList)
# CHECK-LABEL: TEST: testOperationOperands
def testOperationOperands():
with Context() as ctx:
ctx.allow_unregistered_dialects = True
module = Module.parse(r"""
func @f1(%arg0: i32) {
%0 = "test.producer"() : () -> i64
"test.consumer"(%arg0, %0) : (i32, i64) -> ()
return
}""")
func = module.body.operations[0]
entry_block = func.regions[0].blocks[0]
consumer = entry_block.operations[1]
assert len(consumer.operands) == 2
# CHECK: Operand 0, type i32
# CHECK: Operand 1, type i64
for i, operand in enumerate(consumer.operands):
print(f"Operand {i}, type {operand.type}")
run(testOperationOperands)
# CHECK-LABEL: TEST: testOperationOperandsSlice
def testOperationOperandsSlice():
with Context() as ctx:
ctx.allow_unregistered_dialects = True
module = Module.parse(r"""
func @f1() {
%0 = "test.producer0"() : () -> i64
%1 = "test.producer1"() : () -> i64
%2 = "test.producer2"() : () -> i64
%3 = "test.producer3"() : () -> i64
%4 = "test.producer4"() : () -> i64
"test.consumer"(%0, %1, %2, %3, %4) : (i64, i64, i64, i64, i64) -> ()
return
}""")
func = module.body.operations[0]
entry_block = func.regions[0].blocks[0]
consumer = entry_block.operations[5]
assert len(consumer.operands) == 5
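    # Reversing a slice twice copies the operand list; element-wise equality
    # with the originals checks that slicing hands back the same values.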
for left, right in zip(consumer.operands, consumer.operands[::-1][::-1]):
assert left == right
# CHECK: test.producer0
# CHECK: test.producer1
# CHECK: test.producer2
# CHECK: test.producer3
# CHECK: test.producer4
full_slice = consumer.operands[:]
for operand in full_slice:
print(operand)
# CHECK: test.producer0
# CHECK: test.producer1
first_two = consumer.operands[0:2]
for operand in first_two:
print(operand)
# CHECK: test.producer3
# CHECK: test.producer4
last_two = consumer.operands[3:]
for operand in last_two:
print(operand)
# CHECK: test.producer0
# CHECK: test.producer2
# CHECK: test.producer4
even = consumer.operands[::2]
for operand in even:
print(operand)
# CHECK: test.producer2
fourth = consumer.operands[::2][1::2]
for operand in fourth:
print(operand)
run(testOperationOperandsSlice)
# CHECK-LABEL: TEST: testDetachedOperation
def testDetachedOperation():
ctx = Context()
ctx.allow_unregistered_dialects = True
with Location.unknown(ctx):
i32 = IntegerType.get_signed(32)
op1 = Operation.create(
"custom.op1", results=[i32, i32], regions=1, attributes={
"foo": StringAttr.get("foo_value"),
"bar": StringAttr.get("bar_value"),
})
# CHECK: %0:2 = "custom.op1"() ( {
# CHECK: }) {bar = "bar_value", foo = "foo_value"} : () -> (si32, si32)
print(op1)
# TODO: Check successors once enough infra exists to do it properly.
run(testDetachedOperation)
# CHECK-LABEL: TEST: testOperationInsertionPoint
def testOperationInsertionPoint():
ctx = Context()
ctx.allow_unregistered_dialects = True
module = Module.parse(r"""
func @f1(%arg0: i32) -> i32 {
%1 = "custom.addi"(%arg0, %arg0) : (i32, i32) -> i32
return %1 : i32
}
""", ctx)
# Create test op.
with Location.unknown(ctx):
op1 = Operation.create("custom.op1")
op2 = Operation.create("custom.op2")
func = module.body.operations[0]
entry_block = func.regions[0].blocks[0]
ip = InsertionPoint.at_block_begin(entry_block)
ip.insert(op1)
ip.insert(op2)
# CHECK: func @f1
# CHECK: "custom.op1"()
# CHECK: "custom.op2"()
# CHECK: %0 = "custom.addi"
print(module)
# Trying to add a previously added op should raise.
try:
ip.insert(op1)
except ValueError:
pass
else:
assert False, "expected insert of attached op to raise"
run(testOperationInsertionPoint)
# CHECK-LABEL: TEST: testOperationWithRegion
def testOperationWithRegion():
ctx = Context()
ctx.allow_unregistered_dialects = True
with Location.unknown(ctx):
i32 = IntegerType.get_signed(32)
op1 = Operation.create("custom.op1", regions=1)
block = op1.regions[0].blocks.append(i32, i32)
# CHECK: "custom.op1"() ( {
# CHECK: ^bb0(%arg0: si32, %arg1: si32): // no predecessors
# CHECK: "custom.terminator"() : () -> ()
# CHECK: }) : () -> ()
terminator = Operation.create("custom.terminator")
ip = InsertionPoint(block)
ip.insert(terminator)
print(op1)
# Now add the whole operation to another op.
# TODO: Verify lifetime hazard by nulling out the new owning module and
# accessing op1.
# TODO: Also verify accessing the terminator once both parents are nulled
# out.
module = Module.parse(r"""
func @f1(%arg0: i32) -> i32 {
%1 = "custom.addi"(%arg0, %arg0) : (i32, i32) -> i32
return %1 : i32
}
""")
func = module.body.operations[0]
entry_block = func.regions[0].blocks[0]
ip = InsertionPoint.at_block_begin(entry_block)
ip.insert(op1)
# CHECK: func @f1
# CHECK: "custom.op1"()
# CHECK: "custom.terminator"
# CHECK: %0 = "custom.addi"
print(module)
run(testOperationWithRegion)
# CHECK-LABEL: TEST: testOperationResultList
def testOperationResultList():
ctx = Context()
module = Module.parse(r"""
func @f1() {
%0:3 = call @f2() : () -> (i32, f64, index)
return
}
func private @f2() -> (i32, f64, index)
""", ctx)
caller = module.body.operations[0]
call = caller.regions[0].blocks[0].operations[0]
assert len(call.results) == 3
# CHECK: Result 0, type i32
# CHECK: Result 1, type f64
# CHECK: Result 2, type index
for res in call.results:
print(f"Result {res.result_number}, type {res.type}")
run(testOperationResultList)
# CHECK-LABEL: TEST: testOperationResultListSlice
def testOperationResultListSlice():
with Context() as ctx:
ctx.allow_unregistered_dialects = True
module = Module.parse(r"""
func @f1() {
"some.op"() : () -> (i1, i2, i3, i4, i5)
return
}
""")
func = module.body.operations[0]
entry_block = func.regions[0].blocks[0]
producer = entry_block.operations[0]
assert len(producer.results) == 5
for left, right in zip(producer.results, producer.results[::-1][::-1]):
assert left == right
assert left.result_number == right.result_number
# CHECK: Result 0, type i1
# CHECK: Result 1, type i2
# CHECK: Result 2, type i3
# CHECK: Result 3, type i4
# CHECK: Result 4, type i5
full_slice = producer.results[:]
for res in full_slice:
print(f"Result {res.result_number}, type {res.type}")
# CHECK: Result 1, type i2
# CHECK: Result 2, type i3
# CHECK: Result 3, type i4
middle = producer.results[1:4]
for res in middle:
print(f"Result {res.result_number}, type {res.type}")
# CHECK: Result 1, type i2
# CHECK: Result 3, type i4
odd = producer.results[1::2]
for res in odd:
print(f"Result {res.result_number}, type {res.type}")
# CHECK: Result 3, type i4
# CHECK: Result 1, type i2
inverted_middle = producer.results[-2:0:-2]
for res in inverted_middle:
print(f"Result {res.result_number}, type {res.type}")
run(testOperationResultListSlice)
# CHECK-LABEL: TEST: testOperationAttributes
def testOperationAttributes():
ctx = Context()
ctx.allow_unregistered_dialects = True
module = Module.parse(r"""
"some.op"() { some.attribute = 1 : i8,
other.attribute = 3.0,
dependent = "text" } : () -> ()
""", ctx)
op = module.body.operations[0]
assert len(op.attributes) == 3
iattr = IntegerAttr(op.attributes["some.attribute"])
fattr = FloatAttr(op.attributes["other.attribute"])
sattr = StringAttr(op.attributes["dependent"])
# CHECK: Attribute type i8, value 1
print(f"Attribute type {iattr.type}, value {iattr.value}")
# CHECK: Attribute type f64, value 3.0
print(f"Attribute type {fattr.type}, value {fattr.value}")
# CHECK: Attribute value text
print(f"Attribute value {sattr.value}")
# We don't know in which order the attributes are stored.
# CHECK-DAG: NamedAttribute(dependent="text")
# CHECK-DAG: NamedAttribute(other.attribute=3.000000e+00 : f64)
# CHECK-DAG: NamedAttribute(some.attribute=1 : i8)
for attr in op.attributes:
print(str(attr))
# Check that exceptions are raised as expected.
try:
op.attributes["does_not_exist"]
except KeyError:
pass
else:
assert False, "expected KeyError on accessing a non-existent attribute"
try:
op.attributes[42]
except IndexError:
pass
else:
assert False, "expected IndexError on accessing an out-of-bounds attribute"
run(testOperationAttributes)
# CHECK-LABEL: TEST: testOperationPrint
def testOperationPrint():
ctx = Context()
module = Module.parse(r"""
func @f1(%arg0: i32) -> i32 {
%0 = constant dense<[1, 2, 3, 4]> : tensor<4xi32>
return %arg0 : i32
}
""", ctx)
# Test print to stdout.
# CHECK: return %arg0 : i32
module.operation.print()
# Test print to text file.
f = io.StringIO()
# CHECK: <class 'str'>
# CHECK: return %arg0 : i32
module.operation.print(file=f)
str_value = f.getvalue()
print(str_value.__class__)
print(f.getvalue())
# Test print to binary file.
f = io.BytesIO()
# CHECK: <class 'bytes'>
# CHECK: return %arg0 : i32
module.operation.print(file=f, binary=True)
bytes_value = f.getvalue()
print(bytes_value.__class__)
print(bytes_value)
# Test get_asm with options.
# CHECK: value = opaque<"_", "0xDEADBEEF"> : tensor<4xi32>
# CHECK: "std.return"(%arg0) : (i32) -> () -:4:7
module.operation.print(large_elements_limit=2, enable_debug_info=True,
pretty_debug_info=True, print_generic_op_form=True, use_local_scope=True)
run(testOperationPrint)
# CHECK-LABEL: TEST: testKnownOpView
def testKnownOpView():
with Context(), Location.unknown():
Context.current.allow_unregistered_dialects = True
module = Module.parse(r"""
%1 = "custom.f32"() : () -> f32
%2 = "custom.f32"() : () -> f32
%3 = addf %1, %2 : f32
""")
print(module)
# addf should map to a known OpView class in the std dialect.
# We know the OpView for it defines an 'lhs' attribute.
addf = module.body.operations[2]
# CHECK: <mlir.dialects._std_ops_gen._AddFOp object
print(repr(addf))
# CHECK: "custom.f32"()
print(addf.lhs)
# One of the custom ops should resolve to the default OpView.
custom = module.body.operations[0]
# CHECK: <_mlir.ir.OpView object
print(repr(custom))
# Check again to make sure negative caching works.
custom = module.body.operations[0]
# CHECK: <_mlir.ir.OpView object
print(repr(custom))
run(testKnownOpView)
# CHECK-LABEL: TEST: testSingleResultProperty
def testSingleResultProperty():
with Context(), Location.unknown():
Context.current.allow_unregistered_dialects = True
module = Module.parse(r"""
"custom.no_result"() : () -> ()
%0:2 = "custom.two_result"() : () -> (f32, f32)
%1 = "custom.one_result"() : () -> f32
""")
print(module)
try:
module.body.operations[0].result
except ValueError as e:
# CHECK: Cannot call .result on operation custom.no_result which has 0 results
print(e)
else:
assert False, "Expected exception"
try:
module.body.operations[1].result
except ValueError as e:
# CHECK: Cannot call .result on operation custom.two_result which has 2 results
print(e)
else:
assert False, "Expected exception"
# CHECK: %1 = "custom.one_result"() : () -> f32
print(module.body.operations[2])
run(testSingleResultProperty)
# CHECK-LABEL: TEST: testPrintInvalidOperation
def testPrintInvalidOperation():
ctx = Context()
with Location.unknown(ctx):
module = Operation.create("module", regions=2)
# This module has two regions and is invalid; verify that we fall back
# to the generic printer for safety.
block = module.regions[0].blocks.append()
# CHECK: // Verification failed, printing generic form
# CHECK: "module"() ( {
# CHECK: }) : () -> ()
print(module)
# CHECK: .verify = False
print(f".verify = {module.operation.verify()}")
run(testPrintInvalidOperation)
# CHECK-LABEL: TEST: testCreateWithInvalidAttributes
def testCreateWithInvalidAttributes():
ctx = Context()
with Location.unknown(ctx):
try:
Operation.create("module", attributes={None:StringAttr.get("name")})
except Exception as e:
# CHECK: Invalid attribute key (not a string) when attempting to create the operation "module"
print(e)
try:
Operation.create("module", attributes={42:StringAttr.get("name")})
except Exception as e:
# CHECK: Invalid attribute key (not a string) when attempting to create the operation "module"
print(e)
try:
Operation.create("module", attributes={"some_key":ctx})
except Exception as e:
# CHECK: Invalid attribute value for the key "some_key" when attempting to create the operation "module"
print(e)
try:
Operation.create("module", attributes={"some_key":None})
except Exception as e:
# CHECK: Found an invalid (`None`?) attribute value for the key "some_key" when attempting to create the operation "module"
print(e)
run(testCreateWithInvalidAttributes)
# CHECK-LABEL: TEST: testOperationName
def testOperationName():
ctx = Context()
ctx.allow_unregistered_dialects = True
module = Module.parse(r"""
%0 = "custom.op1"() : () -> f32
%1 = "custom.op2"() : () -> i32
%2 = "custom.op1"() : () -> f32
""", ctx)
# CHECK: custom.op1
# CHECK: custom.op2
# CHECK: custom.op1
for op in module.body.operations:
print(op.operation.name)
run(testOperationName)
# CHECK-LABEL: TEST: testCapsuleConversions
def testCapsuleConversions():
ctx = Context()
ctx.allow_unregistered_dialects = True
with Location.unknown(ctx):
m = Operation.create("custom.op1").operation
m_capsule = m._CAPIPtr
assert '"mlir.ir.Operation._CAPIPtr"' in repr(m_capsule)
m2 = Operation._CAPICreate(m_capsule)
assert m2 is m
run(testCapsuleConversions)
| 29.693679
| 129
| 0.645871
|
import gc
import io
import itertools
from mlir.ir import *
def run(f):
print("\nTEST:", f.__name__)
f()
gc.collect()
assert Context._get_live_count() == 0
def testTraverseOpRegionBlockIterators():
ctx = Context()
ctx.allow_unregistered_dialects = True
module = Module.parse(r"""
func @f1(%arg0: i32) -> i32 {
%1 = "custom.addi"(%arg0, %arg0) : (i32, i32) -> i32
return %1 : i32
}
""", ctx)
op = module.operation
assert op.context is ctx
regions = list(op.regions)
blocks = list(regions[0].blocks)
print(f"MODULE REGIONS={len(regions)} BLOCKS={len(blocks)}")
print(f".verify = {module.operation.verify()}")
default_regions = list(op)
default_blocks = list(default_regions[0])
assert default_regions == regions
assert default_blocks == blocks
operations = list(blocks[0].operations)
default_operations = list(blocks[0])
assert default_operations == operations
def walk_operations(indent, op):
for i, region in enumerate(op):
print(f"{indent}REGION {i}:")
for j, block in enumerate(region):
print(f"{indent} BLOCK {j}:")
for k, child_op in enumerate(block):
print(f"{indent} OP {k}: {child_op}")
walk_operations(indent + " ", child_op)
walk_operations("", op)
run(testTraverseOpRegionBlockIterators)
def testTraverseOpRegionBlockIndices():
ctx = Context()
ctx.allow_unregistered_dialects = True
module = Module.parse(r"""
func @f1(%arg0: i32) -> i32 {
%1 = "custom.addi"(%arg0, %arg0) : (i32, i32) -> i32
return %1 : i32
}
""", ctx)
def walk_operations(indent, op):
for i in range(len(op.regions)):
region = op.regions[i]
print(f"{indent}REGION {i}:")
for j in range(len(region.blocks)):
block = region.blocks[j]
print(f"{indent} BLOCK {j}:")
for k in range(len(block.operations)):
child_op = block.operations[k]
print(f"{indent} OP {k}: {child_op}")
walk_operations(indent + " ", child_op)
walk_operations("", module.operation)
run(testTraverseOpRegionBlockIndices)
def testBlockArgumentList():
with Context() as ctx:
module = Module.parse(r"""
func @f1(%arg0: i32, %arg1: f64, %arg2: index) {
return
}
""", ctx)
func = module.body.operations[0]
entry_block = func.regions[0].blocks[0]
assert len(entry_block.arguments) == 3
for arg in entry_block.arguments:
print(f"Argument {arg.arg_number}, type {arg.type}")
new_type = IntegerType.get_signless(8 * (arg.arg_number + 1))
arg.set_type(new_type)
for arg in entry_block.arguments:
print(f"Argument {arg.arg_number}, type {arg.type}")
run(testBlockArgumentList)
def testOperationOperands():
with Context() as ctx:
ctx.allow_unregistered_dialects = True
module = Module.parse(r"""
func @f1(%arg0: i32) {
%0 = "test.producer"() : () -> i64
"test.consumer"(%arg0, %0) : (i32, i64) -> ()
return
}""")
func = module.body.operations[0]
entry_block = func.regions[0].blocks[0]
consumer = entry_block.operations[1]
assert len(consumer.operands) == 2
for i, operand in enumerate(consumer.operands):
print(f"Operand {i}, type {operand.type}")
run(testOperationOperands)
def testOperationOperandsSlice():
with Context() as ctx:
ctx.allow_unregistered_dialects = True
module = Module.parse(r"""
func @f1() {
%0 = "test.producer0"() : () -> i64
%1 = "test.producer1"() : () -> i64
%2 = "test.producer2"() : () -> i64
%3 = "test.producer3"() : () -> i64
%4 = "test.producer4"() : () -> i64
"test.consumer"(%0, %1, %2, %3, %4) : (i64, i64, i64, i64, i64) -> ()
return
}""")
func = module.body.operations[0]
entry_block = func.regions[0].blocks[0]
consumer = entry_block.operations[5]
assert len(consumer.operands) == 5
for left, right in zip(consumer.operands, consumer.operands[::-1][::-1]):
assert left == right
full_slice = consumer.operands[:]
for operand in full_slice:
print(operand)
first_two = consumer.operands[0:2]
for operand in first_two:
print(operand)
last_two = consumer.operands[3:]
for operand in last_two:
print(operand)
even = consumer.operands[::2]
for operand in even:
print(operand)
fourth = consumer.operands[::2][1::2]
for operand in fourth:
print(operand)
run(testOperationOperandsSlice)
def testDetachedOperation():
ctx = Context()
ctx.allow_unregistered_dialects = True
with Location.unknown(ctx):
i32 = IntegerType.get_signed(32)
op1 = Operation.create(
"custom.op1", results=[i32, i32], regions=1, attributes={
"foo": StringAttr.get("foo_value"),
"bar": StringAttr.get("bar_value"),
})
print(op1)
run(testDetachedOperation)
def testOperationInsertionPoint():
ctx = Context()
ctx.allow_unregistered_dialects = True
module = Module.parse(r"""
func @f1(%arg0: i32) -> i32 {
%1 = "custom.addi"(%arg0, %arg0) : (i32, i32) -> i32
return %1 : i32
}
""", ctx)
with Location.unknown(ctx):
op1 = Operation.create("custom.op1")
op2 = Operation.create("custom.op2")
func = module.body.operations[0]
entry_block = func.regions[0].blocks[0]
ip = InsertionPoint.at_block_begin(entry_block)
ip.insert(op1)
ip.insert(op2)
print(module)
try:
ip.insert(op1)
except ValueError:
pass
else:
assert False, "expected insert of attached op to raise"
run(testOperationInsertionPoint)
def testOperationWithRegion():
ctx = Context()
ctx.allow_unregistered_dialects = True
with Location.unknown(ctx):
i32 = IntegerType.get_signed(32)
op1 = Operation.create("custom.op1", regions=1)
block = op1.regions[0].blocks.append(i32, i32)
terminator = Operation.create("custom.terminator")
ip = InsertionPoint(block)
ip.insert(terminator)
print(op1)
module = Module.parse(r"""
func @f1(%arg0: i32) -> i32 {
%1 = "custom.addi"(%arg0, %arg0) : (i32, i32) -> i32
return %1 : i32
}
""")
func = module.body.operations[0]
entry_block = func.regions[0].blocks[0]
ip = InsertionPoint.at_block_begin(entry_block)
ip.insert(op1)
print(module)
run(testOperationWithRegion)
def testOperationResultList():
ctx = Context()
module = Module.parse(r"""
func @f1() {
%0:3 = call @f2() : () -> (i32, f64, index)
return
}
func private @f2() -> (i32, f64, index)
""", ctx)
caller = module.body.operations[0]
call = caller.regions[0].blocks[0].operations[0]
assert len(call.results) == 3
for res in call.results:
print(f"Result {res.result_number}, type {res.type}")
run(testOperationResultList)
def testOperationResultListSlice():
with Context() as ctx:
ctx.allow_unregistered_dialects = True
module = Module.parse(r"""
func @f1() {
"some.op"() : () -> (i1, i2, i3, i4, i5)
return
}
""")
func = module.body.operations[0]
entry_block = func.regions[0].blocks[0]
producer = entry_block.operations[0]
assert len(producer.results) == 5
for left, right in zip(producer.results, producer.results[::-1][::-1]):
assert left == right
assert left.result_number == right.result_number
full_slice = producer.results[:]
for res in full_slice:
print(f"Result {res.result_number}, type {res.type}")
middle = producer.results[1:4]
for res in middle:
print(f"Result {res.result_number}, type {res.type}")
odd = producer.results[1::2]
for res in odd:
print(f"Result {res.result_number}, type {res.type}")
inverted_middle = producer.results[-2:0:-2]
for res in inverted_middle:
print(f"Result {res.result_number}, type {res.type}")
run(testOperationResultListSlice)
def testOperationAttributes():
ctx = Context()
ctx.allow_unregistered_dialects = True
module = Module.parse(r"""
"some.op"() { some.attribute = 1 : i8,
other.attribute = 3.0,
dependent = "text" } : () -> ()
""", ctx)
op = module.body.operations[0]
assert len(op.attributes) == 3
iattr = IntegerAttr(op.attributes["some.attribute"])
fattr = FloatAttr(op.attributes["other.attribute"])
sattr = StringAttr(op.attributes["dependent"])
print(f"Attribute type {iattr.type}, value {iattr.value}")
print(f"Attribute type {fattr.type}, value {fattr.value}")
print(f"Attribute value {sattr.value}")
# CHECK-DAG: NamedAttribute(dependent="text")
# CHECK-DAG: NamedAttribute(other.attribute=3.000000e+00 : f64)
# CHECK-DAG: NamedAttribute(some.attribute=1 : i8)
for attr in op.attributes:
print(str(attr))
# Check that exceptions are raised as expected.
try:
op.attributes["does_not_exist"]
except KeyError:
pass
else:
assert False, "expected KeyError on accessing a non-existent attribute"
try:
op.attributes[42]
except IndexError:
pass
else:
assert False, "expected IndexError on accessing an out-of-bounds attribute"
run(testOperationAttributes)
# CHECK-LABEL: TEST: testOperationPrint
def testOperationPrint():
ctx = Context()
module = Module.parse(r"""
func @f1(%arg0: i32) -> i32 {
%0 = constant dense<[1, 2, 3, 4]> : tensor<4xi32>
return %arg0 : i32
}
""", ctx)
# Test print to stdout.
# CHECK: return %arg0 : i32
module.operation.print()
# Test print to text file.
f = io.StringIO()
# CHECK: <class 'str'>
# CHECK: return %arg0 : i32
module.operation.print(file=f)
str_value = f.getvalue()
print(str_value.__class__)
print(f.getvalue())
# Test print to binary file.
f = io.BytesIO()
# CHECK: <class 'bytes'>
# CHECK: return %arg0 : i32
module.operation.print(file=f, binary=True)
bytes_value = f.getvalue()
print(bytes_value.__class__)
print(bytes_value)
# Test get_asm with options.
# CHECK: value = opaque<"_", "0xDEADBEEF"> : tensor<4xi32>
# CHECK: "std.return"(%arg0) : (i32) -> () -:4:7
module.operation.print(large_elements_limit=2, enable_debug_info=True,
pretty_debug_info=True, print_generic_op_form=True, use_local_scope=True)
run(testOperationPrint)
# CHECK-LABEL: TEST: testKnownOpView
def testKnownOpView():
with Context(), Location.unknown():
Context.current.allow_unregistered_dialects = True
module = Module.parse(r"""
%1 = "custom.f32"() : () -> f32
%2 = "custom.f32"() : () -> f32
%3 = addf %1, %2 : f32
""")
print(module)
# addf should map to a known OpView class in the std dialect.
# We know the OpView for it defines an 'lhs' attribute.
addf = module.body.operations[2]
# CHECK: <mlir.dialects._std_ops_gen._AddFOp object
print(repr(addf))
# CHECK: "custom.f32"()
print(addf.lhs)
# One of the custom ops should resolve to the default OpView.
custom = module.body.operations[0]
# CHECK: <_mlir.ir.OpView object
print(repr(custom))
# Check again to make sure negative caching works.
custom = module.body.operations[0]
# CHECK: <_mlir.ir.OpView object
print(repr(custom))
run(testKnownOpView)
# CHECK-LABEL: TEST: testSingleResultProperty
def testSingleResultProperty():
with Context(), Location.unknown():
Context.current.allow_unregistered_dialects = True
module = Module.parse(r"""
"custom.no_result"() : () -> ()
%0:2 = "custom.two_result"() : () -> (f32, f32)
%1 = "custom.one_result"() : () -> f32
""")
print(module)
try:
module.body.operations[0].result
except ValueError as e:
# CHECK: Cannot call .result on operation custom.no_result which has 0 results
print(e)
else:
assert False, "Expected exception"
try:
module.body.operations[1].result
except ValueError as e:
# CHECK: Cannot call .result on operation custom.two_result which has 2 results
print(e)
else:
assert False, "Expected exception"
# CHECK: %1 = "custom.one_result"() : () -> f32
print(module.body.operations[2])
run(testSingleResultProperty)
# CHECK-LABEL: TEST: testPrintInvalidOperation
def testPrintInvalidOperation():
ctx = Context()
with Location.unknown(ctx):
module = Operation.create("module", regions=2)
# This module has two regions and is invalid; verify that we fall back
# to the generic printer for safety.
block = module.regions[0].blocks.append()
# CHECK: // Verification failed, printing generic form
# CHECK: "module"() ( {
# CHECK: }) : () -> ()
print(module)
# CHECK: .verify = False
print(f".verify = {module.operation.verify()}")
run(testPrintInvalidOperation)
# CHECK-LABEL: TEST: testCreateWithInvalidAttributes
def testCreateWithInvalidAttributes():
ctx = Context()
with Location.unknown(ctx):
try:
Operation.create("module", attributes={None:StringAttr.get("name")})
except Exception as e:
# CHECK: Invalid attribute key (not a string) when attempting to create the operation "module"
print(e)
try:
Operation.create("module", attributes={42:StringAttr.get("name")})
except Exception as e:
# CHECK: Invalid attribute key (not a string) when attempting to create the operation "module"
print(e)
try:
Operation.create("module", attributes={"some_key":ctx})
except Exception as e:
# CHECK: Invalid attribute value for the key "some_key" when attempting to create the operation "module"
print(e)
try:
Operation.create("module", attributes={"some_key":None})
except Exception as e:
# CHECK: Found an invalid (`None`?) attribute value for the key "some_key" when attempting to create the operation "module"
print(e)
run(testCreateWithInvalidAttributes)
# CHECK-LABEL: TEST: testOperationName
def testOperationName():
ctx = Context()
ctx.allow_unregistered_dialects = True
module = Module.parse(r"""
%0 = "custom.op1"() : () -> f32
%1 = "custom.op2"() : () -> i32
%2 = "custom.op1"() : () -> f32
""", ctx)
# CHECK: custom.op1
# CHECK: custom.op2
# CHECK: custom.op1
for op in module.body.operations:
print(op.operation.name)
run(testOperationName)
# CHECK-LABEL: TEST: testCapsuleConversions
def testCapsuleConversions():
ctx = Context()
ctx.allow_unregistered_dialects = True
with Location.unknown(ctx):
m = Operation.create("custom.op1").operation
m_capsule = m._CAPIPtr
assert '"mlir.ir.Operation._CAPIPtr"' in repr(m_capsule)
m2 = Operation._CAPICreate(m_capsule)
assert m2 is m
run(testCapsuleConversions)
| true
| true
|
f7036da9f3477fb29d1f73794bda2fccb598b879
| 8,016
|
py
|
Python
|
docs/conf.py
|
tdi/pyPEPA
|
db15073487331b66e8ab60a1d05cb158070e0d54
|
[
"Apache-2.0"
] | 4
|
2015-05-12T09:30:45.000Z
|
2021-11-30T23:09:51.000Z
|
docs/conf.py
|
tdi/pyPEPA
|
db15073487331b66e8ab60a1d05cb158070e0d54
|
[
"Apache-2.0"
] | null | null | null |
docs/conf.py
|
tdi/pyPEPA
|
db15073487331b66e8ab60a1d05cb158070e0d54
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# pypepa documentation build configuration file, created by
# sphinx-quickstart on Thu Jul 18 15:33:13 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('../pypepa'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.mathjax', 'sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'pypepa'
copyright = '2013, Dariusz Dwornikowski'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.4'
# The full version, including alpha/beta/rc tags.
release = 'latest'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pypepadoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'pypepa.tex', 'pypepa Documentation',
'Dariusz Dwornikowski', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pypepa', 'pypepa Documentation',
['Dariusz Dwornikowski'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pypepa', 'pypepa Documentation',
'Dariusz Dwornikowski', 'pypepa', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 32.064
| 80
| 0.715694
|
import sys, os
extensions = ['sphinx.ext.mathjax', 'sphinx.ext.autodoc']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = 'pypepa'
copyright = '2013, Dariusz Dwornikowski'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.4'
# The full version, including alpha/beta/rc tags.
release = 'latest'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pypepadoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'pypepa.tex', 'pypepa Documentation',
'Dariusz Dwornikowski', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pypepa', 'pypepa Documentation',
['Dariusz Dwornikowski'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pypepa', 'pypepa Documentation',
'Dariusz Dwornikowski', 'pypepa', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
| true
| true
|
f7036df95921f8dea17f6d6274c42b3d1551879a
| 153
|
py
|
Python
|
StatsClass/urls.py
|
nelliesnoodles/my-first-blog
|
e552ea38891ebe005316487ae32a324659ad6367
|
[
"MIT"
] | null | null | null |
StatsClass/urls.py
|
nelliesnoodles/my-first-blog
|
e552ea38891ebe005316487ae32a324659ad6367
|
[
"MIT"
] | 5
|
2019-12-13T17:37:55.000Z
|
2021-06-10T20:59:32.000Z
|
StatsClass/urls.py
|
nelliesnoodles/My-Website
|
e552ea38891ebe005316487ae32a324659ad6367
|
[
"MIT"
] | null | null | null |
from django.urls import path
from . import views
urlpatterns = [
path('StatsClass', views.index),
path('BasicProbability', views.basic_prob),
]
| 19.125
| 47
| 0.712418
|
from django.urls import path
from . import views
urlpatterns = [
path('StatsClass', views.index),
path('BasicProbability', views.basic_prob),
]
| true
| true
|
f7036e9ffbe44cdcb5926a139df525aedf532378
| 262
|
py
|
Python
|
common/signature/__init__.py
|
lukius/mts
|
96d3d8b28742a474aca67bfcb079577c878bbb4c
|
[
"MIT"
] | 2
|
2015-04-04T01:44:11.000Z
|
2017-11-04T11:59:27.000Z
|
common/signature/__init__.py
|
lukius/mts
|
96d3d8b28742a474aca67bfcb079577c878bbb4c
|
[
"MIT"
] | null | null | null |
common/signature/__init__.py
|
lukius/mts
|
96d3d8b28742a474aca67bfcb079577c878bbb4c
|
[
"MIT"
] | null | null | null |
class DigitalSignatureScheme(object):
def get_public_key(self):
return self.public_key
def sign(self, message):
raise NotImplementedError
def verify(self, message, signature):
raise NotImplementedError
| 23.818182
| 41
| 0.648855
|
class DigitalSignatureScheme(object):
def get_public_key(self):
return self.public_key
def sign(self, message):
raise NotImplementedError
def verify(self, message, signature):
raise NotImplementedError
| true
| true
|
f7036eb900de1e0cf354ec91df0aa273644cb847
| 10,409
|
py
|
Python
|
t2c/segmentation.py
|
micbia/tools21cm
|
72081e94e4d83511380baacce427d79d13da2fa5
|
[
"MIT"
] | 1
|
2020-10-08T12:34:05.000Z
|
2020-10-08T12:34:05.000Z
|
t2c/segmentation.py
|
micbia/tools21cm
|
72081e94e4d83511380baacce427d79d13da2fa5
|
[
"MIT"
] | null | null | null |
t2c/segmentation.py
|
micbia/tools21cm
|
72081e94e4d83511380baacce427d79d13da2fa5
|
[
"MIT"
] | null | null | null |
"""
Created by Michele Bianco, 9 July 2021
"""
import os
import numpy as np, pkg_resources
from tqdm import tqdm
import tensorflow as tf
from tensorflow.keras.models import load_model
from tensorflow.keras import backend as K
from tensorflow.python.ops import nn_ops
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
def sigmoid_balanced_cross_entropy_with_logits(_sentinel=None, labels=None, logits=None, beta=None, name=None):
nn_ops._ensure_xent_args("sigmoid_cross_entropy_with_logits", _sentinel, labels, logits)
with ops.name_scope(name, "logistic_loss", [logits, labels]) as name:
logits = ops.convert_to_tensor(logits, name="logits")
labels = ops.convert_to_tensor(labels, name="labels")
try:
labels.get_shape().merge_with(logits.get_shape())
except ValueError:
raise ValueError("logits and labels must have the same shape (%s vs %s)" %(logits.get_shape(), labels.get_shape()))
zeros = array_ops.zeros_like(logits, dtype=logits.dtype)
cond = (logits >= zeros)
relu_logits = array_ops.where(cond, logits, zeros)
neg_abs_logits = array_ops.where(cond, -logits, logits)
balanced_cross_entropy = relu_logits*(1.-beta)-logits*labels*(1.-beta)+math_ops.log1p(math_ops.exp(neg_abs_logits))*((1.-beta)*(1.-labels)+beta*labels)
return tf.reduce_mean(balanced_cross_entropy)
def balanced_cross_entropy(y_true, y_pred):
"""
To decrease the number of false negatives, set beta>1. To decrease the number of false positives, set beta<1.
"""
beta = tf.maximum(tf.reduce_mean(1 - y_true), tf.keras.backend.epsilon())
y_pred = tf.clip_by_value(y_pred, tf.keras.backend.epsilon(), 1 - tf.keras.backend.epsilon())
y_pred = K.log(y_pred / (1 - y_pred))
return sigmoid_balanced_cross_entropy_with_logits(logits=y_pred, labels=y_true, beta=beta)
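# Illustrative usage sketch (an assumption, not part of the original module):
# these functions plug into Keras as a custom loss and custom metrics, matching
# the LOSS/METRICS entries of the network configuration printed below, e.g.
#
#   model.compile(optimizer='adam',
#                 loss=balanced_cross_entropy,
#                 metrics=[iou, dice_coef])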
def iou(y_true, y_pred):
"""
Return the Intersection over Union (IoU) for a given label.
Args:
y_true: the expected y values as a one-hot
y_pred: the predicted y values as a one-hot or softmax output
label: the label to return the IoU for
Returns:
the IoU for the given label
"""
intersection = K.sum(K.abs(y_true * y_pred))
#intersection = K.sum(y_true * y_pred)
union = K.sum(y_true) + K.sum(y_pred) - intersection
# avoid divide by zero - if the union is zero, return 1, otherwise, return the intersection over union
return K.switch(K.equal(union, 0), 1.0, intersection / union)
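# Worked example (illustrative): for y_true = [1, 1, 0] and y_pred = [1, 0, 0],
# intersection = 1 and union = 2 + 1 - 1 = 2, giving IoU = 0.5.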
def dice_coef(y_true, y_pred, smooth=1):
"""
Dice = (2*|X & Y|)/ (|X|+ |Y|)
= 2*sum(|A*B|)/(sum(A^2)+sum(B^2))
ref: https://arxiv.org/pdf/1606.04797v1.pdf
"""
intersection = K.sum(K.abs(y_true * y_pred), axis=-1)
return (2. * intersection + smooth) / (K.sum(K.square(y_true),-1) + K.sum(K.square(y_pred),-1) + smooth)
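# Worked example (illustrative): for y_true = [1, 1, 0, 0], y_pred = [1, 0, 0, 0]
# and smooth = 1: intersection = 1, so Dice = (2*1 + 1) / (2 + 1 + 1) = 0.75.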
################################################################
class segunet21cm:
def __init__(self, tta=1, verbose=False):
""" SegU-Net: segmentation of 21cm images with U-shape network (Bianco et al. 2021, https://arxiv.org/abs/2102.06713)
- tta (int): default 1. Level of test-time augmentation (TTA) applied in
             the prediction process to build the pixel-error map
             (tta=0 is super-fast but produces no error map)
- verbose (bool): default False, activates verbosity
Description:
tta = 0 : fast (~7 sec), tends to be a few percent less accurate (<2%) than the other two cases, no pixel-error map (no TTA manipulation)
tta = 1 : medium (~17 sec), accurate and preferable to tta=0, with pixel-error map (3 samples)
tta = 2 : slow (~10 min), accurate, with pixel-error map (~100 samples)
Returns:
- X_seg (ndarray) : recovered binary field (1 = neutral and 0 = ionized regions)
- X_err (ndarray) : pixel-error map of the recovered binary field
Example:
$ from tools21cm import segmentation
$ seg = segmentation.segunet21cm(tta=1, verbose=True) # load model (need to be done once)
$ Xseg, Xseg_err = seg.prediction(x=dT3)
Print of the Network's Configuration file:
[TRAINING]
BATCH_SIZE = 64
AUGMENT = NOISESMT
IMG_SHAPE = 128, 128
CHAN_SIZE = 256
DROPOUT = 0.05
KERNEL_SIZE = 3
EPOCHS = 100
LOSS = balanced_cross_entropy
METRICS = iou, dice_coef, binary_accuracy, binary_crossentropy
LR = 1e-3
RECOMP = False
GPUS = 2
PATH = /home/michele/Documents/PhD_Sussex/output/ML/dataset/inputs/data2D_128_030920/
[RESUME]
RESUME_PATH = /home/michele/Documents/PhD_Sussex/output/ML/dataset/outputs/new/02-10T23-52-36_128slice/
BEST_EPOCH = 56
RESUME_EPOCH = 66
"""
self.TTA = tta
self.VERBOSE = verbose
if(self.TTA == 2):
# slow
self.MANIP = self.IndependentOperations(verbose=self.VERBOSE)
elif(self.TTA == 1):
# medium
self.MANIP = {'opt0': [lambda a: a, 0, 0]}
elif(self.TTA == 0):
# super-fast
self.MANIP = {'opt0': [lambda a: a, 0, 0]}
self.NR_MANIP = len(self.MANIP)
# load model
MODEL_NAME = pkg_resources.resource_filename('t2c', 'input_data/segunet_02-10T23-52-36_128slice_ep56.h5')
if not os.path.exists(MODEL_NAME):
if(self.VERBOSE): print(' Download network weights: %s' %MODEL_NAME)
MODEL_EPOCH = 56
METRICS = {'balanced_cross_entropy':balanced_cross_entropy, 'iou':iou, 'dice_coef':dice_coef}
self.MODEL_LOADED = load_model(MODEL_NAME, custom_objects=METRICS)
if(self.VERBOSE): print(' Loaded model: %s' %MODEL_NAME)
def UniqueRows(self, arr):
""" Remove duplicate row array in 2D data
- arr (narray): array with duplicate row
Example:
>> d = np.array([[0,1,2],[0,1,2],[0,0,0],[0,0,2],[0,1,2]])
>> UniqueRows(d)
array([[0, 0, 0],
[0, 0, 2],
[0, 1, 2]])
"""
arr = np.array(arr)
if(arr.ndim == 2):
arr = np.ascontiguousarray(arr)
unique_arr = np.unique(arr.view([('', arr.dtype)]*arr.shape[1]))
new_arr = unique_arr.view(arr.dtype).reshape((unique_arr.shape[0], arr.shape[1]))
elif(arr.ndim == 1):
new_arr = np.array(list(dict.fromkeys(arr)))
return new_arr
def IndependentOperations(self, verbose=False):
''' How many unique manipulations (horizontal and vertical flips, rotations, etc.)
can we operate on a cube?
Each independent operation is considered as an additional representation
of the same coeval data, so that it can be used for error bars with SegU-Net '''
data = np.array(range(3**3)).reshape((3,3,3))
func = [lambda a: a,
np.fliplr,
np.flipud,
lambda a: np.flipud(np.fliplr(a)),
lambda a: np.fliplr(np.flipud(a))]
axis = [0,1,2]
angl_rot = [0,1,2,3]
tot_manipl_data_flat = np.zeros((len(func)*len(axis)*len(angl_rot), data.size))
tot_operations = {'opt%d' %k:[] for k in range(0,len(func)*len(axis)*len(angl_rot))}
i = 0
for f in func:
cube = f(data)
for rotax in axis:
ax_tup = [0,1,2]
ax_tup.remove(rotax)
for rot in angl_rot:
tot_manipl_data_flat[i] = np.rot90(cube, k=rot, axes=ax_tup).flatten()
# function, axis of rotation, angle of rotation
tot_operations['opt%d' %i] = [f, rotax, rot]
i += 1
uniq_manipl_data_flat = self.UniqueRows(tot_manipl_data_flat).astype(int)
uniq_operations = {}
for iumdf, uniq_mdf in enumerate(uniq_manipl_data_flat):
for itmdf, tot_mdf in enumerate(tot_manipl_data_flat):
if(all(uniq_mdf == tot_mdf)):
uniq_operations['opt%d' %iumdf] = tot_operations['opt%d' %itmdf]
break
assert uniq_manipl_data_flat.shape[0] == len(uniq_operations)
if(verbose): print('tot number of (unique) manipulation we can do on a cube: %d' %(len(uniq_operations)))
return uniq_operations
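# Note (added for clarity): the 4 distinct flip functions x 3 rotation axes
# x 4 rotation angles enumerated above yield 60 candidates (two of the five
# lambdas coincide, since fliplr and flipud commute); deduplication with
# UniqueRows is expected to leave the 48 unique operations of the full cube
# symmetry group (24 proper rotations, each with and without a mirror flip).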
def prediction(self, x):
img_shape = x.shape
if(self.TTA == 2):
X_tta = np.zeros((np.append(3*len(self.MANIP), img_shape)))
elif(self.TTA == 1):
X_tta = np.zeros((np.append(3*len(self.MANIP), img_shape)))
elif(self.TTA == 0):
X_tta = np.zeros((np.append(len(self.MANIP), img_shape)))
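# For tta >= 1 the network is applied slice-by-slice along all three axes of
# the cube, hence the 3 * len(self.MANIP) stacked predictions; the recovered
# field X_seg is the rounded per-pixel mean and X_err the per-pixel standard
# deviation over these samples.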
if(self.VERBOSE):
loop = tqdm(range(len(self.MANIP)))
else:
loop = range(len(self.MANIP))
for iopt in loop:
opt, rotax, rot = self.MANIP['opt%d' %iopt]
ax_tup = [0,1,2]
ax_tup.remove(rotax)
cube = np.rot90(opt(x), k=rot, axes=ax_tup)
X = cube[np.newaxis, ..., np.newaxis]
for j in range(img_shape[0]):
if(self.TTA == 0):
X_tta[iopt,j,:,:] = self.MODEL_LOADED.predict(X[:,j,:,:,:], verbose=0).squeeze()
else:
X_tta[iopt,j,:,:] = self.MODEL_LOADED.predict(X[:,j,:,:,:], verbose=0).squeeze()
X_tta[iopt+len(self.MANIP),:,j,:] = self.MODEL_LOADED.predict(X[:,:,j,:,:], verbose=0).squeeze()
X_tta[iopt+len(self.MANIP)*2,:,:,j] = self.MODEL_LOADED.predict(X[:,:,:,j,:], verbose=0).squeeze()
for itta in range(X_tta.shape[0]):
opt, rotax, rot = self.MANIP['opt%d' %(itta%len(self.MANIP))]
ax_tup = [0,1,2]
ax_tup.remove(rotax)
X_tta[itta] = opt(np.rot90(X_tta[itta], k=-rot, axes=ax_tup))
X_seg = np.round(np.mean(X_tta, axis=0))
X_err = np.std(X_tta, axis=0)
return X_seg, X_err
| 40.819608
| 159
| 0.58142
|
import os
import numpy as np, pkg_resources
from tqdm import tqdm
import tensorflow as tf
from tensorflow.keras.models import load_model
from tensorflow.keras import backend as K
from tensorflow.python.ops import nn_ops
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
def sigmoid_balanced_cross_entropy_with_logits(_sentinel=None, labels=None, logits=None, beta=None, name=None):
nn_ops._ensure_xent_args("sigmoid_cross_entropy_with_logits", _sentinel, labels, logits)
with ops.name_scope(name, "logistic_loss", [logits, labels]) as name:
logits = ops.convert_to_tensor(logits, name="logits")
labels = ops.convert_to_tensor(labels, name="labels")
try:
labels.get_shape().merge_with(logits.get_shape())
except ValueError:
raise ValueError("logits and labels must have the same shape (%s vs %s)" %(logits.get_shape(), labels.get_shape()))
zeros = array_ops.zeros_like(logits, dtype=logits.dtype)
cond = (logits >= zeros)
relu_logits = array_ops.where(cond, logits, zeros)
neg_abs_logits = array_ops.where(cond, -logits, logits)
balanced_cross_entropy = relu_logits*(1.-beta)-logits*labels*(1.-beta)+math_ops.log1p(math_ops.exp(neg_abs_logits))*((1.-beta)*(1.-labels)+beta*labels)
return tf.reduce_mean(balanced_cross_entropy)
def balanced_cross_entropy(y_true, y_pred):
beta = tf.maximum(tf.reduce_mean(1 - y_true), tf.keras.backend.epsilon())
y_pred = tf.clip_by_value(y_pred, tf.keras.backend.epsilon(), 1 - tf.keras.backend.epsilon())
y_pred = K.log(y_pred / (1 - y_pred))
return sigmoid_balanced_cross_entropy_with_logits(logits=y_pred, labels=y_true, beta=beta)
def iou(y_true, y_pred):
intersection = K.sum(K.abs(y_true * y_pred))
union = K.sum(y_true) + K.sum(y_pred) - intersection
return K.switch(K.equal(union, 0), 1.0, intersection / union)
def dice_coef(y_true, y_pred, smooth=1):
intersection = K.sum(K.abs(y_true * y_pred), axis=-1)
return (2. * intersection + smooth) / (K.sum(K.square(y_true),-1) + K.sum(K.square(y_pred),-1) + smooth)
class segunet21cm:
def __init__(self, tta=1, verbose=False):
self.TTA = tta
self.VERBOSE = verbose
if(self.TTA == 2):
self.MANIP = self.IndependentOperations(verbose=self.VERBOSE)
elif(self.TTA == 1):
self.MANIP = {'opt0': [lambda a: a, 0, 0]}
elif(self.TTA == 0):
self.MANIP = {'opt0': [lambda a: a, 0, 0]}
self.NR_MANIP = len(self.MANIP)
MODEL_NAME = pkg_resources.resource_filename('t2c', 'input_data/segunet_02-10T23-52-36_128slice_ep56.h5')
if not os.path.exists(MODEL_NAME):
if(self.VERBOSE): print(' Download network weights: %s' %MODEL_NAME)
MODEL_EPOCH = 56
METRICS = {'balanced_cross_entropy':balanced_cross_entropy, 'iou':iou, 'dice_coef':dice_coef}
self.MODEL_LOADED = load_model(MODEL_NAME, custom_objects=METRICS)
if(self.VERBOSE): print(' Loaded model: %s' %MODEL_NAME)
def UniqueRows(self, arr):
arr = np.array(arr)
if(arr.ndim == 2):
arr = np.ascontiguousarray(arr)
unique_arr = np.unique(arr.view([('', arr.dtype)]*arr.shape[1]))
new_arr = unique_arr.view(arr.dtype).reshape((unique_arr.shape[0], arr.shape[1]))
elif(arr.ndim == 1):
new_arr = np.array(list(dict.fromkeys(arr)))
return new_arr
def IndependentOperations(self, verbose=False):
data = np.array(range(3**3)).reshape((3,3,3))
func = [lambda a: a,
np.fliplr,
np.flipud,
lambda a: np.flipud(np.fliplr(a)),
lambda a: np.fliplr(np.flipud(a))]
axis = [0,1,2]
angl_rot = [0,1,2,3]
tot_manipl_data_flat = np.zeros((len(func)*len(axis)*len(angl_rot), data.size))
tot_operations = {'opt%d' %k:[] for k in range(0,len(func)*len(axis)*len(angl_rot))}
i = 0
for f in func:
cube = f(data)
for rotax in axis:
ax_tup = [0,1,2]
ax_tup.remove(rotax)
for rot in angl_rot:
tot_manipl_data_flat[i] = np.rot90(cube, k=rot, axes=ax_tup).flatten()
tot_operations['opt%d' %i] = [f, rotax, rot]
i += 1
uniq_manipl_data_flat = self.UniqueRows(tot_manipl_data_flat).astype(int)
uniq_operations = {}
for iumdf, uniq_mdf in enumerate(uniq_manipl_data_flat):
for itmdf, tot_mdf in enumerate(tot_manipl_data_flat):
if(all(uniq_mdf == tot_mdf)):
uniq_operations['opt%d' %iumdf] = tot_operations['opt%d' %itmdf]
break
assert uniq_manipl_data_flat.shape[0] == len(uniq_operations)
if(verbose): print('tot number of (unique) manipulation we can do on a cube: %d' %(len(uniq_operations)))
return uniq_operations
def prediction(self, x):
img_shape = x.shape
if(self.TTA == 2):
X_tta = np.zeros((np.append(3*len(self.MANIP), img_shape)))
elif(self.TTA == 1):
X_tta = np.zeros((np.append(3*len(self.MANIP), img_shape)))
elif(self.TTA == 0):
X_tta = np.zeros((np.append(len(self.MANIP), img_shape)))
if(self.VERBOSE):
loop = tqdm(range(len(self.MANIP)))
else:
loop = range(len(self.MANIP))
for iopt in loop:
opt, rotax, rot = self.MANIP['opt%d' %iopt]
ax_tup = [0,1,2]
ax_tup.remove(rotax)
cube = np.rot90(opt(x), k=rot, axes=ax_tup)
X = cube[np.newaxis, ..., np.newaxis]
for j in range(img_shape[0]):
if(self.TTA == 0):
X_tta[iopt,j,:,:] = self.MODEL_LOADED.predict(X[:,j,:,:,:], verbose=0).squeeze()
else:
X_tta[iopt,j,:,:] = self.MODEL_LOADED.predict(X[:,j,:,:,:], verbose=0).squeeze()
X_tta[iopt+len(self.MANIP),:,j,:] = self.MODEL_LOADED.predict(X[:,:,j,:,:], verbose=0).squeeze()
X_tta[iopt+len(self.MANIP)*2,:,:,j] = self.MODEL_LOADED.predict(X[:,:,:,j,:], verbose=0).squeeze()
for itta in range(X_tta.shape[0]):
opt, rotax, rot = self.MANIP['opt%d' %(itta%len(self.MANIP))]
ax_tup = [0,1,2]
ax_tup.remove(rotax)
X_tta[itta] = opt(np.rot90(X_tta[itta], k=-rot, axes=ax_tup))
X_seg = np.round(np.mean(X_tta, axis=0))
X_err = np.std(X_tta, axis=0)
return X_seg, X_err
| true
| true
|
f7036f30e8f6a74391031fa260de951dacb90cb4
| 4,427
|
py
|
Python
|
sunshine_conversations_client/model/user_truncated.py
|
Dima2022/sunshine-conversations-python
|
8085a82dc320d97f09bb0174d11dd1865a65404a
|
[
"Apache-2.0"
] | 4
|
2020-09-27T14:28:25.000Z
|
2022-02-02T13:51:29.000Z
|
sunshine_conversations_client/model/user_truncated.py
|
Dima2022/sunshine-conversations-python
|
8085a82dc320d97f09bb0174d11dd1865a65404a
|
[
"Apache-2.0"
] | 3
|
2021-09-30T18:18:58.000Z
|
2021-12-04T07:55:23.000Z
|
sunshine_conversations_client/model/user_truncated.py
|
Dima2022/sunshine-conversations-python
|
8085a82dc320d97f09bb0174d11dd1865a65404a
|
[
"Apache-2.0"
] | 5
|
2020-11-07T02:08:18.000Z
|
2021-12-07T17:10:23.000Z
|
# coding: utf-8
"""
Sunshine Conversations API
The version of the OpenAPI document: 9.4.5
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from sunshine_conversations_client.configuration import Configuration
from sunshine_conversations_client.undefined import Undefined
class UserTruncated(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'id': 'str',
'external_id': 'str'
}
attribute_map = {
'id': 'id',
'external_id': 'externalId'
}
nulls = set()
def __init__(self, id=None, external_id=Undefined(), local_vars_configuration=None): # noqa: E501
"""UserTruncated - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._id = None
self._external_id = None
self.discriminator = None
if id is not None:
self.id = id
self.external_id = external_id
@property
def id(self):
"""Gets the id of this UserTruncated. # noqa: E501
The unique ID of the user. # noqa: E501
:return: The id of this UserTruncated. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this UserTruncated.
The unique ID of the user. # noqa: E501
:param id: The id of this UserTruncated. # noqa: E501
:type: str
"""
self._id = id
@property
def external_id(self):
"""Gets the external_id of this UserTruncated. # noqa: E501
An optional ID that can also be used to retrieve the user. # noqa: E501
:return: The external_id of this UserTruncated. # noqa: E501
:rtype: str
"""
return self._external_id
@external_id.setter
def external_id(self, external_id):
"""Sets the external_id of this UserTruncated.
An optional ID that can also be used to retrieve the user. # noqa: E501
:param external_id: The external_id of this UserTruncated. # noqa: E501
:type: str
"""
if type(external_id) is Undefined:
external_id = None
self.nulls.discard("external_id")
elif external_id is None:
self.nulls.add("external_id")
else:
self.nulls.discard("external_id")
self._external_id = external_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UserTruncated):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, UserTruncated):
return True
return self.to_dict() != other.to_dict()
| 27.66875
| 102
| 0.574656
|
import pprint
import re
import six
from sunshine_conversations_client.configuration import Configuration
from sunshine_conversations_client.undefined import Undefined
class UserTruncated(object):
openapi_types = {
'id': 'str',
'external_id': 'str'
}
attribute_map = {
'id': 'id',
'external_id': 'externalId'
}
nulls = set()
def __init__(self, id=None, external_id=Undefined(), local_vars_configuration=None):
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._id = None
self._external_id = None
self.discriminator = None
if id is not None:
self.id = id
self.external_id = external_id
@property
def id(self):
return self._id
@id.setter
def id(self, id):
self._id = id
@property
def external_id(self):
return self._external_id
@external_id.setter
def external_id(self, external_id):
if type(external_id) is Undefined:
external_id = None
self.nulls.discard("external_id")
elif external_id is None:
self.nulls.add("external_id")
else:
self.nulls.discard("external_id")
self._external_id = external_id
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, UserTruncated):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
if not isinstance(other, UserTruncated):
return True
return self.to_dict() != other.to_dict()
| true
| true
|
f7036f5c4bd73b8f7b2a333ac6993a89266119a0
| 1,581
|
py
|
Python
|
preprocessing/main.py
|
jrobertojunior/face-parsing.PyTorch
|
d34f39c9ae9726ac8eaf39ecff824a14ec4e15b9
|
[
"MIT"
] | null | null | null |
preprocessing/main.py
|
jrobertojunior/face-parsing.PyTorch
|
d34f39c9ae9726ac8eaf39ecff824a14ec4e15b9
|
[
"MIT"
] | null | null | null |
preprocessing/main.py
|
jrobertojunior/face-parsing.PyTorch
|
d34f39c9ae9726ac8eaf39ecff824a14ec4e15b9
|
[
"MIT"
] | null | null | null |
import cv2 as cv
import numpy as np
import os
def preprocess(labels_path, sep_labels_path):
# list all files in labels_path
labels_filenames = os.listdir(labels_path)
count = 0
for label_filename in labels_filenames:
label_path = os.path.join(labels_path, label_filename)
print(f'segmenting {label_filename}')
masks = segment_labels(label_path)
for att in masks:
mask = masks[att]
path = f"{sep_labels_path}/{label_filename[:-4]}_{att}.png"
print(f'{count} - writing {path}')
cv.imwrite(path, mask)
count += 1
# cv.imwrite(f'{label_filename[:-4]}_{mask}', mask)
def segment_labels(label_path):
atts = {
"background": (0, 0, 0),
"mouth": (255, 0, 0),
"eyes": (0, 255, 0),
"nose": (0, 0, 255),
"face": (128, 128, 128),
"hair": (255, 255, 0),
"eyebrows": (255, 0, 255),
"ears": (0, 255, 255),
"teeth": (255, 255, 255),
"beard": (255, 192, 192),
"sunglasses": (0, 128, 128),
}
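# Note (added for clarity): cv.imread returns images in BGR channel order, so
# the colour triples above are matched in BGR; passing the same colour as both
# bounds of cv.inRange selects exactly the pixels of that colour as a binary mask.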
label = cv.imread(label_path)
mask = np.zeros(label.shape, dtype=np.uint8)
masks = {}
for att in atts:
color = atts[att]
mask = cv.inRange(label, color, color)
masks[att] = mask
# cv.imshow(att, mask)
# cv.waitKey(0)
# cv.imwrite(f"{sep_labels_path}/{label_path[:-4]}_{att}.png", mask)
return masks
# separate_masks("./labels.png")
preprocess("./organized_dataset/labels", "./organized_dataset/segmented_labels")
| 25.918033
| 80
| 0.563567
|
import cv2 as cv
import numpy as np
import os
def preprocess(labels_path, sep_labels_path):
labels_filenames = os.listdir(labels_path)
count = 0
for label_filename in labels_filenames:
label_path = os.path.join(labels_path, label_filename)
print(f'segmenting {label_filename}')
masks = segment_labels(label_path)
for att in masks:
mask = masks[att]
path = f"{sep_labels_path}/{label_filename[:-4]}_{att}.png"
print(f'{count} - writing {path}')
cv.imwrite(path, mask)
count += 1
def segment_labels(label_path):
atts = {
"background": (0, 0, 0),
"mouth": (255, 0, 0),
"eyes": (0, 255, 0),
"nose": (0, 0, 255),
"face": (128, 128, 128),
"hair": (255, 255, 0),
"eyebrows": (255, 0, 255),
"ears": (0, 255, 255),
"teeth": (255, 255, 255),
"beard": (255, 192, 192),
"sunglasses": (0, 128, 128),
}
label = cv.imread(label_path)
mask = np.zeros(label.shape, dtype=np.uint8)
masks = {}
for att in atts:
color = atts[att]
mask = cv.inRange(label, color, color)
masks[att] = mask
return masks
preprocess("./organized_dataset/labels", "./organized_dataset/segmented_labels")
| true
| true
|
f7036f7636cc20ea4dc6d9b8460cfa888a315805
| 39,822
|
py
|
Python
|
netqasm/sdk/epr_socket.py
|
QuTech-Delft/netqasm
|
954162542c441d5663ecb9a8801eab94efd4ef2e
|
[
"MIT"
] | 6
|
2021-11-10T15:03:59.000Z
|
2022-02-16T19:35:01.000Z
|
netqasm/sdk/epr_socket.py
|
QuTech-Delft/netqasm
|
954162542c441d5663ecb9a8801eab94efd4ef2e
|
[
"MIT"
] | 13
|
2021-11-26T09:19:46.000Z
|
2022-03-29T09:21:42.000Z
|
netqasm/sdk/epr_socket.py
|
QuTech-Delft/netqasm
|
954162542c441d5663ecb9a8801eab94efd4ef2e
|
[
"MIT"
] | 4
|
2021-11-19T15:46:17.000Z
|
2022-01-23T18:59:15.000Z
|
"""EPR Socket interface."""
from __future__ import annotations
import abc
import logging
from contextlib import contextmanager
from typing import TYPE_CHECKING, Callable, ContextManager, List, Optional, Tuple, Union
from netqasm.logging.glob import get_netqasm_logger
from netqasm.qlink_compat import (
EPRRole,
EPRType,
LinkLayerOKTypeK,
LinkLayerOKTypeM,
LinkLayerOKTypeR,
RandomBasis,
TimeUnit,
)
from netqasm.sdk.build_epr import EprMeasBasis, basis_to_rotation
from netqasm.sdk.builder import EntRequestParams, EprKeepResult, EprMeasureResult
from netqasm.sdk.futures import RegFuture
from .qubit import FutureQubit, Qubit
if TYPE_CHECKING:
from netqasm.sdk import connection
T_LinkLayerOkList = Union[
List[LinkLayerOKTypeK], List[LinkLayerOKTypeM], List[LinkLayerOKTypeR]
]
class EPRSocket(abc.ABC):
"""EPR socket class. Used to generate entanglement with a remote node.
An EPR socket represents a connection with a single remote node through which
EPR pairs can be generated. Its main interfaces are the `create` and `recv`
methods. A typical use case for two nodes is that they both create an EPR socket
to the other node, and during the protocol, one of the nodes does `create`
operations on its socket while the other node does `recv` operations.
A `create` operation asks the network stack to initiate generation of EPR pairs
with the remote node. Depending on the type of generation, the result of this
operation can be qubit objects or measurement outcomes.
A `recv` operation asks the network stack to wait for the remote node to initiate
generation of EPR pairs. Again, the result can be qubit objects or measurement
outcomes.
Each `create` operation on one node must be matched by a `recv` operation on the
other node. Since "creating" and "receiving" must happen at the same time, a node
that is doing a `create` operation on its socket cannot advance until the other
node does the corresponding `recv`. This is different from classical network
sockets where a "send" operation (roughly anologous to `create` in an EPR socket)
does not block on the remote node receiving it.
An EPR socket is identified by a triple consisting of (1) the remote node ID,
(2) the local socket ID and (3) the remote socket ID.
Two nodes that want to generate EPR pairs with each other should make sure that the
IDs in their local sockets match.
"""
def __init__(
self,
remote_app_name: str,
epr_socket_id: int = 0,
remote_epr_socket_id: int = 0,
min_fidelity: int = 100,
):
"""Create an EPR socket. It still needs to be registered with the network
stack separately.
Registering and opening the EPR socket is currently done automatically by the
connection that uses this EPR socket, specifically when a context is opened
with that connection.
:param remote_app_name: name of the remote party (i.e. the role, like "client",
not necessarily the node name like "delft")
:param epr_socket_id: local socket ID, defaults to 0
:param remote_epr_socket_id: remote socket ID, defaults to 0. Note that this
must match with the local socket ID of the remote node's EPR socket.
:param min_fidelity: minimum desired fidelity for EPR pairs generated over this
socket, in percentages (i.e. range 0-100). Defaults to 100.
"""
self._conn: Optional[connection.BaseNetQASMConnection] = None
self._remote_app_name: str = remote_app_name
self._remote_node_id: Optional[
int
] = None # Gets set when the connection is set
self._epr_socket_id: int = epr_socket_id
self._remote_epr_socket_id: int = remote_epr_socket_id
if (
not isinstance(min_fidelity, int)
or (min_fidelity < 0)
or min_fidelity > 100
):
raise ValueError(
f"min_fidelity must be an integer in the range [0, 100], not {min_fidelity}"
)
self._min_fidelity: int = min_fidelity
self._logger: logging.Logger = get_netqasm_logger(
f"{self.__class__.__name__}({self._remote_app_name}, {self._epr_socket_id})"
)
@property
def conn(self) -> connection.BaseNetQASMConnection:
"""Get the underlying :class:`NetQASMConnection`"""
if self._conn is None:
raise RuntimeError("EPRSocket does not have an open connection")
return self._conn
@conn.setter
def conn(self, conn: connection.BaseNetQASMConnection):
self._conn = conn
self._remote_node_id = self._get_node_id(app_name=self._remote_app_name)
@property
def remote_app_name(self) -> str:
"""Get the remote application name"""
return self._remote_app_name
@property
def remote_node_id(self) -> int:
"""Get the remote node ID"""
if self._remote_node_id is None:
raise RuntimeError("Remote Node ID has not been initialized")
return self._remote_node_id
@property
def epr_socket_id(self) -> int:
"""Get the EPR socket ID"""
return self._epr_socket_id
@property
def remote_epr_socket_id(self) -> int:
"""Get the remote EPR socket ID"""
return self._remote_epr_socket_id
@property
def min_fidelity(self) -> int:
"""Get the desired minimum fidelity"""
return self._min_fidelity
def create_keep(
self,
number: int = 1,
post_routine: Optional[Callable] = None,
sequential: bool = False,
time_unit: TimeUnit = TimeUnit.MICRO_SECONDS,
max_time: int = 0,
expect_phi_plus: bool = True,
min_fidelity_all_at_end: Optional[int] = None,
max_tries: Optional[int] = None,
) -> List[Qubit]:
"""Ask the network stack to generate EPR pairs with the remote node and keep
them in memory.
A `create_keep` operation must always be matched by a `recv_keep` operation on
the remote node.
If `sequential` is False (default), this operation returns a list of Qubit
objects representing the local qubits that are each one half of the generated
pairs. These qubits can then be manipulated locally just like locally
initialized qubits, by e.g. applying gates or measuring them.
Each qubit also contains information about the entanglement generation that
        led to its creation, which can be accessed via its `entanglement_info` property.
A typical example for just generating one pair with another node would be:
.. code-block::
q = epr_socket.create_keep()[0]
# `q` can now be used as a normal qubit
        If `sequential` is False (default), all requested EPR pairs are generated
at once, before returning the results (qubits or entanglement info objects).
If `sequential` is True, a callback function (`post_routine`) should be
specified. After generating one EPR pair, this callback will be called, before
generating the next pair. This method can e.g. be used to generate many EPR
pairs (more than the number of physical qubits available), by measuring (and
freeing up) each qubit before the next pair is generated.
For example:
.. code-block::
outcomes = alice.new_array(num)
def post_create(conn, q, pair):
q.H()
outcome = outcomes.get_future_index(pair)
q.measure(outcome)
epr_socket.create_keep(number=num, post_routine=post_create, sequential=True)
:param number: number of EPR pairs to generate, defaults to 1
        :param post_routine: callback function for each generated pair. Only used if
`sequential` is True.
The callback should take three arguments `(conn, q, pair)` where
* `conn` is the connection (e.g. `self`)
* `q` is the entangled qubit (of type `FutureQubit`)
* `pair` is a register holding which pair is handled (0, 1, ...)
:param sequential: whether to use callbacks after each pair, defaults to False
:param time_unit: which time unit to use for the `max_time` parameter
:param max_time: maximum number of time units (see `time_unit`) the Host is
willing to wait for entanglement generation of a single pair. If generation
does not succeed within this time, the whole subroutine that this request
is part of is reset and run again by the quantum node controller.
:param expect_phi_plus: whether to assume that the EPR pairs that are created
are in the Phi+ (or Phi_00) state. Defaults to True. If True, the compiler
will make sure that if the physical link actually produced another Bell
state, the behavior seen by the application is still as if a Phi+ state
was actually produced.
:param min_fidelity_all_at_end: the minimum fidelity that *all* entangled
qubits should ideally still have at the moment the last qubit has been
generated. For example, when specifying `number=2` and
            `min_fidelity_all_at_end=80`, the program will automatically try to
make sure that both qubits have a fidelity of at least 80% when the
second qubit has been generated. It will attempt to do this by
automatically re-trying the entanglement generation if the fidelity
constraint is not satisfied. This is however an *attempt*, and not
            a guarantee!
        :param max_tries: maximum number of re-tries that should be made to achieve
the `min_fidelity_all_at_end` constraint.
:return: list of qubits created
"""
qubits, _ = self.conn.builder.sdk_create_epr_keep(
params=EntRequestParams(
remote_node_id=self.remote_node_id,
epr_socket_id=self._epr_socket_id,
number=number,
post_routine=post_routine,
sequential=sequential,
time_unit=time_unit,
max_time=max_time,
expect_phi_plus=expect_phi_plus,
min_fidelity_all_at_end=min_fidelity_all_at_end,
max_tries=max_tries,
),
)
return qubits
def create_keep_with_info(
self,
number: int = 1,
post_routine: Optional[Callable] = None,
sequential: bool = False,
time_unit: TimeUnit = TimeUnit.MICRO_SECONDS,
max_time: int = 0,
expect_phi_plus: bool = True,
min_fidelity_all_at_end: Optional[int] = None,
) -> Tuple[List[Qubit], List[EprKeepResult]]:
"""Same as create_keep but also return the EPR generation information coming
from the network stack.
For more information see the documentation of `create_keep`.
:param number: number of pairs to generate, defaults to 1
:return: tuple with (1) list of qubits created, (2) list of EprKeepResult objects
"""
qubits, info = self.conn._builder.sdk_create_epr_keep(
params=EntRequestParams(
remote_node_id=self.remote_node_id,
epr_socket_id=self._epr_socket_id,
number=number,
post_routine=post_routine,
sequential=sequential,
time_unit=time_unit,
max_time=max_time,
expect_phi_plus=expect_phi_plus,
min_fidelity_all_at_end=min_fidelity_all_at_end,
),
)
return qubits, info
def create_measure(
self,
number: int = 1,
time_unit: TimeUnit = TimeUnit.MICRO_SECONDS,
max_time: int = 0,
expect_phi_plus: bool = True,
basis_local: EprMeasBasis = None,
basis_remote: EprMeasBasis = None,
rotations_local: Tuple[int, int, int] = (0, 0, 0),
rotations_remote: Tuple[int, int, int] = (0, 0, 0),
random_basis_local: Optional[RandomBasis] = None,
random_basis_remote: Optional[RandomBasis] = None,
) -> List[EprMeasureResult]:
"""Ask the network stack to generate EPR pairs with the remote node and
measure them immediately (on both nodes).
A `create_measure` operation must always be matched by a `recv_measure`
operation on the remote node.
This operation returns a list of Linklayer response objects. These objects
contain information about the entanglement generation and includes the
measurement outcome and basis used. Note that all values are `Future` objects.
This means that the current subroutine must be flushed before the values
become defined.
An example for generating 10 pairs with another node that are immediately
measured:
.. code-block::
# list of Futures that become defined when subroutine is flushed
outcomes = []
with NetQASMConnection("alice", epr_sockets=[epr_socket]):
                ent_infos = epr_socket.create_measure(number=10)
for ent_info in ent_infos:
outcomes.append(ent_info.measurement_outcome)
The basis to measure in can also be specified. There are 3 ways to specify a
basis:
* using one of the `EprMeasBasis` variants
* by specifying 3 rotation angles, interpreted as an X-rotation, a Y-rotation
and another X-rotation. For example, setting `rotations_local` to (8, 0, 0)
means that before measuring, an X-rotation of 8*pi/16 = pi/2 radians is
applied to the qubit.
* using one of the `RandomBasis` variants, in which case one of the bases of
that variant is chosen at random just before measuring
NOTE: the node that initiates the entanglement generation, i.e. the one that
calls `create` on its EPR socket, also controls the measurement bases of the
receiving node (by setting e.g. `rotations_remote`). The receiving node cannot
change this.
:param number: number of EPR pairs to generate, defaults to 1
:param time_unit: which time unit to use for the `max_time` parameter
:param max_time: maximum number of time units (see `time_unit`) the Host is
willing to wait for entanglement generation of a single pair. If generation
does not succeed within this time, the whole subroutine that this request
is part of is reset and run again by the quantum node controller.
:param expect_phi_plus: whether to assume that the EPR pairs that are created
are in the Phi+ (or Phi_00) state. Defaults to True. If True, the compiler
will make sure that if the physical link actually produced another Bell
state, the behavior seen by the application is still as if a Phi+ state
was actually produced.
:param basis_local: basis to measure in on this node for M-type requests
:param basis_remote: basis to measure in on the remote node for M-type requests
:param rotations_local: rotations to apply before measuring on this node
:param rotations_remote: rotations to apply before measuring on remote node
:param random_basis_local: random bases to choose from when measuring on this
node
:param random_basis_remote: random bases to choose from when measuring on
the remote node
:return: list of entanglement info objects per created pair.
"""
if basis_local is not None:
rotations_local = basis_to_rotation(basis_local)
if basis_remote is not None:
rotations_remote = basis_to_rotation(basis_remote)
return self.conn.builder.sdk_create_epr_measure(
params=EntRequestParams(
remote_node_id=self.remote_node_id,
epr_socket_id=self._epr_socket_id,
number=number,
post_routine=None,
sequential=False,
time_unit=time_unit,
max_time=max_time,
expect_phi_plus=expect_phi_plus,
random_basis_local=random_basis_local,
random_basis_remote=random_basis_remote,
rotations_local=rotations_local,
rotations_remote=rotations_remote,
),
)
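    # Note on the angle encoding above: the three ints in `rotations_local` /
    # `rotations_remote` are multiples of pi/16, so e.g. (8, 0, 0) applies an
    # X-rotation of 8 * pi/16 = pi/2 radians just before the measurement.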
def create_rsp(
self,
number: int = 1,
time_unit: TimeUnit = TimeUnit.MICRO_SECONDS,
max_time: int = 0,
expect_phi_plus: bool = True,
basis_local: EprMeasBasis = None,
rotations_local: Tuple[int, int, int] = (0, 0, 0),
random_basis_local: Optional[RandomBasis] = None,
min_fidelity_all_at_end: Optional[int] = None,
) -> List[EprMeasureResult]:
"""Ask the network stack to do remote preparation with the remote node.
        A `create_rsp` operation must always be matched by a `recv_rsp` operation
on the remote node.
This operation returns a list of Linklayer response objects. These objects
contain information about the entanglement generation and includes the
measurement outcome and basis used. Note that all values are `Future` objects.
This means that the current subroutine must be flushed before the values
become defined.
An example for generating 10 pairs with another node that are immediately
measured:
.. code-block::
            m = epr_socket.create_rsp()[0]
print(m.measurement_outcome)
# remote node now has a prepared qubit
The basis to measure in can also be specified.
There are 3 ways to specify a basis:
* using one of the `EprMeasBasis` variants
* by specifying 3 rotation angles, interpreted as an X-rotation, a Y-rotation
and another X-rotation. For example, setting `rotations_local` to (8, 0, 0)
means that before measuring, an X-rotation of 8*pi/16 = pi/2 radians is
applied to the qubit.
* using one of the `RandomBasis` variants, in which case one of the bases of
that variant is chosen at random just before measuring
:param number: number of EPR pairs to generate, defaults to 1
:param time_unit: which time unit to use for the `max_time` parameter
:param max_time: maximum number of time units (see `time_unit`) the Host is
willing to wait for entanglement generation of a single pair. If generation
does not succeed within this time, the whole subroutine that this request
is part of is reset and run again by the quantum node controller.
:param expect_phi_plus: whether to assume that the EPR pairs that are created
are in the Phi+ (or Phi_00) state. Defaults to True. If True, the compiler
will make sure that if the physical link actually produced another Bell
state, the behavior seen by the application is still as if a Phi+ state
was actually produced.
        :param basis_local: basis to measure in on this node
        :param rotations_local: rotations to apply before measuring on this node
        :param random_basis_local: random bases to choose from when measuring on this
            node
        :param min_fidelity_all_at_end: minimum fidelity that *all* generated pairs
            should ideally still have once the last pair has been generated (see
            `create_keep` for details)
:return: list of entanglement info objects per created pair.
"""
if basis_local is not None:
rotations_local = basis_to_rotation(basis_local)
return self.conn.builder.sdk_create_epr_rsp(
params=EntRequestParams(
remote_node_id=self.remote_node_id,
epr_socket_id=self._epr_socket_id,
number=number,
post_routine=None,
sequential=False,
time_unit=time_unit,
max_time=max_time,
expect_phi_plus=expect_phi_plus,
random_basis_local=random_basis_local,
rotations_local=rotations_local,
min_fidelity_all_at_end=min_fidelity_all_at_end,
)
)
def create(
self,
number: int = 1,
post_routine: Optional[Callable] = None,
sequential: bool = False,
tp: EPRType = EPRType.K,
time_unit: TimeUnit = TimeUnit.MICRO_SECONDS,
max_time: int = 0,
basis_local: EprMeasBasis = None,
basis_remote: EprMeasBasis = None,
rotations_local: Tuple[int, int, int] = (0, 0, 0),
rotations_remote: Tuple[int, int, int] = (0, 0, 0),
random_basis_local: Optional[RandomBasis] = None,
random_basis_remote: Optional[RandomBasis] = None,
) -> Union[List[Qubit], List[EprMeasureResult], List[LinkLayerOKTypeM]]:
"""Ask the network stack to generate EPR pairs with the remote node.
A `create` operation must always be matched by a `recv` operation on the remote
node.
If the type of request is Create and Keep (CK, or just K) and if `sequential`
is False (default), this operation returns a list of Qubit objects representing
the local qubits that are each one half of the generated pairs. These qubits
can then be manipulated locally just like locally initialized qubits, by e.g.
applying gates or measuring them.
Each qubit also contains information about the entanglement generation that
        led to its creation, which can be accessed via its `entanglement_info` property.
A typical example for just generating one pair with another node would be:
.. code-block::
q = epr_socket.create()[0]
# `q` can now be used as a normal qubit
If the type of request is Measure Directly (MD, or just M), this operation
returns a list of Linklayer response objects. These objects contain information
about the entanglement generation and includes the measurement outcome and
basis used. Note that all values are `Future` objects. This means that the
current subroutine must be flushed before the values become defined.
An example for generating 10 pairs with another node that are immediately
measured:
.. code-block::
# list of Futures that become defined when subroutine is flushed
outcomes = []
with NetQASMConnection("alice", epr_sockets=[epr_socket]):
ent_infos = epr_socket.create(number=10, tp=EPRType.M)
for ent_info in ent_infos:
outcomes.append(ent_info.measurement_outcome)
For "Measure Directly"-type requests, the basis to measure in can also be
specified. There are 3 ways to specify a basis:
* using one of the `EprMeasBasis` variants
* by specifying 3 rotation angles, interpreted as an X-rotation, a Y-rotation
and another X-rotation. For example, setting `rotations_local` to (8, 0, 0)
means that before measuring, an X-rotation of 8*pi/16 = pi/2 radians is
applied to the qubit.
* using one of the `RandomBasis` variants, in which case one of the bases of
that variant is chosen at random just before measuring
NOTE: the node that initiates the entanglement generation, i.e. the one that
calls `create` on its EPR socket, also controls the measurement bases of the
receiving node (by setting e.g. `rotations_remote`). The receiving node cannot
change this.
        If `sequential` is False (default), all requested EPR pairs are generated
at once, before returning the results (qubits or entanglement info objects).
If `sequential` is True, a callback function (`post_routine`) should be
specified. After generating one EPR pair, this callback will be called, before
generating the next pair. This method can e.g. be used to generate many EPR
pairs (more than the number of physical qubits available), by measuring (and
freeing up) each qubit before the next pair is generated.
For example:
.. code-block::
outcomes = alice.new_array(num)
def post_create(conn, q, pair):
q.H()
outcome = outcomes.get_future_index(pair)
q.measure(outcome)
epr_socket.create(number=num, post_routine=post_create, sequential=True)
:param number: number of EPR pairs to generate, defaults to 1
        :param post_routine: callback function for each generated pair. Only used if
`sequential` is True.
The callback should take three arguments `(conn, q, pair)` where
* `conn` is the connection (e.g. `self`)
* `q` is the entangled qubit (of type `FutureQubit`)
* `pair` is a register holding which pair is handled (0, 1, ...)
:param sequential: whether to use callbacks after each pair, defaults to False
:param tp: type of entanglement generation, defaults to EPRType.K. Note that
corresponding `recv` of the remote node's EPR socket must specify the
same type.
:param time_unit: which time unit to use for the `max_time` parameter
:param max_time: maximum number of time units (see `time_unit`) the Host is
willing to wait for entanglement generation of a single pair. If generation
does not succeed within this time, the whole subroutine that this request
is part of is reset and run again by the quantum node controller.
:param basis_local: basis to measure in on this node for M-type requests
:param basis_remote: basis to measure in on the remote node for M-type requests
:param rotations_local: rotations to apply before measuring on this node
(for M-type requests)
:param rotations_remote: rotations to apply before measuring on remote node
(for M-type requests)
:param random_basis_local: random bases to choose from when measuring on this
node (for M-type requests)
:param random_basis_remote: random bases to choose from when measuring on
the remote node (for M-type requests)
:return: For K-type requests: list of qubits created. For M-type requests:
list of entanglement info objects per created pair.
"""
self._logger.warning(
"EPRSocket.create() is deprecated. Use one of "
"create_keep, create_measure, or create_rsp instead."
)
if tp == EPRType.K:
return self.create_keep(
number=number,
post_routine=post_routine,
sequential=sequential,
time_unit=time_unit,
max_time=max_time,
)
elif tp == EPRType.M:
return self.create_measure(
number=number,
time_unit=time_unit,
max_time=max_time,
basis_local=basis_local,
basis_remote=basis_remote,
rotations_local=rotations_local,
rotations_remote=rotations_remote,
random_basis_local=random_basis_local,
random_basis_remote=random_basis_remote,
)
elif tp == EPRType.R:
return self.create_rsp(
number=number,
time_unit=time_unit,
max_time=max_time,
basis_local=basis_local,
random_basis_local=random_basis_local,
)
assert False
def create_context(
self,
number: int = 1,
sequential: bool = False,
time_unit: TimeUnit = TimeUnit.MICRO_SECONDS,
max_time: int = 0,
) -> ContextManager[Tuple[FutureQubit, RegFuture]]:
"""Create a context that is executed for each generated EPR pair consecutively.
Creates EPR pairs with a remote node and handles each pair by
the operations defined in a subsequent context. See the example below.
.. code-block::
with epr_socket.create_context(number=10) as (q, pair):
q.H()
m = q.measure()
NOTE: even though all pairs are handled consecutively, they are still
generated concurrently by the network stack. By setting `sequential` to True,
the network stack only generates the next pair after the context for the
previous pair has been executed, similar to using a callback (`post_routine`)
in the `create` method.
:param number: number of EPR pairs to generate, defaults to 1
:param sequential: whether to generate pairs sequentially, defaults to False
"""
return self.conn.builder.sdk_create_epr_context(
params=EntRequestParams(
remote_node_id=self.remote_node_id,
epr_socket_id=self._epr_socket_id,
number=number,
post_routine=None,
sequential=sequential,
time_unit=time_unit,
max_time=max_time,
)
)
def recv_keep(
self,
number: int = 1,
post_routine: Optional[Callable] = None,
sequential: bool = False,
min_fidelity_all_at_end: Optional[int] = None,
max_tries: Optional[int] = None,
) -> List[Qubit]:
"""Ask the network stack to wait for the remote node to generate EPR pairs,
which are kept in memory.
A `recv_keep` operation must always be matched by a `create_keep` operation on
the remote node. The number of generated pairs must also match.
For more information see the documentation of `create_keep`.
:param number: number of pairs to generate, defaults to 1
:param post_routine: callback function used when `sequential` is True
:param sequential: whether to call the callback after each pair generation,
defaults to False
:param min_fidelity_all_at_end: the minimum fidelity that *all* entangled
qubits should ideally still have at the moment the last qubit has been
generated. For example, when specifying `number=2` and
            `min_fidelity_all_at_end=80`, the program will automatically try to
make sure that both qubits have a fidelity of at least 80% when the
second qubit has been generated. It will attempt to do this by
automatically re-trying the entanglement generation if the fidelity
constraint is not satisfied. This is however an *attempt*, and not
            a guarantee!
        :param max_tries: maximum number of re-tries that should be made to achieve
the `min_fidelity_all_at_end` constraint.
:return: list of qubits created
"""
if self.conn is None:
raise RuntimeError("EPRSocket does not have an open connection")
qubits, _ = self.conn._builder.sdk_recv_epr_keep(
params=EntRequestParams(
remote_node_id=self.remote_node_id,
epr_socket_id=self._epr_socket_id,
number=number,
post_routine=post_routine,
sequential=sequential,
min_fidelity_all_at_end=min_fidelity_all_at_end,
max_tries=max_tries,
),
)
return qubits
def recv_keep_with_info(
self,
number: int = 1,
post_routine: Optional[Callable] = None,
sequential: bool = False,
min_fidelity_all_at_end: Optional[int] = None,
max_tries: Optional[int] = None,
) -> Tuple[List[Qubit], List[EprKeepResult]]:
"""Same as recv_keep but also return the EPR generation information coming
from the network stack.
For more information see the documentation of `recv_keep`.
:param number: number of pairs to generate, defaults to 1
:return: tuple with (1) list of qubits created, (2) list of EprKeepResult objects
"""
qubits, info = self.conn._builder.sdk_recv_epr_keep(
params=EntRequestParams(
remote_node_id=self.remote_node_id,
epr_socket_id=self._epr_socket_id,
number=number,
post_routine=post_routine,
sequential=sequential,
min_fidelity_all_at_end=min_fidelity_all_at_end,
max_tries=max_tries,
),
)
return qubits, info
def recv_measure(
self,
number: int = 1,
) -> List[EprMeasureResult]:
"""Ask the network stack to wait for the remote node to generate EPR pairs,
which are immediately measured (on both nodes).
A `recv_measure` operation must always be matched by a `create_measure`
operation on the remote node. The number and type of generation must also match.
For more information see the documentation of `create_measure`.
:param number: number of pairs to generate, defaults to 1
:return: list of entanglement info objects per created pair.
"""
if self.conn is None:
raise RuntimeError("EPRSocket does not have an open connection")
return self.conn.builder.sdk_recv_epr_measure(
params=EntRequestParams(
remote_node_id=self.remote_node_id,
epr_socket_id=self._epr_socket_id,
number=number,
post_routine=None,
sequential=False,
),
)
def recv_rsp(
self,
number: int = 1,
min_fidelity_all_at_end: Optional[int] = None,
max_tries: Optional[int] = None,
) -> List[Qubit]:
"""Ask the network stack to wait for remote state preparation from another node.
A `recv_rsp` operation must always be matched by a `create_rsp` operation on
the remote node. The number and type of generation must also match.
For more information see the documentation of `create_rsp`.
:param number: number of pairs to generate, defaults to 1
:return: list of qubits created
"""
if self.conn is None:
raise RuntimeError("EPRSocket does not have an open connection")
qubits, _ = self.conn.builder.sdk_recv_epr_rsp(
params=EntRequestParams(
remote_node_id=self.remote_node_id,
epr_socket_id=self._epr_socket_id,
number=number,
post_routine=None,
sequential=False,
min_fidelity_all_at_end=min_fidelity_all_at_end,
max_tries=max_tries,
),
)
return qubits
def recv_rsp_with_info(
self,
number: int = 1,
min_fidelity_all_at_end: Optional[int] = None,
max_tries: Optional[int] = None,
) -> Tuple[List[Qubit], List[EprKeepResult]]:
"""Same as recv_rsp but also return the EPR generation information coming
from the network stack.
For more information see the documentation of `recv_rsp`.
:param number: number of pairs to generate, defaults to 1
:return: tuple with (1) list of qubits created, (2) list of EprKeepResult objects
"""
if self.conn is None:
raise RuntimeError("EPRSocket does not have an open connection")
qubits, infos = self.conn.builder.sdk_recv_epr_rsp(
params=EntRequestParams(
remote_node_id=self.remote_node_id,
epr_socket_id=self._epr_socket_id,
number=number,
post_routine=None,
sequential=False,
min_fidelity_all_at_end=min_fidelity_all_at_end,
max_tries=max_tries,
),
)
return qubits, infos
def recv(
self,
number: int = 1,
post_routine: Optional[Callable] = None,
sequential: bool = False,
tp: EPRType = EPRType.K,
) -> Union[List[Qubit], List[EprMeasureResult], List[LinkLayerOKTypeR]]:
"""Ask the network stack to wait for the remote node to generate EPR pairs.
A `recv` operation must always be matched by a `create` operation on the remote
node. See also the documentation of `create`.
The number and type of generation must also match.
In case of Measure Directly requests, it is the initiating node (that calls
`create`) which specifies the measurement bases. This should not and cannot be
done in `recv`.
For more information see the documentation of `create`.
:param number: number of pairs to generate, defaults to 1
:param post_routine: callback function used when `sequential` is True
:param sequential: whether to call the callback after each pair generation,
defaults to False
:param tp: type of entanglement generation, defaults to EPRType.K
:return: For K-type requests: list of qubits created. For M-type requests:
list of entanglement info objects per created pair.
"""
self._logger.warning(
"EPRSocket.recv() is deprecated. Use one of "
"recv_keep, recv_measure, or recv_rsp instead."
)
if tp == EPRType.K:
return self.recv_keep(
number=number,
post_routine=post_routine,
sequential=sequential,
)
elif tp == EPRType.M:
return self.recv_measure(number=number)
elif tp == EPRType.R:
return self.recv_rsp(number=number)
assert False
@contextmanager
def recv_context(
self,
number: int = 1,
sequential: bool = False,
):
"""Receives EPR pair with a remote node (see doc of :meth:`~.create_context`)"""
try:
# NOTE loop_register is the register used for looping over the generated pairs
(
pre_commands,
loop_register,
ent_results_array,
output,
pair,
) = self.conn.builder._pre_epr_context(
role=EPRRole.RECV,
params=EntRequestParams(
remote_node_id=self.remote_node_id,
epr_socket_id=self._epr_socket_id,
number=number,
post_routine=None,
sequential=sequential,
),
)
yield output, pair
finally:
self.conn.builder._post_epr_context(
pre_commands=pre_commands,
number=number,
loop_register=loop_register,
ent_results_array=ent_results_array,
pair=pair,
)
def _get_node_id(self, app_name: str) -> int:
return self.conn.network_info.get_node_id_for_app(app_name=app_name)
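# A minimal sketch of how the two sides of this API pair up, following the
# docstring examples above (hypothetical app names; the NetQASMConnection
# import path is assumed from the netqasm SDK's `external` module).
def _example_create_side():
    from netqasm.sdk.external import NetQASMConnection
    epr_socket = EPRSocket("bob")  # the remote side would use EPRSocket("alice")
    with NetQASMConnection("alice", epr_sockets=[epr_socket]):
        q = epr_socket.create_keep()[0]  # remote side calls recv_keep()
        q.H()
        m = q.measure()  # a Future; defined once the connection flushes
    return m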
| 43.856828
| 92
| 0.641454
|
from __future__ import annotations
import abc
import logging
from contextlib import contextmanager
from typing import TYPE_CHECKING, Callable, ContextManager, List, Optional, Tuple, Union
from netqasm.logging.glob import get_netqasm_logger
from netqasm.qlink_compat import (
EPRRole,
EPRType,
LinkLayerOKTypeK,
LinkLayerOKTypeM,
LinkLayerOKTypeR,
RandomBasis,
TimeUnit,
)
from netqasm.sdk.build_epr import EprMeasBasis, basis_to_rotation
from netqasm.sdk.builder import EntRequestParams, EprKeepResult, EprMeasureResult
from netqasm.sdk.futures import RegFuture
from .qubit import FutureQubit, Qubit
if TYPE_CHECKING:
from netqasm.sdk import connection
T_LinkLayerOkList = Union[
List[LinkLayerOKTypeK], List[LinkLayerOKTypeM], List[LinkLayerOKTypeR]
]
class EPRSocket(abc.ABC):
def __init__(
self,
remote_app_name: str,
epr_socket_id: int = 0,
remote_epr_socket_id: int = 0,
min_fidelity: int = 100,
):
self._conn: Optional[connection.BaseNetQASMConnection] = None
self._remote_app_name: str = remote_app_name
self._remote_node_id: Optional[
int
        ] = None
        self._epr_socket_id: int = epr_socket_id
self._remote_epr_socket_id: int = remote_epr_socket_id
if (
not isinstance(min_fidelity, int)
or (min_fidelity < 0)
or min_fidelity > 100
):
raise ValueError(
f"min_fidelity must be an integer in the range [0, 100], not {min_fidelity}"
)
self._min_fidelity: int = min_fidelity
self._logger: logging.Logger = get_netqasm_logger(
f"{self.__class__.__name__}({self._remote_app_name}, {self._epr_socket_id})"
)
@property
def conn(self) -> connection.BaseNetQASMConnection:
if self._conn is None:
raise RuntimeError("EPRSocket does not have an open connection")
return self._conn
@conn.setter
def conn(self, conn: connection.BaseNetQASMConnection):
self._conn = conn
self._remote_node_id = self._get_node_id(app_name=self._remote_app_name)
@property
def remote_app_name(self) -> str:
return self._remote_app_name
@property
def remote_node_id(self) -> int:
if self._remote_node_id is None:
raise RuntimeError("Remote Node ID has not been initialized")
return self._remote_node_id
@property
def epr_socket_id(self) -> int:
return self._epr_socket_id
@property
def remote_epr_socket_id(self) -> int:
return self._remote_epr_socket_id
@property
def min_fidelity(self) -> int:
return self._min_fidelity
def create_keep(
self,
number: int = 1,
post_routine: Optional[Callable] = None,
sequential: bool = False,
time_unit: TimeUnit = TimeUnit.MICRO_SECONDS,
max_time: int = 0,
expect_phi_plus: bool = True,
min_fidelity_all_at_end: Optional[int] = None,
max_tries: Optional[int] = None,
) -> List[Qubit]:
qubits, _ = self.conn.builder.sdk_create_epr_keep(
params=EntRequestParams(
remote_node_id=self.remote_node_id,
epr_socket_id=self._epr_socket_id,
number=number,
post_routine=post_routine,
sequential=sequential,
time_unit=time_unit,
max_time=max_time,
expect_phi_plus=expect_phi_plus,
min_fidelity_all_at_end=min_fidelity_all_at_end,
max_tries=max_tries,
),
)
return qubits
def create_keep_with_info(
self,
number: int = 1,
post_routine: Optional[Callable] = None,
sequential: bool = False,
time_unit: TimeUnit = TimeUnit.MICRO_SECONDS,
max_time: int = 0,
expect_phi_plus: bool = True,
min_fidelity_all_at_end: Optional[int] = None,
) -> Tuple[List[Qubit], List[EprKeepResult]]:
qubits, info = self.conn._builder.sdk_create_epr_keep(
params=EntRequestParams(
remote_node_id=self.remote_node_id,
epr_socket_id=self._epr_socket_id,
number=number,
post_routine=post_routine,
sequential=sequential,
time_unit=time_unit,
max_time=max_time,
expect_phi_plus=expect_phi_plus,
min_fidelity_all_at_end=min_fidelity_all_at_end,
),
)
return qubits, info
def create_measure(
self,
number: int = 1,
time_unit: TimeUnit = TimeUnit.MICRO_SECONDS,
max_time: int = 0,
expect_phi_plus: bool = True,
basis_local: EprMeasBasis = None,
basis_remote: EprMeasBasis = None,
rotations_local: Tuple[int, int, int] = (0, 0, 0),
rotations_remote: Tuple[int, int, int] = (0, 0, 0),
random_basis_local: Optional[RandomBasis] = None,
random_basis_remote: Optional[RandomBasis] = None,
) -> List[EprMeasureResult]:
if basis_local is not None:
rotations_local = basis_to_rotation(basis_local)
if basis_remote is not None:
rotations_remote = basis_to_rotation(basis_remote)
return self.conn.builder.sdk_create_epr_measure(
params=EntRequestParams(
remote_node_id=self.remote_node_id,
epr_socket_id=self._epr_socket_id,
number=number,
post_routine=None,
sequential=False,
time_unit=time_unit,
max_time=max_time,
expect_phi_plus=expect_phi_plus,
random_basis_local=random_basis_local,
random_basis_remote=random_basis_remote,
rotations_local=rotations_local,
rotations_remote=rotations_remote,
),
)
def create_rsp(
self,
number: int = 1,
time_unit: TimeUnit = TimeUnit.MICRO_SECONDS,
max_time: int = 0,
expect_phi_plus: bool = True,
basis_local: EprMeasBasis = None,
rotations_local: Tuple[int, int, int] = (0, 0, 0),
random_basis_local: Optional[RandomBasis] = None,
min_fidelity_all_at_end: Optional[int] = None,
) -> List[EprMeasureResult]:
if basis_local is not None:
rotations_local = basis_to_rotation(basis_local)
return self.conn.builder.sdk_create_epr_rsp(
params=EntRequestParams(
remote_node_id=self.remote_node_id,
epr_socket_id=self._epr_socket_id,
number=number,
post_routine=None,
sequential=False,
time_unit=time_unit,
max_time=max_time,
expect_phi_plus=expect_phi_plus,
random_basis_local=random_basis_local,
rotations_local=rotations_local,
min_fidelity_all_at_end=min_fidelity_all_at_end,
)
)
def create(
self,
number: int = 1,
post_routine: Optional[Callable] = None,
sequential: bool = False,
tp: EPRType = EPRType.K,
time_unit: TimeUnit = TimeUnit.MICRO_SECONDS,
max_time: int = 0,
basis_local: EprMeasBasis = None,
basis_remote: EprMeasBasis = None,
rotations_local: Tuple[int, int, int] = (0, 0, 0),
rotations_remote: Tuple[int, int, int] = (0, 0, 0),
random_basis_local: Optional[RandomBasis] = None,
random_basis_remote: Optional[RandomBasis] = None,
) -> Union[List[Qubit], List[EprMeasureResult], List[LinkLayerOKTypeM]]:
self._logger.warning(
"EPRSocket.create() is deprecated. Use one of "
"create_keep, create_measure, or create_rsp instead."
)
if tp == EPRType.K:
return self.create_keep(
number=number,
post_routine=post_routine,
sequential=sequential,
time_unit=time_unit,
max_time=max_time,
)
elif tp == EPRType.M:
return self.create_measure(
number=number,
time_unit=time_unit,
max_time=max_time,
basis_local=basis_local,
basis_remote=basis_remote,
rotations_local=rotations_local,
rotations_remote=rotations_remote,
random_basis_local=random_basis_local,
random_basis_remote=random_basis_remote,
)
elif tp == EPRType.R:
return self.create_rsp(
number=number,
time_unit=time_unit,
max_time=max_time,
basis_local=basis_local,
random_basis_local=random_basis_local,
)
assert False
def create_context(
self,
number: int = 1,
sequential: bool = False,
time_unit: TimeUnit = TimeUnit.MICRO_SECONDS,
max_time: int = 0,
) -> ContextManager[Tuple[FutureQubit, RegFuture]]:
return self.conn.builder.sdk_create_epr_context(
params=EntRequestParams(
remote_node_id=self.remote_node_id,
epr_socket_id=self._epr_socket_id,
number=number,
post_routine=None,
sequential=sequential,
time_unit=time_unit,
max_time=max_time,
)
)
def recv_keep(
self,
number: int = 1,
post_routine: Optional[Callable] = None,
sequential: bool = False,
min_fidelity_all_at_end: Optional[int] = None,
max_tries: Optional[int] = None,
) -> List[Qubit]:
if self.conn is None:
raise RuntimeError("EPRSocket does not have an open connection")
qubits, _ = self.conn._builder.sdk_recv_epr_keep(
params=EntRequestParams(
remote_node_id=self.remote_node_id,
epr_socket_id=self._epr_socket_id,
number=number,
post_routine=post_routine,
sequential=sequential,
min_fidelity_all_at_end=min_fidelity_all_at_end,
max_tries=max_tries,
),
)
return qubits
def recv_keep_with_info(
self,
number: int = 1,
post_routine: Optional[Callable] = None,
sequential: bool = False,
min_fidelity_all_at_end: Optional[int] = None,
max_tries: Optional[int] = None,
) -> Tuple[List[Qubit], List[EprKeepResult]]:
qubits, info = self.conn._builder.sdk_recv_epr_keep(
params=EntRequestParams(
remote_node_id=self.remote_node_id,
epr_socket_id=self._epr_socket_id,
number=number,
post_routine=post_routine,
sequential=sequential,
min_fidelity_all_at_end=min_fidelity_all_at_end,
max_tries=max_tries,
),
)
return qubits, info
def recv_measure(
self,
number: int = 1,
) -> List[EprMeasureResult]:
if self.conn is None:
raise RuntimeError("EPRSocket does not have an open connection")
return self.conn.builder.sdk_recv_epr_measure(
params=EntRequestParams(
remote_node_id=self.remote_node_id,
epr_socket_id=self._epr_socket_id,
number=number,
post_routine=None,
sequential=False,
),
)
def recv_rsp(
self,
number: int = 1,
min_fidelity_all_at_end: Optional[int] = None,
max_tries: Optional[int] = None,
) -> List[Qubit]:
if self.conn is None:
raise RuntimeError("EPRSocket does not have an open connection")
qubits, _ = self.conn.builder.sdk_recv_epr_rsp(
params=EntRequestParams(
remote_node_id=self.remote_node_id,
epr_socket_id=self._epr_socket_id,
number=number,
post_routine=None,
sequential=False,
min_fidelity_all_at_end=min_fidelity_all_at_end,
max_tries=max_tries,
),
)
return qubits
def recv_rsp_with_info(
self,
number: int = 1,
min_fidelity_all_at_end: Optional[int] = None,
max_tries: Optional[int] = None,
) -> Tuple[List[Qubit], List[EprKeepResult]]:
if self.conn is None:
raise RuntimeError("EPRSocket does not have an open connection")
qubits, infos = self.conn.builder.sdk_recv_epr_rsp(
params=EntRequestParams(
remote_node_id=self.remote_node_id,
epr_socket_id=self._epr_socket_id,
number=number,
post_routine=None,
sequential=False,
min_fidelity_all_at_end=min_fidelity_all_at_end,
max_tries=max_tries,
),
)
return qubits, infos
def recv(
self,
number: int = 1,
post_routine: Optional[Callable] = None,
sequential: bool = False,
tp: EPRType = EPRType.K,
) -> Union[List[Qubit], List[EprMeasureResult], List[LinkLayerOKTypeR]]:
self._logger.warning(
"EPRSocket.recv() is deprecated. Use one of "
"recv_keep, recv_measure, or recv_rsp instead."
)
if tp == EPRType.K:
return self.recv_keep(
number=number,
post_routine=post_routine,
sequential=sequential,
)
elif tp == EPRType.M:
return self.recv_measure(number=number)
elif tp == EPRType.R:
return self.recv_rsp(number=number)
assert False
@contextmanager
def recv_context(
self,
number: int = 1,
sequential: bool = False,
):
try:
(
pre_commands,
loop_register,
ent_results_array,
output,
pair,
) = self.conn.builder._pre_epr_context(
role=EPRRole.RECV,
params=EntRequestParams(
remote_node_id=self.remote_node_id,
epr_socket_id=self._epr_socket_id,
number=number,
post_routine=None,
sequential=sequential,
),
)
yield output, pair
finally:
self.conn.builder._post_epr_context(
pre_commands=pre_commands,
number=number,
loop_register=loop_register,
ent_results_array=ent_results_array,
pair=pair,
)
def _get_node_id(self, app_name: str) -> int:
return self.conn.network_info.get_node_id_for_app(app_name=app_name)
| true
| true
|
f7036fe313c66a97f4197a25427a74fb4d122a60
| 14,595
|
py
|
Python
|
google_cal_app/views.py
|
google-business-communications/bm-gCal-bot
|
fe67391f956ccfcfe80ad786c40847a154ca6a6e
|
[
"Apache-2.0"
] | 2
|
2021-02-26T16:32:53.000Z
|
2021-03-04T16:12:10.000Z
|
google_cal_app/views.py
|
google-business-communications/bm-gCal-bot
|
fe67391f956ccfcfe80ad786c40847a154ca6a6e
|
[
"Apache-2.0"
] | null | null | null |
google_cal_app/views.py
|
google-business-communications/bm-gCal-bot
|
fe67391f956ccfcfe80ad786c40847a154ca6a6e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The view layer of logic for the BM gCal Assistant.
The logic here defines the behavior of the webhook when messages are received
from users messaging through Business Messages.
"""
import base64
import datetime
import hashlib
import json
import os
import uuid
from businessmessages import businessmessages_v1_client as bm_client
from businessmessages.businessmessages_v1_messages import (
BusinessmessagesConversationsMessagesCreateRequest,
BusinessMessagesMessage, BusinessMessagesRepresentative,
BusinessMessagesSuggestion, BusinessMessagesSuggestedReply,
BusinessmessagesConversationsEventsCreateRequest, BusinessMessagesEvent,
BusinessMessagesAuthenticationRequest, BusinessMessagesAuthenticationRequestOauth
)
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from google_cal_app.models import Conversation
from googleapiclient.discovery import build
from oauth2client import client
from oauth2client.service_account import ServiceAccountCredentials
import requests
# The location of the service account credentials
SERVICE_ACCOUNT_LOCATION = 'resources/bm-agent-service-account-credentials.json'
# Set of commands the bot understands
CMD_LOGIN = 'login'
CMD_MY_DAY_SUMMARY = 'day-summary'
CMD_FOCUS_SPRINT_SLOTS = 'focus-sprints'
CMD_CANCEL_ALL_MEETINGS = 'cancel-all-meetings'
CMD_YES_CANCEL = 'yes-cancel'
CMD_NO_CANCEL = 'no-do-not-cancel'
# The representative type that all messages are sent as
BOT_REPRESENTATIVE = BusinessMessagesRepresentative(
representativeType=BusinessMessagesRepresentative
.RepresentativeTypeValueValuesEnum.BOT,
displayName='BM gCal Assistant',
avatarImage='https://lh3.googleusercontent.com/9PMLInqtfgnRnV-9QUgYj8W-ZAutv-49KsYmHthZayM9YnCsd01P0eNhbqtu9QoIF31tKzgwo-x1oCkVIQas5Q'
)
LARGE_DATE = datetime.datetime(9999, 12, 30, 12, 59, 59, 59)
DATE_FORMAT = '%Y-%m-%dT%H:%M:%S'
@csrf_exempt
def callback(request):
"""Callback URL.
Processes messages sent from the user.
Args:
request (HttpRequest): The request object that django passes to the
function
Returns:
An :HttpResponse: containing browser renderable HTML.
"""
if request.method == 'POST':
request_data = request.body.decode('utf8').replace("'", '"')
request_body = json.loads(request_data)
    print(f'request_body: {request_body}')
# Extract the conversation id and message text
conversation_id = request_body.get('conversationId')
conv = get_conversation(conversation_id)
    print(f'conversation_id: {conversation_id}')
try:
display_name = request_body.get('context').get('userInfo').get(
'displayName')
except Exception as e:
print(e)
display_name = None
# Check that the message and text body exist
if 'message' in request_body and 'text' in request_body['message']:
message = request_body['message']['text']
      print(f'message: {message}')
route_message(message, conv)
elif 'suggestionResponse' in request_body:
message = request_body['suggestionResponse']['postbackData']
      print(f'message: {message}')
route_message(message, conv)
elif 'authenticationResponse' in request_body:
try:
auth_code = request_body.get('authenticationResponse').get('code')
redirect_uri = request_body.get('authenticationResponse').get('redirectUri')
print(f'redirect_uri extracted from authenticationResponse {redirect_uri}')
# Exchange auth_code with OAuth provider and get access_token
code_verifier = conv.code_verifier
if code_verifier is None or auth_code is None:
print('There was an error.')
else:
access_token = request_access_token(auth_code, code_verifier, redirect_uri)
# Save the access token in an encrypted format using save_token
send_day_summary_message(conv, access_token)
except Exception as e:
print(f'Login error: {e}')
elif 'userStatus' in request_body:
if 'isTyping' in request_body['userStatus']:
print('User is typing')
elif 'requestedLiveAgent' in request_body['userStatus']:
print('User requested transfer to live agent')
return HttpResponse('Response.')
return HttpResponse('This webhook expects a POST request.')
def request_access_token(auth_code, code_verifier, redirect_uri):
"""Requests access_token from identity provider.
Args:
auth_code (str): Authorization code to request access_token
    code_verifier (str): The code_verifier paired with the code_challenge for PKCE.
    redirect_uri (str): The redirect URI used in the initial authorization request.
  Returns:
    The access_token string, or None if the token exchange failed.
  """
obj = {
'client_secret': os.environ['OAUTH_CLIENT_SECRET'],
'client_id': os.environ['OAUTH_CLIENT_ID'],
'grant_type': 'authorization_code',
'code': auth_code,
'code_verifier': code_verifier,
'redirect_uri': redirect_uri
}
res = requests.post('https://oauth2.googleapis.com/token', data=obj)
res_dict = json.loads(res.text)
access_token = res_dict.get('access_token')
if access_token is None:
print(f'Could not find the access token: {res.content}')
return None
  print('We found the access_token.')
return access_token
def get_conversation(conversation_id):
"""Returns a google_cal_app.Conversation object.
Args:
conversation_id (str): The unique id for this user and agent.
"""
conv = Conversation.objects.filter(id=conversation_id)
if not conv:
    conv = Conversation(id=conversation_id)
    conv.save()
    return conv
else:
return conv[0]
def route_message(message, conv):
"""Routes the message received from the user to create a response.
Args:
message (str): The message text received from the user.
conv (Conversation): The unique id for this user and agent.
"""
normalized_message = message.lower()
print(f'Routing message: {normalized_message}')
if normalized_message == CMD_LOGIN:
invoke_login_chip(conv)
else:
echo_message(message, conv)
def fetch_events(access_token, today):
"""Fetches events from Calendar API.
Args:
access_token (str): The user's access_token to query data with.
today (datetime.Date): Date object representing todays date.
Returns:
event_items (list): A list of sorted event items.
"""
credentials = client.AccessTokenCredentials(
access_token, 'USER_AGENT')
service = build('calendar', 'v3', credentials=credentials)
events = service.events().list(
calendarId='primary',
timeMax=f'{today}T23:59:59-07:00',
timeMin=f'{today}T06:00:00-07:00').execute()
event_items = events.get('items')
event_items.sort(
key=lambda x: LARGE_DATE
if (x.get('start') is None or x.get('start').get('dateTime') is None)
else datetime.datetime.strptime(
x.get('start').get('dateTime')[:19], DATE_FORMAT))
print("Returning")
return event_items
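# Note on fetch_events: items without a start dateTime (e.g. all-day events)
# use LARGE_DATE as their sort key, so they land at the end of the list.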
def send_day_summary_message(conv, access_token):
"""Fetches calendar data with access_token and sends it to the conversation.
Args:
    conv (Conversation): The unique id for this user and agent.
    access_token (str): The user's access_token used to query calendar data.
  """
try:
print("Send summary of my day")
today = str(datetime.datetime.now().date())
event_items = fetch_events(access_token, today)
print(f"Events: {event_items}")
event_set = set()
event_list_message = ''
for event in event_items:
try:
if event.get('status') == 'confirmed' and today in event.get(
'start').get('dateTime') and event.get(
'summary') not in event_set:
event_list_message = event_list_message + '- ' + event.get(
'summary') + '\n'
event_set.add(event.get('summary'))
except Exception as e:
print(f'Exception A: {e}')
if len(event_set) > 4:
message_obj = BusinessMessagesMessage(
messageId=str(uuid.uuid4().int),
representative=BOT_REPRESENTATIVE,
text='Looks like you have a lot of meetings today!')
send_message(message_obj, conv.id)
message_obj = BusinessMessagesMessage(
messageId=str(uuid.uuid4().int),
representative=BOT_REPRESENTATIVE,
        text='Here\'s the list of items on your calendar...')
send_message(message_obj, conv.id)
message_obj = BusinessMessagesMessage(
messageId=str(uuid.uuid4().int),
representative=BOT_REPRESENTATIVE,
suggestions=get_suggestions(),
text=event_list_message)
send_message(message_obj, conv.id)
except Exception as e:
print(f'Exception B: {e}')
def invoke_login_chip(conv, message=None):
"""Invokes the login chip within the conversation.
Args:
conv (Conversation): The unique id for this user and agent.
message (str): The message text received from the user.
"""
message = message or 'To see your calendar summary, please sign in!'
message_id = str(uuid.uuid4())
# Generate a code_verifier and code_challenge used in the OAuth 2.0 PKCE flow.
  # The code_challenge is shared with Google to kick off the auth flow with
  # the identity provider. The auth_code is then exchanged, together with the
  # code_verifier, at the identity provider for an access_token used to make
  # requests on behalf of the user.
random_val = str(uuid.uuid1()).encode()
base64_random = base64.urlsafe_b64encode(random_val)
code_verifier = base64_random.decode('utf-8')
hashed_code_verifier = hashlib.sha256(code_verifier.encode('utf-8')).digest()
utf8_decoded_verifier = base64.urlsafe_b64encode(hashed_code_verifier).decode(
'utf-8')
code_challenge = utf8_decoded_verifier.replace('=', '')
message_obj = BusinessMessagesMessage(
messageId=str(uuid.uuid4().int),
representative=BOT_REPRESENTATIVE,
suggestions=get_auth_chip_suggestion(
os.environ['OAUTH_CLIENT_ID'],
code_challenge,
['profile','https://www.googleapis.com/auth/calendar.readonly']),
text=message,
fallback='Your device does not support suggestions')
send_message(message_obj, conv.id)
print(f'The code verifier is: {code_verifier}')
conv.code_verifier = code_verifier
conv.save()
def echo_message(message, conv):
"""Sends the message received from the user back to the user.
Args:
message (str): The message text received from the user.
conv (Conversation): The unique id for this user and agent.
"""
message_obj = BusinessMessagesMessage(
messageId=str(uuid.uuid4().int),
representative=BOT_REPRESENTATIVE,
text=f"Hey! Here's the message you sent:\n\n{message}"
)
send_message(message_obj, conv.id)
def send_message(message, conversation_id):
"""Posts a message to the Business Messages API.
Args:
message (obj): The message object payload to send to the user.
conversation_id (str): The unique id for this user and agent.
"""
credentials = ServiceAccountCredentials.from_json_keyfile_name(
SERVICE_ACCOUNT_LOCATION,
scopes=['https://www.googleapis.com/auth/businessmessages'])
bm_credentials = bm_client.BusinessmessagesV1(credentials=credentials)
# Send the typing started event
create_request = BusinessmessagesConversationsEventsCreateRequest(
eventId=str(uuid.uuid4().int),
businessMessagesEvent=BusinessMessagesEvent(
representative=BOT_REPRESENTATIVE,
eventType=BusinessMessagesEvent.EventTypeValueValuesEnum.TYPING_STARTED
),
parent='conversations/' + conversation_id)
bm_client.BusinessmessagesV1.ConversationsEventsService(
client=bm_credentials).Create(request=create_request)
# Create the message request
create_request = BusinessmessagesConversationsMessagesCreateRequest(
businessMessagesMessage=message,
parent='conversations/' + conversation_id)
bm_client.BusinessmessagesV1.ConversationsMessagesService(
client=bm_credentials).Create(request=create_request)
# Send the typing stopped event
create_request = BusinessmessagesConversationsEventsCreateRequest(
eventId=str(uuid.uuid4().int),
businessMessagesEvent=BusinessMessagesEvent(
representative=BOT_REPRESENTATIVE,
eventType=BusinessMessagesEvent.EventTypeValueValuesEnum.TYPING_STOPPED
),
parent='conversations/' + conversation_id)
bm_client.BusinessmessagesV1.ConversationsEventsService(
client=bm_credentials).Create(request=create_request)
def get_auth_chip_suggestion(client_id, code_challenge, scopes):
"""Returns an authorization chip
Arguments:
client_id (str): client_id from your client configuration with the
identity provider
code_challenge (str): code_challenge generated from the code_verifier for
use with PKCE in OAuth 2.0 access_token exchange
scopes (List): A list of scopes you want the access token to grant API
access to
Returns:
A :list: BusinessMessagesSuggestions invoking the auth chip
"""
return [
BusinessMessagesSuggestion(
authenticationRequest=BusinessMessagesAuthenticationRequest(
oauth=BusinessMessagesAuthenticationRequestOauth(
clientId=client_id, codeChallenge=code_challenge, scopes=scopes))),
]
def get_suggestions():
"""Creates a list of suggestions.
Returns:
A :list: A list of sample BusinessMessagesSuggestions.
"""
return [
BusinessMessagesSuggestion(
reply=BusinessMessagesSuggestedReply(
text='Let\'s do it again!', postbackData=CMD_LOGIN)),
]
def landing_placeholder(request):
"""Creates an HttpResponse for a web request at the root of the project.
Args:
request (HttpRequest): The django web request object
Returns:
An :HttpResponse: containing browser renderable HTML.
"""
return HttpResponse("""
<h1>Welcome to gCal BM Assistant</h1>
<br/><br/>
Check out the <a href="https://business-communications.sandbox.google.com/console/">
Business Communications Developer Console</a> to access this agent's
test URLs.
""")
| 32.578125
| 138
| 0.722576
|
import base64
import datetime
import hashlib
import json
import os
import uuid
from businessmessages import businessmessages_v1_client as bm_client
from businessmessages.businessmessages_v1_messages import (
BusinessmessagesConversationsMessagesCreateRequest,
BusinessMessagesMessage, BusinessMessagesRepresentative,
BusinessMessagesSuggestion, BusinessMessagesSuggestedReply,
BusinessmessagesConversationsEventsCreateRequest, BusinessMessagesEvent,
BusinessMessagesAuthenticationRequest, BusinessMessagesAuthenticationRequestOauth
)
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from google_cal_app.models import Conversation
from googleapiclient.discovery import build
from oauth2client import client
from oauth2client.service_account import ServiceAccountCredentials
import requests
SERVICE_ACCOUNT_LOCATION = 'resources/bm-agent-service-account-credentials.json'
CMD_LOGIN = 'login'
CMD_MY_DAY_SUMMARY = 'day-summary'
CMD_FOCUS_SPRINT_SLOTS = 'focus-sprints'
CMD_CANCEL_ALL_MEETINGS = 'cancel-all-meetings'
CMD_YES_CANCEL = 'yes-cancel'
CMD_NO_CANCEL = 'no-do-not-cancel'
BOT_REPRESENTATIVE = BusinessMessagesRepresentative(
representativeType=BusinessMessagesRepresentative
.RepresentativeTypeValueValuesEnum.BOT,
displayName='BM gCal Assistant',
avatarImage='https://lh3.googleusercontent.com/9PMLInqtfgnRnV-9QUgYj8W-ZAutv-49KsYmHthZayM9YnCsd01P0eNhbqtu9QoIF31tKzgwo-x1oCkVIQas5Q'
)
LARGE_DATE = datetime.datetime(9999, 12, 30, 12, 59, 59, 59)
DATE_FORMAT = '%Y-%m-%dT%H:%M:%S'
@csrf_exempt
def callback(request):
if request.method == 'POST':
request_data = request.body.decode('utf8').replace("'", '"')
request_body = json.loads(request_data)
    print(f'request_body: {request_body}')
# Extract the conversation id and message text
conversation_id = request_body.get('conversationId')
conv = get_conversation(conversation_id)
    print(f'conversation_id: {conversation_id}')
try:
display_name = request_body.get('context').get('userInfo').get(
'displayName')
except Exception as e:
print(e)
display_name = None
# Check that the message and text body exist
if 'message' in request_body and 'text' in request_body['message']:
message = request_body['message']['text']
      print(f'message: {message}')
route_message(message, conv)
elif 'suggestionResponse' in request_body:
message = request_body['suggestionResponse']['postbackData']
      print(f'message: {message}')
route_message(message, conv)
elif 'authenticationResponse' in request_body:
try:
auth_code = request_body.get('authenticationResponse').get('code')
redirect_uri = request_body.get('authenticationResponse').get('redirectUri')
print(f'redirect_uri extracted from authenticationResponse {redirect_uri}')
# Exchange auth_code with OAuth provider and get access_token
code_verifier = conv.code_verifier
if code_verifier is None or auth_code is None:
          print('Login error: missing auth_code or code_verifier.')
else:
access_token = request_access_token(auth_code, code_verifier, redirect_uri)
# Save the access token in an encrypted format using save_token
send_day_summary_message(conv, access_token)
except Exception as e:
print(f'Login error: {e}')
elif 'userStatus' in request_body:
if 'isTyping' in request_body['userStatus']:
print('User is typing')
elif 'requestedLiveAgent' in request_body['userStatus']:
print('User requested transfer to live agent')
return HttpResponse('Response.')
return HttpResponse('This webhook expects a POST request.')
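# Illustrative payload shapes routed by callback() above (field names are
# taken from the code itself; the full schemas are an assumption, see the
# Business Messages documentation):
#   {"conversationId": "...", "message": {"messageId": "...", "text": "Hi"}}
#   {"conversationId": "...", "suggestionResponse": {"postbackData": "login"}}
#   {"conversationId": "...", "authenticationResponse": {"code": "...",
#                                                        "redirectUri": "..."}}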
def request_access_token(auth_code, code_verifier, redirect_uri):
obj = {
'client_secret': os.environ['OAUTH_CLIENT_SECRET'],
'client_id': os.environ['OAUTH_CLIENT_ID'],
'grant_type': 'authorization_code',
'code': auth_code,
'code_verifier': code_verifier,
'redirect_uri': redirect_uri
}
res = requests.post('https://oauth2.googleapis.com/token', data=obj)
res_dict = json.loads(res.text)
access_token = res_dict.get('access_token')
if access_token is None:
print(f'Could not find the access token: {res.content}')
return None
  print('We found the access_token.')
return access_token
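# Illustrative (assumed) success body from https://oauth2.googleapis.com/token,
# from which the access_token field is read above:
#   {"access_token": "ya29....", "expires_in": 3599,
#    "scope": "...", "token_type": "Bearer"}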
def get_conversation(conversation_id):
  conv = Conversation.objects.filter(id=conversation_id)
  if not conv:
    # Django's Model.save() returns None, so save and return the instance.
    new_conv = Conversation(id=conversation_id)
    new_conv.save()
    return new_conv
  return conv[0]
def route_message(message, conv):
normalized_message = message.lower()
print(f'Routing message: {normalized_message}')
if normalized_message == CMD_LOGIN:
invoke_login_chip(conv)
else:
echo_message(message, conv)
def fetch_events(access_token, today):
credentials = client.AccessTokenCredentials(
access_token, 'USER_AGENT')
service = build('calendar', 'v3', credentials=credentials)
events = service.events().list(
calendarId='primary',
timeMax=f'{today}T23:59:59-07:00',
timeMin=f'{today}T06:00:00-07:00').execute()
event_items = events.get('items')
event_items.sort(
key=lambda x: LARGE_DATE
if (x.get('start') is None or x.get('start').get('dateTime') is None)
else datetime.datetime.strptime(
x.get('start').get('dateTime')[:19], DATE_FORMAT))
print("Returning")
return event_items
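# Note: the query window above hardcodes a UTC-7 offset. A hedged sketch of
# timezone-aware day boundaries (assumes Python 3.9+ stdlib zoneinfo and a
# hypothetical target timezone):
#
#   from zoneinfo import ZoneInfo
#   tz = ZoneInfo('America/Los_Angeles')  # assumed timezone
#   start = datetime.datetime.now(tz).replace(hour=6, minute=0, second=0)
#   time_min = start.isoformat()  # carries the correct offset automatically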
def send_day_summary_message(conv, access_token):
try:
print("Send summary of my day")
today = str(datetime.datetime.now().date())
event_items = fetch_events(access_token, today)
print(f"Events: {event_items}")
event_set = set()
event_list_message = ''
for event in event_items:
try:
if event.get('status') == 'confirmed' and today in event.get(
'start').get('dateTime') and event.get(
'summary') not in event_set:
event_list_message = event_list_message + '- ' + event.get(
'summary') + '\n'
event_set.add(event.get('summary'))
except Exception as e:
print(f'Exception A: {e}')
if len(event_set) > 4:
message_obj = BusinessMessagesMessage(
messageId=str(uuid.uuid4().int),
representative=BOT_REPRESENTATIVE,
text='Looks like you have a lot of meetings today!')
send_message(message_obj, conv.id)
message_obj = BusinessMessagesMessage(
messageId=str(uuid.uuid4().int),
representative=BOT_REPRESENTATIVE,
        text='Here\'s the list of items on your calendar...')
send_message(message_obj, conv.id)
message_obj = BusinessMessagesMessage(
messageId=str(uuid.uuid4().int),
representative=BOT_REPRESENTATIVE,
suggestions=get_suggestions(),
text=event_list_message)
send_message(message_obj, conv.id)
except Exception as e:
print(f'Exception B: {e}')
def invoke_login_chip(conv, message=None):
message = message or 'To see your calendar summary, please sign in!'
message_id = str(uuid.uuid4())
# Generate a code_verifier and code_challenge used in the OAuth 2.0 PKCE flow.
  # The code_challenge is shared with Google to kick-start the auth flow
  # with the identity provider. The auth_code is later exchanged, along
  # with the code_verifier, at the identity provider to get an access_token
  # for making requests on behalf of the user.
random_val = str(uuid.uuid1()).encode()
base64_random = base64.urlsafe_b64encode(random_val)
code_verifier = base64_random.decode('utf-8')
hashed_code_verifier = hashlib.sha256(code_verifier.encode('utf-8')).digest()
utf8_decoded_verifier = base64.urlsafe_b64encode(hashed_code_verifier).decode(
'utf-8')
code_challenge = utf8_decoded_verifier.replace('=', '')
message_obj = BusinessMessagesMessage(
messageId=str(uuid.uuid4().int),
representative=BOT_REPRESENTATIVE,
suggestions=get_auth_chip_suggestion(
os.environ['OAUTH_CLIENT_ID'],
code_challenge,
['profile','https://www.googleapis.com/auth/calendar.readonly']),
text=message,
fallback='Your device does not support suggestions')
send_message(message_obj, conv.id)
print(f'The code verifier is: {code_verifier}')
conv.code_verifier = code_verifier
conv.save()
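# A hedged PKCE helper sketch (hypothetical; not called by the handlers
# above). RFC 7636 recommends a high-entropy random verifier, so this
# variant uses the stdlib `secrets` module instead of a UUID.
def pkce_pair():
  import secrets  # stdlib (Python 3.6+); not imported at module top
  verifier = secrets.token_urlsafe(32)  # 43 URL-safe chars, the RFC 7636 minimum
  digest = hashlib.sha256(verifier.encode('utf-8')).digest()
  challenge = base64.urlsafe_b64encode(digest).decode('utf-8').rstrip('=')
  return verifier, challenge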
def echo_message(message, conv):
message_obj = BusinessMessagesMessage(
messageId=str(uuid.uuid4().int),
representative=BOT_REPRESENTATIVE,
text=f"Hey! Here's the message you sent:\n\n{message}"
)
send_message(message_obj, conv.id)
def send_message(message, conversation_id):
credentials = ServiceAccountCredentials.from_json_keyfile_name(
SERVICE_ACCOUNT_LOCATION,
scopes=['https://www.googleapis.com/auth/businessmessages'])
bm_credentials = bm_client.BusinessmessagesV1(credentials=credentials)
# Send the typing started event
create_request = BusinessmessagesConversationsEventsCreateRequest(
eventId=str(uuid.uuid4().int),
businessMessagesEvent=BusinessMessagesEvent(
representative=BOT_REPRESENTATIVE,
eventType=BusinessMessagesEvent.EventTypeValueValuesEnum.TYPING_STARTED
),
parent='conversations/' + conversation_id)
bm_client.BusinessmessagesV1.ConversationsEventsService(
client=bm_credentials).Create(request=create_request)
# Create the message request
create_request = BusinessmessagesConversationsMessagesCreateRequest(
businessMessagesMessage=message,
parent='conversations/' + conversation_id)
bm_client.BusinessmessagesV1.ConversationsMessagesService(
client=bm_credentials).Create(request=create_request)
# Send the typing stopped event
create_request = BusinessmessagesConversationsEventsCreateRequest(
eventId=str(uuid.uuid4().int),
businessMessagesEvent=BusinessMessagesEvent(
representative=BOT_REPRESENTATIVE,
eventType=BusinessMessagesEvent.EventTypeValueValuesEnum.TYPING_STOPPED
),
parent='conversations/' + conversation_id)
bm_client.BusinessmessagesV1.ConversationsEventsService(
client=bm_credentials).Create(request=create_request)
def get_auth_chip_suggestion(client_id, code_challenge, scopes):
return [
BusinessMessagesSuggestion(
authenticationRequest=BusinessMessagesAuthenticationRequest(
oauth=BusinessMessagesAuthenticationRequestOauth(
clientId=client_id, codeChallenge=code_challenge, scopes=scopes))),
]
def get_suggestions():
return [
BusinessMessagesSuggestion(
reply=BusinessMessagesSuggestedReply(
text='Let\'s do it again!', postbackData=CMD_LOGIN)),
]
def landing_placeholder(request):
return HttpResponse("""
<h1>Welcome to gCal BM Assistant</h1>
<br/><br/>
Check out the <a href="https://business-communications.sandbox.google.com/console/">
Business Communications Developer Console</a> to access this agent's
test URLs.
""")
| true
| true
|
f70370a8e859605dfe04aa5778ef4129010bb78a
| 7,577
|
py
|
Python
|
distrax/_src/bijectors/tfp_compatible_bijector.py
|
kuperov/distrax
|
dd3363a64017c5eafb3241bb2a3884de50f21427
|
[
"Apache-2.0"
] | 278
|
2021-04-13T09:06:23.000Z
|
2022-03-29T11:01:20.000Z
|
distrax/_src/bijectors/tfp_compatible_bijector.py
|
kuperov/distrax
|
dd3363a64017c5eafb3241bb2a3884de50f21427
|
[
"Apache-2.0"
] | 26
|
2021-04-13T18:19:37.000Z
|
2022-03-25T15:00:15.000Z
|
distrax/_src/bijectors/tfp_compatible_bijector.py
|
kuperov/distrax
|
dd3363a64017c5eafb3241bb2a3884de50f21427
|
[
"Apache-2.0"
] | 14
|
2021-04-13T17:05:33.000Z
|
2022-03-13T13:01:04.000Z
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrapper to adapt a Distrax bijector for use in TFP."""
from typing import Any, Optional
import chex
from distrax._src.bijectors import bijector
from distrax._src.utils import math
import jax
import jax.numpy as jnp
from tensorflow_probability.substrates import jax as tfp
tfb = tfp.bijectors
tfd = tfp.distributions
Array = chex.Array
Bijector = bijector.Bijector
def tfp_compatible_bijector(
base_bijector: Bijector,
name: Optional[str] = None):
"""Create a TFP-compatible bijector from a Distrax bijector.
Given a Distrax bijector, return a wrapped bijector that behaves as a TFP
bijector, to be used in TFP meta-bijectors and the TransformedDistribution.
In particular, the wrapped bijector implements the methods
`[forward|inverse]_event_ndims`, `[forward|inverse]_event_shape`,
`[forward|inverse]_event_shape_tensor`, `[forward|inverse]_log_det_jacobian`,
and the properties `[forward|inverse]_min_event_ndims`. Other attributes are
delegated to the `base_bijector`.
The methods of the resulting object do not take a `name` argument,
unlike their TFP equivalents.
The `shape` methods are implemented by tracing the `forward` and `inverse`
  methods of the bijector, applied to a zero tensor of the requested shape. If
the `forward` or `inverse` methods are not traceable or cannot be applied to a
zero tensor, then we cannot guarantee the correctness of the result.
Args:
base_bijector: A Distrax bijector.
name: The bijector name.
Returns:
An object that behaves like a TFP bijector.
"""
name_ = name
class TFPCompatibleBijector(base_bijector.__class__):
"""Class to wrap a Distrax bijector."""
def __init__(self):
self._is_injective = True
self._is_permutation = False
self._parts_interact = False
self.dtype = None
self.has_static_min_event_ndims = True
self.forward_min_event_ndims = base_bijector.event_ndims_in
self.inverse_min_event_ndims = base_bijector.event_ndims_out
def __getattr__(self, name: str):
return getattr(base_bijector, name)
def forward_and_log_det(self, x: Array) -> Array:
"""See `Bijector.forward_and_log_det`."""
return base_bijector.forward_and_log_det(x)
@property
def name(self) -> str:
"""The name of the wrapped bijector."""
return name_ or f"TFPCompatible{base_bijector.name}"
def experimental_batch_shape(self, x_event_ndims=None, y_event_ndims=None):
raise NotImplementedError()
def experimental_batch_shape_tensor(
self, x_event_ndims=None, y_event_ndims=None):
raise NotImplementedError()
def forward_dtype(self, _: jnp.dtype) -> None:
"""Returns None, making no promise regarding dtypes."""
return None
def inverse_dtype(self, _: jnp.dtype) -> None:
"""Returns None, making no promise regarding dtypes."""
return None
def forward_event_ndims(self, event_ndims: int) -> int:
"""Returns the number of event dimensions of the output of `forward`."""
extra_event_ndims = self._check_ndims(
"Forward", event_ndims, base_bijector.event_ndims_in)
return base_bijector.event_ndims_out + extra_event_ndims
def inverse_event_ndims(self, event_ndims: int) -> int:
"""Returns the number of event dimensions of the output of `inverse`."""
extra_event_ndims = self._check_ndims(
"Inverse", event_ndims, base_bijector.event_ndims_out)
return base_bijector.event_ndims_in + extra_event_ndims
def forward_event_shape(self, event_shape) -> tfp.tf2jax.TensorShape:
"""Returns the shape of the output of `forward` as a `TensorShape`."""
self._check_shape("Forward", event_shape, base_bijector.event_ndims_in)
forward_event_shape = jax.eval_shape(
base_bijector.forward, jnp.zeros(event_shape)).shape
return tfp.tf2jax.TensorShape(forward_event_shape)
def inverse_event_shape(self, event_shape) -> tfp.tf2jax.TensorShape:
"""Returns the shape of the output of `inverse` as a `TensorShape`."""
self._check_shape("Inverse", event_shape, base_bijector.event_ndims_out)
inverse_event_shape = jax.eval_shape(
base_bijector.inverse, jnp.zeros(event_shape)).shape
return tfp.tf2jax.TensorShape(inverse_event_shape)
def forward_event_shape_tensor(self, event_shape) -> Array:
"""Returns the shape of the output of `forward` as a `jnp.array`."""
self._check_shape("Forward", event_shape, base_bijector.event_ndims_in)
forward_event_shape = jax.eval_shape(
base_bijector.forward, jnp.zeros(event_shape)).shape
return jnp.array(forward_event_shape, dtype=jnp.int32)
def inverse_event_shape_tensor(self, event_shape) -> Array:
"""Returns the shape of the output of `inverse` as a `jnp.array`."""
self._check_shape("Inverse", event_shape, base_bijector.event_ndims_out)
inverse_event_shape = jax.eval_shape(
base_bijector.inverse, jnp.zeros(event_shape)).shape
return jnp.array(inverse_event_shape, dtype=jnp.int32)
def forward_log_det_jacobian(
self, x: Array, event_ndims: Optional[int] = None) -> Array:
"""See `Bijector.forward_log_det_jacobian`."""
extra_event_ndims = self._check_ndims(
"Forward", event_ndims, base_bijector.event_ndims_in)
fldj = base_bijector.forward_log_det_jacobian(x)
return math.sum_last(fldj, extra_event_ndims)
def inverse_log_det_jacobian(
self, y: Array, event_ndims: Optional[int] = None) -> Array:
"""See `Bijector.inverse_log_det_jacobian`."""
extra_event_ndims = self._check_ndims(
"Inverse", event_ndims, base_bijector.event_ndims_out)
ildj = base_bijector.inverse_log_det_jacobian(y)
return math.sum_last(ildj, extra_event_ndims)
def _check_ndims(
self, direction: str, event_ndims: int, expected_ndims: int) -> int:
"""Checks that `event_ndims` are correct and returns any extra ndims."""
if event_ndims is not None and event_ndims < expected_ndims:
raise ValueError(f"{direction} `event_ndims` of {self.name} must be at "
f"least {expected_ndims} but was passed {event_ndims} "
f"instead.")
return 0 if event_ndims is None else event_ndims - expected_ndims
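      # Example: a scalar bijector (event_ndims_in == 0) called with
      # event_ndims=2 gives extra_event_ndims == 2, so the caller sums the
      # per-element log-det terms over the last two axes via `math.sum_last`.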
def _check_shape(
self, direction: str, event_shape: Any, expected_ndims: int):
"""Checks that `event_shape` is correct, raising ValueError otherwise."""
if len(event_shape) < expected_ndims:
raise ValueError(f"{direction} `event_shape` of {self.name} must have "
f"at least {expected_ndims} dimensions, but was "
f"{event_shape} which has only {len(event_shape)} "
f"dimensions instead.")
return TFPCompatibleBijector()
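if __name__ == "__main__":
  # Hedged usage sketch: `distrax` and its `Tanh` bijector are assumed to be
  # importable at run time (this module ships inside distrax, so the import
  # below is illustrative only). The wrapper then drops into TFP's
  # TransformedDistribution like a native TFP bijector.
  import distrax
  wrapped = tfp_compatible_bijector(distrax.Tanh())
  dist = tfd.TransformedDistribution(
      distribution=tfd.Normal(loc=0., scale=1.), bijector=wrapped)
  y = dist.sample(seed=jax.random.PRNGKey(0))
  print(dist.log_prob(y))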
| 42.329609
| 80
| 0.707668
|
from typing import Any, Optional
import chex
from distrax._src.bijectors import bijector
from distrax._src.utils import math
import jax
import jax.numpy as jnp
from tensorflow_probability.substrates import jax as tfp
tfb = tfp.bijectors
tfd = tfp.distributions
Array = chex.Array
Bijector = bijector.Bijector
def tfp_compatible_bijector(
base_bijector: Bijector,
name: Optional[str] = None):
name_ = name
class TFPCompatibleBijector(base_bijector.__class__):
def __init__(self):
self._is_injective = True
self._is_permutation = False
self._parts_interact = False
self.dtype = None
self.has_static_min_event_ndims = True
self.forward_min_event_ndims = base_bijector.event_ndims_in
self.inverse_min_event_ndims = base_bijector.event_ndims_out
def __getattr__(self, name: str):
return getattr(base_bijector, name)
def forward_and_log_det(self, x: Array) -> Array:
return base_bijector.forward_and_log_det(x)
@property
def name(self) -> str:
return name_ or f"TFPCompatible{base_bijector.name}"
def experimental_batch_shape(self, x_event_ndims=None, y_event_ndims=None):
raise NotImplementedError()
def experimental_batch_shape_tensor(
self, x_event_ndims=None, y_event_ndims=None):
raise NotImplementedError()
def forward_dtype(self, _: jnp.dtype) -> None:
return None
def inverse_dtype(self, _: jnp.dtype) -> None:
return None
def forward_event_ndims(self, event_ndims: int) -> int:
extra_event_ndims = self._check_ndims(
"Forward", event_ndims, base_bijector.event_ndims_in)
return base_bijector.event_ndims_out + extra_event_ndims
def inverse_event_ndims(self, event_ndims: int) -> int:
extra_event_ndims = self._check_ndims(
"Inverse", event_ndims, base_bijector.event_ndims_out)
return base_bijector.event_ndims_in + extra_event_ndims
def forward_event_shape(self, event_shape) -> tfp.tf2jax.TensorShape:
self._check_shape("Forward", event_shape, base_bijector.event_ndims_in)
forward_event_shape = jax.eval_shape(
base_bijector.forward, jnp.zeros(event_shape)).shape
return tfp.tf2jax.TensorShape(forward_event_shape)
def inverse_event_shape(self, event_shape) -> tfp.tf2jax.TensorShape:
self._check_shape("Inverse", event_shape, base_bijector.event_ndims_out)
inverse_event_shape = jax.eval_shape(
base_bijector.inverse, jnp.zeros(event_shape)).shape
return tfp.tf2jax.TensorShape(inverse_event_shape)
def forward_event_shape_tensor(self, event_shape) -> Array:
self._check_shape("Forward", event_shape, base_bijector.event_ndims_in)
forward_event_shape = jax.eval_shape(
base_bijector.forward, jnp.zeros(event_shape)).shape
return jnp.array(forward_event_shape, dtype=jnp.int32)
def inverse_event_shape_tensor(self, event_shape) -> Array:
self._check_shape("Inverse", event_shape, base_bijector.event_ndims_out)
inverse_event_shape = jax.eval_shape(
base_bijector.inverse, jnp.zeros(event_shape)).shape
return jnp.array(inverse_event_shape, dtype=jnp.int32)
def forward_log_det_jacobian(
self, x: Array, event_ndims: Optional[int] = None) -> Array:
extra_event_ndims = self._check_ndims(
"Forward", event_ndims, base_bijector.event_ndims_in)
fldj = base_bijector.forward_log_det_jacobian(x)
return math.sum_last(fldj, extra_event_ndims)
def inverse_log_det_jacobian(
self, y: Array, event_ndims: Optional[int] = None) -> Array:
extra_event_ndims = self._check_ndims(
"Inverse", event_ndims, base_bijector.event_ndims_out)
ildj = base_bijector.inverse_log_det_jacobian(y)
return math.sum_last(ildj, extra_event_ndims)
def _check_ndims(
self, direction: str, event_ndims: int, expected_ndims: int) -> int:
if event_ndims is not None and event_ndims < expected_ndims:
raise ValueError(f"{direction} `event_ndims` of {self.name} must be at "
f"least {expected_ndims} but was passed {event_ndims} "
f"instead.")
return 0 if event_ndims is None else event_ndims - expected_ndims
def _check_shape(
self, direction: str, event_shape: Any, expected_ndims: int):
if len(event_shape) < expected_ndims:
raise ValueError(f"{direction} `event_shape` of {self.name} must have "
f"at least {expected_ndims} dimensions, but was "
f"{event_shape} which has only {len(event_shape)} "
f"dimensions instead.")
return TFPCompatibleBijector()
| true
| true
|
f70370b31a5fa70191a3015de5da4b12b92ec6dd
| 3,338
|
py
|
Python
|
example/loc3/rewards.py
|
Principe92/contextualbandits
|
43cf5be10b3d39d74f9da5c5fe1cfae5bc2dd6f5
|
[
"BSD-2-Clause"
] | null | null | null |
example/loc3/rewards.py
|
Principe92/contextualbandits
|
43cf5be10b3d39d74f9da5c5fe1cfae5bc2dd6f5
|
[
"BSD-2-Clause"
] | null | null | null |
example/loc3/rewards.py
|
Principe92/contextualbandits
|
43cf5be10b3d39d74f9da5c5fe1cfae5bc2dd6f5
|
[
"BSD-2-Clause"
] | null | null | null |
import pandas
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as st
from pylab import rcParams
df = pandas.read_csv('rewards_loc3.csv')
ucb,ts,ovr,egr,egr2,agr,agr2,efr,ac,aac,sft = df['ucb'],df['ts'],df['ovr'],\
df['egr'],df['egr2'],df['agr'],df['agr2'],df['efr'],df['ac'],df['aac'],df['sft']
#y1, y2, y3, y4, y5, y6, y7, y8, y9, y10, y11 = np.mean(ucb), np.mean(ts) \
#,np.mean(ovr), np.mean(egr), np.mean(egr2) \
#,np.mean(agr), np.mean(agr2), np.mean(efr) \
#,np.mean(ac), np.mean(aac), np.mean(sft)
def get_mean_reward(reward_lst):
mean_rew=list()
for r in range(len(reward_lst)):
mean_rew.append(sum(reward_lst[:r+1]) / ((r+1)))
return mean_rew
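# A hedged O(n) alternative sketch (hypothetical helper, not used below):
# the loop above re-sums each prefix, which is O(n^2) overall.
def get_mean_reward_fast(reward_lst):
    return list(np.cumsum(reward_lst) / np.arange(1, len(reward_lst) + 1))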
y1, y2, y3, y4, y5, y6, y7, y8, y9, y10, y11 = get_mean_reward(ucb), get_mean_reward(ts) \
,get_mean_reward(ovr), get_mean_reward(egr), get_mean_reward(egr2) \
,get_mean_reward(agr), get_mean_reward(agr2), get_mean_reward(efr) \
,get_mean_reward(ac), get_mean_reward(aac), get_mean_reward(sft)
x1, x2 = [index for index in range(len(ucb))], [index for index in range(len(ts))]
x3, x4 = [index for index in range(len(df['ovr']))], [index for index in range(len(df['egr']))]
x5, x6 = [index for index in range(len(df['egr2']))], [index for index in range(len(df['agr']))]
x7, x8 = [index for index in range(len(df['agr2']))], [index for index in range(len(df['efr']))]
x9, x10 = [index for index in range(len(df['ac']))], [index for index in range(len(df['aac']))]
x11 = [index for index in range(len(df['sft']))]
def CI_model(y, confidence = 0.95):
std_err_y = st.sem(y)
n_y = len(y)
h_y = std_err_y * st.t.ppf((1 + confidence) / 2, n_y - 1)
return h_y
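# The half-width above is the standard t-based interval term:
#   h = t_{(1 + confidence)/2, n-1} * SEM, where st.sem(y) = s / sqrt(n).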
h_y1, h_y2, h_y3, h_y4, h_y5, h_y6, h_y7, h_y8, h_y9, h_y10, h_y11 = CI_model(ucb), CI_model(ts), CI_model(ovr),\
CI_model(egr), CI_model(egr2), CI_model(agr), CI_model(agr2), CI_model(efr), CI_model(ac), CI_model(aac), CI_model(sft)
plt.errorbar(x1, y1, yerr= h_y1, label='Bootstrapped Upper-Confidence Bound (C.I.=80%)')
plt.errorbar(x2, y2, yerr= h_y2, label='Bootstrapped Thompson Sampling')
plt.errorbar(x3, y3, yerr= h_y3, label='Separate Classifiers + Beta Prior')
plt.errorbar(x4, y4, yerr= h_y4, label='Epsilon-Greedy (p0=20%, decay=0.9999)')
plt.errorbar(x5, y5, yerr= h_y5, label='Epsilon-Greedy (p0=20%, no decay)')
plt.errorbar(x6, y6, yerr= h_y6, label='Adaptive Greedy (decaying threshold)')
plt.errorbar(x7, y7, yerr= h_y7, label='Adaptive Greedy (p0=30%, decaying percentile)')
plt.errorbar(x8, y8, yerr= h_y8, label='Explore First (n=1,500)')
plt.errorbar(x9, y9, yerr= h_y9, label='Active Explorer')
plt.errorbar(x10, y10, yerr= h_y10, label='Adaptive Active Greedy')
plt.errorbar(x11, y11, yerr= h_y11, label='Softmax Explorer')
#plt.plot(np.repeat(y.mean(axis=0).max(),len(rewards_sft)),linewidth=4,ls='dashed', label='Overall Best Arm (no context)')
ax = plt.subplot(111)
plt.xlabel('Rounds (models were updated every 50 rounds)', size=10)
plt.ylabel('Cumulative Mean Reward', size=10)
plt.title('Comparison of Online Contextual Bandit Policies in location 3')
# Shrink current axis by 20%
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# Put a legend to the right of the current axis
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig("location_3.png", bbox_inches='tight', dpi = 600)
| 46.361111
| 122
| 0.687837
|
import pandas
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as st
from pylab import rcParams
df = pandas.read_csv('rewards_loc3.csv')
ucb,ts,ovr,egr,egr2,agr,agr2,efr,ac,aac,sft = df['ucb'],df['ts'],df['ovr'],\
df['egr'],df['egr2'],df['agr'],df['agr2'],df['efr'],df['ac'],df['aac'],df['sft']
def get_mean_reward(reward_lst):
mean_rew=list()
for r in range(len(reward_lst)):
mean_rew.append(sum(reward_lst[:r+1]) / ((r+1)))
return mean_rew
y1, y2, y3, y4, y5, y6, y7, y8, y9, y10, y11 = get_mean_reward(ucb), get_mean_reward(ts) \
,get_mean_reward(ovr), get_mean_reward(egr), get_mean_reward(egr2) \
,get_mean_reward(agr), get_mean_reward(agr2), get_mean_reward(efr) \
,get_mean_reward(ac), get_mean_reward(aac), get_mean_reward(sft)
x1, x2 = [index for index in range(len(ucb))], [index for index in range(len(ts))]
x3, x4 = [index for index in range(len(df['ovr']))], [index for index in range(len(df['egr']))]
x5, x6 = [index for index in range(len(df['egr2']))], [index for index in range(len(df['agr']))]
x7, x8 = [index for index in range(len(df['agr2']))], [index for index in range(len(df['efr']))]
x9, x10 = [index for index in range(len(df['ac']))], [index for index in range(len(df['aac']))]
x11 = [index for index in range(len(df['sft']))]
def CI_model(y, confidence = 0.95):
std_err_y = st.sem(y)
n_y = len(y)
h_y = std_err_y * st.t.ppf((1 + confidence) / 2, n_y - 1)
return h_y
h_y1, h_y2, h_y3, h_y4, h_y5, h_y6, h_y7, h_y8, h_y9, h_y10, h_y11 = CI_model(ucb), CI_model(ts), CI_model(ovr),\
CI_model(egr), CI_model(egr2), CI_model(agr), CI_model(agr2), CI_model(efr), CI_model(ac), CI_model(aac), CI_model(sft)
plt.errorbar(x1, y1, yerr= h_y1, label='Bootstrapped Upper-Confidence Bound (C.I.=80%)')
plt.errorbar(x2, y2, yerr= h_y2, label='Bootstrapped Thompson Sampling')
plt.errorbar(x3, y3, yerr= h_y3, label='Separate Classifiers + Beta Prior')
plt.errorbar(x4, y4, yerr= h_y4, label='Epsilon-Greedy (p0=20%, decay=0.9999)')
plt.errorbar(x5, y5, yerr= h_y5, label='Epsilon-Greedy (p0=20%, no decay)')
plt.errorbar(x6, y6, yerr= h_y6, label='Adaptive Greedy (decaying threshold)')
plt.errorbar(x7, y7, yerr= h_y7, label='Adaptive Greedy (p0=30%, decaying percentile)')
plt.errorbar(x8, y8, yerr= h_y8, label='Explore First (n=1,500)')
plt.errorbar(x9, y9, yerr= h_y9, label='Active Explorer')
plt.errorbar(x10, y10, yerr= h_y10, label='Adaptive Active Greedy')
plt.errorbar(x11, y11, yerr= h_y11, label='Softmax Explorer')
ax = plt.subplot(111)
plt.xlabel('Rounds (models were updated every 50 rounds)', size=10)
plt.ylabel('Cumulative Mean Reward', size=10)
plt.title('Comparison of Online Contextual Bandit Policies in location 3')
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig("location_3.png", bbox_inches='tight', dpi = 600)
| true
| true
|
f70371f921691212a121904c194ef6c94e3d6329
| 21,898
|
py
|
Python
|
test/db/test_db_sqlite.py
|
thenetcircle/dino
|
1047c3458e91a1b4189e9f48f1393b3a68a935b3
|
[
"Apache-2.0"
] | 150
|
2016-10-05T11:09:36.000Z
|
2022-03-06T16:24:41.000Z
|
test/db/test_db_sqlite.py
|
thenetcircle/dino
|
1047c3458e91a1b4189e9f48f1393b3a68a935b3
|
[
"Apache-2.0"
] | 27
|
2017-03-02T03:37:02.000Z
|
2022-02-10T04:59:54.000Z
|
test/db/test_db_sqlite.py
|
thenetcircle/dino
|
1047c3458e91a1b4189e9f48f1393b3a68a935b3
|
[
"Apache-2.0"
] | 21
|
2016-11-11T07:51:48.000Z
|
2020-04-26T21:38:33.000Z
|
from datetime import datetime
from dino.config import UserKeys, RedisKeys, SessionKeys
from dino.db.rdbms.models import Channels
from dino.db.rdbms.models import Rooms
from test.base import BaseTest
from test.db import BaseDatabaseTest
class DatabaseSqliteTest(BaseDatabaseTest):
def setUp(self):
self.set_up_env('sqlite')
def tearDown(self):
from dino.db.rdbms.dbman import Database
from dino.db.rdbms.dbman import DeclarativeBase
db = Database(self.env)
con = db.engine.connect()
trans = con.begin()
for table in reversed(DeclarativeBase.metadata.sorted_tables):
con.execute(table.delete())
trans.commit()
con.close()
self.env.cache._flushall()
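        # Note: reversed(sorted_tables) deletes child tables before their
        # foreign-key parents, so the wipe avoids foreign-key violations.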
def test_get_user_infos(self):
self.db.set_user_info(BaseTest.USER_ID, {SessionKeys.gender.value: 'm', 'last_login': datetime.utcnow()})
self.db.set_user_info(BaseTest.OTHER_USER_ID, {SessionKeys.gender.value: 'w', 'last_login': datetime.utcnow()})
self.env.auth.redis.delete(RedisKeys.auth_key(BaseTest.USER_ID))
self.env.auth.redis.delete(RedisKeys.auth_key(BaseTest.OTHER_USER_ID))
infos = self.db.get_user_infos({BaseTest.USER_ID, BaseTest.OTHER_USER_ID})
self.assertEqual('m', infos[BaseTest.USER_ID][SessionKeys.gender.value])
self.assertEqual('w', infos[BaseTest.OTHER_USER_ID][SessionKeys.gender.value])
def test_set_two_owners_on_room(self):
self._test_set_two_owners_on_room()
def test_is_admin_before_create(self):
self._test_is_admin_before_create()
def test_is_admin_after_create(self):
self._test_is_admin_after_create()
def test_is_admin_after_create_set_admin(self):
self._test_is_admin_after_create_set_admin()
def test_channel_for_room_no_channel(self):
self._test_channel_for_room_no_channel()
def test_channel_for_room_with_channel_without_room(self):
self._test_channel_for_room_with_channel_without_room()
def test_channel_for_room_with_channel_with_room(self):
self._test_channel_for_room_with_channel_with_room()
def test_leave_room_not_joined(self):
self._test_leave_room_not_joined()
def test_leave_room_joined(self):
self._test_leave_room_joined()
def test_set_moderator_no_room(self):
self._test_set_moderator_no_room()
def test_set_moderator_with_room(self):
self._test_set_moderator_with_room()
def test_set_room_owner_no_room(self):
self._test_set_room_owner_no_room()
def test_set_room_owner_with_room(self):
self._test_set_room_owner_with_room()
def test_set_channel_owner_no_channel(self):
self._test_set_channel_owner_no_channel()
def test_set_channel_owner_with_channel(self):
self._test_set_channel_owner_with_channel()
def test_get_user_status_before_set(self):
self._test_get_user_status_before_set(UserKeys.STATUS_UNAVAILABLE)
def test_set_user_offline(self):
self._test_set_user_offline(UserKeys.STATUS_UNAVAILABLE)
def test_set_user_online(self):
self._test_set_user_online(UserKeys.STATUS_AVAILABLE)
def test_set_user_invisible(self):
self._test_set_user_invisible(UserKeys.STATUS_INVISIBLE)
def test_remove_current_rooms_for_user_before_joining(self):
self._test_remove_current_rooms_for_user_before_joining()
def test_remove_current_rooms_for_user_after_joining(self):
self._test_remove_current_rooms_for_user_after_joining()
def test_rooms_for_user_before_joining(self):
self._test_rooms_for_user_before_joining()
def test_create_existing_room_name(self):
self._test_create_existing_room_name()
def test_rooms_for_user_after_joining(self):
self._test_rooms_for_user_after_joining()
def test_rooms_for_channel_before_create_channel(self):
self._test_rooms_for_channel_before_create_channel()
def test_rooms_for_channel_after_create_channel_before_create_room(self):
self._test_rooms_for_channel_after_create_channel_before_create_room()
def test_rooms_for_channel_after_create_channel_after_create_room(self):
self._test_rooms_for_channel_after_create_channel_after_create_room()
def test_get_channels_before_create(self):
self._test_get_channels_before_create()
def test_get_channels_after_create(self):
self._test_get_channels_after_create()
def test_room_exists(self):
self._test_room_exists()
def test_create_room_no_channel(self):
self._test_create_room_no_channel()
def test_create_existing_channel(self):
self._test_create_existing_channel()
def test_create_channel(self):
self._test_create_channel()
def test_create_channel_again_to_make_sure_tables_cleared_after_each_test(self):
self._test_create_channel()
channels = self.db._session().query(Channels).filter(Channels.uuid == BaseDatabaseTest.CHANNEL_ID).all()
self.assertEqual(1, len(channels))
def test_create_channel_blank_name(self):
self._test_create_channel_blank_name()
def test_create_channel_exists(self):
self._test_create_channel_exists()
def test_create_room(self):
self._test_create_room()
rooms = self.db._session().query(Rooms).filter(Rooms.uuid == BaseDatabaseTest.ROOM_ID).all()
self.assertEqual(1, len(rooms))
def test_create_room_blank_name(self):
self._test_create_room_blank_name()
def test_create_existing_room(self):
self._test_create_existing_room()
def test_channel_exists_after_create(self):
self._test_channel_exists_after_create()
def test_channel_exists_before_create(self):
self._test_channel_exists_before_create()
def test_room_name_exists_before_create(self):
self._test_room_name_exists_before_create()
def test_room_name_exists_after_create(self):
self._test_room_name_exists_after_create()
def test_delete_one_non_existing_acl(self):
self._test_delete_one_non_existing_acl()
def test_add_one_extra_acl(self):
self._test_add_one_extra_acl()
def test_get_acl(self):
self._test_get_acl()
def test_set_acl(self):
self._test_set_acl()
def test_delete_one_acl(self):
self._test_delete_one_acl()
def test_set_room_allows_cross_group_messaging(self):
self._test_set_room_allows_cross_group_messaging()
def test_get_room_allows_cross_group_messaging_no_room(self):
self._test_get_room_allows_cross_group_messaging_no_room()
def test_get_room_allows_cross_group_messaging(self):
self._test_get_room_allows_cross_group_messaging()
def test_get_room_does_not_allow_cross_group_messaging(self):
self._test_get_room_does_not_allow_cross_group_messaging()
def test_room_allows_cross_group_messaging_no_room(self):
self._test_room_allows_cross_group_messaging_no_room()
def test_room_allows_cross_group_messaging(self):
self._test_room_allows_cross_group_messaging()
def test_room_does_not_allow_cross_group_messaging_no_room(self):
self._test_room_does_not_allow_cross_group_messaging_no_room()
def test_create_admin_room(self):
self._test_create_admin_room()
def test_is_super_user(self):
self._test_is_super_user()
def test_get_admin_room(self):
self._test_get_admin_room()
def test_set_owner_and_moderator(self):
self._test_set_owner_and_moderator()
def test_remove_channel_role(self):
self._test_remove_channel_role()
def test_remove_room_role(self):
self._test_remove_room_role()
def test_remove_super_user(self):
self._test_remove_super_user()
def test_get_super_users(self):
self._test_get_super_users()
def test_remove_owner(self):
self._test_remove_owner()
def test_remove_channel_owner(self):
self._test_remove_channel_owner()
def test_remove_admin(self):
self._test_remove_admin()
def test_remove_moderator(self):
self._test_remove_moderator()
def test_set_owner_is_unique(self):
self._test_set_owner_is_unique()
def test_set_owner_channel_is_unique(self):
self._test_set_owner_channel_is_unique()
def test_set_moderator_is_unique(self):
self._test_set_moderator_is_unique()
def test_set_admin_is_unique(self):
self._test_set_admin_is_unique()
def test_set_super_user_is_unique(self):
self._test_set_super_user_is_unique()
def test_remove_super_user_without_setting(self):
self._test_remove_super_user_without_setting()
def test_remove_owner_without_setting(self):
self._test_remove_owner_without_setting()
def test_remove_channel_owner_without_setting(self):
self._test_remove_channel_owner_without_setting()
def test_remove_admin_without_setting(self):
self._test_remove_admin_without_setting()
def test_remove_moderator_without_setting(self):
self._test_remove_moderator_without_setting()
def test_remove_other_role_channel(self):
self._test_remove_other_role_channel()
def test_remove_other_role_room(self):
self._test_remove_other_role_room()
def test_set_admin_no_such_channel(self):
self._test_set_admin_no_such_channel()
def test_remove_admin_no_such_channel(self):
self._test_remove_admin_no_such_room()
def test_remove_moderator_no_such_room(self):
self._test_remove_moderator_no_such_room()
def test_channel_name_exists(self):
self._test_channel_name_exists()
def test_channel_exists(self):
self._test_channel_exists()
def test_create_user(self):
self._test_create_user()
def test_users_in_room(self):
self._test_users_in_room()
def test_delete_acl_in_channel_for_action(self):
self._test_delete_acl_in_channel_for_action()
def test_delete_acl_in_room_for_action(self):
self._test_delete_acl_in_room_for_action()
def test_remove_owner_channel_no_channel(self):
self._test_remove_owner_channel_no_channel()
def test_remove_owner_channel_not_owner(self):
self._test_remove_owner_channel_not_owner()
def test_remove_owner_channel_is_owner(self):
self._test_remove_owner_channel_is_owner()
def test_create_user_exists(self):
self._test_create_user_exists()
def test_update_acl_in_room_for_action(self):
self._test_update_acl_in_room_for_action()
def test_update_acl_in_room_for_action_no_channel(self):
self._test_update_acl_in_room_for_action_no_channel()
def test_update_acl_in_room_for_action_no_room(self):
self._test_update_acl_in_room_for_action_no_room()
def test_update_acl_in_room_for_action_invalid_action(self):
self._test_update_acl_in_room_for_action_invalid_action()
def test_update_acl_in_room_for_action_invalid_type(self):
self._test_update_acl_in_room_for_action_invalid_type()
def test_update_acl_in_room_for_action_invalid_value(self):
self._test_update_acl_in_room_for_action_invalid_value()
def test_update_acl_in_channel_for_action(self):
self._test_update_acl_in_channel_for_action()
def test_update_acl_in_channel_for_action_no_channel(self):
self._test_update_acl_in_channel_for_action_no_channel()
def test_update_acl_in_channel_for_action_invalid_action(self):
self._test_update_acl_in_channel_for_action_invalid_action()
def test_update_acl_in_channel_for_action_invalid_type(self):
self._test_update_acl_in_channel_for_action_invalid_type()
def test_update_acl_in_channel_for_action_invalid_value(self):
self._test_update_acl_in_channel_for_action_invalid_value()
def test_is_banned_from_channel(self):
self._test_is_banned_from_channel()
def test_is_banned_from_room(self):
self._test_is_banned_from_room()
def test_is_banned_globally(self):
self._test_is_banned_globally()
def test_remove_global_ban(self):
self._test_remove_global_ban()
def test_remove_channel_ban(self):
self._test_remove_channel_ban()
def test_remove_room_ban(self):
self._test_remove_room_ban()
def test_was_banned_globally(self):
self._test_was_banned_globally()
def test_was_banned_from_room(self):
self._test_was_banned_from_room()
def test_was_banned_from_channel(self):
self._test_was_banned_from_channel()
def test_get_user_ban_status_channel(self):
self._test_get_user_ban_status_channel()
def test_get_user_ban_status_room(self):
self._test_get_user_ban_status_room()
def test_get_user_ban_status_global(self):
self._test_get_user_ban_status_global()
def test_get_banned_users_global_not_empty_after_ban(self):
self._test_get_banned_users_global_not_empty_after_ban()
def test_get_banned_users_global_is_empty(self):
self._test_get_banned_users_global_is_empty()
def test_get_banned_users_global_is_empty_if_expired(self):
self._test_get_banned_users_global_is_empty_if_expired()
def test_get_banned_users_channel_not_empty_after_ban(self):
self._test_get_banned_users_channel_not_empty_after_ban()
def test_get_banned_users_channel_is_empty(self):
self._test_get_banned_users_channel_is_empty()
def test_get_banned_users_channel_is_empty_if_expired(self):
self._test_get_banned_users_channel_is_empty_if_expired()
def test_get_banned_users_room_not_empty_after_ban(self):
self._test_get_banned_users_room_not_empty_after_ban()
def test_get_banned_users_room_is_empty(self):
self._test_get_banned_users_room_is_empty()
def test_get_banned_users_room_is_empty_if_expired(self):
self._test_get_banned_users_room_is_empty_if_expired()
def test_get_banned_users_is_empty(self):
self._test_get_banned_users_is_empty()
def test_get_banned_users_for_room(self):
self._test_get_banned_users_for_room()
def test_get_banned_users_for_channel(self):
self._test_get_banned_users_for_channel()
def test_get_banned_users_globally(self):
self._test_get_banned_users_globally()
def test_get_global_ban_timestamp_is_none(self):
self._test_get_global_ban_timestamp_is_none()
def test_get_global_ban_timestamp_not_none(self):
self._test_get_global_ban_timestamp_not_none()
def test_get_global_ban_timestamp_empty_if_expired(self):
self._test_get_global_ban_timestamp_not_empty_if_expired()
def test_get_channel_ban_timestamp_is_none(self):
self._test_get_channel_ban_timestamp_is_none()
def test_get_channel_ban_timestamp_not_none(self):
self._test_get_channel_ban_timestamp_not_none()
def test_get_channel_ban_timestamp_empty_if_expired(self):
self._test_get_channel_ban_timestamp_not_empty_if_expired()
def test_get_room_ban_timestamp_is_none(self):
self._test_get_room_ban_timestamp_is_none()
def test_get_room_ban_timestamp_not_none(self):
self._test_get_room_ban_timestamp_not_none()
def test_get_room_ban_timestamp_empty_if_expired(self):
self._test_get_room_ban_timestamp_not_empty_if_expired()
def test_get_acls_in_channel_for_action_no_channel(self):
self._test_get_acls_in_channel_for_action_no_channel()
def test_get_acls_in_channel_for_action_no_room(self):
self._test_get_acls_in_channel_for_action_no_room()
def test_get_all_acls_channel_is_empty(self):
self._test_get_all_acls_channel_is_empty()
def test_get_all_acls_channel_not_empty(self):
self._test_get_all_acls_channel_not_empty()
def test_get_all_acls_room_is_empty(self):
self._test_get_all_acls_room_is_empty()
def test_get_all_acls_room_not_empty(self):
self._test_get_all_acls_room_not_empty()
def test_channel_for_room_blank_room_id(self):
self._test_channel_for_room_blank_room_id()
def test_channel_for_room_before_create(self):
self._test_channel_for_room_before_create()
def test_channel_for_room_after_create(self):
self._test_channel_for_room_after_create()
def test_channel_for_room_cache(self):
self._test_channel_for_room_cache()
def test_get_username_before_set(self):
self._test_get_username_before_set()
def test_get_username_after_set(self):
self._test_get_username_after_set()
def test_rename_channel(self):
self._test_rename_channel()
def test_rename_channel_before_create(self):
self._test_rename_channel_before_create()
def test_rename_channel_empty_name(self):
self._test_rename_channel_empty_name()
def test_rename_room(self):
self._test_rename_room()
def test_rename_room_before_create_channel(self):
self._test_rename_room_before_create_channel()
def test_rename_room_before_create_room(self):
self._test_rename_room_before_create_room()
def test_rename_room_empty_name(self):
self._test_rename_room_empty_name()
def test_rename_room_already_exists(self):
self._test_rename_room_already_exists()
def test_remove_room(self):
self._test_remove_room()
def test_remove_room_before_create_channel(self):
self._test_remove_room_before_create_channel()
def test_remove_room_before_create_room(self):
self._test_remove_room_before_create_room()
def test_admin_room_for_channel_before_exists(self):
self._test_admin_room_before_exists()
def test_admin_room_for_channel_get_from_cache(self):
self._test_admin_room_get_from_cache()
def test_room_exists_from_cache(self):
self._test_room_exists_from_cache()
def test_get_user_status_from_cache(self):
self._test_get_user_status_from_cache()
def test_get_user_status_after_set(self):
self._test_get_user_status_after_set()
def test_set_user_invisible_twice_ignores_second(self):
self._test_set_user_invisible_twice_ignores_second()
def test_set_user_offline_twice_ignores_second(self):
self._test_set_user_offline_twice_ignores_second()
def test_set_user_online_twice_ignores_second(self):
self._test_set_user_online_twice_ignores_second()
def test_users_in_room_after_join(self):
self._test_users_in_room_after_join()
def test_set_user_offline_after_online(self):
self._test_set_user_offline_after_online()
def test_room_contains_before_create_channel(self):
self._test_room_contains_before_create_channel()
def test_room_contains_before_create_room(self):
self._test_room_contains_before_create_room()
def test_room_contains_after_create(self):
self._test_room_contains_after_create()
def test_room_contains_after_join(self):
self._test_room_contains_after_join()
def test_room_name_exists_from_cache_after_create(self):
self._test_room_name_exists_from_cache_after_create()
def test_rename_channel_exists(self):
self._test_rename_channel_exists()
def test_channel_for_room_from_cache(self):
self._test_channel_for_room_from_cache()
def test_leave_room_before_create(self):
self._test_leave_room_before_create()
def test_remove_moderator_twice(self):
self._test_remove_moderator_twice()
def test_set_owner_channel_after_removing_owner(self):
self._test_set_owner_channel_after_removing_owner()
def test_delete_acl_in_channel_for_action_invalid_action(self):
self._test_delete_acl_in_channel_for_action_invalid_action()
def test_delete_acl_in_room_for_action_invalid_action(self):
self._test_delete_acl_in_room_for_action_invalid_action()
def test_delete_acl_in_channel_for_action_after_create(self):
self._test_delete_acl_in_channel_for_action_after_create()
def test_delete_acl_in_room_for_action_after_create(self):
self._test_delete_acl_in_room_for_action_after_create()
def test_update_acl(self):
self._test_update_acl()
def test_get_all_acls_channel(self):
self._test_get_all_acls_channel()
def test_get_all_acls_channel_before_create(self):
self._test_get_all_acls_channel_before_create()
def test_get_all_acls_room(self):
self._test_get_all_acls_room()
def test_get_all_acls_room_before_create(self):
self._test_get_all_acls_room_before_create()
def test_update_last_read_for(self):
self._test_update_last_read_for()
def test_update_username(self):
self._test_update_username()
def test_get_room_name_from_cache(self):
self._test_get_room_name_from_cache()
def test_get_channel_name_from_cache(self):
self._test_get_channel_name_from_cache()
def test_is_banned_globally_after_clearing_cache(self):
self._test_is_banned_globally_after_clearing_cache()
def test_is_banned_globally_after_clearing_cache_if_expired(self):
self._test_is_banned_globally_after_clearing_cache_if_expired()
def test_is_banned_from_room_after_clearing_cache(self):
self._test_is_banned_from_room_after_clearing_cache()
def test_is_banned_from_room_after_clearing_cache_if_expired(self):
self._test_is_banned_from_room_after_clearing_cache_if_expired()
def test_is_banned_from_channel_after_clearing_cache(self):
self._test_is_banned_from_channel_after_clearing_cache()
def test_is_banned_from_channel_after_clearing_cache_if_expired(self):
self._test_is_banned_from_channel_after_clearing_cache_if_expired()
| 34.269171
| 119
| 0.776692
|
from datetime import datetime
from dino.config import UserKeys, RedisKeys, SessionKeys
from dino.db.rdbms.models import Channels
from dino.db.rdbms.models import Rooms
from test.base import BaseTest
from test.db import BaseDatabaseTest
class DatabaseSqliteTest(BaseDatabaseTest):
def setUp(self):
self.set_up_env('sqlite')
def tearDown(self):
from dino.db.rdbms.dbman import Database
from dino.db.rdbms.dbman import DeclarativeBase
db = Database(self.env)
con = db.engine.connect()
trans = con.begin()
for table in reversed(DeclarativeBase.metadata.sorted_tables):
con.execute(table.delete())
trans.commit()
con.close()
self.env.cache._flushall()
def test_get_user_infos(self):
self.db.set_user_info(BaseTest.USER_ID, {SessionKeys.gender.value: 'm', 'last_login': datetime.utcnow()})
self.db.set_user_info(BaseTest.OTHER_USER_ID, {SessionKeys.gender.value: 'w', 'last_login': datetime.utcnow()})
self.env.auth.redis.delete(RedisKeys.auth_key(BaseTest.USER_ID))
self.env.auth.redis.delete(RedisKeys.auth_key(BaseTest.OTHER_USER_ID))
infos = self.db.get_user_infos({BaseTest.USER_ID, BaseTest.OTHER_USER_ID})
self.assertEqual('m', infos[BaseTest.USER_ID][SessionKeys.gender.value])
self.assertEqual('w', infos[BaseTest.OTHER_USER_ID][SessionKeys.gender.value])
def test_set_two_owners_on_room(self):
self._test_set_two_owners_on_room()
def test_is_admin_before_create(self):
self._test_is_admin_before_create()
def test_is_admin_after_create(self):
self._test_is_admin_after_create()
def test_is_admin_after_create_set_admin(self):
self._test_is_admin_after_create_set_admin()
def test_channel_for_room_no_channel(self):
self._test_channel_for_room_no_channel()
def test_channel_for_room_with_channel_without_room(self):
self._test_channel_for_room_with_channel_without_room()
def test_channel_for_room_with_channel_with_room(self):
self._test_channel_for_room_with_channel_with_room()
def test_leave_room_not_joined(self):
self._test_leave_room_not_joined()
def test_leave_room_joined(self):
self._test_leave_room_joined()
def test_set_moderator_no_room(self):
self._test_set_moderator_no_room()
def test_set_moderator_with_room(self):
self._test_set_moderator_with_room()
def test_set_room_owner_no_room(self):
self._test_set_room_owner_no_room()
def test_set_room_owner_with_room(self):
self._test_set_room_owner_with_room()
def test_set_channel_owner_no_channel(self):
self._test_set_channel_owner_no_channel()
def test_set_channel_owner_with_channel(self):
self._test_set_channel_owner_with_channel()
def test_get_user_status_before_set(self):
self._test_get_user_status_before_set(UserKeys.STATUS_UNAVAILABLE)
def test_set_user_offline(self):
self._test_set_user_offline(UserKeys.STATUS_UNAVAILABLE)
def test_set_user_online(self):
self._test_set_user_online(UserKeys.STATUS_AVAILABLE)
def test_set_user_invisible(self):
self._test_set_user_invisible(UserKeys.STATUS_INVISIBLE)
def test_remove_current_rooms_for_user_before_joining(self):
self._test_remove_current_rooms_for_user_before_joining()
def test_remove_current_rooms_for_user_after_joining(self):
self._test_remove_current_rooms_for_user_after_joining()
def test_rooms_for_user_before_joining(self):
self._test_rooms_for_user_before_joining()
def test_create_existing_room_name(self):
self._test_create_existing_room_name()
def test_rooms_for_user_after_joining(self):
self._test_rooms_for_user_after_joining()
def test_rooms_for_channel_before_create_channel(self):
self._test_rooms_for_channel_before_create_channel()
def test_rooms_for_channel_after_create_channel_before_create_room(self):
self._test_rooms_for_channel_after_create_channel_before_create_room()
def test_rooms_for_channel_after_create_channel_after_create_room(self):
self._test_rooms_for_channel_after_create_channel_after_create_room()
def test_get_channels_before_create(self):
self._test_get_channels_before_create()
def test_get_channels_after_create(self):
self._test_get_channels_after_create()
def test_room_exists(self):
self._test_room_exists()
def test_create_room_no_channel(self):
self._test_create_room_no_channel()
def test_create_existing_channel(self):
self._test_create_existing_channel()
def test_create_channel(self):
self._test_create_channel()
def test_create_channel_again_to_make_sure_tables_cleared_after_each_test(self):
self._test_create_channel()
channels = self.db._session().query(Channels).filter(Channels.uuid == BaseDatabaseTest.CHANNEL_ID).all()
self.assertEqual(1, len(channels))
def test_create_channel_blank_name(self):
self._test_create_channel_blank_name()
def test_create_channel_exists(self):
self._test_create_channel_exists()
def test_create_room(self):
self._test_create_room()
rooms = self.db._session().query(Rooms).filter(Rooms.uuid == BaseDatabaseTest.ROOM_ID).all()
self.assertEqual(1, len(rooms))
def test_create_room_blank_name(self):
self._test_create_room_blank_name()
def test_create_existing_room(self):
self._test_create_existing_room()
def test_channel_exists_after_create(self):
self._test_channel_exists_after_create()
def test_channel_exists_before_create(self):
self._test_channel_exists_before_create()
def test_room_name_exists_before_create(self):
self._test_room_name_exists_before_create()
def test_room_name_exists_after_create(self):
self._test_room_name_exists_after_create()
def test_delete_one_non_existing_acl(self):
self._test_delete_one_non_existing_acl()
def test_add_one_extra_acl(self):
self._test_add_one_extra_acl()
def test_get_acl(self):
self._test_get_acl()
def test_set_acl(self):
self._test_set_acl()
def test_delete_one_acl(self):
self._test_delete_one_acl()
def test_set_room_allows_cross_group_messaging(self):
self._test_set_room_allows_cross_group_messaging()
def test_get_room_allows_cross_group_messaging_no_room(self):
self._test_get_room_allows_cross_group_messaging_no_room()
def test_get_room_allows_cross_group_messaging(self):
self._test_get_room_allows_cross_group_messaging()
def test_get_room_does_not_allow_cross_group_messaging(self):
self._test_get_room_does_not_allow_cross_group_messaging()
def test_room_allows_cross_group_messaging_no_room(self):
self._test_room_allows_cross_group_messaging_no_room()
def test_room_allows_cross_group_messaging(self):
self._test_room_allows_cross_group_messaging()
def test_room_does_not_allow_cross_group_messaging_no_room(self):
self._test_room_does_not_allow_cross_group_messaging_no_room()
def test_create_admin_room(self):
self._test_create_admin_room()
def test_is_super_user(self):
self._test_is_super_user()
def test_get_admin_room(self):
self._test_get_admin_room()
def test_set_owner_and_moderator(self):
self._test_set_owner_and_moderator()
def test_remove_channel_role(self):
self._test_remove_channel_role()
def test_remove_room_role(self):
self._test_remove_room_role()
def test_remove_super_user(self):
self._test_remove_super_user()
def test_get_super_users(self):
self._test_get_super_users()
def test_remove_owner(self):
self._test_remove_owner()
def test_remove_channel_owner(self):
self._test_remove_channel_owner()
def test_remove_admin(self):
self._test_remove_admin()
def test_remove_moderator(self):
self._test_remove_moderator()
def test_set_owner_is_unique(self):
self._test_set_owner_is_unique()
def test_set_owner_channel_is_unique(self):
self._test_set_owner_channel_is_unique()
def test_set_moderator_is_unique(self):
self._test_set_moderator_is_unique()
def test_set_admin_is_unique(self):
self._test_set_admin_is_unique()
def test_set_super_user_is_unique(self):
self._test_set_super_user_is_unique()
def test_remove_super_user_without_setting(self):
self._test_remove_super_user_without_setting()
def test_remove_owner_without_setting(self):
self._test_remove_owner_without_setting()
def test_remove_channel_owner_without_setting(self):
self._test_remove_channel_owner_without_setting()
def test_remove_admin_without_setting(self):
self._test_remove_admin_without_setting()
def test_remove_moderator_without_setting(self):
self._test_remove_moderator_without_setting()
def test_remove_other_role_channel(self):
self._test_remove_other_role_channel()
def test_remove_other_role_room(self):
self._test_remove_other_role_room()
def test_set_admin_no_such_channel(self):
self._test_set_admin_no_such_channel()
def test_remove_admin_no_such_channel(self):
self._test_remove_admin_no_such_room()
def test_remove_moderator_no_such_room(self):
self._test_remove_moderator_no_such_room()
def test_channel_name_exists(self):
self._test_channel_name_exists()
def test_channel_exists(self):
self._test_channel_exists()
def test_create_user(self):
self._test_create_user()
def test_users_in_room(self):
self._test_users_in_room()
def test_delete_acl_in_channel_for_action(self):
self._test_delete_acl_in_channel_for_action()
def test_delete_acl_in_room_for_action(self):
self._test_delete_acl_in_room_for_action()
def test_remove_owner_channel_no_channel(self):
self._test_remove_owner_channel_no_channel()
def test_remove_owner_channel_not_owner(self):
self._test_remove_owner_channel_not_owner()
def test_remove_owner_channel_is_owner(self):
self._test_remove_owner_channel_is_owner()
def test_create_user_exists(self):
self._test_create_user_exists()
def test_update_acl_in_room_for_action(self):
self._test_update_acl_in_room_for_action()
def test_update_acl_in_room_for_action_no_channel(self):
self._test_update_acl_in_room_for_action_no_channel()
def test_update_acl_in_room_for_action_no_room(self):
self._test_update_acl_in_room_for_action_no_room()
def test_update_acl_in_room_for_action_invalid_action(self):
self._test_update_acl_in_room_for_action_invalid_action()
def test_update_acl_in_room_for_action_invalid_type(self):
self._test_update_acl_in_room_for_action_invalid_type()
def test_update_acl_in_room_for_action_invalid_value(self):
self._test_update_acl_in_room_for_action_invalid_value()
def test_update_acl_in_channel_for_action(self):
self._test_update_acl_in_channel_for_action()
def test_update_acl_in_channel_for_action_no_channel(self):
self._test_update_acl_in_channel_for_action_no_channel()
def test_update_acl_in_channel_for_action_invalid_action(self):
self._test_update_acl_in_channel_for_action_invalid_action()
def test_update_acl_in_channel_for_action_invalid_type(self):
self._test_update_acl_in_channel_for_action_invalid_type()
def test_update_acl_in_channel_for_action_invalid_value(self):
self._test_update_acl_in_channel_for_action_invalid_value()
def test_is_banned_from_channel(self):
self._test_is_banned_from_channel()
def test_is_banned_from_room(self):
self._test_is_banned_from_room()
def test_is_banned_globally(self):
self._test_is_banned_globally()
def test_remove_global_ban(self):
self._test_remove_global_ban()
def test_remove_channel_ban(self):
self._test_remove_channel_ban()
def test_remove_room_ban(self):
self._test_remove_room_ban()
def test_was_banned_globally(self):
self._test_was_banned_globally()
def test_was_banned_from_room(self):
self._test_was_banned_from_room()
def test_was_banned_from_channel(self):
self._test_was_banned_from_channel()
def test_get_user_ban_status_channel(self):
self._test_get_user_ban_status_channel()
def test_get_user_ban_status_room(self):
self._test_get_user_ban_status_room()
def test_get_user_ban_status_global(self):
self._test_get_user_ban_status_global()
def test_get_banned_users_global_not_empty_after_ban(self):
self._test_get_banned_users_global_not_empty_after_ban()
def test_get_banned_users_global_is_empty(self):
self._test_get_banned_users_global_is_empty()
def test_get_banned_users_global_is_empty_if_expired(self):
self._test_get_banned_users_global_is_empty_if_expired()
def test_get_banned_users_channel_not_empty_after_ban(self):
self._test_get_banned_users_channel_not_empty_after_ban()
def test_get_banned_users_channel_is_empty(self):
self._test_get_banned_users_channel_is_empty()
def test_get_banned_users_channel_is_empty_if_expired(self):
self._test_get_banned_users_channel_is_empty_if_expired()
def test_get_banned_users_room_not_empty_after_ban(self):
self._test_get_banned_users_room_not_empty_after_ban()
def test_get_banned_users_room_is_empty(self):
self._test_get_banned_users_room_is_empty()
def test_get_banned_users_room_is_empty_if_expired(self):
self._test_get_banned_users_room_is_empty_if_expired()
def test_get_banned_users_is_empty(self):
self._test_get_banned_users_is_empty()
def test_get_banned_users_for_room(self):
self._test_get_banned_users_for_room()
def test_get_banned_users_for_channel(self):
self._test_get_banned_users_for_channel()
def test_get_banned_users_globally(self):
self._test_get_banned_users_globally()
def test_get_global_ban_timestamp_is_none(self):
self._test_get_global_ban_timestamp_is_none()
def test_get_global_ban_timestamp_not_none(self):
self._test_get_global_ban_timestamp_not_none()
    def test_get_global_ban_timestamp_empty_if_expired(self):
        self._test_get_global_ban_timestamp_not_empty_if_expired()  # note: the delegate helper is named *_not_empty_if_expired
def test_get_channel_ban_timestamp_is_none(self):
self._test_get_channel_ban_timestamp_is_none()
def test_get_channel_ban_timestamp_not_none(self):
self._test_get_channel_ban_timestamp_not_none()
    def test_get_channel_ban_timestamp_empty_if_expired(self):
        self._test_get_channel_ban_timestamp_not_empty_if_expired()  # note: the delegate helper is named *_not_empty_if_expired
def test_get_room_ban_timestamp_is_none(self):
self._test_get_room_ban_timestamp_is_none()
def test_get_room_ban_timestamp_not_none(self):
self._test_get_room_ban_timestamp_not_none()
    def test_get_room_ban_timestamp_empty_if_expired(self):
        self._test_get_room_ban_timestamp_not_empty_if_expired()  # note: the delegate helper is named *_not_empty_if_expired
def test_get_acls_in_channel_for_action_no_channel(self):
self._test_get_acls_in_channel_for_action_no_channel()
def test_get_acls_in_channel_for_action_no_room(self):
self._test_get_acls_in_channel_for_action_no_room()
def test_get_all_acls_channel_is_empty(self):
self._test_get_all_acls_channel_is_empty()
def test_get_all_acls_channel_not_empty(self):
self._test_get_all_acls_channel_not_empty()
def test_get_all_acls_room_is_empty(self):
self._test_get_all_acls_room_is_empty()
def test_get_all_acls_room_not_empty(self):
self._test_get_all_acls_room_not_empty()
def test_channel_for_room_blank_room_id(self):
self._test_channel_for_room_blank_room_id()
def test_channel_for_room_before_create(self):
self._test_channel_for_room_before_create()
def test_channel_for_room_after_create(self):
self._test_channel_for_room_after_create()
def test_channel_for_room_cache(self):
self._test_channel_for_room_cache()
def test_get_username_before_set(self):
self._test_get_username_before_set()
def test_get_username_after_set(self):
self._test_get_username_after_set()
def test_rename_channel(self):
self._test_rename_channel()
def test_rename_channel_before_create(self):
self._test_rename_channel_before_create()
def test_rename_channel_empty_name(self):
self._test_rename_channel_empty_name()
def test_rename_room(self):
self._test_rename_room()
def test_rename_room_before_create_channel(self):
self._test_rename_room_before_create_channel()
def test_rename_room_before_create_room(self):
self._test_rename_room_before_create_room()
def test_rename_room_empty_name(self):
self._test_rename_room_empty_name()
def test_rename_room_already_exists(self):
self._test_rename_room_already_exists()
def test_remove_room(self):
self._test_remove_room()
def test_remove_room_before_create_channel(self):
self._test_remove_room_before_create_channel()
def test_remove_room_before_create_room(self):
self._test_remove_room_before_create_room()
def test_admin_room_for_channel_before_exists(self):
self._test_admin_room_before_exists()
def test_admin_room_for_channel_get_from_cache(self):
self._test_admin_room_get_from_cache()
def test_room_exists_from_cache(self):
self._test_room_exists_from_cache()
def test_get_user_status_from_cache(self):
self._test_get_user_status_from_cache()
def test_get_user_status_after_set(self):
self._test_get_user_status_after_set()
def test_set_user_invisible_twice_ignores_second(self):
self._test_set_user_invisible_twice_ignores_second()
def test_set_user_offline_twice_ignores_second(self):
self._test_set_user_offline_twice_ignores_second()
def test_set_user_online_twice_ignores_second(self):
self._test_set_user_online_twice_ignores_second()
def test_users_in_room_after_join(self):
self._test_users_in_room_after_join()
def test_set_user_offline_after_online(self):
self._test_set_user_offline_after_online()
def test_room_contains_before_create_channel(self):
self._test_room_contains_before_create_channel()
def test_room_contains_before_create_room(self):
self._test_room_contains_before_create_room()
def test_room_contains_after_create(self):
self._test_room_contains_after_create()
def test_room_contains_after_join(self):
self._test_room_contains_after_join()
def test_room_name_exists_from_cache_after_create(self):
self._test_room_name_exists_from_cache_after_create()
def test_rename_channel_exists(self):
self._test_rename_channel_exists()
def test_channel_for_room_from_cache(self):
self._test_channel_for_room_from_cache()
def test_leave_room_before_create(self):
self._test_leave_room_before_create()
def test_remove_moderator_twice(self):
self._test_remove_moderator_twice()
def test_set_owner_channel_after_removing_owner(self):
self._test_set_owner_channel_after_removing_owner()
def test_delete_acl_in_channel_for_action_invalid_action(self):
self._test_delete_acl_in_channel_for_action_invalid_action()
def test_delete_acl_in_room_for_action_invalid_action(self):
self._test_delete_acl_in_room_for_action_invalid_action()
def test_delete_acl_in_channel_for_action_after_create(self):
self._test_delete_acl_in_channel_for_action_after_create()
def test_delete_acl_in_room_for_action_after_create(self):
self._test_delete_acl_in_room_for_action_after_create()
def test_update_acl(self):
self._test_update_acl()
def test_get_all_acls_channel(self):
self._test_get_all_acls_channel()
def test_get_all_acls_channel_before_create(self):
self._test_get_all_acls_channel_before_create()
def test_get_all_acls_room(self):
self._test_get_all_acls_room()
def test_get_all_acls_room_before_create(self):
self._test_get_all_acls_room_before_create()
def test_update_last_read_for(self):
self._test_update_last_read_for()
def test_update_username(self):
self._test_update_username()
def test_get_room_name_from_cache(self):
self._test_get_room_name_from_cache()
def test_get_channel_name_from_cache(self):
self._test_get_channel_name_from_cache()
def test_is_banned_globally_after_clearing_cache(self):
self._test_is_banned_globally_after_clearing_cache()
def test_is_banned_globally_after_clearing_cache_if_expired(self):
self._test_is_banned_globally_after_clearing_cache_if_expired()
def test_is_banned_from_room_after_clearing_cache(self):
self._test_is_banned_from_room_after_clearing_cache()
def test_is_banned_from_room_after_clearing_cache_if_expired(self):
self._test_is_banned_from_room_after_clearing_cache_if_expired()
def test_is_banned_from_channel_after_clearing_cache(self):
self._test_is_banned_from_channel_after_clearing_cache()
def test_is_banned_from_channel_after_clearing_cache_if_expired(self):
self._test_is_banned_from_channel_after_clearing_cache_if_expired()
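# A minimal, hypothetical sketch of the delegation pattern used by the wrappers
# above (all names below are invented for illustration): the shared _test_* bodies
# live in a mixin so several backend-specific TestCase classes can reuse them,
# while each concrete class exposes thin public test_* wrappers like the ones here.
import unittest

class _SharedStorageTests:
    def _test_channel_exists(self):
        # a real implementation would exercise a concrete storage backend here
        self.assertTrue(True)

class RedisStorageTest(_SharedStorageTests, unittest.TestCase):
    def test_channel_exists(self):
        self._test_channel_exists()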
| true
| true
|
f7037282b9d4bec22d75254382e9b4bf3d7a655e
| 5,332
|
py
|
Python
|
Add/add_heuristic_engine.py
|
Jesmine0902/TSP_CPLEX_2
|
8853d6837bd5408b8925eb5f45e21c79945a5904
|
[
"MIT"
] | 1
|
2020-08-14T03:25:09.000Z
|
2020-08-14T03:25:09.000Z
|
Add/add_heuristic_engine.py
|
Jesmine0902/TSP_CPLEX_2
|
8853d6837bd5408b8925eb5f45e21c79945a5904
|
[
"MIT"
] | null | null | null |
Add/add_heuristic_engine.py
|
Jesmine0902/TSP_CPLEX_2
|
8853d6837bd5408b8925eb5f45e21c79945a5904
|
[
"MIT"
] | 1
|
2020-08-14T03:24:29.000Z
|
2020-08-14T03:24:29.000Z
|
import pandas as pd
__author__ = 'slei'
class AddHeuristicTSP:
""" Finds the shortest path using a heuristic method """
def __init__(self, cities_df):
self.df = cities_df
        self.edges = [(t.origin, t.destination) for t in cities_df.itertuples()]  # use the constructor argument, not the module-level df
        self.distance = {(t.origin, t.destination): t.distance for t in cities_df.itertuples()}
        self.cities = list(set(cities_df['destination']))
self.cities_lst = []
self.tour_lst = []
self.distance_lst = []
self.tour_leg_distances_lst = []
self._final_df = None
self._shortest_distance = None
self._shortest_tour = None
def find_subtour(self, starting_city):
""" Given a starting city, finds a tour by selecting next shortest distance from list of unvisited cities """
tour = []
tour_distance_lst = [0]
cities_unvisited = list(set(self.df['destination']))
initial_city = starting_city
current_city = initial_city
tour.append(current_city)
        cities_unvisited.remove(initial_city)  # drop the starting city itself; pop(0) removed an arbitrary city from the unordered list
total_distance = 0
count = 0
while len(cities_unvisited) > 0:
# remove any city that has already been visited from consideration
df_unvisited = self.df[self.df['destination'].isin(cities_unvisited)]
# filter for rows based on first criterion
is_current = df_unvisited['origin'] == current_city
df2 = df_unvisited[is_current]
# find the nearest city
index_min = df2['distance'].idxmin()
min_row = df2.loc[index_min]
d = min_row.distance
destination = min_row.destination
# update next city and tour and total distance
current_city = destination
total_distance = total_distance + d
tour_distance_lst.append(d)
# update city tracker lists
tour.append(current_city)
index_i = cities_unvisited.index(current_city)
cities_unvisited.pop(index_i)
count = count + 1
# check
print("next destination: ", destination)
print("distance: ", d)
print("total_distance: ", total_distance)
print("tour: ", tour)
print("tour_distance_lst: ", tour_distance_lst)
print("cities_unvisited: ", cities_unvisited)
print()
# adding the distance from last city back to initial city
last_city = tour[-1]
last_mile = (initial_city, last_city)
last_mile_distance = self.distance[last_mile]
tour.append(initial_city)
total_distance = total_distance + last_mile_distance
tour_distance_lst.append(last_mile_distance)
# check
print("last_mile: ", last_mile)
print("last_mile_distance: ", last_mile_distance)
print("tour: ", tour)
print("total_distance: ", total_distance)
print("tour_leg_distances_lst: ", tour_distance_lst)
# update lists
self.tour_lst.append(tour)
self.distance_lst.append(total_distance)
self.tour_leg_distances_lst.append(tour_distance_lst)
@property
def final_df(self):
""" Add description here"""
if self._final_df is None:
self._final_df = self._generate_final_df()
return self._final_df
def _generate_final_df(self):
for c in self.cities: # for every city in the dataset
print("city: ", c) # generate a tour for each
print("--------------------------------------------------------------------------------")
self.find_subtour(c)
print('********************************************************************************')
print()
soln_dict = {'city': self.cities, 'tour': self.tour_lst, 'tour_leg_distances': self.tour_leg_distances_lst,
'distance': self.distance_lst}
return pd.DataFrame(soln_dict)
@property
def shortest_distance(self):
""" Add description here"""
if self._shortest_distance is None:
return self._calculate_shortest_distance()
def _calculate_shortest_distance(self): # find the tour with the lowest distance
index_min_final = self.final_df['distance'].idxmin() # returns the index location of min value
min_row_final = self.final_df.loc[index_min_final]
return min_row_final.distance
@property
def shortest_tour(self):
""" Add description here"""
if self._shortest_tour is None:
return self._generate_shortest_tour()
def _generate_shortest_tour(self):
index_min_final = self.final_df['distance'].idxmin() # returns the index location of min value
min_row_final = self.final_df.loc[index_min_final]
return min_row_final.tour
# ********************************************************************************
# ********************************************************************************
if __name__ == '__main__':
df = pd.read_csv('city_data_add.csv')
tsp = AddHeuristicTSP(df)
    tsp.final_df  # force the lazy property to build a tour from every starting city
print("final_df")
print(tsp.final_df)
print()
print("shortest_distance_final", tsp.shortest_distance)
print("shortest_tour_final", tsp.shortest_tour)
| 37.027778
| 117
| 0.589835
| true
| true
|
f70372ff51a0c2ba1144263ef3314dfe0f59ebb1
| 28,309
|
py
|
Python
|
ots_eval/stability_evaluation/close.py
|
YellowOfTheEgg/ots-eval
|
8ec08e60330d41f8f7ffd571dd6301cdedaefd99
|
[
"BSD-3-Clause"
] | 3
|
2021-03-28T14:46:57.000Z
|
2022-01-03T17:25:19.000Z
|
ots_eval/stability_evaluation/close.py
|
YellowOfTheEgg/ots-eval
|
8ec08e60330d41f8f7ffd571dd6301cdedaefd99
|
[
"BSD-3-Clause"
] | null | null | null |
ots_eval/stability_evaluation/close.py
|
YellowOfTheEgg/ots-eval
|
8ec08e60330d41f8f7ffd571dd6301cdedaefd99
|
[
"BSD-3-Clause"
] | 1
|
2022-01-11T10:56:14.000Z
|
2022-01-11T10:56:14.000Z
|
import numpy as np
from scipy.spatial.distance import euclidean
from typing import Union
import pandas
class CLOSE(object):
def __init__(self, data: pandas.DataFrame, measure: Union[str, callable] = 'mse', minPts: int = None, output: bool = False,
jaccard: bool = False, weighting: bool = False, exploitation_term: bool = False):
"""
Params:
            data (pandas.DataFrame) - dataframe with columns in the order 'object_id', 'time', 'cluster_id',
                                      followed by the feature columns
Note: outliers should have negative labels/cluster_ids, these should be different for different times
Optional:
            measure (str or callable) - the quality measure to use, possible measures:
                                        'sse', 'mse', 'mae', 'max', 'dbi', 'exploit'
            minPts (int) - minPts parameter for the density-based quality measure
output (boolean) - whether intermediate results should be printed
jaccard (boolean) - whether the jaccard index should be used for proportion
weighting (boolean) - whether the weighting function should be used for subsequence_score
exploitation_term (boolean) - whether the exploitation term should be included in CLOSE calculation
"""
self._data = data
self._column_names = data.columns.values
self._object_column_name = self._column_names[0]
self._time_column_name = self._column_names[1]
self._cluster_column_name = self._column_names[2]
self._jaccard = jaccard
self._weighting = weighting
self._exp_term = exploitation_term
self._minPts = minPts
self._output = output
self.pos_measures = {### Measures for Clusters
'sse': self.calc_sse, # NOTE: sse is not between 0 and 1
'mse': self.calc_mse, # NOTE: mse is only between 0 and 1, if data is normalized
'mae': self.calc_mae, # NOTE: mae is only between 0 and 1, if data is normalized
'max': self.calc_max_dist,
'dbi': self.calc_min_pts,
'None': self.return_zero,
### Measures for Time Clusterings
'exploit': self.calc_exploit_at_t}
if measure in self.pos_measures:
self.measure = self.pos_measures[measure]
elif callable(measure):
self.measure = measure
else:
self.measure = self.pos_measures['mse']
def rate_clustering(self, start_time: int = None, end_time: int = None, return_measures: bool = False) -> Union[float, dict]:
"""
Optional:
start_time (int) - time that should be considered as beginning
end_time (int) - time which should be rated up to
return_measures (boolean) - whether additional information such as average stability
and quality should be returned
Returns:
CLOSE score (float): rating of clustering regarding all clusters
(dict): with key 'stability_evaluation', 'stability', 'quality', 'pre-factor' with additional information
if 'return_measures' is True
"""
cluster_ratings = self.rate_clusters(start_time, end_time)
gr_clusters = self._data.groupby(self._cluster_column_name)
score = 0
avg_quality = 0
avg_stab = 0
for cluster in cluster_ratings:
cluster_objects = gr_clusters.get_group(cluster)[self._object_column_name].unique()
cluster_time = gr_clusters.get_group(cluster)[self._time_column_name].iloc[0]
feature_list = self.get_feature_list(cluster_objects, cluster_time)
measure = self.measure(feature_list)
avg_quality += measure
avg_stab += cluster_ratings[cluster]
score += (cluster_ratings[cluster] * (1 - measure))
num_clusters = len(cluster_ratings)
num_timestamps = self.get_num_timestamps(start_time, end_time)
if num_clusters <= 0:
if self._output:
print('Clustering has no Clusters!!')
return 0
avg_quality /= num_clusters
if self._output:
print('Average Quality: ', str(avg_quality))
avg_stab /= num_clusters
if self._output:
print('Average Stability: ', str(avg_stab))
if self._exp_term:
exp_term = self.calc_exploit()
factor = (1 / num_clusters) * (1 - (num_timestamps / num_clusters) ** 2) * exp_term
else:
factor = (1 / num_clusters) * (1 - (num_timestamps / num_clusters)**2)
if not return_measures:
return score * factor
else:
return {'stability_evaluation': score * factor,
'stability': avg_stab,
'quality': avg_quality,
'pre-factor': (1 - (num_timestamps / num_clusters) ** 2)}
def rate_time_clustering(self, start_time: int = None, end_time: int = None, return_measures: bool = False) -> Union[float, dict]:
"""
Optional:
            start_time (int) - time that should be considered as beginning
            end_time (int) - time which should be rated up to
return_measures (boolean) - whether additional information such as average stability and quality should be returned
Returns:
CLOSE score (float) - rating of clustering regarding all time clusterings
(dict): with key 'stability_evaluation', 'stability', 'quality', 'pre-factor' with additional information
if 'return_measures' is True
"""
cluster_ratings = self.rate_clusters(start_time, end_time)
num_timestamps, timestamps = self.get_num_timestamps(start_time, end_time, return_timestamps=True)
score = 0
if return_measures:
quality = 0
stability = 0
for time in timestamps:
if not return_measures:
score += self.calc_t_clustering_rating(cluster_ratings, time)
else:
cur_scores = self.calc_t_clustering_rating(cluster_ratings, time, return_measures=True)
score += cur_scores['score']
quality += cur_scores['quality']
stability += cur_scores['stability']
if return_measures:
quality /= num_timestamps
stability /= num_timestamps
num_clusters = len(cluster_ratings)
if num_clusters <= 0:
if self._output:
print('Over-Time Clustering has no Clusters!!')
return 0
if self._exp_term:
exp_term = self.calc_exploit()
factor = (1 / num_timestamps) * (1 - (num_timestamps / num_clusters) ** 2) * exp_term
else:
factor = (1 / num_timestamps) * (1 - (num_timestamps / num_clusters) ** 2)
if not return_measures:
return score * factor
else:
return {'stability_evaluation': score * factor,
'stability': stability,
'quality': quality,
'pre-factor': factor}
def calc_t_clustering_rating(self, cluster_ratings: dict, time: int, return_measures: bool = False) -> Union[float, dict]:
"""
Params:
            cluster_ratings (dict) - {<cluster_id>: <rating>} with ratings of clusters
time (int) - time that should be considered
Optional:
return_measures (boolean) - whether additional information such as average stability and quality should be returned
        Returns:
CLOSE score (float) - rating of clustering at considered time
(dict): with key 'score', 'stability', 'quality' with additional information if 'return_measures' is True
"""
avg_stab = 0
clusters_at_time = self._data[self._data[self._time_column_name] == time][self._cluster_column_name].unique()
clusters_at_time = np.delete(clusters_at_time, np.where(clusters_at_time < 0))
for cluster in clusters_at_time:
            try:
                avg_stab += cluster_ratings[cluster]
            except KeyError:
                # cluster has no rating (e.g. it was filtered out as an outlier)
                continue
num_clusters = len(clusters_at_time)
if num_clusters <= 0:
if self._output:
print('Time Clustering at Time ', str(time), ' has no Clusters!!')
return 0
avg_stab /= num_clusters
if self._output:
print('Average Stability at Time ', str(time), ' : ', str(avg_stab))
quality = self.measure(time)
if self._output:
print('Quality of Clustering at Time ' , str(time), ' : ', str(quality))
t_clustering_score = avg_stab * quality
if not return_measures:
return t_clustering_score
else:
return {
'score': t_clustering_score,
'stability': avg_stab,
'quality': quality
}
def rate_clusters(self, start_time: int = None, end_time: int = None, id: Union[int, str, list] = None) -> dict:
"""
Optional:
start_time (int) - time that should be considered as beginning
end_time (int) - time which should be rated up to
id (int, str, list or None) - representing the cluster_ids that should be rated. If id is None,
all objects are rated
Returns:
ratings (dict) - {<cluster_id>: <rating>} with ratings of clusters
"""
ids_to_rate = self.get_ids_to_rate(id, self._cluster_column_name, start_time, end_time)
ids = ids_to_rate[:]
# don't rate outliers
for i in ids_to_rate:
if int(i) < 0:
ids.remove(i)
ratings = self.calc_cluster_rating(ids, start_time)
return ratings
def calc_cluster_rating(self, ids_to_rate: Union[list, np.ndarray], start_time: int = None) -> dict:
"""
Params:
ids_to_rate (array-like) - list of clusters that should be rated
Optional:
start_time (int) - time that should be considered as beginning
Returns:
ratings - dict {<cluster_id>: <rating>} with ratings of clusters
"""
if start_time is None:
start_time = np.min(self._data[self._time_column_name].unique())
ratings = {}
cluster_compositions = self.obtain_cluster_compositions()
gr_clusters = self._data.groupby(self._cluster_column_name)
# iterate over all cluster ids
for id in ids_to_rate:
time = gr_clusters.get_group(id)[self._time_column_name].iloc[0]
# rate the clusters of all timestamps except of the first one
if time != start_time:
num_merged_clusters = len(cluster_compositions[id])
obj_list = gr_clusters.get_group(id)[self._object_column_name].unique().tolist()
obj_ratings = self.calc_object_rating(cluster_compositions, obj_list, time)
score = 0
for obj in obj_ratings:
score += obj_ratings[obj]
try:
score /= len(obj_ratings)
except ZeroDivisionError:
if self._output:
print('Cluster ', str(id), ' has no non-outlier members.')
else:
continue
clusters = list(cluster_compositions[id].keys())
num_timestamps = len(self._data.loc[self._data[self._cluster_column_name].isin(clusters)]
[self._time_column_name].unique())
try:
div = num_merged_clusters / num_timestamps
score /= div
except ZeroDivisionError:
if self._output:
print("<<ZeroDivisionError - Cluster Score>> Cluster ID: ", str(id), " Merged Clusters: ", str(num_merged_clusters),
" Num Timestamps: ", str(num_timestamps))
else:
continue
ratings[id] = score
# clusters of the first timestamp have a stability of 1.0
else:
ratings[id] = 1.0
return ratings
def rate_object(self, id: Union[int, str, list] = None, start_time: int = None, end_time: int = None) -> dict:
"""
Optional:
id (int, str, list or None) - representing the data points that should be rated. If id is None,
all objects are rated
start_time (int) - time that should be considered as beginning
end_time (int) - representing the timestamp which should be rated up to
Returns:
ratings (dict) - {<object_id>: <rating>} with ratings of objects
"""
ids_to_rate = self.get_ids_to_rate(id, self._object_column_name)
if end_time is None:
end_time = np.max(self._data[self._time_column_name].unique())
cluster_compositions = self.obtain_cluster_compositions()
ratings = self.calc_object_rating(cluster_compositions, ids_to_rate, end_time, start_time)
return ratings
def calc_object_rating(self, cluster_composition: dict, ids_to_rate: Union[list, np.ndarray], end_time: int, start_time: int = None) -> dict:
"""
Params:
cluster_composition (dict) - {<cluster_id>: {<contained_cluster_id>: <proportion>}} containing the proportions of
clusters (contained_cluster_id) that belong to cluster (cluster_id)
ids_to_rate (array-like) - list of data points that should be rated
end_time (int) - representing the timestamp which should be rated up to
Optional:
start_time (int) - time that should be considered as beginning
Returns:
ratings - dict {<object_id>: <rating>} with ratings of objects
"""
ratings = {}
gr_clusters = self._data.groupby(self._object_column_name)
# iterate over object ids
for id in ids_to_rate:
cur_group = gr_clusters.get_group(id)
cur_group = cur_group[cur_group[self._time_column_name] <= end_time]
if start_time is not None:
cur_group = cur_group[cur_group[self._time_column_name] >= start_time]
try:
# id of the cluster of the last considered timestamp
last_cluster = cur_group[cur_group[self._time_column_name] == end_time][self._cluster_column_name].iloc[
0]
except IndexError:
print(">>INDEXERROR - LAST CLUSTER<< ID: ", str(id), ", Start Time: ", str(start_time), ", End Time: ",
str(end_time))
continue
# if object is an outlier for the considered timestamp, it is skipped
if int(last_cluster) < 0:
continue
cluster_ids = cur_group[self._cluster_column_name].unique()
object_ratings = []
num_clusters = 0
has_outlier = False
for cluster in cluster_ids:
if cluster == last_cluster:
continue
# Add the proportion of clusters before last timestamp, that merged in last cluster
else:
# outliers get worst rating of 0.0
if int(cluster) < 0:
object_ratings.append(0.0)
has_outlier = True
else:
object_ratings.append(cluster_composition[last_cluster][cluster])
num_clusters += 1
if not has_outlier and len(object_ratings) == 0:
# print(str(id) + " has no data before t=" + str(end_time))
continue
if self._weighting:
try:
weighting_denominator = 0
for i in range(1, num_clusters + 1):
weighting_denominator += i
if num_clusters > 0:
object_rating = 0
for i in range(num_clusters):
object_rating += object_ratings[i] * ((i + 1) / weighting_denominator)
else:
continue
except (TypeError, ZeroDivisionError):
# print(str(id) + " is not assigned to any cluster before t=" + str(end_time))
continue
else:
try:
object_rating = np.sum(object_ratings)
object_rating /= num_clusters
except (TypeError, ZeroDivisionError):
# print(str(id) + " is not assigned to any cluster before t=" + str(end_time))
continue
ratings[id] = round(object_rating, 3)
return ratings
def calc_exploit(self) -> float:
"""
Returns:
exploitation_term (float) - exploitation term for whole clustering
"""
num_objects = len(self._data[self._object_column_name].unique())
num_no_outliers = len(self._data[self._data[self._cluster_column_name] >= 0][self._object_column_name].unique())
return num_no_outliers / num_objects
######## HELPER FUNCTIONS ########
def get_feature_list(self, objects: Union[list, np.ndarray], time: int) -> np.ndarray:
"""
Params:
objects (array-like) - list of objects_ids that belong to considered cluster
time (int) - time of cluster that is considered
        Returns:
feature_list (list) - list of lists containing the features of objects in the considered cluster
"""
feature_list = []
for obj in objects:
features = self._data[
(self._data[self._object_column_name] == obj) & (self._data[self._time_column_name] == time)]
try:
features = \
features.drop([self._object_column_name, self._cluster_column_name, self._time_column_name],
axis=1).iloc[0].tolist()
except IndexError:
print(">>INDEXERROR - FEATURE LIST<< ID: ", str(obj), ", Time: ", str(time))
continue
if len(features) <= 0:
print("No features found for object ", str(obj))
continue
feature_list.append(features)
return np.array(feature_list)
    def get_num_timestamps(self, start_time: int, end_time: int, return_timestamps: bool = False) -> Union[int, tuple]:
"""
Params:
start_time (int) - first timestamp to be considered
end_time (int) - last timestamp to be considered
Optional:
            return_timestamps (boolean) - whether the list of considered timestamps should also be returned
Returns:
num_timestamps (int) - number of timestamps between start_time and end_time
"""
timestamp_list = self._data[self._time_column_name].unique()
if start_time is not None:
timestamp_list = [i for i in timestamp_list if i >= start_time]
if end_time is not None:
timestamp_list = [i for i in timestamp_list if i <= end_time]
num_timestamps = len(timestamp_list)
if not return_timestamps:
return num_timestamps
else:
return num_timestamps, timestamp_list
def get_ids_to_rate(self, id: Union[int, str, list], id_name: str, start_time: int = None, end_time: int = None) -> list:
"""
Params:
id (int, str, list or None) - representing the data points that should be rated. If id is None, all objects are rated
id_name (str) - either self._cluster_column_name or self._object_column_name, which ids to extract
Optional:
start_time (int) - first timestamp to be considered
end_time (int) - last timestamp to be considered
Returns:
ids_to_rate (list) - list of ids that should be rated
"""
if id is None:
data = self._data.copy()
if start_time is not None:
data = data[data[self._time_column_name] >= start_time]
if end_time is not None:
data = data[data[self._time_column_name] <= end_time]
ids_to_rate = data[id_name].unique().tolist()
elif isinstance(id, int) or isinstance(id, str):
ids_to_rate = [id]
elif isinstance(id, list):
ids_to_rate = id[:]
else:
raise Exception('id has to be int, str, list or None')
return ids_to_rate
def obtain_cluster_compositions(self) -> dict:
"""
Returns:
cluster_compositions (dict) - dict of dicts {<cluster_id>: {<cluster_id>: <proportion>}} with cluster compositions
Example:
{5: {1: 1.0, 2: 0.1, 4: 0.5}} describes that
100% of cluster 1, 10% of cluster 2 and 50% of cluster 4 belong to cluster 5
"""
cluster_compositions = {}
g_clusters = self._data.groupby([self._time_column_name, self._cluster_column_name])
if not self._jaccard:
cluster_members = self._data.groupby(self._cluster_column_name).count()
# iterate over all clusters - 'group' contains the time and cluster_id
# and 'objects' is the corresponding dataframe
for group, objects in g_clusters:
# Ignore outliers
if int(group[1]) < 0:
continue
objects = objects[self._object_column_name].values.tolist()
# temporal intersection
# select considered clusters with later timestamps than the current one to check which clusters the
# current one merged into and count, how many objects of the current cluster are in the considered clusters
# example of a series from the dataframe: [cluster_id, count] with [2, 10]
# meaning: 10 objects of the current cluster merged into the cluster with the id 2
temp_intersection = (self._data.loc[(self._data[self._object_column_name].isin(objects)) &
(self._data[self._time_column_name] > group[0])]).groupby(self._cluster_column_name).count()
# iterate over all clusters which the current cluster has merged into
# 'cluster' contains the cluster_id
            # and 'num_objects' is the corresponding count row of the temporal intersection
for cluster, num_objects in temp_intersection.iterrows():
# Ignore outliers
if int(cluster) < 0:
continue
# for all considered clusters save the proportion of the current cluster that merged into the considered
# one
# example: {3: {2: 0.3}, 4: {2: 0.1}}
# meaning: 30% of (current) cluster 2 merged into (considered) cluster 3 and 10% into (considered) cluster 4
if cluster not in cluster_compositions:
cluster_compositions[cluster] = {}
if self._jaccard:
# cardinality of the union of both considered clusters
card_union = len(self._data.loc[(self._data[self._cluster_column_name] == cluster) |
(self._data[self._cluster_column_name] == group[1])]
[self._object_column_name].unique())
# jaccard distance
cluster_compositions[cluster][group[1]] = round(float(num_objects.values[1]) /
float(card_union), 3)
else:
cluster_compositions[cluster][group[1]] = round(float(num_objects.values[1]) /
float(cluster_members.loc[group[1]].values[1]), 3)
if group[1] not in cluster_compositions:
cluster_compositions[group[1]] = {}
return cluster_compositions
######## QUALITY MEASURES ########
@staticmethod
def calc_sse(feature_list: list) -> float:
"""
Params:
feature_list (list) - list of lists containing the features of objects in the considered cluster
Returns:
sse (float) - sum of squared errors to centroid of cluster
"""
centroid = np.average(feature_list, axis=0)
sse = np.sum(np.power(feature_list - centroid[None, :], 2))
return sse
def calc_mse(self, feature_list: list) -> float:
"""
Params:
feature_list (list) - list of lists containing the features of objects in the considered cluster
Returns:
mse (float) - mean squared error of cluster
"""
sse = self.calc_sse(feature_list)
return sse / len(feature_list)
@staticmethod
def calc_mae(feature_list: list) -> float:
"""
Params:
feature_list (list) - list of lists containing the features of objects in the considered cluster
Returns:
mae (float) - mean average errors to centroid of cluster
"""
centroid = np.average(feature_list, axis=0)
mae = np.average(np.abs(feature_list - centroid[None, :]))
return mae
@staticmethod
def calc_max_dist(feature_list: list) -> float:
"""
Params:
feature_list (list) - list of lists containing the features of objects in the considered cluster
Returns:
            max_dist (float) - maximum pairwise distance between cluster members, scaled by 1 / sqrt(2)
"""
max_dist = 0
for i in range(len(feature_list) - 1):
for j in range(i + 1, len(feature_list)):
cur_dist = euclidean(np.array(feature_list[i]), np.array(feature_list[j]))
if cur_dist > max_dist:
max_dist = cur_dist
max_dist /= 2 ** (1 / 2)
return max_dist
def calc_min_pts(self, feature_list: list) -> float:
"""
Params:
feature_list (list) - list of lists containing the features of objects in the considered cluster
Returns:
avg_dist (float) - average distance of cluster members to their minPts neighbor
"""
avg_dist = 0
for i in range(len(feature_list)):
            dist_list = [10] * self._minPts  # sentinel distances; assumes normalized features, so 10 exceeds any real distance
for j in range(len(feature_list)):
if i == j:
continue
cur_dist = euclidean(np.array(feature_list[i]), np.array(feature_list[j]))
for k in range(len(dist_list)):
if cur_dist < dist_list[k]:
dist_list.insert(k, cur_dist)
dist_list.pop(self._minPts)
avg_dist += dist_list[self._minPts - 1]
avg_dist /= len(feature_list)
return avg_dist
@staticmethod
def return_zero():
"""
Function is used if no quality measure should be used in CLOSE
This is the case when only the exploitation term is considered
Returns:
0
"""
return 0
def calc_exploit_at_t(self, time: int) -> float:
"""
Params:
time (int) - time to be considered
Returns:
rating (float) - exploitation rating of time clustering
"""
num_objects_at_t = len(self._data[self._data[self._time_column_name] == time][self._object_column_name].unique())
num_no_outliers = len(self._data[(self._data[self._time_column_name] == time) &
(self._data[self._cluster_column_name] >= 0)][self._object_column_name].unique())
return num_no_outliers / num_objects_at_t
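# A minimal usage sketch on invented data. The column order follows the requirement
# documented in __init__ (object_id, time, cluster_id, then feature columns); every
# id, timestamp and feature value below is made up for illustration only. Two
# clusters at t=1 merge into one cluster at t=2, so the merged cluster earns a
# stability below 1.0.
if __name__ == '__main__':
    demo = pandas.DataFrame({
        'object_id': ['a', 'b', 'c', 'd', 'a', 'b', 'c', 'd'],
        'time': [1, 1, 1, 1, 2, 2, 2, 2],
        'cluster_id': [0, 0, 1, 1, 2, 2, 2, 2],
        'feature': [0.10, 0.12, 0.80, 0.82, 0.40, 0.42, 0.44, 0.46],
    })
    close = CLOSE(demo, measure='mse', output=True)
    print('CLOSE score:', close.rate_clustering())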
| 44.511006
| 145
| 0.575753
| true
| true
|
f70373e85dd7d594ac8c8db40a01f648beb6171b
| 7,943
|
py
|
Python
|
Modules/Roles.py
|
Skk-nsmt/reina
|
1b658bfcf4ff35a641c7fe9398652aa36b254d3a
|
[
"WTFPL"
] | null | null | null |
Modules/Roles.py
|
Skk-nsmt/reina
|
1b658bfcf4ff35a641c7fe9398652aa36b254d3a
|
[
"WTFPL"
] | 13
|
2020-12-22T04:35:03.000Z
|
2021-07-07T07:05:53.000Z
|
Modules/Roles.py
|
Skk-nsmt/reina
|
1b658bfcf4ff35a641c7fe9398652aa36b254d3a
|
[
"WTFPL"
] | 1
|
2022-02-28T21:40:51.000Z
|
2022-02-28T21:40:51.000Z
|
import discord
from discord.ext import commands
from Modules import CONSTANT
from Modules.Checks import check_if_role_or_bot_spam
class Roles(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.command()
@check_if_role_or_bot_spam()
async def role(self, ctx: commands.Context, role_type: str, *role_names):
"""
Add a role.
<role_type>: Use 'main', 'sub', or 'unit' to indicate which type of role you want.
Your main role will control your nametag colour.
[role_names...]: The name of the roles you want to add, names are not case-sensitive.
You can enter as many names as you want to.
Examples:
">role main Sally"
--- will add Sally as your main role and make your nametag yellow.
">role sub Mizzy"
--- will add Mizzy as a sub role without affecting your nametag colour.
Unit examples:
>role unit Cider
>role unit Bench
>role unit Keikoto
Unit roles work the same as sub roles, you can have many of them.
You can enter multiple role names for this command.
If you enter ">role main" with more than one role name, you will get the first valid role you entered.
Examples:
">role sub Sally Sakura Ruri Jun"
--- will add all these four sub roles to you.
">role main Sally Sakura Ruri Jun"
--- will only add Sally as the main role, if you had Sally as your main role, the operation does nothing.
Only the following roles may be added for 'main' and 'sub' roles:
Sally, Sakura, Ruri, Jun, Mizzy, Miyako, Kanaeru, Akane,
Nagomin, Miu, Meimei, Uta, Nicole, Chiharun, Reika,
Reinyan, Ayaka, Moe, Mikami, Rettan, Yuki, Ainacchi, Tsubomi,
Tamago, Gouda, Kaoruko, Nana, Miko, Komiya, Aida, Mukai
Only the following roles may be added for 'unit' roles:
>> Hareta Hi no Bench (use the word "Bench" to add),
>> Keikoto saisei keikaku (use the word "Keikoto"),
>> Ki no Nuketa Cider (use the word "Cider")
"""
role_names: list[str] = [x.capitalize() for x in role_names]
        if not role_names:
            await ctx.reply("Missing required arguments. ")
            return  # without this the command would continue with no names
result_msgs: list[str] = []
if role_type in CONSTANT.ROLES_ID.keys():
for role_name in role_names:
if role_name in CONSTANT.GENERAL_ROLEABLES:
if role_type == "main":
role_ids: list[int] = [role.id for role in ctx.author.roles]
main_roles = list(set(role_ids) & set(CONSTANT.ROLES_ID["main"].values()))
role: discord.Role = ctx.guild.get_role(CONSTANT.ROLES_ID["main"][role_name])
if role in ctx.author.roles:
result_msgs.append("You already have that role!")
elif main_roles:
result_msgs.append("You can't have more than one main role!")
else:
await ctx.author.add_roles(role)
result_msgs.append("Role added.")
break
elif role_type == "sub":
role: discord.Role = ctx.guild.get_role(CONSTANT.ROLES_ID["sub"][role_name])
if role in ctx.author.roles:
result_msgs.append("You already have that role!")
else:
await ctx.author.add_roles(role)
result_msgs.append("Role added.")
else:
await ctx.reply("Illegal operation. Check your <role_type> input. ")
return
elif role_name in CONSTANT.UNIT_ROLABLES:
if role_type == "unit": # verify that the type is actually unit
role: discord.Role = ctx.guild.get_role(CONSTANT.ROLES_ID["unit"][role_name])
if role in ctx.author.roles:
result_msgs.append("You already have that role!")
else:
await ctx.author.add_roles(role)
result_msgs.append("Role added.")
else:
await ctx.reply("Illegal operation. Check your <role_type> input. ")
return
else:
result_msgs.append("Illegal role name. Type `>help role` for a list of acceptable role names. ")
else:
await ctx.reply("Illegal operation. Check your <role_type> input. ")
return
final_msg: str = ""
for name, result in zip(role_names, result_msgs):
final_msg += "**{}**: {} \n".format(name, result)
await ctx.reply(final_msg)
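    # Hypothetical sketch of the mapping shape this cog assumes for
    # CONSTANT.ROLES_ID (the real ids live in Modules/CONSTANT.py):
    #
    #   ROLES_ID = {
    #       "main": {"Sally": 111111111111111111},
    #       "sub": {"Sally": 222222222222222222},
    #       "unit": {"Bench": 333333333333333333},
    #   }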
@commands.command()
@check_if_role_or_bot_spam()
async def unrole(self, ctx: commands.Context, role_type: str, *role_names):
"""
Delete a role.
<role_type>: Use 'main' or 'sub' to indicate which type of role you wish to delete.
If you delete your main role, your nametag colour will change to that of your highest sub role
until you add a new main role.
[role_names...]: The name of the role you want to delete, names are not case-sensitive.
You can enter as many names as you want to.
Example:
">unrole main Sally"
--- will remove Sally as your main role.
--- If you have Meimei as a sub role, your nametag colour will then be light blue
until you add a new main role.
Multiple role deletion works similarly as >role does, for more help, send ">help role".
"""
role_names: list[str] = [x.capitalize() for x in role_names]
        if not role_names:
            await ctx.reply("Missing required argument. ")
            return  # same guard as in >role
result_msgs: list[str] = []
for role_name in role_names:
if role_name in CONSTANT.GENERAL_ROLEABLES:
if role_type == 'main':
if role_name in CONSTANT.ROLES_ID["main"].keys():
role: discord.Role = ctx.guild.get_role(CONSTANT.ROLES_ID["main"][role_name])
else:
result_msgs.append("Illegal role name for main roles. ")
continue
elif role_type == 'sub':
if role_name in CONSTANT.ROLES_ID["sub"].keys():
role: discord.Role = ctx.guild.get_role(CONSTANT.ROLES_ID["sub"][role_name])
else:
result_msgs.append("Illegal role name for sub roles. ")
continue
elif role_type == 'unit':
if role_name in CONSTANT.ROLES_ID["unit"].keys():
role: discord.Role = ctx.guild.get_role(CONSTANT.ROLES_ID["unit"][role_name])
else:
result_msgs.append("Illegal role name for unit roles. ")
continue
else:
await ctx.send("Invalid selection. ")
return
if role not in ctx.author.roles:
result_msgs.append("You don't have that role!")
else:
await ctx.author.remove_roles(role)
result_msgs.append("Role removed.")
else:
result_msgs.append("Illegal role name. Type `>help unrole` for a list of acceptable role names. ")
final_msg: str = ""
for name, result in zip(role_names, result_msgs):
final_msg += "**{}**: {} \n".format(name, result)
await ctx.reply(final_msg)
| 43.883978
| 117
| 0.545134
|
import discord
from discord.ext import commands
from Modules import CONSTANT
from Modules.Checks import check_if_role_or_bot_spam
class Roles(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.command()
@check_if_role_or_bot_spam()
async def role(self, ctx: commands.Context, role_type: str, *role_names):
role_names: list[str] = [x.capitalize() for x in role_names]
        if not role_names:
            await ctx.reply("Missing required arguments. ")
            return
result_msgs: list[str] = []
if role_type in CONSTANT.ROLES_ID.keys():
for role_name in role_names:
if role_name in CONSTANT.GENERAL_ROLEABLES:
if role_type == "main":
role_ids: list[int] = [role.id for role in ctx.author.roles]
main_roles = list(set(role_ids) & set(CONSTANT.ROLES_ID["main"].values()))
role: discord.Role = ctx.guild.get_role(CONSTANT.ROLES_ID["main"][role_name])
if role in ctx.author.roles:
result_msgs.append("You already have that role!")
elif main_roles:
result_msgs.append("You can't have more than one main role!")
else:
await ctx.author.add_roles(role)
result_msgs.append("Role added.")
break
elif role_type == "sub":
role: discord.Role = ctx.guild.get_role(CONSTANT.ROLES_ID["sub"][role_name])
if role in ctx.author.roles:
result_msgs.append("You already have that role!")
else:
await ctx.author.add_roles(role)
result_msgs.append("Role added.")
else:
await ctx.reply("Illegal operation. Check your <role_type> input. ")
return
elif role_name in CONSTANT.UNIT_ROLABLES:
if role_type == "unit": # verify that the type is actually unit
role: discord.Role = ctx.guild.get_role(CONSTANT.ROLES_ID["unit"][role_name])
if role in ctx.author.roles:
result_msgs.append("You already have that role!")
else:
await ctx.author.add_roles(role)
result_msgs.append("Role added.")
else:
await ctx.reply("Illegal operation. Check your <role_type> input. ")
return
else:
result_msgs.append("Illegal role name. Type `>help role` for a list of acceptable role names. ")
else:
await ctx.reply("Illegal operation. Check your <role_type> input. ")
return
final_msg: str = ""
for name, result in zip(role_names, result_msgs):
final_msg += "**{}**: {} \n".format(name, result)
await ctx.reply(final_msg)
@commands.command()
@check_if_role_or_bot_spam()
async def unrole(self, ctx: commands.Context, role_type: str, *role_names):
role_names: list[str] = [x.capitalize() for x in role_names]
        if not role_names:
            await ctx.reply("Missing required argument. ")
            return
result_msgs: list[str] = []
for role_name in role_names:
if role_name in CONSTANT.GENERAL_ROLEABLES:
if role_type == 'main':
if role_name in CONSTANT.ROLES_ID["main"].keys():
role: discord.Role = ctx.guild.get_role(CONSTANT.ROLES_ID["main"][role_name])
else:
result_msgs.append("Illegal role name for main roles. ")
continue
elif role_type == 'sub':
if role_name in CONSTANT.ROLES_ID["sub"].keys():
role: discord.Role = ctx.guild.get_role(CONSTANT.ROLES_ID["sub"][role_name])
else:
result_msgs.append("Illegal role name for sub roles. ")
continue
elif role_type == 'unit':
if role_name in CONSTANT.ROLES_ID["unit"].keys():
role: discord.Role = ctx.guild.get_role(CONSTANT.ROLES_ID["unit"][role_name])
else:
result_msgs.append("Illegal role name for unit roles. ")
continue
else:
await ctx.send("Invalid selection. ")
return
if role not in ctx.author.roles:
result_msgs.append("You don't have that role!")
else:
await ctx.author.remove_roles(role)
result_msgs.append("Role removed.")
else:
result_msgs.append("Illegal role name. Type `>help unrole` for a list of acceptable role names. ")
final_msg: str = ""
for name, result in zip(role_names, result_msgs):
final_msg += "**{}**: {} \n".format(name, result)
await ctx.reply(final_msg)
| true
| true
|
f7037425cd747334c2b88b75c4d4315203805b2e
| 1,119
|
py
|
Python
|
ptk_gestaorecurso_teste/recursos/forms.py
|
rodrigombsantana/ptk-gestaorecursos-teste
|
4f6f1fbac8b5501cf54520bf7434e92e19d6dfc6
|
[
"MIT"
] | null | null | null |
ptk_gestaorecurso_teste/recursos/forms.py
|
rodrigombsantana/ptk-gestaorecursos-teste
|
4f6f1fbac8b5501cf54520bf7434e92e19d6dfc6
|
[
"MIT"
] | null | null | null |
ptk_gestaorecurso_teste/recursos/forms.py
|
rodrigombsantana/ptk-gestaorecursos-teste
|
4f6f1fbac8b5501cf54520bf7434e92e19d6dfc6
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.forms import ModelForm
from django.forms import TextInput
from .models import agendamento
#import datetime
#class frm_agendamento(forms.ModelForm):
#
# data_agendamento = forms.DateField(label="Data",initial=datetime.date.today)
# horario_inicio = forms.TimeField(label="Inicio",initial=datetime.datetime.now().strftime('%H:%M'))
# horario_fim = forms.TimeField(label="Fim", initial=datetime.datetime.now().strftime('%H:%M'))
#
# motivo = forms.CharField(
# label='Motivo', widget=forms.Textarea
# )
class frm_agendamento(ModelForm):  # form based on the model
class Meta:
model = agendamento
        exclude = ('criado_em','google_link')  # fields that will not be used in the form
widgets = {
'data_agendamento': TextInput( attrs={'class':'form-control datepicker', 'data-date-format':'dd/mm/yyyy'}),
'horario_inicio': TextInput( attrs={'class':'form-control'}),
'horario_fim': TextInput( attrs={'class':'form-control'}),
'motivo': TextInput( attrs={'class':'form-control'})
}
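# Hedged usage sketch (view and template names are hypothetical):
#
#   from django.shortcuts import render
#
#   def novo_agendamento(request):
#       form = frm_agendamento(request.POST or None)
#       if request.method == 'POST' and form.is_valid():
#           form.save()
#       return render(request, 'recursos/agendamento.html', {'form': form})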
| 43.038462
| 119
| 0.679178
|
from django.db import models
from django.forms import ModelForm
from django.forms import TextInput
from .models import agendamento
class frm_agendamento(ModelForm):
    class Meta:
        model = agendamento
        exclude = ('criado_em','google_link')
        widgets = {
'data_agendamento': TextInput( attrs={'class':'form-control datepicker', 'data-date-format':'dd/mm/yyyy'}),
'horario_inicio': TextInput( attrs={'class':'form-control'}),
'horario_fim': TextInput( attrs={'class':'form-control'}),
'motivo': TextInput( attrs={'class':'form-control'})
}
| true
| true
|
f703746e0316f7cff40f0b2d9a7c219de426744e
| 26,744
|
py
|
Python
|
tools/validators/ontology_validator/yamlformat/validator/entity_type_lib.py
|
a-o-u/digitalbuildings
|
407075cbfbd3d81e6c2ce0c25cc160e3238d9225
|
[
"Apache-2.0"
] | null | null | null |
tools/validators/ontology_validator/yamlformat/validator/entity_type_lib.py
|
a-o-u/digitalbuildings
|
407075cbfbd3d81e6c2ce0c25cc160e3238d9225
|
[
"Apache-2.0"
] | null | null | null |
tools/validators/ontology_validator/yamlformat/validator/entity_type_lib.py
|
a-o-u/digitalbuildings
|
407075cbfbd3d81e6c2ce0c25cc160e3238d9225
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes and methods for working with entity types in the ontology."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import typing
from typing import Optional, Tuple
from yamlformat.validator import base_lib
from yamlformat.validator import config_folder_lib
from yamlformat.validator import field_lib
from yamlformat.validator import findings_lib
ENTITY_TYPE_NAME_REGEX = re.compile(r'^[a-zA-Z][a-zA-Z0-9]*(?:_[a-zA-Z0-9]+)*$')
FIELD_INCREMENT_STRIPPER_REGEX = re.compile(
r'(^[a-z][a-z0-9]*(?:_[a-z][a-z0-9]*)*)((?:_[0-9]+)+)$')
FieldParts = typing.NamedTuple('FieldParts',
[('namespace', str), ('field', str),
('increment', str)])
OptWrapper = typing.NamedTuple('OptWrapper', [('field', FieldParts),
('optional', bool)])
TypeParts = typing.NamedTuple('TypeParts', [('namespace', str),
('typename', str)])
EntityIdByEntry = typing.NamedTuple('EntityIdByEntry', [('namespace', str),
('typename', str)])
def SeparateFieldNamespace(qualified_field_name: str) -> Tuple[str, str]:
"""Returns the namespace and its field name as separate values or an Error.
Args:
qualified_field_name: a qualified field string like `HVAC/run_status`
  Raises:
    TypeError: if the field is not qualified
    ValueError: if the name contains more than one namespace separator
"""
fqf_parsed = qualified_field_name.split('/')
if len(fqf_parsed) == 1:
raise TypeError('Type improperly formatted, a namespace is missing: ',
fqf_parsed)
if len(fqf_parsed) > 2:
raise ValueError('Type improperly formatted, too many separators: ',
fqf_parsed)
return fqf_parsed[0], fqf_parsed[1]
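# Example: SeparateFieldNamespace('HVAC/run_status') -> ('HVAC', 'run_status');
# an unqualified 'run_status' raises TypeError, 'a/b/c' raises ValueError.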
def SeparateFieldIncrement(field_name) -> Tuple[str, str]:
"""Takes as an input a field_name (string) and returns a tuple of strings.
The first element is the standard field name and its increment when available.
For example: zone_occupancy_status_1 -> [zone_occupancy_status, 1]
Args:
field_name: the field name to parse.
Returns:
A tuple of string, the standard field name and its increment if available.
"""
field_name_part = field_name
increment_part = ''
match = FIELD_INCREMENT_STRIPPER_REGEX.match(field_name)
if match:
field_name_part = match.group(1)
increment_part = match.group(2)
return field_name_part, increment_part
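# Example: SeparateFieldIncrement('zone_occupancy_status_1') returns
# ('zone_occupancy_status', '_1'); a name with no numeric suffix comes back
# with an empty increment, e.g. ('zone_occupancy_status', '').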
class EntityTypeUniverse(findings_lib.Findings):
"""Helper class to represent the defined universe of EntityTypes.
Only contains valid EntityTypes.
  Attributes:
namespace_folder_map: a map of namespace names to EntityTypeFolders.
type_namespaces_map: a map of type names to TypeNamespaces.
type_ids_map: maps type IDs to entity types. Contains all valid types w/IDs.
"""
def __init__(self, entity_type_folders):
"""Init.
Args:
entity_type_folders: list of EntityTypeFolder objects parsed from files.
"""
super(EntityTypeUniverse, self).__init__()
self.namespace_folder_map = {}
self.type_namespaces_map = {}
self.type_ids_map = {}
self._BuildNamespaceFolderMap(entity_type_folders)
self._BuildTypeMaps(
[folder.local_namespace for folder in entity_type_folders])
def GetEntityType(self, namespace_name, typename):
"""Finds entity_type by namespace and typename and returns it or None."""
if namespace_name not in self.type_namespaces_map:
return None
return self.type_namespaces_map[namespace_name].GetType(typename)
def GetNamespace(self, namespace_name):
"""Finds namespace in the universe by name and returns it or None."""
return self.type_namespaces_map.get(namespace_name, None)
def GetNamespaces(self):
"""Get the entity type namespace objects in this universe.
Returns:
A list of EntityTypeNamespace objects
"""
return list(self.type_namespaces_map.values())
def _GetDynamicFindings(self, filter_old_warnings):
findings = []
for folder in self.namespace_folder_map.values():
findings += folder.GetFindings(filter_old_warnings)
return findings
def _BuildTypeMaps(self, type_namespaces):
"""Creates a dict mapping namespace strings to TypeNamespace objects.
Sets the self.type_namespaces_map attribute of the class.
Args:
type_namespaces: a list of TypeNamespace objects.
Raises:
RuntimeError: if assumptions about internal data structures are violated.
"""
for type_namespace in type_namespaces:
self.type_namespaces_map[type_namespace.namespace] = type_namespace
for entity_type in type_namespace.valid_types_map.values():
if entity_type.uid:
if entity_type.uid in self.type_ids_map:
dup_id_entry = self.type_ids_map[entity_type.uid]
dup_id_type = self.GetEntityType(dup_id_entry.namespace,
dup_id_entry.typename)
if dup_id_type is None:
raise RuntimeError('Duplicate type with uid ' + entity_type.uid +
' should always be mapped')
entity_type.AddFinding(
findings_lib.DuplicateIdsError(type_namespace.namespace,
entity_type, dup_id_type))
dup_id_type.AddFinding(
findings_lib.DuplicateIdsError(dup_id_entry.namespace,
dup_id_type, entity_type))
self.type_ids_map[entity_type.uid] = EntityIdByEntry(
namespace=type_namespace.namespace, typename=entity_type.typename)
def _BuildNamespaceFolderMap(self, type_folders):
"""Creates a dict mapping namespace strings to EntityTypeFolder objects.
Sets the self.namespace_folder_map attribute of the class.
Args:
type_folders: a list of EntityTypeFolder objects.
"""
for folder in type_folders:
self.namespace_folder_map[folder.local_namespace.namespace] = folder
class EntityTypeFolder(config_folder_lib.ConfigFolder):
"""Class representing a namespace folder of entity types.
Class fully validates all entity types defined within the namespace folder,
collects issues found, and stores all valid entity types.
Attributes:
local_namespace: TypeNamespace object representing this namespace.
"""
def __init__(self, folderpath, field_universe=None):
"""Init.
Args:
folderpath: required string with full path to the folder containing entity
type files. Path should be relative to google3/ and have no leading or
trailing /.
field_universe: optional FieldsUniverse object.
"""
super(EntityTypeFolder, self).__init__(folderpath,
base_lib.ComponentType.ENTITY_TYPE)
self.local_namespace = TypeNamespace(self._namespace_name, field_universe)
def Finalize(self):
"""Call to complete entity creation after all types are added."""
self.local_namespace.QualifyParentNames()
def _AddFromConfigHelper(self, document, context):
for type_name in document:
new_type = self._ConstructType(type_name, document[type_name],
context.filepath)
self._AddType(new_type)
def _ConstructField(self, local_field_names, optional, output_array):
for qualified_field_name in local_field_names:
field_ns, raw_field_name = field_lib.SplitFieldName(qualified_field_name)
std_field_name, increment = SeparateFieldIncrement(raw_field_name)
# Field will look local if undefined, but we'll catch the error later
      # Because we do explicit existence checks and it will fail
# TODO(berkoben) refactor so validation happens in an order that
# prevents this logic lint
field_ns = self.local_namespace.GetQualifiedNamespace(
field_ns, std_field_name)
output_array.append(
OptWrapper(
field=FieldParts(
namespace=field_ns, field=std_field_name,
increment=increment),
optional=optional))
def _ConstructType(self, type_name, type_contents, filepath):
"""Reads a entity type config block and generates an EntityType object."""
description = ''
parents = None
local_field_names = None
opt_local_field_names = None
is_abstract = False
is_canonical = False
uid = None
expected_keys = set([
'description', 'implements', 'uses', 'opt_uses', 'is_abstract', 'id',
'is_canonical'
])
if 'description' in type_contents:
description = type_contents['description']
if 'implements' in type_contents:
parents = type_contents['implements']
if 'uses' in type_contents:
local_field_names = type_contents['uses']
if 'opt_uses' in type_contents:
opt_local_field_names = type_contents['opt_uses']
if 'is_abstract' in type_contents:
is_abstract = type_contents['is_abstract']
if 'is_canonical' in type_contents:
is_canonical = type_contents['is_canonical']
if 'id' in type_contents:
uid = type_contents['id']
# Generate tuples to represent each field
fq_lfn = []
if local_field_names:
self._ConstructField(local_field_names, False, fq_lfn)
if opt_local_field_names:
self._ConstructField(opt_local_field_names, True, fq_lfn)
entity_type = EntityType(
filepath=filepath,
typename=type_name,
description=description,
parents=parents,
local_field_tuples=fq_lfn,
is_abstract=is_abstract,
inherited_fields_expanded=False,
is_canonical=is_canonical,
uid=uid,
namespace=self.local_namespace)
# Add errors to type if there's anything extra in the block. We add to the
# entity type because an extra key here is likely a typo in a real key name
# that would result in information being lost from the type.
for key in type_contents:
if key not in expected_keys:
entity_type.AddFinding(
findings_lib.UnrecognizedKeyError(key, entity_type.file_context))
return entity_type
def _AddType(self, entity_type):
"""Adds entity_type if it is fully valid.
If formatting is correct, continues on to field validation.
Records all findings in object.
Args:
entity_type: EntityType object.
Returns:
True if the entity type was successfully validated and added. False
otherwise.
"""
if not entity_type.IsValid():
self.AddFindings(entity_type.GetFindings())
return False
return self.local_namespace.InsertType(entity_type)
class TypeNamespace(findings_lib.Findings):
"""Class representing a namespace of entity types.
Attributes:
namespace: string
valid_types_map: Dict mapping typename strings to EntityType objects.
"""
def __init__(self, namespace, field_universe=None):
super(TypeNamespace, self).__init__()
self.namespace = namespace
self._field_universe = field_universe
self.valid_types_map = {}
self._parents_qualified = False
def _GetDynamicFindings(self, filter_old_warnings):
findings = []
for entity_type in self.valid_types_map.values():
findings += entity_type.GetFindings(filter_old_warnings)
return findings
def GetType(self, typename):
return self.valid_types_map.get(typename, None)
def InsertType(self, entity_type):
"""Validate that declared fields are defined.
Adds type if valid and unique.
Findings for non-validated fields are applied to this TypeNamespace.
Args:
entity_type: entity to attempt to add.
Returns:
True if entity was added successfully.
Raises:
RuntimeError: if this is called after qualifying parent names
"""
if self._parents_qualified:
raise RuntimeError('Cannot add types after Qualifying parents')
if self._ValidateFields(entity_type):
typename = entity_type.typename
mapped_entity_type = self.valid_types_map.get(typename)
if mapped_entity_type is None:
self.valid_types_map[typename] = entity_type
return True
# entity_type is a duplicate type
self.AddFinding(
findings_lib.DuplicateEntityTypeDefinitionError(
self, entity_type, mapped_entity_type.file_context))
return False
return False
def GetQualifiedNamespace(self, field_ns, field_name):
"""Returns the namespace name for this field.
Args:
field_ns: namespace of field as parsed from the config
field_name: unqualified field name string
Returns:
The fully qualified field string.
"""
if not field_ns and self.IsLocalField(field_name):
return self.namespace
return field_ns
def _BuildQualifiedParentTuple(self, parent_name):
"""Creates the two-part parent tuple with a fully-qualified namespace.
Args:
parent_name: string as specified in the config file.
Returns:
A TypeParts tuple representing this parent.
"""
namespace_name = self.namespace
split = parent_name.split('/')
if len(split) != 2:
if not self.GetType(parent_name):
# parent is in the global namespace
namespace_name = ''
else:
namespace_name = split[0]
parent_name = split[1]
return TypeParts(namespace=namespace_name, typename=parent_name)
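  # Examples of the qualification rule above (names are illustrative):
  #   'HVAC/ZONE' -> TypeParts(namespace='HVAC', typename='ZONE')
  #   'ZONE' defined in this namespace -> TypeParts(self.namespace, 'ZONE')
  #   'ZONE' not defined locally -> TypeParts(namespace='', typename='ZONE')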
def QualifyParentNames(self):
"""Sets parents attribute of this namespace with fully qualified names."""
if self._parents_qualified:
return
for entity_type in self.valid_types_map.values():
fq_tuplemap = {}
for parent in entity_type.unqualified_parent_names:
fq_tuple = self._BuildQualifiedParentTuple(parent)
fq_name = '{0}/{1}'.format(fq_tuple.namespace, fq_tuple.typename)
fq_tuplemap[fq_name] = fq_tuple
entity_type.parent_names = fq_tuplemap
self._parents_qualified = True
def IsLocalField(self, field_name):
"""Returns true if this unqualified field is defined in the namespace.
Args:
field_name: an unqualified field name with no leading '/'
"""
if not self._field_universe:
return False
return self._field_universe.IsFieldDefined(field_name, self.namespace)
def _ValidateFields(self, entity):
"""Validates that all fields declared by entity are defined."""
# if field_universe is not defined just return true
if not self._field_universe:
return True
valid = True
for field_tuple in entity.local_field_names.values():
if not self._ValidateField(field_tuple.field, entity):
valid = False
return valid
def _ValidateField(self, field_tuple, entity):
"""Validates that field declared by entity is defined.
Field formatting has already been validated.
Findings are saved on the TypeNamespace.
Args:
field_tuple: tuple representing a fully qualified field
entity: EntityType
Returns:
True if field is defined.
"""
if not self._field_universe.IsFieldDefined(field_tuple.field,
field_tuple.namespace):
self.AddFinding(
findings_lib.UndefinedFieldError(entity, field_tuple.field))
return False
return True
def BuildQualifiedField(opt_tuple):
field_tuple = opt_tuple.field
return '{0}/{1}{2}'.format(field_tuple.namespace, field_tuple.field,
field_tuple.increment)
class EntityType(findings_lib.Findings):
"""Creates an EntityType object from a set of values describing the type.
Attributes:
file_context: FileContext object containing file info.
typename: string.
description: string.
parent_names: a list of parent typename strings.
local_field_names: the local set of standard field names
inherited_field_names: the set of inherited field names. Is always assigned
to an empty set at init, to be expanded later.
inherited_fields_expanded: boolean.
is_canonical: boolean indicating if this is a curated canonical type.
uid: the database ID string of this type if uploaded
namespace: a reference to the namespace object the entity belongs to
Returns:
An instance of the EntityType class.
"""
def __init__(self,
begin_line_number=0,
filepath='',
typename='',
description='',
parents=None,
local_field_tuples=None,
is_abstract=False,
inherited_fields_expanded=False,
is_canonical=False,
uid=None,
namespace=None):
"""Init.
Args:
begin_line_number: int. Starting line number for the entity type
definition.
filepath: string. google3 path to the file defining the type.
typename: required string.
description: required string.
parents: list of parent typename strings.
local_field_tuples: list of OptWrapper tuples
is_abstract: boolean indicating if this is an abstract type.
inherited_fields_expanded: boolean. Should be false at init.
is_canonical: boolean indicating if this is a curated canonical type.
uid: the database ID string of this type if uploaded
namespace: a reference to the namespace object the entity belongs to
"""
super(EntityType, self).__init__()
self.file_context = findings_lib.FileContext(
begin_line_number=begin_line_number, filepath=filepath)
self.typename = typename
self.description = description
self.namespace = namespace
self.local_field_names = {}
local_field_names = []
if local_field_tuples:
local_field_names = [
BuildQualifiedField(opt_parts) for opt_parts in local_field_tuples
]
for i, lfn in enumerate(local_field_names):
self.local_field_names[lfn] = local_field_tuples[i]
self.inherited_field_names = {}
self.inherited_fields_expanded = inherited_fields_expanded
if parents is None:
parents = []
self.parent_names = None
self.parent_name_tuples = None
self.unqualified_parent_names = parents
self._all_fields = None
self._has_optional_fields = None
self.is_abstract = is_abstract
self.is_canonical = is_canonical
self.uid = uid
# TODO(berkoben) update this method to use tuples if possible
self._ValidateType(local_field_names)
def HasOptionalFields(self, run_unsafe=False):
if not (self.inherited_fields_expanded or run_unsafe):
raise RuntimeError('Type has not been expanded')
if self._has_optional_fields is not None:
return self._has_optional_fields
fields = self.GetAllFields()
for field in fields.values():
if field.optional:
self._has_optional_fields = True
return self._has_optional_fields
self._has_optional_fields = False
return self._has_optional_fields
def GetAllFields(self, run_unsafe=False):
"""Returns the expanded set of fields for this type.
Args:
run_unsafe: set true to run against a type before fields are fully
expanded. Running in this mode does not memoize the result.
Returns:
A dictionary of fully qualified strings representing fields in the type to
OptWrapper tuples representing the contents of the field.
Raises:
RuntimeError: if fields have not yet been expanded.
"""
if not (self.inherited_fields_expanded or run_unsafe):
raise RuntimeError('Type {0} has not been expanded'.format(self.typename))
if self._all_fields is None:
tmp = self.local_field_names.copy()
tmp.update(self.inherited_field_names)
if run_unsafe:
return tmp
self._all_fields = tmp
return self._all_fields
def HasFieldAsWritten(self,
fieldname_as_written: str,
run_unsafe: bool = False) -> bool:
"""Returns true if a valid config file value maps to a field in the type.
Accepts a field name as written in a configuration file
referencing this type. The method applies context-aware namespace
omission (i.e. referencing a field without its namespace) to identify the
field regardless of the namespace and syntax variation.
    Note: to minimize redundancy, this method simply wraps
    `GetFieldFromConfigText()`. If your application also needs the field, use
    that method instead to eliminate redundant processing.
Args:
fieldname_as_written: string verbatim from a building or ontology config
run_unsafe: set true to allow calls before parent type fields are expanded
Returns:
True if the Field is defined on the type. False otherwise.
"""
return self.GetFieldFromConfigText(fieldname_as_written,
run_unsafe) is not None
def GetFieldFromConfigText(self,
fieldname_as_written: str,
run_unsafe: bool = False) -> Optional[OptWrapper]:
"""Returns `OptWrapper` provided string validates against the entity.
Accepts a field name as written in a configuration file
referencing this type. The method applies all shorthanding rules to identify
the field regardless of the namespace and syntax variation.
Args:
fieldname_as_written: string verbatim from a building or ontology config
run_unsafe: set true to allow calls before parent type fields are expanded
Returns:
`OptWrapper` if field is present, None otherwise
"""
try:
# Check the field as if it's fully qualified.
return self.GetField(fieldname_as_written, run_unsafe)
except TypeError:
pass
# Field is unqualified so it is either global or type-namespace-local
# Check for a locally defined field first using type's namespace
field = self._GetField(
self.namespace.namespace + '/' + fieldname_as_written, run_unsafe)
if not field:
# Check field as if it's in the global namespace
field = self._GetField('/' + fieldname_as_written, run_unsafe)
return field
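  # Resolution order applied by GetFieldFromConfigText: (1) treat the name as
  # fully qualified, (2) fall back to this type's own namespace, (3) finally
  # try the global namespace ('/<field>').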
def HasField(self,
fully_qualified_fieldname: str,
run_unsafe: bool = False) -> bool:
"""Returns True if field string validates against the entity's fields.
Args:
      fully_qualified_fieldname: a fully qualified name, for example:
"HVAC/run_status_1".
run_unsafe: set true to run against a type before fields are fully
expanded. Running in this mode does not memoize the result.
    Raises:
      TypeError: if the field is not fully qualified
"""
return self.GetField(fully_qualified_fieldname, run_unsafe) is not None
def GetField(self,
fully_qualified_fieldname: str,
run_unsafe: bool = False) -> Optional[OptWrapper]:
"""Returns `OptWrapper` if field string validates against the entity.
Args:
      fully_qualified_fieldname: a fully qualified name, for example:
"HVAC/run_status_1".
run_unsafe: set true to run against a type before fields are fully
expanded. Running in this mode does not memoize the result.
Returns:
`OptWrapper` if field is present, None otherwise
    Raises:
      TypeError: if the field is not fully qualified
"""
# Throws an error in the case that this isn't a fully qualified field
_, _ = SeparateFieldNamespace(fully_qualified_fieldname)
return self._GetField(fully_qualified_fieldname, run_unsafe)
def _GetField(self,
fully_qualified_fieldname: str,
run_unsafe: bool = False) -> Optional[OptWrapper]:
return self.GetAllFields(run_unsafe).get(fully_qualified_fieldname)
def _ValidateType(self, local_field_names):
"""Validates that the entity type is formatted correctly.
Checks for formatting and duplicate fields and parents.
Records any errors found.
Args:
local_field_names: list of local field names for the type.
"""
# Make sure the typename is non-empty.
if not self.typename:
self.AddFinding(findings_lib.MissingTypenameError(self))
elif not isinstance(self.typename, str):
self.AddFinding(
findings_lib.IllegalKeyTypeError(self.typename, self.file_context))
elif not ENTITY_TYPE_NAME_REGEX.match(self.typename):
self.AddFinding(
findings_lib.InvalidTypenameError(self.typename, self.file_context))
# Make sure the type description is non-empty.
if not self.description:
self.AddFinding(findings_lib.MissingEntityTypeDescriptionWarning(self))
# Check for duplicate local fields.
# this check is case insensitive to catch dupes earlier in the event that
# we stop explicitly rejecting upper case characters
check_fields = set()
for field in local_field_names:
field_lower = field.lower()
if field_lower in check_fields:
self.AddFinding(findings_lib.DuplicateFieldError(self, field))
continue
check_fields.add(field_lower)
# TODO(berkoben): Add more checks to validate fields in isolation
# (in case we don't have a field set to check against)
# (i.e. check for chirality, formatting. Could use actual Field objects)
# Check formatting of field name
if len(field.split('/')) > 2:
self.AddFinding(findings_lib.UnrecognizedFieldFormatError(self, field))
# Check for duplicate parent names.
parent_names_check = set()
for parent_name in self.unqualified_parent_names:
if parent_name in parent_names_check:
self.AddFinding(findings_lib.DuplicateParentError(self, parent_name))
continue
parent_names_check.add(parent_name)
# Check formatting of parent name
if len(parent_name.split('/')) > 2:
self.AddFinding(
findings_lib.UnrecognizedParentFormatError(self, parent_name))
# Enforce that the inherited_fields_expanded field is not set
if self.inherited_fields_expanded:
self.AddFinding(findings_lib.InheritedFieldsSetError(self))
| 36.091768
| 80
| 0.695408
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import typing
from typing import Optional, Tuple
from yamlformat.validator import base_lib
from yamlformat.validator import config_folder_lib
from yamlformat.validator import field_lib
from yamlformat.validator import findings_lib
ENTITY_TYPE_NAME_REGEX = re.compile(r'^[a-zA-Z][a-zA-Z0-9]*(?:_[a-zA-Z0-9]+)*$')
FIELD_INCREMENT_STRIPPER_REGEX = re.compile(
r'(^[a-z][a-z0-9]*(?:_[a-z][a-z0-9]*)*)((?:_[0-9]+)+)$')
FieldParts = typing.NamedTuple('FieldParts',
[('namespace', str), ('field', str),
('increment', str)])
OptWrapper = typing.NamedTuple('OptWrapper', [('field', FieldParts),
('optional', bool)])
TypeParts = typing.NamedTuple('TypeParts', [('namespace', str),
('typename', str)])
EntityIdByEntry = typing.NamedTuple('EntityIdByEntry', [('namespace', str),
('typename', str)])
def SeparateFieldNamespace(qualified_field_name: str) -> Tuple[str, str]:
fqf_parsed = qualified_field_name.split('/')
if len(fqf_parsed) == 1:
raise TypeError('Type improperly formatted, a namespace is missing: ',
fqf_parsed)
if len(fqf_parsed) > 2:
raise ValueError('Type improperly formatted, too many separators: ',
fqf_parsed)
return fqf_parsed[0], fqf_parsed[1]
def SeparateFieldIncrement(field_name) -> Tuple[str, str]:
field_name_part = field_name
increment_part = ''
match = FIELD_INCREMENT_STRIPPER_REGEX.match(field_name)
if match:
field_name_part = match.group(1)
increment_part = match.group(2)
return field_name_part, increment_part
class EntityTypeUniverse(findings_lib.Findings):
def __init__(self, entity_type_folders):
super(EntityTypeUniverse, self).__init__()
self.namespace_folder_map = {}
self.type_namespaces_map = {}
self.type_ids_map = {}
self._BuildNamespaceFolderMap(entity_type_folders)
self._BuildTypeMaps(
[folder.local_namespace for folder in entity_type_folders])
def GetEntityType(self, namespace_name, typename):
if namespace_name not in self.type_namespaces_map:
return None
return self.type_namespaces_map[namespace_name].GetType(typename)
def GetNamespace(self, namespace_name):
return self.type_namespaces_map.get(namespace_name, None)
def GetNamespaces(self):
return list(self.type_namespaces_map.values())
def _GetDynamicFindings(self, filter_old_warnings):
findings = []
for folder in self.namespace_folder_map.values():
findings += folder.GetFindings(filter_old_warnings)
return findings
def _BuildTypeMaps(self, type_namespaces):
for type_namespace in type_namespaces:
self.type_namespaces_map[type_namespace.namespace] = type_namespace
for entity_type in type_namespace.valid_types_map.values():
if entity_type.uid:
if entity_type.uid in self.type_ids_map:
dup_id_entry = self.type_ids_map[entity_type.uid]
dup_id_type = self.GetEntityType(dup_id_entry.namespace,
dup_id_entry.typename)
if dup_id_type is None:
raise RuntimeError('Duplicate type with uid ' + entity_type.uid +
' should always be mapped')
entity_type.AddFinding(
findings_lib.DuplicateIdsError(type_namespace.namespace,
entity_type, dup_id_type))
dup_id_type.AddFinding(
findings_lib.DuplicateIdsError(dup_id_entry.namespace,
dup_id_type, entity_type))
self.type_ids_map[entity_type.uid] = EntityIdByEntry(
namespace=type_namespace.namespace, typename=entity_type.typename)
def _BuildNamespaceFolderMap(self, type_folders):
for folder in type_folders:
self.namespace_folder_map[folder.local_namespace.namespace] = folder
class EntityTypeFolder(config_folder_lib.ConfigFolder):
def __init__(self, folderpath, field_universe=None):
super(EntityTypeFolder, self).__init__(folderpath,
base_lib.ComponentType.ENTITY_TYPE)
self.local_namespace = TypeNamespace(self._namespace_name, field_universe)
def Finalize(self):
self.local_namespace.QualifyParentNames()
def _AddFromConfigHelper(self, document, context):
for type_name in document:
new_type = self._ConstructType(type_name, document[type_name],
context.filepath)
self._AddType(new_type)
def _ConstructField(self, local_field_names, optional, output_array):
for qualified_field_name in local_field_names:
field_ns, raw_field_name = field_lib.SplitFieldName(qualified_field_name)
std_field_name, increment = SeparateFieldIncrement(raw_field_name)
      # Because we do explicit existence checks and it will fail
# TODO(berkoben) refactor so validation happens in an order that
# prevents this logic lint
field_ns = self.local_namespace.GetQualifiedNamespace(
field_ns, std_field_name)
output_array.append(
OptWrapper(
field=FieldParts(
namespace=field_ns, field=std_field_name,
increment=increment),
optional=optional))
def _ConstructType(self, type_name, type_contents, filepath):
description = ''
parents = None
local_field_names = None
opt_local_field_names = None
is_abstract = False
is_canonical = False
uid = None
expected_keys = set([
'description', 'implements', 'uses', 'opt_uses', 'is_abstract', 'id',
'is_canonical'
])
if 'description' in type_contents:
description = type_contents['description']
if 'implements' in type_contents:
parents = type_contents['implements']
if 'uses' in type_contents:
local_field_names = type_contents['uses']
if 'opt_uses' in type_contents:
opt_local_field_names = type_contents['opt_uses']
if 'is_abstract' in type_contents:
is_abstract = type_contents['is_abstract']
if 'is_canonical' in type_contents:
is_canonical = type_contents['is_canonical']
if 'id' in type_contents:
uid = type_contents['id']
# Generate tuples to represent each field
fq_lfn = []
if local_field_names:
self._ConstructField(local_field_names, False, fq_lfn)
if opt_local_field_names:
self._ConstructField(opt_local_field_names, True, fq_lfn)
entity_type = EntityType(
filepath=filepath,
typename=type_name,
description=description,
parents=parents,
local_field_tuples=fq_lfn,
is_abstract=is_abstract,
inherited_fields_expanded=False,
is_canonical=is_canonical,
uid=uid,
namespace=self.local_namespace)
# Add errors to type if there's anything extra in the block. We add to the
for key in type_contents:
if key not in expected_keys:
entity_type.AddFinding(
findings_lib.UnrecognizedKeyError(key, entity_type.file_context))
return entity_type
def _AddType(self, entity_type):
if not entity_type.IsValid():
self.AddFindings(entity_type.GetFindings())
return False
return self.local_namespace.InsertType(entity_type)
class TypeNamespace(findings_lib.Findings):
def __init__(self, namespace, field_universe=None):
super(TypeNamespace, self).__init__()
self.namespace = namespace
self._field_universe = field_universe
self.valid_types_map = {}
self._parents_qualified = False
def _GetDynamicFindings(self, filter_old_warnings):
findings = []
for entity_type in self.valid_types_map.values():
findings += entity_type.GetFindings(filter_old_warnings)
return findings
def GetType(self, typename):
return self.valid_types_map.get(typename, None)
def InsertType(self, entity_type):
if self._parents_qualified:
raise RuntimeError('Cannot add types after Qualifying parents')
if self._ValidateFields(entity_type):
typename = entity_type.typename
mapped_entity_type = self.valid_types_map.get(typename)
if mapped_entity_type is None:
self.valid_types_map[typename] = entity_type
return True
self.AddFinding(
findings_lib.DuplicateEntityTypeDefinitionError(
self, entity_type, mapped_entity_type.file_context))
return False
return False
def GetQualifiedNamespace(self, field_ns, field_name):
if not field_ns and self.IsLocalField(field_name):
return self.namespace
return field_ns
def _BuildQualifiedParentTuple(self, parent_name):
namespace_name = self.namespace
split = parent_name.split('/')
if len(split) != 2:
if not self.GetType(parent_name):
namespace_name = ''
else:
namespace_name = split[0]
parent_name = split[1]
return TypeParts(namespace=namespace_name, typename=parent_name)
def QualifyParentNames(self):
if self._parents_qualified:
return
for entity_type in self.valid_types_map.values():
fq_tuplemap = {}
for parent in entity_type.unqualified_parent_names:
fq_tuple = self._BuildQualifiedParentTuple(parent)
fq_name = '{0}/{1}'.format(fq_tuple.namespace, fq_tuple.typename)
fq_tuplemap[fq_name] = fq_tuple
entity_type.parent_names = fq_tuplemap
self._parents_qualified = True
def IsLocalField(self, field_name):
if not self._field_universe:
return False
return self._field_universe.IsFieldDefined(field_name, self.namespace)
def _ValidateFields(self, entity):
if not self._field_universe:
return True
valid = True
for field_tuple in entity.local_field_names.values():
if not self._ValidateField(field_tuple.field, entity):
valid = False
return valid
def _ValidateField(self, field_tuple, entity):
if not self._field_universe.IsFieldDefined(field_tuple.field,
field_tuple.namespace):
self.AddFinding(
findings_lib.UndefinedFieldError(entity, field_tuple.field))
return False
return True
def BuildQualifiedField(opt_tuple):
field_tuple = opt_tuple.field
return '{0}/{1}{2}'.format(field_tuple.namespace, field_tuple.field,
field_tuple.increment)
class EntityType(findings_lib.Findings):
def __init__(self,
begin_line_number=0,
filepath='',
typename='',
description='',
parents=None,
local_field_tuples=None,
is_abstract=False,
inherited_fields_expanded=False,
is_canonical=False,
uid=None,
namespace=None):
super(EntityType, self).__init__()
self.file_context = findings_lib.FileContext(
begin_line_number=begin_line_number, filepath=filepath)
self.typename = typename
self.description = description
self.namespace = namespace
self.local_field_names = {}
local_field_names = []
if local_field_tuples:
local_field_names = [
BuildQualifiedField(opt_parts) for opt_parts in local_field_tuples
]
for i, lfn in enumerate(local_field_names):
self.local_field_names[lfn] = local_field_tuples[i]
self.inherited_field_names = {}
self.inherited_fields_expanded = inherited_fields_expanded
if parents is None:
parents = []
self.parent_names = None
self.parent_name_tuples = None
self.unqualified_parent_names = parents
self._all_fields = None
self._has_optional_fields = None
self.is_abstract = is_abstract
self.is_canonical = is_canonical
self.uid = uid
self._ValidateType(local_field_names)
def HasOptionalFields(self, run_unsafe=False):
if not (self.inherited_fields_expanded or run_unsafe):
raise RuntimeError('Type has not been expanded')
if self._has_optional_fields is not None:
return self._has_optional_fields
fields = self.GetAllFields()
for field in fields.values():
if field.optional:
self._has_optional_fields = True
return self._has_optional_fields
self._has_optional_fields = False
return self._has_optional_fields
def GetAllFields(self, run_unsafe=False):
if not (self.inherited_fields_expanded or run_unsafe):
raise RuntimeError('Type {0} has not been expanded'.format(self.typename))
if self._all_fields is None:
tmp = self.local_field_names.copy()
tmp.update(self.inherited_field_names)
if run_unsafe:
return tmp
self._all_fields = tmp
return self._all_fields
def HasFieldAsWritten(self,
fieldname_as_written: str,
run_unsafe: bool = False) -> bool:
return self.GetFieldFromConfigText(fieldname_as_written,
run_unsafe) is not None
def GetFieldFromConfigText(self,
fieldname_as_written: str,
run_unsafe: bool = False) -> Optional[OptWrapper]:
try:
return self.GetField(fieldname_as_written, run_unsafe)
except TypeError:
pass
# Field is unqualified so it is either global or type-namespace-local
# Check for a locally defined field first using type's namespace
field = self._GetField(
self.namespace.namespace + '/' + fieldname_as_written, run_unsafe)
if not field:
field = self._GetField('/' + fieldname_as_written, run_unsafe)
return field
def HasField(self,
fully_qualified_fieldname: str,
run_unsafe: bool = False) -> bool:
return self.GetField(fully_qualified_fieldname, run_unsafe) is not None
def GetField(self,
fully_qualified_fieldname: str,
run_unsafe: bool = False) -> Optional[OptWrapper]:
# Throws an error in the case that this isn't a fully qualified field
_, _ = SeparateFieldNamespace(fully_qualified_fieldname)
return self._GetField(fully_qualified_fieldname, run_unsafe)
def _GetField(self,
fully_qualified_fieldname: str,
run_unsafe: bool = False) -> Optional[OptWrapper]:
return self.GetAllFields(run_unsafe).get(fully_qualified_fieldname)
def _ValidateType(self, local_field_names):
if not self.typename:
self.AddFinding(findings_lib.MissingTypenameError(self))
elif not isinstance(self.typename, str):
self.AddFinding(
findings_lib.IllegalKeyTypeError(self.typename, self.file_context))
elif not ENTITY_TYPE_NAME_REGEX.match(self.typename):
self.AddFinding(
findings_lib.InvalidTypenameError(self.typename, self.file_context))
if not self.description:
self.AddFinding(findings_lib.MissingEntityTypeDescriptionWarning(self))
check_fields = set()
for field in local_field_names:
field_lower = field.lower()
if field_lower in check_fields:
self.AddFinding(findings_lib.DuplicateFieldError(self, field))
continue
check_fields.add(field_lower)
# (i.e. check for chirality, formatting. Could use actual Field objects)
# Check formatting of field name
if len(field.split('/')) > 2:
self.AddFinding(findings_lib.UnrecognizedFieldFormatError(self, field))
# Check for duplicate parent names.
parent_names_check = set()
for parent_name in self.unqualified_parent_names:
if parent_name in parent_names_check:
self.AddFinding(findings_lib.DuplicateParentError(self, parent_name))
continue
parent_names_check.add(parent_name)
# Check formatting of parent name
if len(parent_name.split('/')) > 2:
self.AddFinding(
findings_lib.UnrecognizedParentFormatError(self, parent_name))
# Enforce that the inherited_fields_expanded field is not set
if self.inherited_fields_expanded:
self.AddFinding(findings_lib.InheritedFieldsSetError(self))
| true
| true
|
f70374938ed4795d168e995524aeabe6d04b8859
| 33
|
py
|
Python
|
cfail/check.py
|
Seanny123/coala-fail
|
c45e9986e2a46de93f73ad7d1edbc86a609bbe28
|
[
"MIT"
] | null | null | null |
cfail/check.py
|
Seanny123/coala-fail
|
c45e9986e2a46de93f73ad7d1edbc86a609bbe28
|
[
"MIT"
] | null | null | null |
cfail/check.py
|
Seanny123/coala-fail
|
c45e9986e2a46de93f73ad7d1edbc86a609bbe28
|
[
"MIT"
] | null | null | null |
# a = 2
print("check this file")
| 11
| 24
| 0.606061
|
print("check this file")
| true
| true
|
f70375438cbfcec1b0797a36cf6bd37f200b244f
| 540
|
py
|
Python
|
realtimeplt/outputs/__init__.py
|
Aryan05/realtimeplt
|
e04c09550fece33df03aaf6f340d0405d77d6443
|
[
"MIT"
] | 3
|
2020-09-28T10:08:56.000Z
|
2020-10-10T00:30:05.000Z
|
realtimeplt/outputs/__init__.py
|
Aryan05/realtimeplt
|
e04c09550fece33df03aaf6f340d0405d77d6443
|
[
"MIT"
] | 1
|
2020-09-30T16:15:31.000Z
|
2020-09-30T16:15:42.000Z
|
realtimeplt/outputs/__init__.py
|
Aryan05/realtimeplt
|
e04c09550fece33df03aaf6f340d0405d77d6443
|
[
"MIT"
] | 1
|
2020-10-10T00:36:19.000Z
|
2020-10-10T00:36:19.000Z
|
# technical
from .base_output import BaseOutput
# default
from .matplotlib_plot import MatplotlibPlot
from .extrema_printer import ExtremaPrinter
# with external dependencies
# the imports happen in the respective __init__ methods
# hack-ish, but works (and I am not aware of a more proper way to do so)
from .bokeh_plot import BokehPlot
from .neptune_logger import NeptuneLogger
from .tensorboard_logger import TensorboardLogger
from .tensorboard_tf_logger import TensorboardTFLogger
# with external dependencies
from . import matplotlib_subplots
| 24.545455
| 72
| 0.82963
|
from .base_output import BaseOutput
from .matplotlib_plot import MatplotlibPlot
from .extrema_printer import ExtremaPrinter
from .bokeh_plot import BokehPlot
from .neptune_logger import NeptuneLogger
from .tensorboard_logger import TensorboardLogger
from .tensorboard_tf_logger import TensorboardTFLogger
from . import matplotlib_subplots
| true
| true
|
f7037640bf3a851ac42c1c322ba7e2f5f9dc9b00
| 4,367
|
py
|
Python
|
test/functional/wallet-hd.py
|
danigarcia3k/Sikacoin
|
82ef600cfeed1d8f163df6071d8d080fb9dac6b0
|
[
"MIT"
] | null | null | null |
test/functional/wallet-hd.py
|
danigarcia3k/Sikacoin
|
82ef600cfeed1d8f163df6071d8d080fb9dac6b0
|
[
"MIT"
] | null | null | null |
test/functional/wallet-hd.py
|
danigarcia3k/Sikacoin
|
82ef600cfeed1d8f163df6071d8d080fb9dac6b0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2016 The Sikacoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test Hierarchical Deterministic wallet function."""
from test_framework.test_framework import SikacoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes_bi,
)
import shutil
class WalletHDTest(SikacoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [['-usehd=0'], ['-usehd=1', '-keypool=0']]
def run_test (self):
tmpdir = self.options.tmpdir
# Make sure can't switch off usehd after wallet creation
self.stop_node(1)
self.assert_start_raises_init_error(1, ['-usehd=0'], 'already existing HD wallet')
self.start_node(1)
connect_nodes_bi(self.nodes, 0, 1)
# Make sure we use hd, keep masterkeyid
masterkeyid = self.nodes[1].getwalletinfo()['hdmasterkeyid']
assert_equal(len(masterkeyid), 40)
# create an internal key
change_addr = self.nodes[1].getrawchangeaddress()
change_addrV= self.nodes[1].validateaddress(change_addr)
assert_equal(change_addrV["hdkeypath"], "m/0'/1'/0'") #first internal child key
# Import a non-HD private key in the HD wallet
non_hd_add = self.nodes[0].getnewaddress()
self.nodes[1].importprivkey(self.nodes[0].dumpprivkey(non_hd_add))
# This should be enough to keep the master key and the non-HD key
self.nodes[1].backupwallet(tmpdir + "/hd.bak")
#self.nodes[1].dumpwallet(tmpdir + "/hd.dump")
# Derive some HD addresses and remember the last
# Also send funds to each add
self.nodes[0].generate(101)
hd_add = None
num_hd_adds = 300
for i in range(num_hd_adds):
hd_add = self.nodes[1].getnewaddress()
hd_info = self.nodes[1].validateaddress(hd_add)
assert_equal(hd_info["hdkeypath"], "m/0'/0'/"+str(i+1)+"'")
assert_equal(hd_info["hdmasterkeyid"], masterkeyid)
self.nodes[0].sendtoaddress(hd_add, 1)
self.nodes[0].generate(1)
self.nodes[0].sendtoaddress(non_hd_add, 1)
self.nodes[0].generate(1)
# create an internal key (again)
change_addr = self.nodes[1].getrawchangeaddress()
change_addrV= self.nodes[1].validateaddress(change_addr)
assert_equal(change_addrV["hdkeypath"], "m/0'/1'/1'") #second internal child key
self.sync_all()
assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)
self.log.info("Restore backup ...")
self.stop_node(1)
# we need to delete the complete regtest directory
        # otherwise node1 would auto-recover all funds and flag the keypool keys as used
shutil.rmtree(tmpdir + "/node1/regtest/blocks")
shutil.rmtree(tmpdir + "/node1/regtest/chainstate")
shutil.copyfile(tmpdir + "/hd.bak", tmpdir + "/node1/regtest/wallet.dat")
self.start_node(1)
# Assert that derivation is deterministic
hd_add_2 = None
for _ in range(num_hd_adds):
hd_add_2 = self.nodes[1].getnewaddress()
hd_info_2 = self.nodes[1].validateaddress(hd_add_2)
assert_equal(hd_info_2["hdkeypath"], "m/0'/0'/"+str(_+1)+"'")
assert_equal(hd_info_2["hdmasterkeyid"], masterkeyid)
assert_equal(hd_add, hd_add_2)
connect_nodes_bi(self.nodes, 0, 1)
self.sync_all()
# Needs rescan
self.stop_node(1)
self.start_node(1, extra_args=self.extra_args[1] + ['-rescan'])
assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)
        # send a tx and make sure it's using the internal chain for the change output
txid = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
outs = self.nodes[1].decoderawtransaction(self.nodes[1].gettransaction(txid)['hex'])['vout']
keypath = ""
for out in outs:
if out['value'] != 1:
keypath = self.nodes[1].validateaddress(out['scriptPubKey']['addresses'][0])['hdkeypath']
assert_equal(keypath[0:7], "m/0'/1'")
if __name__ == '__main__':
WalletHDTest().main ()
| 41.198113
| 105
| 0.642317
|
from test_framework.test_framework import SikacoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes_bi,
)
import shutil
class WalletHDTest(SikacoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [['-usehd=0'], ['-usehd=1', '-keypool=0']]
def run_test (self):
tmpdir = self.options.tmpdir
self.stop_node(1)
self.assert_start_raises_init_error(1, ['-usehd=0'], 'already existing HD wallet')
self.start_node(1)
connect_nodes_bi(self.nodes, 0, 1)
# Make sure we use hd, keep masterkeyid
masterkeyid = self.nodes[1].getwalletinfo()['hdmasterkeyid']
assert_equal(len(masterkeyid), 40)
# create an internal key
change_addr = self.nodes[1].getrawchangeaddress()
change_addrV= self.nodes[1].validateaddress(change_addr)
assert_equal(change_addrV["hdkeypath"], "m/0'/1'/0'")
non_hd_add = self.nodes[0].getnewaddress()
self.nodes[1].importprivkey(self.nodes[0].dumpprivkey(non_hd_add))
self.nodes[1].backupwallet(tmpdir + "/hd.bak")
self.nodes[0].generate(101)
hd_add = None
num_hd_adds = 300
for i in range(num_hd_adds):
hd_add = self.nodes[1].getnewaddress()
hd_info = self.nodes[1].validateaddress(hd_add)
assert_equal(hd_info["hdkeypath"], "m/0'/0'/"+str(i+1)+"'")
assert_equal(hd_info["hdmasterkeyid"], masterkeyid)
self.nodes[0].sendtoaddress(hd_add, 1)
self.nodes[0].generate(1)
self.nodes[0].sendtoaddress(non_hd_add, 1)
self.nodes[0].generate(1)
# create an internal key (again)
change_addr = self.nodes[1].getrawchangeaddress()
change_addrV= self.nodes[1].validateaddress(change_addr)
assert_equal(change_addrV["hdkeypath"], "m/0'/1'/1'")
self.sync_all()
assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)
self.log.info("Restore backup ...")
self.stop_node(1)
shutil.rmtree(tmpdir + "/node1/regtest/blocks")
shutil.rmtree(tmpdir + "/node1/regtest/chainstate")
shutil.copyfile(tmpdir + "/hd.bak", tmpdir + "/node1/regtest/wallet.dat")
self.start_node(1)
hd_add_2 = None
for _ in range(num_hd_adds):
hd_add_2 = self.nodes[1].getnewaddress()
hd_info_2 = self.nodes[1].validateaddress(hd_add_2)
assert_equal(hd_info_2["hdkeypath"], "m/0'/0'/"+str(_+1)+"'")
assert_equal(hd_info_2["hdmasterkeyid"], masterkeyid)
assert_equal(hd_add, hd_add_2)
connect_nodes_bi(self.nodes, 0, 1)
self.sync_all()
# Needs rescan
self.stop_node(1)
self.start_node(1, extra_args=self.extra_args[1] + ['-rescan'])
assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)
        # send a tx and make sure it's using the internal chain for the change output
txid = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
outs = self.nodes[1].decoderawtransaction(self.nodes[1].gettransaction(txid)['hex'])['vout']
keypath = ""
for out in outs:
if out['value'] != 1:
keypath = self.nodes[1].validateaddress(out['scriptPubKey']['addresses'][0])['hdkeypath']
assert_equal(keypath[0:7], "m/0'/1'")
if __name__ == '__main__':
    WalletHDTest().main()
| true
| true
|
f70377c8ead6d814b75810083ddb336ecbc41f22
| 8,307
|
py
|
Python
|
commons/siam_mask/experiments/siammask_sharp/resnet.py
|
dajes/labelfficient
|
5dd0566224fb04285e690bf8576eacc04a7c87cd
|
[
"MIT"
] | 4
|
2021-02-09T08:18:18.000Z
|
2022-02-26T03:18:26.000Z
|
commons/siam_mask/experiments/siammask_sharp/resnet.py
|
dajes/labelfficient
|
5dd0566224fb04285e690bf8576eacc04a7c87cd
|
[
"MIT"
] | 1
|
2022-02-26T03:15:34.000Z
|
2022-02-26T03:20:27.000Z
|
commons/siam_mask/experiments/siammask_sharp/resnet.py
|
dajes/labelfficient
|
5dd0566224fb04285e690bf8576eacc04a7c87cd
|
[
"MIT"
] | 2
|
2021-06-06T16:39:57.000Z
|
2021-06-06T16:48:32.000Z
|
import torch.nn as nn
import torch
from torch.autograd import Variable
import math
import torch.utils.model_zoo as model_zoo
from commons.siam_mask.models.features import Features
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(Features):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
# padding = (2 - stride) + (dilation // 2 - 1)
padding = 2 - stride
        assert stride == 1 or dilation == 1, "at least one of stride and dilation must be 1"
if dilation > 1:
padding = dilation
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=padding, bias=False, dilation=dilation)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
if out.size() != residual.size():
print(out.size(), residual.size())
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, layer4=False, layer3=False):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=0, # 3
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2) # 31x31, 15x15
self.feature_size = 128 * block.expansion
if layer3:
self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2) # 15x15, 7x7
self.feature_size = (256 + 128) * block.expansion
else:
self.layer3 = lambda x:x # identity
if layer4:
self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4) # 7x7, 3x3
self.feature_size = 512 * block.expansion
else:
self.layer4 = lambda x:x # identity
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
downsample = None
dd = dilation
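        # dd: the dilation actually applied to the downsample conv and the first block of this layer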
if stride != 1 or self.inplanes != planes * block.expansion:
if stride == 1 and dilation == 1:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
else:
if dilation > 1:
dd = dilation // 2
padding = dd
else:
dd = 1
padding = 0
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=3, stride=stride, bias=False,
padding=padding, dilation=dd),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
# layers.append(block(self.inplanes, planes, stride, downsample, dilation=dilation))
layers.append(block(self.inplanes, planes, stride, downsample, dilation=dd))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, dilation=dilation))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
p0 = self.relu(x)
x = self.maxpool(p0)
p1 = self.layer1(x)
p2 = self.layer2(p1)
p3 = self.layer3(p2)
return p0, p1, p2, p3
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model
def resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
def resnet152(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
if __name__ == '__main__':
net = resnet50()
print(net)
net = net.cuda()
var = torch.FloatTensor(1,3,127,127).cuda()
var = Variable(var)
net(var)
print('*************')
var = torch.FloatTensor(1,3,255,255).cuda()
var = Variable(var)
net(var)
| 31.706107
| 100
| 0.589142
|
import torch.nn as nn
import torch
from torch.autograd import Variable
import math
import torch.utils.model_zoo as model_zoo
from commons.siam_mask.models.features import Features
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(Features):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
padding = 2 - stride
        assert stride == 1 or dilation == 1, "at least one of stride and dilation must be 1"
if dilation > 1:
padding = dilation
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=padding, bias=False, dilation=dilation)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
if out.size() != residual.size():
print(out.size(), residual.size())
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, layer4=False, layer3=False):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=0, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.feature_size = 128 * block.expansion
if layer3:
            self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
            self.feature_size = (256 + 128) * block.expansion
else:
self.layer3 = lambda x:x
if layer4:
            self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4)
            self.feature_size = 512 * block.expansion
else:
self.layer4 = lambda x:x
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
downsample = None
dd = dilation
if stride != 1 or self.inplanes != planes * block.expansion:
if stride == 1 and dilation == 1:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
else:
if dilation > 1:
dd = dilation // 2
padding = dd
else:
dd = 1
padding = 0
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=3, stride=stride, bias=False,
padding=padding, dilation=dd),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, dilation=dd))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, dilation=dilation))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
p0 = self.relu(x)
x = self.maxpool(p0)
p1 = self.layer1(x)
p2 = self.layer2(p1)
p3 = self.layer3(p2)
return p0, p1, p2, p3
def resnet18(pretrained=False, **kwargs):
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def resnet34(pretrained=False, **kwargs):
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
def resnet50(pretrained=False, **kwargs):
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model
def resnet101(pretrained=False, **kwargs):
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
def resnet152(pretrained=False, **kwargs):
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
if __name__ == '__main__':
net = resnet50()
print(net)
net = net.cuda()
var = torch.FloatTensor(1,3,127,127).cuda()
var = Variable(var)
net(var)
print('*************')
var = torch.FloatTensor(1,3,255,255).cuda()
var = Variable(var)
net(var)
| true
| true
|
f70378b1e6899af4a8706e20c4f8edf6329a186a
| 2,796
|
py
|
Python
|
tflib/distribute/distribute.py
|
AlexBlack2202/EigenGAN-Tensorflow
|
9668738852abdcd7161b64b7e6a074c7ebfea055
|
[
"MIT"
] | 302
|
2021-04-27T02:15:47.000Z
|
2022-03-13T07:51:07.000Z
|
tflib/distribute/distribute.py
|
gokulsg/EigenGAN-Tensorflow
|
86b21a47a824a2bb04a088c3e78b03d03a53735c
|
[
"MIT"
] | 7
|
2021-05-26T05:44:46.000Z
|
2021-12-28T02:38:47.000Z
|
tflib/distribute/distribute.py
|
gokulsg/EigenGAN-Tensorflow
|
86b21a47a824a2bb04a088c3e78b03d03a53735c
|
[
"MIT"
] | 34
|
2021-04-27T02:16:04.000Z
|
2022-01-28T12:18:17.000Z
|
import tensorflow as tf
from tensorflow.python.client import device_lib
def get_available_gpus():
local_device_protos = device_lib.list_local_devices()
return [x.name for x in local_device_protos if x.device_type == 'GPU']
gpus = get_available_gpus
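# note: this binds the function itself as an alias; call gpus() to list the GPU device names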
def split_nest(nest, num_or_size_splits, axis=0):
"""Split nested structure.
Examples
--------
>>> split_nest({'a': shape(10, 20), 'b': shape(4, 15)}, 2, axis=0)
>>> [{'a': shape(5, 20), 'b': shape(2, 15)}, {'a': shape(5, 20), 'b': shape(2, 15)}]
"""
flatten = tf.nest.flatten(nest)
split_flatten = [tf.split(x, num_or_size_splits, axis=axis) for x in flatten]
return [tf.nest.pack_sequence_as(nest, x) for x in zip(*split_flatten)]
def parameter_server_strategy_run(devices, fn, split_args, split_kwargs=None):
split_kwargs = [{}] * len(devices) if split_kwargs is None else split_kwargs
assert len(devices) == len(split_args) == len(split_kwargs)
split_returns = []
for device, args, kwargs in zip(devices, split_args, split_kwargs):
with tf.device(device):
args = args if isinstance(args, (list, tuple)) else (args,)
split_returns.append(fn(*args, **kwargs))
return split_returns
parellel_run = parameter_server_strategy_run
def average_gradients(tower_grads):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Parameters
----------
tower_grads:
List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns
-------
List of pairs of (gradient, variable) where the gradient has been averaged
across all towers.
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, _ in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(axis=0, values=grads)
grad = tf.reduce_mean(grad, 0)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
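# Hypothetical usage sketch (not part of the original module), assuming a
# TF1-style optimizer and a model_fn that returns a scalar loss:
#
#     tower_grads = []
#     for device, (x, y) in zip(gpus(), split_nest((images, labels), len(gpus()))):
#         with tf.device(device):
#             tower_grads.append(optimizer.compute_gradients(model_fn(x, y)))
#     train_op = optimizer.apply_gradients(average_gradients(tower_grads))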
| 33.285714
| 88
| 0.655222
|
import tensorflow as tf
from tensorflow.python.client import device_lib
def get_available_gpus():
local_device_protos = device_lib.list_local_devices()
return [x.name for x in local_device_protos if x.device_type == 'GPU']
gpus = get_available_gpus
def split_nest(nest, num_or_size_splits, axis=0):
flatten = tf.nest.flatten(nest)
split_flatten = [tf.split(x, num_or_size_splits, axis=axis) for x in flatten]
return [tf.nest.pack_sequence_as(nest, x) for x in zip(*split_flatten)]
def parameter_server_strategy_run(devices, fn, split_args, split_kwargs=None):
split_kwargs = [{}] * len(devices) if split_kwargs is None else split_kwargs
assert len(devices) == len(split_args) == len(split_kwargs)
split_returns = []
for device, args, kwargs in zip(devices, split_args, split_kwargs):
with tf.device(device):
args = args if isinstance(args, (list, tuple)) else (args,)
split_returns.append(fn(*args, **kwargs))
return split_returns
parellel_run = parameter_server_strategy_run
def average_gradients(tower_grads):
average_grads = []
for grad_and_vars in zip(*tower_grads):
grads = []
for g, _ in grad_and_vars:
expanded_g = tf.expand_dims(g, 0)
grads.append(expanded_g)
grad = tf.concat(axis=0, values=grads)
grad = tf.reduce_mean(grad, 0)
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
| true
| true
|
f7037a2ff7ed82e1cd626b834ee5784390b3252e
| 4,466
|
py
|
Python
|
test.py
|
pyEtherCAT/Test-Source
|
32e7f36873cf311580acc25ab76db589e209e479
|
[
"MIT"
] | 9
|
2018-09-16T01:00:08.000Z
|
2021-04-21T03:08:38.000Z
|
test.py
|
pyEtherCAT/Test-Source
|
32e7f36873cf311580acc25ab76db589e209e479
|
[
"MIT"
] | null | null | null |
test.py
|
pyEtherCAT/Test-Source
|
32e7f36873cf311580acc25ab76db589e209e479
|
[
"MIT"
] | 5
|
2018-05-24T08:41:45.000Z
|
2020-09-26T12:04:16.000Z
|
# import gevent.monkey
# gevent.monkey.patch_socket()
from pyEtherCAT import MasterEtherCAT
import time
import os
#============================================================================#
# A simple EtherCAT package for the C95.
# Ideally the mechanism would be built up after a deeper understanding of the
# individual packets, but since neither the explanations nor the experiments
# have caught up yet, for now this only collects the parts that can toggle
# GPIO on/off at high speed.
# Runs with Python 3 on Linux (including Raspberry Pi).
# sudo python3 test03.py
#============================================================================#
# Simple helper library: begin
#============================================================================#
def EtherCAT_Init(nic):
    cat = MasterEtherCAT.MasterEtherCAT(nic)  # give the address of the network card (NIC) to use
return cat
def EtherCAT_SetUp(cat):
    cat.EEPROM_SetUp(cat.ADP)  # EEPROM setup; normally no changes needed
    cat.EEPROM_Stasus(enable=0x00, command=0x04)  # EEPROM setup; normally no changes needed
    ADDR = 0x0120  # AL control register
    data = 0x0002  # 2h: request the Pre-Operational state
cat.APWR(IDX=0x00, ADP=cat.ADP, ADO=ADDR, DATA=[
data & 0xFF, (data >> 8) & 0xFF])
(DATA, WKC) = cat.socket_read()
print("[0x{:04x}]= 0x{:04x}".format(ADDR, DATA[0] | DATA[1] << 8))
    ADDR = 0x0120  # AL control register
    data = 0x0002  # 2h: request the Pre-Operational state
cat.APWR(IDX=0x00, ADP=cat.ADP, ADO=ADDR, DATA=[
data & 0xFF, (data >> 8) & 0xFF])
(DATA, WKC) = cat.socket_read()
print("[0x{:04x}]= 0x{:04x}".format(ADDR, DATA[0] | DATA[1] << 8))
    ADDR = 0x0120  # AL control register
    data = 0x0004  # 4h: request the Safe-Operational state
cat.APWR(IDX=0x00, ADP=cat.ADP, ADO=ADDR, DATA=[
data & 0xFF, (data >> 8) & 0xFF])
(DATA, WKC) = cat.socket_read()
print("[0x{:04x}]= 0x{:04x}".format(ADDR, DATA[0] | DATA[1] << 8))
    ADDR = 0x0120  # AL control register
    data = 0x0008  # 8h: request the Operational state
cat.APWR(IDX=0x00, ADP=cat.ADP, ADO=ADDR, DATA=[
data & 0xFF, (data >> 8) & 0xFF])
(DATA, WKC) = cat.socket_read()
print("[0x{:04x}]= 0x{:04x}".format(ADDR, DATA[0] | DATA[1] << 8))
def EtherCAT_GPIOMode(cat, data):
    ADDR = 0x0F00  # digital I/O output data register
    # data = 0x00FF  # output data
cat.APWR(IDX=0x00, ADP=cat.ADP, ADO=ADDR, DATA=[
data & 0xFF, (data >> 8) & 0xFF])
(DATA, WKC) = cat.socket_read()
print("[0x{:04x}]= 0x{:04x}".format(ADDR, DATA[0] | DATA[1] << 8))
def EtherCAT_GPIO_Out(cat, data):
ADDR = 0x0F10
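    # 0x0F10: output register used here for GPIO writes (address taken from the
    # original author's register map; not verified against the ESC datasheet)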
cat.APWR(IDX=0x00, ADP=cat.ADP, ADO=ADDR, DATA=[data & 0xFF, (data >> 8) & 0xFF])
#(DATA,WKC) = cat.socket_read()
#============================================================================#
# Simple helper library: end
#============================================================================#
def main():
cat = EtherCAT_Init("eth0") # EtherCATのネットワーク初期設定
#-- EtherCATのステートマシンを実行に移す処理
cat.ADP = 0x0000 # PCから1台目は0、2台目以降は-1していく
EtherCAT_SetUp(cat) # EtherCATスレーブの初期設定
EtherCAT_GPIOMode(cat, 0xFFFF) # EtherCATスレーブのGPIO方向設定 0:入力 1:出力
#-- EtherCATのステートマシンを実行に移す処理
cat.ADP = 0x0000 - 1 # 例 これは2台目 繋がってなければ必要ない
EtherCAT_SetUp(cat) # EtherCATスレーブの初期設定
EtherCAT_GPIOMode(cat, 0xFFFF) # EtherCATスレーブのGPIO方向設定 0:入力 1:出力
#-- EtherCATのステートマシンを実行に移す処理
cat.ADP = 0x0000 - 2 # 例 これは3台目 繋がってなければ必要ない
EtherCAT_SetUp(cat) # EtherCATスレーブの初期設定
EtherCAT_GPIOMode(cat, 0xFFFF) # EtherCATスレーブのGPIO方向設定 0:入力 1:出力
# -- 1台目のLEDをシフトする
TIME = 0.1
cat.ADP = 0x0000
flag = 0
CNT = 0
try:
while 1:
# time.sleep(TIME)
cat.ADP = 0x0000 - 0
EtherCAT_GPIO_Out(cat, 0xFFFF)
time.sleep(TIME)
cat.ADP = 0x0000 - 1
EtherCAT_GPIO_Out(cat, 0xFFFF)
time.sleep(TIME)
cat.ADP = 0x0000 - 2
EtherCAT_GPIO_Out(cat, 0xFFFF)
time.sleep(TIME)
# for i in range(16):
# time.sleep(TIME)
# EtherCAT_GPIO_Out(cat,0x0001<<i);
# for i in range(3):
cat.ADP = 0x0000 - 0
EtherCAT_GPIO_Out(cat, 0x0000)
time.sleep(TIME)
cat.ADP = 0x0000 - 1
EtherCAT_GPIO_Out(cat, 0x0000)
time.sleep(TIME)
cat.ADP = 0x0000 - 2
EtherCAT_GPIO_Out(cat, 0x0000)
time.sleep(TIME)
# EtherCAT_GPIO_Out(cat,0x0000);
# for i in range(0xFFFF):
# EtherCAT_GPIO_Out(cat,i);
except KeyboardInterrupt:
EtherCAT_GPIO_Out(cat, 0x0000)
print("")
print("End.")
if __name__ == "__main__":
main()
| 34.620155
| 85
| 0.537841
|
from pyEtherCAT import MasterEtherCAT
import time
import os
def EtherCAT_Init(nic):
    cat = MasterEtherCAT.MasterEtherCAT(nic)
    return cat
def EtherCAT_SetUp(cat):
    cat.EEPROM_SetUp(cat.ADP)
    cat.EEPROM_Stasus(enable=0x00, command=0x04)
    ADDR = 0x0120
    data = 0x0002
    cat.APWR(IDX=0x00, ADP=cat.ADP, ADO=ADDR, DATA=[
data & 0xFF, (data >> 8) & 0xFF])
(DATA, WKC) = cat.socket_read()
print("[0x{:04x}]= 0x{:04x}".format(ADDR, DATA[0] | DATA[1] << 8))
    ADDR = 0x0120
    data = 0x0002
    cat.APWR(IDX=0x00, ADP=cat.ADP, ADO=ADDR, DATA=[
data & 0xFF, (data >> 8) & 0xFF])
(DATA, WKC) = cat.socket_read()
print("[0x{:04x}]= 0x{:04x}".format(ADDR, DATA[0] | DATA[1] << 8))
    ADDR = 0x0120
    data = 0x0004
    cat.APWR(IDX=0x00, ADP=cat.ADP, ADO=ADDR, DATA=[
data & 0xFF, (data >> 8) & 0xFF])
(DATA, WKC) = cat.socket_read()
print("[0x{:04x}]= 0x{:04x}".format(ADDR, DATA[0] | DATA[1] << 8))
    ADDR = 0x0120
    data = 0x0008
    cat.APWR(IDX=0x00, ADP=cat.ADP, ADO=ADDR, DATA=[
data & 0xFF, (data >> 8) & 0xFF])
(DATA, WKC) = cat.socket_read()
print("[0x{:04x}]= 0x{:04x}".format(ADDR, DATA[0] | DATA[1] << 8))
def EtherCAT_GPIOMode(cat, data):
    ADDR = 0x0F00
    cat.APWR(IDX=0x00, ADP=cat.ADP, ADO=ADDR, DATA=[
data & 0xFF, (data >> 8) & 0xFF])
(DATA, WKC) = cat.socket_read()
print("[0x{:04x}]= 0x{:04x}".format(ADDR, DATA[0] | DATA[1] << 8))
def EtherCAT_GPIO_Out(cat, data):
ADDR = 0x0F10
cat.APWR(IDX=0x00, ADP=cat.ADP, ADO=ADDR, DATA=[data & 0xFF, (data >> 8) & 0xFF])
def main():
cat = EtherCAT_Init("eth0") cat.ADP = 0x0000 EtherCAT_SetUp(cat) EtherCAT_GPIOMode(cat, 0xFFFF)
cat.ADP = 0x0000 - 1 EtherCAT_SetUp(cat) EtherCAT_GPIOMode(cat, 0xFFFF)
cat.ADP = 0x0000 - 2 EtherCAT_SetUp(cat) EtherCAT_GPIOMode(cat, 0xFFFF)
TIME = 0.1
cat.ADP = 0x0000
flag = 0
CNT = 0
try:
while 1:
cat.ADP = 0x0000 - 0
EtherCAT_GPIO_Out(cat, 0xFFFF)
time.sleep(TIME)
cat.ADP = 0x0000 - 1
EtherCAT_GPIO_Out(cat, 0xFFFF)
time.sleep(TIME)
cat.ADP = 0x0000 - 2
EtherCAT_GPIO_Out(cat, 0xFFFF)
time.sleep(TIME)
cat.ADP = 0x0000 - 0
EtherCAT_GPIO_Out(cat, 0x0000)
time.sleep(TIME)
cat.ADP = 0x0000 - 1
EtherCAT_GPIO_Out(cat, 0x0000)
time.sleep(TIME)
cat.ADP = 0x0000 - 2
EtherCAT_GPIO_Out(cat, 0x0000)
time.sleep(TIME)
except KeyboardInterrupt:
EtherCAT_GPIO_Out(cat, 0x0000)
print("")
print("End.")
if __name__ == "__main__":
main()
| true
| true
|
f7037afae16524014eb92d458ac6b7aedaa8b9b9
| 1,107
|
py
|
Python
|
porcupine/__init__.py
|
Akuli/editor
|
cf98c538e75a07d825f9066e25a3752fdf7c3c29
|
[
"MIT"
] | 1
|
2021-07-28T10:09:26.000Z
|
2021-07-28T10:09:26.000Z
|
porcupine/__init__.py
|
Akuli/editor
|
cf98c538e75a07d825f9066e25a3752fdf7c3c29
|
[
"MIT"
] | null | null | null |
porcupine/__init__.py
|
Akuli/editor
|
cf98c538e75a07d825f9066e25a3752fdf7c3c29
|
[
"MIT"
] | null | null | null |
"""Porcupine is a simple editor.
You are probably reading this because you want to learn how Porcupine
works or write fun plugins for it. I recommend getting started with the
plugin API documentation:
https://akuli.github.io/porcupine/
"""
import sys
import appdirs
version_info = (0, 99, 2) # this is updated with scripts/release.py
__version__ = "%d.%d.%d" % version_info
__author__ = "Akuli"
__copyright__ = "Copyright (c) 2017-2022 Akuli"
__license__ = "MIT"
if sys.platform in {"win32", "darwin"}:
# these platforms like path names like "Program Files" or "Application Support"
dirs = appdirs.AppDirs("Porcupine", "Akuli")
else:
dirs = appdirs.AppDirs("porcupine", "akuli")
# Must be after creating dirs
from porcupine import _state
# TODO: document get_*_panedwindow
get_main_window = _state.get_main_window
get_parsed_args = _state.get_parsed_args
get_horizontal_panedwindow = _state.get_horizontal_panedwindow
get_vertical_panedwindow = _state.get_vertical_panedwindow
get_tab_manager = _state.get_tab_manager
filedialog_kwargs = _state.filedialog_kwargs
quit = _state.quit
| 29.918919
| 83
| 0.775068
|
import sys
import appdirs
version_info = (0, 99, 2)
__version__ = "%d.%d.%d" % version_info
__author__ = "Akuli"
__copyright__ = "Copyright (c) 2017-2022 Akuli"
__license__ = "MIT"
if sys.platform in {"win32", "darwin"}:
dirs = appdirs.AppDirs("Porcupine", "Akuli")
else:
dirs = appdirs.AppDirs("porcupine", "akuli")
from porcupine import _state
get_main_window = _state.get_main_window
get_parsed_args = _state.get_parsed_args
get_horizontal_panedwindow = _state.get_horizontal_panedwindow
get_vertical_panedwindow = _state.get_vertical_panedwindow
get_tab_manager = _state.get_tab_manager
filedialog_kwargs = _state.filedialog_kwargs
quit = _state.quit
| true
| true
|
f7037b3a3daadb1935f85a9a60ff3076e2dc7967
| 446
|
py
|
Python
|
loglizer/models/__init__.py
|
paulinko/loglizer
|
d3910370fa6752ef77232bef79ddb504ef245a9d
|
[
"MIT"
] | null | null | null |
loglizer/models/__init__.py
|
paulinko/loglizer
|
d3910370fa6752ef77232bef79ddb504ef245a9d
|
[
"MIT"
] | null | null | null |
loglizer/models/__init__.py
|
paulinko/loglizer
|
d3910370fa6752ef77232bef79ddb504ef245a9d
|
[
"MIT"
] | null | null | null |
from .PCA import PCA
from .InvariantsMiner import InvariantsMiner
from .LogClustering import LogClustering
from .LR import LR
from .SVM import SVM
from .DecisionTree import DecisionTree
from .IsolationForest import IsolationForest
from .DeepLog import DeepLog
from .Autoencoder import Autoencoder
from .AutoencoderLSTM import AutoencoderLSTM
from .AutoencoderCascade import AutoencoderCascade
from .AutoencoderConvolutional import AutoencoderConv
| 37.166667
| 53
| 0.867713
|
from .PCA import PCA
from .InvariantsMiner import InvariantsMiner
from .LogClustering import LogClustering
from .LR import LR
from .SVM import SVM
from .DecisionTree import DecisionTree
from .IsolationForest import IsolationForest
from .DeepLog import DeepLog
from .Autoencoder import Autoencoder
from .AutoencoderLSTM import AutoencoderLSTM
from .AutoencoderCascade import AutoencoderCascade
from .AutoencoderConvolutional import AutoencoderConv
| true
| true
|
f7037c6d6ec327ebf3ef4a26aa0079107e008b3c
| 1,165
|
py
|
Python
|
semantic_aware_models/models/classification/abstract_classifier.py
|
ITAINNOVA/SAME
|
d46dda98753fcb3606e04c3db2d20c9e700140e8
|
[
"OML"
] | null | null | null |
semantic_aware_models/models/classification/abstract_classifier.py
|
ITAINNOVA/SAME
|
d46dda98753fcb3606e04c3db2d20c9e700140e8
|
[
"OML"
] | null | null | null |
semantic_aware_models/models/classification/abstract_classifier.py
|
ITAINNOVA/SAME
|
d46dda98753fcb3606e04c3db2d20c9e700140e8
|
[
"OML"
] | 1
|
2020-03-19T12:41:54.000Z
|
2020-03-19T12:41:54.000Z
|
import abc
class AbstractClassifier:
""" Abstract class with specific methods for classifier models (training, validation and test) """
def __init__(self):
pass
@abc.abstractmethod
def train(self, config, train_data):
"""
Classifier training.
:param config: Model configuration.
:param train_data: Train dataset with the textual information of each item and its label.
:return: A model trained with train_data according to config.
"""
pass
@abc.abstractmethod
def validation(self, config, val_data):
"""
:param config: Model configuration.
:param val_data: Validation dataset with the textual information of each item and its label.
:return: Validation metrics
"""
pass
@abc.abstractmethod
def test(self, config, test_data):
"""
Classifier testing.
:param config: Model configuration.
:param test_data: Test dataset with the textual information of each item and its label.
:return: Predictions of the model in the test_data, according to config.
"""
pass
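# Hypothetical minimal subclass (illustration only, not part of this module),
# assuming each dataset is a list of (text, label) pairs:
#
#     class MajorityClassClassifier(AbstractClassifier):
#         def train(self, config, train_data):
#             labels = [label for _, label in train_data]
#             self.majority = max(set(labels), key=labels.count)
#         def validation(self, config, val_data):
#             return sum(label == self.majority for _, label in val_data) / len(val_data)
#         def test(self, config, test_data):
#             return [self.majority for _ in test_data]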
| 27.738095
| 102
| 0.64206
|
import abc
class AbstractClassifier:
def __init__(self):
pass
@abc.abstractmethod
def train(self, config, train_data):
pass
@abc.abstractmethod
def validation(self, config, val_data):
pass
@abc.abstractmethod
def test(self, config, test_data):
pass
| true
| true
|
f7037dd7dfb7d354c6a820cd68cc1823c6415a56
| 268
|
py
|
Python
|
frappe-bench/apps/erpnext/erpnext/patches/v8_7/fix_purchase_receipt_status.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/patches/v8_7/fix_purchase_receipt_status.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/patches/v8_7/fix_purchase_receipt_status.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
import frappe
def execute():
    # There is no longer a status called "Submitted"; an old issue used to set
    # it, and that was fixed in this commit.
frappe.db.sql("""
update
`tabPurchase Receipt`
set
status = 'To Bill'
where
status = 'Submitted'""")
| 22.333333
| 79
| 0.697761
|
import frappe
def execute():
frappe.db.sql("""
update
`tabPurchase Receipt`
set
status = 'To Bill'
where
status = 'Submitted'""")
| true
| true
|
f7037e199865a1b95556700d0a7781ddb980ced3
| 67,271
|
py
|
Python
|
tests/store/model_registry/test_sqlalchemy_store.py
|
abhiramr/mlflow
|
2bbdc20f2d90d551fb7d40f982f2f799da9feca8
|
[
"Apache-2.0"
] | null | null | null |
tests/store/model_registry/test_sqlalchemy_store.py
|
abhiramr/mlflow
|
2bbdc20f2d90d551fb7d40f982f2f799da9feca8
|
[
"Apache-2.0"
] | null | null | null |
tests/store/model_registry/test_sqlalchemy_store.py
|
abhiramr/mlflow
|
2bbdc20f2d90d551fb7d40f982f2f799da9feca8
|
[
"Apache-2.0"
] | null | null | null |
import os
import unittest
import tempfile
from unittest import mock
import uuid
import mlflow
import mlflow.db
import mlflow.store.db.base_sql_model
from mlflow.entities.model_registry import (
RegisteredModel,
ModelVersion,
RegisteredModelTag,
ModelVersionTag,
)
from mlflow.exceptions import MlflowException
from mlflow.protos.databricks_pb2 import (
ErrorCode,
RESOURCE_DOES_NOT_EXIST,
INVALID_PARAMETER_VALUE,
RESOURCE_ALREADY_EXISTS,
)
from mlflow.store.model_registry.sqlalchemy_store import SqlAlchemyStore
from tests.helper_functions import random_str
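# URI prefix only; setUp() appends a temp-file path to form the full sqlite URL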
DB_URI = "sqlite:///"
class TestSqlAlchemyStoreSqlite(unittest.TestCase):
def _get_store(self, db_uri=""):
return SqlAlchemyStore(db_uri)
def setUp(self):
self.maxDiff = None # print all differences on assert failures
fd, self.temp_dbfile = tempfile.mkstemp()
# Close handle immediately so that we can remove the file later on in Windows
os.close(fd)
self.db_url = "%s%s" % (DB_URI, self.temp_dbfile)
self.store = self._get_store(self.db_url)
def tearDown(self):
mlflow.store.db.base_sql_model.Base.metadata.drop_all(self.store.engine)
os.remove(self.temp_dbfile)
def _rm_maker(self, name, tags=None, description=None):
return self.store.create_registered_model(name, tags, description)
def _mv_maker(
self,
name,
source="path/to/source",
run_id=uuid.uuid4().hex,
tags=None,
run_link=None,
description=None,
):
return self.store.create_model_version(
name, source, run_id, tags, run_link=run_link, description=description
)
def _extract_latest_by_stage(self, latest_versions):
return {mvd.current_stage: mvd.version for mvd in latest_versions}
def test_create_registered_model(self):
name = random_str() + "abCD"
rm1 = self._rm_maker(name)
self.assertEqual(rm1.name, name)
self.assertEqual(rm1.description, None)
# error on duplicate
with self.assertRaisesRegex(
MlflowException, rf"Registered Model \(name={name}\) already exists"
) as exception_context:
self._rm_maker(name)
assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_ALREADY_EXISTS)
# slightly different name is ok
for name2 in [name + "extra", name.lower(), name.upper(), name + name]:
rm2 = self._rm_maker(name2)
self.assertEqual(rm2.name, name2)
# test create model with tags
name2 = random_str() + "tags"
tags = [
RegisteredModelTag("key", "value"),
RegisteredModelTag("anotherKey", "some other value"),
]
rm2 = self._rm_maker(name2, tags)
rmd2 = self.store.get_registered_model(name2)
self.assertEqual(rm2.name, name2)
self.assertEqual(rm2.tags, {tag.key: tag.value for tag in tags})
self.assertEqual(rmd2.name, name2)
self.assertEqual(rmd2.tags, {tag.key: tag.value for tag in tags})
# create with description
name3 = random_str() + "-description"
description = "the best model ever"
rm3 = self._rm_maker(name3, description=description)
rmd3 = self.store.get_registered_model(name3)
self.assertEqual(rm3.name, name3)
self.assertEqual(rm3.description, description)
self.assertEqual(rmd3.name, name3)
self.assertEqual(rmd3.description, description)
# invalid model name will fail
with self.assertRaisesRegex(
MlflowException, r"Registered model name cannot be empty"
) as exception_context:
self._rm_maker(None)
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
with self.assertRaisesRegex(
MlflowException, r"Registered model name cannot be empty"
) as exception_context:
self._rm_maker("")
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
def test_get_registered_model(self):
name = "model_1"
tags = [
RegisteredModelTag("key", "value"),
RegisteredModelTag("anotherKey", "some other value"),
]
# use fake clock
with mock.patch("time.time") as mock_time:
mock_time.return_value = 1234
rm = self._rm_maker(name, tags)
self.assertEqual(rm.name, name)
rmd = self.store.get_registered_model(name=name)
self.assertEqual(rmd.name, name)
self.assertEqual(rmd.creation_timestamp, 1234000)
self.assertEqual(rmd.last_updated_timestamp, 1234000)
self.assertEqual(rmd.description, None)
self.assertEqual(rmd.latest_versions, [])
self.assertEqual(rmd.tags, {tag.key: tag.value for tag in tags})
def test_update_registered_model(self):
name = "model_for_update_RM"
rm1 = self._rm_maker(name)
rmd1 = self.store.get_registered_model(name=name)
self.assertEqual(rm1.name, name)
self.assertEqual(rmd1.description, None)
# update description
rm2 = self.store.update_registered_model(name=name, description="test model")
rmd2 = self.store.get_registered_model(name=name)
self.assertEqual(rm2.name, "model_for_update_RM")
self.assertEqual(rmd2.name, "model_for_update_RM")
self.assertEqual(rmd2.description, "test model")
def test_rename_registered_model(self):
original_name = "original name"
new_name = "new name"
self._rm_maker(original_name)
self._mv_maker(original_name)
self._mv_maker(original_name)
rm = self.store.get_registered_model(original_name)
mv1 = self.store.get_model_version(original_name, 1)
mv2 = self.store.get_model_version(original_name, 2)
self.assertEqual(rm.name, original_name)
self.assertEqual(mv1.name, original_name)
self.assertEqual(mv2.name, original_name)
# test renaming registered model also updates its model versions
self.store.rename_registered_model(original_name, new_name)
rm = self.store.get_registered_model(new_name)
mv1 = self.store.get_model_version(new_name, 1)
mv2 = self.store.get_model_version(new_name, 2)
self.assertEqual(rm.name, new_name)
self.assertEqual(mv1.name, new_name)
self.assertEqual(mv2.name, new_name)
# test accessing the model with the old name will fail
with self.assertRaisesRegex(
MlflowException, rf"Registered Model with name={original_name} not found"
) as exception_context:
self.store.get_registered_model(original_name)
assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
        # test that creating another model with the now-freed original name is ok
self._rm_maker(original_name)
# cannot rename model to conflict with an existing model
with self.assertRaisesRegex(
MlflowException, rf"Registered Model \(name={original_name}\) already exists"
) as exception_context:
self.store.rename_registered_model(new_name, original_name)
assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_ALREADY_EXISTS)
# invalid model name will fail
with self.assertRaisesRegex(
MlflowException, r"Registered model name cannot be empty"
) as exception_context:
self.store.rename_registered_model(original_name, None)
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
with self.assertRaisesRegex(
MlflowException, r"Registered model name cannot be empty"
) as exception_context:
self.store.rename_registered_model(original_name, "")
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
def test_delete_registered_model(self):
name = "model_for_delete_RM"
self._rm_maker(name)
self._mv_maker(name)
rm1 = self.store.get_registered_model(name=name)
mv1 = self.store.get_model_version(name, 1)
self.assertEqual(rm1.name, name)
self.assertEqual(mv1.name, name)
# delete model
self.store.delete_registered_model(name=name)
# cannot get model
with self.assertRaisesRegex(
MlflowException, rf"Registered Model with name={name} not found"
) as exception_context:
self.store.get_registered_model(name=name)
assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
        # cannot update a deleted model
with self.assertRaisesRegex(
MlflowException, rf"Registered Model with name={name} not found"
) as exception_context:
self.store.update_registered_model(name=name, description="deleted")
assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
# cannot delete it again
with self.assertRaisesRegex(
MlflowException, rf"Registered Model with name={name} not found"
) as exception_context:
self.store.delete_registered_model(name=name)
assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
# model versions are cascade deleted with the registered model
with self.assertRaisesRegex(
MlflowException, rf"Model Version \(name={name}, version=1\) not found"
) as exception_context:
self.store.get_model_version(name, 1)
assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
def _list_registered_models(self, page_token=None, max_results=10):
result = self.store.list_registered_models(max_results, page_token)
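        # replace entries in place so the returned PagedList keeps its pagination token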
for idx in range(len(result)):
result[idx] = result[idx].name
return result
def test_list_registered_model(self):
self._rm_maker("A")
registered_models = self.store.list_registered_models(max_results=10, page_token=None)
self.assertEqual(len(registered_models), 1)
self.assertEqual(registered_models[0].name, "A")
self.assertIsInstance(registered_models[0], RegisteredModel)
self._rm_maker("B")
self.assertEqual(set(self._list_registered_models()), set(["A", "B"]))
self._rm_maker("BB")
self._rm_maker("BA")
self._rm_maker("AB")
self._rm_maker("BBC")
self.assertEqual(
set(self._list_registered_models()), set(["A", "B", "BB", "BA", "AB", "BBC"])
)
# list should not return deleted models
self.store.delete_registered_model(name="BA")
self.store.delete_registered_model(name="B")
self.assertEqual(set(self._list_registered_models()), set(["A", "BB", "AB", "BBC"]))
def test_list_registered_model_paginated_last_page(self):
rms = [self._rm_maker("RM{:03}".format(i)).name for i in range(50)]
# test flow with fixed max_results
returned_rms = []
result = self._list_registered_models(page_token=None, max_results=25)
returned_rms.extend(result)
while result.token:
result = self._list_registered_models(page_token=result.token, max_results=25)
self.assertEqual(len(result), 25)
returned_rms.extend(result)
self.assertEqual(result.token, None)
self.assertEqual(set(rms), set(returned_rms))
def test_list_registered_model_paginated_returns_in_correct_order(self):
rms = [self._rm_maker("RM{:03}".format(i)).name for i in range(50)]
# test that pagination will return all valid results in sorted order
# by name ascending
result = self._list_registered_models(max_results=5)
self.assertNotEqual(result.token, None)
self.assertEqual(result, rms[0:5])
result = self._list_registered_models(page_token=result.token, max_results=10)
self.assertNotEqual(result.token, None)
self.assertEqual(result, rms[5:15])
result = self._list_registered_models(page_token=result.token, max_results=20)
self.assertNotEqual(result.token, None)
self.assertEqual(result, rms[15:35])
result = self._list_registered_models(page_token=result.token, max_results=100)
# assert that page token is None
self.assertEqual(result.token, None)
self.assertEqual(result, rms[35:])
def test_list_registered_model_paginated_errors(self):
rms = [self._rm_maker("RM{:03}".format(i)).name for i in range(50)]
# test that providing a completely invalid page token throws
with self.assertRaisesRegex(
MlflowException, r"Invalid page token, could not base64-decode"
) as exception_context:
self._list_registered_models(page_token="evilhax", max_results=20)
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
# test that providing too large of a max_results throws
with self.assertRaisesRegex(
MlflowException, r"Invalid value for request parameter max_results"
) as exception_context:
self._list_registered_models(page_token="evilhax", max_results=1e15)
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
# list should not return deleted models
self.store.delete_registered_model(name="RM{0:03}".format(0))
self.assertEqual(set(self._list_registered_models(max_results=100)), set(rms[1:]))
def test_get_latest_versions(self):
name = "test_for_latest_versions"
self._rm_maker(name)
rmd1 = self.store.get_registered_model(name=name)
self.assertEqual(rmd1.latest_versions, [])
mv1 = self._mv_maker(name)
self.assertEqual(mv1.version, 1)
rmd2 = self.store.get_registered_model(name=name)
self.assertEqual(self._extract_latest_by_stage(rmd2.latest_versions), {"None": 1})
# add a bunch more
mv2 = self._mv_maker(name)
self.assertEqual(mv2.version, 2)
self.store.transition_model_version_stage(
name=mv2.name, version=mv2.version, stage="Production", archive_existing_versions=False
)
mv3 = self._mv_maker(name)
self.assertEqual(mv3.version, 3)
self.store.transition_model_version_stage(
name=mv3.name, version=mv3.version, stage="Production", archive_existing_versions=False
)
mv4 = self._mv_maker(name)
self.assertEqual(mv4.version, 4)
self.store.transition_model_version_stage(
name=mv4.name, version=mv4.version, stage="Staging", archive_existing_versions=False
)
# test that correct latest versions are returned for each stage
rmd4 = self.store.get_registered_model(name=name)
self.assertEqual(
self._extract_latest_by_stage(rmd4.latest_versions),
{"None": 1, "Production": 3, "Staging": 4},
)
self.assertEqual(
self._extract_latest_by_stage(self.store.get_latest_versions(name=name, stages=None)),
{"None": 1, "Production": 3, "Staging": 4},
)
self.assertEqual(
self._extract_latest_by_stage(self.store.get_latest_versions(name=name, stages=[])),
{"None": 1, "Production": 3, "Staging": 4},
)
self.assertEqual(
self._extract_latest_by_stage(
self.store.get_latest_versions(name=name, stages=["Production"])
),
{"Production": 3},
)
self.assertEqual(
self._extract_latest_by_stage(
self.store.get_latest_versions(name=name, stages=["production"])
),
{"Production": 3},
) # The stages are case insensitive.
self.assertEqual(
self._extract_latest_by_stage(
self.store.get_latest_versions(name=name, stages=["pROduction"])
),
{"Production": 3},
) # The stages are case insensitive.
self.assertEqual(
self._extract_latest_by_stage(
self.store.get_latest_versions(name=name, stages=["None", "Production"])
),
{"None": 1, "Production": 3},
)
        # delete the latest Production version; the stage should fall back to the previous one
self.store.delete_model_version(name=mv3.name, version=mv3.version)
rmd5 = self.store.get_registered_model(name=name)
self.assertEqual(
self._extract_latest_by_stage(rmd5.latest_versions),
{"None": 1, "Production": 2, "Staging": 4},
)
self.assertEqual(
self._extract_latest_by_stage(self.store.get_latest_versions(name=name, stages=None)),
{"None": 1, "Production": 2, "Staging": 4},
)
self.assertEqual(
self._extract_latest_by_stage(
self.store.get_latest_versions(name=name, stages=["Production"])
),
{"Production": 2},
)
def test_set_registered_model_tag(self):
name1 = "SetRegisteredModelTag_TestMod"
name2 = "SetRegisteredModelTag_TestMod 2"
initial_tags = [
RegisteredModelTag("key", "value"),
RegisteredModelTag("anotherKey", "some other value"),
]
self._rm_maker(name1, initial_tags)
self._rm_maker(name2, initial_tags)
new_tag = RegisteredModelTag("randomTag", "not a random value")
self.store.set_registered_model_tag(name1, new_tag)
rm1 = self.store.get_registered_model(name=name1)
all_tags = initial_tags + [new_tag]
self.assertEqual(rm1.tags, {tag.key: tag.value for tag in all_tags})
# test overriding a tag with the same key
overriding_tag = RegisteredModelTag("key", "overriding")
self.store.set_registered_model_tag(name1, overriding_tag)
all_tags = [tag for tag in all_tags if tag.key != "key"] + [overriding_tag]
rm1 = self.store.get_registered_model(name=name1)
self.assertEqual(rm1.tags, {tag.key: tag.value for tag in all_tags})
# does not affect other models with the same key
rm2 = self.store.get_registered_model(name=name2)
self.assertEqual(rm2.tags, {tag.key: tag.value for tag in initial_tags})
        # cannot set a tag on a deleted (non-existent) registered model
self.store.delete_registered_model(name1)
with self.assertRaisesRegex(
MlflowException, rf"Registered Model with name={name1} not found"
) as exception_context:
self.store.set_registered_model_tag(name1, overriding_tag)
assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
# test cannot set tags that are too long
long_tag = RegisteredModelTag("longTagKey", "a" * 5001)
with self.assertRaisesRegex(
MlflowException,
r"Registered model value '.+' had length \d+, which exceeded length limit of 5000",
) as exception_context:
self.store.set_registered_model_tag(name2, long_tag)
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
# test can set tags that are somewhat long
long_tag = RegisteredModelTag("longTagKey", "a" * 4999)
self.store.set_registered_model_tag(name2, long_tag)
        # cannot set an invalid tag
with self.assertRaisesRegex(
MlflowException, r"Tag name cannot be None"
) as exception_context:
self.store.set_registered_model_tag(name2, RegisteredModelTag(key=None, value=""))
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
        # cannot use an invalid model name
with self.assertRaisesRegex(
MlflowException, r"Registered model name cannot be empty"
) as exception_context:
self.store.set_registered_model_tag(None, RegisteredModelTag(key="key", value="value"))
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
def test_delete_registered_model_tag(self):
name1 = "DeleteRegisteredModelTag_TestMod"
name2 = "DeleteRegisteredModelTag_TestMod 2"
initial_tags = [
RegisteredModelTag("key", "value"),
RegisteredModelTag("anotherKey", "some other value"),
]
self._rm_maker(name1, initial_tags)
self._rm_maker(name2, initial_tags)
new_tag = RegisteredModelTag("randomTag", "not a random value")
self.store.set_registered_model_tag(name1, new_tag)
self.store.delete_registered_model_tag(name1, "randomTag")
rm1 = self.store.get_registered_model(name=name1)
self.assertEqual(rm1.tags, {tag.key: tag.value for tag in initial_tags})
        # test that deleting a key does not affect other models with the same key
self.store.delete_registered_model_tag(name1, "key")
rm1 = self.store.get_registered_model(name=name1)
rm2 = self.store.get_registered_model(name=name2)
self.assertEqual(rm1.tags, {"anotherKey": "some other value"})
self.assertEqual(rm2.tags, {tag.key: tag.value for tag in initial_tags})
        # deleting a tag that is already deleted does nothing
self.store.delete_registered_model_tag(name1, "key")
rm1 = self.store.get_registered_model(name=name1)
self.assertEqual(rm1.tags, {"anotherKey": "some other value"})
        # cannot delete a tag on a deleted (non-existent) registered model
self.store.delete_registered_model(name1)
with self.assertRaisesRegex(
MlflowException, rf"Registered Model with name={name1} not found"
) as exception_context:
self.store.delete_registered_model_tag(name1, "anotherKey")
assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
        # cannot delete a tag with an invalid key
with self.assertRaisesRegex(
MlflowException, r"Tag name cannot be None"
) as exception_context:
self.store.delete_registered_model_tag(name2, None)
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
        # cannot use an invalid model name
with self.assertRaisesRegex(
MlflowException, r"Registered model name cannot be empty"
) as exception_context:
self.store.delete_registered_model_tag(None, "key")
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
def test_create_model_version(self):
name = "test_for_update_MV"
self._rm_maker(name)
run_id = uuid.uuid4().hex
with mock.patch("time.time") as mock_time:
mock_time.return_value = 456778
mv1 = self._mv_maker(name, "a/b/CD", run_id)
self.assertEqual(mv1.name, name)
self.assertEqual(mv1.version, 1)
mvd1 = self.store.get_model_version(mv1.name, mv1.version)
self.assertEqual(mvd1.name, name)
self.assertEqual(mvd1.version, 1)
self.assertEqual(mvd1.current_stage, "None")
self.assertEqual(mvd1.creation_timestamp, 456778000)
self.assertEqual(mvd1.last_updated_timestamp, 456778000)
self.assertEqual(mvd1.description, None)
self.assertEqual(mvd1.source, "a/b/CD")
self.assertEqual(mvd1.run_id, run_id)
self.assertEqual(mvd1.status, "READY")
self.assertEqual(mvd1.status_message, None)
self.assertEqual(mvd1.tags, {})
# new model versions for same name autoincrement versions
mv2 = self._mv_maker(name)
mvd2 = self.store.get_model_version(name=mv2.name, version=mv2.version)
self.assertEqual(mv2.version, 2)
self.assertEqual(mvd2.version, 2)
        # creating a model version with tags returns a model version entity with tags
tags = [ModelVersionTag("key", "value"), ModelVersionTag("anotherKey", "some other value")]
mv3 = self._mv_maker(name, tags=tags)
mvd3 = self.store.get_model_version(name=mv3.name, version=mv3.version)
self.assertEqual(mv3.version, 3)
self.assertEqual(mv3.tags, {tag.key: tag.value for tag in tags})
self.assertEqual(mvd3.version, 3)
self.assertEqual(mvd3.tags, {tag.key: tag.value for tag in tags})
# create model versions with runLink
run_link = "http://localhost:3000/path/to/run/"
mv4 = self._mv_maker(name, run_link=run_link)
mvd4 = self.store.get_model_version(name, mv4.version)
self.assertEqual(mv4.version, 4)
self.assertEqual(mv4.run_link, run_link)
self.assertEqual(mvd4.version, 4)
self.assertEqual(mvd4.run_link, run_link)
# create model version with description
description = "the best model ever"
mv5 = self._mv_maker(name, description=description)
mvd5 = self.store.get_model_version(name, mv5.version)
self.assertEqual(mv5.version, 5)
self.assertEqual(mv5.description, description)
self.assertEqual(mvd5.version, 5)
self.assertEqual(mvd5.description, description)
# create model version without runId
mv6 = self._mv_maker(name, run_id=None)
mvd6 = self.store.get_model_version(name, mv6.version)
self.assertEqual(mv6.version, 6)
self.assertEqual(mv6.run_id, None)
self.assertEqual(mvd6.version, 6)
self.assertEqual(mvd6.run_id, None)
def test_update_model_version(self):
name = "test_for_update_MV"
self._rm_maker(name)
mv1 = self._mv_maker(name)
mvd1 = self.store.get_model_version(name=mv1.name, version=mv1.version)
self.assertEqual(mvd1.name, name)
self.assertEqual(mvd1.version, 1)
self.assertEqual(mvd1.current_stage, "None")
# update stage
self.store.transition_model_version_stage(
name=mv1.name, version=mv1.version, stage="Production", archive_existing_versions=False
)
mvd2 = self.store.get_model_version(name=mv1.name, version=mv1.version)
self.assertEqual(mvd2.name, name)
self.assertEqual(mvd2.version, 1)
self.assertEqual(mvd2.current_stage, "Production")
self.assertEqual(mvd2.description, None)
# update description
self.store.update_model_version(
name=mv1.name, version=mv1.version, description="test model version"
)
mvd3 = self.store.get_model_version(name=mv1.name, version=mv1.version)
self.assertEqual(mvd3.name, name)
self.assertEqual(mvd3.version, 1)
self.assertEqual(mvd3.current_stage, "Production")
self.assertEqual(mvd3.description, "test model version")
# only valid stages can be set
with self.assertRaisesRegex(
MlflowException, r"Invalid Model Version stage unknown"
) as exception_context:
self.store.transition_model_version_stage(
mv1.name, mv1.version, stage="unknown", archive_existing_versions=False
)
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
# stages are case-insensitive and auto-corrected to system stage names
for stage_name in ["STAGING", "staging", "StAgInG"]:
self.store.transition_model_version_stage(
name=mv1.name,
version=mv1.version,
stage=stage_name,
archive_existing_versions=False,
)
mvd5 = self.store.get_model_version(name=mv1.name, version=mv1.version)
self.assertEqual(mvd5.current_stage, "Staging")
def test_transition_model_version_stage_when_archive_existing_versions_is_false(self):
name = "model"
self._rm_maker(name)
mv1 = self._mv_maker(name)
mv2 = self._mv_maker(name)
mv3 = self._mv_maker(name)
# test that when `archive_existing_versions` is False, transitioning a model version
# to the inactive stages ("Archived" and "None") does not throw.
for stage in ["Archived", "None"]:
self.store.transition_model_version_stage(name, mv1.version, stage, False)
self.store.transition_model_version_stage(name, mv1.version, "Staging", False)
self.store.transition_model_version_stage(name, mv2.version, "Production", False)
self.store.transition_model_version_stage(name, mv3.version, "Staging", False)
mvd1 = self.store.get_model_version(name=name, version=mv1.version)
mvd2 = self.store.get_model_version(name=name, version=mv2.version)
mvd3 = self.store.get_model_version(name=name, version=mv3.version)
self.assertEqual(mvd1.current_stage, "Staging")
self.assertEqual(mvd2.current_stage, "Production")
self.assertEqual(mvd3.current_stage, "Staging")
self.store.transition_model_version_stage(name, mv3.version, "Production", False)
mvd1 = self.store.get_model_version(name=name, version=mv1.version)
mvd2 = self.store.get_model_version(name=name, version=mv2.version)
mvd3 = self.store.get_model_version(name=name, version=mv3.version)
self.assertEqual(mvd1.current_stage, "Staging")
self.assertEqual(mvd2.current_stage, "Production")
self.assertEqual(mvd3.current_stage, "Production")
def test_transition_model_version_stage_when_archive_existing_versions_is_true(self):
name = "model"
self._rm_maker(name)
mv1 = self._mv_maker(name)
mv2 = self._mv_maker(name)
mv3 = self._mv_maker(name)
msg = (
r"Model version transition cannot archive existing model versions "
r"because .+ is not an Active stage"
)
# test that when `archive_existing_versions` is True, transitioning a model version
# to the inactive stages ("Archived" and "None") throws.
for stage in ["Archived", "None"]:
with self.assertRaisesRegex(MlflowException, msg):
self.store.transition_model_version_stage(name, mv1.version, stage, True)
self.store.transition_model_version_stage(name, mv1.version, "Staging", False)
self.store.transition_model_version_stage(name, mv2.version, "Production", False)
self.store.transition_model_version_stage(name, mv3.version, "Staging", True)
mvd1 = self.store.get_model_version(name=name, version=mv1.version)
mvd2 = self.store.get_model_version(name=name, version=mv2.version)
mvd3 = self.store.get_model_version(name=name, version=mv3.version)
self.assertEqual(mvd1.current_stage, "Archived")
self.assertEqual(mvd2.current_stage, "Production")
self.assertEqual(mvd3.current_stage, "Staging")
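# mv1 was archived as part of mv3's transition, so both rows share one last_updated_timestamp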
self.assertEqual(mvd1.last_updated_timestamp, mvd3.last_updated_timestamp)
self.store.transition_model_version_stage(name, mv3.version, "Production", True)
mvd1 = self.store.get_model_version(name=name, version=mv1.version)
mvd2 = self.store.get_model_version(name=name, version=mv2.version)
mvd3 = self.store.get_model_version(name=name, version=mv3.version)
self.assertEqual(mvd1.current_stage, "Archived")
self.assertEqual(mvd2.current_stage, "Archived")
self.assertEqual(mvd3.current_stage, "Production")
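# likewise, mv2 was archived during mv3's transition to Production, so their timestamps match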
self.assertEqual(mvd2.last_updated_timestamp, mvd3.last_updated_timestamp)
for uncanonical_stage_name in ["STAGING", "staging", "StAgInG"]:
self.store.transition_model_version_stage(mv1.name, mv1.version, "Staging", False)
self.store.transition_model_version_stage(mv2.name, mv2.version, "None", False)
# stage names are case-insensitive and auto-corrected to system stage names
self.store.transition_model_version_stage(
mv2.name, mv2.version, uncanonical_stage_name, True
)
mvd1 = self.store.get_model_version(name=mv1.name, version=mv1.version)
mvd2 = self.store.get_model_version(name=mv2.name, version=mv2.version)
self.assertEqual(mvd1.current_stage, "Archived")
self.assertEqual(mvd2.current_stage, "Staging")
def test_delete_model_version(self):
name = "test_for_delete_MV"
initial_tags = [
ModelVersionTag("key", "value"),
ModelVersionTag("anotherKey", "some other value"),
]
self._rm_maker(name)
mv = self._mv_maker(name, tags=initial_tags)
mvd = self.store.get_model_version(name=mv.name, version=mv.version)
self.assertEqual(mvd.name, name)
self.store.delete_model_version(name=mv.name, version=mv.version)
# cannot get a deleted model version
with self.assertRaisesRegex(
MlflowException, rf"Model Version \(name={mv.name}, version={mv.version}\) not found"
) as exception_context:
self.store.get_model_version(name=mv.name, version=mv.version)
assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
# cannot update a deleted model version
with self.assertRaisesRegex(
MlflowException, rf"Model Version \(name={mv.name}, version={mv.version}\) not found"
) as exception_context:
self.store.update_model_version(mv.name, mv.version, description="deleted!")
assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
# cannot delete it again
with self.assertRaisesRegex(
MlflowException, rf"Model Version \(name={mv.name}, version={mv.version}\) not found"
) as exception_context:
self.store.delete_model_version(name=mv.name, version=mv.version)
assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
def test_delete_model_version_redaction(self):
name = "test_for_delete_MV_redaction"
run_link = "http://localhost:5000/path/to/run"
run_id = "12345"
source = "path/to/source"
self._rm_maker(name)
mv = self._mv_maker(name, source=source, run_id=run_id, run_link=run_link)
mvd = self.store.get_model_version(name=name, version=mv.version)
self.assertEqual(mvd.run_link, run_link)
self.assertEqual(mvd.run_id, run_id)
self.assertEqual(mvd.source, source)
# delete the MV now
self.store.delete_model_version(name, mv.version)
# verify that the relevant fields are redacted
mvd_deleted = self.store._get_sql_model_version_including_deleted(
name=name, version=mv.version
)
self.assertIn("REDACTED", mvd_deleted.run_link)
self.assertIn("REDACTED", mvd_deleted.source)
self.assertIn("REDACTED", mvd_deleted.run_id)
def test_get_model_version_download_uri(self):
name = "test_for_update_MV"
self._rm_maker(name)
source_path = "path/to/source"
mv = self._mv_maker(name, source=source_path, run_id=uuid.uuid4().hex)
mvd1 = self.store.get_model_version(name=mv.name, version=mv.version)
self.assertEqual(mvd1.name, name)
self.assertEqual(mvd1.source, source_path)
# download location points to source
self.assertEqual(
self.store.get_model_version_download_uri(name=mv.name, version=mv.version), source_path
)
# download URI does not change even if model version is updated
self.store.transition_model_version_stage(
name=mv.name, version=mv.version, stage="Production", archive_existing_versions=False
)
self.store.update_model_version(
name=mv.name, version=mv.version, description="Test for Path"
)
mvd2 = self.store.get_model_version(name=mv.name, version=mv.version)
self.assertEqual(mvd2.source, source_path)
self.assertEqual(
self.store.get_model_version_download_uri(name=mv.name, version=mv.version), source_path
)
# cannot retrieve download URI for deleted model versions
self.store.delete_model_version(name=mv.name, version=mv.version)
with self.assertRaisesRegex(
MlflowException, rf"Model Version \(name={mv.name}, version={mv.version}\) not found"
) as exception_context:
self.store.get_model_version_download_uri(name=mv.name, version=mv.version)
assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
def test_search_model_versions(self):
# create some model versions
name = "test_for_search_MV"
self._rm_maker(name)
run_id_1 = uuid.uuid4().hex
run_id_2 = uuid.uuid4().hex
run_id_3 = uuid.uuid4().hex
mv1 = self._mv_maker(name=name, source="A/B", run_id=run_id_1)
self.assertEqual(mv1.version, 1)
mv2 = self._mv_maker(name=name, source="A/C", run_id=run_id_2)
self.assertEqual(mv2.version, 2)
mv3 = self._mv_maker(name=name, source="A/D", run_id=run_id_2)
self.assertEqual(mv3.version, 3)
mv4 = self._mv_maker(name=name, source="A/D", run_id=run_id_3)
self.assertEqual(mv4.version, 4)
def search_versions(filter_string):
return [mvd.version for mvd in self.store.search_model_versions(filter_string)]
# search using name should return all 4 versions
self.assertEqual(set(search_versions("name='%s'" % name)), set([1, 2, 3, 4]))
# search using run_id_1 should return version 1
self.assertEqual(set(search_versions("run_id='%s'" % run_id_1)), set([1]))
# search using run_id_2 should return versions 2 and 3
self.assertEqual(set(search_versions("run_id='%s'" % run_id_2)), set([2, 3]))
# search using the IN operator should return all versions
self.assertEqual(
set(
search_versions(
"run_id IN ('{run_id_1}','{run_id_2}')".format(
run_id_1=run_id_1, run_id_2=run_id_2
)
)
),
set([1, 2, 3]),
)
# searching with the IN operator and a badly typed list should raise an exception
with self.assertRaisesRegex(
MlflowException,
(
r"While parsing a list in the query, "
r"expected string value or punctuation, "
r"but got different type in list"
),
) as exception_context:
search_versions("run_id IN (1,2,3)")
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
# searching with the IN operator and an empty list should raise an exception
with self.assertRaisesRegex(
MlflowException,
(
r"While parsing a list in the query, "
r"expected a non-empty list of string values, "
r"but got empty list"
),
) as exception_context:
search_versions("run_id IN ()")
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
# an ill-formed IN operator should throw an exception
with self.assertRaisesRegex(
MlflowException, r"Invalid clause\(s\) in filter string"
) as exception_context:
search_versions("run_id IN (")
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
with self.assertRaisesRegex(MlflowException, r"Invalid filter '.+'") as exception_context:
search_versions("run_id IN")
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
with self.assertRaisesRegex(
MlflowException,
(
r"While parsing a list in the query, "
r"expected a non-empty list of string values, "
r"but got ill-formed list"
),
) as exception_context:
search_versions("run_id IN (,)")
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
with self.assertRaisesRegex(
MlflowException,
(
r"While parsing a list in the query, "
r"expected a non-empty list of string values, "
r"but got ill-formed list"
),
) as exception_context:
search_versions("run_id IN ('runid1',,'runid2')")
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
# the IN operator cannot be combined with additional filter clauses
with self.assertRaisesRegex(
MlflowException, r"Search filter '.+' contains multiple expressions"
) as exception_context:
search_versions(
"name='{name}]' AND run_id IN ('{run_id_1}','{run_id_2}')".format(
name=name, run_id_1=run_id_1, run_id_2=run_id_2
)
)
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
# search using source_path "A/D" should return versions 3 and 4
self.assertEqual(set(search_versions("source_path = 'A/D'")), set([3, 4]))
# search using source_path "A" should not return anything
self.assertEqual(len(search_versions("source_path = 'A'")), 0)
self.assertEqual(len(search_versions("source_path = 'A/'")), 0)
self.assertEqual(len(search_versions("source_path = ''")), 0)
# delete mv4. search should not return version 4
self.store.delete_model_version(name=mv4.name, version=mv4.version)
self.assertEqual(set(search_versions("")), set([1, 2, 3]))
self.assertEqual(set(search_versions(None)), set([1, 2, 3]))
self.assertEqual(set(search_versions("name='%s'" % name)), set([1, 2, 3]))
self.assertEqual(set(search_versions("source_path = 'A/D'")), set([3]))
self.store.transition_model_version_stage(
name=mv1.name, version=mv1.version, stage="production", archive_existing_versions=False
)
self.store.update_model_version(
name=mv1.name, version=mv1.version, description="Online prediction model!"
)
mvds = self.store.search_model_versions("run_id = '%s'" % run_id_1)
assert 1 == len(mvds)
assert isinstance(mvds[0], ModelVersion)
assert mvds[0].current_stage == "Production"
assert mvds[0].run_id == run_id_1
assert mvds[0].source == "A/B"
assert mvds[0].description == "Online prediction model!"
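# helper: run search_registered_models with the given arguments and return (names, next_page_token)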
def _search_registered_models(
self, filter_string, max_results=10, order_by=None, page_token=None
):
result = self.store.search_registered_models(
filter_string=filter_string,
max_results=max_results,
order_by=order_by,
page_token=page_token,
)
return [registered_model.name for registered_model in result], result.token
def test_search_registered_models(self):
# create some registered models
prefix = "test_for_search_"
names = [prefix + name for name in ["RM1", "RM2", "RM3", "RM4", "RM4A", "RM4a"]]
for name in names:
self._rm_maker(name)
# search with no filter should return all registered models
rms, _ = self._search_registered_models(None)
self.assertEqual(rms, names)
# equality search using name should return exactly one name
rms, _ = self._search_registered_models("name='{}'".format(names[0]))
self.assertEqual(rms, [names[0]])
# equality search using a non-existent name should return nothing
rms, _ = self._search_registered_models("name='{}'".format(names[0] + "cats"))
self.assertEqual(rms, [])
# case-sensitive prefix search using LIKE should return all the RMs
rms, _ = self._search_registered_models("name LIKE '{}%'".format(prefix))
self.assertEqual(rms, names)
# case-sensitive substring search using LIKE with surrounding % should return all the RMs
rms, _ = self._search_registered_models("name LIKE '%RM%'")
self.assertEqual(rms, names)
# case-sensitive search using LIKE with the '_' single-character wildcard:
# '_e%' matches 'test_for_search_', so all RMs should match
rms, _ = self._search_registered_models("name LIKE '_e%'")
self.assertEqual(rms, names)
# case-sensitive prefix search using LIKE should return just RM4A
rms, _ = self._search_registered_models("name LIKE '{}%'".format(prefix + "RM4A"))
self.assertEqual(rms, [names[4]])
# case-sensitive prefix search using LIKE should return no models if no match
rms, _ = self._search_registered_models("name LIKE '{}%'".format(prefix + "cats"))
self.assertEqual(rms, [])
# confirm that the LIKE keyword itself is case-insensitive (the match remains case-sensitive)
rms, _ = self._search_registered_models("name lIkE '%blah%'")
self.assertEqual(rms, [])
rms, _ = self._search_registered_models("name like '{}%'".format(prefix + "RM4A"))
self.assertEqual(rms, [names[4]])
# case-insensitive prefix search using ILIKE should return both RM4A and RM4a
rms, _ = self._search_registered_models("name ILIKE '{}%'".format(prefix + "RM4A"))
self.assertEqual(rms, names[4:])
# case-insensitive suffix search with ILIKE
rms, _ = self._search_registered_models("name ILIKE '%RM4a'")
self.assertEqual(rms, names[4:])
# case-insensitive prefix search using ILIKE with a non-matching prefix should return no models
rms, _ = self._search_registered_models("name ILIKE '{}%'".format(prefix + "cats"))
self.assertEqual(rms, [])
# confirm that the ILIKE keyword itself is case-insensitive
rms, _ = self._search_registered_models("name iLike '%blah%'")
self.assertEqual(rms, [])
# confirm that ILIKE with '%%' matches all names
rms, _ = self._search_registered_models("name iLike '%%'")
self.assertEqual(rms, names)
rms, _ = self._search_registered_models("name ilike '%RM4a'")
self.assertEqual(rms, names[4:])
# cannot search by invalid comparator types
with self.assertRaisesRegex(
MlflowException, r"Expected a quoted string value for attributes"
) as exception_context:
self._search_registered_models("name!=something")
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
# cannot search by run_id
with self.assertRaisesRegex(
MlflowException, r"Invalid attribute key '.+' specified"
) as exception_context:
self._search_registered_models("run_id='%s'" % "somerunID")
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
# cannot search by source_path
with self.assertRaisesRegex(
MlflowException, r"Invalid attribute key '.+' specified"
) as exception_context:
self._search_registered_models("source_path = 'A/D'")
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
# cannot search by other params
with self.assertRaisesRegex(
MlflowException, r"Invalid clause\(s\) in filter string"
) as exception_context:
self._search_registered_models("evilhax = true")
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
# delete the last registered model; search should now return only the remaining 5
self.store.delete_registered_model(name=names[-1])
self.assertEqual(self._search_registered_models(None, max_results=1000), (names[:-1], None))
# equality search using name should return no names
self.assertEqual(self._search_registered_models("name='{}'".format(names[-1])), ([], None))
# case-sensitive prefix search using LIKE should return all remaining RMs
self.assertEqual(
self._search_registered_models("name LIKE '{}%'".format(prefix)), (names[0:5], None)
)
# case-insensitive prefix search using ILIKE should now return only RM4A (RM4a was deleted)
self.assertEqual(
self._search_registered_models("name ILIKE '{}%'".format(prefix + "RM4A")),
([names[4]], None),
)
def test_parse_search_registered_models_order_by(self):
# test that "registered_models.name ASC" is returned by default
parsed = SqlAlchemyStore._parse_search_registered_models_order_by([])
self.assertEqual([str(x) for x in parsed], ["registered_models.name ASC"])
# test that the given 'name' replaces the default one ('registered_models.name ASC')
parsed = SqlAlchemyStore._parse_search_registered_models_order_by(["name DESC"])
self.assertEqual([str(x) for x in parsed], ["registered_models.name DESC"])
# test that an exception is raised when order_by contains duplicate fields
msg = "`order_by` contains duplicate fields:"
with self.assertRaisesRegex(MlflowException, msg):
SqlAlchemyStore._parse_search_registered_models_order_by(
["last_updated_timestamp", "last_updated_timestamp"]
)
with self.assertRaisesRegex(MlflowException, msg):
SqlAlchemyStore._parse_search_registered_models_order_by(["timestamp", "timestamp"])
with self.assertRaisesRegex(MlflowException, msg):
SqlAlchemyStore._parse_search_registered_models_order_by(
["timestamp", "last_updated_timestamp"],
)
with self.assertRaisesRegex(MlflowException, msg):
SqlAlchemyStore._parse_search_registered_models_order_by(
["last_updated_timestamp ASC", "last_updated_timestamp DESC"],
)
with self.assertRaisesRegex(MlflowException, msg):
SqlAlchemyStore._parse_search_registered_models_order_by(
["last_updated_timestamp", "last_updated_timestamp DESC"],
)
def test_search_registered_model_pagination(self):
rms = [self._rm_maker("RM{:03}".format(i)).name for i in range(50)]
# test flow with fixed max_results
returned_rms = []
query = "name LIKE 'RM%'"
result, token = self._search_registered_models(query, page_token=None, max_results=5)
returned_rms.extend(result)
while token:
result, token = self._search_registered_models(query, page_token=token, max_results=5)
returned_rms.extend(result)
self.assertEqual(rms, returned_rms)
# test that pagination will return all valid results in sorted order
# by name ascending
result, token1 = self._search_registered_models(query, max_results=5)
self.assertNotEqual(token1, None)
self.assertEqual(result, rms[0:5])
result, token2 = self._search_registered_models(query, page_token=token1, max_results=10)
self.assertNotEqual(token2, None)
self.assertEqual(result, rms[5:15])
result, token3 = self._search_registered_models(query, page_token=token2, max_results=20)
self.assertNotEqual(token3, None)
self.assertEqual(result, rms[15:35])
result, token4 = self._search_registered_models(query, page_token=token3, max_results=100)
# assert that page token is None
self.assertEqual(token4, None)
self.assertEqual(result, rms[35:])
# test that providing a completely invalid page token throws
with self.assertRaisesRegex(
MlflowException, r"Invalid page token, could not base64-decode"
) as exception_context:
self._search_registered_models(query, page_token="evilhax", max_results=20)
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
# test that providing too large of a max_results throws
with self.assertRaisesRegex(
MlflowException, r"Invalid value for request parameter max_results"
) as exception_context:
self._search_registered_models(query, page_token="evilhax", max_results=1e15)
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
self.assertIn(
"Invalid value for request parameter max_results", exception_context.exception.message
)
def test_search_registered_model_order_by(self):
rms = []
# explicitly mock the creation_timestamps because timestamps seem to be unstable in Windows
for i in range(50):
with mock.patch("mlflow.store.model_registry.sqlalchemy_store.now", return_value=i):
rms.append(self._rm_maker("RM{:03}".format(i)).name)
# test flow with fixed max_results and order_by (test stable order across pages)
returned_rms = []
query = "name LIKE 'RM%'"
result, token = self._search_registered_models(
query, page_token=None, order_by=["name DESC"], max_results=5
)
returned_rms.extend(result)
while token:
result, token = self._search_registered_models(
query, page_token=token, order_by=["name DESC"], max_results=5
)
returned_rms.extend(result)
# name descending should be the reverse of the creation order
self.assertEqual(rms[::-1], returned_rms)
# last_updated_timestamp descending should have the newest RMs first
result, _ = self._search_registered_models(
query, page_token=None, order_by=["last_updated_timestamp DESC"], max_results=100
)
self.assertEqual(rms[::-1], result)
# timestamp returns same result as last_updated_timestamp
result, _ = self._search_registered_models(
query, page_token=None, order_by=["timestamp DESC"], max_results=100
)
self.assertEqual(rms[::-1], result)
# last_updated_timestamp ascending should have the oldest RMs first
result, _ = self._search_registered_models(
query, page_token=None, order_by=["last_updated_timestamp ASC"], max_results=100
)
self.assertEqual(rms, result)
# timestamp returns same result as last_updated_timestamp
result, _ = self._search_registered_models(
query, page_token=None, order_by=["timestamp ASC"], max_results=100
)
self.assertEqual(rms, result)
# timestamp returns same result as last_updated_timestamp
result, _ = self._search_registered_models(
query, page_token=None, order_by=["timestamp"], max_results=100
)
self.assertEqual(rms, result)
# name ascending should have the original order
result, _ = self._search_registered_models(
query, page_token=None, order_by=["name ASC"], max_results=100
)
self.assertEqual(rms, result)
# test that no ASC/DESC defaults to ASC
result, _ = self._search_registered_models(
query, page_token=None, order_by=["last_updated_timestamp"], max_results=100
)
self.assertEqual(rms, result)
with mock.patch("mlflow.store.model_registry.sqlalchemy_store.now", return_value=1):
rm1 = self._rm_maker("MR1").name
rm2 = self._rm_maker("MR2").name
with mock.patch("mlflow.store.model_registry.sqlalchemy_store.now", return_value=2):
rm3 = self._rm_maker("MR3").name
rm4 = self._rm_maker("MR4").name
query = "name LIKE 'MR%'"
# test with multiple clauses
result, _ = self._search_registered_models(
query,
page_token=None,
order_by=["last_updated_timestamp ASC", "name DESC"],
max_results=100,
)
self.assertEqual([rm2, rm1, rm4, rm3], result)
result, _ = self._search_registered_models(
query, page_token=None, order_by=["timestamp ASC", "name DESC"], max_results=100
)
self.assertEqual([rm2, rm1, rm4, rm3], result)
# confirm that name ascending is the default, even if ties exist on other fields
result, _ = self._search_registered_models(
query, page_token=None, order_by=[], max_results=100
)
self.assertEqual([rm1, rm2, rm3, rm4], result)
# test default tiebreak with descending timestamps
result, _ = self._search_registered_models(
query, page_token=None, order_by=["last_updated_timestamp DESC"], max_results=100
)
self.assertEqual([rm3, rm4, rm1, rm2], result)
# test that order_by parsing tolerates tabs, carriage returns, and newlines between key and direction
result, _ = self._search_registered_models(
query, page_token=None, order_by=["timestamp\tASC"], max_results=100
)
self.assertEqual([rm1, rm2, rm3, rm4], result)
result, _ = self._search_registered_models(
query, page_token=None, order_by=["timestamp\r\rASC"], max_results=100
)
self.assertEqual([rm1, rm2, rm3, rm4], result)
result, _ = self._search_registered_models(
query, page_token=None, order_by=["timestamp\nASC"], max_results=100
)
self.assertEqual([rm1, rm2, rm3, rm4], result)
result, _ = self._search_registered_models(
query, page_token=None, order_by=["timestamp ASC"], max_results=100
)
self.assertEqual([rm1, rm2, rm3, rm4], result)
# validate that the ASC/DESC direction token is case-insensitive
result, _ = self._search_registered_models(
query, page_token=None, order_by=["timestamp asc"], max_results=100
)
self.assertEqual([rm1, rm2, rm3, rm4], result)
result, _ = self._search_registered_models(
query, page_token=None, order_by=["timestamp aSC"], max_results=100
)
self.assertEqual([rm1, rm2, rm3, rm4], result)
result, _ = self._search_registered_models(
query, page_token=None, order_by=["timestamp desc", "name desc"], max_results=100
)
self.assertEqual([rm4, rm3, rm2, rm1], result)
result, _ = self._search_registered_models(
query, page_token=None, order_by=["timestamp deSc", "name deSc"], max_results=100
)
self.assertEqual([rm4, rm3, rm2, rm1], result)
def test_search_registered_model_order_by_errors(self):
query = "name LIKE 'RM%'"
# test that invalid columns throw even if they come after valid columns
with self.assertRaisesRegex(
MlflowException, r"Invalid order by key '.+' specified"
) as exception_context:
self._search_registered_models(
query,
page_token=None,
order_by=["name ASC", "creation_timestamp DESC"],
max_results=5,
)
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
# test that invalid columns with random text throw even if they come after valid columns
with self.assertRaisesRegex(
MlflowException, r"Invalid order_by clause '.+'"
) as exception_context:
self._search_registered_models(
query,
page_token=None,
order_by=["name ASC", "last_updated_timestamp DESC blah"],
max_results=5,
)
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
def test_set_model_version_tag(self):
name1 = "SetModelVersionTag_TestMod"
name2 = "SetModelVersionTag_TestMod 2"
initial_tags = [
ModelVersionTag("key", "value"),
ModelVersionTag("anotherKey", "some other value"),
]
self._rm_maker(name1)
self._rm_maker(name2)
run_id_1 = uuid.uuid4().hex
run_id_2 = uuid.uuid4().hex
run_id_3 = uuid.uuid4().hex
self._mv_maker(name1, "A/B", run_id_1, initial_tags)
self._mv_maker(name1, "A/C", run_id_2, initial_tags)
self._mv_maker(name2, "A/D", run_id_3, initial_tags)
new_tag = ModelVersionTag("randomTag", "not a random value")
self.store.set_model_version_tag(name1, 1, new_tag)
all_tags = initial_tags + [new_tag]
rm1mv1 = self.store.get_model_version(name1, 1)
self.assertEqual(rm1mv1.tags, {tag.key: tag.value for tag in all_tags})
# test overriding a tag with the same key
overriding_tag = ModelVersionTag("key", "overriding")
self.store.set_model_version_tag(name1, 1, overriding_tag)
all_tags = [tag for tag in all_tags if tag.key != "key"] + [overriding_tag]
rm1mv1 = self.store.get_model_version(name1, 1)
self.assertEqual(rm1mv1.tags, {tag.key: tag.value for tag in all_tags})
# does not affect other model versions with the same key
rm1mv2 = self.store.get_model_version(name1, 2)
rm2mv1 = self.store.get_model_version(name2, 1)
self.assertEqual(rm1mv2.tags, {tag.key: tag.value for tag in initial_tags})
self.assertEqual(rm2mv1.tags, {tag.key: tag.value for tag in initial_tags})
# cannot set a tag on a deleted (non-existent) model version
self.store.delete_model_version(name1, 2)
with self.assertRaisesRegex(
MlflowException, rf"Model Version \(name={name1}, version=2\) not found"
) as exception_context:
self.store.set_model_version_tag(name1, 2, overriding_tag)
assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
# test cannot set tags that are too long
long_tag = ModelVersionTag("longTagKey", "a" * 5001)
with self.assertRaisesRegex(
MlflowException,
r"Model version value '.+' had length \d+, which exceeded length limit of 5000",
) as exception_context:
self.store.set_model_version_tag(name1, 1, long_tag)
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
# tags just under the length limit can be set
long_tag = ModelVersionTag("longTagKey", "a" * 4999)
self.store.set_model_version_tag(name1, 1, long_tag)
# cannot set a tag with an invalid (None) key
with self.assertRaisesRegex(
MlflowException, r"Tag name cannot be None"
) as exception_context:
self.store.set_model_version_tag(name2, 1, ModelVersionTag(key=None, value=""))
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
# cannot use an invalid model name or version
with self.assertRaisesRegex(
MlflowException, r"Registered model name cannot be empty"
) as exception_context:
self.store.set_model_version_tag(None, 1, ModelVersionTag(key="key", value="value"))
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
with self.assertRaisesRegex(
MlflowException, r"Model version must be an integer"
) as exception_context:
self.store.set_model_version_tag(
name2, "I am not a version", ModelVersionTag(key="key", value="value")
)
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
def test_delete_model_version_tag(self):
name1 = "DeleteModelVersionTag_TestMod"
name2 = "DeleteModelVersionTag_TestMod 2"
initial_tags = [
ModelVersionTag("key", "value"),
ModelVersionTag("anotherKey", "some other value"),
]
self._rm_maker(name1)
self._rm_maker(name2)
run_id_1 = uuid.uuid4().hex
run_id_2 = uuid.uuid4().hex
run_id_3 = uuid.uuid4().hex
self._mv_maker(name1, "A/B", run_id_1, initial_tags)
self._mv_maker(name1, "A/C", run_id_2, initial_tags)
self._mv_maker(name2, "A/D", run_id_3, initial_tags)
new_tag = ModelVersionTag("randomTag", "not a random value")
self.store.set_model_version_tag(name1, 1, new_tag)
self.store.delete_model_version_tag(name1, 1, "randomTag")
rm1mv1 = self.store.get_model_version(name1, 1)
self.assertEqual(rm1mv1.tags, {tag.key: tag.value for tag in initial_tags})
# deleting a key does not affect other model versions with the same key
self.store.delete_model_version_tag(name1, 1, "key")
rm1mv1 = self.store.get_model_version(name1, 1)
rm1mv2 = self.store.get_model_version(name1, 2)
rm2mv1 = self.store.get_model_version(name2, 1)
self.assertEqual(rm1mv1.tags, {"anotherKey": "some other value"})
self.assertEqual(rm1mv2.tags, {tag.key: tag.value for tag in initial_tags})
self.assertEqual(rm2mv1.tags, {tag.key: tag.value for tag in initial_tags})
# deleting a tag that is already deleted does nothing
self.store.delete_model_version_tag(name1, 1, "key")
rm1mv1 = self.store.get_model_version(name1, 1)
self.assertEqual(rm1mv1.tags, {"anotherKey": "some other value"})
# cannot delete a tag on a deleted (non-existent) model version
self.store.delete_model_version(name2, 1)
with self.assertRaisesRegex(
MlflowException, rf"Model Version \(name={name2}, version=1\) not found"
) as exception_context:
self.store.delete_model_version_tag(name2, 1, "key")
assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
# cannot delete a tag with an invalid (None) key
with self.assertRaisesRegex(
MlflowException, r"Tag name cannot be None"
) as exception_context:
self.store.delete_model_version_tag(name1, 2, None)
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
# cannot use an invalid model name or version
with self.assertRaisesRegex(
MlflowException, r"Registered model name cannot be empty"
) as exception_context:
self.store.delete_model_version_tag(None, 2, "key")
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
with self.assertRaisesRegex(
MlflowException, r"Model version must be an integer"
) as exception_context:
self.store.delete_model_version_tag(name1, "I am not a version", "key")
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
avg_line_length: 46.944173 | max_line_length: 100 | alphanum_fraction: 0.663154
import os
import unittest
import tempfile
from unittest import mock
import uuid
import mlflow
import mlflow.db
import mlflow.store.db.base_sql_model
from mlflow.entities.model_registry import (
RegisteredModel,
ModelVersion,
RegisteredModelTag,
ModelVersionTag,
)
from mlflow.exceptions import MlflowException
from mlflow.protos.databricks_pb2 import (
ErrorCode,
RESOURCE_DOES_NOT_EXIST,
INVALID_PARAMETER_VALUE,
RESOURCE_ALREADY_EXISTS,
)
from mlflow.store.model_registry.sqlalchemy_store import SqlAlchemyStore
from tests.helper_functions import random_str
DB_URI = "sqlite:///"
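# base SQLite URI; setUp appends a temp-file path to it (e.g. sqlite:////tmp/<file>)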
class TestSqlAlchemyStoreSqlite(unittest.TestCase):
def _get_store(self, db_uri=""):
return SqlAlchemyStore(db_uri)
def setUp(self):
self.maxDiff = None
fd, self.temp_dbfile = tempfile.mkstemp()
os.close(fd)
self.db_url = "%s%s" % (DB_URI, self.temp_dbfile)
self.store = self._get_store(self.db_url)
def tearDown(self):
mlflow.store.db.base_sql_model.Base.metadata.drop_all(self.store.engine)
os.remove(self.temp_dbfile)
def _rm_maker(self, name, tags=None, description=None):
return self.store.create_registered_model(name, tags, description)
def _mv_maker(
self,
name,
source="path/to/source",
run_id=uuid.uuid4().hex,
tags=None,
run_link=None,
description=None,
):
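# note: the run_id default above is evaluated once at class-definition time,
# so every call that omits run_id reuses the same generated hex id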
return self.store.create_model_version(
name, source, run_id, tags, run_link=run_link, description=description
)
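# helper: map each stage name to the latest version number reported for it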
def _extract_latest_by_stage(self, latest_versions):
return {mvd.current_stage: mvd.version for mvd in latest_versions}
def test_create_registered_model(self):
name = random_str() + "abCD"
rm1 = self._rm_maker(name)
self.assertEqual(rm1.name, name)
self.assertEqual(rm1.description, None)
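# creating a registered model with a duplicate name should fail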
with self.assertRaisesRegex(
MlflowException, rf"Registered Model \(name={name}\) already exists"
) as exception_context:
self._rm_maker(name)
assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_ALREADY_EXISTS)
for name2 in [name + "extra", name.lower(), name.upper(), name + name]:
rm2 = self._rm_maker(name2)
self.assertEqual(rm2.name, name2)
name2 = random_str() + "tags"
tags = [
RegisteredModelTag("key", "value"),
RegisteredModelTag("anotherKey", "some other value"),
]
rm2 = self._rm_maker(name2, tags)
rmd2 = self.store.get_registered_model(name2)
self.assertEqual(rm2.name, name2)
self.assertEqual(rm2.tags, {tag.key: tag.value for tag in tags})
self.assertEqual(rmd2.name, name2)
self.assertEqual(rmd2.tags, {tag.key: tag.value for tag in tags})
name3 = random_str() + "-description"
description = "the best model ever"
rm3 = self._rm_maker(name3, description=description)
rmd3 = self.store.get_registered_model(name3)
self.assertEqual(rm3.name, name3)
self.assertEqual(rm3.description, description)
self.assertEqual(rmd3.name, name3)
self.assertEqual(rmd3.description, description)
with self.assertRaisesRegex(
MlflowException, r"Registered model name cannot be empty"
) as exception_context:
self._rm_maker(None)
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
with self.assertRaisesRegex(
MlflowException, r"Registered model name cannot be empty"
) as exception_context:
self._rm_maker("")
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
def test_get_registered_model(self):
name = "model_1"
tags = [
RegisteredModelTag("key", "value"),
RegisteredModelTag("anotherKey", "some other value"),
]
with mock.patch("time.time") as mock_time:
mock_time.return_value = 1234
rm = self._rm_maker(name, tags)
self.assertEqual(rm.name, name)
rmd = self.store.get_registered_model(name=name)
self.assertEqual(rmd.name, name)
self.assertEqual(rmd.creation_timestamp, 1234000)
self.assertEqual(rmd.last_updated_timestamp, 1234000)
self.assertEqual(rmd.description, None)
self.assertEqual(rmd.latest_versions, [])
self.assertEqual(rmd.tags, {tag.key: tag.value for tag in tags})
def test_update_registered_model(self):
name = "model_for_update_RM"
rm1 = self._rm_maker(name)
rmd1 = self.store.get_registered_model(name=name)
self.assertEqual(rm1.name, name)
self.assertEqual(rmd1.description, None)
rm2 = self.store.update_registered_model(name=name, description="test model")
rmd2 = self.store.get_registered_model(name=name)
self.assertEqual(rm2.name, "model_for_update_RM")
self.assertEqual(rmd2.name, "model_for_update_RM")
self.assertEqual(rmd2.description, "test model")
def test_rename_registered_model(self):
original_name = "original name"
new_name = "new name"
self._rm_maker(original_name)
self._mv_maker(original_name)
self._mv_maker(original_name)
rm = self.store.get_registered_model(original_name)
mv1 = self.store.get_model_version(original_name, 1)
mv2 = self.store.get_model_version(original_name, 2)
self.assertEqual(rm.name, original_name)
self.assertEqual(mv1.name, original_name)
self.assertEqual(mv2.name, original_name)
self.store.rename_registered_model(original_name, new_name)
rm = self.store.get_registered_model(new_name)
mv1 = self.store.get_model_version(new_name, 1)
mv2 = self.store.get_model_version(new_name, 2)
self.assertEqual(rm.name, new_name)
self.assertEqual(mv1.name, new_name)
self.assertEqual(mv2.name, new_name)
with self.assertRaisesRegex(
MlflowException, rf"Registered Model with name={original_name} not found"
) as exception_context:
self.store.get_registered_model(original_name)
assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
self._rm_maker(original_name)
with self.assertRaisesRegex(
MlflowException, rf"Registered Model \(name={original_name}\) already exists"
) as exception_context:
self.store.rename_registered_model(new_name, original_name)
assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_ALREADY_EXISTS)
with self.assertRaisesRegex(
MlflowException, r"Registered model name cannot be empty"
) as exception_context:
self.store.rename_registered_model(original_name, None)
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
with self.assertRaisesRegex(
MlflowException, r"Registered model name cannot be empty"
) as exception_context:
self.store.rename_registered_model(original_name, "")
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
def test_delete_registered_model(self):
name = "model_for_delete_RM"
self._rm_maker(name)
self._mv_maker(name)
rm1 = self.store.get_registered_model(name=name)
mv1 = self.store.get_model_version(name, 1)
self.assertEqual(rm1.name, name)
self.assertEqual(mv1.name, name)
self.store.delete_registered_model(name=name)
with self.assertRaisesRegex(
MlflowException, rf"Registered Model with name={name} not found"
) as exception_context:
self.store.get_registered_model(name=name)
assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
with self.assertRaisesRegex(
MlflowException, rf"Registered Model with name={name} not found"
) as exception_context:
self.store.update_registered_model(name=name, description="deleted")
assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
with self.assertRaisesRegex(
MlflowException, rf"Registered Model with name={name} not found"
) as exception_context:
self.store.delete_registered_model(name=name)
assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
with self.assertRaisesRegex(
MlflowException, rf"Model Version \(name={name}, version=1\) not found"
) as exception_context:
self.store.get_model_version(name, 1)
assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
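# helper: list one page of registered models and return their names (the page token is kept on the result)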
def _list_registered_models(self, page_token=None, max_results=10):
result = self.store.list_registered_models(max_results, page_token)
for idx in range(len(result)):
result[idx] = result[idx].name
return result
def test_list_registered_model(self):
self._rm_maker("A")
registered_models = self.store.list_registered_models(max_results=10, page_token=None)
self.assertEqual(len(registered_models), 1)
self.assertEqual(registered_models[0].name, "A")
self.assertIsInstance(registered_models[0], RegisteredModel)
self._rm_maker("B")
self.assertEqual(set(self._list_registered_models()), set(["A", "B"]))
self._rm_maker("BB")
self._rm_maker("BA")
self._rm_maker("AB")
self._rm_maker("BBC")
self.assertEqual(
set(self._list_registered_models()), set(["A", "B", "BB", "BA", "AB", "BBC"])
)
self.store.delete_registered_model(name="BA")
self.store.delete_registered_model(name="B")
self.assertEqual(set(self._list_registered_models()), set(["A", "BB", "AB", "BBC"]))
def test_list_registered_model_paginated_last_page(self):
rms = [self._rm_maker("RM{:03}".format(i)).name for i in range(50)]
returned_rms = []
result = self._list_registered_models(page_token=None, max_results=25)
returned_rms.extend(result)
while result.token:
result = self._list_registered_models(page_token=result.token, max_results=25)
self.assertEqual(len(result), 25)
returned_rms.extend(result)
self.assertEqual(result.token, None)
self.assertEqual(set(rms), set(returned_rms))
def test_list_registered_model_paginated_returns_in_correct_order(self):
rms = [self._rm_maker("RM{:03}".format(i)).name for i in range(50)]
result = self._list_registered_models(max_results=5)
self.assertNotEqual(result.token, None)
self.assertEqual(result, rms[0:5])
result = self._list_registered_models(page_token=result.token, max_results=10)
self.assertNotEqual(result.token, None)
self.assertEqual(result, rms[5:15])
result = self._list_registered_models(page_token=result.token, max_results=20)
self.assertNotEqual(result.token, None)
self.assertEqual(result, rms[15:35])
result = self._list_registered_models(page_token=result.token, max_results=100)
self.assertEqual(result.token, None)
self.assertEqual(result, rms[35:])
def test_list_registered_model_paginated_errors(self):
rms = [self._rm_maker("RM{:03}".format(i)).name for i in range(50)]
with self.assertRaisesRegex(
MlflowException, r"Invalid page token, could not base64-decode"
) as exception_context:
self._list_registered_models(page_token="evilhax", max_results=20)
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
with self.assertRaisesRegex(
MlflowException, r"Invalid value for request parameter max_results"
) as exception_context:
self._list_registered_models(page_token="evilhax", max_results=1e15)
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
self.store.delete_registered_model(name="RM{0:03}".format(0))
self.assertEqual(set(self._list_registered_models(max_results=100)), set(rms[1:]))
def test_get_latest_versions(self):
name = "test_for_latest_versions"
self._rm_maker(name)
rmd1 = self.store.get_registered_model(name=name)
self.assertEqual(rmd1.latest_versions, [])
mv1 = self._mv_maker(name)
self.assertEqual(mv1.version, 1)
rmd2 = self.store.get_registered_model(name=name)
self.assertEqual(self._extract_latest_by_stage(rmd2.latest_versions), {"None": 1})
mv2 = self._mv_maker(name)
self.assertEqual(mv2.version, 2)
self.store.transition_model_version_stage(
name=mv2.name, version=mv2.version, stage="Production", archive_existing_versions=False
)
mv3 = self._mv_maker(name)
self.assertEqual(mv3.version, 3)
self.store.transition_model_version_stage(
name=mv3.name, version=mv3.version, stage="Production", archive_existing_versions=False
)
mv4 = self._mv_maker(name)
self.assertEqual(mv4.version, 4)
self.store.transition_model_version_stage(
name=mv4.name, version=mv4.version, stage="Staging", archive_existing_versions=False
)
rmd4 = self.store.get_registered_model(name=name)
self.assertEqual(
self._extract_latest_by_stage(rmd4.latest_versions),
{"None": 1, "Production": 3, "Staging": 4},
)
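# stages=None and stages=[] are both treated as "all stages"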
self.assertEqual(
self._extract_latest_by_stage(self.store.get_latest_versions(name=name, stages=None)),
{"None": 1, "Production": 3, "Staging": 4},
)
self.assertEqual(
self._extract_latest_by_stage(self.store.get_latest_versions(name=name, stages=[])),
{"None": 1, "Production": 3, "Staging": 4},
)
self.assertEqual(
self._extract_latest_by_stage(
self.store.get_latest_versions(name=name, stages=["Production"])
),
{"Production": 3},
)
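# stage filters are matched case-insensitively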
self.assertEqual(
self._extract_latest_by_stage(
self.store.get_latest_versions(name=name, stages=["production"])
),
{"Production": 3},
)
self.assertEqual(
self._extract_latest_by_stage(
self.store.get_latest_versions(name=name, stages=["pROduction"])
),
{"Production": 3},
)
self.assertEqual(
self._extract_latest_by_stage(
self.store.get_latest_versions(name=name, stages=["None", "Production"])
),
{"None": 1, "Production": 3},
)
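# deleting v3 (the latest Production version) should make v2 the latest Production version again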
self.store.delete_model_version(name=mv3.name, version=mv3.version)
rmd5 = self.store.get_registered_model(name=name)
self.assertEqual(
self._extract_latest_by_stage(rmd5.latest_versions),
{"None": 1, "Production": 2, "Staging": 4},
)
self.assertEqual(
self._extract_latest_by_stage(self.store.get_latest_versions(name=name, stages=None)),
{"None": 1, "Production": 2, "Staging": 4},
)
self.assertEqual(
self._extract_latest_by_stage(
self.store.get_latest_versions(name=name, stages=["Production"])
),
{"Production": 2},
)
def test_set_registered_model_tag(self):
name1 = "SetRegisteredModelTag_TestMod"
name2 = "SetRegisteredModelTag_TestMod 2"
initial_tags = [
RegisteredModelTag("key", "value"),
RegisteredModelTag("anotherKey", "some other value"),
]
self._rm_maker(name1, initial_tags)
self._rm_maker(name2, initial_tags)
new_tag = RegisteredModelTag("randomTag", "not a random value")
self.store.set_registered_model_tag(name1, new_tag)
rm1 = self.store.get_registered_model(name=name1)
all_tags = initial_tags + [new_tag]
self.assertEqual(rm1.tags, {tag.key: tag.value for tag in all_tags})
overriding_tag = RegisteredModelTag("key", "overriding")
self.store.set_registered_model_tag(name1, overriding_tag)
all_tags = [tag for tag in all_tags if tag.key != "key"] + [overriding_tag]
rm1 = self.store.get_registered_model(name=name1)
self.assertEqual(rm1.tags, {tag.key: tag.value for tag in all_tags})
rm2 = self.store.get_registered_model(name=name2)
self.assertEqual(rm2.tags, {tag.key: tag.value for tag in initial_tags})
self.store.delete_registered_model(name1)
with self.assertRaisesRegex(
MlflowException, rf"Registered Model with name={name1} not found"
) as exception_context:
self.store.set_registered_model_tag(name1, overriding_tag)
assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
long_tag = RegisteredModelTag("longTagKey", "a" * 5001)
with self.assertRaisesRegex(
MlflowException,
r"Registered model value '.+' had length \d+, which exceeded length limit of 5000",
) as exception_context:
self.store.set_registered_model_tag(name2, long_tag)
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
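# a value just under the 5000-character limit is accepted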
long_tag = RegisteredModelTag("longTagKey", "a" * 4999)
self.store.set_registered_model_tag(name2, long_tag)
with self.assertRaisesRegex(
MlflowException, r"Tag name cannot be None"
) as exception_context:
self.store.set_registered_model_tag(name2, RegisteredModelTag(key=None, value=""))
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
with self.assertRaisesRegex(
MlflowException, r"Registered model name cannot be empty"
) as exception_context:
self.store.set_registered_model_tag(None, RegisteredModelTag(key="key", value="value"))
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
def test_delete_registered_model_tag(self):
name1 = "DeleteRegisteredModelTag_TestMod"
name2 = "DeleteRegisteredModelTag_TestMod 2"
initial_tags = [
RegisteredModelTag("key", "value"),
RegisteredModelTag("anotherKey", "some other value"),
]
self._rm_maker(name1, initial_tags)
self._rm_maker(name2, initial_tags)
new_tag = RegisteredModelTag("randomTag", "not a random value")
self.store.set_registered_model_tag(name1, new_tag)
self.store.delete_registered_model_tag(name1, "randomTag")
rm1 = self.store.get_registered_model(name=name1)
self.assertEqual(rm1.tags, {tag.key: tag.value for tag in initial_tags})
self.store.delete_registered_model_tag(name1, "key")
rm1 = self.store.get_registered_model(name=name1)
rm2 = self.store.get_registered_model(name=name2)
self.assertEqual(rm1.tags, {"anotherKey": "some other value"})
self.assertEqual(rm2.tags, {tag.key: tag.value for tag in initial_tags})
self.store.delete_registered_model_tag(name1, "key")
rm1 = self.store.get_registered_model(name=name1)
self.assertEqual(rm1.tags, {"anotherKey": "some other value"})
self.store.delete_registered_model(name1)
with self.assertRaisesRegex(
MlflowException, rf"Registered Model with name={name1} not found"
) as exception_context:
self.store.delete_registered_model_tag(name1, "anotherKey")
assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
with self.assertRaisesRegex(
MlflowException, r"Tag name cannot be None"
) as exception_context:
self.store.delete_registered_model_tag(name2, None)
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
with self.assertRaisesRegex(
MlflowException, r"Registered model name cannot be empty"
) as exception_context:
self.store.delete_registered_model_tag(None, "key")
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
def test_create_model_version(self):
name = "test_for_update_MV"
self._rm_maker(name)
run_id = uuid.uuid4().hex
with mock.patch("time.time") as mock_time:
mock_time.return_value = 456778
mv1 = self._mv_maker(name, "a/b/CD", run_id)
self.assertEqual(mv1.name, name)
self.assertEqual(mv1.version, 1)
mvd1 = self.store.get_model_version(mv1.name, mv1.version)
self.assertEqual(mvd1.name, name)
self.assertEqual(mvd1.version, 1)
self.assertEqual(mvd1.current_stage, "None")
self.assertEqual(mvd1.creation_timestamp, 456778000)
self.assertEqual(mvd1.last_updated_timestamp, 456778000)
self.assertEqual(mvd1.description, None)
self.assertEqual(mvd1.source, "a/b/CD")
self.assertEqual(mvd1.run_id, run_id)
self.assertEqual(mvd1.status, "READY")
self.assertEqual(mvd1.status_message, None)
self.assertEqual(mvd1.tags, {})
mv2 = self._mv_maker(name)
mvd2 = self.store.get_model_version(name=mv2.name, version=mv2.version)
self.assertEqual(mv2.version, 2)
self.assertEqual(mvd2.version, 2)
tags = [ModelVersionTag("key", "value"), ModelVersionTag("anotherKey", "some other value")]
mv3 = self._mv_maker(name, tags=tags)
mvd3 = self.store.get_model_version(name=mv3.name, version=mv3.version)
self.assertEqual(mv3.version, 3)
self.assertEqual(mv3.tags, {tag.key: tag.value for tag in tags})
self.assertEqual(mvd3.version, 3)
self.assertEqual(mvd3.tags, {tag.key: tag.value for tag in tags})
run_link = "http://localhost:3000/path/to/run/"
mv4 = self._mv_maker(name, run_link=run_link)
mvd4 = self.store.get_model_version(name, mv4.version)
self.assertEqual(mv4.version, 4)
self.assertEqual(mv4.run_link, run_link)
self.assertEqual(mvd4.version, 4)
self.assertEqual(mvd4.run_link, run_link)
description = "the best model ever"
mv5 = self._mv_maker(name, description=description)
mvd5 = self.store.get_model_version(name, mv5.version)
self.assertEqual(mv5.version, 5)
self.assertEqual(mv5.description, description)
self.assertEqual(mvd5.version, 5)
self.assertEqual(mvd5.description, description)
mv6 = self._mv_maker(name, run_id=None)
mvd6 = self.store.get_model_version(name, mv6.version)
self.assertEqual(mv6.version, 6)
self.assertEqual(mv6.run_id, None)
self.assertEqual(mvd6.version, 6)
self.assertEqual(mvd6.run_id, None)
def test_update_model_version(self):
name = "test_for_update_MV"
self._rm_maker(name)
mv1 = self._mv_maker(name)
mvd1 = self.store.get_model_version(name=mv1.name, version=mv1.version)
self.assertEqual(mvd1.name, name)
self.assertEqual(mvd1.version, 1)
self.assertEqual(mvd1.current_stage, "None")
self.store.transition_model_version_stage(
name=mv1.name, version=mv1.version, stage="Production", archive_existing_versions=False
)
mvd2 = self.store.get_model_version(name=mv1.name, version=mv1.version)
self.assertEqual(mvd2.name, name)
self.assertEqual(mvd2.version, 1)
self.assertEqual(mvd2.current_stage, "Production")
self.assertEqual(mvd2.description, None)
self.store.update_model_version(
name=mv1.name, version=mv1.version, description="test model version"
)
mvd3 = self.store.get_model_version(name=mv1.name, version=mv1.version)
self.assertEqual(mvd3.name, name)
self.assertEqual(mvd3.version, 1)
self.assertEqual(mvd3.current_stage, "Production")
self.assertEqual(mvd3.description, "test model version")
with self.assertRaisesRegex(
MlflowException, r"Invalid Model Version stage unknown"
) as exception_context:
self.store.transition_model_version_stage(
mv1.name, mv1.version, stage="unknown", archive_existing_versions=False
)
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
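# stage names are case-insensitive and canonicalized to the system stage names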
for stage_name in ["STAGING", "staging", "StAgInG"]:
self.store.transition_model_version_stage(
name=mv1.name,
version=mv1.version,
stage=stage_name,
archive_existing_versions=False,
)
mvd5 = self.store.get_model_version(name=mv1.name, version=mv1.version)
self.assertEqual(mvd5.current_stage, "Staging")
def test_transition_model_version_stage_when_archive_existing_versions_is_false(self):
name = "model"
self._rm_maker(name)
mv1 = self._mv_maker(name)
mv2 = self._mv_maker(name)
mv3 = self._mv_maker(name)
for stage in ["Archived", "None"]:
self.store.transition_model_version_stage(name, mv1.version, stage, False)
self.store.transition_model_version_stage(name, mv1.version, "Staging", False)
self.store.transition_model_version_stage(name, mv2.version, "Production", False)
self.store.transition_model_version_stage(name, mv3.version, "Staging", False)
mvd1 = self.store.get_model_version(name=name, version=mv1.version)
mvd2 = self.store.get_model_version(name=name, version=mv2.version)
mvd3 = self.store.get_model_version(name=name, version=mv3.version)
self.assertEqual(mvd1.current_stage, "Staging")
self.assertEqual(mvd2.current_stage, "Production")
self.assertEqual(mvd3.current_stage, "Staging")
self.store.transition_model_version_stage(name, mv3.version, "Production", False)
mvd1 = self.store.get_model_version(name=name, version=mv1.version)
mvd2 = self.store.get_model_version(name=name, version=mv2.version)
mvd3 = self.store.get_model_version(name=name, version=mv3.version)
self.assertEqual(mvd1.current_stage, "Staging")
self.assertEqual(mvd2.current_stage, "Production")
self.assertEqual(mvd3.current_stage, "Production")
def test_transition_model_version_stage_when_archive_existing_versions_is_true(self):
name = "model"
self._rm_maker(name)
mv1 = self._mv_maker(name)
mv2 = self._mv_maker(name)
mv3 = self._mv_maker(name)
msg = (
r"Model version transition cannot archive existing model versions "
r"because .+ is not an Active stage"
)
for stage in ["Archived", "None"]:
with self.assertRaisesRegex(MlflowException, msg):
self.store.transition_model_version_stage(name, mv1.version, stage, True)
self.store.transition_model_version_stage(name, mv1.version, "Staging", False)
self.store.transition_model_version_stage(name, mv2.version, "Production", False)
self.store.transition_model_version_stage(name, mv3.version, "Staging", True)
mvd1 = self.store.get_model_version(name=name, version=mv1.version)
mvd2 = self.store.get_model_version(name=name, version=mv2.version)
mvd3 = self.store.get_model_version(name=name, version=mv3.version)
self.assertEqual(mvd1.current_stage, "Archived")
self.assertEqual(mvd2.current_stage, "Production")
self.assertEqual(mvd3.current_stage, "Staging")
self.assertEqual(mvd1.last_updated_timestamp, mvd3.last_updated_timestamp)
self.store.transition_model_version_stage(name, mv3.version, "Production", True)
mvd1 = self.store.get_model_version(name=name, version=mv1.version)
mvd2 = self.store.get_model_version(name=name, version=mv2.version)
mvd3 = self.store.get_model_version(name=name, version=mv3.version)
self.assertEqual(mvd1.current_stage, "Archived")
self.assertEqual(mvd2.current_stage, "Archived")
self.assertEqual(mvd3.current_stage, "Production")
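# mv2 was archived as part of mv3's transition, so both rows share one last_updated_timestamp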
self.assertEqual(mvd2.last_updated_timestamp, mvd3.last_updated_timestamp)
for uncanonical_stage_name in ["STAGING", "staging", "StAgInG"]:
self.store.transition_model_version_stage(mv1.name, mv1.version, "Staging", False)
self.store.transition_model_version_stage(mv2.name, mv2.version, "None", False)
self.store.transition_model_version_stage(
mv2.name, mv2.version, uncanonical_stage_name, True
)
mvd1 = self.store.get_model_version(name=mv1.name, version=mv1.version)
mvd2 = self.store.get_model_version(name=mv2.name, version=mv2.version)
self.assertEqual(mvd1.current_stage, "Archived")
self.assertEqual(mvd2.current_stage, "Staging")
def test_delete_model_version(self):
name = "test_for_delete_MV"
initial_tags = [
ModelVersionTag("key", "value"),
ModelVersionTag("anotherKey", "some other value"),
]
self._rm_maker(name)
mv = self._mv_maker(name, tags=initial_tags)
mvd = self.store.get_model_version(name=mv.name, version=mv.version)
self.assertEqual(mvd.name, name)
self.store.delete_model_version(name=mv.name, version=mv.version)
with self.assertRaisesRegex(
MlflowException, rf"Model Version \(name={mv.name}, version={mv.version}\) not found"
) as exception_context:
self.store.get_model_version(name=mv.name, version=mv.version)
assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
with self.assertRaisesRegex(
MlflowException, rf"Model Version \(name={mv.name}, version={mv.version}\) not found"
) as exception_context:
self.store.update_model_version(mv.name, mv.version, description="deleted!")
assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
with self.assertRaisesRegex(
MlflowException, rf"Model Version \(name={mv.name}, version={mv.version}\) not found"
) as exception_context:
self.store.delete_model_version(name=mv.name, version=mv.version)
assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
def test_delete_model_version_redaction(self):
name = "test_for_delete_MV_redaction"
run_link = "http://localhost:5000/path/to/run"
run_id = "12345"
source = "path/to/source"
self._rm_maker(name)
mv = self._mv_maker(name, source=source, run_id=run_id, run_link=run_link)
mvd = self.store.get_model_version(name=name, version=mv.version)
self.assertEqual(mvd.run_link, run_link)
self.assertEqual(mvd.run_id, run_id)
self.assertEqual(mvd.source, source)
self.store.delete_model_version(name, mv.version)
mvd_deleted = self.store._get_sql_model_version_including_deleted(
name=name, version=mv.version
)
self.assertIn("REDACTED", mvd_deleted.run_link)
self.assertIn("REDACTED", mvd_deleted.source)
self.assertIn("REDACTED", mvd_deleted.run_id)
def test_get_model_version_download_uri(self):
name = "test_for_update_MV"
self._rm_maker(name)
source_path = "path/to/source"
mv = self._mv_maker(name, source=source_path, run_id=uuid.uuid4().hex)
mvd1 = self.store.get_model_version(name=mv.name, version=mv.version)
self.assertEqual(mvd1.name, name)
self.assertEqual(mvd1.source, source_path)
self.assertEqual(
self.store.get_model_version_download_uri(name=mv.name, version=mv.version), source_path
)
self.store.transition_model_version_stage(
name=mv.name, version=mv.version, stage="Production", archive_existing_versions=False
)
self.store.update_model_version(
name=mv.name, version=mv.version, description="Test for Path"
)
mvd2 = self.store.get_model_version(name=mv.name, version=mv.version)
self.assertEqual(mvd2.source, source_path)
self.assertEqual(
self.store.get_model_version_download_uri(name=mv.name, version=mv.version), source_path
)
self.store.delete_model_version(name=mv.name, version=mv.version)
with self.assertRaisesRegex(
MlflowException, rf"Model Version \(name={mv.name}, version={mv.version}\) not found"
) as exception_context:
self.store.get_model_version_download_uri(name=mv.name, version=mv.version)
assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
def test_search_model_versions(self):
name = "test_for_search_MV"
self._rm_maker(name)
run_id_1 = uuid.uuid4().hex
run_id_2 = uuid.uuid4().hex
run_id_3 = uuid.uuid4().hex
mv1 = self._mv_maker(name=name, source="A/B", run_id=run_id_1)
self.assertEqual(mv1.version, 1)
mv2 = self._mv_maker(name=name, source="A/C", run_id=run_id_2)
self.assertEqual(mv2.version, 2)
mv3 = self._mv_maker(name=name, source="A/D", run_id=run_id_2)
self.assertEqual(mv3.version, 3)
mv4 = self._mv_maker(name=name, source="A/D", run_id=run_id_3)
self.assertEqual(mv4.version, 4)
def search_versions(filter_string):
return [mvd.version for mvd in self.store.search_model_versions(filter_string)]
self.assertEqual(set(search_versions("name='%s'" % name)), set([1, 2, 3, 4]))
self.assertEqual(set(search_versions("run_id='%s'" % run_id_1)), set([1]))
self.assertEqual(set(search_versions("run_id='%s'" % run_id_2)), set([2, 3]))
self.assertEqual(
set(
search_versions(
"run_id IN ('{run_id_1}','{run_id_2}')".format(
run_id_1=run_id_1, run_id_2=run_id_2
)
)
),
set([1, 2, 3]),
)
with self.assertRaisesRegex(
MlflowException,
(
r"While parsing a list in the query, "
r"expected string value or punctuation, "
r"but got different type in list"
),
) as exception_context:
search_versions("run_id IN (1,2,3)")
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
with self.assertRaisesRegex(
MlflowException,
(
r"While parsing a list in the query, "
r"expected a non-empty list of string values, "
r"but got empty list"
),
) as exception_context:
search_versions("run_id IN ()")
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
with self.assertRaisesRegex(
MlflowException, r"Invalid clause\(s\) in filter string"
) as exception_context:
search_versions("run_id IN (")
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
with self.assertRaisesRegex(MlflowException, r"Invalid filter '.+'") as exception_context:
search_versions("run_id IN")
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
with self.assertRaisesRegex(
MlflowException,
(
r"While parsing a list in the query, "
r"expected a non-empty list of string values, "
r"but got ill-formed list"
),
) as exception_context:
search_versions("run_id IN (,)")
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
with self.assertRaisesRegex(
MlflowException,
(
r"While parsing a list in the query, "
r"expected a non-empty list of string values, "
r"but got ill-formed list"
),
) as exception_context:
search_versions("run_id IN ('runid1',,'runid2')")
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
with self.assertRaisesRegex(
MlflowException, r"Search filter '.+' contains multiple expressions"
) as exception_context:
search_versions(
"name='{name}]' AND run_id IN ('{run_id_1}','{run_id_2}')".format(
name=name, run_id_1=run_id_1, run_id_2=run_id_2
)
)
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
self.assertEqual(set(search_versions("source_path = 'A/D'")), set([3, 4]))
self.assertEqual(len(search_versions("source_path = 'A'")), 0)
self.assertEqual(len(search_versions("source_path = 'A/'")), 0)
self.assertEqual(len(search_versions("source_path = ''")), 0)
self.store.delete_model_version(name=mv4.name, version=mv4.version)
self.assertEqual(set(search_versions("")), set([1, 2, 3]))
self.assertEqual(set(search_versions(None)), set([1, 2, 3]))
self.assertEqual(set(search_versions("name='%s'" % name)), set([1, 2, 3]))
self.assertEqual(set(search_versions("source_path = 'A/D'")), set([3]))
self.store.transition_model_version_stage(
name=mv1.name, version=mv1.version, stage="production", archive_existing_versions=False
)
self.store.update_model_version(
name=mv1.name, version=mv1.version, description="Online prediction model!"
)
mvds = self.store.search_model_versions("run_id = '%s'" % run_id_1)
assert 1 == len(mvds)
assert isinstance(mvds[0], ModelVersion)
assert mvds[0].current_stage == "Production"
assert mvds[0].run_id == run_id_1
assert mvds[0].source == "A/B"
assert mvds[0].description == "Online prediction model!"
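    # Illustrative summary (an editorial note, not part of the original suite):
    # the search filter grammar exercised above accepts exactly one comparison
    # per query, requires string values to be quoted, and only allows IN lists
    # made of non-empty, well-formed quoted strings; anything else raises
    # INVALID_PARAMETER_VALUE.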
def _search_registered_models(
self, filter_string, max_results=10, order_by=None, page_token=None
):
result = self.store.search_registered_models(
filter_string=filter_string,
max_results=max_results,
order_by=order_by,
page_token=page_token,
)
return [registered_model.name for registered_model in result], result.token
def test_search_registered_models(self):
prefix = "test_for_search_"
names = [prefix + name for name in ["RM1", "RM2", "RM3", "RM4", "RM4A", "RM4a"]]
for name in names:
self._rm_maker(name)
rms, _ = self._search_registered_models(None)
self.assertEqual(rms, names)
rms, _ = self._search_registered_models("name='{}'".format(names[0]))
self.assertEqual(rms, [names[0]])
rms, _ = self._search_registered_models("name='{}'".format(names[0] + "cats"))
self.assertEqual(rms, [])
rms, _ = self._search_registered_models("name LIKE '{}%'".format(prefix))
self.assertEqual(rms, names)
rms, _ = self._search_registered_models("name LIKE '%RM%'")
self.assertEqual(rms, names)
rms, _ = self._search_registered_models("name LIKE '_e%'")
self.assertEqual(rms, names)
rms, _ = self._search_registered_models("name LIKE '{}%'".format(prefix + "RM4A"))
self.assertEqual(rms, [names[4]])
rms, _ = self._search_registered_models("name LIKE '{}%'".format(prefix + "cats"))
self.assertEqual(rms, [])
rms, _ = self._search_registered_models("name lIkE '%blah%'")
self.assertEqual(rms, [])
rms, _ = self._search_registered_models("name like '{}%'".format(prefix + "RM4A"))
self.assertEqual(rms, [names[4]])
rms, _ = self._search_registered_models("name ILIKE '{}%'".format(prefix + "RM4A"))
self.assertEqual(rms, names[4:])
rms, _ = self._search_registered_models("name ILIKE '%RM4a'")
self.assertEqual(rms, names[4:])
rms, _ = self._search_registered_models("name ILIKE '{}%'".format(prefix + "cats"))
self.assertEqual(rms, [])
rms, _ = self._search_registered_models("name iLike '%blah%'")
self.assertEqual(rms, [])
rms, _ = self._search_registered_models("name iLike '%%'")
self.assertEqual(rms, names)
rms, _ = self._search_registered_models("name ilike '%RM4a'")
self.assertEqual(rms, names[4:])
with self.assertRaisesRegex(
MlflowException, r"Expected a quoted string value for attributes"
) as exception_context:
self._search_registered_models("name!=something")
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
with self.assertRaisesRegex(
MlflowException, r"Invalid attribute key '.+' specified"
) as exception_context:
self._search_registered_models("run_id='%s'" % "somerunID")
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
with self.assertRaisesRegex(
MlflowException, r"Invalid attribute key '.+' specified"
) as exception_context:
self._search_registered_models("source_path = 'A/D'")
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
with self.assertRaisesRegex(
MlflowException, r"Invalid clause\(s\) in filter string"
) as exception_context:
self._search_registered_models("evilhax = true")
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
self.store.delete_registered_model(name=names[-1])
self.assertEqual(self._search_registered_models(None, max_results=1000), (names[:-1], None))
self.assertEqual(self._search_registered_models("name='{}'".format(names[-1])), ([], None))
self.assertEqual(
self._search_registered_models("name LIKE '{}%'".format(prefix)), (names[0:5], None)
)
self.assertEqual(
self._search_registered_models("name ILIKE '{}%'".format(prefix + "RM4A")),
([names[4]], None),
)
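    # Quick reference (illustrative note, inferred from the assertions above):
    # LIKE matches case-sensitively while ILIKE is case-insensitive, the
    # keywords themselves may be written in any case (lIkE, ilike, ...), and
    # '%' / '_' keep their usual SQL wildcard meanings.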
def test_parse_search_registered_models_order_by(self):
parsed = SqlAlchemyStore._parse_search_registered_models_order_by([])
self.assertEqual([str(x) for x in parsed], ["registered_models.name ASC"])
parsed = SqlAlchemyStore._parse_search_registered_models_order_by(["name DESC"])
self.assertEqual([str(x) for x in parsed], ["registered_models.name DESC"])
msg = "`order_by` contains duplicate fields:"
with self.assertRaisesRegex(MlflowException, msg):
SqlAlchemyStore._parse_search_registered_models_order_by(
["last_updated_timestamp", "last_updated_timestamp"]
)
with self.assertRaisesRegex(MlflowException, msg):
SqlAlchemyStore._parse_search_registered_models_order_by(["timestamp", "timestamp"])
with self.assertRaisesRegex(MlflowException, msg):
SqlAlchemyStore._parse_search_registered_models_order_by(
["timestamp", "last_updated_timestamp"],
)
with self.assertRaisesRegex(MlflowException, msg):
SqlAlchemyStore._parse_search_registered_models_order_by(
["last_updated_timestamp ASC", "last_updated_timestamp DESC"],
)
with self.assertRaisesRegex(MlflowException, msg):
SqlAlchemyStore._parse_search_registered_models_order_by(
["last_updated_timestamp", "last_updated_timestamp DESC"],
)
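    # Illustrative note: `timestamp` is an alias for `last_updated_timestamp`,
    # which is why mixing the two (or repeating one with different sort
    # directions) is rejected as a duplicate order_by field above.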
def test_search_registered_model_pagination(self):
rms = [self._rm_maker("RM{:03}".format(i)).name for i in range(50)]
returned_rms = []
query = "name LIKE 'RM%'"
result, token = self._search_registered_models(query, page_token=None, max_results=5)
returned_rms.extend(result)
while token:
result, token = self._search_registered_models(query, page_token=token, max_results=5)
returned_rms.extend(result)
self.assertEqual(rms, returned_rms)
result, token1 = self._search_registered_models(query, max_results=5)
self.assertNotEqual(token1, None)
self.assertEqual(result, rms[0:5])
result, token2 = self._search_registered_models(query, page_token=token1, max_results=10)
self.assertNotEqual(token2, None)
self.assertEqual(result, rms[5:15])
result, token3 = self._search_registered_models(query, page_token=token2, max_results=20)
self.assertNotEqual(token3, None)
self.assertEqual(result, rms[15:35])
result, token4 = self._search_registered_models(query, page_token=token3, max_results=100)
self.assertEqual(token4, None)
self.assertEqual(result, rms[35:])
with self.assertRaisesRegex(
MlflowException, r"Invalid page token, could not base64-decode"
) as exception_context:
self._search_registered_models(query, page_token="evilhax", max_results=20)
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
with self.assertRaisesRegex(
MlflowException, r"Invalid value for request parameter max_results"
) as exception_context:
self._search_registered_models(query, page_token="evilhax", max_results=1e15)
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
self.assertIn(
"Invalid value for request parameter max_results", exception_context.exception.message
)
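    # Pagination sketch (editorial note): each call returns a result window
    # plus an opaque base64 token for the next one; chaining max_results of
    # 5, 10, 20 and 100 walks rms[0:5], rms[5:15], rms[15:35] and rms[35:],
    # and the final page comes back with a None token.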
def test_search_registered_model_order_by(self):
rms = []
for i in range(50):
with mock.patch("mlflow.store.model_registry.sqlalchemy_store.now", return_value=i):
rms.append(self._rm_maker("RM{:03}".format(i)).name)
returned_rms = []
query = "name LIKE 'RM%'"
result, token = self._search_registered_models(
query, page_token=None, order_by=["name DESC"], max_results=5
)
returned_rms.extend(result)
while token:
result, token = self._search_registered_models(
query, page_token=token, order_by=["name DESC"], max_results=5
)
returned_rms.extend(result)
self.assertEqual(rms[::-1], returned_rms)
result, _ = self._search_registered_models(
query, page_token=None, order_by=["last_updated_timestamp DESC"], max_results=100
)
self.assertEqual(rms[::-1], result)
result, _ = self._search_registered_models(
query, page_token=None, order_by=["timestamp DESC"], max_results=100
)
self.assertEqual(rms[::-1], result)
result, _ = self._search_registered_models(
query, page_token=None, order_by=["last_updated_timestamp ASC"], max_results=100
)
self.assertEqual(rms, result)
result, _ = self._search_registered_models(
query, page_token=None, order_by=["timestamp ASC"], max_results=100
)
self.assertEqual(rms, result)
result, _ = self._search_registered_models(
query, page_token=None, order_by=["timestamp"], max_results=100
)
self.assertEqual(rms, result)
result, _ = self._search_registered_models(
query, page_token=None, order_by=["name ASC"], max_results=100
)
self.assertEqual(rms, result)
result, _ = self._search_registered_models(
query, page_token=None, order_by=["last_updated_timestamp"], max_results=100
)
self.assertEqual(rms, result)
with mock.patch("mlflow.store.model_registry.sqlalchemy_store.now", return_value=1):
rm1 = self._rm_maker("MR1").name
rm2 = self._rm_maker("MR2").name
with mock.patch("mlflow.store.model_registry.sqlalchemy_store.now", return_value=2):
rm3 = self._rm_maker("MR3").name
rm4 = self._rm_maker("MR4").name
query = "name LIKE 'MR%'"
result, _ = self._search_registered_models(
query,
page_token=None,
order_by=["last_updated_timestamp ASC", "name DESC"],
max_results=100,
)
self.assertEqual([rm2, rm1, rm4, rm3], result)
result, _ = self._search_registered_models(
query, page_token=None, order_by=["timestamp ASC", "name DESC"], max_results=100
)
self.assertEqual([rm2, rm1, rm4, rm3], result)
result, _ = self._search_registered_models(
query, page_token=None, order_by=[], max_results=100
)
self.assertEqual([rm1, rm2, rm3, rm4], result)
result, _ = self._search_registered_models(
query, page_token=None, order_by=["last_updated_timestamp DESC"], max_results=100
)
self.assertEqual([rm3, rm4, rm1, rm2], result)
result, _ = self._search_registered_models(
query, page_token=None, order_by=["timestamp\tASC"], max_results=100
)
self.assertEqual([rm1, rm2, rm3, rm4], result)
result, _ = self._search_registered_models(
query, page_token=None, order_by=["timestamp\r\rASC"], max_results=100
)
self.assertEqual([rm1, rm2, rm3, rm4], result)
result, _ = self._search_registered_models(
query, page_token=None, order_by=["timestamp\nASC"], max_results=100
)
self.assertEqual([rm1, rm2, rm3, rm4], result)
result, _ = self._search_registered_models(
query, page_token=None, order_by=["timestamp ASC"], max_results=100
)
self.assertEqual([rm1, rm2, rm3, rm4], result)
result, _ = self._search_registered_models(
query, page_token=None, order_by=["timestamp asc"], max_results=100
)
self.assertEqual([rm1, rm2, rm3, rm4], result)
result, _ = self._search_registered_models(
query, page_token=None, order_by=["timestamp aSC"], max_results=100
)
self.assertEqual([rm1, rm2, rm3, rm4], result)
result, _ = self._search_registered_models(
query, page_token=None, order_by=["timestamp desc", "name desc"], max_results=100
)
self.assertEqual([rm4, rm3, rm2, rm1], result)
result, _ = self._search_registered_models(
query, page_token=None, order_by=["timestamp deSc", "name deSc"], max_results=100
)
self.assertEqual([rm4, rm3, rm2, rm1], result)
def test_search_registered_model_order_by_errors(self):
query = "name LIKE 'RM%'"
with self.assertRaisesRegex(
MlflowException, r"Invalid order by key '.+' specified"
) as exception_context:
self._search_registered_models(
query,
page_token=None,
order_by=["name ASC", "creation_timestamp DESC"],
max_results=5,
)
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
with self.assertRaisesRegex(
MlflowException, r"Invalid order_by clause '.+'"
) as exception_context:
self._search_registered_models(
query,
page_token=None,
order_by=["name ASC", "last_updated_timestamp DESC blah"],
max_results=5,
)
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
def test_set_model_version_tag(self):
name1 = "SetModelVersionTag_TestMod"
name2 = "SetModelVersionTag_TestMod 2"
initial_tags = [
ModelVersionTag("key", "value"),
ModelVersionTag("anotherKey", "some other value"),
]
self._rm_maker(name1)
self._rm_maker(name2)
run_id_1 = uuid.uuid4().hex
run_id_2 = uuid.uuid4().hex
run_id_3 = uuid.uuid4().hex
self._mv_maker(name1, "A/B", run_id_1, initial_tags)
self._mv_maker(name1, "A/C", run_id_2, initial_tags)
self._mv_maker(name2, "A/D", run_id_3, initial_tags)
new_tag = ModelVersionTag("randomTag", "not a random value")
self.store.set_model_version_tag(name1, 1, new_tag)
all_tags = initial_tags + [new_tag]
rm1mv1 = self.store.get_model_version(name1, 1)
self.assertEqual(rm1mv1.tags, {tag.key: tag.value for tag in all_tags})
overriding_tag = ModelVersionTag("key", "overriding")
self.store.set_model_version_tag(name1, 1, overriding_tag)
all_tags = [tag for tag in all_tags if tag.key != "key"] + [overriding_tag]
rm1mv1 = self.store.get_model_version(name1, 1)
self.assertEqual(rm1mv1.tags, {tag.key: tag.value for tag in all_tags})
rm1mv2 = self.store.get_model_version(name1, 2)
rm2mv1 = self.store.get_model_version(name2, 1)
self.assertEqual(rm1mv2.tags, {tag.key: tag.value for tag in initial_tags})
self.assertEqual(rm2mv1.tags, {tag.key: tag.value for tag in initial_tags})
self.store.delete_model_version(name1, 2)
with self.assertRaisesRegex(
MlflowException, rf"Model Version \(name={name1}, version=2\) not found"
) as exception_context:
self.store.set_model_version_tag(name1, 2, overriding_tag)
assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
long_tag = ModelVersionTag("longTagKey", "a" * 5001)
with self.assertRaisesRegex(
MlflowException,
r"Model version value '.+' had length \d+, which exceeded length limit of 5000",
) as exception_context:
self.store.set_model_version_tag(name1, 1, long_tag)
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
long_tag = ModelVersionTag("longTagKey", "a" * 4999)
self.store.set_model_version_tag(name1, 1, long_tag)
with self.assertRaisesRegex(
MlflowException, r"Tag name cannot be None"
) as exception_context:
self.store.set_model_version_tag(name2, 1, ModelVersionTag(key=None, value=""))
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
with self.assertRaisesRegex(
MlflowException, r"Registered model name cannot be empty"
) as exception_context:
self.store.set_model_version_tag(None, 1, ModelVersionTag(key="key", value="value"))
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
with self.assertRaisesRegex(
MlflowException, r"Model version must be an integer"
) as exception_context:
self.store.set_model_version_tag(
name2, "I am not a version", ModelVersionTag(key="key", value="value")
)
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
def test_delete_model_version_tag(self):
name1 = "DeleteModelVersionTag_TestMod"
name2 = "DeleteModelVersionTag_TestMod 2"
initial_tags = [
ModelVersionTag("key", "value"),
ModelVersionTag("anotherKey", "some other value"),
]
self._rm_maker(name1)
self._rm_maker(name2)
run_id_1 = uuid.uuid4().hex
run_id_2 = uuid.uuid4().hex
run_id_3 = uuid.uuid4().hex
self._mv_maker(name1, "A/B", run_id_1, initial_tags)
self._mv_maker(name1, "A/C", run_id_2, initial_tags)
self._mv_maker(name2, "A/D", run_id_3, initial_tags)
new_tag = ModelVersionTag("randomTag", "not a random value")
self.store.set_model_version_tag(name1, 1, new_tag)
self.store.delete_model_version_tag(name1, 1, "randomTag")
rm1mv1 = self.store.get_model_version(name1, 1)
self.assertEqual(rm1mv1.tags, {tag.key: tag.value for tag in initial_tags})
self.store.delete_model_version_tag(name1, 1, "key")
rm1mv1 = self.store.get_model_version(name1, 1)
rm1mv2 = self.store.get_model_version(name1, 2)
rm2mv1 = self.store.get_model_version(name2, 1)
self.assertEqual(rm1mv1.tags, {"anotherKey": "some other value"})
self.assertEqual(rm1mv2.tags, {tag.key: tag.value for tag in initial_tags})
self.assertEqual(rm2mv1.tags, {tag.key: tag.value for tag in initial_tags})
self.store.delete_model_version_tag(name1, 1, "key")
rm1mv1 = self.store.get_model_version(name1, 1)
self.assertEqual(rm1mv1.tags, {"anotherKey": "some other value"})
self.store.delete_model_version(name2, 1)
with self.assertRaisesRegex(
MlflowException, rf"Model Version \(name={name2}, version=1\) not found"
) as exception_context:
self.store.delete_model_version_tag(name2, 1, "key")
assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
with self.assertRaisesRegex(
MlflowException, r"Tag name cannot be None"
) as exception_context:
self.store.delete_model_version_tag(name1, 2, None)
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
with self.assertRaisesRegex(
MlflowException, r"Registered model name cannot be empty"
) as exception_context:
self.store.delete_model_version_tag(None, 2, "key")
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
with self.assertRaisesRegex(
MlflowException, r"Model version must be an integer"
) as exception_context:
self.store.delete_model_version_tag(name1, "I am not a version", "key")
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
# ==== digits/tools/test_parse_s3.py (Benedict93/DIGITS, BSD-3-Clause) ====
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
import itertools
import os
import shutil
import tempfile
import mock
from nose.tools import raises, assert_raises
try:
from . import parse_s3
from digits.tools.mock_s3_walker import MockS3Walker
import_failed = False
except ImportError:
import_failed = True
from digits import test_utils
test_utils.skipIfNotFramework('none')
class TestUnescape():
@classmethod
def setUpClass(cls):
if import_failed:
test_utils.skipTest('Could not import parse_s3, most likely cause is Boto not installed')
def test_hello(self):
assert parse_s3.unescape('hello') == 'hello'
def test_space(self):
assert parse_s3.unescape('%20') == ' '
class TestValidateS3():
@classmethod
def setUpClass(cls):
if import_failed:
test_utils.skipTest('Could not import parse_s3, most likely cause is Boto not installed')
cls.mock_walker = MockS3Walker()
def test_non_existent_bucket(self):
result = parse_s3.validate_s3(self.mock_walker, 'nonexistentbucket', '')
assert not result
def test_empty_bucket(self):
result = parse_s3.validate_s3(self.mock_walker, 'emptybucket', '')
assert not result
def test_valid_endpoint(self):
result = parse_s3.validate_s3(self.mock_walker, 'validbucket', '')
assert result
class TestValidateOutputFile():
@classmethod
def setUpClass(cls):
if import_failed:
test_utils.skipTest('Could not import parse_s3, most likely cause is Boto not installed')
cls.tmpdir = tempfile.mkdtemp()
        _handle, cls.tmpfile = tempfile.mkstemp(dir=cls.tmpdir)
        os.close(_handle)
@classmethod
def tearDownClass(cls):
try:
shutil.rmtree(cls.tmpdir)
except IOError:
pass
def test_missing_file(self):
assert parse_s3.validate_output_file(None) is True, 'all new files should be valid'
def test_file(self):
assert parse_s3.validate_output_file(os.path.join(self.tmpdir, 'output.txt')) is True
@mock.patch('os.access')
def test_local_file(self, mock_access):
mock_access.return_value = True
assert parse_s3.validate_output_file('not-a-file.txt') is True, 'relative paths should be accepted'
@mock.patch('os.access')
def test_not_writeable(self, mock_access):
mock_access.return_value = False
assert parse_s3.validate_output_file(self.tmpfile) is False, 'should not succeed without write permission'
def test_existing_file(self):
assert parse_s3.validate_output_file(self.tmpfile) is False
def test_nonexistent_dir(self):
assert parse_s3.validate_output_file(
os.path.join(
os.path.abspath('not-a-dir'),
'output.txt'
)
) is False
class TestValidateInputFile():
@classmethod
def setUpClass(cls):
if import_failed:
test_utils.skipTest('Could not import parse_s3, most likely cause is Boto not installed')
_handle, cls.tmpfile = tempfile.mkstemp()
os.close(_handle)
@classmethod
def tearDownClass(cls):
os.remove(cls.tmpfile)
def test_missing_file(self):
assert parse_s3.validate_input_file('not-a-file.txt') is False, 'should not pass on missing file'
@mock.patch('os.access')
def test_not_readable(self, mock_access):
mock_access.return_value = False
assert parse_s3.validate_input_file(self.tmpfile) is False, 'should not succeed without read permission'
class TestValidateRange():
@classmethod
def setUpClass(cls):
if import_failed:
test_utils.skipTest('Could not import parse_s3, most likely cause is Boto not installed')
def test_no_range(self):
assert parse_s3.validate_range(0) is True
def test_min_less(self):
assert parse_s3.validate_range(-1, min_value=0) is False
def test_min_equal(self):
assert parse_s3.validate_range(0, min_value=0) is True
def test_min_more(self):
assert parse_s3.validate_range(1, min_value=0) is True
def test_max_less(self):
assert parse_s3.validate_range(9, max_value=10) is True
def test_max_equal(self):
assert parse_s3.validate_range(10, max_value=10) is True
def test_max_more(self):
assert parse_s3.validate_range(11, max_value=10) is False
def test_allow_none_true(self):
assert parse_s3.validate_range(None, allow_none=True) is True
def test_allow_none_false(self):
assert parse_s3.validate_range(None, allow_none=False) is False
def test_string(self):
assert parse_s3.validate_range('foo') is False
@mock.patch('digits.tools.parse_s3.validate_output_file')
@mock.patch('digits.tools.parse_s3.validate_input_file')
class TestCalculatePercentages():
@classmethod
def setUpClass(cls):
if import_failed:
test_utils.skipTest('Could not import parse_s3, most likely cause is Boto not installed')
@raises(AssertionError)
def test_making_0(self, mock_input, mock_output):
parse_s3.calculate_percentages(None, None, None, None, None, None, None)
def test_making_1(self, mock_input, mock_output):
mock_input.return_value = True
mock_output.return_value = True
expected_outputs = [
('train_file', (100, 0, 0)),
('val_file', (0, 100, 0)),
('test_file', (0, 0, 100))
]
for supplied, expected in expected_outputs:
args = {k: None for k in ['labels_file', 'train_file', 'percent_train',
'val_file', 'percent_val', 'test_file', 'percent_test']}
args.update({supplied: ''})
output = parse_s3.calculate_percentages(**args)
assert output == expected, 'expected output of {}, got {}'.format(output, expected)
def test_making_2(self, mock_input, mock_output):
mock_input.return_value = True
mock_output.return_value = True
permutes = itertools.combinations(['train', 'val', 'test'], 2)
        # itertools.izip is Python 2 only; zip behaves the same here on 2 and 3
        expected_outputs = zip(permutes, itertools.repeat((32, 68)))
for supplied, expected in expected_outputs:
args = {k: None for k in ['labels_file', 'train_file', 'percent_train',
'val_file', 'percent_val', 'test_file', 'percent_test']}
args.update({k + '_file': '' for k in supplied})
            args.update({'percent_' + k: v for k, v in zip(supplied, expected)})
# Tricky line. itertools returns combinations in sorted order, always.
# The order of the returned non-zero values should always be correct.
output = [x for x in parse_s3.calculate_percentages(**args) if x != 0]
assert output == list(expected), 'expected output of {}, got {}'.format(output, expected)
def test_making_3_all_given(self, mock_input, mock_output):
mock_input.return_value = True
mock_output.return_value = True
expected = (25, 30, 45)
assert parse_s3.calculate_percentages(
labels_file='not-a-file.txt',
train_file='not-a-file.txt', percent_train=25,
val_file='not-a-file.txt', percent_val=30,
test_file='not-a-file.txt', percent_test=45
) == expected, 'Calculate percentages should return identical values of {}'.format(expected)
def test_making_3_2_given(self, mock_input, mock_output):
mock_input.return_value = True
mock_output.return_value = True
expected = 45
assert parse_s3.calculate_percentages(
labels_file='not-a-file.txt',
train_file='not-a-file.txt', percent_train=25,
val_file='not-a-file.txt', percent_val=30,
test_file='not-a-file.txt', percent_test=None
)[2] == expected, 'Calculate percentages should calculate third value of {}'.format(expected)
@raises(AssertionError)
def test_making_out_of_range(self, mock_input, mock_output):
mock_input.return_value = True
mock_output.return_value = True
# should raise AssertionError because percentages not between 0-100 are invalid
parse_s3.calculate_percentages(
labels_file='not-a-file.txt',
train_file='not-a-file.txt', percent_train=-1,
val_file=None, percent_val=None,
test_file=None, percent_test=None
)
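    # Illustrative summary (not an original test): calculate_percentages infers
    # any omitted split, e.g. only train_file supplied -> (100, 0, 0), and
    # 25/30/None -> (25, 30, 45); percentages outside 0-100 raise AssertionError.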
class TestParseWebListing():
@classmethod
def setUpClass(cls):
if import_failed:
test_utils.skipTest('Could not import parse_s3, most likely cause is Boto not installed')
def test_non_url(self):
for url in ['not-a-url', 'http://not-a-url', 'https://not-a-url']:
yield self.check_url_raises, url
def check_url_raises(self, url):
assert_raises(Exception, parse_s3.parse_web_listing, url)
def test_mock_url(self):
for content, dirs, files in [
# Nothing
('', [], []),
# Apache 2.2.22
(
'<head></head><body><table>\n \
<tr><td><a href="/home/">Parent</a></td></tr>\n \
<tr><td><a href="cat1/">cat1/</a></td><td>01-Jan-2015 12:34</td><td> - </td></tr>\n \
<tr><td><a href="cat2/">cat2/</a></td><td>02-Feb-2015 23:45</td><td> - </td></tr>\n \
<tr><td><a href="cat.jpg">cat.jpg</a></td><td>03-Mar-2015 1:23</td><td> 1 </td></tr>\n \
</table</body>\n',
['cat1/', 'cat2/'],
['cat.jpg'],
),
# Apache 2.4.7
(
'<html><head></head><body><table>\n \
<tr><td><a href="/home/">Parent</a></td></tr>\n \
<tr><td><a href="dog/">dog/</a></td><td>01-01-2015 12:34</td><td> - </td></tr>\n \
<tr><td><a href="dog1.jpeg">dog1.jpeg</a></td><td>02-02-2015 23:45</td><td> 1 </td></tr>\n \
<tr><td><a href="dog2.png">dog2.png</a></td><td>03-03-2015 1:23</td><td> 2 </td></tr>\n \
</table</body></html>\n',
['dog/'],
['dog1.jpeg', 'dog2.png'],
),
# Nginx
(
'<html><head></head><body>\n \
<a href="bird.jpg">bird.jpg</a> 01-Jan-1999 01:23 1\n \
<a href="birds/">birds/</a> 02-Feb-1999 12:34 -',
['birds/'],
['bird.jpg'],
),
]:
with mock.patch('digits.tools.parse_s3.requests') as mock_requests:
response = mock.Mock()
response.status_code = mock_requests.codes.ok
response.content = content
mock_requests.get.return_value = response
yield self.check_listing, (dirs, files)
def check_listing(self, rc):
assert parse_s3.parse_web_listing('any_url') == rc
class TestSplitIndices():
@classmethod
def setUpClass(cls):
if import_failed:
test_utils.skipTest('Could not import parse_s3, most likely cause is Boto not installed')
def test_indices(self):
for size in [5, 22, 32]:
for percent_b in range(0, 100, 31):
for percent_c in range(0, 100 - percent_b, 41):
yield self.check_split, size, percent_b, percent_c
def check_split(self, size, pct_b, pct_c):
ideala = size * float(100 - pct_b - pct_c) / 100.0
idealb = size * float(100 - pct_c) / 100.0
idxa, idxb = parse_s3.three_way_split_indices(size, pct_b, pct_c)
assert abs(ideala - idxa) <= 2, 'split should be close to {}, is {}'.format(ideala, idxa)
assert abs(idealb - idxb) <= 2, 'split should be close to {}, is {}'.format(idealb, idxb)
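    # Worked example (editorial sketch): for size=10, pct_b=30, pct_c=20 the
    # ideal split points are 10 * (100 - 30 - 20) / 100 = 5.0 and
    # 10 * (100 - 20) / 100 = 8.0, so any (idxa, idxb) within +/-2 of (5, 8)
    # passes the assertions above.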
class TestParseS3():
@classmethod
def setUpClass(cls):
if import_failed:
test_utils.skipTest('Could not import parse_s3, most likely cause is Boto not installed')
def test_all_train(self):
classes = range(10)
mock_walker = MockS3Walker(classes)
        tmpdir = tempfile.mkdtemp()
        try:
labels_file = tempfile.mkstemp(dir=tmpdir)
train_file = tempfile.mkstemp(dir=tmpdir)
parse_s3.parse_s3(mock_walker, 'validbucket', 'train/', labels_file[1],
percent_train=100, train_file=train_file[1], percent_val=0, percent_test=0)
with open(labels_file[1]) as infile:
parsed_classes = [line.strip() for line in infile]
expected_classes = [str(i) for i in classes]
assert parsed_classes == expected_classes, '%s != %s' % (parsed_classes, classes)
finally:
shutil.rmtree(tmpdir)
    def test_neg_all_train(self):
        classes = range(1)
        mock_walker = MockS3Walker(classes)
        # create the temp dir before entering the try block so the finally
        # clause cannot hit a NameError if an earlier statement raises
        tmpdir = tempfile.mkdtemp()
        try:
            labels_file = tempfile.mkstemp(dir=tmpdir)
            train_file = tempfile.mkstemp(dir=tmpdir)
assert not parse_s3.parse_s3(mock_walker, 'invalidbucket', 'train/', labels_file[1], percent_train=100,
train_file=train_file[1], percent_val=0, percent_test=0)
finally:
shutil.rmtree(tmpdir)
# ==== broccoli/plugins.py (vulnersCom/broccoli, MIT) ====
import re
import time
import typing
import logging
from calendar import monthrange
from datetime import datetime
from collections.abc import Iterable  # moved out of `collections` in Python 3.10
from heapq import heappush, heappop
from . import types # noqa
from . exceptions import BrokerError
from . interfaces import App, Plugin, Logger
from . utils import cached_property, rewind
class MasterLogger(Plugin):
def __init__(self,
logger: typing.Union[logging.Logger, Logger],
**kwargs) -> None:
self.logger = logger
def register_master_handlers(self):
level = self.logger.level
ret = {}
if level <= logging.ERROR:
ret['task_exception'] = self.on_task_exception
ret['task_unknown'] = self.on_task_unknown
ret['worker_error'] = self.on_worker_error
ret['broker_error'] = self.on_broker_error
if level <= logging.INFO:
ret['worker_start'] = self.on_worker_start
ret['task_start'] = self.on_task_start
ret['task_done'] = self.on_task_done
ret['task_interrupt'] = self.on_task_interrupt
if level <= logging.DEBUG:
ret['task_expires'] = self.on_task_expires
return ret
def on_worker_start(self, w, **kwargs):
self.logger.info('worker process [%d] started.', w.pid)
def on_task_start(self, w, task_name, task_request, **kwargs):
self.logger.info('[%d] - received task: %s[%s].',
w.pid, task_name, task_request['id'])
def on_task_done(self, w, task_name, task_request, running_time, **kwargs):
        self.logger.info('[%d] - task %s[%s] succeeded in %ss.',
w.pid, task_name, task_request['id'], running_time)
def on_task_interrupt(self, w, task_name, task_request, running_time,
**kwargs):
self.logger.info('[%d] - task %s[%s] killed in %ss.',
w.pid, task_name, task_request['id'], running_time)
def on_task_expires(self, w, task_name, task_request, **kwargs):
self.logger.debug('[%d] - task %s[%s] expires.',
w.pid, task_name, task_request['id'])
def on_task_unknown(self, w, task_name, **kwargs):
self.logger.error('[%d] - received unregistered task `%s`.',
w.pid, task_name)
def on_task_exception(self, w, task_name, task_request, exc, traceback,
running_time, **kwargs):
self.logger.error('[%d] - task %s[%s] raised exception: %s\n%s',
w.pid, task_name, task_request['id'], repr(exc),
traceback)
def on_broker_error(self, w, **kwargs):
self.logger.error('[%d] - broker error', w.pid)
def on_worker_error(self, w, exc, traceback, **kwargs):
self.logger.error('[%d] - got exception: %s\n%s',
w.pid, repr(exc), traceback)
class TaskKiller(Plugin):
def __init__(self,
app: App,
logger: typing.Union[logging.Logger, Logger],
**kwargs) -> None:
self.app = app
self.logger = logger
self.running_tasks = set() # type: typing.Set[types.TaskId]
self.heap = [] # type: typing.List[typing.Tuple[float, types.TaskId]]
def run_in_master(self, curtime):
if not self.heap:
return
while self.heap and self.heap[0][0] <= curtime:
tm, task_id = heappop(self.heap)
if task_id not in self.running_tasks:
continue
self.logger.debug('[taskkiller] - kill task %s due to time limit',
task_id)
if self.heap:
return self.heap[0][0] - time.time()
def on_task_start(self, w,
task_name,
task_id,
task_headers,
start_time,
**kwargs):
limit = task_headers.get('time_limit')
if limit is None:
return
self.running_tasks.add(task_id)
heappush(self.heap, (start_time + limit, task_id))
def on_task_done(self, w, task_name, task_id):
if task_id not in self.running_tasks:
return
self.running_tasks.remove(task_id)
class CronBeat(Plugin):
def __init__(self,
app: App,
schedule: str,
error_timeout: int,
logger: typing.Union[logging.Logger, Logger],
**kwargs) -> None:
self.app = app
self.logger = logger
self.error_timeout = error_timeout
self.schedule = schedule
self.next_run = 0
@classmethod
def add_console_args(cls, parser) -> None:
parser.add_argument('--schedule',
dest='schedule',
default='schedule.py',
help='Schedule rules')
def get_applied_conf(self):
return {
'schedule': self.schedule
}
@cached_property
def heap(self):
dct = {'crontab': crontab}
with open(self.schedule, 'rt') as f:
rules = eval(f.read(), dct)
if not isinstance(rules, dict):
raise TypeError('Must be a dict')
start = datetime.now()
heap = []
for key, entry in rules.items():
if not entry.get('task'):
raise TypeError('`task` must be set')
schedule = entry.get('schedule')
if not isinstance(schedule, crontab):
raise TypeError('`schedule` must be a crontab')
schedule = schedule.start(start)
heappush(heap, (next(schedule).timestamp(), schedule, entry))
return heap
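    # Illustrative schedule.py contents (an assumption consistent with the
    # eval() and validation above; the entry name, task path and timing are
    # hypothetical):
    #
    #     {
    #         'cleanup': {
    #             'task': 'tasks.cleanup',
    #             'schedule': crontab(minute='*/15'),
    #             'args': (),
    #             'kwargs': {},
    #         },
    #     }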
def master_idle(self, curtime):
if not self.heap:
return
if self.next_run > curtime:
return self.next_run - curtime
task_sent = False
while self.heap and self.heap[0][0] <= curtime:
_, schedule, entry = self.heap[0]
try:
self.app.send_task(entry['task'],
args=entry.get('args', ()),
kwargs=entry.get('kwargs', {}))
except BrokerError:
self.logger.error('[beat] - cant send task, retry in %ss.',
self.error_timeout)
self.next_run = self.error_timeout + curtime
return self.error_timeout
else:
self.logger.debug('[beat] - %s sent.', entry['task'])
heappop(self.heap)
heappush(self.heap, (
next(schedule).timestamp(), schedule, entry))
task_sent = True
if self.heap:
self.next_run = self.heap[0][0]
timeout = self.next_run - curtime
if task_sent:
self.logger.debug('[beat] - next task in %fs.', timeout)
return timeout
class crontab_parser:
"""Parser for Crontab expressions."""
_range = r'(\d+?)-(\d+)'
_steps = r'/(\d+)'
_number = r'(\d+)'
_star = r'\*'
def __init__(self, min_, max_):
self.max_ = max_
self.min_ = min_
self.pats = (
(re.compile('^' + self._range + self._steps + '$'),
self._range_steps),
(re.compile('^' + self._range + '$'),
self._expand_range),
(re.compile('^' + self._star + self._steps + '$'),
self._star_steps),
(re.compile('^' + self._star + '$'),
self._expand_star),
(re.compile('^' + self._number + '$'),
self._expand_range)
)
def parse(self, spec):
acc = set()
for part in spec.split(','):
if not part:
raise ValueError('empty part')
acc |= set(self._parse_part(part))
return sorted(acc)
def _parse_part(self, part):
for regex, handler in self.pats:
m = regex.match(part)
if m:
return handler(m.groups())
raise ValueError('invalid filter: %r' % part)
def _expand_range(self, toks):
fr = self._expand_number(toks[0])
if len(toks) > 1:
to = self._expand_number(toks[1])
if to < fr:
raise ValueError('invalid range')
return list(range(fr, to + 1))
return [fr]
def _range_steps(self, toks):
if len(toks) != 3 or not toks[2]:
raise ValueError('empty filter')
return self._expand_range(toks[:2])[::int(toks[2])]
def _star_steps(self, toks):
if not toks or not toks[0]:
raise ValueError('empty filter')
return self._expand_star()[::int(toks[0])]
    def _expand_star(self, *args):
        # a bare '*' expands to every value in [min_, max_]; the original
        # `max_ + min_ + 1` bound produced out-of-range values whenever
        # min_ > 0 (e.g. 32 for a day-of-month field)
        return list(range(self.min_, self.max_ + 1))
def _expand_number(self, s):
try:
i = int(s)
except ValueError:
raise ValueError('invalid number: %r' % s)
if i > self.max_:
raise ValueError(
'invalid end range: {0} > {1}.'.format(i, self.max_))
if i < self.min_:
raise ValueError(
'invalid beginning range: {0} < {1}.'.format(i, self.min_))
return i
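# Minimal usage sketch (editorial example, not part of the original module):
# how crontab_parser expands the common cron field notations. The bounds
# (0, 23) model an hour field.
if __name__ == '__main__':
    _hours = crontab_parser(0, 23)
    assert _hours.parse('*/6') == [0, 6, 12, 18]      # star with a step
    assert _hours.parse('8-11') == [8, 9, 10, 11]     # inclusive range
    assert _hours.parse('1,5,5,7') == [1, 5, 7]       # list, deduplicated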
class crontab:
def __init__(self,
minute='*',
hour='*',
day_of_month='*',
month_of_year='*',
day_of_week='*'):
self._orig_minute = minute
self._orig_hour = hour
self._orig_day_of_week = day_of_week
self._orig_day_of_month = day_of_month
self._orig_month_of_year = month_of_year
self.hour = self._expand_spec(hour, 0, 23)
self.minute = self._expand_spec(minute, 0, 59)
self.day_of_week = self._expand_spec(day_of_week, 0, 6)
self.day_of_month = self._expand_spec(day_of_month, 1, 31)
self.month_of_year = self._expand_spec(month_of_year, 1, 12)
def __repr__(self):
return ('<crontab: {0._orig_minute} {0._orig_hour} '
'{0._orig_day_of_week} {0._orig_day_of_month} '
'{0._orig_month_of_year}>').format(self)
@staticmethod
def _expand_spec(cronspec, min_, max_):
"""Expand cron specification."""
if isinstance(cronspec, int):
result = [cronspec]
elif isinstance(cronspec, str):
result = crontab_parser(min_, max_).parse(cronspec)
elif isinstance(cronspec, (list, tuple, set)):
result = sorted(cronspec)
elif isinstance(cronspec, Iterable):
result = sorted(cronspec)
else:
raise TypeError("Argument cronspec needs to be of any of the "
"following types: int, str, or an iterable type. "
"%r was given." % type(cronspec))
for number in result:
if not isinstance(number, int):
raise ValueError("Argument cronspec needs to be an int: "
"%r was given." % type(number))
        # result is sorted, so checking the two endpoints covers every value;
        # the original code tested result[0] twice and never result[-1]
        for number in (result[0], result[-1]):
            if number < min_ or number > max_:
                raise ValueError(
                    "Invalid crontab pattern. Valid range is %d-%d. "
                    "'%r' was found." % (min_, max_, number))
return result
def start(self, start_date=None):
y = start_date.year
complete, (month_of_year, day_of_month, hour, minute) = rewind(
start_date.timetuple()[1:5], (
self.month_of_year,
self.day_of_month,
self.hour,
self.minute
))
if complete:
y += 1
while 1:
for m in month_of_year:
max_d = monthrange(y, m)[1]
for d in day_of_month:
if d > max_d:
break
for h in hour:
for mi in minute:
yield datetime(y, m, d, h, mi)
minute = self.minute
hour = self.hour
day_of_month = self.day_of_month
month_of_year = self.month_of_year
y += 1
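# Usage sketch (editorial example; the concrete fire times depend on the
# project's rewind() helper, so no timestamps are asserted here):
if __name__ == '__main__':
    sched = crontab(minute='*/15').start(datetime(2020, 1, 1))
    first, second = next(sched), next(sched)   # successive scheduled runs
    print(first, second)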
| 34.898876
| 79
| 0.525998
|
import re
import time
import typing
import logging
from calendar import monthrange
from datetime import datetime
from collections import Iterable
from heapq import heappush, heappop
from . import types from . exceptions import BrokerError
from . interfaces import App, Plugin, Logger
from . utils import cached_property, rewind
class MasterLogger(Plugin):
def __init__(self,
logger: typing.Union[logging.Logger, Logger],
**kwargs) -> None:
self.logger = logger
def register_master_handlers(self):
level = self.logger.level
ret = {}
if level <= logging.ERROR:
ret['task_exception'] = self.on_task_exception
ret['task_unknown'] = self.on_task_unknown
ret['worker_error'] = self.on_worker_error
ret['broker_error'] = self.on_broker_error
if level <= logging.INFO:
ret['worker_start'] = self.on_worker_start
ret['task_start'] = self.on_task_start
ret['task_done'] = self.on_task_done
ret['task_interrupt'] = self.on_task_interrupt
if level <= logging.DEBUG:
ret['task_expires'] = self.on_task_expires
return ret
def on_worker_start(self, w, **kwargs):
self.logger.info('worker process [%d] started.', w.pid)
def on_task_start(self, w, task_name, task_request, **kwargs):
self.logger.info('[%d] - received task: %s[%s].',
w.pid, task_name, task_request['id'])
def on_task_done(self, w, task_name, task_request, running_time, **kwargs):
self.logger.info('[%d] - task %s[%s] successed in %ss.',
w.pid, task_name, task_request['id'], running_time)
def on_task_interrupt(self, w, task_name, task_request, running_time,
**kwargs):
self.logger.info('[%d] - task %s[%s] killed in %ss.',
w.pid, task_name, task_request['id'], running_time)
def on_task_expires(self, w, task_name, task_request, **kwargs):
self.logger.debug('[%d] - task %s[%s] expires.',
w.pid, task_name, task_request['id'])
def on_task_unknown(self, w, task_name, **kwargs):
self.logger.error('[%d] - received unregistered task `%s`.',
w.pid, task_name)
def on_task_exception(self, w, task_name, task_request, exc, traceback,
running_time, **kwargs):
self.logger.error('[%d] - task %s[%s] raised exception: %s\n%s',
w.pid, task_name, task_request['id'], repr(exc),
traceback)
def on_broker_error(self, w, **kwargs):
self.logger.error('[%d] - broker error', w.pid)
def on_worker_error(self, w, exc, traceback, **kwargs):
self.logger.error('[%d] - got exception: %s\n%s',
w.pid, repr(exc), traceback)
class TaskKiller(Plugin):
def __init__(self,
app: App,
logger: typing.Union[logging.Logger, Logger],
**kwargs) -> None:
self.app = app
self.logger = logger
        self.running_tasks = set()
        self.heap = []
def run_in_master(self, curtime):
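        # pop expired (deadline, task_id) pairs off the min-heap; entries for
        # tasks that have already finished are skipped (lazy deletion)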
if not self.heap:
return
while self.heap and self.heap[0][0] <= curtime:
tm, task_id = heappop(self.heap)
if task_id not in self.running_tasks:
continue
self.logger.debug('[taskkiller] - kill task %s due to time limit',
task_id)
if self.heap:
return self.heap[0][0] - time.time()
def on_task_start(self, w,
task_name,
task_id,
task_headers,
start_time,
**kwargs):
limit = task_headers.get('time_limit')
if limit is None:
return
self.running_tasks.add(task_id)
heappush(self.heap, (start_time + limit, task_id))
    def on_task_done(self, w, task_name, task_id, **kwargs):
if task_id not in self.running_tasks:
return
self.running_tasks.remove(task_id)
class CronBeat(Plugin):
def __init__(self,
app: App,
schedule: str,
error_timeout: int,
logger: typing.Union[logging.Logger, Logger],
**kwargs) -> None:
self.app = app
self.logger = logger
self.error_timeout = error_timeout
self.schedule = schedule
self.next_run = 0
@classmethod
def add_console_args(cls, parser) -> None:
parser.add_argument('--schedule',
dest='schedule',
default='schedule.py',
help='Schedule rules')
def get_applied_conf(self):
return {
'schedule': self.schedule
}
@cached_property
def heap(self):
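        # the schedule file is evaluated as Python with `crontab` in scope and
        # must produce a dict of entries, e.g. (illustrative):
        #   {'nightly': {'task': 'app.cleanup',
        #                'schedule': crontab(minute=0, hour=3)}}
        # each entry is pushed onto a heap as (next_fire_timestamp, generator, entry)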
dct = {'crontab': crontab}
with open(self.schedule, 'rt') as f:
rules = eval(f.read(), dct)
if not isinstance(rules, dict):
raise TypeError('Must be a dict')
start = datetime.now()
heap = []
for key, entry in rules.items():
if not entry.get('task'):
raise TypeError('`task` must be set')
schedule = entry.get('schedule')
if not isinstance(schedule, crontab):
raise TypeError('`schedule` must be a crontab')
schedule = schedule.start(start)
heappush(heap, (next(schedule).timestamp(), schedule, entry))
return heap
def master_idle(self, curtime):
if not self.heap:
return
if self.next_run > curtime:
return self.next_run - curtime
task_sent = False
while self.heap and self.heap[0][0] <= curtime:
_, schedule, entry = self.heap[0]
try:
self.app.send_task(entry['task'],
args=entry.get('args', ()),
kwargs=entry.get('kwargs', {}))
except BrokerError:
                self.logger.error("[beat] - can't send task, retry in %ss.",
self.error_timeout)
self.next_run = self.error_timeout + curtime
return self.error_timeout
else:
self.logger.debug('[beat] - %s sent.', entry['task'])
heappop(self.heap)
heappush(self.heap, (
next(schedule).timestamp(), schedule, entry))
task_sent = True
if self.heap:
self.next_run = self.heap[0][0]
timeout = self.next_run - curtime
if task_sent:
self.logger.debug('[beat] - next task in %fs.', timeout)
return timeout
class crontab_parser:
_range = r'(\d+?)-(\d+)'
_steps = r'/(\d+)'
_number = r'(\d+)'
_star = r'\*'
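    # the handlers below cover, in order: "a-b/step", "a-b", "*/step", "*",
    # and a plain number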
def __init__(self, min_, max_):
self.max_ = max_
self.min_ = min_
self.pats = (
(re.compile('^' + self._range + self._steps + '$'),
self._range_steps),
(re.compile('^' + self._range + '$'),
self._expand_range),
(re.compile('^' + self._star + self._steps + '$'),
self._star_steps),
(re.compile('^' + self._star + '$'),
self._expand_star),
(re.compile('^' + self._number + '$'),
self._expand_range)
)
def parse(self, spec):
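        # e.g. parse('1-5,10,*/20') on a 0-59 field
        # yields [0, 1, 2, 3, 4, 5, 10, 20, 40]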
acc = set()
for part in spec.split(','):
if not part:
raise ValueError('empty part')
acc |= set(self._parse_part(part))
return sorted(acc)
def _parse_part(self, part):
for regex, handler in self.pats:
m = regex.match(part)
if m:
return handler(m.groups())
raise ValueError('invalid filter: %r' % part)
def _expand_range(self, toks):
fr = self._expand_number(toks[0])
if len(toks) > 1:
to = self._expand_number(toks[1])
if to < fr:
raise ValueError('invalid range')
return list(range(fr, to + 1))
return [fr]
def _range_steps(self, toks):
if len(toks) != 3 or not toks[2]:
raise ValueError('empty filter')
return self._expand_range(toks[:2])[::int(toks[2])]
def _star_steps(self, toks):
if not toks or not toks[0]:
raise ValueError('empty filter')
return self._expand_star()[::int(toks[0])]
def _expand_star(self, *args):
        return list(range(self.min_, self.max_ + 1))
def _expand_number(self, s):
try:
i = int(s)
except ValueError:
raise ValueError('invalid number: %r' % s)
if i > self.max_:
raise ValueError(
'invalid end range: {0} > {1}.'.format(i, self.max_))
if i < self.min_:
raise ValueError(
'invalid beginning range: {0} < {1}.'.format(i, self.min_))
return i
class crontab:
def __init__(self,
minute='*',
hour='*',
day_of_month='*',
month_of_year='*',
day_of_week='*'):
self._orig_minute = minute
self._orig_hour = hour
self._orig_day_of_week = day_of_week
self._orig_day_of_month = day_of_month
self._orig_month_of_year = month_of_year
self.hour = self._expand_spec(hour, 0, 23)
self.minute = self._expand_spec(minute, 0, 59)
self.day_of_week = self._expand_spec(day_of_week, 0, 6)
self.day_of_month = self._expand_spec(day_of_month, 1, 31)
self.month_of_year = self._expand_spec(month_of_year, 1, 12)
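        # note: day_of_week is parsed and validated but not applied by start()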
def __repr__(self):
        return ('<crontab: {0._orig_minute} {0._orig_hour} '
                '{0._orig_day_of_month} {0._orig_month_of_year} '
                '{0._orig_day_of_week}>').format(self)
@staticmethod
def _expand_spec(cronspec, min_, max_):
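        # normalize a cron field given as an int, a spec string, or an
        # iterable into a sorted list of ints within [min_, max_]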
if isinstance(cronspec, int):
result = [cronspec]
elif isinstance(cronspec, str):
result = crontab_parser(min_, max_).parse(cronspec)
elif isinstance(cronspec, (list, tuple, set)):
result = sorted(cronspec)
elif isinstance(cronspec, Iterable):
result = sorted(cronspec)
else:
raise TypeError("Argument cronspec needs to be of any of the "
"following types: int, str, or an iterable type. "
"%r was given." % type(cronspec))
for number in result:
if not isinstance(number, int):
raise ValueError("Argument cronspec needs to be an int: "
"%r was given." % type(number))
        for number in [result[0], result[-1]]:
            if number < min_ or number > max_:
                raise ValueError(
                    "Invalid crontab pattern. Valid range is %d-%d. "
                    "%r was found." % (min_, max_, number))
return result
    def start(self, start_date=None):
        # default to the current time when no start date is given
        if start_date is None:
            start_date = datetime.now()
        y = start_date.year
complete, (month_of_year, day_of_month, hour, minute) = rewind(
start_date.timetuple()[1:5], (
self.month_of_year,
self.day_of_month,
self.hour,
self.minute
))
if complete:
y += 1
        while True:
for m in month_of_year:
max_d = monthrange(y, m)[1]
for d in day_of_month:
if d > max_d:
break
for h in hour:
for mi in minute:
yield datetime(y, m, d, h, mi)
minute = self.minute
hour = self.hour
day_of_month = self.day_of_month
month_of_year = self.month_of_year
y += 1
| true
| true
|
f703808ba89e85e0b17a1b390f31606eb6d19386
| 2,313
|
py
|
Python
|
torch/distributed/elastic/rendezvous/registry.py
|
sigmunjr/pytorch
|
526e12029274cf0257954616d5cd5260b1021f52
|
[
"Intel"
] | 1
|
2021-05-11T11:53:47.000Z
|
2021-05-11T11:53:47.000Z
|
torch/distributed/elastic/rendezvous/registry.py
|
sigmunjr/pytorch
|
526e12029274cf0257954616d5cd5260b1021f52
|
[
"Intel"
] | null | null | null |
torch/distributed/elastic/rendezvous/registry.py
|
sigmunjr/pytorch
|
526e12029274cf0257954616d5cd5260b1021f52
|
[
"Intel"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from .api import RendezvousHandler, RendezvousParameters
from .api import rendezvous_handler_registry as handler_registry
from .dynamic_rendezvous import create_handler
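# The _create_* factories below import their backend modules lazily so that
# optional dependencies (e.g. etcd) are only required when that backend is used.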
def _create_static_handler(params: RendezvousParameters) -> RendezvousHandler:
from . import static_tcp_rendezvous
return static_tcp_rendezvous.create_rdzv_handler(params)
def _create_etcd_handler(params: RendezvousParameters) -> RendezvousHandler:
from . import etcd_rendezvous
return etcd_rendezvous.create_rdzv_handler(params)
def _create_etcd_v2_handler(params: RendezvousParameters) -> RendezvousHandler:
from .etcd_rendezvous_backend import create_backend
from .etcd_store import EtcdStore
backend = create_backend(params)
store = EtcdStore(backend.client, "/torch/elastic/store")
return create_handler(store, backend, params)
def _create_c10d_handler(params: RendezvousParameters) -> RendezvousHandler:
from .c10d_rendezvous_backend import create_backend
backend = create_backend(params)
return create_handler(backend.store, backend, params)
def _register_default_handlers() -> None:
handler_registry.register("etcd", _create_etcd_handler)
handler_registry.register("etcd-v2", _create_etcd_v2_handler)
handler_registry.register("c10d", _create_c10d_handler)
handler_registry.register("static", _create_static_handler)
def get_rendezvous_handler(params: RendezvousParameters) -> RendezvousHandler:
"""
    This method is used to obtain a reference to a :py:class:`RendezvousHandler`.
Custom rendezvous handlers can be registered by
::
        from torch.distributed.elastic.rendezvous import rendezvous_handler_registry
from torch.distributed.elastic.rendezvous.registry import get_rendezvous_handler
def create_my_rdzv(params: RendezvousParameters):
return MyCustomRdzv(params)
rendezvous_handler_registry.register("my_rdzv_backend_name", create_my_rdzv)
        my_rdzv_handler = get_rendezvous_handler(params)  # with params.backend == "my_rdzv_backend_name"
"""
return handler_registry.create_handler(params)
| 34.014706
| 92
| 0.793342
|
from .api import RendezvousHandler, RendezvousParameters
from .api import rendezvous_handler_registry as handler_registry
from .dynamic_rendezvous import create_handler
def _create_static_handler(params: RendezvousParameters) -> RendezvousHandler:
from . import static_tcp_rendezvous
return static_tcp_rendezvous.create_rdzv_handler(params)
def _create_etcd_handler(params: RendezvousParameters) -> RendezvousHandler:
from . import etcd_rendezvous
return etcd_rendezvous.create_rdzv_handler(params)
def _create_etcd_v2_handler(params: RendezvousParameters) -> RendezvousHandler:
from .etcd_rendezvous_backend import create_backend
from .etcd_store import EtcdStore
backend = create_backend(params)
store = EtcdStore(backend.client, "/torch/elastic/store")
return create_handler(store, backend, params)
def _create_c10d_handler(params: RendezvousParameters) -> RendezvousHandler:
from .c10d_rendezvous_backend import create_backend
backend = create_backend(params)
return create_handler(backend.store, backend, params)
def _register_default_handlers() -> None:
handler_registry.register("etcd", _create_etcd_handler)
handler_registry.register("etcd-v2", _create_etcd_v2_handler)
handler_registry.register("c10d", _create_c10d_handler)
handler_registry.register("static", _create_static_handler)
def get_rendezvous_handler(params: RendezvousParameters) -> RendezvousHandler:
return handler_registry.create_handler(params)
| true
| true
|
f70380c5f6f598fd66459071a4078c0cc10c2a22
| 6,058
|
py
|
Python
|
redlure-console.py
|
redlure/redlure-console
|
e59dabaea240976bffe645148a0eb95d1075143d
|
[
"BSD-3-Clause"
] | 21
|
2020-08-09T04:28:46.000Z
|
2022-02-23T10:29:50.000Z
|
redlure-console.py
|
redlure/redlure-console
|
e59dabaea240976bffe645148a0eb95d1075143d
|
[
"BSD-3-Clause"
] | 9
|
2021-03-31T15:43:17.000Z
|
2022-03-26T16:40:18.000Z
|
redlure-console.py
|
redlure/redlure-console
|
e59dabaea240976bffe645148a0eb95d1075143d
|
[
"BSD-3-Clause"
] | 7
|
2020-08-14T20:48:42.000Z
|
2022-03-16T22:02:36.000Z
|
#!/usr/bin/env python3
from app import app, db, functions
from app.functions import Color
import subprocess
import os
import shlex
import shutil
from config import Config
from datetime import datetime
from cryptography.fernet import InvalidToken
from app.cipher import CipherTest, Cipher, new_cipher_key, encrypt, decrypt
from app.workspace import Workspace
from app.role import Role
from app.user import User
from app.profile import Profile
from app.list import List, Person
from app.email import Email
from app.page import Page
from app.domain import Domain
from app.campaign import Campaign, Campaignpages, WorkerCampaignSchema
from app.result import Result, Form, Event
from app.server import Server
from app.apikey import APIKey
import urllib3
# suppress insecure requests warning
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# objects to initialize 'flask shell' with
@app.shell_context_processor
def make_shell_context():
return {
'db': db,
'User': User,
'Profile': Profile,
'Role': Role,
'Workspace': Workspace,
'List': List,
'Person': Person,
'Email': Email,
'Page': Page,
'Domain': Domain,
'Campaign': Campaign,
'Result': Result,
'Server': Server,
'APIKey': APIKey,
'Form': Form,
'Campaignpages': Campaignpages,
'Event': Event
}
def init_cipher():
passphrase = ''
print(f'{Color.gray}[*] redlure encrypts sensitive database fields{Color.end}')
print(f'{Color.gray}[*] Enter a passphrase that will be used in generating the key\n{Color.end}')
while passphrase == '':
passphrase = input(f'{Color.gray}[+] Passphrase: {Color.red}').encode()
print(f'\n[!] WARNING: Do not lose your passphrase - doing so will result in losing access to parts of your database{Color.end}')
new_cipher_key(passphrase)
input(f'\n{Color.gray}[+] Press enter to continue: {Color.end}')
def get_cipher():
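    # re-derive the key from the entered passphrase and verify it by
    # decrypting the stored test value; exit on failure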
cipher_text = CipherTest.query.first().value
    text = cipher_text.decode()
    print(f'{Color.gray}{text[:len(text)//2]}\n{text[len(text)//2:]}{Color.end}\n')
passphrase = input(f'{Color.gray}[+] Enter the cipher passphrase: {Color.red}').encode()
new_cipher_key(passphrase)
try:
plain_text = decrypt(cipher_text)
print(f'[+] {plain_text.decode()}\n{Color.end}')
except InvalidToken:
print(f'\n[!] Decryption failed - invalid passphrase{Color.end}')
exit()
def init_db():
if os.path.isdir('migrations'):
shutil.rmtree('migrations')
print(f'\n{Color.red}[*] Creating database{Color.end}')
proc = subprocess.Popen(shlex.split('flask db init'), stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
proc.wait()
proc = subprocess.Popen(shlex.split('flask db migrate'), stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
proc.wait()
proc = subprocess.Popen(shlex.split('flask db upgrade'), stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
proc.wait()
print(f'{Color.red}[+] Initializing database values\n{Color.end}')
general_ws = Workspace(name='General')
db.session.add(general_ws)
db.session.commit()
administrator = Role(name='redlure admin', role_type='Administrator')
general_ws = Workspace.query.filter_by(id=1, name='General').first()
if general_ws is not None:
administrator.workspaces.append(general_ws)
db.session.add(administrator)
db.session.commit()
admin = User(username='admin', role_id=1)
admin.set_password('redlure')
db.session.add(admin)
db.session.commit()
encrypted_val = encrypt(b'Bingo. Welcome to redlure')
cipher_test = CipherTest(value=encrypted_val)
db.session.add(cipher_test)
db.session.commit()
key = APIKey()
# check for scheduled campaigns that need to be re-entered into the queue
def check_campaigns():
campaigns = Campaign.query.filter_by(status='Scheduled').all()
for campaign in campaigns:
if datetime.now() < campaign.start_time:
#schema = WorkerCampaignSchema()
#campaign_data = schema.dump(campaign)
campaign.cast()
else:
campaign.status = 'Start time missed (server outage)'
db.session.commit()
def gen_certs():
proc = subprocess.Popen(shlex.split('openssl req -x509 -newkey rsa:4096 -nodes -subj "/" -out redlure-cert.pem -keyout redlure-key.pem -days 365'))
proc.wait()
def banner():
print(f'''
{Color.red} .___{Color.gray}.__ {Color.end}
{Color.red}_______ ____ __| _/{Color.gray}| | __ _________ ____ {Color.end}
{Color.red}\_ __ \_/ __ \ / __ | {Color.gray}| | | | \_ __ \_/ __ \ {Color.end}
{Color.red} | | \/\ ___// /_/ | {Color.gray}| |_| | /| | \/\ ___/ {Color.end}
{Color.red} |__| \___ >____ | {Color.gray}|____/____/ |__| \___ >{Color.end}
{Color.red} \/ \/ {Color.gray} \/ {Color.end}
''')
if __name__ == '__main__':
banner()
# SECRET_KEY is required
if Config.SECRET_KEY == '':
print('[!] A secret key is required - set the SECRET_KEY attribute in config.py')
print(f'[!] New suggested random secret key: {os.urandom(24)}')
exit()
# check if db exists yet
if not os.path.isfile('redlure.db'):
init_cipher()
init_db()
else:
get_cipher()
check_campaigns()
# generate certs if they dont exist
if Config.CERT_PATH == 'redlure-cert.pem' and Config.KEY_PATH == 'redlure-key.pem':
if not os.path.isfile('redlure-cert.pem') or not os.path.isfile('redlure-key.pem'):
gen_certs()
# start the server
app.logger.info('redlure-console starting up')
#server = subprocess.Popen(['gunicorn', 'app:app', '-b 0.0.0.0:5000', '--certfile', Config.CERT_PATH, '--keyfile', Config.KEY_PATH])
#server.wait()
app.run(host='0.0.0.0', ssl_context=(Config.CERT_PATH, Config.KEY_PATH), use_reloader=False)
| 35.017341
| 151
| 0.651535
|
from app import app, db, functions
from app.functions import Color
import subprocess
import os
import shlex
import shutil
from config import Config
from datetime import datetime
from cryptography.fernet import InvalidToken
from app.cipher import CipherTest, Cipher, new_cipher_key, encrypt, decrypt
from app.workspace import Workspace
from app.role import Role
from app.user import User
from app.profile import Profile
from app.list import List, Person
from app.email import Email
from app.page import Page
from app.domain import Domain
from app.campaign import Campaign, Campaignpages, WorkerCampaignSchema
from app.result import Result, Form, Event
from app.server import Server
from app.apikey import APIKey
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
@app.shell_context_processor
def make_shell_context():
return {
'db': db,
'User': User,
'Profile': Profile,
'Role': Role,
'Workspace': Workspace,
'List': List,
'Person': Person,
'Email': Email,
'Page': Page,
'Domain': Domain,
'Campaign': Campaign,
'Result': Result,
'Server': Server,
'APIKey': APIKey,
'Form': Form,
'Campaignpages': Campaignpages,
'Event': Event
}
def init_cipher():
passphrase = ''
print(f'{Color.gray}[*] redlure encrypts sensitive database fields{Color.end}')
print(f'{Color.gray}[*] Enter a passphrase that will be used in generating the key\n{Color.end}')
while passphrase == '':
passphrase = input(f'{Color.gray}[+] Passphrase: {Color.red}').encode()
print(f'\n[!] WARNING: Do not lose your passphrase - doing so will result in losing access to parts of your database{Color.end}')
new_cipher_key(passphrase)
input(f'\n{Color.gray}[+] Press enter to continue: {Color.end}')
def get_cipher():
cipher_text = CipherTest.query.first().value
    text = cipher_text.decode()
    print(f'{Color.gray}{text[:len(text)//2]}\n{text[len(text)//2:]}{Color.end}\n')
passphrase = input(f'{Color.gray}[+] Enter the cipher passphrase: {Color.red}').encode()
new_cipher_key(passphrase)
try:
plain_text = decrypt(cipher_text)
print(f'[+] {plain_text.decode()}\n{Color.end}')
except InvalidToken:
print(f'\n[!] Decryption failed - invalid passphrase{Color.end}')
exit()
def init_db():
if os.path.isdir('migrations'):
shutil.rmtree('migrations')
print(f'\n{Color.red}[*] Creating database{Color.end}')
proc = subprocess.Popen(shlex.split('flask db init'), stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
proc.wait()
proc = subprocess.Popen(shlex.split('flask db migrate'), stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
proc.wait()
proc = subprocess.Popen(shlex.split('flask db upgrade'), stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
proc.wait()
print(f'{Color.red}[+] Initializing database values\n{Color.end}')
general_ws = Workspace(name='General')
db.session.add(general_ws)
db.session.commit()
administrator = Role(name='redlure admin', role_type='Administrator')
general_ws = Workspace.query.filter_by(id=1, name='General').first()
if general_ws is not None:
administrator.workspaces.append(general_ws)
db.session.add(administrator)
db.session.commit()
admin = User(username='admin', role_id=1)
admin.set_password('redlure')
db.session.add(admin)
db.session.commit()
encrypted_val = encrypt(b'Bingo. Welcome to redlure')
cipher_test = CipherTest(value=encrypted_val)
db.session.add(cipher_test)
db.session.commit()
key = APIKey()
def check_campaigns():
campaigns = Campaign.query.filter_by(status='Scheduled').all()
for campaign in campaigns:
if datetime.now() < campaign.start_time:
campaign.cast()
else:
campaign.status = 'Start time missed (server outage)'
db.session.commit()
def gen_certs():
proc = subprocess.Popen(shlex.split('openssl req -x509 -newkey rsa:4096 -nodes -subj "/" -out redlure-cert.pem -keyout redlure-key.pem -days 365'))
proc.wait()
def banner():
print(f'''
{Color.red} .___{Color.gray}.__ {Color.end}
{Color.red}_______ ____ __| _/{Color.gray}| | __ _________ ____ {Color.end}
{Color.red}\_ __ \_/ __ \ / __ | {Color.gray}| | | | \_ __ \_/ __ \ {Color.end}
{Color.red} | | \/\ ___// /_/ | {Color.gray}| |_| | /| | \/\ ___/ {Color.end}
{Color.red} |__| \___ >____ | {Color.gray}|____/____/ |__| \___ >{Color.end}
{Color.red} \/ \/ {Color.gray} \/ {Color.end}
''')
if __name__ == '__main__':
banner()
if Config.SECRET_KEY == '':
print('[!] A secret key is required - set the SECRET_KEY attribute in config.py')
print(f'[!] New suggested random secret key: {os.urandom(24)}')
exit()
if not os.path.isfile('redlure.db'):
init_cipher()
init_db()
else:
get_cipher()
check_campaigns()
if Config.CERT_PATH == 'redlure-cert.pem' and Config.KEY_PATH == 'redlure-key.pem':
if not os.path.isfile('redlure-cert.pem') or not os.path.isfile('redlure-key.pem'):
gen_certs()
app.logger.info('redlure-console starting up')
app.run(host='0.0.0.0', ssl_context=(Config.CERT_PATH, Config.KEY_PATH), use_reloader=False)
| true
| true
|
f70380d80d551bbc351d9ece8199eacdade9c151
| 6,731
|
py
|
Python
|
rwt_robot_monitor/test/test_rwt_robot_monitor_plotter.py
|
Affonso-Gui/visualization_rwt
|
9c015164e95ec13492dac9bf433c082355168ce4
|
[
"BSD-3-Clause"
] | 33
|
2015-05-27T00:07:15.000Z
|
2022-02-05T21:55:14.000Z
|
rwt_robot_monitor/test/test_rwt_robot_monitor_plotter.py
|
Affonso-Gui/visualization_rwt
|
9c015164e95ec13492dac9bf433c082355168ce4
|
[
"BSD-3-Clause"
] | 41
|
2015-02-06T09:33:24.000Z
|
2022-03-21T05:06:29.000Z
|
rwt_robot_monitor/test/test_rwt_robot_monitor_plotter.py
|
Affonso-Gui/visualization_rwt
|
9c015164e95ec13492dac9bf433c082355168ce4
|
[
"BSD-3-Clause"
] | 38
|
2016-01-27T05:34:34.000Z
|
2022-03-21T05:03:48.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Software License Agreement (BSD License)
#
# Copyright (c) 2021, Kei Okada
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Copyright holder. nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import argparse
import sys
import time
import rospy
import rostest
import unittest
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import Select
from std_msgs.msg import Float64
CLASSNAME = 'rwt_robot_monitor'
class TestRwtRobotMonitor(unittest.TestCase):
def sin_cb(self, msg):
self.sin_msg = msg
self.sin_msg_received = self.sin_msg_received + 1
def __init__(self, *args):
super(TestRwtRobotMonitor, self).__init__(*args)
rospy.init_node('test_rwt_robot_monitor')
def setUp(self):
parser = argparse.ArgumentParser()
parser.add_argument('--no-headless', action='store_true',
help='start webdriver with headless mode')
args, unknown = parser.parse_known_args()
self.sin_msg = None
self.sin_msg_received = 0
rospy.Subscriber('/sin', Float64, self.sin_cb)
self.url_base = rospy.get_param("url_roswww_testserver")
opts = webdriver.firefox.options.Options()
if not args.no_headless:
opts.add_argument('-headless')
self.browser = webdriver.Firefox(options=opts)
self.wait = webdriver.support.ui.WebDriverWait(self.browser, 10)
# maximize screen
self.browser.find_element_by_tag_name("html").send_keys(Keys.F11)
def tearDown(self):
try:
self.browser.close()
self.browser.quit()
        except Exception:
pass
def set_ros_websocket_port_settings(self):
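        # point the web UI at the local rosbridge websocket and connect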
self.wait.until(EC.presence_of_element_located((By.ID, "button-ros-master-settings")))
settings = self.browser.find_element_by_id("button-ros-master-settings")
self.assertIsNotNone(settings, "Object id=button-ros-master-settings not found")
settings.click()
self.wait.until(EC.presence_of_element_located((By.ID, "input-ros-master-uri")))
uri = self.browser.find_element_by_id("input-ros-master-uri")
self.assertIsNotNone(uri, "Object id=input-ros-master-uri not found")
        uri.clear()
uri.send_keys('ws://localhost:9090/')
self.wait.until(EC.presence_of_element_located((By.ID, "button-ros-master-connect")))
connect = self.browser.find_element_by_id("button-ros-master-connect")
self.assertIsNotNone(connect, "Object id=button-ros-master-connect")
connect.click()
def test_rwt_robot_monitor_plotter(self):
url = '%s/rwt_robot_monitor/plotter.html' % (self.url_base)
rospy.logwarn("Accessing to %s" % url)
self.browser.get(url)
# check settings
self.set_ros_websocket_port_settings()
# wait for /First/pref1a topic
topic_text = ''
while topic_text == '':
time.sleep(1)
self.wait.until(EC.presence_of_element_located((By.ID, "name-select")))
topic = self.browser.find_element_by_id("name-select")
self.assertIsNotNone(topic, "Object id=name-select not found")
topic_text = topic.text
self.assertTrue(u'/First/pref1a' in topic_text)
Select(topic).select_by_value('/First/pref1a')
# wait for test topic
topic_text = ''
while topic_text == '':
time.sleep(1)
self.wait.until(EC.presence_of_element_located((By.ID, "plot-field-select")))
topic = self.browser.find_element_by_id("plot-field-select")
self.assertIsNotNone(topic, "Object id=plot-field-select not found")
topic_text = topic.text
self.assertTrue(u'test' in topic_text)
Select(topic).select_by_value('test')
self.wait.until(EC.presence_of_element_located((By.ID, "add-button")))
add = self.browser.find_element_by_id("add-button")
self.assertIsNotNone(add, "Object id=add-button")
add.click()
# check plot is updated
self.wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, "g.y")))
y_axis = self.browser.find_element_by_css_selector("g.y")
self.assertIsNotNone(y_axis, "Object id=y_axis")
y_axis_value = y_axis.text
loop = 0
y_axis_value_updated = 0
while loop < 60:
loop = loop + 1
time.sleep(1)
y_axis = self.browser.find_element_by_css_selector("g.y")
rospy.logwarn("check if tick updated {} < {} ({})".format(y_axis_value, y_axis.text, y_axis_value_updated))
if y_axis_value != y_axis.text:
y_axis_value_updated = y_axis_value_updated + 1
if y_axis_value_updated >= 2:
break
y_axis_value = y_axis.text
self.assertNotEqual(y_axis_value, y_axis.text)
if __name__ == '__main__':
try:
rostest.run('rwt_robot_monitor', CLASSNAME, TestRwtRobotMonitor, sys.argv)
except KeyboardInterrupt:
pass
print("{} exiting".format(CLASSNAME))
| 39.133721
| 119
| 0.679988
|
import argparse
import sys
import time
import rospy
import rostest
import unittest
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import Select
from std_msgs.msg import Float64
CLASSNAME = 'rwt_robot_monitor'
class TestRwtRobotMonitor(unittest.TestCase):
def sin_cb(self, msg):
self.sin_msg = msg
self.sin_msg_received = self.sin_msg_received + 1
def __init__(self, *args):
super(TestRwtRobotMonitor, self).__init__(*args)
rospy.init_node('test_rwt_robot_monitor')
def setUp(self):
parser = argparse.ArgumentParser()
parser.add_argument('--no-headless', action='store_true',
help='start webdriver with headless mode')
args, unknown = parser.parse_known_args()
self.sin_msg = None
self.sin_msg_received = 0
rospy.Subscriber('/sin', Float64, self.sin_cb)
self.url_base = rospy.get_param("url_roswww_testserver")
opts = webdriver.firefox.options.Options()
if not args.no_headless:
opts.add_argument('-headless')
self.browser = webdriver.Firefox(options=opts)
self.wait = webdriver.support.ui.WebDriverWait(self.browser, 10)
self.browser.find_element_by_tag_name("html").send_keys(Keys.F11)
def tearDown(self):
try:
self.browser.close()
self.browser.quit()
        except Exception:
pass
def set_ros_websocket_port_settings(self):
self.wait.until(EC.presence_of_element_located((By.ID, "button-ros-master-settings")))
settings = self.browser.find_element_by_id("button-ros-master-settings")
self.assertIsNotNone(settings, "Object id=button-ros-master-settings not found")
settings.click()
self.wait.until(EC.presence_of_element_located((By.ID, "input-ros-master-uri")))
uri = self.browser.find_element_by_id("input-ros-master-uri")
self.assertIsNotNone(uri, "Object id=input-ros-master-uri not found")
        uri.clear()
uri.send_keys('ws://localhost:9090/')
self.wait.until(EC.presence_of_element_located((By.ID, "button-ros-master-connect")))
connect = self.browser.find_element_by_id("button-ros-master-connect")
self.assertIsNotNone(connect, "Object id=button-ros-master-connect")
connect.click()
def test_rwt_robot_monitor_plotter(self):
url = '%s/rwt_robot_monitor/plotter.html' % (self.url_base)
rospy.logwarn("Accessing to %s" % url)
self.browser.get(url)
self.set_ros_websocket_port_settings()
topic_text = ''
while topic_text == '':
time.sleep(1)
self.wait.until(EC.presence_of_element_located((By.ID, "name-select")))
topic = self.browser.find_element_by_id("name-select")
self.assertIsNotNone(topic, "Object id=name-select not found")
topic_text = topic.text
self.assertTrue(u'/First/pref1a' in topic_text)
Select(topic).select_by_value('/First/pref1a')
topic_text = ''
while topic_text == '':
time.sleep(1)
self.wait.until(EC.presence_of_element_located((By.ID, "plot-field-select")))
topic = self.browser.find_element_by_id("plot-field-select")
self.assertIsNotNone(topic, "Object id=plot-field-select not found")
topic_text = topic.text
self.assertTrue(u'test' in topic_text)
Select(topic).select_by_value('test')
self.wait.until(EC.presence_of_element_located((By.ID, "add-button")))
add = self.browser.find_element_by_id("add-button")
self.assertIsNotNone(add, "Object id=add-button")
add.click()
self.wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, "g.y")))
y_axis = self.browser.find_element_by_css_selector("g.y")
self.assertIsNotNone(y_axis, "Object id=y_axis")
y_axis_value = y_axis.text
loop = 0
y_axis_value_updated = 0
while loop < 60:
loop = loop + 1
time.sleep(1)
y_axis = self.browser.find_element_by_css_selector("g.y")
rospy.logwarn("check if tick updated {} < {} ({})".format(y_axis_value, y_axis.text, y_axis_value_updated))
if y_axis_value != y_axis.text:
y_axis_value_updated = y_axis_value_updated + 1
if y_axis_value_updated >= 2:
break
y_axis_value = y_axis.text
self.assertNotEqual(y_axis_value, y_axis.text)
if __name__ == '__main__':
try:
rostest.run('rwt_robot_monitor', CLASSNAME, TestRwtRobotMonitor, sys.argv)
except KeyboardInterrupt:
pass
print("{} exiting".format(CLASSNAME))
| true
| true
|
f70380dd5f4275cd1df2a70863a2cb9bd97885b2
| 2,135
|
py
|
Python
|
tests/test_config.py
|
pete-twibill/alertlogic-sdk-python
|
5449dc3db312ba42de43cd8c9d86a68732c4c319
|
[
"MIT"
] | 4
|
2020-05-14T11:18:07.000Z
|
2021-09-30T13:20:34.000Z
|
tests/test_config.py
|
pete-twibill/alertlogic-sdk-python
|
5449dc3db312ba42de43cd8c9d86a68732c4c319
|
[
"MIT"
] | 26
|
2020-05-18T14:58:12.000Z
|
2021-11-29T16:57:04.000Z
|
tests/test_config.py
|
pete-twibill/alertlogic-sdk-python
|
5449dc3db312ba42de43cd8c9d86a68732c4c319
|
[
"MIT"
] | 23
|
2020-02-10T09:14:05.000Z
|
2022-01-27T23:44:54.000Z
|
import unittest
from almdrlib.session import Session
import re
MOCK_AUTH = {
"authentication": {
"user": {
"id": "589B64BB-AE91-4FA9-A6D8-37AC6759BB5D",
"account_id": "2",
"created": {
"at": 1443713420,
"by": "693BA145-78C0-4C77-AC1A-5385461839CD"
},
"modified": {
"at": 1610707251,
"by": "system"
}
},
"account": {
"id": "2",
"name": "Alert Logic, Inc."
},
"token": "123",
}
}
class NameSpace:
def __init__(self, **kwargs):
self.__dict__ = kwargs
class MockResponse():
elapsed = NameSpace(total_seconds=lambda: 123)
def __init__(self, code_body):
(self.code, self.body) = code_body
def json(self):
return self.body
def status_code(self):
return self.code
def raise_for_status(self):
return None
class MockSession():
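    # minimal stand-in for a requests-style session: maps URL regexes to
    # (status_code, json_body) pairs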
def __init__(self, map):
self.map = map
def post(self, url):
return MockResponse(self.get_status_body(url))
def request(self, method, url, **kwargs):
print("URL", url)
return MockResponse(self.get_status_body(url))
def get_status_body(self, url):
for k, v in self.map.items():
if re.match(k, url):
return v
return 200, {}
class TestConf(unittest.TestCase):
def test_globalep(self):
session = Session(global_endpoint="http://api.aesolo.com:8100")
assert session.get_url("aetuner", "1234567") == "http://api.aesolo.com:8100"
session = Session(global_endpoint="production")
session._session = MockSession({r".*aims/v1/authenticate":
(200, MOCK_AUTH),
r".*residency/default/services/aetuner/endpoint":
(200, {"aetuner":"api.alertlogic.com"})})
assert session.get_url("aetuner", "1234567") == "https://api.alertlogic.com"
if __name__ == '__main__':
unittest.main()
| 26.358025
| 89
| 0.533489
|
import unittest
from almdrlib.session import Session
import re
MOCK_AUTH = {
"authentication": {
"user": {
"id": "589B64BB-AE91-4FA9-A6D8-37AC6759BB5D",
"account_id": "2",
"created": {
"at": 1443713420,
"by": "693BA145-78C0-4C77-AC1A-5385461839CD"
},
"modified": {
"at": 1610707251,
"by": "system"
}
},
"account": {
"id": "2",
"name": "Alert Logic, Inc."
},
"token": "123",
}
}
class NameSpace:
def __init__(self, **kwargs):
self.__dict__ = kwargs
class MockResponse():
elapsed = NameSpace(total_seconds=lambda: 123)
def __init__(self, code_body):
(self.code, self.body) = code_body
def json(self):
return self.body
def status_code(self):
return self.code
def raise_for_status(self):
return None
class MockSession():
def __init__(self, map):
self.map = map
def post(self, url):
return MockResponse(self.get_status_body(url))
def request(self, method, url, **kwargs):
print("URL", url)
return MockResponse(self.get_status_body(url))
def get_status_body(self, url):
for k, v in self.map.items():
if re.match(k, url):
return v
return 200, {}
class TestConf(unittest.TestCase):
def test_globalep(self):
session = Session(global_endpoint="http://api.aesolo.com:8100")
assert session.get_url("aetuner", "1234567") == "http://api.aesolo.com:8100"
session = Session(global_endpoint="production")
session._session = MockSession({r".*aims/v1/authenticate":
(200, MOCK_AUTH),
r".*residency/default/services/aetuner/endpoint":
(200, {"aetuner":"api.alertlogic.com"})})
assert session.get_url("aetuner", "1234567") == "https://api.alertlogic.com"
if __name__ == '__main__':
unittest.main()
| true
| true
|
f70381bc71cd4b54071ed982ffd4ba5b95af32fa
| 1,866
|
py
|
Python
|
synapse/__init__.py
|
schildbach/synapse
|
3aa36b782c715a2d3c965d5d9b37f7a49a5f17e1
|
[
"Apache-2.0"
] | 1
|
2020-10-10T13:23:05.000Z
|
2020-10-10T13:23:05.000Z
|
synapse/__init__.py
|
schildbach/synapse
|
3aa36b782c715a2d3c965d5d9b37f7a49a5f17e1
|
[
"Apache-2.0"
] | null | null | null |
synapse/__init__.py
|
schildbach/synapse
|
3aa36b782c715a2d3c965d5d9b37f7a49a5f17e1
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018-9 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" This is a reference implementation of a Matrix homeserver.
"""
import json
import os
import sys
# Check that we're not running on an unsupported Python version.
if sys.version_info < (3, 5):
print("Synapse requires Python 3.5 or above.")
sys.exit(1)
# Twisted and canonicaljson will fail to import when this file is executed to
# get the __version__ during a fresh install. That's OK and subsequent calls to
# actually start Synapse will import these libraries fine.
try:
from twisted.internet import protocol
from twisted.internet.protocol import Factory
from twisted.names.dns import DNSDatagramProtocol
protocol.Factory.noisy = False
Factory.noisy = False
DNSDatagramProtocol.noisy = False
except ImportError:
pass
# Use the standard library json implementation instead of simplejson.
try:
from canonicaljson import set_json_library
set_json_library(json)
except ImportError:
pass
__version__ = "1.18.0"
if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
# We import here so that we don't have to install a bunch of deps when
# running the packaging tox test.
from synapse.util.patch_inline_callbacks import do_patch
do_patch()
| 31.627119
| 79
| 0.752947
|
import json
import os
import sys
if sys.version_info < (3, 5):
print("Synapse requires Python 3.5 or above.")
sys.exit(1)
try:
from twisted.internet import protocol
from twisted.internet.protocol import Factory
from twisted.names.dns import DNSDatagramProtocol
protocol.Factory.noisy = False
Factory.noisy = False
DNSDatagramProtocol.noisy = False
except ImportError:
pass
try:
from canonicaljson import set_json_library
set_json_library(json)
except ImportError:
pass
__version__ = "1.18.0"
if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
from synapse.util.patch_inline_callbacks import do_patch
do_patch()
| true
| true
|
f7038372acea8564b8305e0beb5f6d7f87ed3aa9
| 6,454
|
py
|
Python
|
experiments/increasing_dim/Exp_1/kron.py
|
vdutor/VFF
|
459be5b480bba49e8c15dc7daeca5fd1ddd762df
|
[
"Apache-2.0"
] | 75
|
2016-11-21T21:50:12.000Z
|
2022-03-22T17:56:36.000Z
|
experiments/increasing_dim/Exp_1/kron.py
|
vdutor/VFF
|
459be5b480bba49e8c15dc7daeca5fd1ddd762df
|
[
"Apache-2.0"
] | 4
|
2018-03-22T19:47:08.000Z
|
2021-10-06T14:49:54.000Z
|
experiments/increasing_dim/Exp_1/kron.py
|
vdutor/VFF
|
459be5b480bba49e8c15dc7daeca5fd1ddd762df
|
[
"Apache-2.0"
] | 26
|
2016-11-22T14:14:58.000Z
|
2022-02-03T18:29:20.000Z
|
import numpy as np
import sys
import gpflow
import VFF
from time import time
from config import *
import pandas as pd  # assumed import: pd is used below but was never imported
from sklearn.cluster import KMeans  # assumed import: KMeans is used below but was never imported
dim = int(sys.argv[1])
rep = int(sys.argv[2])
print('vff: dimension {}, replicate {}'.format(dim, rep))
# data
data = np.load('data/data_dim{}_rep{}.npz'.format(dim, 0))
# full_gp
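# product of one-dimensional Matern-3/2 kernels, one per input dimension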
def prodkern(dim):
return gpflow.kernels.Prod([gpflow.kernels.Matern32(1, active_dims=[i], lengthscales=lengthscale)
for i in range(dim)])
k = prodkern(dim)
m = gpflow.gpr.GPR(data['Xtrain'], data['Ytrain'], kern=k)
m.likelihood.variance = noise_var
data = np.load('data/data_dim{}_rep{}.npz'.format(dim, rep))
marg_lik = m.compute_log_likelihood().squeeze()
mean_log_pred = np.mean(m.predict_density(data['Xtest'], data['Ytest']))
file = open("results/full.csv","a")
file.write("{}, {}, {}, {}\n".format(dim, rep, marg_lik, mean_log_pred))
file.close()
##########################
# kron
results = pd.DataFrame()
for dim in dimensions:
a, b = -1.5 * np.ones(dim), 1.5 * np.ones(dim)
k = prodkern(dim)
for r in range(repeats):
print('kron replicate ',r,'/',repeats)
data = np.load('data/data_dim{}_rep{}.npz'.format(dim, r))
for M in num_freqs:
if (2*M-1)**dim:
a, b = -0.5 * np.ones(dim), 1.5 * np.ones(dim)
m = VFF.vgp.VGP_kron(data['Xtrain'], data['Ytrain'], np.arange(M), a, b,
kerns=prodkern(dim).kern_list,
likelihood=gpflow.likelihoods.Gaussian(),
use_two_krons=True)
m.likelihood.variance = noise_var
# only optimize q(u)
m.kerns.fixed = True
m.likelihood.fixed = True
start = time()
m.optimize()
marg_lik = m.compute_log_likelihood().squeeze()
mean_log_pred = np.mean(m.predict_density(data['Xtest'], data['Ytest']))
t = time() - start
results = results.append(dict(dim=dim, rep=r, marg_lik=marg_lik,
mean_log_pred=mean_log_pred, time=t,
num_inducing=M),
ignore_index=True)
# do this inside the loop so we can get partial results if something crashes
results.to_csv('results/kron.csv')
##########################
# kron_opt
results = pd.DataFrame()
for dim in dimensions:
a, b = -1.5 * np.ones(dim), 1.5 * np.ones(dim)
k = prodkern(dim)
for r in range(repeats):
print('kron_opt replicate ',r,'/',repeats)
data = np.load('data/data_dim{}_rep{}.npz'.format(dim, r))
for M in num_freqs:
if (2*M-1)**dim:
m = VFF.vgp.VGP_kron(data['Xtrain'], data['Ytrain'], np.arange(M), a, b,
kerns=k.kern_list,
likelihood=gpflow.likelihoods.Gaussian(),
use_two_krons=True)
m.likelihood.variance = noise_var
# build kronecker GP model
start = time()
m.optimize()
marg_lik = m.compute_log_likelihood().squeeze()
mean_log_pred = np.mean(m.predict_density(data['Xtest'], data['Ytest']))
t = time() - start
results = results.append(dict(dim=dim, rep=r, marg_lik=marg_lik,
mean_log_pred=mean_log_pred, time=t,
num_inducing=M),
ignore_index=True)
results.to_csv('results/kron_opt.csv')
##########################
# Sparse
results = pd.DataFrame()
for dim in dimensions:
for r in range(repeats):
print('Sparse replicate ',r,'/',repeats)
data = np.load('data/data_dim{}_rep{}.npz'.format(dim, r))
num_inducing = (2*num_freqs-1)**dim
for M in num_inducing:
if M < 500:
# build sparse GP model
Z = KMeans(n_clusters=M).fit(data['Xtrain']).cluster_centers_
m = gpflow.sgpr.SGPR(data['Xtrain'], data['Ytrain'], Z=Z, kern=prodkern(dim))
m.likelihood.variance = noise_var
start = time()
marg_lik = m.compute_log_likelihood().squeeze()
mean_log_pred = np.mean(m.predict_density(data['Xtest'], data['Ytest']))
t = time() - start
results = results.append(dict(dim=dim, rep=r, marg_lik=marg_lik,
mean_log_pred=mean_log_pred, time=t,
num_inducing=M),
ignore_index=True)
# do this inside the loop so we can get partial results if something crashes
results.to_csv('results/sparse_kmeans.csv')
##########################
# Sparse GP opt
results = pd.DataFrame()
for dim in dimensions:
for r in range(repeats):
print('sparse opt replicate ',r,'/',repeats)
data = np.load('data/data_dim{}_rep{}.npz'.format(dim, r))
num_inducing = (2*num_freqs-1)**dim
for M in num_inducing:
if M < 500:
# build sparse GP model
Z = KMeans(n_clusters=M).fit(data['Xtrain']).cluster_centers_
m = gpflow.sgpr.SGPR(data['Xtrain'], data['Ytrain'], Z=Z, kern=prodkern(dim))
m.likelihood.variance = noise_var
# only optimize Z
m.kern.fixed = True
m.likelihood.fixed = True
start = time()
m.optimize()
marg_lik = m.compute_log_likelihood().squeeze()
mean_log_pred = np.mean(m.predict_density(data['Xtest'], data['Ytest']))
t = time() - start
results = results.append(dict(dim=dim, rep=r, marg_lik=marg_lik,
mean_log_pred=mean_log_pred, time=t,
num_inducing=M),
ignore_index=True)
# do this inside the loop so we can get partial results if something crashes
results.to_csv('results/sparse_opt.csv')
##########################
#
| 37.523256
| 101
| 0.503719
|
import numpy as np
import sys
import gpflow
import VFF
from time import time
from config import *
import pandas as pd
from sklearn.cluster import KMeans
dim = int(sys.argv[1])
rep = int(sys.argv[2])
print('vff: dimension {}, replicate {}'.format(dim, rep))
data = np.load('data/data_dim{}_rep{}.npz'.format(dim, 0))
def prodkern(dim):
return gpflow.kernels.Prod([gpflow.kernels.Matern32(1, active_dims=[i], lengthscales=lengthscale)
for i in range(dim)])
k = prodkern(dim)
m = gpflow.gpr.GPR(data['Xtrain'], data['Ytrain'], kern=k)
m.likelihood.variance = noise_var
data = np.load('data/data_dim{}_rep{}.npz'.format(dim, rep))
marg_lik = m.compute_log_likelihood().squeeze()
mean_log_pred = np.mean(m.predict_density(data['Xtest'], data['Ytest']))
file = open("results/full.csv","a")
file.write("{}, {}, {}, {}\n".format(dim, rep, marg_lik, mean_log_pred))
file.close()
results = pd.DataFrame()
for dim in dimensions:
a, b = -1.5 * np.ones(dim), 1.5 * np.ones(dim)
k = prodkern(dim)
for r in range(repeats):
print('kron replicate ',r,'/',repeats)
data = np.load('data/data_dim{}_rep{}.npz'.format(dim, r))
for M in num_freqs:
if (2*M-1)**dim:
a, b = -0.5 * np.ones(dim), 1.5 * np.ones(dim)
m = VFF.vgp.VGP_kron(data['Xtrain'], data['Ytrain'], np.arange(M), a, b,
kerns=prodkern(dim).kern_list,
likelihood=gpflow.likelihoods.Gaussian(),
use_two_krons=True)
m.likelihood.variance = noise_var
m.kerns.fixed = True
m.likelihood.fixed = True
start = time()
m.optimize()
marg_lik = m.compute_log_likelihood().squeeze()
mean_log_pred = np.mean(m.predict_density(data['Xtest'], data['Ytest']))
t = time() - start
results = results.append(dict(dim=dim, rep=r, marg_lik=marg_lik,
mean_log_pred=mean_log_pred, time=t,
num_inducing=M),
ignore_index=True)
results.to_csv('results/kron.csv')
results = pd.DataFrame()
for dim in dimensions:
a, b = -1.5 * np.ones(dim), 1.5 * np.ones(dim)
k = prodkern(dim)
for r in range(repeats):
print('kron_opt replicate ',r,'/',repeats)
data = np.load('data/data_dim{}_rep{}.npz'.format(dim, r))
for M in num_freqs:
if (2*M-1)**dim:
m = VFF.vgp.VGP_kron(data['Xtrain'], data['Ytrain'], np.arange(M), a, b,
kerns=k.kern_list,
likelihood=gpflow.likelihoods.Gaussian(),
use_two_krons=True)
m.likelihood.variance = noise_var
start = time()
m.optimize()
marg_lik = m.compute_log_likelihood().squeeze()
mean_log_pred = np.mean(m.predict_density(data['Xtest'], data['Ytest']))
t = time() - start
results = results.append(dict(dim=dim, rep=r, marg_lik=marg_lik,
mean_log_pred=mean_log_pred, time=t,
num_inducing=M),
ignore_index=True)
results.to_csv('results/kron_opt.csv')
results = pd.DataFrame()
for dim in dimensions:
for r in range(repeats):
print('Sparse replicate ',r,'/',repeats)
data = np.load('data/data_dim{}_rep{}.npz'.format(dim, r))
num_inducing = (2*num_freqs-1)**dim
for M in num_inducing:
if M < 500:
Z = KMeans(n_clusters=M).fit(data['Xtrain']).cluster_centers_
m = gpflow.sgpr.SGPR(data['Xtrain'], data['Ytrain'], Z=Z, kern=prodkern(dim))
m.likelihood.variance = noise_var
start = time()
marg_lik = m.compute_log_likelihood().squeeze()
mean_log_pred = np.mean(m.predict_density(data['Xtest'], data['Ytest']))
t = time() - start
results = results.append(dict(dim=dim, rep=r, marg_lik=marg_lik,
mean_log_pred=mean_log_pred, time=t,
num_inducing=M),
ignore_index=True)
results.to_csv('results/sparse_kmeans.csv')
results = pd.DataFrame()
for dim in dimensions:
for r in range(repeats):
print('sparse opt replicate ',r,'/',repeats)
data = np.load('data/data_dim{}_rep{}.npz'.format(dim, r))
num_inducing = (2*num_freqs-1)**dim
for M in num_inducing:
if M < 500:
Z = KMeans(n_clusters=M).fit(data['Xtrain']).cluster_centers_
m = gpflow.sgpr.SGPR(data['Xtrain'], data['Ytrain'], Z=Z, kern=prodkern(dim))
m.likelihood.variance = noise_var
m.kern.fixed = True
m.likelihood.fixed = True
start = time()
m.optimize()
marg_lik = m.compute_log_likelihood().squeeze()
mean_log_pred = np.mean(m.predict_density(data['Xtest'], data['Ytest']))
t = time() - start
results = results.append(dict(dim=dim, rep=r, marg_lik=marg_lik,
mean_log_pred=mean_log_pred, time=t,
num_inducing=M),
ignore_index=True)
results.to_csv('results/sparse_opt.csv')
| true
| true
|
f70383c83f8ac45d0ac95cb44577dc706c18ae28
| 8,317
|
py
|
Python
|
datanator/data_source/rna_halflife/doi_10_1093_nar_gks1019.py
|
KarrLab/Kinetic-Datanator
|
8aff047fd117033b98eca8ee3b21a8f07c430dec
|
[
"CC-BY-3.0",
"CC0-1.0",
"CC-BY-4.0",
"MIT"
] | 10
|
2018-11-20T17:04:09.000Z
|
2021-08-24T18:29:06.000Z
|
datanator/data_source/rna_halflife/doi_10_1093_nar_gks1019.py
|
KarrLab/Kinetic-Datanator
|
8aff047fd117033b98eca8ee3b21a8f07c430dec
|
[
"CC-BY-3.0",
"CC0-1.0",
"CC-BY-4.0",
"MIT"
] | 59
|
2018-11-23T20:42:11.000Z
|
2020-11-08T19:51:36.000Z
|
datanator/data_source/rna_halflife/doi_10_1093_nar_gks1019.py
|
KarrLab/Kinetic-Datanator
|
8aff047fd117033b98eca8ee3b21a8f07c430dec
|
[
"CC-BY-3.0",
"CC0-1.0",
"CC-BY-4.0",
"MIT"
] | 3
|
2018-12-15T00:53:54.000Z
|
2021-08-24T18:29:08.000Z
|
import pandas as pd
from datanator.util import rna_halflife_util
import datetime
import datanator.config.core
import datetime
from pymongo.collation import Collation, CollationStrength
class Halflife(rna_halflife_util.RnaHLUtil):
def __init__(self, cache_dir=None, server=None, src_db=None, protein_col=None,
authDB=None, readPreference=None, username=None, password=None,
verbose=None, max_entries=None, des_db=None, rna_col=None):
"""Init
Args:
            cache_dir (:obj:`str`, optional): Cache directory for logs. Defaults to None.
            server (:obj:`str`, optional): MongoDB server address. Defaults to None.
            src_db (:obj:`str`, optional): Database where the initial uniprot collection resides. Defaults to None.
            protein_col (:obj:`str`, optional): Name of the protein (uniprot) collection. Defaults to None.
            authDB (:obj:`str`, optional): MongoDB authentication database. Defaults to None.
            readPreference (:obj:`str`, optional): MongoDB read preference. Defaults to None.
            username (:obj:`str`, optional): MongoDB username. Defaults to None.
            password (:obj:`str`, optional): MongoDB password. Defaults to None.
            verbose (:obj:`bool`, optional): Whether to display verbose messages. Defaults to None.
            max_entries (:obj:`int`, optional): Number of records to be processed. Defaults to None.
            des_db (:obj:`str`, optional): Database to which new records will be inserted. Defaults to None.
            rna_col (:obj:`str`, optional): Name of the RNA half-life collection. Defaults to None.
"""
super().__init__(server=server, username=username, password=password, src_db=src_db,
des_db=des_db, protein_col=protein_col, rna_col=rna_col, authDB=authDB, readPreference=readPreference,
max_entries=max_entries, verbose=verbose)
self.collation = Collation('en', strength=CollationStrength.SECONDARY)
self.max_entries = max_entries
self.verbose = verbose
    def fill_uniprot(self, url, sheet_name, usercols='B:D', skiprows=[0,1,2],
                    insertion=True):
        """Fill uniprot collection with ordered_locus_name
from excel sheet
Args:
url (:obj:`str`): URL for Excel sheet.
sheet_name (:obj:`str`): sheet name within Excel.
            usercols (:obj:`int` or :obj:`list` or :obj:`str`): Return a subset of the columns.
skiprows (:obj:`list`): rows to skip (0-indexed)
insertion (:obj:`bool`): whether to insert new records to uniprot collection.
Return:
(:obj:`pandas.DataFrame`): Dataframe
"""
df = self.make_df(url, sheet_name, usecols=usercols, skiprows=skiprows,
names=['ordered_locus_name', 'half_life', 'r_squared'])
row_count = len(df.index)
if insertion:
for index, row in df.iterrows():
if index == self.max_entries:
break
if index % 10 == 0 and self.verbose:
print("Inserting locus {} out of {} into uniprot collection.".format(index, row_count))
oln = row['ordered_locus_name']
self.fill_uniprot_by_oln(oln)
return df
def fill_rna_halflife(self, df, species):
"""load data into rna_halflife collection
Args:
df (:obj:`pandas.DataFrame`): dataframe to be loaded into the database
species (:obj:`list`): species name and ncbi_id
"""
row_count = len(df.index)
for i, row in df.iterrows():
if i == self.max_entries:
break
if i % 10 == 0 and self.verbose:
print("Processing locus {} out {}".format(i, row_count))
halflives = {}
oln = row['ordered_locus_name']
halflives['halflife'] = row['half_life'] * 60
halflives['r_squared'] = row['r_squared']
halflives['unit'] = 's'
halflives['reference'] = [{'doi': '10.1093/nar/gks1019', 'pubmed_id': '23125364'}]
            halflives['growth_medium'] = 'Middlebrook 7H9 with the ADC supplement (Difco) and 0.05% Tween80, at 37 degrees Celsius.'
halflives['ordered_locus_name'] = oln
halflives['species'] = species[0]
halflives['ncbi_taxonomy_id'] = species[1]
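            # match priority: gene name, then an informative protein name,
            # then the ordered locus name; otherwise insert a new document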
gene_name, protein_name = self.uniprot_query_manager.get_gene_protein_name_by_oln(oln)
if gene_name is not None: # record exists in uniprot collection with gene_name
self.rna_hl_collection.update_one({'gene_name': gene_name},
{'$set': {'modified': datetime.datetime.utcnow()},
'$addToSet': {'halflives': halflives,
'protein_synonyms': protein_name}},
collation=self.collation, upsert=True)
elif (gene_name is None and protein_name is not None and
protein_name != 'Uncharacterized protein'): # record exists in uniprot collection with non-filler protein_name
self.rna_hl_collection.update_one({'protein_name': protein_name},
{'$set': {'modified': datetime.datetime.utcnow(),
'gene_name': gene_name},
'$addToSet': {'halflives': halflives,
'protein_synonyms': protein_name}},
collation=self.collation, upsert=True)
else:
query = {'halflives.ordered_locus_name': oln}
doc = self.rna_hl_collection.find_one(filter=query, collation=self.collation)
if doc is not None:
self.rna_hl_collection.update_one({'halflives.ordered_locus_name': oln},
{'$set': {'modified': datetime.datetime.utcnow(),
'gene_name': gene_name},
'$addToSet': {'halflives': halflives,
'protein_synonyms': protein_name}},
collation=self.collation, upsert=True)
else:
doc = {'halflives': [halflives], 'modified': datetime.datetime.utcnow(),
'gene_name': gene_name, 'protein_name': protein_name}
self.rna_hl_collection.insert_one(doc)
def main():
src_db = 'datanator'
des_db = 'datanator'
rna_col = 'rna_halflife'
protein_col = 'uniprot'
username = datanator.config.core.get_config()[
'datanator']['mongodb']['user']
password = datanator.config.core.get_config(
)['datanator']['mongodb']['password']
server = datanator.config.core.get_config(
)['datanator']['mongodb']['server']
src = Halflife(server=server, src_db=src_db,
protein_col=protein_col, authDB='admin', readPreference='nearest',
username=username, password=password, verbose=True, max_entries=float('inf'),
des_db=des_db, rna_col=rna_col)
url = 'https://oup.silverchair-cdn.com/oup/backfile/Content_public/Journal/nar/41/1/10.1093/nar/gks1019/2/gks1019-nar-00676-a-2012-File003.xlsx?Expires=1578425844&Signature=ZRFUxLdn4-vaBt5gQci~0o56KqyR9nJj9i32ig5X6YcfqiJeV3obEq8leHGdDxx6w~KABgewiQ66HTB7gmuG~2GL-YgxPKYSjt17WrYMkc-0ibw6TMlTvWZZfvw-lPe~wvpmVfNEXnTbP7jHyNLu9jeJ6yhoXvgIyQtzA5PbEI1fyXEgeZzOKMltmITqL3g3APsPsagCTC66rwrBT23Aghh6D314uilT2DZHCc68MH2nyV~qAhFqIQiOj-7VTEKqkDPvPYvuE2KNKXdvW23gk100YV~58ozbt8ijRz5Gr5gPtE~f1Ab5l260EIbWHJNabMRleInJQqUIDPFN4C38PQ__&Key-Pair-Id=APKAIE5G5CRDK6RD3PGA'
# df = src.fill_uniprot(url, 'Supplementary Table 1', insertion=False)
# src.fill_rna_halflife(df, ['Mycobacterium tuberculosis H37Rv', 83332])
df = src.fill_uniprot(url, 'Supplementary Table 2', skiprows=list(range(0,6)))
src.fill_rna_halflife(df, ['Mycolicibacterium smegmatis MC2 155', 246196])
if __name__ == '__main__':
main()
| 59.407143
| 555
| 0.593123
|
import pandas as pd
from datanator.util import rna_halflife_util
import datetime
import datanator.config.core
import datetime
from pymongo.collation import Collation, CollationStrength
class Halflife(rna_halflife_util.RnaHLUtil):
def __init__(self, cache_dir=None, server=None, src_db=None, protein_col=None,
authDB=None, readPreference=None, username=None, password=None,
verbose=None, max_entries=None, des_db=None, rna_col=None):
super().__init__(server=server, username=username, password=password, src_db=src_db,
des_db=des_db, protein_col=protein_col, rna_col=rna_col, authDB=authDB, readPreference=readPreference,
max_entries=max_entries, verbose=verbose)
self.collation = Collation('en', strength=CollationStrength.SECONDARY)
self.max_entries = max_entries
self.verbose = verbose
def fill_uniprot(self, url, sheet_name, usercols='B:D', skiprows=[0,1,2],
insertion=True):
df = self.make_df(url, sheet_name, usecols=usercols, skiprows=skiprows,
names=['ordered_locus_name', 'half_life', 'r_squared'])
row_count = len(df.index)
if insertion:
for index, row in df.iterrows():
if index == self.max_entries:
break
if index % 10 == 0 and self.verbose:
print("Inserting locus {} out of {} into uniprot collection.".format(index, row_count))
oln = row['ordered_locus_name']
self.fill_uniprot_by_oln(oln)
return df
def fill_rna_halflife(self, df, species):
row_count = len(df.index)
for i, row in df.iterrows():
if i == self.max_entries:
break
if i % 10 == 0 and self.verbose:
print("Processing locus {} out {}".format(i, row_count))
halflives = {}
oln = row['ordered_locus_name']
halflives['halflife'] = row['half_life'] * 60
halflives['r_squared'] = row['r_squared']
halflives['unit'] = 's'
halflives['reference'] = [{'doi': '10.1093/nar/gks1019', 'pubmed_id': '23125364'}]
            halflives['growth_medium'] = 'Middlebrook 7H9 with the ADC supplement (Difco) and 0.05% Tween80, at 37 degrees Celsius.'
halflives['ordered_locus_name'] = oln
halflives['species'] = species[0]
halflives['ncbi_taxonomy_id'] = species[1]
gene_name, protein_name = self.uniprot_query_manager.get_gene_protein_name_by_oln(oln)
            if gene_name is not None:
                self.rna_hl_collection.update_one(
                    {'gene_name': gene_name},
                    {'$set': {'modified': datetime.datetime.utcnow()},
                     '$addToSet': {'halflives': halflives,
                                   'protein_synonyms': protein_name}},
                    collation=self.collation, upsert=True)
            elif (gene_name is None and protein_name is not None and
                  protein_name != 'Uncharacterized protein'):
                self.rna_hl_collection.update_one(
                    {'protein_name': protein_name},
                    {'$set': {'modified': datetime.datetime.utcnow(),
                              'gene_name': gene_name},
                     '$addToSet': {'halflives': halflives,
                                   'protein_synonyms': protein_name}},
                    collation=self.collation, upsert=True)
else:
query = {'halflives.ordered_locus_name': oln}
doc = self.rna_hl_collection.find_one(filter=query, collation=self.collation)
if doc is not None:
self.rna_hl_collection.update_one({'halflives.ordered_locus_name': oln},
{'$set': {'modified': datetime.datetime.utcnow(),
'gene_name': gene_name},
'$addToSet': {'halflives': halflives,
'protein_synonyms': protein_name}},
collation=self.collation, upsert=True)
else:
doc = {'halflives': [halflives], 'modified': datetime.datetime.utcnow(),
'gene_name': gene_name, 'protein_name': protein_name}
self.rna_hl_collection.insert_one(doc)
def main():
src_db = 'datanator'
des_db = 'datanator'
rna_col = 'rna_halflife'
protein_col = 'uniprot'
username = datanator.config.core.get_config()[
'datanator']['mongodb']['user']
password = datanator.config.core.get_config(
)['datanator']['mongodb']['password']
server = datanator.config.core.get_config(
)['datanator']['mongodb']['server']
src = Halflife(server=server, src_db=src_db,
protein_col=protein_col, authDB='admin', readPreference='nearest',
username=username, password=password, verbose=True, max_entries=float('inf'),
des_db=des_db, rna_col=rna_col)
url = 'https://oup.silverchair-cdn.com/oup/backfile/Content_public/Journal/nar/41/1/10.1093/nar/gks1019/2/gks1019-nar-00676-a-2012-File003.xlsx?Expires=1578425844&Signature=ZRFUxLdn4-vaBt5gQci~0o56KqyR9nJj9i32ig5X6YcfqiJeV3obEq8leHGdDxx6w~KABgewiQ66HTB7gmuG~2GL-YgxPKYSjt17WrYMkc-0ibw6TMlTvWZZfvw-lPe~wvpmVfNEXnTbP7jHyNLu9jeJ6yhoXvgIyQtzA5PbEI1fyXEgeZzOKMltmITqL3g3APsPsagCTC66rwrBT23Aghh6D314uilT2DZHCc68MH2nyV~qAhFqIQiOj-7VTEKqkDPvPYvuE2KNKXdvW23gk100YV~58ozbt8ijRz5Gr5gPtE~f1Ab5l260EIbWHJNabMRleInJQqUIDPFN4C38PQ__&Key-Pair-Id=APKAIE5G5CRDK6RD3PGA'
df = src.fill_uniprot(url, 'Supplementary Table 2', skiprows=list(range(0,6)))
src.fill_rna_halflife(df, ['Mycolicibacterium smegmatis MC2 155', 246196])
if __name__ == '__main__':
main()
| true
| true
|
f703863d27b157411139d10736ab6cddb11462a0
| 4,522
|
py
|
Python
|
tests/integration/issues/hanging_termination/test_hanging_termination.py
|
Akshat-unt/jina
|
b0b058f99f3ee4dcbcbbf2acbf04c5d7e7e9c717
|
[
"Apache-2.0"
] | 1
|
2021-12-18T06:54:49.000Z
|
2021-12-18T06:54:49.000Z
|
tests/integration/issues/hanging_termination/test_hanging_termination.py
|
Akshat-unt/jina
|
b0b058f99f3ee4dcbcbbf2acbf04c5d7e7e9c717
|
[
"Apache-2.0"
] | 2
|
2021-12-17T15:22:12.000Z
|
2021-12-18T07:19:06.000Z
|
tests/integration/issues/hanging_termination/test_hanging_termination.py
|
Akshat-unt/jina
|
b0b058f99f3ee4dcbcbbf2acbf04c5d7e7e9c717
|
[
"Apache-2.0"
] | null | null | null |
import os
from pathlib import Path
import numpy as np
import pytest
from jina import Flow, Document
from jina.clients import Client
from jina.logging.profile import TimeContext
from jina.parsers import set_client_cli_parser
from typing import Dict
from jina import DocumentArray, Executor, requests
class DumpExecutor(Executor):
@requests
def dump(self, docs: DocumentArray, parameters: Dict, **kwargs):
shards = int(parameters['shards'])
dump_path = parameters['dump_path']
shard_size = len(docs) / shards
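        # shard_size may be fractional; the int() casts in the loop below give
        # each shard a contiguous, non-overlapping slice of docs whose lengths
        # differ by at most one document.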
os.makedirs(dump_path, exist_ok=True)
for i in range(shards):
dump_file = f'{dump_path}/{i}.ndjson'
docs_to_be_dumped = docs[int(i * shard_size) : int((i + 1) * shard_size)]
docs_to_be_dumped.save(dump_file)
class ErrorExecutor(Executor):
@requests
def dump(self, docs: DocumentArray, **kwargs):
if len(docs) > 0:
assert False
class ReloadExecutor(Executor):
def __init__(self, dump_path=None, *args, **kwargs):
super().__init__(*args, **kwargs)
# backwards compatibility
assert 'dump_path' in kwargs['runtime_args'].keys()
if dump_path is not None:
shard_id = getattr(self.runtime_args, 'pea_id', None)
shard_dump_path = os.path.join(dump_path, f'{shard_id}.ndjson')
self._docs = DocumentArray.load(shard_dump_path)
else:
self._docs = DocumentArray()
@requests
def search(self, docs: DocumentArray, **kwargs):
docs.clear()
docs.extend(self._docs)
class MergeExecutor(Executor):
@requests
def merge(self, docs_matrix: DocumentArray, **kwargs):
merged_docs = DocumentArray()
for docs in docs_matrix:
merged_docs.extend(docs)
return merged_docs
def get_client(port):
args = set_client_cli_parser().parse_args(
['--host', 'localhost', '--port', str(port)]
)
return Client(args)
def get_documents(count=10, emb_size=7):
for i in range(count):
yield Document(
id=i,
text=f'hello world {i}',
embedding=np.random.random(emb_size),
tags={'tag_field': f'tag data {i}'},
)
def path_size(dump_path):
return (
sum(
f.stat().st_size
for f in Path(dump_path).glob('**/*')
if f.is_file()
)
/ 1e6
)
@pytest.mark.repeat(20)
@pytest.mark.parametrize('shards', [5, 3, 1])
@pytest.mark.parametrize('nr_docs', [7])
@pytest.mark.parametrize('emb_size', [10])
def test_dump_reload(tmpdir, shards, nr_docs, emb_size, times_to_index=2):
"""showcases using replicas + dump + rolling update with independent clients"""
with Flow().add(uses=DumpExecutor, name='dump_exec').add(
uses=ErrorExecutor, name='error_exec'
) as flow_dump:
merge_executor = MergeExecutor if shards > 1 else None
with Flow().add(
uses=ReloadExecutor,
name='reload_exec',
replicas=2,
shards=shards,
uses_after=merge_executor,
) as flow_reload:
for run_number in range(times_to_index):
dump_path = os.path.join(tmpdir, f'dump-{run_number}')
client_dbms = get_client(flow_dump.port_expose)
client_query = get_client(flow_reload.port_expose)
docs = list(
get_documents(
count=nr_docs * (run_number + 1),
emb_size=emb_size,
)
)
with TimeContext(f'### dumping {len(docs)} docs'):
client_dbms.post(
on='/dump',
inputs=docs,
target_peapod='dump_exec',
parameters={'dump_path': dump_path, 'shards': shards},
)
print(f'### dump path size: {path_size(dump_path)} MBs')
with TimeContext(f'### rolling update on {len(docs)}'):
# flow object is used for ctrl requests
flow_reload.rolling_update('reload_exec', dump_path)
for _ in range(5):
result = client_query.post(
on='/search', inputs=[Document()], return_results=True
)
assert len(docs) == len(result[0].docs)
| 32.3
| 85
| 0.568996
|
import os
from pathlib import Path
import numpy as np
import pytest
from jina import Flow, Document
from jina.clients import Client
from jina.logging.profile import TimeContext
from jina.parsers import set_client_cli_parser
from typing import Dict
from jina import DocumentArray, Executor, requests
class DumpExecutor(Executor):
@requests
def dump(self, docs: DocumentArray, parameters: Dict, **kwargs):
shards = int(parameters['shards'])
dump_path = parameters['dump_path']
shard_size = len(docs) / shards
os.makedirs(dump_path, exist_ok=True)
for i in range(shards):
dump_file = f'{dump_path}/{i}.ndjson'
docs_to_be_dumped = docs[int(i * shard_size) : int((i + 1) * shard_size)]
docs_to_be_dumped.save(dump_file)
class ErrorExecutor(Executor):
@requests
def dump(self, docs: DocumentArray, **kwargs):
if len(docs) > 0:
assert False
class ReloadExecutor(Executor):
def __init__(self, dump_path=None, *args, **kwargs):
super().__init__(*args, **kwargs)
assert 'dump_path' in kwargs['runtime_args'].keys()
if dump_path is not None:
shard_id = getattr(self.runtime_args, 'pea_id', None)
shard_dump_path = os.path.join(dump_path, f'{shard_id}.ndjson')
self._docs = DocumentArray.load(shard_dump_path)
else:
self._docs = DocumentArray()
@requests
def search(self, docs: DocumentArray, **kwargs):
docs.clear()
docs.extend(self._docs)
class MergeExecutor(Executor):
@requests
def merge(self, docs_matrix: DocumentArray, **kwargs):
merged_docs = DocumentArray()
for docs in docs_matrix:
merged_docs.extend(docs)
return merged_docs
def get_client(port):
args = set_client_cli_parser().parse_args(
['--host', 'localhost', '--port', str(port)]
)
return Client(args)
def get_documents(count=10, emb_size=7):
for i in range(count):
yield Document(
id=i,
text=f'hello world {i}',
embedding=np.random.random(emb_size),
tags={'tag_field': f'tag data {i}'},
)
def path_size(dump_path):
return (
sum(
f.stat().st_size
for f in Path(dump_path).glob('**/*')
if f.is_file()
)
/ 1e6
)
@pytest.mark.repeat(20)
@pytest.mark.parametrize('shards', [5, 3, 1])
@pytest.mark.parametrize('nr_docs', [7])
@pytest.mark.parametrize('emb_size', [10])
def test_dump_reload(tmpdir, shards, nr_docs, emb_size, times_to_index=2):
with Flow().add(uses=DumpExecutor, name='dump_exec').add(
uses=ErrorExecutor, name='error_exec'
) as flow_dump:
merge_executor = MergeExecutor if shards > 1 else None
with Flow().add(
uses=ReloadExecutor,
name='reload_exec',
replicas=2,
shards=shards,
uses_after=merge_executor,
) as flow_reload:
for run_number in range(times_to_index):
dump_path = os.path.join(tmpdir, f'dump-{run_number}')
client_dbms = get_client(flow_dump.port_expose)
client_query = get_client(flow_reload.port_expose)
docs = list(
get_documents(
count=nr_docs * (run_number + 1),
emb_size=emb_size,
)
)
with TimeContext(f'### dumping {len(docs)} docs'):
client_dbms.post(
on='/dump',
inputs=docs,
target_peapod='dump_exec',
parameters={'dump_path': dump_path, 'shards': shards},
)
print(f'### dump path size: {path_size(dump_path)} MBs')
with TimeContext(f'### rolling update on {len(docs)}'):
flow_reload.rolling_update('reload_exec', dump_path)
for _ in range(5):
result = client_query.post(
on='/search', inputs=[Document()], return_results=True
)
assert len(docs) == len(result[0].docs)
| true
| true
|
f70386457425b8a79d1bf8bba8f25e6afd14e258
| 2,013
|
py
|
Python
|
utils/timeout.py
|
trilogy-group/kayako-dd-agent
|
4a4cce9a367a6a522f3f4290819cbcd3ef637832
|
[
"BSD-3-Clause"
] | 76
|
2015-02-09T12:20:12.000Z
|
2022-03-31T04:29:35.000Z
|
utils/timeout.py
|
fotinakis/dd-agent
|
b8642a36f26190b7bed3208e145bd3ac4cc4f59b
|
[
"BSD-3-Clause"
] | 58
|
2015-01-12T08:57:23.000Z
|
2021-09-24T23:06:23.000Z
|
utils/timeout.py
|
fotinakis/dd-agent
|
b8642a36f26190b7bed3208e145bd3ac4cc4f59b
|
[
"BSD-3-Clause"
] | 31
|
2015-03-30T15:30:19.000Z
|
2021-01-12T07:37:22.000Z
|
# (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
from threading import Thread
import functools
_thread_by_func = {}
class TimeoutException(Exception):
"""
Raised when a function runtime exceeds the limit set.
"""
pass
class ThreadMethod(Thread):
"""
Descendant of `Thread` class.
Run the specified target method with the specified arguments.
Store result and exceptions.
From: https://code.activestate.com/recipes/440569/
"""
def __init__(self, target, args, kwargs):
Thread.__init__(self)
        self.daemon = True
self.target, self.args, self.kwargs = target, args, kwargs
self.start()
def run(self):
try:
self.result = self.target(*self.args, **self.kwargs)
except Exception as e:
self.exception = e
else:
self.exception = None
def timeout(timeout):
"""
A decorator to timeout a function. Decorated method calls are executed in a separate new thread
with a specified timeout.
Also check if a thread for the same function already exists before creating a new one.
Note: Compatible with Windows (thread based).
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
key = "{0}:{1}:{2}:{3}".format(id(func), func.__name__, args, kwargs)
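            # One worker per (function, args, kwargs) combination: re-joining an
            # existing worker makes repeated calls wait on the still-running
            # attempt instead of spawning a new thread for each call.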
if key in _thread_by_func:
# A thread for the same function already exists.
worker = _thread_by_func[key]
else:
worker = ThreadMethod(func, args, kwargs)
_thread_by_func[key] = worker
worker.join(timeout)
if worker.is_alive():
raise TimeoutException()
del _thread_by_func[key]
if worker.exception:
raise worker.exception
else:
return worker.result
return wrapper
return decorator
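# Minimal usage sketch, assuming a function that overruns its budget; the
# function below is hypothetical and exists only to illustrate the decorator.
if __name__ == '__main__':
    import time
    @timeout(1)
    def _sleep_five():
        # Sleeps well past the 1 s budget, so the decorator should raise.
        time.sleep(5)
    try:
        _sleep_five()
    except TimeoutException:
        print('_sleep_five() exceeded its 1 second budget')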
| 26.84
| 99
| 0.606557
|
from threading import Thread
import functools
_thread_by_func = {}
class TimeoutException(Exception):
pass
class ThreadMethod(Thread):
def __init__(self, target, args, kwargs):
Thread.__init__(self)
        self.daemon = True
self.target, self.args, self.kwargs = target, args, kwargs
self.start()
def run(self):
try:
self.result = self.target(*self.args, **self.kwargs)
except Exception as e:
self.exception = e
else:
self.exception = None
def timeout(timeout):
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
key = "{0}:{1}:{2}:{3}".format(id(func), func.__name__, args, kwargs)
if key in _thread_by_func:
worker = _thread_by_func[key]
else:
worker = ThreadMethod(func, args, kwargs)
_thread_by_func[key] = worker
worker.join(timeout)
if worker.is_alive():
raise TimeoutException()
del _thread_by_func[key]
if worker.exception:
raise worker.exception
else:
return worker.result
return wrapper
return decorator
| true
| true
|
f703876b792a5aa0ae1f2b68123b4b424e567173
| 2,939
|
py
|
Python
|
designate/scheduler/filters/pool_id_attribute_filter.py
|
mrlesmithjr/designate
|
bff3d5f6e31fe595a77143ec4ac779c187bf72a8
|
[
"Apache-2.0"
] | 145
|
2015-01-02T09:35:53.000Z
|
2021-12-14T17:03:53.000Z
|
designate/scheduler/filters/pool_id_attribute_filter.py
|
mrlesmithjr/designate
|
bff3d5f6e31fe595a77143ec4ac779c187bf72a8
|
[
"Apache-2.0"
] | 6
|
2015-03-15T00:22:27.000Z
|
2019-12-16T09:37:38.000Z
|
designate/scheduler/filters/pool_id_attribute_filter.py
|
mrlesmithjr/designate
|
bff3d5f6e31fe595a77143ec4ac779c187bf72a8
|
[
"Apache-2.0"
] | 109
|
2015-01-13T16:47:34.000Z
|
2021-03-15T13:18:48.000Z
|
# Copyright 2016 Hewlett Packard Enterprise Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from designate import exceptions
from designate import objects
from designate import policy
from designate.scheduler.filters import base
LOG = logging.getLogger(__name__)
class PoolIDAttributeFilter(base.Filter):
"""This allows users with the correct role to specify the exact pool_id
to schedule the supplied zone to.
This is supplied as an attribute on the zone
.. code-block:: python
:emphasize-lines: 3
{
"attributes": {
"pool_id": "794ccc2c-d751-44fe-b57f-8894c9f5c842"
},
"email": "user@example.com",
"name": "example.com."
}
The pool is loaded to ensure it exists, and then a policy check is
performed to ensure the user has the correct role.
.. warning::
This should only be enabled if required, as it will raise a
403 Forbidden if a user without the correct role uses it.
"""
name = 'pool_id_attribute'
"""Name to enable in the ``[designate:central:scheduler].filters`` option
list
"""
def filter(self, context, pools, zone):
"""Attempt to load and set the pool to the one provided in the
Zone attributes.
:param context: :class:`designate.context.DesignateContext` - Context
Object from request
:param pools: :class:`designate.objects.pool.PoolList` - List of pools
to choose from
:param zone: :class:`designate.objects.zone.Zone` - Zone to be created
        :return: :class:`designate.objects.pool.PoolList` -- A PoolList
            containing a single pool.
:raises: Forbidden, PoolNotFound
"""
try:
if zone.attributes.get('pool_id'):
pool_id = zone.attributes.get('pool_id')
try:
pool = self.storage.get_pool(context, pool_id)
except Exception:
return objects.PoolList()
policy.check('zone_create_forced_pool', context, pool)
if pool in pools:
pools = objects.PoolList()
pools.append(pool)
return pools
else:
return pools
except exceptions.RelationNotLoaded:
return pools
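# Hedged example: per the ``name`` attribute's docstring above, this filter is
# activated by adding ``pool_id_attribute`` to the scheduler's filter list in
# the service configuration, e.g. (section name taken from that docstring):
#
#     [designate:central:scheduler]
#     filters = pool_id_attribute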
| 34.576471
| 78
| 0.636611
|
from oslo_log import log as logging
from designate import exceptions
from designate import objects
from designate import policy
from designate.scheduler.filters import base
LOG = logging.getLogger(__name__)
class PoolIDAttributeFilter(base.Filter):
name = 'pool_id_attribute'
def filter(self, context, pools, zone):
try:
if zone.attributes.get('pool_id'):
pool_id = zone.attributes.get('pool_id')
try:
pool = self.storage.get_pool(context, pool_id)
except Exception:
return objects.PoolList()
policy.check('zone_create_forced_pool', context, pool)
if pool in pools:
pools = objects.PoolList()
pools.append(pool)
return pools
else:
return pools
except exceptions.RelationNotLoaded:
return pools
| true
| true
|
f703884b878ba570bfebe96909363b76a6c9c45f
| 34
|
py
|
Python
|
operation/__init__.py
|
gretchi/twim
|
20a4bf459182a01d535cfb948d8b30de087585f1
|
[
"MIT"
] | null | null | null |
operation/__init__.py
|
gretchi/twim
|
20a4bf459182a01d535cfb948d8b30de087585f1
|
[
"MIT"
] | null | null | null |
operation/__init__.py
|
gretchi/twim
|
20a4bf459182a01d535cfb948d8b30de087585f1
|
[
"MIT"
] | null | null | null |
from .operation import Operation
| 11.333333
| 32
| 0.823529
|
from .operation import Operation
| true
| true
|
f703884d0e67a0ffdabbd1f89173bd1ecbd974bb
| 8,899
|
py
|
Python
|
aiida/orm/utils/__init__.py
|
louisponet/aiida-core
|
3214236df66a3792ee57fe38a06c0c3bb65861ab
|
[
"MIT",
"BSD-3-Clause"
] | 1
|
2016-09-12T10:51:00.000Z
|
2016-09-12T10:51:00.000Z
|
aiida/orm/utils/__init__.py
|
louisponet/aiida-core
|
3214236df66a3792ee57fe38a06c0c3bb65861ab
|
[
"MIT",
"BSD-3-Clause"
] | 17
|
2020-03-11T17:04:05.000Z
|
2020-05-01T09:34:45.000Z
|
aiida/orm/utils/__init__.py
|
louisponet/aiida-core
|
3214236df66a3792ee57fe38a06c0c3bb65861ab
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Utilities related to the ORM."""
__all__ = ('load_code', 'load_computer', 'load_group', 'load_node')
def load_entity(
entity_loader=None, identifier=None, pk=None, uuid=None, label=None, sub_classes=None, query_with_dashes=True
):
# pylint: disable=too-many-arguments
"""
Load an entity instance by one of its identifiers: pk, uuid or label
If the type of the identifier is unknown simply pass it without a keyword and the loader will attempt to
automatically infer the type.
:param identifier: pk (integer), uuid (string) or label (string) of a Code
:param pk: pk of a Code
:param uuid: uuid of a Code, or the beginning of the uuid
:param label: label of a Code
:param sub_classes: an optional tuple of orm classes to narrow the queryset. Each class should be a strict sub class
of the ORM class of the given entity loader.
:param bool query_with_dashes: allow to query for a uuid with dashes
:returns: the Code instance
:raise ValueError: if none or more than one of the identifiers are supplied
:raise TypeError: if the provided identifier has the wrong type
:raise aiida.common.NotExistent: if no matching Code is found
:raise aiida.common.MultipleObjectsError: if more than one Code was found
"""
from aiida.orm.utils.loaders import OrmEntityLoader, IdentifierType
if entity_loader is None or not issubclass(entity_loader, OrmEntityLoader):
        raise TypeError(f'entity_loader should be a sub class of {OrmEntityLoader.__name__}')
inputs_provided = [value is not None for value in (identifier, pk, uuid, label)].count(True)
    if inputs_provided == 0:
        raise ValueError("one of the parameters 'identifier', 'pk', 'uuid' or 'label' has to be specified")
    elif inputs_provided > 1:
        raise ValueError("only one of the parameters 'identifier', 'pk', 'uuid' or 'label' has to be specified")
if pk is not None:
if not isinstance(pk, int):
raise TypeError('a pk has to be an integer')
identifier = pk
identifier_type = IdentifierType.ID
elif uuid is not None:
if not isinstance(uuid, str):
raise TypeError('uuid has to be a string type')
identifier = uuid
identifier_type = IdentifierType.UUID
elif label is not None:
if not isinstance(label, str):
raise TypeError('label has to be a string type')
identifier = label
identifier_type = IdentifierType.LABEL
else:
identifier = str(identifier)
identifier_type = None
return entity_loader.load_entity(
identifier, identifier_type, sub_classes=sub_classes, query_with_dashes=query_with_dashes
)
def load_code(identifier=None, pk=None, uuid=None, label=None, sub_classes=None, query_with_dashes=True):
"""
Load a Code instance by one of its identifiers: pk, uuid or label
If the type of the identifier is unknown simply pass it without a keyword and the loader will attempt to
automatically infer the type.
:param identifier: pk (integer), uuid (string) or label (string) of a Code
:param pk: pk of a Code
:param uuid: uuid of a Code, or the beginning of the uuid
:param label: label of a Code
:param sub_classes: an optional tuple of orm classes to narrow the queryset. Each class should be a strict sub class
of the ORM class of the given entity loader.
:param bool query_with_dashes: allow to query for a uuid with dashes
:return: the Code instance
:raise ValueError: if none or more than one of the identifiers are supplied
:raise TypeError: if the provided identifier has the wrong type
:raise aiida.common.NotExistent: if no matching Code is found
:raise aiida.common.MultipleObjectsError: if more than one Code was found
"""
from aiida.orm.utils.loaders import CodeEntityLoader
return load_entity(
CodeEntityLoader,
identifier=identifier,
pk=pk,
uuid=uuid,
label=label,
sub_classes=sub_classes,
query_with_dashes=query_with_dashes
)
def load_computer(identifier=None, pk=None, uuid=None, label=None, sub_classes=None, query_with_dashes=True):
"""
Load a Computer instance by one of its identifiers: pk, uuid or label
If the type of the identifier is unknown simply pass it without a keyword and the loader will attempt to
automatically infer the type.
:param identifier: pk (integer), uuid (string) or label (string) of a Computer
:param pk: pk of a Computer
:param uuid: uuid of a Computer, or the beginning of the uuid
:param label: label of a Computer
:param sub_classes: an optional tuple of orm classes to narrow the queryset. Each class should be a strict sub class
of the ORM class of the given entity loader.
:param bool query_with_dashes: allow to query for a uuid with dashes
:return: the Computer instance
:raise ValueError: if none or more than one of the identifiers are supplied
:raise TypeError: if the provided identifier has the wrong type
:raise aiida.common.NotExistent: if no matching Computer is found
:raise aiida.common.MultipleObjectsError: if more than one Computer was found
"""
from aiida.orm.utils.loaders import ComputerEntityLoader
return load_entity(
ComputerEntityLoader,
identifier=identifier,
pk=pk,
uuid=uuid,
label=label,
sub_classes=sub_classes,
query_with_dashes=query_with_dashes
)
def load_group(identifier=None, pk=None, uuid=None, label=None, sub_classes=None, query_with_dashes=True):
"""
Load a Group instance by one of its identifiers: pk, uuid or label
If the type of the identifier is unknown simply pass it without a keyword and the loader will attempt to
automatically infer the type.
:param identifier: pk (integer), uuid (string) or label (string) of a Group
:param pk: pk of a Group
:param uuid: uuid of a Group, or the beginning of the uuid
:param label: label of a Group
:param sub_classes: an optional tuple of orm classes to narrow the queryset. Each class should be a strict sub class
of the ORM class of the given entity loader.
:param bool query_with_dashes: allow to query for a uuid with dashes
:return: the Group instance
:raise ValueError: if none or more than one of the identifiers are supplied
:raise TypeError: if the provided identifier has the wrong type
:raise aiida.common.NotExistent: if no matching Group is found
:raise aiida.common.MultipleObjectsError: if more than one Group was found
"""
from aiida.orm.utils.loaders import GroupEntityLoader
return load_entity(
GroupEntityLoader,
identifier=identifier,
pk=pk,
uuid=uuid,
label=label,
sub_classes=sub_classes,
query_with_dashes=query_with_dashes
)
def load_node(identifier=None, pk=None, uuid=None, label=None, sub_classes=None, query_with_dashes=True):
"""
Load a node by one of its identifiers: pk or uuid. If the type of the identifier is unknown
simply pass it without a keyword and the loader will attempt to infer the type
:param identifier: pk (integer) or uuid (string)
:param pk: pk of a node
:param uuid: uuid of a node, or the beginning of the uuid
:param label: label of a Node
:param sub_classes: an optional tuple of orm classes to narrow the queryset. Each class should be a strict sub class
of the ORM class of the given entity loader.
:param bool query_with_dashes: allow to query for a uuid with dashes
:returns: the node instance
:raise ValueError: if none or more than one of the identifiers are supplied
:raise TypeError: if the provided identifier has the wrong type
:raise aiida.common.NotExistent: if no matching Node is found
:raise aiida.common.MultipleObjectsError: if more than one Node was found
"""
from aiida.orm.utils.loaders import NodeEntityLoader
return load_entity(
NodeEntityLoader,
identifier=identifier,
pk=pk,
uuid=uuid,
label=label,
sub_classes=sub_classes,
query_with_dashes=query_with_dashes
)
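# Hedged usage sketch (the pk, label and uuid values below are made up):
#
#     from aiida import load_profile
#     load_profile()
#     node = load_node(1234)               # integer -> treated as a pk
#     code = load_code(label='pw-6.5')     # explicit label lookup
#     group = load_group(uuid='2d1c28f9')  # uuid, or just its beginning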
| 43.199029
| 120
| 0.684234
|
__all__ = ('load_code', 'load_computer', 'load_group', 'load_node')
def load_entity(
entity_loader=None, identifier=None, pk=None, uuid=None, label=None, sub_classes=None, query_with_dashes=True
):
from aiida.orm.utils.loaders import OrmEntityLoader, IdentifierType
if entity_loader is None or not issubclass(entity_loader, OrmEntityLoader):
        raise TypeError(f'entity_loader should be a sub class of {OrmEntityLoader.__name__}')
inputs_provided = [value is not None for value in (identifier, pk, uuid, label)].count(True)
    if inputs_provided == 0:
        raise ValueError("one of the parameters 'identifier', 'pk', 'uuid' or 'label' has to be specified")
    elif inputs_provided > 1:
        raise ValueError("only one of the parameters 'identifier', 'pk', 'uuid' or 'label' has to be specified")
if pk is not None:
if not isinstance(pk, int):
raise TypeError('a pk has to be an integer')
identifier = pk
identifier_type = IdentifierType.ID
elif uuid is not None:
if not isinstance(uuid, str):
raise TypeError('uuid has to be a string type')
identifier = uuid
identifier_type = IdentifierType.UUID
elif label is not None:
if not isinstance(label, str):
raise TypeError('label has to be a string type')
identifier = label
identifier_type = IdentifierType.LABEL
else:
identifier = str(identifier)
identifier_type = None
return entity_loader.load_entity(
identifier, identifier_type, sub_classes=sub_classes, query_with_dashes=query_with_dashes
)
def load_code(identifier=None, pk=None, uuid=None, label=None, sub_classes=None, query_with_dashes=True):
from aiida.orm.utils.loaders import CodeEntityLoader
return load_entity(
CodeEntityLoader,
identifier=identifier,
pk=pk,
uuid=uuid,
label=label,
sub_classes=sub_classes,
query_with_dashes=query_with_dashes
)
def load_computer(identifier=None, pk=None, uuid=None, label=None, sub_classes=None, query_with_dashes=True):
from aiida.orm.utils.loaders import ComputerEntityLoader
return load_entity(
ComputerEntityLoader,
identifier=identifier,
pk=pk,
uuid=uuid,
label=label,
sub_classes=sub_classes,
query_with_dashes=query_with_dashes
)
def load_group(identifier=None, pk=None, uuid=None, label=None, sub_classes=None, query_with_dashes=True):
from aiida.orm.utils.loaders import GroupEntityLoader
return load_entity(
GroupEntityLoader,
identifier=identifier,
pk=pk,
uuid=uuid,
label=label,
sub_classes=sub_classes,
query_with_dashes=query_with_dashes
)
def load_node(identifier=None, pk=None, uuid=None, label=None, sub_classes=None, query_with_dashes=True):
from aiida.orm.utils.loaders import NodeEntityLoader
return load_entity(
NodeEntityLoader,
identifier=identifier,
pk=pk,
uuid=uuid,
label=label,
sub_classes=sub_classes,
query_with_dashes=query_with_dashes
)
| true
| true
|
f703888ac8620fcfd52e21ef458eecb61c9abac7
| 4,366
|
py
|
Python
|
pynet/cam.py
|
claireguichon/pynet
|
92706375e61fb5cb523548303b7d04769c9de134
|
[
"CECILL-B"
] | null | null | null |
pynet/cam.py
|
claireguichon/pynet
|
92706375e61fb5cb523548303b7d04769c9de134
|
[
"CECILL-B"
] | null | null | null |
pynet/cam.py
|
claireguichon/pynet
|
92706375e61fb5cb523548303b7d04769c9de134
|
[
"CECILL-B"
] | null | null | null |
# -*- coding: utf-8 -*-
##########################################################################
# NSAp - Copyright (C) CEA, 2019
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""
Module that provides tools to compute class activation map.
"""
# Imports
import logging
import skimage.transform
import numpy as np
import torch
from torch.autograd import Variable
import torch.nn.functional as func
# Global parameters
logger = logging.getLogger("pynet")
class FeatureExtractor(object):
""" Class for extracting activations and registering gradients from
targetted intermediate layers.
"""
def __init__(self, model, target_layers):
self.model = model
self.target_layers = target_layers
self.gradients = []
def save_gradient(self, grad):
self.gradients.append(grad)
def __call__(self, x):
outputs = []
self.gradients = []
for name, module in self.model._modules.items():
x = module(x)
if name in self.target_layers:
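                # register_hook fires during backward(), capturing the gradient
                # flowing into this targeted activation so GradCam can pool it.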
x.register_hook(self.save_gradient)
outputs += [x]
return outputs, x
class ModelOutputs(object):
""" Class for making a forward pass, and getting:
1- the network output.
2- activations from intermeddiate targetted layers.
3- gradients from intermeddiate targetted layers.
"""
def __init__(self, model, target_layers):
self.model = model
self.feature_extractor = FeatureExtractor(
self.model.features, target_layers)
def get_activations_gradient(self):
return self.feature_extractor.gradients
def get_activations(self, x):
return self.feature_extractor(x)
def __call__(self, x):
if hasattr(self.model, "pre"):
x = self.model.pre(x)
target_activations, output = self.feature_extractor(x)
if hasattr(self.model, "pool"):
output = self.model.pool(output)
output = output.view(output.size(0), -1)
output = self.model.classifier(output)
return target_activations, output
class GradCam(object):
""" Class for computing class activation map.
"""
def __init__(self, model, target_layers, labels, top=1):
self.model = model
self.labels = labels
self.top = top
self.model.eval()
self.extractor = ModelOutputs(self.model, target_layers)
def forward(self, input):
return self.model(input)
def __call__(self, input):
features, output = self.extractor(input)
pred_prob = func.softmax(output, dim=1).data.squeeze()
probs, indices = pred_prob.sort(0, True)
probs = probs.data.numpy()
indices = indices.data.numpy()
heatmaps = {}
for cnt, (prob, index) in enumerate(zip(probs, indices)):
if cnt == self.top:
break
label = self.labels[str(index)][1]
line = "{0:.3f} -> {1}".format(prob, label)
logger.info(line)
one_hot = np.zeros((1, output.size()[-1]), dtype=np.float32)
one_hot[0][index] = 1
one_hot = Variable(torch.from_numpy(one_hot), requires_grad=True)
one_hot = torch.sum(one_hot * output)
self.model.features.zero_grad()
self.model.classifier.zero_grad()
one_hot.backward(retain_graph=True)
gradients = self.extractor.get_activations_gradient()[-1]
gradients = gradients.cpu().data.numpy()
pooled_gradients = np.mean(gradients, axis=(0, 2, 3))
activations = features[-1]
activations = activations.cpu().data.numpy()
for cnt, weight in enumerate(pooled_gradients):
activations[:, cnt] *= weight
heatmap = np.mean(activations, axis=1).squeeze()
heatmap = np.maximum(heatmap, 0)
heatmap -= np.min(heatmap)
heatmap /= np.max(heatmap)
heatmap_highres = skimage.transform.resize(
heatmap, input.shape[2:])
heatmaps[label] = (input, heatmap, heatmap_highres)
return heatmaps
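# Hedged usage sketch: any model exposing .features and .classifier works with
# ModelOutputs above; torchvision's VGG16 is used here for illustration, and
# the label map follows the {"index": [wnid, name]} layout read by GradCam.
#
#     import json
#     import torch
#     from torchvision import models
#
#     model = models.vgg16(pretrained=True)
#     labels = json.load(open('imagenet_class_index.json'))
#     cam = GradCam(model, target_layers=['29'], labels=labels, top=1)
#     heatmaps = cam(torch.randn(1, 3, 224, 224))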
| 34.109375
| 77
| 0.598717
|
import logging
import skimage.transform
import numpy as np
import torch
from torch.autograd import Variable
import torch.nn.functional as func
logger = logging.getLogger("pynet")
class FeatureExtractor(object):
def __init__(self, model, target_layers):
self.model = model
self.target_layers = target_layers
self.gradients = []
def save_gradient(self, grad):
self.gradients.append(grad)
def __call__(self, x):
outputs = []
self.gradients = []
for name, module in self.model._modules.items():
x = module(x)
if name in self.target_layers:
x.register_hook(self.save_gradient)
outputs += [x]
return outputs, x
class ModelOutputs(object):
def __init__(self, model, target_layers):
self.model = model
self.feature_extractor = FeatureExtractor(
self.model.features, target_layers)
def get_activations_gradient(self):
return self.feature_extractor.gradients
def get_activations(self, x):
return self.feature_extractor(x)
def __call__(self, x):
if hasattr(self.model, "pre"):
x = self.model.pre(x)
target_activations, output = self.feature_extractor(x)
if hasattr(self.model, "pool"):
output = self.model.pool(output)
output = output.view(output.size(0), -1)
output = self.model.classifier(output)
return target_activations, output
class GradCam(object):
def __init__(self, model, target_layers, labels, top=1):
self.model = model
self.labels = labels
self.top = top
self.model.eval()
self.extractor = ModelOutputs(self.model, target_layers)
def forward(self, input):
return self.model(input)
def __call__(self, input):
features, output = self.extractor(input)
pred_prob = func.softmax(output, dim=1).data.squeeze()
probs, indices = pred_prob.sort(0, True)
probs = probs.data.numpy()
indices = indices.data.numpy()
heatmaps = {}
for cnt, (prob, index) in enumerate(zip(probs, indices)):
if cnt == self.top:
break
label = self.labels[str(index)][1]
line = "{0:.3f} -> {1}".format(prob, label)
logger.info(line)
one_hot = np.zeros((1, output.size()[-1]), dtype=np.float32)
one_hot[0][index] = 1
one_hot = Variable(torch.from_numpy(one_hot), requires_grad=True)
one_hot = torch.sum(one_hot * output)
self.model.features.zero_grad()
self.model.classifier.zero_grad()
one_hot.backward(retain_graph=True)
gradients = self.extractor.get_activations_gradient()[-1]
gradients = gradients.cpu().data.numpy()
pooled_gradients = np.mean(gradients, axis=(0, 2, 3))
activations = features[-1]
activations = activations.cpu().data.numpy()
for cnt, weight in enumerate(pooled_gradients):
activations[:, cnt] *= weight
heatmap = np.mean(activations, axis=1).squeeze()
heatmap = np.maximum(heatmap, 0)
heatmap -= np.min(heatmap)
heatmap /= np.max(heatmap)
heatmap_highres = skimage.transform.resize(
heatmap, input.shape[2:])
heatmaps[label] = (input, heatmap, heatmap_highres)
return heatmaps
| true
| true
|
f7038a4e3cc3a6dbe03fde86c4a7001c8c8b8b51
| 9,495
|
py
|
Python
|
src/UI_assign_group_window.py
|
GatherLab/OLED-evaluation
|
419dfd5d2c3773f5f90d76aef634f8b1cc0b6378
|
[
"MIT"
] | null | null | null |
src/UI_assign_group_window.py
|
GatherLab/OLED-evaluation
|
419dfd5d2c3773f5f90d76aef634f8b1cc0b6378
|
[
"MIT"
] | null | null | null |
src/UI_assign_group_window.py
|
GatherLab/OLED-evaluation
|
419dfd5d2c3773f5f90d76aef634f8b1cc0b6378
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from PySide2 import QtCore, QtGui, QtWidgets
import json
import core_functions as cf
import numpy as np
from UI_labeled_slider import LabeledSlider
class Ui_AssignGroup(object):
def setupUi(self, AssignGroups):
# Note: this is not how it should be done but currently I don't know
# how to do it differently. This is only needed to be able to emit
# signals to the main window
AssignGroups.setObjectName("AssignGroups")
        AssignGroups.setWindowTitle("Group Assignment Dialog")
AssignGroups.resize(509, 317)
AssignGroups.setStyleSheet(
"QWidget {\n"
" background-color: rgb(44, 49, 60);\n"
" color: rgb(255, 255, 255);\n"
' font: 63 10pt "Segoe UI";\n'
"}\n"
"QPushButton {\n"
" border: 2px solid rgb(52, 59, 72);\n"
" border-radius: 5px;\n"
" background-color: rgb(52, 59, 72);\n"
"}\n"
"QPushButton:hover {\n"
" background-color: rgb(57, 65, 80);\n"
" border: 2px solid rgb(61, 70, 86);\n"
"}\n"
"QPushButton:pressed {\n"
" background-color: rgb(35, 40, 49);\n"
" border: 2px solid rgb(43, 50, 61);\n"
"}\n"
"QPushButton:checked {\n"
" background-color: rgb(35, 40, 49);\n"
" border: 2px solid rgb(85, 170, 255);\n"
"}"
"QLineEdit {\n"
" border: 2px solid rgb(61, 70, 86);\n"
" border-radius: 5px;\n"
" background-color: rgb(52, 59, 72);\n"
"}\n"
"QSpinBox {\n"
" border: 2px solid rgb(61, 70, 86);\n"
" border-radius: 5px;\n"
" background-color: rgb(52, 59, 72);\n"
"}\n"
"QDoubleSpinBox {\n"
" border: 2px solid rgb(61, 70, 86);\n"
" border-radius: 5px;\n"
" background-color: rgb(52, 59, 72);\n"
"}\n"
)
self.verticalLayout = QtWidgets.QVBoxLayout(AssignGroups)
self.verticalLayout.setContentsMargins(25, 10, 25, 10)
self.verticalLayout.setObjectName("verticalLayout")
# # Device settings
# self.device_settings_header_label = QtWidgets.QLabel(AssignGroups)
# self.device_settings_header_label.setMinimumSize(QtCore.QSize(0, 20))
# self.device_settings_header_label.setStyleSheet(
# 'font: 75 bold 10pt "Segoe UI";'
# )
# self.device_settings_header_label.setObjectName("device_settings_header_label")
# self.verticalLayout.addWidget(self.device_settings_header_label)
# self.header_line_1 = QtWidgets.QFrame()
# self.header_line_1.setFrameShape(QtWidgets.QFrame.HLine)
# self.header_line_1.setFrameShadow(QtWidgets.QFrame.Sunken)
# self.verticalLayout.addWidget(self.header_line_1)
# self.header_line_1.setStyleSheet(
# "QFrame {\n" " border: 2px solid rgb(52, 59, 72);\n" "}\n"
# )
# self.manualRowCountGridLayout = 1
# Define dialog in which parameters should be entered
# dialog = QtWidgets.QDialog()
        # dialog.setWindowTitle("Group Assignment Dialog")
# Select the scan that shall be evaluated
if not self.include_all_scans:
self.select_scan_number_label = QtWidgets.QLabel()
self.select_scan_number_label.setObjectName("select_scan_number_label")
self.verticalLayout.addWidget(self.select_scan_number_label)
self.select_scan_number_ComboBox = QtWidgets.QComboBox()
self.select_scan_number_ComboBox.setObjectName(
"select_scan_number_ComboBox"
)
for i in range(self.parameters["no_of_scans"]):
self.select_scan_number_ComboBox.addItem(str(int(i + 1)))
self.select_scan_number_ComboBox.setCurrentIndex(0)
self.verticalLayout.addWidget(self.select_scan_number_ComboBox)
# Select the number of groups to define
self.no_groups_label = QtWidgets.QLabel()
self.verticalLayout.addWidget(self.no_groups_label)
self.no_groups_LabeledSlider = LabeledSlider(
1,
int(np.size(np.unique(self.parameters["device_number"]))),
interval=1,
orientation=QtCore.Qt.Horizontal,
)
self.verticalLayout.addWidget(self.no_groups_LabeledSlider)
self.available_devices_label = QtWidgets.QLabel()
self.verticalLayout.addWidget(self.available_devices_label)
# if np.size(self.paths) == 1:
# verticalLayout.addWidget(self.no_groups_LabeledSlider)
        # Define the group assignment fields
self.group_definition_gridLayout = QtWidgets.QGridLayout()
self.group_definition_gridLayout.setSpacing(10)
# Group names and its container
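        # Widget containers are empty numpy object arrays so extra rows can be
        # appended later if the user raises the group count on the slider.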
self.group_name_label = QtWidgets.QLabel()
self.group_definition_gridLayout.addWidget(self.group_name_label, 1, 0, 1, 1)
self.group_name_LineEdit_container = np.empty(0, dtype="object")
self.group_name_LineEdit_container = np.append(
self.group_name_LineEdit_container, QtWidgets.QLineEdit()
)
self.group_definition_gridLayout.addWidget(
self.group_name_LineEdit_container[0], 2, 0
)
# Enter device numbers and its container
self.device_assignment_label = QtWidgets.QLabel()
self.group_definition_gridLayout.addWidget(
self.device_assignment_label, 1, 1, 1, 1
)
self.device_assignment_LineEdit_container = np.empty(0, dtype="object")
self.device_assignment_LineEdit_container = np.append(
self.device_assignment_LineEdit_container, QtWidgets.QLineEdit()
)
self.group_definition_gridLayout.addWidget(
self.device_assignment_LineEdit_container[0], 2, 1
)
# Assign a spectrum file to the group
if not self.autodetect_spectrum:
self.spectrum_file_label = QtWidgets.QLabel()
self.group_definition_gridLayout.addWidget(
self.spectrum_file_label, 1, 2, 1, 1
)
self.group_spectrum_PushButton_container = np.empty(0, dtype="object")
self.group_spectrum_PushButton_container = np.append(
self.group_spectrum_PushButton_container, QtWidgets.QPushButton("")
)
self.group_spectrum_PushButton_container[0].setStyleSheet(
"background-color: red"
)
self.group_definition_gridLayout.addWidget(
self.group_spectrum_PushButton_container[0], 2, 2
)
# Definition of a plotting color for the group
self.group_color_label = QtWidgets.QLabel()
self.group_definition_gridLayout.addWidget(self.group_color_label, 1, 3, 1, 1)
self.group_colors_PushButton_container = np.empty(0, dtype="object")
self.group_colors_PushButton_container = np.append(
self.group_colors_PushButton_container, QtWidgets.QPushButton("")
)
self.group_colors_PushButton_container[0].setStyleSheet(
"background-color: " + str(self.group_color[0])
)
self.group_definition_gridLayout.addWidget(
self.group_colors_PushButton_container[0], 2, 3
)
# Define the bottom pushbuttons that allows to close and save the dialog
self.leave_horizontalLayout = QtWidgets.QHBoxLayout()
self.close_pushButton = QtWidgets.QPushButton("Close")
self.save_pushButton = QtWidgets.QPushButton("Save")
self.leave_horizontalLayout.addWidget(self.close_pushButton)
self.leave_horizontalLayout.addWidget(self.save_pushButton)
self.verticalLayout.addLayout(self.group_definition_gridLayout)
self.verticalLayout.addLayout(self.leave_horizontalLayout)
self.setLayout(self.verticalLayout)
self.retranslateUi(AssignGroups)
QtCore.QMetaObject.connectSlotsByName(AssignGroups)
def retranslateUi(self, AssignGroups):
_translate = QtCore.QCoreApplication.translate
AssignGroups.setWindowTitle(_translate("AssignGroups", "Assign Groups"))
if not self.include_all_scans:
self.select_scan_number_label.setText(
_translate("AssignGroups", "Select Scan")
)
self.no_groups_label.setText(
_translate("AssignGroups", "Select Number of Groups")
)
self.available_devices_label.setText(
_translate(
"AssignGroups",
"Available Devices for Assignment "
+ str(self.parameters["device_number"]),
)
)
self.group_name_label.setText(_translate("AssignGroups", "Group Name"))
self.device_assignment_label.setText(
_translate("AssignGroups", "Assign Devices (seperated by ,)")
)
self.group_color_label.setText(_translate("AssignGroups", "Color"))
if not self.autodetect_spectrum:
self.spectrum_file_label.setText(_translate("AssignGroups", "Spectrum"))
| 42.013274
| 89
| 0.618325
|
from PySide2 import QtCore, QtGui, QtWidgets
import json
import core_functions as cf
import numpy as np
from UI_labeled_slider import LabeledSlider
class Ui_AssignGroup(object):
def setupUi(self, AssignGroups):
        # Note: this is not how it should be done but currently I don't know
        # how to do it differently. This is only needed to be able to emit
        # signals to the main window
AssignGroups.setObjectName("AssignGroups")
        AssignGroups.setWindowTitle("Group Assignment Dialog")
AssignGroups.resize(509, 317)
AssignGroups.setStyleSheet(
"QWidget {\n"
" background-color: rgb(44, 49, 60);\n"
" color: rgb(255, 255, 255);\n"
' font: 63 10pt "Segoe UI";\n'
"}\n"
"QPushButton {\n"
" border: 2px solid rgb(52, 59, 72);\n"
" border-radius: 5px;\n"
" background-color: rgb(52, 59, 72);\n"
"}\n"
"QPushButton:hover {\n"
" background-color: rgb(57, 65, 80);\n"
" border: 2px solid rgb(61, 70, 86);\n"
"}\n"
"QPushButton:pressed {\n"
" background-color: rgb(35, 40, 49);\n"
" border: 2px solid rgb(43, 50, 61);\n"
"}\n"
"QPushButton:checked {\n"
" background-color: rgb(35, 40, 49);\n"
" border: 2px solid rgb(85, 170, 255);\n"
"}"
"QLineEdit {\n"
" border: 2px solid rgb(61, 70, 86);\n"
" border-radius: 5px;\n"
" background-color: rgb(52, 59, 72);\n"
"}\n"
"QSpinBox {\n"
" border: 2px solid rgb(61, 70, 86);\n"
" border-radius: 5px;\n"
" background-color: rgb(52, 59, 72);\n"
"}\n"
"QDoubleSpinBox {\n"
" border: 2px solid rgb(61, 70, 86);\n"
" border-radius: 5px;\n"
" background-color: rgb(52, 59, 72);\n"
"}\n"
)
self.verticalLayout = QtWidgets.QVBoxLayout(AssignGroups)
self.verticalLayout.setContentsMargins(25, 10, 25, 10)
self.verticalLayout.setObjectName("verticalLayout")
# # Device settings
# self.device_settings_header_label = QtWidgets.QLabel(AssignGroups)
# self.device_settings_header_label.setMinimumSize(QtCore.QSize(0, 20))
# self.device_settings_header_label.setStyleSheet(
# 'font: 75 bold 10pt "Segoe UI";'
# )
# self.device_settings_header_label.setObjectName("device_settings_header_label")
# self.verticalLayout.addWidget(self.device_settings_header_label)
# self.header_line_1 = QtWidgets.QFrame()
# self.header_line_1.setFrameShape(QtWidgets.QFrame.HLine)
# self.header_line_1.setFrameShadow(QtWidgets.QFrame.Sunken)
# self.verticalLayout.addWidget(self.header_line_1)
# self.header_line_1.setStyleSheet(
# "QFrame {\n" " border: 2px solid rgb(52, 59, 72);\n" "}\n"
# )
# self.manualRowCountGridLayout = 1
# Define dialog in which parameters should be entered
# dialog = QtWidgets.QDialog()
        # dialog.setWindowTitle("Group Assignment Dialog")
# Select the scan that shall be evaluated
if not self.include_all_scans:
self.select_scan_number_label = QtWidgets.QLabel()
self.select_scan_number_label.setObjectName("select_scan_number_label")
self.verticalLayout.addWidget(self.select_scan_number_label)
self.select_scan_number_ComboBox = QtWidgets.QComboBox()
self.select_scan_number_ComboBox.setObjectName(
"select_scan_number_ComboBox"
)
for i in range(self.parameters["no_of_scans"]):
self.select_scan_number_ComboBox.addItem(str(int(i + 1)))
self.select_scan_number_ComboBox.setCurrentIndex(0)
self.verticalLayout.addWidget(self.select_scan_number_ComboBox)
# Select the number of groups to define
self.no_groups_label = QtWidgets.QLabel()
self.verticalLayout.addWidget(self.no_groups_label)
self.no_groups_LabeledSlider = LabeledSlider(
1,
int(np.size(np.unique(self.parameters["device_number"]))),
interval=1,
orientation=QtCore.Qt.Horizontal,
)
self.verticalLayout.addWidget(self.no_groups_LabeledSlider)
self.available_devices_label = QtWidgets.QLabel()
self.verticalLayout.addWidget(self.available_devices_label)
# if np.size(self.paths) == 1:
# verticalLayout.addWidget(self.no_groups_LabeledSlider)
        # Define the group assignment fields
self.group_definition_gridLayout = QtWidgets.QGridLayout()
self.group_definition_gridLayout.setSpacing(10)
# Group names and its container
self.group_name_label = QtWidgets.QLabel()
self.group_definition_gridLayout.addWidget(self.group_name_label, 1, 0, 1, 1)
self.group_name_LineEdit_container = np.empty(0, dtype="object")
self.group_name_LineEdit_container = np.append(
self.group_name_LineEdit_container, QtWidgets.QLineEdit()
)
self.group_definition_gridLayout.addWidget(
self.group_name_LineEdit_container[0], 2, 0
)
# Enter device numbers and its container
self.device_assignment_label = QtWidgets.QLabel()
self.group_definition_gridLayout.addWidget(
self.device_assignment_label, 1, 1, 1, 1
)
self.device_assignment_LineEdit_container = np.empty(0, dtype="object")
self.device_assignment_LineEdit_container = np.append(
self.device_assignment_LineEdit_container, QtWidgets.QLineEdit()
)
self.group_definition_gridLayout.addWidget(
self.device_assignment_LineEdit_container[0], 2, 1
)
# Assign a spectrum file to the group
if not self.autodetect_spectrum:
self.spectrum_file_label = QtWidgets.QLabel()
self.group_definition_gridLayout.addWidget(
self.spectrum_file_label, 1, 2, 1, 1
)
self.group_spectrum_PushButton_container = np.empty(0, dtype="object")
self.group_spectrum_PushButton_container = np.append(
self.group_spectrum_PushButton_container, QtWidgets.QPushButton("")
)
self.group_spectrum_PushButton_container[0].setStyleSheet(
"background-color: red"
)
self.group_definition_gridLayout.addWidget(
self.group_spectrum_PushButton_container[0], 2, 2
)
# Definition of a plotting color for the group
self.group_color_label = QtWidgets.QLabel()
self.group_definition_gridLayout.addWidget(self.group_color_label, 1, 3, 1, 1)
self.group_colors_PushButton_container = np.empty(0, dtype="object")
self.group_colors_PushButton_container = np.append(
self.group_colors_PushButton_container, QtWidgets.QPushButton("")
)
self.group_colors_PushButton_container[0].setStyleSheet(
"background-color: " + str(self.group_color[0])
)
self.group_definition_gridLayout.addWidget(
self.group_colors_PushButton_container[0], 2, 3
)
# Define the bottom pushbuttons that allows to close and save the dialog
self.leave_horizontalLayout = QtWidgets.QHBoxLayout()
self.close_pushButton = QtWidgets.QPushButton("Close")
self.save_pushButton = QtWidgets.QPushButton("Save")
self.leave_horizontalLayout.addWidget(self.close_pushButton)
self.leave_horizontalLayout.addWidget(self.save_pushButton)
self.verticalLayout.addLayout(self.group_definition_gridLayout)
self.verticalLayout.addLayout(self.leave_horizontalLayout)
self.setLayout(self.verticalLayout)
self.retranslateUi(AssignGroups)
QtCore.QMetaObject.connectSlotsByName(AssignGroups)
def retranslateUi(self, AssignGroups):
_translate = QtCore.QCoreApplication.translate
AssignGroups.setWindowTitle(_translate("AssignGroups", "Assign Groups"))
if not self.include_all_scans:
self.select_scan_number_label.setText(
_translate("AssignGroups", "Select Scan")
)
self.no_groups_label.setText(
_translate("AssignGroups", "Select Number of Groups")
)
self.available_devices_label.setText(
_translate(
"AssignGroups",
"Available Devices for Assignment "
+ str(self.parameters["device_number"]),
)
)
self.group_name_label.setText(_translate("AssignGroups", "Group Name"))
self.device_assignment_label.setText(
_translate("AssignGroups", "Assign Devices (seperated by ,)")
)
self.group_color_label.setText(_translate("AssignGroups", "Color"))
if not self.autodetect_spectrum:
self.spectrum_file_label.setText(_translate("AssignGroups", "Spectrum"))
| true
| true
|