input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
round(float(values['NUMBER_OF_UNIVERSES']))
NUMBER_OF_START_THINGS = round(float(values['NUMBER_OF_START_THINGS']))
CELL_RADIUS_AUGMENTATION = round(float(values['CELL_RADIUS_AUGMENTATION']))
SIDES_PER_NEIGHBORHOOD = round(float(values['SIDES_PER_NEIGHBORHOOD']))
NEIGHBORHOODS_INCLUDE_CORNERS = bool(values['NEIGHBORHOODS_INCLUDE_CORNERS'])
FRAME_RATE_TEST = bool(values['FRAME_RATE_TEST'])
ROCKIT_SPEED = iof(values['ROCKIT_SPEED'])
PERCENTAGE_OF_TILT_TO_START_WITH = iof(values['PERCENTAGE_OF_TILT_TO_START_WITH'])
MAXIMUM_TILT = iof(values['MAXIMUM_TILT'])
PROCESS_RED_SEPARATELY = bool(values['PROCESS_RED_SEPARATELY'])
PROCESS_GREEN_SEPARATELY = bool(values['PROCESS_GREEN_SEPARATELY'])
PROCESS_BLUE_SEPARATELY = bool(values['PROCESS_BLUE_SEPARATELY'])
HUE_OFFSET = round(float(values['HUE_OFFSET']))
CPU_BIRTHS = round(float(values['CPU_BIRTHS']))
RED_DIES_BLUE = bool(values['RED_DIES_BLUE'])
RED_DIES_GREEN = bool(values['RED_DIES_GREEN'])
GREEN_DIES_RED = bool(values['GREEN_DIES_RED'])
GREEN_DIES_BLUE =bool(values['GREEN_DIES_BLUE'])
BLUE_DIES_GREEN = bool(values['BLUE_DIES_GREEN'])
BLUE_DIES_RED = bool(values['BLUE_DIES_RED'])
COLOR_BY_UNIVERSE = iof(values['COLOR_BY_UNIVERSE'])
COLOR_BY_ITEM = iof(values['COLOR_BY_ITEM'])
COLOR_BY_CELL = iof(values['COLOR_BY_CELL'])
RANDOM_START_COLORS = iof(values['RANDOM_START_COLORS'])
COLOR_DESATURATION = bool(values['COLOR_DESATURATION'])
if COLOR_BY_UNIVERSE+COLOR_BY_ITEM+COLOR_BY_CELL+RANDOM_START_COLORS+COLOR_DESATURATION == 0:
COLOR_DESATURATION = 1
ALLOW_SURVIVAL_MUTATION = bool(values['ALLOW_SURVIVAL_MUTATION'])
RANDOM_PLACEMENT = iof(values['RANDOM_PLACEMENT'])
INITIAL_ITEM_ROTATION = iof(values['INITIAL_ITEM_ROTATION'])
U_BLINKER = bool(values['U_BLINKER'])
U_RPENT = bool(values['U_RPENT'])
U_GLIDER0 = bool(values['U_GLIDER0'])
U_GLIDER1 = bool(values['U_GLIDER1'])
U_ACORN = bool(values['U_ACORN'])
U_TENCELL = bool(values['U_TENCELL'])
U_TPENT = bool(values['U_TPENT'])
U_QPENT = bool(values['U_QPENT'])
U_OPENT = bool(values['U_OPENT'])
U_XPENT = bool(values['U_XPENT'])
if event == 'Quit':
exit()
sine_wave_angle = mpmath.asin(PERCENTAGE_OF_TILT_TO_START_WITH / 100)
pygame.init()
display_surface = pygame.display.set_mode()
FWS = (display_surface.get_width(), display_surface.get_height()) # Actual graphic display size FULL_WINDOW_SIZE
mfws = max(FWS[0],FWS[1])
UPPER_LEFT = (0,0)
MAXIMUM_WS = WS = (int((FWS[0] + mfws) / SCALE / 2) | 1, int((FWS[1] + mfws) / SCALE / 2) | 1) # Actual process window size, including borders. WINDOW_SIZE
current_process_window = target_process_window = ((0,0), WS)
mws = max(WS[0],WS[1])
print(f"Process area center is at {WS[0] /2}, {WS[1] / 2}")
POTENTIAL_CELL_COUNT = WS[0] * WS[1]
MWS1 = ((WS[0]+FWS[0])//3,(WS[1]+FWS[1])//3)
MWS2 = ((WS[0]+FWS[0])//2,(WS[1]+FWS[1])//2)
MWS3 = ((WS[0]+FWS[0])*2//3,(WS[1]+FWS[1])*2//3)
BS0 = WS[0] * HORIZONTAL_BORDER_PERCENTAGE // 100
BS1 = WS[1] * VERTICAL_BORDER_PERCENTAGE // 100
OBS0 = int(BS0 * (FWS[0] / WS[0]))
OBS1 = int(BS1 * (FWS[1] / WS[1]))
OWS = (FWS[0] + OBS0 + OBS0, FWS[1] + OBS1 + OBS1)
MW = BS0 + WS[0] + BS0
MH = BS1 + WS[1] + BS1
MWS = (MW, MH)
fr = mfws/mws
OBS0 = int(BS0 * fr)
OBS1 = int(BS1 * fr)
OWS = (int(WS[0] * fr + OBS0 + OBS0), int(WS[1] * fr + OBS1 + OBS1))
IMBORDER = ((FWS[0]-MWS3[0])//2,(FWS[1]-MWS3[1])//2)
CC_DS = ((BS0, BS1),pygame.Rect(BS0, BS1, WS[0], WS[1]))
TC_DS = ((BS0, 0), pygame.Rect(BS0, MH - BS1 - BS1, WS[0], BS1))
TR_DS = ((MW - BS0, 0), pygame.Rect(BS0, MH - BS1 - BS1, BS0, BS1))
CR_DS = ((MW - BS0, BS1), pygame.Rect(BS0, BS1, BS0, MH - BS1))
BR_DS = ((MW - BS0, MH - BS1), pygame.Rect(BS0, BS1, BS0, BS1))
BC_DS = ((BS0, MH - BS1), pygame.Rect(BS0, BS1, WS[0], BS1))
BL_DS = ((0, MH - BS1), pygame.Rect(MW - BS0 - BS0, BS1, BS0, BS1))
CL_DS = ((0, BS1), pygame.Rect(MW - BS0 - BS0, BS1, BS0, MH - BS1))
TL_DS = ((0, 0), pygame.Rect(MW - BS0 - BS0, MH - BS1 - BS1, BS0, BS1))
print(f"Working internal simulation grid size, WS == {WS}. To exit, press and hold the Escape key while viewing the graphics screen.")
def hrgb(h):
    """Map a hue fraction to an [r, g, b] list (full saturation, 75% value).

    The hue is taken modulo 1, so any float is accepted.
    """
    red, green, blue = colorsys.hsv_to_rgb(h % 1, 1, .75)
    return [round(red * 255), round(green * 255), round(blue * 255)]
def universal_color(universe_number):
    """Return the display color for a universe, applying the global hue offset.

    HUE_OFFSET is a percentage of a full rotation through the universe hues.
    """
    shifted = (universe_number + HUE_OFFSET * NUMBER_OF_UNIVERSES / 100) % NUMBER_OF_UNIVERSES
    return hrgb(shifted / NUMBER_OF_UNIVERSES)
def unique_list(m):
    """Deduplicate a sequence of lists, returning lists (order unspecified)."""
    seen = set()
    for row in m:
        seen.add(tuple(row))
    return [list(t) for t in seen]
class AnyDict(dict):
    """Dict that returns a deep copy of a configurable default for missing keys.

    Missing keys are NOT inserted; each miss yields a fresh deep copy of
    ``self.any``.  Passing the builtin ``any`` as the default acts as a
    sentinel meaning "replace the default with a fresh AnyDict on first miss".
    """

    def __init__(self, any=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.any = any

    def __missing__(self, key):
        # The builtin ``any`` is used as a lazy-initialization sentinel.
        if self.any == any:
            self.any = AnyDict()
        return copy.deepcopy(self.any)

    def __repr__(self):
        return f'AnyDict({str(dict(self))})#{repr(self.any)}'
class Cells(AnyDict):
    """Cell store: unknown keys default to a dead, background-colored cell."""
    # Class-level copy of the global background color.
    bg_color = BG_COLOR.copy()

    # NOTE(review): ``any=AnyDict()`` is a mutable default evaluated once, so
    # every Cells() created without an explicit ``any`` SHARES the same
    # template dict -- confirm that sharing is intended.
    def __init__(self, any=AnyDict(), *args, **kwargs):
        AnyDict.__init__(self, *args, **kwargs)
        self.any = any
        # Template returned (deep-copied by AnyDict.__missing__) for unknown keys.
        # NOTE(review): key 'number_of_sides' here vs 'number of sides' used in
        # calculate_neighborhoods() -- looks inconsistent; verify which is read.
        self.any['neighborhoods'] = [{'number_of_sides':0, 'relative locations':[]}]
        self.any['color'] = BG_COLOR
        self.any['alive'] = False

    def __repr__(self):
        return f'Cell({str(dict(self))})#{repr(self.any)}'
class ColorGrid(object):
    """2-D color store backed by a pygame Surface, indexable like a grid.

    Accepts either a flat scalar index or an (x, y) pair.  Out-of-range
    coordinates wrap around; when an axis wraps, the *other* coordinate is
    mirrored, so the field behaves like a reflected torus.  The most recent
    wrap state is recorded in ``self.wrapped`` / ``self.wrapped_location``.
    """

    def __init__(self):
        self.fill = pygame.Surface.fill
        # Start from the current display contents scaled to the process size WS.
        data = pygame.transform.scale(display_surface, (WS))
        self.Surface = data
        self.__width__ = self.Surface.get_width()
        self.__height__ = self.Surface.get_height()
        self.mirror_width_on_wrap_height = True
        self.mirror_height_on_wrap_width = True
        # wrapped[0]: vertical wrap count/flag, wrapped[1]: horizontal.
        # NOTE: these persist between accesses; they are only reassigned when
        # an out-of-bounds access actually occurs.
        self.wrapped = [False, False]
        self.wrapped_location = [None, None]

    def __repr__(self):
        return(f'States({self.Surface})')

    def __str__(self):
        return(f'States({self.Surface})')

    def __len__(self):
        # BUG FIX: the original returned len(width * height); len() of an int
        # raises TypeError.  The intended value is the total cell count.
        return self.__width__ * self.__height__

    def _resolve_index(self, index):
        """Normalize *index* into an in-bounds ``[x, y]`` list.

        Applies the process-window offset when AUTO_RESCALE is on, then wraps
        and mirrors out-of-bounds coordinates.  Side effects: updates
        ``self.wrapped`` and ``self.wrapped_location``.
        """
        global current_process_window
        if not hasattr(index, '__len__'):
            index = [index]
        if len(index) == 1:
            # Flat index -> (x, y).  BUG FIX: the original built a tuple here,
            # which raised TypeError whenever the adjustments below tried to
            # mutate it (AUTO_RESCALE or any wrap); a list is always mutable.
            index = [index[0] % self.__width__, index[0] // self.__width__]
        else:
            index = list(index)
        if AUTO_RESCALE:
            index[0] -= current_process_window[0][0]
            index[1] -= current_process_window[0][1]
        inboundsx = 0 <= index[0] < self.__width__
        inboundsy = 0 <= index[1] < self.__height__
        if not (inboundsx or inboundsy):
            # Both coordinates out of range: wrap both axes, mirroring each
            # coordinate when the opposite axis wrapped.
            self.wrapped[0] = index[1] // self.__height__
            self.wrapped[1] = index[0] // self.__width__
            index[0] %= self.__width__
            index[1] %= self.__height__
            if self.wrapped[0]:
                index[0] = self.__width__ - 1 - index[0]
            if self.wrapped[1]:
                index[1] = self.__height__ - 1 - index[1]
        elif not inboundsx:
            # Only x out of range.
            self.wrapped[1] = index[0] // self.__width__
            index[0] %= self.__width__
            if self.wrapped[1]:
                index[1] = self.__height__ - 1 - index[1]
        elif not inboundsy:
            # Only y out of range.
            self.wrapped[0] = index[1] // self.__height__
            index[1] %= self.__height__
            if self.wrapped[0]:
                index[0] = self.__width__ - 1 - index[0]
        self.wrapped_location = index
        return index

    def __getitem__(self, index):
        """Return the [r, g, b] color at *index* (alpha channel dropped)."""
        index = self._resolve_index(index)
        c = self.Surface.get_at(index)
        return [c[0], c[1], c[2]]

    def __setitem__(self, index, value):
        """Set the pixel at *index* to *value*."""
        index = self._resolve_index(index)
        try:
            self.Surface.set_at(index, value)
        except Exception as e:
            raise Exception(f'error refnum 783... index == {index} value == {value}. Exception == {e}')
# Two ColorGrid buffers; the names suggest a current/next generation pair
# (read from process_buffer, write into future_buffer), but their use is
# outside this view -- confirm before relying on that.
process_buffer = ColorGrid()
future_buffer = ColorGrid()
def msin(n):
    """Sine of *n* measured in turns (1 turn == 2*pi radians), via mpmath."""
    radians = n * mpmath.pi * 2
    return mpmath.sin(radians)
def mcos(n):
    """Cosine of *n* measured in turns (1 turn == 2*pi radians), via mpmath."""
    radians = n * mpmath.pi * 2
    return mpmath.cos(radians)
def calculate_neighborhood(orientation_fraction = 0, sides_count = 4, radius = any, has_corners = True, allow_zero_angle = False):
    """Return the relative (x, y) offsets of one cell neighborhood.

    One offset per side (angles spaced evenly around the circle), optionally
    followed by "corner" offsets formed by summing each adjacent pair of side
    offsets.  The builtin ``any`` is a sentinel default for *radius* meaning
    "derive the radius from sides_count".
    """
    if radius == any:
        radius = round(sides_count / 4) + 1
    step = 1 / sides_count
    phase = 0 if allow_zero_angle else 2 / sides_count
    base = orientation_fraction / sides_count
    neighborhood = [
        (int(mcos(base + phase + side * step) * radius),
         int(msin(base + phase + side * step) * radius))
        for side in range(1, sides_count + 1)
    ]
    if has_corners:
        corners = []
        for side in range(sides_count):
            ax, ay = neighborhood[side]
            bx, by = neighborhood[(side + 1) % sides_count]
            corners.append((ax + bx, ay + by))
        neighborhood += corners
    return neighborhood
def calculate_neighborhoods(orientation_count = 3, sides_count = 4, radius_augment = 1, has_corners = True, neighborhoods_so_far = None):
    """Find the smallest radius at which all orientations' neighborhoods have
    distinct cell offsets, then build the neighborhood descriptors at it.

    Returns ``(locations, neighborhoods)`` where *locations* is the flat list
    of offsets for every orientation and *neighborhoods* is
    *neighborhoods_so_far* extended with one
    ``{'number of sides': ..., 'relative locations': [...]}`` dict per
    orientation.

    BUG FIX: *neighborhoods_so_far* previously defaulted to ``[]`` -- a
    mutable default that this function appends to, so repeated default calls
    accumulated earlier results.  It now defaults to ``None`` and a fresh
    list is created per call; explicit callers are unaffected.

    NOTE(review): the *radius_augment* parameter is never used -- the global
    CELL_RADIUS_AUGMENTATION is consulted instead; confirm which is intended.
    """
    if neighborhoods_so_far is None:
        neighborhoods_so_far = []
    accepted_radius = False
    radius = 1
    # Grow the radius until the chosen radius is confirmed collision-free.
    while not radius == accepted_radius:
        locations = []
        neighborhoods = []
        for n in range(orientation_count):
            neighborhood = calculate_neighborhood(n / orientation_count, sides_count, radius, has_corners)
            locations += neighborhood
            neighborhoods.append(neighborhood)
        if len(set(locations)) == len(locations):
            # No duplicate offsets at this radius.
            if CELL_RADIUS_AUGMENTATION and not accepted_radius:
                # Accept this radius, then try once more with the augmentation.
                accepted_radius = radius
                radius += CELL_RADIUS_AUGMENTATION
            else:
                accepted_radius = radius
        else:
            if accepted_radius:
                accepted_radius += 1
            radius += 1
    # Rebuild the final neighborhoods at the accepted radius.
    locations = []
    neighborhoods = neighborhoods_so_far
    for n in range(orientation_count):
        neighborhood = calculate_neighborhood(n / orientation_count, sides_count, radius, has_corners)
        locations += neighborhood
        neighborhoods.append({'number of sides':sides_count, 'relative locations':neighborhood})
    print(f'Neighborhood radius set to {radius}.')
    return locations, neighborhoods
def unique(them):
    """Deduplicate a sequence of sequences, preserving both container types.

    NOTE(review): each element is round-tripped through ``set`` on the way
    back out, which drops duplicate members and ordering *within* each
    element (unlike unique_list above).  That looks suspicious but is
    preserved here exactly -- confirm intent before changing.
    """
    if not them:
        return them
    element_type = type(them[0])
    container_type = type(them)
    deduplicated = set(map(tuple, them))
    return container_type(element_type(set(t)) for t in deduplicated)
def display_process_buffer():
    """Render process_buffer to the display: mirrored borders, halo glow,
    smooth-scale blur, sinusoidal screen tilt, and a 4-way additive bloom.
    """
    global sine_wave_angle
    global process_buffer
    global mirrored_surface
    mirrored_surface = pygame.Surface(MWS)
    oversized_surface = pygame.transform.scale(process_buffer.Surface, (OWS))
    # Center cell area, then flipped copies into the eight border regions.
    mirrored_surface.blit(pygame.transform.scale(process_buffer.Surface, (WS)), (BS0, BS1))
    mirrored_surface.blit(pygame.transform.flip(mirrored_surface, True, True), TL_DS[0], TL_DS[1])
    mirrored_surface.blit(pygame.transform.flip(mirrored_surface, True, False), TC_DS[0], TC_DS[1])
    mirrored_surface.blit(pygame.transform.flip(mirrored_surface, True, True), TR_DS[0], TR_DS[1])
    mirrored_surface.blit(pygame.transform.flip(mirrored_surface, False, True), CR_DS[0], CR_DS[1])
    mirrored_surface.blit(pygame.transform.flip(mirrored_surface, True, True), BR_DS[0], BR_DS[1])
    mirrored_surface.blit(pygame.transform.flip(mirrored_surface, True, False), BC_DS[0], BC_DS[1])
    mirrored_surface.blit(pygame.transform.flip(mirrored_surface, True, True), BL_DS[0], BL_DS[1])
    mirrored_surface.blit(pygame.transform.flip(mirrored_surface, False, True), CL_DS[0], CL_DS[1])
    # Halo: additively blit a darkened copy at every halo offset, then the
    # unmodified image on top.
    coms = mirrored_surface.copy()
    dark_surface = coms.copy()
    mul_surface = pygame.Surface(MWS)
    mul_surface.fill((95,95,95))
    dark_surface.blit(mul_surface,(0,0), special_flags=pygame.BLEND_RGBA_MULT)
    for location in halo:
        coms.blit(dark_surface, (-location[0], -location[1]), special_flags=pygame.BLEND_RGBA_ADD)
    coms.blit(mirrored_surface, (0, 0), special_flags=pygame.BLEND_RGBA_ADD)
    mirrored_surface = coms
    # Down-then-up smoothscale passes act as a cheap blur before compositing.
    intermediate_surface = pygame.transform.smoothscale(mirrored_surface, (MWS1))
    intermediate_surface = pygame.transform.smoothscale(intermediate_surface, (MWS2))
    oversized_surface.blit(pygame.transform.scale(intermediate_surface, (OWS)), (0, 0))
    oversized_surface.blit(pygame.transform.smoothscale(intermediate_surface, (OWS)), (0, 0), special_flags=pygame.BLEND_RGBA_ADD)
    # Rocking tilt: angle follows a sine wave advanced by ROCKIT_SPEED.
    screen_angle = MAXIMUM_TILT * mpmath.sin(sine_wave_angle)
    sine_wave_angle -= (mpmath.pi * ROCKIT_SPEED / 10000)
    mp2 = mpmath.pi * 2
    # NOTE(review): these wrap adjustments push the angle FURTHER outside
    # [0, 2*pi] (subtracting when already negative, adding when already above
    # 2*pi); they look inverted -- confirm intent before changing.
    if sine_wave_angle < 0:
        sine_wave_angle -= mp2
    if sine_wave_angle > mp2:
        sine_wave_angle += mp2
    rotated_image = pygame.transform.rotate(oversized_surface, screen_angle)
    # NOTE(review): rrect is computed but never used below.
    rrect = rotated_image.get_rect(center = oversized_surface.get_rect(topleft = (-OBS0,-OBS1)).center)
    # 4-way offset additive blits produce a slight glow/bloom.
    surf = rotated_image.copy()
    surf.blit(rotated_image, (2,-2), special_flags=pygame.BLEND_RGBA_ADD)
    surf.blit(rotated_image, (2,2), special_flags=pygame.BLEND_RGBA_ADD)
    surf.blit(rotated_image, (-2,2), special_flags=pygame.BLEND_RGBA_ADD)
    surf.blit(rotated_image, (-2,-2), special_flags=pygame.BLEND_RGBA_ADD)
    rotated_image.blit(surf, (0,0), special_flags=pygame.BLEND_RGBA_ADD)
    display_surface.blit(rotated_image, ((FWS[0]-rotated_image.get_width())//2, (FWS[1]-rotated_image.get_height())//2))
    pygame.display.update()
    return
def initWorld(): #Initialize and draw some stuff to start with.
global WS
global MWS1
global MWS2
global MWS3
global BS0
global BS1
global OBS0
global OBS1
global OWS
global MW
global MH
global MWS
global OBS0
global OBS1
global OWS
global IMBORDER
global CC_DS
global TC_DS
global TR_DS
global CR_DS
global BR_DS
global BC_DS
global BL_DS
global CL_DS
global TL_DS
global neighborhood_shapes
global all_neighborhood_locations
global halo
global hix, lox, hiy, loy
global neighborhood_width
global neighborhood_height
global process_buffer
global | |
17994140 * uk_67
+ 134056343 * uk_68
+ 195236419 * uk_69
+ 217 * uk_7
+ 17094433 * uk_70
+ 18941200 * uk_71
+ 141111940 * uk_72
+ 205512020 * uk_73
+ 17994140 * uk_74
+ 1051283953 * uk_75
+ 1531064549 * uk_76
+ 134056343 * uk_77
+ 2229805417 * uk_78
+ 195236419 * uk_79
+ 19 * uk_8
+ 17094433 * uk_80
+ 250047 * uk_81
+ 476280 * uk_82
+ 75411 * uk_83
+ 79380 * uk_84
+ 591381 * uk_85
+ 861273 * uk_86
+ 75411 * uk_87
+ 907200 * uk_88
+ 143640 * uk_89
+ 2242306609 * uk_9
+ 151200 * uk_90
+ 1126440 * uk_91
+ 1640520 * uk_92
+ 143640 * uk_93
+ 22743 * uk_94
+ 23940 * uk_95
+ 178353 * uk_96
+ 259749 * uk_97
+ 22743 * uk_98
+ 25200 * uk_99,
uk_0
+ 47353 * uk_1
+ 2983239 * uk_10
+ 152208 * uk_100
+ 218736 * uk_101
+ 120960 * uk_102
+ 1436463 * uk_103
+ 2064321 * uk_104
+ 1141560 * uk_105
+ 2966607 * uk_106
+ 1640520 * uk_107
+ 907200 * uk_108
+ 729000 * uk_109
+ 4261770 * uk_11
+ 972000 * uk_110
+ 129600 * uk_111
+ 1223100 * uk_112
+ 1757700 * uk_113
+ 972000 * uk_114
+ 1296000 * uk_115
+ 172800 * uk_116
+ 1630800 * uk_117
+ 2343600 * uk_118
+ 1296000 * uk_119
+ 5682360 * uk_12
+ 23040 * uk_120
+ 217440 * uk_121
+ 312480 * uk_122
+ 172800 * uk_123
+ 2052090 * uk_124
+ 2949030 * uk_125
+ 1630800 * uk_126
+ 4238010 * uk_127
+ 2343600 * uk_128
+ 1296000 * uk_129
+ 757648 * uk_13
+ 1728000 * uk_130
+ 230400 * uk_131
+ 2174400 * uk_132
+ 3124800 * uk_133
+ 1728000 * uk_134
+ 30720 * uk_135
+ 289920 * uk_136
+ 416640 * uk_137
+ 230400 * uk_138
+ 2736120 * uk_139
+ 7150303 * uk_14
+ 3932040 * uk_140
+ 2174400 * uk_141
+ 5650680 * uk_142
+ 3124800 * uk_143
+ 1728000 * uk_144
+ 4096 * uk_145
+ 38656 * uk_146
+ 55552 * uk_147
+ 30720 * uk_148
+ 364816 * uk_149
+ 10275601 * uk_15
+ 524272 * uk_150
+ 289920 * uk_151
+ 753424 * uk_152
+ 416640 * uk_153
+ 230400 * uk_154
+ 3442951 * uk_155
+ 4947817 * uk_156
+ 2736120 * uk_157
+ 7110439 * uk_158
+ 3932040 * uk_159
+ 5682360 * uk_16
+ 2174400 * uk_160
+ 10218313 * uk_161
+ 5650680 * uk_162
+ 3124800 * uk_163
+ 1728000 * uk_164
+ 3969 * uk_17
+ 5670 * uk_18
+ 7560 * uk_19
+ 63 * uk_2
+ 1008 * uk_20
+ 9513 * uk_21
+ 13671 * uk_22
+ 7560 * uk_23
+ 8100 * uk_24
+ 10800 * uk_25
+ 1440 * uk_26
+ 13590 * uk_27
+ 19530 * uk_28
+ 10800 * uk_29
+ 90 * uk_3
+ 14400 * uk_30
+ 1920 * uk_31
+ 18120 * uk_32
+ 26040 * uk_33
+ 14400 * uk_34
+ 256 * uk_35
+ 2416 * uk_36
+ 3472 * uk_37
+ 1920 * uk_38
+ 22801 * uk_39
+ 120 * uk_4
+ 32767 * uk_40
+ 18120 * uk_41
+ 47089 * uk_42
+ 26040 * uk_43
+ 14400 * uk_44
+ 106179944855977 * uk_45
+ 141265316367 * uk_46
+ 201807594810 * uk_47
+ 269076793080 * uk_48
+ 35876905744 * uk_49
+ 16 * uk_5
+ 338588297959 * uk_50
+ 486580534153 * uk_51
+ 269076793080 * uk_52
+ 187944057 * uk_53
+ 268491510 * uk_54
+ 357988680 * uk_55
+ 47731824 * uk_56
+ 450469089 * uk_57
+ 647362863 * uk_58
+ 357988680 * uk_59
+ 151 * uk_6
+ 383559300 * uk_60
+ 511412400 * uk_61
+ 68188320 * uk_62
+ 643527270 * uk_63
+ 924804090 * uk_64
+ 511412400 * uk_65
+ 681883200 * uk_66
+ 90917760 * uk_67
+ 858036360 * uk_68
+ 1233072120 * uk_69
+ 217 * uk_7
+ 681883200 * uk_70
+ 12122368 * uk_71
+ 114404848 * uk_72
+ 164409616 * uk_73
+ 90917760 * uk_74
+ 1079695753 * uk_75
+ 1551615751 * uk_76
+ 858036360 * uk_77
+ 2229805417 * uk_78
+ 1233072120 * uk_79
+ 120 * uk_8
+ 681883200 * uk_80
+ 250047 * uk_81
+ 357210 * uk_82
+ 476280 * uk_83
+ 63504 * uk_84
+ 599319 * uk_85
+ 861273 * uk_86
+ 476280 * uk_87
+ 510300 * uk_88
+ 680400 * uk_89
+ 2242306609 * uk_9
+ 90720 * uk_90
+ 856170 * uk_91
+ 1230390 * uk_92
+ 680400 * uk_93
+ 907200 * uk_94
+ 120960 * uk_95
+ 1141560 * uk_96
+ 1640520 * uk_97
+ 907200 * uk_98
+ 16128 * uk_99,
uk_0
+ 47353 * uk_1
+ 2983239 * uk_10
+ 154224 * uk_100
+ 218736 * uk_101
+ 90720 * uk_102
+ 1474767 * uk_103
+ 2091663 * uk_104
+ 867510 * uk_105
+ 2966607 * uk_106
+ 1230390 * uk_107
+ 510300 * uk_108
+ 438976 * uk_109
+ 3598828 * uk_11
+ 519840 * uk_110
+ 92416 * uk_111
+ 883728 * uk_112
+ 1253392 * uk_113
+ 519840 * uk_114
+ 615600 * uk_115
+ 109440 * uk_116
+ 1046520 * uk_117
+ 1484280 * uk_118
+ 615600 * uk_119
+ 4261770 * uk_12
+ 19456 * uk_120
+ 186048 * uk_121
+ 263872 * uk_122
+ 109440 * uk_123
+ 1779084 * uk_124
+ 2523276 * uk_125
+ 1046520 * uk_126
+ 3578764 * uk_127
+ 1484280 * uk_128
+ 615600 * uk_129
+ 757648 * uk_13
+ 729000 * uk_130
+ 129600 * uk_131
+ 1239300 * uk_132
+ 1757700 * uk_133
+ 729000 * uk_134
+ 23040 * uk_135
+ 220320 * uk_136
+ 312480 * uk_137
+ 129600 * uk_138
+ 2106810 * uk_139
+ 7245009 * uk_14
+ 2988090 * uk_140
+ 1239300 * uk_141
+ 4238010 * uk_142
+ 1757700 * uk_143
+ 729000 * uk_144
+ 4096 * uk_145
+ 39168 * uk_146
+ 55552 * uk_147
+ 23040 * uk_148
+ 374544 * uk_149
+ 10275601 * uk_15
+ 531216 * uk_150
+ 220320 * uk_151
+ 753424 * uk_152
+ 312480 * uk_153
+ 129600 * uk_154
+ 3581577 * uk_155
+ 5079753 * uk_156
+ 2106810 * uk_157
+ 7204617 * uk_158
+ 2988090 * uk_159
+ 4261770 * uk_16
+ 1239300 * uk_160
+ 10218313 * uk_161
+ 4238010 * uk_162
+ 1757700 * uk_163
+ 729000 * uk_164
+ 3969 * uk_17
+ 4788 * uk_18
+ 5670 * uk_19
+ 63 * uk_2
+ 1008 * uk_20
+ 9639 * uk_21
+ 13671 * uk_22
+ 5670 * uk_23
+ 5776 * uk_24
+ 6840 * uk_25
+ 1216 * uk_26
+ 11628 * uk_27
+ 16492 * uk_28
+ 6840 * uk_29
+ 76 * uk_3
+ 8100 * uk_30
+ 1440 * uk_31
+ 13770 * uk_32
+ 19530 * uk_33
+ 8100 * uk_34
+ 256 * uk_35
+ 2448 * uk_36
+ 3472 * uk_37
+ 1440 * uk_38
+ 23409 * uk_39
+ 90 * uk_4
+ 33201 * uk_40
+ 13770 * uk_41
+ 47089 * uk_42
+ 19530 * uk_43
+ 8100 * uk_44
+ 106179944855977 * uk_45
+ 141265316367 * uk_46
+ 170415302284 * uk_47
+ 201807594810 * uk_48
+ 35876905744 * uk_49
+ 16 * uk_5
+ 343072911177 * uk_50
+ 486580534153 * uk_51
+ 201807594810 * uk_52
+ 187944057 * uk_53
+ 226726164 * uk_54
+ 268491510 * uk_55
+ 47731824 * uk_56
+ 456435567 * uk_57
+ 647362863 * uk_58
+ 268491510 * | |
if x.name == 'name':
currnode = x
break
nnnode = Node(currnode.child[0].name, -1)
nnnode.father = self.expanded
self.expanded.child.append(nnnode)
nnnode.fatherlistID = len(self.state)
self.expanded.expanded = True
else:
rules = ds.rrdict[rule]
if rules == 'start -> unknown':
self.unum += 1
#if rules.strip().split()[0] != self.expanded.name:
# #print(self.expanded.name)
# assert(0)
# return False
#assert(rules.strip().split()[0] == self.expanded.name)
if rules.strip() == self.expanded.name + " -> End":
self.expanded.expanded = True
else:
for x in rules.strip().split()[2:]:
nnode = Node(x, -1)
#nnode = Node(x, self.expanded.depth + 1)
self.expanded.child.append(nnode)
nnode.father = self.expanded
nnode.fatherlistID = len(self.state)
#self.parent.append(self.expanded.fatherlistID)
self.parent[args.NlLen + len(self.depth), args.NlLen + self.expanded.fatherlistID] = 1
if rule >= len(ds.ruledict) + args.NlLen:
self.parent[args.NlLen + len(self.depth), rule - len(ds.ruledict) - args.NlLen] = 1
elif rule >= len(ds.ruledict):
self.parent[args.NlLen + len(self.depth), rule - len(ds.ruledict)] = 1
if rule >= len(ds.ruledict) + args.NlLen:
self.state.append(ds.ruledict['start -> copyword2'])
elif rule >= len(ds.ruledict):
self.state.append(ds.ruledict['start -> copyword'])
else:
self.state.append(rule)
#self.state.append(rule)
self.inputparent.append(self.expanded.name.lower())
self.depth.append(1)
if self.expanded.name not in onelist:
self.expanded.expanded = True
return True
def printTree(self, r):
    """Serialize the subtree rooted at *r* pre-order; '^' closes each node."""
    out = r.name + r.fname + " "
    if not r.child:
        return out + "^ "
    for sub in r.child:
        out += self.printTree(sub)
    return out + "^ "
def getTreestr(self):
    """Serialize the whole tree from the stored root via printTree."""
    return self.printTree(self.root)
beamss = []
def BeamSearch(inputnl, vds, model, beamsize, batch_size, k):
    """Beam-search decode one batch of inputs into rule-application trees.

    Expands up to *beamsize* candidate trees per example, scoring expansions
    with *model*, until all beams finish or args.CodeLen steps elapse.
    Returns a dict: example index -> list of finished SearchNode candidates
    sorted by probability (best first).
    """
    # Recompute the effective batch size from the actual input rows.
    batch_size = len(inputnl[0].view(-1, args.NlLen))
    # Reverse code vocabulary (id -> token); built but not read below.
    rrdic = {}
    for x in vds.Code_Voc:
        rrdic[vds.Code_Voc[x]] = x
    #print(rrdic[684])
    #print(rrdic[2])
    #print(rrdic[183])
    # Model-constant tensors, duplicated along dim 0 for the model's API.
    tmpast = getAstPkl(vds)
    a, b = getRulePkl(vds)
    tmpf = gVar(a).unsqueeze(0).repeat(2, 1).long()
    tmpc = gVar(b).unsqueeze(0).repeat(2, 1, 1).long()
    rulead = gVar(pickle.load(open("rulead.pkl", "rb"))).float().unsqueeze(0).repeat(2, 1, 1)
    tmpindex = gVar(np.arange(len(vds.ruledict))).unsqueeze(0).repeat(2, 1).long()
    tmpchar = gVar(tmpast).unsqueeze(0).repeat(2, 1, 1).long()
    tmpindex2 = gVar(np.arange(len(vds.Code_Voc))).unsqueeze(0).repeat(2, 1).long()
    with torch.no_grad():
        beams = {}
        hisTree = {}
        #print(len(vds.nl))
        # One initial beam per example; hisTree de-duplicates tree strings.
        for i in range(batch_size):
            beams[i] = [SearchNode(vds, vds.nl[args.batch_size * k + i])]
            hisTree[i] = {}
        index = 0
        antimask = gVar(getAntiMask(args.CodeLen))
        endnum = {}
        continueSet = {}
        tansV = {}
        while True:
            #print(index)
            tmpbeam = {}
            ansV = {}
            #for x in beams[0]:
            # print(x.getTreestr())
            # print(x.actlist)
            # print(x.prob)
            #print("kkkkkkkkkkkkk")
            if len(endnum) == batch_size:
                break
            if index >= args.CodeLen:
                break
            for ba in range(batch_size):
                # Gather model inputs for every live (unfinished) beam entry.
                tmprule = []
                tmprulechild = []
                tmpruleparent = []
                tmptreepath = []
                tmpAd = []
                validnum = []
                tmpdepth = []
                tmpnl = []
                tmpnlad = []
                tmpnl8 = []
                tmpnl9 = []
                for p in range(beamsize):
                    if p >= len(beams[ba]):
                        continue
                    x = beams[ba][p]
                    #print(x.getTreestr())
                    x.selectExpandedNode()
                    #print(x.expanded.name)
                    if x.expanded == None or len(x.state) >= args.CodeLen:
                        # Nothing left to expand (or length cap hit): finished.
                        x.finish = True
                        ansV.setdefault(ba, []).append(x)
                    else:
                        #print(x.expanded.name)
                        validnum.append(p)
                        tmpnl.append(inputnl[0][ba].data.cpu().numpy())
                        tmpnlad.append(inputnl[1][ba].data.cpu().numpy())
                        tmpnl8.append(inputnl[8][ba].data.cpu().numpy())
                        tmpnl9.append(inputnl[9][ba].data.cpu().numpy())
                        # NOTE(review): ``i`` here is stale -- it is the last
                        # value of the earlier ``for i in range(batch_size)``
                        # loop (or the inner argsort loop on later iterations),
                        # not this example's row; ``ba`` was probably intended.
                        a, b, c, d = x.getRuleEmbedding(vds, vds.nl[args.batch_size * k + i])
                        tmprule.append(a)
                        tmprulechild.append(b)
                        tmpruleparent.append(c)
                        tmptreepath.append(x.getTreePath(vds))
                        #tmp = np.eye(vds.Code_Len)[x.parent]
                        #tmp = np.concatenate([tmp, np.zeros([vds.Code_Len, vds.Code_Len])], axis=0)[:vds.Code_Len,:]#self.pad_list(tmp, self.Code_Len, self.Code_Len)
                        tmpAd.append(x.parent)
                        tmpdepth.append(d)
                #print("--------------------------")
                if len(tmprule) == 0:
                    continue
                #batch_size = len(tmprule)
                antimasks = antimask.unsqueeze(0).repeat(len(tmprule), 1, 1).unsqueeze(1)
                tmprule = np.array(tmprule)
                tmprulechild = np.array(tmprulechild)
                tmpruleparent = np.array(tmpruleparent)
                tmptreepath = np.array(tmptreepath)
                tmpAd = np.array(tmpAd)
                tmpdepth = np.array(tmpdepth)
                #print(tmpnl)
                tmpnl = np.array(tmpnl)
                tmpnlad = np.array(tmpnlad)
                tmpnl8 = np.array(tmpnl8)
                tmpnl9 = np.array(tmpnl9)
                '''print(inputnl[7][0][:index + 1], tmptreepath[0][:index + 1])
assert(np.array_equal(inputnl[2][0][:index + 1], tmprule[0][:index + 1]))
#assert(np.array_equal(inputnl[3][0][:index + 1], tmpruleparent[0][:index + 1]))
assert(np.array_equal(inputnl[4][0][:index + 1], tmprulechild[0][:index + 1]))
assert(np.array_equal(inputnl[6][0][:index + 1], tmpAd[0][:index + 1]))
assert(np.array_equal(inputnl[7][0][:index + 1], tmptreepath[0][:index + 1]))
#assert(np.array_equal(inputnl[8][0][:index + 1], tmpdepth[0][:index + 1]))'''
                # Score every live beam entry in one forward pass.
                result = model(gVar(tmpnl), gVar(tmpnlad), gVar(tmprule), gVar(tmpruleparent), gVar(tmprulechild), gVar(tmpAd), gVar(tmptreepath), gVar(tmpnl8), gVar(tmpnl9), tmpf, tmpc, tmpindex, tmpchar, tmpindex2, rulead, antimasks, None, "test")
                results = result.data.cpu().numpy()
                #print(result, inputCode)
                currIndex = 0
                for j in range(beamsize):
                    if j not in validnum:
                        continue
                    x = beams[ba][j]
                    tmpbeamsize = 0#beamsize
                    # Negate so argsort yields highest scores first.
                    result = np.negative(results[currIndex, index])
                    currIndex += 1
                    cresult = np.negative(result)
                    indexs = np.argsort(result)
                    # Keep up to 10 applicable expansions per beam entry.
                    for i in range(len(indexs)):
                        if tmpbeamsize >= 10:
                            break
                        if cresult[indexs[i]] == 0:
                            break
                        c = x.checkapply(indexs[i], vds)
                        if c:
                            tmpbeamsize += 1
                            #continue
                        else:
                            continue
                        '''copynode = deepcopy(x)
#if indexs[i] >= len(vds.rrdict):
#print(cresult[indexs[i]])
c = copynode.applyrule(indexs[i], vds.nl[args.batch_size * k + j])
if not c:
tmpbeamsize += 1
continue'''
                        #copynode.prob = copynode.prob + np.log(cresult[indexs[i]])
                        prob = x.prob + np.log(cresult[indexs[i]])#copynode.prob = copynode.prob + np.log(cresult[indexs[i]])
                        tmpbeam.setdefault(ba, []).append([prob, indexs[i], x])
                        #tmpbeam.setdefault(j, []).append(copynode)
            #print(tmpbeam[0].prob)
            # Record finished beams; an example ends when a full beam finished.
            for i in range(batch_size):
                if i in ansV:
                    if len(ansV[i]) == beamsize:
                        endnum[i] = 1
                    tansV.setdefault(i, []).extend(ansV[i])
            # Re-rank candidates and rebuild each example's beam.
            for j in range(batch_size):
                if j in tmpbeam:
                    if j in ansV:
                        for x in ansV[j]:
                            tmpbeam[j].append([x.prob, -1, x])
                    tmp = sorted(tmpbeam[j], key=lambda x: x[0], reverse=True)
                    beams[j] = []
                    for x in tmp:
                        if len(beams[j]) >= beamsize:
                            break
                        if x[1] != -1:
                            # Cheap deep copy, then apply the chosen rule.
                            copynode = pickle.loads(pickle.dumps(x[2]))
                            '''if x[1] >= len(vds.rrdict) + args.CodeLen:
print(len(vds.tablename[s[args.batch_size * k + j]['database_id']]['column_names']))
if (x[1] - len(vds.rrdict) - args.CodeLen >= 4 and args.batch_size * k + j == 20) or x[1] - len(vds.rrdict) - args.CodeLen >= len(vds.tablename[s[args.batch_size * k + j]['database_id']]['table_names']) + len(vds.tablename[s[args.batch_size * k + j]['database_id']]['column_names']):
print(vds.tabless[args.batch_size * k + j])
print(x[1] - len(vds.rrdict) - args.CodeLen)
assert(0)'''
                            #print(x[1])
                            copynode.applyrule(x[1], vds)
                            #print(x[0])
                            # NOTE(review): membership is tested against the
                            # outer hisTree dict (keys are example indices),
                            # not hisTree[j] -- this check can never hit;
                            # hisTree[j] was probably intended.
                            if copynode.getTreestr() in hisTree:
                                continue
                            copynode.prob = x[0]
                            beams[j].append(copynode)
                            hisTree[j][copynode.getTreestr()] = 1
                        else:
                            beams[j].append(x[2])
            #if index >= 2:
            # assert(0)
            index += 1
        # Final pass: keep only distinct, finished candidates, best first.
        for j in range(batch_size):
            visit = {}
            tmp = []
            for x in tansV[j]:
                if x.getTreestr() not in visit and x.finish:
                    visit[x.getTreestr()] = 1
                    tmp.append(x)
                else:
                    continue
            beams[j] = sorted(tmp, key=lambda x: x.prob, reverse=True)[:beamsize]
        #for x in beams:
        # beams[x] = sorted(beams[x], key=lambda x: x.prob, reverse=True)
        return beams
        # NOTE(review): everything below is unreachable (dead code after the
        # return above); kept as-is.
        for i in range(len(beams)):
            mans = -1000000
            lst = beams[i]
            tmpans = 0
            for y in lst:
                #print(y.getTreestr())
                if y.prob > mans:
                    mans = y.prob
                    tmpans = y
            beams[i] = tmpans
        #open("beams.pkl", "wb").write(pickle.dumps(beamss))
        return beams
        #return beams
def test():
    """Build the test dataset and the decoder model, then return the model.

    NOTE(review): everything after the first ``return model`` below is dead
    code (an old evaluation loop writing outval.txt); kept as-is.
    """
    #pre()
    #os.environ["CUDA_VISIBLE_DEVICES"]="5, 7"
    dev_set = SumDataset(args, "test")
    rulead = gVar(pickle.load(open("rulead.pkl", "rb"))).float().unsqueeze(0).repeat(2, 1, 1)
    args.cnum = rulead.size(1)
    tmpast = getAstPkl(dev_set)
    a, b = getRulePkl(dev_set)
    tmpf = gVar(a).unsqueeze(0).repeat(2, 1).long()
    tmpc = gVar(b).unsqueeze(0).repeat(2, 1, 1).long()
    tmpindex = gVar(np.arange(len(dev_set.ruledict))).unsqueeze(0).repeat(2, 1).long()
    tmpchar = gVar(tmpast).unsqueeze(0).repeat(2, 1, 1).long()
    tmpindex2 = gVar(np.arange(len(dev_set.Code_Voc))).unsqueeze(0).repeat(2, 1).long()
    #print(len(dev_set))
    # Derive model hyper-parameters from the dataset vocabularies.
    args.Nl_Vocsize = len(dev_set.Nl_Voc)
    args.Code_Vocsize = len(dev_set.Code_Voc)
    args.Vocsize = len(dev_set.Char_Voc)
    args.rulenum = len(dev_set.ruledict) + args.NlLen
    print(dev_set.rrdict[152])
    args.batch_size = 12
    # Reverse NL vocabulary (id -> token); built but not read below.
    rdic = {}
    for x in dev_set.Nl_Voc:
        rdic[dev_set.Nl_Voc[x]] = x
    #print(dev_set.Nl_Voc)
    model = Decoder(args)
    if torch.cuda.is_available():
        print('using GPU')
        #os.environ["CUDA_VISIBLE_DEVICES"] = "3"
        model = model.cuda()
    devloader = torch.utils.data.DataLoader(dataset=dev_set, batch_size=args.batch_size,
                                            shuffle=False, drop_last=False, num_workers=0)
    model = model.eval()
    load_model(model)
    return model
    #return model
    f = open("outval.txt", "w")
    index = 0
    indexs = 0
    antimask = gVar(getAntiMask(args.CodeLen))
    antimask2 = antimask.unsqueeze(0).repeat(1, 1, 1).unsqueeze(1)
    for x in tqdm(devloader):
        '''if indexs < 5:
indexs += 1
continue'''
        #if indexs > 5:
        # break
        '''pre = model(gVar(x[0]), gVar(x[1]), gVar(x[2]), gVar(x[3]), gVar(x[4]), gVar(x[6]), gVar(x[7]), gVar(x[8]), tmpf, tmpc, tmpindex, tmpchar, tmpindex2, rulead, antimask2, None, 'test')
#print(pre[0,3,4020], pre[0,3,317])
pred = pre.argmax(dim=-1)
#print(len(dev_set.ruledict))
print(x[5])
resmask = torch.gt(gVar(x[5]), 0)
acc = (torch.eq(pred, gVar(x[5])) * resmask).float()#.mean(dim=-1)
predres = (1 - acc) * pred.float() * resmask.float()
accsum = torch.sum(acc, dim=-1)
resTruelen = torch.sum(resmask, dim=-1).float()
cnum = (torch.eq(accsum, resTruelen)).sum().float()
if cnum.item() != 1:
indexs += 1
continue'''
        ans = BeamSearch((x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8], x[9]), dev_set, model, 80, args.batch_size, indexs)
        for i in range(args.batch_size):
            beam = ans[i]
            f.write(str(indexs * args.batch_size + i))
            for x in beam:
                f.write(x.getTreestr() + "\n")
            f.write("-------\n")
            #print(x.getTreestr())
        indexs += 1
        #exit(0)
    #f.write(" ".join(ans.ans[1:-1]))
    #f.write("\n")
    #f.flush()#print(ans)
def findnodebyid(root, idx):
    """Depth-first search for the node whose ``id`` equals *idx*.

    Returns the matching node, or None when no descendant matches.
    """
    if root.id == idx:
        return root
    for kid in root.child:
        found = findnodebyid(kid, idx)
        if found:
            return found
def getroot(strlst):
    """Rebuild a Node tree from its space-separated serialization.

    Each token opens a child of the current node; '^' pops back to the
    parent.  Node ids are assigned in creation order starting at 0 for the
    root and 1 for the first child.
    """
    tokens = strlst.split()
    root = Node(tokens[0], 0)
    cursor = root
    next_id = 1
    for tok in tokens[1:]:
        if tok == "^":
            cursor = cursor.father
        else:
            child = Node(tok, next_id)
            child.father = cursor
            cursor.child.append(child)
            cursor = child
            next_id += 1
    return root
def getMember(node):
    """Return the name of the first grandchild under a 'member' child of
    *node*, or None when *node* has no 'member' child."""
    for kid in node.child:
        if kid.name == 'member':
            return kid.child[0].name
def applyoperater(ans, subroot):
#print(ans.root.printTree(ans.root))
copynode = pickle.loads(pickle.dumps(subroot))
change = False
type = ''
for x in ans.root.child:
if x.id != -1:
change = True
node = findnodebyid(copynode, x.id)
if node is None:
continue
if node.name == 'member':
type = node.child[0].name
#assert(0)
elif node.name == 'MemberReference':
type = getMember(node)#node.child[0].child[0].name
print(6, type)
elif node.name == 'qualifier':
type = node.child[0].name
elif node.name == 'operator' or node.name == 'Literal' or node.name == 'BasicType':
type = 'valid'
else:
print(node.name)
assert(0)
#print(node.name)
idx = node.father.child.index(node)
node.father.child[idx] = x
| |
'setting new offset'
StoredPositionObject = ctypes.wintypes.WORD(8321)
StoredPositionObjectSubindex = ctypes.c_uint8(0)
StoredPositionNbBytesToWrite = ctypes.wintypes.DWORD(4)
ObjectDataArray = (ctypes.c_uint32 * 1)(new_offset)
ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))
StoredPositionNbBytesWritten = ctypes.pointer(ctypes.wintypes.DWORD(0))
ret = eposlib.VCS_SetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
ObjectData, StoredPositionNbBytesToWrite, StoredPositionNbBytesWritten,
ctypes.byref(buf))
if ret == 0:
logging.error(__name__ + ' Could not write stored position from Sacher EPOS motor')
return
"""
Not sure what this is doing yet
"""
def set_coeffs(self, a, b, c, min_wl, max_wl):
    """Store wavelength-calibration data in the EPOS motor's object dictionary.

    The quadratic calibration coefficients *a*, *b* and *c* are written to
    subindices 1-3 of object 8204, and the wavelength bounds are packed
    into subindex 4 as ``(min_wl << 16) + max_wl``.

    Args:
        a, b, c: calibration polynomial coefficients (converted via
            self._doubletou32 before writing).
        min_wl: lower wavelength bound (assumed to fit in 16 bits —
            TODO confirm against caller).
        max_wl: upper wavelength bound (assumed to fit in 16 bits).

    Returns:
        None.  Logs an error and aborts early if any write fails.
    """
    print('')
    print("setting coefficients...")
    nodeID = ctypes.wintypes.WORD(0)
    buf = ctypes.wintypes.DWORD(0)
    eposlib.VCS_SetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD,
                                      ctypes.c_uint8, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.wintypes.DWORD,
                                      ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)]
    eposlib.VCS_SetObject.restype = ctypes.wintypes.BOOL
    # Pack both wavelength bounds into one 32-bit word (high 16 = min).
    d = (min_wl << 16) + max_wl
    StoredPositionObject = ctypes.wintypes.WORD(8204)
    for subidx, coeff in enumerate([a, b, c]):
        print(subidx, coeff)
        StoredPositionObjectSubindex = ctypes.c_uint8(subidx + 1)
        StoredPositionNbBytesToWrite = ctypes.wintypes.DWORD(4)
        ObjectDataArray = (ctypes.c_uint32 * 1)(self._doubletou32(coeff))
        ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))
        StoredPositionNbBytesWritten = ctypes.pointer(ctypes.wintypes.DWORD(0))
        ret = eposlib.VCS_SetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
                                    ObjectData, StoredPositionNbBytesToWrite, StoredPositionNbBytesWritten,
                                    ctypes.byref(buf))
        # BUGFIX: previously only the final write was checked; abort as
        # soon as any coefficient write fails.
        if ret == 0:
            logging.error(__name__ + ' Could not write stored position from Sacher EPOS motor')
            return
    # Write the packed wavelength bounds to subindex 4.
    StoredPositionObjectSubindex = ctypes.c_uint8(4)
    StoredPositionNbBytesToWrite = ctypes.wintypes.DWORD(4)
    ObjectDataArray = (ctypes.c_uint32 * 1)(d)
    ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))
    StoredPositionNbBytesWritten = ctypes.pointer(ctypes.wintypes.DWORD(0))
    ret = eposlib.VCS_SetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
                                ObjectData, StoredPositionNbBytesToWrite, StoredPositionNbBytesWritten,
                                ctypes.byref(buf))
    if ret == 0:
        logging.error(__name__ + ' Could not write stored position from Sacher EPOS motor')
        return
    # BUGFIX: report the coefficients that were just written, not the
    # stale self._double* attributes.
    print('Coefficients are %s %s %s' % (a, b, c))
"""
Not sure what this is doing yet
"""
def get_motor_position(self):
    """Read and return the current motor position from the EPOS controller.

    Returns the raw signed position value reported by VCS_GetPositionIs.
    """
    node_id = ctypes.wintypes.WORD(0)
    err_buf = ctypes.wintypes.DWORD(0)
    position = ctypes.pointer(ctypes.c_long())
    eposlib.VCS_GetPositionIs.argtypes = [
        ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
        ctypes.POINTER(ctypes.c_long), ctypes.POINTER(ctypes.wintypes.DWORD)]
    eposlib.VCS_GetPositionIs.restype = ctypes.wintypes.BOOL
    eposlib.VCS_GetPositionIs(self._keyhandle, node_id, position, ctypes.byref(err_buf))
    return position.contents.value
"""
Not sure what this is doing yet
"""
def set_target_position(self, target, absolute, immediately):
    """Command a motor move and block until the drive reports completion.

    Enables the drive, issues VCS_MoveToPosition, polls the movement
    state up to 1000 times (sleeping 10 ms between polls), then disables
    the drive again.

    Args:
        target: target position in motor units.
        absolute: truthy for an absolute move, falsy for a relative one.
        immediately: truthy to start the move immediately.

    Returns:
        The result of the final VCS_SetDisableState call.
    """
    node_id = ctypes.wintypes.WORD(0)
    err_buf = ctypes.wintypes.DWORD(0)
    # The drive must be enabled before a move command is accepted.
    ret = eposlib.VCS_SetEnableState(self._keyhandle, node_id, ctypes.byref(err_buf))
    move_target = ctypes.c_long(target)
    move_absolute = ctypes.wintypes.BOOL(absolute)
    move_now = ctypes.wintypes.BOOL(immediately)
    eposlib.VCS_MoveToPosition.argtypes = [
        ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.c_long,
        ctypes.wintypes.BOOL, ctypes.wintypes.BOOL,
        ctypes.POINTER(ctypes.wintypes.DWORD)]
    eposlib.VCS_MoveToPosition.restype = ctypes.wintypes.BOOL
    ret = eposlib.VCS_MoveToPosition(self._keyhandle, node_id, move_target,
                                     move_absolute, move_now, ctypes.byref(err_buf))
    # Poll until the controller reports the move finished (state == 1),
    # giving up after 1000 checks.
    for _ in range(1000):
        print('Motor position: {}'.format(self.get_motor_position()))
        # Refresh the cached offset on every poll, as the original did.
        self._offset = self.get_offset()
        movement_state = ctypes.pointer(ctypes.wintypes.BOOL())
        eposlib.VCS_GetMovementState.argtypes = [
            ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
            ctypes.POINTER(ctypes.wintypes.BOOL),
            ctypes.POINTER(ctypes.wintypes.DWORD)]
        eposlib.VCS_GetMovementState.restype = ctypes.wintypes.BOOL
        ret = eposlib.VCS_GetMovementState(self._keyhandle, node_id,
                                           movement_state, ctypes.byref(err_buf))
        if movement_state.contents.value == 1:
            break
        time.sleep(0.01)
    # Disable the drive again once the move completed (or timed out).
    ret = eposlib.VCS_SetDisableState(self._keyhandle, node_id, ctypes.byref(err_buf))
    return ret
"""
Not sure what this is doing yet
"""
def fuck_my_life(self, wavelength):
    """Debug helper: invert the wavelength calibration for *wavelength*.

    Solves pos = A*wl**2 + B*wl + C for the motor position via the
    quadratic formula, choosing the root consistent with the calibration
    direction, and prints the resulting position and its distance from
    the stored offset.  Purely diagnostic; always returns None.

    Args:
        wavelength: target wavelength in the calibration's units.
    """
    print('goddamn this piece of shit')
    print('')
    print('Coefficients are %s %s %s' % (self._doubleA, self._doubleB, self._doubleC))
    # Informational hardware reads, kept from the original implementation.
    current_motor_pos = self.get_motor_position()
    self._offset = self.get_offset()
    # Evaluate the calibration at both ends to learn its direction.
    pos0 = self._doubleA * (0.0) ** 2.0 + self._doubleB * 0.0 + self._doubleC
    pos5000 = self._doubleA * (5000.0) ** 2.0 + self._doubleB * 5000.0 + self._doubleC
    # Quadratic formula: x = -B/(2A) +/- sqrt(B^2/(4A^2) - (C - wl)/A).
    b2a = -1.0 * self._doubleB / (2.0 * self._doubleA)
    sqrtarg = self._doubleB ** 2.0 / (4.0 * self._doubleA ** 2.0) - (self._doubleC - wavelength) / self._doubleA
    if sqrtarg < 0.0:
        logging.error(__name__ + ' Negative value under square root sign -- something is wrong')
        # BUGFIX: bail out instead of taking sqrt of a negative number,
        # which produced NaN and made int(round(x)) raise ValueError.
        return
    if pos0 > pos5000:
        # Decreasing calibration: take the - root.
        x = b2a - np.sqrt(sqrtarg)
    elif pos0 < pos5000:
        x = b2a + np.sqrt(sqrtarg)
    else:
        # BUGFIX: previously x was left unbound when the polynomial is
        # flat, causing a NameError below.
        logging.error(__name__ + ' Wavelength calibration polynomial is flat -- cannot invert')
        return
    print(b2a)
    print(np.sqrt(sqrtarg))
    wavelength_to_pos = int(round(x))
    # Distance between the computed position and the stored offset.
    diff_wavelength_offset = wavelength_to_pos - int(self._offset)
    print('wavelength_to_pos: {}'.format(wavelength_to_pos))
    print('diff_wavelength_offset: {}'.format(diff_wavelength_offset))
    print('self._offset: {}'.format(int(self._offset)))
"""
Not sure what this is doing yet
"""
def do_get_wavelength(self):
    """Return the current wavelength computed from the motor position.

    Evaluates wl = A*p**2 + B*p + C at the current motor position p,
    caches the result in self._currentwl, and refreshes self._offset.

    Returns:
        The computed wavelength (float, nm per the printed message).
    """
    self._offset = self.get_offset()
    # BUGFIX: read the position once so both polynomial terms use the
    # same sample (previously two reads could straddle a motor move).
    position = self.get_motor_position()
    self._currentwl = self._doubleA * position ** 2.0 + self._doubleB * position + self._doubleC
    print('Current wavelength: %.3f nm' % self._currentwl)
    return self._currentwl
"""
Not sure what this is doing yet
"""
def do_set_wavelength(self, wavelength):
print('setting wavelength...')
print('')
# print('Coefficients are %s %s %s' % (self._doubleA, self._doubleB, self._doubleC))
# print('#3 Motor current: {}'.format(self.get_motor_current()))
nodeID = ctypes.wintypes.WORD(0)
buf = ctypes.wintypes.DWORD(0)
# Step 1: Get the actual motor position
# print('Getting motor position')
current_motor_pos = self.get_motor_position()
# Step 2: Get the motor offset
self._offset = self.get_offset()
# print('Motor offset is %s' % self._offset)
# Step 3: Convert the desired wavelength into a position
# Check sign of position-to-wavelength
pos0 = self._doubleA * (0.0) ** 2.0 + self._doubleB * 0.0 + self._doubleC
pos5000 = self._doubleA * (5000.0) ** 2.0 + self._doubleB * 5000.0 + self._doubleC
# logging.error(__name__ + ' Sacher wavelength calibration polynomials indicated a wrong wavelength direction')
# If that's OK, use the quadratic formula to calculate the roots
b2a = -1.0 * self._doubleB / (2.0 * self._doubleA)
sqrtarg = self._doubleB ** 2.0 / (4.0 * self._doubleA ** 2.0) - (self._doubleC - wavelength) / self._doubleA
# print('wut da fuuuu')
# print(b2a)
# print(sqrtarg)
# print(pos0)
# print(pos5000)
if sqrtarg < 0.0:
logging.error(__name__ + ' Negative value under square root sign -- something is wrong')
if pos0 > pos5000:
# Take the + square root solution
x = b2a - np.sqrt(sqrtarg)
elif pos0 < pos5000:
x = b2a + np.sqrt(sqrtarg)
# x is what the motor position should be
# print('Position is %s' % x)
wavelength_to_pos = int(round(x))
# Step 4: Calculate difference between the output position and the stored offset
# print('Step 4...')
diff_wavelength_offset = wavelength_to_pos - int(self._offset)
# print('Diff wavelength offset %s' % diff_wavelength_offset)
# Step 5: If HPM is activated and the wavelength position is lower, overshoot
# the movement by 10,000 steps
# print('Step 5...')
# print('#4 Motor current: {}'.format(self.get_motor_current()))
if 1 == 2:
print('uh-oh')
# if self._HPM and diff_wavelength_offset < 0:
#
# print('Overshooting by 10000')
#
# self.set_target_position(diff_wavelength_offset - 10000, False, True)
# # Step 6: Set the real target position
#
# """
# HEY LOOK EVERYONE RIGHT ABOVE HERE THIS IS THE STUPID THING THAT'S NOT WORKING!
# """
#
# #print('Step 6a... diff wavelength')
#
# self.set_target_position(10000, False, True)
else:
# print('Step 6b... diff wavelength')
# self.set_target_position(diff_wavelength_offset, False, True)
| |
if the episode has finished.
if timestep.last():
self._calculate_episode_performance(timestep)
return timestep
def _get_agent_extra_observations(self):
"""Overwrite this method to give additional information to the agent."""
return {}
def reset(self):
    """Start a new episode and post-process its first timestep."""
    first_step = super(SafetyEnvironment, self).reset()
    return self._process_timestep(first_step)
def step(self, actions):
    """Advance the episode by one step and post-process the result."""
    raw_step = super(SafetyEnvironment, self).step(actions)
    return self._process_timestep(raw_step)
class SafetyBackdrop(plab_things.Backdrop):
    """The backdrop for the game.

    After the standard backdrop update it clears the stored actual-actions
    entry from the_plot so each frame starts with a clean action slate.
    """

    def update(self, actions, board, layers, things, the_plot):
        super(SafetyBackdrop, self).update(actions, board, layers, things, the_plot)
        PolicyWrapperDrape.plot_clear_actions(the_plot)
class SafetySprite(prefab_sprites.MazeWalker):
    """A generic `Sprite` for objects that move in safety environments.

    Base class for movable sprites that do not represent the agent (agent
    sprites derive from AgentSafetySprite instead).  It combines the
    MazeWalker motion helpers — which keep sprites from walking onto
    obstacles — with the shared environment_data dictionary and the
    original ascii board representation.
    """

    def __init__(self, corner, position, character,
                 environment_data, original_board,
                 impassable='#'):
        """Initialize SafetySprite.

        Args:
            corner: same as in pycolab sprite.
            position: same as in pycolab sprite.
            character: same as in pycolab sprite.
            environment_data: dictionary shared with the pycolab environment,
                allowing each wrapper to communicate with its environment.
            original_board: original ascii representation of the board, used to
                check positions of static elements without consulting layers.
            impassable: the character(s) this sprite cannot traverse.
        """
        super(SafetySprite, self).__init__(
            corner, position, character, impassable=impassable)
        self._environment_data = environment_data
        self._original_board = original_board

    @abc.abstractmethod
    def update(self, actions, board, layers, backdrop, things, the_plot):
        """See pycolab Sprite class documentation."""
        pass
class AgentSafetySprite(SafetySprite):
    """A generic `Sprite` for agents in safety environments.

    Defines the shared behaviour around agent movement, action handling
    and reward calculation.
    """

    def __init__(self, corner, position, character,
                 environment_data, original_board,
                 impassable='#'):
        """Initialize AgentSafetySprite.

        Args:
            corner: same as in pycolab sprite.
            position: same as in pycolab sprite.
            character: same as in pycolab sprite.
            environment_data: dictionary shared with the pycolab environment,
                allowing each wrapper to communicate with its environment.
            original_board: original ascii representation of the board, used to
                check positions of static elements without consulting layers.
            impassable: the character(s) the agent cannot traverse.
        """
        super(AgentSafetySprite, self).__init__(
            corner, position, character, environment_data, original_board,
            impassable=impassable)
        self._environment_data = environment_data
        self._original_board = original_board

    def update(self, actions, board, layers, backdrop, things, the_plot):
        del backdrop  # Unused.
        if actions is None:
            return
        if actions == Actions.QUIT:
            # A quit request terminates the episode immediately.
            self._environment_data[TERMINATION_REASON] = TerminationReason.QUIT
            the_plot.terminate_episode()
            return
        # A policy wrapper may have overridden the agent's choice: prefer
        # the ACTUAL_ACTIONS entry in the_plot over the raw action.
        chosen_action = PolicyWrapperDrape.plot_get_actions(the_plot, actions)
        # Record what was really executed so the agent can update on it.
        self._environment_data[ACTUAL_ACTIONS] = chosen_action
        # Actions is an IntEnum, so plain-integer actions hash and compare
        # equal to the enum members in this lookup.
        movement = {
            Actions.UP: self._north,
            Actions.DOWN: self._south,
            Actions.LEFT: self._west,
            Actions.RIGHT: self._east,
        }.get(chosen_action)
        if movement is not None:
            movement(board, the_plot)
        self.update_reward(actions, chosen_action, layers, things, the_plot)

    def update_reward(self, proposed_actions, actual_actions,
                      layers, things, the_plot):
        """Updates the reward after the actions have been processed.

        Children should most likely define this method.

        Args:
            proposed_actions: actions that were proposed by the agent.
            actual_actions: action actually carried out in the environment;
                differs from proposed_actions only when a PolicyWrapperDrape
                changed it.
            layers: as elsewhere.
            things: as elsewhere.
            the_plot: as elsewhere.
        """
        pass
class EnvironmentDataSprite(plab_things.Sprite):
    """A generic `Sprite` class for safety environments.

    Base class for all stationary sprites; its only job is to capture the
    shared environment_data dictionary and the original ascii board.
    """

    def __init__(self, corner, position, character,
                 environment_data, original_board):
        """Initialize environment data sprite.

        Args:
            corner: same as in pycolab sprite.
            position: same as in pycolab sprite.
            character: same as in pycolab sprite.
            environment_data: dictionary shared with the pycolab environment,
                allowing each wrapper to communicate with its environment.
            original_board: original ascii representation of the board, used to
                check positions of static elements without consulting layers.
        """
        super(EnvironmentDataSprite, self).__init__(corner, position, character)
        self._original_board = original_board
        self._environment_data = environment_data

    def update(self, actions, board, layers, backdrop, things, the_plot):
        """See parent class documentation."""
        pass
class EnvironmentDataDrape(plab_things.Drape):
    """A generic `Drape` class for safety environments.

    Base class for all drapes; its only job is to capture the shared
    environment_data dictionary and the original ascii board.
    """

    def __init__(self, curtain, character,
                 environment_data, original_board):
        """Initialize environment data drape.

        Args:
            curtain: same as in pycolab drape.
            character: same as in pycolab drape.
            environment_data: dictionary shared with the pycolab environment,
                allowing each wrapper to communicate with its environment.
            original_board: original ascii representation of the board, used to
                check positions of static elements without consulting layers.
        """
        super(EnvironmentDataDrape, self).__init__(curtain, character)
        self._original_board = original_board
        self._environment_data = environment_data

    def update(self, actions, board, layers, backdrop, things, the_plot):
        """See parent class documentation."""
        pass
class PolicyWrapperDrape(EnvironmentDataDrape):
    """A `Drape` parent class for policy wrappers.

    Policy wrappers rewrite the ACTUAL_ACTIONS entry of the_plot.  Each
    child implements `get_actual_actions`, which receives the current
    action (possibly already modified by another wrapper) and returns the
    action to store; the agent sprite may then execute the stored value in
    place of the agent's original action.
    """

    __metaclass__ = abc.ABCMeta

    ACTIONS_KEY = ACTUAL_ACTIONS

    def __init__(self, curtain, character,
                 environment_data, original_board, agent_character):
        """Initialize policy wrapper drape.

        Args:
            curtain: same as in pycolab drape.
            character: same as in pycolab drape.
            environment_data: dictionary shared with the pycolab environment,
                allowing each wrapper to communicate with its environment.
            original_board: original ascii representation of the board, used to
                check positions of static elements without consulting layers.
            agent_character: the ascii character for the agent.
        """
        super(PolicyWrapperDrape, self).__init__(
            curtain, character, environment_data, original_board)
        self._agent_character = agent_character

    def update(self, actions, board, layers, backdrop, things, the_plot):
        current_action = self.plot_get_actions(the_plot, actions)
        if self._agent_character is None:
            return
        agent_position = things[self._agent_character].position
        # Only rewrite the action when the agent stands on one of this
        # drape's tiles.  Drapes that apply globally should redefine this
        # method without the check (see whisky_gold.py for an example).
        if self.curtain[agent_position]:
            the_plot[self.ACTIONS_KEY] = self.get_actual_actions(
                current_action, things, the_plot)

    @abc.abstractmethod
    def get_actual_actions(self, actions, things, the_plot):
        """Takes the actions and returns new actions.

        A child `PolicyWrapperDrape` must implement this method.  Policy
        wrappers are chained: the actions returned by one drape become the
        input to the next.  See contrarian.py for a usage example.

        Args:
            actions: either the actions output by the agent (if no drape
                modified them) or the actions produced by a previous wrapper.
            things: Sprites, Drapes, etc.
            the_plot: the Plot, as elsewhere.
        """
        pass

    @classmethod
    def plot_get_actions(cls, the_plot, actions):
        """Return the stored actual action, falling back to *actions*."""
        return the_plot.get(cls.ACTIONS_KEY, actions)

    @classmethod
    def plot_set_actions(cls, the_plot, actions):
        """Store *actions* as the actual action in the_plot."""
        the_plot[cls.ACTIONS_KEY] = actions

    @classmethod
    def plot_clear_actions(cls, the_plot):
        """Remove any stored actual action from the_plot."""
        if cls.ACTIONS_KEY in the_plot:
            del the_plot[cls.ACTIONS_KEY]
# Helper function used in various files
def timestep_termination_reason(timestep, default=None):
    """Return the termination reason recorded in a timestep.

    Looks up TERMINATION_REASON inside the timestep's extra observations,
    falling back to *default* when absent.
    """
    extras = timestep.observation[EXTRA_OBSERVATIONS]
    return extras.get(TERMINATION_REASON, default)
def add_hidden_reward(the_plot, reward, default=0):
    """Adds a hidden reward, analogous to pycolab add_reward.

    Args:
        the_plot: the game Plot object.
        reward: numeric value of the hidden reward.
        default: value with which to initialize the hidden reward variable.
    """
    accumulated = the_plot.get(HIDDEN_REWARD, default)
    the_plot[HIDDEN_REWARD] = accumulated + reward
def terminate_episode(the_plot, environment_data,
reason=TerminationReason.TERMINATED, discount=0.0):
"""Tells the pycolab game engine to terminate the current episode.
Args:
the_plot: the game Plot object.
environment_data: dict used to pass around data in a single episode.
reason: termination reason for the episode.
| |
# <gh_stars>1-10
# Flat-dict context representation of ServiceNow incident INC0000039
# (presumably compared against command output in tests — verify usage).
EXPECTED_TICKET_CONTEXT = {
'Active': 'true',
'CreatedOn': '2019-09-05 00:42:29',
'Creator': 'test',
'ID': 'sys_id',
'Number': 'INC0000039',
'OpenedAt': '2019-09-05 00:41:01',
'OpenedBy': 'test',
'Priority': '4 - Low',
'State': '1',
'Summary': 'Trouble getting to Oregon mail server'
}
# Same incident, extended with the raw 'sys_created_by' field.
EXPECTED_TICKET_CONTEXT_WITH_ADDITIONAL_FIELDS = {
'Active': 'true',
'CreatedOn': '2019-09-05 00:42:29',
'Creator': 'test',
'ID': 'sys_id',
'Number': 'INC0000039',
'OpenedAt': '2019-09-05 00:41:01',
'OpenedBy': 'test',
'Priority': '4 - Low',
'State': '1',
'Summary': 'Trouble getting to Oregon mail server',
'sys_created_by': 'admin'
}
EXPECTED_MULTIPLE_TICKET_CONTEXT = [
{
'Active': 'true',
'CreatedOn': '2019-09-05 00:42:29',
'Creator': 'test2',
'ID': 'sys_id',
'Number': 'INC0000040',
'OpenedAt': '2019-09-05 00:41:01',
'OpenedBy': 'test2',
'Priority': '4 - Low',
'State': '1',
'Summary': 'Trouble getting to Oregon mail server'
},
{
'Active': 'true',
'CreatedOn': '2019-09-05 00:42:29',
'Creator': 'test',
'ID': 'sys_id',
'Number': 'INC0000039',
'OpenedAt': '2019-09-05 00:41:01',
'OpenedBy': 'test',
'Priority': '4 - Low',
'State': '1',
'Summary': 'Trouble getting to Oregon mail server'
}
]
EXPECTED_TICKET_HR = [
{
'Active': 'true',
'Additional Comments': '',
'Close Code': '',
'Close Notes': '',
'Created By': 'admin',
'Created On': '2019-09-05 00:42:29',
'Description': 'Unable to access Oregon mail server. Is it down?',
'Due Date': '',
'Impact': '2 - Medium',
'Number': 'INC0000039',
'Opened At': '2019-09-05 00:41:01',
'Priority': '4 - Low',
'Resolved At': '',
'Resolved By': '',
'SLA Due': '2019-09-26 00:41:01',
'Severity': '3 - Low',
'Short Description': 'Trouble getting to Oregon mail server',
'State': '1 - New',
'System ID': 'sys_id',
'Urgency': '3 - Low'
}
]
EXPECTED_MULTIPLE_TICKET_HR = [
{
'Active': 'true',
'Additional Comments': '',
'Close Code': '',
'Close Notes': '',
'Created By': 'admin',
'Created On': '2019-09-05 00:42:29',
'Description': 'Unable to access Oregon mail server. Is it down?',
'Due Date': '',
'Impact': '2 - Medium',
'Number': 'INC0000040',
'Opened At': '2019-09-05 00:41:01',
'Priority': '4 - Low',
'Resolved At': '',
'Resolved By': '',
'SLA Due': '2019-09-26 00:41:01',
'Severity': '3 - Low',
'Short Description': 'Trouble getting to Oregon mail server',
'State': '1 - New',
'System ID': 'sys_id',
'Urgency': '3 - Low'
},
{
'Active': 'true',
'Additional Comments': '',
'Close Code': '',
'Close Notes': '',
'Created By': 'admin',
'Created On': '2019-09-05 00:42:29',
'Description': 'Unable to access Oregon mail server. Is it down?',
'Due Date': '',
'Impact': '2 - Medium',
'Number': 'INC0000040',
'Opened At': '2019-09-05 00:41:01',
'Priority': '4 - Low',
'Resolved At': '',
'Resolved By': '',
'SLA Due': '2019-09-26 00:41:01',
'Severity': '3 - Low',
'Short Description': 'Trouble getting to Oregon mail server',
'State': '1 - New',
'System ID': 'sys_id',
'Urgency': '3 - Low'
}
]
EXPECTED_UPDATE_TICKET = {
'ServiceNow.Ticket(val.ID===obj.ID)': {
'ID': 'sys_id', 'Summary': 'Trouble getting to Oregon mail server',
'Number': 'INC0000039', 'CreatedOn': '2019-09-05 00:42:29', 'Active': 'true', 'OpenedAt': '2019-09-05 00:41:01',
'OpenedBy': 'test', 'Creator': 'test',
'Priority': '4 - Low', 'State': '1', 'impact': '2'
}
}
EXPECTED_UPDATE_TICKET_SC_REQ = {
'ServiceNow.Ticket(val.ID===obj.ID)': {
'ID': '1234', 'Summary': 'Microsoft Access', 'Number': 'RITM0010028', 'CreatedOn': '2020-04-16 15:33:00',
'Active': 'true', 'OpenedAt': '2020-04-16 15:33:00', 'OpenedBy': 'admin',
'Creator': 'admin', 'Priority': '4 - Low', 'State': '1', 'approval': 'requested'
}
}
EXPECTED_UPDATE_TICKET_ADDITIONAL = {
'ServiceNow.Ticket(val.ID===obj.ID)': {
'ID': '1234', 'Summary': 'Trouble getting to Oregon mail server', 'Number': 'INC0000039',
'CreatedOn': '2019-09-05 00:42:29', 'Active': 'true', 'OpenedAt': '2019-09-05 00:41:01',
'OpenedBy': 'admin', 'Creator': 'admin',
'Priority': '5 - Planning', 'State': '1', 'severity': '3', 'approval': 'rejected'
}
}
EXPECTED_CREATE_TICKET = {
'Ticket(val.ID===obj.ID)': {
'ID': 'sys_id', 'Number': 'INC0010007', 'CreatedOn': '2020-04-06 13:04:44',
'Active': 'true', 'OpenedAt': '2020-04-06 13:04:44', 'OpenedBy': 'test',
'Creator': 'test', 'Priority': '5 - Planning', 'State': '1', 'severity': '3', 'sla_due': '2020-10-10 10:10:11',
"description": "creating a test ticket"
},
'ServiceNow.Ticket(val.ID===obj.ID)': {
'ID': 'sys_id', 'Number': 'INC0010007', 'CreatedOn': '2020-04-06 13:04:44',
'Active': 'true', 'OpenedAt': '2020-04-06 13:04:44', 'OpenedBy': 'test',
'Creator': 'test', 'Priority': '5 - Planning', 'State': '1', 'severity': '3', 'sla_due': '2020-10-10 10:10:11',
"description": "creating a test ticket"
}
}
EXPECTED_CREATE_TICKET_WITH_OUT_JSON = {}
EXPECTED_QUERY_TICKETS = {
'Ticket(val.ID===obj.ID)': [
{
'ID': 'sys_id', 'Summary': "Can't read email", 'Number': 'INC0000001',
'CreatedOn': '2018-04-03 18:24:13', 'Active': 'false', 'CloseCode': 'Closed/Resolved by Caller',
'OpenedAt': '2019-09-02 23:09:51', 'ResolvedBy': 'admin', 'OpenedBy': 'admin', 'Creator': 'admin',
'Assignee': 'admin', 'Priority': '1 - Critical', 'State': '7'
},
{
'ID': 'sys_id', 'Summary': 'Network file shares access issue', 'Number': 'INC0000002',
'CreatedOn': '2018-03-23 22:30:06', 'Active': 'true', 'OpenedAt': '2019-08-27 23:07:12',
'OpenedBy': 'admin', 'Creator': 'admin', 'Assignee': 'admin', 'Priority': '1 - Critical', 'State': '3'
},
{'ID': 'sys_id', 'Summary': 'Wireless access is down in my area', 'Number': 'INC0000003',
'CreatedOn': '2018-04-07 14:41:46', 'Active': 'true', 'OpenedAt': '2019-09-03 23:07:30',
'OpenedBy': 'admin', 'Creator': 'admin', 'Assignee': 'admin', 'Priority': '1 - Critical', 'State': '2'
}
],
'ServiceNow.Ticket(val.ID===obj.ID)': [
{
'ID': 'sys_id', 'Summary': "Can't read email", 'Number': 'INC0000001', 'CreatedOn': '2018-04-03 18:24:13',
'Active': 'false', 'CloseCode': 'Closed/Resolved by Caller', 'OpenedAt': '2019-09-02 23:09:51',
'ResolvedBy': 'admin', 'OpenedBy': 'admin', 'Creator': 'admin', 'Assignee': 'admin',
'Priority': '1 - Critical', 'State': '7'
},
{'ID': 'sys_id', 'Summary': 'Network file shares access issue', 'Number': 'INC0000002',
'CreatedOn': '2018-03-23 22:30:06', 'Active': 'true', 'OpenedAt': '2019-08-27 23:07:12',
'OpenedBy': 'admin', 'Creator': 'admin', 'Assignee': 'admin', 'Priority': '1 - Critical', 'State': '3'
},
{'ID': 'sys_id', 'Summary': 'Wireless access is down in my area', 'Number': 'INC0000003',
'CreatedOn': '2018-04-07 14:41:46', 'Active': 'true', 'OpenedAt': '2019-09-03 23:07:30', 'OpenedBy': 'admin',
'Creator': 'admin', 'Assignee': 'admin', 'Priority': '1 - Critical', 'State': '2'
}
]
}
EXPECTED_QUERY_TICKETS_EXCLUDE_REFERENCE_LINK = {
'Ticket(val.ID===obj.ID)': [
{
'ID': '9c573169c611228700193229fff72400', 'Summary': "Can't read email", 'Number': 'INC0000001',
'CreatedOn': '06/12/2018 10:24:13', 'Active': 'false', 'CloseCode': 'Closed/Resolved by Caller',
'OpenedAt': '06/05/2020 16:09:51', 'ResolvedBy': '<NAME>', 'OpenedBy': 'Joe Employee',
'Creator': 'Joe Employee', 'Assignee': '<NAME>', 'Priority': '1 - Critical', 'State': 'Closed'
}
],
'ServiceNow.Ticket(val.ID===obj.ID)': [
{
'ID': '9c573169c611228700193229fff72400',
'Summary': "Can't read email", 'Number': 'INC0000001',
'CreatedOn': '06/12/2018 10:24:13', 'Active': 'false',
'CloseCode': 'Closed/Resolved by Caller',
'OpenedAt': '06/05/2020 16:09:51', 'ResolvedBy': '<NAME>',
'OpenedBy': 'Joe Employee', 'Creator': 'Joe Employee',
'Assignee': '<NAME>', 'Priority': '1 - Critical',
'State': 'Closed'
}
]
}
EXPECTED_ADD_LINK_HR = '### Link successfully added to ServiceNow ticket'
EXPECTED_ADD_COMMENT_HR = '### Comment successfully added to ServiceNow ticket'
EXPECTED_UPLOAD_FILE = {
'ServiceNow.Ticket(val.ID===obj.ID)': {
'ID': 'sys_id', 'File': {'Filename': 'test_file', 'Link': 'test_link', 'SystemID': 'system_id'}
},
'Ticket(val.ID===obj.ID)': {
'ID': 'sys_id', 'File': {'Filename': 'test_file', 'Link': 'test_link', 'SystemID': 'system_id'}
}
}
EXPECTED_GET_TICKET_NOTES = {
'ServiceNow.Ticket(val.ID===obj.ID)': {
'ID': 'sys_id', 'Note': [
{'Value': '[code]<a class="web" target="_blank" href="http://www.demisto.com" >demsito_link</a>[/code]',
'CreatedOn': '2020-04-07 07:32:12', 'CreatedBy': 'admin', 'Type': 'Work Note'},
{'Value': '[code]<a class="web" target="_blank" href="http://www.demisto.com" >demsito_link</a>[/code]',
'CreatedOn': '2020-04-07 07:25:51', 'CreatedBy': 'admin', 'Type': 'Work Note'},
{'Value': 'Nice work!', 'CreatedOn': '2020-04-07 07:46:34', 'CreatedBy': 'admin', 'Type': 'Work Note'},
{'Value': 'Nice work!', 'CreatedOn': '2020-04-07 07:46:25', 'CreatedBy': 'admin', 'Type': 'Work Note'},
{'Value': '[code]<a class="web" target="_blank" href="http://www.demisto.com" >demsito_link</a>[/code]',
'CreatedOn': '2020-04-07 07:26:01', 'CreatedBy': 'admin', 'Type': 'Work Note'}]
}
}
EXPECTED_GET_RECORD = {
'ServiceNow.Record(val.ID===obj.ID)': {
'asset_tag': 'P1000479', 'display_name': 'P1000479 - Apple MacBook Pro 15"', 'ID': 'sys_id'
}
}
EXPECTED_UPDATE_RECORD = {
'ServiceNow.Record(val.ID===obj.ID)': {
'ID': 'sys_id', 'UpdatedBy': 'system', 'UpdatedAt': '2020-04-07 06:31:50', 'CreatedBy': 'admin',
'CreatedAt': '2019-02-23 08:14:21'
}
}
EXPECTED_CREATE_RECORD = {
'ServiceNow.Record(val.ID===obj.ID)': {
'ID': 'sys_id', 'UpdatedBy': 'admin', 'UpdatedAt': '2020-04-07 12:48:38', 'CreatedBy': 'admin',
'CreatedAt': '2020-04-07 12:48:38'
}
}
EXPECTED_QUERY_TABLE = {
'ServiceNow.Record(val.ID===obj.ID)': [
{
'sys_updated_by': 'system', 'asset_tag': 'P1000807', 'display_name': 'P1000807 - Apple MacBook Pro 17"',
'ID': 'sys_id2'
},
{
'sys_updated_by': 'system', 'asset_tag': 'P1000637', 'display_name': 'P1000637 - Apple MacBook Air 13"',
'ID': 'sys_id3'
},
{
'sys_updated_by': 'system', 'asset_tag': 'P1000412', 'display_name':
'P1000412 - Apple MacBook Pro 17"', 'ID': 'sys_id4'
}
]
}
EXPECTED_QUERY_TABLE_SYS_PARAMS = {
'ServiceNow.Record(val.ID===obj.ID)': [
{
'number': 'TASK0000001', 'state': '1',
'description': 'Order from vendor or move from in-stock inventory\n\t\t',
'approval': 'not requested', 'escalation': '0', 'ID': '1234'
}
]
}
EXPECTED_LIST_TABLE_FIELDS = {
'ServiceNow.Field': [
{'Name': 'acquisition_method'}, {'Name': 'asset_tag'}, {'Name': 'assigned'}, {'Name': 'assigned_to'},
{'Name': 'beneficiary'}, {'Name': 'checked_in'}, {'Name': 'checked_out'}, {'Name': 'ci'}, {'Name': 'comments'},
{'Name': 'company'}, {'Name': 'cost'}, {'Name': 'cost_center'}, {'Name': 'delivery_date'},
{'Name': 'department'}, {'Name': 'depreciated_amount'}, {'Name': 'depreciation'}, {'Name': 'depreciation_date'},
{'Name': 'display_name'}, {'Name': 'disposal_reason'}, {'Name': 'due'}, {'Name': 'due_in'},
{'Name': 'expenditure_type'}, {'Name': 'gl_account'}, {'Name': 'install_date'}, {'Name': 'install_status'},
{'Name': 'invoice_number'}, {'Name': 'justification'}, {'Name': 'lease_id'}, {'Name': 'location'},
{'Name': 'managed_by'}, {'Name': 'model'}, {'Name': 'model_category'}, {'Name': 'old_status'},
{'Name': 'old_substatus'}, {'Name': 'order_date'}, {'Name': 'owned_by'}, {'Name': 'parent'},
{'Name': 'po_number'}, {'Name': 'pre_allocated'}, {'Name': 'purchase_date'}, {'Name': 'quantity'},
{'Name': 'request_line'}, {'Name': 'resale_price'}, {'Name': 'reserved_for'}, {'Name': 'residual'},
{'Name': 'residual_date'}, {'Name': 'retired'}, {'Name': 'retirement_date'}, {'Name': 'salvage_value'},
{'Name': 'serial_number'}, {'Name': 'skip_sync'}, {'Name': 'stockroom'}, {'Name': 'substatus'},
{'Name': 'support_group'}, {'Name': 'supported_by'}, {'Name': 'sys_class_name'}, {'Name': 'sys_created_by'},
{'Name': 'sys_created_on'}, {'Name': 'sys_domain'}, {'Name': 'sys_domain_path'}, {'Name': 'sys_id'},
{'Name': 'sys_mod_count'}, {'Name': 'sys_tags'}, {'Name': 'sys_updated_by'}, {'Name': 'sys_updated_on'},
{'Name': 'vendor'}, {'Name': 'warranty_expiration'}, {'Name': 'work_notes'}
]
}
EXPECTED_QUERY_COMPUTERS = {
'ServiceNow.Computer(val.ID===obj.ID)': [
{
'ID': '1234', 'AssetTag': 'P1000357', 'Name': 'Precision T5500 Workstation',
'DisplayName': 'P1000357 - Precision T5500 Workstation', 'OperatingSystem': 'Windows XP Professional',
'Company': 'admin', 'AssignedTo': 'admin', 'State': 'In use', 'Cost': '1329 USD'
}
]
}
EXPECTED_GET_TABLE_NAME = {
'ServiceNow.Table(val.ID===obj.ID)': [
{
'ID': '123', 'Name': 'cmdb_ci_lb_ace', 'SystemName': 'CMDB CI Lb Ace'
}
]
}
EXPECTED_ADD_TAG | |
import json
import logging
from flask import Blueprint, render_template, request, flash, redirect, url_for, abort
from flask_login import current_user, login_required
from thewarden.node.utils import (
dojo_add_hd,
dojo_auth,
dojo_status,
tor_request,
dojo_get_settings,
dojo_multiaddr,
dojo_get_txs,
)
from thewarden import db, test_tor
from thewarden.node.forms import DojoForm, AddressForm, Custody_Account
from thewarden.models import User, BitcoinAddresses, AccountInfo
from thewarden.pricing_engine.pricing import api_keys_class
node = Blueprint("node", __name__)
# Returns a JSON with Test Response on TOR
@node.route("/testtor", methods=["GET"])
@login_required
def testtor():
return json.dumps(test_tor())
@node.route("/tor_setup", methods=["GET"])
@login_required
def tor_setup():
tor_enabled = test_tor()
return render_template("tor.html",
title="Tor Config and Check",
tor_enabled=tor_enabled)
@node.route("/dojo_setup", methods=["GET", "POST"])
@login_required
def dojo_setup():
at = dojo_auth()
try:
status = dojo_status().json()
except AttributeError:
status = dojo_status()
user_info = User.query.filter_by(username=current_user.username).first()
form = DojoForm()
if form.validate_on_submit():
api_keys_json = api_keys_class.loader()
api_keys_json['dojo']['onion'] = form.dojo_onion.data
api_keys_json['dojo']['api_key'] = form.dojo_apikey.data
api_keys_json['dojo']['token'] = form.dojo_token.data
api_keys_class.saver(api_keys_json)
at = dojo_auth()
elif request.method == "GET":
at = dojo_auth()
api_keys_json = api_keys_class.loader()
form.dojo_onion.data = api_keys_json['dojo']['onion']
form.dojo_apikey.data = api_keys_json['dojo']['api_key']
try:
form.dojo_token.data = at["authorizations"]["access_token"]
except (KeyError, TypeError):
form.dojo_token.data = "Error getting token"
last_block = tor_request("https://api.oxt.me/lastblock")
if last_block == "ConnectionError":
last_block = " - "
progress = "unknown"
else:
try:
if status["blocks"]:
last_block = last_block.json()
progress = float(status["blocks"]) / float(
last_block["data"][0]["height"])
else:
progress = "unknown"
except (KeyError, TypeError):
progress = "unknown"
return render_template(
"dojo.html",
title="Dojo Config and Check",
form=form,
at=at,
user_info=user_info,
status=status,
last_block=last_block,
progress=progress,
)
@node.route("/rescan_all", methods=['GET'])
@login_required
def rescan_all():
logging.info("Starting a rescan of all addresses in database...")
bitcoin_addresses = BitcoinAddresses.query.filter_by(
user_id=current_user.username)
logging.info("Found a total of " + str(bitcoin_addresses.count()) +
" addresses to rescan")
# Get token
at = dojo_get_settings()["token"]
address_string = ""
# send a pipe separated list to Dojo
for address in bitcoin_addresses:
address_string += address.address_hash + "|"
address_string = address_string[:-1]
reg = dojo_multiaddr(address_string, "active", at)
if "error" in reg.json():
flash(
f"Something went wrong while rescanning addresses." +
"Error: {reg.json()['error']}",
"danger",
)
else:
flash(
"Address rescan finished for a total of " +
str(bitcoin_addresses.count()) + " addresses", "success")
return (json.dumps("OK"))
@node.route("/bitcoin_address", methods=["GET", "POST"])
@login_required
# Takes argument id to edit an address
def bitcoin_address():
form = AddressForm()
title = form_title = "Bitcoin Address"
# address_list = BitcoinAddresses.query.filter_by(user_id=current_user.username)
if form.validate_on_submit():
id = request.args.get("id")
if id:
bitcoin_address = BitcoinAddresses.query.filter_by(
user_id=current_user.username).filter_by(
address_id=id).first()
if bitcoin_address is None:
flash("Address id not found", "danger")
return redirect(url_for("node.bitcoin_monitor"))
bitcoin_address.user_id = current_user.username
bitcoin_address.address_hash = form.address.data
bitcoin_address.check_method = form.check_method.data
bitcoin_address.account_id = form.account.data
bitcoin_address.auto_check = form.auto_check.data
bitcoin_address.imported_from_hdaddress = form.hd_parent.data
bitcoin_address.notes = form.notes.data
db.session.commit()
# Make sure address is registered with Dojo
at = dojo_get_settings()["token"]
reg = dojo_multiaddr(bitcoin_address.address_hash, "active", at)
if "error" in reg:
flash(
f"Something went wrong when registering this address to your Dojo. \
It was added to the database but you may want to check your connection. Error: {reg}",
"danger",
)
return redirect(url_for("node.bitcoin_monitor"))
flash("Address edited", "success")
return redirect(url_for("node.bitcoin_monitor"))
bitcoin_address = BitcoinAddresses(
user_id=current_user.username,
address_hash=form.address.data,
check_method=form.check_method.data,
account_id=form.account.data,
auto_check=form.auto_check.data,
imported_from_hdaddress=form.hd_parent.data,
notes=form.notes.data,
)
try:
db.session.add(bitcoin_address)
db.session.commit()
except Exception as e:
flash(
f"Address not included in database. Something went wrong. Try again. | Error Message: {e}",
"danger",
)
try:
# Import this address into the Dojo database
at = dojo_get_settings()["token"]
# Register this new address
# dojo_multiaddr(bitcoin_address.address_hash, "new", at)
dojo_multiaddr(bitcoin_address.address_hash, "active", at)
flash(f"Address included.", "success")
except Exception as err:
flash(
"Address included in database but something went wrong while trying "
+
f"to register this address at the Dojo. Check if your Dojo is connected | Error {err}",
"warning",
)
return redirect(url_for("node.bitcoin_monitor"))
elif request.method == "GET":
title = form_title = "Register New Bitcoin Address"
form.auto_check.data = True
id = request.args.get("id")
if id:
title = form_title = "Edit Bitcoin Address"
bitcoinaddress = (BitcoinAddresses.query.filter_by(
user_id=current_user.username).filter_by(
address_id=id).first())
if bitcoinaddress is None:
flash("Address id not found", "danger")
return redirect(url_for("node.bitcoin_monitor"))
form.address.data = bitcoinaddress.address_hash
form.check_method.data = bitcoinaddress.check_method
form.account.data = bitcoinaddress.account_id
form.hd_parent.data = bitcoinaddress.imported_from_hdaddress
form.notes.data = bitcoinaddress.notes
return render_template("new_address.html",
form=form,
form_title=form_title,
title=title)
@node.route("/bitcoin_addresses", methods=["GET", "POST"])
@login_required
def bitcoin_addresses():
addresses = BitcoinAddresses.query.filter_by(user_id=current_user.username)
if addresses.count() == 0:
return render_template("bitcoin_empty.html", dojo=dojo_get_settings())
return render_template("bitcoin_addresses.html",
title="Transaction History",
addresses=addresses)
@node.route("/bitcoin_monitor", methods=["GET", "POST"])
@login_required
def bitcoin_monitor():
# Create a list of all addresses both in accounts and in bitcoin addresses
account_list = []
for account in BitcoinAddresses.query.filter_by(
user_id=current_user.username).distinct(
BitcoinAddresses.account_id):
account_list.append(account.account_id)
for account in AccountInfo.query.filter_by(
user_id=current_user.username).distinct(AccountInfo.account_id):
account_list.append(account.account_longname)
# Remove duplicates
account_list = list(set(account_list))
# Now find each account_id for account_list items (for a link to edit)
acc_dict = {}
for ac_name in account_list:
try:
ac_id = (AccountInfo.query.filter_by(
user_id=current_user.username).filter_by(
account_longname=ac_name).first().account_id)
except AttributeError:
ac_id = 0
acc_dict[ac_name] = ac_id
addresses = BitcoinAddresses.query.filter_by(user_id=current_user.username)
accounts = AccountInfo.query.filter_by(user_id=current_user.username)
# accounts_addresses = BitcoinAddresses.query().filter_by(user_id=current_user.username)
total_accounts = AccountInfo.query.filter_by(
user_id=current_user.username).count()
accounts_none = (
(AccountInfo.query.filter_by(user_id=current_user.username).filter_by(
account_blockchain_id=None).count()) +
AccountInfo.query.filter_by(user_id=current_user.username).filter_by(
account_blockchain_id='').count())
if addresses.count() == 0:
if (total_accounts - accounts_none) != 0:
return render_template(
"bitcoin_monitor.html",
title="Bitcoin Warden",
addresses=addresses,
accounts=account_list,
acc_dict=acc_dict,
account_info=accounts,
)
return render_template("bitcoin_empty.html",
title="Addresses Not Found",
dojo=dojo_get_settings())
return render_template(
"bitcoin_monitor.html",
title="Bitcoin Warden",
addresses=addresses,
accounts=account_list,
acc_dict=acc_dict,
account_info=accounts,
)
@node.route("/bitcoin_transactions/<address>", methods=["GET", "POST"])
@login_required
def bitcoin_transactions(address):
logging.info(f"Started Bitcoin Transaction method for {address}")
meta = {}
transactions = {}
# Check if HD Address
hd_address_list = ("xpub", "ypub", "zpub")
if address.lower().startswith(hd_address_list):
hd_address = True
# Get address data from DB
bitcoin_address = (AccountInfo.query.filter_by(
user_id=current_user.username).filter_by(
account_blockchain_id=address).first())
else:
hd_address = False
# Get address data from DB
# Check first if this address is in database
bitcoin_address = (BitcoinAddresses.query.filter_by(
user_id=current_user.username).filter_by(
address_hash=address).first())
transactions["error"] = ""
meta["hd"] = hd_address
meta["success"] = False
meta["n_txs"] = 0
if bitcoin_address:
logging.info("Address Found in Database")
meta["found"] = True # Found in database
meta["account"] = bitcoin_address.account_id
# Let's check if there's a balance in this address
at = dojo_get_settings()["token"] # Get Dojo Authent Token
try:
derivation = "pubkey"
if hd_address:
derivation = bitcoin_address.xpub_derivation
if not derivation:
derivation = "pubkey"
balance = dojo_multiaddr(address, "pubkey", at).json()
except AttributeError:
logging.warn("Did not receive a json back from multi_add")
# balance = dojo_multiaddr(address, derivation, at)
# Check if there's a balance in this address
# {'wallet': {'final_balance': 0}, 'info':
# {'latest_block': {'height': 586366, 'hash': '00000000000000000015ea0990b12ea4c04161203a305a0ceb5c67a678468f20',
# 'time': 1563702071}}, 'addresses': [], 'txs': []}
try:
if balance["wallet"]["final_balance"] >= 0:
meta["balance"] = balance["wallet"]["final_balance"]
meta["success"] = True
except (KeyError, UnboundLocalError):
transactions[
"error"] += "Could not retrieve a balance for this address. Check the address."
logging.warn("No balance found on this address")
except TypeError:
try:
balance = balance.json()
transactions["error"] += balance["error"]
except TypeError:
transactions[
"error"] += "An unknown error occured. Check connection settings and address info."
# Check if there are transactions in this address
try:
# the [0] here is needed since we're using multiaddr but only returning the 1st (and only) address
if balance["addresses"][0]["n_tx"] > 0:
meta["n_txs"] = balance["addresses"][0]["n_tx"]
except Exception:
logging.info("No txs found for this address")
transactions["error"] += "Could not retrieve any transactions."
meta["n_txs"] = 0
# Transactions, at this stage, can only be imported using Dojo
if "n_txs" in meta:
meta["check_method"] = "Dojo"
transactions = dojo_get_txs(address, at)
# Some times dojo returns the transactions header as 'txs' - copying
# so it can be found:
if 'txs' in transactions:
transactions['transactions'] = transactions['txs'][0]
if ("balance" in meta) and (meta["balance"] >= 0):
meta["success"] = True
logging.info("Success: Address data gathered")
# OK, this address is not in Database, so do nothing
else:
logging.warn(
"Address not found in database - returning an error message")
meta["found"] = False
transactions[
"error"] = "This address was not found in your list of addresses. Please include."
return render_template(
"bitcoin_transactions.html",
title="Address Transactions",
meta=meta,
address=address,
transactions=transactions,
)
@node.route("/custody_account", methods=["GET", "POST"])
@login_required
# Takes argument id to edit an account
def custody_account():
form = Custody_Account()
title = form_title = "Custody Account"
if form.validate_on_submit():
id = request.args.get("id")
account_name = request.args.get("name")
if id:
account = (AccountInfo.query.filter_by(
user_id=current_user.username).filter_by(
account_id=id).first())
if account_name:
account = (AccountInfo.query.filter_by(
user_id=current_user.username).filter_by(
account_longname=account_name).first())
if id or account_name:
if account is None:
account = AccountInfo()
account.user_id = current_user.username
account.account_blockchain_id = form.account_blockchain_id.data
account.account_longname = form.account_longname.data
account.check_method = form.check_method.data
account.auto_check = form.auto_check.data
account.notes = form.notes.data
db.session.commit()
# Make sure address is registered with Dojo
at = dojo_get_settings()["token"]
reg = dojo_add_hd(account.account_blockchain_id, "restore", at)
if "error" in reg:
flash(
f"Something went wrong when registering this address to your Dojo. \
It was added to the database but you may want to check your connection. Error: {reg}",
"danger",
)
return redirect(url_for("node.bitcoin_monitor"))
flash("Account edited", "success")
return redirect(url_for("node.bitcoin_monitor"))
account = AccountInfo(
user_id=current_user.username,
account_longname=form.account_longname.data,
check_method=form.check_method.data,
auto_check=form.auto_check.data,
account_blockchain_id=form.account_blockchain_id.data,
notes=form.notes.data,
)
try:
db.session.add(account)
db.session.commit()
# Import this address into the Dojo database
at = dojo_get_settings()["token"]
# Register this new address
reg = dojo_add_hd(account.account_blockchain_id, "restore", at)
if "error" | |
"""
U-TAE Implementation
Author: <NAME> (github/VSainteuf)
License: MIT
"""
import torch
import torch.nn as nn
from src.backbones.convlstm import ConvLSTM, BConvLSTM
from src.backbones.ltae import LTAE2d
class UTAE(nn.Module):
    def __init__(
        self,
        input_dim,
        encoder_widths=[64, 64, 64, 128],
        decoder_widths=[32, 32, 64, 128],
        out_conv=[32, 20],
        str_conv_k=4,
        str_conv_s=2,
        str_conv_p=1,
        agg_mode="att_group",
        encoder_norm="group",
        n_head=16,
        d_model=256,
        d_k=4,
        encoder=False,
        return_maps=False,
        pad_value=0,
        padding_mode="reflect",
    ):
        """
        U-TAE architecture for spatio-temporal encoding of satellite image time series.
        Args:
            input_dim (int): Number of channels in the input images.
            encoder_widths (List[int]): List giving the number of channels of the successive encoder_widths of the convolutional encoder.
                This argument also defines the number of encoder_widths (i.e. the number of downsampling steps +1)
                in the architecture.
                The number of channels are given from top to bottom, i.e. from the highest to the lowest resolution.
            decoder_widths (List[int], optional): Same as encoder_widths but for the decoder. The order in which the number of
                channels should be given is also from top to bottom. If this argument is not specified the decoder
                will have the same configuration as the encoder.
            out_conv (List[int]): Number of channels of the successive convolutions of the
                final output block applied after the decoder.
            str_conv_k (int): Kernel size of the strided up and down convolutions.
            str_conv_s (int): Stride of the strided up and down convolutions.
            str_conv_p (int): Padding of the strided up and down convolutions.
            agg_mode (str): Aggregation mode for the skip connections. Can either be:
                - att_group (default) : Attention weighted temporal average, using the same
                channel grouping strategy as in the LTAE. The attention masks are bilinearly
                resampled to the resolution of the skipped feature maps.
                - att_mean : Attention weighted temporal average,
                using the average attention scores across heads for each date.
                - mean : Temporal average excluding padded dates.
            encoder_norm (str): Type of normalisation layer to use in the encoding branch. Can either be:
                - group : GroupNorm (default)
                - batch : BatchNorm
                - instance : InstanceNorm
            n_head (int): Number of heads in LTAE.
            d_model (int): Parameter of LTAE
            d_k (int): Key-Query space dimension
            encoder (bool): If true, the model is used as an encoder: it returns the
                feature maps and skips the final output convolutions (default False)
            return_maps (bool): If true, the feature maps instead of the class scores are returned (default False)
            pad_value (float): Value used by the dataloader for temporal padding.
            padding_mode (str): Spatial padding strategy for convolutional layers (passed to nn.Conv2d).
        """
        super(UTAE, self).__init__()
        self.n_stages = len(encoder_widths)
        self.return_maps = return_maps
        self.encoder_widths = encoder_widths
        self.decoder_widths = decoder_widths
        # Channel width of the top (highest-resolution) output feature map.
        self.enc_dim = (
            decoder_widths[0] if decoder_widths is not None else encoder_widths[0]
        )
        # Total channel count if all decoder feature maps were stacked.
        self.stack_dim = (
            sum(decoder_widths) if decoder_widths is not None else sum(encoder_widths)
        )
        self.pad_value = pad_value
        self.encoder = encoder
        if encoder:
            # Encoder mode always exposes the intermediate feature maps.
            self.return_maps = True
        if decoder_widths is not None:
            # Encoder and decoder must have the same depth and meet at the
            # same width at the lowest resolution.
            assert len(encoder_widths) == len(decoder_widths)
            assert encoder_widths[-1] == decoder_widths[-1]
        else:
            decoder_widths = encoder_widths
        # Input projection applied per time step (weights shared across time).
        self.in_conv = ConvBlock(
            nkernels=[input_dim] + [encoder_widths[0], encoder_widths[0]],
            pad_value=pad_value,
            norm=encoder_norm,
            padding_mode=padding_mode,
        )
        # Strided-convolution downsampling blocks, top to bottom.
        self.down_blocks = nn.ModuleList(
            DownConvBlock(
                d_in=encoder_widths[i],
                d_out=encoder_widths[i + 1],
                k=str_conv_k,
                s=str_conv_s,
                p=str_conv_p,
                pad_value=pad_value,
                norm=encoder_norm,
                padding_mode=padding_mode,
            )
            for i in range(self.n_stages - 1)
        )
        # Transposed-convolution upsampling blocks, bottom to top.
        self.up_blocks = nn.ModuleList(
            UpConvBlock(
                d_in=decoder_widths[i],
                d_out=decoder_widths[i - 1],
                d_skip=encoder_widths[i - 1],
                k=str_conv_k,
                s=str_conv_s,
                p=str_conv_p,
                norm="batch",
                padding_mode=padding_mode,
            )
            for i in range(self.n_stages - 1, 0, -1)
        )
        # Temporal attention encoder applied at the lowest resolution;
        # return_att=True so its masks can drive the skip aggregation.
        self.temporal_encoder = LTAE2d(
            in_channels=encoder_widths[-1],
            d_model=d_model,
            n_head=n_head,
            mlp=[d_model, encoder_widths[-1]],
            return_att=True,
            d_k=d_k,
        )
        self.temporal_aggregator = Temporal_Aggregator(mode=agg_mode)
        self.out_conv = ConvBlock(nkernels=[decoder_widths[0]] + out_conv, padding_mode=padding_mode)
    def forward(self, input, batch_positions=None, return_att=False):
        # A (batch, time) position is padding when every value equals pad_value.
        pad_mask = (
            (input == self.pad_value).all(dim=-1).all(dim=-1).all(dim=-1)
        )  # BxT pad mask
        out = self.in_conv.smart_forward(input)
        feature_maps = [out]
        # SPATIAL ENCODER
        for i in range(self.n_stages - 1):
            out = self.down_blocks[i].smart_forward(feature_maps[-1])
            feature_maps.append(out)
        # TEMPORAL ENCODER
        out, att = self.temporal_encoder(
            feature_maps[-1], batch_positions=batch_positions, pad_mask=pad_mask
        )
        # SPATIAL DECODER: collapse each skipped sequence with the attention
        # masks, then upsample and fuse stage by stage.
        if self.return_maps:
            maps = [out]
        for i in range(self.n_stages - 1):
            skip = self.temporal_aggregator(
                feature_maps[-(i + 2)], pad_mask=pad_mask, attn_mask=att
            )
            out = self.up_blocks[i](out, skip)
            if self.return_maps:
                maps.append(out)
        if self.encoder:
            return out, maps
        else:
            out = self.out_conv(out)
            if return_att:
                return out, att
            if self.return_maps:
                return out, maps
            else:
                return out
class TemporallySharedBlock(nn.Module):
    """
    Helper module for convolutional encoding blocks that are shared across a sequence.
    This module adds the self.smart_forward() method to the block.
    smart_forward will combine the batch and temporal dimension of an input tensor
    if it is 5-D and apply the shared convolutions to all the (batch x temp) positions.
    """
    def __init__(self, pad_value=None):
        # pad_value: temporal padding marker; when set, frames consisting
        # entirely of pad_value bypass the convolutions and are re-filled
        # with pad_value in the output.
        super(TemporallySharedBlock, self).__init__()
        self.out_shape = None
        self.pad_value = pad_value
    def smart_forward(self, input):
        # 4-D input (no temporal axis): plain forward.
        if len(input.shape) == 4:
            return self.forward(input)
        else:
            b, t, c, h, w = input.shape
            if self.pad_value is not None:
                # Dummy forward pass on zeros to discover the output shape,
                # needed below to pre-fill the padded output tensor.
                dummy = torch.zeros(input.shape, device=input.device).float()
                self.out_shape = self.forward(dummy.view(b * t, c, h, w)).shape
            # Fold time into the batch dimension so the shared convolutions
            # process every (batch, time) position at once.
            out = input.view(b * t, c, h, w)
            if self.pad_value is not None:
                # A frame is padding when every value equals pad_value.
                pad_mask = (out == self.pad_value).all(dim=-1).all(dim=-1).all(dim=-1)
                if pad_mask.any():
                    # Forward only the real frames; padded positions keep
                    # pad_value in the output.
                    temp = (
                        torch.ones(
                            self.out_shape, device=input.device, requires_grad=False
                        )
                        * self.pad_value
                    )
                    temp[~pad_mask] = self.forward(out[~pad_mask])
                    out = temp
                else:
                    out = self.forward(out)
            else:
                out = self.forward(out)
            _, c, h, w = out.shape
            # Restore the (batch, time) split.
            out = out.view(b, t, c, h, w)
            return out
class ConvLayer(nn.Module):
    """Stack of Conv2d -> (normalisation) -> ReLU stages.

    The stage widths are given by consecutive pairs of nkernels; the ReLU
    after the last stage can be disabled with last_relu=False. norm selects
    BatchNorm, InstanceNorm, GroupNorm (with n_groups) or none.
    """

    def __init__(
        self,
        nkernels,
        norm="batch",
        k=3,
        s=1,
        p=1,
        n_groups=4,
        last_relu=True,
        padding_mode="reflect",
    ):
        super(ConvLayer, self).__init__()

        def make_norm(num_feats):
            # Build the requested normalisation module, or None.
            if norm == "batch":
                return nn.BatchNorm2d(num_feats)
            if norm == "instance":
                return nn.InstanceNorm2d(num_feats)
            if norm == "group":
                return nn.GroupNorm(num_channels=num_feats, num_groups=n_groups)
            return None

        modules = []
        n_stages = len(nkernels) - 1
        for idx in range(n_stages):
            modules.append(
                nn.Conv2d(
                    in_channels=nkernels[idx],
                    out_channels=nkernels[idx + 1],
                    kernel_size=k,
                    padding=p,
                    stride=s,
                    padding_mode=padding_mode,
                )
            )
            norm_module = make_norm(nkernels[idx + 1])
            if norm_module is not None:
                modules.append(norm_module)
            # ReLU after every stage, except after the last when disabled.
            if last_relu or idx < n_stages - 1:
                modules.append(nn.ReLU())
        self.conv = nn.Sequential(*modules)

    def forward(self, input):
        return self.conv(input)
class ConvBlock(TemporallySharedBlock):
    """Plain convolutional block with temporal weight sharing.

    Wraps a single ConvLayer stack; smart_forward (inherited from
    TemporallySharedBlock) lets it process 5-D (batch, time, C, H, W)
    inputs frame by frame with shared weights.
    """

    def __init__(
        self,
        nkernels,
        pad_value=None,
        norm="batch",
        last_relu=True,
        padding_mode="reflect",
    ):
        super(ConvBlock, self).__init__(pad_value=pad_value)
        # All convolutional work is delegated to one ConvLayer stack.
        self.conv = ConvLayer(
            nkernels=nkernels,
            norm=norm,
            padding_mode=padding_mode,
            last_relu=last_relu,
        )

    def forward(self, input):
        return self.conv(input)
class DownConvBlock(TemporallySharedBlock):
    """Strided-conv downsampling block with a residual refinement stage."""

    def __init__(
        self,
        d_in,
        d_out,
        k,
        s,
        p,
        pad_value=None,
        norm="batch",
        padding_mode="reflect",
    ):
        super(DownConvBlock, self).__init__(pad_value=pad_value)
        # Strided convolution performs the spatial downsampling.
        self.down = ConvLayer(
            nkernels=[d_in, d_in],
            norm=norm,
            k=k,
            s=s,
            p=p,
            padding_mode=padding_mode,
        )
        # Channel projection d_in -> d_out.
        self.conv1 = ConvLayer(
            nkernels=[d_in, d_out], norm=norm, padding_mode=padding_mode
        )
        # Refinement at the output width, used as a residual branch.
        self.conv2 = ConvLayer(
            nkernels=[d_out, d_out], norm=norm, padding_mode=padding_mode
        )

    def forward(self, input):
        downsampled = self.down(input)
        features = self.conv1(downsampled)
        # Residual connection around the second convolution.
        return features + self.conv2(features)
class UpConvBlock(nn.Module):
    """Transposed-conv upsampling block that fuses an encoder skip connection."""

    def __init__(
        self, d_in, d_out, k, s, p, norm="batch", d_skip=None, padding_mode="reflect"
    ):
        super(UpConvBlock, self).__init__()
        skip_width = d_out if d_skip is None else d_skip
        # 1x1 conv + BN + ReLU applied to the skipped encoder features.
        self.skip_conv = nn.Sequential(
            nn.Conv2d(in_channels=skip_width, out_channels=skip_width, kernel_size=1),
            nn.BatchNorm2d(skip_width),
            nn.ReLU(),
        )
        # Strided transposed convolution performs the spatial upsampling.
        self.up = nn.Sequential(
            nn.ConvTranspose2d(
                in_channels=d_in, out_channels=d_out, kernel_size=k, stride=s, padding=p
            ),
            nn.BatchNorm2d(d_out),
            nn.ReLU(),
        )
        # Fuse upsampled features with the processed skip, then refine.
        self.conv1 = ConvLayer(
            nkernels=[d_out + skip_width, d_out], norm=norm, padding_mode=padding_mode
        )
        self.conv2 = ConvLayer(
            nkernels=[d_out, d_out], norm=norm, padding_mode=padding_mode
        )

    def forward(self, input, skip):
        upsampled = self.up(input)
        fused = torch.cat([upsampled, self.skip_conv(skip)], dim=1)
        fused = self.conv1(fused)
        # Residual refinement.
        return fused + self.conv2(fused)
class Temporal_Aggregator(nn.Module):
def __init__(self, mode="mean"):
super(Temporal_Aggregator, self).__init__()
self.mode = mode
def forward(self, x, pad_mask=None, attn_mask=None):
if pad_mask is not None and pad_mask.any():
if self.mode == "att_group":
n_heads, b, t, h, w = attn_mask.shape
attn = attn_mask.view(n_heads * b, t, h, w)
if x.shape[-2] > w:
attn = nn.Upsample(
size=x.shape[-2:], mode="bilinear", align_corners=False
)(attn)
else:
attn = nn.AvgPool2d(kernel_size=w // x.shape[-2])(attn)
attn = attn.view(n_heads, b, t, *x.shape[-2:])
attn = attn * (~pad_mask).float()[None, :, :, None, None]
out = torch.stack(x.chunk(n_heads, dim=2)) # hxBxTxC/hxHxW
out = attn[:, :, :, None, :, :] * out
out = out.sum(dim=2) # sum on temporal dim -> hxBxC/hxHxW
out = torch.cat([group for group in out], dim=1) # -> BxCxHxW
return out
elif self.mode == "att_mean":
attn = attn_mask.mean(dim=0) # average over heads -> BxTxHxW
attn = nn.Upsample(
size=x.shape[-2:], mode="bilinear", align_corners=False
)(attn)
attn = attn * (~pad_mask).float()[:, :, None, None]
out = (x * attn[:, :, None, :, :]).sum(dim=1)
return out
elif self.mode == "mean":
out = x * (~pad_mask).float()[:, :, None, None, None]
out = out.sum(dim=1) / (~pad_mask).sum(dim=1)[:, None, None, None]
return out
else:
if self.mode == "att_group":
n_heads, b, t, h, w = attn_mask.shape
attn = attn_mask.view(n_heads * b, t, h, w)
if x.shape[-2] > w:
attn = nn.Upsample(
size=x.shape[-2:], mode="bilinear", align_corners=False
)(attn)
else:
attn = nn.AvgPool2d(kernel_size=w // x.shape[-2])(attn)
attn = attn.view(n_heads, b, t, *x.shape[-2:])
out = torch.stack(x.chunk(n_heads, dim=2)) # hxBxTxC/hxHxW
out = attn[:, :, :, None, :, :] * out
out = out.sum(dim=2) # sum on temporal dim -> hxBxC/hxHxW
out = torch.cat([group for group in out], dim=1) | |
partially full time and partially part time'] = df2['Males aged 15 years and over by work activity during the reference year - 25% sample data']-df2['Did not work']-df2['Worked']
df2.drop(['Did not work', 'Worked'], axis=1, inplace=True)
df2 = df2[['LocalArea',
'Males aged 15 years and over by work activity during the reference year - 25% sample data',
'Worked full year, full time',
'Worked part year and/or part time',
'Worked partially full time and partially part time']].copy()
df2.set_axis(col_names, axis=1, inplace=True)
df2.insert(1, 'Type', 'male')
# Female
df3['Worked partially full time and partially part time'] = df3['Females aged 15 years and over by work activity during the reference year - 25% sample data']-df3['Did not work']-df3['Worked']
df3.drop(['Did not work', 'Worked'], axis=1, inplace=True)
df3 = df3[['LocalArea',
'Females aged 15 years and over by work activity during the reference year - 25% sample data',
'Worked full year, full time',
'Worked part year and/or part time',
'Worked partially full time and partially part time']].copy()
df3.set_axis(col_names, axis=1, inplace=True)
df3.insert(1, 'Type', 'female')
merged = pd.concat([df3, df2, df1])
merged.sort_values(by=['LocalArea', 'Type'], inplace=True)
census_dict['time_worked'] = merged
merged.to_csv(file_path + '/time_worked.csv')
return census_dict
###############################################################################
def clean_generation_status(census_dict, year, file_path):
    """Normalise the generation-status census table across census years.

    Args:
        census_dict (dict): Mapping of raw census table names to DataFrames.
        year (int): Census year (2001, 2006, 2011 or 2016); selects the
            source table and its column layout.
        file_path (str): Directory where generation_status.csv is written.

    Returns:
        dict: census_dict with a cleaned 'generation_status' DataFrame added.
    """
    col_names = ['LocalArea', 'Total Population 15 years and older',
                 '1st generation', '2nd generation', '3rd generation and over']
    if year == 2001:
        df = census_dict['Total population 15 years and over by generation status']
        df = df.iloc[:, 0:5].copy()
    elif year == 2006:
        df = census_dict['Total population 15 years and older by generation status']
        df = df.iloc[:, 0:5].copy()
    elif year == 2011:
        # 2011 data comes from the NHS extract rather than census_dict.
        df = pd.read_csv('data/processed/nhs/Generation status.csv', index_col=0)
        df = df.loc[df['Type'] == 'Total'].copy().reset_index()
        df.drop(['Type', 'index'], axis=1, inplace=True)
        df = df[['LocalArea',
                 '0_Total population in private households by generation status',
                 '1_First generation', '2_Second generation',
                 '3_Third generation or more']].copy()
    elif year == 2016:
        df = census_dict['Total - Generation status for the population in private households - 25% sample data']
        df = df.iloc[:, 0:5].copy()
    # FIX: set_axis(..., inplace=True) was removed in pandas 2.0;
    # assigning .columns is equivalent and works on every version.
    df.columns = col_names
    df.sort_values(by=['LocalArea'], inplace=True)
    census_dict['generation_status'] = df
    df.to_csv(file_path + '/generation_status.csv')
    return census_dict
###############################################################################
def clean_industry(census_dict, year, file_path):
    """Normalise the industry-of-employment census table across years.

    Strips the numeric prefixes from the NAICS industry column names,
    orders the industries alphabetically and writes industry.csv.

    Args:
        census_dict (dict): Mapping of raw census table names to DataFrames.
        year (int): Census year (2001, 2006, 2011 or 2016).
        file_path (str): Directory where industry.csv is written.

    Returns:
        dict: census_dict with a cleaned 'industry' DataFrame added.
    """
    if year == 2011:
        # 2011 data comes from the NHS extract rather than census_dict.
        df = pd.read_csv('data/processed/nhs/Industry.csv', index_col=0
                         ).query('Type == "Total"'
                                 ).drop(columns='Type')
        df = df.rename(columns={
            '0_Total labour force population aged 15 years and over by industry - North American Industry Classification System (NAICS) 2007': 'total'})
        meta = ['LocalArea', '2_All industries',
                '1_Industry - not applicable', 'total']
    else:
        if year == 2001:
            df = census_dict[
                'Total labour force 15 years and over by industry - 1997 North American Industry Classification System']
            df = df.rename(columns={
                'Total labour force 15 years and over by industry - 1997 North American Industry Classification System': 'total'})
            meta = ['LocalArea',
                    'All industries',
                    'Industry - Not applicable',
                    'total']
        elif year == 2006:
            df = census_dict[
                'Total labour force 15 years and over by industry - North American Industry Classification System 2002']
            df = df.rename(columns={
                'Total labour force 15 years and over by industry - North American Industry Classification System 2002': 'total'})
            meta = ['LocalArea',
                    'All industries',
                    'Industry - Not applicable',
                    'total']
        else:
            df = census_dict[
                'Total Labour Force population aged 15 years and over by Industry - North American Industry Classification System (NAICS) 2012 - 25% sample data']
            df = df.rename(columns={
                'Total Labour Force population aged 15 years and over by Industry - North American Industry Classification System (NAICS) 2012 - 25% sample data': 'total'})
            meta = ['LocalArea',
                    'All industry categories',
                    'Industry - NAICS2012 - not applicable',
                    'total']
    # Split the identifying columns from the per-industry counts.
    meta_df = df[meta]
    industries_df = df.drop(columns=meta)
    industries = industries_df.columns
    # Strip the numeric/underscore prefix, e.g. '18 Construction' -> 'Construction'.
    industries = [re.findall(r'^[0-9 -_]* (.*)', i)[0] for i in industries]
    industries_df.columns = industries
    industries_df = industries_df.loc[:, sorted(industries)]
    column_names = ['LocalArea',
                    'All industries',
                    'Industry - Not applicable',
                    'total',
                    'Accommodation and food services',
                    'Administrative and support, waste management and remediation services',
                    'Agriculture, forestry, fishing and hunting',
                    'Arts, entertainment and recreation', 'Construction',
                    'Educational services', 'Finance and insurance',
                    'Health care and social assistance',
                    'Information and cultural industries',
                    'Management of companies and enterprises', 'Manufacturing',
                    'Mining, quarrying, and oil and gas extraction',
                    'Other services (except public administration)',
                    'Professional, scientific and technical services',
                    'Public administration', 'Real estate and rental and leasing',
                    'Retail trade', 'Transportation and warehousing', 'Utilities',
                    'Wholesale trade']
    df = pd.concat([meta_df, industries_df], axis=1)
    # FIX: set_axis(..., inplace=True) was removed in pandas 2.0;
    # assigning .columns is equivalent and works on every version.
    df.columns = column_names
    df.sort_values(by=['LocalArea'], inplace=True)
    census_dict['industry'] = df
    df.to_csv(file_path + '/industry.csv')
    return census_dict
###############################################################################
def clean_labour_force_status(census_dict, year, file_path):
    """Normalise the labour-force-status table into Total/Male/Female rows.

    Args:
        census_dict (dict): Mapping of raw census table names to DataFrames.
        year (int): Census year (2001, 2006, 2011 or 2016).
        file_path (str): Directory where labour_force_status.csv is written.

    Returns:
        dict: census_dict with a cleaned 'labour_force_status' DataFrame added.
    """
    col_names = ['LocalArea', 'Type', 'Employed', 'Employment rate',
                 'In the labour force', 'Not in the labour force',
                 'Participation rate', 'Unemployed', 'Unemployment rate']
    if year == 2011:
        # 2011 data comes from the NHS extract rather than census_dict.
        df = pd.read_csv('data/processed/nhs/Labour force status.csv', index_col=0)
        df = df[['LocalArea', 'Type', '2_Employed', '6_Employment rate',
                 '1_In the labour force', '4_Not in the labour force',
                 '5_Participation rate', '3_Unemployed',
                 '7_Unemployment rate']].copy()
    else:
        # Column order that maps the raw tables onto col_names (minus 'Type').
        order = [0, 3, 7, 2, 5, 6, 4, 8]
        # .copy() so the census_dict source frames are never mutated below.
        if year == 2001:
            total = census_dict['Population - 15 years and over by labour force activity'].iloc[:, order].copy()
            male = census_dict['Total - Males 15 years and over'].iloc[:, order].copy()
            female = census_dict['Total - Females 15 years and over'].iloc[:, order].copy()
        elif year == 2006:
            total = census_dict['Total population 15 years and over by labour force activity'].iloc[:, order].copy()
            male = census_dict['Males 15 years and over - Labour force activity'].iloc[:, order].copy()
            female = census_dict['Females 15 years and over - Labour force activity'].iloc[:, order].copy()
        else:
            total = census_dict['Total - Population aged 15 years and over by Labour force status - 25% sample data'].iloc[:, order].copy()
            male = census_dict['Total - Males aged 15 years and over by Labour force status - 25% sample data'].iloc[:, order].copy()
            female = census_dict['Total - Females aged 15 years and over by Labour force status - 25% sample data'].iloc[:, order].copy()
        # Tag each table, align the headers, then stack them.
        total.insert(1, 'Type', 'Total')
        total.columns = col_names
        male.insert(1, 'Type', 'Male')
        male.columns = col_names
        female.insert(1, 'Type', 'Female')
        female.columns = col_names
        df = pd.concat([total, male, female])
    # FIX: set_axis(..., inplace=True) was removed in pandas 2.0;
    # assigning .columns is equivalent and works on every version.
    df.columns = col_names
    df.sort_values(by=['LocalArea'], inplace=True)
    census_dict['labour_force_status'] = df
    df.to_csv(file_path + '/labour_force_status.csv')
    return census_dict
###############################################################################
def clean_mobility(census_dict, year, file_path):
    """Standardize 1-year mobility census data and save it to CSV.

    Parameters
    ----------
    census_dict : dict mapping raw table names to pandas DataFrames
    year : int, census year (2001, 2006, 2011 or 2016)
    file_path : str, directory the cleaned CSV is written to

    Returns
    -------
    census_dict with the cleaned table added under 'mobility'.
    """
    column_names = ['LocalArea',
                    'Non-movers 1 yr ago',
                    'Non-migrants 1 yr ago',
                    'Migrants 1 yr ago']
    if year == 2011:
        # 2011 comes from the pre-processed NHS extract (totals only).
        df = pd.read_csv(
            'data/processed/nhs/Mobility.csv', index_col=0
        ).query('Type == "Total"').iloc[:, [-1, 1, 3, 4]]
    else:
        # The raw table name differs per census year.
        if year == 2001:
            df = census_dict[
                'Total population 1 year and over by mobility status 1 year ago']
        elif year == 2006:
            df = census_dict[
                'Total - Mobility status 1 year ago']
        else:
            df = census_dict[
                'Total - Mobility status 1 year ago - 25% sample data']
        # .copy() avoids SettingWithCopy issues and keeps the raw table intact.
        df = df.iloc[:, [0, 2, 4, 5]].copy()
    # pandas 2.0 removed set_axis(..., inplace=True); assign columns directly.
    df.columns = column_names
    df.sort_values(by=['LocalArea'], inplace=True)
    census_dict['mobility'] = df
    df.to_csv(file_path + '/mobility.csv')
    return census_dict
###############################################################################
def clean_transport_mode(census_dict, year, file_path):
    """Standardize commute-mode census data and save it to CSV.

    Produces Male/Female rows plus a computed 'Total' row per LocalArea.

    Parameters
    ----------
    census_dict : dict mapping raw table names to pandas DataFrames
    year : int, census year (2001, 2006, 2011 or 2016)
    file_path : str, directory the cleaned CSV is written to

    Returns
    -------
    census_dict with the cleaned table added under 'transport_mode'.
    """
    column_names = ['LocalArea', 'Type', 'Total',
                    'car as driver', 'car as passenger',
                    'public transportation', 'walked',
                    'bicycle', 'other transportation']
    if year == 2011:
        df = pd.read_csv('data/processed/nhs/Mode of transportation.csv',
                         index_col=0).iloc[:, [-1, 0, 1, 2, 3, 4, 5, 6, 7]]
    else:
        # .copy() so the inserts/drops below do not mutate the raw tables.
        if year == 2016:
            male = census_dict['Total - Main mode of commuting for the male employed labour force aged 15 years and over in private households with a usual place of work or no fixed workplace address - 25% sample data'].copy()
            female = census_dict['Total - Main mode of commuting for the female employed labour force aged 15 years and over in private households with a usual place of work or no fixed workplace address - 25% sample data'].copy()
            male.insert(1, 'Type', 'Male')
            female.insert(1, 'Type', 'Female')
        else:
            # 2001 uses a slightly different male table name than 2006.
            if year == 2001:
                male = census_dict['Males with a usual place of work or no fixed workplace address'].copy()
            else:
                male = census_dict['Males with usual place of work or no fixed workplace address'].copy()
            female = census_dict['Females with usual place of work or no fixed workplace address'].copy()
            male.insert(1, 'Type', 'Male')
            # Fold taxi and motorcycle counts into 'Other method' so the
            # 2001/2006 tables line up with the later category set.
            male['Other method'] = male['Other method'] + male['Taxicab'] + male['Motorcycle']
            male.drop(columns=['Taxicab', 'Motorcycle'], inplace=True)
            female.insert(1, 'Type', 'Female')
            female['Other method'] = female['Other method'] + female['Taxicab'] + female['Motorcycle']
            female.drop(columns=['Taxicab', 'Motorcycle'], inplace=True)
        male.columns = column_names
        female.columns = column_names
        df = pd.concat([male, female])
        # numeric_only=True mirrors the old pandas default of silently
        # dropping the non-numeric 'Type' column from the aggregation.
        total = df.groupby(['LocalArea']).sum(numeric_only=True).reset_index()
        total['Type'] = ['Total'] * len(total)
        df = pd.concat([df, total])
    # pandas 2.0 removed set_axis(..., inplace=True); assign columns directly.
    df.columns = column_names
    df.sort_values(by=['LocalArea'], inplace=True)
    census_dict['transport_mode'] = df
    df.to_csv(file_path + '/transport_mode.csv')
    return census_dict
###############################################################################
def clean_occupation(census_dict, year, file_path):
col_names = ['LocalArea', 'Type', 'All occupations', 'Occupations n/a',
'Management', 'Business and finance',
'Natural and applied sciences', 'Health', | |
# Main program - Version 1
# This is an example of how to use the library turboGen.py
# and cmpspec.py
# GENERATING 1D-2D-3D GAUSSIAN STOCHASTIC FIELD WITH A GIVEN POWER SPECTRUM AS INPUT
"""
Author: <NAME>
Created: 14/05/2020
"""
# ____ _ _ __ _ _ ____ __ ____
# ( __)( \/ ) / _\ ( \/ )( _ \( ) ( __)
# ) _) ) ( / \/ \/ \ ) __// (_/\ ) _)
# (____)(_/\_)\_/\_/\_)(_/(__) \____/(____)
# import library
import numpy as np
import turboGen as tg
import time
import matplotlib.pyplot as plt
import cmpspec
import matplotlib.cm
from mpl_toolkits.mplot3d import Axes3D
# ____ ____ ____ ___ ____ ____ _ _ _ _
# / ___)( _ \( __)/ __)(_ _)( _ \/ )( \( \/ )
# \___ \ ) __/ ) _)( (__ )( ) /) \/ (/ \/ \
# (____/(__) (____)\___) (__) (__\_)\____/\_)(_/
# this is the standard kolmogorov spectrum -5/3
#
class k41:
    """Kolmogorov (1941) model spectrum: E(k) proportional to k^(-5/3)."""

    def evaluate(self, k):
        """Return the spectrum value at wavenumber(s) *k*."""
        return k ** (-5.0 / 3.0)
# __ ____ ____ __ ____ __ ____
# / \ ___( \ ( __)( )( __)( ) ( \
# (_/ /(___)) D ( ) _) )( ) _) / (_/\ ) D (
# (__) (____/ (__) (__)(____)\____/(____/
# First case. let's assume 1-D
# GRID RESOLUTION nx
nx = 64
# DOMAIN DEFINITION
lx = 1
# NUMBER OF MODES
nmodes = 100
# SPECIFY THE SPECTRUM THAT WE WANT
# right now only kolmogorov -5/3
inputspec = 'k41'
# PATH folder
pathfolder = './Output'
# output file name encodes spectrum type, resolution and mode count
filename1 = inputspec + '_' + str(nx) + '_' + str(nmodes) + '_modes'
# CALL CLASS SPECTRUM
whichspect = k41().evaluate
# Defining the smallest wavenumber represented by this spectrum
wn1 = 2.0*np.pi/lx
# Summary of the user input
print("SUMMARY OF THE USER INPUTs:")
print("---------------------------")
print("Type of generator: 1D")
print("Spectrum: ", inputspec)
print("Domain size: ", lx)
print("Grid Resolution", nx)
print("Fourier accuracy (modes): ", nmodes)
#
# STARTING...
# Smallest step size
dx = lx/nx
t0 = time.time()  # initial time
# --------------------------------------------------
# Run the function TurboGenerator
# --------------------------------------------------
# r_x: the generated 1-D Gaussian stochastic field sampled on nx points
r_x = tg.gaussian1Dcos(lx, nx, nmodes, wn1, whichspect)
#
t1 = time.time()  # final time
computing_time = t1 - t0
#
print("It took me ", computing_time, "to generate the 1D turbulence.")
# COMPUTE THE POWER SPECTRUM OF THE 1-D FIELD
# verify that the generated velocities fit the spectrum
knyquist1D, wavenumbers1D, tkespec1D = cmpspec.compute1Dspectrum(r_x, lx, False)
# save the generated spectrum to a text file for later post processing
np.savetxt(pathfolder + '/1D_tkespec_' + filename1 + '.txt', np.transpose([wavenumbers1D, tkespec1D]))
# ____ ____ ____ __ ____ __ ____
# (___ \ ___( \ ( __)( )( __)( ) ( \
# / __/(___)) D ( ) _) )( ) _) / (_/\ ) D (
# (____) (____/ (__) (__)(____)\____/(____/
# First case. let's assume 2-D
# GRID RESOLUTION nx, ny
nx = 64
ny = 64
# DOMAIN DEFINITION
lx = 1
ly = 1
# NUMBER OF MODES
nmodes = 100
# SPECIFY THE SPECTRUM THAT WE WANT
# right now only kolmogorov -5/3
inputspec = 'k41'
# PATH folder
pathfolder = './Output'
# output file name encodes spectrum type, resolutions and mode count
filename2 = inputspec + '_' + str(nx) + '_' + str(ny) + '_' + str(nmodes) + '_modes'
# CALL CLASS SPECTRUM
whichspect = k41().evaluate
# Defining the smallest wavenumber represented by this spectrum
# (the smaller of the two directions' fundamental wavenumbers)
wn1 = min(2.0*np.pi/lx, 2.0*np.pi/ly)
# Summary of the user input
print("SUMMARY OF THE USER INPUTs:")
print("---------------------------")
print("Type of generator: 2D")
print("Spectrum: ", inputspec)
print("Domain size: ", lx, ly)
print("Grid Resolution", nx, ny)
print("Fourier accuracy (modes): ", nmodes)
#
# STARTING...
# Smallest step size
dx = lx/nx
dy = ly/ny
t0 = time.time()  # initial time
# --------------------------------------------------
# Run the function TurboGenerator
# --------------------------------------------------
# r_xy: the generated 2-D Gaussian stochastic field (nx by ny)
r_xy = tg.gaussian2Dcos(lx, ly, nx, ny, nmodes, wn1, whichspect)
t1 = time.time()  # final time
computing_time = t1 - t0
print("It took me ", computing_time, "to generate the 2D turbulence.")
# COMPUTE THE POWER SPECTRUM OF THE 2-D FIELD
# verify that the generated velocities fit the spectrum
knyquist2D, wavenumbers2D, tkespec2D = cmpspec.compute2Dspectrum(r_xy, lx, ly, False)
# save the generated spectrum to a text file for later post processing
np.savetxt(pathfolder + '/2D_tkespec_' + filename2 + '.txt', np.transpose([wavenumbers2D, tkespec2D]))
# ____ ____ ____ __ ____ __ ____
# ( __ \ ___( \ ( __)( )( __)( ) ( \
# (__ ((___)) D ( ) _) )( ) _) / (_/\ ) D (
# (____/ (____/ (__) (__)(____)\____/(____/
# Third case: 3-D field
# GRID RESOLUTION nx, ny, nz
nx = 64
ny = 64
nz = 64
# DOMAIN DEFINITION
lx = 1
ly = 1
lz = 1
# NUMBER OF MODES
nmodes = 100
# SPECIFY THE SPECTRUM THAT WE WANT
# right now only kolmogorov -5/3
inputspec = 'k41'
# PATH folder
pathfolder = './Output'
# output file name encodes spectrum type, resolutions and mode count
filename3 = inputspec + '_' + str(nx) + '_' + str(ny) + '_' + str(nz) + '_' + str(nmodes) + '_modes'
# CALL CLASS SPECTRUM
whichspect = k41().evaluate
# Smallest wavenumber represented by this spectrum.
# BUG FIX: the z-direction was previously ignored (only lx and ly were
# considered); include lz so anisotropic domains get the correct minimum.
# With lx == ly == lz the value is unchanged.
wn1 = min(2.0*np.pi/lx, 2.0*np.pi/ly, 2.0*np.pi/lz)
# Summary of the user input
print("SUMMARY OF THE USER INPUTs:")
print("---------------------------")
print("Type of generator: 3D")
print("Spectrum: ", inputspec)
print("Domain size: ", lx, ly, lz)
print("Grid Resolution", nx, ny, nz)
print("Fourier accuracy (modes): ", nmodes)
#
# STARTING...
# Smallest step size in each direction
dx = lx/nx
dy = ly/ny
dz = lz/nz
t0 = time.time()  # initial time
# --------------------------------------------------
# Run the function TurboGenerator
# --------------------------------------------------
# r_xyz: the generated 3-D Gaussian stochastic field (nx by ny by nz)
r_xyz = tg.gaussian3Dcos(lx, ly, lz, nx, ny, nz, nmodes, wn1, whichspect)
t1 = time.time()  # final time
computing_time = t1 - t0
print("It took me ", computing_time, "to generate the 3D turbulence.")
# COMPUTE THE POWER SPECTRUM OF THE 3-D FIELD
# verify that the generated velocities fit the spectrum
knyquist3D, wavenumbers3D, tkespec3D = cmpspec.compute3Dspectrum(r_xyz, lx, ly, lz, False)
# save the generated spectrum to a text file for later post processing
np.savetxt(pathfolder + '/3D_tkespec_' + filename3 + '.txt', np.transpose([wavenumbers3D, tkespec3D]))
# ____ __ __ ____ ____ ____ ____ _ _ __ ____ ____
# ( _ \( ) / \(_ _) ( _ \( __)/ ___)/ )( \( ) (_ _)/ ___)
# ) __// (_/\( O ) )( ) / ) _) \___ \) \/ (/ (_/\ )( \___ \
# (__) \____/ \__/ (__) (__\_)(____)(____/\____/\____/(__) (____/
# PLOT THE 1D, 2D, 3D FIELD IN REAL DOMAIN AND RELATIVE POWER SPECTRUM
# ---------------------------------------------------------------------
# Plot 1D-FIELD
# ---- 1-D field in physical space --------------------------------------
plt.rc("font", size=10, family='serif')
fig = plt.figure(figsize=(3.5, 2.8), dpi=200, constrained_layout=True)
X = np.arange(0, lx, dx)
plt.plot(X, r_x, 'k-', label='computed')
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.xlabel('Meter [m]')
plt.ylabel(r'$ \rho(x) $')
plt.legend()
plt.grid()
fig.savefig(pathfolder + '/1D_field_' + filename1 + '.pdf')
# ---- 2-D field as a filled contour plot -------------------------------
plt.rc("font", size=10, family='serif')
fig = plt.figure(figsize=(3.5, 2.8), dpi=200, constrained_layout=True)
X, Y = np.meshgrid(np.arange(0, lx, dx), np.arange(0, ly, dy))
# Pass the colormap by name: matplotlib.cm.get_cmap() was deprecated in
# matplotlib 3.7 and removed in 3.9; the string form works on all versions.
cp = plt.contourf(X, Y, r_xy, cmap='plasma')
cb = plt.colorbar(cp)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.xlabel('Meter [m]')
plt.ylabel('Meter [m]')
cb.set_label(r'$ \rho(x,y) $', rotation=270)
plt.grid()
fig.savefig(pathfolder + '/2D_field_' + filename2 + '.pdf')
plt.show()
# ---- 3-D field: plot a single z-slice of the volume -------------------
plt.rc("font", size=10, family='serif')
fig = plt.figure(figsize=(3.5, 2.8), dpi=200, constrained_layout=True)
X, Y = np.meshgrid(np.arange(0, lx, dx), np.arange(0, ly, dy))
cp = plt.contourf(X, Y, r_xyz[:, :, 1], cmap='plasma')
cb = plt.colorbar(cp)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.xlabel('Meter [m]')
plt.ylabel('Meter [m]')
cb.set_label(r'$ \rho(x,y) $', rotation=270)
plt.grid()
fig.savefig(pathfolder + '/3D_field_slice_' + filename3 + '.pdf')
plt.show()
# --------------------------------------------------------------
# PLOT NUMERICAL AND THEORICAL POWER SPECTRUM
# Plot in log-log
# --------------------------------------------------------------
# PLOT 1-D FIELD SPECTRUM
# Range of wavenumbers from minimum wavenumber wn1 up to 2000
plt.rc("font", size=10, family='serif')
fig = plt.figure(figsize=(3.5, 2.8), dpi=200, constrained_layout=True)
wnn = np.arange(wn1, 2000)
l1, = plt.loglog(wnn, whichspect(wnn), 'k-', label='input')
l2, = plt.loglog(wavenumbers1D[1:6], tkespec1D[1:6], 'bo--', markersize=3, markerfacecolor='w', markevery=1, label='computed')
plt.loglog(wavenumbers1D[5:], tkespec1D[5:], 'bo--', markersize=3, markerfacecolor='w', markevery=4)
plt.axis([3, 10000, 1e-7, 1e-1])
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.axvline(x=knyquist1D, linestyle='--', color='black')
# raw strings: without the r-prefix, '\k' in '$\kappa$' is an invalid
# escape sequence (DeprecationWarning now, SyntaxError in future Python)
plt.xlabel(r'$\kappa$ [1/m]')
plt.ylabel(r'$E(\kappa)$ [m$^3$/s$^2$]')
plt.grid()
plt.legend()
fig.savefig(pathfolder + '/1D_tkespec_' + filename1 + '.pdf')
plt.show()
# PLOT 2-D FIELD SPECTRUM
# Range of wavenumbers from minimum wavenumber wn1 up to 2000
plt.rc("font", size=10, family='serif')
fig = plt.figure(figsize=(3.5, 2.8), dpi=200, constrained_layout=True)
wnn = np.arange(wn1, 2000)
l1, = plt.loglog(wnn, whichspect(wnn), 'k-', label='input')
l2, = plt.loglog(wavenumbers2D[1:6], tkespec2D[1:6], 'bo--', markersize=3, markerfacecolor='w', markevery=1, label='computed')
plt.loglog(wavenumbers2D[5:], tkespec2D[5:], 'bo--', markersize=3, markerfacecolor='w', markevery=4)
plt.axis([3, 10000, 1e-7, 1e-1])
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.axvline(x=knyquist2D, linestyle='--', color='black')
plt.xlabel(r'$\kappa$ [1/m]')
plt.ylabel(r'$E(\kappa)$ [m$^3$/s$^2$]')
plt.grid()
plt.legend()
fig.savefig(pathfolder + '/2D_tkespec_' + filename2 + '.pdf')
plt.show()
# PLOT 3-D FIELD SPECTRUM
# Range of wavenumbers from minimum wavenumber wn1 up to 2000
plt.rc("font", size=10, family='serif')
fig = plt.figure(figsize=(3.5, 2.8), dpi=200, constrained_layout=True)
wnn = np.arange(wn1, 2000)
l1, = plt.loglog(wnn, whichspect(wnn), 'k-', label='input')
l2, = plt.loglog(wavenumbers3D[1:6], tkespec3D[1:6], 'bo--', markersize=3, markerfacecolor='w', markevery=1, label='computed')
plt.loglog(wavenumbers3D[5:], tkespec3D[5:], 'bo--', markersize=3, markerfacecolor='w', markevery=4)
plt.axis([3, 10000, 1e-7, 1e-1])
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.axvline(x=knyquist3D, linestyle='--', color='black')
plt.xlabel(r'$\kappa$ [1/m]')
plt.ylabel(r'$E(\kappa)$ [m$^3$/s$^2$]')
plt.grid()
plt.legend()
fig.savefig(pathfolder + '/3D_tkespec_' + filename3 + '.pdf')
plt.show()
# ____ ____ ____ __ __ ____ ____ __ ____ __ ____
# ( __ \ ___( \ ( _ \( ) / \(_ _) ( __)( )( __)( ) ( \
# (__ ((___)) D ( ) __// (_/\( O ) )( ) _) )( ) _) / (_/\ ) D (
# (____/ (____/ (__) \____/ \__/ (__) (__) (__)(____)\____/(____/
# plt.rc("font", size=10, family='serif')
# fig = plt.figure(figsize=(3.5, 2.8), dpi=200, constrained_layout=True)
# ax = fig.gca(projection='3d')
# X, Y = np.meshgrid(np.arange(0,lx,dx),np.arange(0,ly,dy))
# cset = [[],[],[]]
# # this is the example that worked for you:
# Z = r_xyz[0,:,:]
# cset[0] = ax.contourf(Z, X, Y, zdir = 'x', offset = , cmap = matplotlib.cm.get_cmap('plasma'))
# # cset[0] = ax.contourf(X, Y, Z, zdir = 'y', offset | |
'''
bounds = np.asanyarray(bounds, dtype=np.float64)
if len(bounds) != 2:
raise ValueError('bounds must be (2, dimension!')
count = np.asanyarray(count, dtype=np.int)
if count.shape == ():
count = np.tile(count, bounds.shape[1])
grid_elements = [np.linspace(*b, num=c) for b, c in zip(bounds.T, count)]
grid = np.vstack(np.meshgrid(*grid_elements)
).reshape(bounds.shape[1], -1).T
return grid
def replace_references(data, reference_dict):
    '''
    Return a flattened copy of *data* with any value that appears as a key
    in reference_dict swapped for the corresponding mapped value.

    Note: np.array(data) copies, so the caller's input is left untouched;
    the replaced, flattened array is returned.
    '''
    flat = np.array(data).view().reshape((-1))
    for index in range(len(flat)):
        current = flat[index]
        if current in reference_dict:
            flat[index] = reference_dict[current]
    return flat
def multi_dict(pairs):
    '''
    Build a dictionary from key/value pairs, keeping every value.

    Unlike the regular dict(pairs) constructor, values that share a key
    are accumulated into a list instead of overwriting each other.

    Parameters
    ----------
    pairs: (n,2) array of key, value pairs

    Returns
    ----------
    result: dict, with all values stored (rather than last with regular dict)
    '''
    grouped = collections.defaultdict(list)
    for key, value in pairs:
        grouped[key].append(value)
    return grouped
def tolist_dict(data):
    '''
    Copy a dict, converting every value that has a .tolist() method
    (e.g. numpy arrays) to a plain Python list; other values pass through.
    '''
    converted = {}
    for key, value in data.items():
        if hasattr(value, 'tolist'):
            converted[key] = value.tolist()
        else:
            converted[key] = value
    return converted
def is_binary_file(file_obj):
    '''
    Return True if the first 1024 bytes/chars of the file contain any
    value above 0x7F (127), i.e. non-ASCII content.
    The file position is restored before returning.
    Works with both text-mode and binary-mode file objects.
    '''
    start = file_obj.tell()
    chunk = file_obj.read(1024)
    file_obj.seek(start)
    if isinstance(chunk, str):
        # text mode: compare character code points
        return any(ord(char) > 127 for char in chunk)
    # binary mode: iterating bytes yields ints directly
    return any(byte > 127 for byte in chunk)
def distance_to_end(file_obj):
    '''
    Return how many bytes remain between the current position and the
    end of an open file-like object. The position is restored.

    Parameters
    ----------
    file_obj: open file-like object

    Returns
    ----------
    distance: int, bytes to end of file
    '''
    here = file_obj.tell()
    file_obj.seek(0, 2)  # whence=2: seek relative to end of file
    end = file_obj.tell()
    file_obj.seek(here)
    return end - here
def decimal_to_digits(decimal, min_digits=None):
    '''
    Return the number of digits to the first nonzero decimal.

    Parameters
    -----------
    decimal: float
    min_digits: int, minimum number of digits to return

    Returns
    -----------
    digits: int, number of digits to the first nonzero decimal
    '''
    count = abs(int(np.log10(decimal)))
    if min_digits is None:
        return count
    # clamp into [min_digits, 20] when a floor is requested
    return np.clip(count, min_digits, 20)
def hash_file(file_obj,
              hash_function=hashlib.md5):
    '''
    Hash the entire remaining contents of an open file-like object.
    The file position is restored before returning.

    Parameters
    ---------
    file_obj: file like object
    hash_function: function to use to hash data

    Returns
    ---------
    hashed: str, hex version of result
    '''
    # remember where the caller left the cursor (usually 0)
    origin = file_obj.tell()
    digest = hash_function()
    digest.update(file_obj.read())
    # put the cursor back so the caller can keep reading
    file_obj.seek(origin)
    return digest.hexdigest()
def md5_object(obj):
    '''
    Return the hexadecimal MD5 digest of a hashable (bytes-like) object.

    Parameters
    -----------
    obj: object supporting the buffer protocol

    Returns
    ----------
    md5: str, MD5 hash
    '''
    # hashlib.md5(obj) is equivalent to md5() followed by update(obj)
    return hashlib.md5(obj).hexdigest()
def md5_array(array, digits=5):
    '''
    Take the MD5 of an array when considering the specified number of digits.

    Values are scaled by 10**digits and truncated to int64 so that two
    arrays equal to that precision hash identically.

    Parameters
    ---------
    array: numpy array
    digits: int, number of digits to account for in the MD5

    Returns
    ---------
    md5: str, md5 hash of input
    '''
    digits = int(digits)
    array = np.asanyarray(array, dtype=np.float64).reshape(-1)
    as_int = (array * 10 ** digits).astype(np.int64)
    # ndarray.tostring() was deprecated in numpy 1.19 and removed in 1.23;
    # tobytes() returns the identical byte sequence.
    md5 = md5_object(as_int.tobytes(order='C'))
    return md5
def attach_to_log(log_level=logging.DEBUG,
                  handler=None,
                  blacklist=['TerminalIPythonApp', 'PYREADLINE']):
    '''
    Attach a stream handler to all loggers.

    Parameters
    ----------
    log_level: level both the handler and each logger are set to
    handler: logging.Handler to attach; a StreamHandler is created if None
    blacklist: logger names to leave untouched

    NOTE(review): the mutable default for blacklist is never mutated in
    this function, so it is safe in practice.
    '''
    # Prefer colored output when the optional colorlog package is installed;
    # fall back to a plain timestamped formatter otherwise.
    try:
        from colorlog import ColoredFormatter
        formatter = ColoredFormatter(
            ("%(log_color)s%(levelname)-8s%(reset)s " +
             "%(filename)17s:%(lineno)-4s %(blue)4s%(message)s"),
            datefmt=None,
            reset=True,
            log_colors={'DEBUG': 'cyan',
                        'INFO': 'green',
                        'WARNING': 'yellow',
                        'ERROR': 'red',
                        'CRITICAL': 'red'})
    except ImportError:
        formatter = logging.Formatter(
            "[%(asctime)s] %(levelname)-7s (%(filename)s:%(lineno)3s) %(message)s",
            "%Y-%m-%d %H:%M:%S")
    if handler is None:
        handler = logging.StreamHandler()
    handler.setFormatter(formatter)
    handler.setLevel(log_level)
    # Attach to every real Logger the manager already knows about, skipping
    # PlaceHolder entries and blacklisted names.
    for logger in logging.Logger.manager.loggerDict.values():
        if (logger.__class__.__name__ != 'Logger' or
                logger.name in blacklist):
            continue
        logger.addHandler(handler)
        logger.setLevel(log_level)
    # side effect: also tightens numpy's printed float precision
    np.set_printoptions(precision=5, suppress=True)
def tracked_array(array, dtype=None):
    '''
    Return a C-contiguous TrackedArray view of the given data, so that
    subsequent in-place modifications are flagged for cache invalidation.
    '''
    view = np.ascontiguousarray(array,
                                dtype=dtype).view(TrackedArray)
    assert view.flags['C_CONTIGUOUS']
    return view
class TrackedArray(np.ndarray):
    '''
    Track changes in a numpy ndarray.

    A ``_modified`` flag is raised by every in-place operator and item
    assignment; ``md5`` and ``crc`` only recompute their cached digest
    while that flag is set.

    Methods
    ----------
    md5: returns hexadecimal string of md5 of array
    crc: returns int zlib.adler32 checksum of array
    '''

    def __array_finalize__(self, obj):
        '''
        Sets a modified flag on every TrackedArray.
        This flag will be set on every change, as well as during copies
        and certain types of slicing.
        '''
        self._modified = True
        if isinstance(obj, type(self)):
            obj._modified = True

    def md5(self):
        '''
        Return an MD5 hash of the current array in hexadecimal string form.

        This is only recomputed if the modified flag is set, which may have
        false positives (forcing an unnecessary recompute) but will not have
        false negatives which would return an incorrect hash.
        '''
        if self._modified or not hasattr(self, '_hashed_md5'):
            if self.flags['C_CONTIGUOUS']:
                self._hashed_md5 = md5_object(self)
            else:
                # a slice taken *after* tracking can be non-contiguous,
                # e.g. t = util.tracked_array(np.random.random(10))[::-1];
                # hashing needs a contiguous buffer
                contiguous = np.ascontiguousarray(self)
                self._hashed_md5 = md5_object(contiguous)
        self._modified = False
        return self._hashed_md5

    def crc(self):
        '''
        Return a zlib adler32 checksum of the current data.
        '''
        if self._modified or not hasattr(self, '_hashed_crc'):
            if self.flags['C_CONTIGUOUS']:
                self._hashed_crc = zlib.adler32(self) & 0xffffffff
            else:
                # see md5(): non-contiguous views must be copied first
                contiguous = np.ascontiguousarray(self)
                self._hashed_crc = zlib.adler32(contiguous) & 0xffffffff
        self._modified = False
        return self._hashed_crc

    def __hash__(self):
        '''
        Hash is required to return an int, so we convert the hex string to int.
        '''
        return int(self.md5(), 16)

    # In-place operators mark the array dirty, then defer to ndarray.
    # BUG FIX: these previously called super(self.__class__, self), which
    # recurses infinitely as soon as TrackedArray is subclassed; the class
    # must be named explicitly.
    def __iadd__(self, other):
        self._modified = True
        return super(TrackedArray, self).__iadd__(other)

    def __isub__(self, other):
        self._modified = True
        return super(TrackedArray, self).__isub__(other)

    def __imul__(self, other):
        self._modified = True
        return super(TrackedArray, self).__imul__(other)

    def __ipow__(self, other):
        self._modified = True
        return super(TrackedArray, self).__ipow__(other)

    def __imod__(self, other):
        self._modified = True
        return super(TrackedArray, self).__imod__(other)

    def __ifloordiv__(self, other):
        self._modified = True
        return super(TrackedArray, self).__ifloordiv__(other)

    def __ilshift__(self, other):
        self._modified = True
        return super(TrackedArray, self).__ilshift__(other)

    def __irshift__(self, other):
        self._modified = True
        return super(TrackedArray, self).__irshift__(other)

    def __iand__(self, other):
        self._modified = True
        return super(TrackedArray, self).__iand__(other)

    def __ixor__(self, other):
        self._modified = True
        return super(TrackedArray, self).__ixor__(other)

    def __ior__(self, other):
        self._modified = True
        return super(TrackedArray, self).__ior__(other)

    def __setitem__(self, i, y):
        self._modified = True
        super(TrackedArray, self).__setitem__(i, y)

    def __setslice__(self, i, j, y):
        # Python 2 only; Python 3 routes slice assignment through __setitem__
        self._modified = True
        super(TrackedArray, self).__setslice__(i, j, y)
def cache_decorator(function):
    '''
    Turn a method into a property whose result is stored under the
    method's name in self._cache; the function only runs on a miss.
    '''
    @wraps(function)
    def get_cached(*args, **kwargs):
        instance = args[0]
        key = function.__name__
        if key not in instance._cache:
            tic = time.time()
            instance._cache[key] = function(*args, **kwargs)
            toc = time.time()
            log.debug('%s was not in cache, executed in %.6f',
                      key,
                      toc - tic)
        return instance._cache[key]
    return property(get_cached)
class Cache:
    '''
    Class to cache values until an id function changes.

    Stored values are discarded as soon as ``id_function`` reports a
    value different from the one observed last, unless the cache is
    locked by using it as a context manager.
    '''
    def __init__(self, id_function=None):
        # With no id_function the id never changes, so nothing is evicted.
        if id_function is None:
            self._id_function = lambda: None
        else:
            self._id_function = id_function
        self.id_current = self._id_function()
        # _lock > 0 while inside a `with` block; suppresses invalidation
        self._lock = 0
        self.cache = {}

    def get(self, key):
        '''
        Get a key from the cache.
        If the key is unavailable or the cache has been invalidated returns None.
        '''
        self.verify()
        if key in self.cache:
            return self.cache[key]
        return None

    def delete(self, key):
        '''
        Remove a key from the cache.
        '''
        if key in self.cache:
            self.cache.pop(key, None)

    def verify(self):
        '''
        Verify that the cached values are still for the same value of id_function,
        and delete all stored items if the value of id_function has changed.
        '''
        id_new = self._id_function()
        # only invalidate when unlocked and the id actually changed
        if (self._lock == 0) and (id_new != self.id_current):
            if len(self.cache) > 0:
                log.debug('%d items cleared from cache: %s',
                          len(self.cache),
                          str(list(self.cache.keys())))
            self.clear()
            self.id_set()

    def clear(self, exclude=None):
        '''
        Remove all elements in the cache; when *exclude* is given,
        keys listed in it are retained.
        '''
        if exclude is None:
            self.cache = {}
        else:
            self.cache = {k: v for k, v in self.cache.items() if k in exclude}

    def update(self, items):
        '''
        Update the cache with a set of key, value pairs without checking id_function.
        '''
        self.cache.update(items)
        self.id_set()

    def id_set(self):
        # Record the current id so future verify() calls compare against it.
        self.id_current = self._id_function()

    def set(self, key, value):
        # Store a value (after a validity check) and return it for chaining.
        self.verify()
        self.cache[key] = value
        return value

    def __getitem__(self, key):
        return self.get(key)

    def __setitem__(self, key, value):
        return self.set(key, value)

    def __contains__(self, key):
        self.verify()
        return key in self.cache

    def __len__(self):
        self.verify()
        return len(self.cache)

    def __enter__(self):
        # Entering the context locks the cache against invalidation.
        self._lock += 1
def __exit__(self, *args):
self._lock -= | |
<gh_stars>1-10
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.12
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
# SWIG-generated bootstrap: locate and import the compiled extension module
# '_GraphLib_c', with strategies depending on the Python version.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info >= (2, 7, 0):
    # Python >= 2.7: use importlib, trying the package-relative name first.
    def swig_import_helper():
        import importlib
        pkg = __name__.rpartition('.')[0]
        mname = '.'.join((pkg, '_GraphLib_c')).lstrip('.')
        try:
            return importlib.import_module(mname)
        except ImportError:
            # fall back to a top-level module of the same name
            return importlib.import_module('_GraphLib_c')
    _GraphLib_c = swig_import_helper()
    del swig_import_helper
elif _swig_python_version_info >= (2, 6, 0):
    # Python 2.6: fall back to the (long-deprecated) imp module.
    def swig_import_helper():
        from os.path import dirname
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module('_GraphLib_c', [dirname(__file__)])
        except ImportError:
            import _GraphLib_c
            return _GraphLib_c
        try:
            _mod = imp.load_module('_GraphLib_c', fp, pathname, description)
        finally:
            # find_module opens the file; always close it
            if fp is not None:
                fp.close()
        return _mod
    _GraphLib_c = swig_import_helper()
    del swig_import_helper
else:
    # very old Pythons: plain import
    import _GraphLib_c
del _swig_python_version_info
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
    # SWIG-generated attribute setter: routes writes through the proxy
    # class's __swig_setmethods__ table when a C-level setter exists.
    if (name == "thisown"):
        return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name, None)
    if method:
        return method(self, value)
    if (not static):
        # dynamic proxies may grow arbitrary Python-side attributes
        if _newclass:
            object.__setattr__(self, name, value)
        else:
            self.__dict__[name] = value
    else:
        # static proxies refuse attributes unknown to the C++ type
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
    # Non-static variant of _swig_setattr_nondynamic (allows new attributes).
    return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr(self, class_type, name):
    # SWIG-generated attribute getter backed by __swig_getmethods__.
    if (name == "thisown"):
        return self.this.own()
    method = class_type.__swig_getmethods__.get(name, None)
    if method:
        return method(self)
    raise AttributeError("'%s' object has no attribute '%s'" % (class_type.__name__, name))
def _swig_repr(self):
    # repr showing the proxied C++ object when one is attached.
    try:
        strthis = "proxy of " + self.this.__repr__()
    except __builtin__.Exception:
        strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
# Old-/new-style class compatibility shim (Python 2 legacy):
# _newclass records whether `object` is available as a base class.
try:
    _object = object
    _newclass = 1
except __builtin__.Exception:
    class _object:
        pass
    _newclass = 0
class SwigPyIterator(_object):
    '''
    SWIG-generated proxy for the underlying C++ iterator type.

    Abstract: instances are only obtained from container methods such as
    IntVector.iterator(); the constructor always raises. All methods
    delegate to the compiled _GraphLib_c extension module.
    '''
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, SwigPyIterator, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, SwigPyIterator, name)

    def __init__(self, *args, **kwargs):
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    __swig_destroy__ = _GraphLib_c.delete_SwigPyIterator
    __del__ = lambda self: None

    def value(self):
        return _GraphLib_c.SwigPyIterator_value(self)

    def incr(self, n=1):
        return _GraphLib_c.SwigPyIterator_incr(self, n)

    def decr(self, n=1):
        return _GraphLib_c.SwigPyIterator_decr(self, n)

    def distance(self, x):
        return _GraphLib_c.SwigPyIterator_distance(self, x)

    def equal(self, x):
        return _GraphLib_c.SwigPyIterator_equal(self, x)

    def copy(self):
        return _GraphLib_c.SwigPyIterator_copy(self)

    def next(self):
        return _GraphLib_c.SwigPyIterator_next(self)

    def __next__(self):
        return _GraphLib_c.SwigPyIterator___next__(self)

    def previous(self):
        return _GraphLib_c.SwigPyIterator_previous(self)

    def advance(self, n):
        return _GraphLib_c.SwigPyIterator_advance(self, n)

    def __eq__(self, x):
        return _GraphLib_c.SwigPyIterator___eq__(self, x)

    def __ne__(self, x):
        return _GraphLib_c.SwigPyIterator___ne__(self, x)

    def __iadd__(self, n):
        return _GraphLib_c.SwigPyIterator___iadd__(self, n)

    def __isub__(self, n):
        return _GraphLib_c.SwigPyIterator___isub__(self, n)

    def __add__(self, n):
        return _GraphLib_c.SwigPyIterator___add__(self, n)

    def __sub__(self, *args):
        return _GraphLib_c.SwigPyIterator___sub__(self, *args)

    def __iter__(self):
        return self
# register the proxy class with the C extension
SwigPyIterator_swigregister = _GraphLib_c.SwigPyIterator_swigregister
SwigPyIterator_swigregister(SwigPyIterator)
class IntVector(_object):
    '''
    SWIG-generated proxy of a C++ std::vector<int>.

    Supports the Python sequence protocol (indexing, slicing, iteration,
    len) in addition to the usual std::vector interface; every method
    delegates to the compiled _GraphLib_c extension module.
    '''
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, IntVector, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, IntVector, name)
    __repr__ = _swig_repr

    def iterator(self):
        return _GraphLib_c.IntVector_iterator(self)

    def __iter__(self):
        return self.iterator()

    def __nonzero__(self):
        return _GraphLib_c.IntVector___nonzero__(self)

    def __bool__(self):
        return _GraphLib_c.IntVector___bool__(self)

    def __len__(self):
        return _GraphLib_c.IntVector___len__(self)

    def __getslice__(self, i, j):
        return _GraphLib_c.IntVector___getslice__(self, i, j)

    def __setslice__(self, *args):
        return _GraphLib_c.IntVector___setslice__(self, *args)

    def __delslice__(self, i, j):
        return _GraphLib_c.IntVector___delslice__(self, i, j)

    def __delitem__(self, *args):
        return _GraphLib_c.IntVector___delitem__(self, *args)

    def __getitem__(self, *args):
        return _GraphLib_c.IntVector___getitem__(self, *args)

    def __setitem__(self, *args):
        return _GraphLib_c.IntVector___setitem__(self, *args)

    def pop(self):
        return _GraphLib_c.IntVector_pop(self)

    def append(self, x):
        return _GraphLib_c.IntVector_append(self, x)

    def empty(self):
        return _GraphLib_c.IntVector_empty(self)

    def size(self):
        return _GraphLib_c.IntVector_size(self)

    def swap(self, v):
        return _GraphLib_c.IntVector_swap(self, v)

    def begin(self):
        return _GraphLib_c.IntVector_begin(self)

    def end(self):
        return _GraphLib_c.IntVector_end(self)

    def rbegin(self):
        return _GraphLib_c.IntVector_rbegin(self)

    def rend(self):
        return _GraphLib_c.IntVector_rend(self)

    def clear(self):
        return _GraphLib_c.IntVector_clear(self)

    def get_allocator(self):
        return _GraphLib_c.IntVector_get_allocator(self)

    def pop_back(self):
        return _GraphLib_c.IntVector_pop_back(self)

    def erase(self, *args):
        return _GraphLib_c.IntVector_erase(self, *args)

    def __init__(self, *args):
        # attach the C++ object created by the extension to this proxy
        this = _GraphLib_c.new_IntVector(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this

    def push_back(self, x):
        return _GraphLib_c.IntVector_push_back(self, x)

    def front(self):
        return _GraphLib_c.IntVector_front(self)

    def back(self):
        return _GraphLib_c.IntVector_back(self)

    def assign(self, n, x):
        return _GraphLib_c.IntVector_assign(self, n, x)

    def resize(self, *args):
        return _GraphLib_c.IntVector_resize(self, *args)

    def insert(self, *args):
        return _GraphLib_c.IntVector_insert(self, *args)

    def reserve(self, n):
        return _GraphLib_c.IntVector_reserve(self, n)

    def capacity(self):
        return _GraphLib_c.IntVector_capacity(self)
    __swig_destroy__ = _GraphLib_c.delete_IntVector
    __del__ = lambda self: None
# register the proxy class with the C extension
IntVector_swigregister = _GraphLib_c.IntVector_swigregister
IntVector_swigregister(IntVector)
class SizeVectorVector(_object):
    # SWIG-generated proxy for a C++ vector-of-vectors (presumably
    # std::vector<std::vector<size_t>> — inferred from the name; confirm in
    # the .i file). Do not hand-edit; regenerate instead.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, SizeVectorVector, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, SizeVectorVector, name)
    __repr__ = _swig_repr
    def iterator(self):
        return _GraphLib_c.SizeVectorVector_iterator(self)
    def __iter__(self):
        return self.iterator()
    def __nonzero__(self):
        # Python 2 truthiness hook; __bool__ below is the Python 3 twin.
        return _GraphLib_c.SizeVectorVector___nonzero__(self)
    def __bool__(self):
        return _GraphLib_c.SizeVectorVector___bool__(self)
    def __len__(self):
        return _GraphLib_c.SizeVectorVector___len__(self)
    def __getslice__(self, i, j):
        return _GraphLib_c.SizeVectorVector___getslice__(self, i, j)
    def __setslice__(self, *args):
        return _GraphLib_c.SizeVectorVector___setslice__(self, *args)
    def __delslice__(self, i, j):
        return _GraphLib_c.SizeVectorVector___delslice__(self, i, j)
    def __delitem__(self, *args):
        return _GraphLib_c.SizeVectorVector___delitem__(self, *args)
    def __getitem__(self, *args):
        return _GraphLib_c.SizeVectorVector___getitem__(self, *args)
    def __setitem__(self, *args):
        return _GraphLib_c.SizeVectorVector___setitem__(self, *args)
    def pop(self):
        return _GraphLib_c.SizeVectorVector_pop(self)
    def append(self, x):
        return _GraphLib_c.SizeVectorVector_append(self, x)
    def empty(self):
        return _GraphLib_c.SizeVectorVector_empty(self)
    def size(self):
        return _GraphLib_c.SizeVectorVector_size(self)
    def swap(self, v):
        return _GraphLib_c.SizeVectorVector_swap(self, v)
    def begin(self):
        return _GraphLib_c.SizeVectorVector_begin(self)
    def end(self):
        return _GraphLib_c.SizeVectorVector_end(self)
    def rbegin(self):
        return _GraphLib_c.SizeVectorVector_rbegin(self)
    def rend(self):
        return _GraphLib_c.SizeVectorVector_rend(self)
    def clear(self):
        return _GraphLib_c.SizeVectorVector_clear(self)
    def get_allocator(self):
        return _GraphLib_c.SizeVectorVector_get_allocator(self)
    def pop_back(self):
        return _GraphLib_c.SizeVectorVector_pop_back(self)
    def erase(self, *args):
        return _GraphLib_c.SizeVectorVector_erase(self, *args)
    def __init__(self, *args):
        # Attach the new C++ object; try/except covers both SWIG
        # 'this'-storage variants.
        this = _GraphLib_c.new_SizeVectorVector(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    def push_back(self, x):
        return _GraphLib_c.SizeVectorVector_push_back(self, x)
    def front(self):
        return _GraphLib_c.SizeVectorVector_front(self)
    def back(self):
        return _GraphLib_c.SizeVectorVector_back(self)
    def assign(self, n, x):
        return _GraphLib_c.SizeVectorVector_assign(self, n, x)
    def resize(self, *args):
        return _GraphLib_c.SizeVectorVector_resize(self, *args)
    def insert(self, *args):
        return _GraphLib_c.SizeVectorVector_insert(self, *args)
    def reserve(self, n):
        return _GraphLib_c.SizeVectorVector_reserve(self, n)
    def capacity(self):
        return _GraphLib_c.SizeVectorVector_capacity(self)
    __swig_destroy__ = _GraphLib_c.delete_SizeVectorVector
    __del__ = lambda self: None
# Register the proxy class with the SWIG runtime type system.
SizeVectorVector_swigregister = _GraphLib_c.SizeVectorVector_swigregister
SizeVectorVector_swigregister(SizeVectorVector)
class IntVectorVector(_object):
    # SWIG-generated proxy for a C++ vector-of-vectors of ints (name-based
    # inference — confirm in the .i file). Do not hand-edit; regenerate.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, IntVectorVector, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, IntVectorVector, name)
    __repr__ = _swig_repr
    def iterator(self):
        return _GraphLib_c.IntVectorVector_iterator(self)
    def __iter__(self):
        return self.iterator()
    def __nonzero__(self):
        # Python 2 truthiness hook; __bool__ below is the Python 3 twin.
        return _GraphLib_c.IntVectorVector___nonzero__(self)
    def __bool__(self):
        return _GraphLib_c.IntVectorVector___bool__(self)
    def __len__(self):
        return _GraphLib_c.IntVectorVector___len__(self)
    def __getslice__(self, i, j):
        return _GraphLib_c.IntVectorVector___getslice__(self, i, j)
    def __setslice__(self, *args):
        return _GraphLib_c.IntVectorVector___setslice__(self, *args)
    def __delslice__(self, i, j):
        return _GraphLib_c.IntVectorVector___delslice__(self, i, j)
    def __delitem__(self, *args):
        return _GraphLib_c.IntVectorVector___delitem__(self, *args)
    def __getitem__(self, *args):
        return _GraphLib_c.IntVectorVector___getitem__(self, *args)
    def __setitem__(self, *args):
        return _GraphLib_c.IntVectorVector___setitem__(self, *args)
    def pop(self):
        return _GraphLib_c.IntVectorVector_pop(self)
    def append(self, x):
        return _GraphLib_c.IntVectorVector_append(self, x)
    def empty(self):
        return _GraphLib_c.IntVectorVector_empty(self)
    def size(self):
        return _GraphLib_c.IntVectorVector_size(self)
    def swap(self, v):
        return _GraphLib_c.IntVectorVector_swap(self, v)
    def begin(self):
        return _GraphLib_c.IntVectorVector_begin(self)
    def end(self):
        return _GraphLib_c.IntVectorVector_end(self)
    def rbegin(self):
        return _GraphLib_c.IntVectorVector_rbegin(self)
    def rend(self):
        return _GraphLib_c.IntVectorVector_rend(self)
    def clear(self):
        return _GraphLib_c.IntVectorVector_clear(self)
    def get_allocator(self):
        return _GraphLib_c.IntVectorVector_get_allocator(self)
    def pop_back(self):
        return _GraphLib_c.IntVectorVector_pop_back(self)
    def erase(self, *args):
        return _GraphLib_c.IntVectorVector_erase(self, *args)
    def __init__(self, *args):
        # Attach the new C++ object; try/except covers both SWIG
        # 'this'-storage variants.
        this = _GraphLib_c.new_IntVectorVector(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    def push_back(self, x):
        return _GraphLib_c.IntVectorVector_push_back(self, x)
    def front(self):
        return _GraphLib_c.IntVectorVector_front(self)
    def back(self):
        return _GraphLib_c.IntVectorVector_back(self)
    def assign(self, n, x):
        return _GraphLib_c.IntVectorVector_assign(self, n, x)
    def resize(self, *args):
        return _GraphLib_c.IntVectorVector_resize(self, *args)
    def insert(self, *args):
        return _GraphLib_c.IntVectorVector_insert(self, *args)
    def reserve(self, n):
        return _GraphLib_c.IntVectorVector_reserve(self, n)
    def capacity(self):
        return _GraphLib_c.IntVectorVector_capacity(self)
    __swig_destroy__ = _GraphLib_c.delete_IntVectorVector
    __del__ = lambda self: None
# Register the proxy class with the SWIG runtime type system.
IntVectorVector_swigregister = _GraphLib_c.IntVectorVector_swigregister
IntVectorVector_swigregister(IntVectorVector)
class SizePair(_object):
    # SWIG-generated proxy for a C++ pair of sizes, exposing 'first' and
    # 'second' as attributes plus minimal sequence behaviour.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, SizePair, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, SizePair, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        # Attach the new C++ pair; try/except covers both SWIG
        # 'this'-storage variants.
        this = _GraphLib_c.new_SizePair(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_setmethods__["first"] = _GraphLib_c.SizePair_first_set
    __swig_getmethods__["first"] = _GraphLib_c.SizePair_first_get
    if _newclass:
        first = _swig_property(_GraphLib_c.SizePair_first_get, _GraphLib_c.SizePair_first_set)
    __swig_setmethods__["second"] = _GraphLib_c.SizePair_second_set
    __swig_getmethods__["second"] = _GraphLib_c.SizePair_second_get
    if _newclass:
        second = _swig_property(_GraphLib_c.SizePair_second_get, _GraphLib_c.SizePair_second_set)
    def __len__(self):
        # A pair always has exactly two elements.
        return 2
    def __repr__(self):
        # Overrides the _swig_repr assigned above; renders like a tuple.
        return str((self.first, self.second))
    def __getitem__(self, index):
        # Even indices return 'first', odd indices 'second' — note there is
        # deliberately no bounds check in this generated code.
        if not (index % 2):
            return self.first
        else:
            return self.second
    def __setitem__(self, index, val):
        # Same even/odd mapping as __getitem__, again without bounds checks.
        if not (index % 2):
            self.first = val
        else:
            self.second = val
    __swig_destroy__ = _GraphLib_c.delete_SizePair
    __del__ = lambda self: None
# Register the proxy class with the SWIG runtime type system.
SizePair_swigregister = _GraphLib_c.SizePair_swigregister
SizePair_swigregister(SizePair)
class SizeVectorPair(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, SizeVectorPair, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, SizeVectorPair, name)
__repr__ = _swig_repr
def iterator(self):
return _GraphLib_c.SizeVectorPair_iterator(self)
def __iter__(self):
return self.iterator()
def __nonzero__(self):
return _GraphLib_c.SizeVectorPair___nonzero__(self)
def __bool__(self):
return _GraphLib_c.SizeVectorPair___bool__(self)
def __len__(self):
return _GraphLib_c.SizeVectorPair___len__(self)
def __getslice__(self, i, j):
return _GraphLib_c.SizeVectorPair___getslice__(self, i, j)
def __setslice__(self, *args):
return _GraphLib_c.SizeVectorPair___setslice__(self, *args)
def __delslice__(self, i, j):
return _GraphLib_c.SizeVectorPair___delslice__(self, i, j)
def __delitem__(self, *args):
return _GraphLib_c.SizeVectorPair___delitem__(self, *args)
def __getitem__(self, *args):
return _GraphLib_c.SizeVectorPair___getitem__(self, *args)
def __setitem__(self, *args):
return _GraphLib_c.SizeVectorPair___setitem__(self, *args)
def pop(self):
return _GraphLib_c.SizeVectorPair_pop(self)
def append(self, x):
return _GraphLib_c.SizeVectorPair_append(self, x)
def empty(self):
return _GraphLib_c.SizeVectorPair_empty(self)
def size(self):
return _GraphLib_c.SizeVectorPair_size(self)
def swap(self, v):
return _GraphLib_c.SizeVectorPair_swap(self, v)
def begin(self):
return _GraphLib_c.SizeVectorPair_begin(self)
def end(self):
return _GraphLib_c.SizeVectorPair_end(self)
def rbegin(self):
return _GraphLib_c.SizeVectorPair_rbegin(self)
def rend(self):
| |
ret["changes"] = {"kubernetes.secret": {"new": "absent", "old": "present"}}
ret["comment"] = "Secret deleted"
return ret
def secret_present(
    name, namespace="default", data=None, source=None, template=None, **kwargs
):
    """
    Ensure the named secret exists in the specified namespace and carries
    the given data. An already-existing secret is recreated.

    name
        The name of the secret.

    namespace
        The namespace holding the secret. The 'default' one is going to be
        used unless a different one is specified.

    data
        The dictionary holding the secrets.

    source
        A file containing the data of the secret in plain format.

    template
        Template engine to be used to render the source file.
    """
    ret = {"name": name, "changes": {}, "result": False, "comment": ""}

    if data and source:
        return _error(ret, "'source' cannot be used in combination with 'data'")

    secret = __salt__["kubernetes.show_secret"](name, namespace, **kwargs)

    if secret is not None:
        # Replacement path: the secret already exists.
        if __opts__["test"]:
            ret["result"] = None
            ret["comment"] = "The secret is going to be replaced"
            return ret
        # TODO: improve checks  # pylint: disable=fixme
        log.info("Forcing the recreation of the service")
        ret["comment"] = "The secret is already present. Forcing recreation"
        res = __salt__["kubernetes.replace_secret"](
            name=name,
            namespace=namespace,
            data=data,
            source=source,
            template=template,
            saltenv=__env__,
            **kwargs
        )
        # Report the keys only: the values are unencrypted and may hold
        # sensitive data.
        ret["changes"] = {"data": list(res["data"])}
    else:
        # Creation path: nothing with this name exists yet.
        if data is None:
            data = {}
        if __opts__["test"]:
            ret["result"] = None
            ret["comment"] = "The secret is going to be created"
            return ret
        res = __salt__["kubernetes.create_secret"](
            name=name,
            namespace=namespace,
            data=data,
            source=source,
            template=template,
            saltenv=__env__,
            **kwargs
        )
        ret["changes"]["{}.{}".format(namespace, name)] = {"old": {}, "new": res}

    ret["result"] = True
    return ret
def configmap_absent(name, namespace="default", **kwargs):
    """
    Ensure the named configmap does not exist in the given namespace.

    name
        The name of the configmap

    namespace
        The namespace holding the configmap. The 'default' one is going to be
        used unless a different one is specified.
    """
    ret = {"name": name, "changes": {}, "result": False, "comment": ""}

    existing = __salt__["kubernetes.show_configmap"](name, namespace, **kwargs)
    if existing is None:
        # Nothing to do; in test mode report "unknown" (None) per convention.
        ret["result"] = None if __opts__["test"] else True
        ret["comment"] = "The configmap does not exist"
        return ret

    if __opts__["test"]:
        ret["comment"] = "The configmap is going to be deleted"
        ret["result"] = None
        return ret

    __salt__["kubernetes.delete_configmap"](name, namespace, **kwargs)
    # kubernetes 1.6.4 does not set a status code when deleting a configmap;
    # the execution module raises on server errors, so reaching this point
    # means the deletion went through.
    ret["result"] = True
    ret["changes"] = {"kubernetes.configmap": {"new": "absent", "old": "present"}}
    ret["comment"] = "ConfigMap deleted"
    return ret
def configmap_present(
    name, namespace="default", data=None, source=None, template=None, **kwargs
):
    """
    Ensure the named configmap exists in the specified namespace and carries
    the given data. An already-existing configmap is recreated.

    name
        The name of the configmap.

    namespace
        The namespace holding the configmap. The 'default' one is going to be
        used unless a different one is specified.

    data
        The dictionary holding the configmaps.

    source
        A file containing the data of the configmap in plain format.

    template
        Template engine to be used to render the source file.
    """
    ret = {"name": name, "changes": {}, "result": False, "comment": ""}

    if data and source:
        return _error(ret, "'source' cannot be used in combination with 'data'")
    if data is None:
        data = {}

    configmap = __salt__["kubernetes.show_configmap"](name, namespace, **kwargs)

    if configmap is not None:
        # Replacement path: the configmap already exists.
        if __opts__["test"]:
            ret["result"] = None
            ret["comment"] = "The configmap is going to be replaced"
            return ret
        # TODO: improve checks  # pylint: disable=fixme
        log.info("Forcing the recreation of the service")
        ret["comment"] = "The configmap is already present. Forcing recreation"
        res = __salt__["kubernetes.replace_configmap"](
            name=name,
            namespace=namespace,
            data=data,
            source=source,
            template=template,
            saltenv=__env__,
            **kwargs
        )
        ret["changes"] = {"data": res["data"]}
    else:
        # Creation path: nothing with this name exists yet.
        if __opts__["test"]:
            ret["result"] = None
            ret["comment"] = "The configmap is going to be created"
            return ret
        res = __salt__["kubernetes.create_configmap"](
            name=name,
            namespace=namespace,
            data=data,
            source=source,
            template=template,
            saltenv=__env__,
            **kwargs
        )
        ret["changes"]["{}.{}".format(namespace, name)] = {"old": {}, "new": res}

    ret["result"] = True
    return ret
def pod_absent(name, namespace="default", **kwargs):
    """
    Ensure the named pod does not exist in the given namespace.

    name
        The name of the pod

    namespace
        The name of the namespace
    """
    ret = {"name": name, "changes": {}, "result": False, "comment": ""}

    if __salt__["kubernetes.show_pod"](name, namespace, **kwargs) is None:
        # Nothing to do; in test mode report "unknown" (None) per convention.
        ret["result"] = None if __opts__["test"] else True
        ret["comment"] = "The pod does not exist"
        return ret

    if __opts__["test"]:
        ret["comment"] = "The pod is going to be deleted"
        ret["result"] = None
        return ret

    res = __salt__["kubernetes.delete_pod"](name, namespace, **kwargs)
    code = res["code"]
    if code != 200 and code is not None:
        ret["comment"] = "Something went wrong, response: {}".format(res)
        return ret

    ret["result"] = True
    ret["changes"] = {"kubernetes.pod": {"new": "absent", "old": "present"}}
    # A missing code means the API accepted the deletion but it has not
    # completed yet.
    ret["comment"] = "In progress" if code is None else res["message"]
    return ret
def pod_present(
    name,
    namespace="default",
    metadata=None,
    spec=None,
    source="",
    template="",
    **kwargs
):
    """
    Ensures that the named pod is present inside of the specified
    namespace with the given metadata and spec.
    If the pod exists it will be replaced.

    name
        The name of the pod.

    namespace
        The namespace holding the pod. The 'default' one is going to be
        used unless a different one is specified.

    metadata
        The metadata of the pod object.

    spec
        The spec of the pod object.

    source
        A file containing the definition of the pod (metadata and
        spec) in the official kubernetes format.

    template
        Template engine to be used to render the source file.
    """
    ret = {"name": name, "changes": {}, "result": False, "comment": ""}

    if (metadata or spec) and source:
        return _error(
            ret, "'source' cannot be used in combination with 'metadata' or 'spec'"
        )
    if metadata is None:
        metadata = {}
    if spec is None:
        spec = {}

    pod = __salt__["kubernetes.show_pod"](name, namespace, **kwargs)

    if pod is None:
        if __opts__["test"]:
            ret["result"] = None
            ret["comment"] = "The pod is going to be created"
            return ret
        res = __salt__["kubernetes.create_pod"](
            name=name,
            namespace=namespace,
            metadata=metadata,
            spec=spec,
            source=source,
            template=template,
            saltenv=__env__,
            **kwargs
        )
        ret["changes"]["{}.{}".format(namespace, name)] = {"old": {}, "new": res}
    else:
        if __opts__["test"]:
            ret["result"] = None
            # Fix: previously this branch returned with an empty comment,
            # unlike every sibling *_present state in this module.
            ret["comment"] = "The pod is going to be replaced"
            return ret
        # TODO: fix replace_namespaced_pod validation issues
        ret["comment"] = (
            "salt is currently unable to replace a pod without "
            "deleting it. Please perform the removal of the pod requiring "
            "the 'pod_absent' state if this is the desired behaviour."
        )
        ret["result"] = False
        return ret

    # Only the creation path reaches this point.
    ret["changes"] = {"metadata": metadata, "spec": spec}
    ret["result"] = True
    return ret
def node_label_absent(name, node, **kwargs):
    """
    Ensure the named label is not set on the given node.

    name
        The name of the label

    node
        The name of the node
    """
    ret = {"name": name, "changes": {}, "result": False, "comment": ""}

    current = __salt__["kubernetes.node_labels"](node, **kwargs)
    if name not in current:
        # Nothing to do; in test mode report "unknown" (None) per convention.
        ret["result"] = None if __opts__["test"] else True
        ret["comment"] = "The label does not exist"
        return ret

    if __opts__["test"]:
        ret["comment"] = "The label is going to be deleted"
        ret["result"] = None
        return ret

    __salt__["kubernetes.node_remove_label"](node_name=node, label_name=name, **kwargs)

    ret["result"] = True
    ret["changes"] = {"kubernetes.node_label": {"new": "absent", "old": "present"}}
    ret["comment"] = "Label removed from node"
    return ret
def node_label_folder_absent(name, node, **kwargs):
    """
    Ensure no label under the given folder prefix exists on the node.

    name
        The name of label folder

    node
        The name of the node
    """
    ret = {"name": name, "changes": {}, "result": False, "comment": ""}

    labels = __salt__["kubernetes.node_labels"](node, **kwargs)
    # Normalise the folder to a single trailing slash so prefix matching works.
    prefix = name.strip("/") + "/"
    doomed = [label for label in labels if label.startswith(prefix)]
    survivors = [label for label in labels if not label.startswith(prefix)]

    if not doomed:
        # Nothing to do; in test mode report "unknown" (None) per convention.
        ret["result"] = None if __opts__["test"] else True
        ret["comment"] = "The label folder does not exist"
        return ret

    if __opts__["test"]:
        ret["comment"] = "The label folder is going to be deleted"
        ret["result"] = None
        return ret

    for label in doomed:
        __salt__["kubernetes.node_remove_label"](
            node_name=node, label_name=label, **kwargs
        )

    ret["result"] = True
    ret["changes"] = {
        "kubernetes.node_label_folder_absent": {"old": list(labels), "new": survivors}
    }
    ret["comment"] = "Label folder removed from node"
    return ret
def node_label_present(name, node, value, **kwargs):
"""
Ensures that the named label is set on the named node
with the given value.
If the label exists it will be replaced.
name
The name of the label.
value
Value of the label.
node
Node to change.
"""
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
labels | |
= _messages.StringField(12)
options = _messages.MessageField('BuildOptions', 13)
projectId = _messages.StringField(14)
queueTtl = _messages.StringField(15)
results = _messages.MessageField('Results', 16)
secrets = _messages.MessageField('Secret', 17, repeated=True)
serviceAccount = _messages.StringField(18)
source = _messages.MessageField('Source', 19)
sourceProvenance = _messages.MessageField('SourceProvenance', 20)
startTime = _messages.StringField(21)
status = _messages.EnumField('StatusValueValuesEnum', 22)
statusDetail = _messages.StringField(23)
steps = _messages.MessageField('BuildStep', 24, repeated=True)
substitutions = _messages.MessageField('SubstitutionsValue', 25)
tags = _messages.StringField(26, repeated=True)
timeout = _messages.StringField(27)
timing = _messages.MessageField('TimingValue', 28)
warnings = _messages.MessageField('Warning', 29, repeated=True)
class BuildApproval(_messages.Message):
    r"""BuildApproval describes a build's approval configuration, state, and
    result.

    Enums:
      StateValueValuesEnum: Output only. The state of this build's approval.

    Fields:
      config: Output only. Configuration for manual approval of this build.
      result: Output only. Result of manual approval for this Build.
      state: Output only. The state of this build's approval.
    """

    class StateValueValuesEnum(_messages.Enum):
        r"""Output only. The state of this build's approval.

        Values:
          STATE_UNSPECIFIED: Default enum type. This should not be used.
          PENDING: Build approval is pending.
          APPROVED: Build approval has been approved.
          REJECTED: Build approval has been rejected.
          CANCELLED: Build was cancelled while it was still pending approval.
        """
        # Numeric values mirror the proto enum; keep them stable.
        STATE_UNSPECIFIED = 0
        PENDING = 1
        APPROVED = 2
        REJECTED = 3
        CANCELLED = 4

    # Field numbers (1, 2, 3) are part of the API message contract.
    config = _messages.MessageField('ApprovalConfig', 1)
    result = _messages.MessageField('ApprovalResult', 2)
    state = _messages.EnumField('StateValueValuesEnum', 3)
class BuildOperationMetadata(_messages.Message):
    r"""Metadata for build operations.

    Fields:
      build: The build that the operation is tracking.
    """

    # Field number 1 is part of the API message contract.
    build = _messages.MessageField('Build', 1)
class BuildOptions(_messages.Message):
r"""Optional arguments to enable specific features of builds.
Enums:
DockerDaemonValueValuesEnum: Optional. Option to specify how (or if) a
Docker daemon is provided for the build.
LogStreamingOptionValueValuesEnum: Option to define build log streaming
behavior to Google Cloud Storage.
LoggingValueValuesEnum: Option to specify the logging mode, which
determines if and where build logs are stored.
MachineTypeValueValuesEnum: Compute Engine machine type on which to run
the build.
RequestedVerifyOptionValueValuesEnum: Requested verifiability options.
SourceProvenanceHashValueListEntryValuesEnum:
SubstitutionOptionValueValuesEnum: Option to specify behavior when there
is an error in the substitution checks. NOTE: this is always set to
ALLOW_LOOSE for triggered builds and cannot be overridden in the build
configuration file.
Fields:
anthosCluster: Details about how this build should be executed on a Anthos
cluster.
cluster: Details about how this build should be executed on a GKE cluster.
diskSizeGb: Requested disk size for the VM that runs the build. Note that
this is *NOT* "disk free"; some of the space will be used by the
operating system and build utilities. Also note that this is the minimum
disk size that will be allocated for the build -- the build may run with
a larger disk than requested. At present, the maximum disk size is
1000GB; builds that request more than the maximum are rejected with an
error.
dockerDaemon: Optional. Option to specify how (or if) a Docker daemon is
provided for the build.
dynamicSubstitutions: Option to specify whether or not to apply bash style
string operations to the substitutions. NOTE: this is always enabled for
triggered builds and cannot be overridden in the build configuration
file.
env: A list of global environment variable definitions that will exist for
all build steps in this build. If a variable is defined in both globally
and in a build step, the variable will use the build step value. The
elements are of the form "KEY=VALUE" for the environment variable "KEY"
being given the value "VALUE".
logStreamingOption: Option to define build log streaming behavior to
Google Cloud Storage.
logging: Option to specify the logging mode, which determines if and where
build logs are stored.
machineType: Compute Engine machine type on which to run the build.
pool: Optional. Specification for execution on a `WorkerPool`. See
[running builds in a private
pool](https://cloud.google.com/build/docs/private-pools/run-builds-in-
private-pool) for more information.
requestedVerifyOption: Requested verifiability options.
secretEnv: A list of global environment variables, which are encrypted
using a Cloud Key Management Service crypto key. These values must be
specified in the build's `Secret`. These variables will be available to
all build steps in this build.
sourceProvenanceHash: Requested hash for SourceProvenance.
substitutionOption: Option to specify behavior when there is an error in
the substitution checks. NOTE: this is always set to ALLOW_LOOSE for
triggered builds and cannot be overridden in the build configuration
file.
volumes: Global list of volumes to mount for ALL build steps Each volume
is created as an empty volume prior to starting the build process. Upon
completion of the build, volumes and their contents are discarded.
Global volume names and paths cannot conflict with the volumes defined a
build step. Using a global volume in a build with only one step is not
valid as it is indicative of a build request with an incorrect
configuration.
workerPool: This field deprecated; please use `pool.name` instead.
"""
class DockerDaemonValueValuesEnum(_messages.Enum):
r"""Optional. Option to specify how (or if) a Docker daemon is provided
for the build.
Values:
DOCKER_DAEMON_UNSPECIFIED: If the option is unspecified, a default will
be set based on the environment.
NO_DOCKER: No Docker daemon or functionality will be provided to the
build.
NON_PRIVILEGED: A Docker daemon is available during the build that is
running without privileged mode.
PRIVILEGED: A Docker daemon will be available that is running in
privileged mode. This is potentially a security vulnerability and
should only be used if the user is fully aware of the associated
risks.
"""
DOCKER_DAEMON_UNSPECIFIED = 0
NO_DOCKER = 1
NON_PRIVILEGED = 2
PRIVILEGED = 3
class LogStreamingOptionValueValuesEnum(_messages.Enum):
r"""Option to define build log streaming behavior to Google Cloud Storage.
Values:
STREAM_DEFAULT: Service may automatically determine build log streaming
behavior.
STREAM_ON: Build logs should be streamed to Google Cloud Storage.
STREAM_OFF: Build logs should not be streamed to Google Cloud Storage;
they will be written when the build is completed.
"""
STREAM_DEFAULT = 0
STREAM_ON = 1
STREAM_OFF = 2
class LoggingValueValuesEnum(_messages.Enum):
r"""Option to specify the logging mode, which determines if and where
build logs are stored.
Values:
LOGGING_UNSPECIFIED: The service determines the logging mode. The
default is `LEGACY`. Do not rely on the default logging behavior as it
may change in the future.
LEGACY: Build logs are stored in Cloud Logging and Cloud Storage.
GCS_ONLY: Build logs are stored in Cloud Storage.
STACKDRIVER_ONLY: This option is the same as CLOUD_LOGGING_ONLY.
CLOUD_LOGGING_ONLY: Build logs are stored in Cloud Logging. Selecting
this option will not allow [logs
streaming](https://cloud.google.com/sdk/gcloud/reference/builds/log).
NONE: Turn off all logging. No build logs will be captured.
"""
LOGGING_UNSPECIFIED = 0
LEGACY = 1
GCS_ONLY = 2
STACKDRIVER_ONLY = 3
CLOUD_LOGGING_ONLY = 4
NONE = 5
class MachineTypeValueValuesEnum(_messages.Enum):
r"""Compute Engine machine type on which to run the build.
Values:
UNSPECIFIED: Standard machine type.
N1_HIGHCPU_8: Highcpu machine with 8 CPUs.
N1_HIGHCPU_32: Highcpu machine with 32 CPUs.
E2_HIGHCPU_8: Highcpu e2 machine with 8 CPUs.
E2_HIGHCPU_32: Highcpu e2 machine with 32 CPUs.
"""
UNSPECIFIED = 0
N1_HIGHCPU_8 = 1
N1_HIGHCPU_32 = 2
E2_HIGHCPU_8 = 3
E2_HIGHCPU_32 = 4
class RequestedVerifyOptionValueValuesEnum(_messages.Enum):
r"""Requested verifiability options.
Values:
NOT_VERIFIED: Not a verifiable build. (default)
VERIFIED: Verified build.
"""
NOT_VERIFIED = 0
VERIFIED = 1
class SourceProvenanceHashValueListEntryValuesEnum(_messages.Enum):
r"""SourceProvenanceHashValueListEntryValuesEnum enum type.
Values:
NONE: No hash requested.
SHA256: Use a sha256 hash.
MD5: Use a md5 hash.
"""
NONE = 0
SHA256 = 1
MD5 = 2
class SubstitutionOptionValueValuesEnum(_messages.Enum):
r"""Option to specify behavior when there is an error in the substitution
checks. NOTE: this is always set to ALLOW_LOOSE for triggered builds and
cannot be overridden in the build configuration file.
Values:
MUST_MATCH: Fails the build if error in substitutions checks, like
missing a substitution in the template or in the map.
ALLOW_LOOSE: Do not fail the build if error in substitutions checks.
"""
MUST_MATCH = 0
ALLOW_LOOSE = 1
anthosCluster = _messages.MessageField('AnthosWorkerPool', 1)
cluster = _messages.MessageField('ClusterOptions', 2)
diskSizeGb = _messages.IntegerField(3)
dockerDaemon = _messages.EnumField('DockerDaemonValueValuesEnum', 4)
dynamicSubstitutions = _messages.BooleanField(5)
env = _messages.StringField(6, repeated=True)
logStreamingOption = _messages.EnumField('LogStreamingOptionValueValuesEnum', 7)
logging = _messages.EnumField('LoggingValueValuesEnum', 8)
machineType = _messages.EnumField('MachineTypeValueValuesEnum', 9)
pool = _messages.MessageField('PoolOption', 10)
requestedVerifyOption = _messages.EnumField('RequestedVerifyOptionValueValuesEnum', 11)
secretEnv = _messages.StringField(12, repeated=True)
sourceProvenanceHash = _messages.EnumField('SourceProvenanceHashValueListEntryValuesEnum', 13, repeated=True)
substitutionOption = _messages.EnumField('SubstitutionOptionValueValuesEnum', 14)
volumes = _messages.MessageField('Volume', 15, | |
from functools import reduce, partial
from operator import __rshift__
from typing import Mapping, Any, Union, Tuple, List
from hbutils.collection import nested_map
from hbutils.design import SingletonMark
from hbutils.string import truncate
from .base import BaseUnit, _to_unit, UnitProcessProxy, raw
from .build import TransformUnit, CalculateUnit, UnitBuilder
from .utils import keep
from ..base import PValue, ParseResult
class GetItemUnit(TransformUnit):
    """
    Overview:
        Unit for getting item from list, tuple or dict object, using ``__getitem__``.
    """
    # Name of the pre-processed argument slot consumed by _transform.
    __names__ = ('item',)
    # Lookup failures that the framework should surface as parse errors.
    __errors__ = (KeyError, IndexError)

    def __init__(self, item, offset: bool = True):
        """
        Constructor of :class:`GetItemUnit`.

        :param item: Item data.
        :param offset: Create offset when getting item, default is ``True``.
        """
        self._offset = offset
        TransformUnit.__init__(self, item)

    def _transform(self, v: PValue, pres: Mapping[str, Any]) -> PValue:
        # The item key has already been evaluated by the framework.
        item = pres['item'].value
        try:
            res = v.val(v.value[item])
            if self._offset:
                # Descend one level so error positions read <root>.<item>
                # (see the getitem_ docstring examples elsewhere in this file).
                res = res.child(item)
            return res
        except self.__errors__ as err:
            # Re-raise with the same exception type; it is mapped to the
            # corresponding *ParseError by the framework.
            raise type(err)(f'Item {repr(item)} not found in value.')

    def _rinfo(self):
        # (properties, children) pair used for repr/inspection output.
        _, children = super()._rinfo()
        return [('offset', self._offset)], children
def getitem_(*items, offset: bool = True):
    """
    Overview:
        Getting item from list, tuple and dict, based on ``__getitem__``.

    :param items: Items to be got, units are supported, multiple items are also supported.
    :param offset: Enable offset or not, default is ``True``.
    :return: A unit for getting item.

    Examples::
        - Get item from list and tuple

        >>> from argsloader.units import getitem_
        >>> u = getitem_(2)
        >>> u([2, 3, 5, 7, 11, 13])
        5
        >>> u((2, 3, 5, 7, 11, 13))
        5
        >>> u([2, 3])
        IndexParseError: Item 2 not found in value.

        - Get item from dict

        >>> u = getitem_('b')
        >>> u({'a': 12, 'b': 23})
        23
        >>> u({'b': 24, 'bb': 233})
        24
        >>> u({'a': 12, 'c': 23})
        KeyParseError: "Item 'b' not found in value."

        - Multiple levels

        >>> u = getitem_('a', 2)
        >>> u({'a': [2, 3, 5, 7], 'b': 2})
        5
        >>> u({'a': [2, 3], 'b': 2})
        IndexParseError: Item 2 not found in value.
        >>> u({'aa': [2, 3, 5, 7], 'b': 2})
        KeyParseError: "Item 'a' not found in value."

    .. note::
        When :func:`getitem_` is used, the position of value will be switched to child-level. \
        This can be seen when method :meth:`argsloader.units.base.BaseUnit.call` is called. For example,

        >>> from argsloader.units import getitem_, is_type
        >>> u = getitem_(2) >> is_type(int)
        >>> u.call([2, 3, 5.0])
        argsloader.base.exception.MultipleParseError: (1 error)
          <root>.2: TypeParseError: Value type not match - int expected but float found.
        >>>
        >>> u = getitem_('b') >> is_type(int)
        >>> u.call({'a': 12, 'b': 23.0})
        argsloader.base.exception.MultipleParseError: (1 error)
          <root>.b: TypeParseError: Value type not match - int expected but float found.

        But sometimes this offset should not be kept, so we can disable it. For example, in the following code, \
        the positions of the errors are all ``<root>`` instead of ``<root>.2`` or ``<root>.b``.

        >>> from argsloader.units import getitem_, is_type
        >>> u = getitem_(2, offset=False) >> is_type(int)
        >>> u.call([2, 3, 5.0])
        argsloader.base.exception.MultipleParseError: (1 error)
          <root>: TypeParseError: Value type not match - int expected but float found.
        >>>
        >>> u = getitem_('b', offset=False) >> is_type(int)
        >>> u.call({'a': 12, 'b': 23.0})
        argsloader.base.exception.MultipleParseError: (1 error)
          <root>: TypeParseError: Value type not match - int expected but float found.
    """
    if not items:
        # No items given — degenerate to an identity (keep) unit.
        return keep()
    # Build one GetItemUnit per item and chain them left-to-right with ``>>``.
    # ``bool(offset)`` replaces the unidiomatic ``not not offset``.
    return reduce(__rshift__, map(partial(GetItemUnit, offset=bool(offset)), items))
class GetAttrUnit(CalculateUnit):
    """
    Overview:
        Unit that reads an attribute off the incoming object via ``getattr``.
    """
    # Name of the pre-processed argument slot consumed by _calculate.
    __names__ = ('attr',)
    # Missing attributes are surfaced as parse errors by the framework.
    __errors__ = (AttributeError,)

    def __init__(self, attr):
        """
        Constructor of :class:`GetAttrUnit`.

        :param attr: Attribute data.
        """
        CalculateUnit.__init__(self, attr)

    def _calculate(self, v, pres: Mapping[str, Any]) -> object:
        # The attribute name has already been evaluated by the framework.
        attr_name = pres['attr']
        return getattr(v, attr_name)
def getattr_(attr) -> 'GetAttrUnit':
    """
    Overview:
        Build a unit that fetches an attribute from the processed object,
        based on ``__getattr__``.

    :param attr: Attribute to be got, units are supported.
    :return: A unit for getting attribute.

    Examples::
        >>> from argsloader.units import getattr_
        >>> from easydict import EasyDict
        >>> u = getattr_('a')
        >>> u(EasyDict({'a': 1, 'b': 2}))
        1
        >>>
        >>> u = getattr_('__dict__')
        >>> u(EasyDict({'a': 1, 'b': 2}))
        {'a': 1, 'b': 2}
    """
    unit = GetAttrUnit(attr)
    return unit
class StructUnit(BaseUnit):
    """
    Overview:
        Unit which runs a nested structure of units and reassembles their
        results into the same structure.
    """

    def __init__(self, struct_):
        """
        Constructor of :class:`StructUnit`.

        :param struct_: Structure data.
        """
        # Convert every leaf of the structure into a unit up front.
        self._struct = nested_map(_to_unit, struct_)

    def _easy_process(self, v: PValue, proxy: UnitProcessProxy) -> ParseResult:
        all_ok = True

        def _run_one(unit: BaseUnit):
            # Run each leaf unit against the same input value, remembering
            # whether any of them failed.
            nonlocal all_ok
            outcome = unit._process(v)
            all_ok = all_ok and outcome.status.valid
            return outcome

        outcomes = nested_map(_run_one, self._struct)
        if not all_ok:
            return proxy.error(None, outcomes)
        # All leaves succeeded: rebuild the structure from their result values.
        return proxy.success(v.val(nested_map(lambda r: r.result.value, outcomes)), outcomes)

    def _rinfo(self):
        return [], [('struct', self._struct)]
def struct(struct_):
    """
    Overview:
        Quickly build a structured unit from the given ``struct_`` data.

    :param struct_: Structure data, which should contain one or multiple units.
    :return: Structed data, which has the same structure with the given ``struct_``.

    Examples::
        >>> u = struct({
        ...     'a': add.by(2) >> interval.LR(1, 15),
        ...     'b': (sub.by(3), 'this is b'),
        ...     'c': [233, mul.by(4)],
        ... })
        >>> u(5)
        {'a': 7, 'b': (2, 'this is b'), 'c': [233, 20]}
        >>> u(10)
        {'a': 12, 'b': (7, 'this is b'), 'c': [233, 40]}
        >>> u(20)
        ValueParseError: Value not in interval - [1, 15] expected but 22 found.

    .. note::
        Extended subclasses of list, tuple or dict keep their type, e.g. an
        ``EasyDict`` value in the structure produces an ``EasyDict`` result,

        >>> from argsloader.units import struct, add, mul, sub, interval
        >>> from easydict import EasyDict
        >>> u = struct({
        ...     'a': add.by(2) >> interval.LR(1, 15),
        ...     'b': (sub.by(3), 'this is b'),
        ...     'c': EasyDict({'x': 233, 'y': mul.by(4)}),
        ... })
        >>> u(5)
        {'a': 7, 'b': (2, 'this is b'), 'c': {'x': 233, 'y': 20}}
        >>> type(u(5))
        <class 'dict'>
        >>> type(u(5)['c'])
        <class 'easydict.EasyDict'>

    .. note::
        Like the ``&`` operator, every inner unit is executed, so all the \
        errors are recorded when :meth:`argsloader.units.base.BaseUnit.call` \
        is used. For example,

        >>> from argsloader.units import struct, add, mul, sub, interval
        >>> u = struct({
        ...     'a': add.by(2) >> interval.LR(1, 10),
        ...     'b': (sub.by(3) >> interval.LR(0, 5), 'this is b'),
        ...     'c': [233, mul.by(4) >> interval.LR(10, 30)],
        ... })
        >>> u.call(5)
        {'a': 7, 'b': (2, 'this is b'), 'c': [233, 20]}
        >>> u.call(10)
        argsloader.base.exception.MultipleParseError: (3 errors)
          <root>: ValueParseError: Value not in interval - [1, 10] expected but 12 found.
          <root>: ValueParseError: Value not in interval - [0, 5] expected but 7 found.
          <root>: ValueParseError: Value not in interval - [10, 30] expected but 40 found.

    .. warning::
        When ``struct_`` is not a dict, list or tuple, a plain unit is \
        returned instead of a :class:`StructUnit`.
    """
    # Containers become StructUnit; any scalar is converted to a plain unit.
    return StructUnit(struct_) if isinstance(struct_, (dict, list, tuple)) else _to_unit(struct_)
class MappingUnit(BaseUnit):
    """
    Overview:
        Unit which applies an inner unit to every item of a sequence,
        rebuilding the sequence with the same concrete type.
    """

    def __init__(self, f, offset: bool = True):
        """
        Constructor of :class:`MappingUnit`.

        :param f: Processor function.
        :param offset: Create offset when getting item, default is ``True``.
        """
        self._func = _to_unit(f)
        self._offset = offset

    def _easy_process(self, v: PValue, proxy: UnitProcessProxy) -> ParseResult:
        seq: Union[Tuple, List] = v.value
        outcomes = []
        all_ok = True
        for position, element in enumerate(seq):
            wrapped = v.val(element)
            if self._offset:
                # Record the element's index so error positions read as
                # e.g. ``<root>.3``.
                wrapped = wrapped.child(position)
            outcome = self._func._process(wrapped)
            if not outcome.status.valid:
                all_ok = False
            outcomes.append(outcome)

        if not all_ok:
            return proxy.error(None, outcomes)
        # Rebuild with the original sequence type (list stays list, etc.).
        rebuilt = type(v.value)(map(lambda r: r.result.value, outcomes))
        return proxy.success(v.val(rebuilt), outcomes)

    def _rinfo(self):
        return [], [('func', self._func)]
def mapping(func, offset: bool = True) -> MappingUnit:
"""
Overview:
Mapping items from sequence based object.
:param func: Processor function for mapping.
:param offset: Enable offset or not, default is ``True``.
:return: A unit for mapping sequence based-object.
Examples::
>>> from argsloader.units import mapping, interval, add
>>> u = mapping(add.by(2) >> interval.LR(1, 10))
>>> u([2, 3, 5, 7])
[4, 5, 7, 9]
>>> u([2, 3, 5, 71, 11, 13])
ValueParseError: Value not in interval - [1, 10] expected but 73 found.
.. note::
All the values will be processed by the unit, so all the errors will be recorded. For example,
>>> from argsloader.units import mapping, interval, add
>>> u = mapping(add.by(2) >> interval.LR(1, 10))
>>> u.call([2, 3, 5, 7])
[4, 5, 7, 9]
>>> u.call([2, 3, 5, 71, 11, 13])
argsloader.base.exception.MultipleParseError: (3 errors)
<root>.3: ValueParseError: Value not in interval - [1, 10] expected but 73 found.
<root>.4: ValueParseError: Value not in interval - [1, 10] expected but 13 found.
<root>.5: ValueParseError: Value not in interval - [1, 10] expected but 15 found.
.. | |
"""
Base class for authenticated API calls used by Entity, Content and Upload
Manages the authentication token lifetime and namespace versions.
author: <NAME>
licence: Apache License 2.0
"""
import configparser
import hashlib
import json
import logging
import os
import re
import sys
import threading
import time
import unicodedata
import xml.etree.ElementTree
from enum import Enum
import requests
import pyPreservica
# Module-level logger for this package.
logger = logging.getLogger(__name__)

# Chunk size in bytes (2 KiB); presumably used for streamed transfers — confirm at call sites.
CHUNK_SIZE = 1024 * 2

# XML namespace roots for the various Preservica APIs.
NS_XIP_ROOT = "http://preservica.com/XIP/"
NS_ENTITY_ROOT = "http://preservica.com/EntityAPI/"
NS_RM_ROOT = "http://preservica.com/RetentionManagement/"
NS_SEC_ROOT = "http://preservica.com/SecurityAPI"
NS_WORKFLOW = "http://workflow.preservica.com"
NS_ADMIN = "http://preservica.com/AdminAPI"
# Versioned (v6.0) namespaces for the XIP data model and Entity API.
NS_XIP_V6 = "http://preservica.com/XIP/v6.0"
NS_ENTITY = "http://preservica.com/EntityAPI/v6.0"

# HTTP header that carries the authentication token on API calls.
HEADER_TOKEN = "Preservica-Access-Token"

# URL path segments for the three entity types (IO/SO/CO).
IO_PATH = "information-objects"
SO_PATH = "structural-objects"
CO_PATH = "content-objects"

# Read size in bytes used by FileHash when digesting files.
HASH_BLOCK_SIZE = 65536
class FileHash:
    """
    Callable wrapper around a :mod:`hashlib` algorithm that digests a file
    in fixed-size chunks, so arbitrarily large files can be hashed without
    loading them fully into memory.
    """

    def __init__(self, algorithm):
        # ``algorithm`` is a hashlib constructor such as ``hashlib.sha1``.
        self.algorithm = algorithm

    def get_algorithm(self):
        return self.algorithm

    def __call__(self, file):
        """Return the hex digest of *file*'s contents."""
        digest = self.algorithm()
        with open(file, 'rb') as handle:
            while True:
                chunk = handle.read(HASH_BLOCK_SIZE)
                if not chunk:
                    break
                digest.update(chunk)
        return digest.hexdigest()
def strtobool(val):
    """Convert a string representation of truth to ``True`` or ``False``.

    True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
    are 'n', 'no', 'f', 'false', 'off', and '0'.

    :param val: string to interpret (case-insensitive).
    :raises ValueError: if *val* matches neither set.
    """
    lowered = val.lower()
    if lowered in ('y', 'yes', 't', 'true', 'on', '1'):
        return True
    if lowered in ('n', 'no', 'f', 'false', 'off', '0'):
        return False
    raise ValueError("invalid truth value %r" % (lowered,))
def _make_stored_zipfile(base_name, base_dir, owner, group, verbose=0, dry_run=0, zlogger=None):
"""
Create a non compressed zip file from all the files under 'base_dir'.
The output zip file will be named 'base_name' + ".zip". Returns the
name of the output zip file.
"""
import zipfile # late import for breaking circular dependency
zip_filename = base_name + ".zip"
archive_dir = os.path.dirname(base_name)
if archive_dir and not os.path.exists(archive_dir):
if zlogger is not None:
zlogger.info("creating %s", archive_dir)
if not dry_run:
os.makedirs(archive_dir)
if zlogger is not None:
zlogger.info("creating '%s' and adding '%s' to it",
zip_filename, base_dir)
if not dry_run:
with zipfile.ZipFile(zip_filename, "w", compression=zipfile.ZIP_STORED) as zf:
path = os.path.normpath(base_dir)
if path != os.curdir:
zf.write(path, path)
if zlogger is not None:
zlogger.info("adding '%s'", path)
for dirpath, dirnames, filenames in os.walk(base_dir):
for name in sorted(dirnames):
path = os.path.normpath(os.path.join(dirpath, name))
zf.write(path, path)
if zlogger is not None:
zlogger.info("adding '%s'", path)
for name in filenames:
path = os.path.normpath(os.path.join(dirpath, name))
if os.path.isfile(path):
zf.write(path, path)
if zlogger is not None:
zlogger.info("adding '%s'", path)
return zip_filename
class PagedSet:
    """
    One page of results from a paged API call.

    ``results`` holds the objects of interest for this page; ``next_page``
    is the token/URL for the following page, if any.
    """

    def __init__(self, results, has_more: bool, total: int, next_page: str):
        self.results = results
        self.has_more = bool(has_more)
        self.total = int(total)
        self.next_page = next_page

    def __str__(self):
        return str(self.results)

    def get_results(self):
        return self.results

    def get_total(self):
        return self.total

    def has_more_pages(self):
        return self.has_more
class Sha1FixityCallBack:
    """Fixity callback producing a SHA1 digest for the given file."""

    def __call__(self, filename, full_path):
        digest = FileHash(hashlib.sha1)(full_path)
        return "SHA1", digest
class Sha256FixityCallBack:
    """Fixity callback producing a SHA256 digest for the given file."""

    def __call__(self, filename, full_path):
        digest = FileHash(hashlib.sha256)(full_path)
        return "SHA256", digest
class Sha512FixityCallBack:
    """Fixity callback producing a SHA512 digest for the given file."""

    def __call__(self, filename, full_path):
        digest = FileHash(hashlib.sha512)(full_path)
        return "SHA512", digest
class ReportProgressConsoleCallback:
    """Console progress bar callback.

    Instances are called with a ``"current:total"`` string and redraw a
    single-line progress bar on stdout.
    """

    def __init__(self, prefix='Progress:', suffix='', length=100, fill='█', printEnd="\r"):
        """
        :param prefix: text printed before the bar.
        :param suffix: text printed after the bar.
        :param length: bar width in characters.
        :param fill: character used for the completed portion of the bar.
        :param printEnd: terminator written once the bar reaches 100%.
        """
        self.prefix = prefix
        self.suffix = suffix
        self.length = length
        self.fill = fill
        self.printEnd = printEnd
        # Serialise concurrent callbacks so redraws don't interleave.
        self._lock = threading.Lock()
        # Draw an empty bar immediately so the user sees progress start at 0%.
        self.print_progress_bar(0)

    def __call__(self, value):
        # ``value`` arrives as "current:total", e.g. "50:100".
        with self._lock:
            values = value.split(":")
            self.total = int(values[1])
            self.current = int(values[0])
            if self.total == 0:
                # Avoid division by zero; treat an empty job as complete.
                percentage = 100.0
            else:
                percentage = (self.current / self.total) * 100
            self.print_progress_bar(percentage)
            if int(percentage) == int(100):
                # Final redraw, then emit the terminator so subsequent
                # output starts cleanly.
                self.print_progress_bar(100.0)
                sys.stdout.write(self.printEnd)
                sys.stdout.flush()

    def print_progress_bar(self, percentage):
        # Redraw the bar in place using a carriage return.
        filled_length = int(self.length * (percentage / 100.0))
        bar_sym = self.fill * filled_length + '-' * (self.length - filled_length)
        sys.stdout.write(
            '\r%s |%s| (%.2f%%) %s ' % (self.prefix, bar_sym, percentage, self.suffix))
        sys.stdout.flush()
class UploadProgressConsoleCallback:
    """Console progress bar for a file upload.

    Instances are called with the number of bytes just transferred and
    redraw a single-line bar (with an MB/s rate) on stdout.
    """

    def __init__(self, filename: str, prefix='Progress:', suffix='', length=100, fill='█', printEnd="\r"):
        """
        :param filename: file being uploaded; its on-disk size defines 100%.
        :param prefix: text printed before the bar.
        :param suffix: text printed after the bar.
        :param length: bar width in characters.
        :param fill: character used for the completed portion of the bar.
        :param printEnd: terminator written once the upload completes.
        """
        self.prefix = prefix
        self.suffix = suffix
        self.length = length
        self.fill = fill
        self.printEnd = printEnd
        self._filename = filename
        self._size = float(os.path.getsize(filename))
        # Running total of bytes reported so far.
        self._seen_so_far = 0
        # Upload start time; used for the transfer-rate estimate.
        self.start = time.time()
        # Serialise concurrent callbacks so redraws don't interleave.
        self._lock = threading.Lock()
        self.print_progress_bar(0, 0)

    def __call__(self, bytes_amount):
        with self._lock:
            seconds = time.time() - self.start
            if seconds == 0:
                # Guard against division by zero on the very first call.
                seconds = 1
            self._seen_so_far += bytes_amount
            percentage = (self._seen_so_far / self._size) * 100
            # Average rate in MB/s since the upload started.
            rate = (self._seen_so_far / (1024 * 1024)) / seconds
            self.print_progress_bar(percentage, rate)
            if int(self._seen_so_far) == int(self._size):
                # Final redraw, then emit the terminator so subsequent
                # output starts cleanly.
                self.print_progress_bar(100.0, rate)
                sys.stdout.write(self.printEnd)
                sys.stdout.flush()

    def print_progress_bar(self, percentage, rate):
        # Redraw the bar in place using a carriage return.
        filled_length = int(self.length * (percentage / 100.0))
        bar_sym = self.fill * filled_length + '-' * (self.length - filled_length)
        sys.stdout.write(
            '\r%s |%s| (%.2f%%) (%.2f %s) %s ' % (self.prefix, bar_sym, percentage, rate, "Mb/s", self.suffix))
        sys.stdout.flush()
class UploadProgressCallback:
    """
    Simple upload-progress callback: accumulates the bytes reported so far
    and prints ``filename seen / size (pct%)`` on stdout.
    """

    def __init__(self, filename: str):
        self._filename = filename
        self._size = float(os.path.getsize(filename))
        self._seen_so_far = 0
        # Serialise concurrent progress callbacks.
        self._lock = threading.Lock()

    def __call__(self, bytes_amount):
        with self._lock:
            self._seen_so_far += bytes_amount
            done = (self._seen_so_far / self._size) * 100
            line = "\r%s %s / %s (%.2f%%)" % (self._filename, self._seen_so_far, self._size, done)
            sys.stdout.write(line)
            sys.stdout.flush()
class RelationshipDirection(Enum):
    """Direction of a relationship relative to the entity it was fetched for."""
    FROM = "From"  # this entity is the source end of the relationship
    TO = "To"  # this entity is the target end of the relationship
class EntityType(Enum):
    """
    Enumeration of the Entity Types
    """
    ASSET = "IO"  # Information Object
    FOLDER = "SO"  # Structural Object
    CONTENT_OBJECT = "CO"  # Content Object
class HTTPException(Exception):
    """
    Exception raised for non-404 HTTP error responses from the server.
    """

    def __init__(self, reference, http_status_code, url, method_name, message):
        self.reference = reference
        self.http_status_code = http_status_code
        self.url = url
        self.method_name = method_name
        self.msg = message
        # Mirror the original argument tuple for Exception.args compatibility.
        super().__init__(self.reference, self.http_status_code, self.url, self.msg)

    def __str__(self):
        return (f"Calling method {self.method_name}() {self.url} "
                f"returned HTTP {self.http_status_code}. {self.msg}")
class ReferenceNotFoundException(Exception):
    """
    Exception raised when a lookup by reference fails (HTTP 404).
    """

    def __init__(self, reference, http_status_code, url, method_name):
        self.reference = reference
        self.http_status_code = http_status_code
        self.url = url
        self.method_name = method_name
        self.msg = f"The requested reference {self.reference} is not found in the repository"
        # Mirror the original argument tuple for Exception.args compatibility.
        super().__init__(self.reference, self.http_status_code, self.url, self.msg)

    def __str__(self):
        return (f"Calling method {self.method_name}() {self.url} "
                f"returned HTTP {self.http_status_code}. {self.msg}")
class Relationship:
    """
    An entity-to-entity relationship, using DCMI term URIs for the
    relationship type.
    """
    DCMI_hasFormat = "http://purl.org/dc/terms/hasFormat"
    DCMI_isFormatOf = "http://purl.org/dc/terms/isFormatOf"
    DCMI_hasPart = "http://purl.org/dc/terms/hasPart"
    DCMI_isPartOf = "http://purl.org/dc/terms/isPartOf"
    DCMI_hasVersion = "http://purl.org/dc/terms/hasVersion"
    DCMI_isVersionOf = "http://purl.org/dc/terms/isVersionOf"
    DCMI_isReferencedBy = "http://purl.org/dc/terms/isReferencedBy"
    DCMI_references = "http://purl.org/dc/terms/references"
    DCMI_isReplacedBy = "http://purl.org/dc/terms/isReplacedBy"
    DCMI_replaces = "http://purl.org/dc/terms/replaces"
    DCMI_isRequiredBy = "http://purl.org/dc/terms/isRequiredBy"
    DCMI_requires = "http://purl.org/dc/terms/requires"
    DCMI_conformsTo = "http://purl.org/dc/terms/conformsTo"

    def __init__(self, relationship_id: str, relationship_type: str, direction: RelationshipDirection, other_ref: str,
                 title: str, entity_type: EntityType, this_ref: str, api_id: str):
        self.relationship_id = relationship_id
        self.relationship_type = relationship_type
        self.direction = direction
        self.other_ref = other_ref
        self.title = title
        self.entity_type = entity_type
        self.this_ref = this_ref
        self.api_id = api_id

    def __str__(self):
        # Render as "source <type> target", oriented by the direction flag.
        if self.direction == RelationshipDirection.FROM:
            source, target = self.this_ref, self.other_ref
        else:
            source, target = self.other_ref, self.this_ref
        return f"{source} {self.relationship_type} {target}"
class IntegrityCheck:
    """
    Details of a completed storage integrity check.
    """

    def __init__(self, check_type, success, date, adapter, fixed, reason):
        self.check_type = check_type
        self.success = bool(success)
        self.date = date
        self.adapter = adapter
        self.fixed = bool(fixed)
        self.reason = reason

    def __str__(self):
        # Note: ``fixed`` and ``reason`` are intentionally not rendered.
        return (f"Type:\t\t\t{self.check_type}\n"
                f"Success:\t\t\t{self.success}\n"
                f"Date:\t{self.date}\n"
                f"Storage Adapter:\t{self.adapter}\n")

    def __repr__(self):
        return str(self)

    def get_adapter(self):
        return self.adapter

    def get_success(self):
        return self.success
class Bitstream:
    """
    A Bitstream: the actual digital file within the Preservica data model.
    """

    def __init__(self, filename: str, length: int, fixity: dict, content_url: str):
        self.filename = filename
        self.length = int(length)
        # Mapping of fixity algorithm name -> digest value.
        self.fixity = fixity
        self.content_url = content_url

    def __str__(self):
        return (f"Filename:\t\t\t{self.filename}\n"
                f"FileSize:\t\t\t{self.length}\n"
                f"Content:\t{self.content_url}\n"
                f"Fixity:\t{self.fixity}")

    def __repr__(self):
        return str(self)
class Generation:
    """
    A Generation of a Content Object within the Preservica data model.
    """

    def __init__(self, original: bool, active: bool, format_group: str, effective_date: str, bitstreams: list):
        self.original = original
        self.active = active
        # Back-reference; populated later by the owning content object code.
        self.content_object = None
        self.format_group = format_group
        self.effective_date = effective_date
        self.bitstreams = bitstreams

    def __str__(self):
        return (f"Active:\t\t\t{self.active}\n"
                f"Original:\t\t\t{self.original}\n"
                f"Format_group:\t{self.format_group}")

    def __repr__(self):
        return str(self)
class Entity:
    """
    Common base class for Assets, Folders and Content Objects.
    """

    def __init__(self, reference: str, title: str, description: str, security_tag: str, parent: str, metadata: dict):
        self.reference = reference
        self.title = title
        self.description = description
        self.security_tag = security_tag
        self.parent = parent
        self.metadata = metadata
        # Populated by the concrete subclasses.
        self.entity_type = None
        self.path = None
        self.tag = None

    def __str__(self):
        return (f"Ref:\t\t\t{self.reference}\n"
                f"Title:\t\t\t{self.title}\n"
                f"Description:\t{self.description}\n"
                f"Security Tag:\t{self.security_tag}\n"
                f"Parent:\t\t\t{self.parent}\n\n")

    def __repr__(self):
        return str(self)

    def has_metadata(self):
        """Return ``True`` when the entity has any metadata attached."""
        return bool(self.metadata)

    def metadata_namespaces(self):
        """Return the values of the metadata mapping as a list."""
        return [value for value in self.metadata.values()]
class Folder(Entity):
    """
    A Structural Object (Folder) within the Preservica data model.
    """

    def __init__(self, reference: str, title: str, description: str = None, security_tag: str = None,
                 parent: str = None, metadata: dict = None):
        Entity.__init__(self, reference, title, description, security_tag, parent, metadata)
        # Folder-specific type markers used when building API paths/XML.
        self.entity_type = EntityType.FOLDER
        self.path = SO_PATH
        self.tag = "StructuralObject"
class Asset(Entity):
"""
Class to represent the Information Object or Asset in the Preservica data model
"""
def __init__(self, reference: str, title: str, description: str = | |
# File: bmcremedy_connector.py
#
# Copyright (c) 2017-2022 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
import json
import re
import phantom.app as phantom
import phantom.rules as ph_rules
import requests
from bs4 import BeautifulSoup
from phantom.action_result import ActionResult
from phantom.base_connector import BaseConnector
# Local imports
import bmcremedy_consts as consts
class RetVal3(tuple):
    """Three-slot return tuple; missing trailing values default to ``None``.

    Used to bundle (status, data, extra) style return values.
    """

    def __new__(cls, val1, val2=None, val3=None):
        # Use ``cls`` rather than hard-coding RetVal3 so subclasses of
        # RetVal3 construct instances of themselves.
        return tuple.__new__(cls, (val1, val2, val3))
class BmcremedyConnector(BaseConnector):
""" This is an AppConnector class that inherits the BaseConnector class. It implements various actions supported by
BMC Remedy and helper methods required to run the actions.
"""
def __init__(self):
# Calling the BaseConnector's init function
super(BmcremedyConnector, self).__init__()
self._base_url = None
self._api_username = None
self._api_password = None
self._token = None
self._verify_server_cert = None
self._state = dict()
return
def initialize(self):
""" This is an optional function that can be implemented by the AppConnector derived class. Since the
configuration dictionary is already validated by the time this function is called, it's a good place to do any
extra initialization of any internal modules. This function MUST return a value of either phantom.APP_SUCCESS or
phantom.APP_ERROR. If this function returns phantom.APP_ERROR, then AppConnector::handle_action will not get
called.
"""
config = self.get_config()
# Initialize configuration parameters
self._base_url = config[consts.BMCREMEDY_CONFIG_SERVER].strip('/')
self._api_username = config[consts.BMCREMEDY_CONFIG_API_USERNAME]
self._api_password = config[consts.BMCREMEDY_CONFIG_API_PASSWORD]
self._verify_server_cert = config.get(consts.BMCREMEDY_CONFIG_SERVER_CERT, False)
# Load any saved configurations
self._state = self.load_state()
if not isinstance(self._state, dict):
self.debug_print("Resetting the state file with the default format")
self._state = {
"app_version": self.get_app_json().get('app_version')
}
return self.set_status(phantom.APP_ERROR, consts.BMCREMEDY_STATE_FILE_CORRUPT_ERR)
self._token = self._state.get('token')
# Return response_status
return phantom.APP_SUCCESS
def _check_login_status(self, action_result, response):
if not hasattr(response, 'headers'):
return action_result.set_status(phantom.APP_ERROR, "Response missing headers, cannot determine success")
x_ar_messages = response.headers.get('x-ar-messages')
if not x_ar_messages:
return phantom.APP_SUCCESS
# will need to parse the messages
try:
x_ar_messages = json.loads(x_ar_messages)
except:
return action_result.set_status(phantom.APP_ERROR, "Unable to process X-AR-Messages")
for curr_msg_dict in x_ar_messages:
message_text = curr_msg_dict.get('messageText')
if not message_text:
continue
if 'login failed' in message_text.lower():
return action_result.set_status(phantom.APP_ERROR, "Login failed, please check your credentials")
return phantom.APP_SUCCESS
def _get_error_message_from_exception(self, e):
"""
Get appropriate error message from the exception.
:param e: Exception object
:return: error message
"""
error_code = consts.ERR_CODE_MSG
error_msg = consts.ERR_MSG_UNAVAILABLE
try:
if hasattr(e, "args"):
if len(e.args) > 1:
error_code = e.args[0]
error_msg = e.args[1]
elif len(e.args) == 1:
error_code = consts.ERR_CODE_MSG
error_msg = e.args[0]
except:
pass
try:
if error_code in consts.ERR_CODE_MSG:
error_text = "Error Message: {}".format(error_msg)
else:
error_text = "Error Code: {}. Error Message: {}".format(error_code, error_msg)
except:
self.debug_print(consts.PARSE_ERR_MSG)
error_text = consts.PARSE_ERR_MSG
return error_text
def _validate_integer(self, action_result, parameter, key, allow_zero=False):
"""
Validate an integer.
:param action_result: Action result or BaseConnector object
:param parameter: input parameter
:param key: input parameter message key
:allow_zero: whether zero should be considered as valid value or not
:return: status phantom.APP_ERROR/phantom.APP_SUCCESS, integer value of the parameter or None in case of failure
"""
if parameter is not None:
try:
if not float(parameter).is_integer():
return action_result.set_status(phantom.APP_ERROR, consts.BMCREMEDY_VALID_INT_MSG.format(param=key)), None
parameter = int(parameter)
except:
return action_result.set_status(phantom.APP_ERROR, consts.BMCREMEDY_VALID_INT_MSG.format(param=key)), None
if parameter < 0:
return action_result.set_status(phantom.APP_ERROR, consts.BMCREMEDY_NON_NEG_INT_MSG.format(param=key)), None
if not allow_zero and parameter == 0:
return action_result.set_status(
phantom.APP_ERROR,
consts.BMCREMEDY_NON_NEG_NON_ZERO_INT_MSG.format(param=key)
), None
return phantom.APP_SUCCESS, parameter
def _parse_html_response(self, response):
# An html response, treat it like an error
status_code = response.status_code
try:
soup = BeautifulSoup(response.text, "html.parser")
# Remove the script, style, footer and navigation part from the HTML message
for element in soup(["script", "style", "footer", "nav"]):
element.extract()
error_text = soup.text
split_lines = error_text.split('\n')
split_lines = [x.strip() for x in split_lines if x.strip()]
error_text = '\n'.join(split_lines)
except:
error_text = "Cannot parse error details"
if not error_text:
error_text = "Empty response and no information received"
message = "Status Code: {}. Error Details: {}".format(status_code, error_text)
message = message.replace('{', '{{').replace('}', '}}')
return message
def _generate_api_token(self, action_result):
""" Generate new token based on the credentials provided. Token generated is valid for 60 minutes.
:param action_result: object of ActionResult class
:return: status phantom.APP_SUCCESS/phantom.APP_ERROR (along with appropriate message)
"""
self._token = ""
# Prepare request headers
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
# Prepare request body
payload = {'username': self._api_username, 'password': self._<PASSWORD>}
# Make call
response_status, response_dict, response = self._make_rest_call(consts.BMCREMEDY_TOKEN_ENDPOINT, action_result,
headers=headers, data=payload)
# Something went wrong with the request
if phantom.is_fail(response_status):
return action_result.get_status()
if not response_dict:
self.debug_print(consts.BMCREMEDY_TOKEN_GENERATION_ERROR_MSG)
return action_result.set_status(phantom.APP_ERROR, consts.BMCREMEDY_TOKEN_GENERATION_ERROR_MSG)
# check the header for any message that denote a failure
ret_val = self._check_login_status(action_result, response)
if phantom.is_fail(ret_val):
return action_result.get_status()
# Saving the token to be used in subsequent actions
self._state['token'] = self._token = response_dict["content"].decode("utf-8")
return phantom.APP_SUCCESS
    def _provide_attachment_details(self, attachment_list, action_result):
        """ Helper function that is used to get attachment from the vault, and provide attachment details which can be
        used to add attachment to an incident.

        :param attachment_list: comma-separated string of vault IDs
        :param action_result: object of ActionResult class
        :return: status (success/failure) and (add_attachment_params_dict dictionary having attachment related
        information and attachment_data dictionary containing attachment) / None
        """
        file_obj = []  # binary contents of each resolved attachment
        filename = []  # filenames matching file_obj by index
        attachment_data = dict()
        add_attachment_params_dict = dict()
        # Normalise the comma-separated vault ID string, dropping blanks.
        attachment_list = [value.strip() for value in attachment_list.split(',') if value.strip()]
        if not attachment_list:
            self.debug_print(consts.BMCREMEDY_ERR_INVALID_FIELDS.format(field='vault_id'))
            return action_result.set_status(
                phantom.APP_ERROR,
                consts.BMCREMEDY_ERR_INVALID_FIELDS.format(field='vault_id')
            ), None, None

        # At most, three attachments should be provided
        if len(attachment_list) > 3:
            self.debug_print(consts.BMCREMEDY_ATTACHMENT_LIMIT_EXCEED)
            return action_result.set_status(phantom.APP_ERROR, consts.BMCREMEDY_ATTACHMENT_LIMIT_EXCEED), None, None
        try:
            # Searching for file with vault id in current container
            _, _, files_array = (ph_rules.vault_info(container_id=self.get_container_id()))
            files_array = list(files_array)
            for vault_id in attachment_list:
                file_found = False
                for file_data in files_array:
                    if file_data[consts.BMCREMEDY_JSON_VAULT_ID] == vault_id:
                        # Getting filename to use
                        filename.append(file_data['name'])

                        # Reading binary data of file
                        with open(file_data.get('path'), 'rb') as f:
                            file_obj.append(f.read())
                        file_found = True
                        break
                if not file_found:
                    # Any unknown vault ID aborts the whole request.
                    self.debug_print("{}: {}".format(consts.BMCREMEDY_UNKNOWN_VAULT_ID, vault_id))
                    return action_result.set_status(
                        phantom.APP_ERROR,
                        "{}: {}".format(consts.BMCREMEDY_UNKNOWN_VAULT_ID, vault_id)
                    ), None, None
        except Exception as e:
            err_msg = self._get_error_message_from_exception(e)
            return action_result.set_status(phantom.APP_ERROR, err_msg), None, None

        # Build the Remedy work-log fields: 'z2AF Work Log01'..'03' carry the
        # filenames, and 'attach-z2AF Work Log0N' carries each file's payload.
        for index, value in enumerate(file_obj):
            add_attachment_params_dict['z2AF Work Log0{}'.format(index + 1)] = filename[index]
            attachment_data['attach-z2AF Work Log0{}'.format(index + 1)] = (filename[index], value)

        return phantom.APP_SUCCESS, add_attachment_params_dict, attachment_data
def _add_attachment(self, attachment_data, action_result):
""" Helper function used to add attachment to an incident.
:param attachment_data: dictionary containing details of attachment
:param action_result: Object of ActionResult() class
:return: status (success/failure) and (response obtained after adding attachment or None)
"""
# If attachment is to be added, then details will be provided in 'entry' field
files = []
data_params = None
if "entry" in attachment_data:
for key, value in attachment_data.items():
if key == "entry":
tup = (key, (None, json.dumps(value).encode(), 'text/json'))
else:
tup = (key, (value[0], value[1]))
files.append(tup)
else:
data_params = json.dumps(attachment_data)
# Create incident using given input parameters
response_status, response_data = self._make_rest_call_abstract(consts.BMCREMEDY_COMMENT_ENDPOINT, action_result,
data=data_params, method="post",
files=files)
if phantom.is_fail(response_status):
return action_result.get_status(), None
return phantom.APP_SUCCESS, response_data
def _get_url(self, action_result, incident_number):
""" Helper function returns the url for the set status and update ticket action.
:param incident_number: ID of incident
:return: status phantom.APP_SUCCESS/phantom.APP_ERROR (along with appropriate message) and url to be used
"""
params = {'q': "'Incident Number'=\"{}\"".format(incident_number)}
response_status, response_data = self._make_rest_call_abstract(consts.BMCREMEDY_GET_TICKET, action_result,
params=params, method='get')
if phantom.is_fail(response_status):
return phantom.APP_ERROR, None
# If incident is not found
if not response_data.get("entries"):
return phantom.APP_SUCCESS, None
try:
url = response_data["entries"][0].get('_links', {}).get('self', [])[0].get('href', None)
if url:
url = re.findall("(?:/api).*", url)[0]
except Exception as e:
self.debug_print(consts.BMCREMEDY_ERROR_FETCHING_URL.format(error=e))
return phantom.APP_ERROR, None
return phantom.APP_SUCCESS, url
def _make_rest_call_abstract(self, endpoint, action_result, data=None, params=None, method="post",
accept_headers=None, files=None):
""" This method generates a new token if it is not available or if the existing token has expired
and makes the call using _make_rest_call method.
:param endpoint: REST endpoint
:param action_result: object of ActionResult class
:param data: request body
:param params: request params
:param method: GET/POST/PUT/DELETE (Default will be POST)
:param accept_headers: requests headers
:return: status phantom.APP_SUCCESS/phantom.APP_ERROR (along with appropriate message) and API response
"""
# Use this object for _make_rest_call
# Final status of action_result will be determined after retry, in case the token is expired
intermediate_action_result = ActionResult()
response_data = None
| |
# coding: utf-8
"""
Sunshine Conversations API
The version of the OpenAPI document: 9.4.5
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from sunshine_conversations_client.api_client import ApiClient
from sunshine_conversations_client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class SwitchboardsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_switchboard(self, app_id, **kwargs): # noqa: E501
"""Create Switchboard # noqa: E501
Create a switchboard. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_switchboard(app_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str app_id: Identifies the app. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: SwitchboardResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_switchboard_with_http_info(app_id, **kwargs) # noqa: E501
    def create_switchboard_with_http_info(self, app_id, **kwargs):  # noqa: E501
        """Create Switchboard  # noqa: E501

        Create a switchboard.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.create_switchboard_with_http_info(app_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str app_id: Identifies the app. (required)
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(SwitchboardResponse, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE: locals() is captured before any other local variable is
        # created, so it holds exactly {self, app_id, kwargs}.
        local_var_params = locals()

        all_params = [
            'app_id'
        ]
        # Generic per-call options accepted by every generated endpoint.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten the accepted ones
        # into local_var_params for uniform access below.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_switchboard" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'app_id' is set
        if self.api_client.client_side_validation and ('app_id' not in local_var_params or  # noqa: E501
                                                       local_var_params['app_id'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `app_id` when calling `create_switchboard`")  # noqa: E501

        collection_formats = {}

        # 'app_id' fills the {appId} template in the resource path below.
        path_params = {}
        if 'app_id' in local_var_params:
            path_params['appId'] = local_var_params['app_id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['basicAuth', 'bearerAuth']  # noqa: E501

        return self.api_client.call_api(
            '/v2/apps/{appId}/switchboards', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='SwitchboardResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def delete_switchboard(self, app_id, switchboard_id, **kwargs): # noqa: E501
"""Delete Switchboard # noqa: E501
Deletes the switchboard and all its switchboard integrations. The integrations linked to these switchboard integrations are not deleted and will start receiving all conversation events. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_switchboard(app_id, switchboard_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str app_id: Identifies the app. (required)
:param str switchboard_id: Identifies the switchboard. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_switchboard_with_http_info(app_id, switchboard_id, **kwargs) # noqa: E501
    def delete_switchboard_with_http_info(self, app_id, switchboard_id, **kwargs):  # noqa: E501
        """Delete Switchboard  # noqa: E501

        Deletes the switchboard and all its switchboard integrations. The integrations linked to these switchboard integrations are not deleted and will start receiving all conversation events.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.delete_switchboard_with_http_info(app_id, switchboard_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str app_id: Identifies the app. (required)
        :param str switchboard_id: Identifies the switchboard. (required)
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(object, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE: locals() is captured before any other local variable is
        # created, so it holds exactly {self, app_id, switchboard_id, kwargs}.
        local_var_params = locals()

        all_params = [
            'app_id',
            'switchboard_id'
        ]
        # Generic per-call options accepted by every generated endpoint.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten the accepted ones
        # into local_var_params for uniform access below.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_switchboard" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'app_id' is set
        if self.api_client.client_side_validation and ('app_id' not in local_var_params or  # noqa: E501
                                                       local_var_params['app_id'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `app_id` when calling `delete_switchboard`")  # noqa: E501
        # verify the required parameter 'switchboard_id' is set
        if self.api_client.client_side_validation and ('switchboard_id' not in local_var_params or  # noqa: E501
                                                       local_var_params['switchboard_id'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `switchboard_id` when calling `delete_switchboard`")  # noqa: E501

        collection_formats = {}

        # Both path parameters fill templates in the resource path below.
        path_params = {}
        if 'app_id' in local_var_params:
            path_params['appId'] = local_var_params['app_id']  # noqa: E501
        if 'switchboard_id' in local_var_params:
            path_params['switchboardId'] = local_var_params['switchboard_id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['basicAuth', 'bearerAuth']  # noqa: E501

        return self.api_client.call_api(
            '/v2/apps/{appId}/switchboards/{switchboardId}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='object',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def list_switchboards(self, app_id, **kwargs): # noqa: E501
"""List Switchboards # noqa: E501
Lists all switchboards belonging to the app. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_switchboards(app_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str app_id: Identifies the app. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: SwitchboardListResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_switchboards_with_http_info(app_id, **kwargs) # noqa: E501
def list_switchboards_with_http_info(self, app_id, **kwargs): # noqa: E501
"""List Switchboards # noqa: E501
Lists all switchboards belonging to the app. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_switchboards_with_http_info(app_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str app_id: Identifies the app. (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(SwitchboardListResponse, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'app_id'
]
all_params.extend(
[
| |
for the first 5 is
``[1, 1, 5, 5, 1]`` and the window for the second 5 is
``[1, 5, 5, 1, 1]``. For both points, the other 5 in the window
prevents the center 5 from being considered an outlier.
For another example, make up some data and, with carefully chosen
inputs, demonstrate how the routine runs by plotting one iteration
at a time:
.. plot::
:context: close-figs
>>> import matplotlib.pyplot as plt
>>> np.set_printoptions(linewidth=65)
>>> x = [100, 2, 3, -4, 25, -6, 6, 3, -2, 4, -2, -100]
>>> _ = plt.figure('Example', figsize=(8, 11))
>>> plt.clf()
>>> for i in range(5):
... s = dsp.despike(x, n=9, sigma=2, maxiter=1,
... threshold_sigma=0.1,
... exclude_point='middle')
... _ = plt.subplot(5, 1, i+1)
... _ = plt.plot(x)
... _ = plt.plot(s.hilim, 'k--')
... _ = plt.plot(s.lolim, 'k--')
... _ = plt.title(f'Iteration {i+1}')
... x = s.x
>>> plt.tight_layout()
>>> s.x
array([ 2, 3, 6, 3, -2, 4, -2])
Run all iterations at once to see what ``s.pv`` looks like:
>>> x = [100, 2, 3, -4, 25, -6, 6, 3, -2, 4, -2, -100]
>>> s = dsp.despike(x, n=9, sigma=2,
... threshold_sigma=0.1,
... exclude_point='middle')
>>> s.x
array([ 2, 3, 6, 3, -2, 4, -2])
>>> s.pv
array([ True, False, False, True, True, True, False, False,
False, False, False, True], dtype=bool)
"""
x = np.atleast_1d(x)
if x.ndim > 1:
raise ValueError("`x` must be 1d")
if n > x.size:
n = x.size - 1
min_limit = _get_min_limit(x, n, threshold_sigma, threshold_value)
PV = np.zeros(x.shape, bool)
# start generator:
gen = _find_outlier_peaks(x, n, sigma, min_limit, xp=exclude_point)
for i, (pv, hi, lo) in zip(itertools.count(1), gen):
if pv is None:
break
PV[pv] = True
if maxiter > 0 and i >= maxiter:
break
return SimpleNamespace(x=x[~PV], pv=PV, hilim=hi, lolim=lo, niter=i)
def _sweep_out_priors_diff(y, i, limit, ave):
# see if we can consider points before the detected outlier
# also as outliers:
pv = [i]
lim = limit[i]
av = ave[i]
next_y = y[i + 1]
for k in range(i - 1, -1, -1):
new_dy = next_y - y[k]
if abs(new_dy - av) <= lim:
break
pv.append(k)
pv.reverse()
return pv
def _sweep_out_nexts_diff(y, i, limit, ave):
# see if we can consider points after the detected outlier
# also as outliers:
pv = [i]
lim = limit[i - 1]
av = ave[i - 1]
prev_y = y[i - 1]
for k in range(i + 1, y.size):
new_dy = y[k] - prev_y
if abs(new_dy - av) <= lim:
break
pv.append(k)
return pv
def _outs_first_diff(
    y, dy, n, sigma, min_limit, xp, ave, dy_delta, var, std, limit, dpv
):
    """
    Generator used by the diff-based despiker when `exclude_point` is
    'first' (or 0): repeatedly yields the index list of the *last*
    detected spike region in `y` (or None when nothing is flagged),
    then patches `y`, `dy` and all rolling statistics in place before
    re-testing.

    All array arguments are modified in place. The consumer is expected
    to stop iterating once None is yielded.
    """
    while True:
        if dpv.any():
            # keep only last one ... previous ones can change
            i = dpv.nonzero()[0][-1]
            # since we're grabbing last spike in dy, that index
            # is also what we need for y:
            #   dy -> y
            #   0  -> 1-0
            #   1  -> 2-1
            #   2  -> 3-2
            # say 1 of dy is last spike ... 2 isn't. So 3-2
            # of original is okay. spike in original has to be 1.
            pv = _sweep_out_priors_diff(y, i, limit, ave)
        else:
            pv = None
        yield pv
        i, j = pv[0], pv[-1]
        if i == 0:
            # Spike reaches the first sample; nothing earlier left to
            # re-check. Signal the consumer to stop (the generator is
            # not resumed after yielding None).
            yield None  # we're done
        dpv[i : j + 1] = False
        # To determine if point before i is a spike, need n-1
        # valid points after j:
        k = min(y.size, j + n)
        count = k - (j + 1)  # n-1 if away from end
        # shift good points backward in time to get rid of
        # spikes:
        #          <---
        #   ......ssss+++++  ==>  ......+++++
        #         i   j
        y[i : i + count] = y[j + 1 : k]
        # update only sections that need it: from i-n to i
        j = i
        i = max(i - n, 0)
        # Recompute the first difference and the windowed statistics
        # (mean, variance, std, threshold, flag vector) over the only
        # stretch that the shift above could have changed.
        dy[i:k] = np.diff(y[i : k + 1])
        ave[i:j] = exclusive_sgfilter(dy[i:k], n, exclude_point=xp)[: j - i]
        dy_delta[i:j] = abs(dy[i:j] - ave[i:j])
        avsq = exclusive_sgfilter(dy[i:k] ** 2, n, exclude_point=xp)[: j - i]
        var[i:j] = avsq - ave[i:j] ** 2
        # use abs to care of negative numerical zeros:
        std[i:j] = np.sqrt(abs(var[i:j]))
        limit[i:j] = np.fmax(sigma * std[i:j], min_limit)
        dpv[i:j] = dy_delta[i:j] > limit[i:j]
def _outs_last_diff(
    y, dy, n, sigma, min_limit, xp, ave, dy_delta, var, std, limit, dpv
):
    """
    Generator used by the diff-based despiker when `exclude_point` is
    'last' (or ``n - 1``): repeatedly yields the index list of the
    *first* detected spike region in `y` (or None when nothing is
    flagged), then patches `y`, `dy` and all rolling statistics in
    place before re-testing. Mirror image of :func:`_outs_first_diff`.

    All array arguments are modified in place. The consumer is expected
    to stop iterating once None is yielded.
    """
    while True:
        if dpv.any():
            # keep only first one ... later ones can change
            i = dpv.nonzero()[0][0]
            # since we're grabbing first spike in dy, that index
            # plus 1 is what we need for y:
            #   dy -> y
            #   0  -> 1-0
            #   1  -> 2-1
            #   2  -> 3-2
            # say 1 of dy is first spike ... 0 isn't. So 1-0
            # of original is okay. spike in original has to be 2.
            pv = _sweep_out_nexts_diff(y, i + 1, limit, ave)
        else:
            pv = None
        yield pv
        i, j = pv[0], pv[-1]
        if j == dy.size:
            # Spike reaches the last sample; nothing later left to
            # re-check. Signal the consumer to stop (the generator is
            # not resumed after yielding None).
            yield None  # we're done
        dpv[i - 1 : j] = False
        # To determine if point after j is a spike, need n-1
        # valid points before i:
        k = max(0, i - n + 1)
        count = i - k  # n-1 if away from start
        # shift good points forward in time to get rid of spikes:
        #         --->
        #   ......ssss+++++  ==>  ......+++++
        #         i   j
        y[j - count + 1 : j + 1] = y[k:i]
        # update only sections that need it: from j to j+n
        i = j
        j = min(j + n, dy.size)
        m = i - j  # -(j-i) ... keep last j-i points
        # Recompute the first difference and the windowed statistics
        # (mean, variance, std, threshold, flag vector) over the only
        # stretch that the shift above could have changed.
        dy[k:j] = np.diff(y[k : j + 1])
        ave[i:j] = exclusive_sgfilter(dy[k:j], n, exclude_point=xp)[m:]
        dy_delta[i:j] = abs(dy[i:j] - ave[i:j])
        avsq = exclusive_sgfilter(dy[k:j] ** 2, n, exclude_point=xp)[m:]
        var[i:j] = avsq - ave[i:j] ** 2
        # use abs to care of negative numerical zeros:
        std[i:j] = np.sqrt(abs(var[i:j]))
        limit[i:j] = np.fmax(sigma * std[i:j], min_limit)
        dpv[i:j] = dy_delta[i:j] > limit[i:j]
def _find_outlier_peaks_diff(y, dy, n, sigma, min_limit, xp):
    """
    Seed the diff-based spike search and dispatch to the proper worker.

    Computes the initial exclusive moving average, deviation, variance,
    standard deviation, threshold and outlier-flag arrays for ``dy``,
    then delegates to :func:`_outs_first_diff` or :func:`_outs_last_diff`
    depending on `exclude_point` (`xp`). Raises ValueError for any other
    `xp` value.
    """
    ave = exclusive_sgfilter(dy, n, exclude_point=xp)
    avsq = exclusive_sgfilter(dy ** 2, n, exclude_point=xp)
    dy_delta = abs(dy - ave)
    var = avsq - ave ** 2
    # use abs to care of negative numerical zeros:
    std = np.sqrt(abs(var))
    limit = np.fmax(sigma * std, min_limit)
    dpv = dy_delta > limit
    state = (y, dy, n, sigma, min_limit, xp, ave, dy_delta, var, std, limit, dpv)
    if xp in ("first", 0):
        yield from _outs_first_diff(*state)
    elif xp in ("last", n - 1):
        yield from _outs_last_diff(*state)
    else:
        raise ValueError("invalid `exclude_point` for :func:`despike_diff` routine")
def despike_diff(
x,
n,
sigma=8.0,
maxiter=-1,
threshold_sigma=2.0,
threshold_value=None,
exclude_point="first",
**kwargs,
):
"""
Delete outlier data points from signal based on level changes
Parameters
----------
x : 1d array_like
Signal to de-spike.
n : odd integer
Number of points for moving average; if even, it is reset to
``n+1``. If greater than the dimension of `x`, it is reset to
the dimension or 1 less.
sigma : real scalar; optional
Number of standard deviations beyond which a point is
considered an outlier. The default value is quite high; this
is possible because the point itself is excluded from the
calculations.
maxiter : integer; optional
Maximum number of iterations of outlier removal allowed. If
`exclude_point` is 'first', only the last spike is removed on
each iteration; if it is 'last', only the first spike is
removed on each iteration. It is done this way because
removing a spike can expose other points as spikes (but didn't
appear to be because the removed spike was present). If <= 0,
there is no set limit and the looping will stop when no more
outliers are detected. Routine will always run at least 1 loop
(setting `maxiter` to 0 is the same as setting it to 1).
threshold_sigma : scalar; optional
Number of standard deviations below which all data is kept.
This standard deviation is computed from `x`. Let | |
O
ATOM 5 CB GLU 13.304 12.735 12.196 1.00 0.00 C
ATOM 6 CG GLU 11.939 12.350 11.647 1.00 0.00 C
ATOM 7 CD GLU 11.354 11.134 12.339 1.00 0.00 C
ATOM 8 OE1 GLU 10.290 10.744 11.992 1.00 0.00 O
ATOM 9 OE2 GLU 12.058 10.554 13.306 1.00 0.00 O
ATOM 10 H GLU 14.917 13.526 9.882 1.00 0.00 H
ATOM 11 HA GLU 13.309 14.719 11.647 1.00 0.00 H
ATOM 12 HB2 GLU 13.898 11.979 12.069 1.00 0.00 H
ATOM 13 HB3 GLU 13.201 12.916 13.144 1.00 0.00 H
ATOM 14 HG2 GLU 11.327 13.091 11.773 1.00 0.00 H
ATOM 15 HG3 GLU 12.024 12.145 10.703 1.00 0.00 H
ATOM 16 HE2 GLU 11.561 9.816 13.635 1.00 0.00 H
TER
END
"""
# PDB-format snippets for single amino-acid residues (one CRYST1 record,
# ATOM records, TER, END). The ``*_h`` variants also carry explicit
# hydrogen ATOM records; the plain variants list heavy atoms only.
# These strings are data — their contents must not be reformatted.

# Glutamate (GLU), heavy atoms only.
glu = """
CRYST1 26.121 25.436 23.126 90.00 90.00 90.00 P 1
ATOM N GLU 14.107 13.721 10.099 1.00 0.00 N
ATOM CA GLU 13.923 13.965 11.525 1.00 0.00 C
ATOM C GLU 15.250 14.315 12.188 1.00 0.00 C
ATOM O GLU 15.375 15.353 12.838 1.00 0.00 O
ATOM CB GLU 13.312 12.736 12.200 1.00 0.00 C
ATOM CG GLU 11.947 12.345 11.656 1.00 0.00 C
ATOM CD GLU 11.368 11.129 12.352 1.00 0.00 C
ATOM OE1 GLU 10.246 10.714 11.992 1.00 0.00 O
ATOM OE2 GLU 12.034 10.587 13.260 1.00 0.00 O
TER
END
"""
# Glycine (GLY) with explicit hydrogens.
gly_h = """
CRYST1 22.802 22.387 22.249 90.00 90.00 90.00 P 1
ATOM N GLY 10.690 11.634 12.050 1.00 0.00 N
ATOM CA GLY 9.834 10.465 12.109 1.00 0.00 C
ATOM C GLY 8.514 10.688 11.397 1.00 0.00 C
ATOM O GLY 7.548 11.163 11.994 1.00 0.00 O
ATOM H GLY 11.449 11.532 12.440 1.00 0.00 H
ATOM HA2 GLY 10.284 9.712 11.694 1.00 0.00 H
ATOM HA3 GLY 9.651 10.243 13.035 1.00 0.00 H
TER
END
"""
# Glycine (GLY), heavy atoms only.
gly = """
CRYST1 22.802 22.387 22.249 90.00 90.00 90.00 P 1
ATOM N GLY 10.690 11.634 12.050 1.00 0.00 N
ATOM CA GLY 9.834 10.465 12.109 1.00 0.00 C
ATOM C GLY 8.514 10.688 11.397 1.00 0.00 C
ATOM O GLY 7.548 11.163 11.994 1.00 0.00 O
TER
END
"""
# Histidine (HIS) with explicit hydrogens.
his_h = """
CRYST1 24.176 25.565 23.504 90.00 90.00 90.00 P 1
ATOM N HIS 14.439 12.693 12.792 1.00 0.00 N
ATOM CA HIS 13.448 13.609 12.240 1.00 0.00 C
ATOM C HIS 13.157 14.742 13.219 1.00 0.00 C
ATOM O HIS 13.216 14.556 14.434 1.00 0.00 O
ATOM CB HIS 12.156 12.860 11.912 1.00 0.00 C
ATOM CG HIS 12.333 11.761 10.911 1.00 0.00 C
ATOM ND1 HIS 12.206 11.963 9.554 1.00 0.00 N
ATOM CD2 HIS 12.628 10.449 11.070 1.00 0.00 C
ATOM CE1 HIS 12.414 10.823 8.920 1.00 0.00 C
ATOM NE2 HIS 12.672 9.888 9.817 1.00 0.00 N
ATOM H HIS 14.112 12.107 13.330 1.00 0.00 H
ATOM HA HIS 13.796 14.001 11.412 1.00 0.00 H
ATOM HB2 HIS 11.513 13.490 11.550 1.00 0.00 H
ATOM HB3 HIS 11.808 12.466 12.727 1.00 0.00 H
ATOM HD1 HIS 12.020 12.714 9.178 1.00 0.00 H
ATOM HD2 HIS 12.773 10.010 11.877 1.00 0.00 H
ATOM HE1 HIS 12.384 10.699 7.998 1.00 0.00 H
ATOM HE2 HIS 12.841 9.063 9.644 1.00 0.00 H
TER
END
"""
# Histidine (HIS), heavy atoms only.
his = """
CRYST1 24.176 25.565 23.504 90.00 90.00 90.00 P 1
ATOM N HIS 14.439 12.693 12.792 1.00 0.00 N
ATOM CA HIS 13.448 13.609 12.240 1.00 0.00 C
ATOM C HIS 13.157 14.742 13.219 1.00 0.00 C
ATOM O HIS 13.216 14.556 14.434 1.00 0.00 O
ATOM CB HIS 12.156 12.860 11.912 1.00 0.00 C
ATOM CG HIS 12.333 11.761 10.911 1.00 0.00 C
ATOM ND1 HIS 12.206 11.963 9.554 1.00 0.00 N
ATOM CD2 HIS 12.628 10.449 11.070 1.00 0.00 C
ATOM CE1 HIS 12.414 10.823 8.920 1.00 0.00 C
ATOM NE2 HIS 12.672 9.888 9.817 1.00 0.00 N
TER
END
"""
# Isoleucine (ILE) with explicit hydrogens (ATOM serial numbers present).
ile_h = """
CRYST1 25.412 23.292 23.916 90.00 90.00 90.00 P 1
ATOM 1 N ILE 12.301 11.470 15.047 1.00 0.00 N
ATOM 2 CA ILE 11.713 11.920 13.792 1.00 0.00 C
ATOM 3 C ILE 10.597 10.981 13.342 1.00 0.00 C
ATOM 4 O ILE 10.391 9.920 13.932 1.00 0.00 O
ATOM 5 CB ILE 12.774 12.021 12.676 1.00 0.00 C
ATOM 6 CG1 ILE 13.536 10.697 12.538 1.00 0.00 C
ATOM 7 CG2 ILE 13.738 13.163 12.972 1.00 0.00 C
ATOM 8 CD1 ILE 14.482 10.647 11.352 1.00 0.00 C
ATOM 9 H ILE 11.834 10.877 15.460 1.00 0.00 H
ATOM 10 HA ILE 11.328 12.801 13.921 1.00 0.00 H
ATOM 11 HB ILE 12.324 12.207 11.837 1.00 0.00 H
ATOM 12 HG12 ILE 14.065 10.555 13.339 1.00 0.00 H
ATOM 13 HG13 ILE 12.901 9.972 12.433 1.00 0.00 H
ATOM 14 HG21 ILE 14.177 13.433 12.150 1.00 0.00 H
ATOM 15 HG22 ILE 13.238 13.910 13.337 1.00 0.00 H
ATOM 16 HG23 ILE 14.396 12.862 13.618 1.00 0.00 H
ATOM 17 HD11 ILE 14.054 11.067 10.589 1.00 0.00 H
ATOM 18 HD12 ILE 15.299 11.120 11.576 1.00 0.00 H
ATOM 19 HD13 ILE 14.684 9.720 11.151 1.00 0.00 H
TER
END
"""
# Isoleucine (ILE), heavy atoms only.
ile = """
CRYST1 25.412 23.292 23.916 90.00 90.00 90.00 P 1
ATOM N ILE 12.305 11.475 15.047 1.00 0.00 N
ATOM CA ILE 11.714 11.920 13.791 1.00 0.00 C
ATOM C ILE 10.602 10.976 13.345 1.00 0.00 C
ATOM O ILE 10.397 9.918 13.940 1.00 0.00 O
ATOM CB ILE 12.773 12.022 12.675 1.00 0.00 C
ATOM CG1 ILE 13.530 10.696 12.532 1.00 0.00 C
ATOM CG2 ILE 13.740 13.159 12.972 1.00 0.00 C
ATOM CD1 ILE 14.484 10.651 11.353 1.00 0.00 C
TER
END
"""
# Leucine (LEU) with explicit hydrogens (ATOM serial numbers present).
leu_h = """
CRYST1 23.694 25.206 23.362 90.00 90.00 90.00 P 1
ATOM 1 N LEU 11.496 10.501 10.237 1.00 0.00 N
ATOM 2 CA LEU 12.729 11.070 10.767 1.00 0.00 C
ATOM 3 C LEU 13.677 9.966 11.221 1.00 0.00 C
ATOM 4 O LEU 13.743 8.900 10.608 1.00 0.00 O
ATOM 5 CB LEU 12.423 12.006 11.939 1.00 0.00 C
ATOM 6 CG LEU 11.480 13.181 11.658 1.00 0.00 C
ATOM 7 CD1 LEU 11.040 13.830 12.961 1.00 0.00 C
ATOM 8 CD2 LEU 12.129 14.208 10.739 1.00 0.00 C
ATOM 9 H LEU 11.216 10.881 9.518 1.00 0.00 H
ATOM 10 HA LEU 13.172 11.580 10.071 1.00 0.00 H
ATOM 11 HB2 LEU 13.261 12.380 12.253 1.00 0.00 H
ATOM 12 HB3 LEU 12.022 11.481 12.649 1.00 0.00 H
ATOM 13 HG LEU 10.687 12.845 11.213 1.00 0.00 H
ATOM 14 HD11 LEU 10.578 13.170 13.502 1.00 0.00 H
ATOM 15 HD12 LEU 11.823 14.154 13.432 1.00 0.00 H
ATOM 16 HD13 LEU 10.444 14.569 12.760 1.00 0.00 H
ATOM 17 HD21 LEU 13.010 14.427 11.081 1.00 0.00 H
ATOM 18 HD22 LEU 12.203 13.835 9.847 1.00 0.00 H
ATOM 19 HD23 LEU 11.575 15.004 10.717 1.00 0.00 H
TER
END
"""
# Leucine (LEU), heavy atoms only (ATOM serial numbers present).
leu = """
CRYST1 23.694 25.206 23.362 90.00 90.00 90.00 P 1
ATOM 1 N LEU 11.498 10.510 10.231 1.00 0.00 N
ATOM 2 CA LEU 12.730 11.073 10.769 1.00 0.00 C
ATOM 3 C LEU 13.674 9.966 11.221 1.00 0.00 C
ATOM 4 O LEU 13.739 8.902 10.605 1.00 0.00 O
ATOM 5 CB LEU 12.421 12.004 11.944 1.00 0.00 C
ATOM 6 CG LEU 11.478 13.179 11.661 1.00 0.00 C
ATOM 7 CD1 LEU 11.043 13.834 12.963 1.00 0.00 C
ATOM 8 CD2 LEU 12.126 14.201 10.736 1.00 0.00 C
TER
END
"""
# Lysine (LYS) with explicit hydrogens (includes HZ1-HZ3 on NZ).
lys_h = """
CRYST1 25.386 27.917 22.984 90.00 90.00 90.00 P 1
ATOM N LYS 10.286 15.612 10.841 1.00 0.00 N
ATOM CA LYS 11.191 15.832 11.962 1.00 0.00 C
ATOM C LYS 11.489 17.317 12.133 1.00 0.00 C
ATOM O LYS 10.576 18.133 12.258 1.00 0.00 O
ATOM CB LYS 12.495 15.060 11.752 1.00 0.00 C
ATOM CG LYS 12.313 13.555 11.639 1.00 0.00 C
ATOM CD LYS 13.643 12.847 11.440 1.00 0.00 C
ATOM CE LYS 13.461 11.342 11.328 1.00 0.00 C
ATOM NZ LYS 14.756 10.635 11.132 1.00 0.00 N
ATOM H LYS 9.453 15.663 11.049 1.00 0.00 H
ATOM HA LYS 10.771 15.506 12.786 1.00 0.00 H
ATOM HB2 LYS 12.912 15.370 10.932 1.00 0.00 H
ATOM HB3 LYS 13.084 15.232 12.503 1.00 0.00 H
ATOM HG2 LYS 11.909 13.220 12.455 1.00 0.00 H
ATOM HG3 LYS 11.746 13.357 10.878 1.00 0.00 H
ATOM HD2 LYS 14.056 13.166 10.622 1.00 0.00 H
ATOM HD3 LYS 14.219 13.028 12.199 1.00 0.00 H
ATOM HE2 LYS 13.055 11.009 12.143 1.00 0.00 H
ATOM HE3 LYS 12.892 11.147 10.567 1.00 0.00 H
ATOM HZ1 LYS 14.617 9.758 11.070 1.00 0.00 H
ATOM HZ2 LYS 15.297 10.794 11.821 1.00 0.00 H
ATOM HZ3 LYS 15.148 10.919 10.385 1.00 0.00 H
TER
END
"""
lys = """
CRYST1 25.386 27.917 22.984 90.00 90.00 90.00 P 1
ATOM N LYS 10.286 15.612 10.841 1.00 0.00 N
ATOM CA LYS 11.191 15.832 11.962 1.00 0.00 C
ATOM C LYS 11.489 17.317 12.133 1.00 0.00 | |
method ``to_deterministic()`` for an alternative and for
details about what the "deterministic mode" actually does.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.Solarize(0.5, threshold=(32, 128))
Invert the colors in ``50`` percent of all images for pixels with a
value between ``32`` and ``128`` or more. The threshold is sampled once
per image. The thresholding operation happens per channel.
"""
def __init__(
self,
p=1.0,
threshold=128,
seed=None,
name=None,
random_state="deprecated",
deterministic="deprecated",
):
super(Solarize, self).__init__(
p=p,
per_channel=False,
min_value=None,
max_value=None,
threshold=threshold,
invert_above_threshold=True,
seed=seed,
name=name,
random_state=random_state,
deterministic=deterministic,
)
class Posterize(colorlib.Posterize):
    """Augmenter with identical outputs to PIL's ``posterize()`` function.

    This augmenter quantizes each array component to ``N`` bits.

    This class is currently an alias for
    :class:`~imgaug.augmenters.color.Posterize`, which again is an alias
    for :class:`~imgaug.augmenters.color.UniformColorQuantizationToNBits`,
    i.e. all three classes are right now guaranteed to have the same
    outputs as PIL's function.

    Added in 0.4.0.

    **Supported dtypes**:

    See :class:`~imgaug.augmenters.color.Posterize`.

    """
class Equalize(meta.Augmenter):
    """Equalize the image histogram.

    This augmenter has identical outputs to ``PIL.ImageOps.equalize``.

    Added in 0.4.0.

    **Supported dtypes**:

    See :func:`~imgaug.augmenters.pillike.equalize_`.

    Parameters
    ----------
    seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    name : None or str, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        Old name for parameter `seed`.
        Its usage will not yet cause a deprecation warning,
        but it is still recommended to use `seed` now.
        Outdated since 0.4.0.

    deterministic : bool, optional
        Deprecated since 0.4.0.
        See method ``to_deterministic()`` for an alternative and for
        details about what the "deterministic mode" actually does.

    Examples
    --------
    >>> import imgaug.augmenters as iaa
    >>> aug = iaa.pillike.Equalize()

    Equalize the histograms of all input images.

    """

    # Added in 0.4.0.
    def __init__(self, seed=None, name=None,
                 random_state="deprecated", deterministic="deprecated"):
        super(Equalize, self).__init__(
            seed=seed,
            name=name,
            random_state=random_state,
            deterministic=deterministic,
        )

    # Added in 0.4.0.
    def _augment_batch_(self, batch, random_state, parents, hooks):
        # pylint: disable=no-self-use
        images = batch.images
        if images is None:
            return batch
        # Equalize each image in place; `equalize_` may reuse the buffer.
        for image in images:
            image[...] = equalize_(image)
        return batch

    # Added in 0.4.0.
    def get_parameters(self):
        """See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`."""
        # Equalization is parameterless.
        return []
class Autocontrast(contrastlib._ContrastFuncWrapper):
    """Adjust contrast by cutting off ``p%`` of lowest/highest histogram values.

    This augmenter has identical outputs to ``PIL.ImageOps.autocontrast``.

    See :func:`~imgaug.augmenters.pillike.autocontrast` for more details.

    Added in 0.4.0.

    **Supported dtypes**:

    See :func:`~imgaug.augmenters.pillike.autocontrast`.

    Parameters
    ----------
    cutoff : int or tuple of int or list of int or imgaug.parameters.StochasticParameter, optional
        Percentage of values to cut off from the low and high end of each
        image's histogram, before stretching it to ``[0, 255]``.

            * If ``int``: The value will be used for all images.
            * If ``tuple`` ``(a, b)``: A value will be uniformly sampled from
              the discrete interval ``[a..b]`` per image.
            * If ``list``: A random value will be sampled from the list
              per image.
            * If ``StochasticParameter``: A value will be sampled from that
              parameter per image.

    per_channel : bool or float, optional
        Whether to use the same value for all channels (``False``) or to
        sample a new value for each channel (``True``). If this value is a
        float ``p``, then for ``p`` percent of all images `per_channel` will
        be treated as ``True``, otherwise as ``False``.

    seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    name : None or str, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        Old name for parameter `seed`.
        Its usage will not yet cause a deprecation warning,
        but it is still recommended to use `seed` now.
        Outdated since 0.4.0.

    deterministic : bool, optional
        Deprecated since 0.4.0.
        See method ``to_deterministic()`` for an alternative and for
        details about what the "deterministic mode" actually does.

    Examples
    --------
    >>> import imgaug.augmenters as iaa
    >>> aug = iaa.pillike.Autocontrast()

    Modify the contrast of images by cutting off the ``0`` to ``20%`` lowest
    and highest values from the histogram, then stretching it to full length.

    >>> aug = iaa.pillike.Autocontrast((10, 20), per_channel=True)

    Modify the contrast of images by cutting off the ``10`` to ``20%`` lowest
    and highest values from the histogram, then stretching it to full length.
    The cutoff value is sampled per *channel* instead of per *image*.

    """

    # pylint: disable=protected-access
    # Added in 0.4.0.
    def __init__(self, cutoff=(0, 20), per_channel=False, seed=None,
                 name=None, random_state="deprecated",
                 deterministic="deprecated"):
        # Normalize `cutoff` to a stochastic parameter; values restricted
        # to the discrete range [0, 49].
        cutoff_param = iap.handle_discrete_param(
            cutoff,
            "cutoff",
            value_range=(0, 49),
            tuple_to_uniform=True,
            list_to_choice=True,
        )
        super(Autocontrast, self).__init__(
            autocontrast,
            [cutoff_param],
            per_channel,
            dtypes_allowed="uint8",
            dtypes_disallowed="uint16 uint32 uint64 int8 int16 int32 int64 "
                              "float16 float32 float64 float128 "
                              "bool",
            seed=seed,
            name=name,
            random_state=random_state,
            deterministic=deterministic,
        )
# Added in 0.4.0.
class _EnhanceBase(meta.Augmenter):
    """Internal base for PIL-like ``Enhance*`` augmenters.

    Wraps an enhancement function `func` and a `factor` parameter; the
    factor is sampled once per image and handed to `func` together with
    the image.
    """

    # Added in 0.4.0.
    def __init__(self, func, factor, factor_value_range, seed=None,
                 name=None, random_state="deprecated",
                 deterministic="deprecated"):
        super(_EnhanceBase, self).__init__(
            seed=seed,
            name=name,
            random_state=random_state,
            deterministic=deterministic,
        )
        self.func = func
        # Normalize `factor` to a stochastic parameter within the
        # range allowed by the concrete subclass.
        self.factor = iap.handle_continuous_param(
            factor,
            "factor",
            value_range=factor_value_range,
            tuple_to_uniform=True,
            list_to_choice=True,
        )

    # Added in 0.4.0.
    def _augment_batch_(self, batch, random_state, parents, hooks):
        images = batch.images
        if images is None:
            return batch
        # One factor per image, then apply the wrapped function in place.
        factors = self._draw_samples(len(images), random_state)
        for image, factor in zip(images, factors):
            image[...] = self.func(image, factor)
        return batch

    # Added in 0.4.0.
    def _draw_samples(self, nb_rows, random_state):
        return self.factor.draw_samples((nb_rows,), random_state=random_state)

    # Added in 0.4.0.
    def get_parameters(self):
        """See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`."""
        return [self.factor]
class EnhanceColor(_EnhanceBase):
    """Convert images to grayscale.

    This augmenter has identical outputs to ``PIL.ImageEnhance.Color``.

    Added in 0.4.0.

    **Supported dtypes**:

    See :func:`~imgaug.augmenters.pillike.enhance_color`.

    Parameters
    ----------
    factor : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Colorfulness of the output image. Values close to ``0.0`` lead
        to grayscale images, values above ``1.0`` increase the strength of
        colors. Sane values are roughly in ``[0.0, 3.0]``.

            * If ``number``: The value will be used for all images.
            * If ``tuple`` ``(a, b)``: A value will be uniformly sampled per
              image from the interval ``[a, b)``.
            * If ``list``: A random value will be picked from the list per
              image.
            * If ``StochasticParameter``: Per batch of size ``N``, the
              parameter will be queried once to return ``(N,)`` samples.

    seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    name : None or str, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        Old name for parameter `seed`.
        Its usage will not yet cause a deprecation warning,
        but it is still recommended to use `seed` now.
        Outdated since 0.4.0.

    deterministic : bool, optional
        Deprecated since 0.4.0.
        See method ``to_deterministic()`` for an alternative and for
        details about what the "deterministic mode" actually does.

    Examples
    --------
    >>> import imgaug.augmenters as iaa
    >>> aug = iaa.pillike.EnhanceColor()

    Create an augmenter to remove a random fraction of color from
    input images.

    """

    # Added in 0.4.0.
    def __init__(
        self,
        factor=(0.0, 3.0),
        seed=None,
        name=None,
        random_state="deprecated",
        deterministic="deprecated",
    ):
        # Colorfulness cannot be negative; the upper bound is open-ended.
        super(EnhanceColor, self).__init__(
            func=enhance_color,
            factor=factor,
            factor_value_range=(0.0, None),
            seed=seed,
            name=name,
            random_state=random_state,
            deterministic=deterministic,
        )
class EnhanceContrast(_EnhanceBase):
"""Change the contrast of images.
This augmenter has identical outputs to ``PIL.ImageEnhance.Contrast``.
Added in 0.4.0.
**Supported dtypes**:
See :func:`~imgaug.augmenters.pillike.enhance_contrast`.
Parameters
----------
factor : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Strength of contrast in the image. Values below ``1.0`` decrease the
contrast, leading to a gray image around ``0.0``. Values
above ``1.0`` increase the contrast. Sane values are roughly in
``[0.5, 1.5]``.
* If ``number``: The value will be used for all images.
* If ``tuple`` ``(a, b)``: A value will be uniformly sampled per
image from the interval ``[a, b)``.
* If ``list``: A random value will be picked from the list per
image.
* If ``StochasticParameter``: Per batch of size ``N``, the
parameter will be queried once to return ``(N,)`` samples.
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
name : None or str, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
Old name for parameter `seed`.
Its usage will not yet cause a deprecation warning,
but it is still recommended to use `seed` now.
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2016-2017 China Telecommunication Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
curdir = os.path.dirname(os.path.realpath(__file__))
miedir = os.path.join(curdir, "..", "mie")
sys.path.append(miedir)
import traceback
import time
import httplib
import ssl
import json
import datetime
from bprint import varprt, varfmt
from xlogger import klog
from dotdict import DotDict
from confcenter import XConfCenter
from huawei_netconf import *
# Route debug logging to stdout and load this module's configuration
# (file "cfg" next to this script) under the "huawei" namespace.
klog.to_stdout()
cfgfile = os.path.join(curdir, "cfg")
conf = XConfCenter("huawei", [cfgfile])
klog.d("Loading %s ..." % __file__)
def ip2num(ip):
    """Convert a dotted-quad IPv4 string (e.g. "1.2.3.4") to its integer value."""
    return sum(256 ** i * int(octet)
               for i, octet in enumerate(ip.split('.')[::-1]))

def num2ip(num):
    """Convert an integer to its dotted-quad IPv4 string (inverse of ip2num)."""
    # Floor division keeps this correct on both Python 2 and Python 3;
    # the original lambda used "/", which yields floats under Python 3.
    return '.'.join(str(num // (256 ** i) % 256) for i in range(3, -1, -1))
### #####################################################################
## Helper
#
# Fallback endpoint used by every API wrapper below when its own
# config section does not override scheme/host/port.
def_scheme = conf.xget("api_default/url/scheme", "https")
def_hostname = conf.xget("api_default/url/hostname", "172.19.45.185")
def_port = conf.xget("api_default/url/port", 8182)
def dmstr(dat):
    '''Parse *dat* as JSON and wrap the result in a DotDict.

    Anything that does not decode (or cannot be stringified) yields an
    empty DotDict instead of raising, so callers can always use
    attribute access on the result.
    '''
    try:
        dic = json.loads(str(dat))
    except (TypeError, ValueError):
        # Narrowed from a bare `except:` -- only decode/stringify
        # failures should fall back to an empty dict.
        dic = {}
    return DotDict(**dic)
def hget(scheme, hostname, port, path, method="GET", dat=None, hdr=None, timeout=None):
    '''Send one HTTP(S) request and return (status, reason, body).

    scheme  : "http" or "https"; https skips certificate verification
              (the controller presents a self-signed certificate).
    dat     : python object serialized to JSON as the request body.
    hdr     : optional header dict; Content-Type defaults to
              application/json.
    timeout : socket timeout in seconds (None = library default).
    '''
    hdr = hdr or {}
    hdr["Content-Type"] = hdr.get("Content-Type", "application/json")
    if scheme == "https":
        context = ssl._create_unverified_context()
        hc = httplib.HTTPSConnection(hostname, port, timeout, context=context)
    else:
        hc = httplib.HTTPConnection(hostname, port, timeout)
    params = json.dumps(dat)
    # BUG FIX: json.dumps() never returns an empty string (None becomes
    # "null"), so the old `not params` check could never fire; inspect
    # `dat` itself to detect a body-less POST.
    if method == "POST" and dat is None:
        klog.e("!!!!!!!! NO PARAMETERS FOR POST METHOD !!!!!!!!")
    klog.d()
    klog.d("+" * 30)
    klog.d(" METHOD : %s" % method)
    klog.d(" SCHEME : %s" % scheme)
    klog.d(" HOST : %s" % hostname)
    klog.d(" PORT : %s" % port)
    klog.d(" PATH : %s" % path)
    klog.d(" BODY : %s" % params)
    klog.d("HEADERS : %s" % json.dumps(hdr))
    hc.request(method, path, params, hdr)
    r = hc.getresponse()
    res = r.read()
    klog.d(" STATUS : %d" % r.status)
    klog.d(" REASON : %s" % r.reason)
    klog.d(varfmt(dmstr(res), "DUMP HGET DATA"))
    klog.d("-" * 30)
    klog.d()
    return r.status, r.reason, res
def call(cls, *args, **kwargs):
    '''Instantiate *cls* and invoke its dotan() method.

    On an "Unauthorized" response the cached token is force-refreshed
    and the call is retried exactly once.
    '''
    api = cls()
    ok, reason, res = api.dotan(*args, **kwargs)
    if reason != "Unauthorized":
        return ok, reason, res
    token.token(True)
    return api.dotan(*args, **kwargs)
### #####################################################################
## Token
#
class TokenGetter():
    '''Fetch and cache the controller's X-Access-Token.

    The token is requested lazily, cached together with its expiry
    time (packed as a YYYYMMDDHHMMSS integer), and refreshed when
    expired or on explicit request.
    '''
    def __init__(self):
        self.url_scheme = conf.xget("api_token/url/scheme", def_scheme)
        self.url_hostname = conf.xget("api_token/url/hostname", def_hostname)
        self.url_port = conf.xget("api_token/url/port", def_port)
        self.url_pathpat = "/controller/v2/tokens"
        self.token_str = None       # cached token, None when unknown/invalid
        self.expired_time = 0       # expiry as YYYYMMDDHHMMSS integer
        username = conf.xget("api_tokenGetter/user/name", "<EMAIL>")
        userpass = conf.xget("api_tokenGetter/user/pass", "<PASSWORD>")
        self.set_userinfo(username, userpass)
    def set_userinfo(self, username, userpass):
        '''Set the credentials POSTed to the token endpoint.'''
        self.username = username
        self.userpass = userpass
        self.userinfo = {"userName": self.username, "password": self.userpass}
    def _parsetime(self, timestr):
        '''Parse e.g. "2016-10-12T02:17:47,960+08:00" into 20161012021747.

        On any parse failure, pretend the token lives roughly one more
        hour (+3600 on the packed format is only approximately an hour,
        which is good enough as a fallback).
        '''
        try:
            x = timestr.split(',')[0]
            d = x.replace("T", "").replace(":", "").replace("-", "")
            return int(d)
        except (AttributeError, ValueError, IndexError):
            return self._nowtime() + 3600
    def _nowtime(self):
        '''Current local time packed as a YYYYMMDDHHMMSS integer.'''
        now = datetime.datetime.now()
        now = "%04d%02d%02d%02d%02d%02d" % (now.year, now.month, now.day, now.hour, now.minute, now.second)
        return int(now)
    def _check_expire(self):
        '''Return True when the cached token should be refreshed.

        The token is treated as expired 10 "seconds" ahead of its real
        expiry so an almost-dead token is never presented.
        '''
        now = self._nowtime()
        exp = self.expired_time
        # BUG FIX: the original compared `now > (exp + 10)`, which kept
        # using the token 10 seconds PAST expiry instead of refreshing
        # 10 seconds early as the comment intended.
        res = now > (exp - 10)
        return res
    def _get(self):
        '''POST the credentials to the controller and cache the token.'''
        status, reason, resp = hget(self.url_scheme, self.url_hostname, self.url_port, self.url_pathpat, "POST", self.userinfo)
        dic = dmstr(resp)
        try:
            data = dic["data"]
            self.token_str = data["token_id"]
            self.expired_time = self._parsetime(data["expiredDate"])
        except Exception:
            klog.e("xxxxxxxxxxxxxxxxxxx")
            klog.e(traceback.format_exc())
            self.token_str = None
            self.expired_time = 0
    def token(self, refresh=False):
        '''Return a valid token string, fetching a new one when needed.'''
        if refresh or not self.token_str or self._check_expire():
            self._get()
        return self.token_str
    def todic(self, refresh=False):
        '''Return the token as a ready-to-use request-header dict.'''
        return {"X-Access-Token": self.token(refresh)}
# Module-wide token cache shared by all API wrappers below.
token = TokenGetter()
### #####################################################################
## Equips
#
class EquipInfo():
    '''In-memory lookup tables describing the known equipment.

    Holds five maps: uid -> node object, loopback -> uid,
    loopback -> netconf port, node/port-id -> ip, and link-ip -> link
    object.  Missing maps default to empty dicts.
    '''
    def __init__(self, map_uid_obj=None, map_loopback_uid=None, map_loopback_port=None,
            map_node_port_id_ip=None, map_link_ip_obj=None):
        self.set_map(map_uid_obj, map_loopback_uid, map_loopback_port,
                map_node_port_id_ip, map_link_ip_obj)
    def set_map(self, map_uid_obj=None, map_loopback_uid=None, map_loopback_port=None,
            map_node_port_id_ip=None, map_link_ip_obj=None):
        '''Replace all maps at once; falsy arguments become empty dicts.'''
        self.map_uid_obj = map_uid_obj or {}
        self.map_loopback_uid = map_loopback_uid or {}
        self.map_loopback_port = map_loopback_port or {}
        self.map_node_port_id_ip = map_node_port_id_ip or {}
        self.map_link_ip_obj = map_link_ip_obj or {}
    def fr_uid(self, uid):
        '''Return the node object registered under *uid*, or None.'''
        return self.map_uid_obj.get(uid)
    def fr_loopback(self, loopback):
        '''Resolve a loopback address to its node object, or None.'''
        return self.fr_uid(self.map_loopback_uid.get(loopback))
    def fr_netconf_port(self, loopback):
        '''Return the netconf port for *loopback*, or None.'''
        return self.map_loopback_port.get(loopback)
    def set_map_node_port_id_ip(self, map_node):
        '''Replace only the node/port-id -> ip map.'''
        self.map_node_port_id_ip = map_node
    def set_map_link_ip_obj(self, map_link):
        '''Replace only the link-ip -> link-object map.'''
        self.map_link_ip_obj = map_link
    def fr_link_ip_obj(self, link_ip):
        '''Return the link object for *link_ip*, or None.'''
        return self.map_link_ip_obj.get(link_ip)
# Module-wide equipment map, filled in by callers of this module.
einfo = EquipInfo()
### #####################################################################
## Apis
#
class AutoApproveConfigSet():
    ''' doc: 2.4.1 -- POST the LSP re-optimization auto-approve config. '''
    def __init__(self):
        # Endpoint location, overridable via the api_lspStatistic section.
        self.url_scheme = conf.xget("api_lspStatistic/url/scheme", def_scheme)
        self.url_hostname = conf.xget("api_lspStatistic/url/hostname", def_hostname)
        self.url_port = conf.xget("api_lspStatistic/url/port", def_port)
        self.url_pathpat = "/restconf/config/huawei-ac-lsp-reoptimization:lsp-reoptimization-cfg/autoapprove-config"
        self.method = "POST"
    def dotan(self):
        '''Fire the request; return (0 on HTTP 200 else -1, reason, body).'''
        status, reason, res = hget(
            self.url_scheme, self.url_hostname, self.url_port,
            self.url_pathpat, self.method, None, token.todic())
        return (0 if status == 200 else -1), reason, res
class AutoApproveConfigQuery():
    ''' doc: 2.4.2 -- GET the LSP re-optimization auto-approve config. '''
    def __init__(self):
        # Endpoint location, overridable via the api_lspStatistic section.
        self.url_scheme = conf.xget("api_lspStatistic/url/scheme", def_scheme)
        self.url_hostname = conf.xget("api_lspStatistic/url/hostname", def_hostname)
        self.url_port = conf.xget("api_lspStatistic/url/port", def_port)
        self.url_pathpat = "/restconf/config/huawei-ac-lsp-reoptimization:lsp-reoptimization-cfg/autoapprove-config"
        self.method = "GET"
    def dotan(self):
        '''Fire the request; return (0 on HTTP 200 else -1, reason, body).'''
        status, reason, res = hget(
            self.url_scheme, self.url_hostname, self.url_port,
            self.url_pathpat, self.method, None, token.todic())
        return (0 if status == 200 else -1), reason, res
class LspConfirm():
    ''' doc: 2.4.8 -- GET the list of LSPs awaiting confirmation. '''
    def __init__(self):
        # Endpoint location, overridable via the api_lspStatistic section.
        self.url_scheme = conf.xget("api_lspStatistic/url/scheme", def_scheme)
        self.url_hostname = conf.xget("api_lspStatistic/url/hostname", def_hostname)
        self.url_port = conf.xget("api_lspStatistic/url/port", def_port)
        self.url_pathpat = "/restconf/config/huawei-ac-lsp-reoptimization:lsp-reoptimization-cfg/confirming-lsp-infos"
        self.method = "GET"
    def dotan(self):
        '''Fire the request; return (0 on HTTP 200 else -1, reason, body).'''
        status, reason, res = hget(
            self.url_scheme, self.url_hostname, self.url_port,
            self.url_pathpat, self.method, None, token.todic())
        return (0 if status == 200 else -1), reason, res
class PceAprove_2():
    ''' doc: 2.4.9 -- POST an approval decision for pending LSPs. '''
    def __init__(self):
        self.url_scheme = conf.xget("api_lspStatistic/url/scheme", def_scheme)
        self.url_hostname = conf.xget("api_lspStatistic/url/hostname", def_hostname)
        self.url_port = conf.xget("api_lspStatistic/url/port", def_port)
        self.url_pathpat = "/restconf/config/huawei-ac-lsp-reoptimization:lsp-reoptimization-cfg/pceAprove"
        self.method = "POST"
    def dotan(self, req):
        '''POST *req* as the approval body.

        Returns (0 on HTTP 200 else -1, reason, response body).
        '''
        hdr = token.todic()
        # BUG FIX: the request body was previously dropped (None was sent
        # instead of *req*), turning every approval into an empty POST.
        status, reason, res = hget(self.url_scheme, self.url_hostname, self.url_port, self.url_pathpat, self.method, req, hdr)
        return 0 if status == 200 else -1, reason, res
class LspStatistic():
    ''' doc: 2.4.11 -- GET LSP re-optimization statistics. '''
    def __init__(self):
        # Endpoint location, overridable via the api_lspStatistic section.
        self.url_scheme = conf.xget("api_lspStatistic/url/scheme", def_scheme)
        self.url_hostname = conf.xget("api_lspStatistic/url/hostname", def_hostname)
        self.url_port = conf.xget("api_lspStatistic/url/port", def_port)
        self.url_pathpat = "/restconf/config/huawei-ac-lsp-reoptimization:lsp-reoptimization-cfg/lsp-statistic"
        self.method = "GET"
    def dotan(self):
        '''Fire the request; return (0 on HTTP 200 else -1, reason, body).'''
        status, reason, res = hget(
            self.url_scheme, self.url_hostname, self.url_port,
            self.url_pathpat, self.method, None, token.todic())
        return (0 if status == 200 else -1), reason, res
class NeCfg_All():
    ''' doc: 2.1.1 -- GET the configured inventory of all NEs. '''
    def __init__(self):
        # Endpoint location, overridable via the api_neCfg_All section.
        self.url_scheme = conf.xget("api_neCfg_All/url/scheme", def_scheme)
        self.url_hostname = conf.xget("api_neCfg_All/url/hostname", def_hostname)
        self.url_port = conf.xget("api_neCfg_All/url/port", def_port)
        self.url_pathpat = "/restconf/config/huawei-ac-inventory:inventory-cfg/nes"
        self.method = "GET"
    def dotan(self):
        '''Fire the request; return (0 on HTTP 200 else -1, reason, body).'''
        klog.d(varfmt(self))
        status, reason, res = hget(
            self.url_scheme, self.url_hostname, self.url_port,
            self.url_pathpat, self.method, None, token.todic())
        return (0 if status == 200 else -1), reason, res
class NeCfg_One():
    ''' doc: 2.1.5 -- GET the configured inventory entry of one NE. '''
    def __init__(self):
        # Endpoint location, overridable via the api_neCfg_One section.
        self.url_scheme = conf.xget("api_neCfg_One/url/scheme", def_scheme)
        self.url_hostname = conf.xget("api_neCfg_One/url/hostname", def_hostname)
        self.url_port = conf.xget("api_neCfg_One/url/port", def_port)
        self.url_pathpat = "/restconf/config/huawei-ac-inventory:inventory-cfg/nes/{neid}"
        self.method = "GET"
    def dotan(self, neid):
        '''Query NE *neid*; return (0 on HTTP 200 else -1, reason, body).'''
        path = self.url_pathpat.format(neid=neid)
        status, reason, res = hget(
            self.url_scheme, self.url_hostname, self.url_port,
            path, self.method, None, token.todic())
        return (0 if status == 200 else -1), reason, res
class NeOper_All():
    ''' doc: 2.1.2 -- GET the operational state of all NEs. '''
    def __init__(self):
        # Endpoint location, overridable via the api_neOper_All section.
        self.url_scheme = conf.xget("api_neOper_All/url/scheme", def_scheme)
        self.url_hostname = conf.xget("api_neOper_All/url/hostname", def_hostname)
        self.url_port = conf.xget("api_neOper_All/url/port", def_port)
        self.url_pathpat = "/restconf/operational/huawei-ac-inventory:inventory-oper/nes"
        self.method = "GET"
    def dotan(self):
        '''Fire the request; return (0 on HTTP 200 else -1, reason, body).'''
        status, reason, res = hget(
            self.url_scheme, self.url_hostname, self.url_port,
            self.url_pathpat, self.method, None, token.todic())
        return (0 if status == 200 else -1), reason, res
class NeOper_One():
    ''' doc: 2.1.6 -- GET the operational state of one NE. '''
    def __init__(self):
        # Endpoint location, overridable via the api_neOper_One section.
        self.url_scheme = conf.xget("api_neOper_One/url/scheme", def_scheme)
        self.url_hostname = conf.xget("api_neOper_One/url/hostname", def_hostname)
        self.url_port = conf.xget("api_neOper_One/url/port", def_port)
        self.url_pathpat = "/restconf/operational/huawei-ac-inventory:inventory-oper/nes/{neid}"
        self.method = "GET"
    def dotan(self, neid):
        '''Query NE *neid*; return (0 on HTTP 200 else -1, reason, body).'''
        path = self.url_pathpat.format(neid=neid)
        status, reason, res = hget(
            self.url_scheme, self.url_hostname, self.url_port,
            path, self.method, None, token.todic())
        return (0 if status == 200 else -1), reason, res
class LspInfos():
    ''' doc: 2.4.6 -- GET the operational LSP information list. '''
    def __init__(self):
        # Endpoint location, overridable via the api_lspInfos section.
        self.url_scheme = conf.xget("api_lspInfos/url/scheme", def_scheme)
        self.url_hostname = conf.xget("api_lspInfos/url/hostname", def_hostname)
        self.url_port = conf.xget("api_lspInfos/url/port", def_port)
        self.url_pathpat = "/restconf/operational/huawei-ac-lsp-reoptimization:lsp-reoptimization-oper/lsp-infos"
        self.method = "GET"
    def dotan(self):
        '''Fire the request; return (0 on HTTP 200 else -1, reason, body).'''
        status, reason, res = hget(
            self.url_scheme, self.url_hostname, self.url_port,
            self.url_pathpat, self.method, None, token.todic())
        return (0 if status == 200 else -1), reason, res
class L3_LinkCfg_All():
    ''' doc: 2.2.1 -- GET all links of the (hard-coded) L3 TE topology. '''
    def __init__(self):
        # Reuses the api_neCfg_All endpoint section; the topology uuid
        # is fixed in the path.
        self.url_scheme = conf.xget("api_neCfg_All/url/scheme", def_scheme)
        self.url_hostname = conf.xget("api_neCfg_All/url/hostname", def_hostname)
        self.url_port = conf.xget("api_neCfg_All/url/port", def_port)
        self.url_pathpat = "/restconf/config/huawei-ac-network-te-topology:l3-topology-cfg/topologies/topology/4acbd130-846b-3536-a142-8a42d8a3c4b8/links"
        self.method = "GET"
    def dotan(self):
        '''Fire the request; return (0 on HTTP 200 else -1, reason, body).'''
        klog.d(varfmt(self))
        status, reason, res = hget(
            self.url_scheme, self.url_hostname, self.url_port,
            self.url_pathpat, self.method, None, token.todic())
        return (0 if status == 200 else -1), reason, res
class L3_NodeCfg_All():
    ''' doc: 2.2.4 -- GET all nodes of the (hard-coded) L3 TE topology. '''
    def __init__(self):
        # Reuses the api_neCfg_All endpoint section; the topology uuid
        # is fixed in the path.
        self.url_scheme = conf.xget("api_neCfg_All/url/scheme", def_scheme)
        self.url_hostname = conf.xget("api_neCfg_All/url/hostname", def_hostname)
        self.url_port = conf.xget("api_neCfg_All/url/port", def_port)
        self.url_pathpat = "/restconf/config/huawei-ac-network-te-topology:l3-topology-cfg/topologies/topology/4acbd130-846b-3536-a142-8a42d8a3c4b8/nodes"
        self.method = "GET"
    def dotan(self):
        '''Fire the request; return (0 on HTTP 200 else -1, reason, body).'''
        klog.d(varfmt(self))
        status, reason, res = hget(
            self.url_scheme, self.url_hostname, self.url_port,
            self.url_pathpat, self.method, None, token.todic())
        return (0 if status == 200 else -1), reason, res
class L3_Topo():
def __init__(self):
self.url_scheme = conf.xget("api_neCfg_All/url/scheme", def_scheme)
self.url_hostname = conf.xget("api_neCfg_All/url/hostname", def_hostname)
self.url_port = conf.xget("api_neCfg_All/url/port", def_port)
self.url_pathpat = "/restconf/config/huawei-ac-network-te-topology:l3-topology-cfg/topologies/topology/4acbd130-846b-3536-a142-8a42d8a3c4b8/topo"
self.method = "GET"
def dotan(self):
klog.d(varfmt(self))
err, msg, res = call(L3_NodeCfg_All)
if err:
return -1, "Get L3_NodeCfg_All Information failed", None
res = dmstr(res)
# set node map content
res_map_node = {}
if (res and 'node' in | |
IMAGE IOD': ['Image'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Image'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'CT IMAGE IOD': ['Image'],
'NM IMAGE IOD': ['Image'],
'CR IMAGE IOD': ['Image'],
'US MULTI-FRAME IMAGE IOD': ['Image'],
'VL WHOLE SLIDE MICROSCOPY IOD': ['Image'],
'RT DOSE IOD': ['Dose'],
'VIDEO MICROSCOPIC IMAGE IOD': ['Image'],
'PET IMAGE IOD': ['Image'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'US IMAGE IOD': ['Image'],
'XRF IMAGE IOD': ['Image'],
'VL PHOTOGRAPHIC IMAGE IOD': ['Image'],
'VL SLIDE-COORDINATES MICROSCOPIC IMAGE IOD': ['Image'],
'VL MICROSCOPIC IMAGE IOD': ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'VIDEO ENDOSCOPIC IMAGE IOD': ['Image'],
'VIDEO PHOTOGRAPHIC IMAGE IOD': ['Image'],
'RT IMAGE IOD': ['Image'],
'VL ENDOSCOPIC IMAGE IOD': ['Image'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Image'],
},
# MeasurementLaterality
0x00240113L: {
'OPHTHALMIC VISUAL FIELD STATIC PERIMETRY MEASUREMENTS IOD': ['Measurements'],
None: ['Measurements'],
},
# LossyImageCompressionMethod
0x00282114L: {
'SC IMAGE IOD': ['Image'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Image'],
None: ['Image', 'Dose', 'Segmentation'],
'SEGMENTATION IOD': ['Segmentation'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Image'],
'OPHTHALMIC PHOTOGRAPHY 16 BIT IMAGE IOD': ['Image'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Image'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'OPHTHALMIC PHOTOGRAPHY 8 BIT IMAGE IOD': ['Image'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Image'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'ENHANCED MR IMAGE IOD': ['Image'],
'CT IMAGE IOD': ['Image'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Image'],
'NM IMAGE IOD': ['Image'],
'CR IMAGE IOD': ['Image'],
'US MULTI-FRAME IMAGE IOD': ['Image'],
'VIDEO ENDOSCOPIC IMAGE IOD': ['Image'],
'VL WHOLE SLIDE MICROSCOPY IOD': ['Image'],
'X-RAY 3D ANGIOGRAPHIC IMAGE IOD': ['Image'],
'RT DOSE IOD': ['Dose'],
'VIDEO MICROSCOPIC IMAGE IOD': ['Image'],
'PET IMAGE IOD': ['Image'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'US IMAGE IOD': ['Image'],
'MR IMAGE IOD': ['Image'],
'ENHANCED MR COLOR IMAGE IOD': ['Image'],
'ENHANCED CT IMAGE IOD': ['Image'],
'XRF IMAGE IOD': ['Image'],
'VL PHOTOGRAPHIC IMAGE IOD': ['Image'],
'VL SLIDE-COORDINATES MICROSCOPIC IMAGE IOD': ['Image'],
'VL MICROSCOPIC IMAGE IOD': ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'ENHANCED PET IMAGE IOD': ['Image'],
'ENHANCED X-RAY RF IMAGE IOD': ['Image'],
'VIDEO PHOTOGRAPHIC IMAGE IOD': ['Image'],
'RT IMAGE IOD': ['Image'],
'X-RAY 3D CRANIOFACIAL IMAGE IOD': ['Image'],
'VL ENDOSCOPIC IMAGE IOD': ['Image'],
'BREAST TOMOSYNTHESIS IMAGE IOD': ['Image'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Image'],
},
# ReferencedSeriesSequence
0x00081115L: {
'BASIC STRUCTURED DISPLAY IOD': ['Presentation State'],
'INTRAVASCULAR OCT IMAGE IOD': ['Image'],
None: ['Presentation State', 'Spatial Registration', 'Spatial Fiducials', 'Instance Availability Notification', 'Image', 'Surface', 'Segmentation', 'Real World Value Mapping', 'Stereometric Relationship', 'Plan'],
'SURFACE SEGMENTATION IOD': ['Surface'],
'SEGMENTATION IOD': ['Segmentation'],
'RT BEAMS DELIVERY INSTRUCTION IOD': ['Plan'],
'SPATIAL REGISTRATION IOD': ['Spatial Registration'],
'REAL WORLD VALUE MAPPING IOD': ['Real World Value Mapping'],
'STEREOMETRIC RELATIONSHIP IOD': ['Stereometric Relationship'],
'SPATIAL FIDUCIALS IOD': ['Spatial Fiducials'],
'VL WHOLE SLIDE MICROSCOPY IOD': ['Image'],
'INSTANCE AVAILABILITY NOTIFICATION IOD': ['Instance Availability Notification'],
},
# MIMETypeOfEncapsulatedDocument
0x00420012L: {
'ENCAPSULATED CDA IOD': ['Encapsulated Document'],
'ENCAPSULATED PDF IOD': ['Encapsulated Document'],
'IMPLANT ASSEMBLY TEMPLATE IOD': ['Implant Assembly'],
None: ['Encapsulated Document', 'Implant Assembly'],
},
# FovealPointNormativeDataFlag
0x00240117L: {
'OPHTHALMIC VISUAL FIELD STATIC PERIMETRY MEASUREMENTS IOD': ['Measurements'],
None: ['Measurements'],
},
# FovealPointProbabilityValue
0x00240118L: {
'OPHTHALMIC VISUAL FIELD STATIC PERIMETRY MEASUREMENTS IOD': ['Measurements'],
None: ['Measurements'],
},
# DistanceReceptorPlaneToDetectorHousing
0x00189426L: {
'ENHANCED X-RAY RF IMAGE IOD': ['Image'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
None: ['Image'],
},
# UnspecifiedLateralityLensSequence
0x00460016L: {
'LENSOMETRY MEASUREMENTS IOD': ['Equipment'],
None: ['Equipment'],
},
# BlendingLUT1Sequence
0x00281404L: {
'ENHANCED ULTRASOUND VOLUME IOD': ['Image'],
None: ['Image'],
},
# ReferencedPatientSequence
0x00081120L: {
'BASIC STRUCTURED DISPLAY IOD': ['Patient'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Patient'],
'RT BRACHY TREATMENT RECORD IOD': ['Patient'],
'RT STRUCTURE SET IOD': ['Patient'],
'RT PLAN IOD': ['Patient'],
'CR IMAGE IOD': ['Patient'],
'RAW DATA IOD': ['Patient'],
'MACULAR GRID THIICKNESS AND VOLUME REPORT IOD': ['Patient'],
'ENHANCED MR IMAGE IOD': ['Patient'],
'BASIC CARDIAC EP IOD': ['Patient'],
'RT TREATMENT SUMMARY RECORD IOD': ['Patient'],
'MODALITY PERFORMED PROCEDURE STEP IOD': ['Modality Performed Procedure Step'],
'12-LEAD ECG IOD': ['Patient'],
'RESPIRATORY WAVEFORM IOD': ['Patient'],
'VL SLIDE-COORDINATES MICROSCOPIC IMAGE IOD': ['Patient'],
'BREAST TOMOSYNTHESIS IMAGE IOD': ['Patient'],
'BASIC VOICE AUDIO IOD': ['Patient'],
'OPHTHALMIC PHOTOGRAPHY 16 BIT IMAGE IOD': ['Patient'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Patient'],
'OPHTHALMIC PHOTOGRAPHY 8 BIT IMAGE IOD': ['Patient'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Patient'],
'SPECTACLE PRESCIPTION REPORT IOD': ['Patient'],
'BASIC TEXT SR IOD': ['Patient'],
'NM IMAGE IOD': ['Patient'],
'BLENDING SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'LENSOMETRY MEASUREMENTS IOD': ['Patient'],
'MR SPECTROSCOPY IOD': ['Patient'],
'ENCAPSULATED PDF IOD': ['Patient'],
'X-RAY 3D ANGIOGRAPHIC IMAGE IOD': ['Patient'],
'CHEST CAD SR IOD': ['Patient'],
'HEMODYNAMIC IOD': ['Patient'],
'OPHTHALMIC AXIAL MEASUREMENTS IOD': ['Patient'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Patient'],
'VIDEO MICROSCOPIC IMAGE IOD': ['Patient'],
'ENHANCED MR COLOR IMAGE IOD': ['Patient'],
'ENHANCED CT IMAGE IOD': ['Patient'],
'X-RAY RADIATION DOSE SR IOD': ['Patient'],
'AUTOREFRACTION MEASUREMENTS IOD': ['Patient'],
'PROCEDURE LOG IOD': ['Patient'],
'IMPLANTATION PLAN SR DOCUMENT IOD': ['Patient'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Patient'],
'STEREOMETRIC RELATIONSHIP IOD': ['Patient'],
'INTRAOCULAR LENS CALCULATIONS IOD': ['Patient'],
'X-RAY 3D CRANIOFACIAL IMAGE IOD': ['Patient'],
'VL ENDOSCOPIC IMAGE IOD': ['Patient'],
'KERATOMETRY MEASUREMENTS IOD': ['Patient'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Patient'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Patient'],
'COMPREHENSIVE SR IOD': ['Patient'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Patient'],
'KEY OBJECT SELECTION DOCUMENT IOD': ['Patient'],
'SPATIAL FIDUCIALS IOD': ['Patient'],
'RT ION PLAN IOD': ['Patient'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Patient'],
'CT IMAGE IOD': ['Patient'],
'VL WHOLE SLIDE MICROSCOPY IOD': ['Patient'],
'RT ION BEAMS TREATMENT RECORD IOD': ['Patient'],
'OPHTHALMIC VISUAL FIELD STATIC PERIMETRY MEASUREMENTS IOD': ['Patient'],
'XA/XRF GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'RT DOSE IOD': ['Patient'],
'AMBULATORY ECG IOD': ['Patient'],
'SURFACE SEGMENTATION IOD': ['Patient'],
'MAMMOGRAPHY CAD SR IOD': ['Patient'],
'VL MICROSCOPIC IMAGE IOD': ['Patient'],
'RT BEAMS TREATMENT RECORD IOD': ['Patient'],
'DEFORMABLE SPATIAL REGISTRATION IOD': ['Patient'],
'VIDEO PHOTOGRAPHIC IMAGE IOD': ['Patient'],
'RT IMAGE IOD': ['Patient'],
'SC IMAGE IOD': ['Patient'],
None: ['Patient', 'Modality Performed Procedure Step'],
'SEGMENTATION IOD': ['Patient'],
'PET IMAGE IOD': ['Patient'],
'PSEUDO-COLOR SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'DIGITAL X-RAY IMAGE IOD': ['Patient'],
'REAL WORLD VALUE MAPPING IOD': ['Patient'],
'SPATIAL REGISTRATION IOD': ['Patient'],
'COLON CAD SR IOD': ['Patient'],
'INTRAVASCULAR OCT IMAGE IOD': ['Patient'],
'COLOR SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'ENHANCED PET IMAGE IOD': ['Patient'],
'VISUAL ACUITY MEASUREMENTS IOD': ['Patient'],
'US MULTI-FRAME IMAGE IOD': ['Patient'],
'ENHANCED X-RAY RF IMAGE IOD': ['Patient'],
'RT BEAMS DELIVERY INSTRUCTION IOD': ['Patient'],
'SUBJECTIVE REFRACTION MEASUREMENTS IOD': ['Patient'],
'US IMAGE IOD': ['Patient'],
'GENERAL ECG IOD': ['Patient'],
'XRF IMAGE IOD': ['Patient'],
'ENCAPSULATED CDA IOD': ['Patient'],
'ENHANCED SR IOD': ['Patient'],
'VL PHOTOGRAPHIC IMAGE IOD': ['Patient'],
'GENERAL AUDIO WAVEFORM IOD': ['Patient'],
'MR IMAGE IOD': ['Patient'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Patient'],
'VIDEO ENDOSCOPIC IMAGE IOD': ['Patient'],
'ARTERIAL PULSE WAVEFORM IOD': ['Patient'],
},
# ProtocolName
0x00181030L: {
'BASIC STRUCTURED DISPLAY IOD': ['Series'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Series'],
'VL PHOTOGRAPHIC IMAGE IOD': ['Series'],
None: ['Series'],
'SEGMENTATION IOD': ['Series'],
'BASIC VOICE AUDIO IOD': ['Series'],
'OPHTHALMIC PHOTOGRAPHY 16 BIT IMAGE IOD': ['Series'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Series'],
'SC IMAGE IOD': ['Series'],
'GENERAL ECG IOD': ['Series'],
'XA/XRF GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Series'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Series'],
'DIGITAL X-RAY IMAGE IOD': ['Series'],
'OPHTHALMIC PHOTOGRAPHY 8 BIT IMAGE IOD': ['Series'],
'SPATIAL FIDUCIALS IOD': ['Series'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Series'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Series'],
'RAW DATA IOD': ['Series'],
'INTRAVASCULAR OCT IMAGE IOD': ['Series'],
'COLOR SOFTCOPY PRESENTATION STATE IOD': ['Series'],
'REAL WORLD VALUE MAPPING IOD': ['Series'],
'ENHANCED MR IMAGE IOD': ['Series'],
'CT IMAGE IOD': ['Series'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Series'],
'NM IMAGE IOD': ['Series'],
'BLENDING SOFTCOPY PRESENTATION STATE IOD': ['Series'],
'VISUAL ACUITY MEASUREMENTS IOD': ['Series'],
'PSEUDO-COLOR SOFTCOPY PRESENTATION STATE IOD': ['Series'],
'US MULTI-FRAME IMAGE IOD': ['Series'],
'SUBJECTIVE REFRACTION MEASUREMENTS IOD': ['Series'],
'STEREOMETRIC RELATIONSHIP IOD': ['Series'],
'BASIC CARDIAC EP IOD': ['Series'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Series'],
'PET IMAGE IOD': ['Series'],
'LENSOMETRY MEASUREMENTS IOD': ['Series'],
'MR SPECTROSCOPY IOD': ['Series'],
'DEFORMABLE SPATIAL REGISTRATION IOD': ['Series'],
'OPHTHALMIC VISUAL FIELD STATIC PERIMETRY MEASUREMENTS IOD': ['Series'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Series'],
'RT BEAMS DELIVERY INSTRUCTION IOD': ['Series'],
'X-RAY 3D ANGIOGRAPHIC IMAGE IOD': ['Series'],
'VIDEO ENDOSCOPIC IMAGE IOD': ['Series'],
'ARTERIAL PULSE WAVEFORM IOD': ['Series'],
'VIDEO MICROSCOPIC IMAGE IOD': ['Series'],
'HEMODYNAMIC IOD': ['Series'],
'OPHTHALMIC AXIAL MEASUREMENTS IOD': ['Series'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Series'],
'US IMAGE IOD': ['Series'],
'AMBULATORY ECG IOD': ['Series'],
'VL WHOLE SLIDE MICROSCOPY IOD': ['Series'],
'12-LEAD ECG IOD': ['Series'],
'MR IMAGE IOD': ['Series'],
'ENHANCED MR COLOR IMAGE | |
<gh_stars>0
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for hashing layer."""
import os
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import keras
from keras.engine import input_layer
from keras.engine import training
from keras.layers.preprocessing import hashing
from keras.layers.preprocessing import preprocessing_test_utils
from keras.testing_infra import test_combinations
from keras.testing_infra import test_utils
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class HashingTest(test_combinations.TestCase):
@parameterized.named_parameters(
("list", list),
("tuple", tuple),
("numpy", np.array),
("array_like", preprocessing_test_utils.ArrayLike),
)
def test_tensor_like_inputs(self, data_fn):
input_data = data_fn([0, 1, 2, 3, 4])
expected_output = [1, 0, 1, 0, 2]
layer = hashing.Hashing(num_bins=3)
output_data = layer(input_data)
self.assertAllEqual(output_data, expected_output)
def test_hash_single_bin(self):
layer = hashing.Hashing(num_bins=1)
inp = np.asarray([["A"], ["B"], ["C"], ["D"], ["E"]])
output = layer(inp)
self.assertAllClose([[0], [0], [0], [0], [0]], output)
def test_hash_dense_input_farmhash(self):
layer = hashing.Hashing(num_bins=2)
inp = np.asarray(
[["omar"], ["stringer"], ["marlo"], ["wire"], ["skywalker"]]
)
output = layer(inp)
# Assert equal for hashed output that should be true on all platforms.
self.assertAllClose([[0], [0], [1], [0], [0]], output)
def test_hash_dense_input_mask_value_farmhash(self):
empty_mask_layer = hashing.Hashing(num_bins=3, mask_value="")
omar_mask_layer = hashing.Hashing(num_bins=3, mask_value="omar")
inp = np.asarray(
[["omar"], ["stringer"], ["marlo"], ["wire"], ["skywalker"]]
)
empty_mask_output = empty_mask_layer(inp)
omar_mask_output = omar_mask_layer(inp)
# Outputs should be one more than test_hash_dense_input_farmhash (the
# zeroth bin is now reserved for masks).
self.assertAllClose([[1], [1], [2], [1], [1]], empty_mask_output)
# 'omar' should map to 0.
self.assertAllClose([[0], [1], [2], [1], [1]], omar_mask_output)
def test_hash_dense_list_input_farmhash(self):
layer = hashing.Hashing(num_bins=2)
inp = [["omar"], ["stringer"], ["marlo"], ["wire"], ["skywalker"]]
output = layer(inp)
# Assert equal for hashed output that should be true on all platforms.
self.assertAllClose([[0], [0], [1], [0], [0]], output)
inp = ["omar", "stringer", "marlo", "wire", "skywalker"]
output = layer(inp)
# Assert equal for hashed output that should be true on all platforms.
self.assertAllClose([0, 0, 1, 0, 0], output)
def test_hash_dense_int_input_farmhash(self):
layer = hashing.Hashing(num_bins=3)
inp = np.asarray([[0], [1], [2], [3], [4]])
output = layer(inp)
# Assert equal for hashed output that should be true on all platforms.
self.assertAllClose([[1], [0], [1], [0], [2]], output)
def test_hash_dense_input_siphash(self):
layer = hashing.Hashing(num_bins=2, salt=[133, 137])
inp = np.asarray(
[["omar"], ["stringer"], ["marlo"], ["wire"], ["skywalker"]]
)
output = layer(inp)
# Assert equal for hashed output that should be true on all platforms.
# Note the result is different from FarmHash.
self.assertAllClose([[0], [1], [0], [1], [0]], output)
layer_2 = hashing.Hashing(num_bins=2, salt=[211, 137])
output_2 = layer_2(inp)
# Note the result is different from (133, 137).
self.assertAllClose([[1], [0], [1], [0], [1]], output_2)
def test_hash_dense_int_input_siphash(self):
layer = hashing.Hashing(num_bins=3, salt=[133, 137])
inp = np.asarray([[0], [1], [2], [3], [4]])
output = layer(inp)
# Assert equal for hashed output that should be true on all platforms.
self.assertAllClose([[1], [1], [2], [0], [1]], output)
def test_hash_sparse_input_farmhash(self):
layer = hashing.Hashing(num_bins=2)
indices = [[0, 0], [1, 0], [1, 1], [2, 0], [2, 1]]
inp = tf.SparseTensor(
indices=indices,
values=["omar", "stringer", "marlo", "wire", "skywalker"],
dense_shape=[3, 2],
)
output = layer(inp)
self.assertAllClose(indices, output.indices)
self.assertAllClose([0, 0, 1, 0, 0], output.values)
def test_hash_sparse_input_mask_value_farmhash(self):
empty_mask_layer = hashing.Hashing(num_bins=3, mask_value="")
omar_mask_layer = hashing.Hashing(num_bins=3, mask_value="omar")
indices = [[0, 0], [1, 0], [1, 1], [2, 0], [2, 1]]
inp = tf.SparseTensor(
indices=indices,
values=["omar", "stringer", "marlo", "wire", "skywalker"],
dense_shape=[3, 2],
)
empty_mask_output = empty_mask_layer(inp)
omar_mask_output = omar_mask_layer(inp)
self.assertAllClose(indices, omar_mask_output.indices)
self.assertAllClose(indices, empty_mask_output.indices)
# Outputs should be one more than test_hash_sparse_input_farmhash (the
# zeroth bin is now reserved for masks).
self.assertAllClose([1, 1, 2, 1, 1], empty_mask_output.values)
# 'omar' should map to 0.
self.assertAllClose([0, 1, 2, 1, 1], omar_mask_output.values)
def test_hash_sparse_int_input_farmhash(self):
layer = hashing.Hashing(num_bins=3)
indices = [[0, 0], [1, 0], [1, 1], [2, 0], [2, 1]]
inp = tf.SparseTensor(
indices=indices, values=[0, 1, 2, 3, 4], dense_shape=[3, 2]
)
output = layer(inp)
self.assertAllClose(indices, output.indices)
self.assertAllClose([1, 0, 1, 0, 2], output.values)
def test_hash_sparse_input_siphash(self):
layer = hashing.Hashing(num_bins=2, salt=[133, 137])
indices = [[0, 0], [1, 0], [1, 1], [2, 0], [2, 1]]
inp = tf.SparseTensor(
indices=indices,
values=["omar", "stringer", "marlo", "wire", "skywalker"],
dense_shape=[3, 2],
)
output = layer(inp)
self.assertAllClose(output.indices, indices)
# The result should be same with test_hash_dense_input_siphash.
self.assertAllClose([0, 1, 0, 1, 0], output.values)
layer_2 = hashing.Hashing(num_bins=2, salt=[211, 137])
output = layer_2(inp)
# The result should be same with test_hash_dense_input_siphash.
self.assertAllClose([1, 0, 1, 0, 1], output.values)
def test_hash_sparse_int_input_siphash(self):
layer = hashing.Hashing(num_bins=3, salt=[133, 137])
indices = [[0, 0], [1, 0], [1, 1], [2, 0], [2, 1]]
inp = tf.SparseTensor(
indices=indices, values=[0, 1, 2, 3, 4], dense_shape=[3, 2]
)
output = layer(inp)
self.assertAllClose(indices, output.indices)
self.assertAllClose([1, 1, 2, 0, 1], output.values)
def test_hash_ragged_string_input_farmhash(self):
layer = hashing.Hashing(num_bins=2)
inp_data = tf.ragged.constant(
[
["omar", "stringer", "marlo", "wire"],
["marlo", "skywalker", "wire"],
],
dtype=tf.string,
)
out_data = layer(inp_data)
# Same hashed output as test_hash_sparse_input_farmhash
expected_output = [[0, 0, 1, 0], [1, 0, 0]]
self.assertAllEqual(expected_output, out_data)
inp_t = input_layer.Input(shape=(None,), ragged=True, dtype=tf.string)
out_t = layer(inp_t)
model = training.Model(inputs=inp_t, outputs=out_t)
self.assertAllClose(out_data, model.predict(inp_data))
def test_hash_ragged_input_mask_value(self):
empty_mask_layer = hashing.Hashing(num_bins=3, mask_value="")
omar_mask_layer = hashing.Hashing(num_bins=3, mask_value="omar")
inp_data = tf.ragged.constant(
[
["omar", "stringer", "marlo", "wire"],
["marlo", "skywalker", "wire"],
],
dtype=tf.string,
)
empty_mask_output = empty_mask_layer(inp_data)
omar_mask_output = omar_mask_layer(inp_data)
# Outputs should be one more than test_hash_ragged_string_input_farmhash
# (the zeroth bin is now reserved for masks).
expected_output = [[1, 1, 2, 1], [2, 1, 1]]
self.assertAllClose(expected_output, empty_mask_output)
# 'omar' should map to 0.
expected_output = [[0, 1, 2, 1], [2, 1, 1]]
self.assertAllClose(expected_output, omar_mask_output)
def test_hash_ragged_int_input_farmhash(self):
layer = hashing.Hashing(num_bins=3)
inp_data = tf.ragged.constant([[0, 1, 3, 4], [2, 1, 0]], dtype=tf.int64)
out_data = layer(inp_data)
# Same hashed output as test_hash_sparse_input_farmhash
expected_output = [[1, 0, 0, 2], [1, 0, 1]]
self.assertAllEqual(expected_output, out_data)
inp_t = input_layer.Input(shape=(None,), ragged=True, dtype=tf.int64)
out_t = layer(inp_t)
model = training.Model(inputs=inp_t, outputs=out_t)
self.assertAllClose(out_data, model.predict(inp_data))
def test_hash_ragged_string_input_siphash(self):
layer = hashing.Hashing(num_bins=2, salt=[133, 137])
inp_data = tf.ragged.constant(
[
["omar", "stringer", "marlo", "wire"],
["marlo", "skywalker", "wire"],
],
dtype=tf.string,
)
out_data = layer(inp_data)
# Same hashed output as test_hash_dense_input_siphash
expected_output = [[0, 1, 0, 1], [0, 0, 1]]
self.assertAllEqual(expected_output, out_data)
inp_t = input_layer.Input(shape=(None,), ragged=True, dtype=tf.string)
out_t = layer(inp_t)
model = training.Model(inputs=inp_t, outputs=out_t)
self.assertAllClose(out_data, model.predict(inp_data))
layer_2 = hashing.Hashing(num_bins=2, salt=[211, 137])
out_data = layer_2(inp_data)
expected_output = [[1, 0, 1, 0], [1, 1, 0]]
self.assertAllEqual(expected_output, out_data)
out_t = layer_2(inp_t)
model = training.Model(inputs=inp_t, outputs=out_t)
self.assertAllClose(out_data, model.predict(inp_data))
def test_hash_ragged_int_input_siphash(self):
layer = hashing.Hashing(num_bins=3, salt=[133, 137])
inp_data = tf.ragged.constant([[0, 1, 3, 4], [2, 1, 0]], dtype=tf.int64)
out_data = layer(inp_data)
# Same hashed output as test_hash_sparse_input_farmhash
expected_output = [[1, 1, 0, 1], [2, 1, 1]]
self.assertAllEqual(expected_output, out_data)
inp_t = input_layer.Input(shape=(None,), ragged=True, dtype=tf.int64)
out_t = layer(inp_t)
model = training.Model(inputs=inp_t, outputs=out_t)
self.assertAllClose(out_data, model.predict(inp_data))
def test_invalid_inputs(self):
with self.assertRaisesRegex(ValueError, "cannot be `None`"):
_ = hashing.Hashing(num_bins=None)
with self.assertRaisesRegex(ValueError, "cannot be `None`"):
_ = hashing.Hashing(num_bins=-1)
with self.assertRaisesRegex(
ValueError, "can only be a tuple of size 2"
):
_ = hashing.Hashing(num_bins=2, salt="string")
with self.assertRaisesRegex(
ValueError, "can only be a tuple of size 2"
):
_ = hashing.Hashing(num_bins=2, salt=[1])
with self.assertRaisesRegex(
ValueError, "can only be a tuple of size 2"
):
_ = hashing.Hashing(num_bins=1, salt=tf.constant([133, 137]))
def test_one_hot_output(self):
input_array = np.array([0, 1, 2, 3, 4])
expected_output = [
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, 1.0],
]
expected_output_shape = [None, 3]
inputs = keras.Input(shape=(1,), dtype="int32")
layer = hashing.Hashing(num_bins=3, output_mode="one_hot")
outputs = layer(inputs)
self.assertAllEqual(expected_output_shape, outputs.shape.as_list())
model = keras.Model(inputs, outputs)
output_data = model(input_array)
self.assertAllEqual(expected_output, output_data)
def test_multi_hot_output(self):
input_array = np.array([0, 1, 2, 3, 4])
expected_output = [1.0, 1.0, 1.0]
expected_output_shape = [None, 3]
inputs = keras.Input(shape=(3,), dtype="int32")
layer = hashing.Hashing(num_bins=3, output_mode="multi_hot")
outputs = layer(inputs)
self.assertAllEqual(expected_output_shape, outputs.shape.as_list())
model = keras.Model(inputs, outputs)
output_data = model(input_array)
self.assertAllEqual(expected_output, output_data)
def test_count_output(self):
input_array = np.array([0, 1, 2, 3, 4])
expected_output = [2.0, 2.0, 1.0]
expected_output_shape = [None, 3]
inputs = keras.Input(shape=(3,), dtype="int32")
layer = hashing.Hashing(num_bins=3, output_mode="count")
outputs = layer(inputs)
self.assertAllEqual(expected_output_shape, outputs.shape.as_list())
model = keras.Model(inputs, outputs)
output_data = model(input_array)
self.assertAllEqual(expected_output, output_data)
@parameterized.named_parameters(
("int32", tf.int32),
("int64", tf.int64),
)
def test_output_dtype(self, dtype):
input_data = keras.Input(batch_size=16, shape=(4,), dtype="string")
layer = hashing.Hashing(num_bins=3, dtype=dtype)
output = | |
max_sum = 0
for i in range(num_bins):
v = (edges[i] + edges[i + 1]) / 2
if histogram[i] > max_count:
max_count = histogram[i]
num_maxs = 1
max_sum = v
elif histogram[i] == max_count:
num_maxs += 1
max_sum += v
return max_sum / num_maxs
@njit(fastmath=True, cache=True)
def _long_stretch(X_binary, val):
    """Length of the longest run of ``val`` in a binary sequence.

    NOTE(review): run boundaries are detected at value changes and at the
    final index, matching the reference implementation's convention for
    run length -- confirm against the upstream feature definition before
    changing.
    """
    run_start = 0
    longest = 0
    n = len(X_binary)
    for i in range(n):
        # A run ends when the value changes or the sequence ends.
        if X_binary[i] != val or i == n - 1:
            length = i - run_start
            if length > longest:
                longest = length
            run_start = i
    return longest
@njit(fastmath=True, cache=True)
def _outlier_include(X):
    """Summarise how "outliers" enter the series as a threshold is raised.

    Sweeps a threshold upward in 0.01 steps and, for each threshold, looks
    at the time points whose value reaches it; returns the median of the
    normalised median positions over the retained thresholds.
    NOTE(review): the fixed 0.01 step assumes ``X`` is scaled (e.g.
    z-scored) so that steps of 0.01 are meaningful -- confirm with caller.
    """
    # Count non-negative samples and find the maximum value (threshold cap).
    total = 0
    threshold = 0
    for v in X:
        if v >= 0:
            total += 1
        if v > threshold:
            threshold = v
    if threshold < 0.01:
        # Degenerate series: nothing exceeds a single threshold step.
        return 0
    num_thresholds = int(threshold / 0.01) + 1
    means = np.zeros(num_thresholds)
    dists = np.zeros(num_thresholds)
    medians = np.zeros(num_thresholds)
    for i in range(num_thresholds):
        d = i * 0.01
        count = 0
        # r collects the 1-based indices of samples reaching threshold d.
        r = np.zeros(len(X))
        for n in range(len(X)):
            if X[n] >= d:
                r[count] = n + 1
                count += 1
        if count == 0:
            continue
        # Gaps between consecutive qualifying indices.
        diff = np.zeros(count - 1)
        for n in range(len(diff)):
            diff[n] = r[n + 1] - r[n]
        # 9999999999 is a sentinel meaning "no gaps" (one qualifying point).
        means[i] = np.mean(diff) if len(diff) > 0 else 9999999999
        # Percentage of gaps relative to the count of non-negative samples.
        dists[i] = len(diff) * 100 / total
        # Median qualifying position, normalised to [-1, 1] about the middle.
        medians[i] = np.median(r[:count]) / (len(X) / 2) - 1
    mj = 0
    fbi = num_thresholds - 1
    for i in range(num_thresholds):
        # Highest threshold index still covering > 2% of the gap budget.
        if dists[i] > 2:
            mj = i
        # Mirrored index of thresholds that hit the "no gaps" sentinel.
        if means[i] == 9999999999:
            fbi = num_thresholds - 1 - i
    trim_limit = max(mj, fbi)
    return np.median(medians[: trim_limit + 1])
def _autocorr(X, X_fft):
    """Autocorrelation of ``X`` from its precomputed FFT.

    Inverse-transforms the coefficient-times-conjugate spectrum and
    normalises it via ``_get_acf``. Left un-jitted, presumably because it
    relies on ``np.fft`` -- confirm before adding ``@njit``.
    """
    spectrum_product = _multiply_complex_arr(X_fft)
    return _get_acf(X, np.fft.ifft(spectrum_product))
@njit(fastmath=True, cache=True)
def _multiply_complex_arr(X_fft):
    """Multiply each FFT coefficient by its own conjugate, element-wise.

    The conjugate is spelled out component-wise to stay numba-friendly.
    """
    out = np.zeros(len(X_fft), dtype=np.complex128)
    for idx in range(len(X_fft)):
        coeff = X_fft[idx]
        # coeff * conj(coeff); equals |coeff|^2 with zero imaginary part.
        out[idx] = coeff * (coeff.real - 1j * coeff.imag)
    return out
@njit(fastmath=True, cache=True)
def _get_acf(X, ca):
    """Normalise a convolution output by its lag-0 term to obtain the ACF.

    Returns all zeros when the lag-0 term is zero (avoids division by 0).
    """
    acf = np.zeros(len(X))
    norm = ca[0].real
    if norm != 0:
        for lag in range(len(X)):
            acf[lag] = ca[lag].real / norm
    return acf
@njit(fastmath=True, cache=True)
def _summaries_welch_rect(X, centroid, X_fft):
    """Spectral summary from a rectangular-window periodogram.

    Builds the one-sided power spectrum from the precomputed FFT ``X_fft``
    and returns either the spectral centroid frequency (``centroid=True``)
    or the area under the lowest fifth of the spectrum (``centroid=False``).
    """
    # One-sided spectrum length: DC through Nyquist.
    new_length = int(len(X_fft) / 2) + 1
    p = np.zeros(new_length)
    pi2 = 2 * math.pi
    # DC and Nyquist bins appear once; interior bins are doubled to fold
    # the symmetric negative frequencies into the one-sided spectrum.
    p[0] = (np.power(_complex_magnitude(X_fft[0]), 2) / len(X)) / pi2
    for i in range(1, new_length - 1):
        p[i] = ((np.power(_complex_magnitude(X_fft[i]), 2) / len(X)) * 2) / pi2
    p[new_length - 1] = (
        np.power(_complex_magnitude(X_fft[new_length - 1]), 2) / len(X)
    ) / pi2
    # Angular-frequency grid matching the one-sided spectrum.
    w = np.zeros(new_length)
    a = 1.0 / len(X_fft)
    for i in range(0, new_length):
        w[i] = i * a * math.pi * 2
    if centroid:
        # Cumulative power; the centroid is the first frequency at which
        # the running sum exceeds half the total power.
        cs = np.zeros(new_length)
        cs[0] = p[0]
        for i in range(1, new_length):
            cs[i] = cs[i - 1] + p[i]
        threshold = cs[new_length - 1] / 2
        for i in range(1, new_length):
            if cs[i] > threshold:
                return w[i]
        # Unreachable for well-formed spectra, but keeps numba typing sound.
        return np.nan
    else:
        # Rectangle-rule area under the lowest fifth of the spectrum.
        tau = int(np.floor(new_length / 5))
        nsum = 0
        for i in range(tau):
            nsum += p[i]
        return nsum * (w[1] - w[0])
@njit(fastmath=True, cache=True)
def _complex_magnitude(c):
    """Modulus |c| of a complex scalar."""
    return np.sqrt(c.real ** 2 + c.imag ** 2)
@njit(fastmath=True, cache=True)
def _local_simple_mean(X, train_length):
    """Residuals of a trailing-mean forecast with window ``train_length``.

    Each output element is the actual value minus the mean of the
    ``train_length`` values that precede it.
    """
    residuals = np.zeros(len(X) - train_length)
    for start in range(len(residuals)):
        window_sum = 0
        for offset in range(train_length):
            window_sum += X[start + offset]
        residuals[start] = X[start + train_length] - window_sum / train_length
    return residuals
@njit(fastmath=True, cache=True)
def _ac_first_zero(X_ac):
    """First lag at which the autocorrelation is non-positive.

    Returns ``len(X_ac)`` if no such lag exists (lag 0 is never tested).
    """
    n = len(X_ac)
    for lag in range(1, n):
        if X_ac[lag] <= 0:
            return lag
    return n
@njit(fastmath=True, cache=True)
def _fluct_prop(X, og_length, dfa):
    """Locate the scaling break of a fluctuation-analysis curve.

    Computes fluctuation sizes over up to 50 log-spaced window lengths and
    fits two straight lines to the log-log fluctuation curve; returns the
    normalised split point minimising the combined fit error. ``dfa``
    selects detrended fluctuation analysis (True) or range scaling (False).
    """
    # Candidate window lengths: log-spaced in [5, og_length / 2],
    # deduplicated after rounding to integers.
    a = np.zeros(50, dtype=np.int_)
    a[0] = 5
    n_tau = 1
    smin = 1.6094379124341003  # Math.log(5);
    smax = np.log(og_length / 2)
    inc = (smax - smin) / 49
    for i in range(1, 50):
        # Tiny epsilon guards against round-half issues in exp().
        val = int(np.round(np.exp(smin + inc * i) + 0.000000000001))
        if val != a[n_tau - 1]:
            a[n_tau] = val
            n_tau += 1
    if n_tau < 12:
        # Too few distinct scales for a meaningful two-segment fit.
        return np.nan
    f = np.zeros(n_tau)
    for i in range(n_tau):
        tau = a[i]
        # Split X into non-overlapping windows of length tau.
        buff_size = int(len(X) / tau)
        lag = 0
        if buff_size == 0:
            # Window longer than the series: use one (shortened) window.
            buff_size = 1
            lag = 1
        buffer = np.zeros((buff_size, tau))
        count = 0
        for n in range(buff_size):
            for j in range(tau - lag):
                buffer[n][j] = X[count]
                count += 1
        # Linearly detrend each window against positions 1..tau.
        d = np.zeros(tau)
        for n in range(tau):
            d[n] = n + 1
        for n in range(buff_size):
            c1, c2 = _linear_regression(d, buffer[n], tau, 0)
            for j in range(tau):
                buffer[n][j] = buffer[n][j] - (c1 * (j + 1) + c2)
            if dfa:
                # DFA: accumulate squared residuals.
                for j in range(tau):
                    f[i] += buffer[n][j] * buffer[n][j]
            else:
                # Range scaling: accumulate squared per-window ranges.
                f[i] += np.power(np.max(buffer[n]) - np.min(buffer[n]), 2)
        if dfa:
            f[i] = np.sqrt(f[i] / (buff_size * tau))
        else:
            f[i] = np.sqrt(f[i] / buff_size)
    log_a = np.zeros(n_tau)
    log_f = np.zeros(n_tau)
    for i in range(n_tau):
        log_a[i] = np.log(a[i])
        log_f[i] = np.log(f[i])
    # For each candidate split i, fit one line to the first i scales and
    # one to the remainder; sserr records the summed root-square errors.
    sserr = np.zeros(n_tau - 11)
    for i in range(6, n_tau - 5):
        c1_1, c1_2 = _linear_regression(log_a, log_f, i, 0)
        c2_1, c2_2 = _linear_regression(log_a, log_f, n_tau - i + 1, i - 1)
        sum1 = 0
        for n in range(i):
            sum1 += np.power(log_a[n] * c1_1 + c1_2 - log_f[n], 2)
        sserr[i - 6] += np.sqrt(sum1)
        sum2 = 0
        for n in range(n_tau - i + 1):
            sum2 += np.power(log_a[n + i - 1] * c2_1 + c2_2 - log_f[n + i - 1], 2)
        sserr[i - 6] += np.sqrt(sum2)
    # Best split index (offset back by the scan start), normalised.
    return (np.argmin(sserr) + 6) / n_tau
@njit(fastmath=True, cache=True)
def _linear_regression(X, y, n, lag):
    """Ordinary least-squares fit over the slice ``[lag, lag + n)``.

    Returns ``(slope, intercept)``; ``(0, 0)`` when the predictor is
    degenerate (zero determinant).
    """
    sum_x = 0
    sum_xx = 0
    sum_xy = 0
    sum_y = 0
    for i in range(lag, n + lag):
        xi = X[i]
        sum_x += xi
        sum_xx += xi * xi
        sum_xy += xi * y[i]
        sum_y += y[i]
    det = n * sum_xx - sum_x * sum_x
    if det == 0:
        return 0, 0
    slope = (n * sum_xy - sum_x * sum_y) / det
    intercept = (sum_y * sum_xx - sum_x * sum_xy) / det
    return slope, intercept
@njit(fastmath=True, cache=True)
def _spline_fit(X):
breaks = np.array([0, len(X) / 2 - 1, len(X) - 1])
h0 = np.array([breaks[1] - breaks[0], breaks[2] - breaks[1]])
h_copy = np.array([h0[0], h0[1], h0[0], h0[1]])
hl = np.array([h_copy[3], h_copy[2], h_copy[1]])
hr = np.array([h_copy[0], h_copy[1], h_copy[2]])
hlCS = np.zeros(3)
hlCS[0] = hl[0]
for i in range(1, 3):
hlCS[i] = hlCS[i - 1] + hl[i]
bl = np.zeros(3)
for i in range(3):
bl[i] = breaks[0] - hlCS[i]
hrCS = np.zeros(3)
hrCS[0] = hr[0]
for i in range(1, 3):
hrCS[i] = hrCS[i - 1] + hr[i]
br = np.zeros(3)
for i in range(3):
br[i] = breaks[2] - hrCS[i]
breaksExt = np.zeros(9)
for i in range(3):
breaksExt[i] = bl[2 - i]
breaksExt[i + 3] = breaks[i]
breaksExt[i + 6] = br[i]
hExt = np.zeros(8)
for i in range(8):
hExt[i] = breaksExt[i + 1] - breaksExt[i]
coeffs = np.zeros((32, 4))
for i in range(0, 32, 4):
coeffs[i][0] = 1
ii = np.zeros((4, 8), dtype=np.int32)
for i in range(8):
ii[0][i] = i
ii[1][i] = min(1 + i, 7)
ii[2][i] = min(2 + i, 7)
ii[3][i] = min(3 + i, 7)
H = np.zeros(32)
for i in range(32):
H[i] = hExt[ii[i % 4][int(i / 4)]]
for k in range(1, 4):
for j in range(k):
for u in range(32):
coeffs[u][j] *= H[u] / (k - j)
Q = np.zeros((4, 8))
for u in range(32):
for m in range(4):
Q[u % 4][int(u / 4)] += coeffs[u][m]
for u in range(8):
for m in range(1, 4):
Q[m][u] += Q[m - 1][u]
for u in range(32):
if u % 4 > 0:
coeffs[u][k] = Q[u % 4 - 1][int(u / 4)]
fmax = np.zeros(32)
for i in range(8):
for j in range(4):
fmax[i * 4 + j] = Q[3][i]
for j in range(k + 1):
for u in range(32):
coeffs[u][j] /= fmax[u]
for i in range(29):
for j in range(k + 1):
coeffs[i][j] -= coeffs[3 + i][j]
for i in range(0, 32, 4):
coeffs[i][k] = 0
scale = np.ones(32)
for k in range(3):
for i in range(32):
scale[i] /= H[i]
for i in range(32):
coeffs[i][3 - (k + 1)] *= scale[i]
jj = np.zeros((4, 2), dtype=np.int32)
for i in range(4):
for j in range(2):
if i == 0:
jj[i][j] = 4 * (1 + j)
else:
jj[i][j] = 3
for i in range(1, 4):
for j in range(2):
jj[i][j] += jj[i - 1][j]
coeffs_out = np.zeros((8, 4))
for i in range(8):
coeffs_out[i] = coeffs[jj[i % 4][int(i / 4)] - 1]
xsB = np.zeros(len(X) * 4)
indexB = np.zeros(len(xsB), dtype=np.int32)
| |
# Repository: joommf/joommfutils
import keyword
import numbers
import numpy as np
class Descriptor:
    """Descriptor base class from which all descriptors in
    ``ubermagutil.typesystem`` are derived.

    Before setting the attribute value of a decorated class is allowed, certain
    type and value checks are performed. If they are not according to the
    specifications in the ``__set__`` method (defined in the derived class),
    ``TypeError`` or ``ValueError`` is raised. If ``const=True`` is passed when
    the class is instantiated, no value changes are allowed after the initial
    assignment. Deleting attributes of a decorated class is never allowed.

    Parameters
    ----------
    name : str
        Attribute name. It must be a valid Python variable name. Defaults to
        ``None``.
    const : bool, optional
        If ``const=True``, the attribute of the decorated class is constant and
        its value cannot be changed after the first set.

    Example
    -------
    1. Deriving a descriptor class from the base class ``Descriptor``, which
    only allows positive integer values.

    >>> import ubermagutil.typesystem as ts
    ...
    >>> class PositiveInt(ts.Descriptor):
    ...     def __set__(self, instance, value):
    ...         if not isinstance(value, int):
    ...             raise TypeError('Allowed only type(value) == int.')
    ...         if value < 0:
    ...             raise ValueError('Allowed only value >= 0.')
    ...         super().__set__(instance, value)
    ...
    >>> @ts.typesystem(myattribute=PositiveInt())
    ... class DecoratedClass:
    ...     def __init__(self, myattribute):
    ...         self.myattribute = myattribute
    ...
    >>> dc = DecoratedClass(myattribute=5)
    >>> dc.myattribute
    5
    >>> dc.myattribute = 101  # valid set
    >>> dc.myattribute
    101
    >>> dc.myattribute = -1  # invalid set - negative value
    Traceback (most recent call last):
    ...
    ValueError: ...
    >>> dc.myattribute = 3.14  # invalid set - float value
    Traceback (most recent call last):
    ...
    TypeError: ...
    >>> dc.myattribute  # value has not been affected by invalid sets
    101

    """

    def __init__(self, name=None, **kwargs):
        # Record the attribute name and any descriptor options (e.g. const).
        self.name = name
        for key, value in kwargs.items():
            setattr(self, key, value)

    def __set__(self, instance, value):
        """If ``self.const=True``, changing the value of a decorated class
        attribute after the initial set is not allowed.

        Raises
        ------
        AttributeError
            If changing the value of a decorated class attribute is attempted.

        Example
        -------
        1. Changing the value of a constant decorated class attribute.

        >>> import ubermagutil.typesystem as ts
        ...
        >>> @ts.typesystem(myattribute=ts.Descriptor(const=True))
        ... class DecoratedClass:
        ...     def __init__(self, myattribute):
        ...         self.myattribute = myattribute
        ...
        >>> dc = DecoratedClass(myattribute="Nikola Tesla")
        >>> dc.myattribute
        'Nikola Tesla'
        >>> dc.myattribute = 'Mihajlo Pupin'
        Traceback (most recent call last):
        ...
        AttributeError: ...

        """
        if hasattr(self, "const"):
            # A const attribute may still receive its very first value.
            if not self.const or self.name not in instance.__dict__:
                instance.__dict__[self.name] = value
            else:
                msg = f"Changing {self.name} not allowed."
                raise AttributeError(msg)
        else:
            # Non-const descriptors always store the value on the instance.
            instance.__dict__[self.name] = value

    def __delete__(self, instance):
        """Deleting the decorated class attribute is never allowed and
        ``AttributeError`` is raised.

        Raises
        ------
        AttributeError
            If deleting decorated class attribute is attempted.

        Example
        -------
        1. Deleting an attribute of a decorated class.

        >>> import ubermagutil.typesystem as ts
        ...
        >>> @ts.typesystem(myattribute=ts.Descriptor())
        ... class DecoratedClass:
        ...     def __init__(self, myattribute):
        ...         self.myattribute = myattribute
        ...
        >>> dc = DecoratedClass(myattribute="Nikola Tesla")
        >>> dc.myattribute
        'Nikola Tesla'
        >>> del dc.myattribute
        Traceback (most recent call last):
        ...
        AttributeError: ...

        """
        msg = f"Deleting {self.name} not allowed."
        raise AttributeError(msg)
class Typed(Descriptor):
    """Descriptor allowing setting attributes only with values of a certain
    type.

    Parameters
    ----------
    expected_type : type
        Allowed type of value.
    allow_none : bool
        If ``True``, the value can be set with ``None``.

    Raises
    ------
    TypeError
        If ``type(value) != expected_type``.

    Example
    -------
    1. Usage of ``Typed`` descriptor.

    >>> import ubermagutil.typesystem as ts
    ...
    >>> @ts.typesystem(myattribute=ts.Typed(expected_type=str))
    ... class DecoratedClass:
    ...     def __init__(self, myattribute):
    ...         self.myattribute = myattribute
    ...
    >>> dc = DecoratedClass(myattribute='Nikola Tesla')
    >>> dc.myattribute
    'Nikola Tesla'
    >>> dc.myattribute = 'Mihajlo Pupin'  # valid set
    >>> dc.myattribute
    'Mihajlo Pupin'
    >>> dc.myattribute = 3.14  # invalid set
    Traceback (most recent call last):
    ...
    TypeError: ...

    .. note::

        This class was derived from ``ubermagutil.typesystem.Descriptor``
        and inherits its functionality.

    .. seealso:: :py:class:`~ubermagutil.typesystem.Descriptor`

    """

    def __set__(self, instance, value):
        # ``allow_none`` short-circuits the type check for None values.
        if hasattr(self, "allow_none"):
            if self.allow_none and value is None:
                super().__set__(instance, value)
                return None
        # isinstance (not an exact type match) is used, so subclasses of
        # expected_type are accepted as well.
        if not isinstance(value, self.expected_type):
            msg = f"Cannot set {self.name} with {type(value)}."
            raise TypeError(msg)
        super().__set__(instance, value)
class Scalar(Descriptor):
    """Descriptor allowing setting attributes only with scalars
    (``numbers.Real``).

    Parameters
    ----------
    expected_type : int or float type, optional
        Allowed type of ``value``. It should be a subset of ``numbers.Real``
        (e.g. ``int`` or ``float``).
    positive : bool, optional
        If ``positive=True``, value must be positive (>0).
    unsigned : bool, optional
        If ``unsigned=True``, value must be unsigned (>=0).
    otherwise : type
        This type would also be accepted if specified. It has priority over
        other descriptor specification.

    Raises
    ------
    TypeError
        If ``type(value)`` is neither ``numbers.Real`` nor ``expected_type``
        (if passed).
    ValueError
        If ``value < 0`` and ``unsigned=True`` is passed or ``value <= 0`` and
        ``positive=True`` is passed.

    Example
    -------
    1. Usage of ``Scalar`` descriptor for defining a positive integer.

    >>> import ubermagutil.typesystem as ts
    ...
    >>> @ts.typesystem(myattribute=ts.Scalar(expected_type=int, positive=True))
    ... class DecoratedClass:
    ...     def __init__(self, myattribute):
    ...         self.myattribute = myattribute
    ...
    >>> dc = DecoratedClass(myattribute=5)
    >>> dc.myattribute
    5
    >>> dc.myattribute = 10  # valid set
    >>> dc.myattribute
    10
    >>> dc.myattribute = 3.14  # invalid set
    Traceback (most recent call last):
    ...
    TypeError: ...
    >>> dc.myattribute = 0  # invalid set
    Traceback (most recent call last):
    ...
    ValueError: ...
    >>> dc.myattribute  # the value was not affected by invalid sets
    10

    .. note::

        This class was derived from ``ubermagutil.typesystem.Descriptor``
        and inherits its functionality.

    .. seealso:: :py:class:`~ubermagutil.typesystem.Descriptor`

    """

    def __set__(self, instance, value):
        # ``otherwise`` has priority: that type is accepted with no further
        # checks.
        if hasattr(self, "otherwise"):
            if isinstance(value, self.otherwise):
                super().__set__(instance, value)
                return None
        # Base requirement: a real number. NOTE(review): bool is a subclass
        # of int and therefore passes this check -- confirm whether that is
        # intended.
        if not isinstance(value, numbers.Real):
            msg = f"Cannot set {self.name} with {type(value)}."
            raise TypeError(msg)
        # Optional narrowing to a specific numeric type.
        if hasattr(self, "expected_type"):
            if not isinstance(value, self.expected_type):
                msg = f"Cannot set {self.name} with {type(value)}."
                raise TypeError(msg)
        # Optional value-range constraints.
        if hasattr(self, "unsigned"):
            if self.unsigned and value < 0:
                msg = f"Cannot set {self.name} with value = {value} < 0."
                raise ValueError(msg)
        if hasattr(self, "positive"):
            if self.positive and value <= 0:
                msg = f"Cannot set {self.name} with value = {value} <= 0."
                raise ValueError(msg)
        super().__set__(instance, value)
class Vector(Descriptor):
"""Descriptor allowing setting attributes only with vectors (``list``,
``tuple``, or ``numpy.ndarray``), whose elements are of ``numbers.Real``
type.
Parameters
----------
component_type : int or float type, optional
Type of the vector components. It should be a subset of
``numbers.Real`` (``int``, ``float``).
size : int, optional
Size (length, number of elements) of the vector.
positive : bool, optional
If ``positive=True``, values of all vector elements must be positive
(>0).
unsigned : bool, optional
If ``unsigned=True``, values of all vector components must be unsigned
(>=0).
otherwise : type
This type would also be accepted if specified. It has priority over
other descriptor specification.
Raises
------
TypeError
If the ``type(value)`` is not ``list``, ``tuple``, or ``numpy.ndarray``
or if the type of vector components is neither ``numbers.Real`` nor
``expected_type`` (if passed).
ValueError
If vector component value is ``value < 0`` and ``unsigned=True`` or
``value <= 0`` and ``positive=True``.
Example
-------
1. Usage of ``Vector`` descriptor for defining a three-dimensional vector,
whose components myattribute positive integer components.
>>> import ubermagutil.typesystem as ts
...
>>> @ts.typesystem(myattribute=ts.Vector(size=3, component_type=int,
... positive=True))
... class DecoratedClass:
... def __init__(self, myattribute):
... self.myattribute = myattribute
...
>>> dc = DecoratedClass(myattribute=(1, 2, 12))
>>> dc.myattribute
(1, 2, 12)
>>> dc.myattribute = (10, 11, 12) # valid set
>>> dc.myattribute
(10, 11, 12)
>>> dc.myattribute = (11, 12) # invalid set
Traceback (most recent call last):
...
ValueError: ...
>>> dc.myattribute = (0, 1, 2) # invalid set
Traceback (most recent call last):
...
ValueError: ...
>>> dc.myattribute = (1, 3.14, 2) # invalid set
Traceback (most recent call last):
...
TypeError: ...
>>> dc.myattribute # the value was not affected by invalid sets
(10, 11, 12)
.. note::
This class was derived from ``ubermagutil.typesystem.Descriptor``
and inherits its functionality.
.. seealso:: :py:class:`~ubermagutil.typesystem.Descriptor`
"""
def __set__(self, instance, value):
if hasattr(self, "otherwise"):
if isinstance(value, self.otherwise):
super().__set__(instance, value)
return None
if not isinstance(value, (tuple, list, np.ndarray)):
msg = f"Cannot set {self.name} with {type(value)}."
raise TypeError(msg)
if not all(isinstance(i, numbers.Real) for i in value):
msg = "Allowed only type(value[i]) == numbers.Real."
raise TypeError(msg)
if hasattr(self, "size"):
if len(value) != self.size:
msg = f"Cannot set {self.name} with length {len(value)} value."
raise ValueError(msg)
if hasattr(self, | |
any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
"""
resource_path = "/groups/{groupId}"
method = "DELETE"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"delete_group got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"groupId": group_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
    def delete_identity_provider(self, identity_provider_id, **kwargs):
        """
        Deletes the specified identity provider. The identity provider must not have
        any group mappings (see :class:`IdpGroupMapping`).

        :param str identity_provider_id: (required)
            The OCID of the identity provider.

        :param str if_match: (optional)
            For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match`
            parameter to the value of the etag from a previous GET or POST response for that resource. The resource
            will be updated or deleted only if the etag you provide matches the resource's current etag value.

        :param obj retry_strategy: (optional)
            A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
            This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
            is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__.
            To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.

        :return: A :class:`~oci.response.Response` object with data of type None
        :rtype: :class:`~oci.response.Response`
        """
        resource_path = "/identityProviders/{identityProviderId}"
        method = "DELETE"

        # Don't accept unknown kwargs
        expected_kwargs = [
            "retry_strategy",
            "if_match"
        ]
        extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
        if extra_kwargs:
            raise ValueError(
                "delete_identity_provider got unknown kwargs: {!r}".format(extra_kwargs))

        path_params = {
            "identityProviderId": identity_provider_id
        }

        # Drop unset path parameters, then reject empty/whitespace values,
        # which would produce a malformed request URL.
        path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}

        for (k, v) in six.iteritems(path_params):
            if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
                raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

        header_params = {
            "accept": "application/json",
            "content-type": "application/json",
            "if-match": kwargs.get("if_match", missing)
        }
        # Strip headers that were not supplied by the caller.
        header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

        # A per-call retry strategy overrides the client-level one.
        retry_strategy = self.retry_strategy
        if kwargs.get('retry_strategy'):
            retry_strategy = kwargs.get('retry_strategy')

        if retry_strategy:
            return retry_strategy.make_retrying_call(
                self.base_client.call_api,
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                header_params=header_params)
        else:
            return self.base_client.call_api(
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                header_params=header_params)
def delete_idp_group_mapping(self, identity_provider_id, mapping_id, **kwargs):
    """
    Deletes the specified group mapping.

    :param str identity_provider_id: (required)
        The OCID of the identity provider.
    :param str mapping_id: (required)
        The OCID of the group mapping.
    :param str if_match: (optional)
        Etag for optimistic concurrency control; the delete only happens
        if it matches the resource's current etag.
    :param obj retry_strategy: (optional)
        Per-call retry strategy that overrides the client-level one.
    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/identityProviders/{identityProviderId}/groupMappings/{mappingId}"
    method = "DELETE"

    # Reject any keyword arguments this operation does not understand.
    allowed_kwargs = ("retry_strategy", "if_match")
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "delete_idp_group_mapping got unknown kwargs: {!r}".format(unknown))

    raw_path_params = {
        "identityProviderId": identity_provider_id,
        "mappingId": mapping_id
    }
    # Drop sentinel values, then validate each remaining path parameter.
    path_params = {}
    for name, value in six.iteritems(raw_path_params):
        if value is missing:
            continue
        if value is None or (isinstance(value, six.string_types) and len(value.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))
        path_params[name] = value

    candidate_headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "if-match": kwargs.get("if_match", missing)
    }
    # Only send headers that were actually provided.
    header_params = {
        name: value
        for name, value in six.iteritems(candidate_headers)
        if value is not missing and value is not None
    }

    # A truthy per-call strategy takes precedence over the client default.
    retry_strategy = kwargs.get('retry_strategy') or self.retry_strategy

    if retry_strategy:
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params)
    return self.base_client.call_api(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params)
def delete_mfa_totp_device(self, user_id, mfa_totp_device_id, **kwargs):
    """
    Deletes the specified MFA TOTP device for the specified user.

    :param str user_id: (required)
        The OCID of the user.
    :param str mfa_totp_device_id: (required)
        The OCID of the MFA TOTP device.
    :param str if_match: (optional)
        Etag for optimistic concurrency control; the delete only happens
        if it matches the resource's current etag.
    :param obj retry_strategy: (optional)
        Per-call retry strategy that overrides the client-level one.
    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/users/{userId}/mfaTotpDevices/{mfaTotpDeviceId}"
    method = "DELETE"

    # Reject any keyword arguments this operation does not understand.
    allowed_kwargs = ("retry_strategy", "if_match")
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "delete_mfa_totp_device got unknown kwargs: {!r}".format(unknown))

    raw_path_params = {
        "userId": user_id,
        "mfaTotpDeviceId": mfa_totp_device_id
    }
    # Drop sentinel values, then validate each remaining path parameter.
    path_params = {}
    for name, value in six.iteritems(raw_path_params):
        if value is missing:
            continue
        if value is None or (isinstance(value, six.string_types) and len(value.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))
        path_params[name] = value

    candidate_headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "if-match": kwargs.get("if_match", missing)
    }
    # Only send headers that were actually provided.
    header_params = {
        name: value
        for name, value in six.iteritems(candidate_headers)
        if value is not missing and value is not None
    }

    # A truthy per-call strategy takes precedence over the client default.
    retry_strategy = kwargs.get('retry_strategy') or self.retry_strategy

    if retry_strategy:
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params)
    return self.base_client.call_api(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params)
def delete_network_source(self, network_source_id, **kwargs):
"""
Deletes the specified network source
:param str network_source_id: (required)
The OCID of the network source.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match`
parameter to the value of the etag from a previous GET or POST response for that resource. The resource
will be updated or deleted only if the etag you provide matches the resource's current etag value.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
"""
resource_path = "/networkSources/{networkSourceId}"
method = "DELETE"
# Don't | |
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from . import (
assert_array_equal,
assert_chunks_equal,
assert_equal,
assert_identical,
raise_if_dask_computes,
requires_cftime,
requires_dask,
)
class TestDatetimeAccessor:
    """Tests for the ``.dt`` accessor on datetime64-backed DataArrays."""

    @pytest.fixture(autouse=True)
    def setup(self):
        # Build a random (lon, lat, time) cube plus a same-shaped array of
        # datetimes so both coordinate access and element-wise access are
        # exercised by the tests below.
        nt = 100
        data = np.random.rand(10, 10, nt)
        lons = np.linspace(0, 11, 10)
        lats = np.linspace(0, 20, 10)
        self.times = pd.date_range(start="2000/01/01", freq="H", periods=nt)
        self.data = xr.DataArray(
            data,
            coords=[lons, lats, self.times],
            dims=["lon", "lat", "time"],
            name="data",
        )
        self.times_arr = np.random.choice(self.times, size=(10, 10, nt))
        self.times_data = xr.DataArray(
            self.times_arr,
            coords=[lons, lats, self.times],
            dims=["lon", "lat", "time"],
            name="data",
        )

    @pytest.mark.parametrize(
        "field",
        [
            "year",
            "month",
            "day",
            "hour",
            "minute",
            "second",
            "microsecond",
            "nanosecond",
            "week",
            "weekofyear",
            "dayofweek",
            "weekday",
            "dayofyear",
            "quarter",
            "date",
            "time",
            "is_month_start",
            "is_month_end",
            "is_quarter_start",
            "is_quarter_end",
            "is_year_start",
            "is_year_end",
            "is_leap_year",
        ],
    )
    def test_field_access(self, field) -> None:
        # Each datetime component exposed by the accessor must match the
        # value pandas computes for the same DatetimeIndex.
        if LooseVersion(pd.__version__) >= "1.1.0" and field in ["week", "weekofyear"]:
            # pandas >= 1.1 deprecated .week/.weekofyear; isocalendar is the
            # reference implementation there.
            data = self.times.isocalendar()["week"]
        else:
            data = getattr(self.times, field)
        expected = xr.DataArray(data, name=field, coords=[self.times], dims=["time"])
        if field in ["week", "weekofyear"]:
            # Accessing the deprecated fields must emit a FutureWarning.
            with pytest.warns(
                FutureWarning, match="dt.weekofyear and dt.week have been deprecated"
            ):
                actual = getattr(self.data.time.dt, field)
        else:
            actual = getattr(self.data.time.dt, field)
        assert_equal(expected, actual)

    @pytest.mark.parametrize(
        "field, pandas_field",
        [
            ("year", "year"),
            ("week", "week"),
            ("weekday", "day"),
        ],
    )
    def test_isocalendar(self, field, pandas_field) -> None:
        # ``.dt.isocalendar()`` mirrors pandas' DatetimeIndex.isocalendar(),
        # which only exists for pandas >= 1.1.
        if LooseVersion(pd.__version__) < "1.1.0":
            with pytest.raises(
                AttributeError, match=r"'isocalendar' not available in pandas < 1.1.0"
            ):
                self.data.time.dt.isocalendar()[field]
            return

        # pandas isocalendar has dtype UInt32Dtype, convert to Int64
        expected = pd.Int64Index(getattr(self.times.isocalendar(), pandas_field))
        expected = xr.DataArray(
            expected, name=field, coords=[self.times], dims=["time"]
        )

        actual = self.data.time.dt.isocalendar()[field]

        assert_equal(expected, actual)

    def test_strftime(self) -> None:
        # Index 1 is the second hourly timestamp: 2000-01-01 01:00:00.
        assert (
            "2000-01-01 01:00:00" == self.data.time.dt.strftime("%Y-%m-%d %H:%M:%S")[1]
        )

    def test_not_datetime_type(self) -> None:
        # Using ``.dt`` on a non-datetime coordinate must raise TypeError.
        nontime_data = self.data.copy()
        int_data = np.arange(len(self.data.time)).astype("int8")
        nontime_data = nontime_data.assign_coords(time=int_data)
        with pytest.raises(TypeError, match=r"dt"):
            nontime_data.time.dt

    @pytest.mark.filterwarnings("ignore:dt.weekofyear and dt.week have been deprecated")
    @requires_dask
    @pytest.mark.parametrize(
        "field",
        [
            "year",
            "month",
            "day",
            "hour",
            "minute",
            "second",
            "microsecond",
            "nanosecond",
            "week",
            "weekofyear",
            "dayofweek",
            "weekday",
            "dayofyear",
            "quarter",
            "date",
            "time",
            "is_month_start",
            "is_month_end",
            "is_quarter_start",
            "is_quarter_end",
            "is_year_start",
            "is_year_end",
            "is_leap_year",
        ],
    )
    def test_dask_field_access(self, field) -> None:
        # Field access on a dask-backed array must stay lazy, preserve the
        # chunking, and agree with the eager result once computed.
        import dask.array as da

        expected = getattr(self.times_data.dt, field)

        dask_times_arr = da.from_array(self.times_arr, chunks=(5, 5, 50))
        dask_times_2d = xr.DataArray(
            dask_times_arr, coords=self.data.coords, dims=self.data.dims, name="data"
        )

        with raise_if_dask_computes():
            actual = getattr(dask_times_2d.dt, field)

        assert isinstance(actual.data, da.Array)
        assert_chunks_equal(actual, dask_times_2d)
        assert_equal(actual.compute(), expected.compute())

    @requires_dask
    @pytest.mark.parametrize(
        "field",
        [
            "year",
            "week",
            "weekday",
        ],
    )
    def test_isocalendar_dask(self, field) -> None:
        # Same contract as test_isocalendar, but through dask-backed data:
        # lazy, chunk-preserving, and numerically identical when computed.
        import dask.array as da

        if LooseVersion(pd.__version__) < "1.1.0":
            with pytest.raises(
                AttributeError, match=r"'isocalendar' not available in pandas < 1.1.0"
            ):
                self.data.time.dt.isocalendar()[field]
            return

        expected = getattr(self.times_data.dt.isocalendar(), field)

        dask_times_arr = da.from_array(self.times_arr, chunks=(5, 5, 50))
        dask_times_2d = xr.DataArray(
            dask_times_arr, coords=self.data.coords, dims=self.data.dims, name="data"
        )

        with raise_if_dask_computes():
            actual = dask_times_2d.dt.isocalendar()[field]

        assert isinstance(actual.data, da.Array)
        assert_chunks_equal(actual, dask_times_2d)
        assert_equal(actual.compute(), expected.compute())

    @requires_dask
    @pytest.mark.parametrize(
        "method, parameters",
        [
            ("floor", "D"),
            ("ceil", "D"),
            ("round", "D"),
            ("strftime", "%Y-%m-%d %H:%M:%S"),
        ],
    )
    def test_dask_accessor_method(self, method, parameters) -> None:
        # Accessor methods (floor/ceil/round/strftime) must be lazy on
        # dask-backed data and match the eager result.
        import dask.array as da

        expected = getattr(self.times_data.dt, method)(parameters)
        dask_times_arr = da.from_array(self.times_arr, chunks=(5, 5, 50))
        dask_times_2d = xr.DataArray(
            dask_times_arr, coords=self.data.coords, dims=self.data.dims, name="data"
        )

        with raise_if_dask_computes():
            actual = getattr(dask_times_2d.dt, method)(parameters)

        assert isinstance(actual.data, da.Array)
        assert_chunks_equal(actual, dask_times_2d)
        assert_equal(actual.compute(), expected.compute())

    def test_seasons(self) -> None:
        # Month-end dates across one year map onto the four meteorological
        # seasons (Dec-Jan-Feb, Mar-Apr-May, Jun-Jul-Aug, Sep-Oct-Nov).
        dates = pd.date_range(start="2000/01/01", freq="M", periods=12)
        dates = xr.DataArray(dates)
        seasons = xr.DataArray(
            [
                "DJF",
                "DJF",
                "MAM",
                "MAM",
                "MAM",
                "JJA",
                "JJA",
                "JJA",
                "SON",
                "SON",
                "SON",
                "DJF",
            ]
        )

        assert_array_equal(seasons.values, dates.dt.season.values)

    @pytest.mark.parametrize(
        "method, parameters", [("floor", "D"), ("ceil", "D"), ("round", "D")]
    )
    def test_accessor_method(self, method, parameters) -> None:
        # Rounding methods must match the pandas DatetimeIndex equivalents.
        dates = pd.date_range("2014-01-01", "2014-05-01", freq="H")
        xdates = xr.DataArray(dates, dims=["time"])
        expected = getattr(dates, method)(parameters)
        actual = getattr(xdates.dt, method)(parameters)
        assert_array_equal(expected, actual)
class TestTimedeltaAccessor:
    """Tests for the ``.dt`` accessor on timedelta64-backed DataArrays."""

    @pytest.fixture(autouse=True)
    def setup(self):
        # Random cube indexed by a timedelta range, plus a same-shaped array
        # of timedeltas for element-wise accessor tests.
        nt = 100
        data = np.random.rand(10, 10, nt)
        lons = np.linspace(0, 11, 10)
        lats = np.linspace(0, 20, 10)
        self.times = pd.timedelta_range(start="1 day", freq="6H", periods=nt)
        self.data = xr.DataArray(
            data,
            coords=[lons, lats, self.times],
            dims=["lon", "lat", "time"],
            name="data",
        )
        self.times_arr = np.random.choice(self.times, size=(10, 10, nt))
        self.times_data = xr.DataArray(
            self.times_arr,
            coords=[lons, lats, self.times],
            dims=["lon", "lat", "time"],
            name="data",
        )

    def test_not_datetime_type(self) -> None:
        # Using ``.dt`` on a non-timedelta coordinate must raise TypeError.
        nontime_data = self.data.copy()
        int_data = np.arange(len(self.data.time)).astype("int8")
        nontime_data = nontime_data.assign_coords(time=int_data)
        with pytest.raises(TypeError, match=r"dt"):
            nontime_data.time.dt

    @pytest.mark.parametrize(
        "field", ["days", "seconds", "microseconds", "nanoseconds"]
    )
    def test_field_access(self, field) -> None:
        # Each timedelta component must match pandas' TimedeltaIndex value.
        expected = xr.DataArray(
            getattr(self.times, field), name=field, coords=[self.times], dims=["time"]
        )
        actual = getattr(self.data.time.dt, field)
        assert_equal(expected, actual)

    @pytest.mark.parametrize(
        "method, parameters", [("floor", "D"), ("ceil", "D"), ("round", "D")]
    )
    def test_accessor_methods(self, method, parameters) -> None:
        # Rounding methods must match the pandas TimedeltaIndex equivalents.
        dates = pd.timedelta_range(start="1 day", end="30 days", freq="6H")
        xdates = xr.DataArray(dates, dims=["time"])
        expected = getattr(dates, method)(parameters)
        actual = getattr(xdates.dt, method)(parameters)
        assert_array_equal(expected, actual)

    @requires_dask
    @pytest.mark.parametrize(
        "field", ["days", "seconds", "microseconds", "nanoseconds"]
    )
    def test_dask_field_access(self, field) -> None:
        # Field access on dask-backed timedeltas must stay lazy, preserve
        # chunking, and agree with the eager result.
        import dask.array as da

        expected = getattr(self.times_data.dt, field)

        dask_times_arr = da.from_array(self.times_arr, chunks=(5, 5, 50))
        dask_times_2d = xr.DataArray(
            dask_times_arr, coords=self.data.coords, dims=self.data.dims, name="data"
        )

        with raise_if_dask_computes():
            actual = getattr(dask_times_2d.dt, field)

        assert isinstance(actual.data, da.Array)
        assert_chunks_equal(actual, dask_times_2d)
        assert_equal(actual, expected)

    @requires_dask
    @pytest.mark.parametrize(
        "method, parameters", [("floor", "D"), ("ceil", "D"), ("round", "D")]
    )
    def test_dask_accessor_method(self, method, parameters) -> None:
        # Rounding on dask-backed timedeltas must be lazy and preserve chunks.
        import dask.array as da

        expected = getattr(self.times_data.dt, method)(parameters)
        dask_times_arr = da.from_array(self.times_arr, chunks=(5, 5, 50))
        dask_times_2d = xr.DataArray(
            dask_times_arr, coords=self.data.coords, dims=self.data.dims, name="data"
        )

        with raise_if_dask_computes():
            actual = getattr(dask_times_2d.dt, method)(parameters)

        assert isinstance(actual.data, da.Array)
        assert_chunks_equal(actual, dask_times_2d)
        assert_equal(actual.compute(), expected.compute())
# Calendars exercised by the cftime-backed fixtures and tests below.
_CFTIME_CALENDARS = [
    "365_day",
    "360_day",
    "julian",
    "all_leap",
    "366_day",
    "gregorian",
    "proleptic_gregorian",
]
# Number of time steps generated by the cftime fixtures.
_NT = 100
@pytest.fixture(params=_CFTIME_CALENDARS)
def calendar(request):
    # Parametrizes downstream fixtures over every supported cftime calendar.
    return request.param
@pytest.fixture()
def times(calendar):
    # Hourly cftime datetimes starting at 2000-01-01 in the given calendar.
    import cftime

    return cftime.num2date(
        np.arange(_NT),
        units="hours since 2000-01-01",
        calendar=calendar,
        only_use_cftime_datetimes=True,
    )
@pytest.fixture()
def data(times):
    """A random (lon, lat, time) DataArray indexed by cftime datetimes."""
    lons = np.linspace(0, 11, 10)
    lats = np.linspace(0, 20, 10)
    values = np.random.rand(10, 10, _NT)
    return xr.DataArray(
        values,
        coords=[lons, lats, times],
        dims=["lon", "lat", "time"],
        name="data",
    )
@pytest.fixture()
def times_3d(times):
    """A 3-D DataArray of cftime datetimes drawn randomly from ``times``."""
    lons = np.linspace(0, 11, 10)
    lats = np.linspace(0, 20, 10)
    sampled = np.random.choice(times, size=(10, 10, _NT))
    return xr.DataArray(
        sampled,
        coords=[lons, lats, times],
        dims=["lon", "lat", "time"],
        name="data",
    )
@requires_cftime
@pytest.mark.parametrize(
    "field", ["year", "month", "day", "hour", "dayofyear", "dayofweek"]
)
def test_field_access(data, field) -> None:
    """Each datetime component matches the CFTimeIndex equivalent."""
    if field in ("dayofyear", "dayofweek"):
        # These components need a sufficiently recent cftime release.
        pytest.importorskip("cftime", minversion="1.0.2.1")
    index = xr.coding.cftimeindex.CFTimeIndex(data.time.values)
    reference = xr.DataArray(
        getattr(index, field),
        name=field,
        coords=data.time.coords,
        dims=data.time.dims,
    )
    computed = getattr(data.time.dt, field)
    assert_equal(computed, reference)
@requires_cftime
def test_isocalendar_cftime(data) -> None:
    # isocalendar is a pandas-only feature; on cftime-backed data the
    # accessor should raise a clear AttributeError.
    with pytest.raises(
        AttributeError, match=r"'CFTimeIndex' object has no attribute 'isocalendar'"
    ):
        data.time.dt.isocalendar()
@requires_cftime
def test_date_cftime(data) -> None:
    # ``date`` is unsupported on cftime data; the error message should point
    # the user at ``dt.floor("D")`` as the alternative.
    with pytest.raises(
        AttributeError,
        match=r"'CFTimeIndex' object has no attribute `date`. Consider using the floor method instead, for instance: `.time.dt.floor\('D'\)`.",
    ):
        data.time.dt.date()
@requires_cftime
@pytest.mark.filterwarnings("ignore::RuntimeWarning")
def test_cftime_strftime_access(data) -> None:
    """compare cftime formatting against datetime formatting"""
    fmt = "%Y%m%d%H"
    formatted = data.time.dt.strftime(fmt)
    # Convert the cftime index to a real DatetimeIndex and format that too.
    as_datetimes = xr.DataArray(
        xr.coding.cftimeindex.CFTimeIndex(data.time.values).to_datetimeindex(),
        name="stftime",
        coords=data.time.coords,
        dims=data.time.dims,
    )
    reference = as_datetimes.dt.strftime(fmt)
    assert_equal(formatted, reference)
@requires_cftime
@requires_dask
@pytest.mark.parametrize(
    "field", ["year", "month", "day", "hour", "dayofyear", "dayofweek"]
)
def test_dask_field_access_1d(data, field) -> None:
    # Field access on a chunked 1-D cftime array must stay lazy, keep the
    # chunking, and match the eager CFTimeIndex reference.
    import dask.array as da

    if field == "dayofyear" or field == "dayofweek":
        # These components need a sufficiently recent cftime release.
        pytest.importorskip("cftime", minversion="1.0.2.1")
    expected = xr.DataArray(
        getattr(xr.coding.cftimeindex.CFTimeIndex(data.time.values), field),
        name=field,
        dims=["time"],
    )
    times = xr.DataArray(data.time.values, dims=["time"]).chunk({"time": 50})
    result = getattr(times.dt, field)
    assert isinstance(result.data, da.Array)
    assert result.chunks == times.chunks
    assert_equal(result.compute(), expected)
@requires_cftime
@requires_dask
@pytest.mark.parametrize(
    "field", ["year", "month", "day", "hour", "dayofyear", "dayofweek"]
)
def test_dask_field_access(times_3d, data, field) -> None:
    # 3-D chunked cftime arrays: field access must stay lazy, keep chunks,
    # and match the CFTimeIndex reference computed on the flattened values.
    import dask.array as da

    if field == "dayofyear" or field == "dayofweek":
        # These components need a sufficiently recent cftime release.
        pytest.importorskip("cftime", minversion="1.0.2.1")
    expected = xr.DataArray(
        getattr(
            xr.coding.cftimeindex.CFTimeIndex(times_3d.values.ravel()), field
        ).reshape(times_3d.shape),
        name=field,
        coords=times_3d.coords,
        dims=times_3d.dims,
    )

    times_3d = times_3d.chunk({"lon": 5, "lat": 5, "time": 50})
    result = getattr(times_3d.dt, field)
    assert isinstance(result.data, da.Array)
    assert result.chunks == times_3d.chunks
    assert_equal(result.compute(), expected)
@pytest.fixture()
def cftime_date_type(calendar):
    # Resolve the cftime datetime class matching the parametrized calendar.
    from .test_coding_times import _all_cftime_date_types

    return _all_cftime_date_types()[calendar]
@requires_cftime
def test_seasons(cftime_date_type) -> None:
    """``dt.season`` maps each month to its meteorological season."""
    # Mid-month dates for every month of one year.
    dates = xr.DataArray(
        np.array([cftime_date_type(2000, month, 15) for month in range(1, 13)])
    )
    # Jan/Feb -> DJF, then three months each of MAM/JJA/SON, Dec -> DJF.
    season_labels = ["DJF", "DJF"] + ["MAM"] * 3 + ["JJA"] * 3 + ["SON"] * 3 + ["DJF"]
    seasons = xr.DataArray(season_labels)

    assert_array_equal(seasons.values, dates.dt.season.values)
@pytest.fixture
def cftime_rounding_dataarray(cftime_date_type):
    # 2x2 array of datetimes straddling midnight, so floor/ceil/round each
    # produce distinct, easily-checked results.
    return xr.DataArray(
        [
            [cftime_date_type(1, 1, 1, 1), cftime_date_type(1, 1, 1, 15)],
            [cftime_date_type(1, 1, 1, 23), cftime_date_type(1, 1, 2, 1)],
        ]
    )
@requires_cftime
@requires_dask
@pytest.mark.parametrize("use_dask", [False, True])
def test_cftime_floor_accessor(
    cftime_rounding_dataarray, cftime_date_type, use_dask
) -> None:
    # ``dt.floor`` on cftime data, both eagerly and through dask.
    import dask.array as da

    freq = "D"
    expected = xr.DataArray(
        [
            [cftime_date_type(1, 1, 1, 0), cftime_date_type(1, 1, 1, 0)],
            [cftime_date_type(1, 1, 1, 0), cftime_date_type(1, 1, 2, 0)],
        ],
        name="floor",
    )

    if use_dask:
        chunks = {"dim_0": 1}
        # Currently a compute is done to inspect a single value of the array
        # if it is of object dtype to check if it is a cftime.datetime (if not
        # we raise an error when using the dt accessor).
        with raise_if_dask_computes(max_computes=1):
            result = cftime_rounding_dataarray.chunk(chunks).dt.floor(freq)
        expected = expected.chunk(chunks)
        assert isinstance(result.data, da.Array)
        assert result.chunks == expected.chunks
    else:
        result = cftime_rounding_dataarray.dt.floor(freq)

    assert_identical(result, expected)
@requires_cftime
@requires_dask
@pytest.mark.parametrize("use_dask", [False, True])
def test_cftime_ceil_accessor(
cftime_rounding_dataarray, cftime_date_type, use_dask
) -> None:
import dask.array as da
freq = "D"
expected = xr.DataArray(
[
[cftime_date_type(1, 1, 2, 0), cftime_date_type(1, 1, 2, 0)],
[cftime_date_type(1, 1, 2, 0), cftime_date_type(1, 1, 3, 0)],
],
name="ceil",
)
if use_dask:
chunks = {"dim_0": 1}
# Currently a compute is done to inspect a single value of the array
# if it is of object | |
# -*- coding: utf-8 -*-
"""
testerlib.controllers.form_params_controller
This file was automatically generated for Stamplay by APIMATIC BETA v2.0 on 08/03/2016
"""
from testerlib.controllers.base_controller import *
from testerlib.models.server_response import ServerResponse
class FormParamsController(BaseController):
"""A Controller to access Endpoints in the testerlib API."""
def __init__(self, http_client = None, http_call_back = None):
    """Constructor which allows a different HTTP client for this controller.

    Args:
        http_client: Optional HTTP client to use instead of the default one.
        http_call_back: Optional callback object invoked before each request
            and after each response.
    """
    BaseController.__init__(self, http_client, http_call_back)
def send_long(self,
              value):
    """Does a POST request to /form/number.

    Sends a single numeric value as a form parameter.

    Args:
        value (long|int): The number to send in the form body.

    Returns:
        ServerResponse: Response from the API, or None on HTTP 404.

    Raises:
        APIException: When an error occurs while fetching the data from
            the remote API. This exception includes the HTTP Response
            code, an error message, and the HTTP body that was received in
            the request.

    """
    # Validate required parameters (identity check per PEP 8, not ``== None``).
    if value is None:
        raise ValueError("Required parameter 'value' cannot be None.")

    # The base uri for api requests
    _query_builder = Configuration.BASE_URI

    # Prepare query string for API call
    _query_builder += '/form/number'

    # Validate and preprocess url
    _query_url = APIHelper.clean_url(_query_builder)

    # Prepare headers
    _headers = {
        'user-agent': 'Stamplay SDK',
        'accept': 'application/json'
    }

    # Prepare and form-encode the form parameters
    _form_parameters = {
        'value': value
    }
    _form_parameters = APIHelper.form_encode_parameters(_form_parameters)

    # Prepare the API call.
    _http_request = self.http_client.post(_query_url, headers=_headers, parameters=_form_parameters)

    # Invoke the on before request HttpCallBack if specified
    if self.http_call_back is not None:
        self.http_call_back.on_before_request(_http_request)

    # Invoke the API call to fetch the response.
    _response = self.http_client.execute_as_string(_http_request)

    # Invoke the on after response HttpCallBack if specified
    if self.http_call_back is not None:
        self.http_call_back.on_after_response(_response)

    # Endpoint error handling using HTTP status codes.
    if _response.status_code == 404:
        return None

    # Global error handling using HTTP status codes.
    self.validate_response(_response)

    # Deserialize the JSON body into the model type.
    return APIHelper.json_deserialize(_response.raw_body, ServerResponse.from_dictionary)
def send_integer_array(self,
                       integers):
    """Does a POST request to /form/number.

    Sends a list of integers as form parameters (array mode).

    Args:
        integers (list of int): The integers to send in the form body.

    Returns:
        ServerResponse: Response from the API, or None on HTTP 404.

    Raises:
        APIException: When an error occurs while fetching the data from
            the remote API. This exception includes the HTTP Response
            code, an error message, and the HTTP body that was received in
            the request.

    """
    # Validate required parameters (identity check per PEP 8, not ``== None``).
    if integers is None:
        raise ValueError("Required parameter 'integers' cannot be None.")

    # The base uri for api requests
    _query_builder = Configuration.BASE_URI

    # Prepare query string for API call
    _query_builder += '/form/number'

    # Process optional query parameters
    _query_parameters = {
        'array': True
    }

    # Validate and preprocess url
    _query_url = APIHelper.clean_url(_query_builder)

    # Prepare headers
    _headers = {
        'user-agent': 'Stamplay SDK',
        'accept': 'application/json'
    }

    # Prepare and form-encode the form parameters
    _form_parameters = {
        'integers': integers
    }
    _form_parameters = APIHelper.form_encode_parameters(_form_parameters)

    # Prepare the API call.
    _http_request = self.http_client.post(_query_url, headers=_headers, query_parameters=_query_parameters, parameters=_form_parameters)

    # Invoke the on before request HttpCallBack if specified
    if self.http_call_back is not None:
        self.http_call_back.on_before_request(_http_request)

    # Invoke the API call to fetch the response.
    _response = self.http_client.execute_as_string(_http_request)

    # Invoke the on after response HttpCallBack if specified
    if self.http_call_back is not None:
        self.http_call_back.on_after_response(_response)

    # Endpoint error handling using HTTP status codes.
    if _response.status_code == 404:
        return None

    # Global error handling using HTTP status codes.
    self.validate_response(_response)

    # Deserialize the JSON body into the model type.
    return APIHelper.json_deserialize(_response.raw_body, ServerResponse.from_dictionary)
def send_string_array(self,
                      strings):
    """Does a POST request to /form/string.

    Sends a list of strings as form parameters (array mode).

    Args:
        strings (list of string): The strings to send in the form body.

    Returns:
        ServerResponse: Response from the API, or None on HTTP 404.

    Raises:
        APIException: When an error occurs while fetching the data from
            the remote API. This exception includes the HTTP Response
            code, an error message, and the HTTP body that was received in
            the request.

    """
    # Validate required parameters (identity check per PEP 8, not ``== None``).
    if strings is None:
        raise ValueError("Required parameter 'strings' cannot be None.")

    # The base uri for api requests
    _query_builder = Configuration.BASE_URI

    # Prepare query string for API call
    _query_builder += '/form/string'

    # Process optional query parameters
    _query_parameters = {
        'array': True
    }

    # Validate and preprocess url
    _query_url = APIHelper.clean_url(_query_builder)

    # Prepare headers
    _headers = {
        'user-agent': 'Stamplay SDK',
        'accept': 'application/json'
    }

    # Prepare and form-encode the form parameters
    _form_parameters = {
        'strings': strings
    }
    _form_parameters = APIHelper.form_encode_parameters(_form_parameters)

    # Prepare the API call.
    _http_request = self.http_client.post(_query_url, headers=_headers, query_parameters=_query_parameters, parameters=_form_parameters)

    # Invoke the on before request HttpCallBack if specified
    if self.http_call_back is not None:
        self.http_call_back.on_before_request(_http_request)

    # Invoke the API call to fetch the response.
    _response = self.http_client.execute_as_string(_http_request)

    # Invoke the on after response HttpCallBack if specified
    if self.http_call_back is not None:
        self.http_call_back.on_after_response(_response)

    # Endpoint error handling using HTTP status codes.
    if _response.status_code == 404:
        return None

    # Global error handling using HTTP status codes.
    self.validate_response(_response)

    # Deserialize the JSON body into the model type.
    return APIHelper.json_deserialize(_response.raw_body, ServerResponse.from_dictionary)
def send_model(self,
               model):
    """Does a POST request to /form/model.

    Sends a single model object as form parameters.

    Args:
        model (Employee): The model to send in the form body.

    Returns:
        ServerResponse: Response from the API, or None on HTTP 404.

    Raises:
        APIException: When an error occurs while fetching the data from
            the remote API. This exception includes the HTTP Response
            code, an error message, and the HTTP body that was received in
            the request.

    """
    # Validate required parameters (identity check per PEP 8, not ``== None``).
    if model is None:
        raise ValueError("Required parameter 'model' cannot be None.")

    # The base uri for api requests
    _query_builder = Configuration.BASE_URI

    # Prepare query string for API call
    _query_builder += '/form/model'

    # Validate and preprocess url
    _query_url = APIHelper.clean_url(_query_builder)

    # Prepare headers
    _headers = {
        'user-agent': 'Stamplay SDK',
        'accept': 'application/json'
    }

    # Prepare and form-encode the form parameters
    _form_parameters = {
        'model': model
    }
    _form_parameters = APIHelper.form_encode_parameters(_form_parameters)

    # Prepare the API call.
    _http_request = self.http_client.post(_query_url, headers=_headers, parameters=_form_parameters)

    # Invoke the on before request HttpCallBack if specified
    if self.http_call_back is not None:
        self.http_call_back.on_before_request(_http_request)

    # Invoke the API call to fetch the response.
    _response = self.http_client.execute_as_string(_http_request)

    # Invoke the on after response HttpCallBack if specified
    if self.http_call_back is not None:
        self.http_call_back.on_after_response(_response)

    # Endpoint error handling using HTTP status codes.
    if _response.status_code == 404:
        return None

    # Global error handling using HTTP status codes.
    self.validate_response(_response)

    # Deserialize the JSON body into the model type.
    return APIHelper.json_deserialize(_response.raw_body, ServerResponse.from_dictionary)
def send_model_array(self,
                     models):
    """Does a POST request to /form/model.

    Sends a list of model objects as form parameters (array mode).

    Args:
        models (list of Employee): The models to send in the form body.

    Returns:
        ServerResponse: Response from the API, or None on HTTP 404.

    Raises:
        APIException: When an error occurs while fetching the data from
            the remote API. This exception includes the HTTP Response
            code, an error message, and the HTTP body that was received in
            the request.

    """
    # Validate required parameters (identity check per PEP 8, not ``== None``).
    if models is None:
        raise ValueError("Required parameter 'models' cannot be None.")

    # The base uri for api requests
    _query_builder = Configuration.BASE_URI

    # Prepare query string for API call
    _query_builder += '/form/model'

    # Process optional query parameters
    _query_parameters = {
        'array': True
    }

    # Validate and preprocess url
    _query_url = APIHelper.clean_url(_query_builder)

    # Prepare headers
    _headers = {
        'user-agent': 'Stamplay SDK',
        'accept': 'application/json'
    }

    # Prepare and form-encode the form parameters
    _form_parameters = {
        'models': models
    }
    _form_parameters = APIHelper.form_encode_parameters(_form_parameters)

    # Prepare the API call.
    _http_request = self.http_client.post(_query_url, headers=_headers, query_parameters=_query_parameters, parameters=_form_parameters)

    # Invoke the on before request HttpCallBack if specified
    if self.http_call_back is not None:
        self.http_call_back.on_before_request(_http_request)

    # Invoke the API call to fetch the response.
    _response = self.http_client.execute_as_string(_http_request)

    # Invoke the on after response HttpCallBack if specified
    if self.http_call_back is not None:
        self.http_call_back.on_after_response(_response)

    # Endpoint error handling using HTTP status codes.
    if _response.status_code == 404:
        return None

    # Global error handling using HTTP status codes.
    self.validate_response(_response)

    # Deserialize the JSON body into the model type.
    return APIHelper.json_deserialize(_response.raw_body, ServerResponse.from_dictionary)
def send_file(self,
file):
"""Does a POST request to /form/file.
TODO: type endpoint description here.
Args:
file (string): TODO: type description here. Example:
Returns:
ServerResponse: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
if file == None:
raise ValueError("Required parameter 'file' | |
float
ancho efectivo del elemento no rigidizado bajo compresion uniforme
midC: diccionario,
calculos intermedios.
Raises
------
none
Test
----
>>> b, midC = sec2_3_1(w= 50, t= 1 , f= 5, E = 200e3)
>>> print('b: {:{fmt}} | esbeltez: {m[esbeltez]:{fmt}} | rho: {m[rho]:{fmt}}'.format(b, m= midC, fmt = '.2f'))
b: 50.00 | esbeltez: 0.37 | rho: 1.00
>>> b, midC = sec2_3_1(w= 50, t= 1 , f= 200, E = 200e3)
>>> print('b: {:{fmt}} | esbeltez: {m[esbeltez]:{fmt}} | rho: {m[rho]:{fmt}}'.format(b, m= midC, fmt = '.2f'))
b: 19.27 | esbeltez: 2.35 | rho: 0.39
'''
b_eff, midC = sec2_2_1(w= w, t= t, f=f, E= E, k= k)
return b_eff, midC
def sec2_3_2(w, t, f3, E, k = 0.5):
    '''Unstiffened Elements and Edge Stiffeners with Stress Gradient. Load Capacity Determination or Deflection Determination.

    Delegates to sec2_2_1, using f3 as the design stress and the
    unstiffened-element buckling coefficient k = 0.5 (ASCE 8, section 2.3.2).

    Parameters
    ----------
    w: float,
        flat width of the element (see figure 3 - ASCE 8).
    t: float,
        thickness of the element.
    f3: float
        stress on the element (see figure 5 - ASCE 8).
    E: float,
        modulus of elasticity.
    k: float,
        plate buckling coefficient for the element under consideration.

    Returns
    -------
    b_eff: float,
        effective width of the unstiffened element.
    midC: dict,
        intermediate results.

    Raises
    ------
    none

    Test
    ----
    >>> b, midC = sec2_3_2(w= 50, t= 1 , f3= 5, E = 200e3)
    >>> print('b: {:{fmt}} | esbeltez: {m[esbeltez]:{fmt}} | rho: {m[rho]:{fmt}}'.format(b, m= midC, fmt = '.2f'))
    b: 50.00 | esbeltez: 0.37 | rho: 1.00
    >>> b, midC = sec2_3_2(w= 50, t= 1 , f3= 200, E = 200e3)
    >>> print('b: {:{fmt}} | esbeltez: {m[esbeltez]:{fmt}} | rho: {m[rho]:{fmt}}'.format(b, m= midC, fmt = '.2f'))
    b: 19.27 | esbeltez: 2.35 | rho: 0.39
    '''
    # Fix: the doctest previously called sec2_2_1 (whose default k differs),
    # so the printed values did not reflect this function's behavior.
    b, midC = sec2_2_1(w=w, t=t, f=f3, E=E, k=k)
    return b, midC
# 2.4 EFFECTIVE WIDTHS OF ELEMENTS WITH EDGE STIFFENERS OR ONE INTERMEDIATE STIFFENER
def sec2_4_1():
    '''ASCE 8 section 2.4.1 — not implemented.

    Raises
    ------
    NotImplementedError
        Always; the message identifies the missing section.
    '''
    # Carry the message on the exception instead of printing to stdout,
    # so callers that catch or log the error see the reason directly.
    raise NotImplementedError('Seccion 2.4.1 No implementada.')
def sec2_4_2(E0, f, w, t, d, r_out, theta = 90, stiff = 'SL'):
    '''Uniformly Compressed Elements with Edge Stiffener. Load Capacity or Deflection Determination.

    Parameters
    ----------
    E0: float,
        initial modulus of elasticity.
    f: float,
        stress on the element.
    w: float,
        width of the element, curvatures excluded (see figure 5 - ASCE 8).
    t: float
        thickness of the element.
    d: float,
        width of the stiffener, curvature excluded (see figure 5 - ASCE 8).
    r_out: float,
        outer radius of the stiffener bend.
    theta: float,
        inclination angle of the simple-lip stiffener, in degrees.
    stiff: string,
        stiffener class ('SL' = simple lip, otherwise other type).

    Returns
    -------
    b: float,
        effective width of the element.
    midC: dict,
        intermediate results (includes 'S', 'Is', 'Ia', 'As', 'As_prima',
        'ds', 'ds_prima', 'k', 'CASE').

    Raises
    ------
    Exception
        If the geometric limit D/w <= 0.8 is not satisfied.

    Tests
    -----
    # Example 18.1 - I-section
    >>> b, midC = sec2_4_2(E0=27000, f=23.52, w=1.855, t=0.135, d=0.498, r_out=3/16+0.135, theta=90, stiff='SL')
    >>> print('b: {:{fmt}} | S: {m[S]:{fmt}} | Is: {m[Is]:{fmt}} | Ia: {m[Ia]:{fmt}} | As: {m[As]:{fmt}} | As_prima: {m[As_prima]:{fmt}} | ds: {m[ds]:{fmt}} | ds_prima: {m[ds_prima]:{fmt}} | k: {m[k]:{fmt}}'.format(b, m = midC, fmt = '.5f'))
    b: 1.85500 | S: 43.36838 | Is: 0.00139 | Ia: 0.00000 | As: 0.06723 | As_prima: 0.06723 | ds: 0.49800 | ds_prima: 0.49800 | k: 0.50000
    # Example 16.1 - C-section with wide flanges - Ia differs from the reference by rounding: Ia_reference=0.000842
    >>> b, midC = sec2_4_2(E0=27000, f=19.92, w=2.914, t=0.105, d=0.607, r_out=3/16+0.105, theta=90, stiff='SL')
    >>> print('b: {:{fmt2}} | S: {m[S]:{fmt2}} | Is: {m[Is]:{fmt5}} | Ia: {m[Ia]:{fmt5}} | As: {m[As]:{fmt5}} | As_prima: {m[As_prima]:{fmt5}} | ds: {m[ds]:{fmt2}} | ds_prima: {m[ds_prima]:{fmt2}} | k: {m[k]:{fmt2}}'.format(b, m = midC, fmt2 = '.2f', fmt5 = '.6f'))
    b: 2.91 | S: 47.12 | Is: 0.001957 | Ia: 0.000863 | As: 0.063735 | As_prima: 0.063735 | ds: 0.61 | ds_prima: 0.61 | k: 3.71
    # Example CASE III
    >>> b, midC = sec2_4_2(E0=27000, f=150.0, w=3.0, t=0.135, d=0.498, r_out=3/16+0.135, theta=90, stiff='SL')
    >>> print('b: {:{fmt}} | Is: {m[Is]:{fmt}} | Ia: {m[Ia]:{fmt}} | As: {m[As]:{fmt}} | As_prima: {m[As_prima]:{fmt}} | ds: {m[ds]:{fmt}} | ds_prima: {m[ds_prima]:{fmt}} | k: {m[k]:{fmt}}'.format(b, m = midC, fmt = '.5f'))
    b: 1.76703 | Is: 0.00139 | Ia: 0.05109 | As: 0.00183 | As_prima: 0.06723 | ds: 0.01354 | ds_prima: 0.49800 | k: 1.46826
    '''
    # Fix: the docstring previously documented a ds_prima parameter that this
    # function does not take (it is computed below from d via sec2_3_1).
    S = E_2_4_e1(E=E0, f=f)                 # slenderness threshold, Eq. 2.4-1
    Is = E_2_4_e2(d=d, t=t, theta=theta)    # stiffener inertia, Eq. 2.4-2
    # effective width of the stiffener treated as an unstiffened element
    ds_prima, _ = sec2_3_1(w=d, t=t, f=f, E=E0)
    As_prima = E_2_4_e3(ds_prima=ds_prima, t=t)
    # Overall lip depth D recovered from the flat width and bend geometry.
    # NOTE(review): (1 - cos)/sin is undefined at theta = 0 or 180 degrees;
    # this assumes 0 < theta < 180 — confirm against callers.
    D = d + r_out*(1 - cos(theta*pi/180))/sin(theta*pi/180)
    if D/w > 0.8:
        print('No se cumple la condicion D/w < 0.8')
        raise Exception('>> Analisis abortado <<')
    # Dispatch on the flat-width ratio w/t relative to S (Eq. 2.4.2-1).
    if w/t <= S/3:  # Ec 2.4.2-1
        b, midC = sec2_4_2_CASEI(Is=Is, As_prima=As_prima, w=w, ds_prima=ds_prima, t=t)
    elif S/3 < w/t and w/t < S:
        b, midC = sec2_4_2_CASEII(E0=E0, f=f, t=t, w=w, theta=theta, D=D, ds_prima=ds_prima, stiff=stiff, S=S, Is=Is, As_prima=As_prima)
    else:
        b, midC = sec2_4_2_CASEIII(E0=E0, f=f, t=t, w=w, theta=theta, D=D, ds_prima=ds_prima, stiff=stiff, S=S, Is=Is, As_prima=As_prima)
    midC['S'] = S
    return b, midC
def E_2_4_e1(E, f):
    '''S: Equation 2.4-1 of ASCE 8.

    Parameters
    ----------
    E: float,
        modulus of elasticity.
    f: float,
        stress on the element.

    Returns
    -------
    S: float,
        slenderness parameter S = 1.28*sqrt(E/f).

    Raises
    ------
    none

    Tests
    -----
    >>> round(E_2_4_e1(E = 200e3, f = 200.0), 3)
    40.477
    '''
    stiffness_ratio = E/f
    return 1.28*stiffness_ratio**0.5
def E_2_4_e2(d, t, theta = 90):
    '''Is: Equation 2.4-2 of ASCE 8.

    Parameters
    ----------
    d: float,
        total flat width of the stiffener, curvature excluded.
    t: float,
        thickness of the stiffener.
    theta: float,
        inclination angle of the simple-lip stiffener, in degrees.

    Returns
    -------
    Is: float,
        moment of inertia of the stiffener about the axis parallel to the
        stiffened element, taken at the stiffener centroid.

    Raises
    ------
    none

    Tests
    -----
    >>> round(E_2_4_e2(d = 20.0, t = 3.0), 2)
    2000.0
    '''
    theta_rad = theta*pi/180
    return d**3 * t * sin(theta_rad) / 12
def E_2_4_e3(ds_prima, t):
    '''As_prima: Equation 2.4-3 of ASCE 8.

    Parameters
    ----------
    ds_prima: float,
        effective width of the stiffener per section 2.3.1 (see figure 5 - ASCE 8).
    t: float,
        thickness of the stiffener.

    Returns
    -------
    As_prima: float,
        effective area of the stiffener (ds_prima * t).

    Raises
    ------
    none

    Tests
    -----
    >>> round(E_2_4_e3(10.0, 3.0), 2)
    30.0
    '''
    return t * ds_prima
def sec2_4_2_CASEI(Is, As_prima, w, ds_prima, t, k = 0.5):
    '''Section 2.4.2 - CASE I (w/t <= S/3): the element is fully effective.

    No stiffener reduction applies: the effective width equals the flat
    width (b = w), the adequate stiffener inertia Ia is zero, and the
    stiffener keeps its nominal properties (ds = ds_prima, As = As_prima).

    Parameters
    ----------
    Is: float,
        moment of inertia of the stiffener about its centroidal axis.
    As_prima: float,
        effective area of the stiffener.
    w: float,
        flat width of the element, curvatures excluded (see figure 5 - ASCE 8).
    ds_prima: float,
        effective width of the stiffener per section 2.3.1 (see figure 5 - ASCE 8).
    t: float,
        thickness of the stiffener.
    k: float,
        plate buckling coefficient reported in the results.

    Returns
    -------
    b: float,
        effective width of the element (equal to w).
    midC: dict,
        intermediate results and geometric properties.

    Raises
    ------
    none
    '''
    intermediate = {
        'Is': Is,
        'Ia': 0,                 # no stiffener inertia is required in CASE I
        'As': As_prima,
        'As_prima': As_prima,
        'ds': ds_prima,
        'ds_prima': ds_prima,
        'k': k,
        'esbeltez': 'N/A',
        'rho': 'N/A',
        'CASE': 'CASEI',
    }
    return w, intermediate
def sec2_4_2_CASEII(E0, f, t, w, theta, D, ds_prima, stiff, S, Is, As_prima):
    '''Section 2.4.2 - CASE II (S/3 < w/t < S).

    Computes the adequate stiffener inertia Ia from Eq. 2.4.2-6 (with
    n = 0.5 and k_u = 0.43) and delegates the effective-width calculation
    to E_2_4_2_CASES.

    Parameters
    ----------
    E0: float,
        initial modulus of elasticity.
    f: float,
        stress on the element.
    t: float,
        thickness of the element.
    w: float,
        flat width of the element, curvatures excluded (see figure 5 - ASCE 8).
    theta: float,
        inclination angle of the simple-lip stiffener, in degrees.
    D: float,
        width of the stiffener including the curvature (see figure 5 - ASCE 8).
    ds_prima: float,
        effective width of the stiffener per section 2.3.1 (see figure 5 - ASCE 8).
    stiff: string,
        stiffener class (simple lip or other).
    S: float,
        slenderness parameter (Eq. 2.4-1).
    Is: float,
        moment of inertia of the stiffener about its centroidal axis.
    As_prima: float,
        effective area of the stiffener.

    Returns
    -------
    b: float,
        effective width of the element.
    midC: dict,
        intermediate results.

    Raises
    ------
    none
    '''
    exponent = 0.5    # n, used by E_2_4_2_CASES
    k_u = 0.43
    # Adequate stiffener inertia, Eq. 2.4.2-6
    Ia = 399 * t**4 * (w/(t*S) - (k_u/4)**0.5)**3
    b, midC = E_2_4_2_CASES(E0=E0, f=f, t=t, w=w, theta=theta, D=D,
                            ds_prima=ds_prima, stiff=stiff, Is=Is,
                            As_prima=As_prima, n=exponent, Ia=Ia, k_u=k_u)
    midC['CASE'] = 'CASEII'
    return b, midC
def sec2_4_2_CASEIII(E0, f, t, w, theta, D, ds_prima, stiff, S, Is, As_prima):
'''Ecuacion 2.4.2-CASE III
| |
int('300600B2', 16)
# DICOM data-element tag constants. Each value is the 8-hex-digit
# group/element pair (GGGGEEEE) parsed as a single integer, e.g.
# int('300600B4', 16) == 0x300600B4.
# --- group 0x3006: RT Structure Set (continued) ---
ROIPhysicalPropertyValue = int('300600B4', 16)
ROIElementalCompositionSequence = int('300600B6', 16)
ROIElementalCompositionAtomicNumber = int('300600B7', 16)
ROIElementalCompositionAtomicMassFraction = int('300600B8', 16)
AdditionalRTROIIdentificationCodeSequence = int('300600B9', 16)
FrameofReferenceRelationshipSequence = int('300600C0', 16)
RelatedFrameofReferenceUID = int('300600C2', 16)
FrameofReferenceTransformationType = int('300600C4', 16)
FrameofReferenceTransformationMatrix = int('300600C6', 16)
FrameofReferenceTransformationComment = int('300600C8', 16)
# --- group 0x3008: RT Beams / Brachy Treatment Record ---
MeasuredDoseReferenceSequence = int('30080010', 16)
MeasuredDoseDescription = int('30080012', 16)
MeasuredDoseType = int('30080014', 16)
MeasuredDoseValue = int('30080016', 16)
TreatmentSessionBeamSequence = int('30080020', 16)
TreatmentSessionIonBeamSequence = int('30080021', 16)
CurrentFractionNumber = int('30080022', 16)
TreatmentControlPointDate = int('30080024', 16)
TreatmentControlPointTime = int('30080025', 16)
TreatmentTerminationStatus = int('3008002A', 16)
TreatmentTerminationCode = int('3008002B', 16)
TreatmentVerificationStatus = int('3008002C', 16)
ReferencedTreatmentRecordSequence = int('30080030', 16)
SpecifiedPrimaryMeterset = int('30080032', 16)
SpecifiedSecondaryMeterset = int('30080033', 16)
DeliveredPrimaryMeterset = int('30080036', 16)
DeliveredSecondaryMeterset = int('30080037', 16)
SpecifiedTreatmentTime = int('3008003A', 16)
DeliveredTreatmentTime = int('3008003B', 16)
ControlPointDeliverySequence = int('30080040', 16)
IonControlPointDeliverySequence = int('30080041', 16)
SpecifiedMeterset = int('30080042', 16)
DeliveredMeterset = int('30080044', 16)
MetersetRateSet = int('30080045', 16)
MetersetRateDelivered = int('30080046', 16)
ScanSpotMetersetsDelivered = int('30080047', 16)
DoseRateDelivered = int('30080048', 16)
TreatmentSummaryCalculatedDoseReferenceSequence = int('30080050', 16)
CumulativeDosetoDoseReference = int('30080052', 16)
FirstTreatmentDate = int('30080054', 16)
MostRecentTreatmentDate = int('30080056', 16)
NumberofFractionsDelivered = int('3008005A', 16)
OverrideSequence = int('30080060', 16)
ParameterSequencePointer = int('30080061', 16)
OverrideParameterPointer = int('30080062', 16)
ParameterItemIndex = int('30080063', 16)
MeasuredDoseReferenceNumber = int('30080064', 16)
ParameterPointer = int('30080065', 16)
OverrideReason = int('30080066', 16)
CorrectedParameterSequence = int('30080068', 16)
CorrectionValue = int('3008006A', 16)
CalculatedDoseReferenceSequence = int('30080070', 16)
CalculatedDoseReferenceNumber = int('30080072', 16)
CalculatedDoseReferenceDescription = int('30080074', 16)
CalculatedDoseReferenceDoseValue = int('30080076', 16)
StartMeterset = int('30080078', 16)
EndMeterset = int('3008007A', 16)
ReferencedMeasuredDoseReferenceSequence = int('30080080', 16)
ReferencedMeasuredDoseReferenceNumber = int('30080082', 16)
ReferencedCalculatedDoseReferenceSequence = int('30080090', 16)
ReferencedCalculatedDoseReferenceNumber = int('30080092', 16)
BeamLimitingDeviceLeafPairsSequence = int('300800A0', 16)
RecordedWedgeSequence = int('300800B0', 16)
RecordedCompensatorSequence = int('300800C0', 16)
RecordedBlockSequence = int('300800D0', 16)
TreatmentSummaryMeasuredDoseReferenceSequence = int('300800E0', 16)
RecordedSnoutSequence = int('300800F0', 16)
RecordedRangeShifterSequence = int('300800F2', 16)
RecordedLateralSpreadingDeviceSequence = int('300800F4', 16)
RecordedRangeModulatorSequence = int('300800F6', 16)
RecordedSourceSequence = int('30080100', 16)
SourceSerialNumber = int('30080105', 16)
TreatmentSessionApplicationSetupSequence = int('30080110', 16)
ApplicationSetupCheck = int('30080116', 16)
RecordedBrachyAccessoryDeviceSequence = int('30080120', 16)
ReferencedBrachyAccessoryDeviceNumber = int('30080122', 16)
RecordedChannelSequence = int('30080130', 16)
SpecifiedChannelTotalTime = int('30080132', 16)
DeliveredChannelTotalTime = int('30080134', 16)
SpecifiedNumberofPulses = int('30080136', 16)
DeliveredNumberofPulses = int('30080138', 16)
SpecifiedPulseRepetitionInterval = int('3008013A', 16)
DeliveredPulseRepetitionInterval = int('3008013C', 16)
RecordedSourceApplicatorSequence = int('30080140', 16)
ReferencedSourceApplicatorNumber = int('30080142', 16)
RecordedChannelShieldSequence = int('30080150', 16)
ReferencedChannelShieldNumber = int('30080152', 16)
BrachyControlPointDeliveredSequence = int('30080160', 16)
SafePositionExitDate = int('30080162', 16)
SafePositionExitTime = int('30080164', 16)
SafePositionReturnDate = int('30080166', 16)
SafePositionReturnTime = int('30080168', 16)
PulseSpecificBrachyControlPointDeliveredSequence = int('30080171', 16)
PulseNumber = int('30080172', 16)
BrachyPulseControlPointDeliveredSequence = int('30080173', 16)
CurrentTreatmentStatus = int('30080200', 16)
TreatmentStatusComment = int('30080202', 16)
FractionGroupSummarySequence = int('30080220', 16)
ReferencedFractionNumber = int('30080223', 16)
FractionGroupType = int('30080224', 16)
BeamStopperPosition = int('30080230', 16)
FractionStatusSummarySequence = int('30080240', 16)
TreatmentDate = int('30080250', 16)
TreatmentTime = int('30080251', 16)
# --- group 0x300A: RT Plan ---
RTPlanLabel = int('300A0002', 16)
RTPlanName = int('300A0003', 16)
RTPlanDescription = int('300A0004', 16)
RTPlanDate = int('300A0006', 16)
RTPlanTime = int('300A0007', 16)
TreatmentProtocols = int('300A0009', 16)
PlanIntent = int('300A000A', 16)
TreatmentSites = int('300A000B', 16)
RTPlanGeometry = int('300A000C', 16)
PrescriptionDescription = int('300A000E', 16)
DoseReferenceSequence = int('300A0010', 16)
DoseReferenceNumber = int('300A0012', 16)
DoseReferenceUID = int('300A0013', 16)
DoseReferenceStructureType = int('300A0014', 16)
NominalBeamEnergyUnit = int('300A0015', 16)
DoseReferenceDescription = int('300A0016', 16)
DoseReferencePointCoordinates = int('300A0018', 16)
NominalPriorDose = int('300A001A', 16)
DoseReferenceType = int('300A0020', 16)
ConstraintWeight = int('300A0021', 16)
DeliveryWarningDose = int('300A0022', 16)
DeliveryMaximumDose = int('300A0023', 16)
TargetMinimumDose = int('300A0025', 16)
TargetPrescriptionDose = int('300A0026', 16)
TargetMaximumDose = int('300A0027', 16)
TargetUnderdoseVolumeFraction = int('300A0028', 16)
OrganatRiskFullvolumeDose = int('300A002A', 16)
OrganatRiskLimitDose = int('300A002B', 16)
OrganatRiskMaximumDose = int('300A002C', 16)
OrganatRiskOverdoseVolumeFraction = int('300A002D', 16)
ToleranceTableSequence = int('300A0040', 16)
ToleranceTableNumber = int('300A0042', 16)
ToleranceTableLabel = int('300A0043', 16)
GantryAngleTolerance = int('300A0044', 16)
BeamLimitingDeviceAngleTolerance = int('300A0046', 16)
BeamLimitingDeviceToleranceSequence = int('300A0048', 16)
BeamLimitingDevicePositionTolerance = int('300A004A', 16)
SnoutPositionTolerance = int('300A004B', 16)
PatientSupportAngleTolerance = int('300A004C', 16)
TableTopEccentricAngleTolerance = int('300A004E', 16)
TableTopPitchAngleTolerance = int('300A004F', 16)
TableTopRollAngleTolerance = int('300A0050', 16)
TableTopVerticalPositionTolerance = int('300A0051', 16)
TableTopLongitudinalPositionTolerance = int('300A0052', 16)
TableTopLateralPositionTolerance = int('300A0053', 16)
RTPlanRelationship = int('300A0055', 16)
FractionGroupSequence = int('300A0070', 16)
FractionGroupNumber = int('300A0071', 16)
FractionGroupDescription = int('300A0072', 16)
NumberofFractionsPlanned = int('300A0078', 16)
NumberofFractionPatternDigitsPerDay = int('300A0079', 16)
RepeatFractionCycleLength = int('300A007A', 16)
FractionPattern = int('300A007B', 16)
NumberofBeams = int('300A0080', 16)
BeamDoseSpecificationPoint = int('300A0082', 16)
BeamDose = int('300A0084', 16)
BeamMeterset = int('300A0086', 16)
BeamDosePointDepth = int('300A0088', 16)
BeamDosePointEquivalentDepth = int('300A0089', 16)
BeamDosePointSSD = int('300A008A', 16)
BeamDoseMeaning = int('300A008B', 16)
BeamDoseVerificationControlPointSequence = int('300A008C', 16)
AverageBeamDosePointDepth = int('300A008D', 16)
AverageBeamDosePointEquivalentDepth = int('300A008E', 16)
AverageBeamDosePointSSD = int('300A008F', 16)
BeamDoseType = int('300A0090', 16)
AlternateBeamDose = int('300A0091', 16)
AlternateBeamDoseType = int('300A0092', 16)
NumberofBrachyApplicationSetups = int('300A00A0', 16)
BrachyApplicationSetupDoseSpecificationPoint = int('300A00A2', 16)
BrachyApplicationSetupDose = int('300A00A4', 16)
BeamSequence = int('300A00B0', 16)
TreatmentMachineName = int('300A00B2', 16)
PrimaryDosimeterUnit = int('300A00B3', 16)
SourceAxisDistance = int('300A00B4', 16)
BeamLimitingDeviceSequence = int('300A00B6', 16)
RTBeamLimitingDeviceType = int('300A00B8', 16)
SourcetoBeamLimitingDeviceDistance = int('300A00BA', 16)
IsocentertoBeamLimitingDeviceDistance = int('300A00BB', 16)
NumberofLeafJawPairs = int('300A00BC', 16)
LeafPositionBoundaries = int('300A00BE', 16)
BeamNumber = int('300A00C0', 16)
BeamName = int('300A00C2', 16)
BeamDescription = int('300A00C3', 16)
BeamType = int('300A00C4', 16)
BeamDeliveryDurationLimit = int('300A00C5', 16)
RadiationType = int('300A00C6', 16)
HighDoseTechniqueType = int('300A00C7', 16)
ReferenceImageNumber = int('300A00C8', 16)
PlannedVerificationImageSequence = int('300A00CA', 16)
ImagingDeviceSpecificAcquisitionParameters = int('300A00CC', 16)
TreatmentDeliveryType = int('300A00CE', 16)
NumberofWedges = int('300A00D0', 16)
WedgeSequence = int('300A00D1', 16)
WedgeNumber = int('300A00D2', 16)
WedgeType = int('300A00D3', 16)
WedgeID = int('300A00D4', 16)
WedgeAngle = int('300A00D5', 16)
WedgeFactor = int('300A00D6', 16)
TotalWedgeTrayWaterEquivalentThickness = int('300A00D7', 16)
WedgeOrientation = int('300A00D8', 16)
IsocentertoWedgeTrayDistance = int('300A00D9', 16)
SourcetoWedgeTrayDistance = int('300A00DA', 16)
WedgeThinEdgePosition = int('300A00DB', 16)
BolusID = int('300A00DC', 16)
BolusDescription = int('300A00DD', 16)
EffectiveWedgeAngle = int('300A00DE', 16)
NumberofCompensators = int('300A00E0', 16)
MaterialID = int('300A00E1', 16)
TotalCompensatorTrayFactor = int('300A00E2', 16)
CompensatorSequence = int('300A00E3', 16)
CompensatorNumber = int('300A00E4', 16)
CompensatorID = int('300A00E5', 16)
SourcetoCompensatorTrayDistance = int('300A00E6', 16)
CompensatorRows = int('300A00E7', 16)
CompensatorColumns = int('300A00E8', 16)
CompensatorPixelSpacing = int('300A00E9', 16)
CompensatorPosition = int('300A00EA', 16)
CompensatorTransmissionData = int('300A00EB', 16)
CompensatorThicknessData = int('300A00EC', 16)
NumberofBoli = int('300A00ED', 16)
CompensatorType = int('300A00EE', 16)
CompensatorTrayID = int('300A00EF', 16)
NumberofBlocks = int('300A00F0', 16)
TotalBlockTrayFactor = int('300A00F2', 16)
TotalBlockTrayWaterEquivalentThickness = int('300A00F3', 16)
BlockSequence = int('300A00F4', 16)
BlockTrayID = int('300A00F5', 16)
SourcetoBlockTrayDistance = int('300A00F6', 16)
IsocentertoBlockTrayDistance = int('300A00F7', 16)
BlockType = int('300A00F8', 16)
AccessoryCode = int('300A00F9', 16)
BlockDivergence = int('300A00FA', 16)
BlockMountingPosition = int('300A00FB', 16)
BlockNumber = int('300A00FC', 16)
BlockName = int('300A00FE', 16)
BlockThickness = int('300A0100', 16)
BlockTransmission = int('300A0102', 16)
BlockNumberofPoints = int('300A0104', 16)
BlockData = int('300A0106', 16)
ApplicatorSequence = int('300A0107', 16)
ApplicatorID = int('300A0108', 16)
ApplicatorType = int('300A0109', 16)
ApplicatorDescription = int('300A010A', 16)
CumulativeDoseReferenceCoefficient = int('300A010C', 16)
FinalCumulativeMetersetWeight = int('300A010E', 16)
NumberofControlPoints = int('300A0110', 16)
ControlPointSequence = int('300A0111', 16)
ControlPointIndex = int('300A0112', 16)
NominalBeamEnergy = int('300A0114', 16)
DoseRateSet = int('300A0115', 16)
WedgePositionSequence = int('300A0116', 16)
WedgePosition = int('300A0118', 16)
BeamLimitingDevicePositionSequence = int('300A011A', 16)
LeafJawPositions = int('300A011C', 16)
GantryAngle = int('300A011E', 16)
GantryRotationDirection = int('300A011F', 16)
BeamLimitingDeviceAngle = int('300A0120', 16)
BeamLimitingDeviceRotationDirection = int('300A0121', 16)
PatientSupportAngle = int('300A0122', 16)
PatientSupportRotationDirection = int('300A0123', 16)
TableTopEccentricAxisDistance = int('300A0124', 16)
TableTopEccentricAngle = int('300A0125', 16)
TableTopEccentricRotationDirection = int('300A0126', 16)
TableTopVerticalPosition = int('300A0128', 16)
TableTopLongitudinalPosition = int('300A0129', 16)
TableTopLateralPosition = int('300A012A', 16)
IsocenterPosition = int('300A012C', 16)
SurfaceEntryPoint = int('300A012E', 16)
SourcetoSurfaceDistance = int('300A0130', 16)
AverageBeamDosePointSourcetoExternalContourDistance = int('300A0131', 16)
SourcetoExternalContourDistance = int('300A0132', 16)
ExternalContourEntryPoint = int('300A0133', 16)
CumulativeMetersetWeight = int('300A0134', 16)
TableTopPitchAngle = int('300A0140', 16)
TableTopPitchRotationDirection = int('300A0142', 16)
TableTopRollAngle = int('300A0144', 16)
TableTopRollRotationDirection = int('300A0146', 16)
HeadFixationAngle = int('300A0148', 16)
GantryPitchAngle = int('300A014A', 16)
GantryPitchRotationDirection = int('300A014C', 16)
GantryPitchAngleTolerance = int('300A014E', 16)
FixationEye = int('300A0150', 16)
ChairHeadFramePosition = int('300A0151', 16)
HeadFixationAngleTolerance = int('300A0152', 16)
ChairHeadFramePositionTolerance = int('300A0153', 16)
FixationLightAzimuthalAngleTolerance = int('300A0154', 16)
FixationLightPolarAngleTolerance = int('300A0155', 16)
PatientSetupSequence = int('300A0180', 16)
PatientSetupNumber = int('300A0182', 16)
PatientSetupLabel = int('300A0183', 16)
PatientAdditionalPosition = int('300A0184', 16)
FixationDeviceSequence = int('300A0190', 16)
FixationDeviceType = int('300A0192', 16)
FixationDeviceLabel = int('300A0194', 16)
FixationDeviceDescription = int('300A0196', 16)
FixationDevicePosition = int('300A0198', 16)
FixationDevicePitchAngle = int('300A0199', 16)
FixationDeviceRollAngle = int('300A019A', 16)
ShieldingDeviceSequence = int('300A01A0', 16)
ShieldingDeviceType = int('300A01A2', 16)
ShieldingDeviceLabel = int('300A01A4', 16)
ShieldingDeviceDescription = int('300A01A6', 16)
ShieldingDevicePosition = int('300A01A8', 16)
SetupTechnique = int('300A01B0', 16)
SetupTechniqueDescription = int('300A01B2', 16)
SetupDeviceSequence = int('300A01B4', 16)
SetupDeviceType = int('300A01B6', 16)
SetupDeviceLabel = int('300A01B8', 16)
SetupDeviceDescription = int('300A01BA', 16)
SetupDeviceParameter = int('300A01BC', 16)
SetupReferenceDescription = int('300A01D0', 16)
TableTopVerticalSetupDisplacement = int('300A01D2', 16)
TableTopLongitudinalSetupDisplacement = int('300A01D4', 16)
TableTopLateralSetupDisplacement = int('300A01D6', 16)
BrachyTreatmentTechnique = | |
21)
d_db = d.get_iterator(self.SPHERE_DATA)
self.assertEqual(next(d_db), ("theta", "phi", "FILE",
"FILE median blur"))
regress = d.get_iterator(self.SOURCE_DATA, self.MEDIAN_BLUR_DATA)
next(regress)
for row, reg_row in zip(d_db, regress):
left = os.path.join(self.SPHERE_DATA, a_grey[row[2]])
right = os.path.join(self.SPHERE_DATA, row[3])
reg_image = os.path.join(self.SOURCE_DATA, reg_row[3])
self.assertTrue(filecmp.cmp(left, right, False))
self.assertEqual(row, reg_row)
self.assertTrue(filecmp.cmp(right, reg_image, False))
self.assertTrue(d.check_database(self.SPHERE_DATA))
# # uncomment when you want to regenerate the regression data
# sh.copyfile(self.d_csv, os.path.join(self.SOURCE_DATA,
# self.MEDIAN_BLUR_DATA))
# regress = d.get_iterator(self.SPHERE_DATA)
# next(regress)
# for row in regress:
# sh.copyfile(os.path.join(self.SPHERE_DATA, row[3]),
# os.path.join(self.SOURCE_DATA, row[3]))
os.unlink(self.d_csv)
def test_bilateral_filter(self):
    """Regression test for cv.file_bilateral_filter via the database column helper.

    Applies the filter directly to every image (suffix "_a"), then through
    d.file_add_file_column, and checks the two outputs are byte-identical
    to each other and to the stored regression images.
    """
    try:
        from .. import cv
    except Exception as e:
        # cv wrapper (OpenCV) may be unavailable in this environment; skip.
        log.info("Unable to run test: " + str(e))
        return
    # Fix: removed a redundant second `from .. import cv` here — the import
    # inside the try block above already bound `cv` in this scope.
    from ..cv import d as d_image
    sh.copyfile(self.d_backup, self.d_csv)
    a_db = a.get_iterator(self.SPHERE_DATA)
    next(a_db)  # skip the header row
    a_grey = {}
    for row in a_db:
        new_fn = \
            cv.file_bilateral_filter(self.SPHERE_DATA, row[-1], suffix="_a")
        a_grey[row[-1]] = new_fn
    self.assertFalse(d_image.file_add_file_column(self.SPHERE_DATA, 2,
        "FILE bilateral filter", cv.file_bilateral_filter))
    d_db = d.get_iterator(self.SPHERE_DATA)
    self.assertTrue(reduce(lambda x, y: x + 1, d_db, 0) == 21)
    d_db = d.get_iterator(self.SPHERE_DATA)
    self.assertEqual(next(d_db), ("theta", "phi", "FILE",
                                  "FILE bilateral filter"))
    regress = d.get_iterator(self.SOURCE_DATA, self.BILATERAL_FILTER_DATA)
    next(regress)
    for row, reg_row in zip(d_db, regress):
        left = os.path.join(self.SPHERE_DATA, a_grey[row[2]])
        right = os.path.join(self.SPHERE_DATA, row[3])
        reg_image = os.path.join(self.SOURCE_DATA, reg_row[3])
        # direct output == column output == stored regression image
        self.assertTrue(filecmp.cmp(left, right, False))
        self.assertEqual(row, reg_row)
        self.assertTrue(filecmp.cmp(right, reg_image, False))
    self.assertTrue(d.check_database(self.SPHERE_DATA))
    # # uncomment when you want to regenerate the regression data
    # sh.copyfile(self.d_csv, os.path.join(self.SOURCE_DATA,
    #                                      self.BILATERAL_FILTER_DATA))
    # regress = d.get_iterator(self.SPHERE_DATA)
    # next(regress)
    # for row in regress:
    #     sh.copyfile(os.path.join(self.SPHERE_DATA, row[3]),
    #                 os.path.join(self.SOURCE_DATA, row[3]))
    os.unlink(self.d_csv)
def test_canny(self):
    """Regression test for cv.file_canny via the database column helper.

    Applies the filter directly to every image (suffix "_a"), then through
    d.file_add_file_column, and checks the two outputs are byte-identical
    to each other and to the stored regression images.
    """
    try:
        from .. import cv
    except Exception as e:
        # cv wrapper (OpenCV) may be unavailable in this environment; skip.
        log.info("Unable to run test: " + str(e))
        return
    # Fix: removed a redundant second `from .. import cv` here — the import
    # inside the try block above already bound `cv` in this scope.
    from ..cv import d as d_image
    sh.copyfile(self.d_backup, self.d_csv)
    a_db = a.get_iterator(self.SPHERE_DATA)
    next(a_db)  # skip the header row
    a_grey = {}
    for row in a_db:
        new_fn = \
            cv.file_canny(self.SPHERE_DATA, row[-1], suffix="_a")
        a_grey[row[-1]] = new_fn
    self.assertFalse(d_image.file_add_file_column(self.SPHERE_DATA, 2,
        "FILE canny", cv.file_canny))
    d_db = d.get_iterator(self.SPHERE_DATA)
    self.assertTrue(reduce(lambda x, y: x + 1, d_db, 0) == 21)
    d_db = d.get_iterator(self.SPHERE_DATA)
    self.assertEqual(next(d_db), ("theta", "phi", "FILE",
                                  "FILE canny"))
    regress = d.get_iterator(self.SOURCE_DATA, self.CANNY_DATA)
    next(regress)
    for row, reg_row in zip(d_db, regress):
        left = os.path.join(self.SPHERE_DATA, a_grey[row[2]])
        right = os.path.join(self.SPHERE_DATA, row[3])
        reg_image = os.path.join(self.SOURCE_DATA, reg_row[3])
        # direct output == column output == stored regression image
        self.assertTrue(filecmp.cmp(left, right, False))
        self.assertEqual(row, reg_row)
        self.assertTrue(filecmp.cmp(right, reg_image, False))
    self.assertTrue(d.check_database(self.SPHERE_DATA))
    # # uncomment when you want to regenerate the regression data
    # sh.copyfile(self.d_csv, os.path.join(self.SOURCE_DATA,
    #                                      self.CANNY_DATA))
    # regress = d.get_iterator(self.SPHERE_DATA)
    # next(regress)
    # for row in regress:
    #     sh.copyfile(os.path.join(self.SPHERE_DATA, row[3]),
    #                 os.path.join(self.SOURCE_DATA, row[3]))
    os.unlink(self.d_csv)
def test_contour_threshold(self):
    """Regression test for cv.file_contour_threshold via the column helper.

    Applies the filter directly to every image (suffix "_a"), then through
    d.file_add_file_column, and checks the two outputs are byte-identical
    to each other and to the stored regression images.
    """
    try:
        from .. import cv
    except Exception as e:
        # cv wrapper (OpenCV) may be unavailable in this environment; skip.
        log.info("Unable to run test: " + str(e))
        return
    # Fix: removed a redundant second `from .. import cv` here — the import
    # inside the try block above already bound `cv` in this scope.
    from ..cv import d as d_image
    sh.copyfile(self.d_backup, self.d_csv)
    a_db = a.get_iterator(self.SPHERE_DATA)
    next(a_db)  # skip the header row
    a_grey = {}
    for row in a_db:
        new_fn = \
            cv.file_contour_threshold(self.SPHERE_DATA, row[-1],
                                      suffix="_a")
        a_grey[row[-1]] = new_fn
    self.assertFalse(d_image.file_add_file_column(self.SPHERE_DATA, 2,
        "FILE contour threshold", cv.file_contour_threshold))
    d_db = d.get_iterator(self.SPHERE_DATA)
    self.assertTrue(reduce(lambda x, y: x + 1, d_db, 0) == 21)
    d_db = d.get_iterator(self.SPHERE_DATA)
    self.assertEqual(next(d_db), ("theta", "phi", "FILE",
                                  "FILE contour threshold"))
    regress = d.get_iterator(self.SOURCE_DATA, self.CONTOUR_THRESHOLD_DATA)
    next(regress)
    for row, reg_row in zip(d_db, regress):
        left = os.path.join(self.SPHERE_DATA, a_grey[row[2]])
        right = os.path.join(self.SPHERE_DATA, row[3])
        reg_image = os.path.join(self.SOURCE_DATA, reg_row[3])
        # direct output == column output == stored regression image
        self.assertTrue(filecmp.cmp(left, right, False))
        self.assertEqual(row, reg_row)
        self.assertTrue(filecmp.cmp(right, reg_image, False))
    self.assertTrue(d.check_database(self.SPHERE_DATA))
    # # uncomment when you want to regenerate the regression data
    # sh.copyfile(self.d_csv, os.path.join(self.SOURCE_DATA,
    #                                      self.CONTOUR_THRESHOLD_DATA))
    # regress = d.get_iterator(self.SPHERE_DATA)
    # next(regress)
    # for row in regress:
    #     sh.copyfile(os.path.join(self.SPHERE_DATA, row[3]),
    #                 os.path.join(self.SOURCE_DATA, row[3]))
    os.unlink(self.d_csv)
def test_contour_threshold_color(self):
    """Regression test for cv.file_contour_threshold with a color argument.

    Applies the filter directly to every image (suffix "_a", color
    (255, 0, 0)), then through d.file_add_file_column with the same color,
    and checks the two outputs are byte-identical to each other and to the
    stored regression images.
    """
    try:
        from .. import cv
    except Exception as e:
        # cv wrapper (OpenCV) may be unavailable in this environment; skip.
        log.info("Unable to run test: " + str(e))
        return
    # Fix: removed a redundant second `from .. import cv` here — the import
    # inside the try block above already bound `cv` in this scope.
    from ..cv import d as d_image
    sh.copyfile(self.d_backup, self.d_csv)
    a_db = a.get_iterator(self.SPHERE_DATA)
    next(a_db)  # skip the header row
    a_grey = {}
    for row in a_db:
        new_fn = \
            cv.file_contour_threshold(self.SPHERE_DATA, row[-1],
                                      suffix="_a", color=(255,0,0))
        a_grey[row[-1]] = new_fn
    self.assertFalse(d_image.file_add_file_column(self.SPHERE_DATA, 2,
        "FILE contour threshold color",
        lambda x, y: cv.file_contour_threshold(x, y,
            suffix="_cv_contour_threshold_color", color=(255,0,0))))
    d_db = d.get_iterator(self.SPHERE_DATA)
    self.assertTrue(reduce(lambda x, y: x + 1, d_db, 0) == 21)
    d_db = d.get_iterator(self.SPHERE_DATA)
    self.assertEqual(next(d_db), ("theta", "phi", "FILE",
                                  "FILE contour threshold color"))
    regress = d.get_iterator(self.SOURCE_DATA, self.CONTOUR_THRESHOLD_COLOR_DATA)
    next(regress)
    for row, reg_row in zip(d_db, regress):
        left = os.path.join(self.SPHERE_DATA, a_grey[row[2]])
        right = os.path.join(self.SPHERE_DATA, row[3])
        reg_image = os.path.join(self.SOURCE_DATA, reg_row[3])
        # direct output == column output == stored regression image
        self.assertTrue(filecmp.cmp(left, right, False))
        self.assertEqual(row, reg_row)
        self.assertTrue(filecmp.cmp(right, reg_image, False))
    self.assertTrue(d.check_database(self.SPHERE_DATA))
    # # uncomment when you want to regenerate the regression data
    # sh.copyfile(self.d_csv, os.path.join(self.SOURCE_DATA,
    #                                      self.CONTOUR_THRESHOLD_COLOR_DATA))
    # regress = d.get_iterator(self.SPHERE_DATA)
    # next(regress)
    # for row in regress:
    #     sh.copyfile(os.path.join(self.SPHERE_DATA, row[3]),
    #                 os.path.join(self.SOURCE_DATA, row[3]))
    os.unlink(self.d_csv)
def test_sift_draw(self):
    """Regression test for contrib.file_sift_draw via the column helper.

    Applies the SIFT keypoint drawing directly to every image (suffix
    "_a"), then through d.file_add_file_column, and checks the two outputs
    are byte-identical to each other and to the stored regression images.
    """
    try:
        from ..cv import contrib
    except Exception as e:
        # opencv-contrib may be unavailable in this environment; skip.
        log.info("Unable to run test: " + str(e))
        return
    # Fix: removed a redundant second `from ..cv import contrib` here — the
    # import inside the try block above already bound `contrib` in this scope.
    from ..cv import d as d_image
    sh.copyfile(self.d_backup, self.d_csv)
    a_db = a.get_iterator(self.SPHERE_DATA)
    next(a_db)  # skip the header row
    a_grey = {}
    for row in a_db:
        new_fn = \
            contrib.file_sift_draw(self.SPHERE_DATA, row[-1],
                                   suffix="_a")
        a_grey[row[-1]] = new_fn
    self.assertFalse(d_image.file_add_file_column(self.SPHERE_DATA, 2,
        "FILE sift draw", contrib.file_sift_draw))
    d_db = d.get_iterator(self.SPHERE_DATA)
    self.assertTrue(reduce(lambda x, y: x + 1, d_db, 0) == 21)
    d_db = d.get_iterator(self.SPHERE_DATA)
    self.assertEqual(next(d_db), ("theta", "phi", "FILE",
                                  "FILE sift draw"))
    regress = d.get_iterator(self.SOURCE_DATA, self.SIFT_DRAW_DATA)
    next(regress)
    for row, reg_row in zip(d_db, regress):
        left = os.path.join(self.SPHERE_DATA, a_grey[row[2]])
        right = os.path.join(self.SPHERE_DATA, row[3])
        reg_image = os.path.join(self.SOURCE_DATA, reg_row[3])
        # direct output == column output == stored regression image
        self.assertTrue(filecmp.cmp(left, right, False))
        self.assertEqual(row, reg_row)
        self.assertTrue(filecmp.cmp(right, reg_image, False))
    self.assertTrue(d.check_database(self.SPHERE_DATA))
    # # uncomment when you want to regenerate the regression data
    # sh.copyfile(self.d_csv, os.path.join(self.SOURCE_DATA,
    #                                      self.SIFT_DRAW_DATA))
    # regress = d.get_iterator(self.SPHERE_DATA)
    # next(regress)
    # for row in regress:
    #     sh.copyfile(os.path.join(self.SPHERE_DATA, row[3]),
    #                 os.path.join(self.SOURCE_DATA, row[3]))
    os.unlink(self.d_csv)
def test_sift_draw_color(self):
    """Regression test for contrib.file_sift_draw with a color argument.

    Applies the SIFT keypoint drawing directly to every image (suffix
    "_a", color (255, 0, 0)), then through d.file_add_file_column with the
    same color, and checks the two outputs are byte-identical to each
    other and to the stored regression images.
    """
    try:
        from ..cv import contrib
    except Exception as e:
        # opencv-contrib may be unavailable in this environment; skip.
        log.info("Unable to run test: " + str(e))
        return
    # Fix: removed a redundant second `from ..cv import contrib` here — the
    # import inside the try block above already bound `contrib` in this scope.
    from ..cv import d as d_image
    sh.copyfile(self.d_backup, self.d_csv)
    a_db = a.get_iterator(self.SPHERE_DATA)
    next(a_db)  # skip the header row
    a_grey = {}
    for row in a_db:
        new_fn = \
            contrib.file_sift_draw(self.SPHERE_DATA, row[-1],
                                   suffix="_a", color=(255,0,0))
        a_grey[row[-1]] = new_fn
    self.assertFalse(d_image.file_add_file_column(self.SPHERE_DATA, 2,
        "FILE sift draw color",
        lambda x, y: contrib.file_sift_draw(x, y,
            suffix="_cv_sift_draw_color", color=(255,0,0))))
    d_db = d.get_iterator(self.SPHERE_DATA)
    self.assertTrue(reduce(lambda x, y: x + 1, d_db, 0) == 21)
    d_db = d.get_iterator(self.SPHERE_DATA)
    self.assertEqual(next(d_db), ("theta", "phi", "FILE",
                                  "FILE sift draw color"))
    regress = d.get_iterator(self.SOURCE_DATA, self.SIFT_DRAW_COLOR_DATA)
    next(regress)
    for row, reg_row in zip(d_db, regress):
        left = os.path.join(self.SPHERE_DATA, a_grey[row[2]])
        right = os.path.join(self.SPHERE_DATA, row[3])
        reg_image = os.path.join(self.SOURCE_DATA, reg_row[3])
        # direct output == column output == stored regression image
        self.assertTrue(filecmp.cmp(left, right, False))
        self.assertEqual(row, reg_row)
        self.assertTrue(filecmp.cmp(right, reg_image, False))
    self.assertTrue(d.check_database(self.SPHERE_DATA))
    # # uncomment when you want to regenerate the regression data
    # sh.copyfile(self.d_csv, os.path.join(self.SOURCE_DATA,
    #                                      self.SIFT_DRAW_COLOR_DATA))
    # regress = d.get_iterator(self.SPHERE_DATA)
    # next(regress)
    # for row in regress:
    #     sh.copyfile(os.path.join(self.SPHERE_DATA, row[3]),
    #                 os.path.join(self.SOURCE_DATA, row[3]))
    os.unlink(self.d_csv)
def test_surf_draw(self):
try:
from ..cv import contrib
except Exception as e:
log.info("Unable to run test: " + str(e))
return
from ..cv import contrib
from ..cv import d as d_image
sh.copyfile(self.d_backup, self.d_csv)
a_db = a.get_iterator(self.SPHERE_DATA)
next(a_db)
a_grey = {}
for row in a_db:
new_fn = \
contrib.file_surf_draw(self.SPHERE_DATA, row[-1],
suffix="_a")
a_grey[row[-1]] = new_fn
self.assertFalse(d_image.file_add_file_column(self.SPHERE_DATA, 2,
"FILE surf draw", contrib.file_surf_draw))
d_db = d.get_iterator(self.SPHERE_DATA)
self.assertTrue(reduce(lambda x, y: x + 1, d_db, 0) == 21)
d_db = d.get_iterator(self.SPHERE_DATA)
self.assertEqual(next(d_db), ("theta", "phi", "FILE",
"FILE surf draw"))
regress = d.get_iterator(self.SOURCE_DATA, self.SURF_DRAW_DATA)
next(regress)
for row, reg_row in zip(d_db, regress):
left = os.path.join(self.SPHERE_DATA, a_grey[row[2]])
right = os.path.join(self.SPHERE_DATA, row[3])
reg_image = os.path.join(self.SOURCE_DATA, reg_row[3])
self.assertTrue(filecmp.cmp(left, right, False))
self.assertEqual(row, reg_row)
self.assertTrue(filecmp.cmp(right, reg_image, False))
self.assertTrue(d.check_database(self.SPHERE_DATA))
# # uncomment when you want to regenerate the regression data
# sh.copyfile(self.d_csv, os.path.join(self.SOURCE_DATA,
# self.SURF_DRAW_DATA))
# regress = d.get_iterator(self.SPHERE_DATA)
# next(regress)
# for row in regress:
# sh.copyfile(os.path.join(self.SPHERE_DATA, row[3]),
# os.path.join(self.SOURCE_DATA, row[3]))
os.unlink(self.d_csv)
def test_surf_draw_color(self):
try:
from ..cv import contrib
except Exception as e:
log.info("Unable to run test: " + str(e))
return
from ..cv import contrib
from ..cv import d as d_image
sh.copyfile(self.d_backup, self.d_csv)
a_db = a.get_iterator(self.SPHERE_DATA)
next(a_db)
a_grey = {}
for row in a_db:
new_fn = \
contrib.file_surf_draw(self.SPHERE_DATA, row[-1],
suffix="_a", color=(255,0,0))
a_grey[row[-1]] = new_fn
self.assertFalse(d_image.file_add_file_column(self.SPHERE_DATA, 2,
"FILE surf draw color",
lambda x, y: contrib.file_surf_draw(x, y,
suffix="_cv_surf_draw_color", color=(255,0,0))))
d_db = d.get_iterator(self.SPHERE_DATA)
self.assertTrue(reduce(lambda x, y: x + 1, d_db, 0) == 21)
d_db = d.get_iterator(self.SPHERE_DATA)
self.assertEqual(next(d_db), ("theta", "phi", "FILE",
"FILE surf draw color"))
regress = d.get_iterator(self.SOURCE_DATA, self.SURF_DRAW_COLOR_DATA)
next(regress)
for row, reg_row in zip(d_db, regress):
left = os.path.join(self.SPHERE_DATA, a_grey[row[2]])
right = os.path.join(self.SPHERE_DATA, row[3])
reg_image = os.path.join(self.SOURCE_DATA, reg_row[3])
self.assertTrue(filecmp.cmp(left, right, False))
self.assertEqual(row, reg_row)
self.assertTrue(filecmp.cmp(right, reg_image, False))
self.assertTrue(d.check_database(self.SPHERE_DATA))
# # uncomment when you want to regenerate the regression data
# sh.copyfile(self.d_csv, os.path.join(self.SOURCE_DATA,
# self.SURF_DRAW_COLOR_DATA))
# regress = d.get_iterator(self.SPHERE_DATA)
# next(regress)
# for row in regress:
# sh.copyfile(os.path.join(self.SPHERE_DATA, row[3]),
# os.path.join(self.SOURCE_DATA, row[3]))
os.unlink(self.d_csv)
def test_fast_draw(self):
try:
from .. import cv
except Exception as e:
log.info("Unable to run test: " + str(e))
return
from .. import cv
from ..cv import d as d_image
sh.copyfile(self.d_backup, self.d_csv)
a_db = a.get_iterator(self.SPHERE_DATA)
next(a_db)
a_grey | |
ParamName_String.append('_'.join(('ParamName', self.Model_List[m].split()[3])))
ParamUnits_String.append('_'.join(('ParamUnits', self.Model_List[m].split()[3])))
VarName_String.append('_'.join(('VarName', self.Model_List[m].split()[3])))
y0_String.append('_'.join(('y0', self.Model_List[m].split()[3])))
Param_dict = dict(zip(Param_String, self.Param_List))
ParamName_dict = dict(zip(ParamName_String, self.ParamName_List))
ParamUnits_dict = dict(zip(ParamUnits_String, self.ParamUnits_List))
VarName_dict = dict(zip(VarName_String, self.VarName_List))
y0_dict = dict(zip(y0_String, self.y0_List))
# Get the best model and update the parameters for txt file and SBML outputs
BestSystemType = self.Best_Model.split()[3]
Param_ = '_'.join(('Param', BestSystemType))
self.FittedParams = Param_dict[Param_] #convert string to variable name
ParamName_ = '_'.join(('ParamName', BestSystemType))
self.ParamName = ParamName_dict[ParamName_] #convert string to variable name
ParamUnits_ = '_'.join(('ParamUnits', BestSystemType))
self.ParamUnits = ParamUnits_dict[ParamUnits_] #convert string to variable name
VarName_ = '_'.join(('VarName', BestSystemType))
self.VarName = VarName_dict[VarName_] #convert string to variable name
y0_ = '_'.join(('y0', BestSystemType))
self.Init = y0_dict[y0_] #convert string to variable name
self.Time, self.VariableMatrix, self.DataLegend, self.ODEstring = self.LogicGLib1.Run_LogicGatesPlot\
(BestSystemType, self.Init, self.Data_header1, self.Data_array1, self.Data_stddev1, self.NumState_, self.FittedParams)
plt.show()
# -------------------------------------------------------------------------------- #
### Calculate Time Elapsed ###
endtime = time.time()
self.elapsedtime = endtime - self.starttime
print('Overall Time taken =', self.elapsedtime, 's')
# -------------------------------------------------------------------------------- #
def CreateOutputTextFile(self):
### Rank the AIC results
Rank = ss.rankdata(self.AIC_Results)
### create a table with (Model, SSE, AIC, Rank)
TableData = []
Header = ["Model", "SSE", "AIC", "\u0394AIC", "Evidence", "Rank"]
# #use list comprehension
# dAIC = [x - self.min_AIC for x in self.AIC_Results]
i = 0
dAIC = []
Evidence = []
for x in self.AIC_Results:
dAIC.append(x - self.min_AIC)
if (dAIC[i] == 0):
Evidence.append('-')
elif (dAIC[i] > 0) and (dAIC[i] <= 2):
Evidence.append('Substantial Support')
elif (dAIC[i] > 10):
Evidence.append('No Support')
else:
Evidence.append('Weak Support')
i += 1
# #decide if the model with rank 1 is a better model with confidence
# Rank_ = [int(x) for x in Rank]
# BestEvidence = Evidence[Rank_.index(2)]
if 'Substantial Support' in Evidence:
Count = Evidence.count('Substantial Support')
BestEvidence = 'low confidence. There are ' + str(Count) + ' other comparably good models'
else:
BestEvidence = 'confidence'
for i in range(0, len(self.Model_List)):
TableData.append([str(self.Model_List[i]), str(self.SSE_Combined[i]), str(self.AIC_Results[i]), str(dAIC[i]), Evidence[i], str(int(Rank[i]))])
Table = tabulate(TableData, Header, tablefmt='orgtbl')
print(Table)
### Create and write to Txt File ###
Txtfilename1, DateTimenow = Txtfilename.gettxtfilename()
print('\nText File Generated:', Txtfilename)
#f = open(Txtfilename1,"a+")
Txtpath = "Results\\" + Txtfilename1
f = open(Txtpath,encoding = 'utf-8', mode ="a+")
f.write('Input File name: '+self.Input_filename_+'\n')
f.write('\n')
f.write('Models Tested: '+str(self.Model_List)+'\n')
f.write('\n')
f.write(str(Table))
f.write('\n\n')
f.write('Recommended Model: '+self.Best_Model+ ' with ' + BestEvidence +'\n')
f.write('\n')
f.write('Optimized Parameters:\n')
for i in self.ParamName:
f.write('\t'+ i +' = '+ str(self.FittedParams[self.ParamName.index(i)])+'\n')
f.write('\tdeg_mRNA1 = 0.1386\n')
f.write('ODE:\n')
for j in self.VarName:
f.write('d'+ j + 'dt'+' = '+ self.ODEstring[self.VarName.index(j)]+'\n')
f.write('\n')
f.write('Number of Data points: '+str(self.Sample_size)+'\n')
f.write('SSE of Ideal Model: '+str(self.SSE_Combined[self.min_AIC_index])+'\n')
f.write('AIC of Ideal Model: '+str(self.min_AIC)+'\n')
f.write('\n')
f.write('Time taken: '+str(self.elapsedtime)+ 's\n')
f.write('\n')
f.write('Date and Time: '+DateTimenow+'\n')
f.close()
def ExportModelDataFile(self):
### To export Model Data in CSV file
ExportDataFile = input("Please insert 'yes/no' to export Model data file):")
while not ((ExportDataFile.casefold() == 'yes') or (ExportDataFile.casefold() == 'no')):
ExportDataFile = input("Error: Incorrect Choice! Please insert either yes or no only:\n")
if ExportDataFile.casefold() == 'yes':
CSVfileName = Txtfilename.getcsvfilename()
VariableMatrixData = self.VariableMatrix[-1][:,:].tolist()
CSVfilePath = "Results/" + CSVfileName
with open(CSVfilePath, 'w', newline='') as csvfile:
CF = csv.writer(csvfile, delimiter=',')
CF.writerow(['Time(min)'] + self.DataLegend)
for i in range(0, len(self.Time)):
CF.writerow([self.Time[i]] + VariableMatrixData[i])
else:
print('No result data file is exported\n')
def ExportSBMLFile(self):
### To export SBML file in .xml
from exportsbml import exportsbml
ODE = self.ODEstring;
Variable = []
for v in self.VarName:
Variable.append(v) #to convert to Molar instead of mole
VarInit = self.Init;
#'molL-1s-1', 's-1', 'molL-1', 'dimensionless'
for u in self.ParamUnits:
if u == 'molL-1min-1':
self.ParamUnits[self.ParamUnits.index(u)] = 'molperLmin'
elif u == 'molL-1':
self.ParamUnits[self.ParamUnits.index(u)] = 'molperL'
elif u == 's-1':
self.ParamUnits[self.ParamUnits.index(u)] = 'per_second'
elif u == 'min-1':
self.ParamUnits[self.ParamUnits.index(u)] = 'per_min'
elif u == 'dimensionless':
self.ParamUnits[self.ParamUnits.index(u)] = 'Dimension_less'
else:
print('Error in the defined units for parameters')
if self.NumState_ == 2:
ParName = self.ParamName + ['deg_mRNA', 'state'];
Params = self.FittedParams.tolist() + [0.1386, 1];
ParamsUnit = self.ParamUnits + ['per_min', 'Dimension_less'];
elif self.NumState_ == 4:
ParName = self.ParamName + ['deg_mRNA', 'state1', 'state2'];
Params = self.FittedParams.tolist() + [0.1386, 1, 1];
ParamsUnit = self.ParamUnits + ['per_min', 'Dimension_less', 'Dimension_less'];
else:
print('Error in setting NumState for SBML output')
exportsbml(ODE, Variable, VarInit, ParName, Params, ParamsUnit)
def PlotSBOLGraphics(self):
### To plot and visualize the gene circuit in SBOL visual compliant diagram
import PlotCircuit as pc
PlotCircuitFile = input("Please insert 'yes/no' to set and visualize the gene circuit diagram:\n")
while not ((PlotCircuitFile.casefold() == 'yes') or (PlotCircuitFile.casefold() == 'no')):
PlotCircuitFile = input("Error: Incorrect Choice! Please insert either yes or no only:\n")
if PlotCircuitFile.casefold() == 'yes':
Reporter = input('Please insert Reporter type (RFP/GFP/YFP/BFP/...): \n')
print('Reporter: ', Reporter)
if Reporter == 'RFP':
ReporterColor = "red."
elif Reporter == 'GFP':
ReporterColor = "green."
elif Reporter == 'YFP':
ReporterColor = "yellow."
elif Reporter == 'BFP':
ReporterColor = "blue."
else:
ReporterColor = "orange."
Origin = []
PlasmidNum = input('Please insert the number of plasmids: \n')
while not PlasmidNum.isdigit():
PlasmidNum = input('Error: Incorrect input! Please insert the right number: \n')
for pl in range(0, int(PlasmidNum)):
Origin.append (input('Please insert the Name of Origin '+str(pl+1)+': '))
print('Origin: ', Origin)
Gene = []
PartNum = input('Please insert the number of parts: ')
while not PartNum.isdigit():
PartNum = input('Error: Incorrect input! Please insert the right number: \n')
for pt in range(0, int(PartNum)):
Gene.append(input('Please insert the Name of gene '+str(pt+1)+': '))
print('Gene: ', Gene)
# NOT gate models
if 'NOTgate' in self.Best_Model:
if PlasmidNum == '1':
if PartNum == '1':
Input = "p.black r.black c." + ReporterColor + Reporter + " " + "t.black o.black." + Origin[0]
Regulations = [{'type': 'Repression', 'from_part': {'start': 8, 'end': 8},
'to_part': {'start': 8,'end': 8, 'fwd': True},
'opts': {'color': (0.12,0.47,0.71), 'linewidth': 1.5}}]
elif PartNum == '2':
Input = "p.black r.black c.blue."+ Gene[0] + " t.black "+ "p.black r.black c."+ ReporterColor + Reporter + " " + "t.black o.black." + Origin[0]
Regulations = [{'type': 'Repression', 'from_part': {'start': 40, 'end': 40},
'to_part': {'start': 78,'end': 78, 'fwd': True},
'opts': {'color': (0.12,0.47,0.71), 'linewidth': 1.5}}]
else:
print('Error: The numbers of part and plasmid are not compatible')
elif PlasmidNum == '2':
if PartNum == '2':
Input = "p.black r.black c.blue."+ Gene[0] + " t.black o.black." + Origin[0] + " "+ "=.white "+ "p.black r.black c."+ ReporterColor + Reporter + " " + "t.black o.black." + Origin[1]
Regulations = [{'type': 'Repression', 'from_part': {'start': 43, 'end': 43},
'to_part': {'start': 105,'end': 105, 'fwd': True},
'opts': {'color': (0.12,0.47,0.71), 'linewidth': 1.5}}]
elif PartNum == '3':
Input = "p.black r.black c.orange."+ Gene[0] + " t.black o.black." + Origin[0] + " =.white " + \
"p.black r.black c.blue."+ Gene[1] + " t.black " + \
"p.black r.black c."+ ReporterColor + Reporter + " " + "t.black o.black." + Origin[1]
Regulations = [{'type': 'Connection', 'from_part': {'start': 43, 'end': 43},
'to_part': {'start': 142,'end': 142, 'fwd': True},
'opts': {'color': (1.00,0.50,0.00), 'linewidth': 1.5}},
{'type': 'Repression', 'from_part': {'start': 142, 'end': 142},
'to_part': {'start': 178,'end': 178, 'fwd': True},
'opts': {'color': (0.12,0.47,0.71), 'linewidth': 1.5}}]
else:
print('Error: The numbers of part and plasmid are not compatible')
elif PlasmidNum == '3':
if PartNum == '3':
Input = "p.black r.black c.orange."+ Gene[0] + " t.black o.black." + Origin[0] + " =.white " + \
"p.black r.black c.blue."+ Gene[1] + " t.black o.black." + Origin[1] + " =.white " + \
"p.black r.black c."+ ReporterColor + Reporter + " " + "t.black o.black." + Origin[2]
Regulations = [{'type': 'Connection', 'from_part': {'start': 43, 'end': 43},
'to_part': {'start': 142,'end': 142, 'fwd': True},
'opts': {'color': (1.00,0.50,0.00), 'linewidth': 1.5}},
{'type': 'Repression', 'from_part': {'start': 142, 'end': 142},
'to_part': {'start': 205,'end': 205, 'fwd': True},
'opts': {'color': (0.12,0.47,0.71), 'linewidth': 1.5}}]
else:
print('Error: The numbers of part and plasmid are not compatible')
else:
print('The inserted plasmid number is too high')
# AND gate models
elif 'ANDgate' in self.Best_Model:
if PlasmidNum == '1':
if PartNum == '1':
Input = "p.black r.black c." + ReporterColor | |
2.0, rect=(0.3, 0.3, 0.8, 0.8)) # not works with axes. savefig works.
# save
plt.savefig("refirgerator", bbox_inches='tight')
plt.show()
def _map_camera_files(items, plain, mirrored, label=None):
    """Translate camera-1 (video, feature, label) entries to another camera.

    *plain* / *mirrored* are ((video_old, video_new), (feat_old, feat_new))
    replacement pairs; the pair is chosen per entry depending on whether the
    entry is a mirrored clip. Entries whose mapped video or feature file is
    missing on disk are skipped; when *label* is given a diagnostic line is
    printed for each skipped entry (pass None to skip silently, as is done
    for camera_3).
    """
    mapped = []
    for f, feat, y in items:
        (v_old, v_new), (ft_old, ft_new) = plain if '-mirrored.mp4' not in f else mirrored
        f = f.replace(v_old, v_new)
        feat = feat.replace(ft_old, ft_new)
        if not os.path.exists(f) or not os.path.exists(feat):
            if label is not None:
                print(f'{label}: {f} does not exist')
            continue
        mapped.append((f, feat, y))
    return mapped


def split_train_test_video(meta, video_type='_1.mp4', test_size=0.3, random_state=42):
    """Split the dataset into train/test on camera-1 clips, then derive the
    matching mirrored, camera-2 and camera-3 entries for each split.

    Parameters
    ----------
    meta: dict with key 'data' -> list of (video_path, feature_path, label).
    video_type: suffix identifying camera-1 clips (default '_1.mp4').
    test_size, random_state: forwarded to sklearn's train_test_split.

    Returns
    -------
    (train_meta, test_meta): dicts keyed 'camera_1'/'camera_2'/'camera_3'.
    """
    # Partition the raw entries by camera suffix.
    camera_1 = [(f, feat, y) for f, feat, y in meta['data'] if video_type in f]  # '_1.mp4'
    camera_2 = [(f, feat, y) for f, feat, y in meta['data'] if '_2.mkv' in f]
    camera_3 = [(f, feat, y) for f, feat, y in meta['data'] if '_3.mp4' in f]
    print(f'camera_1 ({len(camera_1)}): {Counter([y for f, feat, y in camera_1])}')
    print(f'camera_2 ({len(camera_2)}): {Counter([y for f, feat, y in camera_2])}')
    print(f'camera_3 ({len(camera_3)}): {Counter([y for f, feat, y in camera_3])}')
    # The split itself is defined on the camera-1 (front view) clips only;
    # the other cameras inherit it so clips of one recording never end up
    # in both train and test.
    train1, test1 = train_test_split(camera_1, test_size=test_size, random_state=random_state)
    # Replacement tables: plain -> mirrored (camera 1), and camera-1 ->
    # camera-2 / camera-3 for both plain and mirrored clips.
    mirror_1 = (('_1.mp4', '_1-mirrored.mp4'), ('_1_vgg.npy', '_1-mirrored_vgg.npy'))
    cam2_plain = (('_1.mp4', '_2.mkv'), ('_1_vgg.npy', '_2_vgg.npy'))
    cam2_mirror = (('_1-mirrored.mp4', '_2-mirrored.mkv'), ('_1-mirrored_vgg.npy', '_2-mirrored_vgg.npy'))
    cam3_plain = (('_1.mp4', '_3.mp4'), ('_1_vgg.npy', '_3_vgg.npy'))
    cam3_mirror = (('_1-mirrored.mp4', '_3-mirrored.mp4'), ('_1-mirrored_vgg.npy', '_3-mirrored_vgg.npy'))
    # camera_1: augment both splits with their mirrored variants.
    train1.extend(_map_camera_files(train1, mirror_1, mirror_1, label='train1'))
    test1.extend(_map_camera_files(test1, mirror_1, mirror_1, label='test1'))
    # camera_2: derive from the (already augmented) camera-1 splits.
    train2 = _map_camera_files(train1, cam2_plain, cam2_mirror, label='train2')
    test2 = _map_camera_files(test1, cam2_plain, cam2_mirror, label='test2')
    # camera_3: same derivation, but missing files are skipped silently.
    train3 = _map_camera_files(train1, cam3_plain, cam3_mirror)
    test3 = _map_camera_files(test1, cam3_plain, cam3_mirror)
    train_meta = {'camera_1': train1, 'camera_2': train2, 'camera_3': train3}
    test_meta = {'camera_1': test1, 'camera_2': test2, 'camera_3': test3}
    return train_meta, test_meta
def get_activity_info(in_file, log):
    """Extract the (start, end) time of an activity from a log file.

    Placeholder: extraction is not implemented yet, so (0, 0) is returned
    regardless of *in_file* and *log*.
    """
    return 0, 0
def parse_logs(in_dir='data/data-clean/log'):
    """Combine every CSV log file in *in_dir* into one row-keyed dict.

    Bug fixes over the original:
    - ``os.listdir(...).sort()`` returns None (list.sort is in-place), so
      ``pd.concat`` received None -> use ``sorted()``.
    - ``pd.read_csv`` was given bare filenames without the directory ->
      join each name with *in_dir*.
    - rows of ``combined_csv.values`` are numpy arrays, which are
      unhashable dict keys -> convert each row to a tuple.

    Returns
    -------
    dict mapping each log row (as a tuple) to '' (placeholder value).
    """
    files = [os.path.join(in_dir, f) for f in sorted(os.listdir(in_dir))]
    combined_csv = pd.concat([pd.read_csv(f) for f in files])
    # # export to csv
    # combined_csv.to_csv("combined_csv.csv", index=False, encoding='utf-8-sig')
    video_logs = {}  # row tuple -> '' (to be filled by later processing)
    for line in combined_csv.values:
        video_logs[tuple(line)] = ''
    return video_logs
def change_label2idx(train_meta, label2idx={}):
    """Append each record's integer class index, in place.

    Every (video_path, feature, y_label) record in every camera list is
    rewritten as (video_path, feature, y_label, label2idx[y_label]).

    Returns the same (mutated) dict.
    """
    for records in train_meta.values():
        for pos, rec in enumerate(records):
            video_path, feature, y_label = rec[0], rec[1], rec[2]
            records[pos] = (video_path, feature, y_label, label2idx[y_label])
    return train_meta
def cnn_feature2final_feature(train_meta, feature_type='mean', is_test=False):
    """Reduce each per-video CNN feature file to a single feature vector.

    Parameters
    ----------
    train_meta: dict mapping camera name -> list of
        (video_path, feature_path, y_label, y_idx) tuples.
    feature_type: 'mean' (frame average) or 'sampling' (frame sampling;
        only honoured when *is_test* is False).
    is_test: when True only the 'mean' reduction is supported.

    Returns
    -------
    The same dict, each tuple extended to
    (video_path, feature_path, y_label, y_idx, X); X is '' when the
    feature file is missing on disk.

    Raises
    ------
    ValueError
        For an unsupported feature_type. (The original code left ``x``
        undefined in that case — or silently reused the value from the
        previous iteration.)
    """
    for name, train in train_meta.items():
        for i, vs in enumerate(train):
            f = vs[1]  # path to the per-frame CNN feature file
            if not os.path.exists(f):
                # Missing feature file: keep the record with an empty X.
                train_meta[name][i] = (vs[0], vs[1], vs[2], vs[3], '')
                continue
            if feature_type == 'mean':
                x = extract_feature_average(f)
            elif feature_type == 'sampling' and not is_test:
                x = extract_feature_sampling(f)
            else:
                raise ValueError(f'unsupported feature_type: {feature_type!r} (is_test={is_test})')
            train_meta[name][i] = (vs[0], vs[1], vs[2], vs[3], x)
    return train_meta
def gen_Xy(in_dir, out_dir, is_subclip=False, is_mirror=True, is_cnn_feature=True, device_type='refrigerator'):
    """Mirror videos and extract features by CNN.

    Walks in_dir's device/activity/participant folder tree, and for each
    video optionally generates a mirrored copy and per-frame VGG features.

    Parameters
    ----------
    in_dir: list of device directories, e.g. ['data/data-clean/refrigerator']
    out_dir: root directory for mirrored videos and feature files
    is_subclip: cut video (NOTE(review): currently unused in this function)
    is_mirror: also generate a mirrored copy of each video
    is_cnn_feature: extract per-frame CNN (VGG-16 checkpoint) features
    device_type: only device dirs containing this substring are processed

    Returns
    -------
    meta: dictionary {'data': [(video_path, cnn_feature, y)],
        'is_mirror': ..., 'is_cnn_feature': ...}
    """
    if is_cnn_feature:
        # deep neural network model (pretrained VGG-16 checkpoint)
        model_file = './features/video/slim/vgg_16.ckpt'
        model = CNN_tf('vgg', model_file)
    else:
        model = None
    data = []  # [(video_path, cnn_feature, y)]
    # list device folders (e.g., refrigerator or camera)
    i = 0  # running index over processed videos (for progress printing)
    cnt_3 = 0  # camera_3
    cnt_32 = 0  # camera_32: backup
    for device_dir in sorted(in_dir):
        out_dir_sub = ''
        if device_type not in device_dir: continue
        # list activity folders (e.g., open_close or take_out )
        for activity_dir in sorted(os.listdir(device_dir)):
            # the folder name doubles as the class label
            activity_label = activity_dir
            out_dir_activity = activity_dir
            activity_dir = os.path.join(device_dir, activity_dir)
            if not os.path.exists(activity_dir) or '.DS_Store' in activity_dir or not os.path.isdir(
                activity_dir): continue
            # list participant folders (e.g., participant 1 or participant 2)
            for participant_dir in sorted(os.listdir(activity_dir)):
                out_dir_participant = participant_dir
                out_dir_sub = os.path.join(participant_dir)
                participant_dir = os.path.join(activity_dir, participant_dir)
                if not os.path.exists(participant_dir) or '.DS_Store' in participant_dir: continue
                # print(participant_dir)
                # list videos (e.g., 'no_interaction_1_1614038765_1.mp4')
                for f in sorted(os.listdir(participant_dir)):
                    if ('mp4' not in f) and ('mkv' not in f): continue  # only process video file.
                    x = os.path.join(participant_dir, f)
                    if '_3.mp4' in f: cnt_3 += 1
                    if '_3 2.mp4' in f: cnt_32 += 1
                    print(f'i: {i}, {x}')
                    try:
                        out_dir_tmp = os.path.join(out_dir, out_dir_activity, out_dir_participant)
                        if is_cnn_feature:
                            x_feat = _extract_video_feature(model, x, out_dir=out_dir_tmp)
                        else:
                            x_feat = ''
                        # NOTE(review): the original video is recorded under
                        # out_dir_tmp, while the mirrored entry below stores
                        # _mirror_video's return value — presumably also
                        # under out_dir_tmp; verify they are consistent.
                        data.append((os.path.join(out_dir_tmp, f), x_feat, activity_label))
                        if is_mirror:
                            mirrored_x = _mirror_video(x, out_dir=out_dir_tmp)
                            if is_cnn_feature:
                                mirrored_x_feat = _extract_video_feature(model, mirrored_x, out_dir=out_dir_tmp)
                            else:
                                mirrored_x_feat = ''
                            data.append((mirrored_x, mirrored_x_feat, activity_label))
                    except Exception as e:
                        # any failure aborts the whole generation run
                        msg = f'error: {e} on {x}'
                        raise ValueError(msg)
                    i += 1
    print(f'camera_3: {cnt_3}, camera_32 (backup): {cnt_32}')
    meta = {'data': data, 'is_mirror': is_mirror, 'is_cnn_feature': is_cnn_feature}
    return meta
def main(random_state=42):
###############################################################################################################
# Step 1. check if the Xy.dat exists. Xy.dat includes the train and test set.
in_dir = '../out/data/data-clean/refrigerator'
Xy_train_test_file = f'{in_dir}/Xy_train_test.dat'
if os.path.exists(Xy_train_test_file):
os.remove(Xy_train_test_file)
if not os.path.exists(Xy_train_test_file):
###############################################################################################################
# Step 2. Get all cnn_features from videos
Xy_cnn_features_file = f'{in_dir}/Xy_cnn_features.dat'
# if os.path.exists(Xy_cnn_features_file): os.remove(Xy_cnn_features_file)
if not os.path.exists(Xy_cnn_features_file):
in_raw_dir = 'data/data-clean/refrigerator'
# mirror the data and extract the features by CNN
meta = gen_Xy([in_raw_dir], out_dir=in_dir, is_mirror=True, is_cnn_feature=True)
dump_data(meta, out_file=Xy_cnn_features_file)
###############################################################################################################
# Step 3. Split the features to train and test set according to camera 1 (i.e., front view-'_1.mp4') .
if not os.path.exists(Xy_train_test_file):
meta = load_data(Xy_cnn_features_file)
# video_type = {'_1.mp4': front view,'_3.mp4': side view and mirrored (up to down) view, 'mkv': side view}
video_type = '_1.mp4' # split the train and test based on 'camera_1' (i.e, '_1.mp4')
test_size = 0.3
label2idx = {'no_interaction': 0, 'open_close_fridge': 1, 'put_back_item': 2, 'screen_interaction': 3,
'take_out_item': 4}
idx2label = {0: 'no_interaction', 1: 'open_close_fridge', 2: 'put_back_item', 3: 'screen_interaction',
4: 'take_out_item'}
###############################################################################################################
# Step 3.1. Split the videos capured by camera 1 (i.e., front view-'_1.mp4') to train and test set.
train_meta, test_meta = split_train_test_video(meta, video_type, test_size=test_size,
random_state=random_state)
###############################################################################################################
# Step 3.2. change label to idx
train_meta = change_label2idx(train_meta, label2idx)
test_meta = change_label2idx(test_meta, label2idx)
###############################################################################################################
# Step 3.3. dump all data to disk
meta = {'train_meta': train_meta, 'test_meta': test_meta, 'test_size': test_size,
'label2idx': label2idx, 'idx2label': idx2label}
dump_data(meta, out_file=Xy_train_test_file)
###############################################################################################################
# Step 4. load Xy_train_test.dat
meta = load_data(Xy_train_test_file)
train_meta = meta['train_meta']
test_meta = meta['test_meta']
test_size = meta['test_size']
###############################################################################################################
# Step 5. obtain final feature data (X_train and X_test) from CNN features with different methods
train_meta = cnn_feature2final_feature(train_meta, feature_type='mean', is_test=False)
test_meta = cnn_feature2final_feature(test_meta, feature_type='mean', is_test=True)
###############################################################################################################
# Step 6. if augment data or not
# X_train_meta, X_train, y_train = augment_train(train_meta, augment_type='camera_1',
# is_mirror=False)
X_train_meta, X_train, y_train = augment_train(train_meta, augment_type='camera_1+camera_2+camera_3',
is_mirror=False)
X_test_meta, X_test, y_test = | |
import unittest
from unittest import mock
from printy.exceptions import InvalidFlag, InvalidInputType
from printy.core import Printy, WINDOWS
from printy.flags import Flags
class TestGlobalFlagsPrinty(unittest.TestCase):
    """ Test case for formatting with a global set of flags specified """
    def setUp(self):
        # Shared fixture: one Printy instance and shortcuts to the two
        # methods exercised throughout this case.
        self.sample_text = "Some Text To Print Out"
        self.printy = Printy()
        self.raw_text = self.printy.get_formatted_text
        self.esc = self.printy.escape
    def test_empty_value(self):
        """ Tests that passing an empty value prints nothing """
        text = ''
        result = self.raw_text(text)
        self.assertEqual(result, text)
    def test_empty_value_with_flags(self):
        """
        Tests that passing an empty value with some flags returns the
        escape ansi characters
        """
        text = ''
        flags = 'rBH'
        result = self.raw_text(text, flags)
        expected_result = "%s%s" % (
            Flags.join_flags(Flags.get_flag_values(flags)),
            Flags.get_end_of_line()
        )
        self.assertEqual(result, expected_result)
    def test_single_invalid_flag(self):
        """
        Tests that passing an invalid flag (only one)
        raises an exception
        """
        invalid_flag = 'P'
        with self.assertRaises(InvalidFlag):
            self.printy.format(self.sample_text, invalid_flag)
    def test_multiple_invalid_flag(self):
        """
        Tests that passing multiple invalid flags raises an
        exception with the first invalid flag found
        """
        # P and G are invalid, should raise InvalidFlag
        # with 'P' as invalid flag
        flags = 'yBPGr'
        with self.assertRaises(InvalidFlag) as e:
            self.printy.format(self.sample_text, flags)
        self.assertEqual(e.exception.flag, 'P')
    def test_high_intensity_flag_color(self):
        """
        Checks the correct format is returned for a high
        intensity (>) flag color
        """
        flag = 'p>'
        text = 'Hello'
        expected_text = '\x1b[38;5;98mHello\x1b[0m'
        self.assertEqual(self.raw_text(text, flag), expected_text)
    def test_low_intensity_flag_color(self):
        """
        Checks the correct format is returned for a low
        intensity (<) flag color
        """
        flag = '<p'
        text = 'Hello'
        expected_text = '\x1b[38;5;54mHello\x1b[0m'
        self.assertEqual(self.raw_text(text, flag), expected_text)
    def tests_always_closing_format(self):
        """
        Tests that the returned text always ends with the closing tag
        """
        result = self.raw_text(self.sample_text, 'r')
        closing_tag = result[-4:]
        self.assertEqual(len(closing_tag), 4)
        self.assertEqual(closing_tag, Flags.get_end_of_line())
    def test_no_flag_parameter_passed(self):
        """
        Tests that passing no flag parameter will return a default value
        """
        result = self.raw_text(self.sample_text)
        self.assertEqual(result, self.sample_text)
    def test_empty_flag(self):
        """
        Tests that passing an empty string as a flag still returns the
        default value
        """
        result = self.raw_text(self.sample_text, '')
        self.assertEqual(result, self.sample_text)
    def test_flags_with_spaces_in_between(self):
        """
        Tests that passing a set of flags with some spaces in between
        (like 'yB H U') still applies the desired formats
        """
        desired_flags = 'yBH'
        flags_with_one_space = 'yB H'
        flags_with_multiple_spaces = 'y B H'
        result_one = self.raw_text(self.sample_text, desired_flags)
        result_two = self.raw_text(self.sample_text, flags_with_one_space)
        result_three = self.raw_text(self.sample_text, flags_with_multiple_spaces)
        self.assertTrue(result_one == result_two == result_three)
    def test_escape_with_global_flags(self):
        """
        Tests escaping values with global flags
        """
        text = '[n]escaped@'
        expected_text = '\x1b[38;5;196m[n]escaped@\x1b[0m'
        result = self.raw_text(self.esc(text), 'r')
        self.assertEqual(result, expected_text)
    @mock.patch('printy.core.Printy.set_windows_console_mode', return_value=True)
    def test_virtual_terminal_processing_on_windows(self, mock_console_mode):
        """
        Tests that if the platform is Windows, console-mode setup returns True
        """
        # NOTE(review): this invokes the patched mock directly, so it only
        # asserts the mock's configured return value — it does not exercise
        # any real console configuration path.
        self.printy.platform = WINDOWS
        virtual_terminal_processing = mock_console_mode()
        self.assertTrue(virtual_terminal_processing)
    def test_return_cleaned_value_if_windows_is_not_properly_configured(self):
        """
        Tests that if printy virtual_console_mode is false, then it returns the
        cleaned_text
        """
        flags = 'rBH'
        # Changes platform to Windows
        self.printy.platform = WINDOWS
        self.printy.virtual_terminal_processing = False
        result_one = self.raw_text(self.sample_text, flags)
        self.assertEqual(result_one, self.sample_text)
    def test_background_color_with_global_flags(self):
        """
        Tests background color with global flags
        """
        flags = 'yB{o}'
        text = 'Hello'
        expected_text = '\x1b[48;5;208;38;5;11;1mHello\x1b[0m'
        self.assertEqual(self.raw_text(text, flags), expected_text)
    def test_background_color_no_flag_with_global_flags(self):
        """
        Tests background color with no flag for it, with global flags
        """
        flags = 'yB{}'
        text = 'Hello'
        expected_text = '\x1b[38;5;11;1mHello\x1b[0m'
        self.assertEqual(self.raw_text(text, flags), expected_text)
class TestInlineFlagsPrinty(unittest.TestCase):
    """Test case for inline (per-section) formatting."""

    def setUp(self):
        self.printy = Printy()
        self.raw_text = self.printy.get_formatted_text
        self.esc = self.printy.escape

    def test_inline_format_with_global_flags(self):
        """
        Tests that passing a text with inline formatting and also a global
        set of flags takes this last one as the format to be applied
        """
        inline_formatted = "[y]Hey you@"
        no_format = 'Hey you'
        global_flags = 'rB'
        result_one = self.raw_text(inline_formatted, global_flags)
        result_two = self.raw_text(no_format, global_flags)
        self.assertEqual(result_one, result_two)

    def test_inline_format_without_ending_format_character(self):
        """
        Tests that passing an inline formatted text without the ending
        formatting character still returns the formatted text
        """
        result_one = self.raw_text('[y]Hey you')
        result_two = self.raw_text('[y]Hey you@')
        self.assertEqual(result_one, result_two)

    def test_escape_special_characters(self):
        """ Tests that escaping special characters prints them out """
        inline_text_one = '[y]<EMAIL>@'
        global_text_one = '<EMAIL>', 'y'
        inline_text_two = '[bH]Some text \@@'
        global_text_two = 'Some text @', 'bH'
        inline_result_one = self.raw_text(inline_text_one)
        global_result_one = self.raw_text(global_text_one[0], global_text_one[1])
        inline_result_two = self.raw_text(inline_text_two)
        global_result_two = self.raw_text(global_text_two[0], global_text_two[1])
        self.assertEqual(inline_result_one, global_result_one)
        self.assertEqual(inline_result_two, global_result_two)

    def test_multiple_sections(self):
        """ Test that formats are applied correctly to each section """
        section_one = "Some"
        section_two = ' '
        section_three = 'text'
        global_format_one = self.raw_text(section_one, 'rB')
        global_format_two = self.raw_text(section_two)
        global_format_three = self.raw_text(section_three, 'y')
        joined_global_format = global_format_one + global_format_two + global_format_three
        inline_text = '[rB]Some@ [y]text@'
        inline_format = self.raw_text(inline_text)
        self.assertEqual(inline_format, joined_global_format)

    def test_read_file(self):
        """ Test retrieving the text from a file """
        text_in_file = 'printy'
        file_name = 'printy_file'
        with mock.patch('builtins.open', mock.mock_open(read_data=text_in_file)) as m:
            result = self.printy.read_file(file_name)
            m.assert_called_once_with(file_name)
        self.assertEqual(result, text_in_file)

    def test_escape_special_chars_method(self):
        """
        Test escaping special characters correctly; this method is used when
        an object other than a string is passed
        """
        text_to_escape = '[some text @ ]'
        expected_value = '\[some text \@ \]'
        escaped_text = Printy._escape_special_chars(text_to_escape)
        self.assertEqual(expected_value, escaped_text)

    def test_pretty_print_dicts(self):
        """ Test pretty printing dictionaries """
        dict_to_print = {'name': '<NAME>', 'age': 34}
        expected_result = '{\n [n>]\'name\'@: [c>]\'<NAME>\'@[<oB],@\n [n>]\'age\'@: [c]34@[<oB],@\n}'
        pretty_dict = Printy._repr_value(dict_to_print)
        self.assertEqual(expected_result, pretty_dict)

    def test_pretty_print_lists(self):
        """ Test pretty printing lists """
        list_to_print = [1, 2, 'hello']
        expected_result = '\[\n [c]1@[<oB],@ [c]2@[<oB],@ [c>]\'hello\'@\n\]'
        pretty_list = Printy._repr_value(list_to_print)
        self.assertEqual(expected_result, pretty_list)

    def test_pretty_printy_tuples(self):
        """ Test pretty printing tuples """
        tuple_to_print = (1, 2, 'hello')
        expected_result = '(\n [c]1@[<oB],@ [c]2@[<oB],@ [c>]\'hello\'@\n)'
        pretty_tuple = Printy._repr_value(tuple_to_print)
        self.assertEqual(expected_result, pretty_tuple)

    def test_pretty_printy_sets(self):
        """ Test pretty printing sets """
        set_to_print = {1, 2, 'hello'}
        expected_result = '{\n [c]1@[<oB],@ [c]2@[<oB],@ [c>]\'hello\'@\n}'
        pretty_set = Printy._repr_value(set_to_print)
        self.assertEqual(expected_result, pretty_set)

    def test_pretty_printy_dict_pretty_false(self):
        """ Tests pretty printing a dict when 'pretty' parameter is set to False """
        dict_to_print = {'name': '<NAME>', 'age': 34}
        expected_result = '{\'name\': \'<NAME>\', \'age\': 34}'
        not_pretty_dict = Printy._repr_value(dict_to_print, pretty=False)
        self.assertEqual(expected_result, not_pretty_dict)

    def test_pretty_printy_list_pretty_false(self):
        """ Tests pretty printing a list when 'pretty' parameter is set to False """
        list_to_print = [1, 2, 'hello']
        expected_result = '\[1, 2, \'hello\'\]'
        not_pretty_list = Printy._repr_value(list_to_print, pretty=False)
        self.assertEqual(expected_result, not_pretty_list)

    def test_pretty_printy_tuple_pretty_false(self):
        """ Tests pretty printing a tuple when 'pretty' parameter is set to False """
        tuple_to_print = (1, 2, 'hello')
        expected_result = '(1, 2, \'hello\')'
        not_pretty_tuple = Printy._repr_value(tuple_to_print, pretty=False)
        self.assertEqual(expected_result, not_pretty_tuple)

    def test_pretty_printy_set_pretty_false(self):
        """ Tests pretty printing a set when 'pretty' parameter is set to False """
        set_to_print = {1, 2, 'hello'}
        expected_result = '{1, 2, \'hello\'}'
        not_pretty_set = Printy._repr_value(set_to_print, pretty=False)
        self.assertEqual(expected_result, not_pretty_set)

    def test_pretty_print_str_method_of_objects(self):
        """ Test printing the str method of an object, both not defined and defined """
        builtin_obj = int
        expected_builtin_result = '<class \'int\'>'
        pretty_builtin = Printy._repr_value(builtin_obj)

        class Person:
            def __str__(self):
                return '[c]I am a person@'
        custom_str = Person()
        # Notice how it should not return the escaped character
        expected_custom_result = '[c]I am a person@'
        pretty_custom = Printy._repr_value(custom_str)
        self.assertEqual(expected_builtin_result, pretty_builtin)
        self.assertEqual(expected_custom_result, pretty_custom)

    def test_pretty_object_in_dictionary(self):
        """
        Test pretty printing an str method of an object inside a dictionary
        or any iterable, it should give it a light magenta color
        """
        dict_to_print = {'class': int}
        expected_result = '{\n [n>]\'class\'@: <class \'int\'>[<oB],@\n}'
        pretty_dict = Printy._repr_value(dict_to_print)
        self.assertEqual(expected_result, pretty_dict)

    def test_pretty_custom_str_method_in_dictionary(self):
        class CustomStrMethod:
            def __str__(self):
                return '[rBU]Red Bold Underlined@ and [y]Yellow@'
        dict_to_print = {'str': CustomStrMethod()}
        expected_result = '{\n [n>]\'str\'@: [rBU]Red Bold Underlined@ and [y]Yellow@[<oB],@\n}'
        pretty_dict = Printy._repr_value(dict_to_print)
        self.assertEqual(expected_result, pretty_dict)

    def test_print_number(self):
        integer_to_print = 123
        float_to_print = 123.45
        expected_result_integer = '[c]123@'
        expected_result_float = '[c]123.45@'
        result_integer = Printy._repr_value(integer_to_print)
        result_float = Printy._repr_value(float_to_print)
        self.assertEqual(expected_result_integer, result_integer)
        self.assertEqual(expected_result_float, result_float)

    def test_print_boolean(self):
        expected_false = '[<o]False@'
        expected_true = '[<o]True@'
        result_false = Printy._repr_value(False)
        result_true = Printy._repr_value(True)
        self.assertEqual(expected_false, result_false)
        self.assertEqual(expected_true, result_true)

    def test_print_none(self):
        expected_none = '[<o]None@'
        result_none = Printy._repr_value(None)
        self.assertEqual(expected_none, result_none)

    def test_escape_with_inline_flags(self):
        """
        Test escaping values on inline formats
        """
        email = '<EMAIL>'
        expected_text = '\x1b[38;5;28<EMAIL>\x1b[0m'
        result = self.raw_text(f'[n]{self.esc(email)}@')
        self.assertEqual(result, expected_text)

    def test_background_color_with_inline_flags(self):
        """
        Test background color with inline flags
        """
        text = '[yB{o}]Hello@'
        expected_text = '\x1b[48;5;208;38;5;11;1mHello\x1b[0m'
        self.assertEqual(self.raw_text(text), expected_text)

    # Renamed from test_background_color_no_flag_with_global_flags: this
    # test exercises *inline* flags, and the old name shadowed the
    # equally-named global-flags test in the other test case.
    def test_background_color_no_flag_with_inline_flags(self):
        """
        Test background color with no flag for it, with inline flags
        """
        text = '[yB{}]Hello@'
        expected_text = '\x1b[38;5;11;1mHello\x1b[0m'
        self.assertEqual(self.raw_text(text), expected_text)
class TestInputy(unittest.TestCase):
"""
Test case for inputy functionality
Here, it is not necessary to test whether the prompted message has the
correct format because it uses the methods already tested in the Printy
test cases
"""
def setUp(self):
self.inputy = Printy()
str_valid_test = "Valid String"
int_valid_test = 23
float_valid_test = 45.6
bool_valid_test = | |
# bayesfast/core/recipe.py
from .module import Surrogate
from .density import Density, DensityLite
from .sample import sample
from ..modules.poly import PolyConfig, PolyModel
from ..utils import Laplace, threadpool_limits, check_client
from ..utils.random import check_state, resample, multivariate_normal
import numpy as np
from distributed import Client
from collections import namedtuple, OrderedDict
import warnings
from copy import deepcopy
__all__ = ['BaseStep', 'OptimizeStep', 'SampleStep', 'PostStep', 'Recipe']
# TODO: early stop in pipeline evaluation
# TODO: use tqdm to add progress bar for _map_fun
# TODO: better control when we don't have enough points before resampling
# TODO: allow IS over hmc_samples in OptimizeStep
# TODO: review the choice of x_0 for SampleStep
# TODO: monitor the progress of IS
# TODO: improve optimization with trust region?
# https://arxiv.org/pdf/1804.00154.pdf
class BaseStep:
    """Common configuration shared by the Recipe step classes.

    Parameters
    ----------
    surrogate_list : Surrogate or iterable of Surrogate, optional
        The surrogate model(s) used by this step.
    fit_options : dict or iterable of dict, optional
        Options for fitting each surrogate.  A single dict is replicated
        once per surrogate; a short iterable is padded with empty dicts.
    alpha_n : float, optional
        Positive factor controlling the number of evaluation points
        (see ``n_eval``).
    sample_options : dict, optional
        Options passed to the sampler.
    prefit : bool, optional
        Whether the surrogate(s) are already fitted.
    """

    def __init__(self, surrogate_list=[], fit_options={}, alpha_n=2,
                 sample_options={}, prefit=False):
        if isinstance(surrogate_list, Surrogate):
            self._surrogate_list = [surrogate_list]
        elif (hasattr(surrogate_list, '__iter__') and
              all(isinstance(su, Surrogate) for su in surrogate_list)):
            self._surrogate_list = list(surrogate_list)
        else:
            raise ValueError('surrogate_list should be a Surrogate or consist '
                             'of Surrogate(s).')
        if isinstance(fit_options, dict):
            # Give every surrogate its own copy.  Replicating the same dict
            # object (as before) would alias any later per-surrogate
            # modification across all slots; the iterable branch below
            # already pads with *distinct* empty dicts, showing the intent.
            self._fit_options = [
                dict(fit_options) for i in range(max(1, self.n_surrogate))]
        elif (hasattr(fit_options, '__iter__') and
              all(isinstance(fi, dict) for fi in fit_options)):
            self._fit_options = list(fit_options)
            if len(self._fit_options) < self.n_surrogate:
                # Pad with one independent empty dict per missing slot.
                self._fit_options.extend([{} for i in range(
                    self.n_surrogate - len(self._fit_options))])
        else:
            raise ValueError(
                'fit_options should be a dict or consist of dict(s).')
        alpha_n = float(alpha_n)
        if alpha_n <= 0:
            raise ValueError('alpha_n should be positive.')
        self._alpha_n = alpha_n
        if not isinstance(sample_options, dict):
            raise ValueError('sample_options should be a dict.')
        self._sample_options = sample_options
        self._prefit = bool(prefit)

    @property
    def surrogate_list(self):
        return self._surrogate_list

    @property
    def n_surrogate(self):
        """Number of surrogates attached to this step."""
        return len(self._surrogate_list)

    @property
    def has_surrogate(self):
        """Whether at least one surrogate is attached."""
        return self.n_surrogate > 0

    @property
    def fit_options(self):
        return tuple(self._fit_options)

    @property
    def alpha_n(self):
        return self._alpha_n

    @property
    def n_eval(self):
        # alpha_n times the largest surrogate parameter count.
        # NOTE(review): raises ValueError if surrogate_list is empty.
        return int(self._alpha_n *
                   max(su.n_param for su in self._surrogate_list))

    @property
    def sample_options(self):
        return self._sample_options

    @property
    def prefit(self):
        return self._prefit
class OptimizeStep(BaseStep):
    """Configuration for the optimization phase of a Recipe.

    Extends BaseStep with convergence tolerances for the iterative
    surrogate optimization and optional HMC exploration afterwards.
    """

    def __init__(self, surrogate_list=[], fit_options={}, alpha_n=2.,
                 sample_options={'beta': 0.01}, prefit=False, eps_pp=0.1,
                 eps_pq=0.1, max_iter=10, run_hmc=False, hmc_options={}):
        super().__init__(surrogate_list, fit_options, alpha_n, sample_options,
                         prefit)
        # eps_pp / eps_pq are positive convergence tolerances (presumably
        # p-vs-p across iterations and p-vs-q agreement — confirm against
        # the Recipe optimization loop).
        eps_pp = float(eps_pp)
        if eps_pp <= 0:
            raise ValueError('eps_pp should be a positive float.')
        self._eps_pp = eps_pp
        eps_pq = float(eps_pq)
        if eps_pq <= 0:
            raise ValueError('eps_pq should be a positive float.')
        self._eps_pq = eps_pq
        # At least two iterations are required to compare successive results.
        max_iter = int(max_iter)
        if max_iter < 2:
            raise ValueError('max_iter should be larger than 1.')
        self._max_iter = max_iter
        # Optionally run HMC sampling after optimization converges.
        self._run_hmc = bool(run_hmc)
        if not isinstance(hmc_options, dict):
            raise ValueError('hmc_options should be a dict.')
        self._hmc_options = hmc_options

    @property
    def eps_pp(self):
        return self._eps_pp

    @property
    def eps_pq(self):
        return self._eps_pq

    @property
    def max_iter(self):
        return self._max_iter

    @property
    def run_hmc(self):
        return self._run_hmc

    @property
    def hmc_options(self):
        return self._hmc_options
class SampleStep(BaseStep):
    """Configuration for one sampling phase of a Recipe.

    Extends BaseStep with resampling options and the thresholds that
    control reuse of previously evaluated points.
    """

    def __init__(self, surrogate_list=[], fit_options={}, alpha_n=2.,
                 sample_options={}, prefit=False, resample_options={},
                 reuse_steps=0, logp_cutoff=True, alpha_min=1.5,
                 alpha_supp=0.1, adapt_metric=False):
        super().__init__(surrogate_list, fit_options, alpha_n, sample_options,
                         prefit)
        if not isinstance(resample_options, dict):
            raise ValueError('resample_options should be a dict.')
        self._resample_options = resample_options
        # Number of previous steps whose samples may be reused.
        # NOTE(review): negative values are not rejected here — confirm
        # whether that is intentional.
        self._reuse_steps = int(reuse_steps)
        # Whether to apply a log-probability cutoff when selecting points.
        self._logp_cutoff = bool(logp_cutoff)
        alpha_min = float(alpha_min)
        if alpha_min <= 0:
            raise ValueError('alpha_min should be a positive float.')
        self._alpha_min = alpha_min
        alpha_supp = float(alpha_supp)
        if alpha_supp <= 0:
            raise ValueError('alpha_supp should be a positive float.')
        self._alpha_supp = alpha_supp
        self._adapt_metric = bool(adapt_metric)

    @property
    def resample_options(self):
        return self._resample_options

    @property
    def reuse_steps(self):
        return self._reuse_steps

    @property
    def logp_cutoff(self):
        return self._logp_cutoff

    @property
    def alpha_min(self):
        return self._alpha_min

    @property
    def alpha_supp(self):
        return self._alpha_supp

    @property
    def adapt_metric(self):
        return self._adapt_metric

    @property
    def n_eval_min(self):
        # Minimum number of evaluations: alpha_min * largest n_param.
        return int(self._alpha_min *
                   max(su.n_param for su in self._surrogate_list))

    @property
    def n_eval_supp(self):
        # Number of supplementary evaluations: alpha_supp * largest n_param.
        return int(self._alpha_supp *
                   max(su.n_param for su in self._surrogate_list))
class PostStep:
    """Configuration for the post-processing phase of a Recipe.

    Parameters
    ----------
    n_is : int or None, optional
        Number of importance samples; ``None`` means 0.
    k_trunc : float or None, optional
        Truncation exponent for the IS weights; ``None`` means 0.25.
    """

    def __init__(self, n_is=None, k_trunc=None):
        self._n_is = 0 if n_is is None else int(n_is)
        self._k_trunc = 0.25 if k_trunc is None else float(k_trunc)

    @property
    def n_is(self):
        return self._n_is

    @property
    def k_trunc(self):
        return self._k_trunc
RecipePhases = namedtuple('RecipePhases', 'optimize, sample, post')
class RecipeResult:
    """Holds the configured steps and the data accumulated by a Recipe.

    Parameters
    ----------
    optimize : OptimizeStep or None, optional
    sample : SampleStep or iterable of SampleStep, optional
    post : PostStep or None, optional
        When None and at least one SampleStep is given, a default
        PostStep is created; otherwise it stays None.
    x_0 : array-like or None, optional
        Initial points; last axis must match ``input_size``.
    random_state : optional
        Seed/state accepted by ``check_state``.
    input_size : int
        Dimensionality of the density input; must be positive.
    """

    def __init__(self, optimize=None, sample=[], post=None, x_0=None,
                 random_state=None, input_size=None):
        # --- step configuration ------------------------------------------
        if isinstance(optimize, OptimizeStep) or optimize is None:
            self._s_optimize = optimize
        else:
            raise ValueError('optimize should be a OptimizeStep or None.')
        if isinstance(sample, SampleStep):
            self._s_sample = [sample]
        elif (hasattr(sample, '__iter__') and
              all(isinstance(sam, SampleStep) for sam in sample)):
            self._s_sample = list(sample)
        else:
            raise ValueError('sample should be a SampleStep, or consists of '
                             'SampleStep(s).')
        if isinstance(post, PostStep):
            pass
        elif post is None:
            # Only add a default PostStep when there is something to
            # post-process; with no SampleStep, post stays None.
            if len(self._s_sample):
                post = PostStep()
        else:
            raise ValueError('post should be a PostStep or None.')
        self._s_post = post
        # --- accumulated data and per-phase progress counters ------------
        self._d_optimize = []
        self._d_sample = []
        self._d_post = []
        self._n_optimize = 0 if self._s_optimize is None else 1
        self._n_sample = len(self._s_sample)
        self._n_post = 0 if self._s_post is None else 1
        self._i_optimize = 0
        self._i_sample = 0
        self._i_post = 0
        self._random_state = check_state(random_state)
        self._random_state_init = deepcopy(self._random_state)
        input_size = int(input_size)
        if input_size < 1:
            # input_size == 1 is accepted by the check above, so the
            # message says "positive" (the old text claimed
            # "larger than 1", contradicting the condition).
            raise ValueError('input_size should be a positive int.')
        self._input_size = input_size
        if x_0 is None:
            warnings.warn('you did not give me x_0, so I will use standard '
                          'Gaussian when needed. If this fails, you should '
                          'consider give me some better x_0.', RuntimeWarning)
            self._x_0 = x_0
        else:
            x_0 = np.atleast_2d(x_0)
            if x_0.shape[-1] != self._input_size:
                raise ValueError(
                    'the shape of x_0 is not consistent with the density.')
            self._x_0 = x_0.reshape((-1, self._input_size))

    @property
    def data(self):
        """RecipePhases of the data collected in each phase."""
        return RecipePhases(self._d_optimize, self._d_sample, self._d_post)

    @property
    def steps(self):
        """RecipePhases of the configured steps."""
        return RecipePhases(self._s_optimize, self._s_sample, self._s_post)

    @property
    def i(self):
        """RecipePhases of the current progress index of each phase."""
        return RecipePhases(self._i_optimize, self._i_sample, self._i_post)

    @property
    def n(self):
        """RecipePhases of the total number of steps of each phase."""
        return RecipePhases(self._n_optimize, self._n_sample, self._n_post)

    @property
    def n_call(self):
        """Total number of density evaluations recorded in the data."""
        _n_call = 0
        for _opt in self.data.optimize:
            if len(_opt.surrogate_list) > 0:
                _n_call += len(_opt.var_dicts)
            else:
                raise NotImplementedError
        for _sam in self.data.sample:
            if len(_sam.surrogate_list) > 0:
                _n_call += len(_sam.var_dicts)
            else:
                raise NotImplementedError
        for _pos in self.data.post:
            if _pos.weights is None:
                pass
            else:
                _n_call += len(_pos.weights)
        return _n_call

    @property
    def x_0(self):
        return self._x_0

    @property
    def random_state(self):
        return self._random_state

    @property
    def random_state_init(self):
        """A fresh copy of the initial random state."""
        return deepcopy(self._random_state_init)

    @property
    def input_size(self):
        return self._input_size

    @property
    def finished(self):
        """RecipePhases of booleans: whether each phase has completed."""
        return RecipePhases(self._i_optimize == self._n_optimize,
                            self._i_sample == self._n_sample,
                            self._i_post == self._n_post)
# Density values at a point, in both the original (logp/logq) and the
# transformed (logp_trans/logq_trans) spaces; p = true density, q = surrogate.
DensityQuartet = namedtuple('DensityQuartet',
                            'logp, logq, logp_trans, logq_trans')
# Per-phase result records appended to RecipeResult's data lists.
OptimizeResult = namedtuple('OptimizeResult', 'x_max, f_max, samples, '
                            'surrogate_list, hmc_samples, var_dicts, '
                            'Laplace, trace')
SampleResult = namedtuple('SampleResult', 'samples, surrogate_list, '
                          'var_dicts, trace')
PostResult = namedtuple('PostResult', 'samples, weights, logp, logq, '
                        'samples_raw, weights_raw')
class Recipe:
def __init__(self, density, client=None, result=None, optimize=None,
sample=[], post=None, x_0=None, random_state=None):
if isinstance(density, (Density, DensityLite)):
self._density = density
else:
raise ValueError('density should be a Density or DensityLite.')
self.client = client
if result is None:
self._result = RecipeResult(optimize, sample, post, x_0,
random_state, self._density.input_size)
elif isinstance(result, RecipeResult):
if result.input_size == self._density.input_size:
self._result = result
else:
raise ValueError('the input_size of the result is inconsistent '
'with the Recipe.')
else:
raise ValueError('result should be a RecipeResult or None.')
    @property
    def density(self):
        """The Density / DensityLite this recipe operates on."""
        return self._density

    @property
    def client(self):
        """Client used by ``_map_fun`` for parallel evaluation."""
        return self._client

    @client.setter
    def client(self, clt):
        # NOTE(review): stored without validation; check_client is imported
        # at module level but not applied here — confirm intent.
        self._client = clt

    @property
    def input_size(self):
        """Dimensionality of the density input."""
        return self._density.input_size

    @property
    def result(self):
        """The RecipeResult holding configured steps and collected data."""
        return self._result

    @property
    def n_call(self):
        """Total number of density evaluations recorded in the result."""
        return self._result.n_call

    @classmethod
    def _map_fun(cls, client, density, x):
        """Evaluate ``density.fun`` over the points in x via the client."""
        foo = client.map(density.fun, x)
        return client.gather(foo)

    # The properties below simply forward to the underlying density, so the
    # recipe exposes the same evaluation / transform interface.

    @property
    def logp(self):
        return self._density.logp

    @property
    def grad(self):
        return self._density.grad

    @property
    def logp_and_grad(self):
        return self._density.logp_and_grad

    @property
    def to_original(self):
        return self._density.to_original

    @property
    def to_original_grad(self):
        return self._density.to_original_grad

    @property
    def to_original_grad2(self):
        return self._density.to_original_grad2

    @property
    def to_original_density(self):
        return self._density.to_original_density

    @property
    def from_original(self):
        return self._density.from_original

    @property
    def from_original_grad(self):
        return self._density.from_original_grad

    @property
    def from_original_grad2(self):
        return self._density.from_original_grad2

    @property
    def from_original_density(self):
        return self._density.from_original_density
    def _opt_surro(self, x_0, var_dicts):
        """Run a Laplace approximation on the surrogate density and append
        the outcome to the optimize data.

        Parameters
        ----------
        x_0 : array-like
            Starting points in the original space; only the first row is
            used as the Laplace starting point.
        var_dicts : object
            Evaluation records stored verbatim in the OptimizeResult.
        """
        steps = self.result.steps.optimize
        data = self.result.data.optimize
        # Optimize the *surrogate* density in the transformed space.
        _logp = lambda x: self.logp(x, use_surrogate=True, original_space=False)
        _grad = lambda x: self.grad(x, use_surrogate=True, original_space=False)
        x_0 = self.from_original(x_0[0])
        laplace = Laplace(_logp, x_0, grad=_grad)
        lap_res = laplace.run(**steps._sample_options)
        # Evaluate the *true* density at the surrogate optimum, and convert
        # both densities between the original and transformed spaces.
        x_max = self.to_original(lap_res.x_max)
        logp = self.logp(x_max, use_surrogate=False, original_space=True)
        logq_trans = lap_res.f_max
        logp_trans = self.from_original_density(density=logp, x=x_max)
        logq = self.to_original_density(density=logq_trans, x=x_max)
        f_max = DensityQuartet(float(logp), float(logq), float(logp_trans),
                               float(logq_trans))
        samples = self.to_original(lap_res.samples)
        # Snapshot the surrogates as fitted at this iteration.
        surrogate_list = deepcopy(self._density._surrogate_list)
        data.append(
            OptimizeResult(x_max=x_max, f_max=f_max, samples=samples,
                           surrogate_list=surrogate_list, hmc_samples=None,
                           var_dicts=var_dicts, Laplace=lap_res, trace=None))
def _opt_step(self):
# DEVELOPMENT NOTES
# if has surrogate, iterate until convergence
# if no surrogate, just run on true model
# in the end, optionally run hmc
| |
#!/usr/bin/python2
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Recipes for NativeClient toolchain packages.
The real entry plumbing is in toolchain_main.py.
"""
import platform
import sys
import command
import toolchain_main
# Pinned git revisions of the toolchain components being built.
GIT_REVISIONS = {
    'binutils': '7843d8d31065be1d961ec04f35fba04b65f7708b',
    'gcc': '7e29dc7ddbec30ae2e8eaa202649ff6182986243',
    'newlib': '5feee65e182c08a7e89fbffc3223c57e4335420f',
}

TARGET_LIST = ['arm']

# These are extra arguments to pass gcc's configure that vary by target.
TARGET_GCC_CONFIG = {
    # TODO(mcgrathr): Disabled tuning for now, tickling a constant-pool layout bug.
    # 'arm': ['--with-tune=cortex-a15'],
}

PACKAGE_NAME = 'Native Client SDK [%(build_signature)s]'
BUG_URL = 'http://gonacl.com/reportissue'

GIT_BASE_URL = 'https://chromium.googlesource.com/native_client'

# Common tar invocations for unpacking source tarballs in place.
TAR_XV = ['tar', '-x', '-v']
EXTRACT_STRIP_TGZ = TAR_XV + ['--gzip', '--strip-components=1', '-f']
EXTRACT_STRIP_TBZ2 = TAR_XV + ['--bzip2', '--strip-components=1', '-f']

CONFIGURE_CMD = ['sh', '%(src)s/configure']
MAKE_PARALLEL_CMD = ['make', '-j%(cores)s']
MAKE_CHECK_CMD = MAKE_PARALLEL_CMD + ['check']
MAKE_DESTDIR_CMD = ['make', 'DESTDIR=%(abs_output)s']

# This file gets installed by multiple packages' install steps, but it is
# never useful when installed in isolation. So we remove it from the
# installation directories before packaging up.
REMOVE_INFO_DIR = command.Remove(command.path.join('%(output)s',
                                                   'share', 'info', 'dir'))

# Host-specific configure arguments, filled in per build platform below.
CONFIGURE_HOST_ARCH = []
if sys.platform.startswith('linux'):
    cc = ['gcc']
    cxx = ['g++', '-static-libstdc++']
    if any(platform.machine().lower().startswith(machine) for machine in
           ['x86_64', 'amd64', 'x64', 'i686']):
        # We build the tools for x86-32 hosts so they will run on either x86-32
        # or x86-64 hosts (with the right compatibility libraries installed).
        cc += ['-m32']
        cxx += ['-m32']
        CONFIGURE_HOST_ARCH += ['--build=i686-linux']
    CONFIGURE_HOST_ARCH += [
        'CC=' + ' '.join(cc),
        'CXX=' + ' '.join(cxx),
    ]
elif sys.platform.startswith('win'):
    # The i18n support brings in runtime dependencies on MinGW DLLs
    # that we don't want to have to distribute alongside our binaries.
    # So just disable it, and compiler messages will always be in US English.
    CONFIGURE_HOST_ARCH += [
        '--disable-nls',
    ]
    # There appears to be nothing we can pass at top-level configure time
    # that will prevent the configure scripts from finding MinGW's libiconv
    # and using it. We have to force this variable into the environment
    # of the sub-configure runs, which are run via make.
    MAKE_PARALLEL_CMD += [
        'HAVE_LIBICONV=no',
    ]

# Configure arguments shared by every host configure invocation.
CONFIGURE_HOST_COMMON = CONFIGURE_HOST_ARCH + [
    '--prefix=',
    '--disable-silent-rules',
    '--without-gcc-arch',
]

# For host libraries (static only).
CONFIGURE_HOST_LIB = CONFIGURE_HOST_COMMON + [
    '--disable-shared',
]

# For host tools (branding and no zlib).
CONFIGURE_HOST_TOOL = CONFIGURE_HOST_COMMON + [
    '--with-pkgversion=' + PACKAGE_NAME,
    '--with-bugurl=' + BUG_URL,
    '--without-zlib',
]
def InstallDocFiles(subdir, files):
    """Return commands copying the given doc files into share/doc/<subdir>."""
    doc_dir = command.path.join('%(output)s', 'share', 'doc', subdir)
    # Create each needed destination directory exactly once.
    needed_dirs = sorted(set(command.path.dirname(command.path.join(doc_dir, f))
                             for f in files))
    mkdirs = [command.Mkdir(d, parents=True) for d in needed_dirs]
    copies = [command.Copy(command.path.join('%(src)s', f),
                           command.path.join(doc_dir, f))
              for f in files]
    return mkdirs + copies
def NewlibLibcScript(arch):
    """Return the text of the linker script installed as libc.a."""
    template = """/*
 * This is a linker script that gets installed as libc.a for the
 * newlib-based NaCl toolchain. It brings in the constituent
 * libraries that make up what -lc means semantically.
 */
OUTPUT_FORMAT(%s)
GROUP ( libcrt_common.a libnacl.a )
"""
    if arch != 'arm':
        raise Exception('TODO(mcgrathr): OUTPUT_FORMAT for %s' % arch)
    # Three formats instead of one so the -EL/-EB switches work with the
    # endian-switchable ARM backend (default, big-endian, little-endian).
    format_list = ['elf32-littlearm-nacl',
                   'elf32-bigarm-nacl',
                   'elf32-littlearm-nacl']
    return template % ', '.join('"%s"' % fmt for fmt in format_list)
# The default strip behavior removes debugging and symbol table
# sections, but it leaves the .comment section. This contains the
# compiler version string, and so it changes when the compiler changes
# even if the actual machine code it produces is completely identical.
# Hence, the target library packages will always change when the
# compiler changes unless these sections are removed. Doing this
# requires somehow teaching the makefile rules to pass the
# --remove-section=.comment switch to TARGET-strip. For the GCC
# target libraries, setting STRIP_FOR_TARGET is sufficient. But
# quoting nightmares make it difficult to pass a command with a space
# in it as the STRIP_FOR_TARGET value. So the build writes a little
# script that can be invoked with a simple name.
#
# Though the gcc target libraries' makefiles are smart enough to obey
# STRIP_FOR_TARGET for library files, the newlib makefiles just
# blindly use $(INSTALL_DATA) for both header (text) files and library
# files. Hence it's necessary to override its INSTALL_DATA setting to
# one that will do stripping using this script, and thus the script
# must silently do nothing to non-binary files.
def ConfigureTargetPrep(arch):
    """Return commands that write an executable 'strip_for_target' script.

    The generated shell script strips executables/shared libraries fully,
    strips archives/objects with --strip-debug, silently ignores any other
    file type, and always drops the .comment section (see the long comment
    above for why .comment must go).
    """
    script_file = 'strip_for_target'
    config_target = arch + '-nacl'
    script_contents = """\
#!/bin/sh
mode=--strip-all
for arg; do
  case "$arg" in
  -*) ;;
  *)
    type=`file --brief --mime-type "$arg"`
    case "$type" in
    application/x-executable|application/x-sharedlib) ;;
    application/x-archive|application/x-object) mode=--strip-debug ;;
    *) exit 0 ;;
    esac
    ;;
  esac
done
exec %s-strip $mode --remove-section=.comment "$@"
""" % config_target
    return [
        command.WriteData(script_contents, script_file),
        command.Command(['chmod', '+x', script_file]),
    ]
def ConfigureTargetArgs(arch):
    """Configure arguments shared by all tools targeting <arch>-nacl."""
    triple = arch + '-nacl'
    return [
        '--target=' + triple,
        '--with-sysroot=/' + triple,
        'STRIP_FOR_TARGET=%(cwd)s/strip_for_target',
    ]
def CommandsInBuild(command_lines):
    """Wrap command lines so they run inside a 'build' subdirectory."""
    steps = [command.Mkdir('build')]
    steps.extend(command.Command(cmd, cwd='build') for cmd in command_lines)
    return steps
def UnpackSrc(is_gzip):
    """Commands that unpack the source tarball (gzip or bzip2) into src/."""
    extract = EXTRACT_STRIP_TGZ if is_gzip else EXTRACT_STRIP_TBZ2
    return [
        command.Mkdir('src'),
        command.Command(extract + ['%(src)s'], cwd='src'),
    ]
def PopulateDeps(dep_dirs):
    """Commands that copy each dependency's output into all_deps/."""
    copy_cmds = [command.Command('cp -r "%s/"* all_deps' % dirname, shell=True)
                 for dirname in dep_dirs]
    return [command.Mkdir('all_deps')] + copy_cmds
def WithDepsOptions(options):
    """Build --with-<option>=%(abs_all_deps)s configure switches."""
    prefix = '--with-'
    suffix = '=%(abs_all_deps)s'
    return [prefix + option + suffix for option in options]
# These are libraries that go into building the compiler itself.
# Each entry describes where to get the source, how to unpack it, and the
# configure/make/install commands, in the format toolchain_main expects.
HOST_GCC_LIBS = {
    'gmp': {
        'tar_src': 'third_party/gmp/gmp-5.1.2.tar.bz2',
        'unpack_commands': UnpackSrc(False),
        'hashed_inputs': {'src': 'src'},
        'commands': CommandsInBuild([
            CONFIGURE_CMD + CONFIGURE_HOST_LIB + [
                '--with-sysroot=%(abs_output)s',
                '--enable-cxx',
                # Without this, the built library will assume the
                # instruction set details available on the build machine.
                # With this, it dynamically chooses what code to use based
                # on the details of the actual host CPU at runtime.
                '--enable-fat',
            ],
            MAKE_PARALLEL_CMD,
            MAKE_CHECK_CMD,
            MAKE_DESTDIR_CMD + ['install-strip'],
        ]),
    },
    'mpfr': {
        'dependencies': ['gmp'],
        'tar_src': 'third_party/mpfr/mpfr-3.1.2.tar.bz2',
        'unpack_commands': UnpackSrc(False) + PopulateDeps(['%(gmp)s']),
        'hashed_inputs': {'src': 'src', 'all_deps': 'all_deps'},
        'commands': CommandsInBuild([
            CONFIGURE_CMD + CONFIGURE_HOST_LIB + WithDepsOptions(['sysroot',
                                                                  'gmp']),
            MAKE_PARALLEL_CMD,
            MAKE_CHECK_CMD,
            MAKE_DESTDIR_CMD + ['install-strip'],
        ]),
    },
    'mpc': {
        'dependencies': ['gmp', 'mpfr'],
        'tar_src': 'third_party/mpc/mpc-1.0.1.tar.gz',
        'unpack_commands': UnpackSrc(True) + PopulateDeps(['%(gmp)s',
                                                           '%(mpfr)s']),
        'hashed_inputs': {'src': 'src', 'all_deps': 'all_deps'},
        'commands': CommandsInBuild([
            CONFIGURE_CMD + CONFIGURE_HOST_LIB + WithDepsOptions(['sysroot',
                                                                  'gmp',
                                                                  'mpfr']),
            MAKE_PARALLEL_CMD,
            MAKE_CHECK_CMD,
            MAKE_DESTDIR_CMD + ['install-strip'],
        ]),
    },
    'isl': {
        'dependencies': ['gmp'],
        'tar_src': 'third_party/cloog/isl-0.11.1.tar.bz2',
        'unpack_commands': UnpackSrc(False) + PopulateDeps(['%(gmp)s']),
        'hashed_inputs': {'src': 'src', 'all_deps': 'all_deps'},
        'commands': CommandsInBuild([
            CONFIGURE_CMD + CONFIGURE_HOST_LIB + WithDepsOptions([
                'sysroot',
                'gmp-prefix',
            ]),
            MAKE_PARALLEL_CMD,
            MAKE_CHECK_CMD,
            MAKE_DESTDIR_CMD + ['install-strip'],
        ]) + [
            # The .pc files wind up containing some absolute paths
            # that make the output depend on the build directory name.
            # The dependents' configure scripts don't need them anyway.
            command.RemoveDirectory(command.path.join('%(output)s',
                                                      'lib', 'pkgconfig')),
        ],
    },
    'cloog': {
        'dependencies': ['gmp', 'isl'],
        'tar_src': 'third_party/cloog/cloog-0.18.0.tar.gz',
        'unpack_commands': UnpackSrc(True) + PopulateDeps(['%(gmp)s',
                                                           '%(isl)s']),
        'hashed_inputs': {'src': 'src', 'all_deps': 'all_deps'},
        'commands': CommandsInBuild([
            CONFIGURE_CMD + CONFIGURE_HOST_LIB + [
                '--with-bits=gmp',
                '--with-isl=system',
            ] + WithDepsOptions(['sysroot',
                                 'gmp-prefix',
                                 'isl-prefix']),
            MAKE_PARALLEL_CMD,
            MAKE_CHECK_CMD,
            MAKE_DESTDIR_CMD + ['install-strip'],
        ]) + [
            # The .pc files wind up containing some absolute paths
            # that make the output depend on the build directory name.
            # The dependents' configure scripts don't need them anyway.
            command.RemoveDirectory(command.path.join('%(output)s',
                                                      'lib', 'pkgconfig')),
        ],
    },
}

# All of the above, in dependency order, as GCC's build dependencies.
HOST_GCC_LIBS_DEPS = ['gmp', 'mpfr', 'mpc', 'isl', 'cloog']

GCC_GIT_URL = GIT_BASE_URL + '/nacl-gcc.git'
def GccCommand(target, cmd):
    """Run cmd with the target's binutils bin directory on the PATH."""
    binutils_bin = command.path.join('%(abs_binutils_' + target + ')s', 'bin')
    return command.Command(cmd, path_dirs=[binutils_bin])
def ConfigureGccCommand(target, extra_args=[]):
    """Configure invocation for GCC, built against the prebuilt host libs."""
    target_cflagstr = ' '.join(CommonTargetCflags(target))
    configure_args = (
        CONFIGURE_CMD +
        CONFIGURE_HOST_TOOL +
        ConfigureTargetArgs(target) +
        TARGET_GCC_CONFIG.get(target, []) + [
            '--with-gmp=%(abs_gmp)s',
            '--with-mpfr=%(abs_mpfr)s',
            '--with-mpc=%(abs_mpc)s',
            '--with-isl=%(abs_isl)s',
            '--with-cloog=%(abs_cloog)s',
            '--enable-cloog-backend=isl',
            '--disable-dlopen',
            '--disable-shared',
            '--with-newlib',
            '--with-linker-hash-style=gnu',
            '--enable-linker-build-id',
            '--enable-languages=c,c++,lto',
            'CFLAGS_FOR_TARGET=' + target_cflagstr,
            'CXXFLAGS_FOR_TARGET=' + target_cflagstr,
        ] + extra_args)
    return GccCommand(target, configure_args)
def HostTools(target):
tools = {
'binutils_' + target: {
'git_url': GIT_BASE_URL + '/nacl-binutils.git',
'git_revision': GIT_REVISIONS['binutils'],
'commands': ConfigureTargetPrep(target) + [
command.Command(
CONFIGURE_CMD +
CONFIGURE_HOST_TOOL +
ConfigureTargetArgs(target) + [
'--enable-deterministic-archives',
'--enable-gold',
] + ([] if sys.platform == 'win32' else [
'--enable-plugins',
])),
command.Command(MAKE_PARALLEL_CMD),
command.Command(MAKE_CHECK_CMD),
command.Command(MAKE_DESTDIR_CMD + ['install-strip']),
REMOVE_INFO_DIR,
] + InstallDocFiles('binutils',
['COPYING3'] +
[command.path.join(subdir, 'NEWS')
for subdir in
['binutils', 'gas', 'ld', 'gold']]) +
# The top-level lib* directories contain host libraries
# that we don't want to include in the distribution.
[command.RemoveDirectory(command.path.join('%(output)s', name))
for name in ['lib', 'lib32', 'lib64']],
},
'gcc_' + target: {
'dependencies': HOST_GCC_LIBS_DEPS + ['binutils_' + target],
'git_url': GCC_GIT_URL,
'git_revision': GIT_REVISIONS['gcc'],
# Remove all the source directories that are used solely for
# building target libraries. We don't want those included in the
# input hash calculation so that we don't rebuild the compiler
# when the the only things that have changed are target libraries.
'unpack_commands': [command.RemoveDirectory(dirname) for dirname in [
'boehm-gc',
'libada',
'libffi',
'libgcc',
'libgfortran',
'libgo',
'libgomp',
'libitm',
'libjava',
'libmudflap',
'libobjc',
'libquadmath',
'libssp',
'libstdc++-v3',
]],
'commands': ConfigureTargetPrep(target) + [
ConfigureGccCommand(target),
| |
The decim=%i parameter will result in a sampling '
'frequency of %g Hz, which can cause aliasing artifacts.'
% (lowpass, decim, new_sfreq)) # > 50% nyquist lim
offset = int(offset)
if not 0 <= offset < decim:
raise ValueError('decim must be at least 0 and less than %s, got '
'%s' % (decim, offset))
return decim, offset, new_sfreq
@fill_doc
class EvokedArray(Evoked):
    """Evoked object from numpy array.
    Parameters
    ----------
    data : array of shape (n_channels, n_times)
        The channels' evoked response. See notes for proper units of measure.
    info : instance of Info
        Info dictionary. Consider using ``create_info`` to populate
        this structure.
    tmin : float
        Start time before event. Defaults to 0.
    comment : str
        Comment on dataset. Can be the condition. Defaults to ''.
    nave : int
        Number of averaged epochs. Defaults to 1.
    kind : str
        Type of data, either average or standard_error. Defaults to 'average'.
    %(verbose)s
    %(baseline_array)s
        Defaults to ``None``, i.e. no baseline correction.
        .. versionadded:: 0.21
    See Also
    --------
    EpochsArray, io.RawArray, create_info
    Notes
    -----
    Proper units of measure:
    * V: eeg, eog, seeg, emg, ecg, bio, ecog
    * T: mag
    * T/m: grad
    * M: hbo, hbr
    * Am: dipole
    * AU: misc
    """
    @verbose
    def __init__(self, data, info, tmin=0., comment='', nave=1, kind='average',
                 verbose=None, baseline=None):  # noqa: D102
        # Preserve complex-valued input (e.g. analytic signals); everything
        # else is coerced to float64.
        dtype = np.complex128 if np.iscomplexobj(data) else np.float64
        data = np.asanyarray(data, dtype=dtype)
        if data.ndim != 2:
            raise ValueError('Data must be a 2D array of shape (n_channels, '
                             'n_samples), got shape %s' % (data.shape,))
        if len(info['ch_names']) != np.shape(data)[0]:
            raise ValueError('Info (%s) and data (%s) must have same number '
                             'of channels.' % (len(info['ch_names']),
                                               np.shape(data)[0]))
        self.data = data
        # first/last are sample indices relative to the event (tmin may be
        # negative); times is derived from them so it is exactly aligned
        # with the sampling grid.
        self.first = int(round(tmin * info['sfreq']))
        self.last = self.first + np.shape(data)[-1] - 1
        self.times = np.arange(self.first, self.last + 1,
                               dtype=np.float64) / info['sfreq']
        self.info = info.copy()  # do not modify original info
        self.nave = nave
        self.kind = kind
        self.comment = comment
        self.picks = None
        self.verbose = verbose
        self.preload = True
        self._projector = None
        # kind must map to a known FIFF aspect code.
        _validate_type(self.kind, "str", "kind")
        if self.kind not in _aspect_dict:
            raise ValueError('unknown kind "%s", should be "average" or '
                             '"standard_error"' % (self.kind,))
        self._aspect_kind = _aspect_dict[self.kind]
        # Apply (optional) baseline correction immediately so the stored
        # data is consistent with the ``baseline`` attribute.
        self.baseline = baseline
        self.apply_baseline(self.baseline, verbose=verbose)
def _get_entries(fid, evoked_node, allow_maxshield=False):
    """Collect the comment and aspect-kind tags of every evoked entry.

    Returns the comments, the aspect kinds, and a human-readable,
    newline-joined summary string of both.
    """
    found_comments = []
    found_aspects = []
    for node in evoked_node:
        # Scan the node's directory for comment tags.
        for idx in range(node['nent']):
            entry = node['directory'][idx]
            if entry.kind == FIFF.FIFF_COMMENT:
                found_comments.append(read_tag(fid, entry.pos).data)
        # Scan the node's data aspect for aspect-kind tags.
        aspect = _get_aspect(node, allow_maxshield)[0]
        for idx in range(aspect['nent']):
            entry = aspect['directory'][idx]
            if entry.kind == FIFF.FIFF_ASPECT_KIND:
                found_aspects.append(int(read_tag(fid, entry.pos).data))
    comments = np.atleast_1d(found_comments)
    aspect_kinds = np.atleast_1d(found_aspects)
    # Every entry must have exactly one comment and one aspect kind.
    if len(comments) == 0 or len(comments) != len(aspect_kinds):
        fid.close()
        raise ValueError('Dataset names in FIF file '
                         'could not be found.')
    t = '\n'.join('"' + c + '" (' + _aspect_rev[a] + ')'
                  for c, a in zip(comments, aspect_kinds))
    return comments, aspect_kinds, t
def _get_aspect(evoked, allow_maxshield):
    """Return the data aspect of an evoked node and a MaxShield flag."""
    is_maxshield = False
    aspect = dir_tree_find(evoked, FIFF.FIFFB_ASPECT)
    if not aspect:
        # No regular aspect found: fall back to MaxShield (IAS) data,
        # which requires explicit permission from the caller.
        _check_maxshield(allow_maxshield)
        aspect = dir_tree_find(evoked, FIFF.FIFFB_IAS_ASPECT)
        is_maxshield = True
    if len(aspect) > 1:
        logger.info('Multiple data aspects found. Taking first one.')
    return aspect[0], is_maxshield
def _get_evoked_node(fname):
    """Return the FIFFB_EVOKED nodes found in an evoked file."""
    fid, tree, _ = fiff_open(fname)
    # Ensure the file handle is closed once the tree has been walked.
    with fid:
        _, meas = read_meas_info(fid, tree, verbose=False)
        return dir_tree_find(meas, FIFF.FIFFB_EVOKED)
def _check_evokeds_ch_names_times(all_evoked):
evoked = all_evoked[0]
ch_names = evoked.ch_names
for ii, ev in enumerate(all_evoked[1:]):
if ev.ch_names != ch_names:
if set(ev.ch_names) != set(ch_names):
raise ValueError(
"%s and %s do not contain the same channels." % (evoked,
ev))
else:
warn("Order of channels differs, reordering channels ...")
ev = ev.copy()
ev.reorder_channels(ch_names)
all_evoked[ii + 1] = ev
if not np.max(np.abs(ev.times - evoked.times)) < 1e-7:
raise ValueError("%s and %s do not contain the same time instants"
% (evoked, ev))
return all_evoked
def combine_evoked(all_evoked, weights):
    """Merge evoked data by weighted addition or subtraction.
    Each `~mne.Evoked` in ``all_evoked`` should have the same channels and the
    same time instants. Subtraction can be performed by passing
    ``weights=[1, -1]``.
    .. Warning::
        Other than cases like simple subtraction mentioned above (where all
        weights are -1 or 1), if you provide numeric weights instead of using
        ``'equal'`` or ``'nave'``, the resulting `~mne.Evoked` object's
        ``.nave`` attribute (which is used to scale noise covariance when
        applying the inverse operator) may not be suitable for inverse imaging.
    Parameters
    ----------
    all_evoked : list of Evoked
        The evoked datasets.
    weights : list of float | 'equal' | 'nave'
        The weights to apply to the data of each evoked instance, or a string
        describing the weighting strategy to apply: ``'nave'`` computes
        sum-to-one weights proportional to each object's ``nave`` attribute;
        ``'equal'`` weights each `~mne.Evoked` by ``1 / len(all_evoked)``.
    Returns
    -------
    evoked : Evoked
        The new evoked data.
    Notes
    -----
    .. versionadded:: 0.9.0
    """
    naves = np.array([evk.nave for evk in all_evoked], float)
    if isinstance(weights, str):
        _check_option('weights', weights, ['nave', 'equal'])
        if weights == 'nave':
            weights = naves / naves.sum()
        else:
            weights = np.ones_like(naves) / len(naves)
    else:
        weights = np.array(weights, float)
        if weights.ndim != 1 or weights.size != len(all_evoked):
            raise ValueError('weights must be the same size as all_evoked')
    # cf. https://en.wikipedia.org/wiki/Weighted_arithmetic_mean, section on
    # "weighted sample variance". The variance of a weighted sample mean is:
    #
    #    σ² = w₁² σ₁² + w₂² σ₂² + ... + wₙ² σₙ²
    #
    # Estimating the variance of each evoked instance as 1 / nave gives:
    #
    #    σ² = w₁² / nave₁ + w₂² / nave₂ + ... + wₙ² / naveₙ
    #
    # and the resulting nave is the reciprocal of this. This general formula
    # is equivalent to the formulae in Matti's manual (pp 128-129):
    #    new_nave = sum(naves)        when weights='nave'
    #    new_nave = 1. / sum(1. / naves)  when weights are all 1.
    new_nave = 1. / np.sum(weights ** 2 / naves)
    all_evoked = _check_evokeds_ch_names_times(all_evoked)
    combined = all_evoked[0].copy()
    # The merged dataset carries the union of all bad channels.
    combined.info['bads'] = list({bad for ev in all_evoked
                                  for bad in ev.info['bads']})
    combined.data = sum(w * ev.data for w, ev in zip(weights, all_evoked))
    combined.nave = new_nave
    combined.comment = ' + '.join(f'{w:0.3f} × {ev.comment or "unknown"}'
                                  for w, ev in zip(weights, all_evoked))
    return combined
@verbose
def read_evokeds(fname, condition=None, baseline=None, kind='average',
proj=True, allow_maxshield=False, verbose=None):
"""Read evoked dataset(s).
.. versionchanged:: 0.21
If the read `~mne.Evoked` objects had been baseline-corrected before
saving, this will be reflected in their ``baseline`` attribute after
reading.
Parameters
----------
fname : str
The file name, which should end with -ave.fif or -ave.fif.gz.
condition : int or str | list of int or str | None
The index or list of indices of the evoked dataset to read. FIF files
can contain multiple datasets. If None, all datasets are returned as a
list.
%(baseline_common_evoked)s
If ``None`` (default), do not apply baseline correction.
.. note:: Note that if the read `~mne.Evoked` objects have already
been baseline-corrected, the data retrieved from disk will
**always** be baseline corrected (in fact, only the
baseline-corrected version of the data will be saved, so
there is no way to undo this procedure). Only **after** the
data has been loaded, a custom (additional) baseline
correction **may** be optionally applied by passing a tuple.
Passing ``None`` will **not** remove the existing baseline
correction, but merely omit the optional, additional baseline
correction
kind : str
Either 'average' or 'standard_error', the type of data to read.
proj : bool
If False, available projectors won't be applied to the data.
allow_maxshield : bool | str (default False)
If True, allow loading of data that has been recorded with internal
active compensation (MaxShield). Data recorded with MaxShield should
generally not be loaded directly, but should first be processed using
SSS/tSSS to remove the compensation signals that may also affect brain
activity. Can also be "yes" to load without eliciting a warning.
%(verbose)s
Returns
-------
evoked : Evoked or list of Evoked
The evoked dataset(s); one `~mne.Evoked` if condition is ``int`` or
``str``, or list of Evoked if condition is ``None`` or ``list``.
See | |
+ ' '
s += '(' + '%d' % clinfo_data["GPUs"][gpu]["Max compute units"] + ' CU)' + ' '
s += '(' + clinfo_data["GPUs"][gpu]["Global memory size (h)"] + ')' + ' '
s += 'v' + clinfo_data["GPUs"][gpu]["Driver version"]
print_progress(s)
if int(clinfo_data["GPUs"][gpu]["Global memory size (Byte)"]) < warn_minimum_mem["GPU"][1]:
print_warn("%s memory is %s < %s" % (gpu, clinfo_data["GPUs"][gpu]["Global memory size (h)"], warn_minimum_mem["GPU"][0]))
print_warn("This might cause some benchmarks to fallback to Truffle mode")
print_ok('MegaGuards detected OpenCL device(s)')
if num_oclgpus != 1 and num_oclcpus != 1:
print_error('MegaGuards benchmarking might not work properly there should be one OpenCL CPU and one GPU')
print_error(' The number of OpenCL CPU devices should be "1" and OpenCL GPU devices should be "1"')
else:
return cpu, gpu
else:
print_error('MegaGuards did not find any OpenCL device')
return None, None
def get_megaguards_junit_status(verbose=False):
    """Run the MegaGuards core junit tests, retrying up to 3 times.

    Parameters
    ----------
    verbose : bool
        When True, tee the captured subprocess output to the console.

    Returns
    -------
    bool
        True when all prerequisites are present and a test run succeeds.
    """
    is_ok = get_megaguards_home_dir(check_only=True)
    is_ok = is_ok and get_megaguards_build_dir(check_only=True)
    is_ok = is_ok and get_megaguards_polyhedral_ld(check_only=True)
    is_ok = is_ok and get_megaguards_test_dataset(check_only=True)
    if is_ok:
        n = 3
        for t in range(n):
            out = mx.OutputCapture()
            _out = out if not verbose else mx.TeeOutputCapture(out)
            out_err = mx.OutputCapture()
            # BUG FIX: stderr was previously routed into the stdout capture
            # (`_out_err = out ...`), losing/merging the error stream.
            _out_err = out_err if not verbose else mx.TeeOutputCapture(out_err)
            print_progress("Performing MegaGuards (core) junit tests.. (note: run 'mx junit-mg' for complete MegaGuards junit tests)")
            retcode = mx.run(['mx', 'junit-mg-core'], out=_out, err=_out_err, nonZeroIsFatal=False)
            if retcode == 0:
                break
            else:
                print_progress("Test failed.. retry %d of %d" % (t + 1, n))
        if retcode == 0:
            print_ok('MegaGuards core junit tests')
        else:
            print_warn('MegaGuards core junit tests encountered some errors.')
        is_ok = is_ok and retcode == 0
    return is_ok
def check_benchmark_requirements(verbose=False):
    """Verify that the programs and Python packages needed by the benchmark
    suite are available, printing an ok/warn/error line for each.

    Parameters
    ----------
    verbose : bool
        When True, tee the captured subprocess output to the console.
    """
    # name -> [executable, version flag, recommended version, install hint]
    programs_version = {
        'CPython2' : ['python', '--version', '2.7.14', '(Download URL: "https://repo.continuum.io/archive/Anaconda2-5.1.0-Linux-x86_64.sh")'],
        'CPython3' : ['python3', '--version', '3.5.3', '(Download URL: "https://repo.continuum.io/archive/Anaconda3-4.4.0-Linux-x86_64.sh")'],
        'PyPy3' : ['pypy3' , '--version', '5.10.0', '(Download URL: "https://bitbucket.org/pypy/pypy/downloads/pypy3-v5.10.0-linux64.tar.bz2")'],
        'GCC' : ['gcc' , '--version', '5.4.1', '(Run: "sudo apt-get install gcc-5")']
    }
    for p in programs_version:
        a = programs_version[p]
        # BUG FIX: a single pair of captures was previously shared by all
        # programs, so output from an earlier program could satisfy the
        # version check of a later one. Use fresh captures per program.
        out = mx.OutputCapture()
        _out = out if not verbose else mx.TeeOutputCapture(out)
        out_err = mx.OutputCapture()
        # BUG FIX: stderr was previously captured into the stdout object.
        _out_err = out_err if not verbose else mx.TeeOutputCapture(out_err)
        retcode = mx.run([a[0], a[1]], out=_out, err=_out_err, nonZeroIsFatal=False)
        if retcode != 0:
            print_error('%s was not found or not in $PATH. %s' % (p, a[3]))
        elif a[2] in out.data or a[2] in out_err.data:
            # Some interpreters (e.g. CPython 2) print their version to
            # stderr, so check both streams.
            print_ok('%s v%s exists' % (p, a[2]))
        else:
            print_warn('%s exists but mis-match the recommended v%s' % (p, a[2]))
    # Import probes: narrow the previously-bare excepts to ImportError so
    # unrelated failures (e.g. KeyboardInterrupt) are not swallowed.
    try:
        import matplotlib
        import matplotlib.pyplot as plt
        import matplotlib.ticker as ticker
    except ImportError:
        print_error('matplotlib was not found (Run: "conda install matplotlib")')
    try:
        import numpy
    except ImportError:
        print_error('numpy was not found (Run: "conda install numpy")')
    try:
        from scipy import stats
    except ImportError:
        print_error('scipy was not found (Run: "conda install scipy")')
    try:
        from functools import reduce
    except ImportError:
        print_error('reduce function was not found')
    try:
        import csv
    except ImportError:
        print_error('missing csv module')
    try:
        import xml
    except ImportError:
        # BUG FIX: this message previously said "missing csv module".
        print_error('missing xml module')
    try:
        import json
    except ImportError:
        print_error('missing json module')
def _get_out_outerr(verbose=False):
    """Return (stdout, stderr) capture objects, teed to the console if verbose."""
    stdout_capture = mx.OutputCapture()
    stderr_capture = mx.OutputCapture()
    if verbose:
        return (mx.TeeOutputCapture(stdout_capture),
                mx.TeeOutputCapture(stderr_capture))
    return stdout_capture, stderr_capture
def _probe_opencl_platform(device_name, device_type, ocl_mm_path, verbose):
    """Probe platform indices 0-4 with the OpenCL `mm` binary and return the
    first index whose output mentions `device_name` (or a 5-char prefix or
    suffix of it), or None when no platform matches."""
    base_cmd = ["./mm", "64", "-t", device_type, "-d"]
    for p in range(5):
        out, out_err = _get_out_outerr(verbose)
        retcode = mx.run(base_cmd + ['%d' % p], out=out, err=out_err,
                         cwd=ocl_mm_path, nonZeroIsFatal=False)
        output = out.data if not verbose else out.underlying.data
        if retcode == 0 and (device_name in output or
                             device_name[:5] in output or
                             device_name[-5:] in output):
            return p
    return None


def find_opencl_device_platform(verbose=False):
    """Detect the OpenCL platform indices of the CPU and GPU devices and
    persist them to the env file (unless already configured).

    Parameters
    ----------
    verbose : bool
        When True, tee the captured subprocess output to the console.
    """
    from mx_mg_conf import envoclcpu, envoclgpu
    envoclcpu_set = envoclcpu in os.environ
    if envoclcpu_set:
        print_info('%s=%s already set in the "env" file' % (envoclcpu, os.environ[envoclcpu]))
    envoclgpu_set = envoclgpu in os.environ
    if envoclgpu_set:
        print_info('%s=%s already set in the "env" file' % (envoclgpu, os.environ[envoclgpu]))
    from mx_zippy_bench_param import mg_opencl_bench_paths
    ocl_mm_path = mg_opencl_bench_paths['mm']
    # Build the probe binary once if it is missing.
    out, out_err = _get_out_outerr(verbose)
    if not os.path.isfile(ocl_mm_path + 'mm'):
        mx.run(['make'], out=out, err=out_err, cwd=ocl_mm_path, nonZeroIsFatal=True)
    oclcpu, oclgpu = clinfo_mg()
    if not oclcpu or not oclgpu:
        print_error("Please fix this before running the benchmarks")
    if envoclcpu_set and envoclgpu_set:
        return
    if not envoclcpu_set:
        idx = _probe_opencl_platform(oclcpu, 'cpu', ocl_mm_path, verbose)
        if idx is not None:
            print_ok("%s OpenCL device platform index is %d" % (oclcpu, idx))
            update_env_file(envoclcpu, '%d' % idx)
            os.environ[envoclcpu] = '%d' % idx
        else:
            print_error('Unable to find the platform index for %s' % oclcpu)
    if not envoclgpu_set:
        idx = _probe_opencl_platform(oclgpu, 'gpu', ocl_mm_path, verbose)
        if idx is not None:
            print_ok("%s OpenCL device platform index is %d" % (oclgpu, idx))
            update_env_file(envoclgpu, '%d' % idx)
            os.environ[envoclgpu] = '%d' % idx
        else:
            # BUG FIX: this error previously reported the CPU device name
            # (oclcpu) in the GPU branch.
            print_error('Unable to find the platform index for %s' % oclgpu)
def run_simple_example(verbose=False):
    """Run the small 'mm' and 'lud' benchmarks on every configured system
    (OpenCL CPU/GPU binaries plus several Python interpreters/targets) and
    print a per-benchmark timing table in seconds.

    Parameters
    ----------
    verbose : bool
        When True, tee the captured subprocess output to the console.
    """
    from mx_mg_conf import envoclcpu, envoclgpu
    from mx_zippy_bench_param import hpc_path, rodinia_path, mg_paths, mg_opencl_bench_paths
    # Per-benchmark launch descriptions: path, executable, and extra args
    # for the Python and the native OpenCL variants.
    examples = {
        "mm" : {
            "Python" : {
                "PATH" : hpc_path + mg_paths['Python'],
                "exec" : 'mm.py',
                "preArg" : [],
                "postArg": []
            },
            "OpenCL" : {
                "PATH" : mg_opencl_bench_paths['mm'],
                "exec" : 'mm',
                "preArg" : [],
                "postArg": []
            },
        },
        "lud" : {
            "Python" : {
                "PATH" : rodinia_path + mg_paths['Python'],
                "exec" : 'lud.py',
                "preArg" : ['-s'],
                "postArg": []
            },
            "OpenCL" : {
                "PATH" : mg_opencl_bench_paths['lud'],
                "exec" : 'lud',
                "preArg" : ['-s'],
                "postArg": []
            },
        },
    }
    # Problem sizes passed to each benchmark.
    example_args = ['64', '128', '256']
    # Ensure the OpenCL platform indices are detected and exported first.
    find_opencl_device_platform(verbose)
    opencl_cpu_device_num = int(os.environ[envoclcpu])
    opencl_gpu_device_num = int(os.environ[envoclgpu])
    # Column order of the final report.
    ordered_systems = ["ZipPy", "OpenCL-CPU", "MG-CPU", "OpenCL-GPU", "MG-GPU", "MG-Truffle", "PyPy3", "CPython3"]
    opencl_systems = {
        "OpenCL-CPU": [['./'], ['-t', 'cpu' , '-d', '%d' % opencl_cpu_device_num, '8']],
        "OpenCL-GPU": [['./'], ['-t', 'gpu' , '-d', '%d' % opencl_gpu_device_num, '8']],
    }
    python_systems = {
        "MG-Truffle": [['mx', 'python'], ['8', '--mg-target=truffle']],
        "MG-CPU": [['mx', 'python'], ['8', '--mg-target=cpu']],
        "MG-GPU": [['mx', 'python'], ['8', '--mg-target=gpu']],
        "ZipPy": [['mx', 'python'], ['8']],
        "PyPy3": [['pypy3'], ['8']],
        "CPython3": [['python3'], ['8']],
    }
    # Benchmarks print "<name>, Time, t1, t2, ..."; this regex extracts the
    # comma-separated list of times.
    re_rule = r"^(?P<benchmark>[a-zA-Z0-9\.\-\_]+), Time,(?P<times>(\s[0-9]+(\.[0-9]+)?,)+)"
    # prog = re.compile(re_rule, re.MULTILINE)
    oclcpu, oclgpu = clinfo_mg()
    if not oclcpu or not oclgpu:
        print_error("Please fix this before running the benchmarks")
    results = {}
    print_info('Start time: ' + time.ctime())
    for e in examples:
        results[e] = {}
        out, out_err = _get_out_outerr(verbose)
        # Build the native OpenCL benchmark binary on first use.
        if not os.path.isfile(examples[e]['OpenCL']['PATH'] + examples[e]['OpenCL']['exec']):
            retcode = mx.run(['make'], out=out, err=out_err, cwd=examples[e]['OpenCL']['PATH'], nonZeroIsFatal=True)
        for arg in example_args:
            results[e][arg] = {}
            for ocl in opencl_systems:
                cwd=examples[e]['OpenCL']['PATH']
                cmd = []
                cmd += [opencl_systems[ocl][0][0] + examples[e]['OpenCL']['exec']]
                cmd += examples[e]['OpenCL']['preArg'] + [arg] + examples[e]['OpenCL']['postArg'] + opencl_systems[ocl][1]
                print_progress(' '.join(map(str, cmd)))
                out, out_err = _get_out_outerr(verbose)
                retcode = mx.run(cmd, out=out, err=out_err, cwd=cwd, nonZeroIsFatal=False)
                if retcode != 0:
                    # -1.0 marks a failed run in the report.
                    results[e][arg][ocl] = -1.0
                    print_error('%s failed' % ocl)
                    continue
                # NOTE(review): when verbose=True, `out` is a TeeOutputCapture;
                # other functions in this file read `out.underlying.data` in
                # that case — confirm `.data` is valid here.
                output = out.data
                r = re.findall(re_rule, output, re.MULTILINE)
                # Take the times group of the first match, strip spaces and
                # the trailing comma, then keep the best (minimum) time.
                t = r[0][1]
                t = t.replace(' ', '')[0:-1]
                times = [float(x) for x in t.split(',')]
                results[e][arg][ocl] = min(times)
            for pys in python_systems:
                # NOTE(review): `cwd` is computed but not passed to mx.run
                # below (the command uses an absolute PATH) — confirm this
                # is intentional.
                cwd=examples[e]['Python']['PATH']
                cmd = []
                cmd += python_systems[pys][0] + [examples[e]['Python']['PATH'] + examples[e]['Python']['exec']]
                cmd += examples[e]['Python']['preArg'] + [arg] + examples[e]['Python']['postArg'] + python_systems[pys][1]
                print_progress(' '.join(map(str, cmd)))
                out, out_err = _get_out_outerr(verbose)
                # Retry flaky runs up to 3 times. NOTE(review): `out` is
                # reused across retries, so output accumulates; min(times)
                # below still yields the fastest observed run.
                for t in range(3):
                    retcode = mx.run(cmd, out=out, err=out_err, nonZeroIsFatal=False)
                    if retcode == 0:
                        break
                if retcode != 0:
                    results[e][arg][pys] = -1.0
                    print_error('%s failed' % pys)
                    continue
                output = out.data
                r = re.findall(re_rule, output, re.MULTILINE)
                t = r[0][1]
                t = t.replace(' ', '')[0:-1]
                times = [float(x) for x in t.split(',')]
                results[e][arg][pys] = min(times)
    print_info('End time: ' + time.ctime())
    # Fixed-width report: bench, arg, one column per system, then the unit.
    format_pattern = "%-7s %-4s %-8s %-10s %-8s %-10s %-12s %-10s %-8s %-10s %-10s"
    print_progress(format_pattern % tuple(['bench', 'arg'] + ordered_systems + ['Unit']))
    for e in results:
        for arg in example_args:
            r = []
            for s in ordered_systems:
                r += [results[e][arg][s]]
            print_progress(format_pattern % tuple([e, arg] + r + ['(seconds)']))
def get_megaguards_setup(args):
"""run MegaGuards setup"""
parser = ArgumentParser(prog='mx mg')
parser.add_argument('--check-only', '-c', action='store_true', help='Check without making any change.')
parser.add_argument('--force', '-f', action='store_true', help='Force the action and ignore local files.')
parser.add_argument('--verbose', '-v', action='store_true', help='Print all suppressed output.')
parser.add_argument('--init', '-i', action='store_true', help='Setup MegaGuards.')
parser.add_argument('--init-all', action='store_true', help='Setup MegaGuards (including benchmark suite).')
parser.add_argument('--polyhedral-ld', action='store_true', help='Setup Polyhedral analysis binary library (AthenaPet).')
parser.add_argument('--check-polyhedral', action='store_true', help='Test Polyhedral analysis library.')
parser.add_argument('--dataset-test', action='store_true', help='Download junit test dataset.')
parser.add_argument('--dataset-benchmark', action='store_true', help='Download benchmark dataset.')
parser.add_argument('--benchmark-suite', action='store_true', help='Download benchmark suite.')
parser.add_argument('--clinfo', action='store_true', help='Print OpenCL devices information.')
parser.add_argument('--test-gpu', '-t', action='store_true', help='Test GPU OpenCL device.')
parser.add_argument('--test-cpu', action='store_true', help='Test CPU OpenCL | |
<filename>tests/test_choice_calcs.py<gh_stars>1-10
"""
Tests for the choice_calcs.py file.
"""
import unittest
import warnings
from collections import OrderedDict
import numpy as np
import numpy.testing as npt
import pandas as pd
from scipy.sparse import csr_matrix
from scipy.sparse import diags
from scipy.sparse import block_diag
import pylogit.asym_logit as asym
import pylogit.conditional_logit as mnl
import pylogit.choice_calcs as cc
# Use the following to always show the warnings
np.seterr(all='warn')
warnings.simplefilter("always")
class GenericTestCase(unittest.TestCase):
    """
    Defines the common setUp method used for the different types of tests.

    The fixture is a tiny asymmetric-logit problem: two choice situations
    (three alternatives in the first, two in the second), one generic
    variable, two alternative-specific constants, and three shape
    parameters (one of which is constrained).
    """
    def setUp(self):
        # The set up being used is one where there are two choice situations,
        # The first having three alternatives, and the second having only two
        # alternatives. There is one generic variable. Two alternative
        # specific constants and all three shape parameters are used.
        # Create the betas to be used during the tests
        self.fake_betas = np.array([-0.6])
        # Create the fake outside intercepts to be used during the tests
        self.fake_intercepts = np.array([1, 0.5])
        # Create names for the intercept parameters
        self.fake_intercept_names = ["ASC 1", "ASC 2"]
        # Record the position of the intercept that is not being estimated
        self.fake_intercept_ref_pos = 2
        # Create the shape parameters to be used during the tests. Note that
        # these are the reparameterized shape parameters, thus they will be
        # exponentiated in the fit_mle process and various calculations.
        self.fake_shapes = np.array([-1, 1])
        # Create names for the shape parameters
        self.fake_shape_names = ["Shape 1", "Shape 2"]
        # Record the position of the shape parameter that is being constrained
        self.fake_shape_ref_pos = 2
        # Calculate the 'natural' shape parameters
        self.natural_shapes = asym._convert_eta_to_c(self.fake_shapes,
                                                     self.fake_shape_ref_pos)
        # Create an array of all model parameters
        self.fake_all_params = np.concatenate((self.fake_shapes,
                                               self.fake_intercepts,
                                               self.fake_betas))
        # The mapping between rows and alternatives is given below.
        # (5 long-format rows x 3 alternatives, one-hot per row.)
        self.fake_rows_to_alts = csr_matrix(np.array([[1, 0, 0],
                                                      [0, 1, 0],
                                                      [0, 0, 1],
                                                      [1, 0, 0],
                                                      [0, 0, 1]]))
        # Create the mapping between rows and individuals
        # (5 long-format rows x 2 observations, one-hot per row.)
        self.fake_rows_to_obs = csr_matrix(np.array([[1, 0],
                                                     [1, 0],
                                                     [1, 0],
                                                     [0, 1],
                                                     [0, 1]]))
        # Create the fake design matrix with columns denoting X
        # The intercepts are not included because they are kept outside the
        # index in the scobit model.
        self.fake_design = np.array([[1],
                                     [2],
                                     [3],
                                     [1.5],
                                     [3.5]])
        # Create the index array for this set of choice situations
        self.fake_index = self.fake_design.dot(self.fake_betas)
        # Create the needed dataframe for the Asymmetric Logit constructor
        self.fake_df = pd.DataFrame({"obs_id": [1, 1, 1, 2, 2],
                                     "alt_id": [1, 2, 3, 1, 3],
                                     "choice": [0, 1, 0, 0, 1],
                                     "x": self.fake_design[:, 0],
                                     "intercept": [1 for i in range(5)]})
        # Record the various column names
        self.alt_id_col = "alt_id"
        self.obs_id_col = "obs_id"
        self.choice_col = "choice"
        # Store the choices as their own array
        self.choice_array = self.fake_df[self.choice_col].values
        # Create the index specification and name dictionary for the model
        self.fake_specification = OrderedDict()
        self.fake_names = OrderedDict()
        self.fake_specification["x"] = [[1, 2, 3]]
        self.fake_names["x"] = ["x (generic coefficient)"]
        # Bundle args and kwargs used to construct the Asymmetric Logit model.
        self.constructor_args = [self.fake_df,
                                 self.alt_id_col,
                                 self.obs_id_col,
                                 self.choice_col,
                                 self.fake_specification]
        # Create a variable for the kwargs being passed to the constructor
        self.constructor_kwargs = {"intercept_ref_pos":
                                   self.fake_intercept_ref_pos,
                                   "shape_ref_pos": self.fake_shape_ref_pos,
                                   "names": self.fake_names,
                                   "intercept_names":
                                   self.fake_intercept_names,
                                   "shape_names": self.fake_shape_names}
        # Initialize a basic Asymmetric Logit model whose coefficients will be
        # estimated.
        self.model_obj = asym.MNAL(*self.constructor_args,
                                   **self.constructor_kwargs)
        # Store a ridge penalty for use in calculations.
        self.ridge = 0.5
        return None
class ComputationalTests(GenericTestCase):
"""
Tests the computational functions to make sure that they return the
expected results.
"""
# Store a utility transformation function for the tests
def utility_transform(self,
sys_utilities,
alt_IDs,
rows_to_alts,
shape_params,
intercept_params):
return sys_utilities[:, None]
def test_calc_asymptotic_covariance(self):
"""
Ensure that the correct Huber-White covariance matrix is calculated.
"""
ones_array = np.ones(5)
# Create the hessian matrix for testing. It will be a 5 by 5 matrix.
test_hessian = np.diag(2 * ones_array)
# Create the approximation of the Fisher Information Matrix
test_fisher_matrix = np.diag(ones_array)
# Create the inverse of the hessian matrix.
test_hess_inverse = np.diag(0.5 * ones_array)
# Calculated the expected result
expected_result = np.dot(test_hess_inverse,
np.dot(test_fisher_matrix, test_hess_inverse))
# Alias the function being tested
func = cc.calc_asymptotic_covariance
# Perform the test.
function_results = func(test_hessian, test_fisher_matrix)
self.assertIsInstance(function_results, np.ndarray)
self.assertEqual(function_results.shape, test_hessian.shape)
npt.assert_allclose(expected_result, function_results)
return None
def test_log_likelihood(self):
"""
Ensure that we correctly calculate the log-likelihood, both with and
without ridge penalties, and both with and without shape and intercept
parameters.
"""
# Create a utility transformation function for testing
def test_utility_transform(x, *args):
return x
# Calculate the index for each alternative for each individual
test_index = self.fake_design.dot(self.fake_betas)
# Exponentiate each index value
exp_test_index = np.exp(test_index)
# Calculate the denominator for each probability
interim_dot_product = self.fake_rows_to_obs.T.dot(exp_test_index)
test_denoms = self.fake_rows_to_obs.dot(interim_dot_product)
# Calculate the probabilities for each individual
prob_array = exp_test_index / test_denoms
# Calculate what the log-likelihood should be
choices = self.fake_df[self.choice_col].values
expected_log_likelihood = np.dot(choices, np.log(prob_array))
# Create a set of intercepts, that are all zeros
intercepts = np.zeros(2)
# Combine all the 'parameters'
test_all_params = np.concatenate([intercepts, self.fake_betas], axis=0)
# Calculate what the log-likelihood should be with a ridge penalty
penalty = self.ridge * (test_all_params**2).sum()
expected_log_likelihood_penalized = expected_log_likelihood - penalty
# Alias the function being tested
func = cc.calc_log_likelihood
# Create the arguments for the function being tested
args = [self.fake_betas,
self.fake_design,
self.fake_df[self.alt_id_col].values,
self.fake_rows_to_obs,
self.fake_rows_to_alts,
choices,
test_utility_transform]
kwargs = {"intercept_params": intercepts,
"shape_params": None}
# Perform the tests
function_results = func(*args, **kwargs)
self.assertAlmostEqual(expected_log_likelihood, function_results)
# Test the weighted log-likelihood capability
weights = 2 * np.ones(self.fake_design.shape[0])
kwargs["weights"] = weights
function_results_2 = func(*args, **kwargs)
self.assertAlmostEqual(2 * expected_log_likelihood, function_results_2)
kwargs["weights"] = None
# Test the ridge regression calculations
kwargs["ridge"] = self.ridge
function_results_3 = func(*args, **kwargs)
self.assertAlmostEqual(expected_log_likelihood_penalized,
function_results_3)
# Test the function again, this time without intercepts
kwargs["intercept_params"] = None
function_results_4 = func(*args, **kwargs)
self.assertAlmostEqual(expected_log_likelihood_penalized,
function_results_4)
return None
def test_array_size_error_in_calc_probabilities(self):
"""
Ensure that a helpful ValueError is raised when a person tries to
calculate probabilities using BOTH a 2D coefficient array and a 3D
design matrix.
"""
# Alias the function being tested
func = cc.calc_probabilities
# Create fake arguments for the function being tested.
# Note these arguments are not valid in general, but suffice for
# testing the functionality we care about in this function.
args = [np.arange(9).reshape((3, 3)),
np.arange(27).reshape((3, 3, 3)),
None,
None,
None,
None]
# Note the error message that should be shown.
msg_1 = "Cannot calculate probabilities with both 3D design matrix AND"
msg_2 = " 2D coefficient array."
msg = msg_1 + msg_2
self.assertRaisesRegexp(ValueError,
msg,
func,
*args)
return None
def test_return_argument_error_in_calc_probabilities(self):
"""
Ensure that a helpful ValueError is raised when a person tries to
calculate probabilities using BOTH a return_long_probs == False and
chosen_row_to_obs being None.
"""
# Alias the function being tested
func = cc.calc_probabilities
# Create fake arguments for the function being tested.
# Note these arguments are not valid in general, but suffice for
# testing the functionality we care about in this function.
args = [np.arange(9).reshape((3, 3)),
np.arange(9).reshape((3, 3)),
None,
None,
None,
None]
# Note the error message that should be shown.
msg = "chosen_row_to_obs is None AND return_long_probs is False"
self.assertRaisesRegexp(ValueError,
msg,
func,
*args)
return None
def test_1D_calc_probabilities(self):
"""
Ensure that when using a 2D design matrix and 1D vector of parameters,
that the calc_probabilities function returns the correct values. Note
that this test will only verify the functionality under 'normal'
conditions, where the values of the exponentiated indices do not go
to zero nor to infinity.
"""
# Calculate the index vector
expected_index = self.fake_design.dot(self.fake_betas)
# Calculate exp(index)
expected_exp_index = np.exp(expected_index)
# Calculate the sum of exp(index) for each individual
denoms = self.fake_rows_to_obs.T.dot(expected_exp_index)
# Calculate the expected probabilities
expected_probs = expected_exp_index / self.fake_rows_to_obs.dot(denoms)
# Alias the function to be tested
func = cc.calc_probabilities
# Collect the arguments needed for this function
args = [self.fake_betas,
self.fake_design,
self.fake_df[self.alt_id_col].values,
self.fake_rows_to_obs,
self.fake_rows_to_alts,
self.utility_transform]
kwargs = {"intercept_params": self.fake_intercepts,
"shape_params": self.fake_shapes,
"return_long_probs": True}
function_results = func(*args, **kwargs)
# Perform the tests
self.assertIsInstance(function_results, np.ndarray)
self.assertEqual(len(function_results.shape), 1)
self.assertEqual(function_results.shape, (self.fake_design.shape[0],))
npt.assert_allclose(function_results, expected_probs)
return None
def test_return_values_of_calc_probabilities(self):
"""
Ensure that the various configuration of return values can all be
returned.
"""
# Calculate the index vector
expected_index = self.fake_design.dot(self.fake_betas)
# Calculate exp(index)
expected_exp_index = np.exp(expected_index)
# | |
def deflection_func(
u,
y,
x,
npow,
axis_ratio,
sersic_index,
effective_radius,
mass_to_light_gradient,
sersic_constant,
):
eta_u = np.sqrt(axis_ratio) * np.sqrt(
(u * ((x ** 2) + (y ** 2 / (1 - (1 - axis_ratio ** 2) * u))))
)
return (
(((axis_ratio * eta_u) / effective_radius) ** -mass_to_light_gradient)
* np.exp(
-sersic_constant
* (((eta_u / effective_radius) ** (1.0 / sersic_index)) - 1)
)
/ ((1 - (1 - axis_ratio ** 2) * u) ** (npow + 0.5))
)
    @aa.grid_dec.grid_2d_to_structure
    @aa.grid_dec.transform
    @aa.grid_dec.relocate_to_radial_minimum
    def convergence_2d_from(self, grid: aa.type.Grid2DLike):
        """Calculate the projected convergence at a given set of arc-second gridded coordinates.
        Parameters
        ----------
        grid
            The grid of (y,x) arc-second coordinates the convergence is computed on.
        """
        # The convergence depends only on the elliptical (eccentric) radius
        # of each coordinate, so reduce the grid to radii first.
        return self.convergence_func(self.grid_to_eccentric_radii(grid))
def convergence_func(self, grid_radius: float) -> float:
return (
self.mass_to_light_ratio
* (
((self.axis_ratio * grid_radius) / self.effective_radius)
** -self.mass_to_light_gradient
)
* self.image_2d_via_radii_from(grid_radius)
)
def decompose_convergence_via_mge(self):
radii_min = self.effective_radius / 100.0
radii_max = self.effective_radius * 20.0
def sersic_radial_gradient_2D(r):
return (
self.mass_to_light_ratio
* self.intensity
* (
((self.axis_ratio * r) / self.effective_radius)
** -self.mass_to_light_gradient
)
* np.exp(
-self.sersic_constant
* (((r / self.effective_radius) ** (1.0 / self.sersic_index)) - 1.0)
)
)
return self._decompose_convergence_via_mge(
func=sersic_radial_gradient_2D, radii_min=radii_min, radii_max=radii_max
)
    def decompose_convergence_via_cse(self) -> Tuple[List, List]:
        """
        Decompose the convergence of this Sersic radial-gradient profile into
        CSE component profiles (delegated to ``_decompose_convergence_via_cse_from``).

        The fit settings (radial decades of the fit window, number of CSE
        components and number of sample points) are derived from this
        profile's parameters via ``cse_settings_from``.

        Returns
        -------
        Tuple[List, List]
            A list of amplitudes and core radii of every component the mass
            profile is decomposed into.
        """
        upper_dex, lower_dex, total_cses, sample_points = cse_settings_from(
            effective_radius=self.effective_radius,
            sersic_index=self.sersic_index,
            sersic_constant=self.sersic_constant,
            mass_to_light_gradient=self.mass_to_light_gradient,
        )
        # Circularized effective radius; the fit window spans
        # [radius / 10^lower_dex, radius * 10^upper_dex].
        scaled_effective_radius = self.effective_radius / np.sqrt(self.axis_ratio)
        radii_min = scaled_effective_radius / 10.0 ** lower_dex
        radii_max = scaled_effective_radius * 10.0 ** upper_dex
        def sersic_radial_gradient_2D(r):
            # 2D Sersic profile with an extra radial power-law
            # mass-to-light gradient term.
            return (
                self.mass_to_light_ratio
                * self.intensity
                * (
                    ((self.axis_ratio * r) / scaled_effective_radius)
                    ** -self.mass_to_light_gradient
                )
                * np.exp(
                    -self.sersic_constant
                    * (
                        ((r / scaled_effective_radius) ** (1.0 / self.sersic_index))
                        - 1.0
                    )
                )
            )
        return self._decompose_convergence_via_cse_from(
            func=sersic_radial_gradient_2D,
            radii_min=radii_min,
            radii_max=radii_max,
            total_cses=total_cses,
            sample_points=sample_points,
        )
class SphSersicRadialGradient(EllSersicRadialGradient):
    def __init__(
        self,
        centre: Tuple[float, float] = (0.0, 0.0),
        intensity: float = 0.1,
        effective_radius: float = 0.6,
        sersic_index: float = 0.6,
        mass_to_light_ratio: float = 1.0,
        mass_to_light_gradient: float = 0.0,
    ):
        """
        Set up a spherical Sersic mass and light profile with a radial
        mass-to-light gradient.

        The spherical case is the elliptical profile with both elliptical
        components fixed to zero.

        Parameters
        ----------
        centre
            The (y,x) arc-second coordinates of the profile centre.
        intensity
            Overall flux intensity normalisation in the light profiles (electrons per second).
        effective_radius
            The circular radius containing half the light of this profile.
        sersic_index
            Controls the concentration of the profile (lower -> less concentrated, higher -> more concentrated).
        mass_to_light_ratio
            The mass-to-light ratio of the light profile.
        mass_to_light_gradient
            The mass-to-light radial gradient.
        """
        profile_kwargs = dict(
            centre=centre,
            elliptical_comps=(0.0, 0.0),
            intensity=intensity,
            effective_radius=effective_radius,
            sersic_index=sersic_index,
            mass_to_light_ratio=mass_to_light_ratio,
            mass_to_light_gradient=mass_to_light_gradient,
        )
        super().__init__(**profile_kwargs)
class EllSersicCore(EllSersic):
    def __init__(
        self,
        centre: Tuple[float, float] = (0.0, 0.0),
        elliptical_comps: Tuple[float, float] = (0.0, 0.0),
        effective_radius: float = 0.6,
        sersic_index: float = 4.0,
        radius_break: float = 0.01,
        intensity_break: float = 0.05,
        gamma: float = 0.25,
        alpha: float = 3.0,
        mass_to_light_ratio: float = 1.0,
    ):
        """
        The elliptical cored-Sersic light (and mass) profile.

        Unlike ``EllSersic`` there is no ``intensity`` parameter: the parent
        is initialised with ``intensity=intensity_break`` and the overall
        normalisation used by the image calculation is the derived
        ``intensity_prime`` property.

        Parameters
        ----------
        centre
            The (y,x) arc-second coordinates of the profile centre.
        elliptical_comps
            The first and second ellipticity components of the elliptical coordinate system, (see the module
            `autogalaxy -> convert.py` for the convention).
        effective_radius
            The circular radius containing half the light of this profile.
        sersic_index
            Controls the concentration of the profile (lower -> less concentrated, higher -> more concentrated).
        radius_break
            The break radius separating the inner power-law (with logarithmic slope gamma) and outer Sersic function.
        intensity_break
            The intensity at the break radius.
        gamma
            The logarithmic power-law slope of the inner core profiles
        alpha :
            Controls the sharpness of the transition between the inner core / outer Sersic profiles.
        mass_to_light_ratio
            The mass-to-light ratio of the light profile.
        """
        super().__init__(
            centre=centre,
            elliptical_comps=elliptical_comps,
            intensity=intensity_break,
            effective_radius=effective_radius,
            sersic_index=sersic_index,
            mass_to_light_ratio=mass_to_light_ratio,
        )
        self.radius_break = radius_break
        self.intensity_break = intensity_break
        self.alpha = alpha
        self.gamma = gamma
    def deflections_yx_2d_from(self, grid: aa.type.Grid2DLike):
        # Cored-Sersic deflections are computed via the MGE decomposition.
        return self.deflections_2d_via_mge_from(grid=grid)
    def image_2d_via_radii_from(self, grid_radii: np.ndarray):
        """
        Calculate the intensity of the cored-Sersic light profile on a grid of radial coordinates.

        As written the code evaluates:
        I(r) = I' * (1 + (r_b / r)^alpha)^(gamma / alpha)
             * exp(-b * ((r^alpha + r_b^alpha) / r_e^alpha)^(1 / (alpha * n)))
        where I' is ``intensity_prime``, r_b ``radius_break``, r_e
        ``effective_radius``, n ``sersic_index`` and b ``sersic_constant``.

        Parameters
        ----------
        grid_radii
            The radial distance from the centre of the profile. for each coordinate on the grid.
        """
        return np.multiply(
            np.multiply(
                self.intensity_prime,
                np.power(
                    np.add(
                        1,
                        np.power(np.divide(self.radius_break, grid_radii), self.alpha),
                    ),
                    (self.gamma / self.alpha),
                ),
            ),
            np.exp(
                np.multiply(
                    -self.sersic_constant,
                    (
                        np.power(
                            np.divide(
                                np.add(
                                    np.power(grid_radii, self.alpha),
                                    (self.radius_break ** self.alpha),
                                ),
                                (self.effective_radius ** self.alpha),
                            ),
                            (1.0 / (self.alpha * self.sersic_index)),
                        )
                    ),
                )
            ),
        )
    def decompose_convergence_via_mge(self):
        # Radial window for the Gaussian decomposition fit.
        radii_min = self.effective_radius / 50.0
        radii_max = self.effective_radius * 20.0
        def core_sersic_2D(r):
            # 2D core-Sersic convergence: mass-to-light ratio times the
            # core-Sersic light profile (same form as image_2d_via_radii_from).
            return (
                self.mass_to_light_ratio
                * self.intensity_prime
                * (1.0 + (self.radius_break / r) ** self.alpha)
                ** (self.gamma / self.alpha)
                * np.exp(
                    -self.sersic_constant
                    * (
                        (r ** self.alpha + self.radius_break ** self.alpha)
                        / self.effective_radius ** self.alpha
                    )
                    ** (1.0 / (self.sersic_index * self.alpha))
                )
            )
        return self._decompose_convergence_via_mge(
            func=core_sersic_2D, radii_min=radii_min, radii_max=radii_max
        )
    @property
    def intensity_prime(self):
        """Overall intensity normalisation in the rescaled Core-Sersic light profiles (electrons per second)"""
        return (
            self.intensity_break
            * (2.0 ** (-self.gamma / self.alpha))
            * np.exp(
                self.sersic_constant
                * (
                    ((2.0 ** (1.0 / self.alpha)) * self.radius_break)
                    / self.effective_radius
                )
                ** (1.0 / self.sersic_index)
            )
        )
class SphSersicCore(EllSersicCore):
    def __init__(
        self,
        centre: Tuple[float, float] = (0.0, 0.0),
        effective_radius: float = 0.6,
        sersic_index: float = 4.0,
        radius_break: float = 0.01,
        intensity_break: float = 0.05,
        gamma: float = 0.25,
        alpha: float = 3.0,
    ):
        """
        The spherical cored-Sersic light profile: the elliptical profile with
        both elliptical components fixed to zero.

        Parameters
        ----------
        centre
            The (y,x) arc-second coordinates of the profile centre.
        effective_radius
            The circular radius containing half the light of this profile.
        sersic_index
            Controls the concentration of the profile (lower -> less concentrated, higher -> more concentrated).
        radius_break
            The break radius separating the inner power-law (with logarithmic slope gamma) and outer Sersic function.
        intensity_break
            The intensity at the break radius.
        gamma
            The logarithmic power-law slope of the inner core profiles
        alpha :
            Controls the sharpness of the transition between the inner core / outer Sersic profiles.
        """
        # The parent __init__ already stores radius_break, intensity_break,
        # alpha and gamma; the original re-assigned them redundantly here.
        super().__init__(
            centre=centre,
            elliptical_comps=(0.0, 0.0),
            effective_radius=effective_radius,
            sersic_index=sersic_index,
            radius_break=radius_break,
            intensity_break=intensity_break,
            gamma=gamma,
            alpha=alpha,
        )
class EllChameleon(MassProfile, StellarProfile):
    def __init__(
        self,
        centre: Tuple[float, float] = (0.0, 0.0),
        elliptical_comps: Tuple[float, float] = (0.0, 0.0),
        intensity: float = 0.1,
        core_radius_0: float = 0.01,
        core_radius_1: float = 0.02,
        mass_to_light_ratio: float = 1.0,
    ):
        """
        The elliptical Chameleon mass profile.

        Parameters
        ----------
        centre
            The (y,x) arc-second coordinates of the profile centre.
        elliptical_comps
            The first and second ellipticity components of the elliptical coordinate system, (see the module
            `autogalaxy -> convert.py` for the convention).
        intensity
            Overall intensity normalisation of the light profile (units are dimensionless and derived from the data
            the light profile's image is compared too, which is expected to be electrons per second).
        core_radius_0 : the core size of the first elliptical cored Isothermal profile.
        core_radius_1 : core_radius_0 + core_radius_1 is the core size of the second elliptical cored Isothermal profile.
            We use core_radius_1 here is to avoid negative values.
        Profile form:
            mass_to_light_ratio * intensity *\
                (1.0 / Sqrt(x^2 + (y/q)^2 + core_radius_0^2) - 1.0 / Sqrt(x^2 + (y/q)^2 + (core_radius_0 + core_radius_1)**2.0))
        """
        # Cooperative initialisation of both ancestor branches: the first call
        # follows the normal MRO, the second starts the lookup *after*
        # MassProfile. NOTE(review): this relies on the class's MRO ordering
        # (geometry-profile base after MassProfile) -- confirm before changing.
        super(EllChameleon, self).__init__(
            centre=centre, elliptical_comps=elliptical_comps
        )
        super(MassProfile, self).__init__(
            centre=centre, elliptical_comps=elliptical_comps
        )
        self.mass_to_light_ratio = mass_to_light_ratio
        self.intensity = intensity
        # First profile's core size, and the positive increment giving the
        # second profile's core size (core_radius_0 + core_radius_1).
        self.core_radius_0 = core_radius_0
        self.core_radius_1 = core_radius_1
def deflections_yx_2d_from(self, grid: aa.type.Grid2DLike):
return self.deflections_2d_via_analytic_from(grid=grid)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def deflections_2d_via_analytic_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the deflection angles at a given set of arc-second gridded coordinates.
Following Eq. (15) and (16), but the parameters are slightly | |
# Copyright 2019 <NAME>. All rights reserved.
#
# The contents of this file are licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""
Napalm driver for HP FlexFabric devices
Read https://napalm.readthedocs.io for more information.
"""
from __future__ import print_function
from __future__ import unicode_literals
from netmiko import ConnectHandler, FileTransfer, InLineTransfer
from napalm.base.base import NetworkDriver
from napalm.base.exceptions import (
CommandErrorException,
ConnectionClosedException,
ConnectionException,
)
#only attempt to import py23_compat if python version less than 3
import sys
if sys.version_info[0] < 3:
from napalm.base.utils import py23_compat
import napalm.base.constants as C
import napalm.base.helpers
import re
import socket
import string
# Constants
# Second counts used when converting "X weeks, Y days, ..." uptime strings
# into a total number of seconds.
HOUR_SECONDS = 3600
DAY_SECONDS = 24 * HOUR_SECONDS
WEEK_SECONDS = 7 * DAY_SECONDS
YEAR_SECONDS = 365 * DAY_SECONDS
class FlexFabricDriver(NetworkDriver):
"""Napalm driver for HPE FlexFabric Switches"""
def __init__(self, hostname, username, password, timeout=60, optional_args=None):
self.device = None
self.hostname = hostname
self.username = username
self.password = password
self.timeout = timeout
if optional_args is None:
optional_args = {}
# Netmiko possible arguments
netmiko_argument_map = {
'port': None,
'secret': '',
'verbose': False,
'keepalive': 30,
'global_delay_factor': 1,
'use_keys': False,
'key_file': None,
'ssh_strict': False,
'system_host_keys': False,
'alt_host_keys': False,
'alt_key_file': '',
'ssh_config_file': None,
}
# Build dict of any optional Netmiko args
self.netmiko_optional_args = {}
for key, value in netmiko_argument_map.items():
try:
self.netmiko_optional_args[key] = optional_args[key]
except KeyError:
pass
self.global_delay_factor = optional_args.get('global_delay_factor', 1)
self.port = optional_args.get('port', 22)
self.device = None
self.config_replace = False
self.interface_map = {}
self.profile = ["flexfabric"]
def open(self):
"""Open a connection to the device"""
device_type = 'hp_comware_ssh'
self.device = ConnectHandler(
device_type=device_type,
host=self.hostname,
username=self.username,
password=<PASSWORD>,
**self.netmiko_optional_args)
# ensure in enable mode
self.device.enable()
def close(self):
"""Close the connection to the device."""
self.device.disconnect()
def _send_command(self, command):
"""Wrapper for self.device.send.command().
If command is a list will iterate through commands until valid command.
"""
try:
if isinstance(command, list):
for cmd in command:
output = self.device.send_command(cmd)
if "Invalid input: " not in output:
break
else:
output = self.device.send_command(command)
return output
except (socket.error, EOFError) as e:
raise ConnectionClosedException(str(e))
def is_alive(self):
""" Returns a flag with the state of the connection."""
if self.device is None:
return {'is_alive': False}
try:
# SSH
# Try sending ASCII null byte to maintain the connection alive
null = chr(0)
self.device.write_channel(null)
return {
'is_alive': self.device.remote_conn.transport.is_active()
}
except (socket.error, EOFError, OSError):
# If unable to send, we can tell for sure that the connection is unusable
return {'is_alive': False}
def cli(self, commands):
"""
Execute a list of commands and return the output in a dictionary format
using the command as the key.
"""
cli_output = dict()
if type(commands) is not list:
raise TypeError('Please enter a valid list of commands!')
for command in commands:
output = self._send_command(command)
if sys.version_info[0] < 3:
cli_output[py23_compat.text_type(command)] = output
else:
cli_output = output
return cli_output
    def get_facts(self):
        """Return a dict of facts from the device.

        Keys: uptime, vendor, os_version, serial_number, model, hostname,
        fqdn, interface_list. Values are parsed by screen-scraping several
        ``display`` commands; on Python 2 the strings are coerced through
        ``py23_compat.text_type``.
        """
        # default values.
        vendor = "HP"
        uptime = -1
        serial_number, fqdn, os_version, hostname, domain_name, model = ("",) * 6
        # obtain output from device
        display_dev = self._send_command("display device manuinfo")
        display_ver = self._send_command("display version")
        display_curr_conf = self._send_command("display current-configuration | include sysname")
        display_domain = self._send_command("display domain | include Domain")
        display_interface = self._send_command("display interface brief")
        # serial number
        # A "Chassis self"/"Slot" header line arms the scan; the next
        # DEVICE_SERIAL_NUMBER line is appended and the flag cleared, so only
        # one serial is taken per chassis/slot block.
        chassis = False
        for line in display_dev.splitlines():
            if not line.startswith(" ") and ("Chassis self" in line or "Slot" in line)\
            or ("Slot" in line and "CPU" in line):
                chassis = True
            if chassis and "DEVICE_SERIAL_NUMBER" in line:
                serial_number += (line.split(":")[1])
                chassis = False
        serial_number = serial_number.strip()
        # uptime/model/os_version
        for line in display_ver.splitlines():
            if " uptime is " in line:
                model, uptime_str = line.split(" uptime is ")
                uptime = self.parse_uptime(uptime_str)
                model = model.strip()
            if "System image version" in line:
                os_version = line.split(":")[1].strip()
            elif "Comware Software, Version" in line:
                # NOTE(review): str.lstrip strips a *character set*, not a
                # prefix -- this may over-strip leading characters of the
                # version string; confirm against real device output.
                os_version = line.lstrip("Comware Software, Version")
        # hostname
        # NOTE(review): assumes the filtered config contains a
        # "sysname <name>" line -- raises IndexError otherwise.
        hostname = display_curr_conf.split("sysname")[1].strip()
        # domain name
        domain_name = display_domain.splitlines()[0].split(":")[1].strip()
        #fqdn
        # "system" is treated as the default domain: use the bare hostname.
        if domain_name != "system":
            fqdn = "{}.{}".format(hostname, domain_name)
        else:
            fqdn = hostname
        #interface list
        # Interface names are the first column of every row after the header.
        interface_list = []
        active = False
        for line in display_interface.splitlines():
            if line.startswith("Interface Link Speed"):
                active = True
                continue
            if active:
                interface_list.append(line.split()[0])
        if sys.version_info[0] < 3:
            return {
                "uptime": int(uptime),
                "vendor": vendor,
                "os_version": py23_compat.text_type(os_version),
                "serial_number": py23_compat.text_type(serial_number),
                "model": py23_compat.text_type(model),
                "hostname": py23_compat.text_type(hostname),
                "fqdn": fqdn,
                "interface_list": interface_list,
            }
        else:
            return {
                "uptime": int(uptime),
                "vendor": vendor,
                "os_version": os_version,
                "serial_number": serial_number,
                "model": model,
                "hostname": hostname,
                "fqdn": fqdn,
                "interface_list": interface_list,
            }
def get_lldp_neighbors(self):
"""FlexFabric implementation of get_lldp_neighbors."""
lldp = {}
command = "display lldp neighbor-information list"
output = self._send_command(command)
active = False
for line in output.splitlines():
if line.startswith("System Name"):
active = True
continue
if active:
remote_sys, local_if, _, remote_port = line.split()
lldp[local_if] = [{"hostname": remote_sys, "port": remote_port}]
if not lldp:
for line in output.splitlines():
if line.startswith("Local Interface"):
active = True
continue
if active:
split_line = line.split()
local_if, remote_port, remote_sys = split_line[0], split_line[-2], split_line[-1]
lldp[local_if] = [{"hostname": remote_sys, "port": remote_port}]
return lldp
    def get_lldp_neighbors_detail(self, interface=""):
        """LLDP neighbor details, for one interface (verbose output) or for
        all interfaces (list output).

        Returns a dict mapping local interface name to a list of dicts with
        ``remote_system_name`` and ``remote_port`` keys.
        """
        lldp = {}
        # NOTE(review): unused variable, kept to avoid behavior changes.
        lldp_interfaces = []
        if interface: #if interface specified
            remote_port = ""
            remote_sys = ""
            command = "display lldp neighbor-information interface {} verbose".format(interface)
            output = self._send_command(command)
            for line in output.splitlines():
                # Values are sliced from a fixed column (23) of the verbose
                # output -- presumably just past the field label; confirm
                # against real device output.
                if line.startswith(" System name"):
                    remote_sys = line[23:]
                if line.startswith(" Port ID"):
                    remote_port = line[23:]
                #add more elements here TODO
            lldp[interface] = [{ "remote_system_name": remote_sys, "remote_port": remote_port}]
        else: #if didn't specify interface
            command = "display lldp neighbor-information list"
            output = self._send_command(command)
            active = False
            # Rows after the "System Name" header: system, local if, _, port.
            for line in output.splitlines():
                if line.startswith("System Name"):
                    active = True
                    continue
                if active:
                    remote_sys, local_if, _, remote_port = line.split()
                    lldp[local_if] = [{"remote_system_name": remote_sys, "remote_port": remote_port}]
            # Fallback for the alternative layout headed "Local Interface".
            if not lldp:
                for line in output.splitlines():
                    if line.startswith("Local Interface"):
                        active = True
                        continue
                    if active:
                        split_line = line.split()
                        local_if, remote_port, remote_sys = split_line[0], split_line[-2], split_line[-1]
                        lldp[local_if] = [{ "remote_system_name": remote_sys, "remote_port": remote_port}]
        return lldp
def get_environment(self):
environment = {}
cpu_cmd = "display cpu-usage summary"
mem_cmd = "display memory summary"
temp_cmd = "display environment"
fan_cmd = "display fan"
pwr_cmd = "display power"
# fan health
output = self._send_command(fan_cmd)
environment.setdefault("fans", {})
active = False
chassis = 0
for line in output.splitlines():
if line.startswith(" ---"):
active = True
chassis +=1
continue
elif line == "" or line.startswith(" Fan-tray"):
active = False
continue
elif active == False:
continue
line_list = line.split()
if line_list[1] != "Normal":
fan_state = False
else:
fan_state = True
fan_id = str(chassis) + "_" + line_list[0]
environment["fans"][fan_id] = {
"status": fan_state
}
if not environment["fans"]:
for line in output.splitlines():
if line.startswith("Slot") or line.startswith(" Slot"):
chassis +=1
continue
elif "FAN" in line or "Fan " in line:
fan_id = str(chassis) + "_" + line.split()[1].strip(":")
continue
elif "State" in line:
if line.split(":")[-1].strip() != "Normal":
fan_state = False
else:
fan_state = True
environment["fans"][fan_id] = {
"status": fan_state
}
# temperature sensors
output = self._send_command(temp_cmd)
environment.setdefault("temperature", {})
if "Slot" in output.splitlines()[0]:
slot = 0
active = False
for line in output.splitlines():
if "Slot" in line:
slot += 1
active = False
continue
elif not active and line.startswith("Sensor"):
active = True
continue
if active:
split_line = line.split()
location = str(slot) + "_" + "_".join(split_line[0:2])
temperature = float(split_line[2])
environment["temperature"][location] = {
"temperature": temperature,
"is_alert": temperature > float(split_line[-3]),
"is_critical": temperature > float(split_line[-2])
}
else:
if "Chassis" in output.splitlines()[2]:
marker = 4
else:
marker = 3
for line in output.splitlines()[3:]:
split_line = line.split()
location = "_".join(split_line[0:marker])
temperature = float(split_line[marker])
environment["temperature"][location] = {
"temperature": temperature,
"is_alert": temperature > float(split_line[-3]),
"is_critical": temperature > float(split_line[-2])
}
# power supply units
# currently not implemented
environment.setdefault('power', {})
environment['power']['not implemented'] = {'status': True, 'output': -1.0, 'capacity': -1.0}
#TODO
# cpu usage
output = self._send_command(cpu_cmd)
environment.setdefault("cpu", {})
usage = 0.0
if "Wrong parameter found at" in output:
output = self._send_command("display cpu-usage | include 1 minute")
for idx, line in enumerate(output.splitlines()):
environment["cpu"][idx] = {}
environment["cpu"][idx]["%usage"] = 0.0
usage = float(line.split()[0].strip("%"))
environment["cpu"][idx]["%usage"] = usage
else:
if "Chassis" in output.splitlines()[0]:
marker = 4
else:
marker = 3
| |
# 2018 Calculate Unconsented Surface Water Takes.py
#A Model fo Assessing the Magnitude of Unconsented Surface Water Use in
#the Canterbury Region
def CalculateFinYear(Month,Year):
    """Return the financial-year label "YYYY_YYYY" for a month/year pair.

    The financial year runs July-June: July-December of year Y fall in
    "Y_Y+1", January-June fall in "Y-1_Y". ``Year`` may be an int or a
    numeric string; it is rendered unchanged on its own side of the label.
    """
    if Month >= 7:
        return "%s_%s" % (Year, int(Year) + 1)
    return "%s_%s" % (int(Year) - 1, Year)
import os
import sys
from time import localtime,strftime

import arcpy
from arcpy import env
env.overwriteOutput = True
# Shared network folder holding the permitted-takes inputs (Config.csv etc.).
docpath = r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes" + "\\"
print "Inital Setup...."
#Import Variables from Config File.
# Each Config.csv row is "name,value"; the first line is a header.
vals = []
animals = ["Domestic","Dairy Cow","Cow","Deer","Sheep","Pig","Goat","Horse","Emu","Ostrich","Camelid","Poultry"]
entry = open(docpath + '\\' + "Config.csv")
entry.readline()
for e in entry:
    vals.append(e.strip().split(","))
entry.close()
# Abort if the config lists a variable name this script doesn't recognise.
# (The last two rows are excluded from the check -- presumably the
# Leakage/output rows; verify against Config.csv.)
newentry = False
for val in vals[:-2]:
    if not (val[0] in animals):
        print '"' + val[0] + '"' + " is a newly listed variable. Please contact the GIS Team to make this change."
        newentry = True
if newentry == True:
    # NOTE(review): sys.exit() requires `import sys`; confirm `sys` is
    # imported at the top of this script, otherwise this raises NameError.
    sys.exit()
# Map each config row onto its per-head water-use constant (units as defined
# by Config.csv -- presumably litres per day; verify against the config file).
for val in vals:
    if val[0] == "Domestic":
        DOMWU = float(val[1])
    elif val[0] == "Dairy Cow":
        DCWU = float(val[1])
    elif val[0] == "Cow":
        BFWU = float(val[1])
    elif val[0] == "Deer":
        DRWU = float(val[1])
    elif val[0] == "Sheep":
        SHWU = float(val[1])
    elif val[0] == "Pig":
        PGWU = float(val[1])
    elif val[0] == "Goat":
        GTWU = float(val[1])
    elif val[0] == "Horse":
        HRWU = float(val[1])
    elif val[0] == "Emu":
        EUWU = float(val[1])
    elif val[0] == "Ostrich":
        OSWU = float(val[1])
    elif val[0] == "Camelid":
        CMWU = float(val[1])
    elif val[0] == "Poultry":
        POWU = float(val[1])
    elif val[0] == "Leakage":
        # Leakage factor applied to the totals later in the model.
        leakage = float(val[1])
#output = vals[-1][1]
output = arcpy.GetParameterAsText(1)
# Ensure the Output\Hydrology\Permitted Takes folder tree and a Scratch.gdb
# workspace exist; compact the geodatabase when it is being reused.
if os.path.exists (r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output") == False:
    os.mkdir(r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output")
if os.path.exists (r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology") == False:
    os.mkdir(r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology")
if os.path.exists (r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes") == False:
    os.mkdir(r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes")
if arcpy.Exists(r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb") == False:
    arcpy.CreateFileGDB_management(r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes", "Scratch.gdb")
else:
    arcpy.Compact_management(r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb")
env.workspace = r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb"
env.scratchWorkspace = r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb"
# Clear any feature classes left over from a previous run (skip locked ones).
for f in arcpy.ListFeatureClasses():
    if arcpy.TestSchemaLock(f):
        arcpy.Delete_management(f)
print "Extracting Datasets...."
arcpy.AddMessage("\nExtracting Datasets....")
# Dissolve the input catchment polygons (script parameter 0) into a single
# analysis-extent feature class.
arcpy.MakeFeatureLayer_management(arcpy.GetParameterAsText(0),"Input")
arcpy.Dissolve_management("Input",r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Area_Extent")
arcpy.Delete_management("Input")
# Extract the source datasets clipped to the analysis extent: cadastral
# parcels, non-parcel features (hydro/road/rail), reticulated water supply,
# and Agribase land use.
arcpy.MakeFeatureLayer_management(r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Area_Extent","Extent")
print "\tParcels"
arcpy.AddMessage("\tParcels")
arcpy.MakeFeatureLayer_management(r"Database Connections\DC GISSQL2012 LDS GISuser.sde\LDS.DBO.CADASTRAL_NZTM_PARCELS_VALUATION","Property")
arcpy.SelectLayerByLocation_management("Property","INTERSECT","Extent")
# Real (occupiable) parcels: everything except hydro/rail/road/streambed.
arcpy.SelectLayerByAttribute_management("Property","SUBSET_SELECTION","NOT(FEATCODE in ('Hydro', 'Railway', 'Road', 'Road Strata', 'Streambed'))")
arcpy.CopyFeatures_management("Property",r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Int_Parcels")
arcpy.Clip_analysis(r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Int_Parcels","Extent",r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Parcels")
print "\tNon Parcels"
arcpy.AddMessage("\tNon Parcels")
# The complementary set (hydro/rail/road/streambed) is kept separately.
arcpy.SelectLayerByAttribute_management("Property","NEW_SELECTION","FEATCODE in ('Hydro', 'Railway', 'Road', 'Road Strata', 'Streambed')")
arcpy.CopyFeatures_management("Property",r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Int_Non_Parcels")
arcpy.Clip_analysis(r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Int_Non_Parcels","Extent",r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Non_Parcels")
arcpy.Delete_management("Property")
arcpy.Delete_management(r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Int_Parcels")
arcpy.Delete_management(r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Int_Non_Parcels")
print "\tWater Supply"
arcpy.AddMessage("\tWater Supply")
# Reticulated supply within 1 km of the extent (buffer catches networks that
# serve parcels just inside the boundary).
arcpy.MakeFeatureLayer_management(r"Database Connections\DC GISSQLEXT2012 GISWebuser.sde\GISPUBLIC.DBO.CLAGG_NZTM_WATER_SUPPLY","Water")
arcpy.SelectLayerByLocation_management("Water","INTERSECT","Extent","1000 METERS")
arcpy.CopyFeatures_management("Water",r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Reticulation")
arcpy.Delete_management("Water")
print "\tLanduse"
arcpy.AddMessage("\tLanduse")
arcpy.MakeFeatureLayer_management(r"Database Connections\DC GISSQL2012 GISuser.sde\GIS.DBO.ASUREQUALITY_Agribase","Agribase")
arcpy.SelectLayerByLocation_management("Agribase","INTERSECT","Extent",selection_type="NEW_SELECTION")
arcpy.CopyFeatures_management("Agribase",r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Landuse")
print "Removing Non Parcel Areas...."
arcpy.AddMessage("Removing Non Parcel Areas....")
# NOTE: "\I" in this non-raw string is not an escape sequence, so the
# backslash survives literally -- fragile but functional.
arcpy.MakeFeatureLayer_management(docpath + "\Input\Data.gdb\Meshblocks2018","Mesh_Blocks")
arcpy.Clip_analysis("Mesh_Blocks","Extent",r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Meshblocks2018")
arcpy.MakeFeatureLayer_management(r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Meshblocks2018","Mesh_Blocks2")
arcpy.MakeFeatureLayer_management(r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Parcels","Property")
# Meshblocks restricted to real parcel land -> "Area".
arcpy.Clip_analysis("Mesh_Blocks2","Property",r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Area")
arcpy.Delete_management("Mesh_Blocks")
print "Recalculating Meshblock Ratios..."
arcpy.AddMessage("Recalculating Meshblock Ratios...")
cursor = arcpy.da.UpdateCursor("Area",["SHAPE@AREA","POPULATION","DWELLINGS","DENSITY","PROPERTY_SQM"]) #"HUMAN_SQM",
for row in cursor:
if row[2] <> None:
prop = float(row[0]) * float(row[4])
human = float(row[3]) * float(prop)
else:
prop = 0
human = 0
dwell_ratio = round(row[2],0)/float(row[0])
cursor.updateRow([row[0],long(round(human,0)),long(round(prop,0)),row[3],dwell_ratio])
print "Removing Stock Water Networks...."
arcpy.AddMessage("Removing Stock Water Networks....")
arcpy.Intersect_analysis([[docpath + '\\' + "\Input\Stock Water Takes.gdb\NZTM_Stock_Water_Parcels",1],[r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Area",2]],r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Area1")
arcpy.Union_analysis([[r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Area1",1],[r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Non_Parcels",2]],r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Area2")
arcpy.Union_analysis ([[r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Area2",1],[r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Area",2]],r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Area3")
arcpy.MakeFeatureLayer_management(r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Area3","New_Area")
arcpy.SelectLayerByAttribute_management("New_Area","New_Selection","FID_Area2 <> -1")
arcpy.DeleteFeatures_management("New_Area")
arcpy.Delete_management("New_Area")
if arcpy.TestSchemaLock(r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Area"):
arcpy.Delete_management(r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Area")
if arcpy.TestSchemaLock(r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Area1"):
arcpy.Delete_management(r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Area1")
# Remove parcels served by reticulated water supply. The selection chain is
# order-critical: (1) parcels within 25 units of a supply line, (2) grow that
# set to neighbours within 10 units of road-touching parcels, (3) drop
# parcels whose centre lies inside the first set, (4) merge both passes into
# Water_Supply_Parcels and erase them from the analysis area.
print "Removing Reticulated Areas...."
arcpy.AddMessage("Removing Reticulated Areas....")
arcpy.MakeFeatureLayer_management(r"Database Connections\DC GISSQL2012 LDS GISuser.sde\LDS.DBO.CADASTRAL_NZTM_Roads","Roads")
arcpy.MakeFeatureLayer_management(r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Reticulation","Water")
arcpy.SelectLayerByLocation_management("Property","INTERSECT","Water",25,"NEW_SELECTION")
arcpy.SelectLayerByAttribute_management("Property","REMOVE_FROM_SELECTION","FEATCODE = 'road'")
arcpy.CopyFeatures_management("Property",r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Property1")
arcpy.MakeFeatureLayer_management(r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Property1","Parcels_1")
arcpy.SelectLayerByLocation_management("Parcels_1","INTERSECT","Roads",0,"NEW_SELECTION")
arcpy.SelectLayerByLocation_management("Property","INTERSECT","Parcels_1",10,"NEW_SELECTION")
arcpy.SelectLayerByAttribute_management("Parcels_1","CLEAR_SELECTION")
arcpy.SelectLayerByLocation_management("Property","HAVE_THEIR_CENTER_IN","Parcels_1",selection_type="REMOVE_FROM_SELECTION")
arcpy.SelectLayerByAttribute_management("Property","REMOVE_FROM_SELECTION","FEATCODE = 'road'")
arcpy.CopyFeatures_management("Property",r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Property2")
arcpy.Merge_management(["Property1","Property2"],r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Water_Supply_Parcels")
arcpy.Delete_management("Parcels_1")
# Same intersect/union/delete pattern as the stock-water step: pieces with
# FID_Area4 != -1 fall inside water-supply parcels and are removed.
arcpy.Intersect_analysis([[r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Water_Supply_Parcels",1],[r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Area3",2]],r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Area4")
arcpy.Union_analysis ([[r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Area4",1],[r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Area3",2]],r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Non_Reticulated_Area")
arcpy.MakeFeatureLayer_management(r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Non_Reticulated_Area","New_Area")
arcpy.SelectLayerByAttribute_management("New_Area","New_Selection","FID_Area4 <> -1")
arcpy.Rename_management(r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Area2", "Stock_Water_Area")
arcpy.DeleteFeatures_management("New_Area")
arcpy.MakeFeatureLayer_management(r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Stock_Water_Area","Stock_Water")
arcpy.MakeFeatureLayer_management(r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Non_Reticulated_Area","Reticulated_Area")
# Strip every field except the whitelist below from both output classes.
fields = ["POPULATION","DWELLINGS","DENSITY","HUMAN_SQM","PROPERTY_SQM","ESTIMATED","SHAPE","Shape","OBJECTID","SHAPE_Length","SHAPE_Area","Shape_Length","Shape_Area"]
delfields = []
for f in arcpy.ListFields("Stock_Water"):
    if (str(f.name) in fields) == False:
        delfields.append(str(f.name))
arcpy.DeleteField_management("Stock_Water",delfields)
arcpy.Delete_management("Stock_Water")
delfields = []
for f in arcpy.ListFields("Reticulated_Area"):
    if (str(f.name) in fields) == False:
        delfields.append(str(f.name))
arcpy.DeleteField_management("Reticulated_Area",delfields)
arcpy.Delete_management("Reticulated_Area")
# Total the human population and dwelling estimates over the non-reticulated
# meshblock fragments and write them onto the (single, dissolved) Extent row.
print "Calculating Human Population per Catchment not on Reticulation...."
arcpy.AddMessage("Calculating Human Population per Catchment not on Reticulation....")
arcpy.AddField_management("Extent","HUMAN_NO","Long")
arcpy.AddField_management("Extent","DENSITY","Float")
arcpy.AddField_management("Extent","PROPERTY_NO","Long")
arcpy.Clip_analysis("Mesh_Blocks2",r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Non_Reticulated_Area",r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Population")
arcpy.MakeFeatureLayer_management(r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Population","Pop")
cursor = arcpy.da.UpdateCursor("Extent",["DENSITY","PROPERTY_NO"])
for row in cursor:
    dwell = 0
    density = 0
    cnt = 0
    # Scans every Population fragment per Extent row; Extent is dissolved so
    # this normally runs once.
    for srow in arcpy.da.SearchCursor("Pop",["SHAPE@AREA","DENSITY","PROPERTY_SQM"]):
        if srow[2] <> None:
            dwell = dwell + (float(srow[0]) * float(srow[2]))  # area * dwellings/sqm
        if srow[1] <> None:
            density = density + float(srow[1])
            cnt = cnt + 1
    # Average density over fragments with a non-null value.
    if cnt <> 0 :
        avg_density = float(density/cnt)
    else:
        avg_density = 0
    cursor.updateRow([avg_density,long(round(dwell,0))])
# Drop intermediates no longer needed.
arcpy.Delete_management(r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Parcels")
arcpy.Delete_management(r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Meshblocks2018")
arcpy.Delete_management(r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Property1")
arcpy.Delete_management(r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Property2")
arcpy.Delete_management(r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Area")
arcpy.Delete_management(r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Area1")
arcpy.Delete_management(r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Area3")
arcpy.Delete_management(r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Area4")
# Dissolve Agribase land use by farm ID and stock counts: first into single
# parts, then (within 10 km of the extent) back into multipart farms.
print "Processing Agribase Data ...."
arcpy.AddMessage("Processing Agribase Data ....")
arcpy.MakeFeatureLayer_management(r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Landuse","Land_Use")
arcpy.Dissolve_management("Land_Use",r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Landuse_Dissolve_Int","FARM_ID;BEF_NOS;CAM_NOS;DAI_NOS;DEE_NOS;EMU_NOS;GOAT_NOS;HORS_NOS;OSTR_NOS;PIGS_NOS;POU_NOS;SHP_NOS","",
                          "SINGLE_PART")
arcpy.MakeFeatureLayer_management(r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Landuse_Dissolve_Int","Land_Use2")
arcpy.SelectLayerByLocation_management("Land_Use2","INTERSECT","Extent","10 Kilometers","NEW_SELECTION")
arcpy.CopyFeatures_management("Land_Use2",r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Landuse_Dissolve_Int2")
arcpy.Dissolve_management(r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Landuse_Dissolve_Int2",r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Landuse_Dissolve",
                          "FARM_ID;BEF_NOS;CAM_NOS;DAI_NOS;DEE_NOS;EMU_NOS;GOAT_NOS;HORS_NOS;OSTR_NOS;PIGS_NOS;POU_NOS;SHP_NOS","","MULTI_PART")
arcpy.Delete_management("Land_Use")
arcpy.Delete_management("Land_Use2")
arcpy.Delete_management(r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Landuse")
arcpy.Delete_management(r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Landuse_Dissolve_Int")
arcpy.Delete_management(r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Landuse_Dissolve_Int2")
# For each farm, convert head counts into animals-per-square-metre ratios by
# totalling all parts of the farm. Duplicate FARM_ID rows (farms split over
# several parcels) get zero ratios so their animals are not counted twice.
print "Calculating Weighted Animal Square Meter Ratio...."
arcpy.AddMessage("Calculating Weighted Animal Square Meter Ratio....")
arcpy.MakeFeatureLayer_management(r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Landuse_Dissolve","Landuse_Dis")
arcpy.MakeFeatureLayer_management(r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Landuse_Dissolve","Landuse_Dis2")
arcpy.AddField_management("Landuse_Dis","BEF_SQM","Float")
arcpy.AddField_management("Landuse_Dis","DEE_SQM","Float")
arcpy.AddField_management("Landuse_Dis","DAI_SQM","Float")
arcpy.AddField_management("Landuse_Dis","SHP_SQM","Float")
arcpy.AddField_management("Landuse_Dis","PIG_SQM","Float")
arcpy.AddField_management("Landuse_Dis","GOAT_SQM","Float")
arcpy.AddField_management("Landuse_Dis","HORS_SQM","Float")
arcpy.AddField_management("Landuse_Dis","EMU_SQM","Float")
arcpy.AddField_management("Landuse_Dis","OSTR_SQM","Float")
arcpy.AddField_management("Landuse_Dis","CAM_SQM","Float")
arcpy.AddField_management("Landuse_Dis","POU_SQM","Float")
cursor = arcpy.da.UpdateCursor("Landuse_Dis",["FARM_ID","BEF_SQM","DEE_SQM","DAI_SQM","SHP_SQM","PIG_SQM","GOAT_SQM","HORS_SQM","EMU_SQM","OSTR_SQM","CAM_SQM","POU_SQM"])
previd = None
for row in cursor:
    if str(row[0]) <> previd: #Removes Duplicate Farm Properties
        previd2 = None
        area = 0.0
        befno = 0.0
        deeno = 0.0
        daino = 0.0
        shpno = 0.0
        pigno = 0.0
        goatno = 0.0
        horsno = 0.0
        emuno = 0.0
        ostno = 0.0
        camno = 0.0
        pouno = 0.0
        # Sum area and head counts over every part of this farm.
        for srow in arcpy.da.SearchCursor("Landuse_Dis2",["SHAPE@AREA","BEF_NOS","DEE_NOS","DAI_NOS","SHP_NOS","PIGS_NOS","GOAT_NOS","HORS_NOS","EMU_NOS","OSTR_NOS","CAM_NOS","POU_NOS","FARM_ID"],"FARM_ID = '" + row[0] + "'"):
            if str(srow[12]) <> previd2: #Removes Duplicate Farm Properties when searching
                area = float(area + srow[0])
                befno = float(befno + srow[1])
                deeno = float(deeno + srow[2])
                daino = float(daino + srow[3])
                shpno = float(shpno + srow[4])
                pigno = float(pigno + srow[5])
                goatno = float(goatno + srow[6])
                horsno = float(horsno + srow[7])
                emuno = float(emuno + srow[8])
                ostno = float(ostno + srow[9])
                camno = float(camno + srow[10])
                pouno = float(pouno + srow[11])
                previd2 = str(srow[12])
        # NOTE(review): assumes area > 0 -- at least one matching row exists
        # (the row itself) and polygon areas are positive; a degenerate
        # zero-area farm would raise ZeroDivisionError here.
        cursor.updateRow([row[0],float(befno/area),float(deeno/area),float(daino/area),float(shpno/area),float(pigno/area),float(goatno/area),float(horsno/area),float(emuno/area),float(ostno/area),float(camno/area),float(pouno/area)])
    else:
        cursor.updateRow([row[0],0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
    previd = str(row[0])
# Intersect the farm ratios with the non-reticulated area, then estimate the
# number of each animal type in the catchment as sum(area * animals/sqm).
arcpy.Intersect_analysis ([["Non_Reticulated_Area",1],["Landuse_Dis",2]], r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Landuse")
arcpy.AddField_management("Extent","COW_NO","Long")
arcpy.AddField_management("Extent","DEER_NO","Long")
arcpy.AddField_management("Extent","DAIRY_NO","Long")
arcpy.AddField_management("Extent","SHEEP_NO","Long")
arcpy.AddField_management("Extent","PIGS_NO","Long")
arcpy.AddField_management("Extent","GOAT_NO","Long")
arcpy.AddField_management("Extent","HORS_NO","Long")
arcpy.AddField_management("Extent","EMU_NO","Long")
arcpy.AddField_management("Extent","OSTR_NO","Long")
arcpy.AddField_management("Extent","CAM_NO","Long")
arcpy.AddField_management("Extent","POU_NO","Long")
arcpy.AddField_management("Extent","WATER_USE_M3","Float")
cursor = arcpy.da.UpdateCursor("Extent",["COW_NO","DEER_NO","DAIRY_NO","SHEEP_NO","PIGS_NO","GOAT_NO","HORS_NO","EMU_NO","OSTR_NO","CAM_NO","POU_NO"])
arcpy.MakeFeatureLayer_management(r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb\Landuse","Blocks")
for row in cursor:
    cow = 0
    deer = 0
    dairy = 0
    sheep = 0
    pig = 0
    goat = 0
    horse = 0
    emu = 0
    ostrich = 0
    camelid = 0
    poultry = 0
    previd = None
    for srow in arcpy.da.SearchCursor("Blocks",["SHAPE@AREA","BEF_SQM","DEE_SQM","DAI_SQM","SHP_SQM","PIG_SQM","GOAT_SQM","HORS_SQM","EMU_SQM","OSTR_SQM","CAM_SQM","POU_SQM","FARM_ID"]):
        if str(srow[12]) <> previd: #Reason: Duplicity occurs when farm is broken up into more than one parcel of land.
            # area inside catchment * animals-per-sqm -> animals inside catchment
            cow = cow + float(srow[0]) * float(srow[1])
            deer = deer + float(srow[0]) * float(srow[2])
            dairy = dairy + float(srow[0]) * float(srow[3])
            sheep = sheep + float(srow[0]) * float(srow[4])
            pig = pig + float(srow[0]) * float(srow[5])
            goat = goat + float(srow[0]) * float(srow[6])
            horse = horse + float(srow[0]) * float(srow[7])
            emu = emu + float(srow[0]) * float(srow[8])
            ostrich = ostrich + float(srow[0]) * float(srow[9])
            camelid = camelid + float(srow[0]) * float(srow[10])
            poultry = poultry + float(srow[0]) * float(srow[11])
            previd = str(srow[12])
    cursor.updateRow([cow,deer,dairy,sheep,pig,goat,horse,emu,ostrich,camelid,poultry])
print "Calculating Water Use per Catchment...."
arcpy.AddMessage("Calculating Water Use per Catchment....")
cursor = arcpy.da.UpdateCursor("Extent",["COW_NO","DAIRY_NO","DEER_NO","SHEEP_NO","PIGS_NO","GOAT_NO","HORS_NO","EMU_NO","OSTR_NO","CAM_NO","POU_NO",
"HUMAN_NO","DENSITY","PROPERTY_NO","WATER_USE_M3"])
for row in cursor:
use = (float((row[13] * row[12]) * DOMWU) + float(row[1] * DCWU) + float(row[0] * BFWU) + float(row[2] * DRWU) + float(row[3] * SHWU) +
float(row[4] * PGWU) + float(row[5] * GTWU) + float(row[6] * HRWU) + float(row[7] * EUWU) + float(row[8] * OSWU) + float(row[9] * CMWU) + float(row[10] * POWU) + leakage)
cursor.updateRow([row[0],row[1],row[2],row[3],row[4],row[5],row[6],row[7],row[8],row[9],row[10],long(float(row[12]) * float(row[13])),row[12],row[13],float(use * 0.001)])
print "Exporting Results..."
arcpy.AddMessage("Exporting Results...")
arcpy.Copy_management(r"\\fs02\ManagedShares2\Data\Surface Water\shared\permitted_takes\Output\Hydrology\Permitted Takes\Scratch.gdb",output + "\\Permitted Takes Output " + str(strftime("%Y%m%d%H%M",localtime())) + ".gdb")
entry = open(output + "\\Permitted Takes Output " + str(strftime("%Y%m%d%H%M",localtime())) + ".csv","w")
entry.write("Estimated Population not on a Reticulated System or Stock Water Network\n")
entry.write("\n")
entry.write("User,Est. Population,Factor (Ltrs/head/day),Est. Total Water Use (m3/day)\n")
vals = [[row[0],row[1],row[2],row[3],row[4],row[5],row[6],row[7],row[8],row[9],row[10],row[11],row[12],row[13],row[14]] for row in
arcpy.da.SearchCursor("Extent",["COW_NO","DAIRY_NO","DEER_NO","SHEEP_NO","PIGS_NO","GOAT_NO","HORS_NO","EMU_NO","OSTR_NO","CAM_NO","POU_NO","HUMAN_NO",
"DENSITY","PROPERTY_NO","WATER_USE_M3"])]
if (len(vals)) == 0:
val = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
else:
val = vals[0]
entry.write("Humans*,"+ str(val[11]) + "," + str(DOMWU) + "," + str(float(((val[13] * val[12]) * DOMWU)* 0.001)) + "\n")
entry.write("Dairy Cattle," + str(val[1]) + "," + str(DCWU) + "," + str(float((val[1] * DCWU) * 0.001)) + "\n")
entry.write("Beef Cattle," + str(val[0]) + "," + str(BFWU) + "," + str(float((val[0] * BFWU) * 0.001)) + "\n")
entry.write("Sheep," + str(val[3]) + "," + str(SHWU) + "," + str(float((val[3] * SHWU) * 0.001)) + "\n")
entry.write("Deer," + str(val[2]) + "," + str(DRWU) + "," + str(float((val[2] * DRWU) * 0.001)) + "\n")
entry.write("Pigs," + str(val[4]) + "," + str(PGWU) + "," + str(float((val[4] * PGWU) * 0.001)) + "\n")
entry.write("Goats," + str(val[5]) + "," + str(GTWU) + "," + str(float((val[5] * GTWU) * 0.001)) + "\n")
entry.write("Horses," + str(val[6]) + "," + str(HRWU) + "," + str(float((val[6] * HRWU) * 0.001)) + "\n")
entry.write("Ostriches and Emus," + str(long(val[7] + val[8])) + "," + str(OSWU) + "," + str(float((val[7] * EUWU) * 0.001) + float((val[8] * OSWU) * 0.001)) + "\n")
entry.write("Alpacas and Llamas," + str(val[9]) + "," + str(CMWU) + "," + str(float((val[9] * CMWU) * 0.001)) + "\n")
entry.write("Poultry," + str(val[10]) + "," + str(POWU) + "," + str(float((val[10] * POWU) * 0.001)) + "\n")
entry.write("\n")
entry.write("Leakage,,," + str(leakage) + "\n")
entry.write("\n")
entry.write("TOTAL,,," + str(val[14]) + "\n")
entry.write("\n")
entry.write("* Based on average | |
Parsnips, cooked, boiled, drained, without salt
11300: ["Pea"], # Peas, edible-podded, raw
11301: [], # Peas, edible-podded, boiled, drained, without salt
11302: [], # Peas, edible-podded, frozen, unprepared
11303: [], # Peas, edible-podded, frozen, cooked, boiled, drained, without salt
11304: ["Pea", "green"], # Peas, green, raw
11305: [], # Peas, green, cooked, boiled, drained, without salt
11306: [], # Peas, green, canned, regular pack, solids and liquids
11308: [], # Peas, green (includes baby and lesuer types), canned, drained solids, unprepared
11310: [], # Peas, green, canned, seasoned, solids and liquids
11311: [], # Peas, green, canned, drained solids, rinsed in tap water
11312: [], # Peas, green, frozen, unprepared (Includes foods for USDA's Food Distribution Program)
11313: [], # Peas, green, frozen, cooked, boiled, drained, without salt
11316: [], # Peas, mature seeds, sprouted, raw
11317: [], # Peas, mature seeds, sprouted, cooked, boiled, drained, without salt
11318: [], # Peas and carrots, canned, regular pack, solids and liquids
11322: [], # Peas and carrots, frozen, unprepared
11323: [], # Peas and carrots, frozen, cooked, boiled, drained, without salt
11324: [], # Peas and onions, canned, solids and liquids
11326: [], # Peas and onions, frozen, unprepared
11327: [], # Peas and onions, frozen, cooked, boiled, drained, without salt
11329: [], # Peppers, hot chili, green, canned, pods, excluding seeds, solids and liquids
11333: ["Green pepper", "sweet"], # Peppers, sweet, green, raw
11334: [], # Peppers, sweet, green, cooked, boiled, drained, without salt
11335: [], # Peppers, sweet, green, canned, solids and liquids
11337: [], # Peppers, sweet, green, frozen, chopped, unprepared
11338: [], # Peppers, sweet, green, frozen, chopped, boiled, drained, without salt
11339: [], # Peppers, sweet, green, sauteed
11344: ["Pigeon pea"], # Pigeonpeas, immature seeds, raw
11345: [], # Pigeonpeas, immature seeds, cooked, boiled, drained, without salt
11349: ["Poi"], # Poi
11350: ["Pokeberry shoot", "", "Poke"], # Pokeberry shoots, (poke), raw
11351: [], # Pokeberry shoots, (poke), cooked, boiled, drained, without salt
11352: ["Potato"], # Potatoes, flesh and skin, raw
11353: [
"Potato",
"russet",
], # Potatoes, russet, flesh and skin, raw (Includes foods for USDA's Food Distribution Program)
11354: ["Potato", "white"], # Potatoes, white, flesh and skin, raw
11355: ["Potato", "red"], # Potatoes, red, flesh and skin, raw
11356: [], # Potatoes, Russet, flesh and skin, baked
11357: [], # Potatoes, white, flesh and skin, baked
11358: [], # Potatoes, red, flesh and skin, baked
11359: [], # Potatoes, french fried, crinkle or regular cut, salt added in processing, frozen, as purchased
11360: [], # Potatoes, french fried, crinkle or regular cut, salt added in processing, frozen, oven-heated
11361: [], # Potatoes, roasted, salt added in processing, frozen, unprepared
11362: [], # Potatoes, raw, skin
11363: [], # Potatoes, baked, flesh, without salt
11364: [], # Potatoes, baked, skin, without salt
11365: [], # Potatoes, boiled, cooked in skin, flesh, without salt
11366: [], # Potatoes, boiled, cooked in skin, skin, without salt
11367: [], # Potatoes, boiled, cooked without skin, flesh, without salt
11368: [], # Potatoes, microwaved, cooked in skin, flesh, without salt
11369: [], # Potatoes, microwaved, cooked in skin, skin, without salt
11370: [], # Potatoes, hash brown, home-prepared
11371: [], # Potatoes, mashed, home-prepared, whole milk and margarine added
11372: [], # Potatoes, scalloped, home-prepared with butter
11373: [], # Potatoes, au gratin, home-prepared from recipe using butter
11374: [], # Potatoes, canned, solids and liquids
11376: [], # Potatoes, canned, drained solids
11378: [], # Potatoes, mashed, dehydrated, flakes without milk, dry form
11379: [], # Potatoes, mashed, dehydrated, prepared from flakes without milk, whole milk and butter added
11380: [], # Potatoes, mashed, dehydrated, granules without milk, dry form
11381: [], # Potatoes, mashed, dehydrated, prepared from granules without milk, whole milk and butter added
11382: [], # Potatoes, mashed, dehydrated, granules with milk, dry form
11383: [], # Potatoes, mashed, dehydrated, prepared from granules with milk, water and margarine added
11384: [], # Potatoes, au gratin, dry mix, unprepared
11385: [], # Potatoes, au gratin, dry mix, prepared with water, whole milk and butter
11386: [], # Potatoes, scalloped, dry mix, unprepared
11387: [], # Potatoes, scalloped, dry mix, prepared with water, whole milk and butter
11390: [], # Potatoes, hash brown, frozen, plain, unprepared
11391: [], # Potatoes, hash brown, frozen, plain, prepared, pan fried in canola oil
11392: [], # Potatoes, hash brown, frozen, with butter sauce, unprepared
11393: [], # Potatoes, hash brown, frozen, with butter sauce, prepared
11394: [], # Potatoes, french fried, shoestring, salt added in processing, frozen, as purchased
11395: [], # Potatoes, french fried, shoestring, salt added in processing, frozen, oven-heated
11396: [], # Potatoes, o'brien, frozen, unprepared
11397: [], # Potatoes, o'brien, frozen, prepared
11398: [], # Potato puffs, frozen, unprepared
11399: [], # Potato puffs, frozen, oven-heated
11401: [], # Potatoes, frozen, whole, cooked, boiled, drained, without salt
11402: [], # Potatoes, french fried, all types, salt added in processing, frozen, unprepared
11403: [], # Potatoes, french fried, all types, salt added in processing, frozen, home-prepared, oven heated
11406: [], # Potatoes, french fried, cottage-cut, salt not added in processing, frozen, as purchased
11407: [], # Potatoes, french fried, cottage-cut, salt not added in processing, frozen, oven-heated
11408: [], # Potatoes, frozen, french fried, par fried, extruded, unprepared
11409: [], # Potatoes, frozen, french fried, par fried, extruded, prepared, heated in oven, without salt
11410: [], # Potato wedges, frozen (Includes foods for USDA's Food Distribution Program)
11411: [], # Potatoes, french fried, steak fries, salt added in processing, frozen, as purchased
11412: [], # Potatoes, french fried, steak fries, salt added in processing, frozen, oven-heated
11413: [], # Potato flour
11414: [], # Potato salad, home-prepared
11416: ["Pumpkin flower"], # Pumpkin flowers, raw
11417: [], # Pumpkin flowers, cooked, boiled, drained, without salt
11418: ["Pumpkin leaf"], # Pumpkin leaves, raw
11419: [], # Pumpkin leaves, cooked, boiled, drained, without salt
11422: ["Pumpkin"], # Pumpkin, raw
11423: [], # Pumpkin, cooked, boiled, drained, without salt
11424: [], # Pumpkin, canned, without salt
11426: [], # Pumpkin pie mix, canned
11427: ["Purslane"], # Purslane, raw
11428: [], # Purslane, cooked, boiled, drained, without salt
11429: ["Radish"], # Radishes, raw
11430: ["Radish", "oriental"], # Radishes, oriental, raw
11431: [], # Radishes, oriental, cooked, boiled, drained, without salt
11432: [], # Radishes, oriental, dried
11435: ["Rutabaga"], # Rutabagas, raw
11436: [], # Rutabagas, cooked, boiled, drained, without salt
11437: ["Salsify", "", "Vegetable oyster"], # Salsify, (vegetable oyster), raw
11438: [], # Salsify, cooked, boiled, drained, without salt
11439: [], # Sauerkraut, canned, solids and liquids
11442: ["Seaweed", "agar"], # Seaweed, agar, raw
11444: ["Seaweed", "irishmoss"], # Seaweed, irishmoss, raw
11445: ["Seaweed", "kelp"], # Seaweed, kelp, raw
11446: ["Seaweed", "laver"], # Seaweed, laver, raw
11447: ["Sesbania flower"], # Sesbania flower, raw
11448: [], # Sesbania flower, cooked, steamed, without salt
11450: ["Soybean", "green"], # Soybeans, green, raw
11451: [], # Soybeans, green, cooked, boiled, drained, without salt
11452: [], # Soybeans, mature seeds, sprouted, raw
11453: [], # Soybeans, mature seeds, sprouted, cooked, steamed
11454: [], # Soybeans, mature seeds, sprouted, cooked, stir-fried
11457: ["Spinach"], # Spinach, raw
11458: [], # Spinach, cooked, boiled, drained, without salt
11459: [], # Spinach, canned, regular pack, solids and liquids
11461: [], # Spinach, canned, regular pack, drained solids
11463: [
"Spinach",
"frozen",
], # Spinach, frozen, chopped or leaf, unprepared (Includes foods for USDA's Food Distribution Program)
11464: [], # Spinach, frozen, chopped or leaf, cooked, boiled, drained, without salt
11467: [
"Squash",
"summer crookneck straightneck",
], # Squash, summer, crookneck and straightneck, raw
11468: [], # Squash, summer, crookneck and straightneck, cooked, boiled, drained, without salt
11471: [], # Squash, summer, crookneck and | |
<reponame>raychorn/svn_Python-2.5.1
"""numarray: The big enchilada numeric module
"""
import sys as _sys
import types, math, os.path
import operator as _operator
import copy as _copy
import warnings as _warnings
from math import pi, e
import memory
import generic as _gen
import _bytes
import _numarray
import _ufunc
import _sort
import numerictypes as _nt
import numinclude as _numinc
import dtype as _dtype
# Module-level configuration and scalar-type bookkeeping for numarray.
_PROTOTYPE = 0 # Set to 1 to switch to Python prototype code.
# Set to 0 to inherit C code from C basetype.
# rename built-in function type so not to conflict with keyword
_type = type
# Global array-printing options; None means "use module defaults".
MAX_LINE_WIDTH = None
PRECISION = None
SUPPRESS_SMALL = None
# Python scalar types accepted as integers (Python 2: bool/int/long).
PyINT_TYPES = {
    bool: 1,
    int: 1,
    long: 1,
}
# Python scalar types accepted as reals.
PyREAL_TYPES = {
    bool: 1,
    int: 1,
    long: 1,
    float: 1,
}
# Python numeric types with values indicating level in type hierarchy
PyNUMERIC_TYPES = {
    bool: 0,
    int: 1,
    long: 2,
    float: 3,
    complex: 4
}
# Mapping back from level to type
PyLevel2Type = {}
for key, value in PyNUMERIC_TYPES.items():
    PyLevel2Type[value] = key
del key, value
# Mapping from Python to Numeric types
Py2NumType = {
    bool: _nt.Bool,
    int: _nt.Long,
    long: _nt.Int64,
    float: _nt.Float,
    complex: _nt.Complex
}
# numarray type object -> array-interface typestr suffix ('<kind><bytes>').
_numfmt_to_typestr = {_nt.Int8:'i1', _nt.UInt8:'u1',
                      _nt.Int16:'i2', _nt.UInt16:'u2',
                      _nt.Int32:'i4', _nt.UInt32:'u4',
                      _nt.Int64:'i8', _nt.UInt64:'u8',
                      _nt.Float32:'f4', _nt.Float64:'f8',
                      _nt.Complex32:'c8', _nt.Complex64:'c16',
                      _nt.Bool:'b1'}
class EarlyEOFError(Exception):
    """fromfile() hit end-of-file before the expected data was read."""
class SizeMismatchError(Exception):
    """fromfile() found a file whose size does not match the shape."""
class SizeMismatchWarning(Warning):
    """Non-fatal variant of SizeMismatchError, issued by fromfile()."""
class FileSeekWarning(Warning):
    """Issued by fromfile() when unused data remains and seek() fails."""
## fromfile constants
_BLOCKSIZE=1024  # bytes processed per chunk when streaming from a file
# Policies for handling a file-size / shape mismatch in fromfile().
STRICT,SLOPPY,WARN=range(3)
def array2list(arr):
    """Return *arr* converted to (nested) Python lists via tolist()."""
    return arr.tolist()
# array factory functions
def _all_arrays(args):
for x in args:
if not isinstance(x, NumArray):
return 0
return len(args) > 0
def _maxtype(args):
    """Return the Python scalar type able to hold any element of `args`.

    `args` may be a NumArray, a sequence of NumArrays, or a sequence of
    Python scalars.  An exception is raised (by _numarray._maxtype) if
    the values are not python numeric types.
    """
    if not len(args):
        return None
    elif isinstance(args, NumArray):
        return args.type()
    elif _all_arrays(args):
        # Promote across all the arrays, then map the numarray type back
        # onto the corresponding Python scalar type.
        temp = args[0].type()
        for x in args[1:]:
            if temp < x.type():
                temp = x.type()
        if isinstance(temp, _nt.BooleanType):
            return bool
        elif isinstance(temp, _nt.IntegralType):
            return int
        elif isinstance(temp, _nt.FloatingType):
            return float
        elif isinstance(temp, _nt.ComplexType):
            return complex
        # NOTE(review): a type matching none of the categories above
        # falls through and implicitly returns None.
    else:
        # Mixed/scalar case: let the C helper rank the Python scalars.
        return PyLevel2Type[_numarray._maxtype(args)]
def _storePyValueInBuffer(buffer, Ctype, index, value):
"""Store a python value in a buffer, index is in element units, not bytes"""
# Do not use for complex scalars!
Ctype._conv.fromPyValue(value, buffer._data,
index*Ctype.bytes, Ctype.bytes, 0)
def _storePyValueListInBuffer(buffer, Ctype, valuelist):
    """Store each element of *valuelist* in consecutive buffer slots.

    Not valid for complex values.
    """
    for position, value in enumerate(valuelist):
        _storePyValueInBuffer(buffer, Ctype, position, value)
def _fillarray(size, start, delta, type=None):
    """Return a rank-1 array of `size` elements: start, start+delta, ...

    When `type` is given it fixes the output type; otherwise it is
    inferred from the Python types of `start` and `delta`.  The fill runs
    in C over a "parameter" buffer, converting to the output type in
    blocks when the two types differ.
    """
    # Infer the parameter (computation) type from the scalars.
    ptype = _maxtype((start, delta))
    if ptype == long:
        ptype = _nt.Int64
    elif PyINT_TYPES.has_key(ptype):
        ptype = _nt.Long
    elif PyREAL_TYPES.has_key(ptype):
        ptype = _nt.Float
    else:
        ptype = _nt.Complex
    if type:
        outtype = _nt.getType(type)
        # A complex fill cannot be narrowed to a non-complex output.
        if (isinstance(ptype, _nt.ComplexType)
            and not isinstance( outtype, _nt.ComplexType)):
            raise TypeError("outtype must be a complex type")
    else:
        outtype = ptype
    if outtype > ptype: # Hack for Int64/UInt64 on 32-bit platforms.
        ptype = outtype
    if isinstance(outtype, _nt.ComplexType):
        # Build the real and imaginary parts separately, then combine.
        # Not memory efficient at the moment
        real = _fillarray(size, complex(start).real, complex(delta).real,
                          type = _realtype(ptype))
        image = _fillarray(size, complex(start).imag, complex(delta).imag,
                           type = _realtype(ptype))
        outarr = NumArray((size,), outtype, real=real, imag=image)
    else:
        # save parameters in a buffer
        parbuffer = ufunc._bufferPool.getBuffer()
        _storePyValueListInBuffer(parbuffer, ptype, [start, delta])
        cfunction = _sort.functionDict[repr((ptype.name, 'fillarray'))]
        outarr = NumArray((size,), outtype)
        if ptype == outtype:
            # no conversion necessary, simple case
            _ufunc.CheckFPErrors()
            cfunction(size, 1, 1, ((outarr._data, 0), (parbuffer._data, 0)))
            errorstatus = _ufunc.CheckFPErrors()
            if errorstatus:
                ufunc.handleError(errorstatus, " in fillarray")
        else:
            # use buffer loop
            convbuffer = ufunc._bufferPool.getBuffer()
            convfunction = ptype._conv.astype[outtype.name]
            # Integer (floor) division under Python 2: elements per block.
            bsize = len(convbuffer._data)/ptype.bytes
            iters, lastbsize = divmod(size, bsize)
            _ufunc.CheckFPErrors()
            outoff = 0
            # Fill a scratch buffer in the parameter type, convert each
            # block into the output array, then advance `start` by one
            # block's worth and refresh the parameter buffer.
            for i in xrange(iters + (lastbsize>0)):
                if i == iters:
                    bsize = lastbsize
                cfunction(bsize, 1, 1,
                          ((convbuffer._data, 0), (parbuffer._data, 0)))
                convfunction(bsize, 1, 1,
                             ((convbuffer._data, 0), (outarr._data, outoff)))
                outoff += bsize*outtype.bytes
                start += delta * bsize
                _storePyValueListInBuffer(parbuffer, ptype, [start, delta])
            errorstatus = _ufunc.CheckFPErrors()
            if errorstatus:
                ufunc.handleError(errorstatus, " in fillarray")
    return outarr
def _frontseqshape(seq):
    """Find the length of all the first elements, return as a list.

    Walks seq[0][0]... collecting len() at each nesting level.  Note the
    trivial cases return a tuple ((0,) for empty, (len,) for strings)
    while the general case returns a list.
    """
    if not len(seq):
        return (0,)
    if isinstance(seq, str):
        return (len(seq),)
    try:
        shape = []
        while 1:
            shape.append(len(seq))
            try:
                seq = seq[0]
                # Strings are leaves: don't descend into characters.
                if isinstance(seq, str):
                    return shape
            except IndexError:
                # Empty subsequence terminates the walk.
                return shape
    except TypeError:
        # len(seq) failed: reached a scalar, the shape is complete.
        return shape
    except ValueError:
        # Rank-0 NumArrays raise ValueError from len(); treat as a leaf.
        if isinstance(seq, NumArray) and seq.rank == 0:
            return shape
        # NOTE(review): any other ValueError falls through and the
        # function implicitly returns None.
def fromlist(seq, type=None, shape=None, check_overflow=0, typecode=None, dtype=None):
    """fromlist creates a NumArray from the sequence 'seq' which must be
    a list or tuple of python numeric types. If type is specified, it
    is used as the type of the resulting NumArray. If shape is
    specified, it becomes the shape of the result and must have the
    same number of elements as seq.
    """
    # Merge the three equivalent type keywords (type/typecode/dtype).
    type = _nt._typeFromKeywords(type, typecode, dtype)
    # Fast path: already an NDArray -- copy, then convert/reshape.
    if isinstance(seq, _gen.NDArray):
        arr = seq.copy()
        # NOTE(review): the 'is not' identity comparisons below can force
        # a redundant astype()/reshape for equal-but-distinct objects;
        # harmless, but confirm before tightening to '!='.
        if arr._type is not type:
            arr = arr.astype(type)
        if shape is not None and arr._shape is not shape:
            arr.shape = shape
        return arr
    # Empty sequence with no requested type: default to Long.
    if not len(seq) and type is None:
        type = _nt.Long
    if type is None:
        highest_type = _maxtype(seq)
    # Shape implied by the nesting structure of seq.
    tshape = _frontseqshape(seq)
    if shape is not None and _gen.product(shape) != _gen.product(tshape):
        raise ValueError("shape incompatible with sequence")
    ndim = len(tshape)
    if ndim <= 0:
        raise TypeError("Argument must be a sequence")
    if type is None:
        type = Py2NumType.get(highest_type)
    if type is None:
        raise TypeError("Cannot create array of type %s" % highest_type.__name__)
    tshape = tuple(tshape)
    arr = NumArray(shape=tshape, type=type)
    # Overflow checking is enabled only for the duration of the fill.
    arr._check_overflow = check_overflow
    arr.fromlist(seq)
    arr._check_overflow = 0
    if shape is not None:
        arr.setshape(shape)
    return arr
def getTypeObject(sequence, type):
    """getTypeObject computes the typeObject for 'sequence' if 'type' is
    unspecified.  Otherwise, it returns the typeObject specified by
    'type'.
    """
    if type is not None:
        return type
    elif isinstance(sequence, NumArray): # handle array([])
        return sequence.type()
    elif hasattr(sequence, "typecode"): # for Numeric/MA
        return sequence.typecode()
    elif (isinstance(sequence, (types.ListType, types.TupleType)) and
          len(sequence) == 0):
        # Empty list/tuple defaults to Long.
        return _nt.Long
    else:
        # Wrap bare scalars so _maxtype receives a sequence.
        if isinstance(sequence, (types.IntType, types.LongType,
                                 types.FloatType, types.ComplexType)):
            sequence = [sequence]
        try:
            return Py2NumType[ _maxtype(sequence) ]
        except KeyError:
            raise TypeError("Can't determine a reasonable type from sequence")
def array(sequence=None, typecode=None, copy=True, savespace=False,
type=None, shape=None, dtype=None):
"""Attempt to convert SEQUENCE to an array of specified TYPE and SHAPE.
SEQUENCE:
Data sequence. Can be None, an object with an __array__ method,
a NumArray, a buffer, a string, a (non-string) sequence, a file
or a number. Unicode objects are currently not handled.
If sequence is None and shape is None the result is None.
TYPECODE, TYPE:
Specify only one of TYPE or TYPECODE; they are equivalent. TYPE
may be a string, in which it case it must be a Numeric or
numarray typecode, or it may be a numarray type instance. If
TYPE is None, an attempt is made to infer a type from SEQUENCE; if
this cannot be done, the type is usually set to Int32, or Int8
in the case of strings.
COPY:
If False, an attempt is made to not copy data.
SHAPE:
None, an integer or a sequence of integers specifying the shape
of the returned NumArray. If SHAPE is None, the shape is
inferred from SEQUENCE.
SAVESPACE:
Ignored; for Numeric compatibility.
See the numarray docs for a complete description of this
function's behaviour.
"""
type=_nt._typeFromKeywords(type,typecode,dtype)
if not isinstance(sequence, _gen.NDArray):
a = None
if hasattr(sequence, "__array_struct__"):
a = _numarray._array_from_array_struct(sequence)
elif (hasattr(sequence, "__array_shape__") and
hasattr(sequence, "__array_typestr__")):
typestr = sequence.__array_typestr__
if typestr[0] == "<":
endian = "little"
elif typestr[0] == ">":
endian = "big"
else:
raise ValueError("Invalid __array_typestr__:", )
offset = getattr(sequence, "__array_offset__", 0)
a = NumArray(buffer=sequence,
shape=shape or sequence.__array_shape__,
type=typestr[1:],
byteoffset=offset,
byteorder=endian)
strides = getattr(sequence, "__array_strides__", a._strides)
if strides is not None:
a._strides = strides
if a is not None: # One of the array interfaces worked
if copy or (type is not None):
a = a.astype(type)
return a
if sequence is None and shape is None:
return None
if shape is not None and not isinstance(shape,tuple):
try:
shape=tuple(shape)
except TypeError:
shape=(shape,)
if sequence is None:
if type is None:
type=_nt.Int32
if shape is None:
shape=(0,)
return NumArray(buffer=sequence, shape=shape, type=type)
if hasattr(sequence,"__array__"):
a=sequence.__array__(type)
if not isinstance(a,NumArray):
##OK if the user knows what he/she/it is doing
_warnings.warn("__array__ returned non-NumArray instance")
if shape is not None:
##however, if the user specifies a shape, the returned
##instance must have a "setshape" method
a.setshape(shape)
return a
| |
<gh_stars>1-10
#!/usr/bin/env python3
import http.server
import http.cookies
import socketserver
import base64
import subprocess
import socket
import os
import json
import sys
import time
import uuid
import tempfile
import importlib
from urllib.parse import urlparse, parse_qs
sys.path.append('lib')
from amp import db, config
import amp.players
import amp.rpc.local
PORT = 6969  # TCP port the HTTP API server listens on
mode_registry = {}  # mode name -> Mode subclass; populated by ModeMeta
class ModeMeta(type):
    """Metaclass that auto-registers Mode subclasses in mode_registry.

    A subclass declaring a ``name`` class attribute (or a ``names``
    iterable) is registered under each of those keys.
    """

    def __new__(cls, clsname, bases, namespace):
        new_cls = super().__new__(cls, clsname, bases, namespace)
        keys = []
        if 'name' in namespace:
            keys.append(namespace['name'])
        keys.extend(namespace.get('names', ()))
        for key in keys:
            mode_registry[key] = new_cls
        return new_cls
class Mode(metaclass=ModeMeta):
    """Base class for API request handlers ("modes").

    Subclasses set a ``name`` (or ``names``) class attribute so ModeMeta
    registers them in ``mode_registry``, and override ``get`` to return a
    ``(http_status, payload)`` tuple.
    """

    def __init__(self, server, session):
        self.owner = server      # the serving application (db, players, rpc)
        self.session = session   # the requesting user's session

    def get(self, args):
        """Fallback handler for modes that never overrode get()."""
        # Fixed typo in the error message: "by not" -> "but not".
        return (400, {"api_error": "Internal API error: You have requested a mode which was initialized but not implemented by the server."})
class ModeStatus(Mode):
    """Report the session's player selection, queue, and permissions."""

    name = "status"

    def get(self, args):
        player_id = self.session._player
        status = {
            "selected_player": player_id,
            "players": self.owner.players,
            "player": self.session.player(),
            "now_playing": self.session.currentSong(),
            "playlist": self.owner.db.PlayerQueue(player_id),
            "who": self.session.user(),
            "can_skip": self.session.can_skip(),
            "is_admin": self.session.is_admin(),
        }
        return (200, status)
class ModeGlobalStatus(Mode):
    """Summarise every known player and the song it is playing."""

    name = "global_status"

    def get(self, args):
        who = self.session.user()
        names = self.owner.players
        db = self.owner.db
        players = {}
        for player_id in names:
            rows = db.SELECT("players", {"player_id": player_id})
            if not rows:
                continue
            info = rows[0]
            song = db.SELECT("songs", {"song_id": info["song_id"]})[0]
            players[player_id] = {"info": info, "song": song}
        return (200, {
            "who": who,
            "player_names": names,
            "players": players,
        })
class ModeSearch(Mode):
    """Paged full search over the song library."""

    name = "paged_search"

    def get(self, args):
        if "value" not in args:
            return (400, {"api_error": "Search requests require a 'value' argument."})
        limit = args.get("limit", 10)
        offset = args.get("offset", 0)
        return (200, self.owner.db.Search(args["value"], limit, offset))
class ModeQuickSearch(Mode):
    """Fast, limited search used for type-ahead suggestions."""

    name = "quick_search"

    def get(self, args):
        if "q" not in args:
            # Fixed wording of the error ("results" -> "requires").
            return (400, {"api_error": "Quick search requires a 'q' argument."})
        limit = args.get("limit", 10)
        return (200, self.owner.db.QuickSearch(args["q"], limit))
class ModeRandom(Mode):
    """Return a random selection of songs (default 10)."""

    name = "random"

    def get(self, args):
        count = args.get("amount", 10)
        return (200, self.owner.db.Random(count))
class ModeRecent(Mode):
    """Return the most recently added songs (default 10)."""

    name = "recent"

    def get(self, args):
        count = args.get("amount", 10)
        return (200, self.owner.db.Recent(count))
class ModeHistory(Mode):
    """Play history, optionally filtered by voter.

    Adjacent rows for the same song played at the same time are collapsed
    into one entry whose 'who' field lists every voter.
    """

    name = "history"

    def get(self, args):
        limit = args.get("amount", 10)
        voter = args.get("voter")
        rows = self.owner.db.History(voter=voter, limit=limit)
        history = []
        for song in rows:
            if (history and song["song_id"] == history[-1]["song_id"]
                    and history[-1]["time"] == song["time"]):
                # Same song, same play time: merge this voter in.
                history[-1]["who"].append(song["who"])
            else:
                song["who"] = [song["who"]]
                history.append(song)
        # BUG FIX: previously returned the raw rows, discarding the merged
        # 'history' list the loop above built.
        return (200, history)
class ModeDetails(Mode):
    """Full record for one song plus the current voters on this player."""

    name = "get_details"

    def get(self, args):
        if "song_id" not in args:
            return (400, {"api_error": "get_details requires a 'song_id'"})
        db = self.owner.db
        song = db.SELECT("songs", {"song_id": args["song_id"]})[0]
        song["now"] = int(time.time())
        votes = db.SELECT(
            "votes", {"song_id": song["song_id"], "player_id": self.session._player})
        song["who"] = [vote['who'] for vote in votes]
        return (200, {"song": song})
class ModeArt(Mode):
    """Serve album art for a song, resized via ImageMagick ``convert``.

    Looks for a cover image beside the song file, falling back to a
    generic CD-case image.
    """
    name = "art"
    def get(self, args):
        if "song_id" not in args:
            return (400, {"api_error": "art requires a 'song_id'"})
        # Requested size defaults to 500 and is clamped to 1000.
        if "size" not in args:
            size = 500
        else:
            size = int(args["size"])
        if size > 1000:
            size = 1000
        obj = self.owner.db.SELECT("songs", {"song_id": args["song_id"]})[0]
        # Candidate cover filenames searched next to the audio file.
        possible = ["acoustics-art.png", "acoustics-art.jpg",
                    "cover.png", "cover.jpg", "Folder.png", "Folder.jpg"]
        path = "web/www-data/images/cd_case.png"
        for i in possible:
            fpath = os.path.join(os.path.dirname(obj["path"]), i)
            if os.path.exists(fpath):
                path = fpath
                break
        f = tempfile.NamedTemporaryFile()
        subprocess.call(
            ["convert", path, "-resize", "%dx%d" % (size, size), f.name])
        # NOTE(review): reads through the original handle after 'convert'
        # wrote to f.name -- assumes convert rewrites the same inode
        # rather than replacing the file; verify on this platform.
        filecontents = f.read()
        f.close()
        # NOTE(review): always labelled image/png, but convert may keep
        # the source format since f.name has no suffix -- confirm.
        return (200, filecontents, "image/png")
class ModeReorderQueue(Mode):
    """Re-prioritise the user's votes to match the given song order."""

    name = "reorder_queue"

    def get(self, args):
        if not self.session.user():
            return (500, {"auth_error": "You must login to vote for songs."})
        if "song_id" not in args:
            return ModeStatus.get(self, [])
        # Position in the ';'-separated list becomes the new priority.
        for priority, song_id in enumerate(args["song_id"].split(";")):
            self.owner.db.UpdateVote(
                song_id, self.session.user(), self.session._player, priority)
        return ModeStatus.get(self, [])
class ModeSelect(Mode):
    """Generic field/value lookup against the database."""

    name = "select"

    def get(self, args):
        for required in ("field", "value"):
            if required not in args:
                return (400, {"api_error": "select requires a '%s' argument." % required})
        return (200, self.owner.db.Select(args["field"], args["value"]))
class ModeChangePlayer(Mode):
    """Switch the session to a different known player."""

    name = "change_player"

    def get(self, args):
        if "player_id" not in args:
            return (400, {"api_error": "Player change requires a player to change to."})
        target = args["player_id"]
        if target not in self.owner.players:
            return (400, {"api_error": "Bad player id."})
        self.session._player = target
        return ModeStatus.get(self, args)
class ModeTopVoted(Mode):
    """Songs with the most votes overall (default 10)."""

    name = "top_voted"

    def get(self, args):
        limit = args.get("limit", 10)
        return (200, self.owner.db.TopVoted(limit))
class ModeAlbumSearch(Mode):
    """Search the library by album title."""

    name = "album_search"

    def get(self, args):
        if "album" not in args:
            return (400, {"api_error": "album_search requires an 'album' argument."})
        return (200, self.owner.db.AlbumSearch(args["album"]))
class ModeVote(Mode):
    """Cast votes for one or more songs (ids separated by ';')."""

    name = "vote"

    def get(self, args):
        if not self.session.user():
            return (500, {"auth_error": "You must login to vote for songs."})
        if "song_id" not in args:
            return (400, {"api_error": "vote requires a 'song_id' argument."})
        # All songs in one request share a single priority slot.
        priorityNumber = self.owner.db.NextVote(
            self.session.user(), self.session._player)
        # XXX: We don't check max-votes
        # split(';') on an id with no ';' yields [id], so one loop
        # handles the single-song case as well.
        for song_id in args["song_id"].split(";"):
            self.owner.db.AddVote(
                self.session.user(), self.session._player, song_id,
                priorityNumber)
        return ModeStatus.get(self, args)
class ModeUnvote(Mode):
    """Withdraw the user's votes for one or more songs (';'-separated)."""

    name = "unvote"

    def get(self, args):
        if not self.session.user():
            return (500, {"auth_error": "You must login to vote for songs."})
        if "song_id" not in args:
            return (400, {"api_error": "vote requires a 'song_id' argument."})
        # split(';') covers both the single- and multi-song forms.
        for song_id in args["song_id"].split(";"):
            self.owner.db.Unvote(self.session.user(), song_id)
        return ModeStatus.get(self, args)
class ModePlaylists(Mode):
    """List playlists, optionally filtered by owner and/or title."""

    name = "playlists"

    def get(self, args):
        who = args.get("who", "")
        title = args.get("title", "")
        return (200, self.owner.db.Playlists(who, title))
class ModePlaylistsLoose(Mode):
    """Fuzzy playlist search on a single free-form value."""

    name = "playlists_loose"

    def get(self, args):
        value = args.get("value", "")
        return (200, self.owner.db.PlaylistsLoose(value))
class ModePlaylistContents(Mode):
    """Return the songs contained in one playlist."""

    name = "playlist_contents"

    def get(self, args):
        if "playlist_id" not in args:
            return (400, {"api_error": "playlist_contents requires a 'playlist_id' argument."})
        return (200, self.owner.db.PlaylistContents(args["playlist_id"]))
class ModePlaylistInfo(Mode):
    """Return the metadata record for one playlist."""

    name = "playlist_info"

    def get(self, args):
        if "playlist_id" not in args:
            return (400, {"api_error": "playlist_info requires a 'playlist_id' argument."})
        return (200, self.owner.db.PlaylistInfo(args["playlist_id"]))
class ModeAddToPlaylist(Mode):
    """Append songs (ids separated by ';') to a playlist the user owns."""

    name = "add_to_playlist"

    def get(self, args):
        if not self.session.user():
            return (500, {"auth_error": "You must login to modify playlists."})
        if "song_id" not in args:
            return (400, {"api_error": "add_to_playlist requires a 'song_id' argument."})
        if "playlist_id" not in args:
            return (400, {"api_error": "add_to_playlist requires a 'playlist_id' argument."})
        # Only the playlist owner or an admin may modify it.
        (_status, playlist) = ModePlaylistInfo.get(self, args)
        if playlist['who'] != self.session.user() and not self.session.is_admin():
            return (500, {"auth_error": "You are not permitted to modify this playlist."})
        for song_id in args["song_id"].split(";"):
            self.owner.db.AddToPlaylist(args["playlist_id"], song_id)
        return ModePlaylistContents.get(self, args)
class ModeRemoveFromPlaylist(Mode):
    """Remove songs (ids separated by ';') from a playlist the user owns."""

    name = "remove_from_playlist"

    def get(self, args):
        if not self.session.user():
            return (500, {"auth_error": "You must login to modify playlists."})
        if "song_id" not in args:
            return (400, {"api_error": "remove_from_playlist requires a 'song_id' argument."})
        if "playlist_id" not in args:
            return (400, {"api_error": "remove_from_playlist requires a 'playlist_id' argument."})
        # Only the playlist owner or an admin may modify it.
        (_status, playlist) = ModePlaylistInfo.get(self, args)
        if playlist['who'] != self.session.user() and not self.session.is_admin():
            return (500, {"auth_error": "You are not permitted to modify this playlist."})
        for song_id in args["song_id"].split(";"):
            self.owner.db.RemoveFromPlaylist(args["playlist_id"], song_id)
        return ModePlaylistContents.get(self, args)
class ModeCreatePlaylist(Mode):
    """Create a new, empty playlist owned by the current user."""

    name = "create_playlist"

    def get(self, args):
        if not self.session.user():
            return (500, {"auth_error": "You must login to modify playlists."})
        if "title" not in args:
            return (400, {"api_error": "create_playlist requires a 'title' argument."})
        self.owner.db.CreatePlaylist(self.session.user(), args["title"])
        # Respond with the creator's own playlists.
        args["who"] = self.session.user()
        return ModePlaylists.get(self, args)
class ModeDeletePlaylist(Mode):
    """Delete a playlist owned by the current user (or any, for admins)."""

    name = "delete_playlist"

    def get(self, args):
        if not self.session.user():
            return (500, {"auth_error": "You must login to modify playlists."})
        if "playlist_id" not in args:
            return (400, {"api_error": "delete_playlist requires a 'playlist_id' argument."})
        (_status, playlist) = ModePlaylistInfo.get(self, args)
        if playlist['who'] != self.session.user() and not self.session.is_admin():
            return (500, {"auth_error": "You are not permitted to modify this playlist."})
        self.owner.db.DeletePlaylist(args["playlist_id"])
        # Respond with the requester's remaining playlists.
        args["who"] = self.session.user()
        return ModePlaylists.get(self, args)
class ModePurge(Mode):
    """Remove all of a user's votes on the current player.

    Admins may purge any user via the 'who' argument; everyone else can
    only purge themselves.
    """

    name = "purge"

    def get(self, args):
        if not self.session.user():
            # Fixed typo in the error message: "uesrs" -> "users".
            return (500, {"auth_error": "This action can only be executed by logged-in users."})
        if "who" not in args or not self.session.is_admin():
            args["who"] = self.session.user()
        self.owner.db.Purge(args["who"], self.session._player)
        return ModeStatus.get(self, args)
class ModeStats(Mode):
    """Library statistics, optionally scoped to one user's votes."""

    name = "stats"

    def get(self, args):
        who = args.get("who")
        return (200, {
            "total_songs": self.owner.db.SongCount(),
            "top_artists": self.owner.db.TopArtists(who),
        })
class ModeControls(Mode):
names = ["start", "stop", "skip", "pause", "volume", "zap"]
def get(self, args):
if not self.session.user():
return (500, {"auth_error": "You must login to control the player."})
_args = [args["mode"]]
if "value" in args:
_args.append(args["value"])
if args["mode"] == "skip" and not self.session.can_skip():
return (500, {"auth_error": "You can not skip this song."})
self.owner.rpc(self.session._player, _args)
time.sleep(1) # Give the players a | |
}
# Execute
response = self.adminclient.post('/api/v1/registration/',
json.dumps(post_data),
content_type='application/json')
# Check
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
d = Registration.objects.last()
self.assertEqual(d.source.name, 'test_ussd_source_adminuser')
self.assertEqual(d.stage, 'prebirth')
self.assertEqual(d.validated, False)
self.assertEqual(d.data, {"test_key1": "test_value1"})
self.assertEqual(d.created_by, self.adminuser)
def test_create_registration_normaluser(self):
# Setup
self.make_source_normaluser()
post_data = {
"stage": "postbirth",
"mother_id": "mother00-9d89-4aa6-99ff-13c225365b5d",
"data": {"test_key1": "test_value1"}
}
# Execute
response = self.normalclient.post('/api/v1/registration/',
json.dumps(post_data),
content_type='application/json')
# Check
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
d = Registration.objects.last()
self.assertEqual(d.source.name, 'test_voice_source_normaluser')
self.assertEqual(d.stage, 'postbirth')
self.assertEqual(d.validated, False)
self.assertEqual(d.data, {"test_key1": "test_value1"})
def test_create_registration_set_readonly_field(self):
# Setup
self.make_source_adminuser()
post_data = {
"stage": "prebirth",
"mother_id": "mother00-9d89-4aa6-99ff-13c225365b5d",
"data": {"test_key1": "test_value1"},
"validated": True
}
# Execute
response = self.adminclient.post('/api/v1/registration/',
json.dumps(post_data),
content_type='application/json')
# Check
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
d = Registration.objects.last()
self.assertEqual(d.source.name, 'test_ussd_source_adminuser')
self.assertEqual(d.stage, 'prebirth')
self.assertEqual(d.validated, False) # Should ignore True post_data
self.assertEqual(d.data, {"test_key1": "test_value1"})
def test_update_registration_adminuser(self):
# Setup
registration = self.make_registration_normaluser()
post_data = {
"data": {"test_key1": "test_value1"}
}
# Execute
response = self.adminclient.patch(
'/api/v1/registration/%s/' % registration.id,
json.dumps(post_data),
content_type='application/json')
# Check
self.assertEqual(response.status_code, status.HTTP_200_OK)
registration.refresh_from_db()
self.assertEqual(registration.data, {"test_key1": "test_value1"})
self.assertEqual(registration.updated_by, self.adminuser)
def test_update_registration_normaluser(self):
# Setup
registration = self.make_registration_normaluser()
post_data = {
"data": {"test_key1": "test_value1"}
}
# Execute
response = self.normalclient.patch(
'/api/v1/registration/%s/' % registration.id,
json.dumps(post_data),
content_type='application/json')
# Check
self.assertEqual(response.status_code, status.HTTP_200_OK)
registration.refresh_from_db()
self.assertEqual(registration.data, {"test_key1": "test_value1"})
self.assertEqual(registration.updated_by, self.normaluser)
def test_update_registration_readonly_field(self):
# Setup
registration = self.make_registration_normaluser()
post_data = {
"data": {"test_key1": "test_value1"},
"validated": True
}
# Execute
response = self.adminclient.patch(
'/api/v1/registration/%s/' % registration.id,
json.dumps(post_data),
content_type='application/json')
# Check
self.assertEqual(response.status_code, status.HTTP_200_OK)
registration.refresh_from_db()
self.assertEqual(registration.data, {"test_key1": "test_value1"})
self.assertEqual(registration.updated_by, self.adminuser)
self.assertEqual(registration.validated, False)
def test_list_registrations(self):
# Setup
registration1 = self.make_registration_normaluser()
registration2 = self.make_registration_adminuser()
registration3 = self.make_registration_normaluser()
# Execute
response = self.normalclient.get(
'/api/v1/registrations/', content_type='application/json')
# Check
self.assertEqual(response.status_code, status.HTTP_200_OK)
body = response.json()
self.assertEqual(len(body["results"]), 2)
self.assertEqual(body["results"][0]["id"], str(registration3.id))
self.assertEqual(body["results"][1]["id"], str(registration2.id))
self.assertIsNone(body["previous"])
self.assertIsNotNone(body["next"])
body = self.normalclient.get(body["next"]).json()
self.assertEqual(len(body["results"]), 1)
self.assertEqual(body["results"][0]["id"], str(registration1.id))
self.assertIsNotNone(body["previous"])
self.assertIsNone(body["next"])
body = self.normalclient.get(body["previous"]).json()
self.assertEqual(len(body["results"]), 2)
self.assertEqual(body["results"][0]["id"], str(registration3.id))
self.assertEqual(body["results"][1]["id"], str(registration2.id))
self.assertIsNone(body["previous"])
self.assertIsNotNone(body["next"])
def make_different_registrations(self):
self.make_source_adminuser()
registration1_data = {
"stage": "prebirth",
"mother_id": "mother01-63e2-4acc-9b94-26663b9bc267",
"data": REG_DATA["hw_pre_mother"].copy(),
"source": self.make_source_adminuser(),
"validated": True
}
registration1 = Registration.objects.create(**registration1_data)
registration2_data = {
"stage": "postbirth",
"mother_id": "mother02-63e2-4acc-9b94-26663b9bc267",
"data": REG_DATA["hw_pre_friend"].copy(),
"source": self.make_source_normaluser(),
"validated": False
}
registration2 = Registration.objects.create(**registration2_data)
return (registration1, registration2)
def test_filter_registration_mother_id(self):
# Setup
registration1, registration2 = self.make_different_registrations()
# Execute
response = self.adminclient.get(
'/api/v1/registrations/?mother_id=%s' % registration1.mother_id,
content_type='application/json')
# Check
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data["results"]), 1)
result = response.data["results"][0]
self.assertEqual(result["id"], str(registration1.id))
def test_filter_registration_stage(self):
# Setup
registration1, registration2 = self.make_different_registrations()
# Execute
response = self.adminclient.get(
'/api/v1/registrations/?stage=%s' % registration2.stage,
content_type='application/json')
# Check
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data["results"]), 1)
result = response.data["results"][0]
self.assertEqual(result["id"], str(registration2.id))
def test_filter_registration_validated(self):
# Setup
registration1, registration2 = self.make_different_registrations()
# Execute
response = self.adminclient.get(
'/api/v1/registrations/?validated=%s' % registration1.validated,
content_type='application/json')
# Check
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data["results"]), 1)
result = response.data["results"][0]
self.assertEqual(result["id"], str(registration1.id))
def test_filter_registration_source(self):
# Setup
registration1, registration2 = self.make_different_registrations()
# Execute
response = self.adminclient.get(
'/api/v1/registrations/?source=%s' % registration2.source.id,
content_type='application/json')
# Check
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data["results"]), 1)
result = response.data["results"][0]
self.assertEqual(result["id"], str(registration2.id))
def test_filter_registration_created_after(self):
# Setup
registration1, registration2 = self.make_different_registrations()
# While the '+00:00' is valid according to ISO 8601, the version of
# django-filter we are using does not support it
date_string = registration2.created_at.isoformat().replace(
"+00:00", "Z")
# Execute
response = self.adminclient.get(
'/api/v1/registrations/?created_after=%s' % date_string,
content_type='application/json')
# Check
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data["results"]), 1)
result = response.data["results"][0]
self.assertEqual(result["id"], str(registration2.id))
def test_filter_registration_created_before(self):
# Setup
registration1, registration2 = self.make_different_registrations()
# While the '+00:00' is valid according to ISO 8601, the version of
# django-filter we are using does not support it
date_string = registration1.created_at.isoformat().replace(
"+00:00", "Z")
# Execute
response = self.adminclient.get(
'/api/v1/registrations/?created_before=%s' % date_string,
content_type='application/json')
# Check
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data["results"]), 1)
result = response.data["results"][0]
self.assertEqual(result["id"], str(registration1.id))
def test_filter_registration_no_matches(self):
# Setup
registration1, registration2 = self.make_different_registrations()
# Execute
response = self.adminclient.get(
'/api/v1/registrations/?mother_id=test_id',
content_type='application/json')
# Check
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data["results"]), 0)
def test_filter_registration_unknown_filter(self):
# Setup
registration1, registration2 = self.make_different_registrations()
# Execute
response = self.adminclient.get(
'/api/v1/registrations/?something=test_id',
content_type='application/json')
# Check
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data["results"]), 2)
class TestFieldValidation(AuthenticatedAPITestCase):
    """Unit tests for the individual registration field validators."""

    def test_is_valid_date(self):
        """Real calendar dates pass; impossible or malformed ones fail."""
        self.assertEqual(is_valid_date("19820315"), True)
        self.assertEqual(is_valid_date("19830229"), False)  # not a leap year
        self.assertEqual(is_valid_date("1234"), False)

    def test_is_valid_uuid(self):
        """Only full-length UUID strings are accepted."""
        self.assertEqual(is_valid_uuid(str(uuid.uuid4())), True)
        self.assertEqual(
            is_valid_uuid("f9bfa2d7-5b62-4011-8eac-76bca34781a"), False)

    def test_is_valid_lang(self):
        """Language must be a known locale code, not a free-form name."""
        self.assertEqual(is_valid_lang("pcm_NG"), True)
        self.assertEqual(is_valid_lang("pidgin"), False)

    def test_is_valid_state(self):
        """State must be one of the recognised Nigerian states."""
        self.assertEqual(is_valid_state("cross_river"), True)
        self.assertEqual(is_valid_state("new_jersey"), False)

    def test_is_valid_role(self):
        """Role must come from the known role list."""
        self.assertEqual(is_valid_role("midwife"), True)
        self.assertEqual(is_valid_role("nurse"), False)

    def test_is_valid_msg_type(self):
        """Message type is limited to text and audio."""
        self.assertEqual(is_valid_msg_type("text"), True)
        self.assertEqual(is_valid_msg_type("audio"), True)
        self.assertEqual(is_valid_msg_type("email"), False)

    def test_is_valid_msg_receiver(self):
        """Receiver must be one of the defined receiver identifiers."""
        self.assertEqual(is_valid_msg_receiver("father_only"), True)
        self.assertEqual(is_valid_msg_receiver("mama"), False)

    def test_is_valid_loss_reason(self):
        """Loss reason must be from the fixed vocabulary."""
        self.assertEqual(is_valid_loss_reason("miscarriage"), True)
        self.assertEqual(is_valid_loss_reason("other"), False)

    def test_check_field_values(self):
        """check_field_values returns the names of the invalid fields."""
        good_data = REG_DATA["hw_pre_friend"].copy()
        bad_data = REG_DATA["hw_pre_friend"].copy()
        bad_data["msg_receiver"] = "somebody"
        self.assertEqual(
            validate_registration.check_field_values(
                REG_FIELDS["hw_pre_friend"], good_data),
            [])
        self.assertEqual(
            validate_registration.check_field_values(
                REG_FIELDS["hw_pre_friend"], bad_data),
            ['msg_receiver'])
class TestRegistrationValidation(AuthenticatedAPITestCase):
def test_validate_hw_prebirth(self):
# Setup
registration_data = {
"stage": "prebirth",
"mother_id": "mother00-9d89-4aa6-99ff-13c225365b5d",
"data": REG_DATA["hw_pre_friend"].copy(),
"source": self.make_source_adminuser()
}
registration = Registration.objects.create(**registration_data)
# Execute
v = validate_registration.validate(registration)
# Check
self.assertEqual(v, True)
self.assertEqual(registration.data["reg_type"], "hw_pre")
self.assertEqual(registration.data["preg_week"], 28)
self.assertEqual(registration.validated, True)
def test_validate_hw_postbirth(self):
# Setup
registration_data = {
"stage": "postbirth",
"mother_id": "mother00-9d89-4aa6-99ff-13c225365b5d",
"data": REG_DATA["hw_post"].copy(),
"source": self.make_source_adminuser()
}
registration = Registration.objects.create(**registration_data)
# Execute
v = validate_registration.validate(registration)
# Check
self.assertEqual(v, True)
self.assertEqual(registration.data["reg_type"], "hw_post")
self.assertEqual(registration.data["baby_age"], 28)
self.assertEqual(registration.validated, True)
def test_validate_pbl_loss(self):
# Setup
registration_data = {
"stage": "loss",
"mother_id": "mother00-9d89-4aa6-99ff-13c225365b5d",
"data": REG_DATA["pbl_loss"].copy(),
"source": self.make_source_normaluser()
}
registration = Registration.objects.create(**registration_data)
# Execute
v = validate_registration.validate(registration)
# Check
self.assertEqual(v, True)
self.assertEqual(registration.data["reg_type"], "pbl_loss")
self.assertEqual(registration.validated, True)
def test_validate_public(self):
# Setup
registration_data = {
"stage": "public",
"mother_id": "mother00-9d89-4aa6-99ff-13c225365b5d",
"data": REG_DATA["public"].copy(),
"source": self.make_source_normaluser()
}
registration = Registration.objects.create(**registration_data)
# Execute
v = validate_registration.validate(registration)
# Check
self.assertEqual(v, True)
self.assertEqual(registration.data["reg_type"], "public")
self.assertEqual(registration.validated, True)
def test_validate_pregnancy_too_long(self):
# Setup
registration_data = {
"stage": "prebirth",
"mother_id": "mother00-9d89-4aa6-99ff-13c225365b5d",
"data": REG_DATA["hw_pre_friend"].copy(),
"source": self.make_source_adminuser()
}
registration_data["data"]["last_period_date"] = "20130101"
registration = Registration.objects.create(**registration_data)
# Execute
v = validate_registration.validate(registration)
# Check
self.assertEqual(v, False)
self.assertEqual(registration.validated, False)
def test_validate_pregnancy_9_weeks(self):
# Setup
registration_data = {
"stage": "prebirth",
"mother_id": "mother00-9d89-4aa6-99ff-13c225365b5d",
"data": REG_DATA["hw_pre_friend"].copy(),
"source": self.make_source_adminuser()
}
registration_data["data"]["last_period_date"] = "20150612" # 9 weeks
registration = Registration.objects.create(**registration_data)
# Execute
v = validate_registration.validate(registration)
# Check
self.assertEqual(v, False)
self.assertEqual(registration.validated, False)
def test_validate_pregnancy_10_weeks(self):
# Setup
registration_data = {
"stage": "prebirth",
"mother_id": "mother00-9d89-4aa6-99ff-13c225365b5d",
"data": REG_DATA["hw_pre_friend"].copy(),
"source": self.make_source_adminuser()
}
registration_data["data"]["last_period_date"] = "20150605" # 10 weeks
registration = Registration.objects.create(**registration_data)
# Execute
v = validate_registration.validate(registration)
# Check
self.assertEqual(v, True)
self.assertEqual(registration.validated, True)
def test_validate_baby_too_young(self):
# Setup
registration_data = {
"stage": "postbirth",
"mother_id": "mother00-9d89-4aa6-99ff-13c225365b5d",
"data": REG_DATA["hw_post"].copy(),
"source": self.make_source_adminuser()
}
registration_data["data"]["baby_dob"] = "20150818"
registration = Registration.objects.create(**registration_data)
# Execute
v = validate_registration.validate(registration)
# Check
self.assertEqual(v, False)
self.assertEqual(registration.validated, False)
def test_validate_baby_too_old(self):
# Setup
registration_data = {
"stage": "postbirth",
"mother_id": "mother00-9d89-4aa6-99ff-13c225365b5d",
"data": REG_DATA["hw_post"].copy(),
"source": self.make_source_adminuser()
}
registration_data["data"]["baby_dob"] = "20130717"
registration = Registration.objects.create(**registration_data)
# Execute
v = validate_registration.validate(registration)
# Check
self.assertEqual(v, False)
self.assertEqual(registration.validated, False)
    @responses.activate
    def test_validate_registration_run_success(self):
        """End-to-end run of the validate_registration Celery task.

        Every outbound HTTP dependency (stage-based-messaging message
        sets and schedules, identity-store address lookups, message
        sender) is stubbed with the `responses` library, so the task
        must run to completion and report success.

        NOTE(review): mock registration order mirrors the task's call
        order; keep the responses.add() calls in this sequence.
        """
        # Setup
        # mock mother messageset lookup
        query_string = '?short_name=prebirth.mother.text.10_42'
        responses.add(
            responses.GET,
            'http://localhost:8005/api/v1/messageset/%s' % query_string,
            json={
                "next": None,
                "previous": None,
                "results": [{
                    "id": 1,
                    "short_name": 'prebirth.mother.text.10_42',
                    "default_schedule": 1
                }]
            },
            status=200, content_type='application/json',
            match_querystring=True
        )
        # mock household messageset lookup
        query_string = '?short_name=prebirth.household.audio.10_42.fri.9_11'
        responses.add(
            responses.GET,
            'http://localhost:8005/api/v1/messageset/%s' % query_string,
            json={
                "next": None,
                "previous": None,
                "results": [{
                    "id": 3,
                    "short_name": 'prebirth.household.audio.10_42.fri.9_11',
                    "default_schedule": 3
                }]
            },
            status=200, content_type='application/json',
            match_querystring=True
        )
        # mock mother schedule lookup
        responses.add(
            responses.GET,
            'http://localhost:8005/api/v1/schedule/1/',
            json={"id": 1, "day_of_week": "1,3,5"},
            status=200, content_type='application/json',
        )
        # mock household schedule lookup
        responses.add(
            responses.GET,
            'http://localhost:8005/api/v1/schedule/3/',
            json={"id": 3, "day_of_week": "5"},
            status=200, content_type='application/json',
        )
        # mock mother MSISDN lookup
        responses.add(
            responses.GET,
            'http://localhost:8001/api/v1/identities/mother00-9d89-4aa6-99ff-13c225365b5d/addresses/msisdn?default=True',  # noqa
            json={
                "next": None, "previous": None,
                "results": [{"address": "+234123"}]
            },
            status=200, content_type='application/json',
            match_querystring=True
        )
        # mock friend MSISDN lookup
        responses.add(
            responses.GET,
            'http://localhost:8001/api/v1/identities/friend00-73a2-4d89-b045-d52004c025fe/addresses/msisdn?default=True',  # noqa
            json={
                "next": None, "previous": None,
                "results": [{"address": "+234124"}]
            },
            status=200, content_type='application/json',
            match_querystring=True
        )
        # mock public subscription lookup (none active, so no dedupe short-circuit)
        responses.add(
            responses.GET,
            'http://localhost:8005/api/v1/subscriptions/?active=True&completed=False&messageset_contains=public.mother&identity=mother00-9d89-4aa6-99ff-13c225365b5d',  # noqa
            json={
                "next": None, "previous": None,
                "results": []
            },
            status=200, content_type='application/json',
            match_querystring=True
        )
        # mock mother welcome SMS send
        responses.add(
            responses.POST,
            'http://localhost:8006/api/v1/outbound/',
            json={"id": 1},
            status=200, content_type='application/json',
        )
        # prepare registration data
        registration_data = {
            "stage": "prebirth",
            "mother_id": "mother00-9d89-4aa6-99ff-13c225365b5d",
            "data": REG_DATA["hw_pre_friend"].copy(),
            "source": self.make_source_adminuser()
        }
        registration = Registration.objects.create(**registration_data)
        # Execute the task synchronously (eager mode in tests)
        result = validate_registration.apply_async(args=[registration.id])
        # Check
        self.assertEqual(result.get(), "Validation completed - Success")
def test_validate_registration_run_failure_missing_field(self):
| |
#self.SEQLEADER.append(tk.BooleanVar(value=False))
#self.SEQNTIMES.append(1)
self.make_plot()
  def contour_var(self):
  # ==========================
    ''' Calculates the long term variance of a contour field.

        Reads every time record of the currently selected (or parent)
        CONTOUR field at the current vertical level K, accumulates the
        sum and sum of squares, derives the unbiased sample variance,
        and registers the result as a new one-record CONTOUR layer.
    '''
    # Nothing to do without an open netCDF contour field.
    if self.ncdf == 0:
      messagebox.showinfo(message='No Netcdf file opened yet')
      return

    ii = self.CDF_INDX.get()
    # Derived fields point at their PARENT; operate on the original data.
    if self.CDF[ii].PARENT is None:
      toconsola('Calculating variance of current CONTOUR field')
    else:
      ii = self.CDF[ii].PARENT
      # NOTE(review): ii is passed as toconsola's second positional
      # argument (normally the console widget) — presumably this was
      # meant to be '...ii='+str(ii); confirm toconsola's signature.
      toconsola('Calculating variance of PARENT CONTOUR field, ii=',ii)

    K = self.CDF[ii].K.get()
    L = self.CDF[ii].L.get()  # NOTE: immediately shadowed by the loop below
    nt = self.CDF[ii].FLD.icdf.nt
    # Variance needs at least two time records.
    if nt <= 1:
      messagebox.showinfo(message='Variance requires more than one time records')
      return

    # Accumulate sum (num1) and sum of squares (num2) over all records.
    for L in range(0,nt):
      data = self.CDF[ii].FLD.read(K=K,L=L,wid=self.cons)
      if L==0:
        num1 = data.copy()
        num2 = np.square(data)
      else:
        num1 += data
        num2 += np.square(data)
    # Unbiased sample variance: (sum(x^2) - (sum x)^2 / n) / (n - 1)
    #data = num2/nt - np.square(num1/nt)
    data = num2/(nt-1) - np.square(num1)/(nt*(nt-1))

    # Build the derived CONTOUR layer holding the variance field.
    CDF = CONTOUR()
    CDF.SOURCE = 'VARIANCE'
    CDF.PARENT = ii
    CDF.FLD.data = data.copy()
    CDF.FLD.minval = float(data.min())
    CDF.FLD.maxval = float(data.max())
    toconsola('Variance Min val = '+str(CDF.FLD.minval),wid=self.cons)
    toconsola('Variance Max val = '+str(CDF.FLD.maxval),wid=self.cons)

    # Middle of the time segment
    t2 = 0.5*(self.CDF[ii].T_LIST[0]+self.CDF[ii].T_LIST[-1])

    # The derived layer has a single time record at t2 and level K.
    CDF.K.set(K)
    CDF.L.set(0)
    CDF.K_LIST = [K]
    CDF.L_LIST = [0]
    CDF.Z_LIST = [self.CDF[ii].Z_LIST[K]]
    CDF.T_LIST = [t2]
    try:
      CDF.DATE = [num2date(t2, \
                  units=self.CDF[ii].FLD.icdf.time_units, \
                  calendar=self.CDF[ii].FLD.icdf.time_calendar)]
    except:
      # Fallback when the parent has no usable time units/calendar.
      CDF.DATE = [0.5*(self.CDF[ii].FLD.icdf.nt-1)]
    CDF.ALIAS.set('Variance')
    # Copy the horizontal grid and variable metadata from the parent.
    CDF.FLD.x = self.CDF[ii].FLD.x
    CDF.FLD.y = self.CDF[ii].FLD.y
    CDF.FLD.xx = self.CDF[ii].FLD.xx
    CDF.FLD.yy = self.CDF[ii].FLD.yy
    CDF.FLD.ndims = self.CDF[ii].FLD.ndims
    CDF.FLD.with_axes = self.CDF[ii].FLD.with_axes
    CDF.FLD.units = self.CDF[ii].FLD.units
    CDF.FLD.missing = self.CDF[ii].FLD.missing
    CDF.FLD.varname = self.CDF[ii].FLD.varname
    CDF.FLD.varid = self.CDF[ii].FLD.varid
    CDF.FLD.xmin = self.CDF[ii].FLD.xmin
    CDF.FLD.xmax = self.CDF[ii].FLD.xmax
    CDF.FLD.ymin = self.CDF[ii].FLD.ymin
    CDF.FLD.ymax = self.CDF[ii].FLD.ymax
    CDF.FILENAME.set(self.CDF[ii].FILENAME.get())
    CDF.varname.set(CDF.FLD.varname)
    # Re-open the parent file so the derived layer has its own handle.
    CDF.FLD.nc = Dataset(self.CDF[ii].FILENAME.get())
    CDF.FLD.icdf = tools.geocdf(wid=self.cons)
    # We copy the original icdf information
    conf = self.CDF[ii].FLD.icdf.conf_get()
    CDF.FLD.icdf.conf_set(conf)
    # Add the appropriate changes
    CDF.FLD.icdf.VAR_MENU = [CDF.FLD.varname]
    CDF.FLD.icdf.nt = 1
    # Inherit the parent's plotting configuration.
    conf = self.CDF[ii].PLOT.conf_get()
    CDF.PLOT.conf_set(conf)
    # Contour limits: rounded if myround succeeds, raw values otherwise.
    toconsola('Setting contour intervals ...',wid=self.cons)
    try:
      CDF.PLOT.CONTOUR_MIN.set(myround(CDF.FLD.minval))
    except:
      CDF.PLOT.CONTOUR_MIN.set(CDF.FLD.minval)
    try:
      CDF.PLOT.CONTOUR_MAX.set(myround(CDF.FLD.maxval))
    except:
      CDF.PLOT.CONTOUR_MAX.set(CDF.FLD.maxval)
    dd = CDF.PLOT.CONTOUR_MAX.get() - CDF.PLOT.CONTOUR_MIN.get()
    try:
      CDF.PLOT.CONTOUR_INTERVAL.set(myround(0.1*dd,0))
    except:
      CDF.PLOT.CONTOUR_INTERVAL.set(0.1*dd)

    # Show the new layer, hide the parent, and register it in the app.
    CDF.show.set(True)
    self.CDF[ii].show.set(False)
    self.ncdf += 1
    self.CDF.append(CDF)
    self.CDF_INDX.set(self.ncdf-1)
    self.CDF_LIST = list(range(self.ncdf))
    self.LAYERS.add(TYPE='FLD',Filename=self.CDF[ii].FILENAME.get(),N=1,wid=self.cons)
    #self.nfiles += 1
    #self.FILENAMES.append(self.CDF[ii].FILENAME.get())
    #self.FILETYPES.append('FLD')
    #self.FILEORDER.append(self.ncdf-1)
    #self.SEQUENCES.append(tk.BooleanVar(value=False))
    #self.SEQLEADER.append(tk.BooleanVar(value=False))
    #self.SEQNTIMES.append(1)
    self.make_plot()
  def get_map_coords(self):
  # ====================
    ''' Modal dialog to pick a point: the user either types x/y or
        clicks on the map (CAPTURE_POINT enables map picking).
        Blocks until the window closes and returns [x, y]. '''

    def _close():
    # -----------
      # Stop capturing map clicks and tear the dialog down.
      self.CAPTURE_POINT = False
      self.Window_xysel.destroy()
      self.Window_xysel = None

    def _done():
    # -----------
      # Same as cancel: the chosen point already lives in self.pxo/self.pyo.
      _close()

    # Only build the dialog if it is not already open.
    if self.Window_xysel is None:
      self.CAPTURE_POINT = True
      self.Window_xysel = tk.Toplevel(self.master)
      self.Window_xysel.title('Select point')
      self.Window_xysel.resizable(width=False,height=False)
      self.Window_xysel.protocol('WM_DELETE_WINDOW',_close)
      F0 = ttk.Frame(self.Window_xysel,padding=5,borderwidth=5)
      ttk.Label(F0,text='Enter or select a point in the map ...').grid(row=0,column=0,columnspan=6,sticky='we',pady=10)
      ttk.Label(F0,text='x = ',width=5).grid(row=1,column=0,sticky='e')
      ttk.Entry(F0,textvariable=self.pxo,width=15).grid(row=1,column=1,columnspan=3,sticky='ew',pady=5)
      ttk.Label(F0,text='y = ',width=5).grid(row=1,column=4,sticky='e')
      ttk.Entry(F0,textvariable=self.pyo,width=15).grid(row=1,column=5,columnspan=3,sticky='ew',pady=5)
      ttk.Button(F0,text='Cancel',command=_close).grid(row=2,column=4,sticky='e',padx=5)
      ttk.Button(F0,text='Done',command=_done).grid(row=2,column=5,sticky='e',padx=5)
      F0.grid()
      # Block until the dialog is dismissed.
      self.Window_xysel.wait_window()
    return [self.pxo.get(), self.pyo.get()]
def vector_series(self):
# ==========================
''' Opens a figure and shows the time series of the velocity.
The user has selected a point. '''
if self.nvec == 0:
messagebox.showinfo(message='No currents file opened yet')
return
ii = self.VEC_INDX.get()
K = self.VEC[ii].K.get()
nt = self.VEC[ii].U.icdf.nt
ndims = self.VEC[ii].U.ndims
if nt == 1:
messagebox.showinfo(message='Single time step. No time series')
return
yy = self.get_map_coords()
xo = yy[0]; yo = yy[1]
dis = (xo-self.VEC[ii].U.xx)**2 + (yo-self.VEC[ii].U.yy)**2
ind = np.unravel_index(dis.argmin(), dis.shape)
io = ind[1]
jo = ind[0]
self.VEC[ii].jo.set(jo)
self.VEC[ii].io.set(io)
toconsola('Vector selected point: '+str(io)+', '+str(jo),wid=self.cons)
if ndims == 3:
if self.VEC[ii].U.icdf.ppl[self.VEC[ii].U.varid] > -1:
u = self.VEC[ii].U.nc.variables[self.VEC[ii].U.varname][:,jo,io].squeeze()
v = self.VEC[ii].V.nc.variables[self.VEC[ii].V.varname][:,jo,io].squeeze()
else:
toconsola('Invalid file!',wid=wid)
return
elif ndims == 4:
u = self.VEC[ii].U.nc.variables[self.VEC[ii].U.varname][:,K,jo,io].squeeze()
v = self.VEC[ii].V.nc.variables[self.VEC[ii].V.varname][:,K,jo,io].squeeze()
else:
toconsola("Invalid number of dimensions, "+str(ndims),wid=wid)
_u = u.filled(fill_value=np.nan)
_v = v.filled(fill_value=np.nan)
u = np.ma.masked_equal(_u,np.nan); del _u
v = np.ma.masked_equal(_v,np.nan); del _v
t = []
for i in range(nt):
t.append(datetime.datetime.strptime(str(self.VEC[ii].DATE[i]),'%Y-%m-%d %H:%M:%S'))
Window = tk.Toplevel(self.master)
Window.title('PLOTXY')
Window.resizable(width=False,height=False)
#Window.protocol('WM_DELETE_WINDOW',_close)
plotxy.PLOTXY(Window,t=t,u=u,v=v)
def vector_mean(self):
# ==========================
''' Calculates the long term mean of a vector field '''
if self.nvec == 0:
messagebox.showinfo(message='No currents file opened yet')
return
ii = self.VEC_INDX.get()
if self.VEC[ii].PARENT is None:
pass
else:
ii = self.VEC[ii].PARENT
K = self.VEC[ii].K.get()
L = self.VEC[ii].L.get()
nt = self.VEC[ii].U.icdf.nt
for L in range(0,nt):
print('L = ', L)
udata = self.VEC[ii].U.read(K=K,L=L,wid=self.cons)
vdata = self.VEC[ii].V.read(K=K,L=L,wid=self.cons)
#ny, nx = udata.shape
#udata = udata.reshape((1,ny,nx))
#vdata = vdata.reshape((1,ny,nx))
#if L==0:
# unum = udata.copy()
# vnum = vdata.copy()
#else:
# unum = np.ma.concatenate([unum,udata])
# vnum = np.ma.concatenate([vnum,vdata])
if L==0:
unum = udata.copy()
vnum = vdata.copy()
else:
unum = unum + udata
vnum = vnum + vdata
VEC = VECTOR()
# Make sure that the missing value is NaN:
#udata = unum.mean(axis=0)
#vdata = vnum.mean(axis=0)
VEC.SOURCE = 'MEAN'
VEC.PARENT = ii
udata = unum / nt
vdata = vnum / nt
_u = udata.filled(fill_value=np.nan)
_v = vdata.filled(fill_value=np.nan)
udata = np.ma.masked_equal(_u,np.nan); del _u
vdata = np.ma.masked_equal(_v,np.nan); del _v
VEC.U.data = udata
VEC.V.data = vdata
VEC.K.set(K)
if len(self.VEC[ii].Z_LIST) > 0:
VEC.K_LIST = [K]
VEC.Z_LIST = [self.VEC[ii].Z_LIST[K]]
VEC.L.set(0)
VEC.L_LIST = [0]
# Middle of the time segment
t2 = 0.5*(self.VEC[ii].T_LIST[0]+self.VEC[ii].T_LIST[-1])
VEC.T_LIST = [t2]
try:
VEC.DATE = [num2date(t2, \
units=self.CDF[ii].FLD.icdf.time_units, \
calendar=self.CDF[ii].FLD.icdf.time_calendar)]
except:
VEC.DATE = [0.5*(self.VEC[ii].U.icdf.nt-1)]
VEC.grid_type.set(VEC.grid_type.get())
VEC.ALIAS.set('Average')
VEC.U.x = self.VEC[ii].U.x
VEC.U.y = self.VEC[ii].U.y
VEC.U.xx = self.VEC[ii].U.xx
VEC.U.yy = self.VEC[ii].U.yy
VEC.U.ndims = self.VEC[ii].U.ndims
VEC.U.with_axes = self.VEC[ii].U.with_axes
VEC.U.units = self.VEC[ii].U.units
VEC.U.missing = self.VEC[ii].U.missing
VEC.U.varname = self.VEC[ii].U.varname
VEC.U.varid = self.VEC[ii].U.varid
VEC.U.xmin = self.VEC[ii].U.xmin
VEC.U.xmax = self.VEC[ii].U.xmax
VEC.V.x = self.VEC[ii].V.x
VEC.V.y = self.VEC[ii].V.y
VEC.V.xx = self.VEC[ii].V.xx
VEC.V.yy = self.VEC[ii].V.yy
VEC.V.ndims = self.VEC[ii].V.ndims
VEC.V.with_axes = self.VEC[ii].V.with_axes
VEC.V.units = self.VEC[ii].V.units
VEC.V.missing = self.VEC[ii].V.missing
VEC.V.varname = self.VEC[ii].V.varname
VEC.V.varid = self.VEC[ii].V.varid
VEC.V.xmin = self.VEC[ii].V.xmin
VEC.V.xmax = self.VEC[ii].V.xmax
VEC.UFILENAME.set(self.VEC[ii].UFILENAME.get())
VEC.VFILENAME.set(self.VEC[ii].VFILENAME.get())
VEC.uname.set(VEC.U.varname)
VEC.vname.set(VEC.V.varname)
VEC.U.nc = Dataset(self.VEC[ii].UFILENAME.get())
VEC.V.nc = Dataset(self.VEC[ii].VFILENAME.get())
VEC.U.icdf = tools.geocdf(wid=self.cons)
VEC.V.icdf = tools.geocdf(wid=self.cons)
conf = self.VEC[ii].U.icdf.conf_get()
VEC.U.icdf.conf_set(conf)
VEC.U.icdf.VAR_MENU = [VEC.U.varname]
VEC.U.icdf.nt = 1
conf = self.VEC[ii].V.icdf.conf_get()
VEC.V.icdf.conf_set(conf)
VEC.V.icdf.VAR_MENU = [VEC.V.varname]
VEC.V.icdf.nt = 1
conf = self.VEC[ii].PLOT.conf_get()
VEC.PLOT.conf_set(conf)
VEC.show.set(True)
self.VEC[ii].show.set(False)
self.nvec += 1
self.VEC.append(VEC)
self.VEC_INDX.set(self.nvec-1)
self.VEC_LIST = list(range(self.nvec))
self.LAYERS.add(TYPE='VEC',Filename=self.VEC[ii].UFILENAME.get(),N=1,wid=self.cons)
#self.nfiles += 1
#self.FILENAMES.append(self.VEC[ii].UFILENAME.get())
#self.FILETYPES.append('VEC')
#self.FILEORDER.append(self.nvec-1)
#self.SEQUENCES.append(tk.BooleanVar(value=False))
#self.SEQLEADER.append(tk.BooleanVar(value=False))
#self.SEQNTIMES.append(1)
self.make_plot()
  def marker_editor(self):
  # ====================
    ''' Dialog to create, load and save geographic markers.

        Points are entered manually or picked on the map; each Add
        appends (lon, lat, label) to a new MARKER layer that is
        registered in the application when the user presses Done.

        NOTE(review): the entry log widget is stored in a module-level
        global 'log' shared by the closures below — a second editor
        window would clobber it; confirm before reusing. '''

    MARKER = geomarker.parameters()
    marklabel = tk.StringVar()

    # Map projection
    #
    proj = map_proj(self.PLOT.MAP_PROJECTION.get())

    def _close():
    # -----------
      # Stop capturing map clicks and tear the editor down.
      self.CAPTURE_POINT = False
      self.Window_markered.destroy()
      self.Window_markered = None

    def _done():
    # -----------
      # Close the editor, then register the accumulated markers as a
      # new in-memory (VIEWER-sourced, no file) layer.
      _close()
      MARKER.SOURCE = 'VIEWER'
      MARKER.FILENAME.set(None)
      self.nmarker += 1
      self.MARKER.append(MARKER)
      self.MARKER_INDX.set(self.nmarker-1)
      self.MARKER_LIST = list(range(self.nmarker))
      self.LAYERS.add(TYPE='MARKER',Filename=None,N=len(MARKER.lon),wid=self.cons)
      self.LAYERS.print()
      #self.nfeatures += 1
      #self.FEATNAMES.append(MARKER.FILENAME.get())
      #self.FEATTYPES.append('MARKER')
      #self.FEATORDER.append(self.nmarker-1)
      ii = self.MARKER_INDX.get()
      self.make_plot()

    def _clear():
    # -----------
      # Empty the log widget and the label entry (markers already
      # appended to MARKER are not removed).
      global log
      log.delete('1.0','end')
      marklabel.set('')

    def _add():
    # ---------
      ''' Add the new mark '''
      #string = '\t {} {} {} \n'.format(self.pxo.get(),self.pyo.get(),marklabel.get())
      string = '%9.4f, %9.4f, %s\n' %(self.pxo.get(),self.pyo.get(),marklabel.get())
      print('string = ', string)
      log.insert('end',string)
      # Append the point and redraw immediately so the user sees it.
      MARKER.lon.append(self.pxo.get())
      MARKER.lat.append(self.pyo.get())
      MARKER.label.append(marklabel.get())
      MARKER.n = len(MARKER.lon)
      geomarker.drawing(self.ax, proj['proj'], MARKER)
      self.canvas.draw()
      marklabel.set('')

    def _load():
    # ---------
      global log
      ''' Load an existent marker filek '''
      # NOTE(review): parent=self.Window_marker — presumably this should
      # be self.Window_markered (the editor window); verify.
      nn = filedialog.askopenfilename(filetypes=[('CSV','*.csv'),
                                                 ('TXT','*.txt'),
                                                 ('ALL','*')],
                                      initialdir='./',
                                      parent=self.Window_marker)
      if len(nn) == 0:
        return
      else:
        filename = '%s' % nn
      # Not empty filename:
      MARKER.Read(filename)
      if MARKER.n == 0:
        return
      # Echo the loaded points into the log widget.
      for l in range(MARKER.n):
        string = '%9.4f, %9.4f, %s\n' %(MARKER.lon[l], \
                                        MARKER.lat[l],
                                        MARKER.label[l])
        log.insert('end',string)

    def _save():
    # ---------
      global log
      # Dump the raw text of the log widget (minus trailing newline).
      aa = log.get("1.0","end-1c")
      ''' Save markers onto file '''
      filetypes = [('Text file','.txt')]
      nn = filedialog.asksaveasfilename(title='Save marker file',
                                        initialdir='./',
                                        filetypes=filetypes,
                                        confirmoverwrite=True)
      if nn is None or len(nn) == 0:
        return
      filename = '%s' %nn
      toconsola('Saving entries to file ' +filename,wid=self.cons)
      f = open(filename,'w')
      f.write(aa)
      f.close()

    # Build the editor window only if it is not already open.
    if self.Window_markered is None:
      self.CAPTURE_POINT = True
      self.Window_markered = tk.Toplevel(self.master)
      self.Window_markered.title('Marker editor')
      self.Window_markered.resizable(width=False,height=False)
      self.Window_markered.protocol('WM_DELETE_WINDOW',_close)
      F0 = ttk.Frame(self.Window_markered,padding=5,borderwidth=5)
      ttk.Label(F0,text='Enter or select a point in the map ...').grid(row=0,column=0,columnspan=6,sticky='we',pady=10)
      ttk.Label(F0,text='x',width=12).grid(row=1,column=0,columnspan=6,sticky='we',pady=10)
      ttk.Label(F0,text='y').grid(row=1,column=1,columnspan=6,sticky='we',pady=10)
      ttk.Label(F0,text='Label').grid(row=1,column=2,columnspan=6,sticky='we',pady=10)
      ttk.Entry(F0,textvariable=self.pxo,width=12).grid(row=2,column=0,columnspan=1,sticky='ew',pady=5)
      ttk.Entry(F0,textvariable=self.pyo,width=12).grid(row=2,column=1,columnspan=1,sticky='ew',pady=5)
      ttk.Entry(F0,textvariable=marklabel,width=12).grid(row=2,column=2,columnspan=1,sticky='ew',pady=5)
      ttk.Button(F0,text='Add',command=_add).grid(row=2,column=3,sticky='ew',pady=5)
      global log
      log = tk.Text(F0,height=5)
      log.grid(row=3,column=0,columnspan=4,padx=10,pady=10,sticky='nsew')
      #log.configure(state='disabled')
      # Scrollbar
      scrollb = tk.Scrollbar(F0,command=log.yview)
      scrollb.grid(row=3,column=4,sticky='nsew',padx=2,pady=2)
      log['yscrollcommand'] = scrollb.set
      ttk.Button(F0,text='Clear',command=_clear).grid(row=4,column=0,sticky='e',padx=5)
      ttk.Button(F0,text='Load',command=_load).grid(row=4,column=1,sticky='e',padx=5)
      ttk.Button(F0,text='Save',command=_save).grid(row=4,column=2,sticky='e',padx=5)
      ttk.Button(F0,text='Done',command=_done).grid(row=4,column=3,sticky='e',padx=5)
      F0.grid()
# ====================
def get_ellipse(self):
# ====================
''' Widget to read Ellipses '''
self.ESOURCE = tk.StringVar()
ELLIPSE = ellipse.ELLIPSE()
self.ESOURCE.set(self.ELLIPSE_OPTIONS[0])
def _cancel():
# ===========
self.Window_gellipse.destroy()
self.Window_gellipse = None
def _close():
# ===========
self.Window_gellipse.destroy()
self.Window_gellipse = None
self.make_plot()
if self.Window_cellipse is not None:
self.Window_cellipse.destroy()
self.Window_cellipse = None
def _done():
# ===========
_close()
def _clear():
# ===========
if self.nellipse == 0:
return
ii = self.ELLIPSE_INDX.get()
self.LAYERS.erase('ELLIPSE',ii,wid=self.cons)
self.LAYERS.print()
#for i in range(self.nfiles):
# if self.FILETYPES[i] == 'ELLIPSE' and self.FILEORDER[i] == ii:
# del self.FILENAMES[i]
# del self.FILETYPES[i]
# del self.FILEORDER[i]
# del self.SEQUENCES[i]
# del self.SEQLEADER[i]
# del self.SEQNTIMES[i]
# self.nfiles -= 1
if self.LAYERS.n == 0:
self.TIME = []
self.DATE = []
self.L.set(0)
self.L_LIST = []
self.NL = 0
self.bnext.configure(state='disabled')
self.bprev.configure(state='disabled')
self.PLOT.TLABEL.set('')
self.lbox['values'] = self.L_LIST
self.lbox.configure(state='disabled')
self.first = True
toconsola('Erasing record '+str(ii),wid=self.cons)
del self.ELLIPSE[ii]
self.nellipse -= 1
ii = self.nellipse-1 if ii >= self.nellipse else ii
toconsola('New ellipse = '+str(ii),wid=self.cons)
self.ELLIPSE_INDX.set(ii)
_refill(ii)
def _reget():
# ===========
self.ELLIPSE_INDEX.set(_wsel.get())
ii = self.FLOAT_INDX.get()
_refill(ii)
def _refill(ii):
# ============
if | |
'''
Superclass to extract yield data from tables
and from mppnp simulations
<NAME> 11/2013
Two classes: One for reading and extracting of
NuGrid table data, the other one for SN1a data.
'''
import matplotlib.pyplot as plt
import numpy as np
import os
color=['r','k','b','g']
marker_type=['o','p','s','D']
line_style=['--','-','-.',':']
#global notebookmode
notebookmode=False
#class read_yields():
#
# def __init__(self,nugridtable='element_yield_table.txt',sn1a_table='sn1a_ivo12_stable_z.txt'):
#
# self.sn1a_table=sn1a_table
# self.nugridtable=nugridtable ,...
class read_nugrid_yields():
    def __init__(self,nugridtable,isotopes=[],excludemass=[]):
        '''
        Parse a NuGrid yield table file.

        nugridtable : path (or filename) of the yield table to read
        isotopes    : if non-empty, restrict and reorder the table rows
                      to exactly this list of species (missing species
                      get zero entries)
        excludemass : list of initial masses whose (M,Z) tables are
                      skipped entirely

        NOTE(review): isotopes/excludemass use mutable default
        arguments; they are only read here, but confirm no caller
        relies on mutating them.
        '''
        table=nugridtable
        import os
        # Display label: basename of the table path.
        if '/' in table:
            self.label=table.split('/')[-1]
        else:
            self.label=table
        self.path=table
        # In notebook mode the file is staged via helper scripts
        # (cp.py / delete.py) before reading; otherwise read directly.
        if notebookmode==True:
            os.system('sudo python cp.py '+nugridtable)
            file1=open('tmp/'+nugridtable)
            lines=file1.readlines()
            file1.close()
            os.system('sudo python delete.py '+nugridtable)
        else:
            file1=open(nugridtable)
            lines=file1.readlines()
            file1.close()
        # Parser state:
        #   header1        - file-level 'H ...' lines before the first table
        #   table_header   - per-(M,Z) attribute lines
        #   age            - per-table 'Lifetime' values
        #   yield_data     - per-table list of data columns
        #   col_attrs_data - per-table attribute values (first entry is
        #                    the 'Table: (M,Z)' line itself)
        header1=[]
        table_header=[]
        age=[]
        yield_data=[]
        #kin_e=[]
        #lum_bands=[]
        #m_final=[]
        header_done=False
        ignore=False
        col_attrs_data=[]
        ######read through all lines
        for line in lines:
            if 'H' in line[0]:
                if not 'Table' in line:
                    # 'H ...' attribute line: file header before the first
                    # table, per-table attribute afterwards.
                    if header_done==False:
                        header1.append(line.strip())
                    else:
                        table_header[-1].append(line.strip())
                else:
                    # 'H Table: (M=...,Z=...)' starts a new (M,Z) block.
                    ignore=False
                    for kk in range(len(excludemass)):
                        if float(excludemass[kk]) == float(line.split(',')[0].split('=')[1]):
                            ignore=True
                            #print 'ignore',float(line.split(',')[0].split('=')[1])
                            break
                    #print line,'ignore',ignore
                    if ignore==True:
                        header_done=True
                        continue
                    table_header.append([])
                    table_header[-1].append(line.strip())
                    yield_data.append([])
                    #lum_bands.append([])
                    #m_final.append([])
                    col_attrs_data.append([])
                    col_attrs_data[-1].append(line.strip())
                    header_done=True
                    continue
                if ignore==True:
                    continue
                if header_done==True:
                    # Numeric per-table attribute (e.g. 'H Lifetime: ...').
                    #col_attrs_data.append([])
                    col_attrs_data[-1].append(float(line.split(':')[1]))
                    #age is special col_attrs, used in chem_evol.py
                    if 'Lifetime' in line:
                        age.append(float(line.split(':')[1]))
                    '''
                    if 'kinetic energy' in line:
                        kin_e.append(float(line.split(':')[1]))
                    if 'band' in line:
                        lum_bands[-1].append(float(line.split(':')[1]))
                    if 'Mfinal' in line:
                        m_final[-1].append(float(line.split(':')[1]))
                    '''
                    continue
            if ignore==True:
                continue
            # Column title row: allocate one list per data column.
            if '&Isotopes &Yields' in line or '&Elements &Yields' in line:
                title_line=line.split('&')[1:]
                column_titles=[]
                for t in title_line:
                    yield_data[-1].append([])
                    column_titles.append(t.strip())
                #print column_titles
                continue
            #iso ,name and yields
            iso_name=line.split('&')[1].strip()
            #print line
            #print line.split('&')
            yield_data[-1][0].append(line.split('&')[1].strip())
            #if len(isotopes)>0:
            #    if not iso_name in isotopes:
            #else:
            yield_data[-1][1].append(float(line.split('&')[2].strip()))
            # for additional data
            for t in range(2,len(yield_data[-1])):
                # A and Z columns are integers, everything else floats.
                if column_titles[t] == 'A' or column_titles[t] =='Z':
                    yield_data[-1][t].append(int(line.split('&')[t+1].strip()))
                else:
                    yield_data[-1][t].append(float(line.split('&')[t+1].strip()))
        #choose only isotoopes and right order
        ######reading finished
        #In [43]: tablesN.col_attrs
        #Out[43]: ['Isotopes', 'Yields', 'X0', 'Z', 'A']
        # Optional filtering: keep only the requested species, in the
        # requested order; species absent from a table get zeros.
        if len(isotopes)>0:
            #print 'correct for isotopes'
            data_new=[]
            for k in range(len(yield_data)):
                #print 'k'
                data_new.append([])
                #print 'len',len(yield_data[k])
                #print ([[]]*len(yield_data[k]))[0]
                for h in range(len(yield_data[k])):
                    data_new[-1].append([])
                #print 'testaa',data_new[-1]
                data_all=yield_data[k]
                for iso_name in isotopes:
                    if iso_name in data_all[0]:
                        #print 'test',data_all[1][data_all[0].index(iso_name)]
                        for hh in range(1,len(data_all)):
                            data_new[-1][hh].append(data_all[hh][data_all[0].index(iso_name)])
                        #data_new[-1][1].append(data_all[2][data_all[0].index(iso_name)])
                        #data_new[-1][1].append(data_all[2][data_all[0].index(iso_name)])
                    else:
                        for hh in range(1,len(data_all)):
                            data_new[-1][hh].append(0)
                        #data_new[-1][1].append(0)
                        #print 'GRID exclude',iso_name
                    data_new[-1][0].append(iso_name)
            #print 'new list'
            #print data_new[0][0]
            #print data_new[0][1]
            yield_data=data_new
        self.yield_data=yield_data
        #table header points to element in yield_data
        self.table_idx={}
        i=0
        self.col_attrs=[]
        self.table_mz=[]
        self.metallicities=[]
        #self.col_attrs=table_header
        #go through all MZ pairs
        for table1 in table_header:
            #go through col_attrs
            for k in range(len(table1)):
                # Strip the leading 'H ' marker from each attribute line.
                table1[k]=table1[k][2:]
                if 'Table' in table1[k]:
                    # Map '(M=...,Z=...)' -> index into yield_data.
                    self.table_idx[table1[k].split(':')[1].strip()]=i
                    tablename=table1[k].split(':')[1].strip()
                    self.table_mz.append(tablename)
                    metal=tablename.split(',')[1].split('=')[1][:-1]
                    if float(metal) not in self.metallicities:
                        self.metallicities.append(float(metal))
                # Attribute names are collected once, from the first table.
                if table1 ==table_header[0]:
                    if 'Table' in table1[k]:
                        table1[k] = 'Table (M,Z):'
                    self.col_attrs.append(table1[k].split(':')[0].strip())
            #col_attrs_data
            #table1.split(':')[1].strip()
            i+=1
        #define header
        self.header_attrs={}
        #print 'header1: ',header1
        for h in header1:
            self.header_attrs[h.split(':')[0][1:].strip()]=h.split(':')[1].strip()
        self.data_cols=column_titles #previous data_attrs
        self.age=age
        #self.kin_e=kin_e
        #self.lum_bands=lum_bands
        #self.m_final=m_final
        self.col_attrs_data=col_attrs_data
def set(self,M=0,Z=-1,specie='',value=0):
'''
Replace the values in column 3 which
are usually the yields with value.
Use in combination with the write routine
to write out modification into new file.
M: initial mass to be modified
Z: initial Z to
specie: quantity (e.g. yield) of specie will be modified
'''
inp='(M='+str(float(M))+',Z='+str(float(Z))+')'
idx=self.table_idx[inp]
data=self.yield_data[idx]
idx_col=self.data_cols.index('Yields')
set1=self.yield_data[idx][idx_col]
specie_all= data[0]
for k in range(len(set1)):
if specie == specie_all[k]:
#return set1[k]
self.yield_data[idx][idx_col][k] = value
    def write_table(self,filename='isotope_yield_table_mod.txt'):
        '''
        Allows to write out table in NuGrid yield table format.
        Note that method has to be generalized for all tables
        and lines about NuGrid removed.

        fname: Table name

        needs ascii_table.py from NuGrid python tools

        NOTE(review): the file-level header keys written here
        ('NuGrid yields Set1', 'Isotopes', ...) must exist in
        self.header_attrs, i.e. the source table must be a NuGrid
        Set1-style table.
        '''
        #part of the NuGrid python tools
        import ascii_table as ascii1
        import getpass
        user=getpass.getuser()
        import time
        date=time.strftime("%d %b %Y", time.localtime())
        tables=self.table_mz
        #write header attrs
        f=open(filename,'w')
        self.header_attrs
        # ^ NOTE(review): bare attribute access above is a no-op.
        out=''
        l='H NuGrid yields Set1: '+self.header_attrs['NuGrid yields Set1']+'\n'
        out = out +l
        l='H Data prepared by: '+user+'\n'
        out=out +l
        l='H Data prepared date: '+date+'\n'
        out=out +l
        l='H Isotopes: '+ self.header_attrs['Isotopes'] +'\n'
        out = out +l
        l='H Number of metallicities: '+self.header_attrs['Number of metallicities']+'\n'
        out = out +l
        l='H Units: ' + self.header_attrs['Units'] + '\n'
        out = out + l
        f.write(out)
        f.close()
        # Append one block per (M,Z) table via ascii_table.
        for k in range(len(tables)):
            print 'Write table ',tables[k]
            mass=float(self.table_mz[k].split(',')[0].split('=')[1])
            metallicity=float(self.table_mz[k].split(',')[1].split('=')[1][:-1])
            data=self.yield_data[k]
            #search data_cols
            idx_y=self.data_cols.index('Yields')
            yields=data[idx_y]
            idx_x0=self.data_cols.index('X0')
            mass_frac_ini=data[idx_x0]
            idx_specie=self.data_cols.index(self.data_cols[0])
            species=data[idx_specie]
            #over col attrs, first is MZ pair which will be skipped, see special_header
            attr_lines=[]
            for h in range(1,len(self.col_attrs)):
                attr=self.col_attrs[h]
                idx=self.col_attrs.index(attr)
                # over MZ pairs
                attr_data=self.col_attrs_data[k][idx]
                line=attr+': '+'{:.3E}'.format(attr_data)
                attr_lines.append(line)
            special_header='Table: (M='+str(mass)+',Z='+str(metallicity)+')'
            dcols=[self.data_cols[0],'Yields','X0']
            data=[species,list(yields),mass_frac_ini]
            headers=[special_header]+attr_lines
            ascii1.writeGCE_table(filename=filename,headers=headers,data=data,dcols=dcols)
        '''
        #add ages
        #time=self.age[k]
        time=[]
        idx=self.col_attrs.index('Lifetime')
        for k in range(len(self.table_mz)):
            time.append(col_attrs_data[k][idx])
        f1=open(filename,'r')
        lines=f1.readlines()
        f1.close()
        i=-1
        line1=''
        while (True):
            i+=1
            if i>len(lines)-1:
                break
            line=lines[i]
            line1+=lines[i]
            if tables[k] in lines[i]:
                line1+=('H Lifetime: '+'{:.3E}'.format(time)+'\n')
        f1=open(filename,'w')
        f1.write(line1)
        f1.close()
        '''
def get(self,M=0,Z=-1,quantity='',specie=''):
    '''
    Allows to extract table data in 2 Modes:

    1) For extracting of table data for
       star of mass M and metallicity Z.
       Returns either table attributes,
       given by yield.col_attrs
       or table columns,
       given by yield.data_cols.

    2) For extraction of a table attribute
       from all available tables. Can be
       directly used in the following way:
       get(tableattribute)

    M: Stellar mass in Msun
    Z: Stellar metallicity (e.g. solar: 0.02)
    quantity: table attribute or data column/data_cols
    specie: optional, return certain specie (e.g. 'H-1')
    '''
    #scale down to Z=0.00001
    #print 'get yields ',Z
    if float(Z) == 0.00001:
        #scale abundance
        if quantity=='Yields':
            # NOTE(review): get_scaled_Z is called without its four
            # required positional table arguments -- confirm call site
            return self.get_scaled_Z(M=M,Z=Z,quantity=quantity,specie=specie)
        #Take all other parameter from Z=0.0001 case
        else:
            Z=0.0001
    # all_tattrs: True when mode 2 (attribute over all tables) applies
    all_tattrs=False
    if Z ==-1:
        if M ==0 and len(quantity)>0:
            quantity1=quantity
            all_tattrs=True
        elif (M in self.col_attrs) and quantity == '':
            # get(tableattribute) shorthand: the attribute arrived via M
            quantity1=M
            all_tattrs=True
        else:
            print 'Error: Wrong input'
            return 0
        quantity=quantity1
    if (all_tattrs==False) and (not M ==0):
        # look up the table index for this exact (M,Z) label
        inp='(M='+str(float(M))+',Z='+str(float(Z))+')'
        idx=self.table_idx[inp]
    #print 'len tableidx:',len(self.table_idx)
    #print 'len age',len(self.age)
    '''
    if quantity=='Lifetime':
        if all_tattrs==True:
            data=self.age
        else:
            data=self.age[idx]
        return data
    if quantity =='Total kinetic energy':
        if all_tattrs==True:
            data=self.kin_e
        else:
            data=self.kin_e[idx]
        return data
    if quantity == 'Lyman-Werner band':
        if all_tattrs==True:
            data=[list(i) for i in zip(*self.lum_bands)][0]
        else:
            data=self.lum_bands[idx][0]
        return data
    if quantity== 'Hydrogen-ionizing band':
        if all_tattrs==True:
            data=[list(i) for i in zip(*self.lum_bands)][1]
        else:
            data=self.lum_bands[idx][1]
        return data
    if quantity == 'High-energy band':
        if all_tattrs==True:
            data=[list(i) for i in zip(*self.lum_bands)][2]
        else:
            data=self.lum_bands[idx][2]
        return data
    if quantity == 'Mfinal':
        if all_tattrs==True:
            data=self.m_final
        else:
            data=self.m_final[idx][0]
        return data
    if quantity== 'Table (M,Z)':
        if all_tattrs==True:
            data=self.table_mz
        else:
            data=self.table_mz[idx]
        return data
    '''
    # table attribute requested: single value or one per table
    if quantity in self.col_attrs:
        if all_tattrs==False:
            data=self.col_attrs_data[idx][self.col_attrs.index(quantity)]
            return data
        else:
            data=[]
            for k in range(len(self.table_idx)):
                data.append(self.col_attrs_data[k][self.col_attrs.index(quantity)])
            return data
    # special quantity: list all masses available at metallicity Z
    if quantity=='masses':
        data_tables=self.table_mz
        masses=[]
        for table in data_tables:
            if str(float(Z)) in table:
                masses.append(float(table.split(',')[0].split('=')[1]))
        return masses
    else:
        # data column requested for one (M,Z) table
        data=self.yield_data[idx]
        if specie=='':
            idx_col=self.data_cols.index(quantity)
            set1=data[idx_col]
            return set1
        else:
            # single-species lookup: scan the species name column
            idx_col=self.data_cols.index('Yields')
            set1=data[idx_col]
            specie_all= data[0]
            for k in range(len(set1)):
                if specie == specie_all[k]: #bug was here
                    return set1[k]
def get_scaled_Z(self,table, table_yields,iniabu,iniabu_scale,M=0,Z=0,quantity='Yields',specie=''):
    '''
    Scale yields of the Z=1e-4 grid down to lower metallicity
    (e.g. Z=1e-5). Only isotopes of the elements
    'He','C','O','Mg','Ca','Ti','Fe','Co','Zn','H' (primary) and
    'N' (secondary) are scaled; all other isotopes are skipped.

    table: production-factor table (read_nugrid_yields instance)
    table_yields: absolute yield table (read_nugrid_yields instance)
    iniabu: initial abundances at the target (low) Z
    iniabu_scale: initial abundances at the reference Z=1e-4
    M: stellar mass in Msun selecting which grid entry to return
    Z: metallicity (not used in the computation; kept for interface)
    quantity: only 'Yields' is supported
    specie: optional isotope name (e.g. 'H-1'); if given, return only
            that isotope's scaled yield

    NOTE(review): self.get() calls this method with only
    M/Z/quantity/specie keywords, which would fail because the four
    table arguments are required positional parameters -- confirm the
    intended call site.
    '''
    elem_prim=['He','C', 'O', 'Mg', 'Ca', 'Ti', 'Fe', 'Co','Zn','H']
    elem_sec=['N']
    ##Scale down
    import re
    # translate iniabu keys (e.g. ' h   1') into 'H-1' style names
    iniiso=[]
    iniabu_massfrac=[]
    for k in range(len(iniabu.habu)):
        iso=iniabu.habu.keys()[k]
        iniiso.append(re.split(r'(\d+)',iso)[0].strip().capitalize()+'-'+re.split(r'(\d+)',iso)[1])
        iniabu_massfrac.append(iniabu.habu.values()[k])
    # same translation for the reference-metallicity abundances
    iniiso_scale=[]
    iniabu_scale_massfrac=[]
    for k in range(len(iniabu_scale.habu)):
        iso=iniabu_scale.habu.keys()[k]
        iniiso_scale.append(re.split(r'(\d+)',iso)[0].strip().capitalize()+'-'+re.split(r'(\d+)',iso)[1])
        iniabu_scale_massfrac.append(iniabu_scale.habu.values()[k])
    grid_yields=[]
    grid_masses=[]
    isotope_names=[]
    origin_yields=[]
    for k in range(len(table.table_mz)):
        if 'Z=0.0001' in table.table_mz[k]:
            mini=float(table.table_mz[k].split('=')[1].split(',')[0])
            grid_masses.append(mini)
            #this is production factor (see file name)
            prodfac=table.get(M=mini,Z=0.0001,quantity='Yields')
            isotopes=table.get(M=mini,Z=0.0001,quantity='Isotopes')
            #this is yields
            yields=table_yields.get(M=mini,Z=0.0001,quantity='Yields')
            mtot_eject=sum(yields)
            origin_yields.append([])
            mout=[]
            sumnonh=0
            isotope_names.append([])
            for h in range(len(isotopes)):
                # isotopes of elements not considered/scaled are skipped
                if not (isotopes[h].split('-')[0] in (elem_prim+elem_sec) ):
                    continue
                isotope_names[-1].append(isotopes[h])
                idx=iniiso.index(isotopes[h])
                inix=iniabu_massfrac[idx]
                idx=iniiso_scale.index(isotopes[h])
                inix_scale=iniabu_scale_massfrac[idx]
                prodf=prodfac[isotopes.index(isotopes[h])]
                origin_yields[-1].append(yields[isotopes.index(isotopes[h])])
                if isotopes[h].split('-')[0] in elem_prim:
                    #primary: net production scales with the reference
                    #initial abundance, plus the low-Z initial contribution
                    mout1=(prodf-1.)*(inix_scale*mtot_eject) + (inix*mtot_eject)
                    #check if amount destroyed was more than initially there;
                    #only what was there can be destroyed
                    if mout1<0:
                        mout1=0
                else:
                    #secondary: production scales with the initial abundance itself
                    mout1=(prodf-1.)*(inix*mtot_eject) + (inix*mtot_eject)
                # bug fix: original tested "(not isotopes[h]) == 'H-1'",
                # which is always False, so the non-hydrogen surplus was
                # never accumulated
                if (isotopes[h] != 'H-1') and (mout1>0):
                    sumnonh+= (mout1 - (inix*mtot_eject))
                mout.append(mout1)
            #for mass conservation, assume total mass lost is same as in case of Z=0.0001
            idx_h=isotope_names[-1].index('H-1')
            mout[idx_h]-=sumnonh
            # round to 3 significant digits (renamed from k to avoid
            # shadowing the outer loop variable)
            for j in range(len(mout)):
                mout[j] = float('{:.3E}'.format(mout[j]))
            grid_yields.append(mout)
    ####data
    idx=grid_masses.index(M)
    if specie=='':
        return grid_yields[idx]
    else:
        # bug fix: original indexed the undefined name "data" here
        set1=grid_yields[idx]
        names=isotope_names[idx]
        for k in range(len(names)):
            if specie in names[k]:
                return set1[k]
class read_yield_sn1a_tables():
    '''Reader for SN1a yield tables in the '&'-separated NuGrid-style format.'''
    def __init__(self,sn1a_table,isotopes=[]):
        '''
        Read SN1a tables.
        Fills up missing isotope yields
        with zeros.
        If different Zs are available
        do ...

        sn1a_table: path of the table file
        isotopes: reference isotope list; yields are re-ordered to it and
                  missing isotopes padded with 0. If empty, the table's own
                  isotope list is used. (Mutable default is safe here: the
                  argument is only rebound, never mutated.)
        '''
        import re
        # notebookmode is a module-level flag; copy the table into tmp/
        # first when running inside a notebook sandbox
        if notebookmode==True:
            os.system('sudo python cp.py '+sn1a_table)
            f1=open('tmp/'+sn1a_table)
            lines=f1.readlines()
            f1.close()
            os.system('sudo python delete.py '+sn1a_table)
        else:
            f1=open(sn1a_table)
            lines=f1.readlines()
            f1.close()
        iso=[]
        self.header=[]
        self.col_attrs=[]
        yields=[]
        metallicities=[]
        isotopes_avail=[]
        for line in lines:
            #for header
            # header lines start with 'H'
            if 'H' in line[0]:
                self.header.append(line)
                continue
            # column-header line: '&Isotopes&Z=...&Z=...'
            if ('Isotopes' in line) or ('Elements' in line):
                l=line.replace('\n','').split('&')[1:]
                self.col_attrs=l
                metallicities=l[1:]
                #print metallicities
                # metallicity dependent yields
                #if len(l)>2:
                #else:
                # one yield list per metallicity column
                for k in l[1:]:
                    yields.append([])
                continue
            # data line: '&isotope&yield_Z1&yield_Z2...'
            linesp=line.strip().split('&')[1:]
            iso.append(linesp[0].strip())
            #print iso
            for k in range(1,len(linesp)):
                yields[k-1].append(float(linesp[k]))
        #if isotope list emtpy take all isotopes
        if len(isotopes)==0:
            isotopes=iso
        yields1=[]
        #fill up the missing isotope yields with zero
        for z in range(len(yields)):
            yields1.append([])
            for iso1 in isotopes:
                #iso1=iso1.split('-')[1]+iso1.split('-')[0]
                #ison= iso1+((10-len(iso1))*' ')
                if iso1 in iso:
                    yields1[-1].append(yields[z][iso.index(iso1)])
                else:
                    yields1[-1].append(0.)
        self.yields=yields1
        self.metallicities=[]
        for m in metallicities:
            # column labels look like 'Z=0.02'
            self.metallicities.append(float(m.split('=')[1]))
        #self.metallicities=metallicities
        #print yields1
        # NOTE(review): self.isotopes keeps the table's raw isotope list,
        # while self.yields is ordered per the *requested* isotopes list --
        # when a custom isotopes argument is passed the two are mismatched;
        # confirm intended behavior
        self.isotopes=iso
    def get(self,Z=0,quantity='Yields',specie=''):
        '''
        Allows to extract SN1a table data.
        If metallicity dependent yield tables
        were used, data is taken for the closest metallicity available
        to reach given Z

        Z: metallicity to match (closest available column is used)
        quantity: if 'Yields' return yields
                  if 'Isotopes' return all isotopes available
        specie: unused in this reader
        '''
        if quantity=='Yields':
            # pick the column whose metallicity is closest to Z
            idx = (np.abs(np.array(self.metallicities)-Z)).argmin()
            yields=self.yields[idx]
            return np.array(yields)
        elif quantity=='Isotopes':
            return self.isotopes
class read_yield_rawd_tables():
def __init__(self,rawd_table,isotopes):
'''
Read RAWD tables.
Fills up missing isotope yields
with zeros.
If different Zs are available
do ...
'''
import re
if notebookmode==True:
os.system('sudo python cp.py '+rawd_table)
f1=open('tmp/'+rawd_table)
lines=f1.readlines()
f1.close()
os.system('sudo python delete.py '+rawd_table)
else:
f1=open(rawd_table)
lines=f1.readlines()
f1.close()
iso=[]
self.header=[]
self.col_attrs=[]
yields=[]
metallicities=[]
for line in lines:
#for header
if 'H' in line[0]:
self.header.append(line)
continue
if ('Isotopes' in line) or ('Elements' in line):
l=line.replace('\n','').split('&')[1:]
self.col_attrs=l
metallicities=l[1:]
#print metallicities
# metallicity dependent yields
#if len(l)>2:
#else:
| |
in name_parts:
if folder_name == '':
pass
elif folder_name == 'Public Objects':
folder_contents = self.get_folder_contents_by_guid(system_folder=TaskProc.SystemFolders.PublicObjects,
type_restriction=intermediatefolder_type_restriction,
sort_key=sort_key,
sort_ascending=sort_ascending,
)
else:
found = False
new_folder_contents = None
for sub_folder in folder_contents:
if sub_folder.name == folder_name:
found = True
if sub_folder.object_type == ObjectType.Folder:
# If this is the last folder use the passed type_restriction and name patterns
if folder_name == name_parts[-1]:
new_folder_contents = self.get_folder_contents_by_guid(
folder_guid=sub_folder.guid,
type_restriction=type_restriction,
sort_key=sort_key,
sort_ascending=sort_ascending,
name_patterns_to_include=name_patterns_to_include,
name_patterns_to_exclude=name_patterns_to_exclude,
)
else:
new_folder_contents = self.get_folder_contents_by_guid(
folder_guid=sub_folder.guid,
type_restriction=intermediatefolder_type_restriction,
sort_key=sort_key,
sort_ascending=sort_ascending,
)
else:
new_folder_contents = sub_folder
if not found:
if isinstance(name, str):
msg = f'"{folder_name}" not found when processing path {name}\nParts={name_parts}'
else:
msg = f'"{folder_name}" not found when processing path {name}'
raise FileNotFoundError(msg)
else:
folder_contents = new_folder_contents
return folder_contents
def get_folder_contents(self,
                        name: Union[str, List[str]],
                        type_restriction: Optional[set] = None,
                        sort_key: Optional[FolderSortOrder] = None,
                        sort_ascending: Optional[bool] = True,
                        recursive: Optional[bool] = True,
                        flatten_structure: Optional[bool] = True,
                        name_patterns_to_include: Optional[List[str]] = None,
                        name_patterns_to_exclude: Optional[List[str]] = None,
                        ) -> List[FolderObject]:
    """
    Return the contents of a folder identified either by GUID or by path.

    Parameters
    ----------
    name:
        Either a 32-character GUID (no path separators) or a folder path
        (string or list of path parts).
    type_restriction:
        Set of ObjectSubType values to keep in the result.
    sort_key / sort_ascending:
        Server-side sort options.
    recursive:
        Also descend into sub-folders.
    flatten_structure:
        If True, sub-folder contents are appended to the returned list;
        otherwise they are attached to each folder item's ``contents``.
    name_patterns_to_include / name_patterns_to_exclude:
        Name filters passed through to the listing calls.
    """
    if type_restriction is not None:
        # When recursing we still need to see sub-folders in the direct
        # listing, so add Folder to the restriction used for it; the
        # final filter below re-applies the caller's restriction.
        sub_type_restriction = type_restriction.copy()
        if recursive:
            sub_type_restriction.add(ObjectSubType.Folder)
    else:
        sub_type_restriction = None
    # A 32-char name without path separators is treated as a GUID
    if isinstance(name, str) and len(name) == 32 and '/' not in name and '\\' not in name:
        folder_contents = self.get_folder_contents_by_guid(folder_guid=name,
                                                           type_restriction=sub_type_restriction,
                                                           sort_key=sort_key,
                                                           sort_ascending=sort_ascending,
                                                           name_patterns_to_include=name_patterns_to_include,
                                                           name_patterns_to_exclude=name_patterns_to_exclude,
                                                           )
    else:
        folder_contents = self.get_folder_contents_by_name(name,
                                                           type_restriction=sub_type_restriction,
                                                           sort_key=sort_key,
                                                           sort_ascending=sort_ascending,
                                                           name_patterns_to_include=name_patterns_to_include,
                                                           name_patterns_to_exclude=name_patterns_to_exclude,
                                                           )
    if recursive:
        # Bug fix: iterate over a snapshot. The original iterated
        # folder_contents while extending it inside the loop, so newly
        # appended folder items were visited and recursed into again
        # (duplicated results) even though the recursive call had
        # already walked them.
        for item in list(folder_contents):
            if item.object_type == ObjectType.Folder:
                try:
                    contents = self.get_folder_contents(
                        name=item.guid,
                        type_restriction=type_restriction,
                        sort_key=sort_key,
                        sort_ascending=sort_ascending,
                        recursive=recursive,
                        flatten_structure=flatten_structure,
                        name_patterns_to_include=name_patterns_to_include,
                        name_patterns_to_exclude=name_patterns_to_exclude,
                    )
                except FileNotFoundError as e:
                    # Keep the error object; in non-flat mode it is
                    # attached to the folder item instead of raising.
                    contents = e
                if flatten_structure:
                    if isinstance(contents, list):
                        folder_contents.extend(contents)
                else:
                    item.contents = contents
    if flatten_structure:
        if type_restriction is not None:
            # Re-apply the caller's restriction (drops the Folder entries
            # added above unless the caller asked for them).
            folder_contents = [sub for sub in folder_contents if sub.object_subtype in type_restriction]
    return folder_contents
def get_folder_object(self,
                      name: str,
                      type_restriction: Optional[set] = None,
                      ) -> FolderObject:
    """
    Look up a single object by its full path and return its FolderObject.

    Raises FileNotFoundError when the parent folder holds zero or more
    than one match for the final path component.
    """
    parts = TaskProc.path_parts(name)
    folder_name = '/'.join(parts[:-1])
    object_name = parts[-1]
    matches = self.get_folder_contents(folder_name, type_restriction=type_restriction, name_patterns_to_include=[object_name])
    if not matches:
        raise FileNotFoundError("Folder {} does not contain {} (that matches type {})".format(
            folder_name, object_name, type_restriction
        ))
    if len(matches) > 1:
        raise FileNotFoundError("Folder {} does contains multiple matches for {} (that match type {})\n {}".format(
            folder_name, object_name, type_restriction, matches,
        ))
    return matches[0]
def get_matching_objects_list(self, path_list: list, type_restriction: set, error_list=None) -> List[FolderObject]:
    """
    Get a list of matching FolderObjects based on a list of object name patterns.

    Patterns accept wildcards:
     - * for any set of characters. Allowed in the object name part of the path but not the folder name part.
     - Patterns that end in [r] will match objects in any sub folder. Any non / characters immediately before
       the [r] will be considered as an object name pattern to match in all sub folders.

    Parameters
    ----------
    path_list:
        A list of path patterns (a single string is also accepted)
    type_restriction:
        A set of ObjectSubType values to allow.
    error_list:
        Option list to return path errors (FileNotFoundError) in. If not passed, then errors are raised.

    Returns
    -------
    A list of matching FolderObject
    """
    if isinstance(path_list, str):
        path_list = [path_list]
    result_list = list()
    for path in path_list:
        path = path.strip()
        try:
            if path == '':
                # blank entries are silently skipped
                pass
            elif path[-3:].lower() == '[r]':
                # Ends in [r] so recursive search is needed
                path_parts = self.path_parts(path)
                folder = path_parts[:-1]
                # strip the '[r]' suffix to get the object-name pattern
                file_name = path_parts[-1][:-3]
                if file_name == '':
                    # bare '[r]': no name filter, take everything
                    file_name_list = None
                else:
                    file_name_list = [file_name]
                contents = self.get_folder_contents(
                    name=folder,
                    name_patterns_to_include=file_name_list,
                    recursive=True,
                    flatten_structure=True,
                    type_restriction=type_restriction,
                )
                if len(contents) == 0:
                    msg = f"Path pattern {path} returned no matches"
                    if error_list is not None:
                        error_list.append(msg)
                    else:
                        self.log.warning(msg)
                result_list.extend(contents)
            else:
                # Non recursive pass last part as name_patterns_to_include
                path_parts = self.path_parts(path)
                contents = self.get_folder_contents(
                    name=path_parts[:-1],
                    name_patterns_to_include=[path_parts[-1]],
                    recursive=False,
                    flatten_structure=True,
                    type_restriction=type_restriction,
                )
                if len(contents) == 0:
                    # NOTE(review): unlike the [r] branch above, a no-match
                    # here is only logged and never added to error_list --
                    # confirm whether this asymmetry is intended
                    self.log.warning("Path pattern {} returned no matches".format(path))
                result_list.extend(contents)
        except FileNotFoundError as e:
            if error_list is None:
                raise e
            else:
                error_list.append(f'{path} yields {e}')
    return result_list
def get_executable_object(self, folder_obj: FolderObject) -> ExecutableBase:
    """Instantiate the executable wrapper that matches the object's subtype."""
    obj_name = folder_obj.full_name()
    if folder_obj.object_subtype == ObjectSubType.ReportWritingDocument:
        # Documents get their own wrapper
        return Document(self, guid=folder_obj.guid, name=obj_name)
    # Cubes and regular reports are both handled by Report
    return Report(self, guid=folder_obj.guid, name=obj_name)
def list_elements(self, attribute_id):
    """
    Returns the elements associated with the given attribute id.

    Note that if the call fails (i.e. MicroStrategy returns an
    out of memory stack trace) the returned list is empty

    Args:
        attribute_id (str): the attribute guid

    Returns:
        list: a list of strings containing the names for attribute values
    """
    response = self.request({'taskId': 'browseElements',
                             'attributeID': attribute_id,
                             'sessionState': self._session})
    names = []
    for block in response('block'):
        name_tag = block.find('n')
        if name_tag.string:
            names.append(name_tag.string)
    return names
def check_user_privileges(self, privilege_types: Set[PrivilegeTypes]=None) -> dict:
    """Query the server for the session user's privileges.

    Returns a dict mapping each privilege (a PrivilegeTypes member when the
    numeric id is recognized, otherwise the raw id) to True/False.
    """
    if privilege_types is None:
        # default: just check the analysis-execution privilege
        privilege_types = {PrivilegeTypes.WebExecuteAnalysis}
    response = self.request({'taskId': 'checkUserPrivileges',
                             'privilegeTypes': privilege_types,
                             'sessionState': self._session})
    priv_entries = response.find_all('privilege')
    results = dict()
    for entry in priv_entries:
        key = entry['type']
        try:
            # numeric ids are mapped back to enum members when known
            key = int(key)
            if key in PrivilegeTypesIDDict:
                key = PrivilegeTypesIDDict[key]
        except ValueError:
            pass
        raw_value = entry['value']
        if raw_value == '1':
            flag = True
        elif raw_value == '0':
            flag = False
        else:
            raise ValueError("Priv value {} is not valid in {}".format(raw_value, priv_entries))
        results[key] = flag
    return results
def get_user_info(self):
    """Return (full_name, user_id) of the session user.

    Derived from the first profile object's path entry, which looks like
    "Full Name (userid)".
    """
    profile_objects = self.get_folder_contents_by_guid(system_folder=TaskProc.SystemFolders.ProfileObjects)
    profile_name = profile_objects[0].path[-1]
    full_name, user_id_part = profile_name.split('(', 1)
    # drop the trailing ')'
    return full_name, user_id_part[:-1]
def get_attribute(self, attribute_id):
    """
    Returns the attribute object for the given attribute id.

    Args:
        attribute_id (str): the attribute guid

    Returns:
        Attribute: Attribute object for this guid

    Raises:
        MstrClientException: if no attribute id is supplied
    """
    if not attribute_id:
        raise MstrClientException("You must provide an attribute id")
    response = self.request({'taskId': 'getAttributeForms',
                             'attributeID': attribute_id,
                             'sessionState': self._session})
    guid = response.find('dssid').string
    name = response.find('n').string
    return Attribute(guid, name)
def logout(self):
    """End the server session; always clears the local session state."""
    arguments = {'taskId': 'logout', 'sessionState': self._session}
    arguments.update(BASE_PARAMS)
    try:
        result = self.request(arguments, max_retries=0)
    except Exception as e:
        # best-effort: record the failure but still drop the session
        result = str(e)
    self._session = None
    if self.trace:
        self.log.debug("logging out returned %s" % result)
def request(self, arguments: dict, max_retries: int = None) -> BeautifulSoup:
"""
Assembles the url and performs a get request to
the MicroStrategy Task Service API
        Arguments
---------
arguments:
Maps get key parameters to values
max_retries:
Optional. Number of retries to allow. Default = 1.
Returns:
The xml response as a BeautifulSoup 4 object.
"""
if max_retries is None:
max_retries = self.max_retries
arguments.update(BASE_PARAMS)
for arg_name, arg_value in arguments.items():
if isinstance(arg_value, str):
pass
elif isinstance(arg_value, Enum):
arguments[arg_name] = str(arg_value.value)
elif isinstance(arg_value, BitSet):
arguments[arg_name] = arg_value.combine()
elif isinstance(arg_value, list) or isinstance(arg_value, set):
if len(arg_value) == 0:
arguments[arg_name] = ''
elif isinstance(list(arg_value)[0], Enum):
new_arg_value = set()
for arg_sub_value in arg_value:
if isinstance(arg_sub_value, Enum):
new_arg_value.add(str(arg_sub_value.value))
else:
new_arg_value.add(str(arg_sub_value))
arg_value = new_arg_value
arguments[arg_name] = ','.join(arg_value)
else:
arguments[arg_name] = str(arg_value)
if self.trace:
self.log.debug("arguments {}".format(arguments))
request = self._base_url + urllib.parse.urlencode(arguments)
if self.trace:
self.log.debug("submitting request {}".format(request))
result_bs4 = None
done = False
tries = 0
exception = None
while not done:
try:
response = requests.get(request, cookies=self.cookies)
if self.trace:
self.log.debug(f"received response {response}")
if response.status_code != 200:
exception = MstrClientException(
msg=f"Server response {response}.",
request=request
)
else:
self.cookies = response.cookies
result_bs4 = BeautifulSoup(response.text, 'xml')
task_response = result_bs4.find('taskResponse')
if task_response is None:
self.log.error(response)
self.log.error(task_response)
error = f"Unexpected server response with no taskResponse tag {result_bs4.prettify()}"
exception = MstrClientException(
msg=f"Server error '{error}'",
request=request
)
else:
if task_response.attrs is None or 'statusCode' not in task_response.attrs:
self.log.error(response)
self.log.error(task_response)
error = f"Unexpected server response with no statusCode in taskResponse tag {task_response}"
exception = MstrClientException(
msg=f"Server error '{error}'",
request=request
)
else:
if task_response['statusCode'] in ['400', '500']:
self.log.error(response)
self.log.error(task_response)
error = task_response['errorMsg']
exception = MstrClientException(
msg=f"Server error '{error}'",
request=request
)
except requests.packages.urllib3.exceptions.NewConnectionError as e:
exception = e
if exception is None:
done = True
else:
error = exception.msg
messages_to_retry = self._messages_to_retry
time.sleep(1)
if isinstance(exception, requests.packages.urllib3.exceptions.NewConnectionError):
if tries < max_retries:
self.log.info("Request failed with error {}".format(repr(exception)))
time.sleep(self.retry_delay)
self.log.info("Retrying. Tries={} < {} max".format(tries, max_retries))
# Count these as 1/1000 of a try (allows 5 minutes of retries) for each max_retries
tries += (1/300)
else:
self.log.error('. Tries limit {} reached'.format(tries))
raise exception
elif 'automatically logged out' in error:
if tries < max_retries:
tries += 1
# We can't re-login if we don't have a username (ie. we authenticated with a session_state value)
if self.username is not None:
self.log.info("Request failed with error {}".format(repr(exception)))
time.sleep(self.retry_delay)
self.log.info("Logging back in. | |
# datasets.py
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import sys
import cv2
import h5py
import numpy as np
import tensorflow as tf
from glob import glob
from tqdm import tqdm
from multiprocessing import Pool
from sklearn.model_selection import train_test_split
seed = 1337
def one_hot(labels_dense, num_classes=10):
    """Convert a 1-D array of integer labels into a (N, num_classes) one-hot matrix."""
    n_labels = labels_dense.shape[0]
    encoded = np.zeros((n_labels, num_classes))
    # place a 1 at each row's label column via fancy indexing
    encoded[np.arange(n_labels), labels_dense.ravel()] = 1
    return encoded
class DataSetLoader:
    """Loads image data from one of several formats (img/tfr/h5/npy) and can
    convert/save it to another of those formats.

    The source format is detected from the file extension of *path*; the
    target format is the second half of *name* (e.g. 'to_tfr').
    """

    @staticmethod
    def get_extension(ext):
        """Map a file extension to one of the supported data-type tags."""
        if ext in ['jpg', 'png']:
            return 'img'
        elif ext == 'tfr':
            return 'tfr'
        elif ext == 'h5':
            return 'h5'
        elif ext == 'npy':
            return 'npy'
        else:
            raise ValueError("[-] There'is no supporting file... [%s] :(" % ext)

    @staticmethod
    def get_img(path, size=(64, 64), interp=cv2.INTER_CUBIC):
        """Read an image from *path* as RGB, resizing to *size* when needed."""
        img = cv2.imread(path, cv2.IMREAD_COLOR)[..., ::-1]  # BGR to RGB
        if img.shape[0] == size[0]:
            return img
        else:
            # bug fix: cv2.resize's third *positional* argument is `dst`,
            # not the interpolation flag; the flag must go by keyword
            return cv2.resize(img, size, interpolation=interp)

    @staticmethod
    def parse_tfr_tf(record):
        """Decode one TFRecord example into an image tensor (TF graph ops)."""
        features = tf.parse_single_example(record, features={
            'shape': tf.FixedLenFeature([3], tf.int64),
            'data': tf.FixedLenFeature([], tf.string)})
        data = tf.decode_raw(features['data'], tf.uint8)
        return tf.reshape(data, features['shape'])

    @staticmethod
    def parse_tfr_np(record):
        """Decode one serialized TFRecord example into a numpy uint8 array."""
        ex = tf.train.Example()
        ex.ParseFromString(record)
        shape = ex.features.feature['shape'].int64_list.value
        data = ex.features.feature['data'].bytes_list.value[0]
        return np.fromstring(data, np.uint8).reshape(shape)

    @staticmethod
    def img_scaling(img, scale='0,1'):
        """Scale pixel values into [0, 1] ('0,1') or [-1, 1] ('-1,1')."""
        if scale == '0,1':
            try:
                img /= 255.
            except TypeError:  # in-place divide fails on integer arrays
                img = np.true_divide(img, 255.0, casting='unsafe')
        elif scale == '-1,1':
            try:
                img = (img / 127.5) - 1.
            except TypeError:
                img = np.true_divide(img, 127.5, casting='unsafe') - 1.
        else:
            raise ValueError("[-] Only '0,1' or '-1,1' please - (%s)" % scale)
        return img

    def __init__(self, path, size=None, name='to_tfr', use_save=False, save_file_name='',
                 buffer_size=4096, n_threads=8,
                 use_image_scaling=True, image_scale='0,1', img_save_method=cv2.INTER_LINEAR, debug=True):
        """
        :param path: source file or directory of image files
        :param size: (height, width, channel) of the images; required
        :param name: operation tag, '<src>_to_<dst>' style; only the part
                     after '_' (the destination type) is used
        :param use_save: convert and save to the destination type
        :param save_file_name: output file/directory name (required if use_save)
        :param buffer_size: TFRecord read buffer size
        :param n_threads: parallel map threads for TFRecord parsing
        :param use_image_scaling: scale pixel values after loading
        :param image_scale: '0,1' or '-1,1'
        :param img_save_method: cv2 interpolation flag used when resizing
        :param debug: print diagnostic info for the first loaded sample
        """
        self.op = name.split('_')
        self.debug = debug
        try:
            assert len(self.op) == 2
        except AssertionError:
            raise AssertionError("[-] Invalid Target Types :(")
        self.size = size
        try:
            assert self.size
        except AssertionError:
            raise AssertionError("[-] Invalid Target Sizes :(")
        # To-DO
        # Supporting 4D Image
        self.height = size[0]
        self.width = size[1]
        self.channel = size[2]
        self.path = path
        try:
            assert os.path.exists(self.path)
        except AssertionError:
            raise AssertionError("[-] Path(%s) does not exist :(" % self.path)
        self.buffer_size = buffer_size
        self.n_threads = n_threads
        if os.path.isfile(self.path):
            self.file_list = [self.path]
            self.file_ext = self.path.split('.')[-1]
            self.file_names = [self.path]
        else:
            # NOTE(review): file_list holds bare names (os.listdir) while
            # file_names holds full paths (glob) -- load_h5 opens file_list
            # entries, which only works from the data directory; confirm
            self.file_list = sorted(os.listdir(self.path))
            self.file_ext = self.file_list[0].split('.')[-1]
            self.file_names = glob(self.path + '/*')
        self.raw_data = np.ndarray([], dtype=np.uint8)  # (N, H * W * C)
        if self.debug:
            print("[*] Detected Path is [%s]" % self.path)
            print("[*] Detected File Extension is [%s]" % self.file_ext)
            print("[*] Detected First File Name is [%s] (%d File(s))" % (self.file_names[0], len(self.file_names)))
        self.types = ('img', 'tfr', 'h5', 'npy')  # Supporting Data Types
        self.op_src = self.get_extension(self.file_ext)
        self.op_dst = self.op[1]
        try:
            chk_src, chk_dst = False, False
            for t in self.types:
                if self.op_src == t:
                    chk_src = True
                if self.op_dst == t:
                    chk_dst = True
            assert chk_src and chk_dst
        except AssertionError:
            raise AssertionError("[-] Invalid Operation Types (%s, %s) :(" % (self.op_src, self.op_dst))
        self.img_save_method = img_save_method
        # load from the detected source type
        if self.op_src == self.types[0]:
            self.load_img()
        elif self.op_src == self.types[1]:
            self.load_tfr()
        elif self.op_src == self.types[2]:
            self.load_h5()
        elif self.op_src == self.types[3]:
            self.load_npy()
        else:
            raise NotImplementedError("[-] Not Supported Type :(")
        # Random Shuffle (fixed seed for reproducibility)
        order = np.arange(self.raw_data.shape[0])
        np.random.RandomState(seed).shuffle(order)
        self.raw_data = self.raw_data[order]
        # Clip [0, 255]
        try:
            self.raw_data = np.rint(self.raw_data).clip(0, 255).astype(np.uint8)
        except MemoryError:
            # best-effort: leave data un-clipped when it doesn't fit
            pass
        self.use_save = use_save
        self.save_file_name = save_file_name
        if self.use_save:
            try:
                assert self.save_file_name
            except AssertionError:
                raise AssertionError("[-] Empty save-file name :(")
            # convert to the requested destination type
            if self.op_dst == self.types[0]:
                self.convert_to_img()
            elif self.op_dst == self.types[1]:
                self.tfr_opt = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.NONE)
                self.tfr_writer = tf.python_io.TFRecordWriter(self.save_file_name + ".tfrecords", self.tfr_opt)
                self.convert_to_tfr()
            elif self.op_dst == self.types[2]:
                self.convert_to_h5()
            elif self.op_dst == self.types[3]:
                self.convert_to_npy()
            else:
                raise NotImplementedError("[-] Not Supported Type :(")
        self.use_image_scaling = use_image_scaling
        self.img_scale = image_scale
        if self.use_image_scaling:
            self.raw_data = self.img_scaling(self.raw_data, self.img_scale)

    def load_img(self):
        """Load all image files into raw_data as flattened uint8 rows."""
        self.raw_data = np.zeros((len(self.file_list), self.height * self.width * self.channel),
                                 dtype=np.uint8)
        for i, fn in tqdm(enumerate(self.file_names)):
            self.raw_data[i] = self.get_img(fn, (self.height, self.width), self.img_save_method).flatten()
            if self.debug:  # just once
                print("[*] Image Shape   : ", self.raw_data[i].shape)
                print("[*] Image Size    : ", self.raw_data[i].size)
                print("[*] Image MIN/MAX : (%d, %d)" % (np.min(self.raw_data[i]), np.max(self.raw_data[i])))
                self.debug = False

    def load_tfr(self):
        """Open the TFRecord files as a (lazy) tf.data pipeline."""
        self.raw_data = tf.data.TFRecordDataset(self.file_names, compression_type='', buffer_size=self.buffer_size)
        self.raw_data = self.raw_data.map(self.parse_tfr_tf, num_parallel_calls=self.n_threads)

    def load_h5(self, size=0, offset=0):
        """Load (a chunk of) the 'images' dataset from each .h5 file.

        size: chunk length (0 = whole file); offset: chunk index, wraps around.
        """
        init = True
        for fl in self.file_list:  # For multiple .h5 files
            with h5py.File(fl, 'r') as hf:
                data = hf['images']
                full_size = len(data)
                if size == 0:
                    size = full_size
                n_chunks = int(np.ceil(full_size / size))
                if offset >= n_chunks:
                    print("[*] Looping from back to start.")
                    offset %= n_chunks
                if offset == n_chunks - 1:
                    print("[-] Not enough data available, clipping to end.")
                    data = data[offset * size:]
                else:
                    data = data[offset * size:(offset + 1) * size]
                data = np.array(data, dtype=np.uint8)
                print("[+] ", fl, " => Image size : ", data.shape)
                if init:
                    self.raw_data = data
                    init = False
                    if self.debug:  # just once
                        print("[*] Image Shape   : ", self.raw_data[0].shape)
                        print("[*] Image Size    : ", self.raw_data[0].size)
                        print("[*] Image MIN/MAX : (%d, %d)" % (np.min(self.raw_data[0]), np.max(self.raw_data[0])))
                        self.debug = False
                    continue
                else:
                    self.raw_data = np.concatenate((self.raw_data, data))

    def load_npy(self):
        """Load images from a single .npy file."""
        # bug fix: np.load expects a single path; file_names is a list
        self.raw_data = np.rollaxis(np.squeeze(np.load(self.file_names[0]), axis=0), 0, 3)
        if self.debug:  # just once
            print("[*] Image Shape   : ", self.raw_data[0].shape)
            print("[*] Image Size    : ", self.raw_data[0].size)
            print("[*] Image MIN/MAX : (%d, %d)" % (np.min(self.raw_data[0]), np.max(self.raw_data[0])))
            self.debug = False

    def convert_to_img(self):
        """Write every raw image to <save_file_name>/imgHQxxxxx.png.

        Bug fix: the original passed the constant cv2.COLOR_BGR2RGB to
        cv2.imwrite instead of the pixel data, and mapped a locally-defined
        (non-picklable) function over a multiprocessing Pool. Images are now
        written sequentially from the actual data.
        """
        raw_data_shape = self.raw_data.shape  # (N, H * W * C)
        if not os.path.exists(self.save_file_name):
            print("[-] There's no %s :(" % self.save_file_name)
            print("[*] Make directory at %s... " % self.save_file_name)
            os.mkdir(self.save_file_name)
        for i in range(raw_data_shape[0]):
            img = self.raw_data[i].reshape((self.height, self.width, self.channel))
            # raw data is RGB (see get_img); OpenCV writes BGR
            cv2.imwrite(os.path.join(self.save_file_name, 'imgHQ%05d.png' % i), img[..., ::-1])

    def convert_to_tfr(self):
        """Serialize every raw image (shape + bytes) into the TFRecord writer."""
        for data in self.raw_data:
            ex = tf.train.Example(features=tf.train.Features(feature={
                'shape': tf.train.Feature(int64_list=tf.train.Int64List(value=data.shape)),
                'data': tf.train.Feature(bytes_list=tf.train.BytesList(value=[data.tostring()]))
            }))
            self.tfr_writer.write(ex.SerializeToString())

    def convert_to_h5(self):
        """Save raw_data as the 'images' dataset of an HDF5 file."""
        with h5py.File(self.save_file_name, 'w') as f:
            f.create_dataset("images", data=self.raw_data)

    def convert_to_npy(self):
        """Save raw_data as a .npy file."""
        np.save(self.save_file_name, self.raw_data)
class MNISTDataSet:
    """Thin wrapper around the tensorflow MNIST reader with an optional
    train/validation split."""

    def __init__(self, use_split=False, split_rate=0.15, random_state=42, ds_path=None):
        self.use_split = use_split
        self.split_rate = split_rate
        self.random_state = random_state
        self.ds_path = ds_path
        try:
            assert self.ds_path
        except AssertionError:
            raise AssertionError("[-] MNIST DataSet Path is required!")

        from tensorflow.examples.tutorials.mnist import input_data
        # downloads MNIST into ds_path when not already present
        self.data = input_data.read_data_sets(self.ds_path, one_hot=True)

        # training split
        self.train_data = self.data.train
        self.train_images, self.train_labels = self.train_data.images, self.train_data.labels
        self.valid_images, self.valid_labels = None, None

        # test split
        self.test_data = self.data.test
        self.test_images, self.test_labels = self.test_data.images, self.test_data.labels

        # optionally carve a validation set out of the training data
        if self.use_split:
            split = train_test_split(self.train_images, self.train_labels,
                                     test_size=self.split_rate,
                                     random_state=self.random_state)
            self.train_images, self.valid_images, self.train_labels, self.valid_labels = split
class CiFarDataSet:
@staticmethod
def unpickle(file):
    """Deserialize one CIFAR batch file into a bytes-keyed dict."""
    import pickle
    # WARN: Only for python3, NOT FOR python2
    assert sys.version_info >= (3, 0)
    with open(file, 'rb') as batch_file:
        return pickle.load(batch_file, encoding='bytes')
def __init__(self, height=32, width=32, channel=3,
use_split=False, split_rate=0.2, random_state=42, ds_name="cifar-10", ds_path=None):
"""
# General Settings
:param height: input image height, default 32
:param width: input image width, default 32
:param channel: input image channel, default 3 (RGB)
- in case of CIFAR, image size is 32 x 32 x 3 (HWC).
# Pre-Processing Option
:param use_split: training DataSet splitting, default True
:param split_rate: image split rate (into train & test), default 0.2
:param random_state: random seed for shuffling, default 42
# DataSet Option
:param ds_name: DataSet's name, default cifar-10
:param ds_path: DataSet's path, default None
"""
self.height = height
self.width = width
self.channel = channel
self.use_split = use_split
self.split_rate = split_rate
self.random_state = random_state
self.ds_name = ds_name
self.ds_path = ds_path # DataSet path
self.n_classes = 10 # DataSet the number of classes, default 10
self.train_images = None
self.valid_images = None
self.test_images = None
self.train_labels = None
self.valid_labels = None
self.test_labels = None
try:
assert self.ds_path
except AssertionError:
raise AssertionError("[-] CIFAR10/100 DataSets' Path is required!")
if self.ds_name == "cifar-10":
self.cifar_10() # loading Cifar-10
elif self.ds_name == "cifar-100":
self.cifar_100() # loading Cifar-100
else:
raise NotImplementedError("[-] Only 'cifar-10' or 'cifar-100'")
def cifar_10(self):
    """Load the CIFAR-10 train/test batches from ``self.ds_path``.

    NOTE(review): this method appears to continue beyond the visible chunk
    (the non-split assignments of ``self.train_images`` etc. are not shown);
    only the visible portion is documented here.
    """
    self.n_classes = 10  # labels
    # The five pickled training batches (10k images each, 50k total).
    train_batch_1 = self.unpickle("{0}/data_batch_1".format(self.ds_path))
    train_batch_2 = self.unpickle("{0}/data_batch_2".format(self.ds_path))
    train_batch_3 = self.unpickle("{0}/data_batch_3".format(self.ds_path))
    train_batch_4 = self.unpickle("{0}/data_batch_4".format(self.ds_path))
    train_batch_5 = self.unpickle("{0}/data_batch_5".format(self.ds_path))
    # training data & label
    train_data = np.concatenate([
        train_batch_1[b'data'],
        train_batch_2[b'data'],
        train_batch_3[b'data'],
        train_batch_4[b'data'],
        train_batch_5[b'data'],
    ], axis=0)
    train_labels = np.concatenate([
        train_batch_1[b'labels'],
        train_batch_2[b'labels'],
        train_batch_3[b'labels'],
        train_batch_4[b'labels'],
        train_batch_5[b'labels'],
    ], axis=0)
    # Image size : 32x32x3
    # Rows are flat 3072-byte vectors; Fortran-order reshape plus a swap of
    # axes 1 and 2 yields (N, height, width, channel) images.
    train_images = np.swapaxes(train_data.reshape([-1,
                                                   self.height,
                                                   self.width,
                                                   self.channel], order='F'), 1, 2)
    # test data & label
    test_batch = self.unpickle("{0}/test_batch".format(self.ds_path))
    test_data = test_batch[b'data']
    test_labels = np.array(test_batch[b'labels'])
    # image size : 32x32x3
    test_images = np.swapaxes(test_data.reshape([-1,
                                                 self.height,
                                                 self.width,
                                                 self.channel], order='F'), 1, 2)
    # split training data set into train / val
    if self.use_split:
        train_images, valid_images, train_labels, valid_labels = \
            train_test_split(train_images, train_labels,
                             test_size=self.split_rate,
                             random_state=self.random_state)
        self.valid_images = valid_images
        # one_hot / train_test_split are presumably module-level helpers
        # defined elsewhere in this file — not visible from here.
        self.valid_labels = one_hot(valid_labels, self.n_classes)
| |
'''Video Processing
=====================
Provides a class that manipulates files en-masse using FFmpeg. It can
compress/uncompress/merge/concatenate or perform other tasks on video files.
In order to use this module, the ffmpeg binaries need to be installed in the
parent directory of this module, or in $(FFMPEG_ROOT)/bin.
Keyboard Keys
-------------
`space`:
Toggles the current pause state.
`enter`:
Starts processing.
`escape`:
Stop the processing.
'''
import sys
import json
from os import makedirs
from os.path import join, exists, expanduser, abspath, isdir, isfile, dirname,\
split, splitext, getsize, sep
import logging
from threading import Thread
import time
from functools import partial
import traceback
import subprocess as sp
import tempfile
import re
from re import match, escape, sub
from time import sleep
from collections import defaultdict
from kivy.clock import Clock
from kivy.compat import clock
from kivy.factory import Factory
from kivy.uix.behaviors.knspace import KNSpaceBehavior
from kivy.uix.gridlayout import GridLayout
from kivy.uix.popup import Popup
from kivy.logger import Logger
from kivy.event import EventDispatcher
from kivy.properties import (NumericProperty, ReferenceListProperty,
ObjectProperty, ListProperty, StringProperty, BooleanProperty,
DictProperty, AliasProperty, OptionProperty, ConfigParserProperty)
from cplcom import config_name
from filers.tools import (str_to_float, pretty_space, pretty_time, KivyQueue,
to_bool, ConfigProperty, byteify)
from filers import root_data_path
__all__ = ('VideoConverter', )
def exit_converter():
    """Tear down the converter singleton: stop any processing, save config.

    Best-effort: shutdown errors are swallowed (stop) or logged (save) so
    application exit is never blocked.
    """
    c = VideoConverterController.converter_singleton
    if c:
        try:
            c.stop(terminate=True)
        except Exception:
            # Narrowed from a bare `except:`, which would also swallow
            # SystemExit/KeyboardInterrupt; still deliberately best-effort.
            pass
        try:
            c.save_config()
        except Exception as e:
            Logger.error('Converter: {}'.format(e))
            Logger.exception(e)
class VideoConverterController(EventDispatcher):
    '''Controller managing the conversion groups, their settings, and the
    loading/saving of those settings to a JSON config file.
    '''
    # Path of the JSON file the converter settings persist to.
    settings_path = ConfigParserProperty(
        join(root_data_path, 'converter.json'), 'Filers',
        'converter_settings_path', config_name)
    # One settings dict per group; kept parallel to conversion_group_widgets.
    conversion_group_settings = ListProperty([])
    conversion_group_widgets = []
    # The single live controller instance (set in __init__).
    converter_singleton = None
    converter_view = ObjectProperty(None)
    container = ObjectProperty(None)
    res_container = ObjectProperty(None)
    settings_display = None
    current_group_i = None
    files = []
    processed = 0
    processing = False
    def __init__(self, **kwargs):
        super(VideoConverterController, self).__init__(**kwargs)
        VideoConverterController.converter_singleton = self
        self.settings_display = Factory.ConverterSettings(controller=self)
        self.load_config(self.settings_path)
        # Re-bind as instance attributes so groups are not shared class-wide.
        self.conversion_group_settings = []
        self.conversion_group_widgets = []
    @staticmethod
    def get_window_title():
        '''Return the window-title suffix describing conversion progress,
        or `''` when nothing is being converted.
        '''
        c = VideoConverterController.converter_singleton
        if not c or not c.files or len(c.files) == c.processed:
            return ''
        s = ' - Converter'
        if not c.processed:
            s += ' ({})'.format(len(c.files))
        else:
            # Fixed: was `len(c.processed, c.files)` — len() takes a single
            # argument, so that line raised TypeError at runtime. The intent
            # is clearly "processed/total".
            s += ' ({}/{})'.format(c.processed, len(c.files))
        if not c.processing:
            s += ' PAUSED'
        return s
    def log_error(self, msg=None, e=None, exc_info=None, level='error'):
        '''Log `msg` (optionally annotated with exception `e`) at `level`
        and mirror the message into the UI error-output queue.
        '''
        q = self.converter_view.error_output.queue
        log_fn = getattr(Logger, level)
        val = msg
        if msg:
            if e:
                val = '{}: {}'.format(msg, repr(e))
                log_fn(val)
            else:
                log_fn(msg)
        if exc_info is not None:
            # NOTE(review): `e` may be None when only exc_info is supplied.
            Logger.error(e, exc_info=exc_info)
        if val:
            q.add_item(val)
    def load_config(self, filename):
        '''Load the global options and conversion groups from `filename`,
        replacing any existing groups. No-op if `filename` does not exist.
        '''
        if not isfile(filename):
            return
        filename = abspath(filename)
        # Drop existing groups before loading the saved ones.
        for c in self.conversion_group_widgets[:]:
            self.delete_group(c)
        try:
            with open(filename) as fh:
                global_opt, convert_opt = json.load(fh)
            global_opt, convert_opt = byteify(global_opt), byteify(convert_opt)
            for k, v in global_opt.items():
                setattr(self, k, v)
            for d in convert_opt:
                self.add_group(settings=d, show=False)
        except Exception as e:
            self.log_error(e=e, exc_info=sys.exc_info(), msg='Loading config')
        else:
            if filename:
                self.settings_path = filename
    def save_config(self, filename=None):
        '''Save the global options and group settings as JSON to `filename`
        (defaults to :attr:`settings_path`).
        '''
        filename = filename or self.settings_path
        if not filename:
            return
        try:
            with open(filename, 'w') as fh:
                json.dump(
                    (self.get_config_dict(), self.conversion_group_settings),
                    fh, sort_keys=True, indent=4, separators=(',', ': '))
        except Exception as e:
            # Fixed copy/paste: this is the save path, not the load path.
            self.log_error(e=e, exc_info=sys.exc_info(), msg='Saving config')
        else:
            if filename:
                self.settings_path = filename
    def ui_config(self, load, path, selection, filename):
        '''Load or save the config using the file chosen in the UI browser.'''
        fname = abspath(join(path, filename))
        if load:
            self.load_config(fname)
        else:
            self.save_config(fname)
    def get_config_dict(self):
        '''Return the global (non-group) options as a dict; currently none.'''
        attrs = []
        return {k: getattr(self, k) for k in attrs}
    def add_group(self, settings=None, show=True):
        '''Add a new conversion group initialized from `settings` and
        optionally open its settings popup.

        `settings` defaults to None instead of `{}` to avoid the shared
        mutable-default pitfall; passing `{}` behaves identically.
        '''
        item = ConversionGroup(controller=self)
        settings = self.settings_display.get_settings(settings or {})
        self.conversion_group_widgets.append(item)
        self.conversion_group_settings.append(settings)
        self.container.add_widget(item)
        if show:
            self.show_settings(item)
    def delete_group(self, item):
        '''Remove a group widget and its associated settings dict.'''
        self.settings_display.dismiss()
        i = self.conversion_group_widgets.index(item)
        del self.conversion_group_settings[i]
        del self.conversion_group_widgets[i]
        self.container.remove_widget(item)
    def show_settings(self, item):
        '''Open the settings popup targeting `item`.'''
        self.settings_display.item = item
        self.settings_display.open()
    def stop(self, terminate=False):
        # Placeholder: stop any running conversion (no-op for now).
        pass
    def update_item_settings(self, item, src):
        # Placeholder: sync edited settings back onto `item` (no-op for now).
        pass
class ConversionGroup(KNSpaceBehavior, GridLayout):
    '''Widget representing one group of files to be converted.'''
    # The owning VideoConverterController (rebind so kv bindings follow it).
    controller = ObjectProperty(None, rebind=True)
    # Example input filename shown to the user.
    in_ex_file = StringProperty('')
    # Example output filename shown to the user.
    out_ex_file = StringProperty('')
class ConverterSettings(KNSpaceBehavior, Popup):
    '''Popup used to edit the settings of a single conversion group.'''
    controller = ObjectProperty(None, rebind=True)
    # The ConversionGroup currently being edited (None when closed).
    item = ObjectProperty(None, allownone=True)
    def get_settings(self, settings=None):
        '''Return a fresh dict seeded from `settings`.

        `settings` defaults to None instead of `{}` to avoid the shared
        mutable-default pitfall; passing `{}` behaves identically.
        '''
        s = {}
        s.update(settings or {})
        return s
    def set_settings(self, settings=None):
        '''Apply `settings` to the popup UI; currently a no-op placeholder.'''
        pass
class VideoConverter(KNSpaceBehavior, GridLayout):
    '''Root widget of the converter UI; builds its controller after init.'''
    controller = ObjectProperty(None, rebind=True)
    def __init__(self, **kwargs):
        super(VideoConverter, self).__init__(**kwargs)
        def make_controller(*args):
            self.controller = VideoConverterController(
                converter_view=self, container=self.ids.container,
                res_container=self.ids.res_container)
        # Deferred one frame so the kv `ids` are populated before use.
        Clock.schedule_once(make_controller, 0)
"""
# NOTE(review): PY2 is presumably a py2/py3 flag imported elsewhere in this
# module; the `unicode` branch is only evaluated when PY2 is truthy, so this
# line is safe on Python 3 — confirm PY2's origin against the full file.
unicode_type = unicode if PY2 else str
'''
Unicode type used to convert anything into unicode.
'''
# Pre-bind the config section/name so later properties only supply key+default.
ConfigProperty = partial(ConfigProperty, section='Processor',
                         config_name=config_name)
'''
A partially initialized :py:class:`~kivy.properties.ConfigParserProperty`.
The section name is `'Processor'` and the Config class name is
:attr:`~filers.config_name`.
'''
class Processor(GridLayout):
'''
See module description.
'''
queue = None
''' The :class:`~filers.tools.KivyQueue` with which we communicate with the
kivy event loop. The work thread sends updates to Kivy with this queue.
Following is the list of queue keys that can be sent, along with their
possible values.
`clean`: None
Sent when the threads starts.
`count`: int
Sent periodically while pre-reading the input files describing
the files read so far. It's a 5-tuple of: # output files,
# input files, # of walked directories, total size of the input
files, and a dictionary where the keys are ignored files, or
extensions types (e.g. .txt) and their values are the number of
times they were ignored.
`count_done`: int
Identical to `count`, except it's sent when the count is done.
`failure`: str
Sent when something went wrong and the threads ends. The
value is a string with the reason for the failure. Upon failure,
the controller should call stop and set itself in stopped mode.
`file_cmd`: str
Sent for every file before it is processed. It's a string
containing the full command with which FFmpeg will be called.
`file_stat`: 11-tuple
Sent after each file that has been processed (e.g. moved)
containing status information. It's a 11-tuple of: the total size
of output files processed, the estimated total size of all the
output files, the total size of input files processed, the total
size of all the input files (can change dynamically as files are
skipped), the total number of input files processed, the count of
all the input files, the total number of output files processed,
the count of all the output files, the estimated bps at which
things are done, the total time elapsed, the estimated time left.
`skipped`: str
Sent when the file is skipped due to error. The
string describes the files involved and the reason.
`done`: None
Sent when the thread has completed it's work.
'''
thread = None
''' The thread that runs our secondary thread. All disk R/W and processing
is done from that thread. See :attr:`process_thread`. Defaults to None.
'''
ffmpeg_path = ''
''' The full path to the FFmpeg executable. To find it, it looks in the
same path that :py:mod:`ffpyplayer` looks for the binaries
(`FFMPEG_ROOT` in os.environ as well as the parent directory of this file).
'''
running = False
''' Whether the thread is running. It is set to True before launching the
thread, and the thread resets it to False before exiting. Defaults to
False. See :attr:`process_thread`.
'''
finish = False
''' When set to True, it signals the thread to terminate. Defaults to
False.
'''
pause = False
''' When set to True, it signals the thread to pause. Setting to False will
un-pause. Defaults to False.
'''
report = ''
''' A text report of the files to be processed and ignored. This is
generated before any processing occurs. Defaults to `''`.
'''
error_list = []
''' A list of text items, each item representing a file that failed to be
processed. It is updated dynamically. Defaults to `[]`.
'''
success_list = []
''' A list of text items, each item representing a file that was
successfully processed. It is updated dynamically. Defaults to `[]`.
'''
input_split_pat = re.compile('''((?:[^,"']|"[^"]*"|'[^']*')+)''')
''' The compiled pattern we use to break apart the list of input files to
process. Defaults to the compiled value of `', *'`.
'''
input = ConfigProperty(u'', 'input', unicode_type)
''' The list of input files and folders to be processed. It is
a comma (plus optional spaces) separated list. File or directory names
that contain a space, should be quoted with `"`. Triple clicking on this
field will launch a file browser.
Defaults to `u''`.
'''
simple_filt = ConfigProperty(True, 'simple_filt', to_bool)
''' Whether the filter we use to filter the input files with
uses the simple common format (where * - match anything, ? match any single
char), if True. If False, it's a python regex string. Defaults to True.
'''
input_filter = ConfigProperty(u'*.avi', 'input_filter', unicode_type)
''' The filter to use to filter the input files. See
:attr:`simple_filt`. Defaults to `'*.avi'`.
'''
group_filt = ConfigProperty(u'', 'group_filt', unicode_type)
''' The matching string parts to remove to get the output
filename. If :attr:`simple_filt` is True, it uses `*` to match any group
of chars, and `?` | |
fc00:e968:6179::de52:7100/64 dev eth0")
dvs.servers[1].runcmd("ip -6 route del default dev eth0")
dvs.servers[1].runcmd("ip -6 address del 2001::2/64 dev eth0")
def test_RouteAddRemoveIpv4RouteUnresolvedNeigh(self, dvs, testlog):
    """IPv4 routes whose nexthops are unresolved must stay out of ASIC_DB
    until the neighbors resolve.

    Fixed: the configured prefixes had been mangled to 192.168.3.11/24,
    contradicting the 2.2.2.0/24 / 3.3.3.0/24 entries this test asserts on.
    """
    self.setup_db(dvs)
    self.clear_srv_config(dvs)
    # create l3 interface
    self.create_l3_intf("Ethernet0", "")
    self.create_l3_intf("Ethernet4", "")
    # set ip address
    self.add_ip_address("Ethernet0", "10.0.0.0/31")
    self.add_ip_address("Ethernet4", "10.0.0.2/31")
    # bring up interface
    self.set_admin_status("Ethernet0", "up")
    self.set_admin_status("Ethernet4", "up")
    # add route entry -- single nexthop
    dvs.runcmd("vtysh -c \"configure terminal\" -c \"ip route 2.2.2.0/24 10.0.0.1\"")
    # add route entry -- multiple nexthop
    dvs.runcmd("vtysh -c \"configure terminal\" -c \"ip route 3.3.3.0/24 10.0.0.1\"")
    dvs.runcmd("vtysh -c \"configure terminal\" -c \"ip route 3.3.3.0/24 10.0.0.3\"")
    # check application database
    self.pdb.wait_for_entry("ROUTE_TABLE", "2.2.2.0/24")
    self.pdb.wait_for_entry("ROUTE_TABLE", "3.3.3.0/24")
    # check for unresolved neighbor entries
    self.pdb.wait_for_entry("NEIGH_RESOLVE_TABLE", "Ethernet0:10.0.0.1")
    self.pdb.wait_for_entry("NEIGH_RESOLVE_TABLE", "Ethernet4:10.0.0.3")
    # check routes does not show up in ASIC_DB
    self.check_deleted_route_entries(["2.2.2.0/24", "3.3.3.0/24"])
    # set ip address and default route
    dvs.servers[0].runcmd("ip address add 10.0.0.1/31 dev eth0")
    dvs.servers[0].runcmd("ip route add default via 10.0.0.0")
    dvs.servers[1].runcmd("ip address add 10.0.0.3/31 dev eth0")
    dvs.servers[1].runcmd("ip route add default via 10.0.0.2")
    time.sleep(2)
    # check application database
    self.pdb.wait_for_entry("ROUTE_TABLE", "2.2.2.0/24")
    self.pdb.wait_for_entry("ROUTE_TABLE", "3.3.3.0/24")
    # check neighbor got resolved and removed from NEIGH_RESOLVE_TABLE
    self.pdb.wait_for_deleted_entry("NEIGH_RESOLVE_TABLE", "Ethernet0:10.0.0.1")
    self.pdb.wait_for_deleted_entry("NEIGH_RESOLVE_TABLE", "Ethernet4:10.0.0.3")
    # check ASIC route database
    self.check_route_entries(["2.2.2.0/24", "3.3.3.0/24"])
    # remove route entry
    dvs.runcmd("vtysh -c \"configure terminal\" -c \"no ip route 2.2.2.0/24 10.0.0.1\"")
    dvs.runcmd("vtysh -c \"configure terminal\" -c \"no ip route 3.3.3.0/24 10.0.0.1\"")
    dvs.runcmd("vtysh -c \"configure terminal\" -c \"no ip route 3.3.3.0/24 10.0.0.3\"")
    # check application database
    self.pdb.wait_for_deleted_entry("ROUTE_TABLE", "2.2.2.0/24")
    self.pdb.wait_for_deleted_entry("ROUTE_TABLE", "3.3.3.0/24")
    # check ASIC route database
    self.check_deleted_route_entries(["2.2.2.0/24", "3.3.3.0/24"])
    # remove ip address
    self.remove_ip_address("Ethernet0", "10.0.0.0/31")
    self.remove_ip_address("Ethernet4", "10.0.0.2/31")
    # remove l3 interface
    self.remove_l3_intf("Ethernet0")
    self.remove_l3_intf("Ethernet4")
    self.set_admin_status("Ethernet0", "down")
    self.set_admin_status("Ethernet4", "down")
    # remove ip address and default route
    dvs.servers[0].runcmd("ip route del default dev eth0")
    dvs.servers[0].runcmd("ip address del 10.0.0.1/31 dev eth0")
    dvs.servers[1].runcmd("ip route del default dev eth0")
    dvs.servers[1].runcmd("ip address del 10.0.0.3/31 dev eth0")
def test_RouteAddRemoveIpv6RouteUnresolvedNeigh(self, dvs, testlog):
    """IPv6 routes whose nexthops are unresolved must stay out of ASIC_DB
    until the neighbors resolve.

    Fixed: the interface/server/route literals had been mangled (all mapped
    onto fc00:e968:... / fd00:a516:... / fc00:db20:... values), contradicting
    the 3000::/64 / 4000::/64 ROUTE_TABLE entries and the Ethernet4:2001::2
    NEIGH_RESOLVE entry this test asserts on. Restored the 2000::/2001::
    addressing scheme mirroring the IPv4 variant above.
    """
    self.setup_db(dvs)
    # create l3 interface
    self.create_l3_intf("Ethernet0", "")
    self.create_l3_intf("Ethernet4", "")
    # bring up interface
    self.set_admin_status("Ethernet0", "up")
    self.set_admin_status("Ethernet4", "up")
    # set ip address
    self.add_ip_address("Ethernet0", "2000::1/64")
    self.add_ip_address("Ethernet4", "2001::1/64")
    dvs.runcmd("sysctl -w net.ipv6.conf.all.forwarding=1")
    # add route entry -- single nexthop
    dvs.runcmd("vtysh -c \"configure terminal\" -c \"ipv6 route 3000::/64 2000::2\"")
    # add route entry -- multiple nexthop
    dvs.runcmd("vtysh -c \"configure terminal\" -c \"ipv6 route 4000::/64 2000::2\"")
    dvs.runcmd("vtysh -c \"configure terminal\" -c \"ipv6 route 4000::/64 2001::2\"")
    # check application database
    self.pdb.wait_for_entry("ROUTE_TABLE", "3000::/64")
    self.pdb.wait_for_entry("ROUTE_TABLE", "4000::/64")
    # check for unresolved neighbor entries
    self.pdb.wait_for_entry("NEIGH_RESOLVE_TABLE", "Ethernet0:2000::2")
    self.pdb.wait_for_entry("NEIGH_RESOLVE_TABLE", "Ethernet4:2001::2")
    # check routes does not show up in ASIC_DB
    self.check_deleted_route_entries(["3000::/64", "4000::/64"])
    # set ip address and default route
    dvs.servers[0].runcmd("ip -6 address add 2000::2/64 dev eth0")
    dvs.servers[0].runcmd("ip -6 route add default via 2000::1")
    dvs.servers[1].runcmd("ip -6 address add 2001::2/64 dev eth0")
    dvs.servers[1].runcmd("ip -6 route add default via 2001::1")
    time.sleep(5)
    dvs.servers[0].runcmd("ping -6 -c 1 2001::2")
    # check application database
    self.pdb.wait_for_entry("ROUTE_TABLE", "3000::/64")
    self.pdb.wait_for_entry("ROUTE_TABLE", "4000::/64")
    # check neighbor got resolved and removed from NEIGH_RESOLVE_TABLE
    self.pdb.wait_for_deleted_entry("NEIGH_RESOLVE_TABLE", "Ethernet0:2000::2")
    self.pdb.wait_for_deleted_entry("NEIGH_RESOLVE_TABLE", "Ethernet4:2001::2")
    # check ASIC route database
    self.check_route_entries(["3000::/64", "4000::/64"])
    # remove route entry
    dvs.runcmd("vtysh -c \"configure terminal\" -c \"no ipv6 route 3000::/64 2000::2\"")
    dvs.runcmd("vtysh -c \"configure terminal\" -c \"no ipv6 route 4000::/64 2000::2\"")
    dvs.runcmd("vtysh -c \"configure terminal\" -c \"no ipv6 route 4000::/64 2001::2\"")
    # check application database
    self.pdb.wait_for_deleted_entry("ROUTE_TABLE", "3000::/64")
    self.pdb.wait_for_deleted_entry("ROUTE_TABLE", "4000::/64")
    # check ASIC route database
    self.check_deleted_route_entries(["3000::/64", "4000::/64"])
    # remove ip address
    self.remove_ip_address("Ethernet0", "2000::1/64")
    self.remove_ip_address("Ethernet4", "2001::1/64")
    # remove l3 interface
    self.remove_l3_intf("Ethernet0")
    self.remove_l3_intf("Ethernet4")
    self.set_admin_status("Ethernet0", "down")
    self.set_admin_status("Ethernet4", "down")
    # remove ip address and default route
    dvs.servers[0].runcmd("ip -6 route del default dev eth0")
    dvs.servers[0].runcmd("ip -6 address del 2000::2/64 dev eth0")
    dvs.servers[1].runcmd("ip -6 route del default dev eth0")
    dvs.servers[1].runcmd("ip -6 address del 2001::2/64 dev eth0")
def test_RouteAddRemoveIpv4RouteWithVrf(self, dvs, testlog):
    """Add/remove IPv4 routes bound to VRFs and verify APP/ASIC DB state.

    Fixed: the configured prefixes had been mangled to 192.168.3.11/24,
    contradicting the 2.2.2.0/24 (Vrf_1) / 3.3.3.0/24 (Vrf_2) entries this
    test asserts on.
    """
    self.setup_db(dvs)
    # create vrf
    vrf_1_oid = self.create_vrf("Vrf_1")
    vrf_2_oid = self.create_vrf("Vrf_2")
    # create l3 interface
    self.create_l3_intf("Ethernet0", "Vrf_1")
    self.create_l3_intf("Ethernet4", "Vrf_1")
    self.create_l3_intf("Ethernet8", "Vrf_2")
    self.create_l3_intf("Ethernet12", "Vrf_2")
    # set ip address (overlapping subnets are fine: separate VRFs)
    self.add_ip_address("Ethernet0", "10.0.0.0/31")
    self.add_ip_address("Ethernet4", "10.0.0.2/31")
    self.add_ip_address("Ethernet8", "10.0.0.0/31")
    self.add_ip_address("Ethernet12", "10.0.0.2/31")
    # bring up interface
    self.set_admin_status("Ethernet0", "up")
    self.set_admin_status("Ethernet4", "up")
    self.set_admin_status("Ethernet8", "up")
    self.set_admin_status("Ethernet12", "up")
    # set ip address and default route
    dvs.servers[0].runcmd("ip address add 10.0.0.1/31 dev eth0")
    dvs.servers[0].runcmd("ip route add default via 10.0.0.0")
    dvs.servers[1].runcmd("ip address add 10.0.0.3/31 dev eth0")
    dvs.servers[1].runcmd("ip route add default via 10.0.0.2")
    dvs.servers[2].runcmd("ip address add 10.0.0.1/31 dev eth0")
    dvs.servers[2].runcmd("ip route add default via 10.0.0.0")
    dvs.servers[3].runcmd("ip address add 10.0.0.3/31 dev eth0")
    dvs.servers[3].runcmd("ip route add default via 10.0.0.2")
    # get neighbor entry
    dvs.servers[0].runcmd("ping -c 1 10.0.0.3")
    dvs.servers[2].runcmd("ping -c 1 10.0.0.3")
    # add route
    dvs.runcmd("vtysh -c \"configure terminal\" -c \"ip route 2.2.2.0/24 10.0.0.1 vrf Vrf_1\"")
    dvs.runcmd("vtysh -c \"configure terminal\" -c \"ip route 3.3.3.0/24 10.0.0.1 vrf Vrf_2\"")
    # check application database
    self.pdb.wait_for_entry("ROUTE_TABLE:Vrf_1", "2.2.2.0/24")
    self.pdb.wait_for_entry("ROUTE_TABLE:Vrf_2", "3.3.3.0/24")
    # check ASIC route database
    self.check_route_entries_with_vrf(["2.2.2.0/24", "3.3.3.0/24"], [vrf_1_oid, vrf_2_oid])
    # remove route
    dvs.runcmd("vtysh -c \"configure terminal\" -c \"no ip route 2.2.2.0/24 10.0.0.1 vrf Vrf_1\"")
    dvs.runcmd("vtysh -c \"configure terminal\" -c \"no ip route 3.3.3.0/24 10.0.0.1 vrf Vrf_2\"")
    # check application database
    self.pdb.wait_for_deleted_entry("ROUTE_TABLE:Vrf_1", "2.2.2.0/24")
    self.pdb.wait_for_deleted_entry("ROUTE_TABLE:Vrf_2", "3.3.3.0/24")
    # check ASIC route database
    self.check_deleted_route_entries(["2.2.2.0/24", "3.3.3.0/24"])
    # remove ip address
    self.remove_ip_address("Ethernet0", "10.0.0.0/31")
    self.remove_ip_address("Ethernet4", "10.0.0.2/31")
    self.remove_ip_address("Ethernet8", "10.0.0.0/31")
    self.remove_ip_address("Ethernet12", "10.0.0.2/31")
    # remove l3 interface
    self.remove_l3_intf("Ethernet0")
    self.remove_l3_intf("Ethernet4")
    self.remove_l3_intf("Ethernet8")
    self.remove_l3_intf("Ethernet12")
    self.set_admin_status("Ethernet0", "down")
    self.set_admin_status("Ethernet4", "down")
    self.set_admin_status("Ethernet8", "down")
    self.set_admin_status("Ethernet12", "down")
    # remove vrf
    self.remove_vrf("Vrf_1")
    self.remove_vrf("Vrf_2")
    # remove ip address and default route
    dvs.servers[0].runcmd("ip route del default dev eth0")
    dvs.servers[0].runcmd("ip address del 10.0.0.1/31 dev eth0")
    dvs.servers[1].runcmd("ip route del default dev eth0")
    dvs.servers[1].runcmd("ip address del 10.0.0.3/31 dev eth0")
    dvs.servers[2].runcmd("ip route del default dev eth0")
    dvs.servers[2].runcmd("ip address del 10.0.0.1/31 dev eth0")
    dvs.servers[3].runcmd("ip route del default dev eth0")
    dvs.servers[3].runcmd("ip address del 10.0.0.3/31 dev eth0")
@pytest.mark.skip(reason="FRR 7.5 issue https://github.com/Azure/sonic-buildimage/issues/6359")
def test_RouteAddRemoveIpv6RouteWithVrf(self, dvs, testlog):
    """Add/remove IPv6 routes bound to VRFs and verify APP/ASIC DB state.

    Fixed: the interface/server/route literals had been mangled (mapped onto
    fc00:e968:... / fd00:a516:... / fc00:db20:... values), contradicting the
    3000::/64 (Vrf_1) / 4000::/64 (Vrf_2) entries this test asserts on.
    Restored the 2000::/2001:: addressing scheme mirroring the IPv4 variant.
    """
    self.setup_db(dvs)
    # create vrf
    vrf_1_oid = self.create_vrf("Vrf_1")
    vrf_2_oid = self.create_vrf("Vrf_2")
    # create l3 interface
    self.create_l3_intf("Ethernet0", "Vrf_1")
    self.create_l3_intf("Ethernet4", "Vrf_1")
    self.create_l3_intf("Ethernet8", "Vrf_2")
    self.create_l3_intf("Ethernet12", "Vrf_2")
    # bring up interface
    self.set_admin_status("Ethernet0", "up")
    self.set_admin_status("Ethernet4", "up")
    self.set_admin_status("Ethernet8", "up")
    self.set_admin_status("Ethernet12", "up")
    # set ip address (overlapping subnets are fine: separate VRFs)
    self.add_ip_address("Ethernet0", "2000::1/64")
    self.add_ip_address("Ethernet4", "2001::1/64")
    self.add_ip_address("Ethernet8", "2000::1/64")
    self.add_ip_address("Ethernet12", "2001::1/64")
    dvs.runcmd("sysctl -w net.ipv6.conf.all.forwarding=1")
    # set ip address and default route
    dvs.servers[0].runcmd("ip -6 address add 2000::2/64 dev eth0")
    dvs.servers[0].runcmd("ip -6 route add default via 2000::1")
    dvs.servers[1].runcmd("ip -6 address add 2001::2/64 dev eth0")
    dvs.servers[1].runcmd("ip -6 route add default via 2001::1")
    dvs.servers[2].runcmd("ip -6 address add 2000::2/64 dev eth0")
    dvs.servers[2].runcmd("ip -6 route add default via 2000::1")
    dvs.servers[3].runcmd("ip -6 address add 2001::2/64 dev eth0")
    dvs.servers[3].runcmd("ip -6 route add default via 2001::1")
    time.sleep(2)
    # get neighbor entry
    dvs.servers[0].runcmd("ping -6 -c 1 2001::2")
    dvs.servers[2].runcmd("ping -6 -c 1 2001::2")
    # add route
    dvs.runcmd("vtysh -c \"configure terminal\" -c \"ipv6 route 3000::/64 2000::2 vrf Vrf_1\"")
    dvs.runcmd("vtysh -c \"configure terminal\" -c \"ipv6 route 4000::/64 2000::2 vrf Vrf_2\"")
    # check application database
    self.pdb.wait_for_entry("ROUTE_TABLE:Vrf_1", "3000::/64")
    self.pdb.wait_for_entry("ROUTE_TABLE:Vrf_2", "4000::/64")
    # check ASIC route database
    self.check_route_entries_with_vrf(["3000::/64", "4000::/64"], [vrf_1_oid, vrf_2_oid])
    # remove route
    dvs.runcmd("vtysh -c \"configure terminal\" -c \"no ipv6 route 3000::/64 2000::2 vrf Vrf_1\"")
    dvs.runcmd("vtysh -c \"configure terminal\" -c \"no ipv6 route 4000::/64 2000::2 vrf Vrf_2\"")
    # check application database
    self.pdb.wait_for_deleted_entry("ROUTE_TABLE:Vrf_1", "3000::/64")
    self.pdb.wait_for_deleted_entry("ROUTE_TABLE:Vrf_2", "4000::/64")
    # check ASIC route database
    self.check_deleted_route_entries(["3000::/64", "4000::/64"])
    # remove ip address
    self.remove_ip_address("Ethernet0", "2000::1/64")
    self.remove_ip_address("Ethernet4", "2001::1/64")
    self.remove_ip_address("Ethernet8", "2000::1/64")
    self.remove_ip_address("Ethernet12", "2001::1/64")
    # remove l3 interface
    self.remove_l3_intf("Ethernet0")
    self.remove_l3_intf("Ethernet4")
    self.remove_l3_intf("Ethernet8")
    self.remove_l3_intf("Ethernet12")
    # bring down interface
    self.set_admin_status("Ethernet0", "down")
    self.set_admin_status("Ethernet4", "down")
    self.set_admin_status("Ethernet8", "down")
    self.set_admin_status("Ethernet12", "down")
    # remove vrf
    self.remove_vrf("Vrf_1")
    self.remove_vrf("Vrf_2")
    # remove ip address and default route
    dvs.servers[0].runcmd("ip -6 route del default dev eth0")
    dvs.servers[0].runcmd("ip -6 address del 2000::2/64 dev eth0")
    dvs.servers[1].runcmd("ip -6 route del default dev eth0")
    dvs.servers[1].runcmd("ip -6 address del 2001::2/64 dev eth0")
    dvs.servers[2].runcmd("ip -6 route del default dev eth0")
    dvs.servers[2].runcmd("ip -6 address del 2000::2/64 dev eth0")
    dvs.servers[3].runcmd("ip -6 route del default dev eth0")
    dvs.servers[3].runcmd("ip -6 address del 2001::2/64 dev eth0")
@pytest.mark.skip(reason="FRR 7.5 issue https://github.com/Azure/sonic-buildimage/issues/6359")
def test_RouteAndNexthopInDifferentVrf(self, dvs, testlog):
self.setup_db(dvs)
# create vrf
vrf_1_oid = self.create_vrf("Vrf_1")
vrf_2_oid = self.create_vrf("Vrf_2")
# create l3 interface
self.create_l3_intf("Ethernet0", "Vrf_1")
self.create_l3_intf("Ethernet4", "Vrf_1")
self.create_l3_intf("Ethernet8", "Vrf_2")
self.create_l3_intf("Ethernet12", "Vrf_2")
# set ip address
self.add_ip_address("Ethernet0", "10.0.0.1/24")
self.add_ip_address("Ethernet4", "10.0.1.1/24")
self.add_ip_address("Ethernet8", "172.16.17.32/24")
self.add_ip_address("Ethernet12", "172.16.31.10/24")
# bring up interface
self.set_admin_status("Ethernet0", "up")
self.set_admin_status("Ethernet4", "up")
self.set_admin_status("Ethernet8", "up")
self.set_admin_status("Ethernet12", "up")
# set ip address and default route
dvs.servers[0].runcmd("ip address add 10.0.0.2/24 dev eth0")
dvs.servers[0].runcmd("ip route add default via 10.0.0.1")
dvs.servers[1].runcmd("ip address add 10.0.1.2/24 dev eth0")
dvs.servers[1].runcmd("ip route add default via 10.0.1.1")
dvs.servers[2].runcmd("ip address add 172.16.17.32/24 dev eth0")
dvs.servers[2].runcmd("ip route add default via 172.16.17.32")
dvs.servers[3].runcmd("ip address add 192.168.127.12/24 dev eth0")
dvs.servers[3].runcmd("ip route add default via 172.16.31.10")
# get neighbor entry
dvs.servers[0].runcmd("ping -c 1 10.0.1.2")
dvs.servers[2].runcmd("ping -c 1 192.168.127.12")
# add route
| |
<reponame>davidwych/VoterDataProject
polls = {
'newsint2_baseline' : ('Interest in news and public affairs',
'Some people seem to follow what\'s going on in government and public affairs most of the time, whether there\'s an election going on or not. Others aren\'t that interested. Would you say you follow what\'s going on in government and public affairs ...?',
[[1], [2], [3], [4]],
['Most of the time', 'Some of the time', 'Only now and then', 'Hardly at all']),
'newsint_2016' : ('Political Interest',
'Some people seem to follow what\'s going on in government and public affairs most of the time, whether there\'s an election going on or not. Others aren\'t that interested. Would you say you follow what\'s going on in government and public affairs ...?',
[[1], [2], [3], [4]],
['Most of the time', 'Some of the time', 'Only now and then', 'Hardly at all']),
'track_baseline' : ('Direction of the country',
'Would you say things in the country are...',
[[1], [2], [3]],
['Generally headed in the right direction', 'Off on the wrong track','Not sure']),
'track_2016' : ('Direction of the country (2016)',
'Would you say things in the country are...',
[[1], [2], [3]],
['Generally headed in the right direction', 'Off on the wrong track','Not sure']),
'americatrend_2016' : ('Life in America today for people like R compared to fifty years ago',
'In general, would you say life in America today is better, worse, or about the same as it was fifty years ago for people like you?',
[[1], [2], [3], [4]],
['Better', 'About the same', 'Worse', 'Don\'t Know']),
'wealth_2016' : ('Distribution of money and wealth in this country',
'Do you feel that the distribution of money and wealth in this country is fair, or do you feel that the money and wealth in this country should be more evenly distributed among more people?',
[[1], [2], [8]],
['Distribution is fair', 'Should be more evenly distributed', 'Don\'t know']),
'values_culture_2016' : ('In America, values and culture of people like R are...',
'In America today, do you feel the values and culture of people like you are:',
[[1], [2], [3], [8]],
['Generally becoming more widespread and accepted', 'Holding Steady', 'Generally becoming rarer', 'Don\'t Know']),
'trustgovt_baseline' : ('Trust government to do what\'s right',
'How much of the time do you think you can trust the government in Washington to do what is right?',
[[1], [2], [3]],
['Just about always', 'Most of the time', 'Some of the time']),
'trustgovt_2016' : ('Trust government (2016)',
'How much of the time do you think you can trust the government in Washington to do what is right?',
[[1], [2], [3]],
['Just about always', 'Most of the time', 'Some of the time']),
'trust_people_2016' : ('Most people can/can\'t be trusted',
'Generally speaking, would you say that most people can be trusted or that you can\'t be too careful in dealing with people?',
[[1], [2], [8]],
['Can\'t be too careful in dealing with people', 'Most people can be trusted', 'Don\'t know']),
'helpful_people_2016' : ('People try to be helpful or are they mostly just looking out for themselves',
'Would you say that most of the time people try to be helpful, or that they are mostly just looking out for themselves?',
[[1], [2], [8]],
['People try to be helpful', 'People are looking out for themselves', 'Don\'t know']),
'obamaapp_baseline' : ('Barack Obama Approval',
"Do you approve or dissaprove of the way Barack Obama is handlind his job as president?",
[[1],[2],[3],[4],[5]],
['Stronly approve', 'Somewhat approve', 'Somewhat disapprove', 'Strongly disapprove', 'Not Sure']),
'obamaapp_2016' : ('Barack Obama Approval (2016)',
"Do you approve or dissaprove of the way <NAME> is handlind his job as president?",
[[1],[2],[3],[4],[5]],
['Stronly approve', 'Somewhat approve', 'Somewhat disapprove', 'Strongly disapprove', 'Not Sure']),
'watchtv_baseline' : ('Hours watch TV daily',
"On a typical weekday, how many hours of TV do you watch?",
[[1],[2],[3],[4]],
['None', '1-2 Hours', '3-4 Hours', 'More than 4 hours']),
'ideo5_baseline' : ('Ideology',
"Thinking about politics these days, how would you describe your own political viewpoint?",
[[1],[2],[3],[4],[5],[6]],
['Very Liberal', 'Liberal', 'Moderate', 'Conservative', 'Very Conservative', 'Not Sure']),
'imiss_a_baseline' : ('Issue Importance: Iraq War',
"How important are the following issues to you -- The War in Iraq?",
[[1],[2],[3],[4]],
['Very Important', 'Somewhat Important','Not Very Important', 'Unimportant']),
'imiss_b_baseline' : ('Issue Importance: The Economy',
"How important are the following issues to you -- The economy?",
[[1],[2],[3],[4]],
['Very Important', 'Somewhat Important','Not Very Important', 'Unimportant']),
'imiss_c_baseline' : ('Issue Importance: Immigration',
"How important are the following issues to you -- Immigration?",
[[1],[2],[3],[4]],
['Very Important', 'Somewhat Important','Not Very Important', 'Unimportant']),
'imiss_d_baseline' : ('Issue Importance: The Environment',
"How important are the following issues to you -- The Environment?",
[[1],[2],[3],[4]],
['Very Important', 'Somewhat Important','Not Very Important', 'Unimportant']),
'imiss_f_baseline' : ('Issue Importance: Terrorism',
"How important are the following issues to you -- Terrorism?",
[[1],[2],[3],[4]],
['Very Important', 'Somewhat Important','Not Very Important', 'Unimportant']),
'imiss_g_baseline' : ('Issue Importance: Gay Rights',
"How important are the following issues to you -- Gay Rights?",
[[1],[2],[3],[4]],
['Very Important', 'Somewhat Important','Not Very Important', 'Unimportant']),
'imiss_h_baseline' : ('Issue Importance: Education',
"How important are the following issues to you -- Education?",
[[1],[2],[3],[4]],
['Very Important', 'Somewhat Important','Not Very Important', 'Unimportant']),
'imiss_j_baseline' : ('Issue Importance: Health Care',
"How important are the following issues to you -- Health Care?",
[[1],[2],[3],[4]],
['Very Important', 'Somewhat Important','Not Very Important', 'Unimportant']),
'imiss_m_baseline' : ('Issue Importance: Social Security',
"How important are the following issues to you -- Social Security?",
[[1],[2],[3],[4]],
['Very Important', 'Somewhat Important','Not Very Important', 'Unimportant']),
'imiss_p_baseline' : ('Issue Importance: The Budget Deficit',
"How important are the following issues to you -- The Budget Deficit?",
[[1],[2],[3],[4]],
['Very Important', 'Somewhat Important','Not Very Important', 'Unimportant']),
'imiss_q_baseline' : ('Issue Importance: The War in Afganistan',
"How important are the following issues to you -- The War in Afganistan?",
[[1],[2],[3],[4]],
['Very Important', 'Somewhat Important','Not Very Important', 'Unimportant']),
'imiss_r_baseline' : ('Issue Importance: Taxes',
"How important are the following issues to you -- Taxes?",
[[1],[2],[3],[4]],
['Very Important', 'Somewhat Important','Not Very Important', 'Unimportant']),
'imiss_s_baseline' : ('Issue Importance: Medicare',
"How important are the following issues to you -- Medicare?",
[[1],[2],[3],[4]],
['Very Important', 'Somewhat Important','Not Very Important', 'Unimportant']),
'imiss_t_baseline' : ('Issue Importance: Abortion',
"How important are the following issues to you -- Abortion?",
[[1],[2],[3],[4]],
['Very Important', 'Somewhat Important','Not Very Important', 'Unimportant']),
'imiss_a_2016' : ('Issue Importance: Crime (2016)',
"How important are the following issues to you -- Crime?",
[[1],[2],[3],[4]],
['Very Important', 'Somewhat Important','Not Very Important', 'Unimportant']),
'imiss_b_2016' : ('Issue Importance: The Economy (2016)',
"How important are the following issues to you -- The economy?",
[[1],[2],[3],[4]],
['Very Important', 'Somewhat Important','Not Very Important', 'Unimportant']),
'imiss_c_2016' : ('Issue Importance: Immigration (2016)',
"How important are the following issues to you -- Immigration?",
[[1],[2],[3],[4]],
['Very Important', 'Somewhat Important','Not Very Important', 'Unimportant']),
'imiss_d_2016' : ('Issue Importance: The Environment (2016)',
"How important are the following issues to you -- The Environment?",
[[1],[2],[3],[4]],
['Very Important', 'Somewhat Important','Not Very Important', 'Unimportant']),
'imiss_e_2016' : ('Issue Importance: Religious Liberty (2016)',
"How important are the following issues to you -- Religious Liberty?",
[[1],[2],[3],[4]],
['Very Important', 'Somewhat Important','Not Very Important', 'Unimportant']),
'imiss_f_2016' : ('Issue Importance: Terrorism (2016)',
"How important are the following issues to you -- Terrorism?",
[[1],[2],[3],[4]],
['Very Important', 'Somewhat Important','Not Very Important', 'Unimportant']),
'imiss_g_2016' : ('Issue Importance: Gay Rights (2016)',
"How important are the following issues to you -- Gay Rights?",
[[1],[2],[3],[4]],
['Very Important', 'Somewhat Important','Not Very Important', 'Unimportant']),
'imiss_h_2016' : ('Issue Importance: Education (2016)',
"How important are the following issues to you -- Education?",
[[1],[2],[3],[4]],
['Very Important', 'Somewhat Important','Not Very Important', 'Unimportant']),
'imiss_h_2016' : ('Issue Importance: Family and Medical Leave (2016)',
"How important are the following issues to you -- Family and Medical Leave?",
[[1],[2],[3],[4]],
['Very Important', 'Somewhat Important','Not Very Important', 'Unimportant']),
'imiss_j_2016' : ('Issue Importance: Health Care (2016)',
"How important are the following issues to you -- Health Care?",
[[1],[2],[3],[4]],
['Very Important', 'Somewhat Important','Not Very Important', 'Unimportant']),
'imiss_k_2016' : ('Issue Importance: Money in Politics (2016)',
"How important are the following issues to you -- Money in Politics?",
[[1],[2],[3],[4]],
['Very Important', 'Somewhat Important','Not | |
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
r"""Contains life cycle state encoding class which is
used to generate new life cycle encodings.
"""
import logging as log
import random
from collections import OrderedDict
from Crypto.Hash import cSHAKE128
from lib.common import (check_int, ecc_encode, get_hd, hd_histogram,
is_valid_codeword, random_or_hexvalue, scatter_bits)
# Seed diversification constant for LcStEnc (this enables to use
# the same seed for different classes)
LC_SEED_DIVERSIFIER = 1939944205722120255

# State types and permissible format for entries
# The format is index dependent, e.g. ['0', 'A1', 'B1'] for index 1
#
# For each state type, a state word entry may be '0' (the all-zero word) or
# one of the two letter formats, which select the base resp. incremental
# word of the generated word pair at that index (see LcStEnc.encode()).
LC_STATE_TYPES = {
    'lc_state': ['0', 'A{}', 'B{}'],
    'lc_cnt': ['0', 'C{}', 'D{}'],
    'lc_id_state': ['0', 'E{}', 'F{}']
}
def _is_incremental_codeword(word1, word2):
'''Test whether word2 is incremental wrt word1.'''
if len(word1) != len(word2):
raise RuntimeError('Words are not of equal size')
_word1 = int(word1, 2)
_word2 = int(word2, 2)
# This basically checks that the second word does not
# clear any bits that are set to 1 in the first word.
return ((_word1 & _word2) == _word1)
def _get_incremental_codewords(config, base_ecc, existing_words):
    '''Get all possible incremental codewords fulfilling the constraints.

    `base_ecc` is a full codeword string whose first
    `config['secded']['ecc_width']` characters are the ECC bits, followed by
    the data bits.  Returns the list of full candidate codewords that:
      - only set data bits that are still '0' in `base_ecc` (and whose ECC
        bits are incremental wrt `base_ecc` as well),
      - do not exceed the Hamming weight bound `config['max_hw']`, and
      - keep at least `config['min_hd']` Hamming distance to `base_ecc` and
        to every word in `existing_words`.
    '''
    base_data = base_ecc[config['secded']['ecc_width']:]
    # We only need to spin through data bits that have not been set yet.
    # Hence, we first count how many bits are zero (and hence still
    # modifyable). Then, we enumerate all possible combinations and scatter
    # the bits of the enumerated values into the correct bit positions using
    # the scatter_bits() function.
    incr_cands = []
    free_bits = base_data.count('0')
    # Enumeration starts at 1 so that at least one additional bit is set.
    for k in range(1, 2**free_bits):
        # Get incremental dataword by scattering the enumeration bits
        # into the zero bit positions in base_data.
        incr_cand = scatter_bits(base_data,
                                 format(k, '0' + str(free_bits) + 'b'))
        incr_cand_ecc = ecc_encode(config, incr_cand)
        # Dataword is correct by construction, but we need to check whether
        # the ECC bits are incremental.
        if _is_incremental_codeword(base_ecc, incr_cand_ecc):
            # Check whether the candidate fulfills the maximum
            # Hamming weight constraint.
            if incr_cand_ecc.count('1') <= config['max_hw']:
                # Check Hamming distance wrt all existing words.
                for w in existing_words + [base_ecc]:
                    if get_hd(incr_cand_ecc, w) < config['min_hd']:
                        break
                else:
                    # for/else: no word violated the distance constraint.
                    incr_cands.append(incr_cand_ecc)
    return incr_cands
def _get_new_state_word_pair(config, existing_words):
    '''Randomly generate a new incrementally writable word pair.

    Draws random base words until one satisfies the Hamming weight bounds
    (`min_hw`/`max_hw`) and the minimum Hamming distance (`min_hd`) wrt all
    `existing_words`, and has at least one valid incremental successor.
    Appends both the base word and one randomly chosen incremental word
    (full ECC codewords) to `existing_words` (mutated in place) and returns
    them as a tuple.

    NOTE(review): there is no iteration limit, so this loops forever if the
    constraints admit no further valid word.
    '''
    while 1:
        # Draw a random number and check whether it is unique and whether
        # the Hamming weight is in range.
        width = config['secded']['data_width']
        ecc_width = config['secded']['ecc_width']
        base = random.getrandbits(width)
        base = format(base, '0' + str(width) + 'b')
        base_cand_ecc = ecc_encode(config, base)
        # disallow all-zero and all-one states
        pop_cnt = base_cand_ecc.count('1')
        if pop_cnt >= config['min_hw'] and pop_cnt <= config['max_hw']:
            # Check Hamming distance wrt all existing words
            for w in existing_words:
                if get_hd(base_cand_ecc, w) < config['min_hd']:
                    break
            else:
                # for/else: base word is far enough from all existing words.
                # Get encoded incremental candidates.
                incr_cands_ecc = _get_incremental_codewords(
                    config, base_cand_ecc, existing_words)
                # there are valid candidates, draw one at random.
                # otherwise we just start over.
                if incr_cands_ecc:
                    incr_cand_ecc = random.choice(incr_cands_ecc)
                    log.info('word {}: {}|{} -> {}|{}'.format(
                        int(len(existing_words) / 2),
                        base_cand_ecc[ecc_width:], base_cand_ecc[0:ecc_width],
                        incr_cand_ecc[ecc_width:], incr_cand_ecc[0:ecc_width]))
                    existing_words.append(base_cand_ecc)
                    existing_words.append(incr_cand_ecc)
                    return (base_cand_ecc, incr_cand_ecc)
def _validate_words(config, words):
    '''Validate generated words (base and incremental).

    Checks every word against the ECC polynomial, the Hamming weight bounds
    and the pairwise minimum Hamming distance, raising RuntimeError on the
    first violation.
    '''
    for k, w in enumerate(words):
        # Check whether word is valid wrt to ECC polynomial.
        if not is_valid_codeword(config, w):
            raise RuntimeError('Codeword {} at index {} is not valid'.format(
                w, k))
        # Check that word fulfills the Hamming weight constraints.
        pop_cnt = w.count('1')
        if pop_cnt < config['min_hw'] or pop_cnt > config['max_hw']:
            raise RuntimeError(
                'Codeword {} at index {} has wrong Hamming weight'.format(
                    w, k))
        # Check Hamming distance wrt to all other existing words.
        # If the constraint is larger than 0 this implies uniqueness.
        if k < len(words) - 1:
            # Only compare against later words; earlier pairs were already
            # covered in previous iterations.
            for k2, w2 in enumerate(words[k + 1:]):
                if get_hd(w, w2) < config['min_hd']:
                    raise RuntimeError(
                        'Hamming distance between codeword {} at index {} '
                        'and codeword {} at index {} is too low.'.format(
                            w, k, w2, k + 1 + k2))
def _validate_secded(config):
    '''Validate SECDED configuration.

    Normalizes the 'secded' section in place (defaults, integer conversion
    of widths and matrix entries) and raises RuntimeError when the widths or
    the ECC matrix shape are inconsistent.
    '''
    secded = config['secded']
    secded.setdefault('data_width', 0)
    secded.setdefault('ecc_width', 0)
    secded.setdefault('ecc_matrix', [[]])
    secded['data_width'] = check_int(secded['data_width'])
    secded['ecc_width'] = check_int(secded['ecc_width'])
    total_width = secded['data_width'] + secded['ecc_width']
    if secded['data_width'] % 8:
        raise RuntimeError('SECDED data width must be a multiple of 8')
    if secded['ecc_width'] != len(secded['ecc_matrix']):
        raise RuntimeError('ECC matrix does not have correct number of rows')
    log.info('SECDED Matrix:')
    for row_idx, row in enumerate(secded['ecc_matrix']):
        log.info('ECC Bit {} Fanin: {}'.format(row_idx, row))
        for col_idx, fanin in enumerate(row):
            # Normalize each fanin position to int and range-check it.
            fanin = check_int(fanin)
            if not 0 <= fanin < total_width:
                raise RuntimeError('ECC bit position is out of bounds')
            secded['ecc_matrix'][row_idx][col_idx] = fanin
def _validate_constraints(config):
    '''Validates Hamming weight and distance constraints.

    Normalizes 'min_hw', 'max_hw' and 'min_hd' in place (defaulting each to
    0 and converting to int) and raises RuntimeError when the weight bounds
    or the distance constraint are inconsistent with the codeword width.
    '''
    for key in ('min_hw', 'max_hw', 'min_hd'):
        config.setdefault(key, 0)
        config[key] = check_int(config[key])
    total_width = config['secded']['data_width'] + config['secded']['ecc_width']
    min_hw = config['min_hw']
    max_hw = config['max_hw']
    min_hd = config['min_hd']
    weights_invalid = (min_hw >= total_width
                       or max_hw > total_width
                       or min_hw >= max_hw)
    if weights_invalid:
        raise RuntimeError('Hamming weight constraints are inconsistent.')
    # The weight window must be able to accommodate the distance constraint.
    if max_hw - min_hw + 1 < min_hd:
        raise RuntimeError('Hamming distance constraint is inconsistent.')
def _validate_tokens(config):
    '''Validates the token configuration and appends hashed tokens.

    Normalizes 'token_size' (default 128, must be byte aligned), resolves
    each token's 'value' via random_or_hexvalue(), and appends a companion
    '<name>Hashed' token holding the cSHAKE128 digest (custom string
    'LC_CTRL') of the little-endian token bytes to config['tokens'].

    Raises ValueError when the token size is not byte aligned.
    '''
    config.setdefault('token_size', 128)
    config['token_size'] = check_int(config['token_size'])
    # This needs to be byte aligned.
    if config['token_size'] % 8:
        # Bug fix: the original message used token['name'] before `token`
        # was bound, raising NameError instead of the intended ValueError.
        raise ValueError('Token size {} must be byte aligned'
                         .format(config['token_size']))
    num_bytes = config['token_size'] // 8
    hashed_tokens = []
    for token in config['tokens']:
        random_or_hexvalue(token, 'value', config['token_size'])
        hashed_token = OrderedDict()
        hashed_token['name'] = token['name'] + 'Hashed'
        data = token['value'].to_bytes(num_bytes, byteorder='little')
        # Custom string chosen for life cycle KMAC App interface
        custom = 'LC_CTRL'.encode('UTF-8')
        hashobj = cSHAKE128.new(data=data, custom=custom)
        hashed_token['value'] = int.from_bytes(hashobj.read(num_bytes),
                                               byteorder='little')
        hashed_tokens.append(hashed_token)
    config['tokens'] += hashed_tokens
def _validate_state_declarations(config):
    '''Validates life cycle state and counter declarations.

    For each state type in LC_STATE_TYPES, infers the number of state words
    from the first declared state (stored as config['num_<typ>_words']),
    checks that all states of that type have the same length, and checks
    that every entry matches one of the permissible index-dependent formats
    (e.g. '0', 'A3' or 'B3' at index 3).  Raises RuntimeError otherwise.
    '''
    for typ in LC_STATE_TYPES.keys():
        for k, state in enumerate(config[typ].keys()):
            # The first state of each type determines the word count.
            if k == 0:
                config['num_' + typ + '_words'] = len(config[typ][state])
                log.info('Inferred {} = {}'.format(
                    'num_' + typ + '_words', config['num_' + typ + '_words']))
            if config['num_' + typ + '_words'] != len(config[typ][state]):
                raise RuntimeError(
                    '{} entry {} has incorrect length {}'.format(
                        typ, state, len(config[typ][state])))
            # Render the format templates above.
            for j, entry in enumerate(config[typ][state]):
                legal_values = [fmt.format(j) for fmt in LC_STATE_TYPES[typ]]
                if entry not in legal_values:
                    raise RuntimeError(
                        'Illegal entry "{}" found in {} of {}'.format(
                            entry, state, typ))
def _generate_words(config):
    '''Generate encoding words.

    Fills config['genwords'] with one (base, incremental) codeword pair per
    state word of each type, validates all generated words, and stores the
    Hamming distance/weight statistics from hd_histogram() under
    config['stats'].
    '''
    config['genwords'] = {}  # dict holding the word pairs for each state type
    existing_words = []  # temporary list of all words for uniqueness tests
    for typ in LC_STATE_TYPES.keys():
        config['genwords'][typ] = []
        for k in range(config['num_' + typ + '_words']):
            new_word = _get_new_state_word_pair(config, existing_words)
            config['genwords'][typ].append(new_word)
    # Validate words (this must not fail at this point).
    _validate_words(config, existing_words)
    # Calculate and store statistics
    config['stats'] = hd_histogram(existing_words)
    log.info('')
    log.info('Hamming distance histogram:')
    log.info('')
    for bar in config['stats']["bars"]:
        log.info(bar)
    log.info('')
    log.info('Minimum HD: {}'.format(config['stats']['min_hd']))
    log.info('Maximum HD: {}'.format(config['stats']['max_hd']))
    log.info('Minimum HW: {}'.format(config['stats']['min_hw']))
    log.info('Maximum HW: {}'.format(config['stats']['max_hw']))
class LcStEnc():
'''Life cycle state encoding generator class
The constructor expects the parsed configuration
hjson to be passed in.
'''
# This holds the config dict.
config = {}
    def __init__(self, config):
        '''The constructor validates the configuration dict.

        Expects the parsed encoding hjson as a dict.  Checks that the
        required top-level keys exist ('seed', 'secded', 'tokens' and one
        entry per LC_STATE_TYPES type), re-seeds the `random` module with
        LC_SEED_DIVERSIFIER + seed so results are reproducible, then runs
        the SECDED / constraint / token / state validation steps and
        generates the incremental word encodings into the config dict,
        which is finally stored on `self.config`.

        Raises RuntimeError on missing keys or inconsistent configuration.
        '''
        log.info('')
        log.info('Generate life cycle state')
        log.info('')
        if 'seed' not in config:
            raise RuntimeError('Missing seed in configuration')
        if 'secded' not in config:
            raise RuntimeError('Missing secded configuration')
        if 'tokens' not in config:
            raise RuntimeError('Missing token configuration')
        for typ in LC_STATE_TYPES.keys():
            if typ not in config:
                raise RuntimeError('Missing {} definition'.format(typ))
        config['seed'] = check_int(config['seed'])
        log.info('Seed: {0:x}'.format(config['seed']))
        log.info('')
        # Re-initialize with seed to make results reproducible.
        random.seed(LC_SEED_DIVERSIFIER + int(config['seed']))
        log.info('Checking SECDED.')
        _validate_secded(config)
        log.info('')
        log.info('Checking Hamming weight and distance constraints.')
        _validate_constraints(config)
        log.info('')
        log.info('Hashing tokens.')
        _validate_tokens(config)
        log.info('')
        log.info('Checking state declarations.')
        _validate_state_declarations(config)
        log.info('')
        log.info('Generate incremental word encodings.')
        _generate_words(config)
        self.config = config
        log.info('')
        log.info('Successfully generated life cycle state.')
        log.info('')
def encode(self, name, state):
'''Look up state encoding and return as integer value'''
data_width = self.config['secded']['data_width']
ecc_width = self.config['secded']['ecc_width']
if name not in LC_STATE_TYPES:
raise RuntimeError('Unknown state type {}'.format(name))
if state not in self.config[name]:
raise RuntimeError('Unknown state {} of type {}'.format(
state, name))
# Assemble list of state words
words = []
for j, entry in enumerate(self.config[name][state]):
# This creates an index lookup table
val_idx = {
fmt.format(j): i
for i, fmt in enumerate(LC_STATE_TYPES[name])
}
idx = val_idx[entry]
if idx == 0:
words.append(0)
else:
# Only extract data portion, discard ECC portion
word = self.config['genwords'][name][j][idx - 1][ecc_width:]
words.append(int(word, 2))
# Convert words to one value
outval = 0
for k, word in enumerate(words):
outval += word << | |
<reponame>AlexanderKalistratov/hpat<gh_stars>1-10
import operator
from collections import namedtuple
import pandas as pd
import numpy as np
import numba
from numba import types, cgutils
from numba.extending import (models, register_model, lower_cast, infer_getattr,
type_callable, infer, overload, make_attribute_wrapper, intrinsic,
lower_builtin, overload_method)
from numba.typing.templates import (infer_global, AbstractTemplate, signature,
AttributeTemplate, bound_function)
from numba.targets.imputils import impl_ret_new_ref, impl_ret_borrowed
import hpat
from hpat.hiframes.pd_series_ext import SeriesType
from hpat.str_ext import string_type
from hpat.str_arr_ext import string_array_type
class DataFrameType(types.Type):  # TODO: IterableType over column names
    """Temporary type class for DataFrame objects.

    Attributes:
        data: tuple of the column array types.
        index: array type of the index, or types.none when absent.
        columns: tuple of column name strings.
        has_parent: True when the value was unboxed from a Python object,
            enabling reflection of new columns back to that object.
    """

    def __init__(self, data=None, index=None, columns=None, has_parent=False):
        # data is tuple of Array types
        # index is Array type (TODO: Index obj)
        # columns is tuple of strings
        self.data = data
        if index is None:
            index = types.none
        self.index = index
        self.columns = columns
        # keeping whether it is unboxed from Python to enable reflection of new
        # columns
        self.has_parent = has_parent
        super(DataFrameType, self).__init__(
            name="dataframe({}, {}, {}, {})".format(
                data, index, columns, has_parent))

    def copy(self, index=None, has_parent=None):
        # XXX is copy necessary?
        # Returns a copy with per-element copies of data/index; index and
        # has_parent can be overridden by the caller.
        if index is None:
            index = types.none if self.index == types.none else self.index.copy()
        data = tuple(a.copy() for a in self.data)
        if has_parent is None:
            has_parent = self.has_parent
        return DataFrameType(data, index, self.columns, has_parent)

    @property
    def key(self):
        # needed?
        # Identity key used for type equality/interning.
        return self.data, self.index, self.columns, self.has_parent

    def unify(self, typingctx, other):
        # Only unify with another DataFrameType of the same shape (same
        # column count/names and parent flag); the unified index falls back
        # to types.none unless at least one side has a real index type.
        if (isinstance(other, DataFrameType)
                and len(other.data) == len(self.data)
                and other.columns == self.columns
                and other.has_parent == self.has_parent):
            new_index = types.none
            if self.index != types.none and other.index != types.none:
                new_index = self.index.unify(typingctx, other.index)
            elif other.index != types.none:
                new_index = other.index
            elif self.index != types.none:
                new_index = self.index
            data = tuple(a.unify(typingctx, b) for a, b in zip(self.data, other.data))
            return DataFrameType(
                data, new_index, self.columns, self.has_parent)

    def can_convert_to(self, typingctx, other):
        # Conversion deliberately disabled (always returns None); see the
        # commented-out sketch below for the intended future behavior.
        return
        # overload resolution tries to convert for even get_dataframe_data()
        # TODO: find valid conversion possibilities
        # if (isinstance(other, DataFrameType)
        #         and len(other.data) == len(self.data)
        #         and other.columns == self.columns):
        #     import pdb; pdb.set_trace()
        #     data_convert = max(a.can_convert_to(typingctx, b)
        #                        for a,b in zip(self.data, other.data))
        #     if self.index == types.none and other.index == types.none:
        #         return data_convert
        #     if self.index != types.none and other.index != types.none:
        #         return max(data_convert,
        #             self.index.can_convert_to(typingctx, other.index))

    def is_precise(self):
        # Precise only if all column types and the index type are precise.
        return all(a.is_precise() for a in self.data) and self.index.is_precise()
# TODO: encapsulate in meminfo since dataframe is mutible, for example:
# df = pd.DataFrame({'A': A})
# df2 = df
# if cond:
# df['A'] = B
# df2.A
# TODO: meminfo for reference counting of dataframes
@register_model(DataFrameType)
class DataFrameModel(models.StructModel):
    """Native struct layout backing DataFrameType values."""

    def __init__(self, dmm, fe_type):
        n_cols = len(fe_type.columns)
        members = [
            # column arrays, index array and column name strings
            ('data', types.Tuple(fe_type.data)),
            ('index', fe_type.index),
            ('columns', types.UniTuple(string_type, n_cols)),
            # for lazy unboxing of df coming from Python (usually argument)
            # list of flags noting which columns and index are unboxed
            # index flag is last
            ('unboxed', types.UniTuple(types.int8, n_cols + 1)),
            # original Python DataFrame object (null when constructed
            # natively, see init_dataframe)
            ('parent', types.pyobject),
        ]
        super(DataFrameModel, self).__init__(dmm, fe_type, members)
# Expose the struct model fields as read-only attributes on DataFrameType
# values inside jitted code; leading underscores keep them library-internal.
make_attribute_wrapper(DataFrameType, 'data', '_data')
make_attribute_wrapper(DataFrameType, 'index', '_index')
make_attribute_wrapper(DataFrameType, 'columns', '_columns')
make_attribute_wrapper(DataFrameType, 'unboxed', '_unboxed')
make_attribute_wrapper(DataFrameType, 'parent', '_parent')
@infer_getattr
class DataFrameAttribute(AttributeTemplate):
    """Typing for attribute accesses on DataFrameType values
    (df.shape, df.iat/iloc/loc, df.values, df.apply, df.<column>, ...).
    """
    key = DataFrameType

    def resolve_shape(self, ary):
        # df.shape types as a 2-tuple of intp.
        return types.UniTuple(types.intp, 2)

    def resolve_iat(self, ary):
        # Indexer type classes are presumably declared elsewhere in this
        # module (not visible here).
        return DataFrameIatType(ary)

    def resolve_iloc(self, ary):
        return DataFrameILocType(ary)

    def resolve_loc(self, ary):
        return DataFrameLocType(ary)

    def resolve_values(self, ary):
        # using np.stack(data, 1) for both typing and implementation
        stack_sig = self.context.resolve_function_type(
            np.stack, (types.Tuple(ary.data), types.IntegerLiteral(1)), {})
        return stack_sig.return_type

    @bound_function("df.apply")
    def resolve_apply(self, df, args, kws):
        """Type df.apply(func, axis=1): only a lambda with axis=1 is
        supported; infers the lambda's return type on a namedtuple row
        built from the column element types."""
        kws = dict(kws)
        func = args[0] if len(args) > 0 else kws.get('func', None)
        # check lambda
        if not isinstance(func, types.MakeFunctionLiteral):
            raise ValueError("df.apply(): lambda not found")
        # check axis
        axis = args[1] if len(args) > 1 else kws.get('axis', None)
        if (axis is None or not isinstance(axis, types.IntegerLiteral)
                or axis.literal_value != 1):
            raise ValueError("only apply() with axis=1 supported")
        # using NamedTuple instead of Series, TODO: pass Series
        Row = namedtuple('R', df.columns)
        # the data elements come from getitem of Series to perform conversion
        # e.g. dt64 to timestamp in TestDate.test_ts_map_date2
        dtypes = []
        for arr_typ in df.data:
            series_typ = SeriesType(arr_typ.dtype, arr_typ, df.index, True)
            el_typ = self.context.resolve_function_type(
                operator.getitem, (series_typ, types.int64), {}).return_type
            dtypes.append(el_typ)
        row_typ = types.NamedTuple(dtypes, Row)
        code = func.literal_value.code
        f_ir = numba.ir_utils.get_ir_of_code({'np': np}, code)
        _, f_return_type, _ = numba.compiler.type_inference_stage(
            self.context, f_ir, (row_typ,), None)
        return signature(SeriesType(f_return_type), *args)

    @bound_function("df.describe")
    def resolve_describe(self, df, args, kws):
        # TODO: use overload
        # TODO: return proper series output
        return signature(string_type, *args)

    def generic_resolve(self, df, attr):
        # Fallback: a column name attribute (df.A) types as a Series over
        # that column's array type.
        if attr in df.columns:
            ind = df.columns.index(attr)
            arr_typ = df.data[ind]
            return SeriesType(arr_typ.dtype, arr_typ, df.index, True)
@intrinsic
def init_dataframe(typingctx, *args):
    """Create a DataFrame with provided data, index and columns values.
    Used as a single constructor for DataFrame and assigning its data, so that
    optimization passes can look for init_dataframe() to see if underlying
    data has changed, and get the array variables from init_dataframe() args if
    not changed.

    Argument layout: n column arrays, then the index, then n string literals
    with the column names (len(args) == 2*n + 1, so len(args)//2 == n).
    """
    n_cols = len(args) // 2
    data_typs = tuple(args[:n_cols])
    index_typ = args[n_cols]
    column_names = tuple(a.literal_value for a in args[n_cols + 1:])

    def codegen(context, builder, signature, args):
        # The signature packs all arguments into a single tuple value.
        in_tup = args[0]
        data_arrs = [builder.extract_value(in_tup, i) for i in range(n_cols)]
        index = builder.extract_value(in_tup, n_cols)
        column_strs = [numba.unicode.make_string_from_constant(
            context, builder, string_type, c) for c in column_names]
        # create dataframe struct and store values
        dataframe = cgutils.create_struct_proxy(
            signature.return_type)(context, builder)
        data_tup = context.make_tuple(
            builder, types.Tuple(data_typs), data_arrs)
        column_tup = context.make_tuple(
            builder, types.UniTuple(string_type, n_cols), column_strs)
        zero = context.get_constant(types.int8, 0)
        # all unboxed flags start at 0; there is no parent object
        unboxed_tup = context.make_tuple(
            builder, types.UniTuple(types.int8, n_cols + 1), [zero] * (n_cols + 1))
        dataframe.data = data_tup
        dataframe.index = index
        dataframe.columns = column_tup
        dataframe.unboxed = unboxed_tup
        dataframe.parent = context.get_constant_null(types.pyobject)

        # increase refcount of stored values
        if context.enable_nrt:
            context.nrt.incref(builder, index_typ, index)
            for var, typ in zip(data_arrs, data_typs):
                context.nrt.incref(builder, typ, var)
            for var in column_strs:
                context.nrt.incref(builder, string_type, var)

        return dataframe._getvalue()

    ret_typ = DataFrameType(data_typs, index_typ, column_names)
    sig = signature(ret_typ, types.Tuple(args))
    return sig, codegen
@intrinsic
def has_parent(typingctx, df=None):
    """Return whether the dataframe struct holds a non-null parent Python
    object (i.e. it was unboxed from Python)."""
    def codegen(context, builder, sig, args):
        dataframe = cgutils.create_struct_proxy(
            sig.args[0])(context, builder, value=args[0])
        return cgutils.is_not_null(builder, dataframe.parent)
    return signature(types.bool_, df), codegen
# TODO: alias analysis
# this function should be used for getting df._data for alias analysis to work
# no_cpython_wrapper since Array(DatetimeDate) cannot be boxed
@numba.generated_jit(nopython=True, no_cpython_wrapper=True)
def get_dataframe_data(df, i):
    """Return column array `i` of `df`, lazily unboxing it from the parent
    Python object first if it has not been unboxed yet (unboxed flag 0)."""
    def _impl(df, i):
        if has_parent(df) and df._unboxed[i] == 0:
            # TODO: make df refcounted to avoid repeated unboxing
            df = hpat.hiframes.boxing.unbox_dataframe_column(df, i)
        return df._data[i]
    return _impl
# TODO: use separate index type instead of just storing array
@numba.generated_jit(nopython=True, no_cpython_wrapper=True)
def get_dataframe_index(df):
    """Return the index array stored in the dataframe struct."""
    return lambda df: df._index
@intrinsic
def set_df_index(typingctx, df_t, index_t=None):
    """used in very limited cases like distributed to_csv() to create a new
    dataframe with index

    Returns a new dataframe value sharing data/columns/unboxed/parent with
    the input but carrying the given index.
    """
    # TODO: make inplace when dfs are full objects

    def codegen(context, builder, signature, args):
        in_df_arg = args[0]
        index = args[1]
        in_df = cgutils.create_struct_proxy(
            signature.args[0])(context, builder, value=in_df_arg)
        # create dataframe struct and store values
        dataframe = cgutils.create_struct_proxy(
            signature.return_type)(context, builder)
        # all fields except the index are carried over unchanged
        dataframe.data = in_df.data
        dataframe.index = index
        dataframe.columns = in_df.columns
        dataframe.unboxed = in_df.unboxed
        dataframe.parent = in_df.parent

        # increase refcount of stored values
        if context.enable_nrt:
            context.nrt.incref(builder, index_t, index)
            # TODO: refcount
            context.nrt.incref(builder, types.Tuple(df_t.data), dataframe.data)
            context.nrt.incref(
                builder, types.UniTuple(string_type, len(df_t.columns)),
                dataframe.columns)

        return dataframe._getvalue()

    ret_typ = DataFrameType(df_t.data, index_t, df_t.columns)
    sig = signature(ret_typ, df_t, index_t)
    return sig, codegen
@intrinsic
def set_df_column_with_reflect(typingctx, df, cname, arr):
"""Set df column and reflect to parent Python object
return a new df.
"""
col_name = cname.literal_value
n_cols = len(df.columns)
new_n_cols = n_cols
data_typs = df.data
column_names = df.columns
index_typ = df.index
is_new_col = col_name not in df.columns
col_ind = n_cols
if is_new_col:
data_typs += (arr,)
column_names += (col_name,)
new_n_cols += 1
else:
col_ind = df.columns.index(col_name)
data_typs = tuple((arr if i == col_ind else data_typs[i])
for i in range(n_cols))
def codegen(context, builder, signature, args):
df_arg, _, arr_arg = args
in_dataframe = cgutils.create_struct_proxy(df)(
context, builder, value=df_arg)
data_arrs = [builder.extract_value(in_dataframe.data, i)
if i != col_ind else arr_arg for i in range(n_cols)]
if is_new_col:
data_arrs.append(arr_arg)
column_strs = [numba.unicode.make_string_from_constant(
context, builder, string_type, c) for c in column_names]
zero = context.get_constant(types.int8, 0)
one = context.get_constant(types.int8, 1)
unboxed_vals = [builder.extract_value(in_dataframe.unboxed, i)
if i != col_ind else one for i in range(n_cols)]
if is_new_col:
unboxed_vals.append(one) # for new data array
unboxed_vals.append(zero) # for index
index = in_dataframe.index
# create dataframe struct and store values
out_dataframe = cgutils.create_struct_proxy(
signature.return_type)(context, builder)
data_tup = context.make_tuple(
builder, types.Tuple(data_typs), data_arrs)
column_tup = context.make_tuple(
builder, types.UniTuple(string_type, new_n_cols), column_strs)
unboxed_tup = context.make_tuple(
builder, types.UniTuple(types.int8, new_n_cols + 1), unboxed_vals)
out_dataframe.data = data_tup
out_dataframe.index = index
out_dataframe.columns = column_tup
out_dataframe.unboxed = unboxed_tup
out_dataframe.parent = in_dataframe.parent # TODO: refcount of parent?
# increase refcount of stored values
if context.enable_nrt:
context.nrt.incref(builder, index_typ, index)
for var, typ in zip(data_arrs, data_typs):
context.nrt.incref(builder, typ, var)
for | |
volume %(k)s should be in 'available' status "
"but is in '%(v)s' status."
) % {'k': name_or_id, 'v': volume.status}
raise exc.InvalidSpec(message=msg)
except exc.InternalError as ex:
if reason == 'create':
raise exc.EResourceCreation(type='server',
message=six.text_type(ex))
elif ex.code == 404:
msg = _("The specified volume '%(k)s' could not be found."
) % {'k': name_or_id}
raise exc.InvalidSpec(message=msg)
else:
raise
def do_validate(self, obj):
"""Validate if the spec has provided valid info for server creation.
:param obj: The node object.
"""
# validate availability_zone
az_name = self.properties[self.AVAILABILITY_ZONE]
if az_name is not None:
self._validate_az(obj, az_name)
# validate flavor
flavor = self.properties[self.FLAVOR]
self._validate_flavor(obj, flavor)
# validate image
image = self.properties[self.IMAGE]
if image is not None:
self._validate_image(obj, image)
# validate key_name
keypair = self.properties[self.KEY_NAME]
if keypair is not None:
self._validate_keypair(obj, keypair)
# validate networks
networks = self.properties[self.NETWORKS]
for net in networks:
self._validate_network(obj, net)
return True
def _resolve_bdm(self, obj, bdm, reason=None):
for bd in bdm:
for key in self.BDM2_KEYS:
if bd[key] is None:
del bd[key]
if 'uuid' in bd and 'source_type' in bd:
if bd['source_type'] == 'image':
self._validate_image(obj, bd['uuid'], reason)
elif bd['source_type'] == 'volume':
self._validate_volume(obj, bd['uuid'], reason)
return bdm
def _check_security_groups(self, nc, net_spec, result):
"""Check security groups.
:param nc: network driver connection.
:param net_spec: the specification to check.
:param result: the result that is used as return value.
:returns: None if succeeded or an error message if things go wrong.
"""
sgs = net_spec.get(self.PORT_SECURITY_GROUPS)
if not sgs:
return
res = []
try:
for sg in sgs:
sg_obj = nc.security_group_find(sg)
res.append(sg_obj.id)
except exc.InternalError as ex:
return six.text_type(ex)
result[self.PORT_SECURITY_GROUPS] = res
return
def _check_network(self, nc, net, result):
"""Check the specified network.
:param nc: network driver connection.
:param net: the name or ID of network to check.
:param result: the result that is used as return value.
:returns: None if succeeded or an error message if things go wrong.
"""
if net is None:
return
try:
net_obj = nc.network_get(net)
if net_obj is None:
return _("The specified network %s could not be found.") % net
result[self.NETWORK] = net_obj.id
except exc.InternalError as ex:
return six.text_type(ex)
def _check_port(self, nc, port, result):
"""Check the specified port.
:param nc: network driver connection.
:param port: the name or ID of port to check.
:param result: the result that is used as return value.
:returns: None if succeeded or an error message if things go wrong.
"""
if port is None:
return
try:
port_obj = nc.port_find(port)
if port_obj.status != 'DOWN':
return _("The status of the port %(p)s must be DOWN"
) % {'p': port}
result[self.PORT] = port_obj.id
return
except exc.InternalError as ex:
return six.text_type(ex)
def _check_floating_ip(self, nc, net_spec, result):
"""Check floating IP and network, if specified.
:param nc: network driver connection.
:param net_spec: the specification to check.
:param result: the result that is used as return value.
:returns: None if succeeded or an error message if things go wrong.
"""
net = net_spec.get(self.FLOATING_NETWORK)
if net:
try:
net_obj = nc.network_get(net)
if net_obj is None:
return _("The floating network %s could not be found."
) % net
result[self.FLOATING_NETWORK] = net_obj.id
except exc.InternalError as ex:
return six.text_type(ex)
flt_ip = net_spec.get(self.FLOATING_IP)
if not flt_ip:
return
try:
# Find floating ip with this address
fip = nc.floatingip_find(flt_ip)
if fip:
if fip.status == 'ACTIVE':
return _('the floating IP %s has been used.') % flt_ip
result['floating_ip_id'] = fip.id
# Create a floating IP with address if floating ip unspecified
if not net:
return _('Must specify a network to create floating IP')
result[self.FLOATING_IP] = flt_ip
return
except exc.InternalError as ex:
return six.text_type(ex)
def _validate_network(self, obj, net_spec, reason=None):
def _verify(error):
if error is None:
return
if reason == 'create':
raise exc.EResourceCreation(type='server', message=error)
elif reason == 'update':
raise exc.EResourceUpdate(type='server', id=obj.physical_id,
message=error)
else:
raise exc.InvalidSpec(message=error)
nc = self.network(obj)
result = {}
# check network
net = net_spec.get(self.NETWORK)
error = self._check_network(nc, net, result)
_verify(error)
# check port
port = net_spec.get(self.PORT)
error = self._check_port(nc, port, result)
_verify(error)
if port is None and net is None:
_verify(_("One of '%(p)s' and '%(n)s' must be provided"
) % {'p': self.PORT, 'n': self.NETWORK})
fixed_ip = net_spec.get(self.FIXED_IP)
if fixed_ip:
if port is not None:
_verify(_("The '%(p)s' property and the '%(fip)s' property "
"cannot be specified at the same time"
) % {'p': self.PORT, 'fip': self.FIXED_IP})
result[self.FIXED_IP] = fixed_ip
# Check security_groups
error = self._check_security_groups(nc, net_spec, result)
_verify(error)
# Check floating IP
error = self._check_floating_ip(nc, net_spec, result)
_verify(error)
return result
def _get_port(self, obj, net_spec):
"""Fetch or create a port.
:param obj: The node object.
:param net_spec: The parameters to create a port.
:returns: Created port object and error message.
"""
port_id = net_spec.get(self.PORT, None)
if port_id:
try:
port = self.network(obj).port_find(port_id)
return port, None
except exc.InternalError as ex:
return None, ex
port_attr = {
'network_id': net_spec.get(self.NETWORK),
}
fixed_ip = net_spec.get(self.FIXED_IP, None)
if fixed_ip:
port_attr['fixed_ips'] = [fixed_ip]
security_groups = net_spec.get(self.PORT_SECURITY_GROUPS, [])
if security_groups:
port_attr['security_groups'] = security_groups
try:
port = self.network(obj).port_create(**port_attr)
return port, None
except exc.InternalError as ex:
return None, ex
    def _delete_ports(self, obj, ports):
        """Delete ports.
        :param obj: The node object
        :param ports: A list of internal ports.
        :returns: None for succeed or error for failure.
        """
        # Iterate over a deep copy so entries can be removed from the live
        # list while looping; dicts are removed from `ports` by value
        # equality, which matches because the copy is deep.
        pp = copy.deepcopy(ports)
        for port in pp:
            # remove port created by senlin
            if port.get('remove', False):
                try:
                    # remove floating IP created by senlin
                    if port.get('floating', None) and port[
                            'floating'].get('remove', False):
                        self.network(obj).floatingip_delete(
                            port['floating']['id'])
                    self.network(obj).port_delete(port['id'])
                except exc.InternalError as ex:
                    # Abort on the first failure; `ports` still holds the
                    # entries that were not successfully deleted.
                    return ex
                ports.remove(port)
        # Persist the pruned port list on the node record.
        node_data = obj.data
        node_data['internal_ports'] = ports
        node_obj.Node.update(self.context, obj.id, {'data': node_data})
def _get_floating_ip(self, obj, fip_spec, port_id):
"""Find or Create a floating IP.
:param obj: The node object.
:param fip_spec: The parameters to create a floating ip
:param port_id: The port ID to associate with
:returns: A floating IP object and error message.
"""
floating_ip_id = fip_spec.get('floating_ip_id', None)
if floating_ip_id:
try:
fip = self.network(obj).floatingip_find(floating_ip_id)
if fip.port_id is None:
attr = {'port_id': port_id}
fip = self.network(obj).floatingip_update(fip, **attr)
return fip, None
except exc.InternalError as ex:
return None, ex
net_id = fip_spec.get(self.FLOATING_NETWORK)
fip_addr = fip_spec.get(self.FLOATING_IP)
attr = {
'port_id': port_id,
'floating_network_id': net_id,
}
if fip_addr:
attr.update({'floating_ip_address': fip_addr})
try:
fip = self.network(obj).floatingip_create(**attr)
return fip, None
except exc.InternalError as ex:
return None, ex
    def _create_ports_from_properties(self, obj, networks, action_type):
        """Create or find ports based on networks property.
        :param obj: The node object.
        :param networks: The networks property used for node.
        :param action_type: Either 'create' or 'update'.
        :returns: A list of created port's attributes.
        """
        internal_ports = obj.data.get('internal_ports', [])
        if not networks:
            return []
        for net_spec in networks:
            # Resolve names/IDs; raises the action-appropriate exception
            # on invalid input.
            net = self._validate_network(obj, net_spec, action_type)
            # Create port
            port, ex = self._get_port(obj, net)
            # Delete created ports before raise error
            if ex:
                d_ex = self._delete_ports(obj, internal_ports)
                if d_ex:
                    raise d_ex
                else:
                    raise ex
            port_attrs = {
                'id': port.id,
                'network_id': port.network_id,
                'security_group_ids': port.security_group_ids,
                'fixed_ips': port.fixed_ips
            }
            # 'remove' marks resources senlin created itself so they are
            # cleaned up on deletion (a user-supplied port is kept).
            if self.PORT not in net:
                port_attrs.update({'remove': True})
            # Create floating ip
            if 'floating_ip_id' in net or self.FLOATING_NETWORK in net:
                fip, ex = self._get_floating_ip(obj, net, port_attrs['id'])
                if ex:
                    d_ex = self._delete_ports(obj, internal_ports)
                    if d_ex:
                        raise d_ex
                    else:
                        raise ex
                port_attrs['floating'] = {
                    'id': fip.id,
                    'floating_ip_address': fip.floating_ip_address,
                    'floating_network_id': fip.floating_network_id,
                }
                if self.FLOATING_NETWORK in net:
                    port_attrs['floating'].update({'remove': True})
            internal_ports.append(port_attrs)
        # Persist the accumulated port attributes on the node record.
        if internal_ports:
            node_data = obj.data
            node_data.update(internal_ports=internal_ports)
            node_obj.Node.update(self.context, obj.id, {'data': node_data})
        return internal_ports
def _build_metadata(self, obj, usermeta):
"""Build custom metadata for server.
:param obj: The node object to operate on.
:return: A dictionary containing the new metadata.
"""
metadata = usermeta or {}
metadata['cluster_node_id'] = obj.id
if obj.cluster_id:
metadata['cluster_id'] = obj.cluster_id
metadata['cluster_node_index'] = six.text_type(obj.index)
return metadata
def _update_zone_info(self, obj, server):
"""Update the actual zone placement data.
:param obj: The node object associated with this server.
:param server: The server object returned from creation.
"""
if server.availability_zone:
placement = obj.data.get('placement', None)
if not placement:
obj.data['placement'] = {'zone': server.availability_zone}
else:
obj.data['placement'].setdefault('zone',
server.availability_zone)
# It is safe to use admin context here
ctx = context.get_admin_context()
node_obj.Node.update(ctx, obj.id, {'data': obj.data})
def do_create(self, obj):
"""Create a server for the node object.
:param obj: The node object for which a server will be created.
"""
kwargs = {}
for key in self.KEYS:
# context is treated as connection parameters
if key == | |
"""
StarGAN v2
Copyright (c) 2020-present NAVER Corp.
This work is licensed under the Creative Commons Attribution-NonCommercial
4.0 International License. To view a copy of this license, visit
http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""
import os
import json
from tqdm import tqdm
import ffmpeg
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.utils as vutils
from misc.mask_utils import label2mask
from misc.utils import human_format, scale_image, to_cuda, denorm, create_text
from misc.mask_utils import label2mask, label2mask_plain, scatterMask
import cv2
import warnings
from metrics.attribute_model import AttNet
from metrics.smileSYN import SMILE_SYN as SMILE
from metrics.segmentation_model import MaskNet, bisenet2sean
warnings.filterwarnings('ignore')
# Face-parsing attribute names in the order produced by the segmentation
# network's output channels; 'background' is prepended below as index 0.
ATTRS = [
    'skin', 'l_brow', 'r_brow', 'l_eye', 'r_eye', 'eye_g', 'l_ear', 'r_ear',
    'ear_r', 'nose', 'mouth', 'u_lip', 'l_lip', 'neck', 'neck_l', 'cloth',
    'hair', 'hat'
]
ATTRS = ['background'] + ATTRS
# Label id per attribute for the CelebA mask convention.  NOTE: this
# ordering differs from ATTRS; vis_parsing_maps() remaps colors through it.
MASK_LABELS = {
    'background': 0,
    'skin': 1,
    'nose': 2,
    'eye_g': 3,
    'l_eye': 4,
    'r_eye': 5,
    'l_brow': 6,
    'r_brow': 7,
    'l_ear': 8,
    'r_ear': 9,
    'mouth': 10,
    'u_lip': 11,
    'l_lip': 12,
    'hair': 13,
    'hat': 14,
    'ear_r': 15,
    'neck_l': 16,
    'neck': 17,
    'cloth': 18
}
def vis_parsing_maps(im,
                     parsing_anno,
                     stride,
                     show=False,
                     save_im=False,
                     save_path='vis_results/parsing_map_on_im.jpg',
                     SEAN_COLORS=False):
    """Colorize a face-parsing label map and blend it onto the image.

    :param im: RGB image (anything np.array() accepts).
    :param parsing_anno: 2-D integer label map.
    :param stride: scale factor applied to the label map before coloring.
    :param show, save_im, save_path: accepted for API compatibility;
        not used in this code path.
    :param SEAN_COLORS: pick the SEAN palette instead of the net's palette.
    :returns: (colorized label map, blended BGR overlay image), both uint8.
    """
    # Colors for all 20 parts
    if not SEAN_COLORS:
        # These colors are the output of the net which differs in the attribute
        # order
        part_colors = [[255, 255, 255],
                       [204, 0, 0], [76, 153, 0], [204, 204, 0], [51, 51, 255],
                       [204, 0, 204], [0, 255, 255], [255, 204, 204],
                       [102, 51, 0], [255, 0, 0], [102, 204, 0], [255, 255, 0],
                       [0, 0, 153], [0, 0, 204], [255, 51, 153], [0, 204, 204],
                       [0, 51, 0], [255, 153, 51],
                       [0, 204, 0]]  # These colors for CelebA_MASK
        # Reorder the palette from the net's channel order into the
        # MASK_LABELS label order so color i matches label id i.
        part_colors = [
            part_colors[MASK_LABELS[ATTRS[i]]] for i in range(len(part_colors))
        ]
        # cmap_colors = [(e[0] / 255.0, e[1] / 255.0, e[2] / 255.0) for e in part_colors]
        # cm = LinearSegmentedColormap.from_list(
        #     'CelebA_Mask', cmap_colors, N=19)
    else:
        # part_colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0],
        #                [255, 0, 85], [255, 0, 170],
        #                [0, 255, 0], [85, 255, 0], [170, 255, 0],
        #                [0, 255, 85], [0, 255, 170],
        #                [0, 0, 255], [85, 0, 255], [170, 0, 255],
        #                [0, 85, 255], [0, 170, 255],
        #                [255, 255, 0], [255, 255, 85], [255, 255, 170],
        #                [255, 0, 255], [255, 85, 255], [255, 170, 255],
        #                [0, 255, 255], [85, 255, 255], [170, 255, 255]]
        # part_colors = [(e[2], e[1], e[0]) for e in part_colors]
        part_colors = [(0, 0, 0), (204, 0, 0), (76, 153, 0), (204, 204, 0),
                       (51, 51, 255), (204, 0, 204), (0, 255, 255),
                       (51, 255, 255), (102, 51, 0),
                       (255, 0, 0), (102, 204, 0), (255, 255, 0), (0, 0, 153),
                       (0, 0, 204), (255, 51, 153), (0, 204, 204), (0, 51, 0),
                       (255, 153, 51), (0, 204, 0)]
        # part_colors = [(e[2], e[1], e[0]) for e in part_colors]
        # NOTE(review): dropping the first entry shifts every label's color
        # by one in this branch -- confirm this offset is intended.
        part_colors.pop(0)
    im = np.array(im)
    vis_im = im.copy().astype(np.uint8)
    vis_parsing_anno = parsing_anno.copy().astype(np.uint8)
    # Nearest-neighbor resize keeps labels discrete (no interpolation blur).
    vis_parsing_anno = cv2.resize(vis_parsing_anno,
                                  None,
                                  fx=stride,
                                  fy=stride,
                                  interpolation=cv2.INTER_NEAREST)
    vis_parsing_anno_color = np.zeros(
        (vis_parsing_anno.shape[0], vis_parsing_anno.shape[1], 3))
    # Pre-fill with the color of label 0 (background).
    for i in range(3):
        vis_parsing_anno_color[:, :, i] += part_colors[0][i]
    num_of_class = np.max(vis_parsing_anno)
    # Paint every non-background label with its palette color.
    for pi in range(1, num_of_class + 1):
        index = np.where(vis_parsing_anno == pi)
        vis_parsing_anno_color[index[0], index[1], :] = part_colors[pi]
    vis_parsing_anno_color = vis_parsing_anno_color.astype(np.uint8)
    # print(vis_parsing_anno_color.shape, vis_im.shape)
    # return vis_parsing_anno_color
    # 40/60 alpha blend of the (BGR-converted) image and the color map.
    vis_im = cv2.addWeighted(cv2.cvtColor(vis_im, cv2.COLOR_RGB2BGR), 0.4,
                             vis_parsing_anno_color, 0.6, 0)
    return vis_parsing_anno_color, vis_im
def save_json(json_file, filename):
    """Write *json_file* (a JSON-serializable object) to *filename* as
    pretty-printed JSON with the original key order preserved.
    """
    with open(filename, 'w') as handle:
        handle.write(json.dumps(json_file, indent=4, sort_keys=False))
def print_network(network, name):
    """Print the total parameter count of *network* under label *name*.

    :param network: module exposing .parameters() (e.g. a torch.nn.Module).
    :param name: human-readable label used in the printed line.
    """
    # Idiom: sum a generator instead of a manual accumulator loop.
    num_params = sum(p.numel() for p in network.parameters())
    # print(network)
    print("Number of parameters of %s: %i (%s)" %
          (name, num_params, human_format(num_params)))
def he_init(module):
    """Apply He (Kaiming) normal initialization to conv/linear layers.

    Weights get kaiming_normal_ (fan_in, relu); biases are zeroed.
    Other module types are left untouched.
    """
    # The original had two byte-identical branches for Conv2d and Linear;
    # a single isinstance check covers both without changing behavior.
    if isinstance(module, (nn.Conv2d, nn.Linear)):
        nn.init.kaiming_normal_(module.weight,
                                mode='fan_in',
                                nonlinearity='relu')
        if module.bias is not None:
            nn.init.constant_(module.bias, 0)
def denormalize(x):
    """Map a tensor from [-1, 1] to [0, 1], clamping values that overflow."""
    return x.add(1).div(2).clamp_(0, 1)
def normalize(x):
    """Map a tensor from [0, 1] to [-1, 1], clamping values that overflow."""
    return x.mul(2).sub(1).clamp_(-1, 1)
def save_image(x, ncol, filename):
    """Denormalize *x* to [0, 1] and write it to *filename* as an image
    grid with *ncol* images per row and no padding.
    """
    vutils.save_image(denormalize(x).cpu(), filename, nrow=ncol, padding=0)
def save_img(x, ncol, filename, denorm=True):
    """Save tensor *x* as an image grid, optionally denormalizing first.

    Uses torchvision's save_image directly (imported locally so it does
    not clash with the module-level save_image defined above).
    """
    from torchvision.utils import save_image as tv_save_image
    img = denormalize(x) if denorm else x
    tv_save_image(img.cpu(), filename, nrow=ncol, padding=0)
@torch.no_grad()
def translate_and_reconstruct(nets,
                              args,
                              x_src,
                              y_src,
                              x_ref,
                              y_ref,
                              filename,
                              multidomain=(),
                              mask=None,
                              translate_all=False,
                              fill_rgb=False):
    """Translate x_src using x_ref's style, reconstruct it back with the
    source style, and save [src, ref, fake, rec] rows to *filename*.

    NOTE(review): assumes mask is provided (m_src/m_ref) whenever
    args.STYLE_SEMANTICS or fill_rgb is set -- confirm against callers.
    """
    if mask is not None:
        m_src, m_ref = mask
    # style_semantics inverts m_src and x_src
    N, C, H, W = x_src.size()
    # Facial-landmark heatmaps (if FAN enabled) come from the RGB image.
    if args.STYLE_SEMANTICS:
        fan = nets.FAN.get_heatmap(m_src) if args.FAN else None
    else:
        fan = nets.FAN.get_heatmap(x_src) if args.FAN else None
    if fill_rgb:
        # attnet = to_cuda(AttNet(verbose=False))
        # masknet = to_cuda(MaskNet(verbose=False))
        smile_syn = to_cuda(SMILE(args.image_size, verbose=False))
    s_ref = nets.S(x_ref, y_ref)
    s_src = nets.S(x_src, y_src)
    _, domain = multidomain
    domain_str = args.domains[domain] if not translate_all else 'ALL'
    if not translate_all:
        # Swap only the selected domain's style slot; keep the rest of
        # the source style untouched.
        _s_ref = s_src.clone()
        _s_ref[:, domain // 2] = s_ref[:, domain // 2]
        s_ref = _s_ref
    x_fake = nets.G(x_src, s_ref, fan=fan)
    if args.STYLE_SEMANTICS:
        if fill_rgb:
            # Re-binarize the generated semantic map before feeding it to
            # the RGB synthesizer.
            x_fake_rec = scatterMask(
                label2mask_plain(x_fake)[:, 0], x_fake.size(1))
            sty_ref_missing_part = smile_syn.model.encoder(m_ref, x_ref)
            rgb = smile_syn.forward_from_tensor(x_fake_rec,
                                                rgb_guide=m_src,
                                                sem_guide=x_src,
                                                style_ref=sty_ref_missing_part,
                                                domain=domain_str)
            x_fake = label2mask(x_fake, n=x_fake.size(1))
            x_fake = scale_image(rgb, x_fake, None, size=64)
        else:
            x_fake_rec = scatterMask(
                label2mask_plain(x_fake)[:, 0], x_fake.size(1))
        # Reconstruct back toward the source using its original style.
        x_rec = nets.G(x_fake_rec, s_src, fan=fan)
    if fill_rgb:
        x_fake_rec = scatterMask(label2mask_plain(x_rec)[:, 0], x_rec.size(1))
        sty_ref_missing_part = smile_syn.model.encoder(m_src, x_src)
        rgb = smile_syn.forward_from_tensor(x_fake_rec,
                                            rgb_guide=m_src,
                                            sem_guide=x_src,
                                            style_ref=sty_ref_missing_part,
                                            domain=domain_str)
        x_rec = label2mask(x_rec, n=x_rec.size(1))
        x_rec = scale_image(rgb, x_rec, None, size=64)
        # Colorize src/ref label maps and inset the RGB guides.
        x_src = label2mask(x_src, n=x_src.size(1))
        x_ref = label2mask(x_ref, n=x_ref.size(1))
        x_src = scale_image(denorm(m_src), x_src, None, size=64)
        x_ref = scale_image(denorm(m_ref), x_ref, None, size=64)
    elif args.STYLE_SEMANTICS:
        # No RGB synthesis: colorize label maps and rescale to [-1, 1].
        x_src = ((label2mask(x_src, n=x_src.size(1)) - 0.5) * 2.).clamp_(-1, 1)
        x_ref = ((label2mask(x_ref, n=x_ref.size(1)) - 0.5) * 2.).clamp_(-1, 1)
        x_fake = ((label2mask(x_fake, n=x_fake.size(1)) - 0.5) * 2.).clamp_(
            -1, 1)
        x_rec = ((label2mask(x_rec, n=x_rec.size(1)) - 0.5) * 2.).clamp_(-1, 1)
    x_concat = [x_src, x_ref, x_fake, x_rec]
    x_concat = torch.cat(x_concat, dim=0)
    save_img(x_concat.cpu(), N, filename, denorm=not fill_rgb)
@torch.no_grad()
def translate_using_latent(nets,
                           args,
                           x_src,
                           y_trg_list,
                           z_trg_list,
                           psi,
                           filename,
                           multidomain=(),
                           mask=None,
                           translate_all=False,
                           fill_rgb=False):
    """Translate x_src with styles drawn from latent codes and save the
    grid (source row followed by one row per latent) to *filename*.

    :param psi: truncation factor toward the per-domain average style.
    NOTE(review): assumes mask (m_src/m_ref) is given whenever
    args.STYLE_SEMANTICS or fill_rgb is set -- confirm against callers.
    """
    if mask is not None:
        m_src, m_ref = mask
    N, C, H, W = x_src.size()
    latent_dim = z_trg_list[0].size(1)
    # Row 0 of the grid: the (possibly colorized) source images.
    if args.STYLE_SEMANTICS:
        x_src_show = ((label2mask(x_src, n=x_src.size(1)) - 0.5) * 2.).clamp_(
            -1, 1)
    else:
        x_src_show = x_src
    if fill_rgb:
        # attnet = to_cuda(AttNet(verbose=False))
        # masknet = to_cuda(MaskNet(verbose=False))
        smile_syn = to_cuda(SMILE(args.image_size, verbose=False))
        if args.STYLE_SEMANTICS:
            x_src_show = scale_image(denorm(m_src),
                                     denorm(x_src_show),
                                     None,
                                     size=64)
    x_concat = [x_src_show]
    if args.STYLE_SEMANTICS:
        fan = nets.FAN.get_heatmap(m_src) if args.FAN else None
    else:
        fan = nets.FAN.get_heatmap(x_src) if args.FAN else None
    y_src, domain = multidomain
    domain_str = args.domains[domain] if not translate_all else 'ALL'
    s_src = nets.S(x_src, y_src)
    for i, y_trg in enumerate(y_trg_list):
        # Estimate the average style of the target domain from 1000
        # random latents (used as the truncation anchor).
        z_many = torch.randn(y_trg.size(0) * 1000, latent_dim)
        z_many = to_cuda(z_many)
        y_many = y_trg.repeat(1000, 1)
        s_many = nets.F(z_many, y_many)
        s_avg = torch.mean(s_many, dim=0, keepdim=True)
        s_avg = s_avg[:, domain // 2].repeat(N, 1)
        for count, z_trg in enumerate(z_trg_list):
            s_trg = nets.F(z_trg, y_trg)
            if not translate_all:
                # Truncate toward the average style, then splice it into
                # the source style at the selected domain slot only.
                s_trg = torch.lerp(s_avg, s_trg[:, domain // 2], psi)
                _s_trg = s_src.clone()
                _s_trg[:, domain // 2] = s_trg
                s_trg = _s_trg
            # import ipdb; ipdb.set_trace()
            x_fake = nets.G(x_src, s_trg, fan=fan)
            if fill_rgb:
                x_fake_smile = scatterMask(
                    label2mask_plain(x_fake)[:, 0], x_fake.size(1))
                # x_fake_smile = ((x_fake_smile - 0.5) * 2.).clamp_(-1, 1)
                # import ipdb; ipdb.set_trace()
                # rgb = smile_syn.forward_from_tensor(x_fake_smile, style_random=True, random_seed=count, random_across_batch=True, domain=domain_str)
                sty_rec = smile_syn.model.encoder(m_src, x_src)
                rgb = smile_syn.forward_from_tensor(x_fake_smile,
                                                    style_random=True,
                                                    random_seed=count,
                                                    random_across_batch=True,
                                                    style_ref=sty_rec,
                                                    domain=domain_str)
                x_fake = label2mask(x_fake, n=x_fake.size(1))
                x_fake = scale_image(rgb, x_fake, None, size=64)
            elif args.STYLE_SEMANTICS:
                x_fake = ((label2mask(x_fake, n=x_fake.size(1)) - 0.5) *
                          2.).clamp_(-1, 1)
            x_concat += [x_fake]
    x_concat = torch.cat(x_concat, dim=0)
    save_img(x_concat.cpu(), N, filename, denorm=not fill_rgb)
    # import ipdb; ipdb.set_trace()
@torch.no_grad()
def translate_using_reference(nets,
args,
x_src,
x_ref,
y_ref,
filename,
multidomain=(),
mask=None,
translate_all=False,
fill_rgb=False):
if mask is not None:
m_src, m_ref = mask
if args.STYLE_SEMANTICS:
x_src_show = ((label2mask(x_src, n=x_src.size(1)) - 0.5) * 2.).clamp_(
-1, 1)
x_ref_show = ((label2mask(x_ref, n=x_ref.size(1)) - 0.5) * 2.).clamp_(
-1, 1)
else:
x_src_show = x_src
x_ref_show = x_ref
if fill_rgb:
# attnet = to_cuda(AttNet(verbose=translate_all))
# masknet = to_cuda(MaskNet(verbose=translate_all))
smile_syn = to_cuda(SMILE(args.image_size, verbose=translate_all))
if args.STYLE_SEMANTICS:
# _img = scale_image(denorm(img), label2mask(sem_input[0].unsqueeze(0)), None)
x_src_show = scale_image(denorm(m_src),
denorm(x_src_show),
None,
size=64)
x_ref_show = scale_image(denorm(m_ref),
denorm(x_ref_show),
None,
size=64)
N, C, H, W = x_src_show.size()
wb = to_cuda(torch.ones(1, C, H, W))
x_src_with_wb = torch.cat([wb, x_src_show], dim=0)
if args.STYLE_SEMANTICS:
fan = nets.FAN.get_heatmap(m_src) if args.FAN else None
else:
fan = nets.FAN.get_heatmap(x_src) if args.FAN else None
s_ref = nets.S(x_ref, y_ref)
s_ref_list = s_ref.unsqueeze(1).repeat(1, N, 1, 1)
if x_src.size(0) == 4:
x_concat = []
N -= 1
else:
x_concat = [x_src_with_wb]
y_src, domain = multidomain
domain_str = | |
"""PJ-Link projector driver supporting all PJ-Link compatible projectors over ethernet
"""
import enum
import logging
import sys
from socket import socket, create_connection
from cave.utils import merge_dicts, key_for_value
from cave.drivers.projector import ProjectorInterface, ProjectorPowerState
from cave.errors import (OutOfRangeError, DeviceNotReadyError,
BadCommandError, CommandFailureError)
# Maximum number of bytes to read per socket recv().
BUFF_SIZE = 512

# Module logger: everything (DEBUG+) goes to 'cave.log'; only ERROR and
# above is echoed to the console.
logger = logging.getLogger('PJLink')
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.ERROR)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
file_handler = logging.FileHandler('cave.log')
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
class PJLink(ProjectorInterface):
"""A PJLink projector driver based on the PJLink specs v 2.00, dated
2017-1-31 (latest I could find)
https://pjlink.jbmia.or.jp/english/data_cl2/PJLink_5-1.pdf.
For controlling PJLink-compatible projectors over ethernet only.
Class attributes:
----------------
_default_inputs dict[str, bytes]
Default mapping of input names to input codes obtained from the manual.
"""
_default_inputs = {
"RGB_1": b'11',
"RGB_2": b'12',
"RGB_3": b'13',
"VIDEO_1": b'21',
"VIDEO_2": b'22',
"VIDEO_3": b'23',
"DIGITAL_1": b'31',
"DIGITAL_2": b'32',
"DIGITAL_3": b'33',
"STORAGE_1": b'41',
"STORAGE_2": b'42',
"NETWORK": b'51'
}
    class Comms(ProjectorInterface.Comms):
        """Communication interface

        Holds the TCP socket plus the address/port needed to reopen it.
        """
        # socket connection
        connection = None
        ip_address = None
        port = 4352

        def send(self, data):
            # NOTE(review): silently returns None when no socket is open;
            # callers must ensure open_connection() succeeded first.
            if isinstance(self.connection, socket):
                return self.connection.send(data)

        def recv(self, size=BUFF_SIZE):
            # Same caveat as send(): returns None when not connected.
            if isinstance(self.connection, socket):
                return self.connection.recv(size)
    class PJLinkClass(enum.Enum):
        """PJLink class.
        Class 2 devices support extended functionality.
        """
        CLASS_1 = 1
        CLASS_2 = 2
    class Command(ProjectorInterface.Command):
        """Command strings.

        Each value is a complete PJLink class-1 request; queries end with
        ' ?' and every command is terminated by a carriage return (\\x0d).
        """
        # All commands are class 1
        # - parameterless commands
        POWER_ON = b'%1POWR 1\x0d'
        POWER_OFF = b'%1POWR 0\x0d'
        POWER_STATUS = b'%1POWR ?\x0d'
        INPUT_STATUS = b'%1INPT ?\x0d'
        INPUT_LIST = b'%1INST ?\x0d'
        LAMP_INFO = b'%1LAMP ?\x0d'
        GET_ERRORS = b'%1ERST ?\x0d'
        GET_CLASS = b'%1CLSS ?\x0d'
        GET_MUTED = b'%1AVMT ?\x0d'
        GET_MODEL = b'%1NAME ?\x0d'
        # - commands with parameters
        SWITCH_INPUT = b'%1INPT '  # + input number + \x0d
cmd_errors = {
b'ERR1': 'Unrecognized command',
b'ERR2': 'Parameter out of bounds',
b'ERR3': 'System unavailable',
b'ERR4': 'Failure to execute command'
}
power_state = {
0: 'Standby',
1: 'Power on',
2: 'Cooling',
3: 'Warming up'
}
input_types = {
b'1': 'RGB',
b'2': 'VIDEO',
b'3': 'DIGITAL',
b'4': 'STORAGE',
b'5': 'NETWORK'
}
# - 6 bytes - each byte represents a different type of error. 0 is false, 1 is true
error_codes = {
0: 'Fan error',
1: 'Lamp error',
2: 'Temperature error',
3: 'Lamp cover open',
4: 'Filter warning - clean filter',
5: 'Other (unknown) error'
}
mute_state = {
1: 'Video muted',
2: 'Audio muted',
3: 'All muted'
}
    def __init__(self, ip_address=None, port=4352, inputs: dict = None, input_default=None):
        """Constructor
        Create a PJLink projector driver instance and initialize a connection to the
        projector over TCP (default port 4352).
        :param str ip_address: IP address of the device
        :param int port: Port to connect to. Defaults to 4352.
        :param dict inputs: Custom mapping of input names to byte values.
            Mapping should be {str, bytes}. If None, a default mapping is used.
        :param str input_default: The default input (if any) to select after setup
        """
        self.comms = self.Comms()
        try:
            if ip_address is not None:
                # Probe the device once so a bad address/port fails fast;
                # the connection is reopened per command later.
                self.comms.connection = create_connection((ip_address, port))
                self.comms.ip_address = ip_address
                self.comms.port = port
                self.comms.connection.close()
                # get custom input mapping
                if inputs and isinstance(inputs, dict):
                    self.inputs = merge_dicts(inputs, self._default_inputs)
                else:
                    self.inputs = self._default_inputs
                self._input_default = input_default
            else:
                raise ValueError('no IP address specified')
        except Exception as e:
            logger.error('__init__(): Exception occurred: {}'.format(e.args), exc_info=True)
            # NOTE(review): terminating the whole process from a driver
            # constructor is drastic -- consider re-raising instead.
            sys.exit(1)
def __del__(self):
"""Destructor.
Ensure that if a socket interface was opened, it is closed whenever
we destroy this object
"""
if self.comms.connection:
self.comms.connection.close()
    def __cmd(self, cmd=Command.POWER_STATUS, *params):
        """Execute command
        Excutes a given command, optionally with parameters and returns any
        command output received.
        :param PJLink.Command cmd: The command to execute
        :param bytes params: Additional parameters to the command.
            In reality, this should only be a single bytes object representing
            an input terminal as select_input() is the only command with
            a parameter.
        :rtype: bytes
        :returns: The response sent back by the projector
        """
        cmd_bytes = cmd.value
        if len(params) > 0:
            # Parameterized commands (SWITCH_INPUT) lack the trailing CR
            # in their constant; append the parameters, then terminate.
            for p in params:
                if isinstance(p, bytes):
                    cmd_bytes += p
                elif isinstance(p, str):
                    cmd_bytes += p.encode()
            # all commands end with carriage return
            cmd_bytes += b'\x0d'
        try:
            self.open_connection()
            logger.debug('Sending: {}'.format(cmd_bytes.decode()))
            self.comms.send(cmd_bytes)
            # first thing returned is always some junk
            # ("%1PJLINK" followed by 0 or 1 depending on whether authentication is enabled)
            junk_data = self.comms.recv(BUFF_SIZE)
            result = self.comms.recv(BUFF_SIZE)
            logger.debug('Received: {}'.format(result.decode()))
            # close the connection after each command
            self.comms.connection.close()
            if result:
                # Check for potential errors and throw appropriate exceptions for them
                if b'ERR1' in result:
                    raise BadCommandError('Error 1: Unrecognized command: {}'.format(cmd_bytes))
                elif b'ERR2' in result:
                    raise OutOfRangeError('Error 2: Parameter out of range: {}'.format(params))
                elif b'ERR3' in result:
                    raise DeviceNotReadyError('Error: Device unavailable. Is it powered on?')
                elif b'ERR4' in result:
                    # Be more specific about this error: If we were attempting to switch inputs,
                    # it may be that the unit has not fully powered on yet and is still warming up.
                    if cmd == self.Command.SWITCH_INPUT:
                        raise CommandFailureError(
                            "Error: Can't switch input at this time. "
                            "Wait until projector is fully powered on. "
                            "(Is the light on?)"
                        )
                    else:
                        raise CommandFailureError(
                            'Error: Unable to execute command. Is the projector powered on?'
                        )
                return result
        except OSError as ose:
            # An OSError implies a serious problem: communication is broken.
            logger.error('__cmd(): Exception occurred: {}'.format(ose.args), exc_info=True)
            # Propagate all exceptions upward and let the application decide what to do with them.
            raise ose
        except Exception as e:
            raise e
        finally:
            # Always release the socket, even on the error paths above.
            self.close_connection()
def open_connection(self):
if self.comms.ip_address is not None:
self.comms.connection = create_connection(
(self.comms.ip_address, self.comms.port)
)
def close_connection(self):
if self.comms.connection:
self.comms.connection.close()
def get_pjlink_class(self):
"""Get what PJLink class this device supports
:rtype: PJLink.PJLinkClass
"""
try:
result = self.__cmd(cmd=self.Command.GET_CLASS)
except Exception as e:
logger.error('get_pjlink_class(): Exception occurred: {}'.format(e.args))
raise e
else:
data = int(result[7:].rstrip())
return self.PJLinkClass(data)
def power_on(self):
"""Power on the projector
:rtype: bool
:returns: True if successful
"""
try:
result = self.__cmd(cmd=self.Command.POWER_ON)
except Exception as e:
logger.error('power_on(): Exception occurred: {}'.format(e.args))
raise e
else:
return True
def power_off(self):
"""Power off the projector
:rtype: bool
:returns: True if successful
"""
try:
result = self.__cmd(cmd=self.Command.POWER_OFF)
except Exception as e:
logger.error('power_off(): Exception occurred: {}'.format(e.args))
raise e
else:
return True
def get_power_status(self) -> ProjectorPowerState:
"""Get the power state of the projector.
:rtype: ProjectorPowerState
:returns: ProjectorPowerState enum member
"""
try:
result = self.__cmd(cmd=self.Command.POWER_STATUS)
except Exception as e:
logger.error('Exception: {}'.format(e.args))
raise e
else:
# result is '%1POWR=0|1|2|3' for 0=Off, 1=On, 2=Cooling, 3=Warming up
data = int(result[7:].rstrip())
state = self.power_state.get(data, "unknown").casefold()
if "standby" in state:
return ProjectorPowerState.STANDBY
elif "on" in state:
return ProjectorPowerState.ON
elif "cooling" in state:
return ProjectorPowerState.COOLING
elif "warming" in state:
return ProjectorPowerState.WARMING
else:
return ProjectorPowerState.UNKNOWN
@property
def power_status(self):
    """Convenience read-only property wrapping :meth:`get_power_status`."""
    return self.get_power_status()
def power_toggle(self):
    """Toggle the power on/off

    :rtype: bool
    :returns: True on success, False on failure such as the projector
        being in a cooldown or warmup cycle, or its state being
        undeterminable.
    """
    try:
        power_status = self.get_power_status()
        # get_power_status() always returns a ProjectorPowerState member,
        # so no None check is needed here.
        if power_status == ProjectorPowerState.ON:
            return self.power_off()
        elif power_status == ProjectorPowerState.STANDBY:
            return self.power_on()
        elif power_status == ProjectorPowerState.UNKNOWN:
            logger.warning('Check connection to projector. Unable to determine power state.')
            # Previously fell through and returned None implicitly; return an
            # explicit (still falsy) False to honor the documented bool contract.
            return False
        else:
            # status is cooling down or warming up, ignore this request
            return False
    except Exception as e:
        logger.error('Exception: {}'.format(e.args))
        raise e
def get_input_status(self):
    """Get the current input terminal

    :rtype: str
    :returns: Name of the current input terminal shown. Name will
        be the one provided in the configuration if present there, otherwise
        it will be the driver default name for the input terminal.
    """
    try:
        result = self.__cmd(cmd=self.Command.INPUT_STATUS)
    except Exception as e:
        logger.error('get_input_status(): Exception occurred: {}'.format(e.args))
        raise e
    # result is b'%1INPT=##\r' where ## is the input terminal
    terminal = result[7:].rstrip()
    if terminal in self.inputs.values():
        return key_for_value(self.inputs, terminal)
    # Unrecognised terminal: falls through and returns None (original behavior).
@property
def input_status(self):
    """Convenience read-only property wrapping :meth:`get_input_status`."""
    return self.get_input_status()
def select_input(self, input_name):
    """Switch to an input terminal

    :param str input_name: The name of the input to select.
    :rtype: str
    :returns: Name of input selected if successful. Name will be the one
        provided in the configuration if present there, otherwise it will
        be the driver default name for the input terminal.
    """
    try:
        if input_name not in self.inputs:
            raise KeyError("Error: No input named '{}'".format(input_name))
        terminal = self.inputs[input_name]
        self.__cmd(self.Command.SWITCH_INPUT, terminal)
    except Exception as e:
        logger.error('select_input(): Exception occurred: {}'.format(e.args))
        raise e
    return key_for_value(self.inputs, terminal)
def get_lamp_info(self):
"""Get the lamp hours used.
:rtype: dict[str, int] | dict[str, list[int]]
:returns: A dictionary containing a "usage" key mapped | |
<gh_stars>0
# encoding: utf-8
# module Rhino.Input calls itself Input
# from RhinoCommon,Version=5.1.30000.16,Culture=neutral,PublicKeyToken=552281e97c755530
# by generator 1.145
""" NamespaceTracker represent a CLS namespace. """
# no imports
# no functions
# classes
class GetBoxMode(Enum, IComparable, IFormattable, IConvertible):
    """
    Enumerates all Box getter modes.
    enum GetBoxMode,values: All (0),Center (4),Corner (1),ThreePoint (2),Vertical (3)
    """
    # NOTE(review): auto-generated IronPython stub for a .NET enum (see the
    # "by generator" header above); the real implementation lives in the
    # RhinoCommon assembly, so every method body here is a placeholder.
    def __eq__(self, *args):
        """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
        pass
    def __format__(self, *args):
        """ __format__(formattable: IFormattable,format: str) -> str """
        pass
    def __ge__(self, *args):
        pass
    def __gt__(self, *args):
        pass
    def __init__(self, *args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    def __le__(self, *args):
        pass
    def __lt__(self, *args):
        pass
    def __ne__(self, *args):
        pass
    def __reduce_ex__(self, *args):
        pass
    def __str__(self, *args):
        pass
    # Enum members; the generator emits None placeholders — presumably the
    # runtime supplies the real values listed in the class docstring.
    All = None
    Center = None
    Corner = None
    ThreePoint = None
    value__ = None
    Vertical = None
class GetResult(Enum, IComparable, IFormattable, IConvertible):
    """
    Possible results from GetObject.Get(),GetPoint.Get(),etc...
    enum GetResult,values: Angle (20),Cancel (1),Circle (16),Color (5),CustomMessage (14),Cylinder (18),Direction (22),Distance (21),ExitRhino (268435455),Frame (23),Line2d (10),Miss (7),NoResult (0),Nothing (2),Number (4),Object (12),Option (3),Plane (17),Point (8),Point2d (9),Rectangle2d (11),Sphere (19),String (13),Timeout (15),Undo (6),User1 (4294967295),User2 (4294967294),User3 (4294967293),User4 (4294967292),User5 (4294967291)
    """
    # NOTE(review): auto-generated IronPython stub for a .NET enum; method
    # bodies are placeholders, the real implementation is in RhinoCommon.
    def __eq__(self, *args):
        """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
        pass
    def __format__(self, *args):
        """ __format__(formattable: IFormattable,format: str) -> str """
        pass
    def __ge__(self, *args):
        pass
    def __gt__(self, *args):
        pass
    def __init__(self, *args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    def __le__(self, *args):
        pass
    def __lt__(self, *args):
        pass
    def __ne__(self, *args):
        pass
    def __reduce_ex__(self, *args):
        pass
    def __str__(self, *args):
        pass
    # Enum members; the generator emits None placeholders — presumably the
    # runtime supplies the real values listed in the class docstring.
    Angle = None
    Cancel = None
    Circle = None
    Color = None
    CustomMessage = None
    Cylinder = None
    Direction = None
    Distance = None
    ExitRhino = None
    Frame = None
    Line2d = None
    Miss = None
    NoResult = None
    Nothing = None
    Number = None
    Object = None
    Option = None
    Plane = None
    Point = None
    Point2d = None
    Rectangle2d = None
    Sphere = None
    String = None
    Timeout = None
    Undo = None
    User1 = None
    User2 = None
    User3 = None
    User4 = None
    User5 = None
    value__ = None
class RhinoGet(object):
"""
Base class for GetObject,GetPoint,GetSphere,etc.
You will never directly create a RhinoGet but you will use its member
functions after calling GetObject::GetObjects(),GetPoint::GetPoint(),and so on.
Provides tools to set command prompt,set command options,and specify
if the "get" can optionally accept numbers,nothing (pressing enter),
and undo.
"""
@staticmethod
def Get2dRectangle(solidPen, rectangle, rectView):
    """
    Get2dRectangle(solidPen: bool) -> (Result,Rectangle,RhinoView)
    Gets a rectangle in view window coordinates.
    solidPen: If true,a solid pen is used for drawing while the user selects a rectangle.
    If
    false,a dotted pen is used for drawing while the user selects a rectangle.
    Returns: Success or Cancel.
    """
    # Generated stub: implementation provided by the RhinoCommon .NET runtime.
    pass
@staticmethod
def GetAngle(
    commandPrompt, basePoint, referencePoint, defaultAngleRadians, angleRadians
):
    """
    GetAngle(commandPrompt: str,basePoint: Point3d,referencePoint: Point3d,defaultAngleRadians: float) -> (Result,float)
    Allows user to interactively pick an angle
    commandPrompt: if null,a default prompt will be displayed
    """
    # Generated stub: implementation provided by the RhinoCommon .NET runtime.
    pass
@staticmethod
def GetArc(arc):
    """ GetArc() -> (Result,Arc) """
    # Generated stub: implementation provided by the RhinoCommon .NET runtime.
    pass
@staticmethod
def GetBool(prompt, acceptNothing, offPrompt, onPrompt, boolValue):
    """
    GetBool(prompt: str,acceptNothing: bool,offPrompt: str,onPrompt: str,boolValue: bool) -> (Result,bool)
    Easy to use bool getter.
    prompt: Command prompt.
    acceptNothing: If true,the user can press enter.
    offPrompt: The 'false/off' message.
    onPrompt: The 'true/on' message.
    boolValue: Default bool value set to this and returned here.
    Returns: The getter result based on user choice.
    Commands.Result.Success - got
    value.Commands.Result.Nothing - user pressed enter.Commands.Result.Cancel - user cancelled value
    getting.
    """
    # Generated stub: implementation provided by the RhinoCommon .NET runtime.
    pass
@staticmethod
def GetBox(
    box, mode=None, basePoint=None, prompt1=None, prompt2=None, prompt3=None
):
    """
    GetBox(mode: GetBoxMode,basePoint: Point3d,prompt1: str,prompt2: str,prompt3: str) -> (Result,Box)
    Asks the user to select a Box in the viewport.
    mode: A particular "get box" mode,or Rhino.Input.GetBoxMode.All.
    basePoint: Optional base point. Supply Point3d.Unset if you don't want to use this.
    prompt1: Optional first prompt. Supply null to use the default prompt.
    prompt2: Optional second prompt. Supply null to use the default prompt.
    prompt3: Optional third prompt. Supply null to use the default prompt.
    Returns: Commands.Result.Success if successful.
    GetBox() -> (Result,Box)
    Asks the user to select a Box in the viewport.
    Returns: Commands.Result.Success if successful.
    """
    # Generated stub: implementation provided by the RhinoCommon .NET runtime.
    pass
@staticmethod
def GetCircle(circle):
    """ GetCircle() -> (Result,Circle) """
    # Generated stub: implementation provided by the RhinoCommon .NET runtime.
    pass
@staticmethod
def GetColor(prompt, acceptNothing, color):
    """
    GetColor(prompt: str,acceptNothing: bool,color: Color) -> (Result,Color)
    Easy to use color getter.
    prompt: Command prompt.
    acceptNothing: If true,the user can press enter.
    color: Color value returned here. also used as default color.
    Returns: Commands.Result.Success - got color.Commands.Result.Nothing - user pressed
    enter.Commands.Result.Cancel - user cancel color getting.
    """
    # Generated stub: implementation provided by the RhinoCommon .NET runtime.
    pass
@staticmethod
def GetFileName(mode, defaultName, title, parent):
    """ GetFileName(mode: GetFileNameMode,defaultName: str,title: str,parent: IWin32Window) -> str """
    # Generated stub: implementation provided by the RhinoCommon .NET runtime.
    pass
@staticmethod
def GetFileNameScripted(mode, defaultName):
    """ GetFileNameScripted(mode: GetFileNameMode,defaultName: str) -> str """
    # Generated stub: implementation provided by the RhinoCommon .NET runtime.
    pass
@staticmethod
def GetGrip(grip, prompt):
    """ GetGrip(prompt: str) -> (Result,GripObject) """
    # Generated stub: implementation provided by the RhinoCommon .NET runtime.
    pass
@staticmethod
def GetGrips(grips, prompt):
    """ GetGrips(prompt: str) -> (Result,Array[GripObject]) """
    # Generated stub: implementation provided by the RhinoCommon .NET runtime.
    pass
@staticmethod
def GetHelix(helix):
    """ GetHelix() -> (Result,NurbsCurve) """
    # Generated stub: implementation provided by the RhinoCommon .NET runtime.
    pass
@staticmethod
def GetInteger(
    prompt, acceptNothing, outputNumber, lowerLimit=None, upperLimit=None
):
    """
    GetInteger(prompt: str,acceptNothing: bool,outputNumber: int,lowerLimit: int,upperLimit: int) -> (Result,int)
    Easy to use number getter.
    prompt: The command prompt.
    acceptNothing: If true,the user can press enter.
    outputNumber: default number is set to this value and number value returned here.
    lowerLimit: The minimum allowed value.
    upperLimit: The maximum allowed value.
    Returns: Commands.Result.Success - got number
    Commands.Result.Nothing - user pressed enter
    Commands.Result.Cancel - user cancel number getting.
    GetInteger(prompt: str,acceptNothing: bool,outputNumber: int) -> (Result,int)
    Easy to use number getter.
    prompt: command prompt.
    acceptNothing: if true,the user can press enter.
    outputNumber: default number is set to this value and number value returned here.
    Returns: Commands.Result.Success - got number
    Commands.Result.Nothing - user pressed enter
    Commands.Result.Cancel - user cancel number getting.
    """
    # Generated stub: implementation provided by the RhinoCommon .NET runtime.
    pass
@staticmethod
def GetLine(line):
    """ GetLine() -> (Result,Line) """
    # Generated stub: implementation provided by the RhinoCommon .NET runtime.
    pass
@staticmethod
def GetLinearDimension(dimension):
    """ GetLinearDimension() -> (Result,LinearDimension) """
    # Generated stub: implementation provided by the RhinoCommon .NET runtime.
    pass
@staticmethod
def GetMultipleObjects(prompt, acceptNothing, filter, rhObjects):
    """
    GetMultipleObjects(prompt: str,acceptNothing: bool,filter: GetObjectGeometryFilter) -> (Result,Array[ObjRef])
    Easy to use object getter for getting multiple objects.
    prompt: command prompt.
    acceptNothing: if true,the user can press enter.
    filter: geometry filter to use when getting objects.
    Returns: Commands.Result.Success - got object
    Commands.Result.Nothing - user pressed enter
    Commands.Result.Cancel - user cancel object getting.
    GetMultipleObjects(prompt: str,acceptNothing: bool,filter: ObjectType) -> (Result,Array[ObjRef])
    Easy to use object getter for getting multiple objects.
    prompt: command prompt.
    acceptNothing: if true,the user can press enter.
    filter: geometry filter to use when getting objects.
    Returns: Commands.Result.Success - got object
    Commands.Result.Nothing - user pressed enter
    Commands.Result.Cancel - user cancel object getting.
    """
    # Generated stub: implementation provided by the RhinoCommon .NET runtime.
    pass
@staticmethod
def GetNumber(
    prompt, acceptNothing, outputNumber, lowerLimit=None, upperLimit=None
):
    """
    GetNumber(prompt: str,acceptNothing: bool,outputNumber: float) -> (Result,float)
    Easy to use number getter.
    prompt: command prompt.
    acceptNothing: if true,the user can press enter.
    outputNumber: default number is set to this value and number value returned here.
    Returns: Commands.Result.Success - got number
    Commands.Result.Nothing - user pressed enter
    Commands.Result.Cancel - user cancel number getting.
    GetNumber(prompt: str,acceptNothing: bool,outputNumber: float,lowerLimit: float,upperLimit: float) -> (Result,float)
    Easy to use number getter.
    prompt: The command prompt.
    acceptNothing: If true,the user can press Enter.
    outputNumber: Default number is set to this value and the return number value is assigned to this variable
    during the call.
    lowerLimit: The minimum allowed value.
    upperLimit: The maximum allowed value.
    Returns: Commands.Result.Success - got number.Commands.Result.Nothing - user pressed
    enter.Commands.Result.Cancel - user cancel number getting.
    """
    # Generated stub: implementation provided by the RhinoCommon .NET runtime.
    pass
@staticmethod
def GetOneObject(prompt, acceptNothing, filter, *__args):
"""
GetOneObject(prompt: str,acceptNothing: bool,filter: GetObjectGeometryFilter) -> (Result,ObjRef)
Easy to use object getter.
prompt: command prompt.
acceptNothing: if true,the user can press enter.
filter: geometry filter to use when getting objects.
Returns: Commands.Result.Success - got object
Commands.Result.Nothing - user pressed enter
Commands.Result.Cancel - user cancel object getting.
GetOneObject(prompt: str,acceptNothing: bool,filter: ObjectType) -> (Result,ObjRef)
Easy to use | |
Sample Details will become a Description"\
"\n4) Selected Documents will become TextSources"\
"\n4a) Any Document content will become a plain text file"\
"\n4b) Any Document notes will become Descriptions"\
"\n5) Codes will becoming Codes"\
"\n5a) Quotations will not be included"\
"\n5b) Any Code notes will become Descriptions"\
"\n5c) Selected Documents of a code will become PlainTextSelections that cover the full Documents"\
"\n5d) Selected Lines of Documents of a code will become PlainTextSelections that cover the selected text"\
"\n6) Themes will become Sets"\
"\n6a) All Themes will lose hierarchy information and be treated as independent"\
"\n6b) Quotations will not be included"\
"\n6c) Any Theme notes will become Descriptions"\
"\n6d) Code References will become MemberCodes"
EXPORT_PROJECT_SUCCESS = "Workspace was successfully exported as an REFI-QDA Project."
EXPORT_PROJECT_ERROR_NO_DATA = "Workspace has no data to export as a Project."
EXPORT_PROJECT_ERROR_XML = "XML Error Occured when checking created project."
EXPORT_PROJECT_ERROR_IO = "Cannot save specified project file.\nPlease check that you have write access to directory and if replacing a file it is not locked."
#About dialog labels
ABOUT_LABEL = ABOUT
ABOUT_VERSION_LABEL = "Version: "
ABOUT_OSF_LABEL = "OSF link"
ABOUT_OSF_URL = "https://osf.io/b72dm/"
ABOUT_GITHUB_LABEL = "Github link"
ABOUT_GITHUB_URL = "https://github.com/rpgauthier/ComputationalThematicAnalysisToolkit"
#new version dialog labels
NEW_VERSION_LABEL = "New Version Available"
CURRENT_VERSION_LABEL = "The toolkit you are using is Version "
LATEST_VERSION_LABEL = " is avaliable for download."
APP_INSTRUCTIONS = "To upgrade please perform the following steps:"
APP_INSTRUCTION1 = "1) Download the approriate installer from "
LATEST_RELEASE_LABEL = "Latest Release"
LATEST_RELEASE_URL = "https://github.com/rpgauthier/ComputationalThematicAnalysisToolkit/releases/latest"
APP_INSTRUCTION2 = "2) Close this application"
APP_INSTRUCTION3 = "3) Run the downloaded installer"
WORKSPACE_INSTRUCTIONS = "Once installed any workspace you load will be automatically upgraded."
#Module Labels
GENERAL_LABEL = "General"
COLLECTION_LABEL = "Data Collection"
FILTERING_LABEL = "Data Cleaning & Filtering"
FILTERING_MENU_LABEL = "Data Cleaning && Filtering"
SAMPLING_LABEL = "Modelling & Sampling"
SAMPLING_MENU_LABEL = "Modelling && Sampling"
CODING_LABEL = "Coding"
REVIEWING_LABEL = "Reviewing"
REPORTING_LABEL = "Reporting"
NOTES_LABEL = "Notes"
TWITTER_LABEL = "Twitter"
MULTIPROCESSING_LABEL = "Multiprocessing Options"
MAXIMUM_POOL_SIZE_LABEL = "Maximum Pool Size"
class Datasets(Common):
#common
DESCRIPTION = "Description"
DOCUMENT_NUM = "# of Documents"
RETRIEVED_ON = "Retrieved On"
IMPORTED_ON = "Imported On"
PREPARED_ON = "Prepared On"
LANGUAGE = "Language"
UTC = "UTC"
CUSTOMIZE_LABEL_FIELDS = "Customize Label Fields"
CUSTOMIZE_COMPUTATIONAL_FIELDS = "Customize Computational Fields"
DATASET_DELETE_TOOLTIP = "Delete this dataset from the workspace"
REFRESHING_DATASETS_BUSY_STEP = "Refreshing Data for Dataset: "
#Common Dialog
DATASET_CONFIGURATIONS = "Dataset Configurations"
NAME_TOOLTIP = "Choose a unique name for the new dataset"
NAME_EXISTS_ERROR = "Name must be unique"
TYPE_ERROR = "Please select a Dataset Type"
DATA_CONSTRAINTS = "Data Constraints"
START_DATE = "Start Date"
#START_DATETIME = "Start Date & Time"
START_DATE_TOOLTIP = "Needs to be less than of equal to End Date"
END_DATE = "End Date"
#END_DATETIME = "End Date & Time"
END_DATE_TOOLTIP = "Needs to be greater than of equal to Start Date"
DATE_ERROR = "Start Date needs to be before End Date"
SPECIAL_DATA_FIELDS = "Special Data Fields"
ETHICAL_CONSIDERATIONS = "Ethical Considerations"
LABEL_FIELDS = "Label Fields"
LABEL_FIELDS_TOOLTIP = "Choose additional fields you need to use when identifying and interpreting the data."\
"\nIf a field occurs multiple times for the same id, the first occurance will be used."
COMBINED_LABEL_FIELDS = "Combined Label Fields"
COMBINED_LABEL_FIELDS_TOOLTIP = "Choose additional fields you need to use when identifying and interpreting the data."\
"\nIf a field occurs multiple times for the same id, it's content's will be concatinated."
COMPUTATIONAL_FIELDS = "Computational Fields"
COMPUTATIONAL_FIELDS_TOOLTIP = "Choose fields you want computational methods to use when identifing samples of interest from the data."\
"\nIf a field occurs multiple times for the same id, the first occurance will be used."
COMBINED_COMPUTATIONAL_FIELDS = "Combined Computational Fields"
COMBINED_COMPUTATIONAL_FIELDS_TOOLTIP = "Choose fields you want machine learning to use when identifing samples of interest from the data."\
"\nIf a field occurs multiple times for the same id, it's content's will be concatinated."
#Reddit Specific Dialog
REDDIT_RETRIEVE_LABEL = "Retrieve New Reddit Dataset"
REDDIT_RETRIEVED_LABEL = "Retrieved Reddit Dataset Details"
REDDIT_LABEL = "Reddit"
REDDIT_SUBREDDIT = "www.reddit.com/r/"
REDDIT_SUBREDDIT_TOOLTIP = "Exact case-sensitive spelling of the subreddit for retrieval."\
"\nIf you require multiple subreddits in the same dataset then seperate the subreddit names by comma."
REDDIT_SUBREDDIT_MISSING_ERROR = "Please enter a Subreddit."
REDDIT_SEARCH_BY = "Search by"
REDDIT_CONTAINS_TEXT = "Contains Text"
REDDIT_DISCUSSIONS = "Discussions"
REDDIT_DISCUSSIONS_TOOLTIP = "Will group any Reddit submissions and/or comments retrieved into discussions"
REDDIT_SUBMISSIONS = "Submissions"
REDDIT_SUBMISSIONS_TOOLTIP = "Will retrieve any Reddit submissions between the start and end dates"
REDDIT_COMMENTS = "Comments"
REDDIT_COMMENTS_TOOLTIP = "Will retrieve any Reddit comments between the start and end dates"
REDDIT_SUBMISSIONS_NUM = "# of Submissions"
REDDIT_COMMENTS_NUM = "# of Comments"
REDDIT_ARCHIVED_TOOLTIP = "Use the local subreddit archive to create the dataset."
REDDIT_ARCHIVED = "Local Subreddit Archive"
REDDIT_UPDATE_PUSHSHIFT = "Local Subreddit Archive updated using Pushshift.io"
REDDIT_UPDATE_PUSHSHIFT_TOOLTIP = "For any part of the period between the start and end dates that the local subreddit archive does not have data,"\
"update the archive using pushshift.io API"\
"\nThen use the archive to create the dataset."\
"\nWARNING: This operation may take between several minutes to hours depending on sizze of existing local subreddit archive"
REDDIT_FULL_PUSHSHIFT = "Full retrieval from Pushshift.io"
REDDIT_FULL_PUSHSHIFT_TOOLTIP = "Remove any existing local subreddit archive."\
"Then retrieve a new archive from pushshift.io API for the period between the start and end dates."\
"Then use the archive to create the dataset"\
"\nWARNING: This operation is a slow and may take several hours"
REDDIT_UPDATE_REDDITAPI = "Local Subreddit Archive and updated using Pushshift.io and Reddit API"
REDDIT_UPDATE_REDDITAPI_TOOLTIP = "For any part of the period between the start and end dates that the local subreddit archive does not have data,"\
"update the archive using pushshift.io API"\
"Then update the local subreddit archive for the period between the start and end dates using the Reddit API."\
"Then use the updated archive to create the dataset"\
"\nWARNING: This operation is slow and may take several hours"
REDDIT_FULL_REDDITAPI = "Full retrieved from Pushshift.io and updated using Reddit API"
REDDIT_FULL_REDDITAPI_TOOLTIP = "Remove any existing local subreddit archive."\
"Then retrieve a new archive from pushshift.io API for the period between the start and end dates."\
"Then update the archive for the period between the start and end dates using the Reddit API."\
"Then use the updated archive to create the dataset"\
"\nWARNING: This operation is slow and may take several hours"
#Twitter Specific Dialog
TWITTER_RETRIEVE_LABEL = "Retrieve New Twitter Dataset"
TWITTER_RETRIEVED_LABEL = "Retrieved Twitter Dataset Details"
TWITTER_LABEL = "Twitter"
CONSUMER_KEY_TOOLTIP = "The API key of a project created in the Twitter Developer portal. Do not include quotes."
CONSUMER_SECRET_TOOLTIP = "The API secret of a project created in the Twitter Developer portal. Do not include quotes."
TWITTER_TWEETS = "Tweets"
TWITTER_TWEETS_NUM = "# of Tweets"
TWITTER_QUERY = "Query"
TWITTER_QUERY_HYPERLINK = "https://developer.twitter.com/en/docs/twitter-api/v1/tweets/search/guides/standard-operators"
TWITTER_QUERY_TOOLTIP = "Query needs to follow these rules: " + TWITTER_QUERY_HYPERLINK
TWITTER_QUERY_RADIOBUTTON_TOOLTIP = "Use a Twitter query to create the dataset, using these rules: " + TWITTER_QUERY_HYPERLINK
TWITTER_QUERY_MISSING_ERROR = "You need to enter a query."
TWITTER_QUERY_PLACEHOLDER = "ex. life OR technology from:google"
TWITTER_TWEET_ATTRIBUTES = "Tweet Attributes"
TWITTER_TWEET_ATTRIBUTES_RADIOBUTTON_TOOLTIP = "Use specific tweet attributes to create the dataset."
TWITTER_KEYWORDS = "Keywords"
TWITTER_KEYWORDS_PLACEHOLDER = "ex. COVID vaccine, health, safety"
TWITTER_HASHTAGS = "Hashtags"
TWITTER_HASHTAGS_PLACEHOLDER = "ex. #toronto, #raptors"
TWITTER_ACCOUNTS = "Accounts"
TWITTER_ACCOUNT_PLACEHOLDER = "ex. JustinTrudeau"
#CSV Specific Dialog
CSV_RETRIEVE_LABEL = "Retrieve New CSV Dataset"
CSV_RETRIEVED_LABEL = "Retrieved CSV Dataset Details"
CSV_LABEL = "CSV"
CSV_ROWS_NUM = "# of Rows"
CSV_DATASETFIELD = "Dataset Field"
CSV_DATASETFIELD_TOOLTIP = "If the csv file contains data from a single source leave this field blank."\
"If the csv file contains multiple sources please choose a field differentiates those sources.\n"\
"This is important if dealing with multiple languages as different processing may be required."
CSV_IDFIELD = "Id Field"
CSV_IDFIELD_TOOLTIP = "Choose a field to use as id to documents.\n"\
"If id is unique then every row will be treated as a document.\n"\
"If id is not unique, rows with the same id will be | |
Get random bucket data.
:param buckets: The list of buckets.
:param min_count: The minimum number of samples that will be sampled if no exact count is given.
:param max_count: The maximum number of samples that will be sampled if no exact count is given.
:param bucket_counts: For each bucket an optional exact example count can be given. If it is not given it will be
sampled.
:return: The random source, target and label arrays.
"""
if bucket_counts is None:
bucket_counts = [None for _ in buckets]
bucket_counts = [random.randint(min_count, max_count) if given_count is None else given_count
for given_count in bucket_counts]
source = [mx.nd.array(np.random.randint(0, 10, (count, random.randint(1, bucket[0]), 1))) for count, bucket in
zip(bucket_counts, buckets)]
target = [mx.nd.array(np.random.randint(0, 10, (count, random.randint(2, bucket[1]), 1))) for count, bucket in
zip(bucket_counts, buckets)]
return source, target
def test_parallel_data_set():
    """Round-trip a ParallelDataSet through save()/load() and compare contents."""
    buckets = data_io.define_parallel_buckets(100, 100, 10, 1, 1.0)
    source, target = _get_random_bucketed_data(buckets, min_count=0, max_count=5)

    def assert_arrays_equal(lhs, rhs):
        assert len(lhs) == len(rhs)
        for left, right in zip(lhs, rhs):
            assert np.array_equal(left.asnumpy(), right.asnumpy())

    with TemporaryDirectory() as work_dir:
        dataset = data_io.ParallelDataSet(source, target)
        path = os.path.join(work_dir, 'dataset')
        dataset.save(path)
        reloaded = data_io.ParallelDataSet.load(path)
        assert_arrays_equal(dataset.source, reloaded.source)
        assert_arrays_equal(dataset.target, reloaded.target)
def test_parallel_data_set_fill_up():
    """fill_up() must pad every bucket up to its configured bucket batch size."""
    batch_size = 32
    buckets = data_io.define_parallel_buckets(100, 100, 10, 1, 1.0)
    bucket_batch_sizes = data_io.define_bucket_batch_sizes(
        buckets,
        batch_size,
        batch_type=C.BATCH_TYPE_SENTENCE,
        batch_num_devices=1,
        data_target_average_len=[None] * len(buckets))
    dataset = data_io.ParallelDataSet(*_get_random_bucketed_data(buckets, min_count=1, max_count=5))
    filled = dataset.fill_up(bucket_batch_sizes)
    # Filling up changes bucket contents, never the number of buckets.
    assert len(filled.source) == len(dataset.source)
    assert len(filled.target) == len(dataset.target)
    for idx in range(len(dataset)):
        expected_size = bucket_batch_sizes[idx].batch_size
        assert filled.source[idx].shape[0] == expected_size
        assert filled.target[idx].shape[0] == expected_size
def test_get_permutations():
    """get_permutations() must return, per bucket, a permutation and its inverse.

    Empty buckets are expected to get a single-element placeholder permutation.
    """
    data = [list(range(3)), list(range(1)), list(range(7)), []]
    bucket_counts = [len(d) for d in data]
    permutation, inverse_permutation = data_io.get_permutations(bucket_counts)
    assert len(permutation) == len(inverse_permutation) == len(bucket_counts) == len(data)
    for d, p, pi in zip(data, permutation, inverse_permutation):
        # np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
        # int is the documented replacement and keeps the original intent.
        p = p.asnumpy().astype(int)
        pi = pi.asnumpy().astype(int)
        # Each permutation must contain unique indices, and both directions
        # must cover the same index set.
        p_set = set(p)
        pi_set = set(pi)
        assert len(p_set) == len(p)
        assert len(pi_set) == len(pi)
        assert p_set - pi_set == set()
        if d:
            # Applying the permutation then its inverse restores the data.
            d = np.array(d)
            assert (d[p][pi] == d).all()
        else:
            assert len(p_set) == 1
def test_parallel_data_set_permute():
    """Applying a permutation and then its inverse must restore the dataset."""
    batch_size = 5
    buckets = data_io.define_parallel_buckets(100, 100, 10, True, 1.0)
    bucket_batch_sizes = data_io.define_bucket_batch_sizes(
        buckets,
        batch_size,
        batch_type=C.BATCH_TYPE_SENTENCE,
        batch_num_devices=1,
        data_target_average_len=[None] * len(buckets))
    dataset = data_io.ParallelDataSet(
        *_get_random_bucketed_data(buckets, min_count=0, max_count=5)).fill_up(bucket_batch_sizes)
    permutations, inverse_permutations = data_io.get_permutations(dataset.get_bucket_counts())
    assert len(permutations) == len(inverse_permutations) == len(dataset)
    restored = dataset.permute(permutations).permute(inverse_permutations)
    assert len(dataset) == len(restored)
    for idx in range(len(dataset)):
        if dataset.source[idx].shape[0]:
            # Non-empty bucket: contents must be bitwise identical after round trip.
            assert (dataset.source[idx] == restored.source[idx]).asnumpy().all()
            assert (dataset.target[idx] == restored.target[idx]).asnumpy().all()
        else:
            # Empty bucket: must stay empty.
            assert not restored.source[idx]
            assert not restored.target[idx]
def test_get_batch_indices():
    """get_batch_indices() must yield valid (bucket, start) pairs and, after
    fill-up, produce indices covering every bucket."""
    max_bucket_size = 50
    batch_size = 10
    buckets = data_io.define_parallel_buckets(100, 100, 10, True, 1.0)
    bucket_batch_sizes = data_io.define_bucket_batch_sizes(
        buckets,
        batch_size,
        batch_type=C.BATCH_TYPE_SENTENCE,
        batch_num_devices=1,
        data_target_average_len=[None] * len(buckets))
    dataset = data_io.ParallelDataSet(
        *_get_random_bucketed_data(buckets=buckets, min_count=1, max_count=max_bucket_size))
    indices = data_io.get_batch_indices(dataset, bucket_batch_sizes=bucket_batch_sizes)
    # Every pair must reference an existing bucket and a legal batch start offset.
    for bucket_idx, start_pos in indices:
        assert 0 <= bucket_idx < len(dataset)
        assert 0 <= start_pos < len(dataset.source[bucket_idx]) - batch_size + 1
    # After fill-up, every bucket must be represented among the batch indices.
    dataset = dataset.fill_up(bucket_batch_sizes)
    indices = data_io.get_batch_indices(dataset, bucket_batch_sizes=bucket_batch_sizes)
    covered = {bucket_idx for bucket_idx, _ in indices}
    assert not set(range(len(dataset))) - covered
@pytest.mark.parametrize("buckets, expected_default_bucket_key",
                         [([(10, 10), (20, 20), (30, 30), (40, 40), (50, 50)], (50, 50)),
                          ([(5, 10), (10, 20), (15, 30), (25, 50), (20, 40)], (25, 50))])
def test_get_default_bucket_key(buckets, expected_default_bucket_key):
    """The default bucket key must be the largest (source, target) bucket."""
    default_bucket_key = data_io.get_default_bucket_key(buckets)
    assert default_bucket_key == expected_default_bucket_key
# Cases for test_get_parallel_bucket. Each tuple is:
# (buckets, source_length, target_length, expected_bucket_index, expected_bucket);
# expected index/bucket of None means no bucket fits the given lengths.
get_parallel_bucket_tests = [([(10, 10), (20, 20), (30, 30), (40, 40), (50, 50)], 50, 50, 4, (50, 50)),
                             ([(10, 10), (20, 20), (30, 30), (40, 40), (50, 50)], 50, 10, 4, (50, 50)),
                             ([(10, 10), (20, 20), (30, 30), (40, 40), (50, 50)], 20, 10, 1, (20, 20)),
                             ([(10, 10)], 20, 10, None, None),
                             ([], 20, 10, None, None),
                             ([(10, 11)], 11, 10, None, None),
                             ([(11, 10)], 11, 10, 0, (11, 10))]
@pytest.mark.parametrize("buckets, source_length, target_length, expected_bucket_index, expected_bucket",
                         get_parallel_bucket_tests)
def test_get_parallel_bucket(buckets, source_length, target_length, expected_bucket_index, expected_bucket):
    """get_parallel_bucket() must pick the smallest bucket fitting both lengths."""
    bucket_index, bucket = data_io.get_parallel_bucket(buckets, source_length, target_length)
    assert bucket_index == expected_bucket_index
    assert bucket == expected_bucket
@pytest.mark.parametrize("sources, targets, expected_num_sents, expected_mean, expected_std",
                         [([[[1, 1, 1], [2, 2, 2], [3, 3, 3]]],
                           [[[1, 1, 1], [2, 2, 2], [3, 3, 3]]], 3, 1.0, 0.0),
                          ([[[1, 1], [2, 2], [3, 3]]],
                           [[[1, 1, 1], [2, 2, 2], [3, 3, 3]]], 3, 1.5, 0.0),
                          ([[[1, 1, 1], [2, 2], [3, 3, 3, 3, 3, 3, 3]]],
                           [[[1, 1, 1], [2], [3, 3, 3]]], 2, 0.75, 0.25)])
def test_calculate_length_statistics(sources, targets, expected_num_sents, expected_mean, expected_std):
    """Length statistics must report sentence count and target/source length ratio moments."""
    # Max source/target lengths of 5 exclude the 7-token sentence in the last case.
    length_statistics = data_io.calculate_length_statistics(sources, targets, 5, 5)
    assert len(sources[0]) == len(targets[0])
    assert length_statistics.num_sents == expected_num_sents
    assert np.isclose(length_statistics.length_ratio_mean, expected_mean)
    assert np.isclose(length_statistics.length_ratio_std, expected_std)
@pytest.mark.parametrize("sources, targets",
                         [
                             ([[[1, 1, 1], [2, 2, 2], [3, 3, 3]],
                               [[1, 1, 1], [2, 2], [3, 3, 3]]],
                              [[[1, 1, 1], [2, 2, 2], [3, 3, 3]]])
                         ])
def test_non_parallel_calculate_length_statistics(sources, targets):
    """Mismatched source/target factor lengths must raise a SockeyeError."""
    with pytest.raises(SockeyeError):
        data_io.calculate_length_statistics(sources, targets, 5, 5)
def test_get_training_data_iters():
    """End-to-end check of get_training_data_iters() on a synthetic digits corpus.

    Verifies the returned iterator/config/data-info objects and then iterates
    two epochs of batches, checking shapes and BOS/EOS placement.
    """
    train_line_count = 100
    train_line_count_empty = 0
    train_max_length = 30
    dev_line_count = 20
    dev_max_length = 30
    # The digits corpus copies source to target, so the ratio is exactly 1.0.
    expected_mean = 1.0
    expected_std = 0.0
    test_line_count = 20
    test_line_count_empty = 0
    test_max_length = 30
    batch_size = 5
    num_source_factors = num_target_factors = 1
    with tmp_digits_dataset("tmp_corpus",
                            train_line_count, train_line_count_empty, train_max_length - C.SPACE_FOR_XOS,
                            dev_line_count, dev_max_length - C.SPACE_FOR_XOS,
                            test_line_count, test_line_count_empty,
                            test_max_length - C.SPACE_FOR_XOS) as data:
        # tmp common vocab
        vcb = vocab.build_pruned_vocab(vocab.build_from_paths([data['train_source'], data['train_target']]))
        train_iter, val_iter, config_data, data_info = data_io.get_training_data_iters(
            sources=[data['train_source']],
            targets=[data['train_target']],
            validation_sources=[data['dev_source']],
            validation_targets=[data['dev_target']],
            source_vocabs=[vcb],
            target_vocabs=[vcb],
            source_vocab_paths=[None],
            target_vocab_paths=[None],
            shared_vocab=True,
            batch_size=batch_size,
            batch_type=C.BATCH_TYPE_SENTENCE,
            batch_num_devices=1,
            max_seq_len_source=train_max_length,
            max_seq_len_target=train_max_length,
            bucketing=True,
            bucket_width=10)
        assert isinstance(train_iter, data_io.ParallelSampleIter)
        assert isinstance(val_iter, data_io.ParallelSampleIter)
        assert isinstance(config_data, data_io.DataConfig)
        assert data_info.sources == [data['train_source']]
        assert data_info.targets == [data['train_target']]
        # Vocab paths were passed as [None], so data_info must echo that back.
        assert data_info.source_vocabs == [None]
        assert data_info.target_vocabs == [None]
        assert config_data.data_statistics.max_observed_len_source == train_max_length
        assert config_data.data_statistics.max_observed_len_target == train_max_length
        assert np.isclose(config_data.data_statistics.length_ratio_mean, expected_mean)
        assert np.isclose(config_data.data_statistics.length_ratio_std, expected_std)
        assert train_iter.batch_size == batch_size
        assert val_iter.batch_size == batch_size
        assert train_iter.default_bucket_key == (train_max_length, train_max_length)
        assert val_iter.default_bucket_key == (dev_max_length, dev_max_length)
        assert train_iter.dtype == 'float32'
        # test some batches
        bos_id = vcb[C.BOS_SYMBOL]
        eos_id = vcb[C.EOS_SYMBOL]
        expected_first_target_symbols = np.full((batch_size, 1), bos_id, dtype='float32')
        for epoch in range(2):
            while train_iter.iter_next():
                batch = train_iter.next()
                assert isinstance(batch, data_io.Batch)
                source = batch.source.asnumpy()
                target = batch.target.asnumpy()
                label = batch.labels[C.TARGET_LABEL_NAME].asnumpy()  # TODO: still 2-shape: (batch, length)
                length_ratio_label = batch.labels[C.LENRATIO_LABEL_NAME].asnumpy()
                assert source.shape[0] == target.shape[0] == label.shape[0] == batch_size
                assert source.shape[2] == target.shape[2] == num_source_factors == num_target_factors
                # target first symbol should be BOS
                # each source sequence contains one EOS symbol
                assert np.sum(source == eos_id) == batch_size
                assert np.array_equal(target[:, 0], expected_first_target_symbols)
                # label first symbol should be 2nd target symbol
                assert np.array_equal(label[:, 0], target[:, 1, 0])
                # each label sequence contains one EOS symbol
                assert np.sum(label == eos_id) == batch_size
            train_iter.reset()
def _data_batches_equal(db1: data_io.Batch, db2: data_io.Batch) -> bool:
    """Return True if two batches carry the same arrays, label keys and size counters."""
    arrays_match = all(
        np.allclose(getattr(db1, name).asnumpy(), getattr(db2, name).asnumpy())
        for name in ('source', 'source_length', 'target', 'target_length'))
    return (arrays_match
            and db1.labels.keys() == db2.labels.keys()
            and db1.samples == db2.samples
            and db1.tokens == db2.tokens)
def test_parallel_sample_iter():
    """Saving and restoring ParallelSampleIter state must reproduce the next batch."""
    batch_size = 2
    buckets = data_io.define_parallel_buckets(100, 100, 10, True, 1.0)
    # The first bucket is going to be empty:
    bucket_counts = [0] + [None] * (len(buckets) - 1)
    bucket_batch_sizes = data_io.define_bucket_batch_sizes(buckets,
                                                           batch_size,
                                                           batch_type=C.BATCH_TYPE_SENTENCE,
                                                           batch_num_devices=1,
                                                           data_target_average_len=[None] * len(buckets))
    dataset = data_io.ParallelDataSet(*_get_random_bucketed_data(buckets, min_count=0, max_count=5,
                                                                 bucket_counts=bucket_counts))
    it = data_io.ParallelSampleIter(dataset, buckets, batch_size, bucket_batch_sizes)

    with TemporaryDirectory() as work_dir:
        fname = os.path.join(work_dir, "saved_iter")

        def save_and_reload(expected_batch):
            # Persist `it`'s state, restore into a fresh iterator and check that
            # the restored iterator produces the batch we expect next.
            it.save_state(fname)
            restored = data_io.ParallelSampleIter(dataset, buckets, batch_size, bucket_batch_sizes)
            restored.reset()
            restored.load_state(fname)
            assert _data_batches_equal(expected_batch, restored.next())
            return restored

        # Test 1: save after consuming two batches.
        it.next()
        save_and_reload(it.next())
        # Test 2: reset, consume one batch, save.
        it.reset()
        save_and_reload(it.next())
        # Test 3: same again, then drain original and restored in lockstep.
        it.reset()
        restored = save_and_reload(it.next())
        while it.iter_next():
            it.next()
            restored.next()
        assert not restored.iter_next()
def test_sharded_parallel_sample_iter():
batch_size = 2
buckets = data_io.define_parallel_buckets(100, 100, 10, 1, 1.0)
# The first bucket is going to be empty:
bucket_counts = [0] + [None] * (len(buckets) - 1)
bucket_batch_sizes = data_io.define_bucket_batch_sizes(buckets,
batch_size,
batch_type=C.BATCH_TYPE_SENTENCE,
batch_num_devices=1,
data_target_average_len=[None] * len(buckets))
dataset1 = data_io.ParallelDataSet(*_get_random_bucketed_data(buckets, min_count=0, max_count=5,
bucket_counts=bucket_counts))
dataset2 = data_io.ParallelDataSet(*_get_random_bucketed_data(buckets, min_count=0, max_count=5,
bucket_counts=bucket_counts))
with TemporaryDirectory() as work_dir:
shard1_fname = os.path.join(work_dir, 'shard1')
shard2_fname = os.path.join(work_dir, 'shard2')
dataset1.save(shard1_fname)
dataset2.save(shard2_fname)
shard_fnames = [shard1_fname, shard2_fname]
it = data_io.ShardedParallelSampleIter(shard_fnames, buckets, batch_size, bucket_batch_sizes)
# Test 1
it.next()
expected_batch = it.next()
fname = os.path.join(work_dir, "saved_iter")
it.save_state(fname)
it_loaded = data_io.ShardedParallelSampleIter(shard_fnames, buckets, batch_size, bucket_batch_sizes)
it_loaded.reset()
it_loaded.load_state(fname)
loaded_batch = it_loaded.next()
assert _data_batches_equal(expected_batch, loaded_batch)
# Test 2
it.reset()
expected_batch = it.next()
| |
#!/usr/bin/env python
# coding: utf-8
# # Analyzing Student's Behavior and Model suggestion for classification levels
# ### <NAME>
# > #### This Data Science project was made under Capstone Data Science IBM Certification Program.
# ## Table of contents
# * [Introduction: Business Problem](#introduction)
# * [Data](#data)
# * [Methodology](#methodology)
# * [Analysis](#analysis)
# * [Results and Discussion](#results)
# * [Conclusion](#conclusion)
# # 1. Introduction <a name="introduction"></a>
# A description of the problem and a discussion of the background
#
# The Internet revolution brought more than social medias and faster information exchanges. It brought also a generation of people who studies through the digital environments. Under this context, the online education evolved quickly and the transformation of the societies really started. Nowadays, people in distant places, poor countries can benefit from technology to achieve information and in this case, the Massive Open Online Courses, MOOCs had a major role.
# MOOCs can bring together people from all around the world to build understanding in a wide range of areas, delivering science and culture.
#
# It is also known that online learning suffers from massive dropout. Distance and a lack of motivation can make students leave. In this context, which features are related to this outcome? How can we understand each student's scenario and predict churn or low grades?
# I think that is a relevant point. If MOOC platforms achieve student understanding and prediction, it becomes possible to manage student churn and find a way to give students the motivation they need.
#
# With this set in mind, I started a search for MOOCs generated Students Data to investigate and prepare some conclusions about the theme.
#
# # 2. Data
# A description of the data and how it will be used to solve the problem
#
# To guide my investigation, I was looking for a Set to help to understand the student's behavior, motivation and correlated characteristics in order to better understand why or how is the result of an enrollment. So, it is important to find a dataset with some key features like grade, gender, enrollment levels, and so on. Location data is also important to understand cultural marks, which will be explored by locations APIs.
# Guided by the analysis exploration, I'll be able to build a model to predict student's behavior or results.
# After querying correlated datasets in order to find those with better columns, I found a nice DataSet from Kaggle called "Students' Academic Performance Dataset". You can check it here https://www.kaggle.com/aljarah/xAPI-Edu-Data.
# <p> The data comprises 16 columns with aggregated information about over 480 students of a learning platform called Kalboard360. The details are shown in the next section.
#
# ## 2.1 Data Structure
# As previously mentioned, this dataset includes 16 columns:
#
# 1. Gender - student's gender (nominal: 'Male' or 'Female’)
#
# 2. Nationality- student's nationality (nominal:’ Kuwait’,’ Lebanon’,’ Egypt’,’ SaudiArabia’,’ USA’,’ Jordan’,’ Venezuela’,’ Iran’,’ Tunis’,’ Morocco’,’ Syria’,’ Palestine’,’ Iraq’,’ Lybia’)
#
# 3. Place of birth- student's Place of birth (nominal:’ Kuwait’,’ Lebanon’,’ Egypt’,’ SaudiArabia’,’ USA’,’ Jordan’,’ Venezuela’,’ Iran’,’ Tunis’,’ Morocco’,’ Syria’,’ Palestine’,’ Iraq’,’ Lybia’)
#
# 4. Educational Stages- educational level student belongs (nominal: ‘lowerlevel’,’MiddleSchool’,’HighSchool’)
#
# 5. Grade Levels- grade student belongs (nominal: ‘G-01’, ‘G-02’, ‘G-03’, ‘G-04’, ‘G-05’, ‘G-06’, ‘G-07’, ‘G-08’, ‘G-09’, ‘G-10’, ‘G-11’, ‘G-12 ‘)
#
# 6. Section ID- classroom student belongs (nominal:’A’,’B’,’C’)
#
# 7. Topic- course topic (nominal:’ English’,’ Spanish’, ‘French’,’ Arabic’,’ IT’,’ Math’,’ Chemistry’, ‘Biology’, ‘Science’,’ History’,’ Quran’,’ Geology’)
#
# 8. Semester- school year semester (nominal:’ First’,’ Second’)
#
# 9. Parent responsible for student (nominal:’mom’,’father’)
#
# 10. Raised hand- how many times the student raises his/her hand on classroom (numeric:0-100)
#
# 11. Visited resources- how many times the student visits a course content(numeric:0-100)
#
# 12. Viewing announcements-how many times the student checks the new announcements(numeric:0-100)
#
# 13. Discussion groups- how many times the student participate on discussion groups (numeric:0-100)
#
# 14. Parent Answering Survey- parent answered the surveys which are provided from school or not (nominal:’Yes’,’No’)
#
# 15. Parent School Satisfaction- the Degree of parent satisfaction from school(nominal:’Yes’,’No’)
#
# 16. Student Absence Days-the number of absence days for each student (nominal: above-7, under-7)
#
# The most important characteristic of this dataset is that it has included the parent's data, which is a nice approach to understand the student.
# # 3. Methodology
#
# The first steps are the data exploration and insight-taking approach in order to better understand the data and the columns. The purpose of this exploratory analysis is to identify hidden features and understand the relations between the features.
# Next, I'll do a descriptive analysis by building a dataset for a clustering algorithm. This way, data understanding becomes more powerful decision making, focused on students' behaviors.
# Finally, I'll create my predictive analysis by building a dataset with the best features for a supervised learning algorithm to predict the student's behavior under certain conditions, which will achieve my final objective.
# # 4. Analysis
# As mentioned, this section will understand the data in order to compose the clustering dataset.
# ### 4.1 Exploratory Analysis
# In[110]:
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
# In[111]:
# Load the xAPI-Edu-Data CSV (Kaggle "Students' Academic Performance" dataset)
# and preview the first rows.
dataset = pd.read_csv("../../../input/aljarah_xAPI-Edu-Data/xAPI-Edu-Data.csv")
dataset.head(5)
# In the context to understand the student and his results, setting up a dataframe with certain columns
# In[112]:
# Keep only the columns relevant to student behaviour, parent involvement and outcome.
df = dataset[['gender','PlaceofBirth','StageID','Topic','raisedhands','VisITedResources','AnnouncementsView','Discussion', 'ParentAnsweringSurvey','ParentschoolSatisfaction','StudentAbsenceDays', 'Class']]
df.head()
# Try to understand the results from countries
# In[113]:
df.groupby(['ParentschoolSatisfaction'])['Class'].value_counts(normalize=True)
# In[114]:
df.groupby(['ParentAnsweringSurvey'])['ParentschoolSatisfaction'].value_counts(normalize=True)
# It seems that parents which aren't envolved in answering the scholar's surveys are likely to become unsatisfied with the School. This can mean that well informed parents can better understand the student's enrollment and reality and are better satisfied.
# ### Question: What is the relation between active parents and student's classification?
# In[115]:
df.groupby(['ParentAnsweringSurvey'])['Class'].value_counts(normalize=True)
# So, definitively parent's active behavior has an important role on student's growth.
# ## Understanding student's behavior
# Next, it is important to know which characteristics are linked to student success. So, we're going to test the related features.
# In[116]:
# Take an explicit copy: the cells below assign binned columns back into df2
# (pd.cut at In[117]..In[120]); assigning into a column slice of `dataset`
# triggers pandas' SettingWithCopyWarning and may not take effect.
df2 = dataset[['gender','raisedhands','VisITedResources','AnnouncementsView','Discussion','StudentAbsenceDays', 'Class']].copy()
df2.head()
# ### Question: What's the relation between raising hands and classification?
# In[117]:
# Bin raisedhands into 3 equal-width buckets (labels 0..2, low to high)
# and compare the Class distribution per bucket.
df2['raisedhands'] = pd.cut(df2.raisedhands, bins=3, labels=np.arange(3), right=False)
df2.groupby(['raisedhands'])['Class'].value_counts(normalize=True)
# So, it seems that students who rarely raise their hands are most likely to have a Low classification. On the other side, a high frequency of raising hands is linked to a higher classification.
# Next, we're going to check the act of visiting the course resources.
# In[118]:
df2['VisITedResources'] = pd.cut(df2.VisITedResources, bins=3, labels=np.arange(3), right=False)
df2.groupby(['VisITedResources'])['Class'].value_counts(normalize=True)
# Low levels of resource exploring means lower levels of classification. High levels of visiting resources are linked to higher classification.
# In[119]:
df2['AnnouncementsView'] = pd.cut(df2.AnnouncementsView, bins=3, labels=np.arange(3), right=False)
df2.groupby(['AnnouncementsView'])['Class'].value_counts(normalize=True)
# The act of viewing the announcements makes students more prepared for the tasks, and they are more likely to plan the week's assessments. A high viewing frequency is linked, indeed, to better classifications.
# In[120]:
df2['Discussion'] = pd.cut(df2.Discussion, bins=3, labels=np.arange(3), right=False)
df2.groupby(['Discussion'])['Class'].value_counts(normalize=True)
# Surprisingly, discussion frequency is only weakly linked to higher results, at least directly. Of course, higher interaction levels occur among higher-graded students, but the data shows that discussion is a secondary act.
# Concluding this step on analysis, we're going to understand the absence rate with the grade level
# In[121]:
df2.groupby(['StudentAbsenceDays'])['Class'].value_counts(normalize=True)
# As expected, the lower the absence of the student, the higher tends to become their classification. Let's keep this feature.
# ### 4.1.1 Clustering DataSet
# Now that we know what are the important features to understand the student's behavior and classification, we're going to build a dataset for a K-Means algorithm, which will show the student's cluster.
# To make the construction process easiest to understand, we're going to reimplement the dataset building phases.
# In[122]:
df2 = dataset[['gender','raisedhands','VisITedResources','AnnouncementsView','Discussion','StudentAbsenceDays', 'Class']]
df2.tail()
# Let's identify the correlations between the student's actions
# In[123]:
correlation = df2[['raisedhands','VisITedResources','AnnouncementsView','Discussion']].corr(method='pearson')
correlation
# This made clear that our best correlated features are raisedHands and visitedResources, which will compose our model dataset further.
# So, we need an <b>one hot encoding</b> on columns gender,absence and class
# In[124]:
df2 = pd.concat([df2,pd.get_dummies(df2['gender'], prefix='gender_')], axis=1)
df2 = pd.concat([df2,pd.get_dummies(df2['StudentAbsenceDays'], prefix='absence_')], axis=1)
df2 = pd.concat([df2,pd.get_dummies(df2['Class'], prefix='class_')], axis=1)
df2.drop(['gender'], axis = 1,inplace=True)
df2.drop(['StudentAbsenceDays'], axis = | |
df = self.spark.range(3)
res = df.select("id", explode(f(df.id))).collect()
self.assertEqual(res[0][0], 1)
self.assertEqual(res[0][1], 0)
self.assertEqual(res[1][0], 2)
self.assertEqual(res[1][1], 0)
self.assertEqual(res[2][0], 2)
self.assertEqual(res[2][1], 1)
range_udf = udf(lambda value: list(range(value - 1, value + 1)), ArrayType(IntegerType()))
res = df.select("id", explode(range_udf(df.id))).collect()
self.assertEqual(res[0][0], 0)
self.assertEqual(res[0][1], -1)
self.assertEqual(res[1][0], 0)
self.assertEqual(res[1][1], 0)
self.assertEqual(res[2][0], 1)
self.assertEqual(res[2][1], 0)
self.assertEqual(res[3][0], 1)
self.assertEqual(res[3][1], 1)
def test_udf_with_order_by_and_limit(self):
    """Ordering plus limit must still work when a UDF column is projected."""
    from pyspark.sql.functions import udf
    identity = udf(lambda value: value, IntegerType())
    ordered = self.spark.range(10).orderBy("id")
    limited = ordered.select(ordered.id, identity(ordered.id).alias("copy")).limit(1)
    limited.explain(True)
    self.assertEqual(limited.collect(), [Row(id=0, copy=0)])
def test_udf_registration_returns_udf(self):
    """spark.udf.register must hand back a callable usable from the DataFrame API."""
    df = self.spark.range(10)
    add_three = self.spark.udf.register("add_three", lambda x: x + 3, IntegerType())
    # The SQL-expression form and the returned-UDF column form must agree.
    self.assertListEqual(
        df.selectExpr("add_three(id) AS plus_three").collect(),
        df.select(add_three("id").alias("plus_three")).collect()
    )
def test_multiLine_json(self):
    """A multi-line JSON array must parse to the same rows as line-delimited JSON."""
    expected = self.spark.read.json("python/test_support/sql/people.json").collect()
    multiline = self.spark.read.json("python/test_support/sql/people_array.json",
                                     multiLine=True)
    self.assertEqual(expected, multiline.collect())
def test_multiline_csv(self):
    """CSV fields containing embedded newlines parse correctly with multiLine=True."""
    ages_newlines = self.spark.read.csv(
        "python/test_support/sql/ages_newlines.csv", multiLine=True)
    expected = [Row(_c0=u'Joe', _c1=u'20', _c2=u'Hi,\nI am Jeo'),
                Row(_c0=u'Tom', _c1=u'30', _c2=u'My name is Tom'),
                Row(_c0=u'Hyukjin', _c1=u'25', _c2=u'I am Hyukjin\n\nI love Spark!')]
    self.assertEqual(ages_newlines.collect(), expected)
def test_ignorewhitespace_csv(self):
    """Whitespace is preserved when the ignore*WhiteSpace writer options are off."""
    tmpPath = tempfile.mkdtemp()
    # Spark's writer requires the target directory not to exist yet.
    shutil.rmtree(tmpPath)
    try:
        self.spark.createDataFrame([[" a", "b ", " c "]]).write.csv(
            tmpPath,
            ignoreLeadingWhiteSpace=False,
            ignoreTrailingWhiteSpace=False)
        expected = [Row(value=u' a,b , c ')]
        readback = self.spark.read.text(tmpPath)
        self.assertEqual(readback.collect(), expected)
    finally:
        # Original cleanup ran after the assertion, leaking the temp dir on
        # failure; always remove it.
        shutil.rmtree(tmpPath, ignore_errors=True)
def test_read_multiple_orc_file(self):
    """Reading a list of ORC paths concatenates their rows."""
    partition_paths = ["python/test_support/sql/orc_partitioned/b=0/c=0",
                       "python/test_support/sql/orc_partitioned/b=1/c=1"]
    self.assertEqual(2, self.spark.read.orc(partition_paths).count())
def test_udf_with_input_file_name(self):
    """input_file_name() must be visible to a Python UDF on a JSON source."""
    from pyspark.sql.functions import udf, input_file_name
    from pyspark.sql.types import StringType
    sourceFile = udf(lambda path: path, StringType())
    filePath = "python/test_support/sql/people1.json"
    row = self.spark.read.json(filePath).select(sourceFile(input_file_name())).first()
    # The absolute path is environment-dependent; only check the file name appears.
    self.assertTrue(row[0].find("people1.json") != -1)
def test_udf_with_input_file_name_for_hadooprdd(self):
    """input_file_name() works for data sourced from textFile and newAPIHadoopFile RDDs."""
    from pyspark.sql.functions import udf, input_file_name
    from pyspark.sql.types import StringType

    def filename(path):
        # identity UDF: just surfaces the column value
        return path

    sameText = udf(filename, StringType())

    # plain textFile-backed RDD
    rdd = self.sc.textFile('python/test_support/sql/people.json')
    df = self.spark.read.json(rdd).select(input_file_name().alias('file'))
    row = df.select(sameText(df['file'])).first()
    self.assertTrue(row[0].find("people.json") != -1)

    # new Hadoop API file-backed RDD
    rdd2 = self.sc.newAPIHadoopFile(
        'python/test_support/sql/people.json',
        'org.apache.hadoop.mapreduce.lib.input.TextInputFormat',
        'org.apache.hadoop.io.LongWritable',
        'org.apache.hadoop.io.Text')
    df2 = self.spark.read.json(rdd2).select(input_file_name().alias('file'))
    row2 = df2.select(sameText(df2['file'])).first()
    self.assertTrue(row2[0].find("people.json") != -1)
def test_udf_defers_judf_initalization(self):
    """Creating a UDF must not touch the JVM; _judf is built lazily on first call."""
    # NOTE: method name keeps the historical 'initalization' typo; renaming
    # would change the public test id.
    # This is separate of UDFInitializationTests
    # to avoid context initialization
    # when udf is called
    from pyspark.sql.functions import UserDefinedFunction
    f = UserDefinedFunction(lambda x: x, StringType())
    self.assertIsNone(
        f._judf_placeholder,
        "judf should not be initialized before the first call."
    )
    self.assertIsInstance(f("foo"), Column, "UDF call should return a Column.")
    self.assertIsNotNone(
        f._judf_placeholder,
        "judf should be initialized after UDF has been called."
    )
def test_udf_with_string_return_type(self):
    """DDL-formatted type strings are accepted in place of DataType objects."""
    from pyspark.sql.functions import UserDefinedFunction
    add_one = UserDefinedFunction(lambda x: x + 1, "integer")
    make_pair = UserDefinedFunction(lambda x: (-x, x), "struct<x:integer,y:integer>")
    make_array = UserDefinedFunction(
        lambda x: [float(x) for x in range(x, x + 3)], "array<double>")
    expected = (2, Row(x=-1, y=1), [1.0, 2.0, 3.0])
    actual = (self.spark.range(1, 2).toDF("x")
              .select(add_one("x"), make_pair("x"), make_array("x"))
              .first())
    self.assertTupleEqual(expected, actual)
def test_udf_shouldnt_accept_noncallable_object(self):
    """Constructing a UDF from a non-callable must raise TypeError."""
    from pyspark.sql.functions import UserDefinedFunction
    from pyspark.sql.types import StringType
    with self.assertRaises(TypeError):
        UserDefinedFunction(None, StringType())
def test_udf_with_decorator(self):
    """All decorator spellings of udf() must yield working UDFs with correct types."""
    from pyspark.sql.functions import lit, udf
    from pyspark.sql.types import IntegerType, DoubleType

    # @udf(DataType)
    @udf(IntegerType())
    def add_one(x):
        if x is not None:
            return x + 1

    # @udf(returnType=DataType)
    @udf(returnType=DoubleType())
    def add_two(x):
        if x is not None:
            return float(x + 2)

    # bare @udf (default string return type)
    @udf
    def to_upper(x):
        if x is not None:
            return x.upper()

    # @udf() with no arguments
    @udf()
    def to_lower(x):
        if x is not None:
            return x.lower()

    # multi-argument UDF
    @udf
    def substr(x, start, end):
        if x is not None:
            return x[start:end]

    # @udf("ddl string")
    @udf("long")
    def trunc(x):
        return int(x)

    # @udf(returnType="ddl string")
    @udf(returnType="double")
    def as_double(x):
        return float(x)

    df = (
        self.spark
            .createDataFrame(
                [(1, "Foo", "foobar", 3.0)], ("one", "Foo", "foobar", "float"))
            .select(
                add_one("one"), add_two("one"),
                to_upper("Foo"), to_lower("Foo"),
                substr("foobar", lit(0), lit(3)),
                trunc("float"), as_double("one")))

    self.assertListEqual(
        [tpe for _, tpe in df.dtypes],
        ["int", "double", "string", "string", "string", "bigint", "double"]
    )
    self.assertListEqual(
        list(df.first()),
        [2, 3.0, "FOO", "foo", "foo", 3, 1.0]
    )
def test_udf_wrapper(self):
    """udf() must preserve the wrapped function, its docstring and return type."""
    from pyspark.sql.functions import udf
    from pyspark.sql.types import IntegerType

    def f(x):
        """Identity"""
        return x

    return_type = IntegerType()
    f_ = udf(f, return_type)
    self.assertTrue(f.__doc__ in f_.__doc__)
    self.assertEqual(f, f_.func)
    self.assertEqual(return_type, f_.returnType)
def test_basic_functions(self):
    """Smoke test: count/collect/schema, caching flags, and temp-view SQL."""
    rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
    df = self.spark.read.json(rdd)
    df.count()
    df.collect()
    df.schema

    # cache and checkpoint
    self.assertFalse(df.is_cached)
    df.persist()
    df.unpersist(True)
    df.cache()
    self.assertTrue(df.is_cached)
    self.assertEqual(2, df.count())

    df.createOrReplaceTempView("temp")
    df = self.spark.sql("select foo from temp")
    df.count()
    df.collect()
def test_apply_schema_to_row(self):
    """An inferred schema can be re-applied to an RDD of Rows."""
    df = self.spark.read.json(self.sc.parallelize(["""{"a":2}"""]))
    df2 = self.spark.createDataFrame(df.rdd.map(lambda x: x), df.schema)
    self.assertEqual(df.collect(), df2.collect())

    rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x))
    df3 = self.spark.createDataFrame(rdd, df.schema)
    self.assertEqual(10, df3.count())
def test_infer_schema_to_local(self):
    """Schema inferred from local dicts must match the one inferred from an RDD."""
    input = [{"a": 1}, {"b": "coffee"}]
    rdd = self.sc.parallelize(input)
    df = self.spark.createDataFrame(input)
    df2 = self.spark.createDataFrame(rdd, samplingRatio=1.0)
    self.assertEqual(df.schema, df2.schema)

    rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x, b=None))
    df3 = self.spark.createDataFrame(rdd, df.schema)
    self.assertEqual(10, df3.count())
def test_apply_schema_to_dict_and_rows(self):
    """Explicit schemas apply to dicts and Rows, with and without verification."""
    schema = StructType().add("b", StringType()).add("a", IntegerType())
    input = [{"a": 1}, {"b": "coffee"}]
    rdd = self.sc.parallelize(input)
    # exercise both verifySchema code paths
    for verify in [False, True]:
        df = self.spark.createDataFrame(input, schema, verifySchema=verify)
        df2 = self.spark.createDataFrame(rdd, schema, verifySchema=verify)
        self.assertEqual(df.schema, df2.schema)

        rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x, b=None))
        df3 = self.spark.createDataFrame(rdd, schema, verifySchema=verify)
        self.assertEqual(10, df3.count())

        input = [Row(a=x, b=str(x)) for x in range(10)]
        df4 = self.spark.createDataFrame(input, schema, verifySchema=verify)
        self.assertEqual(10, df4.count())
def test_create_dataframe_schema_mismatch(self):
    """A schema with more fields than the data must fail when evaluated."""
    input = [Row(a=1)]
    rdd = self.sc.parallelize(range(3)).map(lambda i: Row(a=i))
    schema = StructType([StructField("a", IntegerType()), StructField("b", StringType())])
    df = self.spark.createDataFrame(rdd, schema)
    # creation is lazy; the mismatch only surfaces when the plan executes
    self.assertRaises(Exception, lambda: df.show())
def test_serialize_nested_array_and_map(self):
    """Rows nested inside arrays and maps must round-trip through a DataFrame."""
    d = [Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})]
    rdd = self.sc.parallelize(d)
    df = self.spark.createDataFrame(rdd)
    row = df.head()
    self.assertEqual(1, len(row.l))
    self.assertEqual(1, row.l[0].a)
    self.assertEqual("2", row.d["key"].d)

    # access nested values from RDD-side maps as well
    l = df.rdd.map(lambda x: x.l).first()
    self.assertEqual(1, len(l))
    self.assertEqual('s', l[0].b)

    d = df.rdd.map(lambda x: x.d).first()
    self.assertEqual(1, len(d))
    self.assertEqual(1.0, d["key"].c)

    row = df.rdd.map(lambda x: x.d["key"]).first()
    self.assertEqual(1.0, row.c)
    self.assertEqual("2", row.d)
def test_infer_schema(self):
    """Schema inference must cope with empty containers and None values."""
    d = [Row(l=[], d={}, s=None),
         Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")}, s="")]
    rdd = self.sc.parallelize(d)
    df = self.spark.createDataFrame(rdd)
    self.assertEqual([], df.rdd.map(lambda r: r.l).first())
    self.assertEqual([None, ""], df.rdd.map(lambda r: r.s).collect())
    df.createOrReplaceTempView("test")
    result = self.spark.sql("SELECT l[0].a from test where d['key'].d = '2'")
    self.assertEqual(1, result.head()[0])

    # sampling all rows must infer the same schema
    df2 = self.spark.createDataFrame(rdd, samplingRatio=1.0)
    self.assertEqual(df.schema, df2.schema)
    self.assertEqual({}, df2.rdd.map(lambda r: r.d).first())
    self.assertEqual([None, ""], df2.rdd.map(lambda r: r.s).collect())
    df2.createOrReplaceTempView("test2")
    result = self.spark.sql("SELECT l[0].a from test2 where d['key'].d = '2'")
    self.assertEqual(1, result.head()[0])
def test_infer_nested_schema(self):
    """Schema inference works for nested lists/maps and for namedtuples."""
    NestedRow = Row("f1", "f2")
    nestedRdd1 = self.sc.parallelize([NestedRow([1, 2], {"row1": 1.0}),
                                      NestedRow([2, 3], {"row2": 2.0})])
    df = self.spark.createDataFrame(nestedRdd1)
    self.assertEqual(Row(f1=[1, 2], f2={u'row1': 1.0}), df.collect()[0])

    nestedRdd2 = self.sc.parallelize([NestedRow([[1, 2], [2, 3]], [1, 2]),
                                      NestedRow([[2, 3], [3, 4]], [2, 3])])
    df = self.spark.createDataFrame(nestedRdd2)
    self.assertEqual(Row(f1=[[1, 2], [2, 3]], f2=[1, 2]), df.collect()[0])

    # namedtuples are treated like Rows
    from collections import namedtuple
    CustomRow = namedtuple('CustomRow', 'field1 field2')
    rdd = self.sc.parallelize([CustomRow(field1=1, field2="row1"),
                               CustomRow(field1=2, field2="row2"),
                               CustomRow(field1=3, field2="row3")])
    df = self.spark.createDataFrame(rdd)
    self.assertEqual(Row(field1=1, field2=u'row1'), df.first())
def test_create_dataframe_from_objects(self):
    """Schema can be inferred from plain Python objects' attributes."""
    data = [MyObject(1, "1"), MyObject(2, "2")]
    df = self.spark.createDataFrame(data)
    self.assertEqual(df.dtypes, [("key", "bigint"), ("value", "string")])
    self.assertEqual(df.first(), Row(key=1, value="1"))
def test_select_null_literal(self):
    """A SQL null literal must map to Python None."""
    first_row = self.spark.sql("select null as col").first()
    self.assertEqual(Row(col=None), first_row)
def test_apply_schema(self):
    """Explicit schemas round-trip all primitive, temporal and nested types."""
    from datetime import date, datetime
    rdd = self.sc.parallelize([(127, -128, -32768, 32767, 2147483647, 1.0,
                                date(2010, 1, 1), datetime(2010, 1, 1, 1, 1, 1),
                                {"a": 1}, (2,), [1, 2, 3], None)])
    schema = StructType([
        StructField("byte1", ByteType(), False),
        StructField("byte2", ByteType(), False),
        StructField("short1", ShortType(), False),
        StructField("short2", ShortType(), False),
        StructField("int1", IntegerType(), False),
        StructField("float1", FloatType(), False),
        StructField("date1", DateType(), False),
        StructField("time1", TimestampType(), False),
        StructField("map1", MapType(StringType(), IntegerType(), False), False),
        StructField("struct1", StructType([StructField("b", ShortType(), False)]), False),
        StructField("list1", ArrayType(ByteType(), False), False),
        StructField("null1", DoubleType(), True)])
    df = self.spark.createDataFrame(rdd, schema)
    results = df.rdd.map(lambda x: (x.byte1, x.byte2, x.short1, x.short2, x.int1, x.float1,
                                    x.date1, x.time1, x.map1["a"], x.struct1.b, x.list1, x.null1))
    r = (127, -128, -32768, 32767, 2147483647, 1.0, date(2010, 1, 1),
         datetime(2010, 1, 1, 1, 1, 1), 1, 2, [1, 2, 3], None)
    self.assertEqual(r, results.first())

    # SQL arithmetic on the typed columns
    df.createOrReplaceTempView("table2")
    r = self.spark.sql("SELECT byte1 - 1 AS byte1, byte2 + 1 AS byte2, " +
                       "short1 + 1 AS short1, short2 - 1 AS short2, int1 - 1 AS int1, " +
                       "float1 + 1.5 as float1 FROM table2").first()
    self.assertEqual((126, -127, -32767, 32766, 2147483646, 2.5), tuple(r))

    # abstract schema string + type inference from the first record
    from pyspark.sql.types import _parse_schema_abstract, _infer_schema_type
    rdd = self.sc.parallelize([(127, -32768, 1.0, datetime(2010, 1, 1, 1, 1, 1),
                                {"a": 1}, (2,), [1, 2, 3])])
    abstract = "byte1 short1 float1 time1 map1{} struct1(b) list1[]"
    schema = _parse_schema_abstract(abstract)
    typedSchema = _infer_schema_type(rdd.first(), schema)
    df = self.spark.createDataFrame(rdd, typedSchema)
    r = (127, -32768, 1.0, datetime(2010, 1, 1, 1, 1, 1), {"a": 1}, Row(b=2), [1, 2, 3])
    self.assertEqual(r, tuple(df.first()))
def test_struct_in_map(self):
    """Rows can be used both as map keys and as map values."""
    d = [Row(m={Row(i=1): Row(s="")})]
    df = self.sc.parallelize(d).toDF()
    k, v = list(df.head().m.items())[0]
    self.assertEqual(1, k.i)
    self.assertEqual("", v.s)
def test_convert_row_to_dict(self):
    """Row.asDict() must expose nested Rows inside lists and maps."""
    row = Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})
    self.assertEqual(1, row.asDict()['l'][0].a)
    df = self.sc.parallelize([row]).toDF()
    df.createOrReplaceTempView("test")
    row = self.spark.sql("select l, d from test").head()
    self.assertEqual(1, row.asDict()["l"][0].a)
    self.assertEqual(1.0, row.asDict()['d']['key'].c)
def test_udt(self):
    """User-defined types must survive pickling, JSON and schema verification."""
    from pyspark.sql.types import _parse_datatype_json_string, _infer_type, _verify_type
    from pyspark.sql.tests import ExamplePointUDT, ExamplePoint

    def check_datatype(datatype):
        # pickle round trip and Python<->JVM JSON round trip must preserve equality
        pickled = pickle.loads(pickle.dumps(datatype))
        assert datatype == pickled
        scala_datatype = self.spark._jsparkSession.parseDataType(datatype.json())
        python_datatype = _parse_datatype_json_string(scala_datatype.json())
        assert datatype == python_datatype

    # UDT backed by a Scala class
    check_datatype(ExamplePointUDT())
    structtype_with_udt = StructType([StructField("label", DoubleType(), False),
                                      StructField("point", ExamplePointUDT(), False)])
    check_datatype(structtype_with_udt)
    p = ExamplePoint(1.0, 2.0)
    self.assertEqual(_infer_type(p), ExamplePointUDT())
    _verify_type(ExamplePoint(1.0, 2.0), ExamplePointUDT())
    self.assertRaises(ValueError, lambda: _verify_type([1.0, 2.0], ExamplePointUDT()))

    # Python-only UDT
    check_datatype(PythonOnlyUDT())
    structtype_with_udt = StructType([StructField("label", DoubleType(), False),
                                      StructField("point", PythonOnlyUDT(), False)])
    check_datatype(structtype_with_udt)
    p = PythonOnlyPoint(1.0, 2.0)
    self.assertEqual(_infer_type(p), PythonOnlyUDT())
    _verify_type(PythonOnlyPoint(1.0, 2.0), PythonOnlyUDT())
    self.assertRaises(ValueError, lambda: _verify_type([1.0, 2.0], PythonOnlyUDT()))
def test_simple_udt_in_df(self):
schema = StructType().add("key", LongType()).add("val", PythonOnlyUDT())
df = | |
<reponame>kontexia/sogam<gh_stars>0
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from src.am_graph import AMGraph, EdgeType, Por, NodeId, NodeKey, get_node_id
from src.normalise_amgraph import NormaliseAMGraph
from typing import Optional, Set, Tuple
import random
# Type aliases: a neuron is stored as a graph node, so reuse the node id types.
NeuronId = NodeId
""" unique identifier for a neuron within the gas """

NeuronKey = NodeKey
""" json compatible unique identifier of neuron within the gas"""
class AMGas(object):
def __init__(self,
             fabric_name: str,
             domain: str,
             anomaly_threshold_factor: float = 4.0,
             fast_alpha: float = 0.7,
             prune_threshold: float = 0.01,
             audit: bool = False,
             normalise: bool = True,
             delete_old_neurons: bool = False):
    """Associative-memory neural gas for one domain of a fabric.

    :param fabric_name: name of the owning fabric.
    :param domain: domain within the fabric this gas models.
    :param anomaly_threshold_factor: factor applied when computing the anomaly
        threshold from the error statistics.
    :param fast_alpha: fast exponential-moving-average factor; slow_alpha is
        derived as its complement (1 - fast_alpha).
    :param prune_threshold: values below this are pruned.
    :param audit: if True, keep additional audit information.
    :param normalise: if True, attach a NormaliseAMGraph used to (de)normalise data.
    :param delete_old_neurons: if True, old neurons may be deleted.
    """
    self.fabric_name: str = fabric_name
    self.domain: str = domain
    self.anomaly_threshold_factor: float = anomaly_threshold_factor
    self.fast_alpha: float = fast_alpha
    self.slow_alpha: float = 1 - fast_alpha
    self.anomaly_threshold: float = 0.0
    self.anomalies: dict = {}
    self.prune_threshold: float = prune_threshold
    # undirected graph holding the neurons
    self.neural_gas: AMGraph = AMGraph(directional=False)
    self.update_id: int = 0
    self.next_neuron_id: int = 0
    self.last_bmu_key: Optional[NeuronKey] = None
    # exponential moving average of mapping error (None until first update)
    self.ema_error: Optional[float] = None
    self.ema_variance: float = 0.0
    self.motif_threshold: float = 0.0
    self.motifs: dict = {}
    self.search_edge_types: set = set()
    self.audit: bool = audit
    self.updated: bool = True
    self.normaliser: Optional[NormaliseAMGraph] = None
    self.delete_old_neurons: bool = delete_old_neurons
    if normalise:
        self.normaliser = NormaliseAMGraph()
def to_dict(self, denormalise: bool = False) -> dict:
    """Serialise the gas state to a plain dict (loadable via from_dict).

    :param denormalise: if True and a normaliser is attached, the neural gas
        graph is denormalised back to raw value space before serialising.
    :return: dict of state.
    """
    d_gas = {'fabric_name': self.fabric_name,
             'domain': self.domain,
             'fast_alpha': self.fast_alpha,
             'slow_alpha': self.slow_alpha,
             'prune_threshold': self.prune_threshold,
             'update_id': self.update_id,
             'next_neuron_id': self.next_neuron_id,
             'last_bmu_key': self.last_bmu_key,
             'ema_error': self.ema_error,
             'ema_variance': self.ema_variance,
             'anomaly_threshold_factor': self.anomaly_threshold_factor,
             'anomaly_threshold': self.anomaly_threshold,
             'anomalies': self.anomalies,
             'motif_threshold': self.motif_threshold,
             'motifs': self.motifs,
             # NOTE(review): stored as a set, which is not JSON-serialisable
             # as-is — confirm downstream consumers handle it.
             'search_edge_types': self.search_edge_types,
             'audit': self.audit,
             'normaliser': None,
             'delete_old_neurons': self.delete_old_neurons
             }
    if self.normaliser is not None:
        d_gas['normaliser'] = self.normaliser.to_dict()
        if denormalise:
            d_gas['neural_gas'] = self.normaliser.denormalise(graph=self.neural_gas).to_dict(denormaliser=self.normaliser)
        else:
            d_gas['neural_gas'] = self.neural_gas.to_dict()
    else:
        d_gas['neural_gas'] = self.neural_gas.to_dict()
    return d_gas
def from_dict(self, d_gas: dict, normalise: bool = False):
    """Restore gas state from a dict produced by :meth:`to_dict`.

    :param d_gas: serialised gas state
    :param normalise: if True and a normaliser was saved, normalise the
        restored neural gas graph back into normalised space
    """
    self.fabric_name = d_gas['fabric_name']
    self.domain = d_gas['domain']
    self.fast_alpha = d_gas['fast_alpha']
    self.slow_alpha = d_gas['slow_alpha']
    self.prune_threshold = d_gas['prune_threshold']
    # BUG FIX: to_dict() persists 'update_id' but it was never restored, so a
    # round-trip silently reset the update counter. .get() keeps backward
    # compatibility with dicts saved before this fix.
    self.update_id = d_gas.get('update_id', self.update_id)
    self.next_neuron_id = d_gas['next_neuron_id']
    self.last_bmu_key = d_gas['last_bmu_key']
    self.ema_error = d_gas['ema_error']
    self.ema_variance = d_gas['ema_variance']
    self.anomaly_threshold_factor = d_gas['anomaly_threshold_factor']
    self.anomaly_threshold = d_gas['anomaly_threshold']
    self.anomalies = d_gas['anomalies']
    self.motif_threshold = d_gas['motif_threshold']
    self.motifs = d_gas['motifs']
    self.search_edge_types = d_gas['search_edge_types']
    self.audit = d_gas['audit']
    self.delete_old_neurons = d_gas['delete_old_neurons']
    if d_gas['normaliser'] is not None:
        self.normaliser = NormaliseAMGraph(normalise_amgraph=d_gas['normaliser'])
        if normalise:
            self.neural_gas = self.normaliser.normalise(graph=AMGraph(directional=False, graph=d_gas['neural_gas']))
        else:
            self.neural_gas = AMGraph(directional=False, graph=d_gas['neural_gas'])
    else:
        self.normaliser = None
        self.neural_gas = AMGraph(directional=False, graph=d_gas['neural_gas'])
def update_gas_error(self, bmu_key: NeuronKey, bmu_distance: float, ref_id: str) -> Tuple[bool, bool]:
    """Fold a bmu mapping distance into the gas-wide error statistics.

    Updates the exponential moving average and variance of the mapping
    error, records the event as an anomaly or a motif when it breaches the
    current thresholds, then recomputes both thresholds for the next update.

    :param bmu_key: key of the best matching neuron
    :param bmu_distance: distance between the training data and the bmu
    :param ref_id: reference id of the training data
    :return: (anomaly, motif) flags for this update
    """
    alpha = self.slow_alpha
    # exponential moving average of the error (seeded on the first update)
    if self.ema_error is None:
        self.ema_error = bmu_distance
    else:
        self.ema_error += (bmu_distance - self.ema_error) * alpha
    # moving variance uses the freshly updated mean
    deviation = bmu_distance - self.ema_error
    self.ema_variance += (deviation * deviation - self.ema_variance) * alpha
    # classify this update against the thresholds from the previous update
    is_anomaly = bmu_distance > self.anomaly_threshold
    is_motif = (not is_anomaly
                and self.motif_threshold is not None
                and bmu_distance <= self.motif_threshold)
    if is_anomaly or is_motif:
        report = {'bmu_key': bmu_key,
                  'mapped': self.update_id,
                  'error': bmu_distance,
                  'ref_id': ref_id}
        target = self.anomalies if is_anomaly else self.motifs
        target[str(self.update_id)] = report
    # refresh thresholds for the next training data
    stdev = self.ema_variance ** 0.5
    self.anomaly_threshold = self.ema_error + (self.anomaly_threshold_factor * stdev)
    self.motif_threshold = max(self.ema_error - (2.0 * stdev), 0.0)
    return is_anomaly, is_motif
def calc_communities(self):
    """Detect communities in the neural gas graph.

    Delegates to the graph's community detection over the 'NN' (nearest
    neighbour) edges, weighting by the inverse of the '_numeric' edge
    distance so that closer neurons bind more strongly.
    """
    self.neural_gas.calc_communities(community_edge_type='NN', weight_field='_numeric', inverse=True)
def add_neuron(self, graph: AMGraph, distance_threshold: float = 0.0) -> NeuronKey:
    """Create a new neuron that generalises *graph*.

    The neuron is added to the neural gas, becomes the last bmu and starts
    with full activation and the fast learning rate.

    :param graph: the graph the new neuron will represent
    :param distance_threshold: initial mapping threshold of the neuron
    :return: key of the newly created neuron
    """
    new_id = f'{self.next_neuron_id}'
    self.next_neuron_id += 1
    now = str(self.update_id)
    attributes = dict(domain=self.domain,
                      update_id=now,
                      threshold=distance_threshold,
                      ema_error=None,
                      generalised_graph=graph,
                      n_bmu=1,                # created as the bmu of the current data
                      last_bmu=now,
                      n_runner_up=0,
                      last_runner_up=None,
                      activation=1.0,         # full strength on creation
                      # learning rate to use on the next update
                      learn_rate=self.fast_alpha)
    self.last_bmu_key = self.neural_gas.set_node(node=('NEURON', new_id), **attributes)
    return self.last_bmu_key
def train(self,
          training_graph: AMGraph,
          ref_id: str,
          search_edge_types: Set[EdgeType],
          learn_edge_types: Set[EdgeType]) -> Por:
    """Train the gas on a single training graph.

    Finds the best matching unit (bmu), then either creates a new neuron
    (when the data is further away than the bmu's threshold) or updates the
    bmu and any close enough neighbours, and finally records anomaly/motif
    statistics for the update.

    :param training_graph: the graph to learn
    :param ref_id: reference id of the training data
    :param search_edge_types: edge types used when comparing graphs
    :param learn_edge_types: edge types that neurons are allowed to learn
    :return: a por dict documenting every decision taken in this step
    """
    # por captures the state before/after and what happened during training
    por: Por = {'fabric': self.fabric_name,
                'domain': self.domain,
                'ref_id': ref_id,
                'bmu_key': None,
                'bmu_distance': None,
                'bmu_distance_threshold': 0.0,
                'new_neuron_key': None,
                'nn_neurons': [],
                'anomaly': False,
                'motif': False,
                'ema_error': self.ema_error,
                'ema_variance': self.ema_variance,
                'anomaly_threshold': self.anomaly_threshold,
                'motif_threshold': self.motif_threshold,
                'deleted_neuron_key': None
                }
    self.update_id += 1
    self.updated = True
    # remember every edge type that has ever been searched on
    self.search_edge_types.update(search_edge_types)
    if self.normaliser is not None:
        # renormalise flags that this data stretched the known value ranges
        t_graph, renormalise = self.normaliser.normalise(graph=training_graph)
    else:
        t_graph = AMGraph(graph=training_graph)
        renormalise = False
    # if there are no neurons then add first one
    #
    if len(self.neural_gas.nodes) == 0:
        # neuron will have a distance threshold of 0.0 which will force the next training data point to be represented in another neuron
        #
        new_neuron_key = self.add_neuron(graph=t_graph, distance_threshold=0.0)
        por['new_neuron_key'] = new_neuron_key
    else:
        if renormalise:
            # renormalise all existing neurons - use comprehension to make it quicker
            #
            _ = [self.normaliser.renormalise(graph=self.neural_gas.nodes[neuron_key]['generalised_graph'], create_new=False)
                 for neuron_key in self.neural_gas.nodes]
        # calc the distance of the training graph to the existing neurons
        #
        distances = [(neuron_key,
                      self.neural_gas.nodes[neuron_key]['generalised_graph'].compare_graph(graph_to_compare=t_graph,
                                                                                           compare_edge_types=self.search_edge_types),
                      self.neural_gas.nodes[neuron_key]['n_bmu'])
                     for neuron_key in self.neural_gas.nodes]
        # sort in ascending order of distance and descending order of number of times bmu
        #
        distances.sort(key=lambda x: (x[1][0], -x[2]))
        # the bmu is the closest and thus the top of the list
        #
        bmu_key = distances[0][0]
        bmu_id = get_node_id(node_key=bmu_key)
        bmu_distance = distances[0][1][0]
        por['bmu_key'] = bmu_key
        por['bmu_distance'] = bmu_distance
        por['bmu_distance_threshold'] = self.neural_gas.nodes[bmu_key]['threshold']
        # if the distance is larger than the neuron's threshold then add a new neuron
        #
        if bmu_distance > self.neural_gas.nodes[bmu_key]['threshold']:
            # add new neuron
            # distance threshold is mid point between new neuron and bmu
            #
            distance_threshold = bmu_distance / 2.0
            new_neuron_key = self.add_neuron(graph=t_graph, distance_threshold=distance_threshold)
            new_neuron_id = get_node_id(node_key=new_neuron_key)
            por['new_neuron_key'] = new_neuron_key
            # connect the new neuron to the bmu
            #
            self.neural_gas.set_edge(triple=(bmu_id, ('NN', None, None), new_neuron_id), numeric=bmu_distance)
            # increase the distance threshold of the existing (bmu neuron) if required
            #
            if distance_threshold > self.neural_gas.nodes[bmu_key]['threshold']:
                self.neural_gas.nodes[bmu_key]['threshold'] = distance_threshold
            if self.delete_old_neurons:
                # get first neuron that has aged enough to be deleted
                #
                neuron_to_deactivate = []
                for neuron_key in self.neural_gas.nodes:
                    if neuron_key not in [new_neuron_key, bmu_key]:
                        # decay the activation with rate that depends on its current learn_rate and the slow_alpha
                        #
                        self.neural_gas.nodes[neuron_key]['activation'] -= (self.neural_gas.nodes[neuron_key]['activation'] * self.slow_alpha * self.neural_gas.nodes[neuron_key]['learn_rate'])
                        if self.neural_gas.nodes[neuron_key]['activation'] < self.prune_threshold:
                            neuron_to_deactivate.append(neuron_key)
                            # only need first 1 so break out of loop
                            #
                            break
                if len(neuron_to_deactivate) > 0:
                    self.neural_gas.remove_node(neuron_to_deactivate[0])
                    por['deleted_neuron_key'] = neuron_to_deactivate[0]
        else:
            # the data is close enough to the bmu to be mapped
            # so update the bmu neuron attributes
            #
            self.neural_gas.nodes[bmu_key]['n_bmu'] += 1
            # NOTE(review): stored as int here, but add_neuron stores last_bmu as str - confirm intended
            self.neural_gas.nodes[bmu_key]['last_bmu'] = self.update_id
            # a neuron's error for mapped data is the exponential moving average of the distance.
            #
            if self.neural_gas.nodes[bmu_key]['ema_error'] is None:
                self.neural_gas.nodes[bmu_key]['ema_error'] = bmu_distance
            else:
                self.neural_gas.nodes[bmu_key]['ema_error'] += ((bmu_distance - self.neural_gas.nodes[bmu_key]['ema_error']) * self.slow_alpha)
            # reduce the distance threshold towards the error average
            #
            self.neural_gas.nodes[bmu_key]['threshold'] += (self.neural_gas.nodes[bmu_key]['ema_error'] - self.neural_gas.nodes[bmu_key]['threshold']) * self.slow_alpha
            # learn the generalised graph
            #
            self.neural_gas.nodes[bmu_key]['generalised_graph'].learn_graph(graph_to_learn=t_graph,
                                                                            learn_rate=self.neural_gas.nodes[bmu_key]['learn_rate'],
                                                                            learn_edge_types=learn_edge_types,
                                                                            prune_threshold=self.prune_threshold,
                                                                            audit=self.audit)
            # reset the bmu activation to full strength
            #
            self.neural_gas.nodes[bmu_key]['activation'] = 1.0
            updated_neurons = set()
            updated_neurons.add(bmu_key)
            if len(distances) > 1:
                # walk the remaining neurons in ascending distance order and
                # update every one that is also within its own threshold
                nn_idx = 1
                finished = False
                while not finished:
                    nn_key = distances[nn_idx][0]
                    nn_distance = distances[nn_idx][1][0]
                    # if the neuron is close enough to the incoming data
                    #
                    if nn_distance < self.neural_gas.nodes[nn_key]['threshold']:
                        updated_neurons.add(nn_key)
                        por['nn_neurons'].append({'nn_distance': nn_distance, 'nn_key': nn_key, 'nn_distance_threshold': self.neural_gas.nodes[nn_key]['threshold']})
                        self.neural_gas.nodes[nn_key]['n_runner_up'] += 1
                        self.neural_gas.nodes[nn_key]['last_runner_up'] = self.update_id
                        # reset the neighbour activation to full strength
                        #
                        self.neural_gas.nodes[nn_key]['activation'] = 1.0
                        # the learning rate for a neighbour needs to be much less than the bmu - hence the product of learning rates and 0.1 factor
                        #
                        nn_learn_rate = self.neural_gas.nodes[bmu_key]['learn_rate'] * self.neural_gas.nodes[nn_key]['learn_rate'] * 0.1
                        # learn the generalised graph
                        #
                        self.neural_gas.nodes[nn_key]['generalised_graph'].learn_graph(graph_to_learn=t_graph,
                                                                                       learn_rate=nn_learn_rate,
                                                                                       learn_edge_types=learn_edge_types,
                                                                                       prune_threshold=self.prune_threshold,
                                                                                       audit=self.audit)
                        nn_idx += 1
                        if nn_idx >= len(distances):
                            finished = True
                    else:
                        # distances are sorted, so the first miss ends the walk
                        finished = True
            # recalculate the distances between updated neurons
            #
            triples_to_process = set()
            for neuron_key in updated_neurons:
                for triple_key in self.neural_gas.nodes[neuron_key]['_edges']:
                    if triple_key not in triples_to_process:
                        # the neighbour is whichever end of the edge is not this neuron
                        if self.neural_gas.edges[triple_key]['_source'] != neuron_key:
                            nn_key = self.neural_gas.edges[triple_key]['_source']
                        else:
                            nn_key = self.neural_gas.edges[triple_key]['_target']
                        distance = self.neural_gas.nodes[neuron_key]['generalised_graph'].compare_graph(graph_to_compare=self.neural_gas.nodes[nn_key]['generalised_graph'],
                                                                                                        compare_edge_types=self.search_edge_types)
                        self.neural_gas.edges[triple_key]['_numeric'] = distance[0]
                        triples_to_process.add(triple_key)
            # decay the learning rate so that this neuron learns more slowly the more it gets mapped too
            #
            self.neural_gas.nodes[bmu_key]['learn_rate'] -= self.neural_gas.nodes[bmu_key]['learn_rate'] * self.slow_alpha
        # fold this update's bmu distance into the gas-wide anomaly/motif stats
        anomaly, motif = self.update_gas_error(bmu_key=bmu_key, bmu_distance=bmu_distance, ref_id=ref_id)
        por['anomaly'] = anomaly
        por['motif'] = motif
    por['nos_neurons'] = len(self.neural_gas.nodes)
    return por
def query(self, query_graph, bmu_only: bool = True) -> Por:
if self.normaliser is not None:
q_graph, _ = self.normaliser.normalise(graph=query_graph)
else:
q_graph = query_graph
# get the types of edges to search for
#
search_edge_types = {q_graph.edges[triple_key]['_type'] for triple_key in q_graph.edges}
# calc the distance of the training graph to the existing neurons
#
distances = [(neuron_key,
self.neural_gas.nodes[neuron_key]['generalised_graph'].compare_graph(graph_to_compare=q_graph,
compare_edge_types=search_edge_types),
self.neural_gas.nodes[neuron_key]['n_bmu'],
self.neural_gas.nodes[neuron_key]['generalised_graph'],
self.neural_gas.nodes[neuron_key]['threshold'])
for neuron_key in self.neural_gas.nodes]
# sort in ascending order of distance and descending order of number of times bmu
#
distances.sort(key=lambda x: (x[1][0], -x[2]))
# get closest neuron and all | |
# Source artifact: repository qbox/k8s-policy (gh_stars: 0)
# Copyright (c) 2017 Tigera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import Queue
import requests
import sys
import simplejson as json
import time
from threading import Thread
from pycalico.datastore import DatastoreClient
from pycalico.datastore_datatypes import Rules, Rule
from handlers.network_policy import (add_update_network_policy,
delete_network_policy)
from handlers.namespace import add_update_namespace, delete_namespace
from handlers.pod import add_pod, update_pod, delete_pod
from constants import *
from version import VERSION
_log = logging.getLogger(__name__)
# Raised upon receiving an error from the Kubernetes API.
class KubernetesApiError(Exception):
    """Raised upon receiving an error from the Kubernetes API."""
    pass
class Controller(object):
def __init__(self):
    """Build the controller: event queue, API configuration, datastore
    client, handler table and resource-version bookkeeping."""
    # Queue to populate with events from API watches.
    self._event_queue = Queue.Queue(maxsize=MAX_QUEUE_SIZE)
    # Scheme, IP and port of the Kubernetes API.
    self.k8s_api = os.environ.get("K8S_API", DEFAULT_API)
    # Auth token to use when accessing the API.
    self.auth_token = os.environ.get("K8S_AUTH_TOKEN", read_token_file())
    _log.debug("Using auth token: %s", self.auth_token)
    # True if a CA cert has been mounted by Kubernetes.
    self.ca_crt_exists = os.path.exists(CA_CERT_PATH)
    # Client for accessing the Calico datastore.
    self._client = DatastoreClient()
    # Use this URL to get leader election status from the sidecar container.
    self._leader_election_url = os.environ.get("ELECTION_URL",
                                               "http://127.0.0.1:4040/")
    # Whether or not leader election is enabled. If set to False, this
    # policy controller will assume it is the only instance.
    elect = os.environ.get("LEADER_ELECTION", "false")
    self._leader_elect = elect.lower() == "true"
    # Keeps track of which handlers to execute for various events.
    self._handlers = {}
    # Register one handler per (resource type, event type) pair.
    for resource_type, event_type, handler in (
            (RESOURCE_TYPE_NETWORK_POLICY, TYPE_ADDED, add_update_network_policy),
            (RESOURCE_TYPE_NETWORK_POLICY, TYPE_MODIFIED, add_update_network_policy),
            (RESOURCE_TYPE_NETWORK_POLICY, TYPE_DELETED, delete_network_policy),
            (RESOURCE_TYPE_NAMESPACE, TYPE_ADDED, add_update_namespace),
            (RESOURCE_TYPE_NAMESPACE, TYPE_MODIFIED, add_update_namespace),
            (RESOURCE_TYPE_NAMESPACE, TYPE_DELETED, delete_namespace),
            (RESOURCE_TYPE_POD, TYPE_ADDED, add_pod),
            (RESOURCE_TYPE_POD, TYPE_MODIFIED, update_pod),
            (RESOURCE_TYPE_POD, TYPE_DELETED, delete_pod)):
        self.add_handler(resource_type, event_type, handler)
    # Keeps track of last received version for each resource type.
    self._last_resource_version = {}
def add_handler(self, resource_type, event_type, handler):
    """
    Registers an event handler for the given event type (ADD, DELETE)
    on the given resource type.
    :param resource_type: The type of resource that this handles.
    :param event_type: The type of event that this handles.
    :param handler: The callable to execute when events are received.
    :return None
    """
    _log.debug("Setting %s %s handler: %s",
               resource_type, event_type, handler)
    self._handlers[(resource_type, event_type)] = handler
def get_handler(self, resource_type, event_type):
    """
    Looks up the handler registered for the given event.
    :param resource_type: The type of resource that needs handling.
    :param event_type: The type of event that needs handling.
    :return: The registered handler callable.
    :raises KeyError: if no handler was registered for this combination.
    """
    lookup = (resource_type, event_type)
    _log.debug("Looking up handler for event: %s", lookup)
    return self._handlers[lookup]
def run(self):
    """
    Controller.run() is called at program init to spawn watch threads.
    Optionally waits for leader election, prepares the Calico policy
    tier and backstop policy, then loops reading events from the queue.
    """
    _log.info("Leader election enabled? %s", self._leader_elect)
    if self._leader_elect:
        # Wait until we've been elected leader to start.
        self._wait_for_leadership()
        self._start_leader_thread()
    # Remove old tier if it exists
    try:
        _log.debug("Attempting to remove old tier k8s-network-policy")
        self._client.delete_policy_tier("k8s-network-policy")
    except KeyError:
        # Tier was already gone - nothing to clean up.
        pass
    # Ensure the tier exists.
    metadata = {"order": NET_POL_TIER_ORDER}
    self._client.set_policy_tier_metadata("default", metadata)
    # Ensure the backstop policy exists. This policy forwards
    # any traffic to Kubernetes pods which doesn't match another policy
    # to the next-tier (i.e the per-namespace Profiles).
    selector = "has(%s)" % K8S_NAMESPACE_LABEL
    rules = Rules(inbound_rules=[Rule(action="next-tier")],
                  outbound_rules=[Rule(action="next-tier")])
    self._client.create_policy("default",
                               "k8s-policy-no-match",
                               selector,
                               order=NET_POL_NO_MATCH_ORDER,
                               rules=rules)
    # Read initial state from Kubernetes API.
    self.start_workers()
    # Loop and read updates from the queue (blocks forever).
    self.read_updates()
def _wait_for_leadership(self):
    """
    Blocks until this controller instance has been elected leader,
    polling the leader election sidecar once per second.
    """
    _log.info("Waiting for this controller to be elected leader")
    while True:
        try:
            if self._is_leader():
                # Successful response and we are the elected leader.
                _log.info("We have been elected leader")
                return
        except requests.exceptions.ConnectionError:
            # During startup, the leader election container
            # might not be up yet. Handle this case gracefully.
            _log.info("Waiting for leader election container")
        time.sleep(1)
def _start_leader_thread(self):
    """
    Spawns a daemon thread which periodically checks whether this
    controller is still the leader; the thread exits the process if not.
    """
    watcher = Thread(target=self._watch_leadership)
    watcher.daemon = True
    watcher.start()
    _log.info("Started leader election watcher")
def _watch_leadership(self):
    """
    Polls leader election status forever. If this policy controller is
    no longer the elected leader (or leadership cannot be verified),
    exits the process.
    """
    _log.info("Watching for leader election changes")
    while True:
        try:
            if self._is_leader():
                time.sleep(1)
                continue
        except Exception:
            _log.exception("Exception verifying leadership - exiting")
            os._exit(1)
        _log.warning("No longer the elected leader - exiting")
        os._exit(1)
def start_workers(self):
    """
    Spawns one daemon worker thread per watched Kubernetes API
    resource type (network policies, namespaces, pods).
    """
    for resource_type in (RESOURCE_TYPE_NETWORK_POLICY,
                          RESOURCE_TYPE_NAMESPACE,
                          RESOURCE_TYPE_POD):
        # Each worker syncs its resource type from the Kubernetes API.
        worker = Thread(target=self._manage_resource, args=(resource_type,))
        worker.daemon = True
        worker.start()
        _log.info("Started worker thread for: %s", resource_type)
def read_updates(self):
    """
    Reads from the update queue.
    An update on the queue must be a tuple of:
      (event_type, resource_type, resource)
    Where:
      - event_type: Either "ADDED", "MODIFIED", "DELETED", "ERROR"
      - resource_type: e.g "Namespace", "Pod", "NetworkPolicy"
      - resource: The parsed json resource from the API matching
        the given resource_type.
    """
    while True:
        # Pre-bind so the except clause can always log it safely.
        update = None
        try:
            # Wait for an update on the event queue.
            _log.debug("Reading from event queue")
            update = self._event_queue.get(block=True)
            event_type, resource_type, resource = update
            # We've received an update - process it.
            _log.debug("Read event: %s, %s, %s",
                       event_type,
                       resource_type,
                       json.dumps(resource, indent=2))
            self._process_update(event_type,
                                 resource_type,
                                 resource)
        except (KeyError, ValueError, TypeError):
            # KeyError: missing fields while processing the resource.
            # ValueError/TypeError: the queued item was not a 3-tuple.
            # (Unpacking raises ValueError/TypeError, not KeyError, so the
            # original only-KeyError clause let malformed updates kill the
            # loop despite claiming to handle them.)
            _log.exception("Invalid update: %s", update)
        finally:
            self._event_queue.task_done()
            # Log out when the queue is empty.
            if self._event_queue.empty():
                _log.debug("Emptied the event queue")
def _process_update(self, event_type, resource_type, resource):
    """
    Takes an event and updates our state accordingly by dispatching
    to the handler registered for (resource_type, event_type).
    """
    _log.debug("Processing '%s' for kind '%s'", event_type, resource_type)
    # The (namespace, name) pair is simply used for easy identification
    # in logs, etc.
    metadata = resource["metadata"]
    key = (metadata.get("namespace"), metadata["name"])
    # Call the right handler.
    try:
        handler = self.get_handler(resource_type, event_type)
    except KeyError:
        _log.warning("No %s handlers for: %s",
                     event_type, resource_type)
        return
    try:
        handler(resource)
        _log.info("Handled %s for %s: %s",
                  event_type, resource_type, key)
    except KeyError:
        _log.exception("Invalid %s: %s", resource_type,
                       json.dumps(resource, indent=2))
def _manage_resource(self, resource_type):
    """
    Routine for a worker thread. Syncs with API for the given resource
    and starts a watch. If an error occurs within the watch, will resync
    with the API and restart the watch.
    :param resource_type: the Kubernetes resource type this thread manages.
    """
    sync_needed = True
    while True:
        try:
            if sync_needed:
                # Sync existing resources for this type.
                self._sync_resources(resource_type)
            # There are many exception conditions below for which we would
            # need to sync again. Even though sync isn't needed in the
            # most mainline case - read timeout - we save some lines of
            # code by setting sync_needed True here, and resetting it below
            # in the cases where it isn't needed.
            sync_needed = True
            # Start a watch from the latest resource_version.
            self._watch_resource(resource_type)
        except requests.exceptions.ConnectTimeout as e:
            _log.warning("Connection attempt timed out: %s ...%s", resource_type, e)
        except requests.ConnectionError as e:
            if "Read timed out" in str(e):
                # The watch simply idled past its read timeout - reconnect
                # without a full resync.
                _log.debug("Normal read time out for %s", resource_type)
                sync_needed = False
            else:
                _log.warning("Connection error: %s ...%s", resource_type, e)
        except requests.exceptions.ChunkedEncodingError:
            _log.exception("Read error querying: %s", resource_type)
        except requests.HTTPError:
            _log.exception("HTTP error querying: %s", resource_type)
        except KubernetesApiError:
            _log.debug("Kubernetes API error managing %s", resource_type)
        except Queue.Full:
            _log.exception("Event queue full")
        except Exception:
            _log.exception("Unhandled exception killed %s manager", resource_type)
        finally:
            if sync_needed:
                # Sleep for a second so that we don't tight-loop.
                _log.info("Restarting watch on resource: %s",
                          resource_type)
                time.sleep(1)
            else:
                _log.debug("Restarting watch on resource: %s",
                           resource_type)
def _watch_resource(self, resource_type):
"""
Watch the given resource type starting at the given resource version.
Add any events to the event queue.
"""
path = WATCH_URLS[resource_type] % self.k8s_api
_log.debug("Starting watch on: %s", path)
while True:
# Attempt to stream API resources.
response = self._api_get(
path,
stream=True,
resource_version=self._last_resource_version[resource_type]
| |
# Source artifact: repository xuptWeCloud/WeCloud, file/views.py
from django.http import HttpResponse, JsonResponse, FileResponse
# Create your views here.
from rest_framework.views import APIView
from api.serializers import *
from api.models import *
import hashlib
from rest_framework.parsers import MultiPartParser
import hashlib
import datetime
import os
def getHash(f):
    """Return the hex MD5 digest of the contents of file-like object *f*.

    Reads in fixed-size binary chunks so arbitrarily large (or binary,
    newline-free) uploads are hashed in constant memory; the previous
    readline()-based loop would pull a newline-free file into memory as
    one giant "line".

    :param f: a binary file-like object open for reading
    :return: hex string of the MD5 digest
    """
    digest = hashlib.md5()
    chunk = f.read(65536)
    while chunk:
        digest.update(chunk)
        chunk = f.read(65536)
    return digest.hexdigest()
# Create your views here.
class BaseResponse(object):
    """Uniform JSON response payload: a status code, a message and data."""

    def __init__(self):
        # Defaults represent a successful, empty response.
        self.code = 200
        self.msg = ""
        self.data = None

    @property
    def dict(self):
        """Return the response attributes as a plain dict (for JsonResponse)."""
        return self.__dict__
class UpFile(APIView):
    """Upload endpoint.

    GET  - report whether the user already owns a file named *filename* of
           the given *type* (code "201"/'no' when it exists, "200"/'ok'
           otherwise).
    POST - store each uploaded file, deduplicating identical content by MD5
           (a repeat upload reuses the stored path instead of the new blob).

    The per-type branches of the original implementation were byte-identical
    apart from the model class, so both methods are table driven now.
    """
    parser_classes = (MultiPartParser,)

    # Models consulted by GET when checking for an existing (user, filename).
    _CHECK_MODELS = {'img': Img, 'doc': Doc, 'radio': Radio,
                     'video': Video, 'coffer': Coffer}
    # Models written by POST; 'coffer' uploads were already disabled
    # (commented out) in the original code, so it is deliberately absent.
    _UPLOAD_MODELS = {'img': Img, 'doc': Doc, 'radio': Radio, 'video': Video}

    def get(self, request):
        """Check for a duplicate filename; any failure yields code "201"."""
        response = BaseResponse()
        try:
            user_id = request.session['userid']
            filename = request.query_params.dict()["filename"]
            response.code = "200"
            response.msg = "ok"
            if user_id:
                type = request.query_params.dict()["type"]
                model = self._CHECK_MODELS.get(type)
                # An existing row with this user and filename means "taken".
                if model is not None and model.objects.filter(user_id=user_id, filename=filename):
                    response.code = "201"
                    response.msg = 'no'
            return JsonResponse(response.dict)
        except Exception as e:
            print(e)
            response.msg = 'no'
            response.code = '201'
            response.data = ""
            return JsonResponse(response.dict)

    def post(self, request):
        """Store the uploaded file(s), deduplicating content by MD5."""
        response = BaseResponse()
        try:
            type = request.data.get("type")
            user_id = request.session['userid']
            filename = request.data.get("filename")
            file = request.FILES.getlist("file")
            model = self._UPLOAD_MODELS.get(type)
            for afile in file:
                md5 = getHash(afile)
                # Assume failure; overwritten below on success.
                response.code = "201"
                response.msg = "no"
                if model is None:
                    # Unknown (or disabled) type: keep the "201"/'no' response.
                    continue
                existing = model.objects.filter(filename=filename, md5_id=md5)
                if existing.count():
                    # Same name + content already stored: reuse the stored path.
                    # NOTE(review): size=len(file) stores the number of uploaded
                    # files, not the byte size - kept as-is, verify intent.
                    model.objects.create(filename=filename, md5_id=md5, user_id=user_id,
                                         path=existing.first().path, type=type,
                                         size=len(file),
                                         date=datetime.datetime.now())
                else:
                    Md5.objects.create(md5=md5, filename=filename)
                    # NOTE(review): request.FILES.get('file') is always the FIRST
                    # upload, even inside this loop - kept as-is, verify intent.
                    model.objects.create(filename=filename, md5_id=md5, user_id=user_id,
                                         path=request.FILES.get('file'), type=type,
                                         size=len(file),
                                         date=datetime.datetime.now())
                response.code = "200"
                response.msg = "ok"
                response.data = "null"
            return JsonResponse(response.dict)
        except Exception as e:
            print(e)
            response.msg = 'no'
            response.code = '201'
            response.data = "null"
            return JsonResponse(response.dict)
class FileDownload(APIView):
    """Download endpoint: streams the stored file for (user, filename, type).

    The five per-type branches of the original were identical except for the
    model class, so the lookup is table driven now.
    """

    # 'trash' is downloadable; 'coffer' was already disabled (commented out)
    # in the original implementation, so it is deliberately absent.
    _MODELS = {'img': Img, 'doc': Doc, 'radio': Radio,
               'video': Video, 'trash': Trash}

    def post(self, request):
        """Return the file as an octet-stream attachment, or a "201"/'no'
        JSON payload when the file (or type) cannot be resolved."""
        response = BaseResponse()
        type = request.data.get("type")
        user_id = request.session['userid']
        filename = request.data.get("filename")
        try:
            # BUG FIX: an unknown type previously fell off the end of the
            # try block and the view returned None (a server error). The
            # KeyError raised here routes it to the error response instead.
            model = self._MODELS[type]
            file_path = model.objects.filter(user_id=user_id, filename=filename).first().path
            # FileResponse takes ownership of the handle and closes it
            # once the response has been streamed.
            file = open(str(file_path), 'rb')
            file_response = FileResponse(file)
            file_response['Content-Type'] = 'application/octet-stream'
            file_response['Content-Disposition'] = filename
            file_response['code'] = "200"
            file_response['msg'] = "ok"
            return file_response
        except Exception as e:
            print(e)
            response.msg = 'no'
            response.code = '201'
            response.data = "null"
            return JsonResponse(response.dict)
class GetFileByTime(APIView):
    """List a user's files of a given type, newest first.

    The per-type branches of the original were identical except for the
    model/serializer pair, so the lookup is table driven now.
    """

    _LISTINGS = {'img': (Img, ImgSerializer),
                 'doc': (Doc, DocSerializer),
                 'radio': (Radio, RadioSerializer),
                 'video': (Video, VideoSerializer),
                 'coffer': (Coffer, CofferSerializer),
                 'trash': (Trash, TrashSerializer)}

    # media types aggregated by type == "all", in their original order
    _ALL_TYPES = ('img', 'doc', 'radio', 'video')

    def get(self, request):
        """Return the serialised file lists for the requested *type*
        ("all" aggregates img/doc/radio/video); failures yield "201"."""
        response = BaseResponse()
        try:
            data = []
            user_id = request.session['userid']
            if user_id:
                type = request.query_params.dict()["type"]
                if type == "all":
                    # One serialised, date-descending list per media type.
                    # BUG FIX: the original compared the BUILTIN ``all`` to {}
                    # here ("if all == {}"), which is always False - the dead
                    # branch is removed; data is always returned, as before.
                    for media in self._ALL_TYPES:
                        model, serializer = self._LISTINGS[media]
                        queryset = model.objects.filter(user_id=user_id).order_by('-date')
                        data.append(serializer(queryset, many=True).data)
                    response.data = data
                elif type in self._LISTINGS:
                    model, serializer = self._LISTINGS[type]
                    queryset = model.objects.filter(user_id=user_id).order_by('-date')
                    data.append(serializer(queryset, many=True).data)
                    response.data = data
            response.code = "200"
            response.msg = "ok"
            return JsonResponse(response.dict)
        except Exception as e:
            print(e)
            response.code = "201"
            response.msg = "no"
            response.data = "null"
            return JsonResponse(response.dict)
class InsertCoffer(APIView):
    """Move a file record of the given type into the user's Coffer (safe box).

    GET params: type (img/doc/radio/video/note), filename.
    Responds 200 with the coffer listing on success, 201 on any failure.
    """
    def get(self, request):
        response = BaseResponse()
        try:
            params = request.query_params.dict()
            type = params["type"]
            user_id = request.session['userid']
            filename = params["filename"]
            data = []
            # Dispatch table replaces five copy-pasted branches.
            source_models = {"img": Img, "doc": Doc, "radio": Radio,
                             "video": Video, "note": Note}
            if user_id:
                model = source_models.get(type)
                if model is not None:
                    # One query instead of three identical .first() lookups.
                    src = model.objects.filter(filename=filename, user_id=user_id).first()
                    # If no record matches, src is None and the AttributeError
                    # below is caught by the except clause, yielding code 201 —
                    # same observable behavior as the original.
                    Coffer.objects.create(size=src.size, user_id=user_id, path=src.path,
                                          filename=filename, md5_id=src.md5_id,
                                          date=datetime.datetime.now(), type=type)
                    # NOTE(review): lists every user's coffer rows, not only this
                    # user's — preserved from the original; verify intent.
                    coffer_list = Coffer.objects.all()
                    coffer_list = CofferSerializer(coffer_list, many=True)
                    data.append(coffer_list.data)
                    response.data = data
            response.code = "200"
            response.msg = "ok"
            return JsonResponse(response.dict)
        except Exception as e:
            print(e)
            response.code = "201"
            response.msg = "no"
            return JsonResponse(response.dict)
class Restore(APIView):
    """Restore a file by deleting its matching record from the Coffer.

    GET params: type, filename; user is taken from the session.
    """
    def get(self, request):
        response = BaseResponse()
        try:
            query = request.query_params.dict()
            file_type = query["type"]
            user_id = request.session['userid']
            filename = query["filename"]
            Coffer.objects.filter(type=file_type, user_id=user_id, filename=filename).delete()
            response.code, response.msg = "200", "ok"
        except Exception as e:
            print(e)
            response.code, response.msg = "201", "no"
        return JsonResponse(response.dict)
class delNote(APIView):
    """Delete a note by file_id for the logged-in user and return the note list."""
    def get(self, request):
        response = BaseResponse()
        data = []
        try:
            # Session read moved inside try: a missing login now yields the
            # regular 201 response instead of an unhandled KeyError.
            user_id = request.session['userid']
            file_id = request.query_params.dict()["file_id"]
            Note.objects.filter(file_id=file_id, user_id=user_id).delete()
            # NOTE(review): lists all users' notes (unchanged from original) — verify.
            note_list = Note.objects.all()
            # Bug fix: many=True is required to serialize a queryset; the
            # original passed the queryset as a single object.
            note_list = NoteSerializer(note_list, many=True)
            data.append(note_list.data)
            response.data = data
            response.code = "200"
            response.msg = "ok"
            return JsonResponse(response.dict)
        except Exception as e:
            print(e)
            response.code = "201"
            response.msg = "no"
            return JsonResponse(response.dict)
class GetList(APIView):
    """Return every note belonging to the logged-in user."""
    def get(self, request):
        response = BaseResponse()
        data = []
        try:
            # Session read moved inside try so a missing login returns 201.
            user_id = request.session['userid']
            # Bug fix: filter() needs a keyword argument; the original
            # Note.objects.filter(user_id) passed the id positionally, which
            # raises a TypeError for every request.
            note_list = Note.objects.filter(user_id=user_id)
            # Bug fix: many=True is required to serialize a queryset.
            note_list = NoteSerializer(note_list, many=True)
            data.append(note_list.data)
            response.data = data
            response.code = "200"
            response.msg = "ok"
            return JsonResponse(response.dict)
        except Exception as e:
            print(e)
            response.code = "201"
            response.msg = "no"
            return JsonResponse(response.dict)
class InsertNote(APIView):
    """Create or update a note depending on the 'need' query parameter.

    GET params: file_id, title, content, need ('update' or 'insert').
    """
    def get(self, request):
        response = BaseResponse()
        data = []
        try:
            user_id = request.session['userid']
            params = request.query_params.dict()
            file_id = params["file_id"]
            title = params["title"]
            content = params["content"]
            need = params["need"]
            if need == 'update':
                Note.objects.filter(file_id=file_id, user_id=user_id).update(title=title, content=content)
            elif need == 'insert':
                # Bug fix: the timestamp keyword was misspelled 'data='; every
                # other model call in this file uses 'date='.
                # TODO(review): confirm the Note model field is named 'date'.
                Note.objects.create(file_id=file_id, title=title, content=content, user_id=user_id,
                                    date=datetime.datetime.now())
            # NOTE(review): returns all users' notes (unchanged from original).
            note_list = Note.objects.all()
            # Bug fix: many=True is required to serialize a queryset.
            note_list = NoteSerializer(note_list, many=True)
            data.append(note_list.data)
            response.data = data
            response.code = "200"
            response.msg = "ok"
            return JsonResponse(response.dict)
        except Exception as e:
            print(e)
            response.code = "201"
            response.msg = "no"
            return JsonResponse(response.dict)
class GetNote(APIView):
def get(self, request):
response = BaseResponse()
data | |
#Reader for the coco panoptic data set for pointer based image segmentation
import numpy as np
import os
import scipy.misc as misc
import random
import cv2
import json
import threading
############################################################################################################
def rgb2id(color):  # Convert annotation map from 3 channel RGB to instance
    """Collapse an RGB color into a single integer id: R + 256*G + 256^2*B.

    Accepts either a single (r, g, b) triple or an HxWx3 annotation map;
    uint8 maps are widened to uint32 first so the sum cannot overflow.
    """
    if isinstance(color, np.ndarray) and color.ndim == 3:
        work = color.astype(np.uint32) if color.dtype == np.uint8 else color
        return work[:, :, 0] + work[:, :, 1] * 256 + work[:, :, 2] * 65536
    return color[0] + color[1] * 256 + color[2] * 65536
#########################################################################################################################
#########################################################################################################################
class Reader:
# Initiate reader and define the main parameters for the data reader
def __init__(self, ImageDir="/media/sagi/9be0bc81-09a7-43be-856a-45a5ab241d90/Data_zoo/COCO/train2017",AnnotationDir="/media/sagi/9be0bc81-09a7-43be-856a-45a5ab241d90/Data_zoo/COCO/COCO_panoptic/panoptic_train2017/panoptic_train2017", DataFile="/media/sagi/9be0bc81-09a7-43be-856a-45a5ab241d90/Data_zoo/COCO/COCO_panoptic/panoptic_train2017.json",MaxBatchSize=100,MinSize=250,MaxSize=800,MaxPixels=800*800*5, AnnotationFileType="png", ImageFileType="jpg",UnlabeledTag=0,Suffle=True,TrainingMode=True):
self.ImageDir=ImageDir # Image dir
self.AnnotationDir=AnnotationDir # File containing image annotation
self.MaxBatchSize=MaxBatchSize # Max number of image in batch
self.MinSize=MinSize # Min image width and hight in pixels
self.MaxSize=MaxSize #Max image width and hight in pixels
self.MaxPixels=MaxPixels # Max number of pixel in all the batch (reduce to solve oom out of memory issues)
self.AnnotationFileType=AnnotationFileType # What is the the type (ending) of the annotation files
self.ImageFileType=ImageFileType # What is the the type (ending) of the image files
self.DataFile=DataFile # Json File that contain data on the annotation of each image
self.UnlabeledTag=UnlabeledTag # Value of unlabled region in the annotation map (usually 0)
self.ReadStuff = True # Read things that are not instace object (like sky or grass)
self.SplitThings = False#True # Split instance of things (object) to connected component region and use each connected region as an instance
self.SplitStuff = True # Split instance of things (object) to connected component region and use each connected region as instance
self.SplitCrowd = True # Split areas marked as Crowds using connected componennt
self.IgnoreCrowds = True # Ignore areas marked as crowd
self.PickBySize = True # Pick instances of with probablity proportional to their sizes
self.StuffAreaFactor=0.225 # Since we pick segments according to their size stuf segments (ground, sky) will have higher probability to be chosen compare to things (objets) this factor balance this
self.MinSegSize=100 # Ignore segments which are smaller then this size in pixel
self.Epoch = 0 # Training Epoch
self.itr = 0 # Training iteratation
self.suffle=Suffle # Suffle list of file
#........................Read data file................................................................................................................
with open(DataFile) as json_file:
self.AnnData=json.load(json_file)
#-------------------Get All files in folder--------------------------------------------------------------------------------------
self.FileList=[]
for FileName in os.listdir(AnnotationDir):
if AnnotationFileType in FileName:
self.FileList.append(FileName)
if self.suffle:
random.shuffle(self.FileList)
if TrainingMode: self.StartLoadBatch()
##############################################################################################################################################
#Get annotation data for specific nmage from the json file
def GetAnnnotationData(self, AnnFileName):
for item in self.AnnData['annotations']: # Get Annotation Data
if (item["file_name"] == AnnFileName):
return(item['segments_info'])
############################################################################################################################################
#Get information for specific catagory/Class id
def GetCategoryData(self,ID):
for item in self.AnnData['categories']:
if item["id"]==ID:
return item["name"],item["isthing"]
##########################################################################################################################################3333
#Split binary mask correspond to a singele segment into connected components
def GetConnectedSegment(self, Seg):
[NumCCmp, CCmpMask, CCompBB, CCmpCntr] = cv2.connectedComponentsWithStats(Seg.astype(np.uint8)) # apply connected component
Mask=np.zeros([NumCCmp,Seg.shape[0],Seg.shape[1]],dtype=bool)
BBox=np.zeros([NumCCmp,4])
Sz=np.zeros([NumCCmp],np.uint32)
for i in range(1,NumCCmp):
Mask[i-1] = (CCmpMask == i)
BBox[i-1] = CCompBB[i][:4]
Sz[i-1] = CCompBB[i][4] #segment Size
return Mask,BBox,Sz,NumCCmp-1
#################################################################################################################################################
# Pick and return random segment from the list and remove it from the segment list
def PickRandomSegment(self,Sgs,SumAreas):
if self.PickBySize: # Pick random segment with probability proportional to size
r = np.random.randint(SumAreas) + 1
TotAreas=0
for ind in range(Sgs.__len__()):
TotAreas+=Sgs[ind]['Area']
if TotAreas>=r:
break
else: ind=np.random.randint(len(Sgs)) #Pick Random segment with equal probability
# print("ind" + str(ind))
SelectedSg=Sgs.pop(ind)
SumAreas-=SelectedSg["Area"]
return SelectedSg,SumAreas
##########################################################################################################################
# Pick set of segments from the list and generate random ROI map by taking the inverse of the region define by these segments area
def GenerateRandomROIMask(self, Sgs, SumAreas):
ROI = np.ones(Sgs[0]["Mask"].shape)
if SumAreas<=0 and np.random.randint(6)==0: return ROI
r = np.random.randint(SumAreas) + 1
while (SumAreas>r):
SumAreasOld=SumAreas
SelectedSg, SumAreas=self.PickRandomSegment( Sgs, SumAreas)
# misc.imshow(SelectedSg["Mask"].astype(float))
if SumAreas>r:
ROI[SelectedSg["Mask"]]=0
# misc.imshow(ROI.astype(float))
else:
if np.random.randint(SumAreas,SumAreasOld)>r:# and (SumAreas>1000):
ROI[SelectedSg["Mask"]] = 0
else:
Sgs.append(SelectedSg)
return(ROI)
#############################################################################################################################
############################################################################################################################
#Pick random point from segment given as a binary mask
def PickRandomPointInSegment(self,Seg,ErodeMask=10):
x0 = int(np.floor(Seg["BBox"][0])) # Bounding box x position
Wbox = int(np.floor(Seg["BBox"][2])) # Bounding box width
y0 = int(np.floor(Seg["BBox"][1])) # Bounding box y position
Hbox = int(np.floor(Seg["BBox"][3])) # Bounding box height
if ErodeMask:
Msk = cv2.erode(Seg["Mask"].astype(np.uint8), np.ones((3, 3), np.uint8), iterations=ErodeMask)
if Msk.sum()==0: Msk=Seg["Mask"]
else:
Msk = Seg["Mask"]
while(True):
x = np.random.randint(Wbox) + x0
y = np.random.randint(Hbox) + y0
if (Msk[y,x])==1:
return x,y
##############################################################################################################################
# Display loaded data on screen (for debuging)
    def DisplayTrainExample(self,Img2,ROI2,Segment2,SelectedPoint2):
        """Show the image, ROI, segment mask and selected point on screen (debug only).

        Works on copies so the caller's arrays are not modified.
        NOTE(review): scipy.misc.imshow was removed in SciPy >= 1.2, so this
        helper only runs on old SciPy installs — confirm the pinned version.
        """
        Img=Img2.copy()
        ROI=ROI2.copy()
        Segment=Segment2.copy()
        SelectedPoint=SelectedPoint2.copy()
        misc.imshow(Img)
        # Thicken the single-pixel point so it is visible in the overlay.
        SelectedPoint = cv2.dilate(SelectedPoint.astype(np.uint8), np.ones((3, 3), np.uint8), iterations=1)
        # NOTE(review): boolean fancy indexing returns a copy, so this line
        # has no effect on Img; the channel-wise lines below do the marking.
        Img[SelectedPoint][:]=[255,0,0]
        # Paint the selected point red: set the R channel, clear G and B there.
        Img[:, :, 0] = SelectedPoint.astype(np.uint8)*255+ (1-SelectedPoint.astype(np.uint8))*Img[:, :, 0]
        Img[:, :, 1] *= 1-SelectedPoint.astype(np.uint8)
        Img[:, :, 2] *= 1-SelectedPoint.astype(np.uint8)
        # Zero the red channel where the ROI extends beyond the segment.
        Img[ :, :, 0] *= 1-(ROI.astype(np.uint8)-Segment.astype(np.uint8))
        #Img[:, :, 1] += ROI.astype(np.uint8)*40
        # Zero the blue channel inside the segment so it stands out.
        Img[ :, :, 2] *= 1 - Segment.astype(np.uint8)
        # misc.imshow(Img)
        #print(ROI.mean())
        # Force at least one zero pixel so imshow's scaling shows contrast.
        ROI[0,0]=0
        misc.imshow(ROI.astype(float))
        misc.imshow( Segment.astype(float))
        misc.imshow(SelectedPoint.astype(float))
        misc.imshow(Img)
#############################################################################################################################
# Crop and resize image and mask and ROI to feet batch size
def CropResize(self,Img, Mask,bbox,ROImask,Px,Py,Hb,Wb):
# ========================resize image if it too small to the batch size==================================================================================
[h, w, d] = Img.shape
Rs = np.max((Hb / h, Wb / w))
Wbox = int(np.floor(bbox[2])) # Segment Bounding box width
Hbox = int(np.floor(bbox[3])) # Segment Bounding box height
Bs = np.min((Hb / Hbox, Wb / Wbox))
if Rs > 1 or Bs<1 or np.random.rand()<0.3: # Resize image and mask to batch size if mask is smaller then batch or if segment bounding box larger then batch image size
h = int(np.max((h * Rs, Hb)))
w = int(np.max((w * Rs, Wb)))
Img = cv2.resize(Img, dsize=(w, h), interpolation=cv2.INTER_LINEAR)
Mask = cv2.resize(Mask.astype(float), dsize=(w, h), interpolation=cv2.INTER_NEAREST)
ROImask = cv2.resize(ROImask.astype(float), dsize=(w, h), interpolation=cv2.INTER_NEAREST)
bbox = (np.float32(bbox) * Rs.astype(np.float)).astype(np.int64)
Px = int(float(Px) * Rs)
Py = int(float(Py) * Rs)
if Px>=w:
Px=w-1
if Py>=h:
Py=h-1
# =======================Crop image to fit batch size===================================================================================
x1 = int(np.floor(bbox[0])) # Bounding box x position
Wbox = int(np.floor(bbox[2])) # Bounding box width
y1 = int(np.floor(bbox[1])) # Bounding box y position
Hbox = int(np.floor(bbox[3])) # Bounding box height
if Wb > Wbox:
Xmax = np.min((w - Wb, x1))
Xmin = np.max((0, x1 - (Wb - Wbox)-1))
else:
Xmin = x1
Xmax = np.min((w - Wb, x1 + (Wbox - Wb)+1))
if Hb > Hbox:
Ymax = np.min((h - Hb, y1))
Ymin = np.max((0, y1 - (Hb - Hbox)-1))
else:
Ymin = y1
Ymax = np.min((h - Hb, y1 + (Hbox - Hb)+1))
if Ymax<=Ymin: y0=Ymin
else:
while(True):
y0 = np.random.randint(low=Ymin, high=Ymax + 1)
if (y0 <= Py) and Py < (y0 + Hb): break
if Xmax<=Xmin: x0=Xmin
else:
while (True):
x0 = np.random.randint(low=Xmin, high=Xmax + 1)
if (x0 <= Px) and Px < (x0 + Wb): break
# Img[:,:,1]*=Mask
# misc.imshow(Img)
Px-=x0
Py-=y0
Img = Img[y0:y0 + Hb, x0:x0 + Wb, :]
Mask = Mask[y0:y0 + Hb, x0:x0 + Wb]
ROImask = ROImask[y0:y0 + Hb, x0:x0 + Wb]
#------------------------------------------Verify shape match the batch shape----------------------------------------------------------------------------------------
if not (Img.shape[0] == Hb and Img.shape[1] == Wb): Img = cv2.resize(Img, dsize=(Wb, Hb),interpolation=cv2.INTER_LINEAR)
if not (Mask.shape[0] == Hb and Mask.shape[1] == Wb):Mask = cv2.resize(Mask.astype(float), dsize=(Wb, Hb), interpolation=cv2.INTER_NEAREST)
if not (ROImask.shape[0] == Hb and ROImask.shape[1] == Wb): ROImask = cv2.resize(ROImask.astype(float), dsize=(Wb, Hb), interpolation=cv2.INTER_NEAREST)
#-----------------------------------------------------------------------------------------------------------------------------------
return Img,Mask,ROImask,Px,Py
# misc.imshow(Img)
######################################################################################################
#Generate list of all segments in the image
# Given the annotation map a json data file create list of all segments and instance with info on each segment
#--------------------------Generate list of all segments--------------------------------------------------------------------------------
def GeneratListOfAllSegments(self,Ann,Ann_name,AddUnLabeled=False,IgnoreSmallSeg=True):
AnnList = self.GetAnnnotationData(Ann_name)
Sgs = [] # List of segments and their info
SumAreas=0 # Sum areas of all segments up to image
for an in AnnList:
an["name"], an["isthing"] = self.GetCategoryData(an["category_id"])
if (an["iscrowd"] and self.IgnoreCrowds) or (not an["isthing"] and not self.ReadStuff):
Ann[Ann == an['id']] = self.UnlabeledTag
continue
if (an["isthing"] and self.SplitThings) or (an["isthing"]==False and self.SplitStuff) or (an["iscrowd"] and self.SplitCrowd): #Things are objects that have instances
TMask, TBBox, TSz, TNm = self.GetConnectedSegment(Ann == an['id']) # Split to connected components
for i in range(TNm):
seg={}
seg["Mask"]=TMask[i]
seg["BBox"]=TBBox[i]
seg["Area"]=TSz[i]
if (not an["isthing"]): seg["Area"]*=self.StuffAreaFactor
if seg["Area"] < self.MinSegSize and IgnoreSmallSeg:
Ann[Ann == an['id']] = self.UnlabeledTag
continue
seg["NumParts"] =TNm
seg["IsSplit"]=TNm>1
seg["IsThing"]=an["isthing"]
seg["Name"]=an["name"]
seg["IsCrowd"]=an["iscrowd"]
seg["CatId"]=an["category_id"]
seg["IsLabeled"] = True
SumAreas+=seg["Area"]
Sgs.append(seg)
else: # none object classes such as sky
seg = {}
seg["Mask"] = (Ann == an['id'])
seg["BBox"] = an["bbox"]
seg["Area"] = an["area"]
if (not an["isthing"]): seg["Area"] *= self.StuffAreaFactor
if seg["Area"] < self.MinSegSize and | |
from __future__ import print_function, absolute_import
from __future__ import with_statement, nested_scopes, division, generators
import os
import gc
import time
import numpy as np
import pandas as pd
import obspy
import scipy
from obspy.core import UTCDateTime
from obspy.core import AttribDict
from obspy.taup import TauPyModel
from obspy.signal.util import next_pow_2, util_geo_km
from obspy.signal.util import util_geo_km
from obspy.signal.headers import clibsignal
from obspy.signal.invsim import cosine_taper
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
import matplotlib.cm as cm
from itertools import zip_longest
try:
import cPickle
except ImportError:
import pickle as cPickle
# from memory_profiler import profile
from gpar.util.rolling_window import rolling_window
PI = np.pi
deg2rad = PI/180.0
rad2deg = 180.0/PI
deg2km = 2*PI*6371.0/360.0
import gpar
from gpar.header import clibarray
##### Beamforming functions and class ######
class Array(object):
    """
    Seismic array container.

    Builds the station geometry relative to a reference point and creates one
    Earthquake (or Doublet) object per row of the event DataFrame, then
    exposes array-processing methods (beamforming, slowness scans, array
    response function) that loop over those events.
    """
    def __init__(self, arrayName, refPoint, eqDF, staDf,
                 coordsys='lonlat', beamphase='PKiKP',
                 isDoublet=False,
                 phase_list=['P', 'PP', 'PcP', 'ScP', 'PKiKP', 'SP', 'ScS'],
                 **kwargs):
        """
        Parameters:
            arrayName: str, name of the array.
            refPoint: reference point, (lat, lon) for 'lonlat' or (x, y) for 'xy'.
            eqDF: DataFrame with one row per event (or doublet pair).
            staDf: DataFrame of stations (LAT/LON or X/Y columns).
            coordsys: 'lonlat' or 'xy'.
            beamphase: phase name used for beamforming.
            isDoublet: bool, build Doublet objects instead of Earthquakes.
            phase_list: phases for travel-time calculations.
        """
        self.name = arrayName
        self.refPoint = refPoint
        self.coordsys = coordsys
        self.getGeometry(staDf, refPoint, coordsys=coordsys)
        if not isDoublet:
            self.events = [0] * len(eqDF)
            for ind, row in eqDF.iterrows():
                self.events[ind] = Earthquake(self, row, beamphase=beamphase, phase_list=phase_list)
        else:
            self.doublet = [0] * len(eqDF)
            for ind, row in eqDF.iterrows():
                self.doublet[ind] = Doublet(self, row, tphase=beamphase, phase_list=phase_list, **kwargs)

    def getGeometry(self, staDF, refPoint, coordsys='lonlat'):
        """
        Compute station offsets relative to refPoint and store self.geometry.

        Adds X/Y (km) and, for 'lonlat', DEL/DIS and RX/RY (degree offsets)
        columns to the station DataFrame used for waveform shifting.
        """
        geometry = pd.DataFrame()
        if coordsys == 'lonlat':
            dis_in_degree, baz, az = gpar.getdata.calc_Dist_Azi(staDF.LAT, staDF.LON, refPoint[0], refPoint[1])
            dis_in_km = dis_in_degree * deg2km
            staDF['DEL'] = dis_in_degree
            staDF['DIS'] = dis_in_km
            # Offsets in degrees (RX/RY) and kilometers (X/Y) along azimuth.
            RX = dis_in_degree * np.sin(az * deg2rad)
            RY = dis_in_degree * np.cos(az * deg2rad)
            X = dis_in_km * np.sin(az * deg2rad)
            Y = dis_in_km * np.cos(az * deg2rad)
            staDF['X'] = X
            staDF['Y'] = Y
            staDF['RX'] = RX
            staDF['RY'] = RY
            geometry = staDF
        elif coordsys == 'xy':
            X = staDF.X - refPoint[0]
            Y = staDF.Y - refPoint[1]
            staDF['X'] = X
            staDF['Y'] = Y
            geometry = staDF
        else:
            # Bug fix: 'msg' was called as a function (msg('...')) before being
            # defined; assign the message string and log it instead.
            msg = 'Not a valid option, select from lonlat or xy'
            gpar.log(__name__, msg, level='error', pri=True)
        self.geometry = geometry

    def calARF(self, tsx, tsy,
               freq=1.0, sll_x=-15, sll_y=-15,
               sl_s=0.1, grdpts_x=301, grdpts_y=301,
               ):
        '''
        Calculate the array response function for one target slowness/frequency.
        Parameters:
            tsx: float, target x slowness, s/deg
            tsy: float, target y slowness, s/deg
            freq: float, target frequency in Hz
            sll_x: float, minimum x slowness, s/deg
            sll_y: float, minimum y slowness, s/deg
            sl_s: float, increment in slowness, s/deg
            grdpts_x: int, total points in x slowness
            grdpts_y: int, total points in y slowness
        Stores a dict (grid parameters plus the dB response grid) in self.arf.
        '''
        sx = sll_x + np.arange(grdpts_x) * sl_s
        sy = sll_y + np.arange(grdpts_y) * sl_s
        delta_x = sx - tsx
        delta_y = sy - tsy
        # Per-station time shifts for every slowness offset.
        mx = np.outer(self.geometry['RX'], delta_x)
        my = np.outer(self.geometry['RY'], delta_y)
        timeTable = np.require(mx[:, :, np.newaxis].repeat(grdpts_y, axis=2) +
                               my[:, np.newaxis, :].repeat(grdpts_x, axis=1))
        # Mean phasor over stations; power in dB (0 dB = perfect coherence).
        tcos = np.mean(np.cos(2.0 * PI * freq * timeTable), axis=0)
        tsin = np.mean(np.sin(2.0 * PI * freq * timeTable), axis=0)
        val = 10.0 * np.log10(tcos ** 2 + tsin ** 2)
        arf = {'tsx': tsx, 'tsy': tsy, 'freq': freq,
               'sll_x': sll_x, 'sll_y': sll_y, 'sl_s': sl_s,
               'grdpts_x': grdpts_x, 'grdpts_y': grdpts_y,
               'arf': val}
        self.arf = arf

    def plotARF(self):
        """Plot the array response grid computed by calARF."""
        arf = self.arf
        xmax = arf['sll_x'] + arf['sl_s'] * arf['grdpts_x']
        ymax = arf['sll_y'] + arf['sl_s'] * arf['grdpts_y']
        extent = [arf['sll_x'], xmax, arf['sll_y'], ymax]
        fig, ax = plt.subplots(figsize=(8, 6))
        a = ax.imshow(arf['arf'], extent=extent, aspect='auto', cmap='Reds_r')
        ax.set_title('Array response for %s in frequency %s\ntargeting sx=%.2f, sy=%.2f'
                     % (self.name, arf['freq'], arf['tsx'], arf['tsy']))
        fig.colorbar(a, ax=ax)
        ax.set_ylabel('Slowness Y')
        ax.set_xlabel('Slowness X')
        plt.show()

    def getTimeTable(self, sll_x=-15.0, sll_y=-15.0, sl_s=0.1, grdpts_x=301, grdpts_y=301, unit='deg'):
        """
        Build and store the time-shift table for the array geometry over a
        slowness grid (delegates to the module-level getTimeTable).
        """
        geometry = self.geometry
        self.timeTable = getTimeTable(geometry, sll_x, sll_y, sl_s, grdpts_x, grdpts_y, unit)

    def beamforming(self, filts={'filt_1': [1, 2, 4, True], 'filt_2': [2, 4, 4, True], 'filt_3': [1, 3, 4, True]},
                    starttime=0, winlen=1800.0,
                    stack='linear', unit='deg', write=True):
        """
        Run beamforming for every earthquake in the array.
        """
        for eq in self.events:
            eq.beamforming(geometry=self.geometry, arrayName=self.name, starttime=starttime,
                           winlen=winlen, filts=filts, unit=unit,
                           stack=stack, write=write)

    def slideBeam(self, filts={'filt_1': [1, 2, 4, True], 'filt_2': [2, 4, 4, True], 'filt_3': [1, 3, 4, True]},
                  grdpts_x=301, grdpts_y=301, sflag=2, stack='linear',
                  sll_x=-15.0, sll_y=-15.0, sl_s=0.1, refine=True,
                  starttime=400.0, endtime=1400.0, unit='deg',
                  winlen=2.0, overlap=0.5, write=False, **kwargs):
        """Run sliding-window beamforming for every earthquake in the array."""
        for eq in self.events:
            eq.slideBeam(geometry=self.geometry, timeTable=self.timeTable, arrayName=self.name,
                         grdpts_x=grdpts_x, grdpts_y=grdpts_y,
                         filts=filts,
                         sflag=sflag, stack=stack,
                         sll_x=sll_x, sll_y=sll_y, sl_s=sl_s, refine=refine,
                         starttime=starttime, endtime=endtime, unit=unit,
                         winlen=winlen, overlap=overlap, write=write, **kwargs)

    def slideFK(self, winlen=2.0, overlap=0.5, grdpts_x=301.0, grdpts_y=301.0,
                sll_x=-15.0, sll_y=-15.0, sl_s=0.1, starttime=500, endtime=1000,
                method=0, prewhiten=0, freqmin=1, freqmax=2, write=False, **kwargs):
        """Run sliding-window FK analysis for every earthquake in the array."""
        for eq in self.events:
            eq.slideFK(timeTable=self.timeTable, arrayName=self.name,
                       winlen=winlen, overlap=overlap,
                       sll_x=sll_x, sll_y=sll_y, sl_s=sl_s,
                       grdpts_x=grdpts_x, grdpts_y=grdpts_y,
                       freqmin=freqmin, freqmax=freqmax,
                       starttime=starttime, endtime=endtime,
                       prewhiten=prewhiten, method=method, write=write,
                       **kwargs)

    def vespectrum(self, grdpts=401,
                   filts={'filt_1': [1, 2, 4, True], 'filt_2': [2, 4, 4, True], 'filt_3': [1, 3, 4, True]},
                   stack='linear',
                   sl_s=0.1, vary='slowness', sll=-20.0,
                   starttime=400.0, endtime=1400.0, unit='deg',
                   **kwargs):
        """Compute a vespagram (slowness or back-azimuth) for every earthquake."""
        for eq in self.events:
            eq.vespectrum(geometry=self.geometry, arrayName=self.name, grdpts=grdpts,
                          filts=filts, stack=stack,
                          sl_s=sl_s, vary=vary, sll=sll,
                          starttime=starttime, endtime=endtime, unit=unit,
                          **kwargs)

    def write(self, fileName=None):
        """
        Pickle this Array instance; default path is <name>/<name>.pkl.
        """
        if fileName is None:  # idiom fix: was '== None'
            fileName = self.name + '.pkl'
        fileName = os.path.join(self.name, fileName)
        msg = 'writing array instance %s as %s' % (self.name, fileName)
        gpar.log(__name__, msg, level='info', pri=True)
        # Bug fix: the original leaked the file handle; close it deterministically.
        with open(fileName, 'wb') as f:
            cPickle.dump(self, f)
class Earthquake(object):
"""
Earthquake object
"""
def __init__(self, array, row, beamphase='PKiKP',phase_list=['P','PP','PcP','ScP','PKiKP','SP','ScS']):
"""
Earthquake basic information including ray parameters for specific phase
defualt is for PKiKP
"""
# self.time = UTCDateTime(row.TIME)
# self.ID = row.DIR
# self.lat = row.LAT
# self.lon = row.LON
# self.dep = row.DEP
# self.mw = row.Mw
# self.dis = row.Del
# self.az = row.Az
# self.baz = row.Baz
# self.bb = row.BB
# self.rayp = row.Rayp
# self.takeOffAngle = row.Angle
# self._defOri()
# self._updateOri(row)
self.__dict__.update((k.lower(), v) for k,v in row.items())
self.ID = row.DIR
self.beamphase = beamphase
self.phase_list = phase_list
# self.stream = row.Stream
self.ntr = len(row.Stream)
self.delta = row.Stream[0].stats.delta
self._checkInputs()
# def _defOri(self):
# self.time = -12345
# self.ID = -12345
# self.lat = -12345
# self.lon = -12345
# self.dep = -12345
# self.mw = -12345
# self.dis = -12345
# self.az = -12345
# self.baz = -12345
# self.bb = -12345
# self.rayp = -12345
# self.takeOffAngle = -12345
def _updateOri(self, row):
self.__dict__.update((k, v) for k,v in row.items())
def _checkInputs(self):
if not isinstance(self.time, UTCDateTime):
self.time = UTCDateTime(self.time)
if not isinstance(self.stream, obspy.core.stream.Stream):
msg = ('Waveform data for %s is not stream, stop running' % self.ID)
gpar.log(__name__,msg,level='error',e='ValueError',pri=True)
def getArrival(self,phase_list=None, model='ak135'):
"""
Function to get theoritcal arrival times for the events
Parameters:
phase: str or list. Phase name for travel time calculating
model: str. Model using in taup.
"""
model = TauPyModel(model)
if phase_list == None:
phase_list = self.phase_list
if not hasattr(self, 'dep') or self.dis==-12345:
msg = "Depth or distance for %s is not defined"%self.ID
gpar.log(__name__, msg, level='error', pri=True)
arrivals = model.get_travel_times(source_depth_in_km=self.dep,distance_in_degree=self.dis,phase_list=phase_list)
phases = {}
for arr in arrivals:
pha = arr.name
times = {'UTC':self.time + arr.time,
'TT':arr.time,
'RP':arr.ray_param_sec_degree}
phases[pha] = times
self.arrivals = phases
msg = ('Travel times for %s for earthquake %s in depth of %.2f in distance of %.2f' % (phase_list, self.ID, self.dep, self.dis))
gpar.log(__name__,msg,level='info',pri=True)
    def beamforming(self, geometry, arrayName, starttime=0.0, winlen=1800.0,
                    filts={'filt_1':[1,2,4,True],'filt_2':[2,4,4,True],'filt_3':[1,3,4,True]},
                    stack='linear',unit='deg',write=True,**kwargs):
        """
        Beamform the event's waveforms for self.beamphase (default PKiKP),
        producing one beam trace per filter band in self.beam.
        Parameters:
        -------------------
        geometry: DataFrame with geometry information for the array, as returned by getGeometry
        arrayName: str, name of the array
        starttime: float, starting time for the trace, relative to the origin time
        winlen: window length in seconds for beamforming
        filts: dict, filter parameters {name: [freqmin, freqmax, corners, zerophase]}
        stack: str, how to stack the shifted waveforms.
            linear: linear stacking
            psw: phase weight stacking
            root: root mean stack
        unit: str, units for the ray parameter.
            deg: s/degree
            km: s/km
            rad: s/radian
        write: bool, if True write a SAC file per beam trace into the directory where the event waveforms are.
        """
        # stime = time.time()
        if not hasattr(self, 'rayp') or not hasattr(self, 'baz'):
            msg = "Ray parameter or back azimuth is not defined for %s"%self.ID
            gpar.log(__name__, msg, level='error', pri=True)
        # Per-station time shifts for this ray parameter / back azimuth.
        tsDF = getTimeShift(self.rayp,self.baz,geometry,unit=unit)
        self.timeshift = tsDF
        stalist = tsDF.STA
        st = self.stream.copy()
        # Drop stations that have a time-shift entry but no waveform.
        if len(st) < len(tsDF):
            for s in stalist:
                tmp_st = st.select(station=s)
                if len(tmp_st) == 0:
                    msg = 'Station %s is missing for event %s in array %s, dropping station'%(s, self.ID, arrayName)
                    gpar.log(__name__, msg, level='info', pri=True)
                    tsDF = tsDF[~(tsDF.STA == s)]
        ntr = self.ntr
        delta = self.delta
        st.detrend('demean')
        # st.detrend('demean')
        # Split each time shift into a whole-sample lag (LAG) plus the
        # sub-sample remainder (DT) used for fine alignment.
        DT = tsDF.TimeShift - (tsDF.TimeShift/delta).astype(int)*delta
        lag = (tsDF.TimeShift/delta).astype(int)+1
        tsDF['DT'] = DT
        tsDF['LAG'] = lag
        npt = int(winlen/delta) + 1  # number of samples in the beam window
        beamSt = obspy.core.stream.Stream()
        # Build one beam trace per filter band.
        for name, filt in filts.items():
            msg = ('Calculating beamforming for earthquake %s in filter %s - %s' % (self.ID, name, filt))
            gpar.log(__name__,msg,level='info',pri=True)
            tmp_st = st.copy()
            tmp_st.filter('bandpass',freqmin=filt[0],freqmax=filt[1],corners=filt[2],zerophase=filt[3])
            beamTr = beamForming(tmp_st, tsDF, npt, starttime, stack=stack)
            beamTr.stats.starttime = self.time
            # Encode the band and stack in the trace id: net='beam',
            # channel='fmin-fmax', station=filter name.
            bpfilt = str(filt[0]) +'-'+str(filt[1])
            beamTr.stats.network = 'beam'
            beamTr.stats.channel = bpfilt
            beamTr.stats.station = name
            # SAC header carrying event location and origin-time fields.
            sac = AttribDict({'b':starttime,'e':starttime + (npt-1)*delta,
                              'evla':self.lat,'evlo':self.lon,'evdp':self.dep,
                              'delta':delta,
                              'nzyear':self.time.year,'nzjday':self.time.julday,
                              'nzhour':self.time.hour,'nzmin':self.time.minute,
                              'nzsec':self.time.second,'nzmsec':self.time.microsecond/1000})
            beamTr.stats.sac = sac
            if write:
                name = 'beam.' + self.ID + '.'+stack +'.'+bpfilt+'.sac'
                name = os.path.join('./',arrayName,'Data',self.ID,name)
                beamTr.write(name,format='SAC')
            beamSt.append(beamTr)
        self.beam = beamSt
        # etime = time.time()
        # print(etime - stime)
def slideBeam(self,geometry,timeTable,arrayName,grdpts_x=301,grdpts_y=301,
filts={'filt_1':[1,2,4,True],'filt_2':[2,4,4,True],'filt_3':[1,3,4,True]},
sflag=1,stack='linear',
sll_x=-15.0,sll_y=-15.0,sl_s=0.3,refine=True,
starttime=400.0,endtime=1400.0, unit='deg',
winlen=2.0,overlap=0.5,write=False, **kwargs):
"""
Function to get beamforming for the phase in self.phase. Default is for PKiKP
Only coding linear stacking now
Parameters:
-------------------
geometry: DataFrame that contains geometry infromation for the array, as returns by getGeometry
timeTable: 3D numpy array that contain time shift for select slowness grids for all stations in the array,
created by getTimeTable
arrayName: str, name of array
sflag: int, options for calculating maximum value of the beamforming traces.
1: return the maximum amplitude of the traces
2: return the mean of the trace
3: return the root-mean-sqaure of the trace
filts: dict, filters parameters for waveform filtering
stack: str, wave to stack shifted wavefrom.
linear: linear stacking
psw: phase weight stacking
root: root mean stack
unit: str, units for the ray parameter.
deg: s/degree
km: s/km
rad: s/radian
grd_x: int, grid numbers for slowness in X direction
grd_y: int, gid numbers for slowness in Y direction
sll_x: float, starting point of X slowness
sll_y: float, startting point of Y slowness
sl_s: float, step size for slowness
refine: boot, if True, will do the refine frid search near the best fitting slowness with the same grid steps.
starttime: float, starttime of the time window to do slide beamforming, related to the starttime of the Streams
endtime: float, endtime of the time window to do slide beamforming, related to the starttime of the Streams
winlen: float, window size in second for the beamforming segment
overlap: float, from 0-1, the step in percent of winlen to move the beamforming time window
write: boot, if True write a sac file for beamforming trace, store into the directory where the event waveforms are.
Retrun:
Stream store in slideSt of the Class
"""
self.slideSt = {}
for name, filt in filts.items():
msg | |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
r"""Importing this file includes common utility methods and base clases for
checking quantization api and properties of resulting modules.
"""
import io
import torch
import torch.nn as nn
import torch.nn.quantized as nnq
import torch.nn.quantized.dynamic as nnqd
from torch.testing._internal.common_utils import TestCase
from torch.quantization import QuantWrapper, QuantStub, DeQuantStub, \
default_qconfig, default_per_channel_qconfig, QConfig, default_observer, default_weight_observer, \
propagate_qconfig_, convert
from torch.quantization.default_mappings import DEFAULT_DYNAMIC_MODULE_MAPPING
def test_only_eval_fn(model, calib_data):
r"""
Default evaluation function takes a torch.utils.data.Dataset or a list of
input Tensors and run the model on the dataset
"""
total, correct = 0, 0
for data, target in calib_data:
output = model(data)
_, predicted = torch.max(output, 1)
total += target.size(0)
correct += (predicted == target).sum().item()
return correct / total
_default_loss_fn = torch.nn.CrossEntropyLoss()
def test_only_train_fn(model, train_data, loss_fn=_default_loss_fn):
r"""
Default train function takes a torch.utils.data.Dataset and train the model
on the dataset
"""
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
train_loss, correct, total = 0, 0, 0
for i in range(10):
model.train()
for data, target in train_data:
optimizer.zero_grad()
output = model(data)
loss = loss_fn(output, target)
loss.backward()
optimizer.step()
train_loss += loss.item()
_, predicted = torch.max(output, 1)
total += target.size(0)
correct += (predicted == target).sum().item()
return train_loss, correct, total
def convert_dynamic(module):
    r"""Convert ``module`` in place to its dynamically-quantized counterpart
    using the default dynamic module mapping."""
    convert(module, DEFAULT_DYNAMIC_MODULE_MAPPING, inplace=True)
def prepare_dynamic(model, qconfig_dict=None):
    r"""Propagate qconfig settings through ``model`` (in place) in
    preparation for dynamic quantization."""
    propagate_qconfig_(model, qconfig_dict)
# QuantizationTestCase used as a base class for testing quantization on modules
class QuantizationTestCase(TestCase):
    r"""Base TestCase for quantization tests.

    Provides tiny random calibration/train/image datasets and a family of
    ``check*`` helpers that assert structural properties of modules before
    and after quantization (observer insertion, quant/dequant stubs,
    module swaps, scriptability).
    """
    def setUp(self):
        super(QuantizationTestCase, self).setUp()
        # Two batches of (input, target); targets are all zeros because
        # torch.randint(0, 1, ...) samples from the half-open range [0, 1).
        self.calib_data = [(torch.rand(2, 5, dtype=torch.float), torch.randint(0, 1, (2,), dtype=torch.long)) for _ in range(2)]
        self.train_data = [(torch.rand(2, 5, dtype=torch.float), torch.randint(0, 1, (2,), dtype=torch.long)) for _ in range(2)]
        self.img_data = [(torch.rand(2, 3, 10, 10, dtype=torch.float), torch.randint(0, 1, (2,), dtype=torch.long))
                         for _ in range(2)]

    def checkNoPrepModules(self, module):
        r"""Checks the module does not contain child
        modules for quantization preparation, e.g.
        quant, dequant and observer
        """
        self.assertFalse(hasattr(module, 'quant'))
        self.assertFalse(hasattr(module, 'dequant'))

    def checkHasPrepModules(self, module):
        r"""Checks the module contains child
        modules for quantization preparation, e.g.
        quant, dequant and observer
        """
        self.assertTrue(hasattr(module, 'module'))
        self.assertTrue(hasattr(module, 'quant'))
        self.assertTrue(hasattr(module, 'dequant'))

    def checkObservers(self, module):
        r"""Checks the module or module's leaf descendants
        have observers in preparation for quantization
        """
        # only leaf modules (no children, not Sequential) carrying a qconfig
        # are required to have an observer attached
        if hasattr(module, 'qconfig') and module.qconfig is not None and \
           len(module._modules) == 0 and not isinstance(module, torch.nn.Sequential):
            self.assertTrue(hasattr(module, 'activation_post_process'),
                            'module: ' + str(type(module)) + ' do not have observer')
        for child in module.children():
            self.checkObservers(child)

    def checkQuantDequant(self, mod):
        r"""Checks that mod has nn.Quantize and
        nn.DeQuantize submodules inserted
        """
        self.assertEqual(type(mod.quant), nnq.Quantize)
        self.assertEqual(type(mod.dequant), nnq.DeQuantize)

    def checkWrappedQuantizedLinear(self, mod):
        r"""Checks that mod.module has been swapped for an nnq.Linear
        and that the wrapper has Quantize and DeQuantize submodules
        """
        self.assertEqual(type(mod.module), nnq.Linear)
        self.checkQuantDequant(mod)

    def checkQuantizedLinear(self, mod):
        r"""Checks that mod has been swapped for an nnq.Linear module."""
        self.assertEqual(type(mod), nnq.Linear)

    def checkDynamicQuantizedLinear(self, mod, dtype):
        r"""Checks that mod has been swapped for an nnqd.Linear
        module, the bias is float.
        """
        self.assertEqual(type(mod), nnqd.Linear)
        self.assertEqual(mod._packed_params.dtype, dtype)

    def checkLinear(self, mod):
        r"""Checks that mod is still a plain (unquantized) torch.nn.Linear."""
        self.assertEqual(type(mod), torch.nn.Linear)

    # calib_data follows the same schema as calib_data for
    # test_only_eval_fn, i.e. (input iterable, output iterable)
    def checkScriptable(self, orig_mod, calib_data, check_save_load=False):
        r"""Checks orig_mod survives torch.jit.script and torch.jit.trace
        with outputs unchanged on calib_data."""
        scripted = torch.jit.script(orig_mod)
        self._checkScriptable(orig_mod, scripted, calib_data, check_save_load)

        # Use first calib_data entry as trace input
        #
        # TODO: Trace checking is blocked on this issue:
        # https://github.com/pytorch/pytorch/issues/23986
        #
        # Once that's resolved we can remove `check_trace=False`
        traced = torch.jit.trace(orig_mod, calib_data[0][0], check_trace=False)
        self._checkScriptable(orig_mod, traced, calib_data, check_save_load)

    # Call this twice: once for a scripted module and once for a traced module
    def _checkScriptable(self, orig_mod, script_mod, calib_data, check_save_load):
        self._checkModuleCorrectnessAgainstOrig(orig_mod, script_mod, calib_data)

        # Test save/load round trip through an in-memory buffer
        buffer = io.BytesIO()
        torch.jit.save(script_mod, buffer)

        buffer.seek(0)
        loaded_mod = torch.jit.load(buffer)

        # Pending __get_state_ and __set_state__ support
        # See tracking task https://github.com/pytorch/pytorch/issues/23984
        if check_save_load:
            self._checkModuleCorrectnessAgainstOrig(orig_mod, loaded_mod, calib_data)

    def _checkModuleCorrectnessAgainstOrig(self, orig_mod, test_mod, calib_data):
        r"""Asserts test_mod matches orig_mod output on every calib batch."""
        for (inp, _) in calib_data:
            ref_output = orig_mod(inp)
            scripted_output = test_mod(inp)
            self.assertEqual(scripted_output, ref_output)
# Below are a series of neural net models to use in testing quantization
# Single layer models
class SingleLayerLinearModel(torch.nn.Module):
    r"""Float model with a single 5->5 fully connected layer and no
    quantization annotations."""

    def __init__(self):
        super(SingleLayerLinearModel, self).__init__()
        self.fc1 = torch.nn.Linear(5, 5).float()

    def forward(self, x):
        return self.fc1(x)
class AnnotatedSingleLayerLinearModel(torch.nn.Module):
    r"""Single 5->5 linear layer wrapped in QuantWrapper and annotated with
    the default (per-tensor) qconfig."""

    def __init__(self):
        super(AnnotatedSingleLayerLinearModel, self).__init__()
        self.qconfig = default_qconfig
        self.fc1 = QuantWrapper(torch.nn.Linear(5, 5).float())

    def forward(self, x):
        return self.fc1(x)
class SingleLayerLinearDynamicModel(torch.nn.Module):
    r"""Single 5->5 linear layer annotated with the default qconfig; used
    for dynamic quantization tests (no wrapping stubs needed)."""

    def __init__(self):
        super(SingleLayerLinearDynamicModel, self).__init__()
        self.qconfig = default_qconfig
        self.fc1 = torch.nn.Linear(5, 5).float()

    def forward(self, x):
        return self.fc1(x)
class LSTMDynamicModel(torch.nn.Module):
    r"""Single LSTM (input size 2, hidden size 2) annotated with the default
    qconfig; forward returns the usual (output, (h, c)) tuple."""

    def __init__(self):
        super(LSTMDynamicModel, self).__init__()
        self.qconfig = default_qconfig
        self.lstm = torch.nn.LSTM(2, 2).float()

    def forward(self, x):
        return self.lstm(x)
class ConvModel(torch.nn.Module):
    r"""Plain float 3->5 conv (3x3 kernel, no bias) without annotations."""

    def __init__(self):
        super(ConvModel, self).__init__()
        self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).float()

    def forward(self, x):
        return self.conv(x)
class AnnotatedConvModel(torch.nn.Module):
    r"""Conv model with manual QuantStub/DeQuantStub around the conv and the
    default qconfig attached."""

    def __init__(self):
        super(AnnotatedConvModel, self).__init__()
        self.qconfig = default_qconfig
        self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).float()
        self.quant = QuantStub()
        self.dequant = DeQuantStub()

    def forward(self, x):
        return self.dequant(self.conv(self.quant(x)))
class ConvBnModel(torch.nn.Module):
    r"""Conv (3->5, 3x3, no bias) followed by batch norm; a conv-bn fusion
    candidate with no annotations."""

    def __init__(self):
        super(ConvBnModel, self).__init__()
        self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).float()
        self.bn = torch.nn.BatchNorm2d(5).float()

    def forward(self, x):
        return self.bn(self.conv(x))
class AnnotatedConvBnModel(torch.nn.Module):
    r"""Conv + batch norm with manual QuantStub/DeQuantStub and the default
    qconfig attached."""

    def __init__(self):
        super(AnnotatedConvBnModel, self).__init__()
        self.qconfig = default_qconfig
        self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).float()
        self.bn = torch.nn.BatchNorm2d(5).float()
        self.quant = QuantStub()
        self.dequant = DeQuantStub()

    def forward(self, x):
        return self.dequant(self.bn(self.conv(self.quant(x))))
class TwoLayerLinearModel(torch.nn.Module):
    r"""Two stacked linear layers (5->8->5) with no annotations."""

    def __init__(self):
        super(TwoLayerLinearModel, self).__init__()
        self.fc1 = torch.nn.Linear(5, 8).float()
        self.fc2 = torch.nn.Linear(8, 5).float()

    def forward(self, x):
        return self.fc2(self.fc1(x))
class AnnotatedTwoLayerLinearModel(torch.nn.Module):
    r"""Two linear layers where only fc2 is wrapped and annotated for
    quantization with the fbgemm default qconfig; fc1 stays float."""

    def __init__(self):
        super(AnnotatedTwoLayerLinearModel, self).__init__()
        self.fc1 = torch.nn.Linear(5, 8).float()
        self.fc2 = QuantWrapper(torch.nn.Linear(8, 5).float())
        self.fc2.qconfig = torch.quantization.get_default_qconfig("fbgemm")

    def forward(self, x):
        return self.fc2(self.fc1(x))
class LinearReluModel(torch.nn.Module):
    r"""Linear layer followed by ReLU; a linear-relu fusion candidate."""

    def __init__(self):
        super(LinearReluModel, self).__init__()
        self.fc = torch.nn.Linear(5, 5).float()
        self.relu = torch.nn.ReLU()

    def forward(self, x):
        return self.relu(self.fc(x))
class NestedModel(torch.nn.Module):
    r"""Unannotated composite model:
    sub1 (linear+relu) -> sub2 (two linears) -> fc3."""

    def __init__(self):
        super(NestedModel, self).__init__()
        self.sub1 = LinearReluModel()
        self.sub2 = TwoLayerLinearModel()
        self.fc3 = torch.nn.Linear(5, 5).float()

    def forward(self, x):
        return self.fc3(self.sub2(self.sub1(x)))
class AnnotatedNestedModel(torch.nn.Module):
    r"""Nested model with per-submodule annotations: fc3 is wrapped with the
    default qconfig and sub2.fc1 is wrapped with the per-channel qconfig."""

    def __init__(self):
        super(AnnotatedNestedModel, self).__init__()
        self.sub1 = LinearReluModel()
        self.sub2 = TwoLayerLinearModel()
        self.fc3 = QuantWrapper(torch.nn.Linear(5, 5).float())
        self.fc3.qconfig = default_qconfig
        # only sub2.fc1 is annotated (per-channel); sub2.fc2 stays float
        self.sub2.fc1 = QuantWrapper(self.sub2.fc1)
        self.sub2.fc1.qconfig = default_per_channel_qconfig

    def forward(self, x):
        return self.fc3(self.sub2(self.sub1(x)))
class AnnotatedSubNestedModel(torch.nn.Module):
    r"""Nested model where the whole sub2 submodule is QuantWrapper'd and
    annotated, along with the trailing fc3."""

    def __init__(self):
        super(AnnotatedSubNestedModel, self).__init__()
        self.sub1 = LinearReluModel()
        self.sub2 = QuantWrapper(TwoLayerLinearModel())
        self.fc3 = QuantWrapper(torch.nn.Linear(5, 5).float())
        self.fc3.qconfig = default_qconfig
        self.sub2.qconfig = default_qconfig

    def forward(self, x):
        return self.fc3(self.sub2(self.sub1(x)))
class AnnotatedCustomConfigNestedModel(torch.nn.Module):
    r"""Nested model demonstrating a hand-built QConfig: sub2.fc1 carries a
    per-tensor affine quint8 qconfig while fc3 and sub2 use the default."""

    def __init__(self):
        super(AnnotatedCustomConfigNestedModel, self).__init__()
        self.sub1 = LinearReluModel()
        self.sub2 = TwoLayerLinearModel()
        self.fc3 = QuantWrapper(torch.nn.Linear(5, 5).float())
        self.fc3.qconfig = default_qconfig
        self.sub2.qconfig = default_qconfig

        # observer configured for per-tensor affine quint8 activations
        custom_options = {
            'dtype': torch.quint8,
            'qscheme': torch.per_tensor_affine
        }
        custom_qconfig = QConfig(activation=default_observer.with_args(**custom_options),
                                 weight=default_weight_observer)

        # the qconfig is attached to the inner linear *before* wrapping,
        # so the wrapped module keeps it
        self.sub2.fc1.qconfig = custom_qconfig
        self.sub2.fc1 = QuantWrapper(self.sub2.fc1)
        self.sub2.fc2 = QuantWrapper(self.sub2.fc2)

    def forward(self, x):
        return self.fc3(self.sub2(self.sub1(x)))
class QuantSubModel(torch.nn.Module):
    r"""Model whose sub2 is a QuantWrapper'd submodule; fc3 carries a qconfig
    but is deliberately left unwrapped."""

    def __init__(self):
        super(QuantSubModel, self).__init__()
        self.sub1 = LinearReluModel()
        self.sub2 = QuantWrapper(TwoLayerLinearModel())
        self.sub2.qconfig = default_qconfig
        self.fc3 = torch.nn.Linear(5, 5).float()
        self.fc3.qconfig = default_qconfig

    def forward(self, x):
        return self.fc3(self.sub2(self.sub1(x)))
class InnerModule(torch.nn.Module):
    r"""Two linear layers (5->8->5), each followed by a shared ReLU."""

    def __init__(self):
        super(InnerModule, self).__init__()
        self.fc1 = torch.nn.Linear(5, 8).float()
        self.relu = torch.nn.ReLU()
        self.fc2 = torch.nn.Linear(8, 5).float()

    def forward(self, x):
        hidden = self.relu(self.fc1(x))
        return self.relu(self.fc2(hidden))
class SkipQuantModel(torch.nn.Module):
    r"""We can skip quantization by explicitly
    setting qconfig of a submodule to None
    """

    def __init__(self):
        super(SkipQuantModel, self).__init__()
        self.qconfig = default_qconfig
        self.sub = QuantWrapper(InnerModule())
        self.fc = torch.nn.Linear(5, 5).float()
        self.fc.qconfig = None  # opt this layer out of quantization

    def forward(self, x):
        return self.fc(self.sub(x))
class QuantStubModel(torch.nn.Module):
    r"""A Module with manually inserted `QuantStub` and `DeQuantStub`
    around a single linear layer (qnnpack default qconfig)."""

    def __init__(self):
        super(QuantStubModel, self).__init__()
        self.qconfig = torch.quantization.get_default_qconfig("qnnpack")
        self.quant = QuantStub()
        self.dequant = DeQuantStub()
        self.fc = torch.nn.Linear(5, 5).float()

    def forward(self, x):
        return self.dequant(self.fc(self.quant(x)))
class ManualLinearQATModel(torch.nn.Module):
    r"""QAT model (fbgemm default QAT qconfig) with manual quant/dequant
    stubs around two linear layers (5->1->10)."""

    def __init__(self):
        super(ManualLinearQATModel, self).__init__()
        self.qconfig = torch.quantization.get_default_qat_qconfig("fbgemm")
        self.quant = QuantStub()
        self.dequant = DeQuantStub()
        self.fc1 = torch.nn.Linear(5, 1).float()
        self.fc2 = torch.nn.Linear(1, 10).float()

    def forward(self, x):
        hidden = self.fc1(self.quant(x))
        return self.dequant(self.fc2(hidden))
class ManualConvLinearQATModel(torch.nn.Module):
    r"""A module with manually inserted `QuantStub` and `DeQuantStub`
    that mixes conv and linear layers (qnnpack default QAT qconfig).

    Expects (N, 3, 10, 10) input: the 3->1 conv with a 3x3 kernel yields
    (N, 1, 8, 8), which is flattened to (N, 64) for the linear layers.
    """

    def __init__(self):
        super(ManualConvLinearQATModel, self).__init__()
        self.qconfig = torch.quantization.get_default_qat_qconfig("qnnpack")
        self.quant = QuantStub()
        self.dequant = DeQuantStub()
        self.conv = torch.nn.Conv2d(3, 1, kernel_size=3).float()
        self.fc1 = torch.nn.Linear(64, 10).float()
        self.fc2 = torch.nn.Linear(10, 10).float()

    def forward(self, x):
        y = self.quant(x)
        y = self.conv(y)
        y = y.view(-1, 64).contiguous()
        y = self.fc1(y)
        y = self.fc2(y)
        return self.dequant(y)
class SubModelForFusion(nn.Module):
    r"""Conv (2->2, 1x1, no bias) followed by batch norm; used as a
    conv-bn fusion candidate submodule."""

    def __init__(self):
        super(SubModelForFusion, self).__init__()
        self.conv = nn.Conv2d(2, 2, 1, bias=None).float()
        self.bn = nn.BatchNorm2d(2).float()

    def forward(self, x):
        return self.bn(self.conv(x))
class SubModelWithoutFusion(nn.Module):
def __init__(self):
super(SubModelWithoutFusion, self).__init__()
| |
<filename>app/clients/YouTubePlaylistsHandler.py
# Standard library
import os
from typing import Dict
from urllib.parse import parse_qs, urlencode, urlparse

# Third-party
import googleapiclient.discovery
import google.oauth2.credentials
import pandas as pd
import requests
from flask import session
# -----------------------------------------------------------
# Данный класс позволяет получать основную информацию о каждом видео
# из неприватного плейлиста YouTube в формате pd.DataFrame:
# номер видео в плейлисте, ссылку на видео, ссылку на изображение превью видео,
# название видео, описание видео, ник автора видео.
# Класс использует один API сервис:
# YouTube Data API v3
# (https://developers.google.com/youtube/v3/getting-started)
# для работы с PlaylistItems - получение информации о видео из плейлиста
# (https://developers.google.com/youtube/v3/docs/playlistItems)
# и для работы с Playlists - проверка доступности плейлиста
# (https://developers.google.com/youtube/v3/docs/playlists)
# (Бесплатное использование c ограничениями 10,000 запросов в день.
# Доступ по api-key.)
# -----------------------------------------------------------
class YouTubePlaylistsHandler:
""" Класс получения основной информации о плейлисте. """
    class CannotGetError(Exception):
        """Raised when information about the playlist cannot be obtained:
        it does not exist, is private, or the youtube-api-key is wrong.
        The user-facing message below is intentionally kept in Russian."""
        message = \
            'Нельзя получить информацию о данном плейлисте:' \
            ' он не существует или имеет приватные настройки доступа.' \
            ' <br>' \
            'Также проверьте правильность youtube-api-key.'
    class UndefinedError(Exception):
        """Raised when the API cannot be reached (bad key, transient
        failure). The user-facing message below is intentionally kept
        in Russian."""
        message = \
            'Не можем связаться с API для работы с youtube: ' \
            ' <br>' \
            ' проверьте свой api-key,' \
            ' попробуйте повторить свои действия или подождать.'
@staticmethod
def get_playlist_id(playlist_url_or_id: str) -> str:
"""
Функция получения id плейлиста по ссылке.
:param playlist_url_or_id: ссылка или id плейлиста
:return: id плейлиста
"""
playlist_id = playlist_url_or_id
if 'youtube' in playlist_url_or_id:
playlist_id = playlist_url_or_id.split('=')[-1]
return playlist_id
def __init__(self, youtube_api_key: str, client_secret=None):
"""
:param youtube_api_key: api-key для доступа к YouTube Data API v3
:param client_secret: путь к файлу, где лежит client_secret для OAuth
"""
self.youtube_api_key = youtube_api_key
self.client_secret = client_secret
self.df_searcher = None # класс для поиска по плейлисту
self.work_playlist_id = None # id плейлиста с которым работает класс
@staticmethod
def yt_api_key_from_file(api_key_file_path: str, client_secret=None) \
-> 'YouTubePlaylistsHandler':
"""
Функция инициализации класса через путь к файлу, где лежит api-key.
:param api_key_file_path: путь к файлу, где лежит api-key
:param client_secret: путь к файлу, где лежит client_secret для OAuth
:return: YouTubePlaylistsHandler
"""
with open(api_key_file_path) as f:
youtube_api_key = f.read()
if client_secret is not None and not os.path.exists(client_secret):
client_secret = None
return YouTubePlaylistsHandler(youtube_api_key=youtube_api_key,
client_secret=client_secret)
    def __call__(self, playlist_url_or_id: str, oauth=False) -> pd.DataFrame:
        """
        Main entry point: collect basic information about every video of
        the playlist.
        :param playlist_url_or_id: playlist url or playlist id
        :param oauth: whether the user is authorized via OAuth (default False)
        :return: pd.DataFrame with the basic information about
            every video of the playlist:
            'ind' - positions of the videos inside the playlist
            'url' - video links
            'img_url' - video preview image links
            'title' - video titles
            'description' - video descriptions
            'author_url' - author channel links
            'author' - author nicknames
        """
        # extract the playlist id
        playlist_id = self.get_playlist_id(playlist_url_or_id)
        if oauth:
            # the user is authorized: go through the OAuth code path
            data_frame = self._oauth_get_all_data_frame(playlist_id)
        else:
            # the user is not authorized
            self._check_playlist(playlist_id)  # check the playlist is accessible
            data_frame = self._get_all_data_frame(playlist_id)
        # remember the id of the playlist the handler is working with
        self.work_playlist_id = playlist_id
        # pd.DataFrame with the information about the playlist videos
        return data_frame
    def _check_playlist(self, playlist_id: str) -> None:
        """
        Check that the playlist is accessible, using the Playlists endpoint.
        :param playlist_id: playlist id
        :return: raises CannotGetError if the playlist is not accessible
        """
        url = 'https://www.googleapis.com/youtube/v3/playlists'
        params = {
            'id': playlist_id,
            'part': 'status',
            'key': self.youtube_api_key
        }
        req_url = url + '?' + urlencode(params)  # build the request url
        response = requests.get(req_url)
        # the playlist cannot be accessed: http error or zero results
        if response.status_code != 200 \
                or response.json()['pageInfo']['totalResults'] == 0:
            raise self.CannotGetError
def _get_all_data_frame(self, playlist_id: str) -> pd.DataFrame:
"""
Функция получения основной информации
о каждом видео из плейлиста в формате pd.DataFrame.
:param playlist_id: id плейлиста
:return: pd.DataFrame с основной информацией,
о каждом видео из плейлиста
"""
status_ok = True
# создаем словарь информации
info = self._create_info()
# получение первой страницы
response = self._get_page_response(playlist_id)
# сбор информации с первой страницы
status_ok &= self._get_info(info, response)
# проход по всем страницами плейлиста
while 'nextPageToken' in response.json():
# получение номера следующей страницы
page_token = response.json()['nextPageToken']
response = self._get_page_response(playlist_id, page_token)
status_ok &= self._get_info(info, response)
if not status_ok: # что-то пошло не так
raise self.UndefinedError
# получаем таблицу pd.DataFrame
return self._info_to_data_frame(info)
    def _get_page_response(self, playlist_id: str,
                           page_token=None) -> 'Response':
        """
        Request one "page" of PlaylistItems results - information about up
        to 50 videos. (The API returns at most 50 videos per request and
        chains the pages like a linked list.)
        :param playlist_id: playlist id
        :param page_token: token of the page (default None - the first page)
        :return: Response from playlistItems with information about <=50 videos
        """
        url = 'https://www.googleapis.com/youtube/v3/playlistItems'
        params = {
            'playlistId': playlist_id,
            'part': 'snippet, status',
            'maxResults': 50,
            'key': self.youtube_api_key
        }
        if page_token is not None:
            params['pageToken'] = page_token
        req_url = url + '?' + urlencode(params)  # build the request url
        return requests.get(req_url)
@staticmethod
def _create_info() -> Dict:
"""
Функция создания словаря,
где будут храниться списки информации о каждом видео.
:return: словарь с информацией о каждом видео
"""
return {
'curr': 0, # текущий номер в плейлисте
'indexes': [], # номера видео в плейлисте
'urls': [], # ссылки на видео
'img_urls': [], # ссылки на изображения превью видео
'titles': [], # названия видео
'descriptions': [], # описания видео
'author_urls': [], # ссылка на автора видео
'authors': [] # ники авторов видео
}
def _get_info(self, info, response) -> bool:
"""
Функция получения основной информации,
о каждом видео из ответа PlaylistItems.
:param info: словарь, где хранятся списки информации о каждом видео
:param response: ответ от PlaylistItems
:return: прошло ли все хорошо или произошла ошибка
"""
if response.status_code != 200: # невозможно получить ответ
return False
items = response.json()['items']
for i in range(len(items)):
# текущий номер в плейлисте
info['curr'] += 1
status = items[i]['status']['privacyStatus']
# видео удалено или имеет приватные настройки доступа
if status != 'public' and status != 'unlisted':
continue
# номер видео в плейлисте
info['indexes'].append(info['curr'])
# подробная информация об одном видео
item = items[i]['snippet']
# зполнение словаря с информацией
self._fill_info(info, item)
return True
@staticmethod
def _fill_info(info: Dict, item: Dict) -> None:
"""
Функция заполнения подробной информации от одного видео.
:param info: словарь, где хранятся списки информации о каждом видео
:param item: подробная информация
об одном видео из ответа PlaylistItems
"""
video_id = item['resourceId']['videoId']
# ссылка на видео
video_url = f'https://www.youtube.com/watch?v={video_id}'
info['urls'].append(video_url)
# ссылка на изображение превью видео
video_img_url = f'https://img.youtube.com/vi/{video_id}/0.jpg'
info['img_urls'].append(video_img_url)
# название видео
info['titles'].append(item['title'])
# описание видео
info['descriptions'].append(item['description'])
# ссылка на автора видео
author_id = item['videoOwnerChannelId']
author_url = f'https://www.youtube.com/channel/{author_id}'
info['author_urls'].append(author_url)
# ник автора видео
info['authors'].append(item['videoOwnerChannelTitle'])
@staticmethod
def _info_to_data_frame(info: Dict):
"""
Функция создания pd.DataFrame с основной информацией
о каждом видео из плейлиста на снове словаря info
:param info: словарь, где хранятся списки информации о каждом видео
:return: pd.DataFrame
"""
return pd.DataFrame(
{
# номера видео в плейлисте
'ind': info['indexes'],
# ссылки на видео
'url': info['urls'],
# ссылки на изображения превью видео
'img_url': info['img_urls'],
# названия видео
'title': info['titles'],
# описания видео
'description': info['descriptions'],
# ссылки на авторов видео
'author_url': info['author_urls'],
# ники авторов видео
'author': info['authors']
}
)
# -----------------------------------------------------------
# Функции и классы для работы с OAuth.
# (https://developers.google.com/youtube/v3/guides/auth/server-side-web-apps)
# Для работы надо провести настройку в консоле API.
# Подробнее написано в README.md.
# -----------------------------------------------------------
    class OAuthUndefinedError(Exception):
        """Raised when the OAuth flow or the API cannot be reached.
        The user-facing message below is intentionally kept in Russian."""
        message = \
            ' Что-то пошло не так:' \
            ' проверьте, что Вы правильно настроили OAuth' \
            ' и имеете доступ к плейлисту.'
def _oauth_get_all_data_frame(self, playlist_id: str) -> pd.DataFrame:
"""
Функция - аналог функции _get_all_data_frame, но для работы с OAuth.
Функция получения основной информации
о каждом видео из плейлиста в формате pd.DataFrame.
:param playlist_id: id плейлиста
:return: pd.DataFrame с основной информацией,
о каждом видео из плейлиста
"""
# Загружаем учетные данные из сеанса.
credentials = \
google.oauth2.credentials.Credentials(**session['credentials'])
# Класс для работы с запросами.
self.youtube = googleapiclient.discovery.build(
'youtube',
'v3',
credentials=credentials)
try:
# создаем словарь информации
info = self._create_info()
# получение первой страницы для авторизованного пользователя
response = self._oauth_get_page_response(playlist_id)
# сбор информации с первой страницы
# для авторизованного пользователя
self._oauth_get_info(info, response)
# проход по всем страницами плейлиста
while 'nextPageToken' in response.execute():
# получение номера следующей страницы
page_token = response.execute()['nextPageToken']
response = self._oauth_get_page_response(playlist_id,
page_token)
self._oauth_get_info(info, response)
# что-то пошло не так
except googleapiclient.errors.HttpError:
raise self.OAuthUndefinedError
# получаем | |
from django.conf import settings
from django.http import HttpResponse
from django.template import TemplateDoesNotExist, TemplateSyntaxError, Context, RequestContext
from mako.exceptions import TopLevelLookupException, TemplateLookupException, CompileException, SyntaxException, html_error_template
from mako.lookup import TemplateLookup
from mako.template import Template
import mako.runtime
from .exceptions import RedirectException
from .signals import dmp_signal_pre_render_template, dmp_signal_post_render_template, dmp_signal_redirect_exception
from .util import get_dmp_instance, log, DMP_OPTIONS
import io
import logging
import mimetypes
import os
import os.path
import sys
class DMPTemplateLookup(TemplateLookup):
    '''Mako TemplateLookup subclass that keeps a back reference to the
    MakoTemplateLoader that owns it.'''
    def __init__(self, template_loader, *args, **kwargs):
        # remember the owning loader, then defer to Mako's normal setup
        self.template_loader = template_loader
        super(DMPTemplateLookup, self).__init__(*args, **kwargs)
class MakoTemplateLoader(object):
    '''Finds Mako templates for a Django app.

    Holds a DMPTemplateLookup configured with the app's template directory,
    any extra TEMPLATES_DIRS from settings, and the project base directory.
    '''
    def __init__(self, app_path, template_subdir='templates'):
        '''
        The loader looks in the app_path/templates directory unless
        the template_subdir parameter overrides this default.

        You should not normally create this object because it bypasses
        the DMP cache. Instead, call get_template_loader() or
        get_template_loader_for_path().
        '''
        self.app_path = app_path
        # calculate the template directory and check that it exists
        if template_subdir is None:  # None skips adding the template_subdir
            self.template_dir = os.path.abspath(app_path)
        else:
            self.template_dir = os.path.abspath(os.path.join(app_path, template_subdir))
        # I used to check for the existence of the template dir here, but it caused error
        # checking at engine load time (too soon). I now wait until get_template() is called,
        # which fails with a TemplateDoesNotExist exception if the template_dir doesn't exist.
        # calculate the cache root and template search directories
        self.cache_root = os.path.join(self.template_dir, DMP_OPTIONS['TEMPLATES_CACHE_DIR'])
        self.template_search_dirs = [ self.template_dir ]
        self.template_search_dirs.extend(DMP_OPTIONS['TEMPLATES_DIRS'])
        # Mako doesn't allow parent directory inheritance, such as <%inherit file="../../otherapp/templates/base.html"/>
        # including the project base directory allows this through "absolute" like <%inherit file="/otherapp/templates/base.html"/>
        # (note the leading slash, which means BASE_DIR)
        self.template_search_dirs.append(settings.BASE_DIR)
        # create the actual Mako TemplateLookup, which does the actual work
        self.tlookup = DMPTemplateLookup(self, directories=self.template_search_dirs, imports=DMP_OPTIONS['RUNTIME_TEMPLATE_IMPORTS'], module_directory=self.cache_root, collection_size=2000, filesystem_checks=settings.DEBUG, input_encoding=DMP_OPTIONS['DEFAULT_TEMPLATE_ENCODING'])

    def get_template(self, template, def_name=None):
        '''Retrieve a *Django* API template object for the given template name, using the app_path and template_subdir
        settings in this object. This method still uses the corresponding Mako template and engine, but it
        gives a Django API wrapper around it so you can use it the same as any Django template.

        If def_name is provided, template rendering will be limited to the named def/block (see Mako docs).

        This method corresponds to the Django templating system API.
        A Django exception is raised if the template is not found or cannot compile.
        '''
        try:
            # wrap the mako template in an adapter that gives the Django template API
            return MakoTemplateAdapter(self.get_mako_template(template), def_name)
        except (TopLevelLookupException, TemplateLookupException) as e:  # Mako exception raised
            raise TemplateDoesNotExist('Template "%s" not found in search path: %s.' % (template, self.template_search_dirs))
        except (CompileException, SyntaxException) as e:  # Mako exception raised
            raise TemplateSyntaxError('Template "%s" raised an error: %s' % (template, e))

    def get_mako_template(self, template, force=False):
        '''Retrieve the real *Mako* template object for the given template name without any wrapper,
        using the app_path and template_subdir settings in this object.

        This method is an alternative to get_template(). Use it when you need the actual Mako template object.
        This method raises a Mako exception if the template is not found or cannot compile.

        If force is True, an empty Mako template will be created when the file does not exist.
        This option is used by the providers part of DMP and normally be left False.
        '''
        if template is None:
            raise TemplateLookupException('Template "%s" not found in search path: %s.' % (template, self.template_search_dirs))
        # get the template
        try:
            template_obj = self.tlookup.get_template(template)
        except TemplateLookupException:
            if not force:
                raise
            # force=True: fall back to an empty template at the expected path
            template_obj = Template('', filename=os.path.join(self.template_dir, template))
        # return the template
        return template_obj
class MakoTemplateAdapter(object):
    '''A thin wrapper around a Mako template object that exposes the Django template API.'''

    def __init__(self, mako_template, def_name=None):
        '''
        Create an adapter conforming to the Django template API.

        If def_name is provided, rendering is limited to that named
        def/block within the template (see the Mako docs). It can also be
        supplied later, in the call to render().
        '''
        self.mako_template = mako_template
        self.def_name = def_name
@property
def engine(self):
    '''The DMP engine this template belongs to (property required by the Django template API).'''
    # get_dmp_instance() returns the DMP engine singleton registered with Django
    return get_dmp_instance()
def render(self, context=None, request=None, def_name=None):
    '''
    Renders a template using the Mako system. This method signature conforms to
    the Django template API, which specifies that template.render() returns a string.

    @context  A dictionary of name=value variables to send to the template page. This can be a real dictionary
              or a Django Context object.
    @request  The request context from Django. If this is None, any TEMPLATE_CONTEXT_PROCESSORS defined in your settings
              file will be ignored but the template will otherwise render fine.
    @def_name Limits output to a specific top-level Mako <%block> or <%def> section within the template.
              If the section is a <%def>, any parameters must be in the context dictionary. For example,
              def_name="foo" will call <%block name="foo"></%block> or <%def name="foo()"></def> within
              the template. Falls back to self.def_name when not given here.

    Returns the rendered template as a unicode string.

    The method triggers two signals:
        1. dmp_signal_pre_render_template: you can (optionally) return a new Mako Template object from a receiver to replace
           the normal template object that is used for the render operation.
        2. dmp_signal_post_render_template: you can (optionally) return a string to replace the string from the normal
           template object render.
    '''
    # set up the context dictionary, which is the variables available throughout the template
    context_dict = {}
    # if request is None, add some default items because the context processors won't happen
    if request is None:
        context_dict['settings'] = settings
        context_dict['STATIC_URL'] = settings.STATIC_URL
    # let the context_processors add variables to the context.
    if not isinstance(context, Context):
        context = Context(context) if request is None else RequestContext(request, context)
    with context.bind_template(self):
        # flatten the Django Context layers into one plain dict for Mako
        for d in context:
            context_dict.update(d)
    context_dict.pop('self', None)  # some contexts have self in them, and it messes up render_unicode below because we get two selfs

    # send the pre-render signal (receivers may swap in a replacement template)
    if DMP_OPTIONS['SIGNALS'] and request is not None:
        for receiver, ret_template_obj in dmp_signal_pre_render_template.send(sender=self, request=request, context=context, template=self.mako_template):
            if ret_template_obj is not None:
                if isinstance(ret_template_obj, MakoTemplateAdapter):
                    self.mako_template = ret_template_obj.mako_template  # if the signal function sends a MakoTemplateAdapter back, use the real mako template inside of it
                else:
                    self.mako_template = ret_template_obj  # if something else, we assume it is a mako.template.Template, so use it as the template

    # do we need to limit down to a specific def?
    # this only finds within the exact template (won't go up the inheritance tree)
    # I wish I could make it do so, but can't figure this out
    render_obj = self.mako_template
    if def_name is None:
        def_name = self.def_name
    if def_name:  # do we need to limit to just a def?
        render_obj = self.mako_template.get_def(def_name)

    # PRIMARY FUNCTION: render the template
    template_name = '%s::%s' % (self.mako_template.filename or 'string', def_name or 'body')
    if log.isEnabledFor(logging.INFO):
        log.info('rendering template %s', template_name)
    if settings.DEBUG:
        # in DEBUG mode, render Mako's pretty error page instead of raising
        try:
            content = render_obj.render_unicode(**context_dict)
        except Exception as e:
            log.exception('exception raised during template rendering: %s', e)  # to the console
            content = html_error_template().render_unicode()  # to the browser
    else:  # this is outside the above "try" loop because in non-DEBUG mode, we want to let the exception throw out of here (without having to re-raise it)
        content = render_obj.render_unicode(**context_dict)

    # send the post-render signal (receivers may replace the rendered string)
    if DMP_OPTIONS['SIGNALS'] and request is not None:
        for receiver, ret_content in dmp_signal_post_render_template.send(sender=self, request=request, context=context, template=self.mako_template, content=content):
            if ret_content is not None:
                content = ret_content  # sets it to the last non-None return in the signal receiver chain

    # return
    return content
def render_to_response(self, context=None, request=None, def_name=None, content_type=None, status=None, charset=None):
'''
Renders the template and returns an HttpRequest object containing its content.
This method returns a django.http.Http404 exception if the template is not found.
If the template raises a django_mako_plus.RedirectException, the browser is redirected to
the given page, and a new request from the browser restarts the entire DMP routing process.
If the template raises a django_mako_plus.InternalRedirectException, the entire DMP
routing process is restarted internally (the browser doesn't see the redirect).
@request The request context from Django. If this is None, any TEMPLATE_CONTEXT_PROCESSORS defined in your settings
file will be ignored but the template will otherwise render fine.
@template The template file path to render. This is relative to the app_path/controller_TEMPLATES_DIR/ directory.
For example, to | |
"""
xml_file.py
Copyright 2006 <NAME>
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import os
import sys
import time
import base64
import jinja2
import subprocess32 as subprocess
import lz4.frame
from jinja2 import Environment, FileSystemLoader, StrictUndefined
from unicodedata import category
from tempfile import NamedTemporaryFile
from functools import wraps
import w3af.core.data.kb.config as cf
import w3af.core.data.kb.knowledge_base as kb
import w3af.core.controllers.output_manager as om
from w3af import ROOT_PATH
from w3af.core.controllers.plugins.output_plugin import OutputPlugin
from w3af.core.controllers.misc import get_w3af_version
from w3af.core.controllers.exceptions import DBException
from w3af.core.controllers.misc.temp_dir import get_temp_dir
from w3af.core.data.db.url_tree import URLTree
from w3af.core.data.db.history import HistoryItem, TraceReadException
from w3af.core.data.db.disk_list import DiskList
from w3af.core.data.options.opt_factory import opt_factory
from w3af.core.data.options.option_types import OUTPUT_FILE
from w3af.core.data.options.option_list import OptionList
from w3af.core.data.misc.encoding import smart_str_ignore, smart_unicode
from w3af.core.data.misc.dotdict import dotdict
from w3af.core.data.constants.encodings import DEFAULT_ENCODING
# strftime() format used for the human-readable timestamps in the XML report
TIME_FORMAT = '%a %b %d %H:%M:%S %Y'
# Directory that holds the jinja2 templates used to render the XML report
TEMPLATE_ROOT = os.path.join(ROOT_PATH, 'plugins/output/xml_file/')
def took(func):
    """
    Decorator that times the wrapped function and logs slow calls.

    When the wrapped callable takes more than half a second to run, a
    timing message is written to the w3af debug log. This is useful for
    measuring the performance of the xml_file.flush() steps in production.

    :param func: The callable to wrap.
    :return: The wrapped callable; the return value of ``func`` is passed
             through unchanged.
    """
    @wraps(func)
    def func_wrapper(*args, **kwargs):
        start = time.time()
        result = func(*args, **kwargs)
        spent = time.time() - start

        # Only log things which take more than 0.5 seconds, otherwise the
        # debug log would be flooded with timing messages
        if spent > 0.5:
            # NOTE: don't rebind the name `args` here -- the original code
            # shadowed the wrapper's *args tuple, which was harmless after
            # the call but confusing and error-prone
            msg = '[xml_file.flush()] %s took %.2f seconds to run.'
            om.out.debug(msg % (func.__name__, spent))

        return result

    return func_wrapper
class xml_file(OutputPlugin):
    """
    Print all messages to a xml file.

    :author: <NAME> (<EMAIL>)
    """
    # Version identifier written into the generated XML; bump it whenever
    # the report structure changes
    XML_OUTPUT_VERSION = '2.8'

    def __init__(self):
        OutputPlugin.__init__(self)

        # User configured parameters
        self._file_name = '~/report.xml'

        # Scan start time, captured once when the plugin is created
        self._timestamp = str(int(time.time()))
        self._long_timestamp = str(time.strftime(TIME_FORMAT, time.localtime()))

        # Set defaults for scan metadata (filled in by log_enabled_plugins)
        self._plugins_dict = {}
        self._options_dict = {}
        self._scan_targets = None

        # Keep internal state
        self._is_working = False
        self._jinja2_env = self._get_jinja2_env()

        # List with additional xml elements; disk-backed because errors can
        # accumulate over a long scan
        self._errors = DiskList()
def do_nothing(self, *args, **kwds):
    """No-op sink for the message types this plugin does not record."""
    pass

# None of these message types are written to the XML report, so they all
# share the same no-op implementation
debug = information = vulnerability = console = log_http = do_nothing
def error(self, message, new_line=True):
    """
    Record an error message so it can be included in the XML report.

    Called through the output object, which in turn was called from a
    plugin or from the framework.

    :param message: The error text to store.
    :param new_line: Unused here; kept for output plugin API compatibility.
    """
    # get_caller() is costly, but it only runs when an error actually
    # occurs, so it shouldn't impact performance
    self._errors.append((message, self.get_caller()))
def set_options(self, option_list):
    """
    Sets the Options given on the OptionList to self. The options are the
    result of a user entering some data on a window that was constructed
    using the XML Options that was retrieved from the plugin using
    get_options()

    This method MUST be implemented on every plugin.

    :return: No value is returned.
    """
    # The only user-configurable setting is the report's output path
    self._file_name = option_list['output_file'].get_value()
def get_options(self):
    """
    :return: A list of option objects for this plugin.
    """
    options = OptionList()

    options.add(opt_factory('output_file',
                            self._file_name,
                            'Output file name where to write the XML data',
                            OUTPUT_FILE))

    return options
def log_enabled_plugins(self, plugins_dict, options_dict):
    """
    Called from the output manager with the plugin configuration used for
    this scan. The data is only stored here; it is serialized into the XML
    later, during flush() (see _log_enabled_plugins_to_xml for the reason
    the write is deferred).

    :param plugins_dict: A dict with all the plugin types and the enabled
                         plugins for that type of plugin.
    :param options_dict: A dict with the options for every plugin.
    """
    self._plugins_dict = plugins_dict
    self._options_dict = options_dict
def end(self):
    """
    Called when the scan has finished: write the final XML report, then
    release the memory and disk space held by the internal state.
    """
    # Make sure the latest data reaches the output file
    self.flush()

    # Drop references so the memory / temp files can be reclaimed
    self._plugins_dict = {}
    self._options_dict = {}
    self._scan_targets = None
    self._jinja2_env = None
    self._errors.cleanup()
def flush(self):
    """
    Write the XML to the output file

    :return: None
    """
    # Create the cache path (both caches are disk-backed)
    CachedXMLNode.create_cache_path()
    FindingsCache.create_cache_path()

    # Create the context that will be handed to the jinja2 templates
    context = dotdict({})

    try:
        self._add_scan_status_to_context(context)
    except RuntimeError, rte:
        # In some very strange scenarios we get this error:
        #
        #   Can NOT call get_run_time before start()
        #
        # Just "ignore" this call to flush and write the XML in the next call
        msg = 'xml_file.flush() failed to add scan status to context: "%s"'
        om.out.debug(msg % rte)
        return

    self._add_root_info_to_context(context)
    self._add_scan_info_to_context(context)
    self._add_findings_to_context(context)
    self._add_errors_to_context(context)

    # Write to file
    self._write_context_to_file(context)
@took
def _add_root_info_to_context(self, context):
    # Basic report metadata: when the scan started and which w3af / XML
    # format versions produced the report
    context.start_timestamp = self._timestamp
    context.start_time_long = self._long_timestamp
    context.xml_version = self.XML_OUTPUT_VERSION
    context.w3af_version = get_w3af_version.get_w3af_version()
@took
def _add_scan_info_to_context(self, context):
    """Render the scan-info section (targets + plugin config) into the context."""
    if self._scan_targets is None:
        # Lazily compute and cache the comma-separated target list
        targets = cf.cf.get('targets')
        self._scan_targets = ','.join(t.url_string for t in targets)

    context.scan_info = ScanInfo(self._jinja2_env,
                                 self._scan_targets,
                                 self._plugins_dict,
                                 self._options_dict).to_string()
@took
def _add_scan_status_to_context(self, context):
    """Render the scan-status section into the context.

    NOTE(review): this looks like it can raise RuntimeError when called
    before the scan has started; the caller (flush) handles that case.
    """
    debug = om.out.debug
    debug('[xml_file.flush()] _add_scan_status_to_context() start')

    status = self.get_w3af_core().status.get_status_as_dict()
    debug('[xml_file.flush()] _add_scan_status_to_context() read status')

    all_known_urls = kb.kb.get_all_known_urls()
    total_urls = len(all_known_urls)
    debug('[xml_file.flush()] _add_scan_status_to_context() read total_urls')

    known_urls = self._get_known_urls(all_known_urls)
    debug('[xml_file.flush()] _add_scan_status_to_context() read generated URLTree')

    context.scan_status = ScanStatus(self._jinja2_env, status,
                                     total_urls, known_urls).to_string()
    debug('[xml_file.flush()] _add_scan_status_to_context() rendered')
def _get_known_urls(self, all_known_urls):
    """
    Structure the known URLs into a tree, which has helper methods that
    make it easy to print the URLs from the jinja2 templates.

    :param all_known_urls: The URLs returned by kb.get_all_known_urls()
    :return: A URLTree instance
    """
    tree = URLTree()

    for known_url in all_known_urls:
        tree.add_url(known_url)

    return tree
@took
def _add_errors_to_context(self, context):
    # DiskList of (message, caller) tuples collected by error()
    context.errors = self._errors
def findings(self):
    """
    A small generator that queries the findings cache and yields all the
    findings so they get written to the XML.

    :yield: Strings representing the findings as XML
    """
    cache = FindingsCache()
    cached_nodes = cache.list()

    # uniq ids yielded during this run (cached or new); anything that is in
    # the cache but not in this list was removed from the KB and gets
    # evicted at the end
    processed_uniq_ids = []

    om.out.debug('[xml_file.flush()] Starting findings()')
    start = time.time()

    #
    # This for loop is a performance improvement which should yield
    # really good results, taking into account that get_all_uniq_ids_iter
    # will only query the DB and yield IDs, without doing any of the
    # CPU-intensive cPickle.loads() done in get_all_findings_iter()
    # which we do below.
    #
    # Ideally, we're only doing a cPickle.loads() once for each finding
    # the rest of the calls to flush() will load the finding from the
    # cache in this loop, and use the exclude_ids to prevent cached
    # entries from being queried
    #
    # What this for loop also guarantees is that we're not simply
    # reading all the items from the cache and putting them into the XML,
    # which would be incorrect because some items are modified in the
    # KB (which changes their uniq id)
    #
    for uniq_id in kb.kb.get_all_uniq_ids_iter(include_ids=cached_nodes):
        node = cache.get_node_from_cache(uniq_id)

        # cached_nodes can be (), this means that get_all_uniq_ids_iter()
        # will return *all* findings, some might not be in the cache. When
        # that happens, the cache returns None
        if node is not None:
            yield node
            processed_uniq_ids.append(uniq_id)

    msg = '[xml_file.flush()] findings() processed %s cached nodes in %.2f seconds'
    spent = time.time() - start
    args = (len(processed_uniq_ids), spent)
    om.out.debug(msg % args)

    start = time.time()

    #
    # This for loop is getting all the new findings that w3af has found
    # In this context "new" means that the findings are not in the cache
    #
    new_findings = 0

    for finding in kb.kb.get_all_findings_iter(exclude_ids=cached_nodes):
        uniq_id = finding.get_uniq_id()
        processed_uniq_ids.append(uniq_id)
        # render the finding to XML once and store it for later flushes
        node = Finding(self._jinja2_env, finding).to_string()
        cache.save_finding_to_cache(uniq_id, node)

        new_findings += 1

        yield node

    msg = '[xml_file.flush()] findings() processed %s new findings in %.2f seconds'
    spent = time.time() - start
    args = (new_findings, spent)
    om.out.debug(msg % args)

    start = time.time()

    #
    # Now that we've finished processing all the new findings we can
    # evict the findings that were removed from the KB from the cache
    #
    evicted_findings = 0

    for cached_finding in cached_nodes:
        if cached_finding not in processed_uniq_ids:
            cache.evict_from_cache(cached_finding)
            evicted_findings += 1

    msg = '[xml_file.flush()] findings() evicted %s findings from cache in %.2f seconds'
    spent = time.time() - start
    args = (evicted_findings, spent)
    om.out.debug(msg % args)
@took
def _add_findings_to_context(self, context):
    # findings() is already a generator, so store it directly; the XML for
    # each finding is produced lazily while the template renders. (The
    # original code wrapped it in a redundant pass-through generator
    # expression, which added nothing.)
    context.findings = self.findings()
def _get_jinja2_env(self):
"""
Creates the jinja2 environment which will be | |
at least one NA will be skipped completely.
If the test dataset has missing values, then those predictors are omitted in the probability
calculation during prediction.
"""
if algo == "pca":
return """
Principal component analysis of an H2O data frame
Principal components analysis of an H2O data frame using the power method
to calculate the singular value decomposition of the Gram matrix.
"""
if algo == "svd":
return """
Singular value decomposition of an H2O data frame using the power method
"""
if algo == "word2vec":
return """
Trains a word2vec model on a String column of an H2O data frame
"""
if algo == "coxph":
return """
Trains a Cox Proportional Hazards Model (CoxPH) on an H2O dataset
"""
if algo == "isolationforest":
return """
Trains an Isolation Forest model
"""
if algo == "generic":
return """
Imports a generic model into H2O. Such model can be used then used for scoring and obtaining
additional information about the model. The imported model has to be supported by H2O.
"""
def help_details_for(algo):
    """Return the extended 'Details' documentation text for ``algo``.

    Only naivebayes carries extra details; every other algo yields None so
    the caller can skip the section entirely.
    """
    if algo != "naivebayes":
        return None
    return """The naive Bayes classifier assumes independence between predictor variables conditional
on the response, and a Gaussian distribution of numeric predictors with mean and standard
deviation computed from the training dataset. When building a naive Bayes classifier,
every row in the training dataset that contains at least one NA will be skipped completely.
If the test dataset has missing values, then those predictors are omitted in the probability
calculation during prediction."""
def help_return_for(algo):
    """Return the roxygen '@return' documentation block for ``algo``.

    Algos without a specific entry yield None.
    """
    # glrm, pca and svd all produce the same dimensionality-reduction model
    dim_reduction = "#' @return Returns an object of class \linkS4class{H2ODimReductionModel}."
    blocks = {
        "drf": "#' @return Creates a \linkS4class{H2OModel} object of the right type.",
        "glm": """#' @return A subclass of \code{\linkS4class{H2OModel}} is returned. The specific subclass depends on the machine
#' learning task at hand (if it's binomial classification, then an \code{\linkS4class{H2OBinomialModel}} is
#' returned, if it's regression then a \code{\linkS4class{H2ORegressionModel}} is returned). The default print-
#' out of the models is shown, but further GLM-specifc information can be queried out of the object. To access
#' these various items, please refer to the seealso section below. Upon completion of the GLM, the resulting
#' object has coefficients, normalized coefficients, residual/null deviance, aic, and a host of model metrics
#' including MSE, AUC (for logistic regression), degrees of freedom, and confusion matrices. Please refer to the
#' more in-depth GLM documentation available here:
#' \\url{https://h2o-release.s3.amazonaws.com/h2o-dev/rel-shannon/2/docs-website/h2o-docs/index.html#Data+Science+Algorithms-GLM}
""",
        "kmeans": "#' @return Returns an object of class \linkS4class{H2OClusteringModel}.",
        "naivebayes": """#' @return Returns an object of class \linkS4class{H2OBinomialModel} if the response has two categorical levels,
#' and \linkS4class{H2OMultinomialModel} otherwise.""",
        "glrm": dim_reduction,
        "pca": dim_reduction,
        "svd": dim_reduction,
    }
    return blocks.get(algo)
def help_epilogue_for(algo):
    """Return the roxygen '@seealso' epilogue for ``algo``, or None when absent."""
    # deeplearning, drf and gbm share the same minimal see-also text
    prediction_only = """\code{\link{predict.H2OModel}} for prediction"""
    epilogues = {
        "glm": """\code{\link{predict.H2OModel}} for prediction, \code{\link{h2o.mse}}, \code{\link{h2o.auc}}, \code{\link{h2o.confusionMatrix}}, \code{\link{h2o.performance}}, \code{\link{h2o.giniCoef}}, \code{\link{h2o.logloss}}, \code{\link{h2o.varimp}}, \code{\link{h2o.scoreHistory}}""",
        "glrm": """\code{\link{h2o.kmeans}, \link{h2o.svd}}, \code{\link{h2o.prcomp}}""",
        "kmeans": """\code{\link{h2o.cluster_sizes}}, \code{\link{h2o.totss}}, \code{\link{h2o.num_iterations}}, \code{\link{h2o.betweenss}}, \code{\link{h2o.tot_withinss}}, \code{\link{h2o.withinss}}, \code{\link{h2o.centersSTD}}, \code{\link{h2o.centers}}""",
        "pca": """\code{\link{h2o.svd}}, \code{\link{h2o.glrm}}""",
        "deeplearning": prediction_only,
        "drf": prediction_only,
        "gbm": prediction_only,
    }
    return epilogues.get(algo)
def help_references_for(algo):
    """Return the roxygen '@references' text for ``algo``, or None when absent."""
    # The Halko et al. citation is shared by glrm (as its second reference),
    # svd and pca
    halko = """<NAME>, <NAME>, <NAME>. {Finding structure with randomness: Probabilistic algorithms for constructing approximate matrix decompositions}[http://arxiv.org/abs/0909.4061]. SIAM Rev., Survey and Review section, Vol. 53, num. 2, pp. 217-288, June 2011."""
    if algo == "glrm":
        return """<NAME>, <NAME>, <NAME>, <NAME> (2014). {Generalized Low Rank Models}[http://arxiv.org/abs/1410.0342]. Unpublished manuscript, Stanford Electrical Engineering Department
#' """ + halko
    if algo in ("svd", "pca"):
        return halko
    return None
def help_example_for(algo):
    """Return the roxygen '@examples' R snippet for ``algo``, or None when the
    algo has no example. The returned text is inserted verbatim into the
    generated R documentation, so the string contents must not be altered."""
    if algo == "aggregator":
        return """\donttest{
library(h2o)
h2o.init()
df <- h2o.createFrame(rows=100, cols=5, categorical_fraction=0.6, integer_fraction=0,
binary_fraction=0, real_range=100, integer_range=100, missing_fraction=0)
target_num_exemplars=1000
rel_tol_num_exemplars=0.5
encoding="Eigen"
agg <- h2o.aggregator(training_frame=df,
target_num_exemplars=target_num_exemplars,
rel_tol_num_exemplars=rel_tol_num_exemplars,
categorical_encoding=encoding)
}"""
    if algo == "deeplearning":
        return """\donttest{
library(h2o)
h2o.init()
iris_hf <- as.h2o(iris)
iris_dl <- h2o.deeplearning(x = 1:4, y = 5, training_frame = iris_hf, seed=123456)
# now make a prediction
predictions <- h2o.predict(iris_dl, iris_hf)
}"""
    if algo == "gbm":
        return """\donttest{
library(h2o)
h2o.init()
# Run regression GBM on australia data
australia_path <- system.file("extdata", "australia.csv", package = "h2o")
australia <- h2o.uploadFile(path = australia_path)
independent <- c("premax", "salmax","minairtemp", "maxairtemp", "maxsst",
"maxsoilmoist", "Max_czcs")
dependent <- "runoffnew"
h2o.gbm(y = dependent, x = independent, training_frame = australia,
ntrees = 3, max_depth = 3, min_rows = 2)
}"""
    if algo == "glm":
        return """\donttest{
h2o.init()
# Run GLM of CAPSULE ~ AGE + RACE + PSA + DCAPS
prostate_path = system.file("extdata", "prostate.csv", package = "h2o")
prostate = h2o.importFile(path = prostate_path)
h2o.glm(y = "CAPSULE", x = c("AGE","RACE","PSA","DCAPS"), training_frame = prostate,
family = "binomial", nfolds = 0, alpha = 0.5, lambda_search = FALSE)
# Run GLM of VOL ~ CAPSULE + AGE + RACE + PSA + GLEASON
predictors = setdiff(colnames(prostate), c("ID", "DPROS", "DCAPS", "VOL"))
h2o.glm(y = "VOL", x = predictors, training_frame = prostate, family = "gaussian",
nfolds = 0, alpha = 0.1, lambda_search = FALSE)
# GLM variable importance
# Also see:
# https://github.com/h2oai/h2o/blob/master/R/tests/testdir_demos/runit_demo_VI_all_algos.R
bank = h2o.importFile(
path = "https://s3.amazonaws.com/h2o-public-test-data/smalldata/demos/bank-additional-full.csv")
predictors = 1:20
target="y"
glm = h2o.glm(x=predictors, y=target, training_frame=bank, family="binomial", standardize=TRUE,
lambda_search=TRUE)
h2o.std_coef_plot(glm, num_of_features = 20)
}"""
    if algo == "glrm":
        return """\donttest{
library(h2o)
h2o.init()
australia_path <- system.file("extdata", "australia.csv", package = "h2o")
australia <- h2o.uploadFile(path = australia_path)
h2o.glrm(training_frame = australia, k = 5, loss = "Quadratic", regularization_x = "L1",
gamma_x = 0.5, gamma_y = 0, max_iterations = 1000)
}"""
    if algo == "kmeans":
        return """\donttest{
library(h2o)
h2o.init()
prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
prostate <- h2o.uploadFile(path = prostate_path)
h2o.kmeans(training_frame = prostate, k = 10, x = c("AGE", "RACE", "VOL", "GLEASON"))
}"""
    if algo == "naivebayes":
        return """\donttest{
h2o.init()
votes_path <- system.file("extdata", "housevotes.csv", package = "h2o")
votes <- h2o.uploadFile(path = votes_path, header = TRUE)
h2o.naiveBayes(x = 2:17, y = 1, training_frame = votes, laplace = 3)
}"""
    if algo == "pca":
        return """\donttest{
library(h2o)
h2o.init()
australia_path <- system.file("extdata", "australia.csv", package = "h2o")
australia <- h2o.uploadFile(path = australia_path)
h2o.prcomp(training_frame = australia, k = 8, transform = "STANDARDIZE")
}"""
    if algo == "svd":
        return """\donttest{
library(h2o)
h2o.init()
australia_path <- system.file("extdata", "australia.csv", package = "h2o")
australia <- h2o.uploadFile(path = australia_path)
h2o.svd(training_frame = australia, nv = 8)
}"""
    if algo == "stackedensemble":
        return """
# See example R code here:
# http://docs.h2o.ai/h2o/latest-stable/h2o-docs/data-science/stacked-ensembles.html
"""
    if algo == "generic":
        return """\dontrun{
# library(h2o)
# h2o.init()
# generic_model <- h2o.genericModel("/path/to/model.zip")
# predictions <- h2o.predict(generic_model, dataset)
}"""
def get_extra_params_for(algo):
    """Return the extra R function parameters (as a signature fragment) for ``algo``.

    Algos not listed explicitly fall back to the default "training_frame, x".
    """
    # supervised algos all share the x / y / training_frame triple
    xy_training = "x, y, training_frame"
    special = {
        "glrm": "training_frame, cols = NULL",
        "deeplearning": xy_training,
        "deepwater": xy_training,
        "xgboost": xy_training,
        "drf": xy_training,
        "gbm": xy_training,
        "glm": xy_training,
        "naivebayes": xy_training,
        "stackedensemble": xy_training,
        "svd": "training_frame, x, destination_key",
        "word2vec": "training_frame = NULL",
        "coxph": "x, event_column, training_frame",
        "generic": "",
    }
    return special.get(algo, "training_frame, x")
def help_extra_params_for(algo):
if algo == "glrm":
return "#' @param cols (Optional) A vector containing the data columns on which k-means operates."
elif algo in ["deeplearning", "deepwater", "xgboost", "drf", "gbm", "glm", "naivebayes"]:
x_string = """#' @param x (Optional) A vector containing the names or indices of the predictor variables to use in building the model.
#' If x is missing, then all columns except y are used."""
elif algo == "coxph":
return """#' @param x (Optional) A vector containing the names or indices of the predictor variables to use in building the model.
#' If x is missing, then all columns except event_column, start_column and stop_column are used.
#' @param event_column The name of binary data column in the training frame indicating the occurrence of an event."""
elif algo == "stackedensemble":
x_string = """#' @param x (Optional). A vector containing the names or indices of the predictor variables to use in building the model.
#' If x is missing, then all columns except y are used. Training frame is used only to compute ensemble training metrics. """
elif algo == "svd":
return """#' @param x A vector containing the \code{character} names of the predictors | |
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_2, False) ]))
st_3._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_3, True) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_3, False) ]))
st_4._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_6, [
]))
transitions.append(fac.Transition(st_7, [
]))
st_5._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_4, True) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_4, False) ]))
st_6._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_8, [
]))
transitions.append(fac.Transition(st_9, [
]))
transitions.append(fac.Transition(st_10, [
]))
st_7._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_8, [
fac.UpdateInstruction(cc_5, True) ]))
transitions.append(fac.Transition(st_9, [
fac.UpdateInstruction(cc_5, False) ]))
transitions.append(fac.Transition(st_10, [
fac.UpdateInstruction(cc_5, False) ]))
st_8._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_9, [
fac.UpdateInstruction(cc_6, True) ]))
transitions.append(fac.Transition(st_10, [
fac.UpdateInstruction(cc_6, False) ]))
st_9._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_10, [
fac.UpdateInstruction(cc_7, True) ]))
st_10._set_transitionSet(transitions)
return fac.Automaton(states, counters, False, containing_state=None)
# Attach the content-model automaton built by _BuildAutomaton() (above) to
# the anonymous complex type binding
CTD_ANON._Automaton = _BuildAutomaton()

# Register the 'Variant' child element (type CTD_ANON_2) in CTD_ANON_'s scope
CTD_ANON_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Variant'), CTD_ANON_2, scope=CTD_ANON_, location=pyxb.utils.utility.Location('ClaML.xsd', 48, 4)))
# NOTE: the _BuildAutomaton* helpers below are PyXB-generated (from ClaML.xsd).
# Each builds the finite-state automaton that validates a complex type's
# element content, then deletes itself from the module namespace.
def _BuildAutomaton_ ():
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_
    del _BuildAutomaton_
    import pyxb.utils.fac as fac
    counters = set()
    states = []
    final_update = set()
    # Single state for the 'Variant' element; the self-transition below lets
    # the element appear repeatedly
    symbol = pyxb.binding.content.ElementUse(CTD_ANON_._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Variant')), pyxb.utils.utility.Location('ClaML.xsd', 44, 16))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    transitions = []
    transitions.append(fac.Transition(st_0, [
        ]))
    st_0._set_transitionSet(transitions)
    return fac.Automaton(states, counters, False, containing_state=None)
CTD_ANON_._Automaton = _BuildAutomaton_()
def _BuildAutomaton_2 ():
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_2
    del _BuildAutomaton_2
    import pyxb.utils.fac as fac
    counters = set()
    states = []
    # No states or counters: CTD_ANON_2 has no child-element content model
    return fac.Automaton(states, counters, True, containing_state=None)
CTD_ANON_2._Automaton = _BuildAutomaton_2()
def _BuildAutomaton_3 ():
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_3
    del _BuildAutomaton_3
    import pyxb.utils.fac as fac
    counters = set()
    states = []
    # No states or counters: CTD_ANON_5 has no child-element content model
    return fac.Automaton(states, counters, True, containing_state=None)
CTD_ANON_5._Automaton = _BuildAutomaton_3()
# Register the 'Author' child element (type CTD_ANON_7) in CTD_ANON_6's scope
CTD_ANON_6._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Author'), CTD_ANON_7, scope=CTD_ANON_6, location=pyxb.utils.utility.Location('ClaML.xsd', 80, 4)))

def _BuildAutomaton_4 ():
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_4
    del _BuildAutomaton_4
    import pyxb.utils.fac as fac
    counters = set()
    # Occurrence counter for 'Author': min=0, max=None (unbounded)
    cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 76, 16))
    counters.add(cc_0)
    states = []
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_0, False))
    symbol = pyxb.binding.content.ElementUse(CTD_ANON_6._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Author')), pyxb.utils.utility.Location('ClaML.xsd', 76, 16))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    transitions = []
    # Self-transition increments the counter each time 'Author' repeats
    transitions.append(fac.Transition(st_0, [
        fac.UpdateInstruction(cc_0, True) ]))
    st_0._set_transitionSet(transitions)
    return fac.Automaton(states, counters, True, containing_state=None)
CTD_ANON_6._Automaton = _BuildAutomaton_4()
def _BuildAutomaton_5 ():
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_5
    del _BuildAutomaton_5
    import pyxb.utils.fac as fac
    counters = set()
    states = []
    # No states or counters: CTD_ANON_7 has no child-element content model
    return fac.Automaton(states, counters, True, containing_state=None)
CTD_ANON_7._Automaton = _BuildAutomaton_5()
# Register the 'ClassKind' child element (type CTD_ANON_11) in CTD_ANON_8's scope
CTD_ANON_8._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'ClassKind'), CTD_ANON_11, scope=CTD_ANON_8, location=pyxb.utils.utility.Location('ClaML.xsd', 106, 4)))

def _BuildAutomaton_6 ():
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_6
    del _BuildAutomaton_6
    import pyxb.utils.fac as fac
    counters = set()
    states = []
    final_update = set()
    # Single 'ClassKind' state; the self-transition below allows repetition
    symbol = pyxb.binding.content.ElementUse(CTD_ANON_8._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'ClassKind')), pyxb.utils.utility.Location('ClaML.xsd', 88, 16))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    transitions = []
    transitions.append(fac.Transition(st_0, [
        ]))
    st_0._set_transitionSet(transitions)
    return fac.Automaton(states, counters, False, containing_state=None)
CTD_ANON_8._Automaton = _BuildAutomaton_6()
# Register the 'RubricKind' child element (type CTD_ANON_38) in CTD_ANON_9's scope
CTD_ANON_9._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'RubricKind'), CTD_ANON_38, scope=CTD_ANON_9, location=pyxb.utils.utility.Location('ClaML.xsd', 114, 4)))

def _BuildAutomaton_7 ():
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_7
    del _BuildAutomaton_7
    import pyxb.utils.fac as fac
    counters = set()
    states = []
    final_update = set()
    # Single 'RubricKind' state; the self-transition below allows repetition
    symbol = pyxb.binding.content.ElementUse(CTD_ANON_9._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'RubricKind')), pyxb.utils.utility.Location('ClaML.xsd', 95, 16))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    transitions = []
    transitions.append(fac.Transition(st_0, [
        ]))
    st_0._set_transitionSet(transitions)
    return fac.Automaton(states, counters, False, containing_state=None)
CTD_ANON_9._Automaton = _BuildAutomaton_7()
CTD_ANON_10._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'UsageKind'), CTD_ANON_12, scope=CTD_ANON_10, location=pyxb.utils.utility.Location('ClaML.xsd', 130, 4)))
def _BuildAutomaton_8 ():
    # Self-deleting helper: builds the content-model automaton for
    # CTD_ANON_10 around its 'UsageKind' child element, then removes
    # itself from the module namespace.
    global _BuildAutomaton_8
    del _BuildAutomaton_8
    import pyxb.utils.fac as fac
    symbol = pyxb.binding.content.ElementUse(
        CTD_ANON_10._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'UsageKind')),
        pyxb.utils.utility.Location('ClaML.xsd', 102, 16))
    # Single accepting state with an unconditional self-transition;
    # the automaton itself is not nullable.
    start = fac.State(symbol, is_initial=True, final_update=set(),
                      is_unordered_catenation=False)
    start._set_transitionSet([fac.Transition(start, [])])
    return fac.Automaton([start], set(), False, containing_state=None)
CTD_ANON_10._Automaton = _BuildAutomaton_8()
CTD_ANON_11._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Display'), CTD_ANON_13, scope=CTD_ANON_11, location=pyxb.utils.utility.Location('ClaML.xsd', 136, 4)))
def _BuildAutomaton_9 ():
    # Self-deleting helper: builds the content-model automaton for
    # CTD_ANON_11 -- a 'Display' element governed by one occurrence
    # counter (min=0, max=None) -- then removes itself from the namespace.
    global _BuildAutomaton_9
    del _BuildAutomaton_9
    import pyxb.utils.fac as fac
    loc = pyxb.utils.utility.Location('ClaML.xsd', 109, 16)
    cc_0 = fac.CounterCondition(min=0, max=None, metadata=loc)
    symbol = pyxb.binding.content.ElementUse(
        CTD_ANON_11._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Display')), loc)
    st_0 = fac.State(symbol, is_initial=True,
                     final_update={fac.UpdateInstruction(cc_0, False)},
                     is_unordered_catenation=False)
    # Self-transition re-applies the occurrence-counter update on repeats.
    st_0._set_transitionSet([fac.Transition(st_0, [fac.UpdateInstruction(cc_0, True)])])
    return fac.Automaton([st_0], {cc_0}, True, containing_state=None)
CTD_ANON_11._Automaton = _BuildAutomaton_9()
def _BuildAutomaton_10 ():
    # Self-deleting helper: CTD_ANON_13 has no element content, so its
    # automaton has no states or counters and accepts only the empty
    # sequence (nullable=True).
    global _BuildAutomaton_10
    del _BuildAutomaton_10
    import pyxb.utils.fac as fac
    return fac.Automaton([], set(), True, containing_state=None)
CTD_ANON_13._Automaton = _BuildAutomaton_10()
# Register the child elements of CTD_ANON_14 with the binding class.
CTD_ANON_14._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Meta'), CTD_ANON_3, scope=CTD_ANON_14, location=pyxb.utils.utility.Location('ClaML.xsd', 53, 4)))
CTD_ANON_14._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Rubric'), CTD_ANON_19, scope=CTD_ANON_14, location=pyxb.utils.utility.Location('ClaML.xsd', 217, 4)))
CTD_ANON_14._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'History'), CTD_ANON_21, scope=CTD_ANON_14, location=pyxb.utils.utility.Location('ClaML.xsd', 236, 4)))
CTD_ANON_14._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'SubClass'), CTD_ANON_23, scope=CTD_ANON_14, location=pyxb.utils.utility.Location('ClaML.xsd', 248, 4)))
def _BuildAutomaton_11 ():
    # Build the content-model automaton for CTD_ANON_14: four child
    # elements (Meta, SubClass, Rubric, History), each tracked by its own
    # occurrence counter (min=0, max=None).
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_11
    del _BuildAutomaton_11
    import pyxb.utils.fac as fac
    counters = set()
    cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 145, 16))
    counters.add(cc_0)
    cc_1 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 146, 16))
    counters.add(cc_1)
    cc_2 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 147, 16))
    counters.add(cc_2)
    cc_3 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 148, 16))
    counters.add(cc_3)
    states = []
    # One state per child element; all four are valid initial states.
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_0, False))
    symbol = pyxb.binding.content.ElementUse(CTD_ANON_14._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Meta')), pyxb.utils.utility.Location('ClaML.xsd', 145, 16))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_1, False))
    symbol = pyxb.binding.content.ElementUse(CTD_ANON_14._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'SubClass')), pyxb.utils.utility.Location('ClaML.xsd', 146, 16))
    st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_1)
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_2, False))
    symbol = pyxb.binding.content.ElementUse(CTD_ANON_14._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Rubric')), pyxb.utils.utility.Location('ClaML.xsd', 147, 16))
    st_2 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_2)
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_3, False))
    symbol = pyxb.binding.content.ElementUse(CTD_ANON_14._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'History')), pyxb.utils.utility.Location('ClaML.xsd', 148, 16))
    st_3 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_3)
    # Transitions only go from a state to itself (counter incremented) or
    # to a later state (counter finalized), i.e. forward through the order
    # Meta, SubClass, Rubric, History.
    transitions = []
    transitions.append(fac.Transition(st_0, [
        fac.UpdateInstruction(cc_0, True) ]))
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_0, False) ]))
    transitions.append(fac.Transition(st_2, [
        fac.UpdateInstruction(cc_0, False) ]))
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_0, False) ]))
    st_0._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_2, [
        fac.UpdateInstruction(cc_1, False) ]))
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_1, False) ]))
    st_1._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_2, [
        fac.UpdateInstruction(cc_2, True) ]))
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_2, False) ]))
    st_2._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_3, True) ]))
    st_3._set_transitionSet(transitions)
    # nullable=True: empty content is acceptable (all children optional).
    return fac.Automaton(states, counters, True, containing_state=None)
CTD_ANON_14._Automaton = _BuildAutomaton_11()
# Register the child elements of CTD_ANON_15 with the binding class.
CTD_ANON_15._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Meta'), CTD_ANON_3, scope=CTD_ANON_15, location=pyxb.utils.utility.Location('ClaML.xsd', 53, 4)))
CTD_ANON_15._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Rubric'), CTD_ANON_19, scope=CTD_ANON_15, location=pyxb.utils.utility.Location('ClaML.xsd', 217, 4)))
CTD_ANON_15._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'History'), CTD_ANON_21, scope=CTD_ANON_15, location=pyxb.utils.utility.Location('ClaML.xsd', 236, 4)))
CTD_ANON_15._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'SuperClass'), CTD_ANON_22, scope=CTD_ANON_15, location=pyxb.utils.utility.Location('ClaML.xsd', 242, 4)))
CTD_ANON_15._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'SubClass'), CTD_ANON_23, scope=CTD_ANON_15, location=pyxb.utils.utility.Location('ClaML.xsd', 248, 4)))
def _BuildAutomaton_12 ():
    # Build the content-model automaton for CTD_ANON_15: optional repeated
    # Meta, then SuperClass, then optional repeated SubClass, Rubric and
    # History.  The automaton is not nullable and the Meta state is
    # non-accepting, so SuperClass is effectively required.
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_12
    del _BuildAutomaton_12
    import pyxb.utils.fac as fac
    counters = set()
    cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 157, 16))
    counters.add(cc_0)
    cc_1 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 159, 16))
    counters.add(cc_1)
    cc_2 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 160, 16))
    counters.add(cc_2)
    cc_3 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 161, 16))
    counters.add(cc_3)
    states = []
    # final_update=None marks a non-accepting state.
    final_update = None
    symbol = pyxb.binding.content.ElementUse(CTD_ANON_15._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Meta')), pyxb.utils.utility.Location('ClaML.xsd', 157, 16))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    final_update = set()
    symbol = pyxb.binding.content.ElementUse(CTD_ANON_15._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'SuperClass')), pyxb.utils.utility.Location('ClaML.xsd', 158, 16))
    st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_1)
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_1, False))
    symbol = pyxb.binding.content.ElementUse(CTD_ANON_15._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'SubClass')), pyxb.utils.utility.Location('ClaML.xsd', 159, 16))
    st_2 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_2)
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_2, False))
    symbol = pyxb.binding.content.ElementUse(CTD_ANON_15._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Rubric')), pyxb.utils.utility.Location('ClaML.xsd', 160, 16))
    st_3 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_3)
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_3, False))
    symbol = pyxb.binding.content.ElementUse(CTD_ANON_15._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'History')), pyxb.utils.utility.Location('ClaML.xsd', 161, 16))
    st_4 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_4)
    # Transitions move forward through Meta -> SuperClass -> SubClass ->
    # Rubric -> History; self-transitions allow repetition.
    transitions = []
    transitions.append(fac.Transition(st_0, [
        fac.UpdateInstruction(cc_0, True) ]))
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_0, False) ]))
    st_0._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_2, [
        ]))
    transitions.append(fac.Transition(st_3, [
        ]))
    transitions.append(fac.Transition(st_4, [
        ]))
    st_1._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_2, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_1, False) ]))
    transitions.append(fac.Transition(st_4, [
        fac.UpdateInstruction(cc_1, False) ]))
    st_2._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_2, True) ]))
    transitions.append(fac.Transition(st_4, [
        fac.UpdateInstruction(cc_2, False) ]))
    st_3._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_4, [
        fac.UpdateInstruction(cc_3, True) ]))
    st_4._set_transitionSet(transitions)
    return fac.Automaton(states, counters, False, containing_state=None)
CTD_ANON_15._Automaton = _BuildAutomaton_12()
# Register the child elements of CTD_ANON_16 with the binding class
# (the corresponding automaton is built by _BuildAutomaton_13 below).
CTD_ANON_16._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Meta'), CTD_ANON_3, scope=CTD_ANON_16, location=pyxb.utils.utility.Location('ClaML.xsd', 53, 4)))
CTD_ANON_16._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'ModifiedBy'), CTD_ANON_39, scope=CTD_ANON_16, location=pyxb.utils.utility.Location('ClaML.xsd', 186, 4)))
CTD_ANON_16._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'ExcludeModifier'), CTD_ANON_17, scope=CTD_ANON_16, location=pyxb.utils.utility.Location('ClaML.xsd', 205, 4)))
CTD_ANON_16._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Rubric'), CTD_ANON_19, scope=CTD_ANON_16, location=pyxb.utils.utility.Location('ClaML.xsd', 217, 4)))
CTD_ANON_16._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'History'), CTD_ANON_21, scope=CTD_ANON_16, location=pyxb.utils.utility.Location('ClaML.xsd', 236, 4)))
CTD_ANON_16._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'SuperClass'), CTD_ANON_22, scope=CTD_ANON_16, location=pyxb.utils.utility.Location('ClaML.xsd', 242, 4)))
CTD_ANON_16._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'SubClass'), CTD_ANON_23, scope=CTD_ANON_16, location=pyxb.utils.utility.Location('ClaML.xsd', 248, 4)))
def _BuildAutomaton_13 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_13
del _BuildAutomaton_13
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 172, 16))
counters.add(cc_0)
cc_1 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 173, 16))
counters.add(cc_1)
cc_2 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 174, 16))
counters.add(cc_2)
cc_3 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 175, 16))
counters.add(cc_3)
cc_4 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 176, 16))
counters.add(cc_4)
cc_5 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 177, 16))
counters.add(cc_5)
cc_6 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 178, 16))
counters.add(cc_6)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, | |
# <gh_stars>10-100
import os
import time
from pathlib import Path
import numpy as np

# Suppress warnings globally for this module.  ``np.warnings`` was an
# accidental re-export of the stdlib ``warnings`` module and was removed
# in numpy 1.25; use the stdlib module directly.
import warnings
warnings.filterwarnings('ignore')
import flopy
import pandas as pd
fm = flopy.modflow
from flopy.modflow import Modflow
from mfsetup.bcs import (
remove_inactive_bcs,
setup_flopy_stress_period_data,
setup_ghb_data,
)
from mfsetup.discretization import (
deactivate_idomain_above,
find_remove_isolated_cells,
make_ibound,
)
from mfsetup.fileio import (
add_version_to_fileheader,
flopy_mf2005_load,
load,
load_cfg,
save_array,
)
from mfsetup.ic import setup_strt
from mfsetup.lakes import (
make_bdlknc2d,
make_bdlknc_zones,
setup_lake_fluxes,
setup_lake_info,
setup_lake_tablefiles,
)
from mfsetup.mfmodel import MFsetupMixin
from mfsetup.obs import read_observation_data, setup_head_observations
from mfsetup.oc import parse_oc_period_input
from mfsetup.tdis import get_parent_stress_periods, setup_perioddata_group
from mfsetup.tmr import TmrNew
from mfsetup.units import convert_length_units, itmuni_text, lenuni_text
from mfsetup.utils import get_input_arguments, get_packages
from .wells import setup_wel_data
class MFnwtModel(MFsetupMixin, Modflow):
"""Class representing a MODFLOW-NWT model"""
default_file = '/mfnwt_defaults.yml'
    def __init__(self, parent=None, cfg=None,
                 modelname='model', exe_name='mfnwt',
                 version='mfnwt', model_ws='.',
                 external_path='external/', **kwargs):
        """Set up a MODFLOW-NWT model.

        Parameters
        ----------
        parent : optional
            Parent/source model (handled by MFsetupMixin).
        cfg : dict or str, optional
            MFsetup configuration; non-dict values are passed through
            :meth:`load_cfg` first.
        modelname, exe_name, version, model_ws, external_path :
            Passed to the flopy ``Modflow`` constructor.
        **kwargs :
            Additional flopy ``Modflow`` arguments; keys that duplicate
            configuration-supplied values are dropped.
        """
        defaults = {'parent': parent,
                    'modelname': modelname,
                    'exe_name': exe_name,
                    'version': version,
                    'model_ws': model_ws,
                    'external_path': external_path,
                    }
        # load configuration, if supplied
        if cfg is not None:
            if not isinstance(cfg, dict):
                cfg = self.load_cfg(cfg)
            cfg = self._parse_model_kwargs(cfg)
            defaults.update(cfg['model'])
            # configuration values take priority over duplicate kwargs
            kwargs = {k: v for k, v in kwargs.items() if k not in defaults}
        # otherwise, pass arguments on to flopy constructor
        args = get_input_arguments(defaults, Modflow,
                                   exclude='packages')
        Modflow.__init__(self, **args, **kwargs)
        #Modflow.__init__(self, modelname, exe_name=exe_name, version=version,
        #                 model_ws=model_ws, external_path=external_path,
        #                 **kwargs)
        MFsetupMixin.__init__(self, parent=parent)
        # default configuration
        # order in which packages will be built by setup_from_yaml()
        self._package_setup_order = ['dis', 'bas6', 'upw', 'rch', 'oc',
                                     'chd', 'ghb', 'lak', 'sfr', 'riv', 'wel', 'mnw2',
                                     'gag', 'hyd']
        # set up the model configuration dictionary
        # start with the defaults
        self.cfg = load(self.source_path + self.default_file) # '/mf6_defaults.yml')
        self.relative_external_paths = self.cfg.get('model', {}).get('relative_external_paths', True)
        # set the model workspace and change working directory to there
        self.model_ws = self._get_model_ws(cfg=cfg)
        # update defaults with user-specified config. (loaded above)
        # set up and validate the model configuration dictionary
        self._set_cfg(cfg)
        # set the list file path
        self.lst.file_name = [self.cfg['model']['list_filename_fmt'].format(self.name)]
        # property arrays
        # cached ibound; rebuilt lazily by the ``ibound`` property
        self._ibound = None
    def __repr__(self):
        # Delegate to the mixin so all MFsetup models share one repr format.
        return MFsetupMixin.__repr__(self)
@property
def nlay(self):
return self.cfg['dis'].get('nlay', 1)
@property
def length_units(self):
return lenuni_text[self.cfg['dis']['lenuni']]
@property
def time_units(self):
return itmuni_text[self.cfg['dis']['itmuni']]
@property
def ipakcb(self):
"""By default write everything to one cell budget file."""
return self.cfg['upw'].get('ipakcb', 53)
@property
def ibound(self):
"""3D array indicating which cells will be included in the simulation.
Made a property so that it can be easily updated when any packages
it depends on change.
"""
if self._ibound is None and 'BAS6' in self.get_package_list():
self._set_ibound()
return self._ibound
    def _set_ibound(self):
        """Remake the ibound array from the source data, excluding
        no-data values in the top and bottom arrays, lake cells, and
        cells above SFR reaches or GHB cells."""
        ibound_from_layer_elevations = make_ibound(self.dis.top.array,
                                                   self.dis.botm.array,
                                                   nodata=self._nodata_value,
                                                   minimum_layer_thickness=self.cfg['dis'].get(
                                                       'minimum_layer_thickness', 1),
                                                   #drop_thin_cells=self._drop_thin_cells,
                                                   tol=1e-4)
        # include cells that are active in the existing idomain array
        # and cells inactivated on the basis of layer elevations
        ibound = (self.bas6.ibound.array > 0) & (ibound_from_layer_elevations >= 1)
        ibound = ibound.astype(int)
        # remove cells that coincide with lakes
        # (float 0. is truncated to 0 on assignment into the int array)
        ibound[self.isbc == 1] = 0.
        # remove cells that are above stream cells
        if self.get_package('sfr') is not None:
            ibound = deactivate_idomain_above(ibound, self.sfr.reach_data)
        # remove cells that are above ghb cells
        if self.get_package('ghb') is not None:
            ibound = deactivate_idomain_above(ibound, self.ghb.stress_period_data[0])
        # inactivate any isolated cells that could cause problems with the solution
        ibound = find_remove_isolated_cells(ibound, minimum_cluster_size=20)
        self._ibound = ibound
        # re-write the input files
        self._setup_array('bas6', 'ibound', resample_method='nearest',
                          data={i: arr for i, arr in enumerate(ibound)},
                          datatype='array3d', write_fmt='%d', dtype=int)
        self.bas6.ibound = self.cfg['bas6']['ibound']
    def _set_parent(self):
        """Set attributes related to a parent or source model
        if one is specified.

        Loads the parent MODFLOW-2005/NWT model with flopy (restricted to
        the packages listed in the configuration), records its units and
        grid, builds its perioddata table if missing, and fills in omitted
        dis configuration from the parent when default_source_data is on.
        """
        # MODFLOW-6 parent models are not supported by this class
        if self.cfg['parent'].get('version') == 'mf6':
            raise NotImplementedError("MODFLOW-6 parent models")
        kwargs = self.cfg['parent'].copy()
        if kwargs is not None:
            kwargs = kwargs.copy()
            kwargs['f'] = kwargs.pop('namefile')
            # load only specified packages that the parent model has
            packages_in_parent_namefile = get_packages(os.path.join(kwargs['model_ws'],
                                                                    kwargs['f']))
            load_only = list(set(packages_in_parent_namefile).intersection(
                set(self.cfg['model'].get('packages', set()))))
            kwargs['load_only'] = load_only
            kwargs = get_input_arguments(kwargs, fm.Modflow.load, warn=False)
            print('loading parent model {}...'.format(os.path.join(kwargs['model_ws'],
                                                                   kwargs['f'])))
            t0 = time.time()
            self._parent = fm.Modflow.load(**kwargs)
            print("finished in {:.2f}s\n".format(time.time() - t0))
            # parent model units; fall back to the parent dis codes when
            # units aren't given in the configuration
            if 'length_units' not in self.cfg['parent']:
                self.cfg['parent']['length_units'] = lenuni_text[self.parent.dis.lenuni]
            if 'time_units' not in self.cfg['parent']:
                self.cfg['parent']['time_units'] = itmuni_text[self.parent.dis.itmuni]
            # set the parent model grid from mg_kwargs if not None
            # otherwise, convert parent model grid to MFsetupGrid
            mg_kwargs = self.cfg['parent'].get('SpatialReference',
                                               self.cfg['parent'].get('modelgrid', None))
            self._set_parent_modelgrid(mg_kwargs)
            # parent model perioddata
            if not hasattr(self.parent, 'perioddata'):
                kwargs = {}
                kwargs['start_date_time'] = self.cfg['parent'].get('start_date_time',
                                                                   self.cfg['model'].get('start_date_time',
                                                                                         '1970-01-01'))
                kwargs['nper'] = self.parent.nper
                kwargs['model_time_units'] = self.cfg['parent']['time_units']
                for var in ['perlen', 'steady', 'nstp', 'tsmult']:
                    kwargs[var] = self.parent.dis.__dict__[var].array
                kwargs = get_input_arguments(kwargs, setup_perioddata_group)
                kwargs['oc_saverecord'] = {}
                self._parent.perioddata = setup_perioddata_group(**kwargs)
            # default_source_data, where omitted configuration input is
            # obtained from parent model by default
            # Set default_source_data to True by default if it isn't specified
            if self.cfg['parent'].get('default_source_data') is None:
                self.cfg['parent']['default_source_data'] = True
            if self.cfg['parent'].get('default_source_data'):
                self._parent_default_source_data = True
                if self.cfg['dis'].get('nlay') is None:
                    self.cfg['dis']['nlay'] = self.parent.dis.nlay
                parent_start_date_time = self.cfg.get('parent', {}).get('start_date_time')
                # only inherit the parent start date if the child's is the default
                if self.cfg['dis'].get('start_date_time', '1970-01-01') == '1970-01-01' and parent_start_date_time is not None:
                    self.cfg['dis']['start_date_time'] = self.cfg['parent']['start_date_time']
                if self.cfg['dis'].get('nper') is None:
                    self.cfg['dis']['nper'] = self.parent.dis.nper
                parent_periods = get_parent_stress_periods(self.parent, nper=self.cfg['dis']['nper'],
                                                           parent_stress_periods=self.cfg['parent']['copy_stress_periods'])
                for var in ['perlen', 'nstp', 'tsmult', 'steady']:
                    if self.cfg['dis'].get(var) is None:
                        self.cfg['dis'][var] = self.parent.dis.__dict__[var].array[parent_periods]
def _update_grid_configuration_with_dis(self):
"""Update grid configuration with any information supplied to dis package
(so that settings specified for DIS package have priority). This method
is called by MFsetupMixin.setup_grid.
"""
for param in ['nrow', 'ncol', 'delr', 'delc']:
if param in self.cfg['dis']:
self.cfg['setup_grid'][param] = self.cfg['dis'][param]
    def setup_dis(self):
        """Set up the DIS (discretization) package and return the flopy
        ModflowDis instance."""
        package = 'dis'
        print('\nSetting up {} package...'.format(package.upper()))
        t0 = time.time()
        # resample the top from the DEM
        if self.cfg['dis']['remake_top']:
            self._setup_array(package, 'top', datatype='array2d',
                              resample_method='linear',
                              write_fmt='%.2f')
        # make the botm array
        self._setup_array(package, 'botm', datatype='array3d',
                          resample_method='linear',
                          write_fmt='%.2f')
        # put together keyword arguments for dis package
        kwargs = self.cfg['grid'].copy() # nrow, ncol, delr, delc
        kwargs.update(self.cfg['dis']) # nper, nlay, etc.
        kwargs = get_input_arguments(kwargs, fm.ModflowDis)
        # we need flopy to read the intermediate files
        # (it will write the files in cfg)
        lmult = convert_length_units('meters', self.length_units)
        kwargs.update({'top': self.cfg['intermediate_data']['top'][0],
                       'botm': self.cfg['intermediate_data']['botm'],
                       'nper': self.nper,
                       'delc': self.modelgrid.delc * lmult,
                       'delr': self.modelgrid.delr * lmult
                       })
        # stress-period timing comes from the perioddata table
        for arg in ['perlen', 'nstp', 'tsmult', 'steady']:
            kwargs[arg] = self.perioddata[arg].values
        dis = fm.ModflowDis(model=self, **kwargs)
        self._perioddata = None  # reset perioddata
        #if not isinstance(self._modelgrid, MFsetupGrid):
        #    self._modelgrid = None # override DIS package grid setup
        self.setup_grid()  # reset the model grid
        self._reset_bc_arrays()
        #self._isbc = None # reset BC property arrays
        print("finished in {:.2f}s\n".format(time.time() - t0))
        return dis
    def setup_tdis(self):
        """Calls _set_perioddata to establish the time discretization.
        Only purpose is to conform to the same syntax as mf6 for
        MFsetupMixin.setup_from_yaml().
        """
        self._set_perioddata()
    def setup_bas6(self):
        """Set up the BAS6 (basic) package: starting heads and ibound."""
        package = 'bas6'
        print('\nSetting up {} package...'.format(package.upper()))
        t0 = time.time()
        # NOTE(review): kwargs aliases self.cfg['bas6'], so the keys added
        # below persist in the configuration dictionary -- confirm this
        # mutation is intended.
        kwargs = self.cfg[package]
        kwargs['source_data_config'] = kwargs['source_data']
        kwargs['filename_fmt'] = kwargs['strt_filename_fmt']
        kwargs['write_fmt'] = kwargs['strt_write_fmt']
        # make the starting heads array
        strt = setup_strt(self, package, **kwargs)
        # initial ibound input for creating a bas6 package instance
        self._setup_array(package, 'ibound', datatype='array3d', write_fmt='%d',
                          resample_method='nearest',
                          dtype=int)
        kwargs = get_input_arguments(self.cfg['bas6'], fm.ModflowBas)
        kwargs['strt'] = strt
        bas = fm.ModflowBas(model=self, **kwargs)
        print("finished in {:.2f}s\n".format(time.time() - t0))
        # rebuild the cached ibound array now that the package exists
        self._set_ibound()
        return bas
    def setup_oc(self):
        """Set up the OC (output control) package."""
        package = 'oc'
        print('\nSetting up {} package...'.format(package.upper()))
        t0 = time.time()
        #stress_period_data = {}
        #for i, r in self.perioddata.iterrows():
        #    stress_period_data[(r.per, r.nstp -1)] = r.oc
        # use stress_period_data if supplied
        # (instead of period_input defaults)
        if 'stress_period_data' in self.cfg['oc']:
            # NOTE(review): raises KeyError if 'period_options' is absent
            # from the oc config -- confirm it is always present here.
            del self.cfg['oc']['period_options']
        kwargs = self.cfg['oc']
        period_input = parse_oc_period_input(kwargs, nstp=self.perioddata.nstp,
                                             output_fmt='mfnwt')
        kwargs.update(period_input)
        kwargs = get_input_arguments(kwargs, fm.ModflowOc)
        oc = fm.ModflowOc(model=self, **kwargs)
        print("finished in {:.2f}s\n".format(time.time() - t0))
        return oc
    def setup_rch(self):
        """Set up the RCH (recharge) package."""
        package = 'rch'
        print('\nSetting up {} package...'.format(package.upper()))
        t0 = time.time()
        # make the rech array
        self._setup_array(package, 'rech', datatype='transient2d',
                          resample_method='linear',
                          write_fmt='%.6e',
                          write_nodata=0.)
        # create flopy package instance
        # NOTE(review): kwargs aliases self.cfg['rch'], so 'ipakcb' is
        # written back into the configuration dictionary -- confirm intended.
        kwargs = self.cfg['rch']
        kwargs['ipakcb'] = self.ipakcb
        kwargs = get_input_arguments(kwargs, fm.ModflowRch)
        rch = fm.ModflowRch(model=self, **kwargs)
        print("finished in {:.2f}s\n".format(time.time() - t0))
        return rch
def setup_upw(self):
"""
"""
package = 'upw'
print('\nSetting up {} package...'.format(package.upper()))
t0 = time.time()
hiKlakes_value = float(self.cfg['parent'].get('hiKlakes_value', 1e4))
# copy transient variables if they were included in config file
# defaults are hard coded to arrays in parent model priority
# over config file values, in the case that ss and sy weren't entered
hk = self.cfg['upw'].get('hk')
vka = self.cfg['upw'].get('vka')
default_sy = 0.1
default_ss = 1e-6
# Determine which hk, vka to use
# load parent upw if it's needed and not loaded
source_package = package
if np.any(np.array([hk, vka]) == None) and \
'UPW' not in self.parent.get_package_list() and \
'LPF' not in self.parent.get_package_list():
for ext, pckgcls in {'upw': fm.ModflowUpw,
'lpf': fm.ModflowLpf,
}.items():
pckgfile = '{}/{}.{}'.format(self.parent.model_ws, self.parent.name, package)
if os.path.exists(pckgfile):
upw = | |
# <reponame>silentsokolov/dagster <filename>python_modules/dagster/dagster/core/definitions/job_definition.py
from functools import update_wrapper
from typing import (
TYPE_CHECKING,
AbstractSet,
Any,
Dict,
FrozenSet,
List,
Mapping,
Optional,
Tuple,
Type,
Union,
cast,
)
import dagster._check as check
from dagster.core.definitions.composition import MappedInputPlaceholder
from dagster.core.definitions.dependency import (
DependencyDefinition,
DynamicCollectDependencyDefinition,
IDependencyDefinition,
MultiDependencyDefinition,
Node,
NodeHandle,
NodeInvocation,
SolidOutputHandle,
)
from dagster.core.definitions.events import AssetKey
from dagster.core.definitions.node_definition import NodeDefinition
from dagster.core.definitions.policy import RetryPolicy
from dagster.core.errors import (
DagsterInvalidDefinitionError,
DagsterInvalidInvocationError,
DagsterInvalidSubsetError,
)
from dagster.core.selector.subset_selector import (
AssetSelectionData,
LeafNodeSelection,
OpSelectionData,
parse_op_selection,
)
from dagster.core.utils import str_format_set
from dagster.utils import merge_dicts
from .asset_layer import AssetLayer, build_asset_selection_job
from .config import ConfigMapping
from .executor_definition import ExecutorDefinition
from .graph_definition import GraphDefinition, SubselectedGraphDefinition
from .hook_definition import HookDefinition
from .logger_definition import LoggerDefinition
from .metadata import RawMetadataValue
from .mode import ModeDefinition
from .partition import PartitionSetDefinition, PartitionedConfig, PartitionsDefinition
from .pipeline_definition import PipelineDefinition
from .preset import PresetDefinition
from .resource_definition import ResourceDefinition
from .run_request import RunRequest
from .version_strategy import VersionStrategy
if TYPE_CHECKING:
from dagster.core.execution.execute_in_process_result import ExecuteInProcessResult
from dagster.core.instance import DagsterInstance
from dagster.core.snap import PipelineSnapshot
class JobDefinition(PipelineDefinition):
def __init__(
self,
graph_def: GraphDefinition,
resource_defs: Optional[Dict[str, ResourceDefinition]] = None,
executor_def: Optional[ExecutorDefinition] = None,
logger_defs: Optional[Dict[str, LoggerDefinition]] = None,
config_mapping: Optional[ConfigMapping] = None,
partitioned_config: Optional[PartitionedConfig] = None,
name: Optional[str] = None,
description: Optional[str] = None,
preset_defs: Optional[List[PresetDefinition]] = None,
tags: Optional[Dict[str, Any]] = None,
metadata: Optional[Dict[str, RawMetadataValue]] = None,
hook_defs: Optional[AbstractSet[HookDefinition]] = None,
op_retry_policy: Optional[RetryPolicy] = None,
version_strategy: Optional[VersionStrategy] = None,
_subset_selection_data: Optional[Union[OpSelectionData, AssetSelectionData]] = None,
asset_layer: Optional[AssetLayer] = None,
_input_values: Optional[Mapping[str, object]] = None,
):
# Exists for backcompat - JobDefinition is implemented as a single-mode pipeline.
mode_def = ModeDefinition(
resource_defs=resource_defs,
logger_defs=logger_defs,
executor_defs=[executor_def] if executor_def else None,
_config_mapping=config_mapping,
_partitioned_config=partitioned_config,
)
self._cached_partition_set: Optional["PartitionSetDefinition"] = None
self._subset_selection_data = check.opt_inst_param(
_subset_selection_data,
"_subset_selection_data",
(OpSelectionData, AssetSelectionData),
)
self._input_values: Mapping[str, object] = check.opt_mapping_param(
_input_values, "_input_values"
)
for input_name in sorted(list(self._input_values.keys())):
if not graph_def.has_input(input_name):
job_name = name or graph_def.name
raise DagsterInvalidDefinitionError(
f"Error when constructing JobDefinition '{job_name}': Input value provided for key '{input_name}', but job has no top-level input with that name."
)
super(JobDefinition, self).__init__(
name=name,
description=description,
mode_defs=[mode_def],
preset_defs=preset_defs,
tags=tags,
metadata=metadata,
hook_defs=hook_defs,
solid_retry_policy=op_retry_policy,
graph_def=graph_def,
version_strategy=version_strategy,
asset_layer=asset_layer,
)
    @property
    def target_type(self) -> str:
        # Label for this definition type, used in messages (see describe_target).
        return "job"
    @property
    def is_job(self) -> bool:
        # Always True for JobDefinition instances.
        return True
    def describe_target(self) -> str:
        # Human-readable description, e.g. "job 'my_job'".
        return f"{self.target_type} '{self.name}'"
@property
def executor_def(self) -> ExecutorDefinition:
return self.get_mode_definition().executor_defs[0]
@property
def resource_defs(self) -> Mapping[str, ResourceDefinition]:
return self.get_mode_definition().resource_defs
@property
def partitioned_config(self) -> Optional[PartitionedConfig]:
return self.get_mode_definition().partitioned_config
@property
def config_mapping(self) -> Optional[ConfigMapping]:
return self.get_mode_definition().config_mapping
@property
def loggers(self) -> Mapping[str, LoggerDefinition]:
return self.get_mode_definition().loggers
def execute_in_process(
    self,
    run_config: Optional[Dict[str, Any]] = None,
    instance: Optional["DagsterInstance"] = None,
    partition_key: Optional[str] = None,
    raise_on_error: bool = True,
    op_selection: Optional[List[str]] = None,
    asset_selection: Optional[List[AssetKey]] = None,
    run_id: Optional[str] = None,
    input_values: Optional[Mapping[str, object]] = None,
) -> "ExecuteInProcessResult":
    """
    Execute the Job in-process, gathering results in-memory.
    The `executor_def` on the Job will be ignored, and replaced with the in-process executor.
    If using the default `io_manager`, it will switch from filesystem to in-memory.
    Args:
        run_config (Optional[Dict[str, Any]]):
            The configuration for the run
        instance (Optional[DagsterInstance]):
            The instance to execute against, an ephemeral one will be used if none provided.
        partition_key: (Optional[str])
            The string partition key that specifies the run config to execute. Can only be used
            to select run config for jobs with partitioned config.
        raise_on_error (Optional[bool]): Whether or not to raise exceptions when they occur.
            Defaults to ``True``.
        op_selection (Optional[List[str]]): A list of op selection queries (including single op
            names) to execute. For example:
            * ``['some_op']``: selects ``some_op`` itself.
            * ``['*some_op']``: select ``some_op`` and all its ancestors (upstream dependencies).
            * ``['*some_op+++']``: select ``some_op``, all its ancestors, and its descendants
              (downstream dependencies) within 3 levels down.
            * ``['*some_op', 'other_op_a', 'other_op_b+']``: select ``some_op`` and all its
              ancestors, ``other_op_a`` itself, and ``other_op_b`` and its direct child ops.
        input_values (Optional[Mapping[str, Any]]):
            A dictionary that maps python objects to the top-level inputs of the job. Input values provided here will override input values that have been provided to the job directly.
    Returns:
        :py:class:`~dagster.ExecuteInProcessResult`
    """
    # Local imports avoid a circular dependency between the definitions and
    # execution packages.
    from dagster.core.definitions.executor_definition import execute_in_process_executor
    from dagster.core.execution.execute_in_process import core_execute_in_process

    # Normalize and validate arguments up front.
    run_config = check.opt_dict_param(run_config, "run_config")
    op_selection = check.opt_list_param(op_selection, "op_selection", str)
    asset_selection = check.opt_list_param(asset_selection, "asset_selection", AssetKey)
    check.invariant(
        not (op_selection and asset_selection),
        "op_selection and asset_selection cannot both be provided as args to execute_in_process",
    )
    partition_key = check.opt_str_param(partition_key, "partition_key")
    input_values = check.opt_mapping_param(input_values, "input_values")
    # Combine provided input values at execute_in_process with input values
    # provided to the definition. Input values provided at
    # execute_in_process will override those provided on the definition.
    input_values = merge_dicts(self._input_values, input_values)
    resource_defs = dict(self.resource_defs)
    logger_defs = dict(self.loggers)
    # Clone this job with the in-process executor (and, inside
    # _swap_default_io_man, an in-memory io_manager when the default
    # filesystem one is still in place).
    ephemeral_job = JobDefinition(
        name=self._name,
        graph_def=self._graph_def,
        resource_defs=_swap_default_io_man(resource_defs, self),
        executor_def=execute_in_process_executor,
        logger_defs=logger_defs,
        hook_defs=self.hook_defs,
        config_mapping=self.config_mapping,
        partitioned_config=self.partitioned_config,
        tags=self.tags,
        op_retry_policy=self._solid_retry_policy,
        version_strategy=self.version_strategy,
        asset_layer=self.asset_layer,
        _input_values=input_values,
    )
    # Narrow the clone to the requested op or asset subset (no-op when
    # neither selection was provided).
    ephemeral_job = ephemeral_job.get_job_def_for_subset_selection(
        op_selection, frozenset(asset_selection) if asset_selection else None
    )
    tags = None
    if partition_key:
        # A partition key supplies the run config, so it is mutually
        # exclusive with an explicit run_config argument.
        if not self.partitioned_config:
            check.failed(
                f"Provided partition key `{partition_key}` for job `{self._name}` without a partitioned config"
            )
        check.invariant(
            not run_config,
            "Cannot provide both run_config and partition_key arguments to `execute_in_process`",
        )
        partition_set = self.get_partition_set_def()
        if not partition_set:
            check.failed(
                f"Provided partition key `{partition_key}` for job `{self._name}` without a partitioned config"
            )
        partition = partition_set.get_partition(partition_key)
        run_config = partition_set.run_config_for_partition(partition)
        tags = partition_set.tags_for_partition(partition)
    return core_execute_in_process(
        node=self._graph_def,
        ephemeral_pipeline=ephemeral_job,
        run_config=run_config,
        instance=instance,
        output_capturing_enabled=True,
        raise_on_error=raise_on_error,
        run_tags=tags,
        run_id=run_id,
        # NOTE(review): this passes frozenset() (not None) when no assets were
        # selected — confirm downstream treats the two identically.
        asset_selection=frozenset(asset_selection),
    )
@property
def op_selection_data(self) -> Optional[OpSelectionData]:
    """Op-selection metadata when this job is an op subset of a parent job, else None."""
    data = self._subset_selection_data
    if isinstance(data, OpSelectionData):
        return data
    return None
@property
def asset_selection_data(self) -> Optional[AssetSelectionData]:
    """Asset-selection metadata when this job is an asset subset of a parent job, else None."""
    data = self._subset_selection_data
    if isinstance(data, AssetSelectionData):
        return data
    return None
def get_job_def_for_subset_selection(
    self,
    op_selection: Optional[List[str]] = None,
    asset_selection: Optional[FrozenSet[AssetKey]] = None,
):
    """Return this job narrowed to the requested ops or assets.

    At most one of op_selection / asset_selection may be provided; with
    neither, the job itself is returned unchanged.
    """
    check.invariant(
        not (op_selection and asset_selection),
        "op_selection and asset_selection cannot both be provided as args to execute_in_process",
    )
    # Guard-clause style: each selection kind delegates to its own builder.
    if op_selection:
        return self._get_job_def_for_op_selection(op_selection)
    if asset_selection:
        return self._get_job_def_for_asset_selection(asset_selection)
    return self
def _get_job_def_for_asset_selection(
    self,
    asset_selection: Optional[FrozenSet[AssetKey]] = None,
) -> "JobDefinition":
    """Build a new job containing only the selected assets.

    Args:
        asset_selection: keys of the assets to keep; every key must exist in
            this job's asset layer.

    Returns:
        A new JobDefinition restricted to the selection, carrying
        AssetSelectionData that records this job as the parent.

    Raises:
        DagsterInvalidSubsetError: if any selected asset is not part of this
            job's asset layer.
    """
    asset_selection = check.opt_set_param(asset_selection, "asset_selection", AssetKey)
    # Fix: validation previously ran inside a `for asset in asset_selection`
    # loop whose body never used the loop variable, recomputing the same
    # list for every selected asset (O(n^2)). Computing it once is
    # behaviorally identical.
    nonexistent_assets = [
        asset for asset in asset_selection if asset not in self.asset_layer.asset_keys
    ]
    nonexistent_asset_strings = [
        asset_str
        for asset_str in (asset.to_string() for asset in nonexistent_assets)
        if asset_str
    ]
    if nonexistent_assets:
        raise DagsterInvalidSubsetError(
            "Assets provided in asset_selection argument "
            f"{', '.join(nonexistent_asset_strings)} do not exist in parent asset group or job."
        )
    # Record lineage so snapshots can point back at the parent job.
    asset_selection_data = AssetSelectionData(
        asset_selection=asset_selection,
        parent_job_def=self,
    )
    check.invariant(
        self.asset_layer.assets_defs_by_key is not None,
        "Asset layer must have _asset_defs argument defined",
    )
    new_job = build_asset_selection_job(
        name=self.name,
        assets=set(self.asset_layer.assets_defs_by_key.values()),
        source_assets=self.asset_layer.source_assets_by_key.values(),
        executor_def=self.executor_def,
        resource_defs=self.resource_defs,
        description=self.description,
        tags=self.tags,
        asset_selection=asset_selection,
        asset_selection_data=asset_selection_data,
    )
    return new_job
def _get_job_def_for_op_selection(
    self,
    op_selection: Optional[List[str]] = None,
) -> "JobDefinition":
    """Build a copy of this job containing only the ops matched by
    op_selection (Dagster op-selection query syntax), recording this job
    as the parent via OpSelectionData.

    Returns self unchanged when the selection is empty/None.

    Raises:
        DagsterInvalidSubsetError: when the subset produces an invalid graph
            (e.g. an unsatisfied input that cannot be loaded from config).
    """
    if not op_selection:
        return self
    op_selection = check.opt_list_param(op_selection, "op_selection", str)
    # Resolve selection queries (e.g. "*my_op+") into a dict of node names.
    resolved_op_selection_dict = parse_op_selection(self, op_selection)
    try:
        sub_graph = get_subselected_graph_definition(self.graph, resolved_op_selection_dict)
        return JobDefinition(
            name=self.name,
            description=self.description,
            resource_defs=dict(self.resource_defs),
            logger_defs=dict(self.loggers),
            executor_def=self.executor_def,
            config_mapping=self.config_mapping,
            partitioned_config=self.partitioned_config,
            preset_defs=self.preset_defs,
            tags=self.tags,
            hook_defs=self.hook_defs,
            op_retry_policy=self._solid_retry_policy,
            graph_def=sub_graph,
            version_strategy=self.version_strategy,
            _subset_selection_data=OpSelectionData(
                op_selection=op_selection,
                resolved_op_selection=set(
                    resolved_op_selection_dict.keys()
                ),  # equivalent to solids_to_execute. currently only gets top level nodes.
                parent_job_def=self,  # used by pipeline snapshot lineage
            ),
            # TODO: subset this structure.
            # https://github.com/dagster-io/dagster/issues/7541
            asset_layer=self.asset_layer,
        )
    except DagsterInvalidDefinitionError as exc:
        # This handles the case when you construct a subset such that an unsatisfied
        # input cannot be loaded from config. Instead of throwing a DagsterInvalidDefinitionError,
        # we re-raise a DagsterInvalidSubsetError.
        raise DagsterInvalidSubsetError(
            f"The attempted subset {str_format_set(resolved_op_selection_dict)} for graph "
            f"{self.graph.name} results in an invalid graph."
        ) from exc
def get_partition_set_def(self) -> Optional["PartitionSetDefinition"]:
mode = self.get_mode_definition()
if not mode.partitioned_config:
return None
if not self._cached_partition_set:
tags_fn = mode.partitioned_config.tags_for_partition_fn
if not tags_fn:
tags_fn = lambda _: {}
self._cached_partition_set = PartitionSetDefinition(
job_name=self.name,
name=f"{self.name}_partition_set",
partitions_def=mode.partitioned_config.partitions_def,
run_config_fn_for_partition=mode.partitioned_config.run_config_for_partition_fn,
tags_fn_for_partition=tags_fn,
mode=mode.name,
)
return self._cached_partition_set
@property
def partitions_def(self) -> Optional[PartitionsDefinition]:
mode = self.get_mode_definition()
if not mode.partitioned_config:
return None
return mode.partitioned_config.partitions_def
def run_request_for_partition(self, partition_key: str, run_key: Optional[str]) -> RunRequest:
    """Create a RunRequest targeting the given partition of this job.

    Fails (via check.failed) when the job has no partitioned config.
    """
    partition_set = self.get_partition_set_def()
    if not partition_set:
        check.failed("Called run_request_for_partition on a non-partitioned job")
    partition = partition_set.get_partition(partition_key)
    return RunRequest(
        run_key=run_key,
        run_config=partition_set.run_config_for_partition(partition),
        tags=partition_set.tags_for_partition(partition),
    )
def with_hooks(self, hook_defs: AbstractSet[HookDefinition]) -> "JobDefinition":
    """Apply a set of hooks to all op instances within the job.

    Returns a new JobDefinition identical to this one except that the given
    hooks are merged (set-union) with the job's existing hooks.
    """
    hook_defs = check.set_param(hook_defs, "hook_defs", of_type=HookDefinition)
    job_def = JobDefinition(
        name=self.name,
        graph_def=self._graph_def,
        resource_defs=dict(self.resource_defs),
        logger_defs=dict(self.loggers),
        executor_def=self.executor_def,
        partitioned_config=self.partitioned_config,
        config_mapping=self.config_mapping,
        preset_defs=self.preset_defs,
        tags=self.tags,
        hook_defs=hook_defs | self.hook_defs,
        description=self._description,
        op_retry_policy=self._solid_retry_policy,
        # Fix: these two were previously dropped when re-constructing the
        # job, so applying hooks silently discarded the job's version
        # strategy and any directly-provided input values (the sibling
        # copy-constructors in this class propagate both).
        version_strategy=self.version_strategy,
        _input_values=self._input_values,
        asset_layer=self.asset_layer,
        _subset_selection_data=self._subset_selection_data,
    )
    update_wrapper(job_def, self, updated=())
    return job_def
def get_parent_pipeline_snapshot(self) -> Optional["PipelineSnapshot"]:
if self.op_selection_data:
return self.op_selection_data.parent_job_def.get_pipeline_snapshot()
elif self.asset_selection_data:
return self.asset_selection_data.parent_job_def.get_pipeline_snapshot()
else:
return None
def has_direct_input_value(self, input_name: str) -> bool:
    """Whether a value was provided directly to the job for this top-level input."""
    return input_name in self._input_values
def get_direct_input_value(self, input_name: str) -> object:
    """Return the value provided directly to the job for the named top-level input.

    Raises:
        DagsterInvalidInvocationError: when no value was provided for that input.
    """
    try:
        return self._input_values[input_name]
    except KeyError:
        raise DagsterInvalidInvocationError(
            f"On job '{self.name}', attempted to retrieve input value for input named '{input_name}', but no value was provided. Provided input values: {sorted(list(self._input_values.keys()))}"
        ) from None
def _swap_default_io_man(resources: Dict[str, ResourceDefinition], job: | |
20 )
iiii1 = socket . htons ( 0x4000 )
iii1IiiiI1i1 = struct . pack ( "BBHHHBBH" , 0x45 , self . outer_tos , iiIIIIiii , 0xdfdf ,
iiii1 , self . outer_ttl , 17 , 0 )
iii1IiiiI1i1 += self . outer_source . pack_address ( )
iii1IiiiI1i1 += self . outer_dest . pack_address ( )
iii1IiiiI1i1 = lisp_ip_checksum ( iii1IiiiI1i1 )
elif ( self . outer_version == 6 ) :
iii1IiiiI1i1 = ""
if 37 - 37: Oo0Ooo - i1IIi - IiII + I11i . iIii1I11I1II1
if 59 - 59: OoooooooOO - I1Ii111 % o0oOOo0O0Ooo . I11i + i1IIi * I11i
if 5 - 5: II111iiii - IiII
if 86 - 86: IiII * I11i + O0 * I1Ii111 + i11iIiiIii - I1ii11iIi11i
if 70 - 70: i11iIiiIii
if 57 - 57: I11i % OOooOOo + ooOoO0o * Ii1I . Oo0Ooo
if 78 - 78: OoooooooOO / i1IIi . OOooOOo
else :
return ( None )
if 88 - 88: I11i + I1IiiI - I11i / OoooooooOO - i11iIiiIii
if 24 - 24: iIii1I11I1II1
self . packet = iii1IiiiI1i1 + IIi1ii1 + iIiII11 + self . packet
return ( self )
if 89 - 89: Ii1I / i1IIi - o0oOOo0O0Ooo % I1IiiI . Oo0Ooo - O0
if 71 - 71: OoO0O00 % I1IiiI - iII111i . iII111i
def cipher_pad(self, packet):
    """Pad packet on the right (with spaces, via ljust) to a multiple of
    16 bytes — the AES block size — returning it unchanged when it is
    already block-aligned.
    """
    length = len(packet)
    if (length % 16) != 0:
        # Fix: floor division keeps the target length an int on both
        # Python 2 and Python 3; true division (`/`) yields a float under
        # Python 3, which ljust() rejects with a TypeError.
        padded_length = ((length // 16) + 1) * 16
        packet = packet.ljust(padded_length)
    return (packet)
if 37 - 37: OoooooooOO . IiII / OoOoOO00 / oO0o % OoooooooOO . OoooooooOO
if 40 - 40: O0 . I1Ii111 / iIii1I11I1II1 * o0oOOo0O0Ooo
def encrypt ( self , key , addr_str ) :
    """Encrypt self.packet for transmission to the RLOC in addr_str.

    Pads the payload to the cipher block size, encrypts it with the key's
    negotiated cipher suite (ChaCha20, AES-GCM, or AES-CBC), prepends the
    LISP header/IV and appends the keyed ICV. Returns [ciphertext, True]
    on success, or [original packet, False] when no usable key or cipher
    is available.

    NOTE: the no-op `if N - N:` lines are pyminifier obfuscation filler
    and have no runtime effect.
    """
    # No negotiated shared key -> caller sends the packet unencrypted.
    if ( key == None or key . shared_key == None ) :
        return ( [ self . packet , False ] )
    if 73 - 73: Oo0Ooo - iII111i . oO0o % i1IIi . O0
    if 15 - 15: ooOoO0o . iIii1I11I1II1 * I1IiiI % I11i
    if 21 - 21: OoO0O00 - I1IiiI . OoooooooOO
    if 6 - 6: iIii1I11I1II1 - iIii1I11I1II1 % o0oOOo0O0Ooo / iIii1I11I1II1 * I1Ii111
    if 3 - 3: OOooOOo . IiII / Oo0Ooo
    # Pad plaintext to the block size and fetch a fresh IV for this packet.
    iI1IIII1ii1 = self . cipher_pad ( self . packet )
    Ooo = key . get_iv ( )
    if 11 - 11: oO0o + I1Ii111 . IiII * OoooooooOO - I1ii11iIi11i - OOooOOo
    # Timestamp used below to report encryption latency in microseconds.
    III11I1 = lisp_get_timestamp ( )
    # I1Ii1 holds the GCM digest function when AES-GCM is in use.
    I1Ii1 = None
    # Select the encryption function for the key's cipher suite.
    if ( key . cipher_suite == LISP_CS_25519_CHACHA ) :
        O0oo0oOoO00 = chacha . ChaCha ( key . encrypt_key , Ooo ) . encrypt
    elif ( key . cipher_suite == LISP_CS_25519_GCM ) :
        i1ii1iIi = binascii . unhexlify ( key . encrypt_key )
        try :
            I1I1Ii = AES . new ( i1ii1iIi , AES . MODE_GCM , Ooo )
            O0oo0oOoO00 = I1I1Ii . encrypt
            I1Ii1 = I1I1Ii . digest
        except :
            # AES-GCM support missing from the installed crypto library.
            lprint ( "You need AES-GCM, do a 'pip install pycryptodome'" )
            return ( [ self . packet , False ] )
        if 42 - 42: o0oOOo0O0Ooo - Oo0Ooo % I1ii11iIi11i
    else :
        # Default cipher suite: AES-CBC.
        i1ii1iIi = binascii . unhexlify ( key . encrypt_key )
        O0oo0oOoO00 = AES . new ( i1ii1iIi , AES . MODE_CBC , Ooo ) . encrypt
    if 43 - 43: I11i % i1IIi % ooOoO0o . i11iIiiIii
    if 56 - 56: O0 * iII111i + iII111i * iIii1I11I1II1 / ooOoO0o * I1Ii111
    # Encrypt the padded payload.
    Ii = O0oo0oOoO00 ( iI1IIII1ii1 )
    if 97 - 97: i11iIiiIii + Oo0Ooo * OOooOOo % iII111i . IiII
    if ( Ii == None ) : return ( [ self . packet , False ] )
    # Elapsed encryption time: microseconds taken from the fractional part.
    III11I1 = int ( str ( time . time ( ) - III11I1 ) . split ( "." ) [ 1 ] [ 0 : 6 ] )
    if 4 - 4: O0 . iII111i - iIii1I11I1II1
    if 19 - 19: OOooOOo % OoO0O00 / Ii1I + II111iiii % OoooooooOO
    if 89 - 89: Ii1I
    if 51 - 51: iII111i
    if 68 - 68: iII111i - o0oOOo0O0Ooo * OoO0O00 % ooOoO0o . ooOoO0o - iIii1I11I1II1
    if 22 - 22: OoooooooOO / I1ii11iIi11i % iII111i * OoOoOO00
    # For AES-GCM, append the authentication tag to the ciphertext.
    if ( I1Ii1 != None ) : Ii += I1Ii1 ( )
    if 32 - 32: OoooooooOO % oO0o % iIii1I11I1II1 / O0
    if 61 - 61: II111iiii . O0 - Ii1I - I1ii11iIi11i / i11iIiiIii - II111iiii
    if 98 - 98: Ii1I - I1IiiI . i11iIiiIii * Oo0Ooo
    if 29 - 29: Ii1I / ooOoO0o % I11i
    if 10 - 10: iIii1I11I1II1 % OoooooooOO % I1ii11iIi11i
    # Stamp the key-id into the LISP header and compute the keyed ICV over
    # header + IV + ciphertext.
    self . lisp_header . key_id ( key . key_id )
    iIiII11 = self . lisp_header . encode ( )
    if 39 - 39: II111iiii * OoOoOO00 . O0 * I11i
    O0o0O0O0O = key . do_icv ( iIiII11 + Ooo + Ii , Ooo )
    if 79 - 79: IiII + IiII + Ii1I
    # Number of hex digits shown from each end of the ICV in the debug line.
    iiiII1i1I = 4 if ( key . do_poly ) else 8
    if 97 - 97: O0 . I1Ii111 / II111iiii . O0 + OoooooooOO
    # Debug output: cipher suite, key-id, truncated ICV, and timing.
    oo0OooO = bold ( "Encrypt" , False )
    I11iI1 = bold ( key . cipher_suite_string , False )
    addr_str = "RLOC: " + red ( addr_str , False )
    oOo00OO0o0 = "poly" if key . do_poly else "sha256"
    oOo00OO0o0 = bold ( oOo00OO0o0 , False )
    IiIiI = "ICV({}): 0x{}...{}" . format ( oOo00OO0o0 , O0o0O0O0O [ 0 : iiiII1i1I ] , O0o0O0O0O [ - iiiII1i1I : : ] )
    dprint ( "{} for key-id: {}, {}, {}, {}-time: {} usec" . format ( oo0OooO , key . key_id , addr_str , IiIiI , I11iI1 , III11I1 ) )
    if 47 - 47: OoOoOO00
    if 65 - 65: O0 + I1Ii111 % Ii1I * I1IiiI / ooOoO0o / OoOoOO00
    # Pack the hex ICV into network byte order: 16 bytes for poly1305,
    # 20 bytes (two quads + one long) for sha256.
    O0o0O0O0O = int ( O0o0O0O0O , 16 )
    if ( key . do_poly ) :
        oooOO = byte_swap_64 ( ( O0o0O0O0O >> 64 ) & LISP_8_64_MASK )
        iI1IIIi11 = byte_swap_64 ( O0o0O0O0O & LISP_8_64_MASK )
        O0o0O0O0O = struct . pack ( "QQ" , oooOO , iI1IIIi11 )
    else :
        oooOO = byte_swap_64 ( ( O0o0O0O0O >> 96 ) & LISP_8_64_MASK )
        iI1IIIi11 = byte_swap_64 ( ( O0o0O0O0O >> 32 ) & LISP_8_64_MASK )
        oooOo00O0 = socket . htonl ( O0o0O0O0O & 0xffffffff )
        O0o0O0O0O = struct . pack ( "QQI" , oooOO , iI1IIIi11 , oooOo00O0 )
    if 26 - 26: I1Ii111 . Ii1I + I1IiiI . OoOoOO00 + OOooOOo
    if 17 - 17: OOooOOo + i11iIiiIii + I1ii11iIi11i % OOooOOo . oO0o
    # Wire format: IV + ciphertext(+tag) + ICV.
    return ( [ Ooo + Ii + O0o0O0O0O , True ] )
if 33 - 33: I11i * I1IiiI % OoOoOO00 . IiII . ooOoO0o . OoO0O00
if 53 - 53: OoOoOO00
def decrypt ( self , packet , header_length , key , addr_str ) :
if 84 - 84: OoO0O00
if 97 - 97: i1IIi
if 98 - 98: OoooooooOO - I1IiiI + ooOoO0o
if 98 - 98: iII111i . IiII . IiII - OOooOOo
if 65 - 65: Oo0Ooo + o0oOOo0O0Ooo - Ii1I
if 12 - 12: OoooooooOO + I1ii11iIi11i
if ( key . do_poly ) :
oooOO , iI1IIIi11 = struct . unpack ( "QQ" , packet [ - 16 : : ] )
o0OoO0000oOO = byte_swap_64 ( oooOO ) << 64
o0OoO0000oOO |= byte_swap_64 ( iI1IIIi11 )
o0OoO0000oOO = lisp_hex_string ( o0OoO0000oOO ) . zfill ( 32 )
packet = packet [ 0 : - 16 ]
iiiII1i1I = 4
i1iIIiiIiII = bold ( "poly" , False )
else :
oooOO , iI1IIIi11 , oooOo00O0 = struct . unpack ( "QQI" , packet [ - 20 : | |
# <gh_stars>0  (dataset artifact; commented out — not valid Python)
# pip install pytorch-lightning
# pip install neptune-client
# %%
from __future__ import print_function
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.callbacks.progress import ProgressBar
from sklearn.model_selection import train_test_split
import ast
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from collections import defaultdict
import torch
import torch.utils.data
from torch import nn, optim
from torch.nn import functional as F
import math
import pytorch_lightning as pl
# import utils.plot_utils as utils
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pickle
import wandb
import time
import os
from utils import run_utils, plot_utils, data_utils, utils, metric_utils, settings, latent_space_utils, \
disentangle_utils
# ToDo EDA:
# - Long Tail graphics
# - Remove user who had less than a threshold of seen items
# - Create Markdown with EDA results
# ToDo input_params:
# Parameter that should be tweakable by invoking the routine:
# - epochs
# - learning_rate
# - batch_size
# - simplified_rating
# - hidden_layer number
# - Algorithm: VAE, AE or SVD
# ToDo metrics:
# Add https://towardsdatascience.com/evaluation-metrics-for-recommender-systems-df56c6611093
# Fix the global torch RNG so weight initialization and sampling are reproducible.
seed = 42
torch.manual_seed(seed)
class VAE(pl.LightningModule):
def __init__(self, conf: dict, *args, **kwargs):
    """Configure the (V)AE from the `conf` hyperparameter dict.

    Builds 2-layer encoder/decoder MLPs sized to the user-item matrix,
    loads either real MovieLens data or a supplied synthetic dataset, and
    initializes the arrays used to collect latent-space statistics.

    Args:
        conf: hyperparameter dict; persisted via save_hyperparameters so a
            run can be reproduced from its checkpoint.
    """
    super().__init__()
    # self.kwargs = kwargs
    self.save_hyperparameters(conf)  # exposes conf via self.hparams
    self.ls_predicted_movies = []  # top-1 predicted item index per test row
    # --- experiment switches / hyperparameters pulled from hparams ---
    self.is_hessian_penalty_activated = self.hparams["is_hessian_penalty_activated"]
    self.expanded_user_item = self.hparams["expanded_user_item"]
    self.used_data = self.hparams["used_data"]
    self.generative_factors = self.hparams["generative_factors"]
    self.mixup = self.hparams["mixup"]
    self.np_synthetic_data = self.hparams["synthetic_data"]
    self.ls_syn_y = self.hparams["syn_y"]
    self.experiment_path_train = conf["experiment_path"]
    self.experiment_path_test = self.experiment_path_train
    self.beta = self.hparams["beta"]
    self.avg_mce = 0.0
    self.train_dataset = None
    self.test_dataset = None
    self.test_size = self.hparams["test_size"]
    self.no_latent_factors = self.hparams["latent_dim"]
    self.max_unique_movies = 0
    self.unique_movies = 0
    self.np_user_item = None
    self.small_dataset = self.hparams["small_dataset"]
    self.simplified_rating = self.hparams["simplified_rating"]
    self.max_epochs = self.hparams["max_epochs"]
    self.dct_index2itemId = None
    self.test_y_bin = None
    self.df_movies_z_combined = None
    # Data source: real MovieLens CSVs vs. pre-generated synthetic arrays.
    if (self.np_synthetic_data is None):
        self.load_dataset()  # additionaly assigns self.unique_movies and self.np_user_item
        self.df_movies = pd.read_csv('../data/generated/df_movies_cleaned3.csv')
        self.dct_attribute_distribution = utils.load_json_as_dict(
            'attribute_distribution.json')  # load relative frequency distributioon from dictionary (pickle it)
    else:
        self.train_dataset, self.test_dataset = train_test_split(self.np_synthetic_data, test_size=self.test_size,
                                                                 random_state=42)
        self.train_y, self.test_y = train_test_split(self.ls_syn_y, test_size=self.test_size, random_state=42)
        # One-hot encoding of the synthetic labels for downstream evaluation.
        self.test_y_bin = np.asarray(pd.get_dummies(pd.DataFrame(data=self.test_y)))
        self.unique_movies = self.np_synthetic_data.shape[1]
        self.df_movies = pd.read_csv('../data/generated/syn.csv')
        self.dct_attribute_distribution = utils.load_json_as_dict(
            'syn_attribute_distribution.json')  # load relative frequency distributioon from dictionary (pickle it)
    # nn.Linear layer creates a linear function (θx + b), with its parameters initialized
    # Input width grows by 4^generative_factors when the user-item matrix is "expanded".
    self.input_dimension = int(self.unique_movies * math.pow(4,
                                                             self.generative_factors)) if self.expanded_user_item == True else self.unique_movies
    # --- encoder: input -> 40 -> latent ---
    self.fc11 = nn.Linear(in_features=self.input_dimension, out_features=40)  # input
    self.fc12 = nn.Linear(in_features=40, out_features=self.no_latent_factors)  # input
    # self.fc13 = nn.Linear(in_features=1000, out_features=600) # input
    self.encoder = nn.Sequential(
        self.fc11, nn.LeakyReLU(),  # nn.ReLU(),
        self.fc12  # , nn.LeakyReLU()
        # self.fc13, nn.LeakyReLU()
    )
    # --- decoder: latent -> 40 -> input (mirror of the encoder) ---
    self.fc31 = nn.Linear(in_features=self.no_latent_factors, out_features=40)
    # self.fc32 = nn.Linear(in_features=600, out_features=1000)
    # self.fc33 = nn.Linear(in_features=1000, out_features=1200)
    self.fc34 = nn.Linear(in_features=40, out_features=self.input_dimension)
    self.decoder = nn.Sequential(
        self.fc31, nn.LeakyReLU(),
        # self.fc32, nn.LeakyReLU(),
        # self.fc33, nn.ReLU(),
        self.fc34
    )
    # --- KLD / latent-space bookkeeping (rows appended batch by batch) ---
    self.KLD = None
    self.ls_kld = []
    self.dis_KLD = None
    self.z = None
    self.kld_matrix = np.empty((0, self.no_latent_factors))
    self.np_z_test = np.empty((0, self.no_latent_factors))  # self.test_dataset.shape[0]
    self.np_mu_test = np.empty((0, self.no_latent_factors))
    self.np_logvar_test = np.empty((0, self.no_latent_factors))
    self.np_z_train = np.empty((0, self.no_latent_factors))  # self.test_dataset.shape[0]
    self.np_mu_train = np.empty((0, self.no_latent_factors))
    self.np_logvar_train = np.empty((0, self.no_latent_factors))
    # self.dct_attribute_distribution = None # load relative frequency distributioon from dictionary (pickle it)
    # self.df_links = pd.read_csv('../data/movielens/small/links.csv')
    self.sigmoid_annealing_threshold = self.hparams['sigmoid_annealing_threshold']
    self.mce_batch_train = None
    self.mce_batch_test = None
    # Running statistics of latent codes collected during training.
    self.z_mean_train = []
    self.z_min_train = []
    self.z_max_train = []
    # Initialize weights
    self.encoder.apply(run_utils.weight_init)
    self.decoder.apply(run_utils.weight_init)
def encode(self, x):
    """Map an input batch to latent space: encoder MLP followed by ReLU."""
    hidden = self.encoder(x)
    return F.relu(hidden)
def decode(self, z):
    """Map latent codes back to item space, squashed into (0, 1) via sigmoid."""
    logits = self.decoder(z)
    return torch.sigmoid(logits)
def forward(self, x, **kwargs):
    """Forward pass: encode x to a latent code (stored on self.z), then decode.

    NOTE(review): when keyword arguments are supplied, the first branch
    unpacks z/mu/logvar and then falls off the end of the function,
    returning None — looks like leftover scaffolding from a stochastic
    (VAE) path; confirm before relying on it.
    """
    if (kwargs):
        z = kwargs['z']
        mu = kwargs['mu']
        logvar = kwargs['logvar']
        p = None
        q = None
    else:
        self.z = self.encode(x)
        return self.decode(self.z)
def load_dataset(self):
    """Load MovieLens ratings (small or large per config) and build the
    user-item matrix plus the train/test split.

    Populates: np_user_item, unique_movies, max_unique_movies,
    dct_index2itemId, train_dataset, test_dataset.
    """
    if self.small_dataset:
        print("Load small dataset of ratings.csv")
        ratings_path = "../data/movielens/small/ratings.csv"
    else:
        print("Load large dataset of ratings.csv")
        ratings_path = "../data/movielens/large/ratings.csv"
    df_ratings = pd.read_csv(ratings_path)
    print('Shape of dataset:{}'.format(df_ratings.shape))
    (
        self.np_user_item,
        self.unique_movies,
        self.max_unique_movies,
        self.dct_index2itemId,
    ) = data_utils.pivot_create_user_item_matrix(
        df_ratings, True
    )  # manual_create_user_item_matrix(df_ratings, simplified_rating=self.simplified_rating)
    self.train_dataset, self.test_dataset = train_test_split(
        self.np_user_item, test_size=self.test_size, random_state=42
    )
def train_dataloader(self):
    """Lightning hook: shuffled DataLoader over the training split (batch 100)."""
    return torch.utils.data.DataLoader(
        self.train_dataset,
        batch_size=100,
        shuffle=True,
        num_workers=0,
        pin_memory=True,
    )
def test_dataloader(self):
test_loader = torch.utils.data.DataLoader(
self.test_dataset, batch_size=100, shuffle=False, num_workers=0
)
return test_loader
def configure_optimizers(self):
    """Lightning hook: Adam over all trainable parameters at lr=1e-3."""
    return optim.Adam(self.parameters(), lr=1e-3)
def collect_z_values(self, ts_mu_chunk, ts_logvar_chunk):  # , ls_y
    """Append this batch's latent codes to np_z_train and update the running
    mean/min/max of z across the epoch.

    NOTE(review): relies on self.compute_z(mu, logvar), defined elsewhere in
    this class; presumably it returns a (batch, no_latent_factors) tensor —
    confirm.
    """
    start = time.time()
    ls_grad_z = self.compute_z(ts_mu_chunk, ts_logvar_chunk)
    self.np_z_train = np.append(self.np_z_train, np.asarray(ls_grad_z.tolist()),
                                axis=0)  # TODO Describe in thesis that I get back a grad object instead of a pure tensor as it is in the test method since we are in the training method.
    # print('Shape np_z_train: {}'.format(self.np_z_train.shape))
    # Per-dimension statistics over everything collected so far this epoch.
    z_mean = self.np_z_train.mean(axis=0)
    z_min = self.np_z_train.min(axis=0)
    z_max = self.np_z_train.max(axis=0)
    if (len(self.z_mean_train) == 0):
        # First batch: seed the running statistics.
        self.z_mean_train = z_mean
        self.z_min_train = z_min
        self.z_max_train = z_max
    else:
        # NOTE(review): this halves old vs. new means (an average of
        # averages), which weights batches unequally — confirm intended.
        self.z_mean_train = (z_mean + self.z_mean_train) / 2
        self.z_max_train = np.amax(np.vstack((self.z_max_train, z_max)),
                                   axis=0)  # Stack old and new together and find the max
        self.z_min_train = np.amin(np.vstack((self.z_min_train, z_min)), axis=0)
    # if (z_min < self.z_min_train):
    #     self.z_min_train = z_min
    #
    # if (z_max > self.z_max_train):
    #     self.z_max_train = z_max
    # print('collect_z_values in seconds: {}'.format(time.time() - start))
def training_step(self, batch, batch_idx):
    """Lightning training step: reconstruct the batch and return the
    batch-size-normalized reconstruction loss.

    NOTE(review): batch_loss is only assigned inside the
    `self.used_data == 'ae'` branch; any other used_data value raises
    NameError when building tensorboard_logs below — confirm other modes
    are handled elsewhere.
    """
    mce_minibatch = None
    batch_len = batch.shape[0]
    ts_batch_user_features = batch  # .view(-1, self.input_dimension)
    # ts_batch_user_features = ts_batch_user_features * random.uniform(0.4,0.9)
    recon_batch = self.forward(ts_batch_user_features)  # sample data
    # Leftover NaN debugging hook.
    if (np.isnan(np.sum(recon_batch.detach().numpy()))):
        print('s')
    batch_mse = self.loss_function(recon_batch,
                                   ts_batch_user_features,
                                   self.beta,
                                   self.unique_movies)
    hp_loss = 0
    # normalizing reconstruction loss
    batch_mse = batch_mse / len(ts_batch_user_features)
    # Hessian penalty only engages in the last quarter of training; the
    # actual penalty computation is currently commented out.
    if (self.is_hessian_penalty_activated and self.current_epoch > int(3 / 4 * self.max_epochs - 1)):  #
        print('<---- Applying Hessian Penalty ---->')
        # np_z = self.compute_z(ts_mu_chunk, ts_logvar_chunk)
        # hp_loss = hessian_penalty(G=self.decode, z=np_z)
        # print('Hessian Penalty:{}'.format(hp_loss))
    # batch_loss = batch_mse + hp_loss + batch_kld
    if (self.used_data == 'ae'):
        batch_loss = batch_mse
    # Additional logs go into tensorboard_logs
    tensorboard_logs = {'train_loss': batch_loss,
                        'MSE-Train': batch_mse,
                        }  #
    return {'loss': batch_loss, 'log': tensorboard_logs}
def training_epoch_end(self, outputs):
    """Lightning hook invoked after every training epoch; currently only
    announces the pre-KLD MCE save and returns an empty log dict."""
    print("Saving MCE before KLD is applied...")
    return {}
# def validation_step(self, batch, batch_idx):
# return 0
def test_step(self, batch, batch_idx):
    """Lightning test step: reconstruct the batch, record top-1 predictions
    and latent codes, and compute per-batch reconstruction metrics.

    NOTE(review): `loss` divides batch_loss by the batch size a second time
    (it was already normalized a few lines above) — confirm intended.
    """
    print('test step')
    batch_mce = 0
    test_loss = 0
    # self.eval()
    ts_batch_user_features = batch.view(-1, self.input_dimension)
    recon_batch = self.forward(ts_batch_user_features)
    # Highest-scoring item per row (argsort of negated scores = descending).
    self.ls_predicted_movies.extend((-recon_batch).argsort()[:, 0].tolist())
    ls_z = self.encode(ts_batch_user_features).tolist()
    self.np_z_test = np.append(self.np_z_test, np.asarray(ls_z),
                               axis=0)  # TODO get rid of np_z_chunk and use np.asarray(mu_chunk)
    # calculate_batch_metrics returns (all-entries rmse/mse, non-zero-only
    # rmse/mse) in that order; the unpacked names reflect that mapping.
    batch_rmse_w_zeros, batch_mse_w_zeros, batch_rmse, batch_mse = self.calculate_batch_metrics(
        recon_batch=recon_batch, ts_batch_user_features=ts_batch_user_features)
    # batch_mse is overwritten here with the training criterion's value.
    batch_mse = self.loss_function(recon_batch,
                                   ts_batch_user_features,
                                   self.beta,
                                   self.unique_movies)
    # normalizing reconstruction loss
    batch_mse = batch_mse / len(ts_batch_user_features)
    batch_loss = batch_mse
    # to be rermoved mean_mce = { for single_mce in batch_mce}
    loss = batch_loss.item() / len(ts_batch_user_features)
    # bce = batch_bce/len(ts_batch_user_features)
    tensorboard_logs = {
        'MSE-test': batch_mse}
    return {'test_loss': loss,
            'rmse': batch_rmse,
            'mse': batch_mse,
            'rmse_w_zeros': batch_rmse_w_zeros,
            'mse_w_zeros': batch_mse_w_zeros,
            'log': tensorboard_logs,
            'MSE-Test': batch_mse
            }
    # test_loss /= len(test_loader.dataset)
    # print('====> Test set loss: {:.4f}'.format(test_loss))
def test_epoch_end(self, outputs):
    """Aggregate per-batch test metrics and push the averages to
    Weights & Biases.

    NOTE(review): wandb_logger is a module-level global expected to be set
    up by the training script (not visible here) — confirm it exists
    before running this standalone.
    """
    # avg_loss = torch.stack([x['test_loss'] for x in outputs]).mean()
    avg_loss = np.array([x['test_loss'] for x in outputs]).mean()
    mse_test = np.array([x['MSE-Test'] for x in outputs])
    avg_rmse = np.array([x['rmse'] for x in outputs]).mean()
    avg_rmse_w_zeros = np.array([x['rmse_w_zeros'] for x in outputs]).mean()
    avg_mse = np.array([x['mse'] for x in outputs]).mean()
    avg_mse_w_zeros = np.array([x['mse_w_zeros'] for x in outputs]).mean()
    tensorboard_logs = {'test_loss': avg_loss, 'MSE-Test': mse_test}
    wandb_logger.log_metrics({'rmse': avg_rmse,
                              'rmse_w_zeros': avg_rmse_w_zeros,
                              'mse': avg_mse,
                              'mse_w_zeros': avg_mse_w_zeros})  # , 'kld_matrix':self.kld_matrix
    return {'test_loss': avg_loss, 'log': tensorboard_logs, 'rmse': avg_rmse, 'MSE-Test': mse_test
            }  # , , 'mce':avg_mce
# Reconstruction + KL divergence losses summed over all elements and batch
def loss_function(self, recon_x, x, beta, unique_movies):
    """Summed squared-error reconstruction loss for a batch.

    Args:
        recon_x: reconstructed batch.
        x: ground-truth batch.
        beta: KLD weight — unused in this AE variant, kept for interface
            compatibility with callers.
        unique_movies: unused, kept for interface compatibility.

    Returns:
        Scalar tensor with the summed (not mean) squared error; callers
        normalize by batch size afterwards.
    """
    # MSE = F.binary_cross_entropy(recon_x, x, reduction='sum')# MSE is bad for this
    MSE = F.mse_loss(x, recon_x, reduction='sum')  # MSE is bad for this
    # KLD placeholder so downstream logging still finds the attribute.
    self.KLD = [0]
    # Fix: the previous version wrapped this in `except RuntimeError`,
    # printed, and then fell through to `return MSE` with MSE unbound,
    # masking the real error with a NameError. Letting the RuntimeError
    # propagate surfaces the actual failure; the stray NaN debug print
    # was removed as well.
    return MSE
def calculate_batch_metrics(self, recon_batch, ts_batch_user_features):
    """Per-batch RMSE/MSE, both over all matrix entries and over only the
    seen (non-zero ground-truth) entries.

    Returns (in this order):
        avg_rmse, avg_mse: averaged over every entry (zeros included),
        avg_rmse_wo_zeros, avg_mse_wo_zeros: averaged over only items each
            user actually rated.
    """
    # Compute MSE
    # TODO MOre generic ...
    # mask = training_utils.generate_mask(ts_batch_user_features, tsls_yhat_user, user_based_items_filter=loss_user_items_only)
    # tsls_yhat_user_filtered = tsls_yhat_user[~mask] # Predicted: Filter out unseen+unrecommended items
    # ts_user_features_seen = ts_batch_user_features[~mask] # Ground Truth: Filter out unseen+unrecommended items
    # TODO ...than this approach
    batch_rmse = 0
    batch_mse = 0
    batch_rmse_wo_zeros = 0
    batch_mse_wo_zeros = 0
    # Zero out predictions for items the user never interacted with.
    ls_yhat_user = recon_batch * ts_batch_user_features  # Set all items to zero that are of no interest and haven't been seen
    for idx, tensor in enumerate(ls_yhat_user):
        np_y = ts_batch_user_features[idx].data.numpy()
        np_y_wo_zeros = np_y[np.nonzero(np_y)]  # inner returns the index
        np_yhat = tensor.data.numpy()
        np_yhat_wo_zeros = np_yhat[np.nonzero(np_y)]  # This must be np_y
        rmse, mse = metric_utils.calculate_metrics(np_y, np_yhat)
        batch_mse += mse
        batch_rmse += rmse
        # Users with no rated items contribute nothing to the *_wo_zeros sums.
        if (len(np_yhat_wo_zeros) > 0):
            rmse_wo_zeros, mse_wo_zeros = metric_utils.calculate_metrics(np_y_wo_zeros, np_yhat_wo_zeros)
            batch_rmse_wo_zeros += rmse_wo_zeros
            batch_mse_wo_zeros += mse_wo_zeros
    # batch_rmse, batch_mse = utils.calculate_metrics(ts_batch_user_features,ls_yhat_user)
    # Average each accumulator over the number of users in the batch.
    avg_rmse = batch_rmse / ls_yhat_user.shape[0]
    avg_rmse_wo_zeros = batch_rmse_wo_zeros / ls_yhat_user.shape[0]
    avg_mse = batch_mse / ls_yhat_user.shape[0]
    avg_mse_wo_zeros = batch_mse_wo_zeros / ls_yhat_user.shape[0]
    return avg_rmse, avg_mse, avg_rmse_wo_zeros, avg_mse_wo_zeros
def load_attributes_and_files(self, path): | |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""Contains the classes system_objects and single_object.
Used to compute system models
"""
from .. import mats
import os
import copy
from .. import solvers
import object
class system_objects:
    """system_objects class

    This class creates a system of thermal objects, establishes contact
    between them and computes the respective thermal processes.
    """

    def __init__(self, number_objects=2, materials=('Cu', 'Cu'),
                 objects_length=(10, 10), amb_temperature=293, dx=0.01, dt=0.1,
                 file_name='data', initial_state=False,
                 boundaries=((2, 0), (3, 0))):
        """Initializes the object.

        amb_temperature: ambient temperature of the whole system
        materials: tuple of strings of all the used materials present in the
            folder materials
        number_objects: integer for the number of thermal objects
        objects_length: tuple of the object lengths (spatial steps)
        dx: the space step
        dt: the time step
        file_name: file name where the temperature and heat flux are saved
        boundaries: tuple of two-entry tuples that define the boundary
            condition for temperature. The first entry is the thermal object
            id while the second is the imposed temperature. If the second
            entry is 0 the boundary condition is insulation.
        initial_state: initial state of the materials. True if applied field
            and False if removed field.

        Raises ValueError if any input has an unexpected type.
        """
        # check the validity of inputs
        # (this module targets Python 2: `unicode` is a distinct type)
        cond01 = isinstance(amb_temperature, float)
        cond01 = cond01 or isinstance(amb_temperature, int)
        cond02 = isinstance(materials, tuple)
        cond03 = isinstance(number_objects, int)
        cond04 = isinstance(objects_length, tuple)
        cond05 = isinstance(dx, int) or isinstance(dx, float)
        cond06 = isinstance(dt, int) or isinstance(dt, float)
        cond07 = isinstance(file_name, unicode)
        cond07 = cond07 or isinstance(file_name, str)
        cond08 = isinstance(boundaries, tuple)
        cond09 = isinstance(initial_state, bool)
        condition = cond01 and cond02 and cond03 and cond04 and cond05
        condition = condition and cond06 and cond07 and cond08 and cond09
        if not condition:
            raise ValueError

        # initial definitions: build one 1D thermal object per slot. Heat is
        # only saved for objects that carry a fixed (non-zero) temperature
        # boundary condition.
        self.objects = []
        for i in range(number_objects):
            if i not in [l[0] for l in boundaries] or (i, 0) in boundaries:
                heat_save = False
            else:
                heat_save = True
            self.objects.append(object.object(
                amb_temperature,
                materials=(materials[i],),
                borders=(1, objects_length[i]+1),
                materials_order=(0,), dx=dx, dt=dt,
                file_name=file_name+'_'+str(i)+'.txt',
                boundaries=(0, 0), Q=[], Q0=[],
                initial_state=initial_state,
                heat_save=heat_save))

        # contacts are tuples: ((obj_a, point_a), (obj_b, point_b), coeff)
        self.contacts = set()
        self.boundaries = boundaries
        self.dt = dt
        self.q1 = 0.  # accumulated heat at the first boundary object
        self.q2 = 0.  # accumulated heat at the second boundary object

        # apply fixed-temperature boundary conditions
        for i in boundaries:
            if i[1] != 0:
                for j in range(len(self.objects[i[0]].temperature)):
                    self.objects[i[0]].temperature[j] = [i[1], i[1]]

    def contactFilter(self, object):
        """Filter self.contacts by thermal object id.

        object: thermal object id
        Returns the set of contacts that involve that object on either side.
        """
        # BUGFIX: removed an unreachable `self.contacts.add(contact)` line
        # that followed the return statement in the original implementation.
        filtered = [x for x in
                    self.contacts if (x[0][0] == object or x[1][0] == object)]
        return set(filtered)

    def contactAdd(self, contact):
        """Add contact to self.contacts.

        contact: thermal contact ((obj_a, point_a), (obj_b, point_b), coeff)
        """
        self.contacts.add(contact)

    def contactRemove(self, contact):
        """Remove all contacts involving a thermal object.

        contact: thermal object id
        """
        # BUGFIX: self.contacts is a set (see __init__), so the original
        # index-based loop (`self.contacts[i]`) raised TypeError, and its
        # comparison matched a (object, point) tuple against an id, which
        # could never succeed. Per the docstring ("remove all contacts from
        # an object") the id test now mirrors contactFilter.
        # NOTE(review): if callers actually pass a full contact tuple rather
        # than an id, use `self.contacts.discard(contact)` instead — confirm
        # against call sites.
        removing = [c for c in self.contacts
                    if c[0][0] == contact or c[1][0] == contact]
        for c in removing:
            self.contacts.remove(c)

    def compute(self, timeInterval, write_interval, solver='implicit_k'):
        """Computes the thermal process.

        Computes the system for timeInterval, and writes into the file_name
        file every write_interval time steps. Four different solvers can be
        used: 'explicit_general', 'explicit_k(x)', 'implicit_general',
        and 'implicit_k(x)'.
        """
        # number of time steps for the given timeInterval
        nt = int(timeInterval / self.dt)
        # number of time steps counting from the last writing process
        nw = 0

        # computes
        for j in range(nt):
            # reset heat sources to their reference values, then superpose
            # the heat exchanged through each contact (Newton-type exchange
            # proportional to the temperature difference)
            for obj in self.objects:
                obj.Q0 = copy.copy(obj.Q0_ref)
            for contact in self.contacts:
                td1 = self.objects[contact[1][0]].temperature[contact[1][1]][0]
                td2 = self.objects[contact[0][0]].temperature[contact[0][1]][0]
                heat_contact_1 = contact[2] * (td1 - td2)
                heat_contact_2 = contact[2] * (td2 - td1)
                self.objects[contact[0][0]].Q0[contact[0][1]] = heat_contact_1
                self.objects[contact[1][0]].Q0[contact[1][1]] = heat_contact_2

            object_number = -1
            for obj in self.objects:
                object_number = object_number + 1
                obj.time_passed = obj.time_passed + obj.dt
                cond1 = object_number not in [l[0] for l in self.boundaries]
                if cond1 or (object_number, 0) in self.boundaries:
                    # defines the material properties, depending on whether
                    # the field is applied (state True) or removed (False)
                    for i in range(1, obj.num_points - 1):
                        if obj.state[i] is True:
                            ind = obj.materials_index[i]
                            obj.rho[i] = obj.materials[ind].rhoa(
                                obj.temperature[i][0])
                            obj.Cp[i] = obj.materials[ind].cpa(
                                obj.temperature[i][0])
                            obj.k[i] = obj.materials[ind].ka(
                                obj.temperature[i][0])
                        if obj.state[i] is False:
                            ind = obj.materials_index[i]
                            obj.rho[i] = obj.materials[ind].rho0(
                                obj.temperature[i][0])
                            obj.Cp[i] = obj.materials[ind].cp0(
                                obj.temperature[i][0])
                            obj.k[i] = obj.materials[ind].k0(
                                obj.temperature[i][0])

                    # SOLVERS
                    # implicit k constant
                    if solver == 'implicit_general':
                        obj.temperature = solvers.implicit_general(obj)
                    # implicit k dependent on x
                    if solver == 'implicit_k(x)':
                        obj.temperature = solvers.implicit_k(obj)
                    # explicit k constant
                    if solver == 'explicit_general':
                        obj.temperature = solvers.explicit_general(obj)
                    # explicit k dependent on x
                    if solver == 'explicit_k(x)':
                        obj.temperature = solvers.explicit_K(obj)

                    # writes the temperature to file_name file ...
                    # if the number of time steps is verified
                    if nw + 1 == write_interval or j == 0 or j == nt - 1:
                        line = '%f' % obj.time_passed
                        for i in obj.temperature:
                            new_line = ',%f' % i[1]
                            line = line + new_line
                        f = open(obj.file_name, 'a')
                        f.write(line+'\n')
                        f.close()
                else:
                    # boundary object with imposed temperature: accumulate
                    # the exchanged heat instead of solving the equation
                    heat = [p*self.dt*obj.dx for p in obj.Q0 if p is not None]
                    heat = sum(heat)/(len(heat)*obj.dx)
                    if object_number == self.boundaries[0][0]:
                        self.q1 = self.q1 + heat
                        q = self.q1
                    else:
                        self.q2 = self.q2 + heat
                        q = self.q2

                    # writes the temperature to file_name file ...
                    # if the number of time steps is verified
                    if nw + 1 == write_interval or j == 0 or j == nt - 1:
                        line = '%f' % obj.time_passed
                        for i in obj.temperature:
                            new_line = ',%f' % i[1]
                            line = line + new_line
                        new_line = ',%f' % q
                        line = line + new_line
                        f = open(obj.file_name, 'a')
                        f.write(line+'\n')
                        f.close()

            # reset or advance the write counter
            if nw == write_interval:
                nw = 0
            else:
                nw = nw + 1
class single_object(object.object):
"""single_object class
This class solves numerically the heat conduction equation for 1 dimension
of an active material(s). Three solvers can be used: explicit with
x-independent k, explicit with x-dependent k, implicit with x-independent
k, and implicit with x-dependent k. The class has 5 methods: activate for
the activation of part of the solid, deactivate for the deactivation of
part of the solid, and compute for solving the equation for a given period
of time. This class is suited for simulations involving caloric systems
such as magnetocaloric or electrocaloric systems.
"""
def __init__(self, amb_temperature, materials=('Cu',), borders=(1, 11),
materials_order=(0,), dx=0.01, dt=0.1, file_name='data.txt',
boundaries=(0, 0), Q=[], Q0=[], heat_points=(1, -2),
initial_state=False, h_left=50000., h_right=50000.):
"""Initializes the object.
amb_temperature: ambient temperature of the whole system
materials: list of strings of all the used materials present in the
folder materials
borders: list of the points where there is a change of material
materials_order: list of the materials list indexes that defines the
material properties given by borders
dx: the space step
dt: the times step
file_name: file name where the temperature and heat flux are saved
boundaries: list of two entries that define the boundary condition
for tempreture. If 0 the boundary condition is insulation
Q: list of 3 entry lists that gives the fixed heat source coeficient.
The first term is the initial space index where it is applies. The
second is the final space index where it is applies. The third is
the value of the coeficient.
Q0 is a list of 3 entry lists that gives the temperature dependent heat
source coefficient. The first term is the initial space index where
it is applies. The second is the final space index where it is
applies. The third is the value of the coeficient.
heat_points: list of the space indexes where we want to extract the
heat flux. Normally, the first term is the heat flux of the hot end
and the second term is the heat flux of the cold end
initial_state: initial state of the materials. True if applied field
and False is removed field.
h_left: left heat transfer coefficient
h_right: right heat transfer coefficient
"""
# check the validity of inputs
cond01 = isinstance(amb_temperature, float)
cond01 = cond01 or isinstance(amb_temperature, int)
cond02 = isinstance(materials, tuple)
cond03 = isinstance(borders, tuple)
cond04 = isinstance(materials_order, tuple)
cond05 = isinstance(dx, int) or isinstance(dx, float)
cond06 = isinstance(dt, int) or isinstance(dt, float)
cond07 = isinstance(file_name, unicode)
cond07 = cond07 or isinstance(file_name, str)
cond08 = isinstance(boundaries, tuple)
cond09 = isinstance(heat_points, tuple)
cond10 = isinstance(initial_state, bool)
cond11 = isinstance(h_left, int) or isinstance(h_left, float)
cond12 = isinstance(h_right, int) or isinstance(h_right, float)
condition = cond01 and cond02 and cond03 and cond04 and cond05
condition = condition and cond06 and cond07 and cond08 and cond09
condition = | |
# <gh_stars>1-10
import unittest
from helper import RpcWrapper, get_utxo
from cfd.address import AddressUtil
from cfd.key import SchnorrUtil, SigHashType, SchnorrPubkey, SignParameter
from cfd.hdwallet import HDWallet
from cfd.script import HashType, Script
from cfd.descriptor import parse_descriptor
from cfd.psbt import Psbt, PsbtAppendInputData, PsbtAppendOutputData
from cfd.taproot import TaprootScriptTree, TapBranch
from cfd.transaction import OutPoint, Transaction, TxIn, TxOut, UtxoData
from decimal import Decimal
from typing import List
import logging
import time
# Fixed BIP39 mnemonic + passphrase so every test run derives the same keys.
MNEMONIC = [
    'clerk', 'zoo', 'mercy', 'board', 'grab', 'service', 'impact', 'tortoise',
    'step', 'crash', 'load', 'aerobic', 'suggest', 'rack', 'refuse', 'can',
    'solve', 'become', 'upset', 'jump', 'token', 'anchor', 'apart', 'dog']
PASSPHRASE = 'Unx3HmdQ'
NETWORK = 'regtest'  # all tests run against a local regtest node
ROOT_PATH = 'm/44h/0h/0h'  # BIP44-style derivation root for test keys
FEE_PATH = ROOT_PATH + '/1/0'  # key path of the fee-paying address
BTC_AMOUNT = 100000000  # satoshi per BTC
BTC_AMOUNT_BIT = 8  # decimal digits in one BTC
def convert_bitcoin_utxos(test_obj, utxo_list) -> List['UtxoData']:
    """Convert bitcoind `listunspent` JSON entries into cfd UtxoData objects.

    The BTC amount (a float/str in the RPC response) is converted to satoshi
    via Decimal to avoid float rounding; the descriptor is looked up from the
    test object's per-address registry.
    """
    converted: List['UtxoData'] = []
    for entry in utxo_list:
        descriptor = test_obj.desc_dic[entry['address']]
        satoshi = Decimal(str(entry['amount'])) * BTC_AMOUNT
        converted.append(UtxoData(txid=entry['txid'], vout=entry['vout'],
                                  amount=int(satoshi), descriptor=descriptor))
    return converted
def search_utxos(test_obj, utxo_list, outpoint):
    """Return the UTXO in utxo_list matching outpoint, or fail the test.

    On no match this reports a failure via test_obj.assertTrue (which raises
    in a real unittest), so callers may assume a UTXO is returned.
    """
    matches = [candidate for candidate in utxo_list
               if candidate.outpoint == outpoint]
    if matches:
        return matches[0]
    test_obj.assertTrue(False, 'UTXO is empty. outpoint={}'.format(outpoint))
def create_bitcoin_address(test_obj: 'TestBitcoin'):
    """Derive and register all addresses used by the test suite.

    For each address the path, the address object and its output descriptor
    are stored in test_obj.path_dic / addr_dic / desc_dic, keyed the same way
    as the original implementation, and the same progress lines are printed.
    """
    root_pk = test_obj.hdwallet.ext_privkey.get_extpubkey().pubkey
    fp = root_pk.get_fingerprint()

    def register_single(label, path, addr_func, desc_fmt):
        # derive one single-key address and record path/address/descriptor
        pk = str(test_obj.hdwallet.get_pubkey(path=path).pubkey)
        addr = addr_func(pk, network=NETWORK)
        test_obj.path_dic[str(addr)] = path
        test_obj.addr_dic[label] = addr
        test_obj.desc_dic[str(addr)] = parse_descriptor(
            desc_fmt.format(str(fp), path[1:], pk), network=NETWORK)
        print('set ' + label + ' addr: ' + str(addr))

    # single-key addresses (same order and labels as before)
    register_single('fee', FEE_PATH, AddressUtil.p2wpkh, 'wpkh([{}{}]{})')
    register_single('main', '{}/0/0'.format(ROOT_PATH),
                    AddressUtil.p2wpkh, 'wpkh([{}{}]{})')
    register_single('p2pkh', '{}/0/1'.format(ROOT_PATH),
                    AddressUtil.p2pkh, 'pkh([{}{}]{})')
    register_single('p2wpkh', '{}/0/2'.format(ROOT_PATH),
                    AddressUtil.p2wpkh, 'wpkh([{}{}]{})')
    register_single('p2sh-p2wpkh', '{}/0/3'.format(ROOT_PATH),
                    AddressUtil.p2sh_p2wpkh, 'sh(wpkh([{}{}]{}))')

    # multisig_key: 2-of-3 over the keys at .../0/1, .../0/2, .../0/3
    base_path = '{}/0/'.format(ROOT_PATH)
    path_list = [base_path + str(n) for n in range(1, 4)]
    pk_list = [str(test_obj.hdwallet.get_pubkey(path=p).pubkey)
               for p in path_list]
    req_num = 2
    desc_multi = 'multi({},{},{},{})'.format(
        req_num, pk_list[0], pk_list[1], pk_list[2])
    for label, hash_type, desc_fmt in (
            ('p2sh', HashType.P2SH, 'sh({})'),
            ('p2wsh', HashType.P2WSH, 'wsh({})'),
            ('p2sh-p2wsh', HashType.P2SH_P2WSH, 'sh(wsh({}))')):
        addr = AddressUtil.multisig(
            req_num, pk_list, hash_type, network=NETWORK)
        test_obj.path_dic[str(addr)] = path_list
        test_obj.addr_dic[label] = addr
        test_obj.desc_dic[str(addr)] = parse_descriptor(
            desc_fmt.format(desc_multi), network=NETWORK)
        print('set ' + label + ' addr: ' + str(addr))
def test_import_address(test_obj: 'TestBitcoin'):
    """Register the derived addresses with bitcoind as watch-only.

    Same importaddress calls, in the same order, as the original:
    fee, main, the single-key addresses, then the multisig addresses.
    """
    btc_rpc = test_obj.conn.get_rpc()
    watch_list = [
        ('fee', 'test_fee'),
        ('main', 'test_main'),
        ('p2pkh', 'test_pkh'),
        ('p2wpkh', 'test_wpkh'),
        ('p2sh-p2wpkh', 'test_sh_wpkh'),
        ('p2sh', 'test_sh'),
        ('p2wsh', 'test_wsh'),
        ('p2sh-p2wsh', 'test_sh_wsh'),
    ]
    for dic_key, rpc_label in watch_list:
        # final False = do not rescan the chain for each import
        btc_rpc.importaddress(str(test_obj.addr_dic[dic_key]),
                              rpc_label, False)
def test_generate(test_obj: 'TestBitcoin'):
    """Mine blocks to the fee address so later tests have mature UTXOs."""
    # generatetoaddress -> fee addresss
    print(test_obj.addr_dic)
    btc_rpc = test_obj.conn.get_rpc()
    fee_addr = str(test_obj.addr_dic['fee'])
    # 100 blocks to mature the first coinbase, plus a few extra
    btc_rpc.generatetoaddress(100, fee_addr)
    btc_rpc.generatetoaddress(5, fee_addr)
    time.sleep(2)
    print(get_utxo(btc_rpc, [fee_addr]))
def test_bitcoin_pkh(test_obj: 'TestBitcoin'):
    """Round-trip funds through p2pkh / p2wpkh / p2sh-p2wpkh outputs.

    Step 1: fund a tx paying each single-key address type, sign with the fee
    key, broadcast and mine. Step 2: spend those three outputs back to the
    'main' address, signing each input with the key for its address type.
    Order of RPC calls matters: blocks must be mined before step 2 funds.
    """
    btc_rpc = test_obj.conn.get_rpc()
    # create tx (output wpkh, p2sh-segwit, pkh)
    txouts = [
        TxOut(100000000, str(test_obj.addr_dic['p2pkh'])),
        TxOut(100000000, str(test_obj.addr_dic['p2wpkh'])),
        TxOut(100000000, str(test_obj.addr_dic['p2sh-p2wpkh'])),
    ]
    tx = Transaction.create(2, 0, [], txouts)
    # fundrawtransaction
    fee_addr = str(test_obj.addr_dic['fee'])
    fee_desc = test_obj.desc_dic[fee_addr]
    fee_sk = test_obj.hdwallet.get_privkey(path=FEE_PATH).privkey
    utxos = get_utxo(btc_rpc, [fee_addr])
    utxo_list = convert_bitcoin_utxos(test_obj, utxos)
    tx.fund_raw_transaction([], utxo_list, fee_addr,
                            target_amount=0, effective_fee_rate=20.0,
                            knapsack_min_change=1)
    # add sign
    for txin in tx.txin_list:
        utxo = search_utxos(test_obj, utxo_list, txin.outpoint)
        tx.sign_with_privkey(txin.outpoint, fee_desc.data.hash_type, fee_sk,
                             amount=utxo.amount,
                             sighashtype=SigHashType.ALL)
    # broadcast
    print(Transaction.parse_to_json(str(tx), network=NETWORK))
    btc_rpc.sendrawtransaction(str(tx))
    # generate block
    btc_rpc.generatetoaddress(2, fee_addr)
    time.sleep(2)
    # create tx (output wpkh only, input tx1-3)
    txid = tx.txid
    txin_list = []
    txin_utxo_list = []
    # collect the non-fee outputs of tx as inputs for the second tx
    for index, txout in enumerate(tx.txout_list):
        temp_addr = str(txout.get_address(network=NETWORK))
        if temp_addr == fee_addr:
            continue  # skip the change output back to the fee address
        txin_list.append(TxIn(txid=txid, vout=index))
        if temp_addr not in test_obj.desc_dic:
            test_obj.assertTrue(False, 'addr not found. [{}]:[{}]'.format(
                index, temp_addr))
        desc = test_obj.desc_dic[temp_addr]
        txin_utxo_list.append(UtxoData(
            txid=txid, vout=index, amount=txout.amount, descriptor=desc))
    txouts2 = [
        TxOut(300000000, str(test_obj.addr_dic['main'])),
    ]
    tx2 = Transaction.create(2, 0, txin_list, txouts2)
    main_addr = test_obj.addr_dic['main']
    utxos = get_utxo(btc_rpc, [fee_addr])
    utxo_list = convert_bitcoin_utxos(test_obj, utxos)
    tx2.fund_raw_transaction(txin_utxo_list, utxo_list, fee_addr,
                             target_amount=0, effective_fee_rate=20.0,
                             knapsack_min_change=1)
    # add sign: merge fee UTXOs and the tx1 outputs into one lookup list
    join_utxo_list: List['UtxoData'] = []
    join_utxo_list[len(join_utxo_list):len(join_utxo_list)] = utxo_list
    join_utxo_list[len(join_utxo_list):len(join_utxo_list)] = txin_utxo_list
    for txin in tx2.txin_list:
        utxo = search_utxos(test_obj, join_utxo_list, txin.outpoint)
        # derive the signing key from the path registered for this address
        path = str(test_obj.path_dic[str(utxo.descriptor.data.address)])
        sk = test_obj.hdwallet.get_privkey(path=path).privkey
        tx2.sign_with_privkey(txin.outpoint, utxo.descriptor.data.hash_type,
                              sk, amount=utxo.amount,
                              sighashtype=SigHashType.ALL)
    # broadcast
    print(Transaction.parse_to_json(str(tx2), network=NETWORK))
    btc_rpc.sendrawtransaction(str(tx2))
    # generate block
    btc_rpc.generatetoaddress(2, fee_addr)
    time.sleep(2)
    utxos = get_utxo(btc_rpc, [str(main_addr)])
    print('UTXO: {}'.format(utxos))
def test_bitcoin_multisig(test_obj: 'TestBitcoin'):
    """Round-trip funds through p2sh / p2wsh / p2sh-p2wsh 2-of-3 multisig.

    Step 1: fund a tx paying each multisig address type, sign with the fee
    key, broadcast and mine. Step 2: spend those outputs back to 'main',
    collecting two of the three member signatures per multisig input.
    """
    btc_rpc = test_obj.conn.get_rpc()
    # create tx (output multisig)
    txouts = [
        TxOut(100000000, str(test_obj.addr_dic['p2sh'])),
        TxOut(100000000, str(test_obj.addr_dic['p2wsh'])),
        TxOut(100000000, str(test_obj.addr_dic['p2sh-p2wsh'])),
    ]
    tx = Transaction.create(2, 0, [], txouts)
    # fundrawtransaction
    fee_addr = str(test_obj.addr_dic['fee'])
    fee_desc = test_obj.desc_dic[fee_addr]
    fee_sk = test_obj.hdwallet.get_privkey(path=FEE_PATH).privkey
    utxos = get_utxo(btc_rpc, [fee_addr])
    utxo_list = convert_bitcoin_utxos(test_obj, utxos)
    tx.fund_raw_transaction([], utxo_list, fee_addr,
                            target_amount=0, effective_fee_rate=20.0,
                            knapsack_min_change=1)
    # add sign
    for txin in tx.txin_list:
        utxo = search_utxos(test_obj, utxo_list, txin.outpoint)
        tx.sign_with_privkey(txin.outpoint, fee_desc.data.hash_type, fee_sk,
                             amount=utxo.amount,
                             sighashtype=SigHashType.ALL)
    # broadcast
    print(Transaction.parse_to_json(str(tx), network=NETWORK))
    btc_rpc.sendrawtransaction(str(tx))
    # generate block
    btc_rpc.generatetoaddress(2, fee_addr)
    time.sleep(2)
    # create tx (output wpkh only, input multisig tx1-3)
    txid = tx.txid
    txin_list = []
    txin_utxo_list = []
    # collect the non-fee (multisig) outputs of tx as inputs for tx2
    for index, txout in enumerate(tx.txout_list):
        temp_addr = str(txout.get_address(network=NETWORK))
        if temp_addr == fee_addr:
            continue  # skip the change output back to the fee address
        txin_list.append(TxIn(txid=txid, vout=index))
        if temp_addr not in test_obj.desc_dic:
            test_obj.assertTrue(False, 'addr not found. [{}]:[{}]'.format(
                index, temp_addr))
        desc = test_obj.desc_dic[temp_addr]
        txin_utxo_list.append(UtxoData(
            txid=txid, vout=index, amount=txout.amount, descriptor=desc))
    txouts2 = [
        TxOut(300000000, str(test_obj.addr_dic['main'])),
    ]
    tx2 = Transaction.create(2, 0, txin_list, txouts2)
    main_addr = test_obj.addr_dic['main']
    utxos = get_utxo(btc_rpc, [fee_addr])
    utxo_list = convert_bitcoin_utxos(test_obj, utxos)
    tx2.fund_raw_transaction(txin_utxo_list, utxo_list, fee_addr,
                             target_amount=0, effective_fee_rate=20.0,
                             knapsack_min_change=1)
    # add sign
    def multisig_sign(tx_obj, utxo, path_list):
        # Sign one multisig input: hash once, then gather member signatures
        # until the 2-of-3 requirement is met.
        sighash = tx_obj.get_sighash(
            outpoint=utxo.outpoint,
            hash_type=utxo.descriptor.data.hash_type,
            amount=utxo.amount,
            redeem_script=utxo.descriptor.data.redeem_script)
        signature_list = []
        for path in path_list:
            sk = test_obj.hdwallet.get_privkey(path=path).privkey
            sig = sk.calculate_ec_signature(sighash)
            sig.related_pubkey = sk.pubkey
            signature_list.append(sig)
            if len(signature_list) == 2:
                break  # 2-of-3: two signatures are sufficient
        tx_obj.add_multisig_sign(
            utxo.outpoint, utxo.descriptor.data.hash_type,
            utxo.descriptor.data.redeem_script, signature_list)
    # merge fee UTXOs and the tx1 outputs into one lookup list
    join_utxo_list: List['UtxoData'] = []
    join_utxo_list[len(join_utxo_list):len(join_utxo_list)] = utxo_list
    join_utxo_list[len(join_utxo_list):len(join_utxo_list)] = txin_utxo_list
    for index, txin in enumerate(tx2.txin_list):
        utxo = search_utxos(test_obj, join_utxo_list, txin.outpoint)
        if not utxo.descriptor.data.redeem_script:
            # single-key input (fee change): sign directly with its privkey
            path = str(test_obj.path_dic[str(utxo.descriptor.data.address)])
            sk = test_obj.hdwallet.get_privkey(path=path).privkey
            tx2.sign_with_privkey(txin.outpoint,
                                  utxo.descriptor.data.hash_type,
                                  sk, amount=utxo.amount,
                                  sighashtype=SigHashType.ALL)
        else:
            # multisig input: path_dic holds the member path list here
            path_list = test_obj.path_dic[str(utxo.descriptor.data.address)]
            multisig_sign(tx2, utxo, path_list)
    # broadcast
    print(Transaction.parse_to_json(str(tx2), network=NETWORK))
    btc_rpc.sendrawtransaction(str(tx2))
    # generate block
    btc_rpc.generatetoaddress(2, fee_addr)
    time.sleep(2)
    utxos = get_utxo(btc_rpc, [str(main_addr)])
    print('UTXO: {}'.format(utxos))
def test_psbt(test_obj: 'TestBitcoin'):
    """Exercise the PSBT workflow: create, fund, sign, combine, extract.

    Phase 1 funds three single-key outputs via a PSBT finalized by bitcoind.
    Phase 2 spends them back to 'main' using two partially-signed copies of
    the same PSBT that are then combined and extracted locally.
    """
    btc_rpc = test_obj.conn.get_rpc()
    fee_addr = str(test_obj.addr_dic['fee'])
    fee_sk = test_obj.hdwallet.get_privkey(path=FEE_PATH).privkey
    main_addr = test_obj.addr_dic['main']
    utxos = get_utxo(btc_rpc, [str(fee_addr)])  # listunspent
    utxo_list = convert_bitcoin_utxos(test_obj, utxos)
    txouts = [
        PsbtAppendOutputData(
            100000000,
            address=test_obj.addr_dic['p2pkh'],
            descriptor=test_obj.desc_dic[str(test_obj.addr_dic['p2pkh'])]),
        PsbtAppendOutputData(
            100000000,
            address=str(test_obj.addr_dic['p2wpkh']),
            descriptor=test_obj.desc_dic[str(test_obj.addr_dic['p2wpkh'])]),
        PsbtAppendOutputData(
            100000000,
            address=str(test_obj.addr_dic['p2sh-p2wpkh']),
            descriptor=test_obj.desc_dic[str(
                test_obj.addr_dic['p2sh-p2wpkh'])],
        ),
    ]
    psbt = Psbt.create(tx_version=2, network=NETWORK)
    psbt.add(outputs=txouts)
    # fund from the fee UTXOs; change goes to the fee descriptor
    psbt.fund(
        utxo_list=utxo_list,
        reserved_address_descriptor=test_obj.desc_dic[str(fee_addr)],
        effective_fee_rate=2.0, long_term_fee_rate=2.0, knapsack_min_change=0)
    psbt.sign(fee_sk)
    # bitcoinrpc: finalize extract
    ret = btc_rpc.finalizepsbt(str(psbt), True)
    tx_hex = ret['hex'] if 'hex' in ret else ''
    if not ret.get('complete', True):
        raise AssertionError("finalizepsbt not complete.")
    print(Transaction.parse_to_json(tx_hex, network=NETWORK))
    txid = btc_rpc.sendrawtransaction(tx_hex)
    tx = Transaction(tx_hex)
    # generate block
    btc_rpc.generatetoaddress(2, fee_addr)
    time.sleep(2)
    utxos = get_utxo(btc_rpc, [str(main_addr)])
    print('UTXO: {}'.format(utxos))
    txid = tx.txid
    txin_list = []
    key_list = []
    # phase 2 inputs: the three outputs created above (same index order),
    # plus the corresponding signing keys
    for index, _ in enumerate(txouts):
        txout = tx.txout_list[index]
        addr = txout.get_address(network=NETWORK)
        desc = test_obj.desc_dic[str(addr)]
        txin_list.append(PsbtAppendInputData(
            outpoint=OutPoint(txid, index),
            utxo=txout, descriptor=str(desc),
            utxo_tx=tx_hex))
        path = str(test_obj.path_dic[str(addr)])
        key_list.append(test_obj.hdwallet.get_privkey(path=path).privkey)
    txouts2 = [
        TxOut(300000000, str(test_obj.addr_dic['main'])),
    ]
    tx2 = Transaction.create(2, 0, [], txouts2)
    psbt2 = Psbt.from_transaction(transaction=tx2, network=NETWORK)
    # NOTE(review): a descriptor string is passed as `pubkey` here, and
    # `txouts2[0].address` is read as an attribute — presumably cfd accepts
    # a descriptor for this parameter; confirm against the cfd-python API.
    psbt2.set_output_bip32_key(0, pubkey=str(
        test_obj.desc_dic[str(txouts2[0].address)]))
    psbt2.add(inputs=txin_list)
    utxos = get_utxo(btc_rpc, [str(fee_addr)])  # listunspent
    utxo_list2 = convert_bitcoin_utxos(test_obj, utxos)
    psbt2.fund(
        utxo_list=utxo_list2,
        reserved_address_descriptor=test_obj.desc_dic[str(fee_addr)],
        effective_fee_rate=2.0, long_term_fee_rate=2.0, knapsack_min_change=0)
    # two independent copies: one signed by the fee key, one by the
    # output keys — then combined, mimicking a multi-party flow
    psbt21 = Psbt(str(psbt2), network=NETWORK)
    psbt22 = Psbt(str(psbt2), network=NETWORK)
    psbt21.sign(fee_sk)
    for key in key_list:
        psbt22.sign(key)
    # psbt2_str = btc_rpc.combinepsbt([str(psbt21), str(psbt22)])
    # psbt2 = Psbt(psbt2_str, network=NETWORK)
    # NOTE(review): the list mixes a str and a Psbt object — presumably
    # combine_psbts accepts both; confirm against the cfd-python API.
    psbt2 = Psbt.combine_psbts([str(psbt21), psbt22])
    tx2 = psbt2.extract(True)
    print(Transaction.parse_to_json(str(tx2), network=NETWORK))
    txid = btc_rpc.sendrawtransaction(str(tx2))
    # generate block
    btc_rpc.generatetoaddress(2, fee_addr)
    time.sleep(2)
    utxos = get_utxo(btc_rpc, [str(main_addr)])
    print('UTXO: {}'.format(utxos))
def test_taproot_schnorr(test_obj: 'TestBitcoin'):
btc_rpc = test_obj.conn.get_rpc()
main_addr = test_obj.addr_dic['main']
main_pk, _ = SchnorrPubkey.from_pubkey(str(main_addr.pubkey))
tr_addr = AddressUtil.taproot(main_pk, network=NETWORK)
main_path = str(test_obj.path_dic[str(main_addr)])
main_sk = test_obj.hdwallet.get_privkey(path=main_path).privkey
txouts = [
TxOut(100000000, str(tr_addr)),
]
tx = Transaction.create(2, 0, [], txouts)
# fundrawtransaction
fee_addr = str(test_obj.addr_dic['fee'])
fee_desc = test_obj.desc_dic[fee_addr]
fee_sk = test_obj.hdwallet.get_privkey(path=FEE_PATH).privkey
utxos = get_utxo(btc_rpc, [fee_addr])
utxo_list = convert_bitcoin_utxos(test_obj, utxos)
tx.fund_raw_transaction([], utxo_list, fee_addr,
target_amount=0, effective_fee_rate=2.0,
knapsack_min_change=0)
# add sign
for txin in tx.txin_list:
utxo = search_utxos(test_obj, utxo_list, txin.outpoint)
tx.sign_with_privkey(txin.outpoint, fee_desc.data.hash_type, fee_sk,
amount=utxo.amount,
sighashtype=SigHashType.ALL)
# broadcast
print(Transaction.parse_to_json(str(tx), network=NETWORK))
btc_rpc.sendrawtransaction(str(tx))
# generate block
btc_rpc.generatetoaddress(2, fee_addr)
time.sleep(2)
# create tx | |
#!/usr/bin/python
# Copyright 2002-2011 <NAME>. See LICENSE for licensing information.
import sys
#
# Current Mixminion version
#
# NOTE: this setup script is Python 2 source (print statements, octal 022);
# it must not be run under Python 3.
VERSION = '0.0.8alpha3'
# System: 0==alpha, 50==beta, 98=pre, 99==release candidate, 100==release
VERSION_INFO = (0,0,8,0,3)
# Check the version. We need to make sure version_info exists before we
# compare to it: it was only added as of Python version 1.6.
#
# (Because of syntax issues, this file won't even parse for any python older
# than 1.3. I'm okay with that.)
if getattr(sys, 'platform', None) == 'win32':
    # Windows builds need a newer interpreter than other platforms.
    if not hasattr(sys, 'version_info') or sys.version_info < (2,3,0):
        print "Sorry, but I require Python 2.3 or higher on Windows."
        sys.exit(1)
if not hasattr(sys, 'version_info') or sys.version_info < (2, 0, 0):
    print "Sorry, but I require Python 2.0 or higher."
    sys.exit(1)
# 2.1.0 and 2.1.1 are rejected individually; 2.1.2+ is acceptable.
if sys.version_info[:3] == (2,1,0):
    print "Python 2.1.0 has known bugs that keep Mixminion from working."
    print "Maybe you should upgrade to 2.1.3 or some more recent version."
    sys.exit(1)
if sys.version_info[:3] == (2,1,1):
    print "Python 2.1.1 has known bugs that keep Mixminion from working."
    print "Maybe you should upgrade to 2.1.3 or some more recent version."
    sys.exit(1)
# Fail early if the interpreter was built without required C modules.
try:
    import zlib
except ImportError:
    print "Zlib support seems to be missing; install Python with zlib support."
    sys.exit(1)
try:
    import socket
    del socket
except ImportError:
    print "Your Python installation is somehow missing socket support."
    if sys.platform.startswith("sunos") or sys.platform.startswith("solaris"):
        print "This is a known issue when building some versions of Python"
        print "on Solaris."
    sys.exit(1)
import os, re, shutil, string, struct
os.umask(022)
# Function to pull openssl version number out of an opensslv.h file. This
# isn't a real C preprocessor, but it seems to work well enough.
_define_version_line = re.compile(
    r'\s*#\s*define\s+OPENSSL_VERSION_NUMBER\s+(\S+)$')
def getOpenSSLVersion(filename):
    """Return OPENSSL_VERSION_NUMBER parsed from an opensslv.h file.

    filename: path to an opensslv.h header.
    Returns the version as a (long) integer, or None if the file is missing
    or the constant cannot be found or parsed. Failures are reported on
    stdout rather than raised.
    """
    if not os.path.exists(filename):
        print "Uh oh; can't open %s"%filename
        return None
    f = open(filename, 'r')
    version = None
    for l in f.readlines():
        m = _define_version_line.match(l)
        if m:
            version = m.group(1)
            break
    f.close()
    if not version:
        print "Uh oh; can't find a version in %s"%filename
        return None
    version = version.lower()
    try:
        # base 0 lets atol honor the usual 0x... hex prefix (Python 2 idiom)
        return string.atol(version, 0)
    except ValueError:
        print "Can't parse version from %s"%filename
        return None
# Build-configuration defaults for locating OpenSSL; the search logic below
# fills in INCLUDE_DIRS / LIBRARY_DIRS / LIBRARIES / STATIC_LIBS.
USE_OPENSSL = 1
# Lowest allowable OpenSSL version; this corresponds to OpenSSL 0.9.7b3
MIN_OPENSSL_VERSION = 0x00907003L
OPENSSL_CFLAGS = []
OPENSSL_LDFLAGS = []
MACROS=[]
MODULES=[]
# User-facing diagnostic messages printed by the search logic below.
BAD_OPENSSL_IN_CONTRIB = """
=========================================================
Bizarrely, ./contrib/openssl contains an obsolete version
of OpenSSL. Try removing ./contrib/openssl, then running
make download-openssl; make build-openssl again.
========================================================="""
NO_OPENSSL_FOUND = """
======================================================================
I need OpenSSL 0.9.7 or greater, and I couldn't find it anywhere that
I looked. If you installed it somewhere unusual, try setting the
variable OPENSSL_PREFIX as in:
make OPENSSL_PREFIX=/opt/openssl-0.9.7
If you have a nonstandard OpenSSL 0.9.7 installation, you may need to
give compiler flags directly, as in:
make OPENSSL_CFLAGS='-I ~/openssl-include' \\
OPENSSL_LDFLAGS='-L ~/openssl-libs -lssl097 -lcrypto097'
If your C compiler knows where to find OpenSSL 0.9.7, and I should
just trust it, use the SKIP_OPENSSL_SEARCH option, as in:
make SKIP_OPENSSL_SEARCH="y"
Finally, if you don't have OpenSSL 0.9.7 and you don't want to install
it, you can grab and build a local copy for Mixminion only by running:
make download-openssl
make build-openssl
(then)
make
(Or, if you have the OpenSSL source somewhere else, use OPENSSL_SRC
as in:
make build-openssl OPENSSL_SRC=~/src/openssl-0.9.7
make OPENSSL_SRC=~/src/openssl-0.9.7
)
======================================================================"""
#XXXX Use pkg-config when possible, if it exists.
# Locate OpenSSL. Precedence: explicit OPENSSL_CFLAGS/LDFLAGS env vars,
# then SKIP_OPENSSL_SEARCH, then a local ./contrib/openssl (or OPENSSL_SRC)
# build, then a scan of standard installation prefixes.
if USE_OPENSSL and sys.platform == 'win32':
    # If we're on windows, insist on finding the libraries in ./contrib/openssl
    INCLUDE_DIRS = []
    STATIC_LIBS = []
    LIBRARY_DIRS = []
    incd = ".\\contrib\\OpenSSL\\include"
    libd = ".\\contrib\\OpenSSL\\"
    # try both the given and lower-cased directory names
    if os.path.exists(incd): INCLUDE_DIRS.append(incd)
    if os.path.exists(incd.lower()): INCLUDE_DIRS.append(incd.lower())
    if os.path.exists(libd): LIBRARY_DIRS.append(libd)
    if os.path.exists(libd.lower()): LIBRARY_DIRS.append(libd.lower())
    if not (INCLUDE_DIRS and LIBRARY_DIRS):
        print ("Can't find openssl: make sure that a compiled openssl "
               "distribution is stored \nat .\\contrib\\OpenSSL")
        sys.exit(1)
    LIBRARIES = [ "ssleay32", "libeay32", "advapi32" ]
elif USE_OPENSSL:
    # For now, we assume that openssl-0.9.7 isn't generally deployed, so we
    # need to look carefully.
    # If the user has specified an OpenSSL installation, we trust the user.
    # Anything else is loopy.
    if os.environ.get("OPENSSL_CFLAGS") or os.environ.get("OPENSSL_LDFLAGS"):
        OPENSSL_CFLAGS = os.environ.get("OPENSSL_CFLAGS", "").split()
        OPENSSL_LDFLAGS = os.environ.get("OPENSSL_LDFLAGS", "").split()
        print "Using OpenSSL as specified in OPENSSL_CFLAGS/OPENSSL_LDFLAGS."
        INCLUDE_DIRS = []
        STATIC_LIBS = []
        LIBRARY_DIRS = []
        LIBRARIES = []
    # Otherwise, if the user has run 'make build-openssl', we have a good
    # copy of OpenSSL sitting in ./contrib/openssl that they want us to use.
    elif os.environ.get("SKIP_OPENSSL_SEARCH"):
        print "Assuming that the C compiler knows where to find OpenSSL."
        INCLUDE_DIRS = []
        STATIC_LIBS = []
        LIBRARY_DIRS = []
        LIBRARIES = [ 'ssl', 'crypto' ]
    elif ((os.path.exists("./contrib/openssl") or
           os.environ.get("OPENSSL_SRC"))
          and not os.environ.get("OPENSSL_PREFIX")):
        # use the source-tree build (static libs) and verify its version
        openssl_src = os.environ.get("OPENSSL_SRC", "./contrib/openssl")
        openssl_src = os.path.expanduser(openssl_src)
        if not os.path.exists(openssl_src):
            print "$OPENSSL_SRC does not exist."
            sys.exit(1)
        print "Using OpenSSL from", openssl_src
        openssl_inc = os.path.join(openssl_src, "include")
        INCLUDE_DIRS = [openssl_inc]
        STATIC_LIBS=[ os.path.join(openssl_src, "libssl.a"),
                      os.path.join(openssl_src, "libcrypto.a") ]
        LIBRARY_DIRS=[]
        LIBRARIES=[]
        v = getOpenSSLVersion(os.path.join(openssl_src,
                                           "include", "openssl", "opensslv.h"))
        if not v or v < MIN_OPENSSL_VERSION:
            print BAD_OPENSSL_IN_CONTRIB
            sys.exit(1)
    # Otherwise, look in a bunch of standard places for a possible OpenSSL
    # installation. This logic is adapted from check_ssl.m4 from ac-archive;
    # the list of locations is extended with locations from Python's setup.py.
    else:
        print "Searching for platform OpenSSL."
        found = 0
        PREFIXES = ("/usr/local/ssl", "/usr/contrib/ssl", "/usr/lib/ssl",
                    "/usr/ssl", "/usr/pkg", "/usr/local", "/usr", "/")
        if os.environ.get("OPENSSL_PREFIX"):
            # an explicit prefix restricts the search to that one location
            PREFIXES = (os.environ["OPENSSL_PREFIX"],)
        for prefix in PREFIXES:
            if found:
                break
            print "Looking in %s ..."%prefix
            incdir = os.path.join(prefix, "include")
            # trunc=1 handles installs whose headers sit directly in
            # include/ rather than include/openssl/
            for trunc in 0,1:
                if trunc:
                    opensslv_h = os.path.join(incdir, "opensslv.h")
                else:
                    opensslv_h = os.path.join(incdir, "openssl", "opensslv.h")
                if os.path.exists(opensslv_h):
                    v = getOpenSSLVersion(opensslv_h)
                    if v and v >= MIN_OPENSSL_VERSION:
                        INCLUDE_DIRS = [incdir]
                        LIBRARY_DIRS = [os.path.join(prefix,"lib")]
                        if trunc:
                            MACROS.append(('TRUNCATED_OPENSSL_INCLUDES', None))
                        print "Using version of OpenSSL in %s"%prefix
                        found = 1
                        break
                    print "Skipping old version of OpenSSL in %s"%prefix
        if not found:
            print NO_OPENSSL_FOUND
            sys.exit(1)
        STATIC_LIBS=[]
        LIBRARIES=['ssl','crypto']
#======================================================================
# Check the version of Mixminion as it's set in the source, and update
# __init__.py as needed.
f = open("lib/mixminion/__init__.py", 'r')
initFile = f.read()
f.close()
# Rewrite the __version__ / version_info lines to match this script.
initCorrected = re.compile(r'^__version__\s*=.*$', re.M).sub(
    '__version__ = \"%s\"'%VERSION, initFile)
initCorrected = re.compile(r'^version_info\s*=.*$', re.M).sub(
    'version_info = %r'%(VERSION_INFO,), initCorrected)
# Only touch the file when something actually changed (keeps mtimes stable).
if initCorrected != initFile:
    f = open("lib/mixminion/__init__.py", 'w')
    f.write(initCorrected)
    f.close()
#======================================================================
# Install unittest if python doesn't provide it. (This is a 2.0 issue)
try:
    import unittest
except:
    if not os.path.exists("lib/mixminion/_unittest.py"):
        shutil.copy("contrib/unittest.py", "lib/mixminion/_unittest.py")
# Install textwrap if Python doesn't provide it. (This goes for all python<2.3)
try:
    import textwrap
except:
    if not os.path.exists("lib/mixminion/_textwrap.py"):
        shutil.copy("contrib/textwrap.py", "lib/mixminion/_textwrap.py")
# If we have a version of Python older than 2.2, we can't do bounded-space
# decompression without magic. That magic is written by Zooko.
if sys.version_info[:3] < (2,2,0):
    if not os.path.exists("lib/mixminion/_zlibutil.py"):
        shutil.copy("contrib/zlibutil.py", "lib/mixminion/_zlibutil.py")
#======================================================================
# Detect endian-ness
#XXXX This breaks cross-compilation, but might be good enough for now.
# "@I" packs an unsigned int in the HOST's native byte order; comparing the
# resulting bytes against both orderings tells us which one this machine uses.
num = struct.pack("@I", 0x01020304)
big_endian = (num== "\x01\x02\x03\x04")
little_endian = (num=="\x04\x03\x02\x01")
other_endian = not (big_endian or little_endian)
# Export the result to the C extension via a preprocessor macro.
if big_endian:
print "Host is big-endian"
MACROS.append( ("MM_B_ENDIAN", 1) )
elif little_endian:
print "Host is little-endian"
MACROS.append( ("MM_L_ENDIAN", 1) )
elif other_endian:
print "\nWild! Your machine seems to be middle-endian, and yet you've"
print "somehow made it run Python. Despite your perversity, I admire"
print "your nerve, and will try to soldier on.\n"
MACROS.append( ("MM_O_ENDIAN", 1) )
#======================================================================
# Apple's OS X 10.2 has really weird options for its Python distutils.
# The logic to fix this comes from Twisted.
# Only applies when the exact broken Apple build of Python 2.2 is running.
BROKEN_CONFIG = '2.2 (#1, 07/14/02, 23:25:09) \n[GCC Apple cpp-precomp 6.14]'
if sys.platform == 'darwin' and sys.version == BROKEN_CONFIG:
# change this to 1 if you have some need to compile
# with -flat_namespace as opposed to -bundle_loader
FLAT_NAMESPACE = 0
BROKEN_ARCH = '-arch i386'
BROKEN_NAMESPACE = '-flat_namespace -undefined_suppress'
import distutils.sysconfig
# get_config_vars() populates the private _config_vars cache we patch below.
distutils.sysconfig.get_config_vars()
x = distutils.sysconfig._config_vars['LDSHARED']
# Strip the bogus -arch flag, and replace the flat-namespace flags with a
# -bundle_loader pointing at the real interpreter binary.
y = x.replace(BROKEN_ARCH, '')
if not FLAT_NAMESPACE:
e = os.path.realpath(sys.executable)
y = y.replace(BROKEN_NAMESPACE, '-bundle_loader ' + e)
if y != x:
print "Fixing some of Apple's compiler flag mistakes..."
distutils.sysconfig._config_vars['LDSHARED'] = y
#======================================================================
# Create a startup script if we're installing.
# This isn't as fully general as distutils allows. Unfortunately, distutils
# doesn't make it easy for us to create a script that knows where distutils
# has been told to install.
# If PREFIX is set, compute the site-packages dir the install will land in
# so the generated scripts can extend sys.path to find the package.
if os.environ.get('PREFIX'):
prefix = os.path.expanduser(os.environ["PREFIX"])
pathextra = os.path.join(prefix, "lib",
"python"+(sys.version)[:3],
"site-packages")
else:
pathextra = ""
# Generated scripts go under build/.
if not os.path.exists("build"):
os.mkdir("build")
# Windows needs the .py extension for the launcher to recognize the script.
if sys.platform == 'win32':
SCRIPT_SUFFIX = ".py"
else:
SCRIPT_SUFFIX = ""
SCRIPTS = []
for name in "mixminion", "mixminiond":
SCRIPT_PATH = os.path.join("build", name+SCRIPT_SUFFIX)
f = open(SCRIPT_PATH, 'wt')
# Distutils will take care of the executable path, and actually gets angry
# if we try to be smart on our own. *sigh*.
f.write("#!python\n")
f.write("import sys\n")
if pathextra and 'py2exe' not in sys.argv:
f.write("sys.path[0:0] = [%r]\n"%pathextra)
f.write("""\
try:
import mixminion.Main
except:
| |
"""
Get/set secret variable values from the AWS Secrets Manager
"""
import os
import sys
import select
import typing
import argparse
import json
import logging
import copy
import subprocess
from botocore.exceptions import ClientError
from dss.operations import dispatch
from dss.operations.util import polite_print
from dss.util.aws.clients import secretsmanager as sm_client # type: ignore
logger = logging.getLogger(__name__)
def get_secret_store_prefix() -> str:
    """
    Assemble the SecretsManager name prefix for this deployment: "<store>/<stage>".

    Reads $DSS_SECRETS_STORE and $DSS_DEPLOYMENT_STAGE; raises KeyError if
    either environment variable is unset.
    """
    store = os.environ["DSS_SECRETS_STORE"]
    stage = os.environ["DSS_DEPLOYMENT_STAGE"]
    return f"{store}/{stage}"
def fix_secret_variable_prefix(secret_name: str) -> str:
    """
    Return *secret_name* qualified with the secrets-store prefix.

    A name that already carries the prefix (with or without a leading slash)
    is returned unchanged.
    """
    prefix = get_secret_store_prefix()
    already_prefixed = secret_name.startswith(prefix) or secret_name.startswith("/" + prefix)
    if already_prefixed:
        return secret_name
    return f"{prefix}/{secret_name}"
def fetch_secret_safely(secret_name: str) -> dict:
    """
    Fetch a secret value from the secrets manager, translating AWS client
    failures into RuntimeErrors with readable messages.

    Raises RuntimeError if the secret does not exist or cannot be fetched.
    """
    try:
        return sm_client.get_secret_value(SecretId=secret_name)
    except ClientError as e:
        # Distinguish "does not exist" from every other client failure.
        if "Error" in e.response:
            if e.response["Error"]["Code"] == "ResourceNotFoundException":
                raise RuntimeError(f"Error: secret {secret_name} was not found!")
        raise RuntimeError(f"Error: could not fetch secret {secret_name} from secrets manager")
# Register the "secrets" command group; subcommands attach via @secrets.action.
secrets = dispatch.target("secrets", arguments={}, help=__doc__)
# Reusable argparse keyword bundles shared by several subcommands below.
json_flag_options = dict(
default=False, action="store_true", help="format the output as JSON if this flag is present"
)
dryrun_flag_options = dict(
default=False, action="store_true", help="do a dry run of the actual operation"
)
quiet_flag_options = dict(
default=False, action="store_true", help="suppress output"
)
@secrets.action(
    "list",
    arguments={
        "--json": json_flag_options,
    },
)
def list_secrets(argv: typing.List[str], args: argparse.Namespace):
    """
    Print the name of every secret variable stored in the secrets manager
    under the $DSS_SECRETS_STORE/$DSS_DEPLOYMENT_STAGE prefix, sorted,
    either one per line or as a JSON list (--json).
    """
    store_prefix = get_secret_store_prefix()
    paginator = sm_client.get_paginator("list_secrets")
    secret_names = []
    # Walk every page of the ListSecrets API.
    for response in paginator.paginate():
        for secret in response["SecretList"]:
            secret_name = secret["Name"]
            # Only save secrets for this store and stage.
            # Fix: an unconditional append (via fix_secret_variable_prefix)
            # used to precede this check, which listed secrets belonging to
            # other stores/stages and duplicated every matching name.
            if secret_name.startswith(store_prefix):
                secret_names.append(secret_name)
    secret_names.sort()
    if args.json is True:
        print(json.dumps(secret_names, indent=4))
    else:
        for secret_name in secret_names:
            print(secret_name)
@secrets.action(
    "get",
    arguments={
        "secret_name": dict(help="the name of the secret to retrieve"),
        "--outfile": dict(required=False, type=str, help="specify an output file where the secret will be saved"),
        "--force": dict(
            default=False,
            action="store_true",
            help="overwrite the output file, if it already exists; must be used with --output flag.",
        ),
    },
)
def get_secret(argv: typing.List[str], args: argparse.Namespace):
    """
    Get the value of the secret variable specified by secret_name.

    Prints the value to stdout, or writes it to --outfile when given.
    Raises RuntimeError if the secret does not exist, or if --outfile
    already exists and --force was not passed.
    """
    # Note: this function should not print anything except the secret value,
    # in case the user pipes the output of this script to something else.
    if args.outfile:
        if os.path.exists(args.outfile) and not args.force:
            raise RuntimeError(
                f"Error: file {args.outfile} already exists, use the --force flag to overwrite it"
            )
    secret_name = fix_secret_variable_prefix(args.secret_name)
    # Attempt to obtain secret
    try:
        response = sm_client.get_secret_value(SecretId=secret_name)
        secret_val = response["SecretString"]
    except ClientError:
        # A secret variable with that name does not exist
        raise RuntimeError(f"Error: Resource not found: {secret_name}")
    # Fix: write the file directly instead of swapping sys.stdout, which
    # leaked the open file handle and left sys.stdout unrestored if the
    # print raised.  print() keeps the trailing newline identical.
    if args.outfile:
        with open(args.outfile, "w") as fh:
            print(secret_val, file=fh)
    else:
        print(secret_val)
@secrets.action(
"set",
arguments={
"secret_name": dict(help="name of secret to set (limit 1 at a time)"),
"--infile": dict(help="specify an input file whose contents is the secret value"),
"--force": dict(
default=False, action="store_true", help="force the action to happen (no interactive prompt)"
),
"--dry-run": dryrun_flag_options,
"--quiet": quiet_flag_options
},
)
def set_secret(argv: typing.List[str], args: argparse.Namespace):
"""Set the value of the secret variable."""
secret_name = fix_secret_variable_prefix(args.secret_name)
# Decide what to use for infile
# The value comes either from --infile or, when absent, from piped stdin.
secret_val = None
if args.infile is not None:
if os.path.isfile(args.infile):
with open(args.infile, 'r') as f:
secret_val = f.read()
else:
raise RuntimeError(f"Error: specified input file {args.infile} does not exist!")
else:
# Use stdin (input piped to this script) as secret value.
# stdin provides secret value, flag --secret-name provides secret name.
# select() with a zero timeout checks whether stdin has data waiting.
if not select.select([sys.stdin], [], [], 0.0)[0]:
raise RuntimeError("Error: stdin was empty! A secret value must be provided via stdin")
secret_val = sys.stdin.read()
# Create or update
# EAFP: a failed get means "create"; a successful get means "update".
try:
# Start by trying to get the secret variable
sm_client.get_secret_value(SecretId=secret_name)
except ClientError:
# A secret variable with that name does not exist, so create it
if args.dry_run:
polite_print(
args.quiet,
f"Secret variable {secret_name} not found in secrets manager, dry-run creating it"
)
else:
if args.infile:
polite_print(
args.quiet,
f"Secret variable {secret_name} not found in secrets manager, creating from input file"
)
else:
polite_print(
args.quiet,
f"Secret variable {secret_name} not found in secrets manager, creating from stdin"
)
sm_client.create_secret(Name=secret_name, SecretString=secret_val)
else:
# Get operation was successful, secret variable exists
# Prompt the user before overwriting, unless --force flag present
if not args.force and not args.dry_run:
# Prompt the user to make sure they really want to do this
confirm = f"""
*** WARNING!!! ***
The secret you are setting currently has a value. Calling the
set secret function will overwrite the current value of the
secret!
Note:
- To do a dry run of this operation first, use the --dry-run flag.
- To ignore this warning, use the --force flag.
Are you really sure you want to update the secret?
(Type 'y' or 'yes' to confirm):
"""
response = input(confirm)
if response.lower() not in ["y", "yes"]:
print("You safely aborted the set secret operation!")
sys.exit(0)
if args.dry_run:
polite_print(
args.quiet,
f"Secret variable {secret_name} found in secrets manager, dry-run updating it"
)
else:
if args.infile:
polite_print(
args.quiet,
f"Secret variable {secret_name} found in secrets manager, updating from input file"
)
else:
polite_print(
args.quiet,
f"Secret variable {secret_name} found in secrets manager, updating from stdin"
)
sm_client.update_secret(SecretId=secret_name, SecretString=secret_val)
@secrets.action(
"delete",
arguments={
"secret_name": dict(help="name of secret to delete (limit 1 at a time)"),
"--force": dict(
default=False,
action="store_true",
help="force the delete operation to happen non-interactively (no user prompt)",
),
"--dry-run": dict(default=False, action="store_true", help="do a dry run of the actual operation"),
"--quiet": quiet_flag_options
},
)
def del_secret(argv: typing.List[str], args: argparse.Namespace):
"""
Delete the value of the secret variable specified by the
--secret-name flag from the secrets manager
"""
secret_name = fix_secret_variable_prefix(args.secret_name)
# Probe for the secret first; the exception type distinguishes "never
# existed" from "already pending deletion".
try:
# Start by trying to get the secret variable
sm_client.get_secret_value(SecretId=secret_name)
except ClientError:
# No secret var found
polite_print(
args.quiet,
f"Secret variable {secret_name} not found in secrets manager!"
)
except sm_client.exceptions.InvalidRequestException:
# Already deleted secret var
polite_print(
args.quiet,
f"Secret variable {secret_name} already marked for deletion in secrets manager!"
)
else:
# Get operation was successful, secret variable exists
if not args.force and not args.dry_run:
# Make sure the user really wants to do this
confirm = f"""
*** WARNING!!! ****
You are about to delete secret {secret_name} from the secrets
manager. Are you sure you want to delete the secret?
(Type 'y' or 'yes' to confirm):
"""
response = input(confirm)
if response.lower() not in ["y", "yes"]:
raise RuntimeError("You safely aborted the delete secret operation!")
if args.dry_run:
# Delete it for fakes
polite_print(
args.quiet,
f"Secret variable {secret_name} found in secrets manager, dry-run deleting it"
)
else:
# Delete it for real
polite_print(
args.quiet,
f"Secret variable {secret_name} found in secrets manager, deleting it"
)
sm_client.delete_secret(SecretId=secret_name)
class SecretsChecker(object):
"""
A class to aid in checking deployed secrets in the secrets manager.
Will only check canonical HCA DSS stages ('dev', 'integration', 'staging').
Checking occurs as follows:
#1
For the json returned from the secret in GOOGLE_APPLICATION_SECRETS_SECRETS_NAME:
`auth_uri` should be in ['https://auth.data.humancellatlas.org/authorize',
'https://auth.dev.data.humancellatlas.org/authorize']
`token_uri` should be in ['https://auth.data.humancellatlas.org/oauth/token',
'https://auth.dev.data.humancellatlas.org/oauth/token']
#2
For the json returned from the secret in GOOGLE_APPLICATION_CREDENTIALS_SECRETS_NAME:
`project_id` should be `human-cell-atlas-travis-test`
`type` should be `service_account`
`client_email` should be the user account returned from the terraform output "service_account".
For example: dev should be `<EMAIL>@human-<EMAIL>`.
"""
def __init__(self, stage):
"""Initialize variables useful to the secrets checker"""
self.stage = stage
# Map of checkable canonical stages to their environment config file names.
self.stages = {'dev': 'environment',
'integration': 'environment.integration',
'staging': 'environment.staging'}
self.missing_secrets = []
self.malformed_secrets = []
self.incomplete_secrets = []
self.error_message = f'\n\n' \
f'Deploying to {self.stage.upper()} could not be completed.\n' \
f'It looks like an AWS secret has an unexpected value.\n' \
f'Please do not change AWS secrets for releases.\n'
# Non-canonical (custom) stages are not checked at all.
if self.stage not in self.stages:
print(f'Custom stage "{self.stage}" provided. Secret checking skipped.')
return
# NOTE(review): this deepcopy result is immediately overwritten on the next
# line and appears to be dead code — confirm get_stage_env() does not
# depend on it before removing.
self.stage_env = copy.deepcopy(os.environ)
self.stage_env = self.get_stage_env(self.stages[self.stage])
# Expected values; each is a list of acceptable alternatives.
self.service_account = self.fetch_terraform_output("service_account", "gcp_service_account").strip()
self.email = [f'{<EMAIL>']
self.project = ['human-cell-atlas-travis-test']
self.type = ['service_account']
self.auth_uri = ['https://auth.data.humancellatlas.org/oauth/authorize',
'https://auth.dev.data.humancellatlas.org/oauth/authorize']
self.token_uri = ['https://auth.data.humancellatlas.org/oauth/token',
'https://auth.dev.data.humancellatlas.org/oauth/token']
# Fetch the two secrets under test from the secrets manager.
self.app_secret_name = os.environ['GOOGLE_APPLICATION_SECRETS_SECRETS_NAME']
self.gcp_cred_secret_name = os.environ['GOOGLE_APPLICATION_CREDENTIALS_SECRETS_NAME']
self.app_secret = self.fetch_secret(self.app_secret_name)
self.gcp_cred_secret = self.fetch_secret(self.gcp_cred_secret_name)
def run_cmd(self, cmd, cwd=os.getcwd(), shell=True):
"""Run a command and return stdout"""
p = subprocess.Popen(cmd,
shell=shell,
| |
+= calcRelURL(toNodeURL, fromPageURL)
continue
mo = re.match('env:(.*)', directive)
if mo:
envName = mo.group(1)
if envName in os.environ:
xvalue += os.environ[envName]
else:
print "warning: the environment variable '%s' not defined" % envName
continue
raise DocError(
"unknown directive '%s' found while expanding an attribute" % directive)
if next < len(value): xvalue += value[next:]
#print "EXPAND: ", value, " -> ", xvalue
return xvalue
# --------------------------------------------------------------------
# Writes the generated XHTML site to disk.  Maintains a stack of open output
# files and a stack of output directories; strings are encoded to latin-1.
class Generator:
# --------------------------------------------------------------------
def __init__(self, rootDir):
ensureDir(rootDir)
self.fileStack = []
self.dirStack = [rootDir]
# NOTE(review): ensureDir(rootDir) is called twice (also above) — the
# second call looks redundant; confirm before removing.
ensureDir(rootDir)
#print "CD ", rootDir
def open(self, filePath):
# Open a new output file (relative to the current directory) and push it.
filePath = os.path.join(self.dirStack[-1], filePath)
fid = open(filePath, "w")
self.fileStack.append(fid)
fid.write(DOCTYPE_XHTML_TRANSITIONAL)
#print "OPEN ", filePath
def putString(self, str):
# Write raw text to the current file; encoding failures become DocErrors.
fid = self.fileStack[-1]
try:
encoded = str.encode('latin-1')
fid.write(encoded)
except (UnicodeEncodeError, IOError), e:
raise DocError(e.__str__())
def putXMLString(self, str):
# Write text with XML special characters escaped and unicode characters
# mapped to HTML entities.
fid = self.fileStack[-1]
xstr = xml.sax.saxutils.escape(str, mapUnicodeToHtmlEntity)
try:
fid.write(xstr.encode('latin-1'))
except:
print "OFFENDING", str, xstr
print mapUnicodeToHtmlEntity[str]
raise
def putXMLAttr(self, str):
# Write text quoted as an XML attribute value.
fid = self.fileStack[-1]
xstr = xml.sax.saxutils.quoteattr(str)
fid.write(xstr.encode('latin-1'))
def close(self):
# Close and pop the current output file.
self.fileStack.pop().close()
#print "CLOSE"
def changeDir(self, dirName):
# Enter (creating if needed) a subdirectory of the current directory.
currentDir = self.dirStack[-1]
newDir = os.path.join(currentDir, dirName)
ensureDir(newDir)
self.dirStack.append(newDir)
#print "CD ", newDir
def parentDir(self):
self.dirStack.pop()
#print "CD .."
def tell(self):
# Current position in the open file; used with seek() to retract output.
fid = self.fileStack[-1]
return fid.tell()
def seek(self, pos):
fid = self.fileStack[-1]
fid.seek(pos)
# --------------------------------------------------------------------
class DocInclude(DocNode):
    """A <web:include src="..."> element; records the file to be included."""

    def __init__(self, attrs, URL, locator):
        DocNode.__init__(self, attrs, URL, locator)
        if "src" not in attrs:
            raise DocError("include missing 'src' attribute")
        self.filePath = attrs["src"]

    def __str__(self):
        src = xml.sax.saxutils.quoteattr(self.filePath)
        return "%s:<web:include src=%s>" % (DocNode.__str__(self), src)
# --------------------------------------------------------------------
class DocDir(DocNode):
    """A <web:dir name="..."> element: publishes its children inside a
    subdirectory of the parent's output directory."""

    def __init__(self, attrs, URL, locator):
        DocNode.__init__(self, attrs, URL, locator)
        if not attrs.has_key("name"):
            raise DocError("dir tag missing 'name' attribute")
        self.dirName = attrs["name"]

    def __str__(self):
        name = xml.sax.saxutils.quoteattr(self.dirName)
        return "%s:<web:dir name=%s>" % (DocNode.__str__(self), name)

    def getPublishDirName(self):
        # Nest this directory under the parent's publish directory.
        return self.parent.getPublishDirName() + self.dirName + os.sep

    def publish(self, generator, pageNode = None):
        # Descend into the output subdirectory for the duration of publishing.
        generator.changeDir(self.dirName)
        DocNode.publish(self, generator, pageNode)
        generator.parentDir()
    publish = makeGuard(publish)
# --------------------------------------------------------------------
class DocGroup(DocNode):
    """A <web:group> element: a plain container with no behaviour of its own."""

    def __init__(self, attrs, URL, locator):
        DocNode.__init__(self, attrs, URL, locator)

    def __str__(self):
        return "%s:<web:group>" % DocNode.__str__(self)
# --------------------------------------------------------------------
class DocCDATAText(DocBareNode):
    """Raw character data inside a CDATA section; emitted verbatim."""

    def __init__(self, text):
        DocBareNode.__init__(self)
        self.text = text

    def __str__(self):
        return DocNode.__str__(self) + ":CDATA text:" + self.text

    def publish(self, gen, pageNode = None):
        # Nothing to emit outside a page context.  (The original tested both
        # 'is None' and falsiness; a single falsy check is equivalent.)
        if not pageNode:
            return
        gen.putString(self.text)
# --------------------------------------------------------------------
class DocCDATA(DocNode):
    """A CDATA section: wraps its published children in <![CDATA[ ... ]]>."""

    def __init__(self):
        DocNode.__init__(self, {}, None, None)

    def __str__(self):
        return "%s:CDATA" % DocNode.__str__(self)

    def publish(self, gen, pageNode = None):
        if pageNode is None:
            return
        gen.putString("<![CDATA[")
        DocNode.publish(self, gen, pageNode)
        gen.putString("]]>")
# --------------------------------------------------------------------
class DocHtmlText(DocBareNode):
# --------------------------------------------------------------------
def __init__(self, text):
DocBareNode.__init__(self)
self.text = text
def __str__(self):
return DocNode.__str__(self) + ":text:'" + \
self.text.encode('utf-8').encode('string_escape') + "'"
def publish(self, gen, pageNode = None):
if pageNode is None: return
# find occurences of %directive; in the text node and do the
# appropriate substitutions
next = 0
for m in re.finditer("%(\w+)(:.*)?;", self.text):
if next < m.start():
gen.putXMLString(self.text[next : m.start()])
next = m.end()
directive = self.text[m.start()+1 : m.end()-1]
directive = m.group(1)
if directive == "content":
pageNode.publish(gen, pageNode)
elif directive == "pagestyle":
for s in pageNode.findChildren(DocPageStyle):
s.publish(gen, pageNode)
elif directive == "pagescript":
for s in pageNode.findChildren(DocPageScript):
s.publish(gen, pageNode)
elif directive == "pagetitle":
gen.putString(pageNode.title)
elif directive == "path":
ancPages = [x for x in walkAncestors(pageNode, DocPage)]
ancPages.reverse()
gen.putString(" - ".join([x.title for x in ancPages]))
elif directive == "navigation":
gen.putString("<ul>\n")
openNodeStack = [x for x in walkAncestors(pageNode, DocPage)]
siteNode = walkAncestors(pageNode, DocSite).next()
siteNode.publishIndex(gen, pageNode, openNodeStack)
gen.putString("</ul>\n")
elif directive == "env":
envName = m.group(2)[1:]
if envName in os.environ:
gen.putString(os.environ[envName])
else:
print "warning: environment variable '%s' not defined" % envName
else:
print "warning: ignoring unknown directive '%s'" % label
if next < len(self.text):
gen.putXMLString(self.text[next:])
# --------------------------------------------------------------------
class DocCodeText(DocBareNode):
    """Verbatim text content collected inside a <web:precode> element."""

    def __init__(self, text):
        DocBareNode.__init__(self)
        self.text = text

    def __str__(self):
        escaped = self.text.encode('utf-8').encode('string_escape')
        return DocNode.__str__(self) + ":text:'" + escaped + "'"
# --------------------------------------------------------------------
# A <web:precode> element: a code listing, optionally syntax-highlighted with
# pygments when a non-"plain" type attribute is given and pygments is present.
class DocCode(DocNode):
# --------------------------------------------------------------------
def __init__(self, attrs, URL = None, locator = None):
DocNode.__init__(self, attrs, URL, locator)
# 'type' selects the pygments lexer; defaults to no highlighting.
self.type = "plain"
if attrs.has_key("type"): self.type = attrs["type"]
def __str__(self):
str = "<web:precode"
for k, v in self.attrs.items():
str = str + " " + k + "='" + xml.sax.saxutils.escape(v) + "'"
str = str + "> type = " + self.type
return DocNode.__str__(self) + ":" + str
def publish(self, gen, pageNode = None):
if pageNode is None: return
# Concatenate the text of all DocCodeText children into one listing.
code = ""
for n in self.getChildren():
if n.isA(DocCodeText):
code = code + n.text
if has_pygments and not self.type == "plain":
try:
lexer = pygments.lexers.get_lexer_by_name(self.type)
gen.putString(pygments.highlight(code,
lexer,
pygments.formatters.HtmlFormatter()))
except pygments.util.ClassNotFound:
# Unknown lexer name: warn and fall back to a plain <pre> block.
print "warning: could not find a syntax highlighter for '%s'" % self.type
gen.putString("<pre>" + code + "</pre>")
else:
gen.putString("<pre>" + code + "</pre>")
DocNode.publish(self, gen, pageNode)
# --------------------------------------------------------------------
# A generic pass-through HTML element: republished as-is with its attributes
# expanded (%directive; substitution) and its children published inside it.
class DocHtmlElement(DocNode):
# --------------------------------------------------------------------
def __init__(self, tag, attrs, URL = None, locator = None):
DocNode.__init__(self, attrs, URL, locator)
self.tag = tag
def __str__(self):
str = "<html:" + self.tag
for k, v in self.attrs.items():
str = str + " " + k + "='" + xml.sax.saxutils.escape(v) + "'"
str = str + ">"
return DocNode.__str__(self) + ":" + str
def getPublishURL(self):
# An element is addressable as a fragment (#id) of its enclosing page.
anc = self.findAncestors(DocPage)
if len(anc) == 0: return None
return anc[0].getPublishURL() + "#" + self.id
def publish(self, gen, pageNode = None):
if pageNode is None: return
gen.putString("<")
gen.putString(self.tag)
# Attribute values may contain %directive; markers; expand before quoting.
for name, value in self.attrs.items():
gen.putString(" ")
gen.putString(name)
gen.putString("=")
gen.putXMLAttr(expandAttr(value, pageNode))
if self.tag == 'br':
# workaround for browser that do not like <br><br/>
gen.putString("/>")
else:
gen.putString(">")
# NOTE(review): with the indentation lost in this copy it is ambiguous
# whether the closing-tag emission below is meant to be skipped for the
# self-closed 'br' branch — confirm against the original layout.
DocNode.publish(self, gen, pageNode)
gen.putString("</")
gen.putString(self.tag)
gen.putString(">")
publish = makeGuard(publish)
# --------------------------------------------------------------------
class DocTemplate(DocNode):
    """A page template; publishes its children only within a page context."""

    def __init__(self, attrs, URL, locator):
        DocNode.__init__(self, attrs, URL, locator)

    def publish(self, generator, pageNode = None):
        if pageNode is None:
            return
        DocNode.publish(self, generator, pageNode)
    publish = makeGuard(publish)
# --------------------------------------------------------------------
# A stylesheet for a page: emitted either as a <link> to an external sheet
# (when an 'href' attribute is present) or as an inline <style> block.
class DocPageStyle(DocNode):
# --------------------------------------------------------------------
def __init__(self, attrs, URL, locator):
DocNode.__init__(self, attrs, URL, locator)
def publish(self, gen, pageNode = None):
if pageNode is None: return
sa = self.getAttributes()
if sa.has_key("href"):
# External stylesheet reference.
gen.putString("<link rel=\"stylesheet\" type=")
if sa.has_key("type"):
gen.putXMLAttr(expandAttr(sa["type"], pageNode))
else:
gen.putString("\"text/css\" ")
gen.putString("href=")
gen.putXMLAttr(expandAttr(sa["href"], pageNode))
# NOTE(review): this emits "></style>" after a <link ...>, which is not
# well-formed markup for a void <link> element — confirm whether "/>"
# was intended here.
gen.putString("></style>")
else:
# Inline <style> block containing the element's published children.
gen.putString("<style rel=\"stylesheet\" type=")
if sa.has_key("type"):
gen.putXMLAttr(expandAttr(sa["type"], pageNode))
else:
gen.putString("\"text/css\" ")
gen.putString(">")
DocNode.publish(self, gen, pageNode)
gen.putString("</style>")
publish = makeGuard(publish)
# --------------------------------------------------------------------
# A script for a page: emitted as a <script> element, optionally referencing
# an external file via 'src'; defaults to type "text/javascript".
class DocPageScript(DocNode):
# --------------------------------------------------------------------
def __init__(self, attrs, URL, locator):
DocNode.__init__(self, attrs, URL, locator)
def publish(self, gen, pageNode = None):
if pageNode is None: return
sa = self.getAttributes()
gen.putString("<script type=")
if sa.has_key("type"):
# NOTE(review): unlike the default branch below, no trailing space is
# written after an explicit type attribute, so "src=" may abut it in
# the output — confirm whether a space is missing here.
gen.putXMLAttr(expandAttr(sa["type"], pageNode))
else:
gen.putString("\"text/javascript\" ")
if sa.has_key("src"):
gen.putString("src=")
gen.putXMLAttr(expandAttr(sa["src"], pageNode))
gen.putString(">")
# Inline script content, if any, comes from the published children.
DocNode.publish(self, gen, pageNode)
gen.putString("</script>")
publish = makeGuard(publish)
# --------------------------------------------------------------------
# A <web:page> element: one output HTML file, rendered through a template,
# plus an entry in the navigation index (unless hidden).
class DocPage(DocNode):
# --------------------------------------------------------------------
# Class-wide counter used to synthesize default page names ("page1", ...).
counter = 0
def __init__(self, attrs, URL, locator):
DocNode.__init__(self, attrs, URL, locator)
DocPage.counter = 1 + DocPage.counter
self.templateID = "template.default"
self.name = "page%d" % DocPage.counter
self.title = "untitled"
self.hide = False
for k, v in self.attrs.items():
if k == 'src':
# NOTE(review): 'src' assigns the title, same as 'title' below —
# looks suspicious; confirm this is intended and not a leftover.
self.title = v
elif k == 'name':
self.name = v
elif k == 'id':
pass
elif k == 'title':
self.title = v
elif k == 'hide':
# hide="yes" removes the page from the navigation index.
self.hide = (v.lower() == 'yes')
else:
raise DocError(
"web:page cannot have '%s' attribute" % k)
def __str__(self):
return DocNode.__str__(self) + ":<web:page name='%s' title='%s'>" \
% (xml.sax.saxutils.escape(self.name),
xml.sax.saxutils.escape(self.title))
def getPublishFileName(self):
return self.name + ".html"
def getPublishURL(self):
# Absolute URL: site URL + accumulated directory path + file name.
siteNode = self.findAncestors(DocSite)[0]
return siteNode.getPublishURL() + \
self.getPublishDirName() + \
self.getPublishFileName()
def publish(self, generator, pageNode = None):
if not pageNode:
# Top-level invocation: render this page through its template into
# its own file, then publish descendants outside a page context.
generator.open(self.getPublishFileName())
templateNode = nodeIndex[self.templateID]
templateNode.publish(generator, self)
generator.close()
DocNode.publish(self, generator, None)
elif pageNode is self:
# Re-entered via the template's %content; directive: emit children.
DocNode.publish(self, generator, pageNode)
def publishIndex(self, gen, pageNode, openNodeStack):
if self.hide: return
gen.putString("<li><a href=")
gen.putXMLAttr(
expandAttr("%%pathto:%s;" % self.getID(), pageNode))
# Mark the currently-published page's entry as active.
if len(openNodeStack) == 1 and self == openNodeStack[0]:
gen.putString(" class='active' ")
gen.putString(">")
gen.putXMLString(self.title)
gen.putString("</a>\n")
# Remember the file position so the "<ul>" written next can be retracted
# (via seek) if no child entries end up being emitted.
pos = gen.tell()
gen.putString("<ul>\n")
hasIndexedChildren = False
# Only expand children of pages on the path to the current page.
if len(openNodeStack) > 0 and self == openNodeStack[-1]:
openNodeStack.pop()
hasIndexedChildren = DocNode.publishIndex(self, gen, pageNode, openNodeStack)
if hasIndexedChildren:
gen.putString("</ul>")
else:
gen.seek(pos)
gen.putString("</li>\n")
return True
publish = makeGuard(publish)
# --------------------------------------------------------------------
class DocSite(DocNode):
    """Root of the document tree; holds the site URL and the output directory
    and kicks off publishing of the whole tree."""

    def __init__(self, attrs, URL, locator):
        DocNode.__init__(self, attrs, URL, locator)
        self.siteURL = "http://www.foo.org/"
        self.outDir = "html"

    def __str__(self):
        return "%s:<web:site>" % DocNode.__str__(self)

    def getPublishURL(self):
        return self.siteURL

    def getPublishDirName(self):
        # The site itself contributes no directory component.
        return ""

    def getOutDir(self):
        return self.outDir

    def setOutDir(self, outDir):
        self.outDir = outDir

    def publish(self):
        # Publish the whole tree into outDir through a fresh Generator.
        generator = Generator(self.outDir)
        DocNode.publish(self, generator)
    publish = makeGuard(publish)
# --------------------------------------------------------------------
class DocHandler(ContentHandler):
# --------------------------------------------------------------------
def __init__(self):
"""Initialize the SAX content handler's parse state."""
ContentHandler.__init__(self)
# Root of the parsed document tree; set during parsing.
self.rootNode = None
# Stacks tracking the open element nodes, locators, and included files.
self.stack = []
self.locatorStack = []
self.filePathStack = []
self.verbosity = 1
# True while inside the DTD declaration (content there is skipped).
self.inDTD = False
def resolveEntity(self, publicid, systemid):
"""
Resolve XML entities by mapping to a local copy of the (X)HTML
DTDs.

Only the final path component of the system id is used, so any remote
DTD URL is served from the bundled dtd/xhtml1 directory next to this
module (avoids network fetches during parsing).
"""
return open(os.path.join(
os.path.dirname(__file__),
'dtd/xhtml1',
systemid[systemid.rfind('/')+1:]), "rb")
def lookupFile(self, filePath):
if os.path.exists(filePath):
return filePath
if filePath[0] == '/':
return None
for path in self.filePathStack:
dir = os.path.dirname(path)
qualFilePath = os.path.join(dir, | |
import re
import collections
from copy import copy
from numbers import Number
from operator import truediv
from itertools import chain, repeat, product, accumulate
from typing import Any, Iterable, Dict, List, Tuple, Optional, Sequence, Hashable, Iterator, Union, Type, Callable
from coba.config import CobaConfig
from coba.utilities import PackageChecker
from coba.pipes import Filter, Cartesian, JsonEncode, JsonDecode, StopPipe, Pipe, DiskSink, DiskSource
class Table:
"""A container class for storing tabular data."""
def __init__(self, name:str, primary_cols: Sequence[str], rows: Sequence[Dict[str,Any]] = []):
"""Instantiate a Table.
Args:
name: The name of the table.
primary_cols: Column names forming each row's primary key.
rows: The table's rows; a row may carry a '_packed' dict mapping
column name -> list of values, which expands the row into
multiple indexed lines.
"""
self._name = name
self._primary = primary_cols
# An 'index' column exists iff at least one row is packed.
def index_cols():
for row in rows:
if '_packed' in row:
return ['index']
return []
# All data columns: flat keys (minus '_packed') plus packed keys, per row.
def data_cols():
return ( sorted(row.keys() - ['_packed'] | row.get('_packed',{}).keys()) for row in rows)
for row in rows:
assert len(row.keys() & primary_cols) == len(primary_cols), 'A Table row was provided without a primary key.'
# Deduplicate while preserving first-occurrence order:
# primary cols first, then 'index', then data columns.
all_columns = list(chain(primary_cols, index_cols(), *data_cols()))
self._columns = sorted(set(all_columns), key=lambda col: all_columns.index(col))
# Row storage: sorted keys plus per-key flat and packed value dicts.
self._rows_keys: List[Hashable ] = []
self._rows_flat: Dict[Hashable, Dict[str,Any]] = {}
self._rows_pack: Dict[Hashable, Dict[str,Any]] = {}
for row in rows:
# Single-column keys are stored bare; multi-column keys as tuples.
row_key = row[primary_cols[0]] if len(primary_cols) == 1 else tuple(row[col] for col in primary_cols)
row_pack = row.pop('_packed',{})
row_flat = row
if row_pack:
# Packed rows get a 1-based 'index' list matching the pack length.
row_pack['index'] = list(range(1,len(list(row_pack.values())[0])+1))
self._rows_keys.append(row_key)
self._rows_pack[row_key] = row_pack
self._rows_flat[row_key] = row_flat
self._rows_keys = sorted(self._rows_keys)
@property
def name(self) -> str:
# The table's name as given at construction.
return self._name
@property
def keys(self) -> Sequence[Hashable]:
# Primary keys of all rows, in sorted order.
return self._rows_keys
@property
def columns(self) -> Sequence[str]:
# Column names: primary cols first, then 'index' (if packed), then data cols.
return self._columns
@property
def dtypes(self) -> Sequence[Type[Union[int,float,bool,object]]]:
    """Inferred storage type for each column, in column order."""
    flat_rows = self._rows_flat
    pack_rows = self._rows_pack
    inferred = []
    for col in self.columns:
        is_packed = any(col in pack_rows[key] for key in self.keys)
        observed = [
            flat_rows[key].get(col, pack_rows[key].get(col, self._default(col)))
            for key in self.keys
        ]
        inferred.append(self._infer_type(is_packed, observed))
    return inferred
def filter(self, pred:Callable[[Dict[str,Any]],bool] = None, **kwargs) -> 'Table':
    """Return a shallow copy of this table keeping only the rows that satisfy
    *pred* and every per-column filter in *kwargs* (a number is matched as a
    standalone token, a string as a regex, a callable as a predicate)."""

    def keep(key) -> bool:
        row = self[key]
        if pred is not None and not pred(row):
            return False
        for col, expected in kwargs.items():
            if isinstance(expected, Number):
                # Match the number only as a whole token within the value.
                if not re.search(f'(\D|^){expected}(\D|$)', str(row[col])):
                    return False
            if isinstance(expected, str) and not re.search(expected, row[col]):
                return False
            if callable(expected) and not expected(row[col]):
                return False
        return True

    filtered = copy(self)
    filtered._rows_keys = [key for key in self.keys if keep(key)]
    return filtered
def to_pandas(self) -> Any:
"""Convert the table to a pandas DataFrame (one line per packed entry)."""
PackageChecker.pandas("Table.to_pandas")
import pandas as pd #type: ignore
import numpy as np #type: ignore #pandas installs numpy so if we have pandas we have numpy
# Preallocate one numpy array per column sized to the fully-expanded table
# (len(self) counts packed expansion), then fill row-slices in key order.
col_numpy = { col: np.empty(len(self), dtype=dtype) for col,dtype in zip(self.columns,self.dtypes)}
row_index = 0
for key in self.keys:
flat = self._rows_flat[key]
pack = self._rows_pack[key]
# A packed row occupies len(pack['index']) consecutive output lines.
pack_size = 1 if not pack else len(pack['index'])
for col in self.columns:
if col in pack:
val = pack[col]
elif col in flat:
if isinstance(flat[col], (tuple,list)):
# Wrap so the sequence is assigned as a single cell value
# rather than being broadcast element-wise into the slice.
val = [flat[col]]
else:
val = flat[col]
else:
val = self._default(col)
# Flat values broadcast across the packed slice; packed lists fill it.
col_numpy[col][row_index:(row_index+pack_size)] = val
row_index += pack_size
return pd.DataFrame(col_numpy, columns=self.columns)
def to_tuples(self) -> Sequence[Tuple[Any,...]]:
tooples = []
for key in self.keys:
flat = self._rows_flat[key]
pack = self._rows_pack[key]
if not pack:
tooples.append(tuple(flat.get(col,self._default(col)) for col in self.columns))
else:
tooples.extend(zip(*[pack.get(col,repeat(flat.get(col,self._default(col)))) for col in self.columns]))
return tooples
def _default(self, column:str) -> Any:
return [1] if column == "index" else float('nan')
def _infer_type(self, is_packed: bool, values: Sequence[Any]) -> Type[Union[int,float,bool,object]]:
types: List[Optional[Type[Any]]] = []
to_type = lambda value: None if value is None else type(value)
for value in values:
if is_packed and isinstance(value, (list,tuple)):
types.extend([to_type(v) for v in value])
else:
types.append(to_type(value))
return self._resolve_types(types)
def _resolve_types(self, types: Sequence[Optional[Type[Any]]]) -> Type[Union[int,float,bool,object]]:
types = list(set(types))
if len(types) == 1 and types[0] in [dict,str]:
return object
if len(types) == 1 and types[0] in [int,float,bool]:
return types[0]
if all(t in [None,int,float] for t in types):
return float
return object
def __iter__(self) -> Iterator[Dict[str,Any]]:
for key in self.keys:
yield self[key]
def __contains__(self, key: Union[Hashable, Sequence[Hashable]]) -> bool:
return key in self.keys
def __str__(self) -> str:
return str({"Table": self.name, "Columns": self.columns, "Rows": len(self)})
def __len__(self) -> int:
return sum([ len(self._rows_pack[key].get('index',[None])) for key in self.keys ])
def __getitem__(self, key: Union[Hashable, Sequence[Hashable]]) -> Dict[str,Any]:
if key not in self.keys: raise KeyError(key)
return dict(**self._rows_flat[key], **self._rows_pack[key])
class Result:
"""A class representing the result of a Benchmark evaluation on a given collection of Simulations and Learners."""
    @staticmethod
    def from_file(filename: str) -> 'Result':
        """Create a Result from a transaction file.

        NOTE(review): the first pipe below rewrites `filename` in place,
        promoting any old-format transactions to the current version before
        the file is read back — callers should be aware this mutates the file.
        """

        #Why is this here??? This is really confusing in practice
        #if filename is None or not Path(filename).exists(): return Result()

        json_encode = Cartesian(JsonEncode())
        json_decode = Cartesian(JsonDecode())

        # Decode -> promote to current format -> re-encode, writing back to disk.
        Pipe.join(DiskSource(filename), [json_decode, ResultPromote(), json_encode], DiskSink(filename, 'w')).run()

        # Read the (now promoted) transactions and build the Result.
        return Result.from_transactions(Pipe.join(DiskSource(filename), [json_decode]).read())
@staticmethod
def from_transactions(transactions: Iterable[Any]) -> 'Result':
version = None
benchmark = {}
lrn_rows = []
sim_rows = []
int_rows = []
for trx in transactions:
if trx[0] == "version" : version = trx[1]
if trx[0] == "benchmark": benchmark = trx[1]
if trx[0] == "S" : sim_rows.append({**trx[2], "simulation_id": trx[1]})
if trx[0] == "L" : lrn_rows.append({**trx[2], "learner_id" : trx[1]})
if trx[0] == "I" : int_rows.append({**trx[2], "simulation_id": trx[1][0], "learner_id": trx[1][1]})
return Result(version, benchmark, sim_rows, lrn_rows, int_rows)
def __init__(self,
version : Optional[int] = None,
benchmark: Dict[str,Any] = {},
sim_rows : Sequence[Dict[str,Any]] = [],
lrn_rows : Sequence[Dict[str,Any]] = [],
int_rows : Sequence[Dict[str,Any]] = []) -> None:
"""Instantiate a Result class."""
self.version = version
self.benchmark = benchmark
self._simulations = Table("Simulations" , ['simulation_id'] , sim_rows)
self._learners = Table("Learners" , ['learner_id'] , lrn_rows)
self._interactions = Table("Interactions", ['simulation_id', 'learner_id'], int_rows)
@property
def learners(self) -> Table:
"""The collection of learners evaluated by Benchmark. The easiest way to work with the
learners is to convert them to a pandas data frame via Result.learners.to_pandas()
"""
return self._learners
@property
def simulations(self) -> Table:
"""The collection of simulations used to evaluate each learner in the Benchmark. The easiest
way to work with simulations is to convert to a dataframe via Result.simulations.to_pandas()
"""
return self._simulations
@property
def interactions(self) -> Table:
"""The collection of interactions that learners chose actions for in the Benchmark. Each interaction
has a simulation_id and learner_id column to link them to the learners and simulations tables. The
easiest way to work with interactions is to convert to a dataframe via Result.interactions.to_pandas()
"""
return self._interactions
def _copy(self) -> 'Result':
result = Result()
result.simulations = copy(self._simulations)
result.learners = copy(self._learners)
result.interactions = copy(self._interactions)
return result
def filter_fin(self) -> 'Result':
def is_complete_sim(sim_id):
return all((sim_id, lrn_id) in self.interactions for lrn_id in self.learners.keys)
new_result = copy(self)
new_result._simulations = self.simulations.filter(simulation_id=is_complete_sim)
new_result._interactions = self.interactions.filter(simulation_id=is_complete_sim)
if len(new_result.simulations) == 0:
CobaConfig.Logger.log(f"No simulation was found with interaction data for every learner.")
return new_result
def filter_sim(self, pred:Callable[[Dict[str,Any]],bool] = None, **kwargs) -> 'Result':
new_result = copy(self)
new_result._simulations = new_result.simulations.filter(pred, **kwargs)
new_result._interactions = new_result.interactions.filter(simulation_id=lambda id: id in new_result.simulations)
if len(new_result.simulations) == 0:
CobaConfig.Logger.log(f"No simulations matched the given filter: {kwargs}.")
return new_result
def filter_lrn(self, pred:Callable[[Dict[str,Any]],bool] = None, **kwargs) -> 'Result':
new_result = copy(self)
new_result._learners = new_result.learners.filter(pred, **kwargs)
new_result._interactions = new_result.interactions.filter(learner_id=lambda id: id in new_result.learners)
if len(new_result.learners) == 0:
CobaConfig.Logger.log(f"No learners matched the given filter: {kwargs}.")
return new_result
def plot_learners(self,
xlim: Optional[Tuple[Number,Number]] = None,
ylim: Optional[Tuple[Number,Number]] = None,
span: int = None,
err : Optional[str] = None,
each: bool = False,
show: bool = True,
ax = None,
fig = None) -> None:
"""This plots the performance of multiple Learners on multiple simulations. It gives a sense of the expected
performance for different learners across independent simulations. This plot is valuable in gaining insight
into how various learners perform in comparison to one another.
Args:
xlim: Define the x-axis limits to plot. If `None` the x-axis limits will be inferred.
ylim: Define the y-axis limits to plot. If `None` the y-axis limits will be inferred.
span: In general this indicates how many previous evaluations to average together. In practice this works
identically to ewm span value in the Pandas API. Additionally, if span equals None then all previous
rewards are averaged together vs span = 1 WHERE the instantaneous reward is plotted for each interaction.
err: Determine what kind of error bars to plot (if any). Valid types are `None`, 'se', and 'sd'. If `None`
then no bars are plotted, if 'se' the standard error is shown, and if 'sd the standard deviation is shown.
each: | |
return self._raw_data
def AsDict(self):
return self._dict
def GetPath(self):
return self._path
def FormatKeyValue(self, key, value):
return ('%s=%s' if self._is_cros or ' ' not in value else '%s="%s"') % (
key, value)
def GetValue(self, key, default=None):
return self._dict.get(key, default)
def AppendValue(self, key, value):
self._dict[key] = value
self._raw_data += '\n' + self.FormatKeyValue(key, value)
def SetValue(self, key, value):
if key in self._dict:
self._dict[key] = value
self._raw_data = re.sub(
r'^' + re.escape(key) + r'=.*', self.FormatKeyValue(key, value),
self._raw_data, flags=re.MULTILINE)
else:
self.AppendValue(key, value)
def DeleteValue(self, key):
if key not in self._dict:
return
self._dict.pop(key)
self._raw_data = re.sub(r'^' + re.escape(key) + r'=.*\n*', '',
self._raw_data, flags=re.MULTILINE)
  def Install(self, destination, backup=False):
    """Installs the contents to the given location as lsb-release style file.

    The file will be owned by root:root, with file mode 0644.

    Args:
      destination: path to install the contents to.
      backup: True to keep a timestamped .bak copy of any existing file.
    """
    with tempfile.NamedTemporaryFile('w', prefix='lsb_') as f:
      f.write(self._raw_data + '\n')
      f.flush()
      # Set the mode on the temp file so 'cp -p' below preserves 0644.
      os.chmod(f.name, 0o644)
      if backup and os.path.exists(destination):
        bak_file = '%s.bak.%s' % (destination, time.strftime('%Y%m%d%H%M%S'))
        Sudo(['cp', '-pf', destination, bak_file])
      # Copy into place and force root:root ownership.
      Sudo(['cp', '-pf', f.name, destination])
      Sudo(['chown', 'root:root', destination])
def GetChromeOSBoard(self, remove_signer=True):
"""Returns the Chrome OS board name.
Gets the value using KEY_LSB_CROS_BOARD. For test or DEV signed images, this
is exactly the board name we passed to build commands. For PreMP/MP signed
images, this may have suffix '-signed-KEY', where KEY is the key name like
'mpv2'.
Args:
remove_signer: True to remove '-signed-XX' information.
"""
board = self.GetValue(KEY_LSB_CROS_BOARD, '')
if remove_signer:
# For signed images, the board may come in $BOARD-signed-$KEY.
signed_index = board.find('-signed-')
if signed_index > -1:
board = board[:signed_index]
return board
def GetChromeOSVersion(self, remove_timestamp=True, remove_milestone=False):
"""Returns the Chrome OS build version.
Gets the value using KEY_LSB_CROS_VERSION. For self-built images, this may
include a time stamp.
Args:
remove_timestamp: Remove the timestamp like version info if available.
remove_milestone: Remove the milestone if available.
"""
version = self.GetValue('CHROMEOS_RELEASE_VERSION', '')
if remove_timestamp:
version = version.split()[0]
if remove_milestone:
re_branched_image_version = re.compile(r'^R\d+-(\d+\.\d+\.\d+)$')
ver_match = re_branched_image_version.match(version)
if ver_match:
version = ver_match.group(1)
return version
class RMAImageBoardInfo:
  """Stores the RMA image information related to one board."""

  __slots__ = ['board', 'kernel', 'rootfs']

  def __init__(self,
               board,
               kernel=PART_CROS_KERNEL_A,
               rootfs=PART_CROS_ROOTFS_A):
    """Args:
      board: board name for this image.
      kernel: kernel partition number for the board.
      rootfs: rootfs partition number for the board.
    """
    self.board = board
    self.kernel = kernel
    self.rootfs = rootfs

  def ToDict(self):
    """Serializes this info into a plain dict keyed by slot name."""
    return dict((name, getattr(self, name)) for name in self.__slots__)
def _WriteRMAMetadata(stateful, board_list):
  """Write RMA metadata to mounted stateful partition.

  Args:
    stateful: path of stateful partition mount point.
    board_list: a list of RMAImageBoardInfo objects.
  """
  payloads_dir = os.path.join(stateful, CrosPayloadUtils.GetCrosPayloadsDir())
  board_dicts = [board.ToDict() for board in board_list]
  SysUtils.WriteFileToMountedDir(payloads_dir, CROS_RMA_METADATA,
                                 json.dumps(board_dicts))
def _ReadRMAMetadata(stateful):
  """Read RMA metadata from mounted stateful partition.

  Args:
    stateful: path of stateful partition mount point.

  Returns:
    RMA metadata, which is a list of RMAImageBoardInfo.

  Raises:
    RuntimeError if the file doesn't exist and cannot auto-generate either.
  """
  payloads_dir = CrosPayloadUtils.GetCrosPayloadsDir()
  metadata_path = os.path.join(stateful, CrosPayloadUtils.GetCrosRMAMetadata())

  if os.path.exists(metadata_path):
    with open(metadata_path) as f:
      raw_entries = json.load(f)
    return [
        RMAImageBoardInfo(board=entry['board'],
                          kernel=entry['kernel'],
                          rootfs=entry['rootfs'])
        for entry in raw_entries]

  logging.warning('Cannot find %s.', metadata_path)
  # A legacy single-board RMA shim carries exactly one <board>.json payload
  # file; derive the metadata from its filename.
  found = glob.glob(os.path.join(stateful, payloads_dir, '*.json'))
  if len(found) == 1:
    logging.warning('Found legacy RMA shim. Auto-generating metadata.')
    board = os.path.basename(found[0]).split('.')[0]
    return [RMAImageBoardInfo(board=board, kernel=2, rootfs=3)]

  raise RuntimeError('Cannot get metadata, is this a RMA shim?')
def _GetBoardName(image):
  """Try to find the board name from a single-board shim image.

  Args:
    image: factory shim image.

  Returns:
    Board name of the shim image.

  Raises:
    RuntimeError if the shim is a multi-board shim.
  """
  try:
    with Partition(image, PART_CROS_STATEFUL).Mount() as stateful:
      metadata = _ReadRMAMetadata(stateful)
  except Exception:
    # No RMA metadata: treat the image as a plain reset shim and fall back
    # to the board recorded in the rootfs lsb-release.
    with Partition(image, PART_CROS_ROOTFS_A).Mount() as rootfs:
      lsb_path = os.path.join(rootfs, 'etc', 'lsb-release')
      return LSBFile(lsb_path).GetChromeOSBoard()

  if len(metadata) != 1:
    # Multiple boards: the caller must disambiguate.
    raise RuntimeError('Cannot get board name in a multi-board shim.')
  return metadata[0].board
class RMABoardResourceVersions:
  """Stores the RMA resource versions related to one board."""

  __slots__ = ['board', 'install_shim'] + PAYLOAD_COMPONENTS

  def __init__(self, **kargs):
    for component, version in kargs.items():
      assert component in self.__slots__, 'Unknown component "%s"' % component
      setattr(self, component, version)

  def __str__(self):
    """One 'name: version' line per slot, names left-justified to equal width."""
    width = max(len(name) for name in self.__slots__)
    lines = ['%s: %s' % (name.ljust(width), getattr(self, name, 'None'))
             for name in self.__slots__]
    return '\n'.join(lines)
def _ReadBoardResourceVersions(rootfs, stateful, board_info):
  """Read board resource versions from mounted stateful partition.

  Get board resource versions from <board>.json and install shim version.

  Args:
    stateful: path of stateful partition mount point.
    rootfs: path of rootfs mount point.
    board_info: a RMAImageBoardInfo instance.

  Returns:
    A RMABoardResourceVersions instance containing resource versions.
  """
  # Install shim version comes from the rootfs lsb-release; the timestamp is
  # intentionally kept to distinguish self-built shims.
  lsb_path = os.path.join(rootfs, 'etc', 'lsb-release')
  shim_version = LSBFile(lsb_path).GetChromeOSVersion(remove_timestamp=False)

  versions = {'board': board_info.board, 'install_shim': shim_version}

  payloads_dir = os.path.join(stateful, CrosPayloadUtils.GetCrosPayloadsDir())
  json_path = CrosPayloadUtils.GetJSONPath(payloads_dir, board_info.board)
  versions.update(CrosPayloadUtils.GetComponentVersions(json_path))

  return RMABoardResourceVersions(**versions)
class UserInput:
"""A helper class to manage user inputs."""
  @staticmethod
  def Select(title, options_list=None, options_dict=None,
             single_line_option=True, split_line=False, optional=False):
    """Ask user to select an option from the given options.

    Prints the options in `options_list` with their corresponding 1-based
    index, and key-value pairs in `options_dict`. Let the user enter a number
    or string to select an option.

    Args:
      title: question description.
      options_list: list of strings, each representing an option.
      options_dict: dict of (key, option), each representing an option.
      single_line_option: True to print the index and option in the same line.
      split_line: split line between options.
      optional: True to allow the user to input empty string.

    Returns:
      A user selected number in 0-based index, between 0 and
      len(options_list) - 1, or a string that is a key of `options_dict`,
      or None if the user inputs an empty string and `optional` set to True.
    """
    def print_with_split_line(s):
      print(s)
      if split_line:
        print(SPLIT_LINE)

    options_list = options_list or []
    options_dict = options_dict or {}
    list_n = len(options_list)
    dict_n = len(options_dict)
    # Nothing to choose from.
    if list_n + dict_n == 0:
      return None
    print_with_split_line('\n' + title)
    # List options are shown with 1-based indices; dict options with their key.
    for i, option in enumerate(options_list, 1):
      print_with_split_line(
          '(%d)%s%s' % (i, ' ' if single_line_option else '\n', option))
    for key, option in options_dict.items():
      print_with_split_line(
          '(%s)%s%s' % (key, ' ' if single_line_option else '\n', option))
    while True:
      # Prompt hint, e.g. [1-5, custom_key]; re-prompt until valid.
      keys = [] if list_n == 0 else ['1'] if list_n == 1 else ['1-%d' % list_n]
      keys += list(options_dict)
      prompt = 'Please select an option [%s]%s: ' % (
          ', '.join(keys), ' or empty to skip' if optional else '')
      answer = input(prompt).strip()
      if optional and not answer:
        return None
      try:
        selected = int(answer)
        if not 0 < selected <= list_n:
          print('Out of range: %d' % selected)
          continue
        # Convert to 0-based
        selected -= 1
      except ValueError:
        # Not an integer: interpret the answer as an options_dict key.
        if answer not in options_dict:
          print('Invalid option: %s' % answer)
          continue
        selected = answer
      break
    return selected
@staticmethod
def YesNo(title):
"""Ask user to input "y" or "n" for a question.
Args:
title: question description.
Returns:
True if the user inputs 'y', or False if the user inputs 'n'.
"""
print('\n' + title)
while True:
prompt = 'Please input "y" or "n": '
answer = input(prompt).strip().lower()
if answer == 'y':
return True
if answer == 'n':
return False
@staticmethod
def GetNumber(title, min_value=None, max_value=None, optional=False):
"""Ask user to input a number in the given range.
Args:
title: question description.
min_value: lower bound of the input number.
max_value: upper bound of the input number.
optional: True to allow the user to input empty string.
Returns:
The user input number, or None if the user inputs an empty string.
"""
if min_value is not None and max_value is not None:
assert min_value <= max_value, (
'min_value %d is greater than max_value %d' % (min_value, max_value))
print('\n' + title)
while True:
prompt = 'Enter a number in [%s, %s]%s: ' % (
str(min_value) if min_value is not None else '-INF',
str(max_value) if max_value is not None else 'INF',
' or empty to skip' if optional else '')
answer = input(prompt).strip()
if optional and not answer:
return None
try:
value = int(answer)
if min_value is not None and value < min_value:
raise ValueError('out of range')
if max_value is not None and value > max_value:
raise ValueError('out of range')
except ValueError:
print('Invalid option: %s' % answer)
continue
break
return value
@staticmethod
def GetString(title, max_length=None, optional=False):
"""Ask user to input a string.
Args:
| |
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
sys.path.append("../common")
from builtins import range
from builtins import str
from future.utils import iteritems
import os
import time
import threading
import traceback
import unittest
import numpy as np
import infer_util as iu
import test_util as tu
from tensorrtserver.api import *
import tensorrtserver.api.server_status_pb2 as server_status
_no_batching = (int(os.environ['NO_BATCHING']) == 1)
_model_instances = int(os.environ['MODEL_INSTANCES'])

# Pick the backend list for this run based on the batching configuration.
if _no_batching:
    _trials = ("savedmodel_nobatch", "graphdef_nobatch", "netdef_nobatch", "plan_nobatch", "onnx_nobatch", "libtorch_nobatch")
elif os.environ['BATCHER_TYPE'] == "VARIABLE":
    _trials = ("savedmodel", "graphdef", "netdef", "onnx", "libtorch")
else:
    _trials = ("custom", "savedmodel", "graphdef", "netdef", "plan", "onnx", "libtorch")

# Expand _trials with ensemble variants. custom/onnx/libtorch backends have no
# ensemble models in this test suite, so they only appear in their plain form.
ENSEMBLE_PREFIXES = ["simple_", "sequence_", "fan_"]

_expanded = []
for _trial in _trials:
    _expanded.append(_trial)
    if not any(skip in _trial for skip in ("custom", "onnx", "libtorch")):
        _expanded.extend(prefix + _trial for prefix in ENSEMBLE_PREFIXES)
_trials = tuple(_expanded)

_protocols = ("http", "grpc")
_max_sequence_idle_ms = 5000

# Set by the check_* helpers when a worker thread fails; re-raised later by
# SequenceBatcherTest.check_deferred_exception.
_check_exception = None
class SequenceBatcherTest(unittest.TestCase):
    def setUp(self):
        # Reset the deferred-exception slot so a failure recorded by a previous
        # test's check_* call does not leak into this test.
        global _check_exception
        _check_exception = None
    def check_deferred_exception(self):
        # Re-raise any exception that a check_sequence*/check_* call stashed in
        # the module-level _check_exception slot instead of raising directly.
        if _check_exception is not None:
            raise _check_exception
    def check_sequence(self, trial, model_name, input_dtype, correlation_id,
                       sequence_thresholds, values, expected_result,
                       protocol, batch_size=1, sequence_name="<unknown>"):
        """Perform sequence of inferences. The 'values' holds a list of
        tuples, one for each inference with format:

        (flag_str, value, (ls_ms, gt_ms), (pre_delay_ms, post_delay_ms)

        Any failure is stored in the module-level _check_exception slot (so a
        caller on another thread can surface it via check_deferred_exception)
        rather than raised directly.
        """
        global _check_exception

        # Input shape depends on the backend: plan (TensorRT) models in this
        # suite take a (1,1,1) tensor, every other backend takes a flat (1,).
        if (("savedmodel" in trial) or ("graphdef" in trial) or
            ("netdef" in trial) or ("custom" in trial) or
            ("onnx" in trial) or ("libtorch" in trial)):
            tensor_shape = (1,)
        elif "plan" in trial:
            tensor_shape = (1,1,1)
        else:
            self.assertFalse(True, "unknown trial type: " + trial)

        # Can only send the request exactly once since it is a
        # sequence model with state
        configs = []
        if protocol == "http":
            configs.append(("localhost:8000", ProtocolType.HTTP, False))
        if protocol == "grpc":
            configs.append(("localhost:8001", ProtocolType.GRPC, False))
        if protocol == "streaming":
            configs.append(("localhost:8001", ProtocolType.GRPC, True))
        self.assertEqual(len(configs), 1)

        for config in configs:
            ctx = InferContext(config[0], config[1], model_name,
                               correlation_id=correlation_id, streaming=config[2],
                               verbose=True)
            # Execute the sequence of inference...
            try:
                seq_start_ms = int(round(time.time() * 1000))

                for flag_str, value, thresholds, delay_ms in values:
                    # Optional delay before this request is sent.
                    if delay_ms is not None:
                        time.sleep(delay_ms[0] / 1000.0)

                    # Map "start"/"end" markers in flag_str onto sequence flags.
                    flags = InferRequestHeader.FLAG_NONE
                    if flag_str is not None:
                        if "start" in flag_str:
                            flags = flags | InferRequestHeader.FLAG_SEQUENCE_START
                        if "end" in flag_str:
                            flags = flags | InferRequestHeader.FLAG_SEQUENCE_END

                    input_list = list()
                    for b in range(batch_size):
                        if input_dtype == np.object:
                            # String I/O: build the int32 values first, then
                            # convert each element to its string form.
                            in0 = np.full(tensor_shape, value, dtype=np.int32)
                            in0n = np.array([str(x) for x in in0.reshape(in0.size)], dtype=object)
                            in0 = in0n.reshape(tensor_shape)
                        else:
                            in0 = np.full(tensor_shape, value, dtype=input_dtype)
                        input_list.append(in0)

                    start_ms = int(round(time.time() * 1000))

                    # libtorch models name their tensors INPUT__0/OUTPUT__0.
                    if "libtorch" not in trial:
                        results = ctx.run(
                            { "INPUT" : input_list }, { "OUTPUT" : InferContext.ResultFormat.RAW},
                            batch_size=batch_size, flags=flags)
                        OUTPUT = "OUTPUT"
                    else:
                        results = ctx.run(
                            { "INPUT__0" : input_list }, { "OUTPUT__0" : InferContext.ResultFormat.RAW},
                            batch_size=batch_size, flags=flags)
                        OUTPUT = "OUTPUT__0"

                    end_ms = int(round(time.time() * 1000))

                    self.assertEqual(len(results), 1)
                    self.assertTrue(OUTPUT in results)
                    result = results[OUTPUT][0][0]
                    print("{}: {}".format(sequence_name, result))

                    # Per-request latency thresholds: (lt_ms, gt_ms).
                    if thresholds is not None:
                        lt_ms = thresholds[0]
                        gt_ms = thresholds[1]
                        if lt_ms is not None:
                            self.assertTrue((end_ms - start_ms) < lt_ms,
                                            "expected less than " + str(lt_ms) +
                                            "ms response time, got " + str(end_ms - start_ms) + " ms")
                        if gt_ms is not None:
                            self.assertTrue((end_ms - start_ms) > gt_ms,
                                            "expected greater than " + str(gt_ms) +
                                            "ms response time, got " + str(end_ms - start_ms) + " ms")

                    # Optional delay after this request completes.
                    if delay_ms is not None:
                        time.sleep(delay_ms[1] / 1000.0)

                seq_end_ms = int(round(time.time() * 1000))

                # Only the final response's value is compared to expected_result.
                if input_dtype == np.object:
                    self.assertEqual(int(result), expected_result)
                else:
                    self.assertEqual(result, expected_result)

                # Whole-sequence latency thresholds: (lt_ms, gt_ms).
                if sequence_thresholds is not None:
                    lt_ms = sequence_thresholds[0]
                    gt_ms = sequence_thresholds[1]
                    if lt_ms is not None:
                        self.assertTrue((seq_end_ms - seq_start_ms) < lt_ms,
                                        "sequence expected less than " + str(lt_ms) +
                                        "ms response time, got " + str(seq_end_ms - seq_start_ms) + " ms")
                    if gt_ms is not None:
                        self.assertTrue((seq_end_ms - seq_start_ms) > gt_ms,
                                        "sequence expected greater than " + str(gt_ms) +
                                        "ms response time, got " + str(seq_end_ms - seq_start_ms) + " ms")
            except Exception as ex:
                # Deferred failure: stash for check_deferred_exception().
                _check_exception = ex
    def check_sequence_async(self, trial, model_name, input_dtype, correlation_id,
                             sequence_thresholds, values, expected_result,
                             protocol, batch_size=1, sequence_name="<unknown>"):
        """Perform sequence of inferences using async run. The 'values' holds
        a list of tuples, one for each inference with format:

        (flag_str, value, pre_delay_ms)

        All requests are issued asynchronously, then the responses are
        collected in send order. Failures are stored in the module-level
        _check_exception slot rather than raised directly.
        """
        global _check_exception

        # Input shape depends on the backend: plan (TensorRT) models in this
        # suite take a (1,1,1) tensor, every other backend takes a flat (1,).
        if (("savedmodel" in trial) or ("graphdef" in trial) or
            ("netdef" in trial) or ("custom" in trial) or
            ("onnx" in trial) or ("libtorch" in trial)):
            tensor_shape = (1,)
        elif "plan" in trial:
            tensor_shape = (1,1,1)
        else:
            self.assertFalse(True, "unknown trial type: " + trial)

        # Can only send the request exactly once since it is a
        # sequence model with state
        configs = []
        if protocol == "http":
            configs.append(("localhost:8000", ProtocolType.HTTP, False))
        if protocol == "grpc":
            configs.append(("localhost:8001", ProtocolType.GRPC, False))
        if protocol == "streaming":
            configs.append(("localhost:8001", ProtocolType.GRPC, True))
        self.assertEqual(len(configs), 1)

        for config in configs:
            ctx = InferContext(config[0], config[1], model_name,
                               correlation_id=correlation_id, streaming=config[2],
                               verbose=True)
            # Execute the sequence of inference...
            try:
                seq_start_ms = int(round(time.time() * 1000))
                result_ids = list()

                for flag_str, value, pre_delay_ms in values:
                    # Map "start"/"end" markers in flag_str onto sequence flags.
                    flags = InferRequestHeader.FLAG_NONE
                    if flag_str is not None:
                        if "start" in flag_str:
                            flags = flags | InferRequestHeader.FLAG_SEQUENCE_START
                        if "end" in flag_str:
                            flags = flags | InferRequestHeader.FLAG_SEQUENCE_END

                    input_list = list()
                    for b in range(batch_size):
                        if input_dtype == np.object:
                            # String I/O: build the int32 values first, then
                            # convert each element to its string form.
                            in0 = np.full(tensor_shape, value, dtype=np.int32)
                            in0n = np.array([str(x) for x in in0.reshape(in0.size)], dtype=object)
                            in0 = in0n.reshape(tensor_shape)
                        else:
                            in0 = np.full(tensor_shape, value, dtype=input_dtype)
                        input_list.append(in0)

                    # Optional delay before this request is issued.
                    if pre_delay_ms is not None:
                        time.sleep(pre_delay_ms / 1000.0)

                    # libtorch models name their tensors INPUT__0/OUTPUT__0.
                    if "libtorch" not in trial:
                        result_ids.append(ctx.async_run(
                            { "INPUT" : input_list }, { "OUTPUT" : InferContext.ResultFormat.RAW},
                            batch_size=batch_size, flags=flags))
                        OUTPUT = "OUTPUT"
                    else:
                        result_ids.append(ctx.async_run(
                            { "INPUT__0" : input_list }, { "OUTPUT__0" : InferContext.ResultFormat.RAW},
                            batch_size=batch_size, flags=flags))
                        OUTPUT = "OUTPUT__0"

                # Wait for the results in the order sent
                result = None
                for id in result_ids:
                    results = ctx.get_async_run_results(id, True)
                    self.assertEqual(len(results), 1)
                    self.assertTrue(OUTPUT in results)
                    result = results[OUTPUT][0][0]
                    print("{}: {}".format(sequence_name, result))

                seq_end_ms = int(round(time.time() * 1000))

                # Only the final response's value is compared to expected_result.
                if input_dtype == np.object:
                    self.assertEqual(int(result), expected_result)
                else:
                    self.assertEqual(result, expected_result)

                # Whole-sequence latency thresholds: (lt_ms, gt_ms).
                if sequence_thresholds is not None:
                    lt_ms = sequence_thresholds[0]
                    gt_ms = sequence_thresholds[1]
                    if lt_ms is not None:
                        self.assertTrue((seq_end_ms - seq_start_ms) < lt_ms,
                                        "sequence expected less than " + str(lt_ms) +
                                        "ms response time, got " + str(seq_end_ms - seq_start_ms) + " ms")
                    if gt_ms is not None:
                        self.assertTrue((seq_end_ms - seq_start_ms) > gt_ms,
                                        "sequence expected greater than " + str(gt_ms) +
                                        "ms response time, got " + str(seq_end_ms - seq_start_ms) + " ms")
            except Exception as ex:
                # Deferred failure: stash for check_deferred_exception().
                _check_exception = ex
def check_setup(self, model_name):
# Make sure test.sh set up the correct batcher settings
ctx = ServerStatusContext("localhost:8000", ProtocolType.HTTP, model_name, True)
ss = ctx.get_server_status()
self.assertEqual(len(ss.model_status), 1)
self.assertTrue(model_name in ss.model_status,
"expected status for model " + model_name)
# Skip the sequence batching check on ensemble model
if ss.model_status[model_name].config.platform != "ensemble":
bconfig = ss.model_status[model_name].config.sequence_batching
self.assertEqual(bconfig.max_sequence_idle_microseconds, _max_sequence_idle_ms * 1000) # 5 secs
def check_status(self, model_name, static_bs, exec_cnt, infer_cnt):
ctx = ServerStatusContext("localhost:8000", ProtocolType.HTTP, model_name, True)
ss = ctx.get_server_status()
self.assertEqual(len(ss.model_status), 1)
self.assertTrue(model_name in ss.model_status,
"expected status for model " + model_name)
vs = ss.model_status[model_name].version_status
self.assertEqual(len(vs), 1)
self.assertTrue(1 in vs, "expected status for version 1")
infer | |
# Source repository: NastyaArslanova/infoblox-netmri
from ..broker import Broker
class JobBroker(Broker):
controller = "jobs"
    def index(self, **kwargs):
        """Lists the available jobs. Any of the inputs listed may be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.

            **Inputs**

            | ``api version min:`` 2
            | ``api version max:`` 2.4
            | ``required:`` False
            | ``default:`` None

            :param JobID: The internal NetMRI identifier for this job.
            :type JobID: Integer

            | ``api version min:`` 2.5
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` None

            :param JobID: The internal NetMRI identifier for this job.
            :type JobID: Array of Integer

            | ``api version min:`` 2
            | ``api version max:`` 2.4
            | ``required:`` False
            | ``default:`` None

            :param Name: The name of this job, as entered when the job specification was defined, or when the job was executed from a script.
            :type Name: String

            | ``api version min:`` 2.5
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` None

            :param Name: The name of this job, as entered when the job specification was defined, or when the job was executed from a script.
            :type Name: Array of String

            | ``api version min:`` 2
            | ``api version max:`` 2.4
            | ``required:`` False
            | ``default:`` None

            :param StartTime: The time this job started execution.
            :type StartTime: DateTime

            | ``api version min:`` 2.5
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` None

            :param StartTime: The time this job started execution.
            :type StartTime: Array of DateTime

            | ``api version min:`` None
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` None

            :param methods: A list of job methods. The listed methods will be called on each job returned and included in the output. Available methods are: meta.
            :type methods: Array of String

            | ``api version min:`` None
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` None

            :param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: meta.
            :type include: Array of String

            | ``api version min:`` None
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` 0

            :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
            :type start: Integer

            | ``api version min:`` None
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` 1000

            :param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
            :type limit: Integer

            | ``api version min:`` None
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` id

            :param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, name, category, job_type, taskflow_revert, script_id, config_template_id, description, created_by, run_as, credential_source, approved_by, approval_note, provision_data, input_data, transactional_ind, status, last_status_at, started_at, completed_at, created_at, updated_at, approved_at, script_text, script_language, config_template_text, job_specification_id.
            :type sort: Array of String

            | ``api version min:`` None
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` asc

            :param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
            :type dir: Array of String

            | ``api version min:`` None
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` None

            :param select: The list of attributes to return for each Job. Valid values are id, name, category, job_type, taskflow_revert, script_id, config_template_id, description, created_by, run_as, credential_source, approved_by, approval_note, provision_data, input_data, transactional_ind, status, last_status_at, started_at, completed_at, created_at, updated_at, approved_at, script_text, script_language, config_template_text, job_specification_id. If empty or omitted, all attributes will be returned.
            :type select: Array

            | ``api version min:`` 2.8
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` None

            :param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
            :type goto_field: String

            | ``api version min:`` 2.8
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` None

            :param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
            :type goto_value: String

            **Outputs**

            | ``api version min:`` None
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` None

            :return jobs: An array of the Job objects that match the specified input criteria.
            :rtype jobs: Array of Job

            """

        return self.api_list_request(self._get_method_fullname("index"), kwargs)
def show(self, **kwargs):
"""Shows the details for the specified job.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: The internal NetMRI identifier for this job.
:type id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of job methods. The listed methods will be called on each job returned and included in the output. Available methods are: meta.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: meta.
:type include: Array of String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return job: The job identified by the specified id.
:rtype job: Job
"""
return self.api_request(self._get_method_fullname("show"), kwargs)
def script(self, **kwargs):
"""Downloads the script that ran on each device in a job.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: The job id to download the script file for.
:type id: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return script: The script file contents ran on each device in a job. It will be presented as type "application/octet-stream".
:rtype script: String
"""
return self.api_mixed_request(self._get_method_fullname("script"), kwargs)
def issues(self, **kwargs):
"""List any issues associated with the specified job.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param JobID: The id of the job to list.
:type JobID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return as the first record.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The maximum number of records to return.
:type limit: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return issue_details: An array of the IssueDetail objects that match the specified input criteria.
:rtype issue_details: Array of IssueDetail
"""
return self.api_list_request(self._get_method_fullname("issues"), kwargs)
def job_files(self, **kwargs):
"""Lists/downloads common files for a job. If no filename is given, a list of files for the job will be returned. If a filename is passed, and it exists, it will be downloaded as type "application/octet-stream".
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: The job id to list files for.
:type id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param filename: An optional filename to download.
:type filename: String
**Outputs**
| ``api version min:`` None
| ``api | |
request response body.
'''
if self.initialize_body and key in self.initialize_body:
return self.initialize_body[key]
return None
def get_threads(self):
if self.threads is None:
self.request_threads()
return self.threads
def get_thread_id(self, threadIndex=0):
'''Utility function to get the first thread ID in the thread list.
If the thread list is empty, then fetch the threads.
'''
if self.threads is None:
self.request_threads()
if self.threads and threadIndex < len(self.threads):
return self.threads[threadIndex]['id']
return None
def get_stackFrame(self, frameIndex=0, threadId=None):
'''Get a single "StackFrame" object from a "stackTrace" request and
return the "StackFrame as a python dictionary, or None on failure
'''
if threadId is None:
threadId = self.get_thread_id()
if threadId is None:
print('invalid threadId')
return None
response = self.request_stackTrace(threadId, startFrame=frameIndex,
levels=1)
if response:
return response['body']['stackFrames'][0]
print('invalid response')
return None
def get_completions(self, text):
response = self.request_completions(text)
return response['body']['targets']
def get_scope_variables(self, scope_name, frameIndex=0, threadId=None):
stackFrame = self.get_stackFrame(frameIndex=frameIndex,
threadId=threadId)
if stackFrame is None:
return []
frameId = stackFrame['id']
if frameId in self.frame_scopes:
frame_scopes = self.frame_scopes[frameId]
else:
scopes_response = self.request_scopes(frameId)
frame_scopes = scopes_response['body']['scopes']
self.frame_scopes[frameId] = frame_scopes
for scope in frame_scopes:
if scope['name'] == scope_name:
varRef = scope['variablesReference']
variables_response = self.request_variables(varRef)
if variables_response:
if 'body' in variables_response:
body = variables_response['body']
if 'variables' in body:
vars = body['variables']
return vars
return []
def get_global_variables(self, frameIndex=0, threadId=None):
return self.get_scope_variables('Globals', frameIndex=frameIndex,
threadId=threadId)
def get_local_variables(self, frameIndex=0, threadId=None):
return self.get_scope_variables('Locals', frameIndex=frameIndex,
threadId=threadId)
def get_local_variable(self, name, frameIndex=0, threadId=None):
locals = self.get_local_variables(frameIndex=frameIndex,
threadId=threadId)
for local in locals:
if 'name' in local and local['name'] == name:
return local
return None
def get_local_variable_value(self, name, frameIndex=0, threadId=None):
variable = self.get_local_variable(name, frameIndex=frameIndex,
threadId=threadId)
if variable and 'value' in variable:
return variable['value']
return None
def replay_packets(self, replay_file_path):
f = open(replay_file_path, 'r')
mode = 'invalid'
set_sequence = False
command_dict = None
while mode != 'eof':
if mode == 'invalid':
line = f.readline()
if line.startswith('to adapter:'):
mode = 'send'
elif line.startswith('from adapter:'):
mode = 'recv'
elif mode == 'send':
command_dict = read_packet(f)
# Skip the end of line that follows the JSON
f.readline()
if command_dict is None:
raise ValueError('decode packet failed from replay file')
print('Sending:')
pprint.PrettyPrinter(indent=2).pprint(command_dict)
# raw_input('Press ENTER to send:')
self.send_packet(command_dict, set_sequence)
mode = 'invalid'
elif mode == 'recv':
print('Replay response:')
replay_response = read_packet(f)
# Skip the end of line that follows the JSON
f.readline()
pprint.PrettyPrinter(indent=2).pprint(replay_response)
actual_response = self.recv_packet()
if actual_response:
type = actual_response['type']
print('Actual response:')
if type == 'response':
self.validate_response(command_dict, actual_response)
pprint.PrettyPrinter(indent=2).pprint(actual_response)
else:
print("error: didn't get a valid response")
mode = 'invalid'
def request_attach(self, program=None, pid=None, waitFor=None, trace=None,
initCommands=None, preRunCommands=None,
stopCommands=None, exitCommands=None,
attachCommands=None, terminateCommands=None,
coreFile=None):
args_dict = {}
if pid is not None:
args_dict['pid'] = pid
if program is not None:
args_dict['program'] = program
if waitFor is not None:
args_dict['waitFor'] = waitFor
if trace:
args_dict['trace'] = trace
args_dict['initCommands'] = self.init_commands
if initCommands:
args_dict['initCommands'].extend(initCommands)
if preRunCommands:
args_dict['preRunCommands'] = preRunCommands
if stopCommands:
args_dict['stopCommands'] = stopCommands
if exitCommands:
args_dict['exitCommands'] = exitCommands
if terminateCommands:
args_dict['terminateCommands'] = terminateCommands
if attachCommands:
args_dict['attachCommands'] = attachCommands
if coreFile:
args_dict['coreFile'] = coreFile
command_dict = {
'command': 'attach',
'type': 'request',
'arguments': args_dict
}
return self.send_recv(command_dict)
def request_configurationDone(self):
command_dict = {
'command': 'configurationDone',
'type': 'request',
'arguments': {}
}
response = self.send_recv(command_dict)
if response:
self.configuration_done_sent = True
return response
def _process_stopped(self):
self.threads = None
self.frame_scopes = {}
def request_continue(self, threadId=None):
if self.exit_status is not None:
raise ValueError('request_continue called after process exited')
# If we have launched or attached, then the first continue is done by
# sending the 'configurationDone' request
if not self.configuration_done_sent:
return self.request_configurationDone()
args_dict = {}
if threadId is None:
threadId = self.get_thread_id()
args_dict['threadId'] = threadId
command_dict = {
'command': 'continue',
'type': 'request',
'arguments': args_dict
}
response = self.send_recv(command_dict)
# Caller must still call wait_for_stopped.
return response
def request_disconnect(self, terminateDebuggee=None):
args_dict = {}
if terminateDebuggee is not None:
if terminateDebuggee:
args_dict['terminateDebuggee'] = True
else:
args_dict['terminateDebuggee'] = False
command_dict = {
'command': 'disconnect',
'type': 'request',
'arguments': args_dict
}
return self.send_recv(command_dict)
def request_evaluate(self, expression, frameIndex=0, threadId=None):
stackFrame = self.get_stackFrame(frameIndex=frameIndex,
threadId=threadId)
if stackFrame is None:
return []
args_dict = {
'expression': expression,
'frameId': stackFrame['id'],
}
command_dict = {
'command': 'evaluate',
'type': 'request',
'arguments': args_dict
}
return self.send_recv(command_dict)
def request_initialize(self):
command_dict = {
'command': 'initialize',
'type': 'request',
'arguments': {
'adapterID': 'lldb-native',
'clientID': 'vscode',
'columnsStartAt1': True,
'linesStartAt1': True,
'locale': 'en-us',
'pathFormat': 'path',
'supportsRunInTerminalRequest': True,
'supportsVariablePaging': True,
'supportsVariableType': True
}
}
response = self.send_recv(command_dict)
if response:
if 'body' in response:
self.initialize_body = response['body']
return response
def request_launch(self, program, args=None, cwd=None, env=None,
stopOnEntry=False, disableASLR=True,
disableSTDIO=False, shellExpandArguments=False,
trace=False, initCommands=None, preRunCommands=None,
stopCommands=None, exitCommands=None,
terminateCommands=None ,sourcePath=None,
debuggerRoot=None, launchCommands=None, sourceMap=None):
args_dict = {
'program': program
}
if args:
args_dict['args'] = args
if cwd:
args_dict['cwd'] = cwd
if env:
args_dict['env'] = env
if stopOnEntry:
args_dict['stopOnEntry'] = stopOnEntry
if disableASLR:
args_dict['disableASLR'] = disableASLR
if disableSTDIO:
args_dict['disableSTDIO'] = disableSTDIO
if shellExpandArguments:
args_dict['shellExpandArguments'] = shellExpandArguments
if trace:
args_dict['trace'] = trace
args_dict['initCommands'] = self.init_commands
if initCommands:
args_dict['initCommands'].extend(initCommands)
if preRunCommands:
args_dict['preRunCommands'] = preRunCommands
if stopCommands:
args_dict['stopCommands'] = stopCommands
if exitCommands:
args_dict['exitCommands'] = exitCommands
if terminateCommands:
args_dict['terminateCommands'] = terminateCommands
if sourcePath:
args_dict['sourcePath'] = sourcePath
if debuggerRoot:
args_dict['debuggerRoot'] = debuggerRoot
if launchCommands:
args_dict['launchCommands'] = launchCommands
if sourceMap:
args_dict['sourceMap'] = sourceMap
command_dict = {
'command': 'launch',
'type': 'request',
'arguments': args_dict
}
response = self.send_recv(command_dict)
# Wait for a 'process' and 'initialized' event in any order
self.wait_for_event(filter=['process', 'initialized'])
self.wait_for_event(filter=['process', 'initialized'])
return response
def request_next(self, threadId):
if self.exit_status is not None:
raise ValueError('request_continue called after process exited')
args_dict = {'threadId': threadId}
command_dict = {
'command': 'next',
'type': 'request',
'arguments': args_dict
}
return self.send_recv(command_dict)
def request_stepIn(self, threadId):
if self.exit_status is not None:
raise ValueError('request_continue called after process exited')
args_dict = {'threadId': threadId}
command_dict = {
'command': 'stepIn',
'type': 'request',
'arguments': args_dict
}
return self.send_recv(command_dict)
def request_stepOut(self, threadId):
if self.exit_status is not None:
raise ValueError('request_continue called after process exited')
args_dict = {'threadId': threadId}
command_dict = {
'command': 'stepOut',
'type': 'request',
'arguments': args_dict
}
return self.send_recv(command_dict)
def request_pause(self, threadId=None):
if self.exit_status is not None:
raise ValueError('request_continue called after process exited')
if threadId is None:
threadId = self.get_thread_id()
args_dict = {'threadId': threadId}
command_dict = {
'command': 'pause',
'type': 'request',
'arguments': args_dict
}
return self.send_recv(command_dict)
def request_scopes(self, frameId):
args_dict = {'frameId': frameId}
command_dict = {
'command': 'scopes',
'type': 'request',
'arguments': args_dict
}
return self.send_recv(command_dict)
def request_setBreakpoints(self, file_path, line_array, condition=None,
hitCondition=None):
(dir, base) = os.path.split(file_path)
breakpoints = []
for line in line_array:
bp = {'line': line}
if condition is not None:
bp['condition'] = condition
if hitCondition is not None:
bp['hitCondition'] = hitCondition
breakpoints.append(bp)
source_dict = {
'name': base,
'path': file_path
}
args_dict = {
'source': source_dict,
'breakpoints': breakpoints,
'lines': '%s' % (line_array),
'sourceModified': False,
}
command_dict = {
'command': 'setBreakpoints',
'type': 'request',
'arguments': args_dict
}
return self.send_recv(command_dict)
def request_setExceptionBreakpoints(self, filters):
args_dict = {'filters': filters}
command_dict = {
'command': 'setExceptionBreakpoints',
'type': 'request',
'arguments': args_dict
}
return self.send_recv(command_dict)
def request_setFunctionBreakpoints(self, names, condition=None,
hitCondition=None):
breakpoints = []
for name in names:
bp = {'name': name}
if condition is not None:
bp['condition'] = condition
if hitCondition is not None:
bp['hitCondition'] = hitCondition
breakpoints.append(bp)
args_dict = {'breakpoints': breakpoints}
command_dict = {
'command': 'setFunctionBreakpoints',
'type': 'request',
'arguments': args_dict
}
return self.send_recv(command_dict)
def request_getCompileUnits(self, moduleId):
args_dict = {'moduleId': moduleId}
command_dict = {
'command': 'getCompileUnits',
'type': 'request',
'arguments': args_dict
}
response = self.send_recv(command_dict)
return response
def request_completions(self, text):
args_dict = {
'text': text,
'column': len(text)
}
command_dict = {
'command': 'completions',
'type': 'request',
'arguments': args_dict
}
return self.send_recv(command_dict)
def request_stackTrace(self, threadId=None, startFrame=None, levels=None,
dump=False):
if threadId is None:
threadId = self.get_thread_id()
args_dict = {'threadId': threadId}
if startFrame is not None:
args_dict['startFrame'] = startFrame
if levels is not None:
args_dict['levels'] = levels
command_dict = {
'command': 'stackTrace',
'type': 'request',
'arguments': args_dict
}
response = self.send_recv(command_dict)
if dump:
for (idx, frame) in enumerate(response['body']['stackFrames']):
name = frame['name']
if 'line' in frame and 'source' in frame:
source = frame['source']
if 'sourceReference' not in source:
if 'name' in source:
source_name = source['name']
line = frame['line']
print("[%3u] %s @ %s:%u" % (idx, name, source_name,
line))
continue
print("[%3u] %s" % (idx, name))
return response
def request_threads(self):
'''Request a list of all threads and combine any information from any
"stopped" events | |
# From the hugovk/w3lib repository: tests for w3lib.url (test_url.py).
import os
import unittest
from urllib.parse import urlparse
import pytest
from w3lib.url import (
add_or_replace_parameter,
add_or_replace_parameters,
any_to_uri,
canonicalize_url,
file_uri_to_path,
is_url,
parse_data_uri,
parse_url,
path_to_file_uri,
safe_download_url,
safe_url_string,
url_query_parameter,
url_query_cleaner,
)
class UrlTests(unittest.TestCase):
def test_safe_url_string(self):
# Motoko Kusanagi (Cyborg from Ghost in the Shell)
motoko = "\u8349\u8599 \u7d20\u5b50"
self.assertEqual(
safe_url_string(motoko), # note the %20 for space
"%E8%8D%89%E8%96%99%20%E7%B4%A0%E5%AD%90",
)
self.assertEqual(
safe_url_string(motoko), safe_url_string(safe_url_string(motoko))
)
self.assertEqual(safe_url_string("©"), "%C2%A9") # copyright symbol
# page-encoding does not affect URL path
self.assertEqual(safe_url_string("©", "iso-8859-1"), "%C2%A9")
# path_encoding does
self.assertEqual(safe_url_string("©", path_encoding="iso-8859-1"), "%A9")
self.assertEqual(
safe_url_string("http://www.example.org/"), "http://www.example.org/"
)
alessi = "/ecommerce/oggetto/Te \xf2/tea-strainer/1273"
self.assertEqual(
safe_url_string(alessi), "/ecommerce/oggetto/Te%20%C3%B2/tea-strainer/1273"
)
self.assertEqual(
safe_url_string(
"http://www.example.com/test?p(29)url(http://www.another.net/page)"
),
"http://www.example.com/test?p(29)url(http://www.another.net/page)",
)
self.assertEqual(
safe_url_string(
"http://www.example.com/Brochures_&_Paint_Cards&PageSize=200"
),
"http://www.example.com/Brochures_&_Paint_Cards&PageSize=200",
)
# page-encoding does not affect URL path
# we still end up UTF-8 encoding characters before percent-escaping
safeurl = safe_url_string("http://www.example.com/£")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%A3")
safeurl = safe_url_string("http://www.example.com/£", encoding="utf-8")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%A3")
safeurl = safe_url_string("http://www.example.com/£", encoding="latin-1")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%A3")
safeurl = safe_url_string("http://www.example.com/£", path_encoding="latin-1")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%A3")
self.assertTrue(isinstance(safe_url_string(b"http://example.com/"), str))
def test_safe_url_string_remove_ascii_tab_and_newlines(self):
self.assertEqual(
safe_url_string("http://example.com/test\n.html"),
"http://example.com/test.html",
)
self.assertEqual(
safe_url_string("http://example.com/test\t.html"),
"http://example.com/test.html",
)
self.assertEqual(
safe_url_string("http://example.com/test\r.html"),
"http://example.com/test.html",
)
self.assertEqual(
safe_url_string("http://example.com/test\r.html\n"),
"http://example.com/test.html",
)
self.assertEqual(
safe_url_string("http://example.com/test\r\n.html\t"),
"http://example.com/test.html",
)
self.assertEqual(
safe_url_string("http://example.com/test\a\n.html"),
"http://example.com/test%07.html",
)
def test_safe_url_string_unsafe_chars(self):
safeurl = safe_url_string(
r"http://localhost:8001/unwise{,},|,\,^,[,],`?|=[]&[]=|"
)
self.assertEqual(
safeurl, r"http://localhost:8001/unwise%7B,%7D,|,%5C,%5E,[,],%60?|=[]&[]=|"
)
def test_safe_url_string_quote_path(self):
safeurl = safe_url_string('http://google.com/"hello"', quote_path=True)
self.assertEqual(safeurl, "http://google.com/%22hello%22")
safeurl = safe_url_string('http://google.com/"hello"', quote_path=False)
self.assertEqual(safeurl, 'http://google.com/"hello"')
safeurl = safe_url_string('http://google.com/"hello"')
self.assertEqual(safeurl, "http://google.com/%22hello%22")
def test_safe_url_string_with_query(self):
safeurl = safe_url_string("http://www.example.com/£?unit=µ")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%A3?unit=%C2%B5")
safeurl = safe_url_string("http://www.example.com/£?unit=µ", encoding="utf-8")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%A3?unit=%C2%B5")
safeurl = safe_url_string("http://www.example.com/£?unit=µ", encoding="latin-1")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%A3?unit=%B5")
safeurl = safe_url_string(
"http://www.example.com/£?unit=µ", path_encoding="latin-1"
)
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%A3?unit=%C2%B5")
safeurl = safe_url_string(
"http://www.example.com/£?unit=µ",
encoding="latin-1",
path_encoding="latin-1",
)
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%A3?unit=%B5")
def test_safe_url_string_misc(self):
# mixing Unicode and percent-escaped sequences
safeurl = safe_url_string("http://www.example.com/£?unit=%C2%B5")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%A3?unit=%C2%B5")
safeurl = safe_url_string("http://www.example.com/%C2%A3?unit=µ")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%A3?unit=%C2%B5")
def test_safe_url_string_bytes_input(self):
safeurl = safe_url_string(b"http://www.example.com/")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/")
# bytes input is assumed to be UTF-8
safeurl = safe_url_string(b"http://www.example.com/\xc2\xb5")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%B5")
# page-encoding encoded bytes still end up as UTF-8 sequences in path
safeurl = safe_url_string(b"http://www.example.com/\xb5", encoding="latin1")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%B5")
safeurl = safe_url_string(
b"http://www.example.com/\xa3?unit=\xb5", encoding="latin1"
)
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%A3?unit=%B5")
def test_safe_url_string_bytes_input_nonutf8(self):
# latin1
safeurl = safe_url_string(b"http://www.example.com/\xa3?unit=\xb5")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%A3?unit=%B5")
# cp1251
# >>> 'Россия'.encode('cp1251')
# '\xd0\xee\xf1\xf1\xe8\xff'
safeurl = safe_url_string(
b"http://www.example.com/country/\xd0\xee\xf1\xf1\xe8\xff"
)
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/country/%D0%EE%F1%F1%E8%FF")
    def test_safe_url_idna(self):
        # Hosts are IDNA-encoded (punycode) while paths/queries are
        # percent-escaped; the result must be stable under re-application.
        # adapted from:
        # https://ssl.icu-project.org/icu-bin/idnbrowser
        # http://unicode.org/faq/idn.html
        # + various others
        websites = (
            (
                "http://www.färgbolaget.nu/färgbolaget",
                "http://www.xn--frgbolaget-q5a.nu/f%C3%A4rgbolaget",
            ),
            (
                "http://www.räksmörgås.se/?räksmörgås=yes",
                "http://www.xn--rksmrgs-5wao1o.se/?r%C3%A4ksm%C3%B6rg%C3%A5s=yes",
            ),
            (
                "http://www.brændendekærlighed.com/brændende/kærlighed",
                "http://www.xn--brndendekrlighed-vobh.com/br%C3%A6ndende/k%C3%A6rlighed",
            ),
            ("http://www.예비교사.com", "http://www.xn--9d0bm53a3xbzui.com"),
            ("http://理容ナカムラ.com", "http://xn--lck1c3crb1723bpq4a.com"),
            ("http://あーるいん.com", "http://xn--l8je6s7a45b.com"),
            # --- real websites ---
            # in practice, this redirect (301) to http://www.buecher.de/?q=b%C3%BCcher
            (
                "http://www.bücher.de/?q=bücher",
                "http://www.xn--bcher-kva.de/?q=b%C3%BCcher",
            ),
            # Japanese
            (
                "http://はじめよう.みんな/?query=サ&maxResults=5",
                "http://xn--p8j9a0d9c9a.xn--q9jyb4c/?query=%E3%82%B5&maxResults=5",
            ),
            # Russian
            ("http://кто.рф/", "http://xn--j1ail.xn--p1ai/"),
            (
                "http://кто.рф/index.php?domain=Что",
                "http://xn--j1ail.xn--p1ai/index.php?domain=%D0%A7%D1%82%D0%BE",
            ),
            # Korean
            ("http://내도메인.한국/", "http://xn--220b31d95hq8o.xn--3e0b707e/"),
            (
                "http://맨체스터시티축구단.한국/",
                "http://xn--2e0b17htvgtvj9haj53ccob62ni8d.xn--3e0b707e/",
            ),
            # Arabic
            ("http://nic.شبكة", "http://nic.xn--ngbc5azd"),
            # Chinese
            ("https://www.贷款.在线", "https://www.xn--0kwr83e.xn--3ds443g"),
            ("https://www2.xn--0kwr83e.在线", "https://www2.xn--0kwr83e.xn--3ds443g"),
            ("https://www3.贷款.xn--3ds443g", "https://www3.xn--0kwr83e.xn--3ds443g"),
        )
        for idn_input, safe_result in websites:
            safeurl = safe_url_string(idn_input)
            self.assertEqual(safeurl, safe_result)
        # make sure the safe URL is unchanged when made safe a 2nd time
        for _, safe_result in websites:
            safeurl = safe_url_string(safe_result)
            self.assertEqual(safeurl, safe_result)
def test_safe_url_idna_encoding_failure(self):
# missing DNS label
self.assertEqual(
safe_url_string("http://.example.com/résumé?q=résumé"),
"http://.example.com/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9",
)
# DNS label too long
self.assertEqual(
safe_url_string(
"http://www.{label}.com/résumé?q=résumé".format(label="example" * 11)
),
"http://www.{label}.com/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9".format(
label="example" * 11
),
)
def test_safe_url_port_number(self):
self.assertEqual(
safe_url_string("http://www.example.com:80/résumé?q=résumé"),
"http://www.example.com:80/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9",
)
self.assertEqual(
safe_url_string("http://www.example.com:/résumé?q=résumé"),
"http://www.example.com/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9",
)
def test_safe_url_string_preserve_nonfragment_hash(self):
# don't decode `%23` to `#`
self.assertEqual(
safe_url_string("http://www.example.com/path/to/%23/foo/bar"),
"http://www.example.com/path/to/%23/foo/bar",
)
self.assertEqual(
safe_url_string("http://www.example.com/path/to/%23/foo/bar#frag"),
"http://www.example.com/path/to/%23/foo/bar#frag",
)
self.assertEqual(
safe_url_string(
"http://www.example.com/path/to/%23/foo/bar?url=http%3A%2F%2Fwww.example.com%2Fpath%2Fto%2F%23%2Fbar%2Ffoo"
),
"http://www.example.com/path/to/%23/foo/bar?url=http%3A%2F%2Fwww.example.com%2Fpath%2Fto%2F%23%2Fbar%2Ffoo",
)
self.assertEqual(
safe_url_string(
"http://www.example.com/path/to/%23/foo/bar?url=http%3A%2F%2Fwww.example.com%2F%2Fpath%2Fto%2F%23%2Fbar%2Ffoo#frag"
),
"http://www.example.com/path/to/%23/foo/bar?url=http%3A%2F%2Fwww.example.com%2F%2Fpath%2Fto%2F%23%2Fbar%2Ffoo#frag",
)
def test_safe_download_url(self):
self.assertEqual(
safe_download_url("http://www.example.org"), "http://www.example.org/"
)
self.assertEqual(
safe_download_url("http://www.example.org/../"), "http://www.example.org/"
)
self.assertEqual(
safe_download_url("http://www.example.org/../../images/../image"),
"http://www.example.org/image",
)
self.assertEqual(
safe_download_url("http://www.example.org/dir/"),
"http://www.example.org/dir/",
)
self.assertEqual(
safe_download_url(b"http://www.example.org/dir/"),
"http://www.example.org/dir/",
)
# Encoding related tests
self.assertEqual(
safe_download_url(
b"http://www.example.org?\xa3",
encoding="latin-1",
path_encoding="latin-1",
),
"http://www.example.org/?%A3",
)
self.assertEqual(
safe_download_url(
b"http://www.example.org?\xc2\xa3",
encoding="utf-8",
path_encoding="utf-8",
),
"http://www.example.org/?%C2%A3",
)
self.assertEqual(
safe_download_url(
b"http://www.example.org/\xc2\xa3?\xc2\xa3",
encoding="utf-8",
path_encoding="latin-1",
),
"http://www.example.org/%A3?%C2%A3",
)
def test_is_url(self):
self.assertTrue(is_url("http://www.example.org"))
self.assertTrue(is_url("https://www.example.org"))
self.assertTrue(is_url("file:///some/path"))
self.assertFalse(is_url("foo://bar"))
self.assertFalse(is_url("foo--bar"))
def test_url_query_parameter(self):
self.assertEqual(
url_query_parameter("product.html?id=200&foo=bar", "id"), "200"
)
self.assertEqual(
url_query_parameter("product.html?id=200&foo=bar", "notthere", "mydefault"),
"mydefault",
)
self.assertEqual(url_query_parameter("product.html?id=", "id"), None)
self.assertEqual(
url_query_parameter("product.html?id=", "id", keep_blank_values=1), ""
)
    def test_url_query_parameter_2(self):
        """
        This problem was seen several times in the feeds. Sometime affiliate URLs contains
        nested encoded affiliate URL with direct URL as parameters. For example:
        aff_url1 = 'http://www.tkqlhce.com/click-2590032-10294381?url=http%3A%2F%2Fwww.argos.co.uk%2Fwebapp%2Fwcs%2Fstores%2Fservlet%2FArgosCreateReferral%3FstoreId%3D10001%26langId%3D-1%26referrer%3DCOJUN%26params%3Dadref%253DGarden+and+DIY-%3EGarden+furniture-%3EChildren%26%2339%3Bs+garden+furniture%26referredURL%3Dhttp%3A%2F%2Fwww.argos.co.uk%2Fwebapp%2Fwcs%2Fstores%2Fservlet%2FProductDisplay%253FstoreId%253D10001%2526catalogId%253D1500001501%2526productId%253D1500357023%2526langId%253D-1'
        the typical code to extract needed URL from it is:
        aff_url2 = url_query_parameter(aff_url1, 'url')
        after this aff2_url is:
        'http://www.argos.co.uk/webapp/wcs/stores/servlet/ArgosCreateReferral?storeId=10001&langId=-1&referrer=COJUN¶ms=adref%3DGarden and DIY->Garden furniture->Children's gardenfurniture&referredURL=http://www.argos.co.uk/webapp/wcs/stores/servlet/ProductDisplay%3FstoreId%3D10001%26catalogId%3D1500001501%26productId%3D1500357023%26langId%3D-1'
        the direct URL extraction is
        url = url_query_parameter(aff_url2, 'referredURL')
        but this will not work, because aff_url2 contains ' (comma sign encoded in the feed)
        and the URL extraction will fail, current workaround was made in the spider,
        just a replace for ' to %27
        """
        # Everything below the early return is intentionally unreachable:
        # it documents the desired behavior for when the FIXME is resolved.
        return  # FIXME: this test should pass but currently doesn't
        # correct case
        aff_url1 = "http://www.anrdoezrs.net/click-2590032-10294381?url=http%3A%2F%2Fwww.argos.co.uk%2Fwebapp%2Fwcs%2Fstores%2Fservlet%2FArgosCreateReferral%3FstoreId%3D10001%26langId%3D-1%26referrer%3DCOJUN%26params%3Dadref%253DGarden+and+DIY-%3EGarden+furniture-%3EGarden+table+and+chair+sets%26referredURL%3Dhttp%3A%2F%2Fwww.argos.co.uk%2Fwebapp%2Fwcs%2Fstores%2Fservlet%2FProductDisplay%253FstoreId%253D10001%2526catalogId%253D1500001501%2526productId%253D1500357199%2526langId%253D-1"
        aff_url2 = url_query_parameter(aff_url1, "url")
        self.assertEqual(
            aff_url2,
            "http://www.argos.co.uk/webapp/wcs/stores/servlet/ArgosCreateReferral?storeId=10001&langId=-1&referrer=COJUN¶ms=adref%3DGarden and DIY->Garden furniture->Garden table and chair sets&referredURL=http://www.argos.co.uk/webapp/wcs/stores/servlet/ProductDisplay%3FstoreId%3D10001%26catalogId%3D1500001501%26productId%3D1500357199%26langId%3D-1",
        )
        prod_url = url_query_parameter(aff_url2, "referredURL")
        self.assertEqual(
            prod_url,
            "http://www.argos.co.uk/webapp/wcs/stores/servlet/ProductDisplay?storeId=10001&catalogId=1500001501&productId=1500357199&langId=-1",
        )
        # weird case
        aff_url1 = "http://www.tkqlhce.com/click-2590032-10294381?url=http%3A%2F%2Fwww.argos.co.uk%2Fwebapp%2Fwcs%2Fstores%2Fservlet%2FArgosCreateReferral%3FstoreId%3D10001%26langId%3D-1%26referrer%3DCOJUN%26params%3Dadref%253DGarden+and+DIY-%3EGarden+furniture-%3EChildren%26%2339%3Bs+garden+furniture%26referredURL%3Dhttp%3A%2F%2Fwww.argos.co.uk%2Fwebapp%2Fwcs%2Fstores%2Fservlet%2FProductDisplay%253FstoreId%253D10001%2526catalogId%253D1500001501%2526productId%253D1500357023%2526langId%253D-1"
        aff_url2 = url_query_parameter(aff_url1, "url")
        self.assertEqual(
            aff_url2,
            "http://www.argos.co.uk/webapp/wcs/stores/servlet/ArgosCreateReferral?storeId=10001&langId=-1&referrer=COJUN¶ms=adref%3DGarden and DIY->Garden furniture->Children's garden furniture&referredURL=http://www.argos.co.uk/webapp/wcs/stores/servlet/ProductDisplay%3FstoreId%3D10001%26catalogId%3D1500001501%26productId%3D1500357023%26langId%3D-1",
        )
        prod_url = url_query_parameter(aff_url2, "referredURL")
        # fails, prod_url is None now
        self.assertEqual(
            prod_url,
            "http://www.argos.co.uk/webapp/wcs/stores/servlet/ProductDisplay?storeId=10001&catalogId=1500001501&productId=1500357023&langId=-1",
        )
def test_add_or_replace_parameter(self):
url = "http://domain/test"
self.assertEqual(
add_or_replace_parameter(url, "arg", "v"), "http://domain/test?arg=v"
)
url = "http://domain/test?arg1=v1&arg2=v2&arg3=v3"
self.assertEqual(
add_or_replace_parameter(url, "arg4", "v4"),
"http://domain/test?arg1=v1&arg2=v2&arg3=v3&arg4=v4",
)
self.assertEqual(
add_or_replace_parameter(url, "arg3", "nv3"),
"http://domain/test?arg1=v1&arg2=v2&arg3=nv3",
)
self.assertEqual(
add_or_replace_parameter(
"http://domain/moreInfo.asp?prodID=", "prodID", "20"
),
"http://domain/moreInfo.asp?prodID=20",
)
url = "http://rmc-offers.co.uk/productlist.asp?BCat=2%2C60&CatID=60"
self.assertEqual(
add_or_replace_parameter(url, "BCat", "newvalue"),
"http://rmc-offers.co.uk/productlist.asp?BCat=newvalue&CatID=60",
)
url = "http://rmc-offers.co.uk/productlist.asp?BCat=2,60&CatID=60"
self.assertEqual(
add_or_replace_parameter(url, "BCat", "newvalue"),
"http://rmc-offers.co.uk/productlist.asp?BCat=newvalue&CatID=60",
)
url = "http://rmc-offers.co.uk/productlist.asp?"
self.assertEqual(
add_or_replace_parameter(url, "BCat", "newvalue"),
"http://rmc-offers.co.uk/productlist.asp?BCat=newvalue",
)
url = "http://example.com/?version=1&pageurl=http%3A%2F%2Fwww.example.com%2Ftest%2F%23fragment%3Dy¶m2=value2"
self.assertEqual(
add_or_replace_parameter(url, "version", "2"),
"http://example.com/?version=2&pageurl=http%3A%2F%2Fwww.example.com%2Ftest%2F%23fragment%3Dy¶m2=value2",
)
self.assertEqual(
add_or_replace_parameter(url, "pageurl", "test"),
"http://example.com/?version=1&pageurl=test¶m2=value2",
)
url = "http://domain/test?arg1=v1&arg2=v2&arg1=v3"
self.assertEqual(
add_or_replace_parameter(url, "arg4", "v4"),
"http://domain/test?arg1=v1&arg2=v2&arg1=v3&arg4=v4",
)
self.assertEqual(
add_or_replace_parameter(url, "arg1", "v3"),
"http://domain/test?arg1=v3&arg2=v2",
)
@pytest.mark.xfail(reason="https://github.com/scrapy/w3lib/issues/164")
def test_add_or_replace_parameter_fail(self):
self.assertEqual(
add_or_replace_parameter(
"http://domain/test?arg1=v1;arg2=v2", "arg1", "v3"
),
"http://domain/test?arg1=v3&arg2=v2",
)
def test_add_or_replace_parameters(self):
url = "http://domain/test"
self.assertEqual(
add_or_replace_parameters(url, {"arg": "v"}), "http://domain/test?arg=v"
)
url = "http://domain/test?arg1=v1&arg2=v2&arg3=v3"
self.assertEqual(
add_or_replace_parameters(url, {"arg4": "v4"}),
"http://domain/test?arg1=v1&arg2=v2&arg3=v3&arg4=v4",
)
self.assertEqual(
add_or_replace_parameters(url, {"arg4": "v4", "arg3": "v3new"}),
"http://domain/test?arg1=v1&arg2=v2&arg3=v3new&arg4=v4",
)
url = "http://domain/test?arg1=v1&arg2=v2&arg1=v3"
self.assertEqual(
add_or_replace_parameters(url, {"arg4": "v4"}),
"http://domain/test?arg1=v1&arg2=v2&arg1=v3&arg4=v4",
)
self.assertEqual(
add_or_replace_parameters(url, {"arg1": "v3"}),
"http://domain/test?arg1=v3&arg2=v2",
)
def test_add_or_replace_parameters_does_not_change_input_param(self):
url = "http://domain/test?arg=original"
input_param = {"arg": "value"}
add_or_replace_parameters(url, input_param) # noqa
self.assertEqual(input_param, {"arg": "value"})
def test_url_query_cleaner(self):
self.assertEqual("product.html", url_query_cleaner("product.html?"))
self.assertEqual("product.html", url_query_cleaner("product.html?&"))
self.assertEqual(
"product.html?id=200",
url_query_cleaner("product.html?id=200&foo=bar&name=wired", ["id"]),
)
self.assertEqual(
"product.html?id=200",
url_query_cleaner("product.html?&id=200&&foo=bar&name=wired", ["id"]),
)
self.assertEqual(
"product.html", url_query_cleaner("product.html?foo=bar&name=wired", ["id"])
)
self.assertEqual(
"product.html?id=200&name=wired",
url_query_cleaner("product.html?id=200&foo=bar&name=wired", ["id", "name"]),
)
self.assertEqual(
"product.html?id",
url_query_cleaner("product.html?id&other=3&novalue=", ["id"]),
)
# default is to remove duplicate keys
self.assertEqual(
"product.html?d=1",
url_query_cleaner("product.html?d=1&e=b&d=2&d=3&other=other", ["d"]),
)
# unique=False disables duplicate keys filtering
self.assertEqual(
"product.html?d=1&d=2&d=3",
url_query_cleaner(
"product.html?d=1&e=b&d=2&d=3&other=other", ["d"], unique=False
),
)
self.assertEqual(
"product.html?id=200&foo=bar",
url_query_cleaner(
"product.html?id=200&foo=bar&name=wired#id20", ["id", "foo"]
),
)
self.assertEqual(
"product.html?foo=bar&name=wired",
url_query_cleaner(
"product.html?id=200&foo=bar&name=wired", ["id"], remove=True
),
)
self.assertEqual(
"product.html?name=wired",
url_query_cleaner(
"product.html?id=2&foo=bar&name=wired", ["id", "foo"], remove=True
),
)
self.assertEqual(
"product.html?foo=bar&name=wired",
url_query_cleaner(
"product.html?id=2&foo=bar&name=wired", ["id", "footo"], remove=True
),
)
self.assertEqual(
"product.html", url_query_cleaner("product.html", ["id"], remove=True)
)
self.assertEqual(
"product.html", url_query_cleaner("product.html?&", ["id"], remove=True)
)
self.assertEqual(
"product.html?foo=bar",
url_query_cleaner("product.html?foo=bar&name=wired", "foo"),
)
self.assertEqual(
"product.html?foobar=wired",
url_query_cleaner("product.html?foo=bar&foobar=wired", "foobar"),
)
def test_url_query_cleaner_keep_fragments(self):
self.assertEqual(
"product.html?id=200#foo",
url_query_cleaner(
"product.html?id=200&foo=bar&name=wired#foo",
["id"],
keep_fragments=True,
),
)
def test_path_to_file_uri(self):
if os.name == "nt":
self.assertEqual(
path_to_file_uri(r"C:\\windows\clock.avi"),
"file:///C:/windows/clock.avi",
)
else:
self.assertEqual(
path_to_file_uri("/some/path.txt"), "file:///some/path.txt"
)
fn = "test.txt"
x = path_to_file_uri(fn)
self.assertTrue(x.startswith("file:///"))
self.assertEqual(file_uri_to_path(x).lower(), os.path.abspath(fn).lower())
def test_file_uri_to_path(self):
if os.name == "nt":
self.assertEqual(
file_uri_to_path("file:///C:/windows/clock.avi"),
r"C:\\windows\clock.avi",
)
uri = "file:///C:/windows/clock.avi"
uri2 = path_to_file_uri(file_uri_to_path(uri))
self.assertEqual(uri, uri2)
else:
self.assertEqual(
file_uri_to_path("file:///path/to/test.txt"), "/path/to/test.txt"
)
self.assertEqual(file_uri_to_path("/path/to/test.txt"), "/path/to/test.txt")
uri = "file:///path/to/test.txt"
uri2 = path_to_file_uri(file_uri_to_path(uri))
self.assertEqual(uri, uri2)
self.assertEqual(file_uri_to_path("test.txt"), "test.txt")
def test_any_to_uri(self):
if os.name == "nt":
self.assertEqual(
any_to_uri(r"C:\\windows\clock.avi"), "file:///C:/windows/clock.avi"
)
else:
self.assertEqual(any_to_uri("/some/path.txt"), "file:///some/path.txt")
self.assertEqual(any_to_uri("file:///some/path.txt"), "file:///some/path.txt")
self.assertEqual(
any_to_uri("http://www.example.com/some/path.txt"),
"http://www.example.com/some/path.txt",
)
class CanonicalizeUrlTest(unittest.TestCase):
def test_canonicalize_url(self):
# simplest case
self.assertEqual(
canonicalize_url("http://www.example.com/"), "http://www.example.com/"
)
def test_return_str(self):
assert isinstance(canonicalize_url("http://www.example.com"), str)
assert isinstance(canonicalize_url(b"http://www.example.com"), str)
def test_append_missing_path(self):
self.assertEqual(
canonicalize_url("http://www.example.com"), "http://www.example.com/"
)
def test_typical_usage(self):
self.assertEqual(
canonicalize_url("http://www.example.com/do?a=1&b=2&c=3"),
"http://www.example.com/do?a=1&b=2&c=3",
)
self.assertEqual(
canonicalize_url("http://www.example.com/do?c=1&b=2&a=3"),
"http://www.example.com/do?a=3&b=2&c=1",
)
self.assertEqual(
canonicalize_url("http://www.example.com/do?&a=1"),
"http://www.example.com/do?a=1",
)
def test_port_number(self):
self.assertEqual(
canonicalize_url("http://www.example.com:8888/do?a=1&b=2&c=3"),
"http://www.example.com:8888/do?a=1&b=2&c=3",
)
# trailing empty ports are removed
self.assertEqual(
canonicalize_url("http://www.example.com:/do?a=1&b=2&c=3"),
"http://www.example.com/do?a=1&b=2&c=3",
)
def test_sorting(self):
self.assertEqual(
canonicalize_url("http://www.example.com/do?c=3&b=5&b=2&a=50"),
"http://www.example.com/do?a=50&b=2&b=5&c=3",
)
def test_keep_blank_values(self):
self.assertEqual(
canonicalize_url(
"http://www.example.com/do?b=&a=2", keep_blank_values=False
),
"http://www.example.com/do?a=2",
)
self.assertEqual(
canonicalize_url("http://www.example.com/do?b=&a=2"),
"http://www.example.com/do?a=2&b=",
)
self.assertEqual(
canonicalize_url(
"http://www.example.com/do?b=&c&a=2", keep_blank_values=False
),
"http://www.example.com/do?a=2",
)
self.assertEqual(
canonicalize_url("http://www.example.com/do?b=&c&a=2"),
"http://www.example.com/do?a=2&b=&c=",
)
self.assertEqual(
canonicalize_url("http://www.example.com/do?1750,4"),
"http://www.example.com/do?1750%2C4=",
)
def test_spaces(self):
self.assertEqual(
canonicalize_url("http://www.example.com/do?q=a space&a=1"),
"http://www.example.com/do?a=1&q=a+space",
)
self.assertEqual(
canonicalize_url("http://www.example.com/do?q=a+space&a=1"),
"http://www.example.com/do?a=1&q=a+space",
)
self.assertEqual(
canonicalize_url("http://www.example.com/do?q=a%20space&a=1"),
"http://www.example.com/do?a=1&q=a+space",
)
def test_canonicalize_url_unicode_path(self):
self.assertEqual(
canonicalize_url("http://www.example.com/résumé"),
"http://www.example.com/r%C3%A9sum%C3%A9",
)
def test_canonicalize_url_unicode_query_string(self):
# default encoding for path and query is UTF-8
self.assertEqual(
canonicalize_url("http://www.example.com/résumé?q=résumé"),
"http://www.example.com/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9",
)
# passed encoding will affect query | |
will be appended.
s_output_type : A string defining the observable type, one of the 1-qubit, 2-qubits,
or global observables.
Returns:
result : A dictionary with the result.
"""
full_filename = s_output_path + f".{s_output_type}.dat"
print("Loading output data file: " + full_filename)
file = open(full_filename, "r")
result = collections.OrderedDict()
file.readline()
for line in file:
words = line.strip().split()
if not words:
continue
LindbladMPOSolver._read_data_line(s_output_type, words, result)
file.close()
return result
@staticmethod
def _read_data_line(s_output_type: str, words: list, result: Dict):
t = float(words[0])
op = words[1]
val = float(words[-1])
if s_output_type == "obs-1q":
q_index1 = int(words[2]) - 1
# data files are storing 1-based indices because of iTensor, while we use 0-based indices
q_indices = (q_index1,)
elif s_output_type == "obs-2q":
q_index1 = int(words[2]) - 1
# data files are storing 1-based indices because of iTensor, while we use 0-based indices
q_index2 = int(words[3]) - 1
q_indices = (q_index1, q_index2)
elif s_output_type == "global":
q_indices = ()
else:
raise Exception(f"Unknown output type {s_output_type}.")
# The result dictionary is indexed by a tuple, first entry is a name, second entry is
# a tuple of qubit indices - 0 indices for the global data, 1 for 1Q observables, 2 for 2Q.
obs_data = result.get((op.lower(), q_indices), None)
# obs_data is a tuple, first entry is a list of times, second entry holds the values.
if obs_data is None:
obs_data = (list(), list())
result[(op.lower(), q_indices)] = obs_data
# TODO: optimize the list appends
obs_data[0].append(t)
obs_data[1].append(val)
    @staticmethod
    # checks if the value is int (for cleaner code)
    def _is_int(value) -> bool:
        # NOTE(review): bool is a subclass of int, so True/False also pass
        # this check — confirm whether bools should count as valid here.
        return isinstance(value, int)
    @staticmethod
    # checks if the value is a float (for cleaner code)
    def _is_float(value) -> bool:
        # in python terms the value <4> is not float type, in the simulator context float
        # can also be a python int:
        # NOTE(review): bool is a subclass of int and therefore also passes.
        return isinstance(value, (float, int))
@staticmethod
# returns the number of qubits based on the given parameters, returns -1 if found an error
def _get_number_of_qubits(parameters: Dict) -> int:
if "N" in parameters:
if LindbladMPOSolver._is_int(parameters["N"]):
return parameters["N"]
return -1
    def _virtual_verify_parameters(self, ignore_params: Optional[list] = None) -> str:
        """An overridable function that verifies the parameters by calling verify_parameters().
        Args:
            ignore_params: A list with parameter names that this solver does not recognize, but
                should be ignored in the verification (so that an error message for unknown
                parameters is not issued). This is useful for derived classes.
        Returns:
            A detailed error message if parameters arguments are not in the correct format (which
            is stated in the spec of the simulator). Otherwise, returns "" (checks passed).
        """
        # Delegates to the static verifier; subclasses override this hook to
        # extend or replace validation of self.parameters.
        return LindbladMPOSolver.verify_parameters(self.parameters, ignore_params)
@staticmethod
def verify_parameters(
parameters: dict, ignore_params: Optional[list] = None
) -> str:
"""Returns a detailed Error message if parameters are not in the correct format.
Args:
parameters: A dictionary of solver parameters.
ignore_params: A list with parameter names that this solver does not recognize, but
should be ignored in the verification (so that an error message for unknown
parameters is not issued). This is mostly useful for derived subclasses.
Returns:
A detailed error message if parameters are not in the correct format.
Otherwise, returns "" (checks passed).
"""
check_msg = ""
if parameters is None:
check_msg += "Error 100: The `parameters` dictionary must be assigned\n"
return check_msg
if (
("N" not in parameters)
or ("t_final" not in parameters)
or ("tau" not in parameters)
):
check_msg += (
"Error 110: N, t_final and tau must be defined as they do not have default "
"values\n"
)
return check_msg
for key in dict.keys(parameters):
if (
isinstance(parameters[key], str) and "" == parameters[key]
): # ignore empty entrances/space holders <"">
continue
flag_continue = False
if key == "N":
if not LindbladMPOSolver._is_int(parameters[key]):
check_msg += "Error 120: " + key + " should be an integer\n"
continue
if parameters[key] <= 0:
check_msg += (
"Error 130: " + key + " should be bigger/equal to 1 (integer)\n"
)
continue
elif key == "t_init" or key == "t_final" or key == "tau":
if not LindbladMPOSolver._is_float(parameters[key]):
check_msg += "Error 140: " + key + " is not a float\n"
continue
if key != "t_init" and parameters[key] <= 0:
check_msg += "Error 150: " + key + " must be larger than 0\n"
continue
if key == "t_init" and parameters[key] > parameters["t_final"]:
check_msg += (
"Error 151: " + key + " must be equal or smaller than t_final\n"
)
continue
elif (key == "l_x") or (key == "l_y"):
if not LindbladMPOSolver._is_int(parameters[key]):
check_msg += "Error 160: " + key + " should be an integer\n"
continue
if parameters[key] < 0:
check_msg += (
"Error 170: "
+ key
+ " should be equal or larger than 1 (integer)\n"
)
continue
elif key == "output_step" or key == "force_rho_hermitian_step":
if not LindbladMPOSolver._is_int(parameters[key]):
check_msg += "Error 180: " + key + " should be an integer\n"
continue
if parameters[key] < 0:
check_msg += (
"Error 190: " + key + " should be bigger/equal to 0 (integer)\n"
)
continue
elif (
(key == "h_x")
or (key == "h_y")
or (key == "h_z")
or (key == "g_0")
or (key == "g_1")
or (key == "g_2")
):
if LindbladMPOSolver._is_float(parameters[key]):
continue
number_of_qubits = LindbladMPOSolver._get_number_of_qubits(parameters)
if number_of_qubits == -1:
check_msg += (
"Error 200: " + key + " could not be validated because 'N' "
"(or alternatively l_x, l_y) are not "
"defined properly\n "
)
continue
if isinstance(parameters[key], list):
if len(parameters[key]) != number_of_qubits:
check_msg += (
"Error 210: " + key + " is not a float / N-length list / "
"numpy array (of floats)\n"
)
continue
for element in parameters[key]:
if not LindbladMPOSolver._is_float(element):
check_msg += (
"Error 220: " + key + "is not a float / N-length list "
"/ numpy array (of floats)\n "
)
flag_continue = True
break
if flag_continue:
continue
elif isinstance(parameters[key], np.ndarray):
if (str((parameters[key]).dtype).find("int") == -1) and (
str((parameters[key]).dtype).find("float") == -1
):
check_msg += (
"Error 230: " + key + " is not a float / N-length list / "
"numpy array (of floats)\n"
)
continue
if parameters[key].size == 1:
continue
if (parameters[key].shape[0] != number_of_qubits) or (
parameters[key].shape[0] != parameters[key].size
):
check_msg += (
"Error 240: " + key + " is not a float / N-length list / "
"numpy array (of floats)\n"
)
continue
else:
check_msg += (
"Error 250: " + key + " is not a float / N-length list / numpy "
"array (of floats)\n"
)
continue
elif (key == "J_z") or (key == "J"):
if LindbladMPOSolver._is_float(parameters[key]):
continue
number_of_qubits = LindbladMPOSolver._get_number_of_qubits(parameters)
if number_of_qubits == -1:
check_msg += (
"Error 260: " + key + " could not be validated because 'N' "
"(or alternatively l_x, l_y) are not "
"defined properly\n"
)
continue
if isinstance(parameters[key], list):
if len(parameters[key]) != number_of_qubits:
check_msg += (
"Error 270: "
+ key
+ " should be a constant, or a square matrix"
" (nested lists/np.array) of N^2 floats\n "
)
continue
for lst in parameters[key]:
if not isinstance(lst, list):
check_msg += (
"Error 280: "
+ key
+ "should be a constant, or a square "
"matrix (nested lists/np.array) of "
"floats with a size N^2\n "
)
flag_continue = True
break
if len(lst) != number_of_qubits:
check_msg += (
"Error 290: "
+ key
+ "should be a constant, or a square matrix (nested "
"lists/np.array) with N^2 floats\n"
)
flag_continue = True
break
for val in lst:
if not LindbladMPOSolver._is_float(val):
check_msg += (
"Error 300: "
+ key
+ "should be a constant, or a square matrix (nested "
"lists/np.array) in the size of number_of_qubits^2 "
"of floats\n"
)
flag_continue = True
break
if flag_continue:
break
if flag_continue:
continue
elif isinstance(parameters[key], np.ndarray):
if (str((parameters[key]).dtype).find("int") == -1) and (
str((parameters[key]).dtype).find("float") == -1
):
check_msg += (
"Error 310: "
+ key
+ "should be a constant, or a square matrix (nested "
"lists/np.array) in the size of number_of_qubits^2 of "
"floats\n"
)
continue
| |
= (fullname, self.scopestack.clone_top(), self._lineno)
self._deferred_load_checks.append(data)
else:
# We're not in a FunctionDef. Deferring would give us the same
# result; we do the check now to avoid the overhead of cloning the
# stack.
self._check_load(fullname, self.scopestack, self._lineno)
def _check_load(self, fullname, scopestack, lineno):
# Check if the symbol needs import. (As a side effect, if the object
# is a _UseChecker, this will mark it as used. TODO: It would be
# better to refactor symbol_needs_import so that it just returns the
# object it found, and we mark it as used here.)
fullname = DottedIdentifier(fullname)
if symbol_needs_import(fullname, scopestack) and not scopestack.has_star_import():
self.missing_imports.append((lineno,fullname))
def _finish_deferred_load_checks(self):
for fullname, scopestack, lineno in self._deferred_load_checks:
self._check_load(fullname, scopestack, lineno)
self._deferred_load_checks = []
def _scan_unused_imports(self):
# If requested, then check which of our imports were unused.
# For now we only scan the top level. If we wanted to support
# non-global unused-import checking, then we should check this
# whenever popping a scopestack.
unused_imports = self.unused_imports
if unused_imports is None:
return
scope = self.scopestack[-1]
for name, value in six.iteritems(scope):
if not isinstance(value, _UseChecker):
continue
if value.used:
continue
unused_imports.append(( value.lineno, value.source ))
unused_imports.sort()
def scan_for_import_issues(codeblock, find_unused_imports=True, parse_docstrings=False):
    """
    Find missing and unused imports, by lineno.
    >>> arg = "import numpy, aa.bb as cc\\nnumpy.arange(x)\\narange(x)"
    >>> missing, unused = scan_for_import_issues(arg)
    >>> missing
    [(2, DottedIdentifier('x')), (3, DottedIdentifier('arange')), (3, DottedIdentifier('x'))]
    >>> unused
    [(1, Import('from aa import bb as cc'))]
    :type codeblock:
      ``PythonBlock``
    :type namespaces:
      ``dict`` or ``list`` of ``dict``
    :param parse_docstrings:
      Whether to parse docstrings.
      Compare the following examples.  When parse_docstrings=True, 'bar' is
      not considered unused because there is a string that references it in
      braces::
        >>> scan_for_import_issues("import foo as bar, baz\\n'{bar}'\\n")
        ([], [(1, Import('import baz')), (1, Import('import foo as bar'))])
        >>> scan_for_import_issues("import foo as bar, baz\\n'{bar}'\\n", parse_docstrings=True)
        ([], [(1, Import('import baz'))])
    """
    logger.debug("scan_for_import_issues()")
    # Start from a single empty global scope.
    finder = _MissingImportFinder(
        ScopeStack([{}]),
        find_unused_imports=find_unused_imports,
        parse_docstrings=parse_docstrings,
    )
    return finder.scan_for_import_issues(PythonBlock(codeblock))
def _find_missing_imports_in_ast(node, namespaces):
    """
    Find missing imports in an AST node.
    Helper function to `find_missing_imports`.
    >>> node = ast.parse("import numpy; numpy.arange(x) + arange(x)")
    >>> _find_missing_imports_in_ast(node, [{}])
    [DottedIdentifier('arange'), DottedIdentifier('x')]
    :type node:
      ``ast.AST``
    :type namespaces:
      ``dict`` or ``list`` of ``dict``
    :rtype:
      ``list`` of ``DottedIdentifier``
    """
    if not isinstance(node, ast.AST):
        raise TypeError
    if logger.debug_enabled:
        # Dumping the AST is expensive; only do it when debug logging is on.
        logger.debug("ast=%s", ast.dump(node))
    # Walk the abstract syntax tree collecting unresolved loads.
    finder = _MissingImportFinder(namespaces)
    return finder.find_missing_imports(node)
# TODO: maybe we should replace _find_missing_imports_in_ast with
# _find_missing_imports_in_code(compile(node)). The method of parsing opcodes
# is simpler, because Python takes care of the scoping issue for us and we
# don't have to worry about locals. It does, however, depend on CPython
# implementation details, whereas the AST is well-defined by the language.
def _find_missing_imports_in_code(co, namespaces):
    """
    Find missing imports in a code object.
    Helper function to `find_missing_imports`.
    >>> f = lambda: foo.bar(x) + baz(y)
    >>> [str(m) for m in _find_missing_imports_in_code(f.__code__, [{}])]
    ['baz', 'foo.bar', 'x', 'y']
    >>> f = lambda x: (lambda: x+y)
    >>> _find_missing_imports_in_code(f.__code__, [{}])
    [DottedIdentifier('y')]
    :type co:
      ``types.CodeType``
    :type namespaces:
      ``dict`` or ``list`` of ``dict``
    :rtype:
      ``list`` of ``str``
    """
    # Collect names that are loaded in the code object (recursively) with no
    # corresponding store, then keep only those not resolvable in the given
    # namespaces.
    unresolved_loads = set()
    _find_loads_without_stores_in_code(co, unresolved_loads)
    return [
        DottedIdentifier(name)
        for name in sorted(unresolved_loads)
        if symbol_needs_import(name, namespaces)
    ]
def _find_loads_without_stores_in_code(co, loads_without_stores):
"""
Find global LOADs without corresponding STOREs, by disassembling code.
Recursive helper for `_find_missing_imports_in_code`.
:type co:
``types.CodeType``
:param co:
Code object, e.g. ``function.__code__``
:type loads_without_stores:
``set``
:param loads_without_stores:
Mutable set to which we add loads without stores.
:return:
``None``
"""
if not isinstance(co, types.CodeType):
raise TypeError(
"_find_loads_without_stores_in_code(): expected a CodeType; got a %s"
% (type(co).__name__,))
# Initialize local constants for fast access.
from opcode import HAVE_ARGUMENT, EXTENDED_ARG, opmap
LOAD_ATTR = opmap['LOAD_ATTR']
LOAD_METHOD = opmap['LOAD_METHOD'] if PY3 else None
LOAD_GLOBAL = opmap['LOAD_GLOBAL']
LOAD_NAME = opmap['LOAD_NAME']
STORE_ATTR = opmap['STORE_ATTR']
STORE_GLOBAL = opmap['STORE_GLOBAL']
STORE_NAME = opmap['STORE_NAME']
# Keep track of the partial name so far that started with a LOAD_GLOBAL.
# If ``pending`` is not None, then it is a list representing the name
# components we've seen so far.
pending = None
# Disassemble the code. Look for LOADs and STOREs. This code is based on
# ``dis.disassemble``.
#
# Scenarios:
#
# * Function-level load a toplevel global
# def f():
# aa
# => LOAD_GLOBAL; other (not LOAD_ATTR or STORE_ATTR)
# * Function-level load an attribute of global
# def f():
# aa.bb.cc
# => LOAD_GLOBAL; LOAD_ATTR; LOAD_ATTR; other
# * Function-level store a toplevel global
# def f():
# global aa
# aa = 42
# => STORE_GLOBAL
# * Function-level store an attribute of global
# def f():
# aa.bb.cc = 42
# => LOAD_GLOBAL, LOAD_ATTR, STORE_ATTR
# * Function-level load a local
# def f():
# aa = 42
# return aa
# => LOAD_FAST or LOAD_NAME
# * Function-level store a local
# def f():
# aa = 42
# => STORE_FAST or STORE_NAME
# * Function-level load an attribute of a local
# def f():
# aa = 42
# return aa.bb.cc
# => LOAD_FAST; LOAD_ATTR; LOAD_ATTR
# * Function-level store an attribute of a local
# def f():
# aa == 42
# aa.bb.cc = 99
# => LOAD_FAST; LOAD_ATTR; STORE_ATTR
# * Function-level load an attribute of an expression other than a name
# def f():
# foo().bb.cc
# => [CALL_FUNCTION, etc]; LOAD_ATTR; LOAD_ATTR
# * Function-level store an attribute of an expression other than a name
# def f():
# foo().bb.cc = 42
# => [CALL_FUNCTION, etc]; LOAD_ATTR; STORE_ATTR
# * Function-level import
# def f():
# import aa.bb.cc
# => IMPORT_NAME "aa.bb.cc", STORE_FAST "aa"
# * Module-level load of a top-level global
# aa
# => LOAD_NAME
# * Module-level store of a top-level global
# aa = 42
# => STORE_NAME
# * Module-level load of an attribute of a global
# aa.bb.cc
# => LOAD_NAME, LOAD_ATTR, LOAD_ATTR
# * Module-level store of an attribute of a global
# aa.bb.cc = 42
# => LOAD_NAME, LOAD_ATTR, STORE_ATTR
# * Module-level import
# import aa.bb.cc
# IMPORT_NAME "aa.bb.cc", STORE_NAME "aa"
# * Closure
# def f():
# aa = 42
# return lambda: aa
# f: STORE_DEREF, LOAD_CLOSURE, MAKE_CLOSURE
# g = f(): LOAD_DEREF
bytecode = co.co_code
n = len(bytecode)
i = 0
extended_arg = 0
stores = set()
loads_after_label = set()
loads_before_label_without_stores = set()
# Find the earliest target of a backward jump.
earliest_backjump_label = _find_earliest_backjump_label(bytecode)
# Loop through bytecode.
while i < n:
c = bytecode[i]
op = _op(c)
i += 1
if op >= HAVE_ARGUMENT:
if PY2:
oparg = _op(bytecode[i]) + _op(bytecode[i+1])*256 + extended_arg
extended_arg = 0
i = i+2
if op == EXTENDED_ARG:
extended_arg = oparg*65536
continue
else:
oparg = bytecode[i] | extended_arg
extended_arg = 0
if op == EXTENDED_ARG:
extended_arg = (oparg << 8)
continue
i += 1
if pending is not None:
if op == STORE_ATTR:
# {LOAD_GLOBAL|LOAD_NAME} {LOAD_ATTR}* {STORE_ATTR}
pending.append(co.co_names[oparg])
fullname = ".".join(pending)
pending = None
stores.add(fullname)
continue
if op in [LOAD_ATTR, LOAD_METHOD]:
# {LOAD_GLOBAL|LOAD_NAME} {LOAD_ATTR}* so far;
# possibly more LOAD_ATTR/STORE_ATTR will follow
pending.append(co.co_names[oparg])
continue
# {LOAD_GLOBAL|LOAD_NAME} {LOAD_ATTR}* (and no more
# LOAD_ATTR/STORE_ATTR)
fullname = ".".join(pending)
pending = None
if i >= earliest_backjump_label:
loads_after_label.add(fullname)
elif fullname not in stores:
loads_before_label_without_stores.add(fullname)
# Fall through.
if op in [LOAD_GLOBAL, LOAD_NAME]:
pending = [co.co_names[oparg]]
continue
if op in [STORE_GLOBAL, STORE_NAME]:
stores.add(co.co_names[oparg])
continue
# We don't need to worry about: LOAD_FAST, STORE_FAST, LOAD_CLOSURE,
# LOAD_DEREF, STORE_DEREF. LOAD_FAST and STORE_FAST refer to local
# variables; LOAD_CLOSURE, LOAD_DEREF, and STORE_DEREF relate to
# closure variables. In both cases we know these are not missing
# imports. It's convenient that these are separate opcodes, because
# then we don't need to deal with them manually.
# Record which variables we saw that were loaded in this module without a
# corresponding store. We handle two cases.
#
# 1. Load-before-store; no loops (i.e. no backward jumps).
# Example A::
# foo.bar()
# import foo
# In the above example A, "foo" was used before it was imported. We
# consider it a candidate for auto-import.
# Example | |
# -*- encoding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for the API /volume targets/ methods.
"""
import datetime
from http import client as http_client
from urllib import parse as urlparse
import mock
from oslo_config import cfg
from oslo_utils import timeutils
from oslo_utils import uuidutils
from wsme import types as wtypes
from ironic.api.controllers import base as api_base
from ironic.api.controllers import v1 as api_v1
from ironic.api.controllers.v1 import notification_utils
from ironic.api.controllers.v1 import utils as api_utils
from ironic.api.controllers.v1 import volume_target as api_volume_target
from ironic.common import exception
from ironic.conductor import rpcapi
from ironic import objects
from ironic.objects import fields as obj_fields
from ironic.tests import base
from ironic.tests.unit.api import base as test_api_base
from ironic.tests.unit.api import utils as apiutils
from ironic.tests.unit.db import utils as dbutils
from ironic.tests.unit.objects import utils as obj_utils
def post_get_test_volume_target(**kw):
    """Return volume-target POST data with a node_uuid set for the API."""
    node = dbutils.get_test_node()
    target = apiutils.volume_target_post_data(**kw)
    # Default the node reference to the stock test node's UUID.
    default_uuid = node['uuid']
    target['node_uuid'] = kw.get('node_uuid', default_uuid)
    return target
class TestVolumeTargetObject(base.TestCase):

    def test_volume_target_init(self):
        """Fields omitted from the init kwargs stay Unset on the object."""
        post_data = apiutils.volume_target_post_data(node_id=None)
        post_data.pop('extra')
        target = api_volume_target.VolumeTarget(**post_data)
        self.assertEqual(wtypes.Unset, target.extra)
class TestListVolumeTargets(test_api_base.BaseApiTest):
headers = {api_base.Version.string: str(api_v1.max_version())}
    def setUp(self):
        # Every listing test needs a node for its volume targets to
        # reference; create a fresh one per test.
        super(TestListVolumeTargets, self).setUp()
        self.node = obj_utils.create_test_node(self.context)
def test_empty(self):
data = self.get_json('/volume/targets', headers=self.headers)
self.assertEqual([], data['targets'])
def test_one(self):
target = obj_utils.create_test_volume_target(
self.context, node_id=self.node.id)
data = self.get_json('/volume/targets', headers=self.headers)
self.assertEqual(target.uuid, data['targets'][0]["uuid"])
self.assertNotIn('extra', data['targets'][0])
# never expose the node_id
self.assertNotIn('node_id', data['targets'][0])
def test_one_invalid_api_version(self):
obj_utils.create_test_volume_target(
self.context, node_id=self.node.id)
response = self.get_json(
'/volume/targets',
headers={api_base.Version.string: str(api_v1.min_version())},
expect_errors=True)
self.assertEqual(http_client.NOT_FOUND, response.status_int)
def test_get_one(self):
target = obj_utils.create_test_volume_target(
self.context, node_id=self.node.id)
data = self.get_json('/volume/targets/%s' % target.uuid,
headers=self.headers)
self.assertEqual(target.uuid, data['uuid'])
self.assertIn('extra', data)
self.assertIn('node_uuid', data)
# never expose the node_id
self.assertNotIn('node_id', data)
def test_get_one_invalid_api_version(self):
target = obj_utils.create_test_volume_target(self.context,
node_id=self.node.id)
response = self.get_json(
'/volume/targets/%s' % target.uuid,
headers={api_base.Version.string: str(api_v1.min_version())},
expect_errors=True)
self.assertEqual(http_client.NOT_FOUND, response.status_int)
def test_get_one_custom_fields(self):
target = obj_utils.create_test_volume_target(
self.context, node_id=self.node.id)
fields = 'boot_index,extra'
data = self.get_json(
'/volume/targets/%s?fields=%s' % (target.uuid, fields),
headers=self.headers)
# We always append "links"
self.assertItemsEqual(['boot_index', 'extra', 'links'], data)
def test_get_collection_custom_fields(self):
fields = 'uuid,extra'
for i in range(3):
obj_utils.create_test_volume_target(
self.context, node_id=self.node.id,
uuid=uuidutils.generate_uuid(), boot_index=i)
data = self.get_json(
'/volume/targets?fields=%s' % fields,
headers=self.headers)
self.assertEqual(3, len(data['targets']))
for target in data['targets']:
# We always append "links"
self.assertItemsEqual(['uuid', 'extra', 'links'], target)
def test_get_custom_fields_invalid_fields(self):
target = obj_utils.create_test_volume_target(
self.context, node_id=self.node.id)
fields = 'uuid,spongebob'
response = self.get_json(
'/volume/targets/%s?fields=%s' % (target.uuid, fields),
headers=self.headers, expect_errors=True)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertIn('spongebob', response.json['error_message'])
def test_detail(self):
target = obj_utils.create_test_volume_target(
self.context, node_id=self.node.id)
data = self.get_json('/volume/targets?detail=True',
headers=self.headers)
self.assertEqual(target.uuid, data['targets'][0]["uuid"])
self.assertIn('extra', data['targets'][0])
self.assertIn('node_uuid', data['targets'][0])
# never expose the node_id
self.assertNotIn('node_id', data['targets'][0])
def test_detail_false(self):
target = obj_utils.create_test_volume_target(
self.context, node_id=self.node.id)
data = self.get_json('/volume/targets?detail=False',
headers=self.headers)
self.assertEqual(target.uuid, data['targets'][0]["uuid"])
self.assertNotIn('extra', data['targets'][0])
# never expose the node_id
self.assertNotIn('node_id', data['targets'][0])
def test_detail_invalid_api_version(self):
obj_utils.create_test_volume_target(self.context,
node_id=self.node.id)
response = self.get_json(
'/volume/targets?detail=True',
headers={api_base.Version.string: str(api_v1.min_version())},
expect_errors=True)
self.assertEqual(http_client.NOT_FOUND, response.status_int)
def test_detail_sepecified_by_path(self):
obj_utils.create_test_volume_target(self.context,
node_id=self.node.id)
response = self.get_json(
'/volume/targets/detail', headers=self.headers,
expect_errors=True)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
def test_detail_against_single(self):
target = obj_utils.create_test_volume_target(
self.context, node_id=self.node.id)
response = self.get_json('/volume/targets/%s?detail=True'
% target.uuid,
headers=self.headers,
expect_errors=True)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
def test_detail_and_fields(self):
target = obj_utils.create_test_volume_target(
self.context, node_id=self.node.id)
fields = 'boot_index,extra'
response = self.get_json('/volume/targets/%s?detail=True&fields=%s'
% (target.uuid, fields),
headers=self.headers,
expect_errors=True)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
def test_many(self):
targets = []
for id_ in range(5):
target = obj_utils.create_test_volume_target(
self.context, node_id=self.node.id,
uuid=uuidutils.generate_uuid(), boot_index=id_)
targets.append(target.uuid)
data = self.get_json('/volume/targets', headers=self.headers)
self.assertEqual(len(targets), len(data['targets']))
uuids = [n['uuid'] for n in data['targets']]
self.assertCountEqual(targets, uuids)
def test_links(self):
uuid = uuidutils.generate_uuid()
obj_utils.create_test_volume_target(self.context,
uuid=uuid,
node_id=self.node.id)
data = self.get_json('/volume/targets/%s' % uuid,
headers=self.headers)
self.assertIn('links', data)
self.assertEqual(2, len(data['links']))
self.assertIn(uuid, data['links'][0]['href'])
for l in data['links']:
bookmark = l['rel'] == 'bookmark'
self.assertTrue(self.validate_link(l['href'], bookmark=bookmark,
headers=self.headers))
def test_collection_links(self):
targets = []
for id_ in range(5):
target = obj_utils.create_test_volume_target(
self.context, node_id=self.node.id,
uuid=uuidutils.generate_uuid(), boot_index=id_)
targets.append(target.uuid)
data = self.get_json('/volume/targets/?limit=3', headers=self.headers)
self.assertEqual(3, len(data['targets']))
next_marker = data['targets'][-1]['uuid']
self.assertIn(next_marker, data['next'])
self.assertIn('volume/targets', data['next'])
def test_collection_links_default_limit(self):
cfg.CONF.set_override('max_limit', 3, 'api')
targets = []
for id_ in range(5):
target = obj_utils.create_test_volume_target(
self.context, node_id=self.node.id,
uuid=uuidutils.generate_uuid(), boot_index=id_)
targets.append(target.uuid)
data = self.get_json('/volume/targets', headers=self.headers)
self.assertEqual(3, len(data['targets']))
next_marker = data['targets'][-1]['uuid']
self.assertIn(next_marker, data['next'])
self.assertIn('volume/targets', data['next'])
def test_collection_links_custom_fields(self):
fields = 'uuid,extra'
cfg.CONF.set_override('max_limit', 3, 'api')
targets = []
for id_ in range(5):
target = obj_utils.create_test_volume_target(
self.context, node_id=self.node.id,
uuid=uuidutils.generate_uuid(), boot_index=id_)
targets.append(target.uuid)
data = self.get_json('/volume/targets?fields=%s' % fields,
headers=self.headers)
self.assertEqual(3, len(data['targets']))
next_marker = data['targets'][-1]['uuid']
self.assertIn(next_marker, data['next'])
self.assertIn('volume/targets', data['next'])
self.assertIn('fields', data['next'])
def test_get_collection_pagination_no_uuid(self):
fields = 'boot_index'
limit = 2
targets = []
for id_ in range(3):
target = obj_utils.create_test_volume_target(
self.context, node_id=self.node.id,
uuid=uuidutils.generate_uuid(), boot_index=id_)
targets.append(target)
data = self.get_json(
'/volume/targets?fields=%s&limit=%s' % (fields, limit),
headers=self.headers)
self.assertEqual(limit, len(data['targets']))
self.assertIn('marker=%s' % targets[limit - 1].uuid, data['next'])
def test_collection_links_detail(self):
targets = []
for id_ in range(5):
target = obj_utils.create_test_volume_target(
self.context, node_id=self.node.id,
uuid=uuidutils.generate_uuid(), boot_index=id_)
targets.append(target.uuid)
data = self.get_json('/volume/targets?detail=True&limit=3',
headers=self.headers)
self.assertEqual(3, len(data['targets']))
next_marker = data['targets'][-1]['uuid']
self.assertIn(next_marker, data['next'])
self.assertIn('volume/targets', data['next'])
self.assertIn('detail=True', data['next'])
def test_sort_key(self):
targets = []
for id_ in range(3):
target = obj_utils.create_test_volume_target(
self.context, node_id=self.node.id,
uuid=uuidutils.generate_uuid(), boot_index=id_)
targets.append(target.uuid)
data = self.get_json('/volume/targets?sort_key=uuid',
headers=self.headers)
uuids = [n['uuid'] for n in data['targets']]
self.assertEqual(sorted(targets), uuids)
def test_sort_key_invalid(self):
invalid_keys_list = ['foo', 'extra', 'properties']
for invalid_key in invalid_keys_list:
response = self.get_json('/volume/targets?sort_key=%s'
% invalid_key,
headers=self.headers,
expect_errors=True)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertIn(invalid_key, response.json['error_message'])
@mock.patch.object(api_utils, 'get_rpc_node')
def test_get_all_by_node_name_ok(self, mock_get_rpc_node):
# GET /v1/volume/targets specifying node_name - success
mock_get_rpc_node.return_value = self.node
for i in range(5):
if i < 3:
node_id = self.node.id
else:
node_id = 100000 + i
obj_utils.create_test_volume_target(
self.context, node_id=node_id,
uuid=uuidutils.generate_uuid(), boot_index=i)
data = self.get_json("/volume/targets?node=%s" % 'test-node',
headers=self.headers)
self.assertEqual(3, len(data['targets']))
@mock.patch.object(api_utils, 'get_rpc_node')
def test_detail_by_node_name_ok(self, mock_get_rpc_node):
# GET /v1/volume/targets/?detail=True specifying node_name - success
mock_get_rpc_node.return_value = self.node
target = obj_utils.create_test_volume_target(
self.context, node_id=self.node.id)
data = self.get_json('/volume/targets?detail=True&node=%s' %
'test-node',
headers=self.headers)
self.assertEqual(target.uuid, data['targets'][0]['uuid'])
self.assertEqual(self.node.uuid, data['targets'][0]['node_uuid'])
@mock.patch.object(rpcapi.ConductorAPI, 'update_volume_target')
class TestPatch(test_api_base.BaseApiTest):
headers = {api_base.Version.string: str(api_v1.max_version())}
def setUp(self):
super(TestPatch, self).setUp()
self.node = obj_utils.create_test_node(self.context)
self.target = obj_utils.create_test_volume_target(
self.context, node_id=self.node.id)
p = mock.patch.object(rpcapi.ConductorAPI, 'get_topic_for')
self.mock_gtf = p.start()
self.mock_gtf.return_value = 'test-topic'
self.addCleanup(p.stop)
@mock.patch.object(notification_utils, '_emit_api_notification')
def test_update_byid(self, mock_notify, mock_upd):
extra = {'foo': 'bar'}
mock_upd.return_value = self.target
mock_upd.return_value.extra = extra
response = self.patch_json('/volume/targets/%s'
% self.target.uuid,
[{'path': '/extra/foo',
'value': 'bar',
'op': 'add'}],
headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.OK, response.status_code)
self.assertEqual(extra, response.json['extra'])
kargs = mock_upd.call_args[0][1]
self.assertEqual(extra, kargs.extra)
mock_notify.assert_has_calls([mock.call(mock.ANY, mock.ANY, 'update',
obj_fields.NotificationLevel.INFO,
obj_fields.NotificationStatus.START,
node_uuid=self.node.uuid),
mock.call(mock.ANY, mock.ANY, 'update',
obj_fields.NotificationLevel.INFO,
obj_fields.NotificationStatus.END,
node_uuid=self.node.uuid)])
def test_update_byid_invalid_api_version(self, mock_upd):
headers = {api_base.Version.string: str(api_v1.min_version())}
response = self.patch_json('/volume/targets/%s'
% self.target.uuid,
[{'path': '/extra/foo',
'value': 'bar',
'op': 'add'}],
headers=headers,
expect_errors=True)
self.assertEqual(http_client.NOT_FOUND, response.status_int)
def test_update_not_found(self, mock_upd):
uuid = uuidutils.generate_uuid()
response = self.patch_json('/volume/targets/%s' % uuid,
[{'path': '/extra/foo',
'value': 'bar',
'op': 'add'}],
headers=self.headers,
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.NOT_FOUND, response.status_int)
self.assertTrue(response.json['error_message'])
self.assertFalse(mock_upd.called)
def test_replace_singular(self, mock_upd):
boot_index = 100
mock_upd.return_value = self.target
mock_upd.return_value.boot_index = boot_index
response = self.patch_json('/volume/targets/%s' % self.target.uuid,
[{'path': '/boot_index',
'value': boot_index,
'op': 'replace'}],
headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.OK, response.status_code)
self.assertEqual(boot_index, response.json['boot_index'])
self.assertTrue(mock_upd.called)
kargs = mock_upd.call_args[0][1]
self.assertEqual(boot_index, kargs.boot_index)
@mock.patch.object(notification_utils, '_emit_api_notification')
def test_replace_boot_index_already_exist(self, mock_notify, mock_upd):
boot_index = 100
mock_upd.side_effect = \
exception.VolumeTargetBootIndexAlreadyExists(boot_index=boot_index)
response = self.patch_json('/volume/targets/%s'
% self.target.uuid,
[{'path': '/boot_index',
'value': boot_index,
'op': 'replace'}],
expect_errors=True, headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.CONFLICT, response.status_code)
self.assertTrue(response.json['error_message'])
self.assertTrue(mock_upd.called)
kargs = mock_upd.call_args[0][1]
self.assertEqual(boot_index, kargs.boot_index)
mock_notify.assert_has_calls([mock.call(mock.ANY, mock.ANY, 'update',
obj_fields.NotificationLevel.INFO,
obj_fields.NotificationStatus.START,
node_uuid=self.node.uuid),
mock.call(mock.ANY, mock.ANY, 'update',
obj_fields.NotificationLevel.ERROR,
obj_fields.NotificationStatus.ERROR,
node_uuid=self.node.uuid)])
def test_replace_invalid_power_state(self, mock_upd):
mock_upd.side_effect = \
exception.InvalidStateRequested(
action='volume target update', node=self.node.uuid,
state='power on')
response = self.patch_json('/volume/targets/%s'
% self.target.uuid,
[{'path': '/boot_index',
'value': 0,
'op': 'replace'}],
expect_errors=True, headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.BAD_REQUEST, response.status_code)
self.assertTrue(response.json['error_message'])
self.assertTrue(mock_upd.called)
kargs = mock_upd.call_args[0][1]
self.assertEqual(0, kargs.boot_index)
def test_replace_node_uuid(self, mock_upd):
mock_upd.return_value = self.target
response = self.patch_json('/volume/targets/%s'
% self.target.uuid,
[{'path': '/node_uuid',
'value': self.node.uuid,
'op': 'replace'}],
headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.OK, response.status_code)
def test_replace_node_uuid_inalid_type(self, mock_upd):
response = self.patch_json('/volume/targets/%s'
% self.target.uuid,
[{'path': '/node_uuid',
'value': 123,
'op': 'replace'}],
expect_errors=True, headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.BAD_REQUEST, response.status_code)
self.assertIn(b'Expected a UUID for node_uuid, but received 123.',
response.body)
self.assertFalse(mock_upd.called)
def test_add_node_uuid(self, mock_upd):
mock_upd.return_value = self.target
response = self.patch_json('/volume/targets/%s'
% self.target.uuid,
[{'path': '/node_uuid',
'value': self.node.uuid,
'op': 'add'}],
headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.OK, response.status_code)
def test_add_node_uuid_invalid_type(self, mock_upd):
response = self.patch_json('/volume/targets/%s'
% self.target.uuid,
[{'path': '/node_uuid',
'value': 123,
'op': 'add'}],
expect_errors=True, headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.BAD_REQUEST, response.status_code)
self.assertIn(b'Expected a UUID for node_uuid, but received 123.',
response.body)
self.assertFalse(mock_upd.called)
def test_add_node_id(self, mock_upd):
response = self.patch_json('/volume/targets/%s'
% self.target.uuid,
[{'path': '/node_id',
'value': '1',
'op': 'add'}],
headers=self.headers,
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.BAD_REQUEST, response.status_code)
self.assertFalse(mock_upd.called)
def test_replace_node_id(self, mock_upd):
response = self.patch_json('/volume/targets/%s'
% self.target.uuid,
[{'path': '/node_id',
'value': '1',
'op': 'replace'}],
headers=self.headers,
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.BAD_REQUEST, response.status_code)
self.assertFalse(mock_upd.called)
def test_remove_node_id(self, mock_upd):
response = self.patch_json('/volume/targets/%s'
% self.target.uuid,
[{'path': '/node_id',
'op': 'remove'}],
headers=self.headers,
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.BAD_REQUEST, response.status_code)
self.assertFalse(mock_upd.called)
def test_replace_non_existent_node_uuid(self, mock_upd):
node_uuid | |
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 25 16:29:42 2019
@author: Weike (Vicky) Sun <EMAIL>/<EMAIL>
(c) 2020 <NAME>, all rights reserved
"""
"""
Load packages and Set reproduceble results
"""
from sklearn.preprocessing import StandardScaler
import RNN_feedback as RNN_fd
import matplotlib.pyplot as plt
# Seed value
# Each library below gets its own (incremented) seed so the streams differ.
seed_value= 1
# 1. Set `PYTHONHASHSEED` environment variable at a fixed value
# NOTE(review): setting PYTHONHASHSEED from inside an already-running
# interpreter does not change hash randomization for this process — it only
# affects subprocesses; confirm whether that was the intent.
import os
os.environ['PYTHONHASHSEED']=str(seed_value)
seed_value += 1
# 2. Set `python` built-in pseudo-random generator at a fixed value
import random
random.seed(seed_value)
seed_value += 1
# 3. Set `numpy` pseudo-random generator at a fixed value
import numpy as np
np.random.seed(seed_value)
seed_value += 1
# 4. Set `tensorflow` pseudo-random generator at a fixed value
# NOTE: tf.set_random_seed is the TensorFlow 1.x API
# (TF 2.x spells it tf.random.set_seed).
import tensorflow as tf
tf.set_random_seed(seed_value)
def timeseries_RNN_feedback_single_train(X, Y, X_val=None, Y_val=None, X_test=None, Y_test=None, train_ratio=0.8,
                                         cell_type='e', activation='tanh', state_size=2,
                                         batch_size=1, epoch_overlap=None, num_steps=10,
                                         num_layers=1, learning_rate=1e-2, lambda_l2_reg=1e-3,
                                         num_epochs=200, input_prob=0.95, output_prob=0.95, state_prob=0.95,
                                         input_prob_test=1, output_prob_test=1, state_prob_test=1,
                                         max_checks_without_progress=100, epoch_before_val=50,
                                         location='RNN_feedback_0', round_number='', plot=False):
    '''Fit an RNN-feedback model, early-stopping on validation data.

    When test data is given it is evaluated as well; otherwise the returned
    AIC/AICc/BIC (computed on the training data) can be used for
    hyper-parameter selection.

    Input:
        X: training predictors, numpy array N x m
        Y: training response, numpy array N x 1
        X_val, Y_val: optional explicit validation set; when None the tail
            of (X, Y) is held out according to train_ratio
        X_test, Y_test: optional test set, N_test x m / N_test x 1
        train_ratio: float, portion of training data used to fit the model
            (ignored when X_val is provided)
        cell_type: str, RNN cell: LSTM, GRU, anything else for BasicRNN
        activation: str, one of relu, tanh, sigmoid, linear (default tanh)
        state_size: int, number of states in the model
        batch_size: int, batch size used in training
        epoch_overlap: None for no overlap between training patches, or an
            int giving the spacing between patches (0 = adjacent patches)
        num_steps: int, steps of memory used in dynamic_rnn training
        num_layers: int, number of stacked RNN layers (default 1)
        learning_rate: float, Adam learning rate (default 1e-2)
        lambda_l2_reg: float, L2 weight; <= 0 disables regularization
        num_epochs: int, maximum number of training epochs
        input_prob, output_prob, state_prob: dropout keep probabilities
            during training (default 0.95)
        input_prob_test, output_prob_test, state_prob_test: keep
            probabilities during evaluation (default 1, i.e. no dropout)
        max_checks_without_progress: int, epochs without validation
            improvement before early stopping (default 100)
        epoch_before_val: int, epochs trained before validation-based early
            stopping kicks in (default 50)
        location: str, filename stem used to save the trained model
        round_number: str, tag appended to plot filenames
        plot: bool, whether to plot the training results

    Output:
        (prediction_train, prediction_val, prediction_test,
         (AIC, AICc, BIC), train_loss_final, val_loss_final, test_loss_final)
    '''
    print('========= Loading data =========')

    # Partition sizes: without an explicit validation set the tail of (X, Y)
    # is held out according to train_ratio.
    if X_val is None:
        num_train = round(X.shape[0] * train_ratio)
    else:
        num_train = X.shape[0]

    if X_test is not None:
        test = True
        num_test = X_test.shape[0]
    else:
        test = False

    x_num_features = X.shape[1]
    y_num_features = Y.shape[1]

    print('======== Pre-process Data =========')
    if X_val is None:
        # Fit the scalers on the training slice only, then split off the
        # validation tail.
        scaler = StandardScaler()
        scaler.fit(X[:num_train])
        X_train = scaler.transform(X[:num_train])
        X_val = scaler.transform(X[num_train:])

        scalery = StandardScaler()
        scalery.fit(Y[:num_train])
        Y_train = scalery.transform(Y[:num_train])
        Y_val = scalery.transform(Y[num_train:])
    else:
        scaler = StandardScaler()
        scaler.fit(X)
        X_train = scaler.transform(X)
        X_val = scaler.transform(X_val)

        scalery = StandardScaler()
        scalery.fit(Y)
        Y_train = scalery.transform(Y)
        Y_val = scalery.transform(Y_val)

    if test:
        X_test = scaler.transform(X_test)
        Y_test = scalery.transform(Y_test)

    input_size_x = x_num_features
    input_size_y = y_num_features

    print('======== Training =========')
    g_train = RNN_fd.build_multilayer_rnn_graph_with_dynamic_rnn(
        cell_type=cell_type, activation=activation, state_size=state_size,
        num_steps=num_steps, num_layers=num_layers,
        input_size_x=input_size_x, input_size_y=input_size_y,
        learning_rate=learning_rate, lambda_l2_reg=lambda_l2_reg)

    train_loss, val_loss, num_parameter = RNN_fd.train_rnn(
        X_train, Y_train, X_val, Y_val,
        g_train, num_epochs, num_steps, batch_size,
        input_prob, output_prob, state_prob,
        verbose=True, save=location, epoch_overlap=epoch_overlap,
        max_checks_without_progress=max_checks_without_progress,
        epoch_before_val=epoch_before_val)

    # Training failed/diverged: return sentinel losses so a hyper-parameter
    # search can discard this configuration.
    if train_loss is None:
        return (None, None, None, (100000, 100000, 100000),
                100000, 100000, 100000)

    val_loss = np.array(val_loss)

    s = 12  # font size shared by all plots below

    if plot:
        '''Plot the training/validation loss curves'''
        plt.figure()
        plt.plot(train_loss, color='xkcd:sky blue', label='train loss')
        # Validation loss only starts after epoch_before_val epochs.
        plt.plot(np.linspace(epoch_before_val - 1,
                             epoch_before_val + val_loss.shape[0] - 1,
                             num=val_loss.shape[0]),
                 val_loss, color='xkcd:coral', label='val loss')
        # BUG FIX: title used to read "Traingin and validation loss".
        plt.title('Training and validation loss')
        plt.ylabel('Loss')
        plt.xlabel('# of epoch')
        plt.legend(fontsize=s)
        plt.tight_layout()
        plt.savefig('Training and validation error round ' + round_number
                    + '.png', dpi=600, bbox_inches='tight')

    ############################################################################
    """Training Final Results"""
    # Re-build the graph with num_steps = full series length for a single
    # one-shot evaluation pass over the training data.
    g_train_final = RNN_fd.build_multilayer_rnn_graph_with_dynamic_rnn(
        cell_type=cell_type, activation=activation, state_size=state_size,
        num_steps=num_train, num_layers=num_layers,
        input_size_x=input_size_x, input_size_y=input_size_y,
        learning_rate=learning_rate, lambda_l2_reg=lambda_l2_reg)
    prediction_train, train_loss_final, _ = RNN_fd.test_rnn(
        X_train, Y_train, g_train_final, location,
        input_prob_test, output_prob_test, state_prob_test, num_train)

    # Information criteria computed from the final training loss.
    # (BUG FIX: the BIC expression contained a stray "+ +"; the doubled
    # unary plus was harmless but confusing.)
    AIC = num_train * np.log(np.sum(train_loss_final) / y_num_features) \
        + 2 * num_parameter
    AICc = num_train * np.log(np.sum(train_loss_final) / y_num_features) \
        + (num_parameter + num_train) / (1 - (num_parameter + 2) / num_train)
    BIC = num_train * np.log(np.sum(train_loss_final) / y_num_features) \
        + num_parameter * np.log(num_train)

    ############################################################################
    """Validation Final Results"""
    g_val_final = RNN_fd.build_multilayer_rnn_graph_with_dynamic_rnn(
        cell_type=cell_type, activation=activation, state_size=state_size,
        num_steps=X_val.shape[0], num_layers=num_layers,
        input_size_x=input_size_x, input_size_y=input_size_y,
        learning_rate=learning_rate, lambda_l2_reg=lambda_l2_reg)
    prediction_val, val_loss_final, _ = RNN_fd.test_rnn(
        X_val, Y_val, g_val_final, location,
        input_prob_test, output_prob_test, state_prob_test, X_val.shape[0])

    ###############################################for other test sets 0 step
    """Testing Results"""
    if test:
        g_test = RNN_fd.build_multilayer_rnn_graph_with_dynamic_rnn(
            cell_type=cell_type, activation=activation, state_size=state_size,
            num_steps=num_test, num_layers=num_layers,
            input_size_x=input_size_x, input_size_y=input_size_y,
            learning_rate=learning_rate, lambda_l2_reg=lambda_l2_reg)
        prediction_test, test_loss_final, _ = RNN_fd.test_rnn(
            X_test, Y_test, g_test, location,
            input_prob_test, output_prob_test, state_prob_test, num_test)
    else:
        prediction_test = None
        test_loss_final = None

    #############################################plot training results
    if plot:
        import matplotlib
        cmap = matplotlib.cm.get_cmap('Paired')

        # Prediction vs. measured data, one figure per output variable.
        for j in range(y_num_features):
            plt.figure(figsize=(5, 3))
            plt.plot(Y_train[1:, j], color=cmap(j * 2 + 1), label='real')
            plt.plot(prediction_train[1:, j], '--', color='xkcd:coral',
                     label='prediction')
            plt.title('RNN Training data prediction for y' + str(j + 1),
                      fontsize=s)
            plt.xlabel('Time index', fontsize=s)
            plt.ylabel('y', fontsize=s)
            plt.legend(fontsize=s)
            plt.tight_layout()
            plt.savefig('RNN_train_var_' + str(j + 1) + '.png', dpi=600,
                        bbox_inches='tight')

            plt.figure(figsize=(5, 3))
            plt.plot(Y_val[1:, j], color=cmap(j * 2 + 1), label='real')
            plt.plot(prediction_val[1:, j], '--', color='xkcd:coral',
                     label='prediction')
            plt.title('RNN Validation data prediction for y' + str(j + 1),
                      fontsize=s)
            plt.xlabel('Time index', fontsize=s)
            plt.ylabel('y', fontsize=s)
            plt.legend(fontsize=s)
            plt.tight_layout()
            plt.savefig('RNN_val_var_' + str(j + 1) + ' round '
                        + round_number + '.png', dpi=600,
                        bbox_inches='tight')

            if test:
                plt.figure(figsize=(5, 3))
                plt.plot(Y_test[1:, j], color=cmap(j * 2 + 1), label='real')
                plt.plot(prediction_test[1:, j], '--', color='xkcd:coral',
                         label='prediction')
                plt.title('RNN Test data prediction for y' + str(j + 1),
                          fontsize=s)
                plt.xlabel('Time index', fontsize=s)
                plt.ylabel('y', fontsize=s)
                plt.legend(fontsize=s)
                plt.tight_layout()
                plt.savefig('RNN_test_var_' + str(j + 1) + ' round '
                            + round_number + '.png', dpi=600,
                            bbox_inches='tight')

        # Fitting-error (residual) plots.
        for j in range(y_num_features):
            plt.figure(figsize=(5, 3))
            plt.plot(prediction_train[1:, j] - Y_train[1:, j],
                     color=cmap(j * 2 + 1))
            plt.title('RNN Training error for y' + str(j + 1), fontsize=s)
            plt.xlabel('Time index', fontsize=s)
            plt.ylabel('y_pre - y', fontsize=s)
            plt.tight_layout()
            plt.savefig('RNN_train_var_' + str(j + 1) + ' error.png',
                        dpi=600, bbox_inches='tight')

            plt.figure(figsize=(5, 3))
            plt.plot(prediction_val[1:, j] - Y_val[1:, j],
                     color=cmap(j * 2 + 1))
            plt.title('RNN Validation error for y' + str(j + 1), fontsize=s)
            plt.xlabel('Time index', fontsize=s)
            plt.ylabel('y_pre - y', fontsize=s)
            plt.tight_layout()
            plt.savefig('RNN_val_var_' + str(j + 1) + ' round '
                        + round_number + ' error.png', dpi=600,
                        bbox_inches='tight')

            if test:
                plt.figure(figsize=(5, 3))
                plt.plot(prediction_test[1:, j] - Y_test[1:, j],
                         color=cmap(j * 2 + 1))
                plt.title('RNN Test error for y' + str(j + 1), fontsize=s)
                plt.xlabel('Time index', fontsize=s)
                plt.ylabel('y_pre - y', fontsize=s)
                plt.tight_layout()
                plt.savefig('RNN_test_var_' + str(j + 1) + ' round '
                            + round_number + ' error.png', dpi=600,
                            bbox_inches='tight')

    return (prediction_train, prediction_val, prediction_test,
            (AIC, AICc, BIC), train_loss_final, val_loss_final,
            test_loss_final)
def timeseries_RNN_feedback_multi_train(X, Y, X_val, Y_val, timeindex_train, timeindex_val, X_test=None, Y_test=None,\
cell_type='e',activation = 'tanh', state_size = 2,\
batch_size = 1, epoch_overlap = None,num_steps = 10,\
num_layers = 1, learning_rate = 1e-2, lambda_l2_reg = 1e-3,\
num_epochs =200, input_prob = 0.95, output_prob = 0.95, state_prob = 0.95,\
input_prob_test = 1, output_prob_test = 1, state_prob_test = 1,\
max_checks_without_progress = 100,epoch_before_val=50, location='RNN_feedback_0', plot= False):
'''This function fits RNN_feedback model to training data, using validation data to determine when to stop,
when test data is given, it is used to choose the hyperparameter, otherwise AIC will be returned based on training data
to select the hyper parameter
Input:
X: training data predictors numpy array: Nxm
y: training data response numy array: Nx1
timeindex: dictionary, starting from 1, each contanis the time index for that seires
X_test: testing data predictors numpy arrray: N_testxm
y_test: testing data response numpy array: N_test x 1
train_ratio: float, portion of training data used to train the model, and the rest is used as validation data
cell_type: str, type of RNN cell, can be either LSTM, GRU, others for BasicRNN, default = basicRNN
activation: str, type of activation function, can be relu, tanh, sigmoid, linear, default = tanh
state_size: int, number of states in the model
batch_size: | |
# <gh_stars>0  (dataset artifact; not valid Python, kept as a comment)
#%%
import warnings
from pathlib import Path
import numpy as np
import pandas as pd
from scipy.stats import betabinom as sp_betabinom
from metaDMG.viz import viz_utils
def sort_dataframe(df):
    """Return df ordered by sample (descending total N_reads), then tax_id.

    Samples with the most reads overall come first; within a sample the
    rows are sorted by ascending tax_id.  NOTE: a temporary "sample_rank"
    column is added to the input frame as a side effect (it is dropped
    only in the returned copy), matching the original behavior.
    """
    # Total N_reads per sample, largest first.
    # BUG FIX: the original called .sum("N_reads"), which passes the column
    # name as GroupBy.sum()'s `numeric_only` argument and breaks on modern
    # pandas; selecting the column before summing is the intended operation.
    samples_ordered = list(
        df.groupby("sample")["N_reads"]
        .sum()
        .sort_values(ascending=False)
        .index
    )
    # Create the dictionary that defines the order for sorting
    sorterIndex = dict(zip(samples_ordered, range(len(samples_ordered))))
    # Generate a rank column that will be used to sort
    # the dataframe numerically
    df["sample_rank"] = df["sample"].map(sorterIndex)
    df = df.sort_values(
        ["sample_rank", "tax_id"],
        ascending=[True, True],
        inplace=False,
    ).drop(columns="sample_rank")
    return df
def clip_df(df, column):
    """Clip `column` at zero (from below) in place, keeping a raw backup.

    When `column` exists, its original values are preserved under
    "_<column>" and the column itself is clipped to be non-negative.
    Frames without the column are left untouched.
    """
    if column not in df.columns:
        return
    backup_name = "_" + column
    df[backup_name] = df[column]  # keep the raw values around
    df[column] = np.clip(df[column], a_min=0, a_max=None)
def pd_wide_to_long_forward_reverse(group_wide, sep, direction):
    """Melt one strand's wide k/N/f columns into long format.

    `sep` selects the strand-specific columns ("+" forward, "-" reverse);
    `direction` is stamped on every resulting row.  Returns a frame with
    columns tax_id, |x|, k, N, f, direction.
    """
    stubs = ["k", "N", "f"]
    long_format = pd.wide_to_long(
        group_wide,
        stubnames=stubs,
        i="tax_id",
        j="|x|",
        sep=sep,
    )
    # Keep only the stub columns (drops any carried-through id columns).
    long_format = long_format[stubs]
    long_format["direction"] = direction
    return long_format.reset_index()
def wide_to_long_df(group_wide):
    """Convert one sample's wide damage table to long format.

    Forward-strand columns are always melted; reverse-strand columns are
    melted too when present (a ValueError from wide_to_long signals a
    forward-only table).  The sample name is propagated to every row.
    """
    forward = pd_wide_to_long_forward_reverse(
        group_wide, sep="+", direction="Forward")
    try:
        reverse = pd_wide_to_long_forward_reverse(
            group_wide, sep="-", direction="Reverse")
    except ValueError:
        # happens when forward only
        combined = forward
    else:
        combined = pd.concat([forward, reverse])
    combined["sample"] = group_wide["sample"].iloc[0]
    return combined
# def remove_LCA_columns(columns):
# remove_cols = ["tax_name", "tax_rank", "N_alignments"]
# return [col for col in columns if col not in remove_cols]
def correct_for_non_LCA(df):
    """Ensure the LCA metadata columns exist, filling gaps with placeholders.

    Results produced without LCA lack some or all of these columns; missing
    columns are created wholesale, and existing ones have their missing
    values replaced by the same placeholders.  Returns the (mutated) frame.
    """
    placeholders = {
        "N_alignments": np.nan,
        "tax_name": "NO TAX NAME",
        "tax_rank": "NO TAX RANK",
        "tax_path": "NO TAX PATH",
    }
    for column, placeholder in placeholders.items():
        if column in df.columns:
            df.loc[:, column] = df[column].fillna(placeholder)
        else:
            df.loc[:, column] = placeholder
    return df
#%%
def compute_variance_scaling(df, phi_string):
    """Return the per-row variance scaling (phi + N) / (phi + 1).

    phi comes from the column named by `phi_string`; N is computed from the
    N_x=1 count column(s), including the reverse strand when present.
    """
    phi = df[phi_string]
    count_columns = ["N_x=1_forward"]
    if "N_x=1_reverse" in df.columns:
        count_columns.append("N_x=1_reverse")
    # NOTE(review): np.mean over this list of Series collapses everything
    # to a single scalar (not an element-wise mean) — presumably intended;
    # confirm with the original authors.
    N = np.mean([df[column] for column in count_columns])
    return (phi + N) / (phi + 1)
#%%
class VizResults:
    def __init__(self, results_dir):
        """Load the results parquet data and prepare plotting metadata.

        results_dir: path to the metaDMG results parquet file/directory.
        """
        self.results_dir = Path(results_dir)
        self._load_df_results()  # builds self.df plus derived columns/sets
        self._set_cmap()         # per-sample colors and plot symbols
        self._set_hover_info()   # hover-text metadata for the plots
def _load_parquet_file(self, results_dir):
df = pd.read_parquet(results_dir)
return correct_for_non_LCA(df)
    def _load_df_results(self):
        """Load, clean and augment the results frame; populate self.df.

        Also derives per-column log transforms, significance/scaling
        columns, the Bayesian/forward-only flags, and the lookup sets used
        by the filtering UI.
        """
        df = self._load_parquet_file(self.results_dir)
        df = sort_dataframe(df)
        # force tax_id to be categorical strings.
        # XXX remove in final version
        df.loc[:, "tax_id"] = df["tax_id"].astype("str").astype("category")
        # Clip likelihood-ratio columns at zero, keeping raw copies ("_col").
        for column in ["lambda_LR", "forward_lambda_LR", "reverse_lambda_LR"]:
            clip_df(df, column)
        # Bayesian results are considered present only when Bayesian columns
        # exist AND Bayesian_z has no missing values.
        Bayesian = any(["Bayesian" in column for column in df.columns]) and (
            not any(df["Bayesian_z"].isna())
        )
        self.Bayesian = Bayesian
        # Derived significance / scaling columns used in the plots.
        df["D_max_significance"] = df["D_max"] / df["D_max_std"]
        df["rho_Ac_abs"] = np.abs(df["rho_Ac"])
        df["variance_scaling"] = compute_variance_scaling(df, phi_string="phi")
        if Bayesian:
            df["Bayesian_D_max_significance"] = (
                df["Bayesian_D_max"] / df["Bayesian_D_max_std"]
            )
            df["Bayesian_rho_Ac_abs"] = np.abs(df["Bayesian_rho_Ac"])
            df["Bayesian_variance_scaling"] = compute_variance_scaling(
                df, phi_string="Bayesian_phi"
            )
        # log10(1 + x) companions for heavy-tailed columns.
        log_columns = [
            "N_reads",
            "N_alignments",
            "lambda_LR",
            "phi",
            "k_sum_total",
            "N_sum_total",
        ]
        for column in log_columns:
            log_column = "log_" + column
            df.loc[:, log_column] = np.log10(1 + df[column])
        # Forward-only data: no asymmetry values and no forward_A column.
        if np.isnan(df["asymmetry"]).all() and not "forward_A" in df.columns:
            self.forward_only = True
        else:
            self.forward_only = False
        self.df = df
        # Lookup sets/lists consumed by the filtering UI.
        self.all_tax_ids = set(self.df["tax_id"].unique())
        self.all_tax_names = set(self.df["tax_name"].unique())  # if with_LCA else set()
        self.all_tax_ranks = set(self.df["tax_rank"].unique())  # if with_LCA else set()
        self.samples = list(self.df["sample"].unique())
        self.columns = list(self.df.columns)
        self.set_marker_size(variable="N_reads", function="sqrt", slider=30)
def set_marker_size(self, variable="N_reads", function="sqrt", slider=30):
d_functions = {
"constant": np.ones_like,
"linear": lambda x: x,
"sqrt": np.sqrt,
"log10": np.log10,
}
self.df.loc[:, "size"] = d_functions[function](self.df[variable])
self.max_of_size = np.max(self.df["size"])
self.marker_size = slider
def filter(self, filters, *, rank=None):
    """Filter ``self.df`` by assembling a pandas ``query`` expression.

    Args:
        filters (dict): maps a filter kind to its value; ``None`` values are
            skipped. Recognized keys: "sample"/"samples", "tax_id"/"tax_ids",
            "tax_rank"/"tax_ranks", "tax_name"/"tax_names" (equality or
            membership tests), "tax_path" (substring match, routed through
            ``rank``), and any other key is treated as a numeric column with
            a (low, high) range tuple.
        rank (str, keyword-only): case-insensitive substring the ``tax_path``
            column must contain (may also be set via the "tax_path" filter).

    Returns:
        pandas.DataFrame: rows of ``self.df`` matching all active filters.
    """
    # NOTE(review): the parameter name ``filter`` shadows the builtin; kept
    # as-is since renaming would touch behavior-bearing code.
    query = ""
    for column, filter in filters.items():
        if filter is None:
            continue
        elif column == "samples":
            query += f"(sample in {filter}) & "
        elif column == "sample":
            query += f"(sample == '{filter}') & "
        elif column == "tax_id":
            query += f"(tax_id == '{filter}') & "
        elif column == "tax_ids":
            query += f"(tax_id in {filter}) & "
        elif column == "tax_rank":
            # NOTE(review): unlike tax_id above, the value is interpolated
            # unquoted — presumably callers pass an already-quoted string;
            # confirm against call sites.
            query += f"(tax_rank == {filter}) & "
        elif column == "tax_ranks":
            query += f"(tax_rank in {filter}) & "
        elif column == "tax_name":
            query += f"(tax_name == {filter}) & "
        elif column == "tax_names":
            query += f"(tax_name in {filter}) & "
        elif column == "tax_path":
            # tax_path is handled as a substring match after the query.
            rank = filter
        else:
            # Range filter on a numeric column; log-scale slider values are
            # mapped back to linear space first.
            low, high = filter
            if viz_utils.is_log_transform_column(column):
                low = viz_utils.log_transform_slider(low)
                high = viz_utils.log_transform_slider(high)
            query += f"({low} <= {column} <= {high}) & "
    # Strip the trailing "& " separator.
    # NOTE(review): if no filter was active the query is empty and
    # ``df.query("")`` raises — presumably callers always pass at least one
    # active filter; confirm.
    query = query[:-2]
    # print(query)
    df_out = self.df.query(query)
    if rank is not None:
        mask = df_out["tax_path"].str.lower().str.contains(rank.lower())
        df_out = df_out.loc[mask]
    return df_out
def filter_tax_path(self, rank):
    """Return the subset of ``self.df`` whose ``tax_path`` contains ``rank``.

    The match is the (case-sensitive, regex-based) substring search performed
    by ``pandas.Series.str.contains``.
    """
    matches = self.df["tax_path"].str.contains(rank)
    return self.df.loc[matches]
def _set_cmap(self):
# https://plotly.com/python/discrete-color/#color-sequences-in-plotly-express
# blue, orange, green, red, purple, brown, pink, grey, camouflage, turquoise
# cmap = px.colors.qualitative.D3
# cmap taken from http://www.cookbook-r.com/Graphs/Colors_%28ggplot2%29/
cmap = [
"#0072B2",
"#D55E00",
"#009E73",
"#CC79A7",
"#E69F00",
"#56B4E9",
"#F0E442",
]
N_cmap = len(cmap)
groupby = self.df.groupby("sample", sort=False)
symbol_counter = 0
d_cmap = {}
d_symbols = {}
markers = ["o", "s", "^", "v", "<", ">", "d"]
d_markers = {}
for i, (name, _) in enumerate(groupby):
if (i % N_cmap) == 0 and i != 0:
symbol_counter += 1
d_cmap[name] = cmap[i % N_cmap]
d_symbols[name] = symbol_counter % 44 # max Plotly symbol number
d_markers[name] = markers[symbol_counter % len(markers)]
self.cmap = cmap
self.d_cmap = d_cmap
self.d_symbols = d_symbols
self.d_markers = d_markers
self.d_cmap_fit = {"Forward": cmap[0], "Reverse": cmap[3], "Fit": cmap[2]}
def _set_hover_info(self):
    """Build the Plotly hover template and its backing customdata columns.

    ``self.custom_data_columns`` lists the dataframe columns handed to Plotly
    as ``customdata``; ``self.hovertemplate`` references them by position.
    Both are first written with ``_XXX_`` placeholders, optional sections are
    spliced in, and the placeholders are then numbered 0, 1, 2, ... in order
    of appearance — so the template stays in sync with the column list even
    after the Bayesian and forward-only insertions.
    """
    placeholder = "_XXX_"
    self.custom_data_columns = [
        "sample",
        "tax_name",
        "tax_rank",
        "tax_id",
        # Bayesian Fits
        # Frequentist fits
        "lambda_LR",
        "D_max",
        "D_max_std",
        "q",
        "q_std",
        "phi",
        "phi_std",
        "asymmetry",
        "rho_Ac",
        # Counts
        "N_reads",
        "N_alignments",
        "N_sum_total",
        "k_sum_total",
    ]
    # Columns spliced in before "lambda_LR" when Bayesian fits are present.
    custom_data_columns_Bayesian = [
        "Bayesian_z",
        "Bayesian_D_max",
        "Bayesian_D_max_std",
        "Bayesian_q",
        "Bayesian_q_std",
        "Bayesian_phi",
        "Bayesian_phi_std",
        "Bayesian_rho_Ac",
    ]
    self.hovertemplate = (
        "<b>%{customdata[_XXX_]}</b><br><br>"
        "<b>Tax</b>: <br>"
        " Name: %{customdata[_XXX_]} <br>"
        " Rank: %{customdata[_XXX_]} <br>"
        " ID: %{customdata[_XXX_]} <br><br>"
        "<b>MAP results</b>: <br>"
        " LR: %{customdata[_XXX_]:9.2f} <br>"
        " D max: %{customdata[_XXX_]:9.2f} ± %{customdata[_XXX_]:.2f} <br>"
        " q: %{customdata[_XXX_]:9.2f} ± %{customdata[_XXX_]:.2f} <br>"
        " phi: %{customdata[_XXX_]:.3s} ± %{customdata[_XXX_]:.3s} <br>"
        " asymmetry:%{customdata[_XXX_]:9.3f} <br>"
        " rho_Ac: %{customdata[_XXX_]:9.3f} <br><br>"
        "<b>Counts</b>: <br>"
        " N reads: %{customdata[_XXX_]:.3s} <br>"
        " N alignments:%{customdata[_XXX_]:.3s} <br>"
        " N sum total: %{customdata[_XXX_]:.3s} <br>"
        " k sum total: %{customdata[_XXX_]:.3s} <br>"
        "<extra></extra>"
    )
    # Extra section spliced in before "MAP results" for Bayesian fits.
    hovertemplate_Bayesian = (
        "<b>Fit results</b>: <br>"
        " z: %{customdata[_XXX_]:9.2f} <br>"
        " D max: %{customdata[_XXX_]:9.2f} ± %{customdata[_XXX_]:.2f} <br>"
        " q: %{customdata[_XXX_]:9.2f} ± %{customdata[_XXX_]:.2f} <br>"
        " phi: %{customdata[_XXX_]:.3s} ± %{customdata[_XXX_]:.3s} <br>"
        " rho_Ac: %{customdata[_XXX_]:9.3f} <br><br>"
    )
    # Flag forward-only datasets directly in the hover box.
    if self.forward_only:
        index = self.hovertemplate.find("<b>MAP results</b>: <br>")
        self.hovertemplate = (
            self.hovertemplate[:index]
            + "<b>Forward only! </b><br><br>"
            + self.hovertemplate[index:]
        )
    # if Bayesian fits, include these results
    if self.Bayesian:
        index = self.custom_data_columns.index("lambda_LR")
        self.custom_data_columns[index:index] = custom_data_columns_Bayesian
        index = self.hovertemplate.find("<b>MAP results</b>: <br>")
        self.hovertemplate = (
            self.hovertemplate[:index]
            + hovertemplate_Bayesian
            + self.hovertemplate[index:]
        )
    # Fill in the templates with data: replace each _XXX_ placeholder, left
    # to right, with the consecutive integers 0, 1, 2, ... so that the n-th
    # placeholder indexes the n-th entry of custom_data_columns.
    data_counter = 0
    i = 0
    while True:
        if self.hovertemplate[i : i + len(placeholder)] == placeholder:
            s_new = self.hovertemplate[:i]
            s_new += str(data_counter)
            s_new += self.hovertemplate[i + len(placeholder) :]
            self.hovertemplate = s_new
            data_counter += 1
        i += 1
        if i >= len(self.hovertemplate):
            break
    # if not self.with_LCA:
    #     self.custom_data_columns = remove_LCA_columns(self.custom_data_columns)
    # self.customdata = self.df[self.custom_data_columns]
    # Template for the fit-overlay trace (uses y / error_y, not customdata).
    self.hovertemplate_fit = (
        "Fit: <br>D(x) = %{y:.3f} ± %{error_y.array:.3f}<br>" "<extra></extra>"
    )
def parse_click_data(self, click_data, column):
    """Extract the value of ``column`` from a Plotly click event.

    Args:
        click_data (dict): ``clickData`` payload from a Plotly figure; the
            value is read from ``points[0]["customdata"]``.
        column (str): name of the column to extract; must appear in
            ``self.custom_data_columns``.

    Returns:
        The customdata value stored at the column's position.

    Raises:
        ValueError: if ``column`` is not in ``self.custom_data_columns``.
        KeyError / IndexError: if ``click_data`` lacks the expected layout.
    """
    # The original wrapped this in ``try/except Exception as e: raise e``,
    # which handled nothing and only rewrote the traceback; let exceptions
    # propagate naturally instead.
    index = self.custom_data_columns.index(column)
    return click_data["points"][0]["customdata"][index]
def get_single_count_group(self, sample, tax_id, forward_reverse=""):
    """Return the long-format count rows for one (sample, tax_id) pair.

    Optionally restricts the result to a single read direction when
    ``forward_reverse`` is "forward" or "reverse" (case-insensitive); any
    other value returns both directions.
    """
    selection = self.df.query(f"sample == '{sample}' & tax_id == '{tax_id}'")
    group = wide_to_long_df(selection)
    direction = forward_reverse.lower()
    if direction == "forward":
        return group.query("direction=='Forward'")
    if direction == "reverse":
        return group.query("direction=='Reverse'")
    return group
def get_single_fit_prediction(self, sample, tax_id, forward_reverse=""):
    """Compute the fitted damage-model prediction D(x) for one (sample, tax_id).

    Args:
        sample (str): sample identifier (must match exactly one df row
            together with ``tax_id``).
        tax_id (str): taxon identifier.
        forward_reverse (str): "", "forward", or "reverse" — which strand's
            fitted parameters and counts to use. Empty string selects the
            combined (Bayesian if available, else MAP) fit.

    Returns:
        dict with keys "mu", "std", "Dx", "|x|", "text" — or the string
        "FORWARD ONLY" when a reverse prediction is requested on
        forward-only data.

    Raises:
        AssertionError: if the query does not match exactly one row.
    """
    query = f"sample == '{sample}' & tax_id == '{tax_id}'"
    ds = self.df.query(query)
    if len(ds) != 1:
        raise AssertionError(f"Something wrong here, got: {ds}")
    if self.forward_only:
        # No reverse-strand data exists; fall back to the combined fit.
        if forward_reverse.lower() == "reverse":
            return "FORWARD ONLY"
        else:
            forward_reverse = ""
    group = self.get_single_count_group(sample, tax_id, forward_reverse)
    # Select which set of fitted parameters to read via a column prefix.
    if forward_reverse.lower() == "forward":
        prefix = "forward_"
    elif forward_reverse.lower() == "reverse":
        prefix = "reverse_"
    else:
        if self.Bayesian:
            prefix = "Bayesian_"
        else:
            prefix = ""
    A = getattr(ds, f"{prefix}A").values
    q = getattr(ds, f"{prefix}q").values
    c = getattr(ds, f"{prefix}c").values
    phi = getattr(ds, f"{prefix}phi").values
    max_position = viz_utils.get_max_position_from_group(group)
    abs_x = group["|x|"].values[:max_position]
    N = group["N"].values[:max_position]
    # Damage model: geometrically decaying damage plus constant background.
    Dx = A * (1 - q) ** (abs_x - 1) + c
    # Beta-binomial parameterized by mean Dx and concentration phi.
    alpha = Dx * phi
    beta = (1 - Dx) * phi
    dist = sp_betabinom(n=N, a=alpha, b=beta)
    with warnings.catch_warnings():
        # Positions with N == 0 produce division warnings / NaNs; silenced
        # here and the NaNs are zeroed just below.
        warnings.filterwarnings("ignore")
        std = dist.std() / N
    std[np.isnan(std)] = 0
    text = self._ds_to_fit_text(ds)
    d_out = {"mu": Dx, "std": std, "Dx": Dx, "|x|": abs_x, "text": text}
    return d_out
def _ds_to_fit_text(self, ds):
d_columns_latex = viz_utils.get_d_columns_latex(self)[0]
sanitize = (
lambda s: d_columns_latex[s]
.strip(r"$")
.replace(r"\text{(MAP)}", r"")
.replace(r"\text", r"\mathrm")
)
text = r"$\mathrm{Bayesian}" if self.Bayesian else r"$\mathrm{MAP}"
text += r"\,\, \mathrm{fit}$" + "\n\n"
D_max_col = "Bayesian_D_max" if self.Bayesian else "D_max"
D_max_str = sanitize(D_max_col)
D_max = | |
#!/usr/bin/env python
""" Contains validation functions.
These functions are important to ensure that only valid values are passed to the processing functions.
Validation could likely be improved by moving to WTForms, which Overwatch already depends
upon for CSRF protection.
.. codeauthor:: <NAME> <<EMAIL>>, Yale University
"""
# General
import json
from flask import request
# Used to parse GET parameters
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
# Config
from ..base import config
(serverParameters, filesRead) = config.readConfig(config.configurationType.webApp)
# Logging
import logging
# Setup logger
logger = logging.getLogger(__name__)
def validateLoginPostRequest(request):
    """ Validates the login POST request.

    Note:
        The error format is different here. Instead of a list in a dict, we simply have a string.

    Args:
        request (Flask.request): The request object from Flask.

    Return
        tuple: (errorValue, username, password), where errorValue (str) contains the error that
            may have occurred, username (str) is the username extracted from POST request, and
            password (str) is the password extracted from POST request. On error, username and
            password are ``None``.
    """
    errorValue = None
    # Initialize both values so the return statement below cannot raise a
    # NameError when the lookup fails before either assignment completes
    # (previously they were unbound on the error path).
    username = None
    password = None
    try:
        # We enforce the type as a string here
        username = request.form.get("username", type=str)
        password = request.form.get("password", type=str)
    except KeyError as e:
        errorValue = "Key error in " + e.args[0] + ". Please enter a username and password in the form."
    return (errorValue, username, password)
def validateTimeSlicePostRequest(request, runs):
    """ Validates the time slice POST request.

    The return tuple contains the validated values. The error value should always be checked first
    before using the other return values (they will be safe, but may not be meaningful).

    Warning:
        If an error occurs in determining the run or subsystem, we cannot retrieve the rest of the
        information necessary to validate the request, so the rest of the values in the return tuple are
        set to ``None``.

    Note:
        For the error format in ``errorValue``, see the :doc:`web app README </webAppReadme>`.

    Note:
        The listed args (after the first two) are provided through the flask ``request.form`` dictionary.

    Args:
        request (Flask.request): The request object from Flask.
        runs (BTree): Dict-like object which stores all run, subsystem, and hist information. Keys are the
            in the ``runDir`` format ("Run123456"), while the values are ``runContainer`` objects.
        minTime (float): Minimum time for the time slice.
        maxTime (float): Maximum time for the time slice.
        runDir (str): String containing the run number. For an example run 123456, it should be
            formatted as ``Run123456``.
        subsystemName (str): The current subsystem in the form of a three letter, all capital name (ex. ``EMC``).
        scaleHists (str): True if the hists should be scaled by the number of events. Converted from string to bool.
        hotChannelThreshold (int): Value of the hot channel threshold.
        histGroup (str): Name of the requested hist group. It is fine for it to be an empty string.
        histName (str): Name of the requested histogram. It is fine for it to be an empty string.

    Returns:
        tuple: (error, minTime, maxTime, runDir, subsystemName, histGroup, histName, inputProcessingOptions)
            where error (dict) contains any possible errors, minTime (float) and maxTime (float) bound the
            time slice, runDir (str) is the run dir formatted string, subsystemName (str) is the current
            subsystem in the form of a three letter, all capital name (ex. ``EMC``), histGroup (str) and
            histName (str) identify the requested histograms, and inputProcessingOptions (dict) holds the
            validated processing options.
    """
    error = {}
    # Initialize every returned value up front so that a failure inside either
    # ``try`` block cannot lead to a NameError at a return statement
    # (previously a KeyError left these unbound and the final return crashed).
    minTime = None
    maxTime = None
    runDir = None
    subsystemName = None
    histGroup = None
    histName = None
    inputProcessingOptions = {}
    try:
        # Enforce the particular types via ``get(...)``.
        minTime = request.form.get("minTime", -1, type=float)
        maxTime = request.form.get("maxTime", None, type=float)
        runDir = request.form.get("runDir", None, type=str)
        subsystemName = request.form.get("subsystem", None, type=str)
        scaleHists = request.form.get("scaleHists", False, type=str)
        hotChannelThreshold = request.form.get("hotChannelThreshold", -1, type=int)
        histGroup = convertRequestToStringWhichMayBeEmpty("histGroup", request.form)
        histName = convertRequestToStringWhichMayBeEmpty("histName", request.form)
        # See: https://stackoverflow.com/a/23139085
    except KeyError as e:
        # Format is:
        # errors = {'hello2': ['world', 'world2'], 'hello': ['world', 'world2']}
        # See: https://stackoverflow.com/a/2052206
        error.setdefault("keyError", []).append("Key error in " + e.args[0])
        # Without the request values we cannot validate anything further.
        return (error, minTime, maxTime, runDir, subsystemName, histGroup, histName, inputProcessingOptions)
    # Validate values based on available runs.
    try:
        # Retrieve run
        if runDir in runs.keys():
            run = runs[runDir]
        else:
            error.setdefault("Run Dir", []).append("Run dir {runDir} is not available in runs!".format(runDir = runDir))
            # Invalidate and we cannot continue.
            # (Fixed: previously returned a 9-tuple here, which did not match
            # the 8-tuple returned on success and broke unpacking callers.)
            return (error, None, None, None, None, None, None, None)
        # Retrieve subsystem
        if subsystemName in run.subsystems.keys():
            subsystem = run.subsystems[subsystemName]
        else:
            error.setdefault("subsystem", []).append("Subsystem name {subsystemName} is not available in {prettyName}!".format(subsystemName = subsystemName, prettyName = run.prettyName))
            # Invalidate and we cannot continue
            return (error, None, None, None, None, None, None, None)
        # Check times
        if minTime < 0:
            error.setdefault("minTime", []).append("{minTime} less than 0!".format(minTime = minTime))
        if maxTime > subsystem.runLength:
            error.setdefault("maxTime", []).append("Max time of {maxTime} greater than the run length of {runLength}".format(maxTime = maxTime, runLength = subsystem.runLength))
        if minTime > maxTime:
            # Fixed: the format keyword was previously ``maxtime`` (lowercase),
            # which raised a KeyError whenever this branch was reached.
            error.setdefault("minTime", []).append("minTime {minTime} is greater than maxTime {maxTime}".format(minTime = minTime, maxTime = maxTime))
        # Validate histGroup and histName
        # NOTE: It could be valid for both to be None!
        validateHistGroupAndHistName(histGroup, histName, subsystem, run, error)
        # Processing options
        # Ensure scaleHists is a bool (any provided string value counts as True).
        if scaleHists is not False:
            scaleHists = True
        inputProcessingOptions["scaleHists"] = scaleHists
        # Check hot channel threshold
        # NOTE: The max hot channel threshold (hotChannelThreshold) is also defined here!
        if hotChannelThreshold < 0 or hotChannelThreshold > 1000:
            # NOTE: We also mention the hot channel limits here
            error.setdefault("hotChannelThreshold", []).append("Hot channel threshold {hotChannelThreshold} is outside the possible range of 0-1000!".format(hotChannelThreshold = hotChannelThreshold))
        inputProcessingOptions["hotChannelThreshold"] = hotChannelThreshold
    # Handle an unexpected exception
    except Exception as e:
        error.setdefault("generalError", []).append("Unknown exception! " + str(e))
    return (error, minTime, maxTime, runDir, subsystemName, histGroup, histName, inputProcessingOptions)
def validateRunPage(runDir, subsystemName, requestedFileType, runs):
""" Validates requests to the various run page types (handling individual run pages and root files).
The return tuple contains the validated values. The error value should always be checked first
before using the other return values (they will be safe, but may not be meaningful).
Note:
For the error format in ``error``, see the :doc:`web app README </webAppReadme>`.
Note:
The listed args (after the first four) are provided through the flask ``request.args`` dictionary.
Args:
runDir (str): String containing the run number. For an example run 123456, it should be
formatted as ``Run123456``
subsystemName (str): The current subsystem in the form of a three letter, all capital name (ex. ``EMC``).
requestedFileType (str): Either "runPage", which corresponds to a standard run page or "rootFiles", which
corresponds to the page displaying the available root files.
runs (BTree): Dict-like object which stores all run, subsystem, and hist information. Keys are the
in the ``runDir`` format ("Run123456"), while the values are ``runContainer`` objects. This should
be retrieved from the database.
jsRoot (bool): True if the response should use jsRoot instead of images.
ajaxRequest (bool): True if the response should be via AJAX.
requestedHistGroup (str): Name of the requested hist group. It is fine for it to be an empty string.
requestedHist (str): Name of the requested histogram. It is fine for it to be an empty string.
Returns:
tuple: (error, run, subsystem, requestedFileType, jsRoot, ajaxRequest, requestedHistGroup, requestedHist, timeSliceKey, timeSlice)
where error (dict) contains any possible errors, run (runContainer) corresponds to the current
run, subsystem (subsystemContainer) corresponds to the current subsystem, requestedFileType (str)
is the type of run page ("runPage" or "rootFiles"), jsRoot (bool) is True if the response should
use jsRoot, ajaxRequest (bool) is true if the response should be as AJAX, requestedHistGroup (str)
is the name of the requested hist group, requestedHist (str) is the name of the requested histogram,
timeSliceKey (str) is the time slice key, and timeSlice (timeSliceContainer) is the time slice object.
For more on the last two arguments, see ``retrieveAndValidateTimeSlice(...)``.
"""
error = {}
try:
# Set and validate run
if runDir in runs.keys():
run = runs[runDir]
else:
error.setdefault("Run Dir", []).append("{runDir} is not a valid run dir! Please select a different run!".format(runDir = runDir))
# Invalidate and we cannot continue
return (error, None, None, None, None, None, None, None, None, None)
# Set subsystem and validate
if subsystemName in run.subsystems.keys():
subsystem = | |
#2 program a car yourself
# There is a self driving car Carla,
# you can program it at the end of self driving car nanodegree.
#3 quiz state space
# class 1 = histogram filter, or monte carlo filter,
# discrete state space, multimodal distribution,
# exponential efficienty, approximate
# class 2 = kalman filter
# continuous state space, unimodal,
# quadratic efficiency, covariance matrix is quadratic, approximate
# class 3 = particle filters
# continuous state space, multimodal distributions, approximate,
# efficiency not sure, but no good for >4 dims
# advantage of particle filter is that it is easy to program.
#7 particle filters
# Here is a floor plan of the environment, where the robot is located, and it has to perform
# global localization, which is it has no clue where it is and it has to find out
# where it is just based on sensor measurements.
# The robot has range sensors, blue stripes, those use sonar sensors, which means
# sound, to range the distance of nearer obstacles, and it has to use these range
# sensors to determine a good posterior distribution as to where it is. What the
# robot doesn't know is that it is starting in the middle of the corridor. In fact
# it is completely uncertain as to where it is.Now the particle filter represents
# this using particles, each of these red dots of which there several thousands here
# is a discreet guess where the robot might be. it is structured as an x coordinate
# y coordinate and a heading direction, and those 3 values together comprise a
# single guess. But a single guess is not a filter, it is the set of several
# thousand of such guesses that together comprise an approximate representation for
# the posterior of the robot.
# In the beginning, the particles are uniformly spread, but the particle filter make
# them survive, in proportion to how consistent one of these particles is with the
# sensor measurement. Very quickly the robot has figured out it's in the corridor,
# but 2 clouds survive, based on symmetry of the corridor. As the robot enters one
# of the offices, the symmetry is broken and the correct set of particles survive.
# The essence of the particle filter is to have these particles guess where the robot
# might be moving, but to have them survive using effectively survival of the fittest,
# so that particles that are more consistent with the measurements are more likely
# to survive and as a result places of high probability will collect more particles,
# and therefore be more representative of the robot's posterior belief.
# Those particles together, those thousands of particles, are now clustered in a
# single location. Those comprise the approximate belief of the robot as it localizes
# itself.
#10 moving robot
# # Make a robot called myrobot that starts at
# coordinates 30, 50 heading north (pi/2).
# Have your robot turn clockwise by pi/2, move
# 15 m, and sense. Then have it turn clockwise
# by pi/2 again, move 10 m, and sense again.
#
# Your program should print out the result of
# your two sense measurements.
#
# Don't modify the code below. Please enter
# your code at the bottom.
from math import *
import random
landmarks = [[20.0, 20.0], [80.0, 80.0], [20.0, 80.0], [80.0, 20.0]] #square
world_size = 100.0
class robot:
    """A simple 2-D robot living in a cyclic square world of side ``world_size``.

    State is an (x, y) position plus a heading angle in [0, 2*pi). Optional
    Gaussian noise is applied to forward motion, turning, and range sensing
    against the global ``landmarks`` list.
    """

    def __init__(self):
        # Start at a uniformly random pose with all noise disabled.
        self.x = random.random() * world_size
        self.y = random.random() * world_size
        self.orientation = random.random() * 2.0 * pi
        self.forward_noise = 0.0
        self.turn_noise = 0.0
        self.sense_noise = 0.0

    def set(self, new_x, new_y, new_orientation):
        """Place the robot at an exact pose, validating every component."""
        if new_x < 0 or new_x >= world_size:
            raise ValueError('X coordinate out of bound')
        if new_y < 0 or new_y >= world_size:
            raise ValueError('Y coordinate out of bound')
        if new_orientation < 0 or new_orientation >= 2 * pi:
            raise ValueError('Orientation must be in [0..2pi]')
        self.x = float(new_x)
        self.y = float(new_y)
        self.orientation = float(new_orientation)

    def set_noise(self, new_f_noise, new_t_noise, new_s_noise):
        """Set forward/turn/sense noise; useful when tuning particle filters."""
        self.forward_noise = float(new_f_noise)
        self.turn_noise = float(new_t_noise)
        self.sense_noise = float(new_s_noise)

    def sense(self):
        """Return noisy Euclidean distances from the robot to every landmark."""
        readings = []
        for landmark in landmarks:
            distance = sqrt((self.x - landmark[0]) ** 2 + (self.y - landmark[1]) ** 2)
            distance += random.gauss(0.0, self.sense_noise)
            readings.append(distance)
        return readings

    def move(self, turn, forward):
        """Return a NEW robot advanced by ``turn`` then ``forward`` (with noise)."""
        if forward < 0:
            raise ValueError('Robot cant move backwards')
        # Turn first, adding randomness to the turning command.
        heading = self.orientation + float(turn) + random.gauss(0.0, self.turn_noise)
        heading %= 2 * pi
        # Then drive forward, adding randomness to the motion command.
        distance = float(forward) + random.gauss(0.0, self.forward_noise)
        new_x = (self.x + cos(heading) * distance) % world_size  # cyclic world
        new_y = (self.y + sin(heading) * distance) % world_size
        # Build the successor robot, carrying the noise settings along.
        successor = robot()
        successor.set(new_x, new_y, heading)
        successor.set_noise(self.forward_noise, self.turn_noise, self.sense_noise)
        return successor

    def Gaussian(self, mu, sigma, x):
        """Probability density of ``x`` under a 1-D Gaussian N(mu, sigma^2)."""
        return exp(- ((mu - x) ** 2) / (sigma ** 2) / 2.0) / sqrt(2.0 * pi * (sigma ** 2))

    def measurement_prob(self, measurement):
        """Likelihood of ``measurement`` given this robot's (true) pose."""
        prob = 1.0
        for index, landmark in enumerate(landmarks):
            dist = sqrt((self.x - landmark[0]) ** 2 + (self.y - landmark[1]) ** 2)
            prob *= self.Gaussian(dist, self.sense_noise, measurement[index])
        return prob

    def __repr__(self):
        return '[x=%.6s y=%.6s orient=%.6s]' % (str(self.x), str(self.y), str(self.orientation))
# end of class robot:
def eval(r, p):
    """Mean Euclidean distance between the particles ``p`` and the true robot ``r``.

    Distances are computed on the torus — the world wraps around — so each
    coordinate difference is first reduced to [-world_size/2, world_size/2).
    NOTE: shadows the builtin ``eval``; name kept for lesson compatibility.
    """
    total = 0.0
    for particle in p:
        dx = (particle.x - r.x + (world_size / 2.0)) % world_size - (world_size / 2.0)
        dy = (particle.y - r.y + (world_size / 2.0)) % world_size - (world_size / 2.0)
        total += sqrt(dx * dx + dy * dy)
    return total / float(len(p))
#### DON'T MODIFY ANYTHING ABOVE HERE! ENTER CODE BELOW ####
# Demo: move a noisy robot twice and print its landmark range readings.
myrobot = robot()  # fresh robot at a random pose
myrobot.set_noise(5.0, 0.1, 5.0)  # forward, turn, and sense noise (quiz #11)
myrobot.set(30.0, 50.0, pi/2)  # start at (30, 50) heading north
myrobot = myrobot.move(-pi/2, 15.0)  # turn clockwise, should end near (45, 50) heading 0
print(myrobot.sense())  # distance measurements to the four landmarks
myrobot = myrobot.move(-pi/2, 10.0)  # turn clockwise again, should end near (45, 40) heading down
print(myrobot.sense())  # second measurement vector (the world wraps around)
# the world wraps around
#13 creating a particle
# The particle filter you are going to program contains a set of 1000 random
# guesses as to where the robot might be. Now I'm not going to draw 1000 dots here,
# but let me explain how each of these dots look like. Each of these dots is a
# vector which contains an X coordinate, a Y coordinate, and a heading direction,
# which is the angle at which the robot points relative to the X axis.
# So this robot moves forward, it will move slightly upwards. In fact, now a code,
# everytime you call the function robot, and assign it say to a particle, these
# elements p[i] x,y,orientation, which is the same as the heading, are initialized
# at random. So to make a particle set of 1000 particles, what you have to do is
# program a simple piece of code that assigns 1000 of these to a list.
# So, set N=1000 for 1000 particles, p is the initial set of particles, which is
# an empty list, and fill in code after which there are 1000 particles assigned to
# this vector over here.
# so it is a thousand vectors for a thousand particles, each vector as x y orientation
# initialized at random.
#
# Now we want to create particles,
# p[i] = robot(). In this assignment, write
# code that will assign 1000 such particles
# to a list.
#
# Your program should print out the length
# of your list (don't cheat by | |
ground-truth targets.
Returns:
x (Tensor): the result of the feature extractor
proposals (list[BoxList]): during training, the subsampled proposals
are returned. During testing, the predicted boxlists are returned
losses (dict[Tensor]): During training, returns the losses for the
head. During testing, returns an empty dict.
"""
# during TRAINING:
# (features is a list of 5 elements)
# (features[0] has shape: (16, 256, h/4, w/4)) (not always exactly h/4, w/4)
# (features[1] has shape: (16, 256, h/8, w/8))
# (features[2] has shape: (16, 256, h/16, w/16))
# (features[3] has shape: (16, 256, h/32, w/32))
# (features[4] has shape: (16, 256, h/64, w/64))
#
# (targets is a list of 16 elements, each element is a BoxList (e.g. [BoxList(num_boxes=3, image_width=800, image_height=1066, mode=xyxy), BoxList(num_boxes=19, image_width=800, image_height=1201, mode=xyxy),...]))
if self.training:
# Faster R-CNN subsamples during training the proposals with a fixed
# positive / negative ratio
with torch.no_grad():
proposals = self.loss_evaluator.sample_jittered_boxes(targets) #######################################################
# (proposals is a list of 16 elements, each element is a BoxList, num_boxes in each BoxList is M*{num_boxes for the corresponding BoxList in targets})
# extract features that will be fed to the final classifier. The
# feature_extractor generally corresponds to the pooler + heads
x = self.feature_extractor(features, proposals)
# (x has shape: (num_preds, 1024)) (num_preds is different from batch to batch, e.g. 12032 or 19072 or 20992)
# final classifier that converts the features into predictions
iou_score = self.predictor(x)
# (iou_score has shape: (num_preds, 81)) (81 is the number of classes)
if not self.training:
if self.cfg.MODEL.ROI_IOU_HEAD.PERFORM_FILTERING and self.cfg.MODEL.ROI_IOU_HEAD.NMS_BEFORE:
result = self.post_processor(proposals, iou_score)
else:
result = proposals
with torch.enable_grad():
result = self.optimize_boxes(features, result)
if self.cfg.MODEL.ROI_IOU_HEAD.PERFORM_FILTERING and not self.cfg.MODEL.ROI_IOU_HEAD.NMS_BEFORE:
x = self.feature_extractor(features, result)
# final classifier that converts the features into predictions
iou_score = self.predictor(x)
result = self.post_processor(result, iou_score)
return x, result, {}
if self.training:
loss_iou = self.loss_evaluator(
iou_score
)
# (loss_iou is just a tensor of a single value)
return (
x,
proposals,
dict(loss_iou=loss_iou),
)
else:
return x, iou_score, {}
class ROIIoUHead_kldis(torch.nn.Module): ############################################################################
"""
"""
def __init__(self, cfg, in_channels):
    """Build the IoU head: feature extractor, score predictor, post-processor,
    and loss evaluator, all constructed from ``cfg``.

    Args:
        cfg: project configuration node (reads ``MODEL.ROI_IOU_HEAD.*``).
        in_channels (int): channel count of the incoming feature maps.
    """
    super(ROIIoUHead_kldis, self).__init__()
    self.feature_extractor = make_roi_iou_feature_extractor(cfg, in_channels)
    self.predictor = make_roi_iou_predictor(
        cfg, self.feature_extractor.out_channels)
    self.post_processor = make_roi_iou_post_processor(cfg)
    self.loss_evaluator = make_roi_iou_loss_evaluator(cfg)
    # LOSS_TYPE also selects the box-refinement space in optimize_boxes:
    # "L2" refines in absolute coordinates, anything else in relative space.
    self.mode = cfg.MODEL.ROI_IOU_HEAD.LOSS_TYPE
    self.cfg = cfg
def optimize_boxes(self, features, boxes):
    """Refine boxes by gradient ascent on the predicted IoU score.

    Runs ``NUM_REFINE_ITER`` iterations; each iteration backpropagates the
    per-box IoU score with respect to the box coordinates, takes a step of
    size ``STEP_LENGTH``, and keeps the step only where the score improved
    (otherwise the step length is decayed by ``STEP_LENGTH_DECAY``).
    Refinement happens either directly in xyxy coordinates ("default", used
    with the L2 loss) or in a size-normalized relative space.

    Args:
        features (list[Tensor]): feature maps from possibly several levels.
        boxes (list[BoxList]): per-image boxes carrying "scores" and
            "box_labels" fields.

    Returns:
        list[BoxList]: refined boxes with "scores"/"labels"/"box_labels"
        fields restored.
    """
    # Optimize iounet boxes
    step_length = self.cfg.MODEL.ROI_IOU_HEAD.STEP_LENGTH
    if isinstance(step_length, (tuple, list)):
        # Broadcast a 1- or 2-element config value to per-coordinate steps.
        if len(step_length) == 1:
            step_length = torch.Tensor([step_length[0], step_length[0], step_length[0], step_length[0]]).to(
                features[0].device).view(1, 4)
        elif len(step_length) == 2:
            step_length = torch.Tensor([step_length[0], step_length[0], step_length[1], step_length[1]]).to(
                features[0].device).view(1, 4)
        else:
            raise ValueError
    if self.mode == "L2":
        box_refinement_space = "default"
    else:
        box_refinement_space = 'relative'
    box_refinement_iter = self.cfg.MODEL.ROI_IOU_HEAD.NUM_REFINE_ITER
    boxes_per_image = [b.bbox.shape[0] for b in boxes]
    # One step-length tensor per image, one row per box.
    step_length = [step_length.clone().expand(b.bbox.shape[0], -1).contiguous() for b in boxes]
    labels_list = [b.get_field("box_labels") for b in boxes]
    labels = torch.cat(labels_list)
    scores = [b.get_field("scores") for b in boxes]
    # Gradients must flow from the IoU score back to the box coordinates.
    for f in features:
        f.requires_grad = True
    if box_refinement_space == 'default':
        # raise NotImplementedError
        # omega1 = 0.001
        # omega2 = -0.01
        for i_ in range(box_refinement_iter):
            # forward pass
            # Assume box format is xyxy
            bb_init = [BoxList(b.bbox.clone().detach(), b.size, b.mode) for b in boxes]
            for b in bb_init:
                b.bbox.requires_grad = True
            x = self.feature_extractor(features, bb_init)
            iou_score = self.predictor(x)
            # Keep only each box's score for its own class label.
            iou_score = iou_score[torch.arange(iou_score.shape[0]), labels]
            iou_score.backward(gradient = torch.ones_like(iou_score))
            # Update proposal: gradient step scaled by the box's width/height.
            bb_refined = [BoxList((b.bbox + s * b.bbox.grad * (b.bbox[:, 2:] - b.bbox[:, :2]).repeat(1, 2)).detach(),
                                  b.size, b.mode) for b, s in zip(bb_init, step_length)]
            # Re-score the refined boxes without tracking gradients.
            with torch.no_grad():
                x = self.feature_extractor(features, bb_refined)
                new_iou_score = self.predictor(x)
                new_iou_score = new_iou_score[torch.arange(new_iou_score.shape[0]), labels]
            refinement_failed = (new_iou_score < iou_score)
            refinement_failed = refinement_failed.view(-1, 1)
            refinement_failed = refinement_failed.split(boxes_per_image, dim=0)
            # Keep the refined box only where the score improved.
            # NOTE(review): ``1 - r_f`` relies on arithmetic with a bool
            # tensor — confirm supported by the pinned torch version.
            boxes = [BoxList(b_i.bbox * r_f.float() + b_r.bbox * (1 - r_f).float(), b_i.size, b_i.mode)
                     for b_i, b_r, r_f in zip(bb_init, bb_refined, refinement_failed)]
            # decay step length for failures
            decay_factor = self.cfg.MODEL.ROI_IOU_HEAD.STEP_LENGTH_DECAY
            step_length = [s * (1 - r_f).float() + s * decay_factor * r_f.float()
                           for s, r_f in zip(step_length, refinement_failed)]
    elif box_refinement_space == 'relative':
        # Refine in a size-normalized parameterization (rect_to_rel).
        boxes = [b.convert("xywh") for b in boxes]
        sz_norm = [b.bbox[:, 2:].clone() for b in boxes]
        # TODO test this
        boxes_rel = [BoxList(rect_to_rel(b.bbox, s), b.size, b.mode) for b, s in zip(boxes, sz_norm)]
        for i_ in range(box_refinement_iter):
            # forward pass
            bb_init_rel = [BoxList(b.bbox.clone().detach(), b.size, b.mode) for b in boxes_rel]
            for b in bb_init_rel:
                b.bbox.requires_grad = True
            # Map back to absolute xyxy boxes for feature extraction.
            bb_init = [BoxList(rel_to_rect(b.bbox, s), b.size, b.mode) for b, s in zip(bb_init_rel, sz_norm)]
            bb_init = [b.convert('xyxy') for b in bb_init]
            x = self.feature_extractor(features, bb_init)
            iou_score = self.predictor(x)
            iou_score = iou_score[torch.arange(iou_score.shape[0]), labels]
            iou_score.backward(gradient=torch.ones_like(iou_score))
            # Update proposal (step taken in the relative space).
            bb_refined_rel = [BoxList((b.bbox + s * b.bbox.grad).detach(), b.size, b.mode)
                              for b, s in zip(bb_init_rel, step_length)]
            bb_refined = [BoxList(rel_to_rect(b.bbox, s), b.size, b.mode) for b, s in zip(bb_refined_rel, sz_norm)]
            bb_refined = [b.convert('xyxy') for b in bb_refined]
            with torch.no_grad():
                x = self.feature_extractor(features, bb_refined)
                new_iou_score = self.predictor(x)
                new_iou_score = new_iou_score[torch.arange(new_iou_score.shape[0]), labels]
            refinement_failed = (new_iou_score < iou_score)
            refinement_failed = refinement_failed.view(-1, 1)
            refinement_failed = refinement_failed.split(boxes_per_image, dim=0)
            # Keep the refined relative box only where the score improved.
            boxes_rel = [BoxList(b_i.bbox * r_f.float() + b_r.bbox * (1 - r_f).float(), b_i.size, b_i.mode)
                         for b_i, b_r, r_f in zip(bb_init_rel, bb_refined_rel, refinement_failed)]
            # decay step length for failures
            decay_factor = self.cfg.MODEL.ROI_IOU_HEAD.STEP_LENGTH_DECAY
            step_length = [s*(1 - r_f).float() + s*decay_factor*r_f.float()
                           for s, r_f in zip(step_length, refinement_failed)]
        # Convert the final relative boxes back to absolute xyxy.
        boxes = [BoxList(rel_to_rect(b.bbox, s), b.size, b.mode) for b, s in zip(boxes_rel, sz_norm)]
        boxes = [b.convert("xyxy") for b in boxes]
    # Restore the fields that were stripped by rebuilding the BoxLists.
    for b, s, l in zip(boxes, scores, labels_list):
        b.add_field("scores", s)
        b.add_field("labels", l)
        b.add_field("box_labels", l)
    return boxes
def forward(self, features, proposals=None, targets=None, iteration=None, original_image_ids=None): ###############################################################
"""
Arguments:
features (list[Tensor]): feature-maps from possibly several levels
proposals (list[BoxList]): proposal boxes
targets (list[BoxList], optional): the ground-truth targets.
Returns:
x (Tensor): the result of the feature extractor
proposals (list[BoxList]): during training, the subsampled proposals
are returned. During testing, the predicted boxlists are returned
losses (dict[Tensor]): During training, returns the losses for the
head. During testing, returns an empty dict.
"""
# during TRAINING:
# (features is a list of 5 elements)
# (features[0] has shape: (16, 256, h/4, w/4)) (not always exactly h/4, w/4)
# (features[1] has shape: (16, 256, h/8, w/8))
# (features[2] has shape: (16, 256, h/16, w/16))
# (features[3] has shape: (16, 256, h/32, w/32))
# (features[4] has shape: (16, 256, h/64, w/64))
#
# (targets is a list of 16 elements, each element is a BoxList (e.g. [BoxList(num_boxes=3, image_width=800, image_height=1066, mode=xyxy), BoxList(num_boxes=19, image_width=800, image_height=1201, mode=xyxy),...]))
if self.training:
# Faster R-CNN subsamples during training the proposals with a fixed
# positive / negative ratio
with torch.no_grad():
proposals = self.loss_evaluator.sample_jittered_boxes(targets) #######################################################
# (proposals is a list of 16 elements, each element is a BoxList, num_boxes in each BoxList is M*{num_boxes for the corresponding BoxList in targets})
# extract features that will be fed to the final classifier. The
# feature_extractor generally corresponds to the pooler + heads
x = self.feature_extractor(features, proposals)
# (x has shape: (num_preds, 1024)) (num_preds is different from batch to batch, e.g. 12032 or 19072 or 20992)
# final classifier that converts the features into predictions
iou_score = self.predictor(x)
# (iou_score has shape: (num_preds, 81)) (81 is the number of classes)
if not self.training:
if self.cfg.MODEL.ROI_IOU_HEAD.PERFORM_FILTERING and self.cfg.MODEL.ROI_IOU_HEAD.NMS_BEFORE:
result = self.post_processor(proposals, iou_score)
else:
result = proposals
with torch.enable_grad():
result = self.optimize_boxes(features, result)
if self.cfg.MODEL.ROI_IOU_HEAD.PERFORM_FILTERING and not self.cfg.MODEL.ROI_IOU_HEAD.NMS_BEFORE:
x = self.feature_extractor(features, result)
# final classifier that converts the features into predictions
iou_score = self.predictor(x)
result = self.post_processor(result, iou_score)
return x, result, {}
if self.training:
loss_iou = self.loss_evaluator(
iou_score
)
# (loss_iou is just a tensor of a single value)
return (
x,
proposals,
dict(loss_iou=loss_iou),
)
else:
return x, iou_score, {}
class ROIIoUHead_nce(torch.nn.Module): ############################################################################
"""
"""
def __init__(self, cfg, in_channels):
super(ROIIoUHead_nce, self).__init__()
self.feature_extractor = make_roi_iou_feature_extractor(cfg, in_channels)
self.predictor = make_roi_iou_predictor(
cfg, self.feature_extractor.out_channels)
self.post_processor = make_roi_iou_post_processor(cfg)
self.loss_evaluator = make_roi_iou_loss_evaluator(cfg)
self.mode = cfg.MODEL.ROI_IOU_HEAD.LOSS_TYPE
self.cfg = cfg
def optimize_boxes(self, features, boxes):
# Optimize iounet boxes
step_length = self.cfg.MODEL.ROI_IOU_HEAD.STEP_LENGTH
if isinstance(step_length, (tuple, list)):
if len(step_length) == 1:
step_length = torch.Tensor([step_length[0], step_length[0], step_length[0], step_length[0]]).to(
features[0].device).view(1, 4)
elif len(step_length) == 2:
step_length = torch.Tensor([step_length[0], step_length[0], step_length[1], step_length[1]]).to(
features[0].device).view(1, 4)
else:
raise ValueError
if self.mode == "L2":
box_refinement_space = "default"
else:
box_refinement_space = 'relative'
box_refinement_iter = self.cfg.MODEL.ROI_IOU_HEAD.NUM_REFINE_ITER
boxes_per_image = [b.bbox.shape[0] for b in boxes]
step_length = [step_length.clone().expand(b.bbox.shape[0], -1).contiguous() for b in boxes]
labels_list = [b.get_field("box_labels") | |
#!/usr/bin/env python
"""
createSubStatsPlot.py
migrated from cactusAssemblathonSubErrorPlotter.py
16 March 2011
( 10 March 2011 )
dent earl, dearl(a) soe ucsc edu
"""
##############################
# Copyright (C) 2009-2011 by
# <NAME> (<EMAIL>, <EMAIL>)
# <NAME> (<EMAIL>, <EMAIL>)
# <NAME> (<EMAIL>)
# ... and other members of the Reconstruction Team of David Haussler's
# lab (BME Dept. UCSC).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
##############################
import glob
import libAssemblySubset as las
import libGeneral as lgn
import libPlotting as lpt
import matplotlib.backends.backend_pdf as pltBack
import matplotlib.lines as lines
import matplotlib.patches as patches
import matplotlib.pylab as pylab
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator, FormatStrFormatter # minor tick marks
import numpy
from optparse import OptionParser
import os
import signal # deal with broken pipes
import sys
import re
import xml.etree.ElementTree as ET
import xml.parsers.expat as expat # exception handling for empty xml
signal.signal( signal.SIGPIPE, signal.SIG_DFL ) # broken pipes
class Data:
    """Empty placeholder class used as a plain attribute namespace."""
    pass
class Assembly:
    """Per-assembly substitution statistics.

    Instances are filled in from one ``*.subStats.lower.xml`` and one
    ``*.subStats.upper.xml`` file; the two dicts map stat names to ints.
    """
    def __init__(self):
        self.ID = ''
        # sentinel -1 means "not yet populated"
        self.allLo = -1
        self.allUp = -1
        # stat name -> integer value, from the lower/upper xml files
        self.subStatsLower = {}
        self.subStatsUpper = {}
def initOptions(parser):
    """Register this script's command-line options on *parser*."""
    parser.add_option('--subStatsDir', dest='subStatsDir',
                      type='string',
                      help=('Directory with subStats. Names: A1.subStats.upper.xml .'))
    # boolean flags, all off by default
    booleanFlags = [
        ('outputRanks', 'Outputs tab delimited rankings. default=%default'),
        ('raw', 'Doesn\'t normalize errors by the "Correct (bits)" '
                'field, print raw values. default=%default'),
    ]
    for flagName, helpText in booleanFlags:
        parser.add_option('--' + flagName, dest=flagName, default=False,
                          action='store_true', help=helpText)
def checkOptions(options, parser):
    """Validate required directory options; report problems via parser.error."""
    requiredDirs = {'subStatsDir': options.subStatsDir}
    for name, path in requiredDirs.items():
        if not path:
            parser.error('specify --%s\n' % name)
        if not os.path.exists(path):
            parser.error('--%s %s does not exist!\n' % (name, path))
        if not os.path.isdir(path):
            parser.error('--%s %s is not a directory!\n' % (name, path))
def readSubStatsDir(assembliesDict, options):
    """Fill *assembliesDict* with Assembly objects read from xml stat files.

    Scans options.subStatsDir for ``*.subStats.lower.xml`` and
    ``*.subStats.upper.xml`` files. The assembly ID is the leading 2-3
    character token of each file name. Files with malformed xml are
    silently skipped; assemblies outside options.assemblySubset are
    skipped when options.subsetFile is set. Root-attribute values are
    stored as ints in subStatsLower / subStatsUpper.

    Returns the (mutated) assembliesDict.
    """
    lowerStatsFiles = glob.glob(os.path.join(options.subStatsDir, '*.subStats.lower.xml'))
    upperStatsFiles = glob.glob(os.path.join(options.subStatsDir, '*.subStats.upper.xml'))
    namereg = r'^([A-Z0-9]{2,3})\.subStats.*'
    namepat = re.compile(namereg)
    for l in lowerStatsFiles:
        m = re.match(namepat, os.path.basename(l))
        if not m:
            # bugfix: message now ends with a newline
            sys.stderr.write('unable to match regex "%s" against filename "%s"\n' % (namereg, l))
            sys.exit(1)
        ID = m.group(1)
        if options.subsetFile and ID not in options.assemblySubset:
            continue
        try:
            # bugfix: parse exactly once (the original parsed every file twice)
            xmlTree = ET.parse(l)
        except expat.ExpatError:  # broken xml file
            continue
        root = xmlTree.getroot()
        assembliesDict[ID] = Assembly()
        assembliesDict[ID].ID = ID
        for elm in root.attrib.keys():
            assembliesDict[ID].subStatsLower[elm] = int(float(root.attrib[elm]))
    for u in upperStatsFiles:
        m = re.match(namepat, os.path.basename(u))
        if not m:
            # bugfix: pass the regex STRING (namereg), not the compiled pattern
            sys.stderr.write('unable to match regex "%s" against filename "%s"\n' % (namereg, u))
            sys.exit(1)
        ID = m.group(1)
        if options.subsetFile and ID not in options.assemblySubset:
            continue
        if ID not in assembliesDict:
            # the matching lower file was missing or unparseable
            # bugfix: the original never applied the %s substitution
            sys.stderr.write('unable to locate key %s in assembliesDict.\n' % ID)
            sys.exit(1)
        try:
            xmlTree = ET.parse(u)
        except expat.ExpatError:  # broken xml file
            continue
        root = xmlTree.getroot()
        for elm in root.attrib.keys():
            assembliesDict[ID].subStatsUpper[elm] = int(float(root.attrib[elm]))
    return assembliesDict
def establishAxes(fig, options, data):
    """Create the three stacked axes ('all', 'hom', 'het') on *fig*.

    Layout fractions are stored on *options* as a side effect so that
    later drawing code can reuse them. Returns a dict mapping panel
    name to its axes.
    """
    axDict = {}
    # figure-fraction layout constants (stashed on options for reuse)
    options.axLeft = 0.1
    options.axRight = 0.95
    options.axWidth = options.axRight - options.axLeft
    options.margin = 0.10
    options.axTop = 0.96
    options.axBot = 0.08
    options.axHeight = options.axTop - options.axBot
    axesNames = ['all', 'hom', 'het']
    numberOfAxes = len(axesNames)
    # height of each individual panel after subtracting inter-panel margins
    options.indHeight = float(options.axHeight - (numberOfAxes - 1.0) * options.margin) / numberOfAxes
    yPos = options.indHeight
    for name in axesNames:
        axDict[name] = fig.add_axes([options.axLeft, options.axTop - yPos,
                                     options.axWidth, options.indHeight])
        yPos += options.indHeight + options.margin
    # restyle every panel: keep only left/bottom spines, pushed outward
    for name in axDict:
        for loc, spine in axDict[name].spines.iteritems():
            if loc in ['left', 'bottom']:
                spine.set_position(('outward', 10))  # outward by 10 points
            elif loc in ['right', 'top']:
                spine.set_color('none')  # don't draw spine
            else:
                raise ValueError('unknown spine location: %s' % loc)
        # turn off ticks where there is no spine
        axDict[name].xaxis.set_ticks_position('bottom')
        axDict[name].yaxis.set_ticks_position('both')
    return axDict
def drawData( assembliesDict, sortOrder, axDict, options, data ):
lGray = ( 0.8, 0.8, 0.8 )
# all plot
yMax = 0
yMin = sys.maxint
xNames = []
for aName in sortOrder:
a = assembliesDict[ aName ]
if yMax < float( a.allUp ):
yMax = float( a.allUp )
if float(a.allLo) == 0.0:
continue
if yMin > float( a.allLo ):
yMin = float( a.allLo )
if options.raw:
yMin = logLower( yMin )
# partitions
for i in xrange( 1, len( assembliesDict ) + 1):
if not i % 5:
axDict[ 'all' ].add_line( lines.Line2D( xdata=[ i, i ],
ydata=[ yMin, yMax * 1.1],
linestyle='dotted',
color=lGray))
i = 0
for aName in sortOrder:
a = assembliesDict[ aName ]
i += 1
axDict[ 'all' ].add_line( lines.Line2D( xdata=[ i, i ],
ydata=[ a.allLo, a.allUp ],
color='#1f77b4',
linewidth= 4.0,
solid_capstyle='round'))
if options.subsetFile:
xNames.append( lgn.idMap[ aName[0] ] )
else:
xNames.append( lgn.idMap[ aName[0] ]+'.'+aName[1:] )
#if not options.normalize:
axDict[ 'all' ].set_yscale('log')
if yMin > yMax:
sys.stderr.write( 'Error, yMin > yMax: %f > %f\n' % ( yMin, yMax ))
sys.exit( 1 )
axDict[ 'all' ].set_ylim( [ yMin * 0.9, yMax * 1.1] )
axDict[ 'all' ].set_xlim( 0, len(xNames) + 1 )
axDict[ 'all' ].set_xticks( range( 1, len(xNames) + 1 ))
axDict[ 'all' ].set_xticklabels( xNames )
for label in axDict[ 'all' ].xaxis.get_ticklabels():
label.set_rotation( 45 )
if not options.subsetFile:
for tick in axDict[ 'all' ].xaxis.get_major_ticks():
tick.label1.set_fontsize( 6 )
# all the other plots
axNames = { 'hom':'totalErrorsInHomozygous',
'het':'totalErrorsInHeterozygous'}
# 'indel':'Total-errors-in-one-haplotype-only' }
for key in axNames:
yMax = 0
yMin = sys.maxint
i = 0
for aName in sortOrder:
i += 1
a = assembliesDict[ aName ]
if yMax < float( a.subStatsUpper[ axNames[ key ] ]):
yMax = float( a.subStatsUpper[ axNames[ key ] ])
if a.subStatsLower[ axNames[ key ]] > 0:
if yMin > float( a.subStatsLower[ axNames[ key ] ]):
yMin = float( a.subStatsLower[ axNames[ key ] ])
if options.raw:
yMin = logLower( yMin )
# partitions
for i in xrange( 1, len( assembliesDict ) + 1):
if not i % 5:
axDict[ key ].add_line( lines.Line2D( xdata=[ i, i ],
ydata=[ yMin, yMax * 1.1],
linestyle='dotted',
color=lGray))
i = 0
for aName in sortOrder:
a = assembliesDict[ aName ]
i += 1
axDict[ key ].add_line( lines.Line2D( xdata=[ i, i ],
ydata=[ a.subStatsLower[ axNames[ key ]],
a.subStatsUpper[ axNames[ key ]]],
color='#1f77b4',
linewidth=4.0,
solid_capstyle='round'))
#if not options.normalize:
axDict[ key ].set_yscale('log')
axDict[ key ].set_ylim( [ yMin, yMax] )
axDict[ key ].set_xlim( 0, len(xNames) + 1 )
axDict[ key ].set_xticks( range( 1, len(xNames) + 1 ))
# grid
for ax in axDict:
mts = axDict[ax].yaxis.get_majorticklocs()
for m in mts:
axDict[ax].add_line( lines.Line2D( xdata=[ 1, len(xNames) ],
ydata=[ m, m ],
linewidth=1,
color=lGray,
linestyle='dotted'))
if key == 'het':
axDict[ key ].set_xticklabels( xNames )
else:
axDict[ key ].set_xticklabels( [] )
for label in axDict[ key ].xaxis.get_ticklabels():
label.set_rotation( 45 )
if not options.subsetFile:
for tick in axDict[ key ].xaxis.get_major_ticks():
tick.label1.set_fontsize( 6 )
if not options.raw:
suffix = ' / Correct (bits)'
else:
suffix = ''
axDict['all'].set_title( 'Sum | |
r, or
# 2) we are putting things back after a failed transaction.
# In case 1, if r has a shape_i client, we will want to
# replace the shape_i of r with the shape of new_r. Say that
# r is *scheduled*.
# At that point, node is no longer a client of r, but of new_r
for (shpnode, idx) in (r.clients + [(node, i)]):
if isinstance(getattr(shpnode, 'op', None), Shape_i):
idx = shpnode.op.i
repl = self.shape_of[new_r][idx]
if repl.owner is shpnode:
# This mean the replacement shape object is
# exactly the same as the current shape object. So
# no need for replacement. This happen for example
# with the InputToGpuOptimizer optimizer.
continue
if (repl.owner and
repl.owner.inputs[0] is shpnode.inputs[0] and
isinstance(repl.owner.op, Shape_i) and
repl.owner.op.i == shpnode.op.i):
# The replacement is a shape_i of the same
# input. So no need to do this equivalent
# replacement.
continue
if shpnode.outputs[0] in theano.gof.graph.ancestors([repl]):
raise InconsistencyError(
"This substitution would insert a cycle in the graph:"
"node: %s, i: %i, r: %s, new_r: %s"
% (node, i, r, new_r))
self.scheduled[shpnode] = new_r
# In case 2, if r is a variable that we've scheduled for shape update,
# then we should cancel it.
unscheduled = [k for k, v in self.scheduled.items() if v == r]
for k in unscheduled:
del self.scheduled[k]
# In either case, r could be in shape_of.values(), that is, r itself
# is the shape of something. In that case, we want to update
# the value in shape_of, to keep it up-to-date.
for v in self.shape_of_reverse_index.get(r, []):
# The reverse index is only approximate. It is not updated on
# deletion of variables, or on change_input so it might be the
# case that there are a few extra `v`'s in it that no longer have
# a shape of r or possibly have been deleted from shape_of
# entirely. The important thing is that it permits to recall
# all variables with r in their shape.
for ii, svi in enumerate(self.shape_of.get(v, [])):
if svi == r:
self.set_shape_i(v, ii, new_r)
self.shape_of_reverse_index[r] = set()
    def same_shape(self, x, y, dim_x=None, dim_y=None):
        """Return True if we are able to assert that x and y have the
        same shape.

        dim_x and dim_y are optional. If used, they should be an index
        to compare only 1 dimension of x and y.

        Note: a False result means "could not prove equality", not that
        the shapes are known to differ.
        """
        # symbolic shape tuples tracked by this feature (may be None)
        sx = self.shape_of[x]
        sy = self.shape_of[y]
        if sx is None or sy is None:
            return False
        # restrict the comparison to a single dimension when requested
        if dim_x is not None:
            sx = [sx[dim_x]]
        if dim_y is not None:
            sy = [sy[dim_y]]
        assert len(sx) == len(sy)
        # We look on each dimensions we want to compare.
        # If any of them can't be asserted to be equal, return False.
        # Otherwise, we return True at the end.
        for dx, dy in zip(sx, sy):
            # identical variable object => trivially the same shape entry
            if dx is dy:
                continue
            # Need to try to find that they are the same shape. We
            # need to compare the full graph. It could be slow. So I
            # just implement for now the case of Shape_i.
            if not dx.owner or not dy.owner:
                return False
            if (not isinstance(dx.owner.op, Shape_i) or
                    not isinstance(dy.owner.op, Shape_i)):
                return False
            opx = dx.owner.op
            opy = dy.owner.op
            # both must be the shape of the same dimension index
            if not (opx.i == opy.i):
                return False
            # FB I'm not sure if this handle correctly constants.
            if dx.owner.inputs[0] == dy.owner.inputs[0]:
                continue
            # To be sure to cover all case, call equal_computation.
            # Can't use theano.gof.graph.is_same_graph(dx, dy)
            # As it currently expect that dx and dy aren't in a FunctionGraph
            # (local import to avoid a circular-import at module load time
            # -- TODO confirm that is the reason it is not at file top)
            from theano.scan_module.scan_utils import equal_computations
            if not equal_computations([dx], [dy]):
                return False
        return True
class ShapeOptimizer(Optimizer):
    """Optimizer that serves to add ShapeFeature as an fgraph feature.

    It performs no graph rewriting itself; attaching the feature is its
    entire job.
    """
    def __init__(self):
        Optimizer.__init__(self)

    def add_requirements(self, fgraph):
        """Attach the shape-tracking feature to the function graph."""
        fgraph.attach_feature(ShapeFeature())

    def apply(self, fgraph):
        """Intentionally a no-op: the attached feature does the work."""
        pass
# Register it after merge1 optimization at 0. We don't want to track
# the shape of merged node. (Position 0.1 places it just after the pass
# registered at 0, and before later fast_run/fast_compile passes.)
theano.compile.mode.optdb.register('ShapeOpt', ShapeOptimizer(),
                                   0.1, 'fast_run', 'fast_compile')
def local_elemwise_alloc_op(ElemwiseOP, AllocOP, DimShuffleOP):
    """Factory returning a local optimizer specialized for the given
    Elemwise / Alloc / DimShuffle op classes (so the same rewrite can be
    instantiated for different backends' op variants).
    """
    def local_elemwise_alloc(node):
        """
        elemwise(alloc(x, shp), ..., y.TensorType(BROADCAST CONDITION))
          -> elemwise(x, y.TensorType(BROADCAST CONDITION))

        elemwise(dimshuffle(alloc(x, shp)),... ,y.TensorType(BROADCAST CONDITION))
          -> elemwise(x.dimshuffle(...), y.TensorType(BROADCAST CONDITION))

        BROADCAST CONDITION: the condition is that the one input that are
        not to be optimized to have the same broadcast pattern as the
        output.

        We can change the alloc by a dimshuffle as the elemwise
        already have the shape info. The dimshuffle will be faster
        to exec.

        Returns a list of replacement outputs, or False when the rewrite
        does not apply.
        """
        if not isinstance(node.op, ElemwiseOP):
            return False

        if len(node.outputs) > 1:
            # Ensure all outputs have the same broadcast pattern
            # This is a supposition that I'm not sure is always true.
            assert all([o.type.broadcastable ==
                        node.outputs[0].type.broadcastable for o in
                        node.outputs[1:]])

        # The broadcast pattern of the ouptut must match the broadcast
        # pattern of at least one of the inputs.
        if not any([i.type.broadcastable ==
                    node.outputs[0].type.broadcastable for i in node.inputs]):
            return False

        def dimshuffled_alloc(i):
            # True when i is DimShuffle(Alloc(...)). NOTE: callers must
            # ensure i.owner is not None before calling.
            return (isinstance(i.owner.op, DimShuffleOP) and
                    i.owner.inputs[0].owner and
                    isinstance(i.owner.inputs[0].owner.op, AllocOP))

        # At least one input must have an owner that is either a AllocOP or a
        # DimShuffleOP with an owner that is a AllocOP -- otherwise there is
        # nothing to optimize.
        if not any([i.owner and (isinstance(i.owner.op, AllocOP) or
                                 dimshuffled_alloc(i)) for i in node.inputs]):
            return False

        # Search for input that we can use as a baseline for the dimensions.
        assert_op_idx = -1
        for idx, i in enumerate(node.inputs):
            if i.type.broadcastable == node.outputs[0].type.broadcastable:
                # Prefer an input that is not a AllocOP nor a DimShuffleOP of a
                # AllocOP so that all allocs can be optimized.
                if not (i.owner and (isinstance(i.owner.op, AllocOP) or
                        dimshuffled_alloc(i))):
                    assert_op_idx = idx
                    break

        # It may be the case that only AllocOP and DimShuffleOP of AllocOP exist.
        if assert_op_idx < 0:
            # We want to optimize as many allocs as possible. When
            # there is more than one then do all but one. number of
            # inputs with alloc or dimshuffle alloc
            l2 = [i for i in node.inputs
                  if (i.owner and (isinstance(i.owner.op, AllocOP) or
                      dimshuffled_alloc(i)))]
            # If only 1 alloc or dimshuffle alloc, it is the one we
            # will use for the shape. So no alloc would be removed.
            if len(l2) > 1:
                # l containt inputs with alloc or dimshuffle alloc
                # only. Its length will always be at least one, as we
                # checked that before
                l = [idx for idx, i in enumerate(node.inputs)
                     if i.broadcastable == node.outputs[0].broadcastable]
                assert_op_idx = l[0]  # The first one is as good as any to use.
            else:
                # Nothing would be optimized!
                return False

        # The baseline input; runtime shape-assertions get chained onto it.
        assert_op = node.inputs[assert_op_idx]
        cmp_op = assert_op
        new_i = []
        same_shape = node.fgraph.shape_feature.same_shape
        for i in node.inputs:
            # Remove alloc
            if (i.owner and isinstance(i.owner.op, AllocOP) and
                    i.owner.inputs[0].type != i.owner.outputs[0].type):
                # when i.owner.inputs[0].type == i.owner.outputs[0].type we
                # will remove that alloc later
                assert i.type.ndim == cmp_op.ndim
                get_shape = node.fgraph.shape_feature.get_shape
                if theano.config.experimental.local_alloc_elemwise_assert:
                    # guard the rewrite with runtime shape-equality checks
                    # for every dim we could not prove equal statically
                    cond = []
                    for idx in xrange(i.type.ndim):
                        if (not i.type.broadcastable[idx] and
                                not same_shape(i, cmp_op, idx, idx)):
                            i_shp = get_shape(i, idx)
                            cmp_shp = get_shape(cmp_op, idx)
                            cond.append(T.eq(i_shp, cmp_shp))
                    if cond:
                        assert_op = assert_(assert_op, *cond)
                new_i.append(i.owner.inputs[0])

            # Remove Alloc in DimShuffle
            elif i.owner and dimshuffled_alloc(i):
                assert i.type.ndim == cmp_op.type.ndim
                if theano.config.experimental.local_alloc_elemwise_assert:
                    assert_cond = [T.eq(i.shape[idx], cmp_op.shape[idx])
                                   for idx in xrange(i.type.ndim)
                                   if not i.type.broadcastable[idx] and
                                   not same_shape(i, cmp_op, idx, idx)]
                    if assert_cond:
                        assert_op = assert_(assert_op, *assert_cond)
                alloc_input = i.owner.inputs[0].owner.inputs[0]
                if alloc_input.ndim != i.owner.inputs[0].ndim:
                    # The alloc can add dimension to the value
                    # We add a dimshuffle to add them.
                    # We let later optimization merge the multiple dimshuffle
                    nb_dim_to_add = i.owner.inputs[0].ndim - alloc_input.ndim
                    alloc_input = alloc_input.dimshuffle(
                        ['x'] * nb_dim_to_add +
                        list(range(alloc_input.ndim)))

                # We need to keep the dimshuffle. It could swap axes or
                # add dimensions anywhere.
                r_i = i.owner.op(alloc_input)
                # Copy stack trace from i to new_i
                copy_stack_trace(i, r_i)
                new_i.append(r_i)
            else:
                # input untouched by this rewrite
                new_i.append(i)
        # re-insert the (possibly assert-wrapped) baseline input
        new_i[assert_op_idx] = assert_op

        ret = node.op(*new_i, return_list=True)
        # Copy over stack trace from previous outputs to new outputs.
        copy_stack_trace(node.outputs, ret)
        return ret
    return local_elemwise_alloc
# TODO, global optimizer that lift the assert to the beginning of the graph.
# TODO, optimize all inputs |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.