text stringlengths 38 1.54M |
|---|
import math
import time
from array import array
import x86
from ..core import Vector3
from .bbox import BBox
import renmas2.switch as proc
def clamp(x, minimum, maximum):
    """Clamp *x* into [minimum, maximum] (the lower bound wins if the
    bounds are inverted, matching max(minimum, min(maximum, x)))."""
    upper_bounded = min(maximum, x)
    return max(minimum, upper_bounded)
class Grid:
def __init__(self):
    # No eager state: self.bbox, self.cells and the nx/ny/nz grid
    # resolution are all created later by setup() / _create_grid().
    pass
def _bbox(self, shapes):
    """Compute the axis-aligned bounding box enclosing every shape and
    store it in ``self.bbox``.

    Shapes that carry a prebuilt ``_grid`` contribute that grid's bbox;
    all others are asked directly via ``shape.bbox()``.  Starts from an
    inverted sentinel box (+/-9999999) so any real bbox replaces it.
    """
    lo = Vector3(9999999.0, 9999999.0, 9999999.0)
    hi = Vector3(-9999999.0, -9999999.0, -9999999.0)
    total = BBox(lo, hi, None)
    for shape in shapes:
        box = shape._grid.bbox if hasattr(shape, "_grid") else shape.bbox()
        total.x0 = min(total.x0, box.x0)
        total.y0 = min(total.y0, box.y0)
        total.z0 = min(total.z0, box.z0)
        total.x1 = max(total.x1, box.x1)
        total.y1 = max(total.y1, box.y1)
        total.z1 = max(total.z1, box.z1)
    self.bbox = total
def setup(self, shapes):
    """Build the uniform grid: compute the scene bounding box, choose a
    per-axis resolution and distribute every shape into the cells its
    bbox overlaps.

    Also records the statistics (max_length_in_cell, num_objects,
    num_arrays) that _create_grid() later needs to size the native
    buffers.
    """
    self._bbox(shapes)
    bbox = self.bbox
    # world-space extents of the grid
    wx = bbox.x1 - bbox.x0
    wy = bbox.y1 - bbox.y0
    wz = bbox.z1 - bbox.z0
    multiplier = 1.3 # about 8 times more cells than objects if multiplier is 2 TODO test this!
    # s ~ edge length of a cube that would hold one shape on average
    # (approximate cube root of volume per shape)
    s = math.pow(wx * wy * wz / float(len(shapes)), 0.333333)
    # resolution per axis, capped at 192 cells each
    self.nx = nx = int(multiplier * wx / s + 1)
    if nx > 192: self.nx = nx = 192
    self.ny = ny = int(multiplier * wy / s + 1)
    if ny > 192: self.ny = ny = 192
    self.nz = nz = int(multiplier * wz / s + 1)
    if nz > 192: self.nz = nz = 192
    num_cells = int(nx * ny * nz)
    self.cells = cells = [] # we need to initialize empty lists
    for c in range(num_cells):
        cells.append([])
    # these counters are required for creation of the array buffer
    max_len = 0      # max number of shapes referenced by a single cell
    num_arrays = 0   # number of non-empty cells (incremented on first insert)
    num_objects = 0  # total shape references over all cells (duplicates counted)
    nx1 = nx - 1
    ny1 = ny - 1
    nz1 = nz - 1
    # cells per world unit on each axis
    nxwx = float(nx) / wx
    nywy = float(ny) / wy
    nzwz = float(nz) / wz
    nxny = nx * ny  # NOTE: unused below (idx recomputes nx * ny inline)
    for shape in shapes:
        if hasattr(shape, "_grid"):
            bbox1 = shape._grid.bbox
        else:
            bbox1 = shape.bbox()
        # inclusive cell-index range overlapped by the shape's bbox,
        # clamped to the grid
        ixmin = int(clamp((bbox1.x0 - bbox.x0) * nxwx, 0, nx1))
        iymin = int(clamp((bbox1.y0 - bbox.y0) * nywy, 0, ny1))
        izmin = int(clamp((bbox1.z0 - bbox.z0) * nzwz, 0, nz1))
        ixmax = int(clamp((bbox1.x1 - bbox.x0) * nxwx, 0, nx1))
        iymax = int(clamp((bbox1.y1 - bbox.y0) * nywy, 0, ny1))
        izmax = int(clamp((bbox1.z1 - bbox.z0) * nzwz, 0, nz1))
        for k in range(izmin, izmax+1):
            for j in range(iymin, iymax+1):
                for i in range(ixmin, ixmax+1):
                    idx = i + nx * j + nx * ny * k
                    cells[idx].append(shape)
                    duzina = len(self.cells[idx])  # duzina = "length" (Croatian)
                    num_objects += 1
                    if duzina == 1: num_arrays += 1
                    if duzina > max_len: max_len = duzina
    self.max_length_in_cell = max_len
    self.num_objects = num_objects
    self.num_arrays = num_arrays
#linear array looks like nobjects:{ptr_obj, ptr_func}
def _create_grid(self, runtimes, intersector, visibility=False):
    """Serialize the Python cell lists into native (x86.MemData) buffers.

    Two parallel structures are built:
      * a cell table with one uint32 per cell -- 0 for an empty cell,
        otherwise the byte offset of that cell's record in the linear
        array;
      * per runtime, a linear array of records laid out as
        ``count, {obj_ptr, func_ptr} * count`` (8 bytes per pair).
    ``visibility=True`` builds the shadow-ray (boolean) variant into the
    *_b attributes (asm_cells_b / lin_arrays_b).
    """
    cells = self.cells
    nx = self.nx
    ny = self.ny
    nz = self.nz
    num_cells = int(nx * ny * nz)
    if visibility:
        # one uint32 slot per cell, zero-filled (0 == empty cell)
        self.asm_cells_b = x86.MemData(num_cells*4)
        self.asm_cells_b.fill()
        self.lin_arrays_b = {}
    else:
        self.asm_cells = x86.MemData(num_cells*4)
        self.asm_cells.fill()
        self.lin_arrays = {}
    for r in runtimes:
        if visibility:
            self.lin_arrays_b[r] = x86.MemData(self.num_arrays*4 + self.num_objects * 8 + 4) #we start of index[1] that why extra four bytes
            x86.SetUInt32(self.lin_arrays_b[r].ptr(), 0, 0)
        else:
            # NOTE(review): this re-creates lin_arrays_b on *every*
            # runtime iteration of the non-visibility pass -- looks
            # unintentional; confirm before relying on lin_arrays_b here.
            self.lin_arrays_b = {}
            self.lin_arrays[r] = x86.MemData(self.num_arrays*4 + self.num_objects * 8 + 4) #we start of index[1] that why extra four bytes
            x86.SetUInt32(self.lin_arrays[r].ptr(), 0, 0)
    offset = 4 # offset is in bytes; offset 0 is reserved so 0 can mean "empty"
    if visibility:
        addr_cells = self.asm_cells_b.ptr()
    else:
        addr_cells = self.asm_cells.ptr()
    for k in range(nz):
        for j in range(ny):
            for i in range(nx):
                idx = i + nx * j + nx * ny * k
                cell = cells[idx]
                if len(cell) == 0:
                    pass  # empty cell: table slot keeps its zero fill
                else:
                    # record this cell's byte offset in the cell table
                    adr = addr_cells + idx * 4
                    x86.SetUInt32(adr, offset, 0)
                    for r in runtimes:
                        if visibility:
                            addr_arr = self.lin_arrays_b[r].ptr()
                        else:
                            addr_arr = self.lin_arrays[r].ptr()
                        adr = addr_arr + offset
                        num = len(cell)
                        x86.SetUInt32(adr, num, 0)
                        # write the (obj_ptr, func_ptr) pairs right after the
                        # count; SetUInt32 presumably accepts a tuple and
                        # writes consecutive uint32s -- TODO confirm x86 API
                        x86.SetUInt32(adr+4, self._get_ptrs_obj_func(cell, r, intersector, visibility), 0)
                    # same layout/offset is shared by every runtime's array
                    offset = offset + len(cell) * 8 + 4
def _get_ptrs_obj_func(self, shapes, runtime, intersector, visibility):
adrs = []
for s in shapes:
adrs.append(intersector.address_off(s))
if visibility:
adrs.append(runtime.address_label(s.name() + "_intersect_bool"))
else:
adrs.append(runtime.address_label(s.name() + "_intersect"))
return tuple(adrs)
def _load_isect_functions(self, runtimes, assembler, structures, dyn_arrays, visibility=False):
for key, value in dyn_arrays.items():
if visibility:
key.isect_asm_b(runtimes, key.name() + "_intersect_bool", assembler, structures)
else:
key.isect_asm(runtimes, key.name() + "_intersect", assembler, structures)
def isect_shapes(self, ray, shapes):
min_dist = 999999.0
hit_point = False
for s in shapes:
hit = s.isect(ray, min_dist)
if hit is False: continue
if hit.t < min_dist:
min_dist = hit.t
hit_point = hit
return hit_point
def isect_b(self, ray, min_dist=999999.0): #ray direction must be normalized
hp = self.isect(ray, min_dist)
if hp:
return hp.t
return hp
def isect(self, ray, min_dist = 999999.0):
    """Intersect *ray* with the scene by walking grid cells in 3D-DDA
    order; return the nearest hitpoint object or False on a miss.

    Note: *min_dist* is not used by this implementation -- each cell
    query in isect_shapes() starts from its own 999999.0 sentinel.
    """
    ox = ray.origin.x
    oy = ray.origin.y
    oz = ray.origin.z
    dx = ray.dir.x
    dy = ray.dir.y
    dz = ray.dir.z
    x0 = self.bbox.x0
    y0 = self.bbox.y0
    z0 = self.bbox.z0
    x1 = self.bbox.x1
    y1 = self.bbox.y1
    z1 = self.bbox.z1
    # Slab test against the grid bbox; zero direction components are
    # nudged to an epsilon so the reciprocal stays finite.
    if dx == 0.0: dx = 0.00001
    a = 1.0 / dx
    if a >= 0:
        tx_min = (x0 - ox) * a
        tx_max = (x1 - ox) * a
    else:
        tx_min = (x1 - ox) * a
        tx_max = (x0 - ox) * a
    if dy == 0.0: dy = 0.00001
    b = 1.0 / dy
    if b >= 0:
        ty_min = (y0 - oy) * b
        ty_max = (y1 - oy) * b
    else:
        ty_min = (y1 - oy) * b
        ty_max = (y0 - oy) * b
    if dz == 0.0: dz = 0.00001
    c = 1.0 / dz
    if c >= 0:
        tz_min = (z0 - oz) * c
        tz_max = (z1 - oz) * c
    else:
        tz_min = (z1 - oz) * c
        tz_max = (z0 - oz) * c
    # t0/t1 = ray parameter where it enters/leaves the bbox
    if tx_min > ty_min: t0 = tx_min
    else: t0 = ty_min
    if tz_min > t0: t0 = tz_min
    if tx_max < ty_max: t1 = tx_max
    else: t1 = ty_max
    if tz_max < t1: t1 = tz_max
    if t0 > t1:
        return False #no intersection occurs: ray misses the grid bbox
    # Starting cell: the cell containing the origin when it lies inside
    # the bbox, otherwise the cell where the ray enters the bbox.
    if self.bbox.inside(ray.origin):
        ix = int(clamp((ox - x0) * self.nx / (x1 - x0), 0, self.nx - 1))
        iy = int(clamp((oy - y0) * self.ny / (y1 - y0), 0, self.ny - 1))
        iz = int(clamp((oz - z0) * self.nz / (z1 - z0), 0, self.nz - 1))
    else:
        p = ray.origin + ray.dir * t0
        ix = int(clamp((p.x - x0) * self.nx / (x1 - x0), 0, self.nx - 1))
        iy = int(clamp((p.y - y0) * self.ny / (y1 - y0), 0, self.ny - 1))
        iz = int(clamp((p.z - z0) * self.nz / (z1 - z0), 0, self.nz - 1))
    # ray-parameter increment per cell on each axis
    dtx = (tx_max - tx_min) / self.nx
    dty = (ty_max - ty_min) / self.ny
    dtz = (tz_max - tz_min) / self.nz
    # Per axis: t of the next cell boundary, step direction, and the
    # index value at which the walk leaves the grid.
    if dx > 0.0:
        tx_next = tx_min + (ix + 1) * dtx
        ix_step = 1
        ix_stop = self.nx
    else:
        tx_next = tx_min + (self.nx - ix) * dtx
        ix_step = -1
        ix_stop = -1
    if dx == 0.0:
        # unreachable: dx was replaced by an epsilon above; kept as in
        # the reference implementation
        tx_next = 9999999.9999
        ix_step = -1
        ix_stop = -1
    if dy > 0.0:
        ty_next = ty_min + (iy + 1) * dty
        iy_step = 1
        iy_stop = self.ny
    else:
        ty_next = ty_min + (self.ny - iy) * dty
        iy_step = -1
        iy_stop = -1
    if dy == 0.0:
        # unreachable (epsilon substitution above)
        ty_next = 9999999.9999
        iy_step = -1
        iy_stop = -1
    if dz > 0.0:
        tz_next = tz_min + (iz + 1) * dtz
        iz_step = 1
        iz_stop = self.nz
    else:
        tz_next = tz_min + (self.nz - iz) * dtz
        iz_step = -1
        iz_stop = -1
    if dz == 0.0:
        # unreachable (epsilon substitution above)
        tz_next = 9999999.9999
        iz_step = -1
        iz_stop = -1
    # Walk cells in order of increasing t: always advance along the
    # axis whose next boundary is closest.
    while True:
        cell = self.cells[ix + self.nx * iy + self.nx * self.ny * iz]
        if tx_next < ty_next and tx_next < tz_next:
            hp = self.isect_shapes(ray, cell)
            # accept a hit only if it lies before the cell boundary,
            # i.e. inside the current cell's span
            if hp and hp.t < tx_next:
                return hp
            tx_next += dtx
            ix += ix_step
            if ix == ix_stop: return False
        else:
            if ty_next < tz_next:
                hp = self.isect_shapes(ray, cell)
                if hp and hp.t < ty_next:
                    return hp
                ty_next += dty
                iy += iy_step
                if iy == iy_stop: return False
            else:
                hp = self.isect_shapes(ray, cell)
                if hp and hp.t < tz_next:
                    return hp
                tz_next += dtz
                iz += iz_step
                if iz == iz_stop: return False
    return False  # unreachable: the loop only exits via return
def isect_asm_b(self, runtimes, label, assembler, structures, dyn_arrays, intersector):
    """Assemble the boolean (shadow/visibility) grid-traversal routine
    under *label* in every runtime.

    Generated code's calling convention:
      eax in  = pointer to a ray structure
      eax out = 1 when any object is hit, 0 otherwise
    This mirrors the Python isect() traversal: bbox slab test, start
    cell selection, per-axis DDA setup, then the _traverse loop calling
    the per-cell "ray vs object list" routine.
    """
    # assemble per-shape-type boolean intersectors and the native grid
    self._load_isect_functions(runtimes, assembler, structures, dyn_arrays, True)
    self._create_grid(runtimes, intersector, True)
    label_ray_shapes = "ray_objects_intersections_bool"
    self.isect_ray_shapes_b(runtimes, label_ray_shapes, assembler, structures)
    code = """
#DATA
"""
    code += structures.structs(('ray',)) + """
uint32 ray_ptr
float one[4] = 1.0, 1.0, 1.0, 0.0
float zero[4] = 0.0, 0.0, 0.0, 0.0
uint32 ones = 0xFFFFFFFF
int32 ixyz[4]
float dtxyz[4]
int32 ix_step, iy_step, iz_step
int32 ix_stop, iy_stop, iz_stop
float tx_next, ty_next, tz_next
float khuge = 999999.999
float minimum_distance = 999999.0
int32 n[4]
uint32 grid_ptr
uint32 arr_ptr
float bbox_min[4]
float bbox_max[4]
float nbox_width[4]
float n_1[4]
float one_overn[4]
uint32 grid_size[4]
uint32 temp_avx
#CODE
"""
    code += " global " + label + ":\n" + """
mov dword [ray_ptr], eax
mov ebp, dword [khuge]
mov dword [minimum_distance], ebp
"""
    #TODO --- think if ray direction has zero component -- put some epsilon!!!
    # slab test: reciprocal of direction, then min/max of the two slab ts
    code += """
macro eq128 xmm0 = one / eax.ray.dir
"""
    code += "macro eq128 xmm1 = bbox_min\n"
    code += "macro eq128 xmm2 = bbox_max\n"
    code += """
macro eq128 xmm1 = xmm1 - eax.ray.origin
macro eq128 xmm1 = xmm1 * xmm0
macro eq128 xmm2 = xmm2 - eax.ray.origin
macro eq128 xmm2 = xmm2 * xmm0
macro eq128 xmm3 = xmm1
macro eq128 xmm4 = xmm2
; tx_min, ty_min, tz_min
macro call minps xmm3, xmm2
; tx_max, ty_max, tz_max
macro call maxps xmm4, xmm1
macro broadcast xmm5 = xmm3[1]
macro call maxss xmm5, xmm3
macro broadcast xmm6 = xmm3[2]
;t0
macro call maxss xmm6, xmm5
macro broadcast xmm5 = xmm4[1]
macro call minss xmm5, xmm4
macro broadcast xmm7 = xmm4[2]
;t1
macro call minss xmm7, xmm5
macro if xmm7 > xmm6 goto next_section
mov eax, 0 ;no intersection ocur
ret
;now we must check this if self.bbox.inside(ray.origin)
next_section:
macro eq128 xmm0 = eax.ray.origin
macro eq128 xmm2 = xmm0
"""
    code += "macro eq128 xmm1 = bbox_max\n"
    code += """
; le - less or equal (xmm0 <= xmm1)
macro call cmpps xmm0, xmm1, 2
"""
    code += "macro eq128 xmm5 = bbox_min\n"
    code += """
macro call cmpps xmm5, xmm2, 2
macro call andps xmm0, xmm5
macro broadcast xmm1 = xmm0[1]
macro broadcast xmm2 = xmm0[2]
macro call andps xmm0, xmm1
macro call andps xmm0, xmm2
"""
    # move the all-ones/zero comparison mask into ecx (AVX lacks the
    # SSE movd path used by this macro set)
    if proc.AVX:
        code += "macro eq32 temp_avx = xmm0 {xmm0} \n"
        code += "mov ecx, dword [temp_avx] \n"
    else:
        code += "movd ecx, xmm0 \n"
    code += """
cmp ecx, dword [ones]
je point_inside ; point is inside bbox
macro broadcast xmm6 = xmm6[0]
macro eq128 xmm0 = eax.ray.dir * xmm6 + eax.ray.origin
jmp next_section2
point_inside:
macro eq128 xmm0 = eax.ray.origin
next_section2:
"""
    # starting cell indices: (p - bbox_min) * n/width, clamped to [0, n-1]
    code += "macro eq128 xmm0 = xmm0 - bbox_min\n"
    code += "macro eq128 xmm0 = xmm0 * nbox_width\n"
    code += "macro eq128 xmm2 = n_1\n"
    code += """
macro call zero xmm1
macro call minps xmm0, xmm2
macro call maxps xmm0, xmm1
; ix, iy, iz
"""
    if proc.AVX:
        code += """
vcvttps2dq xmm1, xmm0
vcvtdq2ps xmm0, xmm1
"""
    else:
        code += """
cvttps2dq xmm1, xmm0
cvtdq2ps xmm0, xmm1
"""
    code += """
macro eq128 ixyz = xmm1 {xmm7}
macro eq128 xmm5 = xmm4
macro eq128 xmm5 = xmm5 - xmm3
"""
    code += "macro eq128 xmm5 = xmm5 * one_overn\n"
    code += """
; xmm5 = dtx, dty, dtz
macro eq128 dtxyz = xmm5 {xmm7}
;tx_next = tx_min + (ix + 1) * dtx
;tx_next = tx_min + (self.nx - ix) * dtx
macro eq128 xmm6 = one
macro eq128 xmm6 = xmm6 + xmm0
macro eq128 xmm6 = xmm6 * xmm5
macro eq128 xmm6 = xmm6 + xmm3
"""
    code += "macro eq128 xmm7 = grid_size\n"
    code += """
macro eq128 n = xmm7 {xmm0}
macro eq128 xmm2 = xmm7
macro call int_to_float xmm7, xmm7
macro eq128 xmm7 = xmm7 - xmm0
macro eq128 xmm7 = xmm7 * xmm5
macro eq128 xmm7 = xmm7 + xmm3
macro eq128 xmm0 = eax.ray.dir
"""
    # x axis: pick tx_next/ix_step/ix_stop from the sign of ray.dir.x
    if proc.AVX:
        code += "vcomiss xmm0, dword [zero]\n"
    else:
        code += "comiss xmm0, dword [zero]\n"
    code += """
jz _equal1
jnc _greater1
mov dword [ix_step], -1
mov dword [ix_stop], -1
macro eq32 tx_next = xmm7 {xmm0}
jmp _next_dx
_greater1:
mov dword [ix_step], 1
macro eq32 ix_stop = xmm2 {xmm0}
macro eq32 tx_next = xmm6 {xmm0}
jmp _next_dx
_equal1:
mov ebp, dword [khuge]
mov dword [ix_step], -1
mov dword [ix_stop], -1
mov dword [tx_next], ebp
_next_dx:
macro broadcast xmm1 = xmm0[1]
"""
    # y axis
    if proc.AVX:
        code += "vcomiss xmm1, dword [zero]\n"
    else:
        code += "comiss xmm1, dword [zero]\n"
    code += """
jz _equal2
jnc _greater2
mov dword [iy_step], -1
mov dword [iy_stop], -1
macro broadcast xmm5 = xmm7[1]
macro eq32 ty_next = xmm5 {xmm0}
jmp _next_dx2
_greater2:
mov dword [iy_step], 1
macro broadcast xmm4 = xmm2[1]
macro eq32 iy_stop = xmm4 {xmm0}
macro broadcast xmm5 = xmm6[1]
macro eq32 ty_next = xmm5 {xmm0}
jmp _next_dx2
_equal2:
mov ebp, dword [khuge]
mov dword [iy_step], -1
mov dword [iy_stop], -1
mov dword [ty_next], ebp
_next_dx2:
macro broadcast xmm1 = xmm0[2]
"""
    # z axis
    if proc.AVX:
        code += "vcomiss xmm1, dword [zero]\n"
    else:
        code += "comiss xmm1, dword [zero]\n"
    code += """
jz _equal3
jnc _greater3
mov dword [iz_step], -1
mov dword [iz_stop], -1
macro broadcast xmm5 = xmm7[2]
macro eq32 tz_next = xmm5 {xmm0}
jmp _next_dx3
_greater3:
mov dword [iz_step], 1
macro broadcast xmm4 = xmm2[2]
macro eq32 iz_stop = xmm4 {xmm0}
macro broadcast xmm5 = xmm6[2]
macro eq32 tz_next = xmm5 {xmm0}
jmp _next_dx3
_equal3:
mov ebp, dword [khuge]
mov dword [iz_step], -1
mov dword [iz_stop], -1
mov dword [tz_next], ebp
_next_dx3:
_traverse:
;cell = self.cells[ix + self.nx * iy + self.nx * self.ny * iz]
mov eax, dword [n] ;self.nx
mov ebx, dword [n+4] ;self.ny
imul ebx, dword [ixyz+8]
imul ebx, eax
imul eax, dword [ixyz+4]
add eax, ebx
add eax, dword [ixyz] ; in eax we have index
imul eax, eax, 4 ; offset in bytes
;if tx_next < ty_next and tx_next < tz_next:
macro eq32 xmm0 = tx_next
macro if xmm0 > ty_next goto _next_part
macro if xmm0 > tz_next goto _next_part
mov ebp, dword [grid_ptr]
add ebp, eax ;address + offset in bytes
mov eax, dword [ebp]
cmp eax, 0 ;empty cell
je _next_calc
;eax - ray ,ebx = hp, edx - ptr array
mov edx, dword [arr_ptr]
add edx, eax
mov eax, dword [ray_ptr]
"""
    code += "call " + label_ray_shapes + "\n" + """
;if hp and hp.t < tx_next: return hp
cmp eax, 0
je _next_calc
macro if xmm0 < tx_next goto _return_hp
_next_calc:
macro eq32 xmm0 = dtxyz + tx_next
macro eq32 tx_next = xmm0 {xmm7}
mov eax, dword [ix_step]
mov ebx, dword [ix_stop]
mov ecx, dword [ixyz]
add ecx, eax
mov dword [ixyz], ecx
cmp ecx, ebx
jne _traverse
mov eax, 0
ret
_next_part:
;if ty_next < tz_next:
macro eq32 xmm0 = ty_next
macro if xmm0 > tz_next goto _next_part2
mov ebp, dword [grid_ptr]
add ebp, eax ;address + offset in bytes
mov eax, dword [ebp]
cmp eax, 0 ;empty cell
je _next_calc2
;eax - ray ,ebx = hp, edx - ptr array
mov edx, dword [arr_ptr]
add edx, eax
mov eax, dword [ray_ptr]
"""
    code += "call " + label_ray_shapes + "\n" + """
;if hp and hp.t < ty_next: return hp
cmp eax, 0
je _next_calc2
macro if xmm0 < ty_next goto _return_hp
_next_calc2:
macro eq128 xmm0 = dtxyz
macro broadcast xmm0 = xmm0[1]
macro eq32 xmm0 = xmm0 + ty_next
macro eq32 ty_next = xmm0 {xmm7}
mov eax, dword [iy_step]
mov ebx, dword [iy_stop]
mov ecx, dword [ixyz+4]
add ecx, eax
mov dword [ixyz+4], ecx
cmp ecx, ebx
jne _traverse
mov eax, 0
ret
_next_part2:
mov ebp, dword [grid_ptr]
add ebp, eax ;address + offset in bytes
mov eax, dword [ebp]
cmp eax, 0 ;empty cell
je _next_calc3
;eax - ray ,ebx = hp, edx - ptr array
mov edx, dword [arr_ptr]
add edx, eax
mov eax, dword [ray_ptr]
"""
    code += "call " + label_ray_shapes + "\n" + """
;if hp and hp.t < tz_next: return hp
cmp eax, 0
je _next_calc3
macro if xmm0 < tz_next goto _return_hp
_next_calc3:
macro eq128 xmm0 = dtxyz
macro broadcast xmm0 = xmm0[2]
macro eq32 xmm0 = xmm0 + tz_next
macro eq32 tz_next = xmm0 {xmm7}
mov eax, dword [iz_step]
mov ebx, dword [iz_stop]
mov ecx, dword [ixyz+8]
add ecx, eax
mov dword [ixyz+8], ecx
cmp ecx, ebx
jne _traverse
mov eax, 0
ret
_return_hp:
mov eax, 1
ret
_end_isect:
mov eax, 0
ret
"""
    mc = assembler.assemble(code, True)
    #mc.print_machine_code()
    name = "ray_scene_isect" + str(abs(hash(self)))
    for r in runtimes:
        if not r.global_exists(label):
            # load once per runtime, then patch the data section with
            # this grid's constants and buffer addresses
            ds = r.load(name, mc)
            bbox = self.bbox
            ds["grid_ptr"] = self.asm_cells_b.ptr()
            ds['arr_ptr'] = self.lin_arrays_b[r].ptr()
            ds["bbox_min"] = (bbox.x0, bbox.y0, bbox.z0, 0.0)
            ds["bbox_max"] = (bbox.x1, bbox.y1, bbox.z1, 0.0)
            nboxx = float(self.nx / (bbox.x1 - bbox.x0))
            nboxy = float(self.ny / (bbox.y1 - bbox.y0))
            nboxz = float(self.nz / (bbox.z1 - bbox.z0))
            ds["nbox_width"] = (nboxx, nboxy, nboxz, 0.0)
            ds["n_1"] = (float(self.nx-1), float(self.ny-1), float(self.nz-1), 0.0)
            ds["one_overn"] = (1.0 / self.nx, 1.0 / self.ny, 1.0 / self.nz, 0.0)
            ds["grid_size"] = (self.nx, self.ny, self.nz, 0)
# eax - pointer to ray
# ebx - pointer to hitpoint
def isect_asm(self, runtimes, label, assembler, structures, dyn_arrays, intersector):
    """Assemble the full (nearest-hit) grid-traversal routine under
    *label* in every runtime.

    Generated code's calling convention:
      eax in  = pointer to a ray structure
      ebx in  = pointer to a hitpoint structure (filled on hit)
      eax out = 1 when something is hit, 0 otherwise
    Identical traversal to isect_asm_b, but it threads the hitpoint
    pointer through and compares hitpoint.t against the cell boundary.
    """
    self._load_isect_functions(runtimes, assembler, structures, dyn_arrays)
    self._create_grid(runtimes, intersector)
    label_ray_shapes = "ray_objects_intersections"
    self.isect_ray_shapes(runtimes, label_ray_shapes, assembler, structures)
    code = """
#DATA
"""
    code += structures.structs(('ray', 'hitpoint')) + """
uint32 hp_ptr, ray_ptr
float one[4] = 1.0, 1.0, 1.0, 0.0
float zero[4] = 0.0, 0.0, 0.0, 0.0
uint32 ones = 0xFFFFFFFF
int32 ixyz[4]
float dtxyz[4]
int32 ix_step, iy_step, iz_step
int32 ix_stop, iy_stop, iz_stop
float tx_next, ty_next, tz_next
float khuge = 999999.999
float minimum_distance = 999999.0
int32 n[4]
uint32 grid_ptr
uint32 arr_ptr
float bbox_min[4]
float bbox_max[4]
float nbox_width[4]
float n_1[4]
float one_overn[4]
uint32 grid_size[4]
uint32 temp_avx
#CODE
"""
    code += " global " + label + ":\n" + """
mov dword [hp_ptr], ebx
mov dword [ray_ptr], eax
mov ebp, dword [khuge]
mov dword [minimum_distance], ebp
"""
    #TODO --- think if ray direction has zero component -- put some epsilon!!!
    # slab test: reciprocal of direction, then min/max of the two slab ts
    code += """
macro eq128 xmm0 = one / eax.ray.dir
"""
    code += "macro eq128 xmm1 = bbox_min\n"
    code += "macro eq128 xmm2 = bbox_max\n"
    code += """
macro eq128 xmm1 = xmm1 - eax.ray.origin
macro eq128 xmm1 = xmm1 * xmm0
macro eq128 xmm2 = xmm2 - eax.ray.origin
macro eq128 xmm2 = xmm2 * xmm0
macro eq128 xmm3 = xmm1
macro eq128 xmm4 = xmm2
; tx_min, ty_min, tz_min
macro call minps xmm3, xmm2
; tx_max, ty_max, tz_max
macro call maxps xmm4, xmm1
macro broadcast xmm5 = xmm3[1]
macro call maxss xmm5, xmm3
macro broadcast xmm6 = xmm3[2]
;t0
macro call maxss xmm6, xmm5
macro broadcast xmm5 = xmm4[1]
macro call minss xmm5, xmm4
macro broadcast xmm7 = xmm4[2]
;t1
macro call minss xmm7, xmm5
macro if xmm7 > xmm6 goto next_section
mov eax, 0 ;no intersection ocur
ret
;now we must check this if self.bbox.inside(ray.origin)
next_section:
macro eq128 xmm0 = eax.ray.origin
macro eq128 xmm2 = xmm0
"""
    code += "macro eq128 xmm1 = bbox_max\n"
    code += """
; le - less or equal (xmm0 <= xmm1)
macro call cmpps xmm0, xmm1, 2
"""
    code += "macro eq128 xmm5 = bbox_min\n"
    code += """
macro call cmpps xmm5, xmm2, 2
macro call andps xmm0, xmm5
macro broadcast xmm1 = xmm0[1]
macro broadcast xmm2 = xmm0[2]
macro call andps xmm0, xmm1
macro call andps xmm0, xmm2
"""
    # move the all-ones/zero comparison mask into ecx (AVX lacks the
    # SSE movd path used by this macro set)
    if proc.AVX:
        code += "macro eq32 temp_avx = xmm0 {xmm0} \n"
        code += "mov ecx, dword [temp_avx] \n"
    else:
        code += "movd ecx, xmm0 \n"
    code += """
cmp ecx, dword [ones]
je point_inside ; point is inside bbox
macro broadcast xmm6 = xmm6[0]
macro eq128 xmm0 = eax.ray.dir * xmm6 + eax.ray.origin
jmp next_section2
point_inside:
macro eq128 xmm0 = eax.ray.origin
next_section2:
"""
    # starting cell indices: (p - bbox_min) * n/width, clamped to [0, n-1]
    code += "macro eq128 xmm0 = xmm0 - bbox_min\n"
    code += "macro eq128 xmm0 = xmm0 * nbox_width\n"
    code += "macro eq128 xmm2 = n_1\n"
    code += """
macro call zero xmm1
macro call minps xmm0, xmm2
macro call maxps xmm0, xmm1
; ix, iy, iz
"""
    if proc.AVX:
        code += """
vcvttps2dq xmm1, xmm0
vcvtdq2ps xmm0, xmm1
"""
    else:
        code += """
cvttps2dq xmm1, xmm0
cvtdq2ps xmm0, xmm1
"""
    code += """
macro eq128 ixyz = xmm1 {xmm7}
macro eq128 xmm5 = xmm4
macro eq128 xmm5 = xmm5 - xmm3
"""
    code += "macro eq128 xmm5 = xmm5 * one_overn\n"
    code += """
; xmm5 = dtx, dty, dtz
macro eq128 dtxyz = xmm5 {xmm7}
;tx_next = tx_min + (ix + 1) * dtx
;tx_next = tx_min + (self.nx - ix) * dtx
macro eq128 xmm6 = one
macro eq128 xmm6 = xmm6 + xmm0
macro eq128 xmm6 = xmm6 * xmm5
macro eq128 xmm6 = xmm6 + xmm3
"""
    code += "macro eq128 xmm7 = grid_size\n"
    code += """
macro eq128 n = xmm7 {xmm0}
macro eq128 xmm2 = xmm7
macro call int_to_float xmm7, xmm7
macro eq128 xmm7 = xmm7 - xmm0
macro eq128 xmm7 = xmm7 * xmm5
macro eq128 xmm7 = xmm7 + xmm3
macro eq128 xmm0 = eax.ray.dir
"""
    # x axis: pick tx_next/ix_step/ix_stop from the sign of ray.dir.x
    if proc.AVX:
        code += "vcomiss xmm0, dword [zero]\n"
    else:
        code += "comiss xmm0, dword [zero]\n"
    code += """
jz _equal1
jnc _greater1
mov dword [ix_step], -1
mov dword [ix_stop], -1
macro eq32 tx_next = xmm7 {xmm0}
jmp _next_dx
_greater1:
mov dword [ix_step], 1
macro eq32 ix_stop = xmm2 {xmm0}
macro eq32 tx_next = xmm6 {xmm0}
jmp _next_dx
_equal1:
mov ebp, dword [khuge]
mov dword [ix_step], -1
mov dword [ix_stop], -1
mov dword [tx_next], ebp
_next_dx:
macro broadcast xmm1 = xmm0[1]
"""
    # y axis
    if proc.AVX:
        code += "vcomiss xmm1, dword [zero]\n"
    else:
        code += "comiss xmm1, dword [zero]\n"
    code += """
jz _equal2
jnc _greater2
mov dword [iy_step], -1
mov dword [iy_stop], -1
macro broadcast xmm5 = xmm7[1]
macro eq32 ty_next = xmm5 {xmm0}
jmp _next_dx2
_greater2:
mov dword [iy_step], 1
macro broadcast xmm4 = xmm2[1]
macro eq32 iy_stop = xmm4 {xmm0}
macro broadcast xmm5 = xmm6[1]
macro eq32 ty_next = xmm5 {xmm0}
jmp _next_dx2
_equal2:
mov ebp, dword [khuge]
mov dword [iy_step], -1
mov dword [iy_stop], -1
mov dword [ty_next], ebp
_next_dx2:
macro broadcast xmm1 = xmm0[2]
"""
    # z axis
    if proc.AVX:
        code += "vcomiss xmm1, dword [zero]\n"
    else:
        code += "comiss xmm1, dword [zero]\n"
    code += """
jz _equal3
jnc _greater3
mov dword [iz_step], -1
mov dword [iz_stop], -1
macro broadcast xmm5 = xmm7[2]
macro eq32 tz_next = xmm5 {xmm0}
jmp _next_dx3
_greater3:
mov dword [iz_step], 1
macro broadcast xmm4 = xmm2[2]
macro eq32 iz_stop = xmm4 {xmm0}
macro broadcast xmm5 = xmm6[2]
macro eq32 tz_next = xmm5 {xmm0}
jmp _next_dx3
_equal3:
mov ebp, dword [khuge]
mov dword [iz_step], -1
mov dword [iz_stop], -1
mov dword [tz_next], ebp
_next_dx3:
_traverse:
;cell = self.cells[ix + self.nx * iy + self.nx * self.ny * iz]
mov eax, dword [n] ;self.nx
mov ebx, dword [n+4] ;self.ny
imul ebx, dword [ixyz+8]
imul ebx, eax
imul eax, dword [ixyz+4]
add eax, ebx
add eax, dword [ixyz] ; in eax we have index
imul eax, eax, 4 ; offset in bytes
;if tx_next < ty_next and tx_next < tz_next:
macro eq32 xmm0 = tx_next
macro if xmm0 > ty_next goto _next_part
macro if xmm0 > tz_next goto _next_part
mov ebp, dword [grid_ptr]
add ebp, eax ;address + offset in bytes
mov eax, dword [ebp]
cmp eax, 0 ;empty cell
je _next_calc
;eax - ray ,ebx = hp, edx - ptr array
mov edx, dword [arr_ptr]
add edx, eax
mov eax, dword [ray_ptr]
mov ebx, dword [hp_ptr]
"""
    code += "call " + label_ray_shapes + "\n" + """
;if hp and hp.t < tx_next: return hp
cmp eax, 0
je _next_calc
mov ebx, dword [hp_ptr]
macro eq32 xmm0 = ebx.hitpoint.t
macro if xmm0 < tx_next goto _return_hp
_next_calc:
macro eq32 xmm0 = dtxyz + tx_next
macro eq32 tx_next = xmm0 {xmm7}
mov eax, dword [ix_step]
mov ebx, dword [ix_stop]
mov ecx, dword [ixyz]
add ecx, eax
mov dword [ixyz], ecx
cmp ecx, ebx
jne _traverse
mov eax, 0
ret
_next_part:
;if ty_next < tz_next:
macro eq32 xmm0 = ty_next
macro if xmm0 > tz_next goto _next_part2
mov ebp, dword [grid_ptr]
add ebp, eax ;address + offset in bytes
mov eax, dword [ebp]
cmp eax, 0 ;empty cell
je _next_calc2
;eax - ray ,ebx = hp, edx - ptr array
mov edx, dword [arr_ptr]
add edx, eax
mov eax, dword [ray_ptr]
mov ebx, dword [hp_ptr]
"""
    code += "call " + label_ray_shapes + "\n" + """
;if hp and hp.t < ty_next: return hp
cmp eax, 0
je _next_calc2
mov ebx, dword [hp_ptr]
macro eq32 xmm0 = ebx.hitpoint.t
macro if xmm0 < ty_next goto _return_hp
_next_calc2:
macro eq128 xmm0 = dtxyz
macro broadcast xmm0 = xmm0[1]
macro eq32 xmm0 = xmm0 + ty_next
macro eq32 ty_next = xmm0 {xmm7}
mov eax, dword [iy_step]
mov ebx, dword [iy_stop]
mov ecx, dword [ixyz+4]
add ecx, eax
mov dword [ixyz+4], ecx
cmp ecx, ebx
jne _traverse
mov eax, 0
ret
_next_part2:
mov ebp, dword [grid_ptr]
add ebp, eax ;address + offset in bytes
mov eax, dword [ebp]
cmp eax, 0 ;empty cell
je _next_calc3
;eax - ray ,ebx = hp, edx - ptr array
mov edx, dword [arr_ptr]
add edx, eax
mov eax, dword [ray_ptr]
mov ebx, dword [hp_ptr]
"""
    code += "call " + label_ray_shapes + "\n" + """
;if hp and hp.t < tz_next: return hp
cmp eax, 0
je _next_calc3
mov ebx, dword [hp_ptr]
macro eq32 xmm0 = ebx.hitpoint.t
macro if xmm0 < tz_next goto _return_hp
_next_calc3:
macro eq128 xmm0 = dtxyz
macro broadcast xmm0 = xmm0[2]
macro eq32 xmm0 = xmm0 + tz_next
macro eq32 tz_next = xmm0 {xmm7}
mov eax, dword [iz_step]
mov ebx, dword [iz_stop]
mov ecx, dword [ixyz+8]
add ecx, eax
mov dword [ixyz+8], ecx
cmp ecx, ebx
jne _traverse
mov eax, 0
ret
_return_hp:
mov eax, 1
ret
_end_isect:
mov eax, 0
ret
"""
    mc = assembler.assemble(code, True)
    #mc.print_machine_code()
    name = "ray_scene_isect" + str(abs(hash(self)))
    for r in runtimes:
        if not r.global_exists(label):
            # load once per runtime, then patch the data section with
            # this grid's constants and buffer addresses
            ds = r.load(name, mc)
            bbox = self.bbox
            ds["grid_ptr"] = self.asm_cells.ptr()
            ds['arr_ptr'] = self.lin_arrays[r].ptr()
            ds["bbox_min"] = (bbox.x0, bbox.y0, bbox.z0, 0.0)
            ds["bbox_max"] = (bbox.x1, bbox.y1, bbox.z1, 0.0)
            nboxx = float(self.nx / (bbox.x1 - bbox.x0))
            nboxy = float(self.ny / (bbox.y1 - bbox.y0))
            nboxz = float(self.nz / (bbox.z1 - bbox.z0))
            ds["nbox_width"] = (nboxx, nboxy, nboxz, 0.0)
            ds["n_1"] = (float(self.nx-1), float(self.ny-1), float(self.nz-1), 0.0)
            ds["one_overn"] = (1.0 / self.nx, 1.0 / self.ny, 1.0 / self.nz, 0.0)
            ds["grid_size"] = (self.nx, self.ny, self.nz, 0)
# eax = pointer to ray structure
# ebx = pointer to hitpoint structure
# edx = address in linear grid array --- n:{obj,func}, {obj,func}, ...
def isect_ray_shapes(self, runtimes, label, assembler, structures):
    """Assemble the 'intersect ray against one cell's object list'
    routine under *label*.

    Calling convention of the generated code:
      eax = pointer to ray structure
      ebx = pointer to hitpoint structure
      edx = address of a cell record: count, {obj_ptr, func_ptr}*count
    Returns 1 in eax when any object was hit (the nearest hit is left
    in the hitpoint structure), otherwise 0.  ecx passes the *address*
    of min_dist to each object routine so it can cull against the
    current nearest distance.
    """
    code = """
#DATA
"""
    code += structures.structs(('ray', 'hitpoint')) + """
uint32 isect_ocur
float min_dist = 999999.0
float max_dist = 999999.0
uint32 ptr_ray
uint32 ptr_hp
uint32 nobjects
uint32 ptr_objfuncs
#CODE
"""
    code += " global " + label + ":\n" + """
mov dword [ptr_ray], eax
mov dword [ptr_hp], ebx
mov esi, dword [edx]
mov dword [nobjects], esi
add edx, 4
mov dword [ptr_objfuncs], edx
mov dword [isect_ocur], 0
mov edi, dword [max_dist]
mov dword [min_dist], edi
_objects_loop:
mov eax, dword [ptr_ray]
mov esi, dword [ptr_objfuncs]
mov ebx, dword [esi]
mov ecx, min_dist
mov edx, dword [ptr_hp]
call dword [esi + 4] ; function pointer
cmp eax, 0
je _next_object
mov dword [isect_ocur], 1
; update distance
mov eax, dword [ptr_hp]
mov ebx, dword [eax + hitpoint.t]
mov dword [min_dist], ebx
_next_object:
add dword [ptr_objfuncs], 8
sub dword [nobjects], 1
jnz _objects_loop
mov eax, dword [isect_ocur]
ret
"""
    mc = assembler.assemble(code, True)
    #mc.print_machine_code()
    # ('objcest' typo kept -- the name only needs to be unique per grid)
    name = "ray_objcest_isects" + str(abs(hash(self)))
    for r in runtimes:
        if not r.global_exists(label):
            r.load(name, mc)
# eax = pointer to ray structure
# edx = address in linear grid array --- n:{obj,func}, {obj,func}, ...
def isect_ray_shapes_b(self, runtimes, label, assembler, structures):
    """Assemble the boolean (shadow-ray) variant of the per-cell
    object-list intersection routine under *label*.

    Calling convention of the generated code:
      eax = pointer to ray structure
      edx = address of a cell record: count, {obj_ptr, func_ptr}*count
    Returns 1 in eax when any object was hit, 0 otherwise; the nearest
    hit distance is returned in xmm0 (object routines report their t in
    xmm0 on a hit -- no hitpoint structure is involved).
    """
    code = """
#DATA
"""
    code += structures.structs(('ray', 'hitpoint')) + """
uint32 isect_ocur
float min_dist = 999999.0
float max_dist = 999999.0
uint32 ptr_ray
uint32 nobjects
uint32 ptr_objfuncs
#CODE
"""
    code += " global " + label + ":\n" + """
mov dword [ptr_ray], eax
mov esi, dword [edx]
mov dword [nobjects], esi
add edx, 4
mov dword [ptr_objfuncs], edx
mov dword [isect_ocur], 0
mov edi, dword [max_dist]
mov dword [min_dist], edi
_objects_loop:
mov eax, dword [ptr_ray]
mov esi, dword [ptr_objfuncs]
mov ebx, dword [esi]
mov ecx, min_dist
call dword [esi + 4] ; function pointer
cmp eax, 0
je _next_object
mov dword [isect_ocur], 1
; update distance
macro eq32 min_dist = xmm0 {xmm7}
_next_object:
add dword [ptr_objfuncs], 8
sub dword [nobjects], 1
jnz _objects_loop
mov eax, dword [isect_ocur]
macro eq32 xmm0 = min_dist
ret
"""
    mc = assembler.assemble(code, True)
    #mc.print_machine_code()
    # ('objcest' typo kept -- the name only needs to be unique per grid)
    name = "ray_objcest_isects" + str(abs(hash(self)))
    for r in runtimes:
        if not r.global_exists(label):
            r.load(name, mc)
|
import math
import time
start = time.time()  # wall-clock reference for the timing report printed at the end
# fairly verbose solution
def extract_digits(n):
    """Return the decimal digits of a non-negative int *n*, least
    significant digit first (0 yields [0])."""
    digits = []
    while True:
        digits.append(n % 10)
        n //= 10
        if n == 0:
            break
    return digits
def factors(n):
    """Return the non-trivial factor pairs of *n* as a flat list
    [a, b, a2, b2, ...] with a*b == n.

    Iterates candidates from 2 up to n//2, stopping early once a
    candidate already appeared as a cofactor (every pair found by then).

    Bug fix: the original appended ``n/i``, which is a float on
    Python 3 (true division); ``n//i`` keeps the cofactors ints, which
    downstream digit extraction requires.
    """
    result = []
    for i in range(2, (n // 2) + 1):
        if i in result:
            # i already produced as a cofactor -> all pairs are found
            break
        if n % i == 0:
            result.extend((i, n // i))
    # result = list(set(result))
    return result
def unique_digits(n):
    """True when the sequence *n* contains no repeated elements."""
    distinct = set(n)
    return len(n) == len(distinct)
def is_pandigital(number, product_1, product_2):
    """Return True when *number*, *product_1* and *product_2* together
    use each of the digits 1-9 exactly once (a 1-9 pandigital triple).

    Bug fixes vs. the original:
    - the per-operand uniqueness pre-check was a bare generator
      expression inside ``if``, which is always truthy and therefore
      never checked anything; it now really tests each digit list;
    - the function explicitly returns False instead of silently falling
      off the end (returning None) when digits remain unused.
    """
    digits = [x for x in range(1, 10)]
    digits_number = extract_digits(number)
    digits_product_1 = extract_digits(product_1)
    digits_product_2 = extract_digits(product_2)
    # a zero digit can never be part of a 1-9 pandigital
    if (0 in digits_number) or (0 in digits_product_1) or (0 in digits_product_2):
        return False
    # each operand must have unique digits on its own
    if not all(unique_digits(d) for d in (digits_number, digits_product_1, digits_product_2)):
        return False
    # the three operands must also be pairwise disjoint: the symmetric
    # difference of disjoint sets is their union, so its size equals
    # the total digit count only when no digit repeats anywhere
    if len(set(digits_number) ^ set(digits_product_1) ^ set(digits_product_2)) != (
            len(digits_number) + len(digits_product_1) + len(digits_product_2)):
        return False
    # remove the used digits from 1-9; pandigital iff none remain
    digits = list(set(digits) ^ set(digits_number) ^ set(digits_product_1) ^ set(digits_product_2))
    return len(digits) == 0
# Project Euler 32: sum every product whose multiplicand/multiplier/product
# triple is 1-9 pandigital; each qualifying product is counted once.
solution = 0
for num in range(10000):
    factor_number = factors(num)
    # factors() returns a flat [a, b, a2, b2, ...] list; walk it pairwise.
    # Bug fix: the original used xrange, which does not exist on Python 3
    # (the file already uses print() calls); range is equivalent here.
    for i in range(0, len(factor_number), 2):
        if is_pandigital(num, factor_number[i], factor_number[i + 1]):
            print(num, factor_number[i], factor_number[i + 1])
            solution += num
            break
elapsed = time.time() - start
print("%s found in %s seconds" % (solution, elapsed))
|
from sklearn.preprocessing import OrdinalEncoder
import category_encoders as ce
import pandas as pd
def encode(train, test):
    '''
    This function has 2 parts: encoding ordinal and nominal categories.

    Both data frames are concatenated first so every encoder sees the
    full set of category values, then split back into train/test parts.
    Returns the tuple (train_encoded, test_encoded).
    '''
    # Joining both data frames.  NOTE(review): contrary to the original
    # comment here, SalePrice is NOT removed before the concat -- the
    # test rows simply get NaN in that column, and the column is dropped
    # from the test split at the very end.
    frames = [train, test]
    new_df = pd.concat(frames, sort=False)
    # Save how many rows train/test have (used to split back below)
    train_len = len(train.index)
    test_len = len(test.index)
    # The following are ordinal categorical features
    ordinalEnc = ["OverallQual","OverallCond","ExterQual","ExterCond","BsmtQual","BsmtCond","BsmtExposure",\
    "BsmtFinType1","HeatingQC","KitchenQual","GarageQual","GarageCond"]
    # The following are nominal categorical features
    nominalEnc = ["MSZoning","LotShape","LandContour","LotConfig","Neighborhood","Condition1","BldgType",\
    "HouseStyle","RoofStyle","Exterior1st","Exterior2nd","MasVnrType","Foundation",\
    "CentralAir","Electrical","Functional","GarageType","GarageFinish","PavedDrive","MoSold",\
    "SaleType","SaleCondition"]
    # The following function maps each category into a number category.
    # If none is found from the list, 0 is assigned.
    def custom_ordinal_encode(ordEncod):
        encoding = {'EX': 5, 'GO': 4, 'AA': 3, 'Avg':2, 'BA':1, \
        'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa':2, 'Po':1, \
        'Av': 3, 'Mn': 2, 'No': 1, \
        'GLQ': 6, 'ALQ': 5, 'BLQ': 4, 'Rec':3, 'LwQ':2, 'Unf':1}
        return encoding.get(ordEncod, 0)
    # Encode ordinal categorical features.
    # NOTE(review): 'mapping' is given a callable here; category_encoders
    # documents dict/Series mappings -- confirm the installed version
    # accepts a function before upgrading.
    for col in ordinalEnc:
        encoder_grade = ce.OrdinalEncoder(mapping=[{'col': col, 'mapping': custom_ordinal_encode}], return_df=True)
        new_df = encoder_grade.fit_transform(new_df)
    # Encode nominal categorical features (sklearn assigns arbitrary
    # integer codes per category)
    enc = OrdinalEncoder()
    new_df[nominalEnc] = enc.fit_transform(new_df[nominalEnc])
    # Separate train and test df again with the new feature values.
    # Also, drop "SalePrice" from test (it was NaN-filled by the concat).
    train_encoded = new_df.head(train_len)
    test_encoded = new_df.tail(test_len)
    test_encoded = test_encoded.drop(columns=['SalePrice'])
    return train_encoded, test_encoded
# -*- coding: utf-8 -*-
from project.api.mixins import PaginateMixin, UserAccessMixin
from project.apps.search.models import Page
from rest_framework import mixins, viewsets
from .serializers import PageSerializer, PageDetailSerializer
class PageViewSet(
    UserAccessMixin,
    PaginateMixin,
    mixins.ListModelMixin,
    mixins.CreateModelMixin,
    mixins.DestroyModelMixin,
    viewsets.GenericViewSet
):
    """REST endpoint for Page objects: list, create and destroy only.

    No Retrieve/Update mixins are included, so detail GET/PUT/PATCH are
    not exposed.  Listing is newest-first (ordered by descending id).
    """
    model = Page
    queryset = model.objects.all().order_by("-id")
    # default serializer, used by create/destroy
    serializer_class = PageSerializer

    def list(self, request, *args, **kwargs):
        # list() swaps in the detail serializer so collection responses
        # carry the full representation; other actions keep PageSerializer
        self.serializer_class = PageDetailSerializer
        return super(PageViewSet, self).list(request, *args, **kwargs)
|
import jpype
# Start up the JVM with the JaCoCo coverage agent attached: instrument
# org.jpype.* / jpype.* classes, write results to jacoco.exec, dump
# instrumented classes to ./dump for inspection.
jpype.startJVM("-javaagent:project/coverage/org.jacoco.agent-0.8.5-runtime.jar=destfile=jacoco.exec,includes=org.jpype.*:jpype.*,classdumpdir=dump",
        classpath=["native/org.jpype.jar", "test/classes/"])
# Execute some code paths so there is coverage data to record
print(jpype.JString("hello"))
# Force a report from JaCoCo (not sure why shutdown doesn't trigger this)
RT = jpype.JClass("org.jacoco.agent.rt.RT")
agent = RT.getAgent()
print(agent.getSessionId())
agent.dump(False)
# Shutdown the JVM
jpype.shutdownJVM()
|
"""
A gui tool for editing.
Originally was a basic proof-of-concept and a test of the Python API:
The qt integration for ui together with the manually wrapped entity-component data API
and the viewer non-qt event system for mouse events thru the py plugin system.
Later has been developed to be an actually usable editing tool, and currently is the only tool for that for Naali.
TODO (most work is in api additions on the c++ side, then simple usage here):
- local & global movement
- (WIP, needs network event refactoring) sync changes from over the net to the gui dialog: listen to scene objectupdates
(this is needed/nice when someone else is moving the same obj at the same time,
works correctly in slviewer, i.e. the dialogs there update on net updates)
- hilite the selected object
(- list all the objects to allow selection from there)
"""
import rexviewer as r
from circuits import Component
from PythonQt.QtUiTools import QUiLoader
from PythonQt.QtCore import QFile
from conversions import quat_to_euler, euler_to_quat #for euler - quat -euler conversions
from PythonQt.QtGui import QVector3D as Vec
from PythonQt.QtGui import QQuaternion as Quat
try:
    # These names only exist when this module is being re-executed (reload).
    window
    manipulator
except: #first run
    try:
        import window
        import manipulator
    except ImportError, e:
        print "couldn't load window and manipulator:", e
else:
    # Re-run: reload the helper modules so code edits are picked up live.
    window = reload(window)
    manipulator = reload(manipulator)
#NOTE: these are not ported yet after using OIS was dropped, so don't work
# Legacy OIS keyboard scancodes, kept for reference only.
OIS_KEY_ALT = 256
OIS_KEY_CTRL = 16
OIS_KEY_M = 50
OIS_KEY_S = 31
OIS_KEY_R = 19
OIS_KEY_U = 22
OIS_KEY_D = 32
OIS_KEY_Z = 44
OIS_KEY_ESC = 1
OIS_KEY_DEL = 211
class ObjectEdit(Component):
    """In-world object editing tool: selection, move/scale/rotate
    manipulators, duplication, linking and deletion, synced to the
    server via the world stream."""
    EVENTHANDLED = False
    UPDATE_INTERVAL = 0.05 #how often the networkUpdate will be sent
    # Manipulator mode ids (keys into self.manipulators)
    MANIPULATE_FREEMOVE = 0
    MANIPULATE_MOVE = 1
    MANIPULATE_SCALE = 2
    MANIPULATE_ROTATE = 3
    # Qt Designer .ui file for the rubber-band selection rectangle
    SELECTIONRECT = "pymodules/objectedit/selection.ui"
    def __init__(self):
        """Create the edit window, manipulators and the selection-rect UI."""
        self.sels = []  # currently selected entities
        Component.__init__(self)
        self.window = window.ObjectEditWindow(self)
        self.resetValues()
        self.worldstream = r.getServerConnection()
        # Mouse event id -> bound handler method
        self.mouse_events = {
            #r.LeftMouseClickPressed: self.LeftMousePressed,
            r.InWorldClick: self.LeftMousePressed,
            r.LeftMouseClickReleased: self.LeftMouseReleased,
            r.RightMouseClickPressed: self.RightMousePressed,
            r.RightMouseClickReleased: self.RightMouseReleased
        }
        # Keyboard shortcut event id -> bound handler
        self.shortcuts = {
            #r.PyObjectEditDeselect: self.deselect,
            r.PyObjectEditToggleMove: self.window.manipulator_move,#"ALT+M", #move
            r.PyObjectEditToggleScale: self.window.manipulator_scale,#"ALT+S" #, #scale
            r.Delete: self.deleteObject,
            r.Undo: self.undo,
            r.PyDuplicateDrag: self.duplicateStart,
            r.ObjectLink: self.linkObjects,
            r.ObjectUnlink: self.unlinkObjects,
        }
        self.resetManipulators()
        # Load the rubber-band selection rectangle widget and register it
        # with the viewer's UI scene manager.
        loader = QUiLoader()
        selectionfile = QFile(self.SELECTIONRECT)
        self.selection_rect = loader.load(selectionfile)
        rectprops = r.createUiWidgetProperty(2)
        #~ print type(rectprops), dir(rectprops)
        #print rectprops.WidgetType
        #uiprops.widget_name_ = "Selection Rect"
        #uiprops.my_size_ = QSize(width, height) #not needed anymore, uimodule reads it
        proxy = r.createUiProxyWidget(self.selection_rect, rectprops)
        uism = r.getUiSceneManager()
        uism.AddProxyWidget(proxy)
        proxy.setWindowFlags(0) #changing it to Qt::Widget
        self.selection_rect.setGeometry(0,0,0,0)
        self.selection_rect_startpos = None
        r.c = self #this is for using objectedit from command.py
    def resetValues(self):
        """Reset all per-session interaction state to its defaults."""
        self.left_button_down = False
        self.right_button_down = False
        self.sel_activated = False #to prevent the selection to be moved on the intial click
        self.prev_mouse_abs_x = 0
        self.prev_mouse_abs_y = 0
        self.dragging = False
        self.time = 0  # accumulator for the network-update throttle
        self.keypressed = False
        self.windowActive = False
        self.canmove = False
        #self.selection_box = None
        self.selection_rect_startpos = None
    def resetManipulators(self):
        """(Re)create the manipulator objects; their visuals are built lazily
        on the first click (see LeftMousePressed)."""
        self.manipulatorsInit = False
        self.manipulators = {}
        self.manipulators[self.MANIPULATE_MOVE] = manipulator.MoveManipulator(self)
        self.manipulators[self.MANIPULATE_SCALE] = manipulator.ScaleManipulator(self)
        self.manipulators[self.MANIPULATE_FREEMOVE] = manipulator.FreeMoveManipulator(self)
        self.manipulators[self.MANIPULATE_ROTATE] = manipulator.RotationManipulator(self)
        # free movement is the default mode
        self.manipulator = self.manipulators[self.MANIPULATE_FREEMOVE]
    def baseselect(self, ent):
        """Shared selection plumbing: resolve the root parent, notify the
        server, highlight, and reset the manipulator to freemove.

        Returns (entity, children) as resolved by parentalCheck."""
        ent, children = self.parentalCheck(ent)
        self.sel_activated = False
        self.worldstream.SendObjectSelectPacket(ent.id)
        #self.updateSelectionBox(ent)
        self.highlight(ent)
        self.changeManipulator(self.MANIPULATE_FREEMOVE)
        return ent, children
def parentalCheck(self, ent):
exitLoop = False
parent = False
children = []
while(not exitLoop):
qprim = ent.prim
if qprim is not None:
if qprim.ParentId != 0:
#~ r.logInfo("Entity had a parent, lets pick that instead!")
ent = r.getEntity(qprim.ParentId)
else:
#~ r.logInfo("Entity had no parent, maybe it has children?")
children = qprim.GetChildren()
if len(children)>0:
parent = True
exitLoop = True
return ent, children
    def select(self, ent):
        """Make *ent* (or its root parent) the sole selection."""
        self.deselect_all()
        ent, children = self.baseselect(ent)
        self.sels.append(ent)
        self.window.selected(ent, False)
        self.canmove = True
        self.highlightChildren(children)
def multiselect(self, ent):
self.sels.append(ent)
ent, children = self.baseselect(ent)
self.window.selected(ent, True)
self.highlightChildren(children)
    def highlightChildren(self, children):
        """Highlight the child entities of a selected parent and list them
        in the edit window (without adding them to the selection)."""
        for child_id in children:
            child = r.getEntity(int(child_id))
            self.window.addToList(child)
            self.window.highlightEntityFromList(child)
            self.highlight(child)
            #self.sels.append(child)
def deselect(self, ent):
self.remove_highlight(ent)
for _ent in self.sels: #need to find the matching id in list 'cause PyEntity instances are not reused yet XXX
if _ent.id == ent.id:
self.sels.remove(_ent)
self.window.deselectSelection(_ent.id)
    def deselect_all(self):
        """Clear the whole selection: highlights, manipulator and window state."""
        if len(self.sels) > 0:
            #XXX might need something here?!
            for ent in self.sels:
                self.remove_highlight(ent)
            self.sels = []
            #self.hideSelector()
            self.hideManipulator() #manipulator
            self.prev_mouse_abs_x = 0
            self.prev_mouse_abs_y = 0
            self.canmove = False
            self.window.deselected()
# def updateSelectionBox(self, ent):
# if ent is not None:
# bb = list(ent.boundingbox)
# height = abs(bb[4] - bb[1])
# width = abs(bb[3] - bb[0])
# depth = abs(bb[5] - bb[2])
# self.selection_box.placeable.Position = ent.placeable.Position
# self.selection_box.placeable.Scale = Vec(height, width, depth)#depth, width, height
# self.selection_box.placeable.Orientation = ent.placeable.Orientation
    def highlight(self, ent):
        """Show a highlight on *ent*, creating its EC_Highlight component on demand."""
        try:
            ent.highlight
        except AttributeError:
            # entity had no highlight component yet -- add one
            ent.createComponent("EC_Highlight")
            #print "created a new Highlight component"
        h = ent.highlight
        #print type(h), h
        if not h.IsVisible():
            h.Show()
        else:
            r.logInfo("objectedit.highlight called for an already hilited entity: %d" % ent.id)
    def remove_highlight(self, ent):
        """Hide *ent*'s highlight; logs when the entity has none."""
        try:
            h = ent.highlight
        except AttributeError:
            r.logInfo("objectedit.remove_highlight called for a non-hilited entity: %d" % ent.id)
        else:
            h.Hide()
    def changeManipulator(self, id):
        """Switch the active manipulator (move/scale/rotate/freemove) by mode id
        and show it on the current selection."""
        #r.logInfo("changing manipulator to " + str(id))
        newmanipu = self.manipulators[id]
        if newmanipu.NAME != self.manipulator.NAME:
            #r.logInfo("was something completely different")
            self.manipulator.hideManipulator()
            self.manipulator = newmanipu
        #ent = self.active
        self.manipulator.showManipulator(self.sels)
    def hideManipulator(self):
        """Hide whichever manipulator is currently active."""
        self.manipulator.hideManipulator()
# def hideSelector(self):
# try: #XXX! without this try-except, if something is selected, the viewer will crash on exit
# if self.selection_box is not None:
# self.selection_box.placeable.Scale = Vec(0.0, 0.0, 0.0)
# self.selection_box.placeable.Position = Vec(0.0, 0.0, 0.0)
# except RuntimeError, e:
# r.logDebug("hideSelector failed")
def getSelectedObjectIds(self):
ids = []
for ent in self.sels:
qprim = ent.prim
children = qprim.GetChildren()
for child_id in children:
#child = r.getEntity(int(child_id))
id = int(child_id)
if id not in ids:
ids.append(id)
ids.append(ent.id)
return ids
    def linkObjects(self):
        """Link all selected objects (and their children) on the server."""
        ids = self.getSelectedObjectIds()
        self.worldstream.SendObjectLinkPacket(ids)
        self.deselect_all()
    def unlinkObjects(self):
        """Unlink all selected objects (and their children) on the server."""
        ids = self.getSelectedObjectIds()
        self.worldstream.SendObjectDelinkPacket(ids)
        self.deselect_all()
    def LeftMousePressed(self, mouseinfo):
        """Left click: raycast for an entity; select it, arm manipulation on a
        second click, or start a rubber-band selection on empty space."""
        #r.logDebug("LeftMousePressed") #, mouseinfo, mouseinfo.x, mouseinfo.y
        #r.logDebug("point " + str(mouseinfo.x) + "," + str(mouseinfo.y))
        self.dragStarted(mouseinfo) #need to call this to enable working dragging
        # if self.selection_box is None:
        #     self.selection_box = r.createEntity("Selection.mesh", -10000)
        self.left_button_down = True
        results = []
        results = r.rayCast(mouseinfo.x, mouseinfo.y)
        ent = None
        if results is not None and results[0] != 0:
            id = results[0]
            ent = r.getEntity(id)
        # lazily build manipulator visuals on the first click
        if not self.manipulatorsInit:
            self.manipulatorsInit = True
            for manipulator in self.manipulators.values():
                manipulator.initVisuals()
        self.manipulator.initManipulation(ent, results)
        if ent is not None:
            #print "Got entity:", ent, ent.editable
            if not self.manipulator.compareIds(ent.id) and ent.editable: #ent.id != self.selection_box.id and
                #if self.sel is not ent: #XXX wrappers are not reused - there may now be multiple wrappers for same entity
                r.eventhandled = self.EVENTHANDLED
                found = False
                for entity in self.sels:
                    if entity.id == ent.id:
                        found = True
                if self.active is None or self.active.id != ent.id: #a diff ent than prev sel was changed
                    if self.validId(ent.id):
                        if not found:
                            self.select(ent)
                elif self.active.id == ent.id: #canmove is the check for click and then another click for moving, aka. select first, then start to manipulate
                    self.canmove = True
        else:
            # clicked empty space: begin a rubber-band selection rectangle
            self.selection_rect_startpos = (mouseinfo.x, mouseinfo.y)
            #print "canmove:", self.canmove
            self.canmove = False
            self.deselect_all()
def dragStarted(self, mouseinfo):
width, height = r.getScreenSize()
normalized_width = 1/width
normalized_height = 1/height
mouse_abs_x = normalized_width * mouseinfo.x
mouse_abs_y = normalized_height * mouseinfo.y
self.prev_mouse_abs_x = mouse_abs_x
self.prev_mouse_abs_y = mouse_abs_y
    def LeftMouseReleased(self, mouseinfo):
        """Left release: push pending moves over the network, finish an active
        drag, and resolve/hide the rubber-band selection rectangle."""
        self.left_button_down = False
        if self.active: #XXX something here?
            if self.sel_activated and self.dragging:
                for ent in self.sels:
                    #~ print "LeftMouseReleased, networkUpdate call"
                    parent, children = self.parentalCheck(ent)
                    r.networkUpdate(ent.id)
                    for child in children:
                        child_id = int(child)
                        r.networkUpdate(child_id)
            self.sel_activated = True
        if self.dragging:
            self.dragging = False
            self.manipulator.stopManipulating()
            self.duplicateDragStart = False #XXXchange?
        if self.selection_rect_startpos is not None:
            self.selection_rect.hide()
            rectx, recty, rectwidth, rectheight = self.selectionRectDimensions(mouseinfo)
            if rectwidth != 0 and rectheight != 0:
                r.logInfo("The selection rect was at: (" +str(rectx) + ", " +str(recty) + ") and size was: (" +str(rectwidth) +", "+str(rectheight)+")")
            self.selection_rect.setGeometry(0,0,0,0)
            self.selection_rect_startpos = None
def selectionRectDimensions(self, mouseinfo):
rectx = self.selection_rect_startpos[0]
recty = self.selection_rect_startpos[1]
rectwidth = (mouseinfo.x - rectx)
rectheight = (mouseinfo.y - recty)
if rectwidth < 0:
rectx += rectwidth
rectwidth *= -1
if rectheight < 0:
recty += rectheight
rectheight *= -1
return rectx, recty, rectwidth, rectheight
    def RightMousePressed(self, mouseinfo):
        """Right click: toggle the hit entity's membership in the multiselection."""
        #r.logInfo("rightmouse down")
        if self.windowActive:
            self.right_button_down = True
            results = []
            results = r.rayCast(mouseinfo.x, mouseinfo.y)
            ent = None
            if results is not None and results[0] != 0:
                id = results[0]
                ent = r.getEntity(id)
            found = False
            if ent is not None:
                #print "Got entity:", ent.id
                for entity in self.sels:
                    if entity.id == ent.id:
                        found = True #clicked on an already selected entity
                        #print "multiselect clicked entity is already in selection"
                #if self.active is None or self.active.id != ent.id: #a diff ent than prev sel was changed
                if self.validId(ent.id):
                    if not found:
                        #print "new ent to add to multiselect found:", ent.id
                        self.multiselect(ent)
                    else: #remove this entity which was previously in the selection
                        self.deselect(ent)
                    self.canmove = True
        #r.logInfo(str(self.sels))
def validId(self, id):
if id != 0 and id > 50: #terrain seems to be 3 and scene objects always big numbers, so > 50 should be good, though randomly created local entities can get over 50...
if id != r.getUserAvatarId(): #add other avatar id's check
if not self.manipulator.compareIds(id): #and id != self.selection_box.id:
return True
return False
    def RightMouseReleased(self, mouseinfo):
        """Track right button state."""
        #r.logInfo("rightmouse up")
        self.right_button_down = False
def on_mouseclick(self, click_id, mouseinfo, callback):
if self.windowActive: #XXXnot self.canvas.IsHidden():
if self.mouse_events.has_key(click_id):
self.mouse_events[click_id](mouseinfo)
#~ r.logInfo("on_mouseclick %d %s" % (click_id, self.mouse_events[click_id]))
#r.logInfo("on_mouseclick %d" % (click_id))
    def on_mousemove(self, event_id, mouseinfo, callback):
        """for hilighting manipulator parts when hovering over them"""
        #print "m"
        if self.windowActive:# and event_id == :
            #~ print "m"
            results = []
            results = r.rayCast(mouseinfo.x, mouseinfo.y)
            if results is not None and results[0] != 0:
                id = results[0]
                if self.manipulator.compareIds(id):
                    # hovering over a part of the active manipulator
                    self.manipulator.highlight(results)
                else:
                    self.manipulator.resethighlight()
    def on_mousedrag(self, move_id, mouseinfo, callback):
        """dragging objects around - now free movement based on view,
        dragging different axis etc in the manipulator to be added."""
        #print "mousedrag:", move_id, mouseinfo
        if self.windowActive:
            # normalize mouse position to [0, 1] screen coordinates
            width, height = r.getScreenSize()
            normalized_width = 1/width
            normalized_height = 1/height
            mouse_abs_x = normalized_width * mouseinfo.x
            mouse_abs_y = normalized_height * mouseinfo.y
            movedx = mouse_abs_x - self.prev_mouse_abs_x
            movedy = mouse_abs_y - self.prev_mouse_abs_y
            if self.left_button_down:
                if self.selection_rect_startpos is not None:# and self.active is None:
                    # rubber-band selection in progress: grow the rectangle
                    rectx, recty, rectwidth, rectheight = self.selectionRectDimensions(mouseinfo)
                    self.selection_rect.setGeometry(rectx, recty, rectwidth, rectheight)
                    self.selection_rect.show() #XXX change?
                    #r.logInfo("The selection rect was at: (" +str(rectx) + ", " +str(recty) + ") and size was: (" +str(rectwidth) +", "+str(rectheight)+")")
                    rect = self.selection_rect.rect #0,0 - x, y
                    rect.translate(mouseinfo.x, mouseinfo.y)
                    #print rect.left(), rect.top(), rect.right(), rect.bottom()
                    rend = r.getQRenderer()
                    hits = rend.FrustumQuery(rect) #the wish
                    #hits = r.frustumQuery(rect.left(), rect.top(), rect.right(), rect.bottom()) #current workaround
                    print hits
                else:
                    # dragging the selection itself (or a duplicate of it)
                    if self.duplicateDragStart:
                        for ent in self.sels:
                            self.worldstream.SendObjectDuplicatePacket(ent.id, ent.prim.UpdateFlags, 0, 0, 0) #nasty hardcoded offset
                        self.duplicateDragStart = False
                    ent = self.active
                    #print "on_mousemove + hold:", mouseinfo
                    if ent is not None and self.sel_activated and self.canmove:
                        self.dragging = True
                        self.manipulator.manipulate(self.sels, movedx, movedy)
                        self.prev_mouse_abs_x = mouse_abs_x
                        self.prev_mouse_abs_y = mouse_abs_y
                        self.window.update_guivals(ent)
    def on_input(self, evid, callback):
        """Dispatch keyboard shortcut events to their handlers; acknowledges
        handled events via *callback*."""
        #print "input", evid
        if self.windowActive:
            if evid in self.shortcuts:#self.shortcuts.has_key((keycode, keymod)):
                self.keypressed = True
                self.shortcuts[evid]()
                callback(True)
    def on_inboundnetwork(self, evid, name, callback):
        """Inbound network events are currently ignored (never handled here)."""
        return False
        #print "editgui got an inbound network event:", id, name
    def undo(self):
        """Send an undo for the active entity and reset local edit state."""
        #print "undo clicked"
        ent = self.active
        if ent is not None:
            self.worldstream.SendObjectUndoPacket(ent.prim.FullId)
            self.window.update_guivals(ent)
            self.modified = False
            self.deselect_all()
#~ def redo(self):
#~ #print "redo clicked"
#~ ent = self.sel
#~ if ent is not None:
#~ #print ent.uuid
#~ #worldstream = r.getServerConnection()
#~ self.worldstream.SendObjectRedoPacket(ent.uuid)
#~ #self.sel = []
#~ self.update_guivals()
#~ self.modified = False
    def duplicate(self):
        """Duplicate every selected object with a fixed (1, 1, 1) offset."""
        #print "duplicate clicked"
        #ent = self.active
        #if ent is not None:
        for ent in self.sels:
            self.worldstream.SendObjectDuplicatePacket(ent.id, ent.prim.UpdateFlags, 1, 1, 1) #nasty hardcoded offset
    def duplicateStart(self):
        """Arm duplicate-on-drag; the copy is sent from on_mousedrag."""
        self.duplicateDragStart = True
def createObject(self):
avatar_id = r.getUserAvatarId()
avatar = r.getEntity(avatar_id)
pos = avatar.placeable.Position#r.getUserAvatarPos()
start_x = pos.x()
start_y = pos.y()
start_z = pos.z()
end_x = start_x
end_y = start_y
end_z = start_z
r.sendObjectAddPacket(start_x, start_y, start_z, end_x, end_y, end_z)
#XXX change to use worldstream and remove this py func from the hand made api
    def deleteObject(self):
        """Delete all selected objects (and their children) to the trash folder."""
        if self.active is not None:
            for ent in self.sels:
                #r.logInfo("deleting " + str(ent.id))
                ent, children = self.parentalCheck(ent)
                for child_id in children:
                    child = r.getEntity(int(child_id))
                    #~ self.window.addToList(child)
                    #~ print "deleting", child
                    #~ self.worldstream.SendObjectDeRezPacket(child.id, r.getTrashFolderId())
                    self.window.objectDeleted(str(child.id))
                #~ if len(children) == 0:
                self.worldstream.SendObjectDeRezPacket(ent.id, r.getTrashFolderId())
                self.window.objectDeleted(str(ent.id))
                #~ else:
                #~     r.logInfo("trying to delete a parent, need to fix this!")
            self.manipulator.hideManipulator()
            #self.hideSelector()
            self.deselect_all()
            self.sels = []
def float_equal(self, a,b):
#print abs(a-b), abs(a-b)<0.01
if abs(a-b)<0.01:
return True
else:
return False
    def changepos(self, i, v):
        """Slot: position component *i* of the active entity changed to *v* in the GUI."""
        #XXX NOTE / API TODO: exceptions in qt slots (like this) are now eaten silently
        #.. apparently they get shown upon viewer exit. must add some qt exc thing somewhere
        #print "pos index %i changed to: %f" % (i, v)
        ent = self.active
        if ent is not None:
            qpos = ent.placeable.Position
            pos = list((qpos.x(), qpos.y(), qpos.z())) #should probably wrap Vector3, see test_move.py for refactoring notes.
            if not self.float_equal(pos[i],v):
                pos[i] = v
                #converted to list to have it mutable
                newpos = Vec(pos[0], pos[1], pos[2])
                ent.placeable.Position = newpos
                ent.network.Position = newpos
                self.manipulator.moveTo(self.sels)
                self.modified = True
                if not self.dragging:
                    r.networkUpdate(ent.id)
def changescale(self, i, v):
ent = self.active
if ent is not None:
qscale = ent.placeable.Scale
oldscale = list((qscale.x(), qscale.y(), qscale.z()))
scale = list((qscale.x(), qscale.y(), qscale.z()))
if not self.float_equal(scale[i],v):
scale[i] = v
if self.window.mainTab.scale_lock.checked:
#XXX BUG does wrong thing - the idea was to maintain aspect ratio
diff = scale[i] - oldscale[i]
for index in range(len(scale)):
#print index, scale[index], index == i
if index != i:
scale[index] += diff
ent.placeable.Scale = Vec(scale[0], scale[1], scale[2])
if not self.dragging:
r.networkUpdate(ent.id)
#self.window.update_scalevals(scale)
self.modified = True
#self.updateSelectionBox(ent)
    def changerot(self, i, v):
        """Slot: euler rotation component *i* of the active entity changed to *v*."""
        #XXX NOTE / API TODO: exceptions in qt slots (like this) are now eaten silently
        #.. apparently they get shown upon viewer exit. must add some qt exc thing somewhere
        #print "pos index %i changed to: %f" % (i, v)
        ent = self.active
        if ent is not None:
            qquat = ent.placeable.Orientation
            euler = list(quat_to_euler((qquat.x(), qquat.y(), qquat.z(), qquat.scalar())))
            if not self.float_equal(euler[i],v):
                euler[i] = v
                ort = euler_to_quat(euler)
                #print euler, ort
                # PythonQt's QQuaternion takes (scalar, x, y, z)
                ort = Quat(ort[3], ort[0], ort[1], ort[2])
                ent.placeable.Orientation = ort
                ent.network.Orientation = ort
                if not self.dragging:
                    r.networkUpdate(ent.id)
                self.modified = True
                #self.window.update_rotvals(ort)
                #self.selection_box.placeable.Orientation = ort
def getActive(self):
if len(self.sels) > 0:
ent = self.sels[-1]
return ent
return None
active = property(getActive)
    def on_exit(self):
        """Clean up the selection and the edit window when the module unloads."""
        r.logInfo("Object Edit exiting...")
        self.deselect_all()
        self.window.on_exit()
        r.logInfo(" ...exit done.")
    def on_hide(self, shown):
        """Track window visibility; clear the selection in either case."""
        self.windowActive = shown
        if self.windowActive:
            self.sels = []
            try:
                self.manipulator.hideManipulator()
                #if self.move_arrows is not None:
                    #ent = self.move_arrows.id
                #is called by qt also when viewer is exiting,
                #when the scene (in rexlogic module) is not there anymore.
            except RuntimeError, e:
                r.logDebug("on_hide: scene not found")
            else:
                self.deselect_all()
        else:
            self.deselect_all()
    def update(self, time):
        """Per-frame tick: every UPDATE_INTERVAL seconds, keep the manipulator
        tracking the active entity's position."""
        #print "here", time
        if self.windowActive:
            self.time += time
            if self.sels:
                ent = self.active
                if self.time > self.UPDATE_INTERVAL:
                    try:
                        #sel_pos = self.selection_box.placeable.Position
                        arr_pos = self.manipulator.getManipulatorPosition()
                        ent_pos = ent.placeable.Position
                        #if sel_pos != ent_pos:
                        self.time = 0 #XXX NOTE: is this logic correct?
                        #    self.selection_box.placeable.Position = ent_pos
                        if arr_pos != ent_pos:
                            self.manipulator.moveTo(self.sels)
                    except RuntimeError, e:
                        r.logDebug("update: scene not found")
    def on_logout(self, id):
        """Reset all editing state when the user logs out."""
        r.logInfo("Object Edit resetting due to Logout.")
        self.deselect_all()
        self.sels = []
        self.selection_box = None
        self.resetValues()
        self.resetManipulators()
|
from nltk.tokenize import word_tokenize as wt
from nltk.tokenize import sent_tokenize as st
from nltk.util import ngrams
from nltk.tokenize import RegexpTokenizer as rt
# Scratch script exploring NLTK tokenizers on one sample sentence;
# experiments below are toggled by (un)commenting.
text = "Hi Mr. Smith! I'm going to buy some vegetables (tomatoes and cucumbers). From the store, you know?"
#print(wt(text))
#print(st(text))
'''
# ngrams
words = wt(text)
print(list(ngrams(words,4)))
'''
# regexp tokenizer
#cap_tokenizer = rt("[A-Z]\w+") # it's a specification of regexp tokenizer
#print(cap_tokenizer.tokenize(text))
|
#!/usr/bin/env python
# encoding: utf-8
from __future__ import division
import sys
#sys.path.insert(0, __file__+'/pyBusPirateLite')
sys.path.insert(0, './nfc/pyBusPirateLite')
from pyBusPirateLite.I2C import *
#[0x78 0x0 0xae] [0x78 0 0xd5 0x80] [0x78 0 0x3f] [0x78 0 0xd3 0] [0x78 0 0x40] [0x78 0 0x8d 0x14] [0x78 0 0x20 0] [0x78 0 0xA1] [0x78 0 0xC8] [0x78 0 0xda 0x12] [0x78 0 0x81 0xcf] [0x78 0 0xd9 0xf1] [0x78 0 0xdb 0x40] [0x78 0 0xa4] [0x78 0 0xa6]
#[0x78 0 0xaf][0x78 0 0x21, 0, 127][0x78 0 0x22, 0, 7]
#[0x78 0x40 0:1024]
class ssd1306(object):
    """Minimal SSD1306 OLED driver speaking I2C through a Bus Pirate bridge."""
    def __init__(self, i2c):
        """Configure the Bus Pirate for I2C at 100 kHz and reset the display.

        BUG FIX: the parameter was named 'isc' but the body used the module
        global 'i2c'; the passed-in bus object is now actually used/stored.
        """
        self.i2c = i2c
        assert i2c.BBmode()
        assert i2c.enter_I2C()
        assert i2c.cfg_pins(PinCfg.POWER)
        assert i2c.set_speed(I2CSpeed._100KHZ)
        self.reset()
    def reset(self):
        """Reset the display.

        BUG FIX: the body was the bare name 'asdf', which raised NameError on
        every construction. Proper reset command sequencing is still TODO.
        """
        pass
    def write(self, data):
        """Send *data* bytes in one I2C start/stop transaction.

        BUG FIX: 'self' was missing, so oled.write(x) bound x to 'data';
        also use the stored bus instead of the module global.
        """
        self.i2c.send_start_bit()
        self.i2c.bulk_trans(len(data), data)
        self.i2c.send_stop_bit()
if __name__ == '__main__':
    i2c = I2C("/dev/ttyUSB0", 115200)
    # BUG FIX: the original passed the undefined name 'spi' (NameError);
    # the driver expects the I2C bus object.
    oled = ssd1306(i2c)
    i2c.resetBP()
|
from core.queries.sql import (
transform_table_dim_artists,
transform_table_dim_songs,
transform_table_dim_time,
transform_table_dim_users,
transform_table_fact_songplays,
)
from settings.envs import (
DWH_DB_PUBLIC_VAULT,
DWH_DB_RAW_VAULT,
)
# Transformation plan: one entry per target table in the public vault.
# Each dict pairs a SQL template with the schemas/tables it is rendered
# against; fact_songplays joins the two raw tables.
transform_data = [
    {
        "query": transform_table_dim_artists,
        "raw_vault": DWH_DB_RAW_VAULT,
        "public_vault": DWH_DB_PUBLIC_VAULT,
        "raw_table": "raw__song_data",
        "public_table": "dim_artists",
    },
    {
        "query": transform_table_dim_songs,
        "raw_vault": DWH_DB_RAW_VAULT,
        "public_vault": DWH_DB_PUBLIC_VAULT,
        "raw_table": "raw__song_data",
        "public_table": "dim_songs",
    },
    {
        "query": transform_table_dim_time,
        "raw_vault": DWH_DB_RAW_VAULT,
        "public_vault": DWH_DB_PUBLIC_VAULT,
        "raw_table": "raw__log_data",
        "public_table": "dim_time",
    },
    {
        "query": transform_table_dim_users,
        "raw_vault": DWH_DB_RAW_VAULT,
        "public_vault": DWH_DB_PUBLIC_VAULT,
        "raw_table": "raw__log_data",
        "public_table": "dim_users",
    },
    {
        "query": transform_table_fact_songplays,
        "raw_vault": DWH_DB_RAW_VAULT,
        "public_vault": DWH_DB_PUBLIC_VAULT,
        "raw_table": ("raw__log_data", "raw__song_data"),
        "public_table": "fact_songplays",
    },
]
|
import unittest
import main
class TestGame(unittest.TestCase):
    """Unit tests for the guessing game."""
    def test_input(self):
        """A guess equal to the answer is reported as correct."""
        guess = 5
        answer = 5
        # Use the local variables: the old code hard-coded 5, 5 and left
        # guess/answer unused.
        result = main.run_guess(guess, answer)
        self.assertTrue(result)
if __name__ == '__main__':
    # Discover and run the tests when executed directly.
    unittest.main()
|
#!/usr/bin/python
#
# Parser for token and entropy files.
# Looks up and records tokens and entropies.
#
# Lance Simmons, November 2016
import csv
import errno
import fnmatch
import os
import random
import sys
import time
# Seed rng
random.seed()
def createDirectory(path):
    """Create *path* (and any missing parents); an existing directory is fine."""
    try:
        os.makedirs(path)
    except OSError as err:
        if err.errno == errno.EEXIST:
            return  # already there -- not an error
        raise
def _gather_token_files(root):
    """Recursively collect *.code.tokens paths under *root*, sorted, with
    backslashes normalized to forward slashes (Windows cleanup)."""
    paths = []
    for dirpath, _dirnames, filenames in os.walk(root):
        for filename in fnmatch.filter(filenames, '*.code.tokens'):
            paths.append(os.path.join(dirpath, filename))
    paths.sort()
    return [p.replace("\\", "/") for p in paths]

def _write_labeled_lines(out_path, token_paths, line_bug_map, set_name):
    """Write every token file in *token_paths* to *out_path*, appending each
    line's bug flag (0 when unknown) and <START_FILE>/<END_FILE> sentinels."""
    with open(out_path, 'w') as writer:
        for pathname in token_paths:
            with open(pathname, 'r') as token_file:
                token_lines = token_file.readlines()
            print("Building " + set_name + " set from: " + pathname)
            # True when we have per-line bug information for this file
            known_file = pathname in line_bug_map
            writer.write("<START_FILE> 0\n")
            for line_index in range(len(token_lines)):
                # strip the newline and collapse internal whitespace
                temp_line = ' '.join(token_lines[line_index].split('\n')[0].split())
                key = str(line_index + 1)  # bug map is keyed by 1-based line number
                if known_file and key in line_bug_map[pathname]:
                    temp_line = temp_line + " " + str(line_bug_map[pathname][key])
                else:
                    # No information about this line: assume not buggy.
                    temp_line = temp_line + " 0"
                writer.write(temp_line + "\n")
            writer.write("<END_FILE> 0\n")

def main():
    """Build training/testing token files labeled with per-line bug flags.

    Expects one command-line argument: the directory containing the buggy-line
    CSV files (directly) and the project/snapshot token directories.
    """
    if len(sys.argv) != 2:
        print("Please specify directory to search as second argument.")
        exit()
    directoryToSearch = sys.argv[1]
    tokenFilePaths = _gather_token_files(directoryToSearch)
    # Files directly under the search dir are buggy-line CSVs; directories
    # are projects whose subdirectories are the snapshots in use.
    bugFilePaths = []
    projectDirectories = {}
    for item in os.listdir(directoryToSearch):
        full = os.path.join(directoryToSearch, item)
        if os.path.isfile(full):
            bugFilePaths.append(full)
        else:
            projectDirectories[item] = []
            for subDirName in os.listdir(full):
                if not os.path.isfile(os.path.join(full, subDirName)):
                    projectDirectories[item].append(subDirName)
    # token file path -> {line number (str) -> is_bug flag (str)}
    codeTokenPaths = {}
    for bugFile in bugFilePaths:
        print(bugFile)
        with open(bugFile, 'r') as fileHandler:
            first_line = True
            for line in fileHandler:
                # skip the header row (field names)
                if first_line:
                    first_line = False
                    continue
                lineValues = line.split(",")
                project = lineValues[0][1:-1]    # project (quoted)
                snapshot = lineValues[1]         # snapshot
                filename = lineValues[2][1:-1]   # Java file name (quoted)
                token_line = lineValues[4]       # token line number
                is_bug = lineValues[7]           # 0 or 1
                # map the Java file to its token file (".java" -> ".code.tokens")
                fullTokenFileName = project + "/" + snapshot + "/" + filename
                fullTokenFileName = "projects/" + fullTokenFileName[:-4] + "code.tokens"
                # Only keep lines whose snapshot is one we are using.
                if snapshot not in projectDirectories[project]:
                    continue
                codeTokenPaths.setdefault(fullTokenFileName, {})[token_line] = is_bug
    # Split ~20% of the token files into the test set, the rest into training.
    tokenTrainSet = []
    tokenTestSet = []
    # BUG FIX: len(...)/5 is a float on Python 3 and random.sample requires
    # an integer sample size -- use floor division.
    indicesOfTestSet = set(random.sample(range(len(tokenFilePaths)), len(tokenFilePaths) // 5))
    for index in range(len(tokenFilePaths)):
        if index in indicesOfTestSet:
            tokenTestSet.append(tokenFilePaths[index])
        else:
            tokenTrainSet.append(tokenFilePaths[index])
    print("Files in token train set: " + str(len(tokenTrainSet)))
    print("Files in token test set: " + str(len(tokenTestSet)))
    _write_labeled_lines("trainingLinesWithBugIndicators.txt", tokenTrainSet, codeTokenPaths, "training")
    _write_labeled_lines("testingLinesWithBugIndicators.txt", tokenTestSet, codeTokenPaths, "testing")
    print("Generated Training and Test set files.")
    exit()
if __name__ == "__main__":
    # Run only when executed as a script.
    main()
|
from sklearn.ensemble import AdaBoostClassifier
'''
pruning
'''
class Boosting:
    """Thin wrapper bundling an AdaBoost classifier with plotting hooks."""
    def __init__(self):
        # classifiers registered through save_figures
        self.clfs = []
    def get_classifer(self, x, y):
        """Return [(estimator, display name, file tag)] for the boosting model.

        NOTE(review): x and y appear to exist for interface parity with
        sibling model classes; they are unused here.
        """
        model = AdaBoostClassifier(
            base_estimator=None,
            n_estimators=50,
            learning_rate=0.5,
            algorithm='SAMME.R',
            random_state=23,
        )
        return [(model, 'Boosting', 'boosting_model')]
    def save_figures(self, clf):
        """Remember *clf* for later use."""
        self.clfs.append(clf)
    def plot(self, x_train, y_train, x_test, y_test):
        """Plotting is not implemented for boosting."""
        pass
#收集主机mac地址。提示使用tcpdump -nn -i eth0 port 68 -l
#应用环境,在kickstart集中部署时使用
#coding:UTF8
from subprocess import Popen,PIPE
import time
import sys
import os
def get_data():
    """Start tcpdump capturing up to 1000 DHCP packets (port 68) on br0.

    Returns the process's stdout pipe for line-by-line consumption.
    """
    proc = Popen('tcpdump -c1000 -nn -i br0 port 68 -l',
                 stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
    return proc.stdout
def write_file(file, lines):
    """Append one record plus the current local timestamp to *file*."""
    stamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    with open(file, 'a') as fd:
        fd.write('%s %s\n' % (lines, stamp))
def get_mac(file):
    """Read tcpdump output line by line and log the MAC of every DHCP Request.

    Stops when tcpdump's stdout is exhausted (empty readline).
    """
    stream = get_data()
    while True:
        line = stream.readline()
        if not line:
            break
        # tcpdump prints "timestamp proto, Request from <mac>, ..." — take
        # the second comma-separated field.
        payload = line.split(',')[1].strip()
        if payload.startswith('Request'):
            mac = payload.split()[2]
            print(mac)
            write_file(file, mac)
if __name__ == '__main__':
    # Usage: python get_mac.py <output-file>
    try:
        file = sys.argv[1]
    except IndexError:
        # No argument supplied: show usage and quit.
        # Fixed: the original bare `except:` also caught the SystemExit raised
        # by sys.exit() in the directory branch, printing both messages.
        print("%s follow file" % __file__)
        sys.exit()
    if os.path.isdir(file):
        # A directory cannot be used as the log file.
        print("%s Is a directory" % file)
        sys.exit()
    get_mac(file)
#python get_mac.py mac.txt
|
"""Test the links.streams module."""
# Builtins
# Packages
from phylline.links.streams import StreamLink
# Lower-layer payloads that should coalesce into a single read().
LOWER_EVENTS = ['foo,', 'bar,', 'foobar!']
LOWER_BUFFERS = [event.encode('utf-8') for event in LOWER_EVENTS]
# Higher-layer writes and the single byte stream they should produce.
HIGHER_EVENTS = ['Hello,', 'world!']
HIGHER_BUFFERS = [event.encode('utf-8') for event in HIGHER_EVENTS]
HIGHER_STREAM = ''.join(HIGHER_EVENTS).encode('utf-8')
def test_stream_link():
    """Exercise StreamLink's interface."""
    print('Testing Stream Link:')
    link = StreamLink()
    assert repr(link) == '⇌~ StreamLink ~⇌'
    # Several lower-layer buffers must coalesce into one read.
    for chunk in LOWER_BUFFERS:
        link.to_read(chunk)
    assert link.read() == b'foo,bar,foobar!'
    # Writes from above are likewise concatenated into a single stream.
    for chunk in HIGHER_BUFFERS:
        link.write(chunk)
    assert link.to_write() == HIGHER_STREAM
|
from glob import glob
from pathlib import Path
import numpy as np
import SimpleITK as sitk
import snorkel
from skimage import io
from skimage.filters import (threshold_li, threshold_otsu, threshold_sauvola,
threshold_yen)
from skimage.measure import label, regionprops
from skimage.morphology import binary_dilation
from snorkel.labeling import LFApplier, labeling_function
from snorkel.labeling.model import LabelModel
import re
from dataset import io_load_image, io_save_image
def sorting(s):
    """Sort key: the last run of digits in *s*, as an integer."""
    return int(re.findall(r'\d+', s)[-1])
def getLargestCC(segmentation):
    """Return a boolean mask of the largest connected component of *segmentation*.

    The input is dilated with a 3x3x3 structuring element first, so nearby
    fragments merge before components are measured.
    """
    segmentation = binary_dilation(segmentation, np.ones((3,3,3)))
    labels:np.ndarray = label(segmentation, connectivity=2)
    assert( labels.max() != 0 ) # assume at least 1 CC
    # bincount[1:] skips background label 0; +1 maps the argmax back to a label id.
    largestCC = labels == np.argmax(np.bincount(labels.flat)[1:])+1
    return largestCC
def ignore_black_backgroudn(im):
    """Flatten *im* and drop zero-valued (background) voxels."""
    flat = im.flatten()
    return flat[flat != 0]
def li_thresholding(im):
    """Binarize *im* with Li's threshold computed on non-zero voxels only."""
    cutoff = threshold_li(ignore_black_backgroudn(im))
    return (im > cutoff).astype(np.uint8)
def otsu_thresholding(im):
    """Binarize *im* with Otsu's threshold computed on non-zero voxels only."""
    cutoff = threshold_otsu(ignore_black_backgroudn(im))
    return (im > cutoff).astype(np.uint8)
def yen_thresholding(im):
    """Binarize *im* with Yen's threshold computed on non-zero voxels only."""
    cutoff = threshold_yen(ignore_black_backgroudn(im))
    return (im > cutoff).astype(np.uint8)
def sauvola_thresholding(im):
    """Binarize *im* with Sauvola's local (per-pixel) threshold."""
    local_cutoff = threshold_sauvola(im)
    return (im > local_cutoff).astype(np.uint8)
def static_thresholding(im, threshold=15):
    """Binarize *im* against a fixed intensity cutoff.

    Generalized: *threshold* (previously hard-coded to 15) is now a keyword
    parameter; the default preserves the original behavior.
    """
    return np.array(im > threshold, dtype=np.uint8)
class Image():
    """Per-volume labeling result: a (n_voxels, n_lfs) vote matrix plus the volume shape."""
    def __init__(self, labels, shape):
        # labels: transposed labeling-function outputs, one row per voxel.
        self.labels = labels
        # shape: original volume shape, used to un-flatten predictions.
        self.shape = shape
def labeling_applier(lfs:list, dataset:list, filenames:list, original_images:list = None, save_perfix:str = 'data/ircad_snorkel', log:bool = False):
    """Function to generating label images.

    Applies every labeling function to every volume, trains a snorkel
    LabelModel on the stacked per-voxel votes, then writes one predicted
    segmentation per input volume.

    Parameters
    ----------
    lfs -
        LFs that this applier executes on examples
    dataset -
        List of numpy images
    filenames -
        list of filenames corresponding to dataset numpy images
    original_images -
        optional SimpleITK images whose metadata is copied onto the outputs
    save_perfix -
        folder save path
    log -
        if true print status information
    """
    labeled_images = []
    # Total voxel count across the whole dataset sizes the vote matrix.
    size = 0
    for array in dataset:
        mul = 1
        for e in array.shape:
            mul *= e
        size += mul
    lab_arr = np.zeros((size, len(lfs)), dtype=np.uint8)
    if log: print('Prepare arrays', 'size:', size, 'bytes')
    index = 0
    for array in dataset:
        labeled_array = []
        for func in lfs:
            labeled_array.append(func(array).flatten())
        # Rows = voxels, columns = labeling functions.
        T = np.array(labeled_array).T
        lab_arr[index:index+T.shape[0], :] = T
        labeled_images.append(Image(T, array.shape))
        index += T.shape[0]
    if log: print('Training')
    LM = LabelModel(cardinality=2, verbose=True, device='cuda')
    LM.fit(lab_arr, seed = 3333, log_freq=1, class_balance=[0.965, 0.035])
    if log: print('Predict')
    iterator = zip(labeled_images, filenames, range(len(filenames)), range(len(filenames)))
    if original_images is not None:
        iterator = zip(labeled_images, filenames, range(len(filenames)), original_images)
    for array, name, idx, image in iterator:
        save_path = str(Path(save_perfix) / name)
        if log: print('Image: ' + str(idx + 1) + '/' + str(len(filenames)) + ' Save path: ' + save_path)
        # Fixed: removed dead local `im_flat` that was allocated and never used.
        # Predict per-voxel labels, restore the volume shape, keep only the
        # largest connected component as foreground (value 255).
        p = LM.predict(array.labels)
        p = np.reshape(p, array.shape)
        p = getLargestCC(p)
        p[p > 0] = 255
        new_im = sitk.GetImageFromArray(np.array(p, dtype=np.uint8))
        if original_images is not None:
            # Carry voxel spacing/origin/direction over from the source image.
            new_im.CopyInformation(image)
        writer = sitk.ImageFileWriter()
        writer.SetFileName(save_path)
        writer.Execute(new_im)
def load_dataset(path:str = 'F:/Deep Learning/Data/vesselness_ircad_ICPR/train/*', im_name:str = 'antiga.nii'):
    """Load one named volume from every directory matching *path*.

    Returns parallel lists: numpy arrays, output filenames and the original
    SimpleITK images (kept for their metadata).
    """
    dataset, filenames, original_images = [], [], []
    for dir_path in glob(str(Path(path))):
        img = sitk.ReadImage(str(Path(dir_path) / im_name))
        dataset.append(sitk.GetArrayFromImage(img))
        original_images.append(img)
        filenames.append(Path(dir_path).parts[-1] + '_' + im_name)
    return dataset, filenames, original_images
def load_full_dataset(path:str = 'F:/Deep Learning/Data/vesselness_ircad_ICPR/all/*', im_names = ['antiga.nii', 'rorpo.nii']):
    """Load every volume in *im_names* from every directory matching *path*.

    Same return contract as load_dataset, but with one entry per
    (directory, image-name) pair.
    """
    dataset, filenames, original_images = [], [], []
    for dir_path in glob(str(Path(path))):
        for im_name in im_names:
            img = sitk.ReadImage(str(Path(dir_path) / im_name))
            dataset.append(sitk.GetArrayFromImage(img))
            original_images.append(img)
            filenames.append(Path(dir_path).parts[-1] + '_' + im_name)
    return dataset, filenames, original_images
#########
#################################################################################
#################################################################################
#################################################################################
def test():
    """Run a small snorkel labeling experiment on the local snorkel dataset.

    For each patient directory: load the masked liver volume, apply each
    labeling function to each vesselness image, train a LabelModel on the
    stacked votes, and save the per-patient predicted segmentations.
    """
    patient = 'maskedLiverIso.nii'
    filenames = ['antiga.nii', 'jerman.nii']
    labeling_functions = [li_thresholding, otsu_thresholding]
    path = 'F:/Deep Learning/Data/snorkel/*'
    dataset = []
    size = 0  # total voxels across all patients (sizes the vote matrix)
    for dir_path in sorted(glob(str(Path(path))), key=sorting):
        ip: Path = Path(dir_path) / patient
        arr, im = io_load_image(str(ip))
        shape = arr.shape
        arr = arr.flatten()
        labels = []
        # One flattened binary vote per (vesselness image, labeling function).
        for fn in filenames:
            for func in labeling_functions:
                i = Path(dir_path) / fn
                array, image = io_load_image(str(i))
                array = func(array.flatten())
                size += array.shape[-1]
                labels.append(array)
        dataset.append([im, arr, labels, ip.parts[-2], shape])
    # Rows = voxels, columns = (filename x labeling function) votes.
    array_reshape = (size // len(filenames) // len(labeling_functions), len(filenames) * len(labeling_functions))
    print(array_reshape)
    lab:np.ndarray = np.zeros((size), dtype='float16').reshape(array_reshape)
    print(size, lab.shape)
    s = 0
    for data in dataset:
        _, _, label, _, _ = data
        T: np.ndarray = np.array(label).T
        si = T.shape[0]
        lab[s:s+si, :] = T
        s += si
    LM: LabelModel = LabelModel(cardinality=2, verbose=True, device='cuda')
    LM.fit(lab, seed = 12345, log_freq=1, n_epochs=100, class_balance=[0.985, 0.015])
    s = 0
    # Predict per patient, keep only the largest connected component, save.
    for data in dataset:
        im, arr, label, fn, shape = data
        print(fn)
        T: np.ndarray = np.array(label).T
        p:np.ndarray = LM.predict(T)
        p = p.reshape(shape)
        p = getLargestCC(p)
        p[p > 0] = 255
        p = np.array(p, dtype='uint8')
        io_save_image('temp/' + fn + '.nii', p, im)
if __name__ == '__main__':
    test()
    exit()
    # NOTE(review): everything below is unreachable because of the exit()
    # above; kept as scratch code for the full snorkel pipeline run.
    train_images = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]
    # calc(PATH_NEW = 'F:/Deep Learning/U-Net-3D-IRCAD/data/ircad_snorkel/antiga_3/*')
    # exit(1)
    dataset, filenames, original_images = load_full_dataset('F:/Deep Learning/Data/vesselness_ircad_ICPR/train/*', im_names=['antiga.nii'])
    lfs = [li_thresholding, otsu_thresholding, yen_thresholding]
    labeling_applier(lfs, dataset, filenames, original_images, log=True)
    #applier = LFApplier(lfs=lfs)
    #L_train = applier.apply(dataset)
    #print(L_train)
|
from django.urls import path
from .views import GroupView, GetGroupView
urlpatterns = [
    # Collection endpoint: list/create groups.
    path('group', GroupView.as_view()),
    # Detail endpoint: look up a single group by its screen name.
    path('group/<str:screen_name>', GetGroupView.as_view()),
]
|
import random
def verify_conditions(participants, outcome):
    """Return True iff nobody in *outcome* is assigned themselves or their partner.

    :param participants: mapping of participant -> partner
    :param outcome: iterable of (participant, assignee) pairs
    """
    for participant, assignee in outcome:
        if assignee in (participant, participants[participant]):
            return False
    # all participants get an assignee that match with the conditions
    return True
def match_participants(participants):
    """Shuffle assignees until a valid Secret Santa pairing is found.

    Keeps reshuffling until verify_conditions accepts the outcome; may loop
    indefinitely if no valid pairing exists for the given partner map.
    """
    givers = list(participants.keys())
    receivers = givers.copy()
    while True:
        random.shuffle(receivers)
        outcome = list(zip(givers, receivers))
        if verify_conditions(participants, outcome):
            return outcome
|
'''
This is called doc string
Created on May 23, 2020
@author: Admin_2
'''
#=========================================================================
# There are 3 types of methods in python
# 1. instance method - are object related methods
# 2. static method - are general utility methods
# 3. class method - are class related methods
#=========================================================================
class Student:
    """Demonstrates the three Python method kinds: instance, class and static."""

    # Class variable shared by every Student instance.
    cname = "Python Class"

    def __init__(self, rno, name):
        """Store the roll number and name on the instance."""
        self.rno = rno
        self.name = name

    def display(self):
        """Print this student's roll number and name (instance method)."""
        print("*" * 25)
        print("Roll no. is ", self.rno)
        print("Student Name is ", self.name)

    @classmethod
    def getClassName(cls):
        # Fixed idiom: the first parameter of a classmethod is the class
        # itself and is conventionally named `cls`, not `self`.
        """Print the shared class name; callable on the class or an instance."""
        print("Class Name is ", cls.cname)

    @staticmethod
    def avgMarks(marks1, marks2):
        """Print the average of two marks (general utility, no state used)."""
        avg = (marks1 + marks2) / 2
        print("Average Score is ", avg)
# calling the Student class methods
s = Student(10, "Durga")
s.display()
# class/static methods may also be invoked through an instance:
s.getClassName()
s.avgMarks(56, 87)
# Other ways of calling Student method
s = Student(11, "Praveen")
s.display()
# ...or directly on the class, with no instance:
Student.getClassName()
Student.avgMarks(85, 75)
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from functools import cached_property
from tempfile import NamedTemporaryFile
from typing import TYPE_CHECKING, Callable, Iterable, Sequence
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
if TYPE_CHECKING:
from airflow.utils.context import Context
class S3ToSqlOperator(BaseOperator):
    """Load Data from S3 into a SQL Database.

    You need to provide a parser function that takes a filename as an input
    and returns an iterable of rows

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:S3ToSqlOperator`

    :param schema: reference to a specific schema in SQL database
    :param table: reference to a specific table in SQL database
    :param s3_bucket: reference to a specific S3 bucket
    :param s3_key: reference to a specific S3 key
    :param sql_conn_id: reference to a specific SQL database. Must be of type DBApiHook
    :param sql_hook_params: Extra config params to be passed to the underlying hook.
        Should match the desired hook constructor params.
    :param aws_conn_id: reference to a specific S3 / AWS connection
    :param column_list: list of column names to use in the insert SQL.
    :param commit_every: The maximum number of rows to insert in one
        transaction. Set to `0` to insert all rows in one transaction.
    :param parser: parser function that takes a filepath as input and returns an iterable.
        e.g. to use a CSV parser that yields rows line-by-line, pass the following
        function:

        .. code-block:: python

            def parse_csv(filepath):
                import csv

                with open(filepath, newline="") as file:
                    yield from csv.reader(file)
    """

    template_fields: Sequence[str] = (
        "s3_bucket",
        "s3_key",
        "schema",
        "table",
        "column_list",
        "sql_conn_id",
    )
    template_ext: Sequence[str] = ()
    ui_color = "#f4a460"

    def __init__(
        self,
        *,
        s3_key: str,
        s3_bucket: str,
        table: str,
        parser: Callable[[str], Iterable[Iterable]],
        column_list: list[str] | None = None,
        commit_every: int = 1000,
        schema: str | None = None,
        sql_conn_id: str = "sql_default",
        sql_hook_params: dict | None = None,
        aws_conn_id: str = "aws_default",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.s3_bucket = s3_bucket
        self.s3_key = s3_key
        self.table = table
        self.schema = schema
        self.aws_conn_id = aws_conn_id
        self.sql_conn_id = sql_conn_id
        self.column_list = column_list
        self.commit_every = commit_every
        self.parser = parser
        self.sql_hook_params = sql_hook_params

    def execute(self, context: Context) -> None:
        self.log.info("Loading %s to SQL table %s...", self.s3_key, self.table)
        s3_hook = S3Hook(aws_conn_id=self.aws_conn_id)
        s3_obj = s3_hook.get_key(key=self.s3_key, bucket_name=self.s3_bucket)
        # Stream the S3 object into a local temp file so the parser can
        # operate on a real filesystem path.
        with NamedTemporaryFile() as local_tempfile:
            s3_obj.download_fileobj(local_tempfile)
            # Flush and rewind so the file is fully on disk before parsing.
            local_tempfile.flush()
            local_tempfile.seek(0)
            self.db_hook.insert_rows(
                table=self.table,
                schema=self.schema,
                target_fields=self.column_list,
                rows=self.parser(local_tempfile.name),
                commit_every=self.commit_every,
            )

    @cached_property
    def db_hook(self):
        # Resolved lazily (and once) from the connection; any hook type is
        # accepted as long as it exposes an insert_rows method.
        self.log.debug("Get connection for %s", self.sql_conn_id)
        conn = BaseHook.get_connection(self.sql_conn_id)
        hook = conn.get_hook(hook_params=self.sql_hook_params)
        if not callable(getattr(hook, "insert_rows", None)):
            raise AirflowException(
                "This hook is not supported. The hook class must have an `insert_rows` method."
            )
        return hook
|
# Exercise 8
# Make a two player Rock, Paper, Scissors game
import random
print("Rock, Paper,Scissors")
loop = True
move = ["rock", "paper", "scissors"]

def _play_again():
    """Ask whether to continue; print the goodbye message when declining."""
    if input("Do you want to continue\n") == "yes":
        return True
    print("Game over")
    return False

while loop:
    bot = random.choice(move)
    p1 = input("What do you choose\n").lower()
    if p1 not in move:
        # Invalid entry: re-prompt without asking about continuing.
        print("Try again, invalid entry")
        continue
    if p1 == bot:
        print("Tie")
    elif (p1 == "rock" and bot == "scissors") or (p1 == "paper" and bot == "rock") or (p1 == "scissors" and bot == "paper"):
        print("Player 1 wins")
    else:
        # Fixed: this branch means the bot (player 2) WON, but the original
        # printed "Player 2 Lost".
        print("Player 2 wins")
    if not _play_again():
        break
|
# coding=utf8
'''
Given two integer arrays sorted in ascending order and an integer k. Define sum
= a + b, where a is an element from the first array and b is an element from
the second one. Find the kth smallest sum out of all possible sums.
Example
Given [1, 7, 11] and [2, 4, 6].
For k = 3, return 7.
For k = 4, return 9.
For k = 8, return 15.
'''
# heap
class Solution:
    """Kth smallest pairwise sum of two ascending arrays, via a min-heap."""
    # @param {int[]} A an integer arrays sorted in ascending order
    # @param {int[]} B an integer arrays sorted in ascending order
    # @param {int} k an integer
    # @return {int} an integer
    def kthSmallestSum(self, A, B, k):
        """Return the k-th smallest a+b; 0 when either array is empty."""
        import heapq
        if not A or not B:
            return 0
        length_a, length_b = len(A), len(B)
        heap = []
        # Seed the heap with A[0] paired with the first min(k, len(B)) of B.
        for i in range(min(k, length_b)):
            heapq.heappush(heap, (A[0] + B[i], 0, i))
        # Pop the k-1 smallest sums; after each pop advance down column A.
        while k > 1:
            min_value, index_a, index_b = heapq.heappop(heap)
            if index_a + 1 < length_a:
                heapq.heappush(heap, (A[index_a + 1] + B[index_b], index_a + 1, index_b))
            k -= 1
        # Fixed: was `heapq.heappop(head)` — `head` is undefined (NameError).
        return heapq.heappop(heap)[0]
|
#coding=utf-8
# @Author: yangenneng
# @Time: 2018-05-09 21:17
# @Abstract:Wolfe Line Search method
from LinearSearchMethods.StepSize.Zoom import zoom
from LinearSearchMethods.StepSize.Interpolation import *
def f(x):
    """Test objective: (x - 3)^2, minimized at x = 3."""
    return (x - 3) ** 2
def f_grad(x):
    """First derivative of f: 2(x - 3)."""
    return 2 * x - 6
def f_grad_2(x):
    """Second derivative of f (constant; x is accepted for a uniform signature)."""
    return 2
def WolfeLineSearch(x_k):
    """Line search along the steepest-descent direction p = -f'(x_k).

    Bracketing phase of the strong-Wolfe line search (Nocedal & Wright,
    Algorithm 3.5); delegates to zoom() once a bracketing interval is found.
    Returns a step length, or alpha_min if the bracket collapses.
    """
    alpha_0 = 0
    alpha_max = 1
    alpha_1 = 0.7
    c1 = 1e-4  # c1: Armijo (sufficient decrease) condition
    c2 = 0.9   # c2: curvature condition
    alpha_pre = alpha_0
    alpha_cur = alpha_1
    alpha_min = 1e-7
    i = 0
    eps = 1e-16
    while abs(alpha_cur - alpha_pre) >= eps:
        phi_alpha_cur = f(x_k + alpha_cur * -(f_grad(x_k)))
        phi_alpha_pre = f(x_k + alpha_pre * -(f_grad(x_k)))
        phi_alpha_0 = f(x_k)
        # phi'(0) = grad f(x_k) . p  with p = -grad f(x_k).
        # Fixed: the original multiplied f(x_k), not f_grad(x_k).
        phi_grad_alpha_0 = f_grad(x_k) * (-f_grad(x_k))
        if phi_alpha_cur > phi_alpha_0 + c1 * alpha_cur * phi_grad_alpha_0 or (phi_alpha_cur > phi_alpha_pre and i > 0):
            return zoom(x_k, alpha_pre, alpha_cur)
        # phi'(alpha) = grad f(x_k + alpha*p) . p  (same fix as above).
        phi_grad_alpha_cur = f_grad(x_k + alpha_cur * (-f_grad(x_k))) * (-f_grad(x_k))
        if abs(phi_grad_alpha_cur) <= -c2 * phi_grad_alpha_0:  # satisfy Wolfe condition
            return alpha_cur
        if phi_grad_alpha_cur >= 0:
            # Fixed: removed a duplicate zoom() call whose result was discarded.
            return zoom(x_k, alpha_cur, alpha_max)
        alpha_new = QuadraticInterpolation(alpha_cur, phi_alpha_cur, phi_alpha_0, phi_grad_alpha_0)
        alpha_pre = alpha_cur
        alpha_cur = alpha_new
        i += 1
    return alpha_min
|
class Create_mail:
    """Page-object locators (XPath) for the "compose mail" screen."""
    # Account expected to be logged in during the test run.
    expected_name = "alex2019hillel@ukr.net"
    create_button = "//div[@id='content']/aside/button"
    fild_input = "//input[@name='toFieldInput']"
    fild_subject = "//input[@name='subject']"
    submit_button = "//div[@id='screens']/div/div/div/button"
import pandas as pd
from bokeh.plotting import figure, output_file, show
from bokeh.models import Title
# Source https://bokeh.pydata.org/en/latest/docs/gallery/color_scatter.html
# Scatter plot of car Weight vs MPG, bubble radius scaled by weight.
cardata = pd.read_csv('cars-sample.csv')
TOOLS = "hover,crosshair,pan,wheel_zoom,zoom_in,zoom_out,box_zoom,undo,redo,reset,tap,save,box_select,poly_select,lasso_select,"
# Per-manufacturer hues; currently unused (see the commented color= line below).
colors = {'bmw': '#F8766D', 'ford': '#A3A500', 'honda': '#00BF7D',
          'mercedes': '#00B0F6', 'toyota': '#E76BF3'}
plot = figure(tools=TOOLS, title = "Weight vs MPG")
# Axis labels are added as Title layouts below/left of the plot.
plot.add_layout(Title(text="Weight", align="center"), "below")
plot.add_layout(Title(text="MPG", align="center"), "left")
# Radius grows quadratically with weight; alpha keeps overlaps readable.
plot.scatter(cardata['Weight'], cardata['MPG'], radius=(cardata['Weight'] / 400) ** 2,
             fill_alpha=0.5,
             #color=cardata['Manufacturer'].apply(lambda x: colors[x])
             #legend = colors.get([cardata['Manufacturer']]),
             color=cardata['Color']
             )
output_file("car-plot.html", title="Bokeh-Plot Example")
#legend1 = Legend(items=[("bmw", [plot.circle(x=0, y = 0, fill_color = '#F8766D')])], location = (0,0), orientation="vertical")
show(plot)
|
from django.core.management.base import BaseCommand, CommandError
from polls.models import Poll
from django.utils import timezone
class Command(BaseCommand):
    """Management command that creates one Poll per question argument."""
    args = '<question question ...>'
    help = 'Adds a poll with the specified question(s)'

    def handle(self, *args, **options):
        """Create and save a Poll (pub_date=now) for every question given."""
        for question in args:
            p = Poll(question=question, pub_date=timezone.now())
            p.save()
            self.stdout.write('Successfully created poll with question "%s"' % p.question )
#coding=utf-8
from django.db import models
class Tag(models.Model):
    """A tag that can be attached to blog posts."""
    tag_name = models.CharField(max_length=20)
    create_time = models.DateTimeField(auto_now_add=True)

    def __unicode__(self):
        # Python 2 string representation: just the tag name.
        return self.tag_name
class BlogPost(models.Model):
    """A blog post; body content lives in the Paragraph/Photo/Code models."""
    title = models.CharField(max_length=150)
    author = models.CharField(max_length=50)
    tag = models.ManyToManyField(Tag, blank=True)
    summary = models.TextField()
    timestamp = models.DateTimeField()

    def __unicode__(self):
        return u'%s %s %s' % (self.title, self.author, self.timestamp)

    class Meta(object):
        # Newest posts first.
        ordering = ['-timestamp']

    def get_absolute_url(self):
        """Resolve this post's canonical URL via the blog_show view."""
        from django.core.urlresolvers import reverse
        return reverse('blog.views.blog_show', args=[str(self.id)])
# The Paragraph, Photo and Code classes below represent the text paragraphs,
# code snippets and images that make up a blog post's body.
# Each has a `sequence` attribute recording the order in which these pieces
# appear within the post.
class Paragraph(models.Model):
    """A text paragraph within a blog post's body."""
    discern = "paragraph"  # type marker used to tell body parts apart
    describe = models.CharField(max_length=1000)
    sequence = models.PositiveSmallIntegerField()  # position within the post body
    tag = models.ManyToManyField(BlogPost)
    paragraph = models.TextField()

    def __unicode__(self):
        return self.describe
class Photo(models.Model):
    """An image within a blog post's body, referenced by URL."""
    discern = "photo"  # type marker used to tell body parts apart
    sequence = models.PositiveSmallIntegerField()  # position within the post body
    title = models.CharField(max_length=100)
    #image = models.ImageField(upload_to='photos', blank=True)
    tag = models.ManyToManyField(BlogPost)
    url = models.CharField(max_length=100)

    def __unicode__(self):
        return self.title
class Code(models.Model):
    """A code snippet within a blog post's body."""
    discern = "code"  # type marker used to tell body parts apart
    sequence = models.PositiveSmallIntegerField()  # position within the post body
    title = models.CharField(max_length=150)
    content = models.TextField()
    tag = models.ManyToManyField(BlogPost)

    def __unicode__(self):
        return self.title
|
from ydk.models.cisco_ios_xr import Cisco_IOS_XR_snmp_agent_cfg
from ydk.providers import NetconfServiceProvider
from ydk.models.ietf import ietf_interfaces
from ydk.services import CRUDService
from ydk.services import NetconfService, Datastore
if __name__ == '__main__':
    # Connect over NETCONF/SSH (credentials here target Cisco's public
    # always-on IOS-XR sandbox).
    sp_instance = NetconfServiceProvider(address='sbx-iosxr-mgmt.cisco.com',
                                         port=10000,
                                         username='admin',
                                         password='C1sco12345',
                                         protocol='ssh')
    crud = CRUDService()
    # Create the top-level container
    snmp = Cisco_IOS_XR_snmp_agent_cfg.Snmp()
    # Create the list instance
    rule = Cisco_IOS_XR_snmp_agent_cfg.Snmp.Correlator.Rules.Rule()
    rule.name = 'abc'
    # Instantiate and assign the presence class
    rule.non_stateful = Cisco_IOS_XR_snmp_agent_cfg.Snmp.Correlator.Rules.Rule.NonStateful()
    rule.non_stateful.timeout = 3
    # Append the list instance to its parent
    snmp.correlator.rules.rule.append(rule)
    # Call the CRUD create on the top-level snmp object
    # (assuming you have already instantiated the service and provider)
    result = crud.create(sp_instance, snmp)
from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
from asset.models import *
from asset.forms import *
from django.conf import settings
from django.shortcuts import render, get_object_or_404, redirect
from django.contrib.auth.decorators import login_required
@login_required(login_url="login")
def domain_list(request):
    """Render the page listing every DomainList record."""
    domainlist = DomainList.objects.all()
    return render(request, 'asset/asset_domain_list.html', {'DomainList': domainlist})
@login_required(login_url="login")
def domain_manage(request, aid=None, action=None):
    """Create / edit / delete page for domain records.

    :param aid: primary key of an existing DomainList row (None => create)
    :param action: 'add', 'edit' or 'delete'
    """
    page_name = 'Domain manage'
    if aid:
        domain_list = get_object_or_404(DomainList, pk=aid)
        if action == 'edit':
            page_name = 'Edit domain'
        if action == 'delete':
            # Deletion is performed immediately, no confirmation form.
            domain_list.delete()
            return redirect('Domain list')
    else:
        # No id given: prepare an empty record for the "add" form.
        domain_list = DomainList()
        action = 'add'
        page_name = 'add_domain'
    if request.method == 'POST':
        form = DomainForms(request.POST, instance=domain_list)
        if form.is_valid():
            if action == 'add':
                form.save()
                return redirect('Domain list')
            if action == 'edit':
                form.save()
                return redirect('Domain list')
    else:
        form = DomainForms(instance=domain_list)
    return render(request, 'asset/asset_domain_manage.html',
                  {"form": form, "page_name": page_name, "action": action})
|
from django.db import models
from django.contrib.auth import get_user_model
from common.choices import AdvertisementStatus, AdvertisementType
from address.models import AddressField
from django.core.exceptions import ValidationError
class LandlordUser(get_user_model()):
    """Landlord account extending the project's configured user model."""
    phone_number = models.CharField(max_length=20, null=False, blank=False, unique=True)
    register_address = AddressField(related_name='+', blank=False, null=False)
    birthday = models.DateField(verbose_name="Birthday", null=False)
    skype_id = models.CharField(max_length=128, blank=True, null=True, default='')
    is_verified = models.BooleanField(null=False, blank=False)
    is_visible = models.BooleanField(default=True)
    is_premium = models.BooleanField(default=False)
    # Check if each user can multiple agents!
    agent = models.ForeignKey('AgentApp.AgentUser', related_name='client_list', on_delete=models.DO_NOTHING, blank=True, null=True, default=None)
class TimeSlot(models.Model):
    """A bookable visiting window of an OpenHouse with limited capacity."""
    start_time = models.DateTimeField(blank=False, null=False)
    end_time = models.DateTimeField(blank=False, null=False)
    max_people = models.IntegerField(default=1)
    open_house = models.ForeignKey('OpenHouse', on_delete=models.CASCADE, related_name='time_slots', blank=False, null=False)
    visitors = models.ManyToManyField('HouseeApp.HouseeUser')

    def add_visitor(self, new_visitor):
        """Register *new_visitor* unless the slot is already full.

        Returns False when full; on success it saves and implicitly returns
        None (not True), so callers must test `is False` rather than
        truthiness.
        """
        if self.visitors.count() >= self.max_people:
            return False
        self.visitors.add(new_visitor)
        self.save()
class OpenHouse(models.Model):
    """An open-house event belonging to one estate advertisement."""
    estate = models.ForeignKey('EstateAdvertisement', on_delete=models.CASCADE, related_name='open_house', blank=False, null=False)

    def get_time_slots(self):
        """Return this event's TimeSlots ordered by start time."""
        return sorted(self.time_slots.all(), key=lambda x: x.start_time)
class EstateAdvertisement(models.Model):
    """A property listing posted by a landlord.

    get number of ad likes by doing self.ad_likes.count()
    """
    # estate = models.ForeignKey('Estate', related_name='estate_ads', on_delete=models.CASCADE, blank=False, null=False)
    owner = models.ForeignKey('LandlordUser', related_name='ads', on_delete=models.CASCADE, blank=False, null=False)
    description = models.TextField(max_length=5000, null=False, blank=False)
    is_special = models.BooleanField(default=False)
    pets_allowed = models.BooleanField(default=False)
    status = models.CharField(max_length=20, choices=AdvertisementStatus.CHOICES, null=True, blank=True)
    type = models.CharField(max_length=20, choices=AdvertisementType.CHOICES, null=True, blank=True)
    is_location_visible = models.BooleanField(blank=False, null=False)
    evacuation_date = models.DateField(null=False, blank=False)
    published_at = models.DateTimeField(auto_now_add=True)
    expires_at = models.DateTimeField(null=False, blank=False)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    bedrooms = models.IntegerField()
    bathrooms = models.DecimalField(max_digits=2, decimal_places=1)
    garage = models.IntegerField(default=0)
    sqft = models.IntegerField()
    lot_size = models.DecimalField(max_digits=5, decimal_places=1, blank=True, null=True)
    # Planned fields (not yet implemented):
    # isVisibleInAr
    # hasSmsNotifications
    # hasEmailNotifications
    # isSeen
    # visitedCount
    # agents

    def get_images(self):
        """All EstateMedia rows attached to this advertisement."""
        return self.estate_media.all()

    def get_features(self):
        """All feature rows attached to this advertisement."""
        return self.estate_feature.all()

    def clean(self):
        """Model validation: image/feature limits, uniqueness, date order."""
        if self.get_images().count() > 10:
            raise ValidationError('Exceeding total number of images allowed!')
        features_list = list( x.value for x in self.get_features())
        if len(features_list) > 32:
            raise ValidationError('Exceeding total number of features!')
        if len(set(features_list)) != len(features_list):
            raise ValidationError('Duplicated value in feature list')
        # NOTE(review): published_at is auto_now_add, so it is None before the
        # first save -- confirm clean() is only invoked on saved instances.
        if self.expires_at < self.published_at:
            raise ValidationError('Expiry date should be greater than publishing date!')
class EstateMedia(models.Model):
    """An image (regular or 360°) attached to an advertisement."""
    image = models.ImageField(upload_to='listings/%Y/%m/%d/', blank=True, null=True, default=None)
    image_360 = models.ImageField(upload_to='listings/360/%Y/%m/%d/', blank=True, null=True, default=None)
    # Distinguishes which of the two image fields is meaningful.
    is_360 = models.BooleanField(default=False)
    estate = models.ForeignKey(EstateAdvertisement, related_name='estate_media', on_delete=models.CASCADE, blank=True, null=True, default=None)
class features(models.Model):
    """A single free-text feature of an advertisement.

    Count and uniqueness per ad are validated by EstateAdvertisement.clean().
    """
    value = models.CharField(max_length=64)
    estate = models.ForeignKey(EstateAdvertisement, related_name='estate_feature', on_delete=models.CASCADE, blank=True,
                               null=True, default=None)
class TenantAdvertisementLike(models.Model):
    """A tenant's like on an advertisement (count via ad.ad_likes.count())."""
    advertisement = models.ForeignKey('EstateAdvertisement', related_name='ad_likes', on_delete=models.CASCADE, null=False, blank=False)
    tenant = models.ForeignKey('TenantApp.TenantUser', related_name='ads_liked', on_delete=models.CASCADE, null=False, blank=False)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2015-12-23 16:29:55
# @Author : killingwolf (killingwolf@qq.com)
import re
if __name__ == '__main__':
# 15-1
p151 = r'[bh][aiu]t,?'
print re.search(p151, 'hut').group()
# 15-2
p152 = r'\w+ \w+'
print re.search(p152, 'abc cba').group()
# 15-3
p153 = r'\w+, [a-zA-Z]'
print re.search(p153, 'abc, cba').group()
# 15-4
p154 = r'^[a-zA-Z_]+\w*'
print re.search(p154, '_').group()
# 15-5
p155 = r'^[1-9][0-9]{3} [a-zA-Z ]*'
print re.search(p155, '1180 b').group()
# 15-6
p156 = r'^www\.[a-zA-Z0-9\-]+\.(com|edu|net)$'
print re.search(p156, 'www.abc-c.net').group()
print re.search(p156, 'www.abc.com').group()
print re.search(p156, 'www.abc.edu').group()
# 15-7
p157 = r'^(\+|-)?((0|[1-9]\d*)|(0[0-7])|(0[xX][0-9a-fA-F])+)$'
print re.search(p157, '-0X1').group()
# 15-8
p158 = r'^(\+|-)?((0|[1-9]\d*)|(0[0-7])|(0[xX][0-9a-fA-F])+)$'
print re.search(p158, '1222').group()
# 15-9
p159 = r'^[+-]?(0|[1-9]+)\.\d+$'
print re.search(p159, '-1.2').group()
# 15-10
# p1510 = r'([+-]?(0|[1-9]\d*)?)?[+-]?([1-9]|\d*)?i$'
p1510 = r'([+-]?(\d*)?)?[+-]?([1-9]|\d*)?i$'
print re.search(p1510, '-1-2i').group()
# 15-11
p1511 = r'^(\.?[\w-]+)+@([\w-]+\.[\w-]+)+$'
print re.search(p1511, 'a.b-@b.com.cn').group()
# 15-12
p1512 = r'^https?://([\w-]+\.?[a-zA-Z/]+)+$'
print re.search(p1512, 'http://c.com/bbb/ccc.html').group()
# 15-14
p1514 = r'1[0-2]'
print re.search(p1514, '10').group()
print re.search(p1514, '11').group()
print re.search(p1514, '12').group()
# 15-19
str1 = 'Tue Jul 21 17:19:18 1987::wmaoavw@bxboftm.org::553857558-7-7'
p1519 = r'''
(\b(Wed|Tue|Mon|Sun|Sat|Fri|Thu)\b\s
\b(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\b\s
(\d{2})\s
((\d{2}:){2}\d{2})\s
(\d{4}))
'''
print re.search(p1519, str1, re.X).group()
# 15-20
p1520 = r'[a-z]+@[a-z]+\.(com|edu|net|org|gov)'
print re.search(p1520, str1).group()
# 15-21
p1521 = r'\b(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\b'
print re.search(p1521, str1).group()
# 15-22
p1522 = r'\d{4}'
print re.search(p1522, str1).group()
# 15-23
p1523 = r'\b((\d{2}:){2}\d{2})\b'
print re.search(p1523, str1).group()
# 15-24
p1524 = r'([a-z]+)@([a-z]+\.(com|edu|net|org|gov))'
match = re.search(p1524, str1).groups()
print match[0] + match[1]
# 15-25
p1525 = r'([a-z]+)@([a-z]+\.(com|edu|net|org|gov))'
match = re.search(p1524, str1).groups()
print match[0], match[1]
# 15-26
p1526 = r'[a-z]+@[a-z]+\.(com|edu|net|org|gov)'
print re.sub(p1526, 'aaa@bbb.com', str1)
# 15-27
p1527 = r'''
(\b(Wed|Tue|Mon|Sun|Sat|Fri|Thu)\b\s
\b(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\b\s
(\d{2})\s
((\d{2}:){2}\d{2})\s
(\d{4}))
'''
match = re.search(p1527, str1, re.VERBOSE).groups()
print "%s %s, %s" % (match[2], match[3], match[6])
# 15-28
p1528 = r'(\d{3}-){1,2}\d{4}'
print re.search(p1528, '800-555-1212').group()
print re.search(p1528, '555-1212').group()
# 15-29
p1529 = r'(^\(\d{3}\)?|^\d{3}-)?\d{3}-\d{4}'
print re.search(p1529, '800-555-1212').group()
print re.search(p1529, '555-1212').group()
print re.search(p1529, '(800)555-1212').group()
|
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import umap
from embeddings.embedding_manager import EmbeddingMan, GloveEmbeddings, BPEmbeddings, CombinedEmbeddings
from dataset import Document, DataSet
from flair.embeddings import WordEmbeddings, FlairEmbeddings, StackedEmbeddings, BertEmbeddings, Embeddings
from flair.data import Sentence
class DataEmbedder:
    def __init__(self, create_sent_emb=False, wiki_path=""):
        """Configure the embedder.

        :param create_sent_emb: if True, _embed_document concatenates a
            simple sentence vector onto every word embedding.
        :param wiki_path: path to wiki frequency data — presumably consumed
            by subclasses; not used directly here (verify).
        """
        self.create_sent_emb = create_sent_emb
        # Word-frequency data; None/0 until populated elsewhere.
        self.wiki_freq_dict = None
        self.total_wiki_dict_count = 0
        self.wiki_path = wiki_path
        # Expected to be set by subclasses once the backend dimension is known.
        self.embedding_dim = 0
def reduce_dataset(self, ds, path=""):
embedded, word_list, annotation_list = self.embed_dataset(ds)
print("Embedded dataset. Performing Dimensionality Reduction")
reducer = umap.UMAP()
reduced = reducer.fit_transform(embedded)
res_df = pd.DataFrame(reduced)
res_df["word"] = word_list
res_df["annotated"] = annotation_list
if path:
res_df.to_csv(path)
return res_df
def plot_reduced(self, df=None, path=None, save=None):
assert df is not None or path is not None, "Needs a dataframe or path to a stored dataframe to visualize"
if df is None:
df = pd.read_csv(path, index_col=0)
plt.scatter(df.iloc[:, 0], df.iloc[:, 1], c=df["annotated"])
if save is not None:
plt.savefig(save)
plt.show()
def embed_dataset(self, ds):
embedding_list = []
word_list = []
annotation_list = []
for doc in ds.documents:
doc_embeddings = self._embed_document(doc)
for w_list, a_list, emb_list in zip(doc.sentences, doc.annotated, doc_embeddings):
for w, a, e in zip(w_list, a_list, emb_list):
word_list.append(w)
annotation_list.append(a)
embedding_list.append(e)
embedding_matrix = np.reshape(np.array(embedding_list), (len(word_list), -1))
print("Embedding matrix has shape {}".format(embedding_matrix.shape))
return embedding_matrix, word_list, annotation_list
def _embed_sentence(self, sent):
raise NotImplementedError
def _embed_document(self, doc):
doc_embeddings = []
for sent in doc.sentences:
doc_embeddings.append(self._embed_sentence(sent))
if not self.create_sent_emb:
return doc_embeddings
simple_sent_embeddings = self.get_simple_sentence_vecs(doc_embeddings, doc)
new_doc_embeddings = []
for sent_list, sent_emb in zip(doc_embeddings, simple_sent_embeddings):
new_sent_list = []
for w in sent_list:
new_w = np.concatenate((np.reshape(w, (self.embedding_dim, 1)),
np.reshape(sent_emb, (self.embedding_dim, 1))))
new_sent_list.append(new_w)
new_doc_embeddings.append(new_sent_list)
return new_doc_embeddings
def get_simple_sentence_vecs(self, doc_embeddings, doc):
"""
:param list[list[numpy.array]] doc_embeddings:
:param Document doc:
:return:
"""
if self.wiki_freq_dict is None:
self._load_in_wiki_dic(self.wiki_path)
sent_vecs = np.zeros((self.embedding_dim, len(doc.sentences)))
for i in range(len(doc.sentences)):
res = self._get_temp_sentence_vec(doc_embeddings[i], doc.sentences[i])
sent_vecs[:, i] = res.reshape(self.embedding_dim)
u, _, _ = np.linalg.svd(sent_vecs)
svd_mat = np.matmul(u[:, 0].reshape((self.embedding_dim, 1)),
u[:, 0].reshape((self.embedding_dim, 1)).transpose())
sentence_embeddings = []
for i in range(len(doc.sentences)):
vec = sent_vecs[:, i].reshape((self.embedding_dim, 1)) - \
np.matmul(svd_mat, sent_vecs[:, i].reshape((self.embedding_dim, 1)))
sentence_embeddings.append(vec)
return sentence_embeddings
def _get_temp_sentence_vec(self, sentence, word_list, a=0.000000000001):
"""
creates weighted sentence vector needed for the sentence embedding from "tough to beat baseline"
:param list[numpy.array] sentence:
:param list[str] word_list:
:param a: factor for word weights
:return: preliminary embedding vector for the given sentence
"""
vec = np.zeros((self.embedding_dim, 1))
for w_embed, word in zip(sentence, word_list):
temp_vec = np.reshape(w_embed, (self.embedding_dim, 1))
if word in self.wiki_freq_dict:
vec = vec + 100 * temp_vec * a / (a + self.wiki_freq_dict[word])
else:
vec = vec + 100 * temp_vec * a / (a + 1)
return vec / len(sentence)
def _load_in_wiki_dic(self, path):
"""
requires https://raw.githubusercontent.com/IlyaSemenov/wikipedia-word-frequency/master/results/enwiki-20190320-words-frequency.txt
loads in a dictionary with relative word frequencies from wikipedia
:param path: path to the file containing the word frequencies
:return:
"""
with open(path, encoding="utf8") as f:
wiki_dict = {}
total_count = 0
for line in f.readlines():
word = line.split()[0]
count = int(line.split()[1])
total_count += count
wiki_dict[word] = count
self.wiki_freq_dict = wiki_dict
self.total_wiki_dict_count = total_count
class FlairDataEmbedder(DataEmbedder):
    """DataEmbedder backed by a flair embedding (word, stacked, BERT, ...)."""
    def __init__(self, embeddings, create_sent_emb=False, wiki_path=""):
        super(FlairDataEmbedder, self).__init__(create_sent_emb=create_sent_emb, wiki_path=wiki_path)
        self.embeddings = embeddings
        self.embedding_dim = self.embeddings.embedding_length
    def _embed_sentence(self, sent):
        """Embed the tokens of *sent* (list[str]) via flair.

        :return: list[numpy.array], one vector per token
        """
        tokenized = Sentence(" ".join(sent))
        self.embeddings.embed(tokenized)
        return [token.embedding.numpy() for token in tokenized]
class SimpleDataEmbedder(DataEmbedder):
    """DataEmbedder backed by a plain per-word lookup (EmbeddingMan)."""
    def __init__(self, embedding_man, create_sent_emb=False, wiki_path=""):
        """
        :param EmbeddingMan embedding_man: per-word embedding lookup
        """
        super(SimpleDataEmbedder, self).__init__(create_sent_emb=create_sent_emb, wiki_path=wiki_path)
        self.embedding_man = embedding_man
        self.word_set = set()
        self.embedding_dim = self.embedding_man.dim
    def _embed_sentence(self, sent):
        """Look up one embedding vector per word of *sent*."""
        return [self.embedding_man.get_embedding_vec(token) for token in sent]
if __name__ == "__main__":
    # Build 2-D UMAP projections of three NER test sets under several
    # embedding schemes and write each projection to a CSV.
    ds1 = DataSet("../data/standardized/conll_test.txt")
    ds1.read_data()
    ds2 = DataSet("../data/standardized/itac_test.txt")
    ds2.read_data()
    ds3 = DataSet("../data/standardized/rsics_test.txt")
    ds3.read_data()
    # 50-d GloVe vectors.
    g_man = GloveEmbeddings("glove/glove.6B.50d.txt", 50)
    embedder = SimpleDataEmbedder(g_man, create_sent_emb=False)
    embedder.reduce_dataset(ds1, path="dim_reductions/glove50_conll.csv")
    embedder.reduce_dataset(ds2, path="dim_reductions/glove50_itac.csv")
    embedder.reduce_dataset(ds3, path="dim_reductions/glove50_rsics.csv")
    # 100-d byte-pair embeddings.
    b_man = BPEmbeddings(dim=100, bp_vocab_size=50000)
    embedder = SimpleDataEmbedder(b_man, create_sent_emb=False)
    embedder.reduce_dataset(ds1, path="dim_reductions/bp-d100-vs50000_conll.csv")
    embedder.reduce_dataset(ds2, path="dim_reductions/bp-d100-vs50000_itac.csv")
    embedder.reduce_dataset(ds3, path="dim_reductions/bp-d100-vs50000_rsics.csv")
    # GloVe + byte-pair concatenated.
    c_man = CombinedEmbeddings([g_man, b_man])
    embedder = SimpleDataEmbedder(c_man, create_sent_emb=False)
    embedder.reduce_dataset(ds1, path="dim_reductions/bp-glove_conll.csv")
    embedder.reduce_dataset(ds2, path="dim_reductions/bp-glove_itac.csv")
    embedder.reduce_dataset(ds3, path="dim_reductions/bp-glove_rsics.csv")
    # fastText via flair.
    embedder = FlairDataEmbedder(WordEmbeddings("en-crawl"))
    embedder.reduce_dataset(ds1, path="dim_reductions/fasttext_en-crawl_conll.csv")
    embedder.reduce_dataset(ds2, path="dim_reductions/fasttext_en-crawl_itac.csv")
    embedder.reduce_dataset(ds3, path="dim_reductions/fasttext_en-crawl_rsics.csv")
    # Bidirectional flair character LM embeddings.
    embedder = FlairDataEmbedder(StackedEmbeddings([FlairEmbeddings('news-forward-fast'), FlairEmbeddings('news-backward-fast')]))
    embedder.reduce_dataset(ds1, path="dim_reductions/flair-forward-backward_conll.csv")
    embedder.reduce_dataset(ds2, path="dim_reductions/flair-forward-backward_itac.csv")
    embedder.reduce_dataset(ds3, path="dim_reductions/flair-forward-backward_rsics.csv")
|
import sys
import re
from utils.log import log
# One entry per line of puzzle input read from stdin.
puzzle_input = [x for x in sys.stdin.read().split('\n')]
# Matches "<n1>-<n2> <letter>: <password>", e.g. "1-3 a: abcde".
regex = re.compile(r'(\d+)-(\d+)\s([a-z]):\s([a-z]+)')
def count_letter(l, s):
    """Return the number of occurrences of character *l* in string *s*."""
    # str.count is the idiomatic, C-speed replacement for the manual loop.
    return s.count(l)
@log
def a(puzzle_input):
    """Part A: count passwords whose letter frequency lies in [least, most]."""
    valid = 0
    for line in puzzle_input:
        least, most, letter, pw = regex.search(line).groups()
        if int(least) <= count_letter(letter, pw) <= int(most):
            valid += 1
    return valid
def validate(p1, p2, letter, password):
    """Part B rule: exactly one of the two 1-based positions holds *letter*.

    :param p1: first 1-based position
    :param p2: second 1-based position
    :param letter: the required character
    :param password: the password string
    :return: True iff exactly one of password[p1-1], password[p2-1] == letter

    The original chain of four ifs contained two unreachable branches; the
    rule reduces to a plain XOR of the two position checks.
    """
    return (password[p1 - 1] == letter) != (password[p2 - 1] == letter)
@log
def b(puzzle_input):
    """Part B: count passwords where exactly one listed position holds the letter."""
    total = 0
    for line in puzzle_input:
        pos1, pos2, letter, pw = regex.search(line).groups()
        total += validate(int(pos1), int(pos2), letter, pw)
    return total
# Run both parts; the @log decorator reports each result.
a(puzzle_input)
b(puzzle_input)
|
# pylint: skip-file
"""
Fix missing anchors from timestamp and date nodes.
This must be removed once incorporated into ruamel.yaml, likely at version
0.17.22.
Source: https://sourceforge.net/p/ruamel-yaml/tickets/440/
Copyright 2022 Anthon van der Neut, William W. Kimball Jr. MBA MSIS
"""
import ruamel.yaml
from ruamel.yaml.constructor import ConstructorError
from ruamel.yaml.anchor import Anchor
from ruamel.yaml.timestamp import TimeStamp
from typing import Any, Dict, Union # NOQA
import datetime
import copy
class AnchoredTimeStamp(TimeStamp):
    """Extend TimeStamp to track YAML Anchors."""
    def __init__(self, *args: Any, **kw: Any) -> None:
        """Initialize a new instance."""
        # t: whether the source scalar used the 'T' date/time separator;
        # tz: original timezone suffix text; delta: UTC offset already applied.
        self._yaml: Dict[Any, Any] = dict(t=False, tz=None, delta=0)
    def __new__(cls, *args: Any, **kw: Any) -> Any:  # datetime is immutable
        """Create a new, immutable instance."""
        anchor = kw.pop('anchor', None)
        ts = TimeStamp.__new__(cls, *args, **kw)
        if anchor is not None:
            ts.yaml_set_anchor(anchor, always_dump=True)
        return ts
    def __deepcopy__(self, memo: Any) -> Any:
        """Deeply copy this instance to another."""
        # BUG FIX: the copy previously omitted self.microsecond (silently
        # truncating sub-second precision) and hard-coded AnchoredTimeStamp,
        # which downgraded AnchoredDate instances; type(self) preserves the
        # subclass.
        ts = type(self)(self.year, self.month, self.day, self.hour,
                        self.minute, self.second, self.microsecond)
        ts._yaml = copy.deepcopy(self._yaml)
        # NOTE(review): the YAML anchor is still not copied — confirm deep
        # copies are meant to drop their anchor.
        return ts
    @property
    def anchor(self) -> Any:
        """Access the YAML Anchor, creating an empty one when absent."""
        if not hasattr(self, Anchor.attrib):
            setattr(self, Anchor.attrib, Anchor())
        return getattr(self, Anchor.attrib)
    def yaml_anchor(self, any: bool = False) -> Any:
        """Get the YAML Anchor (None unless present and dumpable)."""
        if not hasattr(self, Anchor.attrib):
            return None
        if any or self.anchor.always_dump:
            return self.anchor
        return None
    def yaml_set_anchor(self, value: Any, always_dump: bool = False) -> None:
        """Set the YAML Anchor."""
        self.anchor.value = value
        self.anchor.always_dump = always_dump
class AnchoredDate(AnchoredTimeStamp):
    """Define AnchoredDate."""
    # Marker subclass: lets the representer distinguish a plain YAML date
    # (rendered via .date().isoformat()) from a full timestamp.
    pass
def construct_anchored_timestamp(
    self, node: Any, values: Any = None
) -> Union[AnchoredTimeStamp, AnchoredDate]:
    """Construct an AnchoredTimeStamp (or AnchoredDate) from a scalar node.

    Patched onto RoundTripConstructor below; *self* is the constructor.
    """
    try:
        match = self.timestamp_regexp.match(node.value)
    except TypeError:
        # Non-string node values cannot match; treated as a failure below.
        match = None
    if match is None:
        raise ConstructorError(
            None,
            None,
            f'failed to construct timestamp from "{node.value}"',
            node.start_mark,
        )
    values = match.groupdict()
    dd = ruamel.yaml.util.create_timestamp(**values)  # this has delta applied
    # Reconstruct the timezone offset from the matched groups so the original
    # suffix can be reproduced on dump.
    delta = None
    if values['tz_sign']:
        tz_hour = int(values['tz_hour'])
        minutes = values['tz_minute']
        tz_minute = int(minutes) if minutes else 0
        delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
        if values['tz_sign'] == '-':
            delta = -delta
    if isinstance(dd, datetime.datetime):
        data = AnchoredTimeStamp(
            dd.year, dd.month, dd.day, dd.hour, dd.minute, dd.second, dd.microsecond, anchor=node.anchor
        )
    else:
        # A bare date carries no time/tz/t information, so it is returned
        # immediately without the bookkeeping below.
        data = AnchoredDate(dd.year, dd.month, dd.day, 0, 0, 0, 0, anchor=node.anchor)
        return data
    if delta:
        data._yaml['delta'] = delta
        tz = values['tz_sign'] + values['tz_hour']
        if values['tz_minute']:
            tz += ':' + values['tz_minute']
        data._yaml['tz'] = tz
    else:
        if values['tz']:  # no delta
            data._yaml['tz'] = values['tz']
    if values['t']:
        data._yaml['t'] = True
    return data
# Monkey-patch: route all YAML timestamp scalars through the anchor-aware
# constructor on round-trip loading.
ruamel.yaml.constructor.RoundTripConstructor.add_constructor('tag:yaml.org,2002:timestamp', construct_anchored_timestamp)
def represent_anchored_timestamp(self, data: Any):
    """Render an AnchoredTimeStamp.

    Patched onto RoundTripRepresenter below; *self* is the representer.
    Re-applies the stored tz delta and suffix so the emitted scalar matches
    the original source text.
    """
    try:
        anchor = data.yaml_anchor()
    except AttributeError:
        anchor = None
    # 'T' separator if the source used one, otherwise a space.
    inter = 'T' if data._yaml['t'] else ' '
    _yaml = data._yaml
    if _yaml['delta']:
        # Undo the UTC normalization applied at construction time.
        data += _yaml['delta']
    if isinstance(data, AnchoredDate):
        value = data.date().isoformat()
    else:
        value = data.isoformat(inter)
    if _yaml['tz']:
        value += _yaml['tz']
    return self.represent_scalar('tag:yaml.org,2002:timestamp', value, anchor=anchor)
# Monkey-patch: dump both anchored types through the representer above.
ruamel.yaml.representer.RoundTripRepresenter.add_representer(AnchoredTimeStamp, represent_anchored_timestamp)
ruamel.yaml.representer.RoundTripRepresenter.add_representer(AnchoredDate, represent_anchored_timestamp)
|
import random
import numpy as np
import pandas as pd
import pytest
from mizarlabs.static import CLOSE
from mizarlabs.transformers.sample_weights import SampleWeightsByReturns
from mizarlabs.transformers.sample_weights import SampleWeightsByTimeDecay
from mizarlabs.transformers.targets.labeling import EVENT_END_TIME
@pytest.mark.usefixtures(
    "dollar_bar_target_labels", "dollar_bar_dataframe", "dollar_bar_ind_matrix"
)
@pytest.mark.parametrize("index_to_check", random.choices(range(0, 200), k=10))
def test_sample_weights_by_returns(
    dollar_bar_dataframe,
    dollar_bar_target_labels,
    dollar_bar_ind_matrix,
    dollar_bar_ind_matrix_indices,
    index_to_check,
):
    """Test sample weights by returns calculated values are correct"""
    df = dollar_bar_dataframe.merge(
        dollar_bar_target_labels, left_index=True, right_index=True
    )
    sample_weights_transformer = SampleWeightsByReturns()
    sample_weights = sample_weights_transformer.transform(df)
    # BUG FIX: was `assert (sample_weights).all() >= 0`, which compares the
    # boolean result of .all() to 0 and therefore could never fail.
    assert (sample_weights >= 0).all()
    # Recompute the expected weight by hand: sum of per-bar log returns
    # attributed equally across concurrent events over the label's lifespan.
    returns = np.log(df[CLOSE]).diff()
    num_concurrent_events = pd.Series(
        dollar_bar_ind_matrix.tocsc().sum(axis=1).A1,
        index=dollar_bar_ind_matrix_indices
    ).loc[df.index]
    start_time = df.index[index_to_check]
    end_time = df[EVENT_END_TIME].iloc[index_to_check]
    weight = abs(
        (
            returns.loc[start_time:end_time]
            / num_concurrent_events.loc[start_time:end_time]
        ).sum()
    )
    assert sample_weights[index_to_check] == weight
@pytest.mark.usefixtures(
    "dollar_bar_target_labels",
)
def test_sample_weights_by_time_decay(dollar_bar_target_labels):
    """Check monotonicity, range, and limiting cases of time-decay weights."""
    # minimum_decay_weight=1 means no decay at all: every weight is exactly 1.
    time_decay_weights = SampleWeightsByTimeDecay(minimum_decay_weight=1).transform(
        dollar_bar_target_labels
    )
    assert set(time_decay_weights.values) == {
        1
    }, "Time decay weights should be equal to 1 when the minimum is set to be 1"
    time_decay_weights = SampleWeightsByTimeDecay(minimum_decay_weight=0.5).transform(
        dollar_bar_target_labels
    )
    assert (
        time_decay_weights.values >= 0
    ).all(), "Time decay weights should be greater or equal to 0"
    assert (
        time_decay_weights.is_monotonic_increasing
    ), "Time decay weights should be monotonic increasing"
    # A smaller minimum decays older samples harder everywhere but the newest.
    time_decay_smaller_weights = SampleWeightsByTimeDecay(
        minimum_decay_weight=0.1
    ).transform(dollar_bar_target_labels)
    # BUG FIX: corrected "dacay" -> "decay" in the assertion message.
    assert (time_decay_weights[:-1] > time_decay_smaller_weights[:-1]).all(), (
        "The time decay weights with smaller minimum should have"
        " smaller values compared to the time decay weights with larger minimum"
    )
    # A negative minimum truncates the oldest weights down to (about) zero.
    time_decay_weights = SampleWeightsByTimeDecay(
        minimum_decay_weight=-0.001
    ).transform(dollar_bar_target_labels)
    assert round(time_decay_weights[0], 2) == 0, "The oldest element should be 0"
|
import math
def find_first(fn, arr):
    """Return the first element of *arr* satisfying predicate *fn*, else None."""
    return next((elem for elem in arr if fn(elem)), None)
def arr_from(arr, size, padding):
    """Return *arr* truncated, or right-padded with *padding*, to exactly *size*."""
    padded = list(arr) + [padding] * max(0, size - len(arr))
    return padded[:size]
def kfold(x, y, partitions=5):
    """Yield (train_x, train_y, test_x, test_y) once per fold.

    Both sequences are split into *partitions* ceil-sized chunks; each fold
    holds one chunk out as the test set and flattens the rest for training.
    """
    def chunks(coll):
        step = math.ceil(len(coll) / partitions)
        return [coll[step * k: step * (k + 1)] for k in range(partitions)]
    xs = chunks(x)
    ys = chunks(y)
    for held_out in range(partitions):
        train_x = [v for idx, part in enumerate(xs) if idx != held_out for v in part]
        train_y = [v for idx, part in enumerate(ys) if idx != held_out for v in part]
        yield (train_x, train_y, xs[held_out], ys[held_out])
def split_arr(arr, nitems):
    """Yield successive slices of *arr* of length *nitems* (last may be shorter)."""
    start = 0
    while start < len(arr):
        yield arr[start:start + nitems]
        start += nitems
|
import logging
import os
import shutil
import numpy as np
from lt_sdk.common import py_file_utils
from lt_sdk.graph import full_graph_pipeline, lgf_graph
from lt_sdk.graph.graph_collections import graph_collection
from lt_sdk.graph.import_graph import graph_importer_map
from lt_sdk.graph.run_graph import graph_runner, histogram_graph_runner
from lt_sdk.graph.transform_graph.graph_transformers import convert_to_debug_mode
from lt_sdk.proto import graph_types_pb2, performance_data_pb2, sim_params_pb2
from lt_sdk.visuals import sim_result_to_trace
SWEEP_NAME = "performance_sweep_data.pb"
class PerformanceSweep(object):
    """Interface for performance sweeps"""

    def __init__(self,
                 output_dir,
                 default_data_dir=None,
                 graph_path=None,
                 fine_tuning_fn=None):
        """
        Params:
            output_dir: directory to store the sweep data
            default_data_dir: default top level directory where data and graphs are
                stored
            graph_path: graph path to use when default_data_dir is not provided
            fine_tuning_fn: optional function to use for fine tuning
        """
        self._output_dir = output_dir
        # graph_type() is a subclass hook; the base class raises here.
        self._graph_type = self.graph_type()
        self._data_dir = ""
        self._default_data_dir = ""
        self._base_data_dir = ""
        self._fine_tuning_fn = fine_tuning_fn

        if graph_path:
            self._graph_path = graph_path
        elif default_data_dir:
            self._default_data_dir = default_data_dir
            # base_data_dir() may be relative (joined under default_data_dir)
            # or already absolute.
            if not self.base_data_dir().startswith("/"):
                self._base_data_dir = os.path.join(self._default_data_dir,
                                                   self.base_data_dir())
            else:
                self._base_data_dir = self.base_data_dir()
            self._data_dir = os.path.join(self._base_data_dir, self.data_dir())
            self._graph_path = os.path.join(self.base_graph_dir(), self.graph_dir())
        else:
            raise ValueError("Must specify either default_data_dir or graph_path")

    def get_calibration_inputs(self, sw_config):
        """
        Returns:
            calibration_inputs: an inference_pb2.BatchedInferenceInput() object
                corresponding padded and batched calibration data
        """
        raise NotImplementedError()

    def num_test_shards(self):
        """Returns the number of test shards."""
        raise NotImplementedError()

    def get_test_inputs(self, sw_config, shard_indx):
        """
        Returns:
            test_inputs: an inference_pb2.BatchedInferenceInput() object
                corresponding padded and batched test data for the given
                shard_indx
        """
        raise NotImplementedError()

    def get_test_labels(self, shard_indx):
        """Return labels for the given shard_indx."""
        raise NotImplementedError()

    def num_fine_tuning_shards(self):
        """Return the number of fine tuning shards."""
        # BUG FIX: previously `return NotImplementedError()`, which handed the
        # caller an exception instance instead of raising it.
        raise NotImplementedError()

    def get_fine_tuning_inputs(self, sw_config, shard_indx):
        """
        Returns:
            fine_tuning_inputs: an inference_pb2.BatchesInferenceInput() object
                corresponding batched fine_tuning data for the given shard_indx.
                Padding is not allowed.
        """
        raise NotImplementedError()

    def get_fine_tuning_labels(self, sw_config, shard_indx):
        """
        Return fine tuning labels for the given shard_indx.
        Padding is not allowed.
        """
        raise NotImplementedError()

    def logits_tensor_name(self):
        """Return name of logits tensor for fine tuning."""
        raise NotImplementedError()

    def update_quality_metrics(self, performance_data, test_outputs, labels):
        """
        Params:
            performance_data: a performance_pb2.PerformanceData() protobuf
            test_outputs: a inference_pb2.BatchedInferenceOutput() object, where
                test_outputs.batches[i] corresponds to the outputs from
                test_inputs.batches[i]
            labels: The label information. Could be a single numpy array or
                something more complicated if necessary.
        Mutates the given performance_data protobuf to have the most up to date
        quality metrics
        """
        raise NotImplementedError()

    def base_data_dir(self):
        """Returns the base data directory, if something other than
        performance_sweep_map.DATA_DIR
        """
        # BUG FIX: previously `return NotImplementedError()`; __init__ calls
        # .startswith() on this value, so the instance must be raised instead.
        raise NotImplementedError()

    def base_graph_dir(self):
        """Return the directory graphs are resolved against (the base data dir)."""
        return self._base_data_dir

    def data_dir(self):
        """Return the data dir relative to the base data dir."""
        return "datasets"

    def full_data_dir(self):
        """Return the absolute path for data dir."""
        return self._data_dir

    def graph_dir(self):
        """Return the trained graph dir relative to what data_dir() returns."""
        raise NotImplementedError()

    def graph_type(self):
        """Return the graph_types_pb2.GraphType of the stored graph."""
        raise NotImplementedError()

    def compilation_batch_size(self):
        """Return the compilation batch size to use."""
        raise NotImplementedError()

    def py_batch_size(self):
        """Return the python batch size to use"""
        raise NotImplementedError()

    def ignore_nodes_filter(self):
        """
        Override to return a node_filters.NodeFilter() object to filter out nodes to
        ignore during graph processing
        """
        return None

    def init_new_config(self):
        """Override to re-initialize class variables for each new config in the sweep"""
        pass

    def end_of_config(self):
        """Override to clean up class variables at the end of a config in the sweep"""
        pass

    def _get_execution_stats(self, performance_data, test_inputs, test_outputs):
        """Copy the execution stats of the first output batch into the proto."""
        performance_data.execution_stats.CopyFrom(test_outputs.batches[0].stats)

    def get_importer(self, sw_config):
        """Build the graph importer for this sweep's graph type and path."""
        input_edges = (full_graph_pipeline.extract_edge_from_data(
            self.get_test_inputs(sw_config,
                                 0)))
        return graph_importer_map.GRAPH_IMPORTER_MAP[self._graph_type](
            self._graph_path,
            sw_config,
            input_edges=input_edges)

    def read_graph(self, sw_config):
        """Import the stored graph and return it as a LightGraph."""
        importer = self.get_importer(sw_config)
        return importer.as_light_graph()

    def _copy_proto(self, proto):
        """Return a deep copy of a protobuf message."""
        copy = proto.__class__()
        copy.CopyFrom(proto)
        return copy

    def _save_debug_info(self,
                         performance_data,
                         cal_hist_pb_map=None,
                         plot_title_map=None):
        """Write trace, graph, memory layout, and histograms to the debug dir."""
        sw_config = performance_data.config.sw_config
        hw_specs = performance_data.config.hw_specs
        sim_params = performance_data.config.sim_params
        if sw_config.debug_info.debug_dir:
            sim_result_to_trace.instruction_trace(self.get_trace_path(sw_config),
                                                  performance_data.execution_stats,
                                                  hw_specs,
                                                  sim_params)
            lgf_graph.LightGraph.write_lgf_pb(
                performance_data.graph,
                os.path.join(sw_config.debug_info.debug_dir,
                             "lgf.pb"))
            if sw_config.sweep_info.collect_memory_layout:
                with open(os.path.join(sw_config.debug_info.debug_dir,
                                       "mem_layout.pb"),
                          "wb") as f:
                    f.write(performance_data.simulation_metrics.memory_layout.
                            SerializeToString())
            if sw_config.sweep_info.convert_graph_to_debug_mode:
                assert (cal_hist_pb_map is not None)

                # Make hist directories
                hist_dir = os.path.join(sw_config.debug_info.debug_dir, "histograms")
                if os.path.exists(hist_dir):
                    shutil.rmtree(hist_dir)
                os.makedirs(hist_dir)

                # Save the protobufs
                protobuf_dir = os.path.join(hist_dir, "protobufs")
                os.mkdir(protobuf_dir)
                for key, cal_hist_pb in cal_hist_pb_map.items():
                    hist_path = os.path.join(protobuf_dir,
                                             plot_title_map.get(key,
                                                                str(key)) + ".pb")
                    with open(hist_path, "wb") as f:
                        f.write(cal_hist_pb.SerializeToString())

    def _run_streamed_test_data(self, runner, performance_data):
        """Run every test shard through *runner*, updating quality metrics."""
        # Run data through the graph
        for shard_indx in range(self.num_test_shards()):
            logging.info("-Running inference on test data shard {}".format(shard_indx))
            test_inputs = self.get_test_inputs(performance_data.config.sw_config,
                                               shard_indx)
            test_outputs = runner.run(test_inputs)

            # Update quality metrics each shard
            self.update_quality_metrics(performance_data,
                                        test_outputs,
                                        self.get_test_labels(shard_indx))

        # Just use last shard for execution stats
        self._get_execution_stats(performance_data, test_inputs, test_outputs)

    def _init_graph_coll(self, light_graph, graph_coll, performance_data):
        """Prepare the graph collection; return (graph, runner class, kwargs)."""
        # Unpack performance data
        sw_config = performance_data.config.sw_config
        hw_specs = performance_data.config.hw_specs

        # Simulation metrics
        sim_metrics_coll = graph_coll.simulation_metrics_collection()
        sim_metrics_coll.set_collect_bit_activity(
            sw_config.sweep_info.collect_bit_activity)
        sim_metrics_coll.set_collect_memory_layout(
            sw_config.sweep_info.collect_memory_layout)
        sim_metrics_coll.initialize_simulation_metrics(hw_specs)

        # Default values to return
        run_graph = light_graph
        runner_cls = graph_runner.GraphRunner
        debug_kwargs = {}

        # Special cases
        if sw_config.sweep_info.convert_graph_to_debug_mode:
            hist_coll = graph_coll.histogram_collection()
            transform = convert_to_debug_mode.ConvertToDebugMode(sw_config, hist_coll)
            run_graph = transform.process_transforms(light_graph)
            runner_cls = histogram_graph_runner.HistogramGraphRunner
            debug_kwargs = {"plot_title_map": transform.get_key_map()}

        return run_graph, runner_cls, debug_kwargs

    def _get_extra_debug_kwargs(self, debug_kwargs, graph_coll, performance_data):
        """Collect post-run debug artifacts (calibration histograms) in place."""
        sw_config = performance_data.config.sw_config

        # Special cases
        if sw_config.sweep_info.convert_graph_to_debug_mode:
            cal_hist_pb_map = {
                k: graph_coll.histogram_collection().get_histogram(k)
                for k in debug_kwargs["plot_title_map"]
            }
            debug_kwargs.update({"cal_hist_pb_map": cal_hist_pb_map})

    def _run_single_config_helper(self, performance_data):
        """Run the config and update performance_data"""
        sw_config = performance_data.config.sw_config
        hw_specs = performance_data.config.hw_specs
        sim_params = performance_data.config.sim_params

        # Use defaults from perf_sweep if necessary
        if sw_config.sweep_info.py_batch_size == 0:
            sw_config.sweep_info.py_batch_size = self.py_batch_size()
        sim_params.compiled_batch_size = self.compilation_batch_size()
        if sw_config.sweep_info.num_py_batches > 0:
            sim_params.compiled_batch_size = min(
                sw_config.sweep_info.num_py_batches * sw_config.sweep_info.py_batch_size,
                sim_params.compiled_batch_size)

        # Graph transformations
        if performance_data.config.do_transform:
            # Transform on a virtual architecture with debug output disabled.
            transform_hw_specs = self._copy_proto(hw_specs)
            transform_sw_config = self._copy_proto(sw_config)
            transform_sim_params = self._copy_proto(sim_params)
            transform_sw_config.debug_info.debug_dir = ""
            transform_sim_params.arch_params.arch_type = \
                sim_params_pb2.ArchitectureParams.VIRTUAL

            # Full graph pipeline
            calibration_data = self.get_calibration_inputs(transform_sw_config)
            tmp_dir = py_file_utils.mkdtemp()
            lgf_pb_path = os.path.join(tmp_dir, "modified_lgf.pb")
            full_graph_pipeline.main(self._graph_path,
                                     self._graph_type,
                                     lgf_pb_path,
                                     graph_types_pb2.LGFProtobuf,
                                     calibration_data,
                                     transform_hw_specs,
                                     transform_sw_config,
                                     transform_sim_params)

            # Read light graph
            light_graph = lgf_graph.LightGraph.lgf_pb_to_graph(
                lgf_graph.LightGraph.read_lgf_pb(lgf_pb_path))

            # Cleanup
            shutil.rmtree(tmp_dir)
        else:
            light_graph = self.read_graph(performance_data.config.sw_config)

        # Fine tuning
        if (performance_data.config.do_fine_tuning
                and sw_config.sweep_info.num_fine_tuning_epochs > 0):
            if self._fine_tuning_fn is None:
                raise ValueError("Must provide fine tuning function")

            num_fine_tuning_shards = self.num_fine_tuning_shards()
            tot_num_shards = int(sw_config.sweep_info.num_fine_tuning_epochs *
                                 num_fine_tuning_shards)

            # Get an ordered list of shards to be used for fine tuning
            shard_list = []
            while len(shard_list) < tot_num_shards:
                shard_list.extend(np.random.permutation(range(num_fine_tuning_shards)))
            shard_list = shard_list[:tot_num_shards]

            for i, shard_indx in enumerate(shard_list):
                fine_tuning_data = self.get_fine_tuning_inputs(
                    performance_data.config.sw_config,
                    shard_indx)
                fine_tuning_labels = self.get_fine_tuning_labels(
                    performance_data.config.sw_config,
                    shard_indx)
                light_graph = self._fine_tuning_fn(light_graph,
                                                   fine_tuning_data,
                                                   fine_tuning_labels,
                                                   performance_data.config.hw_specs,
                                                   performance_data.config.sw_config,
                                                   performance_data.config.sim_params,
                                                   self.logits_tensor_name())

        # Create debug_dir if necessary
        debug_dir = sw_config.debug_info.debug_dir
        if debug_dir:
            if os.path.exists(debug_dir):
                shutil.rmtree(debug_dir)
            os.makedirs(debug_dir)

        with graph_collection.GraphCollection() as graph_coll:
            # Initialize graph for running test data
            run_graph, runner_cls, debug_kwargs = self._init_graph_coll(
                light_graph, graph_coll, performance_data)

            # Run test data
            runner = runner_cls(light_graph, hw_specs, sw_config, sim_params, graph_coll)
            self._run_streamed_test_data(runner, performance_data)

            # Get extra information after running
            self._get_extra_debug_kwargs(debug_kwargs, graph_coll, performance_data)

            # Save simulation metrics
            performance_data.simulation_metrics.CopyFrom(
                graph_coll.simulation_metrics_collection().get_simulation_metrics())

        # Save graph and debug info
        performance_data.graph.CopyFrom(light_graph.as_lgf_pb())
        self._save_debug_info(performance_data, **debug_kwargs)

    def run_single_config(self, config, indx):
        """
        Params:
            config: a performance_data_pb2.ConfigInfo() protobuf
            indx: unique index for the config
        Returns:
            performance_data: a performance_data_pb2.PerformanceData() protobuf
        """
        # Initialize some things
        self.init_new_config()
        performance_data = performance_data_pb2.PerformanceData()
        performance_data.config.CopyFrom(config)
        performance_data_path = os.path.join(self._output_dir,
                                             "performance_data_{}.pb".format(indx))

        # Do not re-run a config if it is already on disk
        if os.path.exists(performance_data_path):
            logging.warning(
                ("Found performance data on disk, skipping configuration {0}. " +
                 "To re-run this configuration, remove or rename the " +
                 "following file: {1}").format(indx,
                                               performance_data_path))
            with open(performance_data_path, "rb") as f:
                performance_data.ParseFromString(f.read())
        else:
            self._run_single_config_helper(performance_data)
            with open(performance_data_path, "wb") as f:
                f.write(performance_data.SerializeToString())

        # Clean up and return
        self.end_of_config()
        return performance_data

    def get_trace_path(self, sw_config):
        """Return the path of the instruction trace inside the debug dir."""
        return os.path.join(sw_config.debug_info.debug_dir, "batch0.trace")

    def save_performance_sweep_data(self, performance_sweep_data):
        """Serialize the sweep data protobuf into the output dir."""
        performance_sweep_data_path = os.path.join(self._output_dir, SWEEP_NAME)
        with open(performance_sweep_data_path, "wb") as f:
            f.write(performance_sweep_data.SerializeToString())

    def run_configs(self, configs):
        """
        Params:
            configs: a list of performance_data_pb2.ConfigInfo() protobufs
        Returns:
            performance_sweep_data: a performance_data_pb2.PerformanceSweepData()
                protobuf where performance_data.data[i] corresponds to configs[i]
        """
        # Sweep all configs
        performance_sweep_data = performance_data_pb2.PerformanceSweepData()
        for indx, config in enumerate(configs):
            logging.info("------- Running config {0}: {1} ---------".format(
                indx,
                config.description))
            performance_data = self.run_single_config(config, indx)
            performance_sweep_data.data.add().CopyFrom(performance_data)

        return performance_sweep_data
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: kidjourney
# @Date: 2015-06-17 22:30:53
# @Last Modified by: kidjourney
# @Last Modified time: 2015-06-17 22:35:01
class Solution:
    # @param s, a string
    # @return a string
    def reverseWords(self, s):
        """Return *s* with its whitespace-separated words in reverse order."""
        # split() with no arguments already collapses runs of whitespace and
        # drops leading/trailing blanks, so no explicit strip() is needed.
        return " ".join(reversed(s.split()))
import angr
import logging
logger = logging.getLogger('Concretizer')
class Concretizer(angr.exploration_techniques.ExplorationTechnique):
    """Exploration technique that stores a concrete value into each watched
    address once the solver proves the word there has a single model."""
    def __init__(self, addrs):
        super(Concretizer, self).__init__()
        # Memory addresses to try to concretize on every step.
        self.addrs = addrs
    def step(self, simgr, stash, **kwargs):
        for addr in self.addrs:
            for s in simgr.active:
                # Load one machine word (bits / byte_width bytes), little-endian.
                var = s.memory.load(addr, s.arch.bits // s.arch.byte_width, endness="Iend_LE")
                if not var.symbolic:
                    # Already concrete — step immediately.
                    # NOTE(review): this early return skips the remaining
                    # addrs/states for this step; confirm that is intended.
                    return simgr.step(stash=stash)
                vals = s.solver.eval_upto(var, 2)
                if len(vals) == 1:
                    # Exactly one satisfying value: overwrite the symbolic
                    # word with the equivalent concrete bitvector.
                    new_var = s.solver.BVV(vals[0], s.arch.bits)
                    s.memory.store(addr, new_var, endness="Iend_LE")
                    logger.info('Concretized {} @ {} to {}'.format(var, hex(addr), hex(vals[0])))
        return simgr.step(stash=stash)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add beach.lat / beach.lng coordinates (default 0) and allow
    venue.contacts to be blank."""

    dependencies = [
        ('matching', '0004_auto_20150407_0933'),
    ]

    operations = [
        migrations.AddField(
            model_name='beach',
            name='lat',
            field=models.FloatField(default=0),
        ),
        migrations.AddField(
            model_name='beach',
            name='lng',
            field=models.FloatField(default=0),
        ),
        migrations.AlterField(
            model_name='venue',
            name='contacts',
            field=models.ManyToManyField(to='matching.Contact', blank=True),
        ),
    ]
|
{
  'variables': {
    'component%': 'static_library',
    'visibility%': 'hidden',
    'library%': 'static_library',
  },
  'conditions': [
    ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
       or OS=="netbsd" or OS=="android"', {
      'target_defaults': {
        'cflags': [ '-pthread' ],
        # V8 headers cause a build error on recent gcc.
        # Adding -fpermissive to solve this.
        # See https://issues.adblockplus.org/ticket/4950
        # We might be able to do without after upgrading V8.
        'cflags_cc': [ '-fno-rtti', '-std=c++11', '-fexceptions', '-fpermissive' ],
        'cflags!': [ '-Werror', ],
        'ldflags': [ '-pthread', ],
      },
    }],
    ['OS=="mac"', {
      'xcode_settings': {
        'CLANG_CXX_LANGUAGE_STANDARD': 'c++11',
        'CLANG_CXX_LIBRARY': 'libc++',
      },
    }],
  ],
  'target_conditions': [
    ['OS=="android" and not "host" in toolsets', {
      'target_defaults': {
        'cflags!': [
          '-pthread',  # Not fully supported by Android toolchain. Built-in support of major functions, no need to link with it.
        ],
        'ldflags!': [
          # BUG FIX: a missing comma after '-llog' fused these two entries
          # into the single bogus flag '-llog-pthread', so neither '-llog'
          # nor '-pthread' was actually excluded.
          '-llog',
          '-pthread',  # Not fully supported by Android toolchain. Built-in support of major functions, no need to link with it.
        ],
      },
    }],
  ],
  'target_defaults': {
    'configurations': {
      'Debug': {},
      'Release': {}
    },
    'msvs_cygwin_shell': 0,
    'target_conditions': [
      ['_type=="static_library"', {
        'standalone_static_library': 1
      }]
    ]
  }
}
|
from weapon_selector_screen import WeaponSelectorScreen
from credits_screen import CreditsScreen
from main_menu_screen import MainMenuScreen
from pygame import QUIT, Surface
from guns import Weapon
class MainMenu:
    """Menu state machine: dispatches paint/event/rule calls to the screen
    object registered for the current state.  The builtin ``exit`` is used
    as a sentinel screen for the 'EXITING' state."""

    def __init__(self, screen: Surface):
        self.state = 'MENU'
        self.main_menu_screen = MainMenuScreen(self.change_state, screen)
        self.credits_screen = CreditsScreen(self.change_state, screen)
        self.weapon_selector_screen = WeaponSelectorScreen(
            self.change_state, screen, self.gun_receiver)
        self.gun = None
        self.screen_selector_by_state = {
            'MENU': self.main_menu_screen,
            'WEAPON SELECTOR': self.weapon_selector_screen,
            'CREDITS': self.credits_screen,
            'EXITING': exit,
        }

    # --- painting -----------------------------------------------------
    def pintar(self):
        """Paint the active screen; no-op while exiting."""
        active = self.screen_selector_by_state[self.state]
        if active is not exit:
            active.paint()

    # --- event processing ---------------------------------------------
    def processa_eventos(self, evento, mouse):
        """Forward an event to the active screen, quitting on QUIT/EXITING."""
        handler = self.screen_selector_by_state[self.state]
        if evento.type == QUIT or handler is exit:
            exit()
        else:
            handler.process_events(evento, mouse)

    # --- rules calculation --------------------------------------------
    def calculate_rules(self, mouse: tuple):
        """Run the active screen's rules with the current mouse position."""
        _, mouse_position = mouse
        active = self.screen_selector_by_state[self.state]
        if active is exit:
            active()
        else:
            active.calculate_rules(mouse_position)

    # --- auxiliary methods --------------------------------------------
    def allowed_to_play(self):
        return self.state == 'PLAYING'

    def gun_receiver(self, gun: Weapon) -> None:
        # Callback handed to the weapon selector screen.
        self.gun = gun

    def reset(self) -> None:
        self.state = 'MENU'

    def change_state(self, new_state: str):
        self.state = new_state

    def get_gun(self):
        return self.gun
|
def sumOfProduct(arr, n, k):
    """Return the sum of products over all k-element subsequences of
    ``arr[:n]``, modulo 1_000_000_007.

    dp[r][c] = sum of products of all r-element subsequences drawn from
    arr[c:].  Recurrence: skip arr[c] (dp[r][c+1]) or pair it with an
    (r-1)-subsequence from arr[c+1:].

    The table is sized per call instead of the previous fixed 1001x1000
    module-level buffer, which capped n at 1000 and kept mutable state
    between calls.
    """
    MOD = 1000000007
    # Degenerate requests: no k-subsequence exists.
    if n <= 0 or k <= 0 or k > n:
        return 0
    dp = [[0] * n for _ in range(k + 1)]
    # Base row: dp[1][c] is the suffix sum of arr[c:].
    dp[1][n - 1] = arr[n - 1] % MOD
    for c in range(n - 2, -1, -1):
        dp[1][c] = (dp[1][c + 1] + arr[c]) % MOD
    for c in range(n - 2, -1, -1):
        for r in range(2, k + 1):
            dp[r][c] = (dp[r][c + 1] + dp[r - 1][c + 1] * arr[c]) % MOD
    return dp[k][0]
if __name__ == "__main__":
    # Read T test cases, each giving n, k and then the array itself.
    cases = int(input('Enter the number of test cases:- '))
    for _ in range(cases):
        size_line = input('Enter the size of the array and the subarray:- ').strip().split()
        n, k = int(size_line[0]), int(size_line[1])
        arr = [int(token) for token in input('Enter the elements of the array:- ').strip().split()]
        print(sumOfProduct(arr, n, k))
# -*- coding:utf-8 -*-
import urllib
import xlrd
import time
from urllib import request, parse
import json
from xlutils.copy import copy
import threading
def start():
    """Split the 'song++singer' strings in column 0 of the source sheet
    into song (column 3) and singer (column 4) of a copied workbook,
    saved as a new file.
    """
    source_workbook = xlrd.open_workbook('E:/song_singer.xls', formatting_info=True)
    result_workbook = copy(source_workbook)
    source_sheet = source_workbook.sheet_by_index(0)
    result_sheet = result_workbook.get_sheet(0)
    # Iterate the sheet's actual row count instead of a hard-coded
    # 10000, which crashed on shorter sheets and silently truncated
    # longer ones.  Row 0 is assumed to be a header.
    for i in range(1, source_sheet.nrows):
        # ctype == 0 marks an empty cell; skip it.
        if source_sheet.cell(i, 0).ctype == 0:
            continue
        song_singer = source_sheet.cell(i, 0).value
        # Single-pass split on the first '++' separator.
        song, sep, singer = song_singer.partition('++')
        if not sep:
            # NOTE(review): the original raised ValueError (str.index)
            # on rows without '++'; such rows are now skipped — confirm.
            continue
        result_sheet.write(i, 3, song)
        result_sheet.write(i, 4, singer)
    result_workbook.save('E:/song_singer_result.xls')
# Time the whole conversion run and report the elapsed seconds.
started_at = time.time()
start()
finished_at = time.time()
print(finished_at - started_at)
|
import csv
from keboola import docker
class App:
    """Keboola component that splits long texts into word-aligned
    fragments of at most ``max`` characters each (a cut is forced at
    ``min`` when no suitable space exists).
    """

    def run(self):
        """Read the single mapped input table, split each row's text
        column and write one output row per fragment.

        Raises:
            ValueError: on missing parameters, wrong table mappings, or
                missing source columns.
        """
        # initialize KBC configuration
        cfg = docker.Config()
        # validate application parameters
        parameters = cfg.get_parameters()
        text_max = parameters.get('max')
        text_min = parameters.get('min')
        id_column = parameters.get('columns', {}).get('id')
        text_column = parameters.get('columns', {}).get('text')
        # BUG FIX: the original tested ``text_min is None`` twice, so a
        # missing 'max' slipped through and failed later with TypeError.
        if text_max is None or text_min is None or id_column is None or text_column is None:
            raise ValueError("max, min, columns.id and columns.text are required parameters.")
        id_prefix = parameters.get('id_prefix', '')
        # get input and output table and validate them
        tables = cfg.get_input_tables()
        if len(tables) != 1:
            raise ValueError("Input mapping must contain one table only.")
        in_table = tables[0]
        tables = cfg.get_expected_output_tables()
        if len(tables) != 1:
            raise ValueError("Output mapping must contain one table only.")
        out_table = tables[0]
        # physical location of the source file with source data
        in_file_path = in_table['full_path']
        # physical location of the target file with output data
        out_file_path = out_table['full_path']
        # validate columns in the input table
        with open(in_file_path, mode='rt', encoding='utf-8') as in_file:
            # strip NUL characters the CSV module cannot handle
            lazy_lines = (line.replace('\0', '') for line in in_file)
            csv_reader = csv.DictReader(lazy_lines, dialect='kbc')
            row = next(csv_reader)
            if id_column not in row or text_column not in row:
                raise ValueError("The source table does not contain columns {}, {}".format(id_column, text_column))
        # read the input table and immediately write to the output table
        with open(in_file_path, mode='rt', encoding='utf-8') as in_file, \
                open(out_file_path, mode='wt', encoding='utf-8') as out_file:
            writer = csv.DictWriter(out_file, fieldnames=['pk', 'id', 'row', 'text'], dialect='kbc')
            writer.writeheader()
            lazy_lines = (line.replace('\0', '') for line in in_file)
            csv_reader = csv.DictReader(lazy_lines, dialect='kbc')
            for row in csv_reader:
                self._split_row(writer, row, id_column, text_column,
                                id_prefix, text_min, text_max)
        print("Splitting finished.")

    @staticmethod
    def _split_row(writer, row, id_column, text_column, id_prefix,
                   text_min, text_max):
        """Write the fragments of one source row to ``writer``.

        Fragments are cut at the last space within the first
        ``text_max`` characters; if that space falls before ``text_min``
        (or there is none), the cut is forced at ``text_min``.
        """
        fragment_index = 0
        string_to_split = row[text_column]
        while len(string_to_split) > text_max:
            fragment = string_to_split[:text_max + 1]
            offset = fragment.rfind(' ')
            if offset < text_min:
                offset = text_min
            fragment = string_to_split[:offset]
            string_to_split = string_to_split[offset:]
            writer.writerow({
                'pk': id_prefix + str(row[id_column]) + '_' + str(fragment_index),
                'id': row[id_column],
                'row': fragment_index,
                'text': fragment,
            })
            fragment_index += 1
        if len(string_to_split) > 0:
            # Trailing remainder (or a short text that never looped).
            writer.writerow({
                'pk': id_prefix + str(row[id_column]) + '_' + str(fragment_index),
                'id': row[id_column],
                'row': fragment_index,
                'text': string_to_split,
            })
|
import math
# import random as random
from random import random
from random import randint
from random import sample
class NQueen(object):
    """N-queens board with a simulated-annealing solver.

    A candidate solution is a list of N (x, y) positions; ``table`` is
    the N x N board holding 0 for empty cells and the 1-based queen
    index on occupied cells.
    """

    def __init__(self, N):
        self.N = N
        self.table = [[0] * N for _ in range(N)]

    def print_table(self):
        """Dump the board to stdout for debugging."""
        print("-------------")
        for row in self.table:
            print(row)
        print("-------------")

    def put_queen(self, solution):
        """Rebuild ``table`` from a solution (queen i+1 at solution[i])."""
        self.table = [[0] * self.N for _ in range(self.N)]
        for i, s in enumerate(solution):
            self.table[s[1]][s[0]] = i + 1

    def check_overlap(self, posi1, posi2):
        """Return True when two queens attack each other (same column,
        row, or either diagonal)."""
        same_colum = posi1[0] == posi2[0]
        same_row = posi1[1] == posi2[1]
        same_dec_m = posi1[0] - posi1[1] == posi2[0] - posi2[1]
        same_inc_m = posi1[0] + posi1[1] == posi2[0] + posi2[1]
        return same_colum or same_row or same_dec_m or same_inc_m

    def cost(self, solution):
        """Number of queens that attack no other queen; N means solved."""
        overlap = [False] * self.N
        for i in range(self.N - 1):
            for j in range(i + 1, self.N):
                if self.check_overlap(solution[i], solution[j]):
                    overlap[i] = True
                    overlap[j] = True
        return overlap.count(False)

    def checkTable_true(self):
        """Sanity check: the board holds exactly N queens."""
        empty_cells = sum(row.count(0) for row in self.table)
        return empty_cells == (self.N * self.N - self.N)

    def neighbor(self, state):
        """Return a new state with one random queen moved one step in a
        random free direction, or None when every queen is blocked."""
        difX = [0, 0, -1, 1]  # up, down, left, right
        difY = [-1, 1, 0, 0]
        # BUG FIX: this method used the module-level globals ``n`` and
        # ``n_queen`` instead of ``self``; the class was unusable on its
        # own.
        for queen in sample(range(self.N), self.N):
            for action in sample(range(4), 4):
                posiX = state[queen][0] + difX[action]
                posiY = state[queen][1] + difY[action]
                inside = 0 <= posiX < self.N and 0 <= posiY < self.N
                if inside and self.table[posiY][posiX] == 0:
                    new_state = list(state)
                    new_state[queen] = (posiX, posiY)
                    return new_state
        return None

    def acceptance_probability(self, diff_cost, T):
        """Metropolis acceptance ratio exp(diff/T); >1 for improvements."""
        return math.exp(diff_cost / T)

    def anneal(self, solution):
        """Run simulated annealing from ``solution``.

        Returns:
            (solution, cost): best-known state and its cost; returns
            early as soon as a full non-attacking placement is found.
        """
        old_cost = self.cost(solution)
        old_solution = solution  # initialized so the debug path never NameErrors
        T = 1.0
        T_min = 0.001
        alpha = 0.99
        rounds = 0
        while rounds <= 200:
            i = 1
            while i <= 5000:
                new_solution = self.neighbor(solution)
                new_cost = self.cost(new_solution)
                diff_cost = new_cost - old_cost
                ap = self.acceptance_probability(diff_cost, T)
                # Always accept improvements; accept worse moves with
                # probability ap (classic annealing).
                if diff_cost > 0 or ap > random():
                    old_solution = solution
                    solution = new_solution
                    old_cost = new_cost
                    # BUG FIX: was n_queen.put_queen (module global).
                    self.put_queen(solution)
                    if not self.checkTable_true():
                        self.print_table()
                        print("fail")
                        print(old_solution)
                        print(solution)
                        break
                if new_cost == self.N:
                    return solution, old_cost
                i += 1
            if T >= T_min:
                T = T * alpha
            rounds += 1
            print(str(rounds) + " " + str(T) + " " + str(old_cost))
        return solution, old_cost
if __name__ == '__main__':
    # Diagonal starting placement; the class's neighbor()/anneal() read
    # the module globals ``n`` and ``n_queen`` defined below, so these
    # names must not be renamed.
    start_state = [
        (0, 0),
        (1, 1),
        (2, 2),
        (3, 3),
        (4, 4),
        (5, 5),
        (6, 6),
        (7, 7)
    ]
    n = 5
    s = start_state[:n]
    n_queen = NQueen(n)
    n_queen.put_queen(s)
    solution, old_cost = n_queen.anneal(s)
    print(solution)
    print(old_cost)
    n_queen.put_queen(solution)
    n_queen.print_table()
|
#!python
import unittest
import ast
from py2lisp import translate, translate_literal
class TestLiterals(unittest.TestCase):
    """Checks translate_literal's rendering of Python literals into Lisp
    source text: strings/bytes become vectors, bools become t/nil, etc."""
    function = staticmethod(translate_literal)
    def test_string(self):
        # Strings are vectors of 4-byte big-endian code points.
        self.assertEqual("#()", self.function(""))
        self.assertEqual("#(0 0 0 97)", self.function("a"))
        self.assertEqual("#(0 0 0 97 0 0 0 98)", self.function("ab"))
        self.assertEqual("#(0 0 0 97 0 0 0 98 0 0 0 99)",
                         self.function("abc"))
        # Astral-plane character exercises the high bytes.
        self.assertEqual("#(0 16 1 0)", self.function("\U00100100"))
    def test_int(self):
        # Integers (including arbitrary precision) print verbatim.
        self.assertEqual("-23", self.function(-23))
        self.assertEqual("0", self.function(0))
        self.assertEqual("5", self.function(5))
        self.assertEqual("99991152921504606846975",
                         self.function(99991152921504606846975))
        self.assertEqual("-99991152921504606846975",
                         self.function(-99991152921504606846975))
    def test_bytes(self):
        # Bytes are vectors of raw octet values.
        self.assertEqual("#()", self.function(b""))
        self.assertEqual("#(97)", self.function(b'\x61'))
        self.assertEqual("#(97 98)", self.function(b'\x61\x62'))
        self.assertEqual("#(97 98 99)", self.function(b'\x61\x62\x63'))
    def test_float(self):
        # Round-trip check instead of exact text (repr may vary).
        # Example from http://stackoverflow.com/questions/3481289/
        f = 0.38288746115497402
        self.assertEqual(f, float(self.function(f)))
    def test_complex(self):
        # Complex numbers render as #C(real imag); parse the two parts
        # back out with a regex and compare numerically.
        import re
        float_pattern = "([0-9]*(?:\\.[0-9]+)?)"
        complex_re = re.compile("^#C\\(%s %s\\)$" %
                                (float_pattern, float_pattern))
        f = 0.38288746115497402
        def test_complex(complex_):
            match = complex_re.match(self.function(complex_))
            self.assertTrue(match)
            self.assertEqual(float(match.group(1)), complex_.real)
            self.assertEqual(float(match.group(2)), complex_.imag)
        test_complex(f + 0j)
        test_complex(f * 1j)
        test_complex(f + f * 1j)
    def test_None(self):
        self.assertEqual("|None|", self.function(None))
    def test_bool(self):
        self.assertEqual("t", self.function(True))
        self.assertEqual("nil", self.function(False))
    def test_list(self):
        # Lists become parenthesized element sequences.
        self.assertEqual("()", self.function([]))
        self.assertEqual("(1 2 3)", self.function([1, 2, 3]))
        self.assertEqual("(#(97) 1 #(0 0 0 98) |None| t nil)",
                         self.function([b'\x61', 1, 'b',
                                        None, True, False]))
class TestTranslate(TestLiterals):
    """Re-runs every literal test through `translate` (inherited) and
    adds AST-node translation tests."""
    function = staticmethod(translate)
    def test_leaf_node(self):
        # Leaf AST nodes render as a one-element list of their name.
        self.assertEqual("(|py-Gt|)", self.function(ast.Gt()))
        self.assertEqual("(|py-Load|)", self.function(ast.Load()))
    def test_tree_node(self):
        # Nodes with fields render the node name followed by each field.
        self.assertEqual("(|py-Bytes| #(97 98))",
                         self.function(ast.Bytes(b'\x61\x62')))
        self.assertEqual("(|py-Str| #(0 0 0 97))",
                         self.function(ast.Str('a')))
        self.assertEqual("(|py-Num| 97)", self.function(ast.Num(97)))
        self.assertEqual("(|py-NameConstant| t)",
                         self.function(ast.NameConstant(True)))
        self.assertEqual("(|py-NameConstant| nil)",
                         self.function(ast.NameConstant(False)))
        self.assertEqual("(|py-NameConstant| |None|)",
                         self.function(ast.NameConstant(None)))
        self.assertEqual("(|py-Module| " +
                         "((|py-Expr| (|py-NameConstant| |None|))" +
                         " (|py-Expr| (|py-NameConstant| |None|))" +
                         " (|py-Expr| (|py-NameConstant| |None|))))",
                         self.function(ast.Module(
                             [ast.Expr(ast.NameConstant(None))] * 3)))
    def test_parsing(self):
        # NOTE(review): this list of (source, expected) pairs is built
        # and immediately discarded — the method asserts nothing.
        # Presumably a loop calling ast.parse + self.function was
        # intended, but the last snippet ('return'/'continue' at module
        # level) is not parseable, and the expected strings are not
        # mutually consistent; confirm intent before wiring it up.
        [("None", "(|py-Module| ((|py-Expr| (|py-NameConstant| |None|))))"),
         ("3", "(|py-Module| ((|py-Expr| (|py-Num| 3))))"),
         ("'abc'", "(|py-Module| ((|py-Expr| (|py-Str| "+
          "#(0 0 0 97 0 0 0 98 0 0 0 99)))))"),
         ("if a:\n return b\nelse:\n continue", (
          "(|py-Module|" +
          " (|py-If| (|py-Name| #(0 0 0 97) (|py-Load|))" +
          " ((|py-Return| (|py-Name| #(0 0 0 98) (|py-Load|))))" +
          " ((|py-Continue|))))"))
        ]
# Allow running this test module directly with the unittest runner.
if __name__ == "__main__":
    unittest.main()
|
# Arithmetic-progression printer: shows terms in batches, asking the
# user after each batch how many more to print; 0 ends the program.
print('------ Bem vindo ao exercicio 62 ----------')
print('\033[31m Melhore o desafio 61, perguntando para o usuario se ele quer mostrar mais alguns termos. O programa encerra quando ele disser que quer mostrar os termos')
primeiro = int(input('Primeiro termo: '))
razao = int(input('Razão: '))
termo = primeiro
exibidos = 1
total = 0
extras = 10  # first batch always prints 10 terms
while extras != 0:
    total += extras
    # Print terms until the running counter catches up with the target.
    while exibidos <= total:
        print('{} -> '.format(termo), end='')
        termo += razao
        exibidos += 1
    print('Pausa')
    extras = int(input('Quantos termos você quer mostrar a mais?'))
print('FIM')
print('Progressao finalizada com {} apresentados'.format(total))
import FWCore.ParameterSet.Config as cms
# CMSSW configuration fragment: EDFilter selecting events with APV
# shots in the zero-suppressed SiStrip digis.
apvshotsfilter = cms.EDFilter('APVShotsFilter',
                              digiCollection = cms.InputTag("siStripDigis","ZeroSuppressed"),
                              historyProduct = cms.InputTag("consecutiveHEs"),
                              apvPhaseCollection = cms.InputTag("APVPhases"),
                              zeroSuppressed = cms.untracked.bool(True),
                              useCabling = cms.untracked.bool(False),
                              selectAPVshots = cms.untracked.bool(True)
                              )
|
# Django settings fragment: appends the CMS stack and its support
# libraries to the project's INSTALLED_APPS (ordering matters for
# admin_tools, which must precede django.contrib.admin).
INSTALLED_APPS += (
    # CMS parts
    'fluent_blogs',
    'fluent_blogs.pagetypes.blogpage',
    'fluent_pages',
    'fluent_pages.pagetypes.fluentpage',
    'fluent_pages.pagetypes.flatpage',
    'fluent_pages.pagetypes.redirectnode',
    'fluent_comments',
    'fluent_contents',
    'fluent_contents.plugins.code',
    'fluent_contents.plugins.commentsarea',
    'fluent_contents.plugins.gist',
    'fluent_contents.plugins.oembeditem',
    'fluent_contents.plugins.picture',
    'fluent_contents.plugins.rawhtml',
    'fluent_contents.plugins.sharedcontent',
    'fluent_contents.plugins.text',
    # Support libs
    'analytical',
    'any_imagefield',
    'any_urlfield',
    'axes',
    'categories_i18n',
    'crispy_forms',
    'django_comments',
    'django_wysiwyg',
    'django.contrib.redirects',
    'filebrowser',
    'mptt',
    'parler',
    'polymorphic',
    'polymorphic_tree',
    'slug_preview',
    'staff_toolbar',
    'sorl.thumbnail',
    'taggit',
    'taggit_selectize',
    'threadedcomments',
    'tinymce',
    # and enable the admin
    'fluent_dashboard',
    'admin_tools',
    'admin_tools.theming',
    'admin_tools.menu',
    'admin_tools.dashboard',
    'django.contrib.admin',
)
|
"""alteracoes
Revision ID: e2d92519c296
Revises: ec8ee485b2bd
Create Date: 2020-09-20 15:33:06.633473
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'e2d92519c296'
down_revision = 'ec8ee485b2bd'
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``rent`` table with check-in/out dates and final price."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('rent',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('checkinDate', sa.String(length=10), nullable=True),
    sa.Column('checkoutDate', sa.String(length=10), nullable=True),
    sa.Column('finalPrice', sa.Float(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the ``rent`` table, reversing :func:`upgrade`."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('rent')
    # ### end Alembic commands ###
|
import translationstring
import colander
from ..models import Emails
from ..models import Users
_ = translationstring.TranslationStringFactory('PhotoViewerExpress')
class DBEmail(colander.Email):
    """E-mail validator that additionally rejects addresses already
    present in the Emails table."""

    def __init__(self, msg=None):
        super(DBEmail, self).__init__(msg)

    def __call__(self, node, value):
        # Standard format validation first, then the uniqueness check.
        super(DBEmail, self).__call__(node, value)
        if Emails.by_email(value):
            self.msg = _('Email already registered!')
            raise colander.Invalid(node, self.msg)
class DBUser(colander.Length):
    """Login-length validator that additionally rejects logins already
    present in the Users table."""

    def __init__(self, min=None, max=None):
        super(DBUser, self).__init__(min, max)

    def __call__(self, node, value):
        # Standard length validation first, then the uniqueness check.
        super(DBUser, self).__call__(node, value)
        if Users.by_login(value):
            existing_err = _('User already registered!')
            raise colander.Invalid(node, existing_err)
|
from django.contrib import admin
from .models import Player, PlayerTurn
# Expose both models in the Django admin with default ModelAdmin options.
admin.site.register(Player)
admin.site.register(PlayerTurn)
|
import csv
from collections import defaultdict, Counter
from pathlib import Path
from itertools import combinations
import pytest
class Solution:
    def fourSum(self, nums: list[int], target: int) -> list[list[int]]:
        """Return all unique quadruplets from ``nums`` summing to
        ``target`` (meet-in-the-middle over pair sums)."""
        counts: Counter = Counter(nums)
        # At most two copies of each value are needed to enumerate pairs;
        # the final multiplicity check below validates true counts.
        deduped = [val for val, cnt in counts.items() for _ in range(min(2, cnt))]
        pair_sums: defaultdict[int, set[tuple[int, int]]] = defaultdict(set)
        for a, b in combinations(deduped, 2):
            pair_sums[a + b].add((a, b) if a <= b else (b, a))
        quads = set()
        for left_sum, left_pairs in pair_sums.items():
            right_pairs = pair_sums.get(target - left_sum, ())
            for a, b in left_pairs:
                for c, d in right_pairs:
                    quad = tuple(sorted((a, b, c, d)))
                    # Reject quads that reuse a value more often than it
                    # occurs in the original input.
                    if all(counts[v] >= need for v, need in Counter(quad).items()):
                        quads.add(quad)
        return [list(quad) for quad in quads]
# Large fixture: first CSV row is the input array, second holds the target.
long_csv_path = Path(__file__).parent / "long.csv"
with long_csv_path.open("r") as fixture_fh:
    fixture_rows = csv.reader(fixture_fh)
    long_nums = [int(token) for token in next(fixture_rows)]
    long_target = int(next(fixture_rows)[0])
@pytest.mark.parametrize(
    "nums,target,expected",
    (
        ([1,0,-1,0,-2,2],0,[[-2,-1,1,2],[-2,0,0,2],[-1,0,0,1]]),
        ([2,2,2,2,2],8,[[2,2,2,2]]),
        (long_nums,long_target,[]),
    )
)
def test(nums, target, expected):
    """Result must contain exactly the expected quadruplets, any order."""
    actual = Solution().fourSum(nums, target)
    assert len(actual) == len(expected)
    for quad in expected:
        assert quad in actual
|
# Read two integers and report the larger one (or a tie).
numero1 = int(input("ingrese el primer numero: "))
numero2 = int(input("ingrese el 2 numero: "))
if numero1 > numero2:
    print("el numero mayor es: ", numero1)
elif numero2 > numero1:
    print("el numero mayor es: ", numero2)
else:
    print("no hay numero mayor, los numeros son iguales")
|
import os
import logging
import airflow
import pendulum
from airflow import DAG
from airflow.utils import dates as date
from datetime import timedelta, datetime
from airflow.models import BaseOperator, Pool
from airflow.operators.bash_operator import BashOperator
from airflow.utils.trigger_rule import TriggerRule
from airflow.operators.python_operator import PythonOperator
# Set timezone
# NOTE(review): LOCAL_TZ is defined but not passed to the DAG below —
# confirm whether the schedule is meant to run in Moscow time.
LOCAL_TZ = pendulum.timezone("Europe/Moscow")
# Batch processing module name
MODULE_NAME = 'sales-fs'
# Set dag id as module name + current filename
DAG_ID = MODULE_NAME + '__' + \
    os.path.basename(__file__).replace('.pyc', '').replace('.py', '')
# Default task arguments shared by every task in this DAG.
args = {
    'owner': 'Zhilyakov Mikhail',
    'depends_on_past': False,
    'email': ['Mihail.Zhiljakov_ext@leroymerlin.ru'],
    'email_on_failure': True,
    'email_on_retry': False,
    'retries': 10,
    'retry_delay': timedelta(minutes=15),
    'start_date': airflow.utils.dates.days_ago(1),
    'queue': MODULE_NAME,
    'concurrency': 10
}
# Daily at 06:20; catchup=True backfills missed runs, one run at a time.
load_tickets_dag = DAG(
    dag_id=DAG_ID,
    default_args=args,
    max_active_runs=1,
    schedule_interval="20 6 * * *",
    catchup=True,
    access_control={
        'sales-fs': {'can_dag_read', 'can_dag_edit'}
    }
)
def tickets_load(ds, **kwargs):
    """Reconcile raw ticket partitions into the ODS table in ClickHouse.

    For every sell_date partition whose row count differs between
    ``raw_tickets`` and ``ods_tickets`` (or which is missing from ODS),
    the ODS partition is dropped and re-inserted from RAW with
    aggregation; the RAW table is truncated afterwards.

    ``ds``/``kwargs`` are the standard Airflow task-context arguments.
    """
    # Imported lazily so the DAG file parses on schedulers without the plugin.
    from airflow_clickhouse_plugin import ClickHouseHook
    client = ClickHouseHook(clickhouse_conn_id='clickhouse_salesfs')
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    # ===================================================PARAMETERS====================================================
    database_name = 'salesfs'
    raw_table_name = 'raw_tickets'
    ods_table_name = 'ods_tickets'
    table_columns_comma_sep = 'store, checkout_number, ticket_number, sell_date, ticket_time, transaction_type, item, purchase_price, total_cost, total_cost_wo_tax, item_qty, total_margin, num_acp, num_cde, created_dttm, load_dttm'
    table_columns_select = '''
    store,
    checkout_number,
    ticket_number,
    sell_date,
    ticket_time,
    transaction_type,
    item,
    purchase_price,
    sum(total_cost) AS total_cost,
    sum(total_cost_wo_tax) AS total_cost_wo_tax,
    sum(item_qty) AS item_qty,
    sum(total_margin) AS total_margin,
    num_acp,
    num_cde,
    min(created_dttm) AS created_dttm,
    min(load_dttm) AS load_dttm
    '''
    table_columns_group_by = 'store, checkout_number, ticket_number, sell_date, ticket_time, transaction_type, item, purchase_price, num_acp, num_cde'
    business_dttm = 'sell_date'
    # ===================================================QUERY INIT====================================================
    GET_MIN_MAX_DATES_SQL = """
    SELECT
        toDate(min({business_dttm})),
        toDate(max({business_dttm}))
    FROM {database_name}.{raw_table_name}
    """
    COUNT_SQL = """
    SELECT toString({business_dttm}) AS partition_name, count(*)
    FROM {database_name}.{table_name}
    WHERE {business_dttm} BETWEEN toDateTime('{start_date}') AND toDateTime('{end_date}')
    GROUP BY {business_dttm}
    """
    DROP_PART_SQL = """ALTER TABLE {database_name}.{ods_table_name} DROP PARTITION '{key}'"""
    INSERT_SQL = """
    INSERT INTO {database_name}.{ods_table_name} ({table_columns_comma_sep})
    SELECT {table_columns_select}
    FROM {database_name}.{raw_table_name}
    WHERE {business_dttm} in toDateTime('{key}')
    GROUP BY {table_columns_group_by}
    """
    TRUNCATE_SQL = """TRUNCATE TABLE {database_name}.{table_name}"""
    # =================================================QUERY EXECUTION=================================================
    logger.info("{}, start".format(datetime.now()))
    # Date window of the data currently sitting in RAW.
    dates = client.run(GET_MIN_MAX_DATES_SQL.format(database_name=database_name,
                                                    raw_table_name=raw_table_name,
                                                    business_dttm=business_dttm))
    start_date = str(dates[0][0]) + ' 00:00:00'
    end_date = str(dates[0][1]) + ' 00:00:00'
    logger.info("{}, start_date: {}, end_date: {}".format(datetime.now(), start_date, end_date))
    # Per-partition row counts on both sides of the reconciliation.
    ods_count = dict(client.run(COUNT_SQL.format(database_name=database_name,
                                                 table_name=ods_table_name,
                                                 business_dttm=business_dttm,
                                                 start_date=start_date,
                                                 end_date=end_date)))
    raw_count = dict(client.run(COUNT_SQL.format(database_name=database_name,
                                                 table_name=raw_table_name,
                                                 business_dttm=business_dttm,
                                                 start_date=start_date,
                                                 end_date=end_date)))
    # Drop partitions whose counts already match; what remains in
    # raw_count is the reload set.  A KeyError on ods_count[key] means
    # the partition is new to ODS and must be loaded as well.
    keys = raw_count.copy().keys()
    for key in keys:
        try:
            if raw_count[key] == ods_count[key]:
                raw_count.pop(key)
                logger.info("{}, partition {} is ok".format(datetime.now(), key))
            else:
                logger.info("{}, partition {} to reload raw={}, ods={}".format(datetime.now(),
                                                                               key,
                                                                               raw_count[key],
                                                                               ods_count[key]))
        except Exception as e:
            logger.info("{}, partition {} is new".format(datetime.now(), key))
            continue
    logger.info("{}, reload {} partitions".format(datetime.now(), len(raw_count)))
    # Log the insert template once (with an empty key) for traceability.
    logger.info("QUERY: " + INSERT_SQL.format(database_name=database_name,
                                              ods_table_name=ods_table_name,
                                              table_columns_comma_sep=table_columns_comma_sep,
                                              raw_table_name=raw_table_name,
                                              business_dttm=business_dttm,
                                              key='',
                                              table_columns_select=table_columns_select,
                                              table_columns_group_by=table_columns_group_by))
    # Drop-and-reinsert each divergent partition from RAW.
    for key in raw_count:
        client.run(DROP_PART_SQL.format(database_name=database_name,
                                        ods_table_name=ods_table_name,
                                        key=key))
        logger.info("{}, partition {} is dropped".format(datetime.now(), key))
        client.run(INSERT_SQL.format(database_name=database_name,
                                     ods_table_name=ods_table_name,
                                     table_columns_comma_sep=table_columns_comma_sep,
                                     raw_table_name=raw_table_name,
                                     business_dttm=business_dttm,
                                     key=key,
                                     table_columns_select=table_columns_select,
                                     table_columns_group_by=table_columns_group_by))
        logger.info("{}, partition {} is uploaded".format(datetime.now(), key))
    logger.info("{0}, truncate {1}.{2}".format(datetime.now(), database_name, raw_table_name))
    client.run(TRUNCATE_SQL.format(database_name=database_name,
                                   table_name=raw_table_name))
    logger.info("{}, end".format(datetime.now()))
# Single task in the DAG: run tickets_load with the Airflow context.
load_tickets = PythonOperator(
    task_id="load_tickets", python_callable=tickets_load, provide_context=True, dag=load_tickets_dag
)
# Trivial dependency expression (a single-task chain).
load_tickets
import json
import pytest
from rest_framework import status
from sme_uniforme_apps.core.models import Uniforme
pytestmark = pytest.mark.django_db
def test_uniformes_api_get_categorias(client, uniforme_meias, uniforme_tenis):
    """GET /uniformes/categorias/ must list every category (in model
    declaration order) with its uniform items nested; the two fixtures
    supply one sock item and one sneaker item."""
    response = client.get('/uniformes/categorias/', content_type='application/json')
    result = json.loads(response.content)
    # Categories without fixture items come back with empty lists.
    esperado = [
        {
            'id': Uniforme.CATEGORIA_MALHARIA,
            'nome': Uniforme.CATEGORIA_NOMES[Uniforme.CATEGORIA_MALHARIA],
            'uniformes': [],
        },
        {
            'id': Uniforme.CATEGORIA_CALCADO,
            'nome': Uniforme.CATEGORIA_NOMES[Uniforme.CATEGORIA_CALCADO],
            'uniformes': []
        },
        {
            'id': Uniforme.CATEGORIA_KIT_VERAO,
            'nome': Uniforme.CATEGORIA_NOMES[Uniforme.CATEGORIA_KIT_VERAO],
            'uniformes': [{'descricao': 'Meias (5 pares)',
                           'id': uniforme_meias.id,
                           'nome': 'Meias',
                           'quantidade': 5,
                           'unidade': 'PAR'}],
        },
        {
            'id': Uniforme.CATEGORIA_KIT_INVERNO,
            'nome': Uniforme.CATEGORIA_NOMES[Uniforme.CATEGORIA_KIT_INVERNO],
            'uniformes': [{'descricao': 'Tenis (1 par)',
                           'id': uniforme_tenis.id,
                           'nome': 'Tenis',
                           'quantidade': 1,
                           'unidade': 'PAR'}]
        },
    ]
    assert response.status_code == status.HTTP_200_OK
    assert result == esperado
|
import AuthenticationServices
from PyObjCTools.TestSupport import TestCase, min_os_level, min_sdk_level
class TestASAuthorizationProvider(TestCase):
    """Validates the PyObjC bridge metadata for ASAuthorizationProvider."""
    @min_sdk_level("10.15")
    def test_protocols(self):
        # The Objective-C protocol must be exposed by the framework bindings.
        self.assertProtocolExists("ASAuthorizationProvider")
    @min_os_level("10.15")
    def test_methods10_15(self):
        # Argument 0 must be a block with signature "vZ@"
        # (void return, BOOL + object parameters).
        self.assertArgIsBlock(
            AuthenticationServices.ASAuthorizationProviderExtensionAuthorizationRequest.presentAuthorizationViewControllerWithCompletion_,  # noqa: B950
            0,
            b"vZ@",
        )
|
#!/usr/bin/python
from setuptools import setup
# Runtime dependencies are maintained in requirements.txt, one per line.
install_requires = [i.strip() for i in open("requirements.txt").readlines()]
# Console scripts created on installation (name = module:callable).
entry_points = """
[console_scripts]
manage = manage:manager.run
webapp = infopub.webapp:main
"""
setup(
    name="cn486",
    version="1.0",
    url='http://www.jcing.com',
    license='Private',
    description="cn486 web site",
    long_description=open('readme.md').read(),
    author='wuxqing',
    author_email='wuxqing@gmail.com',
    include_package_data=True,
    package_dir={'': 'src'},  # packages live under src/
    install_requires=install_requires,
    entry_points=entry_points,
    platforms=['Linux']
)
|
import torch
import numpy as np
import torch.nn.functional as F
from collections import OrderedDict
from torchvision.models import alexnet
from torch.autograd import Variable
from torch import nn
from .config import config
# from pose.models.pose_resnet import get_pose_net
from .cpm import CPM
class SiameseAlexNet(nn.Module):
    """SiamFC-style tracker whose exemplar/instance embeddings are fused
    with pose features from a CPM network before cross-correlation.
    """

    def __init__(self, gpu_id, train=True):
        super(SiameseAlexNet, self).__init__()
        self.PoseNet = CPM(1)
        # Spatial sizes the pose features are resampled to so they can be
        # added element-wise to the tracking features.
        self.exemplar_size = (8, 8)
        self.instance_size = (24, 24)
        self.multi_instance_size = [(24, 24), (22, 22)]
        # AlexNet-like embedding backbone.
        self.features = nn.Sequential(
            nn.Conv2d(3, 96, 11, 2),
            nn.BatchNorm2d(96),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(3, 2),
            nn.Conv2d(96, 256, 5, 1, groups=2),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(3, 2),
            nn.Conv2d(256, 384, 3, 1),
            nn.BatchNorm2d(384),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 384, 3, 1, groups=2),
            nn.BatchNorm2d(384),
            nn.ReLU(inplace=True))
        self.conv_pose = nn.Conv2d(2048, 384, 3, 1, padding=1)
        self.conv_final = nn.Conv2d(384, 256, 3, 1, groups=2)
        self.conv_cpm_pose = nn.Conv2d(128, 384, 3, 1, padding=1)
        self.corr_bias = nn.Parameter(torch.zeros(1))
        if train:
            # Precompute response-map ground truth and pixel weights for
            # the training and validation response sizes.
            gt, weight = self._create_gt_mask((config.train_response_sz, config.train_response_sz), mode='train')
            with torch.cuda.device(gpu_id):
                self.train_gt = torch.from_numpy(gt).cuda()
                self.train_weight = torch.from_numpy(weight).cuda()
            gt, weight = self._create_gt_mask((config.response_sz, config.response_sz), mode='valid')
            with torch.cuda.device(gpu_id):
                self.valid_gt = torch.from_numpy(gt).cuda()
                self.valid_weight = torch.from_numpy(weight).cuda()
        self.exemplar = None

    def init_models(self, pose_model_file = '/export/home/cyh/modify/openpose_model/cpm.pth', track_model_file = '/export/home/cyh/modify/SiamFC/models/siamfc_pretrained.pth'):
        """Load the CPM and SiamFC checkpoints and freeze the pose net.

        BUG FIX: the original fell back to an undefined ``cfg`` when
        ``pose_model_file`` was None (NameError) and logged the pose
        file name while loading the tracking checkpoint.
        """
        # Loading the Pose model
        print("OpenPose: Loading checkpoint from %s" % (pose_model_file))
        checkpoint = torch.load(pose_model_file)
        model_dict = self.PoseNet.state_dict()
        new_state_dict = OrderedDict()
        # Strip the 'module.' prefix added by DataParallel, keep only
        # keys known to the model.
        for k, v in checkpoint.items():
            new_name = k[7:] if 'module' in k else k
            if new_name in model_dict:
                new_state_dict[new_name] = v
        model_dict.update(new_state_dict)
        self.PoseNet.load_state_dict(model_dict)
        print('OpenPose: OpenPose network has been initilized')
        print('-----------------------------------------------------------')
        # Loading the Pretrained track model
        print("Tracking: Loading checkpoint from %s" % (track_model_file))
        checkpoint = torch.load(track_model_file)
        model_dict = self.features.state_dict()
        new_state_dict = OrderedDict()
        for k, v in checkpoint.items():
            new_name = k[9:] if 'feature' in k else k
            if new_name in model_dict:
                new_state_dict[new_name] = v
        model_dict.update(new_state_dict)
        self.features.load_state_dict(model_dict)
        print('Tracking: Tracking network has been initilized')
        print('-----------------------------------------------------------')
        # The pose branch stays frozen during tracker training.
        for p in self.PoseNet.parameters():
            p.requires_grad = False

    def set_bn_fix(self):
        """Put every BatchNorm layer of the pose net into eval mode so
        its running statistics are not updated."""
        def set_bn_eval(m):
            classname = m.__class__.__name__
            if classname.find('BatchNorm') != -1:
                m.eval()
        self.PoseNet.apply(set_bn_eval)

    def forward(self, x=(None, None), y=(None, None), feature=None):
        """Compute response maps (or embeddings) with pose fusion.

        Args:
            x: (exemplar, instance) tracking crops; either may be None.
            y: (exemplar_pose, instance_pose) pose crops matching x.
            feature: precomputed exemplar embedding; when given, only
                the instance branch runs against it.

        Returns:
            Training: response map(s) scaled by config.response_scale
            plus the learned bias.  Inference: raw response maps, or the
            exemplar embedding when only the exemplar is supplied.
        """
        exemplar, instance = x  # tracking input
        exemplar_pose, instance_pose = y  # pose input
        if feature is None:
            if exemplar is not None and instance is not None:
                # Training pass: embed both branches, fuse pose features
                # and cross-correlate each exemplar/instance pair.
                exemplar_pose_feature = self.conv_cpm_pose(self.PoseNet(exemplar_pose))
                instance_pose_feature = self.conv_cpm_pose(self.PoseNet(instance_pose))
                exemplar_pose_feature = F.upsample(exemplar_pose_feature, size=self.exemplar_size, mode='bilinear')
                instance_pose_feature = F.upsample(instance_pose_feature, size=self.instance_size, mode='bilinear')
                temp_exemplar = self.features(exemplar)
                temp_instance = self.features(instance)
                instance = self.conv_final(temp_instance + instance_pose_feature)
                exemplar = self.conv_final(temp_exemplar + exemplar_pose_feature)
                score_map = []
                N, C, H, W = instance.shape
                if N > 1:
                    for i in range(N):
                        score = F.conv2d(instance[i:i+1], exemplar[i:i+1]) * config.response_scale + self.corr_bias
                        score_map.append(score)
                    return torch.cat(score_map, dim=0)
                else:
                    # BUG FIX: referenced undefined ``self.bias`` (crash
                    # for batch size 1); the parameter is ``corr_bias``.
                    return F.conv2d(instance, exemplar) * config.response_scale + self.corr_bias
            elif exemplar is not None and instance is None:
                # Inference: embed and return the exemplar only.
                exemplar_pose_feature = self.conv_cpm_pose(self.PoseNet(exemplar_pose))
                exemplar_pose_feature = F.upsample(exemplar_pose_feature, size=self.exemplar_size, mode='bilinear')
                exemplar = self.conv_final(self.features(exemplar) + exemplar_pose_feature)
                return exemplar
            else:
                # Inference: correlate instance(s) against the stored
                # exemplar; no response scaling or bias here.
                instance_pose_feature = self.conv_cpm_pose(self.PoseNet(instance_pose))
                instance_pose_feature = F.upsample(instance_pose_feature, size=self.instance_size, mode='bilinear')
                instance = self.conv_final(self.features(instance) + instance_pose_feature)
                score_map = []
                for i in range(instance.shape[0]):
                    score_map.append(F.conv2d(instance[i:i+1], self.exemplar))
                return torch.cat(score_map, dim=0)
        else:
            self.exemplar = feature
            N, C, H, W = instance.shape
            # Pick the pose-feature size matching the search crop size.
            if H == 255:
                instance_flag = 0
            elif H == 239:
                instance_flag = 1
            else:
                # BUG FIX: instance_flag was left undefined for any other
                # size, producing a NameError two lines later.
                raise ValueError("unsupported instance height: %d" % H)
            instance_pose_feature = self.conv_cpm_pose(self.PoseNet(instance_pose))
            instance_pose_feature = F.upsample(instance_pose_feature, size=self.multi_instance_size[instance_flag], mode='bilinear')
            instance = self.conv_final(self.features(instance) + instance_pose_feature)
            score_map = []
            for i in range(instance.shape[0]):
                score_map.append(F.conv2d(instance[i:i+1], self.exemplar))
            return torch.cat(score_map, dim=0)

    def loss(self, pred):
        """Unweighted BCE-with-logits loss against ``self.gt``.

        NOTE(review): ``self.gt`` is never assigned in this class —
        presumably set externally; confirm before using this method.
        """
        return F.binary_cross_entropy_with_logits(pred, self.gt)

    def weighted_loss(self, pred):
        """Pixel-weighted BCE-with-logits loss, normalized by batch size;
        picks the train or valid ground truth from ``self.training``."""
        if self.training:
            return F.binary_cross_entropy_with_logits(pred, self.train_gt,
                    self.train_weight, size_average=False) / config.train_batch_size  # normalize the batch_size
        else:
            return F.binary_cross_entropy_with_logits(pred, self.valid_gt,
                    self.valid_weight, size_average=False) / config.valid_batch_size  # normalize the batch_size

    def _create_gt_mask(self, shape, mode='train'):
        """Build the response-map ground truth and class-balancing
        weights: 1 inside config.radius (in stride units) of the center,
        0 elsewhere; weights split 50/50 between the two classes.
        The mask is the same for all pairs, so it is repeated along the
        batch dimension."""
        h, w = shape
        y = np.arange(h, dtype=np.float32) - (h-1) / 2.
        x = np.arange(w, dtype=np.float32) - (w-1) / 2.
        y, x = np.meshgrid(y, x)
        dist = np.sqrt(x**2 + y**2)
        mask = np.zeros((h, w))
        mask[dist <= config.radius / config.total_stride] = 1
        mask = mask[np.newaxis, :, :]
        weights = np.ones_like(mask)
        weights[mask == 1] = 0.5 / np.sum(mask == 1)
        weights[mask == 0] = 0.5 / np.sum(mask == 0)
        if mode == 'train':
            mask = np.repeat(mask, config.train_batch_size, axis=0)[:, np.newaxis, :, :]
        elif mode == 'valid':
            mask = np.repeat(mask, config.valid_batch_size, axis=0)[:, np.newaxis, :, :]
        return mask.astype(np.float32), weights.astype(np.float32)
|
import sys
import csv
from collections import defaultdict
def main(non_canonical_final_table, final_table, SJ_introns, reads_tags):
 # Merge read/sequence "tag" evidence into the final intron table and print
 # the updated table (space-separated) to stdout.
 # NOTE: Python 2 script (print statements, dict.has_key).
 csv.field_size_limit(1000000000)
 reader1 = csv.reader(open(non_canonical_final_table), delimiter = ' ')
 reader2 = csv.reader(open(final_table), delimiter = ' ')
 reader3 = csv.reader(open(SJ_introns), delimiter = ' ')
 reader4 = csv.reader(open(reads_tags), delimiter = ' ')
 # intron ID -> (intron, chr, strand, start, end, length, dinucleotide, type, score)
 Total_intron_info = {}
 # read ID -> read sequence, only for reads over non-canonical junctions
 non_can_reads_seq = {}
 # intron ID -> supporting read IDs / read sequences from the tags file
 introns_reads_tags = defaultdict(set)
 introns_seq_tags = defaultdict(set)
 data_set_introns = set([])
 # Pass 1: index the non-canonical intron annotation by intron ID.
 for row in reader1:
  intron = row[0]
  chr = intron.split(":")[0]
  strand = ""
  if "+" in intron:
   strand = "+"
  elif "-" in intron:
   strand = "-"
  # Intron IDs are encoded as "chr:start<strand>end".
  istart = intron.split(":")[1].split(strand)[0]
  iend = intron.split(":")[1].split(strand)[1]
  ilength = row[5]
  dn = row[6]
  dn_type = row[7]
  dn_score = row[8]
  Total_intron_info[intron] = (intron, chr, strand, istart, iend, ilength, dn, dn_type, dn_score)
 # Pass 2: record the sequence of every read spanning a non-canonical
 # (neither GT-AG, GC-AG nor AT-AC) splice junction.
 for row in reader3:
  read = row[0]
  chr = row[1]
  istart = int(row[2])
  iend = int(row[3])
  strand = row[4]
  ilen = int(row[5])
  intron = row[6]
  dn = row[7]
  start = int(row[8])
  cigar = row[9]
  e5s = int(row[10])
  e5e = int(row[11])
  e3s = int(row[12])
  e3e = int(row[13])
  seq = row[14]
  end = e3e
  if dn!="GTAG" and dn!="GCAG" and dn!="ATAC":
   non_can_reads_seq[read] = seq
 # Pass 3: collect the extra read/sequence support ("tags") per intron.
 for row in reader4:
  read = row[0]
  seq = row[1]
  qual = row[2]
  intron = row[3]
  up_anchor = int(row[4])
  down_anchor = int(row[5])
  introns_reads_tags[intron].add(read)
  introns_seq_tags[intron].add(seq)
 # Pass 4: re-emit the final table, augmenting non-canonical introns that
 # gained additional tag support.
 for row in reader2:
  intron = row[0]
  coverage = row[1]
  chr = row[2]
  strand = row[3]
  istart = row[4]
  iend = row[5]
  ilength = row[6]
  dn = row[7]
  dn_type = row[8]
  dn_score = row[9]
  reads = set(row[10].split(","))
  data_set_introns.add(intron)
  if dn=="GTAG" or dn=="GCAG" or dn=="ATAC":
   print " ".join(row)
  elif introns_reads_tags.has_key(intron): #Introns already in the final table whose read support grew: merge old and new evidence
   # Coverage is recomputed as the number of *distinct sequences*, not reads.
   seq_coverage = set([])
   for read in reads:
    seq = non_can_reads_seq[read]
    seq_coverage.add(seq)
   reads_tags = introns_reads_tags[intron]
   seq_tags = introns_seq_tags[intron]
   reads = reads | reads_tags
   seq_coverage = seq_coverage | seq_tags
   coverage = len(seq_coverage)
   print intron, coverage, chr, strand, istart, iend, ilength, dn, dn_type, dn_score, ",".join(reads)
  else:
   print " ".join(row)
 # Finally, emit introns that were only seen in the tags file (absent from
 # the final table), using the annotation indexed in pass 1.
 for row in introns_reads_tags.items():
  intron = row[0]
  reads = row[1]
  seq_coverage = introns_seq_tags[intron]
  if intron in data_set_introns:
   pass
  else:
   coverage = len(seq_coverage)
   chr = Total_intron_info[intron][1]
   strand = Total_intron_info[intron][2]
   istart = Total_intron_info[intron][3]
   iend = Total_intron_info[intron][4]
   ilength = Total_intron_info[intron][5]
   dn = Total_intron_info[intron][6]
   dn_type = Total_intron_info[intron][7]
   dn_score = Total_intron_info[intron][8]
   print intron, coverage, chr, strand, istart, iend, ilength, dn, dn_type, dn_score, ",".join(reads)
if __name__ == '__main__':
    # CLI: non_canonical_final_table final_table SJ_introns reads_tags
    main(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])
|
import matplotlib.pyplot as plt
import numpy as np
import os
import csv
from collections import namedtuple
# NOTE(review): os.system("") is commonly used on Windows to enable ANSI
# escape-code processing in the console — presumably for the colored
# success message printed at the end; confirm on the target platform.
os.system("")
# Reset matplotlib to its default style before drawing the table figure.
plt.rcdefaults()
# Output-image name template, formatted with the detected C++ version.
filename = 'benchmarks-iterators-{}.png'
def get_cpu_caches(bench_file):
    """Extract the CPU-cache description lines and locate the CSV header row.

    Returns (cache_lines, header_index): cache_lines is a newline-joined
    string built from the single-column rows that mention an L0..L4 cache,
    and header_index is the index of the (last) row whose first cell is
    'name', or None if no such row exists.
    """
    cpu_caches = ('L0', 'L1', 'L2', 'L3', 'L4')
    benchmark_names = '\n'.join(row[0].strip() for row in bench_file if len(row) == 1 and
                                any(cache in row[0] for cache in cpu_caches))
    index_row_name = None
    # Guard `row` before indexing: blank lines in the CSV come through as
    # empty rows and the original `bench_file[i][0]` raised IndexError.
    for i, row in enumerate(bench_file):
        if row and row[0] == 'name':
            index_row_name = i
    return benchmark_names, index_row_name
def get_benchmark_names_and_time(bench_file, name_idx):
    """Collect (name, real_time) records from row *name_idx* to the end.

    The time unit is taken from column 4 of the first record row.
    """
    BenchmarkRecord = namedtuple('BenchmarkRecord', 'name real_time')
    bench_unit = bench_file[name_idx][4]
    records = [BenchmarkRecord(entry[0], float(entry[2]))
               for entry in bench_file[name_idx:]]
    return records, bench_unit
def make_table(benchmark_records, unit, cxx_version, n_iterations, title_iterations):
    """Render the benchmark results as a table image and save it as a PNG.

    NOTE(review): `unit` and `title_iterations` are unused here — the cell
    text hard-codes 'ns'; confirm before removing them from the signature.
    """
    fig_border = 'steelblue'
    # Fastest benchmark first; relative speed is computed against it.
    benchmark_records = sorted(benchmark_records, key=lambda e: e.real_time)
    fastest = benchmark_records[0].real_time
    data = [[record.name, record.real_time, round(record.real_time / fastest, 2)] for record in benchmark_records]
    column_headers = ["Time", "Relative speed"]
    # Benchmark names become the row labels.
    row_headers = [x.pop(0) for x in data]
    cell_text = []
    for row in data:
        # Per-iteration time: total real_time divided by the iteration count.
        cell_text.append([f'{round(row[0] / n_iterations, 4)} ns', f'{row[1]} x'])
    row_colors = plt.cm.BuPu(np.full(len(row_headers), 0.1))
    col_colors = plt.cm.BuPu(np.full(len(column_headers), 0.1))
    plt.figure(linewidth=2,
               edgecolor=fig_border)
    the_table = plt.table(cellText=cell_text,
                          rowLabels=row_headers,
                          rowColours=row_colors,
                          colColours=col_colors,
                          cellLoc='center',
                          colLabels=column_headers,
                          loc='center')
    the_table.scale(1.5, 1.5)
    # Hide both axes so only the table itself is visible.
    ax = plt.gca()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    plt.box(on=None)
    plt.figtext(.90, 0.05, None, horizontalalignment='center', size=8, weight='light')
    fig = plt.gcf()
    plt.savefig(f'benchmarks-iterators-{cxx_version}.png',
                bbox_inches='tight',
                edgecolor=fig.get_edgecolor(),
                facecolor=fig.get_facecolor(),
                dpi=200)
    # Clear the current figure so repeated calls start from a clean slate.
    plt.clf()
def get_cmake_cxx_version():
    """Return 'C++NN' parsed from CMAKE_CXX_STANDARD in ./CMakeLists.txt.

    Returns bare 'C++' if the setting is absent.  The original opened the
    file without closing it; a `with` block guarantees release.
    """
    cxx_version = "C++"
    cmake_cxx_string = 'CMAKE_CXX_STANDARD'
    with open('CMakeLists.txt', 'r') as cmake_file:
        for line in cmake_file:
            cmake_cxx_string_index = line.find(cmake_cxx_string)
            if cmake_cxx_string_index == -1:
                continue
            # Skip the keyword and the separating space, read up to ')':
            # e.g. "set(CMAKE_CXX_STANDARD 17)" -> "17".
            reading_offset = len(cmake_cxx_string) + cmake_cxx_string_index + 1
            cxx_version += line[reading_offset: line.index(')', reading_offset)]
            break
    return cxx_version
def main():
    """Load the benchmark CSV, render the results table, report success."""
    # CSV produced by Google Benchmark inside the release build directory.
    benchmark_file_path = os.path.join(os.getcwd(), 'cmake-build-release-mingw', 'benchmark-iterators.csv')
    cxx_version = get_cmake_cxx_version()
    with open(benchmark_file_path) as benchmarks_file:
        benchmark_csv_list = list(csv.reader(benchmarks_file, delimiter=','))
    # NOTE(review): `caches` is unused after this call — presumably kept for
    # a future caption; confirm before removing.
    caches, name_index = get_cpu_caches(benchmark_csv_list)
    # name_index + 1 skips the CSV header row itself.
    bench_records, unit = get_benchmark_names_and_time(benchmark_csv_list, name_index + 1)
    # assumes every benchmark ran 32 iterations — TODO confirm against the binary
    iterations = 32
    title_iterations = 1
    make_table(bench_records, unit, cxx_version, iterations, title_iterations)
    green = '\033[32m'  # ANSI escape: green text
    print(f'{green}Successfully created {filename.format(cxx_version)}')
# Script entry point.
if __name__ == '__main__':
    main()
|
# Count how many values in a fixed series are even and how many are odd.
numbers = (1, 2, 3, 4, 5, 6, 7, 8, 9)
# Tally the evens in one pass; the odds are simply the remainder.
count_even = sum(1 for x in numbers if x % 2 == 0)
count_odd = len(numbers) - count_even
# Report both tallies.
print("Number of even numbers :", count_even)
print("Number of odd numbers :", count_odd)
|
#!/usr/bin/python3
import json
def save_to_json_file(my_obj, filename):
    """Serialize *my_obj* to *filename* as JSON text.

    Raises:
        TypeError: if *filename* is not a string.
    """
    if not isinstance(filename, str):
        raise TypeError("filename must be a string")
    with open(filename, "w", encoding="utf-8") as out_file:
        json.dump(my_obj, out_file)
|
"""
Gene Analysis project
"""
# import file_in
# import modify_dataframes
# import Visualization.heatmap as heatmap
# import Visualization.volcano as volcano
# import matplotlib.pyplot as plt
# import os
# import ntpath
# def make_a_heatmap():
# dfs = list()
# paths = [os.path.join("/Users/coltongarelli/Dropbox/toptables/scle_toptable_abs-logfc-1.csv"),
# os.path.join("/Users/coltongarelli/Dropbox/toptables/acle_toptable_abs-logfc-1.csv"),
# os.path.join("/Users/coltongarelli/Dropbox/toptables/dle_toptable_abs-logfc-1.csv")]
#
# for i in paths:
# cols = file_in.check_col_names(i)
# df = file_in.read_csv_data(i, cols)
# # df = file_in.make_unique_index(df)
# # drop duplicate values
# df = modify_dataframes.remove_duplicate_indicies(df)
# # remove unneeded data
# df = file_in.strip_data(df, cols.copy())
# dfs.append(df)
# counter = 0
#
# for i in dfs:
# new_name = 'logfc_' + str(counter)
# # give columns a unique name
# i.rename(columns={cols['logfc']: new_name}, inplace=True)
# counter += 1
# # pop pvalue
# modify_dataframes.drop_cols(i, cols['pval'])
# final_df = modify_dataframes.find_common_genes(dfs)
# hm = heatmap.sns_clustermap(df=final_df)
# # heatmap.sns_heatmap(final_df)
# hm.fig.show()
#
# def volcano_plotly():
# paths = [os.path.join("/Users/coltongarelli/Dropbox/toptables/scle_toptable_abs-logfc-1.csv")]
# for i in paths:
# cols = file_in.check_col_names(i)
# df = file_in.read_csv_data(i, cols)
# # df = file_in.make_unique_index(df)
# # drop duplicate values
# df = modify_dataframes.remove_duplicate_indicies(df)
# # remove unneeded data
# df = file_in.strip_data(df, cols.copy())
#
# volcano.plotly_volcano(df, cols['pval'])
if __name__ == '__main__':
    import os
    import pprint
    # Debug aid: dump the interpreter's PATH and environment values.
    pprint.pprint(os.getenv('PATH'))
    # NOTE(review): environ.values() prints values without their keys —
    # presumably just a quick inspection; use dict(os.environ) for pairs.
    pprint.pprint(os.environ.values())
    # '/Library/Frameworks/Python.framework/Versions/3.7/bin:/Users/coltongarelli/anaconda3/envs/GeneAnalysis/bin:/Users/coltongarelli/anaconda3/condabin:/Library/Frameworks/Python.framework/Versions/3.6/bin:/Library/Frameworks/Python.framework/Versions/3.6/bin:/Library/Frameworks/Python.framework/Versions/3.6/bin:/Users/coltongarelli/anaconda3/bin:/Library/Frameworks/Python.framework/Versions/3.5/bin:/Library/Frameworks/Python.framework/Versions/3.5/bin:/Library/Frameworks/Python.framework/Versions/3.5/bin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:/opt/X11/bin'
|
def insertion_sort(data):
    """Sort *data* in place into DESCENDING order.

    Insertion sort by adjacent swaps: each element is bubbled leftward
    past every smaller predecessor (the `<` comparison orders
    largest-first).
    """
    for idx in range(1, len(data)):
        current = data[idx]
        pos = idx - 1
        # Swap the current element leftward while its neighbour is smaller.
        while pos >= 0 and data[pos] < current:
            data[pos], data[pos + 1] = data[pos + 1], data[pos]
            pos -= 1
# Demo: sort a sample list in place (into descending order, per the `<`
# comparison inside insertion_sort).
data_list = [10, 50, 40, 60, 20]
insertion_sort(data_list)
print(data_list) |
import json
def add(event, context):
    """Lambda-style handler: sum 'value1' and 'value2' from the JSON body.

    Returns a response dict with statusCode 200 and a JSON object body
    {"sum": <total>}.  The original concatenated a single-quoted pseudo-JSON
    string and json.dumps'ed it, producing a double-encoded, invalid body
    (and shadowed the builtin `sum` along the way).
    """
    payload = json.loads(event['body'])
    total = payload['value1'] + payload['value2']
    result = json.dumps({'sum': total})
    return { "statusCode": 200, "body": result }
|
from unittest import TestCase
from nose.plugins.attrib import attr
from shiftschema.result import Error, Result
from shiftschema import exceptions as x
from pprint import pprint as pp
@attr('result', 'result')
class ResultTest(TestCase):
    """Tests for shiftschema's Result: error storage, merging, translation.

    Uses assertEqual throughout — assertEquals is a deprecated alias that
    emits DeprecationWarning on modern Python.
    """
    # --------------------------------------------------------------------------
    # basics
    # --------------------------------------------------------------------------
    def test_create_result(self):
        """ Creating result object """
        msgs = {'what?': 'error'}
        result = Result(msgs)
        self.assertIsInstance(result, Result)
        self.assertEqual(msgs, result.errors)
    def test_empty_result_is_true(self):
        """ result without errors evaluates to True"""
        result = Result()
        self.assertTrue(result)
        self.assertTrue(result == True)
        self.assertTrue(result != False)
    def test_result_with_errors_is_false(self):
        """ Result with errors evaluates to False """
        result = Result({'what': 'error'})
        self.assertFalse(result)
        self.assertTrue(result == False)
        self.assertTrue(result != True)
    # --------------------------------------------------------------------------
    # state validation errors
    # --------------------------------------------------------------------------
    def test_raise_on_adding_bad_state_errors(self):
        """ Raise on adding bad state errors """
        result = Result()
        with self.assertRaises(x.InvalidErrorType):
            result.add_state_errors('Err')
        with self.assertRaises(x.InvalidErrorType):
            result.add_state_errors(['Err', 'Err'])
    def test_add_single_state_error(self):
        """ Adding state errors to result """
        error = Error('error1')
        result = Result()
        result.add_state_errors(error)
        self.assertEqual(1, len(result.errors['__state__']))
        self.assertIn(error, result.errors['__state__'])
    def test_add_multiple_state_errors(self):
        """ Adding multiple state errors to result """
        errors = [Error('error1'), Error('error2')]
        result = Result()
        result.add_state_errors(errors)
        self.assertEqual(2, len(result.errors['__state__']))
    def test_append_state_errors(self):
        """ Appending state errors """
        e1 = Error('error 1')
        e2 = Error('error 2')
        e3 = Error('error 3')
        e4 = Error('error 4')
        result = Result()
        result.add_state_errors([e1, e2])
        result.add_state_errors([e3, e4])
        self.assertIn(e1, result.errors['__state__'])
        self.assertIn(e2, result.errors['__state__'])
        self.assertIn(e3, result.errors['__state__'])
        self.assertIn(e4, result.errors['__state__'])
    # --------------------------------------------------------------------------
    # simple property errors
    # --------------------------------------------------------------------------
    def test_raise_on_adding_bad_errors(self):
        """ Errors are type-checked before adding to result """
        result = Result()
        with self.assertRaises(x.InvalidErrorType):
            result.add_errors(errors='Err', property_name='prop')
        with self.assertRaises(x.InvalidErrorType):
            result.add_errors(errors=['Err', 'Err'], property_name='prop')
    def test_add_single_error(self):
        """ Adding single error to Result """
        result = Result()
        result.add_errors(errors=Error('message'), property_name='property')
        self.assertTrue('property' in result.errors)
    def test_add_multiple_errors(self):
        """ Adding multiple errors to result """
        errors = [Error('error1'), Error('error2')]
        result = Result()
        result.add_errors('property', errors)
        self.assertEqual(2, len(result.errors['property']))
    def test_append_simple_property_errors(self):
        """ Appending simple property errors """
        e1 = Error('error 1')
        e2 = Error('error 2')
        e3 = Error('error 3')
        e4 = Error('error 4')
        result = Result()
        result.add_errors('prop', [e1, e2])
        result.add_errors('prop', [e3, e4])
        self.assertIn(e1, result.errors['prop'])
        self.assertIn(e2, result.errors['prop'])
        self.assertIn(e3, result.errors['prop'])
        self.assertIn(e4, result.errors['prop'])
    # --------------------------------------------------------------------------
    # nested entity property errors
    # --------------------------------------------------------------------------
    def test_add_single_direct_error_to_nested_entity_errors(self):
        """ Adding single direct error to nested entity errors """
        error = Error('Direct entity error')
        result = Result()
        result.add_entity_errors('entity', direct_errors=error)
        self.assertIn('entity', result.errors)
        self.assertIn(error, result.errors['entity']['direct'])
    def test_add_multiple_direct_errors_to_nested_entity_errors(self):
        """ Adding multiple direct errors to nested entity errors """
        error1 = Error('Direct entity error 1')
        error2 = Error('Direct entity error 2')
        result = Result()
        result.add_entity_errors('entity', direct_errors=[error1, error2])
        self.assertIn('entity', result.errors)
        self.assertIn(error1, result.errors['entity']['direct'])
        self.assertIn(error2, result.errors['entity']['direct'])
    def test_raise_on_adding_bad_direct_errors_to_nested_entity_errors(self):
        """ Typecheck direct entity errors """
        result = Result()
        with self.assertRaises(x.InvalidErrorType):
            result.add_entity_errors('entity', direct_errors='Bad')
        with self.assertRaises(x.InvalidErrorType):
            result.add_entity_errors('entity', direct_errors=['Bad'])
    def test_append_direct_errors_to_nested_entity_errors(self):
        """ Appending direct errors to nested entity errors """
        e1 = Error('error 1')
        e2 = Error('error 2')
        e3 = Error('error 3')
        e4 = Error('error 4')
        result = Result()
        result.add_entity_errors('entity_property', direct_errors=[e1, e2])
        result.add_entity_errors('entity_property', direct_errors=[e3, e4])
        self.assertIn(e1, result.errors['entity_property']['direct'])
        self.assertIn(e2, result.errors['entity_property']['direct'])
        self.assertIn(e3, result.errors['entity_property']['direct'])
        self.assertIn(e4, result.errors['entity_property']['direct'])
    def test_add_schema_errors_to_nested_entity_property(self):
        """ Adding schema results to nested entity property"""
        e1 = Error('error 1')
        schema_errors = Result()
        schema_errors.add_errors('simple_prop', e1)
        result = Result()
        result.add_entity_errors('entity_prop', schema_errors=schema_errors)
        self.assertIn(e1, result.errors['entity_prop']['schema']['simple_prop'])
    def test_append_schema_errors_to_nested_entity_erors(self):
        """ Appending schema errors to nested entity errors """
        e1 = Error('error 1')
        e2 = Error('error 2')
        e3 = Error('error 3')
        e4 = Error('error 4')
        schema_errors1 = Result()
        schema_errors1.add_errors('simple_prop', [e1, e2])
        schema_errors2 = Result()
        schema_errors2.add_errors('simple_prop', [e3, e4])
        result = Result()
        result.add_entity_errors('entity_prop', schema_errors=schema_errors1)
        result.add_entity_errors('entity_prop', schema_errors=schema_errors2)
        self.assertIn(e1, result.errors['entity_prop']['schema']['simple_prop'])
        self.assertIn(e2, result.errors['entity_prop']['schema']['simple_prop'])
        self.assertIn(e3, result.errors['entity_prop']['schema']['simple_prop'])
        self.assertIn(e4, result.errors['entity_prop']['schema']['simple_prop'])
    def test_result_is_valid_when_nested_entity_valid(self):
        """ If nested entity is valid, result also valid """
        schema_errors = Result()
        result = Result()
        result.add_entity_errors('entity', schema_errors=schema_errors)
        self.assertTrue(result)
    # --------------------------------------------------------------------------
    # collection property errors
    # --------------------------------------------------------------------------
    def test_add_single_direct_error_to_nested_collection_errors(self):
        """ Adding single direct error to nested collection errors"""
        e1 = Error('error 1')
        result = Result()
        result.add_collection_errors('collection_prop', direct_errors=e1)
        self.assertIn(e1, result.errors['collection_prop']['direct'])
    def test_add_multiple_direct_errors_to_nested_collection_errors(self):
        """ Adding multiple direct error to nested collection errors"""
        e1 = Error('error 1')
        e2 = Error('error 2')
        result = Result()
        result.add_collection_errors('collection_prop', direct_errors=[e1, e2])
        self.assertIn(e1, result.errors['collection_prop']['direct'])
        self.assertIn(e2, result.errors['collection_prop']['direct'])
    def test_raise_on_adding_bad_direct_errs_to_nested_collection_errors(self):
        """ Typecheck direct collection errors """
        result = Result()
        with self.assertRaises(x.InvalidErrorType):
            result.add_collection_errors('collection', direct_errors='Bad')
        with self.assertRaises(x.InvalidErrorType):
            result.add_entity_errors('collection', direct_errors=['Bad'])
    def test_append_direct_errors_to_nested_collection_errors(self):
        """ Appending direct errors to nested collection errors """
        e1 = Error('error 1')
        e2 = Error('error 2')
        e3 = Error('error 3')
        e4 = Error('error 4')
        result = Result()
        result.add_collection_errors('collection_prop', direct_errors=[e1, e2])
        result.add_collection_errors('collection_prop', direct_errors=[e3, e4])
        self.assertIn(e1, result.errors['collection_prop']['direct'])
        self.assertIn(e2, result.errors['collection_prop']['direct'])
        self.assertIn(e3, result.errors['collection_prop']['direct'])
        self.assertIn(e4, result.errors['collection_prop']['direct'])
    def test_add_collection_errors_to_nested_collection_errors(self):
        """ Adding collection errors """
        e1 = Error('error 1')
        e2 = Error('error 2')
        collection_errors = [
            Result(errors=dict(simple=[e1])),
            Result(),
            Result(errors=dict(simple=[e2])),
            Result()
        ]
        result = Result()
        result.add_collection_errors(
            'collection_prop',
            collection_errors=collection_errors
        )
        errors = result.errors['collection_prop']['collection']
        self.assertIn(e1, errors[0].errors['simple'])
        self.assertIn(e2, errors[2].errors['simple'])
    def test_append_collection_errors_to_nested_collection_errors(self):
        """ Appending direct errors to nested collection errors """
        result = Result()
        result.add_collection_errors('collection_prop', collection_errors=[
            Result(errors={'simple': [Error('Simple 0-1')]}),
            Result(),
            Result(),
            Result(errors={'simple': [Error('Simple 3-1')]}),
        ])
        result.add_collection_errors('collection_prop', collection_errors=[
            Result(errors={'simple': [Error('Simple 0-2')]}),
            Result(),
            Result(),
            Result(errors={'simple': [Error('Simple 3-2')]}),
            Result(errors={'simple': [Error('Simple 4')]}),
        ])
        # assert missing added
        err = result.errors['collection_prop']['collection']
        self.assertIn(4, err)
        # assert existing merged
        self.assertEqual(2, len(err[0]['simple']))
        self.assertEqual(2, len(err[3]['simple']))
    def test_result_valid_when_nested_collection_valid(self):
        """ Do not create collection property on result if collection valid"""
        result = Result()
        result.add_collection_errors(
            'collection_prop',
            collection_errors=[Result(), Result()]
        )
        result2 = Result()
        result2.add_collection_errors(
            'collection_prop',
            collection_errors=[Result(), Result()]
        )
        result.merge(result2)
        self.assertTrue(result)
    # --------------------------------------------------------------------------
    # merging results
    # --------------------------------------------------------------------------
    def test_raise_on_merging_incompatible_results(self):
        """ Raise on merging incompatible results """
        result1 = Result()
        result1.add_errors('property', [Error('Some Error')])
        result2 = Result()
        result2.add_entity_errors('property', direct_errors=[Error('Error')])
        with self.assertRaises(x.UnableToMergeResultsType):
            result1.merge(result2)
    def test_raise_on_merging_collection_into_entity(self):
        """ Raise on merging collection into entity"""
        result1 = Result()
        result1.errors = dict(prop=dict(schema=dict()))
        result2 = Result()
        result2.errors = dict(prop=dict(collection=dict()))
        with self.assertRaises(x.UnableToMergeResultsType):
            result1.merge(result2)
    def test_raise_on_merging_entity_into_collection(self):
        """ Raise on merging entity into collection"""
        result1 = Result()
        result1.errors = dict(prop=dict(collection=dict()))
        result2 = Result()
        result2.errors = dict(prop=dict(schema=dict()))
        with self.assertRaises(x.UnableToMergeResultsType):
            result1.merge(result2)
    def test_merging_nested_results(self):
        """ Merging nested results"""
        """
        Result 1
        """
        result1 = Result()
        # simple
        result1.add_errors('simple1', [
            Error('Res1 Simple prop1 err 1'),
            Error('Res1 Simple prop1 err 2'),
        ])
        # state
        result1.add_state_errors([
            Error('Res1 State 2'),
            Error('Res1 State 1')
        ])
        # entity direct
        result1.add_entity_errors('nested_entity1', direct_errors=[
            Error('Res1 Entity prop1 direct err 1'),
            Error('Res1 Entity prop1 direct err 2'),
        ])
        # entity nested schemas
        nested1_1 = Result()
        nested1_1.add_errors('nested_simple_1', [
            Error('Res1 Nested1 Simple1'),
            Error('Res1 Nested1 Simple2'),
        ])
        nested1_2 = Result()
        nested1_2.add_errors('deeper_nested_simple_1', [
            Error('Res1 Nested1 Simple1'),
            Error('Res1 Nested1 Simple2'),
        ])
        nested1_1.add_entity_errors('deeper', schema_errors=nested1_2)
        result1.add_entity_errors('nested_entity1', schema_errors=nested1_1)
        # collection direct
        result1.add_entity_errors('nested_collection1', direct_errors=[
            Error('Res2 Collection prop1 direct err 1'),
            Error('Res2 Collection prop1 direct err 2'),
        ])
        # collection nested schemas
        result1.add_collection_errors('nested_collection1', collection_errors=[
            Result({'simple1': Error('Simple Error')}),
            Result(),
            Result({'simple2': Error('Another simple Error')}),
            Result()
        ])
        """
        Result 2
        """
        result2 = Result()
        # simple
        result2.add_errors('simple1', [
            Error('Res2 Simple prop1 err 3'),
            Error('Res2 Simple prop1 err 4'),
        ])
        result2.add_errors('simple2', [
            Error('Res2 Simple prop2 err 1'),
            Error('Res2 Simple prop2 err 2'),
        ])
        # state
        result2.add_state_errors([
            Error('Res2 State 1'),
            Error('Res2 State 2')
        ])
        # entity direct
        result2.add_entity_errors('nested_entity1', direct_errors=[
            Error('Res2 Entity prop1 direct err 3'),
            Error('Res2 Entity prop1 direct err 4'),
        ])
        result2.add_entity_errors('nested_entity2', direct_errors=[
            Error('Res2 Entity prop2 direct err 1'),
            Error('Res2 Entity prop2 direct err 2'),
        ])
        # entity nested schemas
        nested2_1 = Result()
        nested2_1.add_errors('nested_simple_1', [
            Error('Res2 Nested1 Simple1'),
            Error('Res2 Nested1 Simple2'),
        ])
        nested2_2 = Result()
        nested2_2.add_errors('deeper_nested_simple_2', [
            Error('Res2 Nested1 Simple1'),
            Error('Res2 Nested1 Simple2'),
        ])
        nested2_1.add_entity_errors('deeper', schema_errors=nested2_2)
        result2.add_entity_errors('nested_entity1', schema_errors=nested2_1)
        # collection direct
        result2.add_entity_errors('nested_collection1', direct_errors=[
            Error('Res2 Collection prop1 direct err 3'),
            Error('Res2 Collection prop1 direct err 4'),
        ])
        # collection nested schemas
        result2.add_collection_errors('nested_collection1', collection_errors=[
            Result({'simple3': Error('And another simple Error')}),
            Result(),
            Result(),
            Result(),
            Result({'simple4': Error('Another simple Error fom result2')}),
        ])
        result2.add_collection_errors('nested_collection2', collection_errors=[
            Result({'simple1': Error('Simple Error')}),
            Result({'simple2': Error('Another simple Error')}),
        ])
        # now merge
        result1.merge(result2)
        err = result1.errors
        # assert simple merged
        self.assertEqual(4, len(err['simple1']))
        self.assertEqual(2, len(err['simple2']))
        # assert state merged
        self.assertEqual(4, len(err['__state__']))
        # assert direct errors merged
        self.assertEqual(4, len(err['nested_entity1']['direct']))
        self.assertEqual(2, len(err['nested_entity2']['direct']))
        # assert nested entities merged
        self.assertEqual(
            4,
            len(err['nested_entity1']['schema']['nested_simple_1'])
        )
        # assert deeper nested entities merged
        self.assertEqual(
            4,
            len(err['nested_entity1']['schema']['nested_simple_1'])
        )
        # assert merged recursively
        self.assertIn(
            'deeper_nested_simple_2',
            err['nested_entity1']['schema']['deeper']['schema']
        )
        self.assertIn(
            'deeper_nested_simple_1',
            err['nested_entity1']['schema']['deeper']['schema']
        )
        # assert nested collections direct merged
        self.assertEqual(4, len(err['nested_collection1']['direct']))
        # assert missing added
        self.assertIn('nested_collection2', err)
        # assert missing collection members added
        self.assertIn(4, err['nested_collection1']['collection'])
        # assert errors merged recursively for existing collection members
        self.assertEqual(2, len(err['nested_collection1']['collection'][0]))
    # --------------------------------------------------------------------------
    # translating and formatting results
    # --------------------------------------------------------------------------
    def test_translate_messages(self):
        """ Translating nested result with arbitrary translator"""
        t = 'ZZZ'
        def translator(input):
            return t + input
        result = Result()
        # state errors
        state = [Error('state1'), Error('state2')]
        result.add_state_errors(state)
        # properties
        result.add_errors('simple_prop', [
            Error('Simple prop error 1'),
            Error('Simple prop error 2'),
        ])
        # entities direct
        result.add_entity_errors('entity_prop', direct_errors=[
            Error('Entity direct error 1'),
            Error('Entity direct error 2')
        ])
        # entity schema
        schema_result = Result()
        schema_result.add_errors('nested_prop', [
            Error('Nested prop error 1'),
            Error('Nested prop error 2'),
        ])
        result.add_entity_errors('entity_prop', schema_errors=schema_result)
        # collection direct
        result.add_collection_errors('collection_prop', direct_errors=[
            Error('Collection direct error 1'),
            Error('Collection direct error 2')
        ])
        # collections schema
        collection_item = Result()
        collection_item.add_errors('simple_nested', [
            Error('Collection item error 1'),
            Error('Collection item error 2'),
        ])
        result.add_collection_errors('collection_prop', collection_errors=[
            Result(),
            collection_item,
            Result(),
        ])
        err = result._translate_errors(result.errors, translator)
        # assert state translated
        self.assertEqual(t + 'state1', err['__state__'][0])
        self.assertEqual(t + 'state2', err['__state__'][1])
        # assert simple props translated
        self.assertEqual(t + 'Simple prop error 1', err['simple_prop'][0])
        self.assertEqual(t + 'Simple prop error 2', err['simple_prop'][1])
        # assert entity direct translated
        self.assertEqual(
            t + 'Entity direct error 1',
            err['entity_prop']['direct'][0]
        )
        self.assertEqual(
            t + 'Entity direct error 2',
            err['entity_prop']['direct'][1]
        )
        # assert nested schemas translated
        self.assertEqual(
            t + 'Nested prop error 1',
            err['entity_prop']['schema']['nested_prop'][0]
        )
        self.assertEqual(
            t + 'Nested prop error 2',
            err['entity_prop']['schema']['nested_prop'][1]
        )
        # assert collection direct translated
        self.assertEqual(
            t + 'Collection direct error 1',
            err['collection_prop']['direct'][0]
        )
        self.assertEqual(
            t + 'Collection direct error 2',
            err['collection_prop']['direct'][1]
        )
        # assert collection items translated with schema
        self.assertEqual(
            t + 'Collection item error 1',
            err['collection_prop']['collection'][1]['simple_nested'][0]
        )
        self.assertEqual(
            t + 'Collection item error 2',
            err['collection_prop']['collection'][1]['simple_nested'][1]
        )
    def test_formatting_messages(self):
        """ Error messages formatted with parameters (if any) """
        result = Result()
        no = 'Me has no params'
        no_params = None
        self.assertEqual(no, result.format_error(no, no_params))
        positional = 'I have positionals: one {} two {} and {}'
        positional_params = [1, 2, 3]
        self.assertEqual(
            positional.format(*positional_params),
            result.format_error(positional, positional_params)
        )
        named = 'I have named params {one} and {two}'
        named_params = dict(one='FIRST', two='SECOND')
        self.assertEqual(
            named.format(**named_params),
            result.format_error(named, named_params)
        )
|
# -*- coding: utf-8 -*-
# @Time : 2020/10/28 9:22
# @Author : zls
# @File : 120ask_spider.py
from pyquery import PyQuery as pq
from openpyxl import Workbook
import requests
# HTTP headers sent with every request.
# NOTE(review): most of these (Content-Encoding, Date, Vary, X-Cache, ...)
# are *response* headers copied from a browser capture and have no effect in
# a request; User-Agent is the one that likely matters — confirm before pruning.
headers = {'Content-Encoding': 'gzip',
           'Content-Type': 'text/html; charset=UTF-8',
           'Date': 'Wed, 28 Oct 2020 01:22:31 GMT',
           'Vary': 'Accept-Encoding',
           'X-Cache': 'bypass',
           'X-Via-JSL': '16d00f5,-',
           'Yii-server': '181',
           'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.80 Safari/537.36'}
def get_html(url):
    """Fetch *url* and return its decoded HTML text, or None after repeated failures.

    Fixes in this version: the original kept the retry counter `n` as a
    local that every *recursive* call reset to 0, so a persistent failure
    retried forever; and it caught the builtin ConnectionError, which the
    requests library does not raise (its exceptions derive from
    requests.RequestException), so timeouts and connection errors escaped.
    """
    for attempt in range(4):
        try:
            response = requests.get(url, headers=headers, timeout=3)
            # Let requests guess the page encoding from the body content.
            response.encoding = response.apparent_encoding
            return response.text
        except requests.RequestException:
            print('连接超时,正在重新爬取数据......')
    return None
def get_url():
    """Build the paginated listing URLs (pages 1-100) for five drug categories.

    Returns, in order: Chinese/Western medicine, household staples, men's
    health, mother & baby, and elderly medication URL lists.
    """
    def pages(prefix):
        # One URL per listing page; the page number is the third path field.
        return [f'https://yp.120ask.com/search/{prefix}-{page}--0-0-0-0.html'
                for page in range(1, 101)]
    # 中西药品: https://yp.120ask.com/search/0-0-1--0-0-0-0.html
    zhongxi = pages('0-0')
    # 家庭常备: https://yp.120ask.com/search/36-0-99--0-0-0-0.html
    jiating = pages('36-0')
    # 男科用药: https://yp.120ask.com/search/9-0-1--0-0-0-0.html
    man = pages('9-0')
    # 母婴用药: https://yp.120ask.com/search/11-0-1--0-0-0-0.html
    mother = pages('11-0')
    # 中老年用药: https://yp.120ask.com/search/12-0-1--0-0-0-0.html
    old = pages('12-0')
    return zhongxi, jiating, man, mother, old
# Find the URL of each individual drug and scrape its instruction sheet.
def search(row, url, titleName, currentSheet):
    """Scrape one listing page: follow each drug's manual page and write its
    field/value table into the worksheet, one drug per spreadsheet row.

    Returns the next free worksheet row.  `titleName` is mutated in place,
    accumulating the union of all field names seen (column headers).
    """
    t = [] # field titles scraped from the current manual page
    c = [] # the content of each title field
    print(url)
    html = get_html(url)
    if html:
        doc = pq(html)
        urls1 = doc('.Sort-list.Drug-store ul li div i a').items()
        for detail in urls1:
            # The listing links to /detail/...; the manual lives at /manual/...
            manual = detail.attr('href').replace('detail', 'manual')
            detail_url = 'https://yp.120ask.com' + manual
            # print(detail_url)
            html = get_html(detail_url)
            if html:
                doc = pq(html)
                title = doc('.cont-Drug-details .cont-2.tab-dm-2 .table .tabrow .td').items()
                content = doc('.cont-Drug-details .cont-2.tab-dm-2 .table .tabrow .td-details').items()
                for tit in title:
                    # print(tit.text())
                    t.append(tit.text())
                    # Grow the shared header list with any new field name.
                    if tit.text() not in titleName:
                        titleName.append(tit.text())
                for con in content:
                    # print(con.text())
                    c.append(con.text())
                dic = dict(zip(t, c)).items()
                print(dic)
                # print(titleName)
                for k, v in dic:
                    # Column position is the field's index in the header list.
                    currentSheet.cell(row=row, column=titleName.index(k) + 1, value=v)
                print(f'已爬取{row - 1}条')
                row = row + 1
    return row
def save_file(wb, currentSheet, file_name, url_list):
    """Crawl every listing page into the sheet, then write the header row
    and save the workbook as ``file_name``.

    The header is written last because field names are discovered
    incrementally while crawling (see ``search``).
    """
    titleName = []  # ordered field names; index + 1 is the sheet column
    row = 2  # row 1 is reserved for the header
    for page_url in url_list:  # page_url is the URL of one listing page
        row = search(row, page_url, titleName, currentSheet)
    for i in range(1, len(titleName) + 1):
        currentSheet.cell(row=1, column=i, value=titleName[i - 1])
    print(titleName)
    wb.save(file_name)
    print('爬取完毕!!!')
def creat_excel():
    """Create a workbook whose single active sheet is named "Sheet1".

    Bug fix: the original called ``wb.create_sheet(index=0, title="Sheet1")``,
    which *added* a second sheet next to openpyxl's default "Sheet", leaving
    an empty stray sheet in every saved file. Renaming the default active
    sheet yields the same active "Sheet1" without the extra sheet.

    Returns:
        (workbook, active worksheet) tuple.
    """
    wb = Workbook()
    currentSheet = wb.active
    currentSheet.title = "Sheet1"
    return wb, currentSheet
def run_spider():
    """Crawl the five drug categories one after another, writing each
    category's manuals to its own xlsx file:
    中西药品 / 家庭常备 / 男科用药 / 母婴用药 / 中老年用药.
    """
    zhongxi, jiating, man, mother, old = get_url()
    # (output file name, listing URLs) per category, crawled in order.
    jobs = [
        ('120ask中西药品说明书.xlsx', zhongxi),
        ('120ask家庭常备说明书.xlsx', jiating),
        ('120ask男科用药说明书.xlsx', man),
        ('120ask母婴用药说明书.xlsx', mother),
        ('120ask中老年用药说明书.xlsx', old),
    ]
    for file_name, url_list in jobs:
        # A fresh workbook per category keeps the column layouts independent.
        wb, currentSheet = creat_excel()
        save_file(wb, currentSheet, file_name, url_list)
if __name__ == '__main__':
    run_spider()
|
import os
import pistis
import unittest
import tempfile
import json
import shutil
class ApiTestCase(unittest.TestCase):
    """End-to-end tests for the /api/v1/manifest endpoint of the pistis app."""
    maxDiff = None  # always show the full dict diff on assertion failure

    def setUp(self):
        # WSGI test client; no running server required.
        self.client = pistis.app.test_client()

    def tearDown(self):
        pass

    def test_add_manifest(self):
        """POST validation errors, then a successful manifest echo."""
        def req(*args, **kwargs):
            # POST the kwargs as a JSON body; decode the JSON response.
            res = self.client.post(
                '/api/v1/manifest',
                data=json.dumps(kwargs),
                content_type='application/json',
            )
            return json.loads(res.data.decode())
        # Missing the mandatory "field" key.
        self.assertEqual(
            req(fruit='apple', people='adult'),
            dict(error='key "field" not exists')
        )
        # Unknown field value.
        self.assertEqual(
            req(field='gitlab'),
            dict(error='unsupported field gitlab')
        )
        # Known field but missing the rest of the manifest.
        self.assertEqual(
            req(field='keepwork'),
            dict(error='incomplete manifest for field keepwork')
        )
        # Complete manifest is echoed back unchanged.
        self.assertEqual(
            req(field='keepwork', author='dukes', work='test-report', identity='f844aa8d4ec646c1976a0fde5257767f2387d425'),
            dict(field='keepwork', author='dukes', work='test-report', identity='f844aa8d4ec646c1976a0fde5257767f2387d425')
        )

    def test_search_manifest(self):
        """GET validation errors, empty results, and full version histories."""
        def req(*args, **kwargs):
            # GET with the kwargs as the query string; decode the JSON response.
            res = self.client.get(
                '/api/v1/manifest',
                query_string=kwargs
            )
            return json.loads(res.data.decode())
        self.assertEqual(
            req(person='duke', job='test'),
            dict(error='key "field" not exists')
        )
        self.assertEqual(
            req(field='gitlab', location='America'),
            dict(error='unsupported field gitlab')
        )
        self.assertEqual(
            req(field='keepwork', what='the ****'),
            dict(error='incomplete query condition')
        )
        # can't find anything
        self.assertEqual(
            req(field='keepwork', author='aha', work='everhome'),
            dict(data=[])
        )
        # return every version that manifest changes
        self.assertEqual(
            req(field='keepwork', author='duting3', work='haqi'),
            dict(
                data=[
                    dict(
                        manifest=dict(
                            field='keepwork',
                            author='duting3',
                            work='haqi',
                            identity='b0112d212a67c9b3b7e305e53946751fcfcbf4d3'
                        ),
                        pistis=dict(
                            hash='b1beda7644d7b992926d0bfe177baeb25d87872c',
                            timestamp='1523518286'
                        ),
                        blockchain=dict(
                            ethereum=dict(
                                hash='e27db291d477391a7556d4467b8c7859609a2200507f950d37cc4b4abf5bb30f'
                            ),
                            bitcoin=dict(
                                hash='f7f5b8d297e8ca4199ad7d2fe82947e449849b53d21804b0a29bb5904fbd0a3f'
                            ),
                        )
                    )
                ]
            )
        )
        self.assertEqual(
            req(field='keepwork', author='keep2', work='paracraft'),
            dict(
                data=[
                    dict(
                        manifest=dict(
                            field='keepwork',
                            author='keep2',
                            work='paracraft',
                            identity='5c1e9ce71b7862d568a75ef5b13562993cc1f9b4'
                        ),
                        pistis=dict(
                            hash='b1beda7644d7b992926d0bfe177baeb25d87872c',
                            timestamp='1523518286'
                        ),
                        blockchain=dict(
                            ethereum=dict(
                                hash='e27db291d477391a7556d4467b8c7859609a2200507f950d37cc4b4abf5bb30f'
                            ),
                            bitcoin=dict(
                                hash='f7f5b8d297e8ca4199ad7d2fe82947e449849b53d21804b0a29bb5904fbd0a3f'
                            ),
                        )
                    )
                ]
            )
        )
|
from django.contrib import admin
from myapp.models import Articles, Comments, CommentToComment
class ArticleInLine(admin.StackedInline):
    """Inline editor for an article's comments (one extra blank form)."""
    model = Comments
    extra = 1
class ArticleAdmin(admin.ModelAdmin):
    """Admin page for articles: title/text fields plus inline comments."""
    fields = ['title', 'text']
    inlines = [ArticleInLine]
admin.site.register(Articles, ArticleAdmin)
class SubCommentsInLine(admin.StackedInline):
    """Inline editor for replies attached to a comment."""
    model = CommentToComment
    extra = 1
class CommentAdmin(admin.ModelAdmin):
    """Admin page for comments with their inline sub-comments."""
    fields = ['comment']
    inlines = [SubCommentsInLine]
admin.site.register(Comments, CommentAdmin)
from django.conf.urls import patterns, url
# URL routes for the users app: /signup/ -> users.views.signup.
# NOTE(review): patterns() and string view references are deprecated since
# Django 1.8 and removed in 1.10 — this module requires Django < 1.10.
urlpatterns = patterns('users.views',
    url(r'^signup/$', 'signup', name='signup'),
)
|
# Generated by Django 2.2.6 on 2019-11-23 13:03
from django.db import migrations, models


class Migration(migrations.Migration):
    """Auto-generated migration: alters defaults/choices on three Mobil fields.

    Auto-generated by ``makemigrations`` — do not hand-edit the operations.
    """

    dependencies = [
        ('mobile', '0023_auto_20191123_1047'),
    ]

    operations = [
        # Free-text defect description now defaults to a placeholder.
        migrations.AlterField(
            model_name='mobil',
            name='defect',
            field=models.CharField(default='...', max_length=30),
        ),
        # Secondary phone-defect choice field (default placeholder '***').
        migrations.AlterField(
            model_name='mobil',
            name='defect_1_tel',
            field=models.CharField(choices=[('programm', 'Programm'), ('kod', 'Код'), ('akkaunt', 'Аккаунт'), ('lcd/touch', 'LCD/Touch'), ('lcd', 'LCD'), ('touch', 'Touch'), ('dnt_1', 'Не вкл'), ('dnt_2', 'Не вкл, Мокрый'), ('dnt_3', 'Не вкл, Падал'), ('not_charging', 'Не заряж'), ('speaker', 'Speaker'), ('buzzer', 'Buzzer'), ('microphone', 'Microphone'), ('hands_free', 'Hands_Free'), ('wi_fi', 'Wi-Fi'), ('wet', 'Мокрый'), ('sim', 'Sim'), ('------->', '------->')], default='***', max_length=20),
        ),
        # Primary phone-defect choice field (same choice set, no default).
        migrations.AlterField(
            model_name='mobil',
            name='defect_tel',
            field=models.CharField(choices=[('programm', 'Programm'), ('kod', 'Код'), ('akkaunt', 'Аккаунт'), ('lcd/touch', 'LCD/Touch'), ('lcd', 'LCD'), ('touch', 'Touch'), ('dnt_1', 'Не вкл'), ('dnt_2', 'Не вкл, Мокрый'), ('dnt_3', 'Не вкл, Падал'), ('not_charging', 'Не заряж'), ('speaker', 'Speaker'), ('buzzer', 'Buzzer'), ('microphone', 'Microphone'), ('hands_free', 'Hands_Free'), ('wi_fi', 'Wi-Fi'), ('wet', 'Мокрый'), ('sim', 'Sim'), ('------->', '------->')], max_length=20),
        ),
        # Phone manufacturer choice field.
        migrations.AlterField(
            model_name='mobil',
            name='model_tel',
            field=models.CharField(choices=[('apple', 'Apple'), ('samsung', 'Samsung'), ('huawei', 'Huawei'), ('meizu', 'Meizu'), ('xiaomi', 'Xiaomi'), ('nokia', 'Nokia'), ('microsoft', 'Microsoft'), ('sony', 'Sony'), ('alkatel', 'Alcatel'), ('asus', 'Asus'), ('blackberry', 'BlackBerry'), ('coolpad', 'Coolpad'), ('cubot', 'Cubot'), ('doogee', 'Doogee'), ('doro', 'Doro'), ('estar', 'Estar'), ('getnord', 'Getnord'), ('google', 'Google'), ('htc', 'HTC'), ('lenovo', 'Lenovo'), ('lg', 'Lg'), ('myphone', 'MyPhone'), ('nous', 'Nous'), ('zte', 'ZTE'), ('------->', '------->')], max_length=20),
        ),
    ]
|
import os
import json
class DataLogger(object):
    """Accumulates train/test metrics and logs them to ``<folder_name>/res.json``.

    When ``folder_name`` is None the logger runs in-memory only (``res`` is
    never created and ``save()`` is a no-op).
    """

    def __init__(self, folder_name, log_freq=10, test_freq=100):
        """
        folder_name: (str or None) The name of the log folder
        log_freq: (int) iterations between train log points
        test_freq: (int) iterations between test log points
        """
        self.folder_name = folder_name
        if folder_name is not None:  # idiom fix: was `not folder_name is None`
            if not os.path.exists(folder_name):
                os.mkdir(folder_name)
            res_path = os.path.join(folder_name, "res.json")
            if not os.path.exists(res_path):
                self.res = {}
            else:
                # Resume: merge new points into a previous run's results.
                with open(res_path, "r") as f:
                    self.res = json.load(f)
        self.log_freq = log_freq
        self.test_freq = test_freq
        self.train_monitor = Monitor()
        self.test_monitor = Monitor()
        self.test_acc = 0
        self.test_loss = 0
        self.train_acc = 0
        self.train_loss = 0
        # NOTE(review): never advanced internally — presumably updated by the
        # caller between log windows; confirm against the training loop.
        self.previous_iteration = 0

    def update(self, name, iteration, *args, force_log=False):
        """Feed one batch of metrics into the ``name`` ("train"/"test")
        monitor; on a log boundary (or when forced) average the window,
        store it, and reset the monitor.
        """
        getattr(self, "%s_monitor" % name).update_metrics(*args)
        if (name != "test" and self.require_log(iteration)) or force_log:
            acc, loss = getattr(self, "%s_monitor" % name).return_metrics()
            setattr(self, "%s_acc" % name, acc)
            setattr(self, "%s_loss" % name, loss)
            self._logg(acc, "%s_acc" % name, iteration)
            self._logg(loss, "%s_loss" % name, iteration)
            # Fresh monitor starts the next averaging window.
            setattr(self, "%s_monitor" % name, Monitor())

    def _logg(self, value, name, iteration):
        """Record ``value`` under res[iteration][name]; no-op when file
        logging is disabled (``res`` only exists when folder_name was set)."""
        if hasattr(self, "res"):
            self.res.setdefault(str(iteration), {})[name] = value

    def require_log(self, iteration, name="log"):
        """True when ``iteration`` has crossed a ``<name>_freq`` boundary
        since ``previous_iteration``."""
        freq = getattr(self, "%s_freq" % name)
        return int(iteration / freq) != int(self.previous_iteration / freq)

    def save(self):
        """Write accumulated results to res.json.

        Bug fix: the original guarded on ``hasattr(self, "folder_name")``,
        which is always true, so calling save() on a logger created with
        ``folder_name=None`` crashed inside ``os.path.join``.
        """
        if self.folder_name is not None:
            with open(os.path.join(self.folder_name, "res.json"), "w") as f:
                json.dump(self.res, f, indent=4)
class Monitor(object):
    """Accumulates batch losses and reports averages over the window."""

    def __init__(self):
        self.acc = 0    # accuracy total; never incremented by update_metrics
        self.loss = 0   # running sum of batch losses
        self.count = 0  # number of recorded batches

    def update_metrics(self, loss, output, target):
        """Record one batch. Only ``loss`` is used; ``output`` and
        ``target`` are accepted for interface compatibility but ignored.
        """
        self.loss, self.count = self.loss + loss.item(), self.count + 1

    def return_metrics(self):
        """Return ``(mean_acc, mean_loss)`` over the recorded batches
        (raises ZeroDivisionError when none were recorded)."""
        seen = self.count
        return self.acc / seen, self.loss / seen
|
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from django.core.exceptions import ObjectDoesNotExist
from gridsingularity.utils import sim
from gridsingularity.exceptions.exceptions import NotFoundException
from .models import SimulationResult
from .serializers import SimulationResultSerializer
class CreateSimulation(APIView):
    """
    Api view for Create simulation
    """

    def post(self, request):
        """Run a simulation, persist the result, and return it as JSON.

        Parameters:
            request : request with the payloads
        Returns:
            response: 200 with the stored result, or 406 with the
            serializer errors when validation fails.
        """
        active, reactive = sim.run_simulation()
        serializer = SimulationResultSerializer(
            data={'active': active, 'reactive': reactive})
        if not serializer.is_valid():
            return Response(serializer.errors,
                            status=status.HTTP_406_NOT_ACCEPTABLE)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_200_OK)
class GetSimulation(APIView):
    """
    Api view for Get simulation result
    """

    def get(self, request, id):
        """Return the simulation identified by ``id`` as JSON.

        Raises:
            NotFoundException: when no simulation with that id exists.
        """
        try:
            simulation = SimulationResult.objects.get(id=id)
        except ObjectDoesNotExist:
            raise NotFoundException('Simulation not found')
        return Response(SimulationResultSerializer(simulation).data)
class GetSimulationActive(APIView):
    """
    Api view for Get simulation result active only
    """

    def get(self, request, id):
        """Return only the ``active`` part of the simulation result.

        Raises:
            NotFoundException: when no simulation with that id exists.
        """
        try:
            simulation = SimulationResult.objects.get(id=id)
        except ObjectDoesNotExist:
            raise NotFoundException('Simulation not found')
        data = SimulationResultSerializer(simulation).data
        return Response({'active': data['active']})
class GetSimulationReActive(APIView):
    """
    Api view for Get simulation result reactive only
    """

    def get(self, request, id):
        """Return only the ``reactive`` part of the simulation result.

        Raises:
            NotFoundException: when no simulation with that id exists.
        """
        try:
            simulation = SimulationResult.objects.get(id=id)
        except ObjectDoesNotExist:
            raise NotFoundException('Simulation not found')
        data = SimulationResultSerializer(simulation).data
        return Response({'reactive': data['reactive']})
|
#!/bin/python3
import sys
from collections import defaultdict
class Graph(object):
    """Undirected graph with DFS enumeration of all simple paths."""

    def __init__(self, vertices):
        # vertices: declared vertex count (kept for reference; edges added
        # via add_edges define the actual connectivity).
        self.vertices = vertices
        self.graph = defaultdict(list)  # adjacency list
        self.paths = set()

    def set_positions(self, u=None, v=None, w=None):
        """Store the query positions u, v, w on the instance.

        Bug fix: the original discarded its arguments and always assigned
        None; the passed values are now kept (calling with no arguments
        preserves the old behaviour of storing None).
        """
        self.u = u
        self.v = v
        self.w = w

    def add_edges(self, u, v):
        """Add an undirected edge between u and v."""
        self.graph[u].append(v)
        self.graph[v].append(u)

    # DFS
    def get_all_possible_paths(self, start, end, path=None):
        """Return every simple path from ``start`` to ``end``.

        Bug fix: the original used a mutable default argument
        (``path=[]``) — harmless here only because the list was rebound,
        but a classic trap; replaced with the None-sentinel idiom.
        """
        path = ([] if path is None else path) + [start]
        if start == end:
            return [path]
        if start not in self.graph:
            return []
        paths = []
        for node in self.graph[start]:
            if node not in path:  # simple paths only: never revisit a vertex
                paths.extend(self.get_all_possible_paths(node, end, path))
        return paths
# Read the graph description from stdin: n vertices, m edges, q queries.
n, m, q = input().strip().split(' ')
n, m, q = [int(n), int(m), int(q)]
G = Graph(n)
# m undirected edges, one "u v" pair per line.
for a0 in range(m):
    u, v = input().strip().split(' ')
    u, v = [int(u), int(v)]
    G.add_edges(u, v)
# q queries of "u v w": print all simple paths from u to w.
# NOTE(review): v is read but never used here — confirm against the spec.
for a0 in range(q):
    u, v, w = input().strip().split(' ')
    u, v, w = [int(u), int(v), int(w)]
    print(G.get_all_possible_paths(u, w))
'''
4 4 1
1 2
2 3
2 4
3 1
2 4 2
'''
|
# -*- coding: utf-8 -*-
# Files persist data (save data to disk).
# Basic file operations:
# 1. open the file  2. close it
# File open modes: w = write, r = read
# 1. Open the file
# f = open('123.txt', 'w')  # mode 'w' creates the file if it does not exist,
#                           # and truncates (overwrites) it if it does
#
# 2. Write data
# f.write('hello')
# 3. Close the file — not closing leaks the handle: memory that should be
#    released cannot be released
# f.close()
"""The recommended way to operate on files: the with-statement closes the
file automatically, even when an error occurs."""
with open('123.txt', 'w') as f:
    f.write('python learn')
|
# -*- encoding:utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
import pyparsing
from mysqlparse.grammar.data_type import data_type_syntax
class DataTypeSyntaxTest(unittest.TestCase):
    """Tests for the MySQL column data-type grammar (``data_type_syntax``).

    Fixes vs. the original:
    - the deprecated ``assertEquals`` alias (removed in Python 3.12) is
      replaced with ``assertEqual`` throughout;
    - repeated parses of the same definition string are hoisted into locals
      and an exact duplicate assertion pair in ``test_enumerables`` was
      removed. All asserted properties are preserved.
    """

    @staticmethod
    def _parse(definition):
        """Parse one column-type definition string with the grammar under test."""
        return data_type_syntax.parseString(definition)

    def test_bit(self):
        self.assertEqual(self._parse("BIT").data_type, 'BIT')
        self.assertEqual(self._parse("BIT(8)").data_type, 'BIT')
        self.assertEqual(self._parse("BIT(8)").length[0], '8')

    def test_integers(self):
        type_list = ['TINYINT', 'SMALLINT', 'MEDIUMINT', 'INT', 'INTEGER', 'BIGINT']
        for type_name in type_list:
            self.assertEqual(self._parse(type_name).data_type, type_name)

            with_length = self._parse("{type_name}(8)".format(type_name=type_name))
            self.assertEqual(with_length.data_type, type_name)
            self.assertEqual(with_length.length[0], '8')
            self.assertFalse(with_length.unsigned)
            self.assertFalse(with_length.zerofill)

            with_unsigned = self._parse("{type_name}(8) unsigned".format(type_name=type_name))
            self.assertEqual(with_unsigned.data_type, type_name)
            self.assertEqual(with_unsigned.length[0], '8')
            self.assertTrue(with_unsigned.unsigned)

            with_zerofill = self._parse("{type_name}(8) zerofill".format(type_name=type_name))
            self.assertEqual(with_zerofill.data_type, type_name)
            self.assertEqual(with_zerofill.length[0], '8')
            self.assertTrue(with_zerofill.zerofill)

            with_all = self._parse("{type_name}(8) UNSIGNED ZEROFILL".format(type_name=type_name))
            self.assertEqual(with_all.data_type, type_name)
            self.assertEqual(with_all.length[0], '8')
            self.assertTrue(with_all.unsigned)
            self.assertTrue(with_all.zerofill)

    def test_decimals(self):
        type_list = ['REAL', 'DOUBLE', 'FLOAT', 'DECIMAL', 'NUMERIC']
        for type_name in type_list:
            self.assertEqual(self._parse(type_name).data_type, type_name)

            with_length = self._parse("{type_name}(10)".format(type_name=type_name))
            self.assertEqual(with_length.data_type, type_name)
            self.assertEqual(with_length.length, '10')

            with_decimals = self._parse("{type_name}(10, 2)".format(type_name=type_name))
            self.assertEqual(with_decimals.decimals, '2')
            self.assertFalse(with_decimals.unsigned)
            self.assertFalse(with_decimals.zerofill)

            self.assertTrue(self._parse("{type_name}(10, 2) UNSIGNED".format(type_name=type_name)).unsigned)
            self.assertTrue(self._parse("{type_name}(10, 2) ZEROFILL".format(type_name=type_name)).zerofill)

            with_all = self._parse("{type_name}(10, 2) UNSIGNED ZEROFILL".format(type_name=type_name))
            self.assertTrue(with_all.unsigned)
            self.assertTrue(with_all.zerofill)

    def test_datetimes(self):
        self.assertEqual(self._parse("DATE").data_type, 'DATE')
        self.assertEqual(self._parse("YEAR").data_type, 'YEAR')
        for type_name in ['TIME', 'TIMESTAMP', 'DATETIME']:
            self.assertEqual(self._parse(type_name).data_type, type_name)
            with_precision = self._parse("{type_name}(6)".format(type_name=type_name))
            self.assertEqual(with_precision.data_type, type_name)
            self.assertEqual(with_precision.precision[0], '6')

    def test_chars(self):
        self.assertEqual(self._parse("CHAR").data_type, 'CHAR')
        self.assertEqual(self._parse("CHAR(8)").length[0], '8')
        with_binary = self._parse("CHAR(8) BINARY")
        self.assertEqual(with_binary.length[0], '8')
        self.assertEqual(with_binary.binary, True)
        self.assertEqual(self._parse("CHAR(8) CHARACTER SET 'utf8'").character_set, "utf8")
        self.assertEqual(self._parse("CHAR(8) COLLATE 'utf8_general'").collation_name, "utf8_general")
        with_all = self._parse("CHAR(8) BINARY CHARACTER SET 'utf8' COLLATE 'utf8_general'")
        self.assertEqual(with_all.character_set, "utf8")
        self.assertEqual(with_all.collation_name, "utf8_general")
        self.assertTrue(with_all.binary)

    def test_varchar(self):
        # Unlike CHAR, a VARCHAR without an explicit length is invalid.
        with self.assertRaises(pyparsing.ParseException):
            data_type_syntax.parseString("VARCHAR").data_type
        self.assertEqual(self._parse("VARCHAR(8)").length[0], '8')
        with_binary = self._parse("VARCHAR(8) BINARY")
        self.assertEqual(with_binary.length[0], '8')
        self.assertEqual(with_binary.binary, True)
        self.assertEqual(self._parse("VARCHAR(8) CHARACTER SET 'utf8'").character_set, "utf8")
        self.assertEqual(
            self._parse("VARCHAR(8) COLLATE 'utf8_general'").collation_name,
            "utf8_general",
        )
        with_all = self._parse("VARCHAR(8) BINARY CHARACTER SET 'utf8' COLLATE 'utf8_general'")
        self.assertEqual(with_all.character_set, "utf8")
        self.assertEqual(with_all.collation_name, "utf8_general")
        self.assertTrue(with_all.binary)

    def test_binary(self):
        self.assertEqual(self._parse("BINARY").data_type, 'BINARY')
        self.assertEqual(self._parse("BINARY(8)").data_type, 'BINARY')
        self.assertEqual(self._parse("BINARY(8)").length[0], '8')

    def test_varbinary(self):
        # Unlike BINARY, a VARBINARY without an explicit length is invalid.
        with self.assertRaises(pyparsing.ParseException):
            data_type_syntax.parseString("VARBINARY").data_type
        self.assertEqual(self._parse("VARBINARY(8)").length[0], '8')

    def test_blobs(self):
        for type_name in ['TINYBLOB', 'BLOB', 'MEDIUMBLOB', 'LONGBLOB']:
            self.assertEqual(self._parse(type_name).data_type, type_name)

    def test_texts(self):
        for type_name in ['TINYTEXT', 'TEXT', 'MEDIUMTEXT', 'LONGTEXT']:
            self.assertEqual(self._parse(type_name).data_type, type_name)

            with_binary = self._parse("{type_name} BINARY".format(type_name=type_name))
            self.assertEqual(with_binary.data_type, type_name)
            self.assertTrue(with_binary.binary)

            with_charset = self._parse("{type_name} CHARACTER SET 'utf8'".format(type_name=type_name))
            self.assertEqual(with_charset.data_type, type_name)
            self.assertEqual(with_charset.character_set, 'utf8')

            with_collation = self._parse("{type_name} COLLATE 'utf8_general_ci'".format(type_name=type_name))
            self.assertEqual(with_collation.data_type, type_name)
            self.assertEqual(with_collation.collation_name, 'utf8_general_ci')
            self.assertFalse(with_collation.binary)

            with_all = self._parse(
                "{type_name} BINARY CHARACTER SET 'utf8' COLLATE 'utf8_general_ci'".format(type_name=type_name)
            )
            self.assertEqual(with_all.data_type, type_name)
            self.assertEqual(with_all.character_set, 'utf8')
            self.assertEqual(with_all.collation_name, 'utf8_general_ci')
            self.assertTrue(with_all.binary)

    def test_enumerables(self):
        for type_name in ['ENUM', 'SET']:
            plain = self._parse("{type_name}('option1', 'option2', 'option3')".format(type_name=type_name))
            self.assertEqual(plain.data_type, type_name)
            self.assertEqual(plain.value_list.asList(), ['option1', 'option2', 'option3'])

            with_charset = self._parse(
                "{type_name}('option1', 'option2', 'option3') CHARACTER SET 'utf8'".format(type_name=type_name)
            )
            self.assertEqual(with_charset.value_list.asList(), ['option1', 'option2', 'option3'])
            self.assertEqual(with_charset.character_set, 'utf8')

            with_collation = self._parse(
                "{type_name}('option1', 'option2', 'option3') CHARACTER SET 'utf8' COLLATE 'utf8_general_ci'".format(
                    type_name=type_name
                )
            )
            self.assertEqual(with_collation.collation_name, 'utf8_general_ci')
|
import os
import sys
from pandac.PandaModules import WindowProperties
import pandac.PandaModules
# from panda3d.core import Shader
from direct.showbase.ShowBase import ShowBase
from direct.task import Task
import numpy as np
from scipy.ndimage.filters import gaussian_filter
# pandac.PandaModules.loadPrcFileData("", """
# fullscreen 0
# load-display pandagl
# win-origin 0 0
# undecorated 0
# win-size 640 400
# sync-video 1
# """)
# Active window config: windowed (not fullscreen) 1280x800 OpenGL display
# at the screen origin, with vsync. Alternative configs are kept commented
# out around this call.
pandac.PandaModules.loadPrcFileData("", """
fullscreen 0
load-display pandagl
win-origin 0 0
undecorated 0
win-size 1280 800
sync-video 1
""")
# pandac.PandaModules.loadPrcFileData("", """
# fullscreen 1
# load-display pandagl
# win-origin 0 0
# undecorated 1
# win-size 2560 1600
# sync-video 1
# """)
class MyApp(ShowBase):
    """Panda3D viewer (Python 2) that replays a recorded or simulated fish
    hunt: paramecium positions, the fish's position/orientation, and strike
    events are loaded from .npy files and animated inside a wireframe tank.
    """

    def __init__(self):
        homedir = '/Users/nightcrawler2/PreycapMaster/'
        # Input starting with 's' selects simulation data; 'r' or 't'
        # selects recorded data ('t' applies the continuity window).
        sim_text = raw_input('Simulation Type: ')
        if sim_text[0] == 's':
            simulation = True
        if sim_text == 'r' or sim_text == 't':
            simulation = False
        # NOTE(review): any other input leaves ``simulation`` unbound and
        # raises NameError below — confirm the intended inputs are s*/r/t.
        if not simulation:
            if sim_text == 't':
                # Skip the initial frames covered by the continuity window.
                para_cont_window = np.load(
                    homedir + 'para_continuity_window.npy')
                para_cont_window = int(para_cont_window)
            else:
                para_cont_window = 0
            para_positions = np.load(
                homedir + '3D_paracoords.npy')[:, para_cont_window:]
            fish_position = np.load(homedir + 'ufish_origin.npy')
            fish_orientation = np.load(homedir + 'ufish.npy')
            try:
                self.strikelist = np.load(homedir + 'strikelist.npy')
            except IOError:
                # No strike annotations on disk: assume no strikes.
                self.strikelist = np.zeros(fish_position.shape[0])
        elif simulation:
            para_positions = np.load(
                homedir + 'para_simulation' + sim_text[1] + '.npy')
            fish_position = np.load(
                homedir + 'origin_model' + sim_text[1] + '.npy')
            fish_orientation = np.load(
                homedir + 'uf_model' + sim_text[1] + '.npy')
            if para_positions.shape[1] != fish_position.shape[0]:
                # Pad the fish arrays by repeating the final sample so they
                # span every paramecium frame.
                end_fp = [fish_position[-1] for i in range(
                    para_positions.shape[1]-fish_position.shape[0])]
                end_fo = [fish_orientation[-1] for i in range(
                    para_positions.shape[1]-fish_orientation.shape[0])]
                fish_position = np.concatenate((fish_position, end_fp))
                fish_orientation = np.concatenate((fish_orientation, end_fo))
            try:
                self.strikelist = np.load(
                    homedir + 'strikelist' + sim_text[1] + '.npy')
                print self.strikelist.shape
            except IOError:
                self.strikelist = np.zeros(fish_position.shape[0])
                print self.strikelist.shape
        else:
            self.exitmodel()
        # Paramecium coordinates arrive as (3 * num_para, num_frames).
        self.numpara = para_positions.shape[0]
        self.numframes = para_positions.shape[1]
        self.para_positions = para_positions
        # Light per-axis gaussian smoothing of the fish trajectory.
        dfx = gaussian_filter([x[0] for x in fish_position], 1)
        dfy = gaussian_filter([y[1] for y in fish_position], 1)
        dfz = gaussian_filter([z[2] for z in fish_position], 1)
        fish_position_filt = np.array(
            [[x, y, z] for x, y, z in zip(dfx, dfy, dfz)])
        self.fish_position = fish_position_filt
        # Same smoothing for the orientation unit vectors.
        fox = gaussian_filter([x[0] for x in fish_orientation], 1)
        foy = gaussian_filter([y[1] for y in fish_orientation], 1)
        foz = gaussian_filter([z[2] for z in fish_orientation], 1)
        fish_orientation_filt = np.array(
            [[x, y, z] for x, y, z in zip(fox, foy, foz)])
        self.fish_orientation = fish_orientation_filt
        print fish_orientation.shape
        print fish_position.shape
        print para_positions.shape
        print('numframes')
        print self.numframes
        ShowBase.__init__(self)
        self.accept("escape", self.exitmodel)
        # self.accept("escape", sys.exit)
        props = WindowProperties()
        props.setCursorHidden(False)
        props.setMouseMode(WindowProperties.M_absolute)
        self.win.requestProperties(props)
        # 90x90 degree perspective lens matched to the 1280x800 window.
        self.lens1 = pandac.PandaModules.PerspectiveLens()
        self.lens1.setFov(90, 90)
        self.lens1.setNearFar(.1, 10000)
        # self.lens1.setAspectRatio(1920/1080.)
        self.lens1.setAspectRatio(1280/800.)
        self.cam.node().setLens(self.lens1)
        # Camera is parented to a fixed pivot placed outside the tank.
        pivot = render.attachNewNode("pivot")
        # pivot.setPos(-1200, -1200, 944)
        pivot.setPos(3000, 3000, 944)
        self.cam.reparentTo(pivot)
        # self.cam.setH(100)
        # self.cam.setPos(-450, 944, 944)
        self.setBackgroundColor(1, 1, 1, 1)
        # Some Lines That Define Tank Boundaries
        self.d2 = pandac.PandaModules.LineSegs()
        self.d2.setColor(.5, .5, .5, 1)
        self.d2.setThickness(2)
        # The 12 edges of the 1888-unit tank cube.
        self.d2.moveTo(0, 0, 0)
        self.d2.drawTo(1888, 0, 0)
        self.d2.moveTo(0, 0, 0)
        self.d2.drawTo(0, 0, 1888)
        self.d2.moveTo(0, 0, 0)
        self.d2.drawTo(0, 1888, 0)
        self.d2.moveTo(1888, 1888, 0)
        self.d2.drawTo(1888, 1888, 1888)
        self.d2.moveTo(0, 1888, 1888)
        self.d2.drawTo(1888, 1888, 1888)
        self.d2.moveTo(1888, 0, 1888)
        self.d2.drawTo(1888, 1888, 1888)
        self.d2.moveTo(1888, 0, 0)
        self.d2.drawTo(1888, 1888, 0)
        self.d2.moveTo(1888, 0, 0)
        self.d2.drawTo(1888, 0, 1888)
        self.d2.moveTo(0, 1888, 0)
        self.d2.drawTo(1888, 1888, 0)
        self.d2.moveTo(0, 1888, 0)
        self.d2.drawTo(0, 1888, 1888)
        self.d2.moveTo(0, 0, 1888)
        self.d2.drawTo(0, 1888, 1888)
        self.d2.moveTo(0, 0, 1888)
        self.d2.drawTo(1888, 0, 1888)
        # Tiny reference marker at the tank centre; the camera looks at it.
        self.reference = self.loader.loadModel("sphere-highpoly")
        self.reference.reparentTo(self.render)
        self.reference.setScale(.01, .01, .01)
        self.reference.setColor(1, 1, 1)
        self.reference.setPos(944, 944, 944)
        self.cam.lookAt(self.reference)
        drawtank = True
        scale = 1888
        if drawtank:
            # Translucent blue cube marking the tank volume.
            self.tank = self.loader.loadModel("rgbCube.egg")
            self.tank.reparentTo(self.render)
            self.tank.setScale(scale, scale, scale)
            self.tank.setTransparency(1)
            self.tank.setAlphaScale(0.2)
            self.tank.setColor(.3, .6, .9)
            self.tank.setPos(scale / 2, scale / 2, scale / 2)
        # #
        geom2 = self.d2.create()
        self.nodegeom2 = self.render.attachNewNode(geom2)
        # Load the environment model.
        self.fishcone = self.loader.loadModel("Spotlight.egg")
        self.fishcone.setTexture(self.loader.loadTexture("white.png"), 1)
        self.fishcone.reparentTo(self.render)
        self.fishcone.setPos(0, 0, 0)
        self.fishcone.setScale(10, 10, 10)
        self.fishcone.setTransparency(1)
        self.fishcone.setAlphaScale(.5)
        self.fishcone.setColor(0, 0, 1)
        ''' These three lines make sure this is drawn before the tank.
        If you don't do this, tank blocks out the fishcone.'''
        self.fishcone.setBin("fixed", 0)
        self.fishcone.setDepthTest(False)
        self.fishcone.setDepthWrite(False)
        self.fishcone.show()
        # One sphere per paramecium (positions are flat x,y,z triples).
        self.spheres = dict({})
        for i in range(int(self.numpara/3)):
            self.spheres[i] = self.loader.loadModel("sphere.egg")
            # self.spheres[i] = Actor("models/panda-model",
            #                         {"walk": "models/panda-walk4"})
            self.spheres[i].reparentTo(self.render)
            self.spheres[i].setScale(15, 15, 15)
            self.spheres[i].setColor(.25, .25, .25)
            # text = pandac.PandaModules.TextNode('node name')
            # text.setText(' ' + str(i))
            # textNodePath = self.spheres[i].attachNewNode(text)
            # textNodePath.setScale(10)
            # textNodePath.setTwoSided(True)
            # textNodePath.setPos(-10, 0, 0)
            # textNodePath.setHpr(180, 0, 0)
        # Red sphere representing the fish itself.
        # self.sphere_fish = self.loader.loadModel("sphere-highpoly.egg")
        self.sphere_fish = self.loader.loadModel("sphere.egg")
        self.sphere_fish.reparentTo(self.render)
        self.sphere_fish.setScale(35, 35, 35)
        self.sphere_fish.setColor(1, 0, 0)
        self.sphere_fish.setTransparency(0)
        self.sphere_fish.setAlphaScale(.9)
        # Tiny marker placed behind the fish; the view cone looks at it.
        self.fish_uvec = self.loader.loadModel("sphere-highpoly")
        self.fish_uvec.reparentTo(self.render)
        self.fish_uvec.setScale(.01, .01, .01)
        self.fish_uvec.setColor(1, 1, 1)
        # Add the spinCameraTask procedure to the task manager.
        self.iteration = 0
        self.complete = False
        self.taskMgr.add(self.movepara, "movepara")

    # Define a procedure to move the camera.
    def exitmodel(self):
        """Quit the viewer (bound to the escape key)."""
        # self.closeWindow(self.win)
        # self.taskMgr.add(sys.exit, "sys.exit")
        # self.userExit()
        # self.destroy()
        sys.exit()
        # sys.exit()
        # self.userExit()
        # self.destroy()
        # sys.exit()

    def movepara(self, task):
        """Per-render-frame task: advance the animation one step, updating
        paramecium spheres, the fish sphere, its view cone, and the strike
        banner. Returns Task.cont so Panda3D keeps scheduling it.
        """
        floor_slowdown = 2  # each data frame is shown for this many render frames
        # NOTE(review): np.int was removed in NumPy >= 1.24; needs plain int there.
        curr_frame = np.floor(self.iteration / floor_slowdown).astype(np.int)
        if curr_frame % 20 == 0:
            print curr_frame
        # Clamp to the final frame; also freeze there after a strike.
        if curr_frame >= len(self.fish_position):
            curr_frame = len(self.fish_position) - 1
        if self.complete:
            curr_frame = len(self.fish_position) - 1
        para_positions = self.para_positions[:, curr_frame]
        fish_position = self.fish_position[curr_frame]
        fish_orientation = self.fish_orientation[curr_frame]
        # Positions are flat x,y,z triples; sphere index is i/3 (py2 int div).
        for i in np.arange(0, self.numpara, 3):
            x = para_positions[i]
            y = para_positions[i+1]
            z = para_positions[i+2]
            if not np.isnan(x) and not np.isnan(y) and not np.isnan(z):
                self.spheres[i/3].show()
                self.spheres[i/3].setPos(x, y, z)
            else:
                # NaN coordinates: the paramecium is untracked this frame.
                self.spheres[i/3].hide()
        x_fish = fish_position[0]
        y_fish = fish_position[1]
        z_fish = fish_position[2]
        # Offsets along the orientation vector for cone placement (100)
        # and the behind-the-fish lookAt marker (500).
        correction = 100
        correction_x = fish_orientation[0]*correction
        correction_y = fish_orientation[1]*correction
        correction_z = fish_orientation[2]*correction
        ux = fish_orientation[0]*500
        uy = fish_orientation[1]*500
        uz = fish_orientation[2]*500
        for i in range(int(self.numpara/3)):
            self.spheres[i].lookAt(self.sphere_fish)
        self.sphere_fish.setPos(x_fish, y_fish, z_fish)
        # First strike frame: show the banner once, then freeze playback.
        if self.strikelist[curr_frame] and not self.complete:
            print("STRIKE!!!!!!")
            text = pandac.PandaModules.TextNode('node name')
            text.setText('STRIKE!!')
            textNodePath = self.render.attachNewNode(text)
            textNodePath.setScale(200)
            textNodePath.setTwoSided(True)
            textNodePath.setPos(1200, -900, 900)
            textNodePath.setHpr(180, 0, 0)
            textNodePath.setColor(255, 0, 0)
            self.complete = True
        self.fish_uvec.setPos(x_fish - ux, y_fish - uy, z_fish - uz)
        self.fishcone.setPos(x_fish + correction_x,
                             y_fish + correction_y,
                             z_fish + correction_z)
        # Cone points opposite the marker, i.e. along the fish's heading.
        self.fishcone.lookAt(self.fish_uvec)
        if self.iteration == self.numframes * floor_slowdown:
            # self.iteration = 0
            pass
        else:
            self.iteration += 1
        return Task.cont
# Build the scene and enter Panda3D's main loop (blocks until exit).
app = MyApp()
app.run()
|
import sys, os
import random
import numpy as np
import bz2
sys.path.append(os.getcwd())
from nltk.stem import WordNetLemmatizer
from visual_embeddings_reader import VisualEmbeddingsReader
class Batcher:
    """Mini-batch iterator over MS-COCO image/caption sample pairs.

    Pairs each image's visual embedding (via `VisualEmbeddingsReader`) with a
    mean-pooled word-embedding vector of one of its captions.
    """

    def __init__(self,
                 captions_file,
                 visual_file,
                 we_dim,
                 batch_size,
                 lemmatize,
                 model):
        """
        :param captions_file: bz2-compressed TSV of (imageID, _, sentence) rows
        :param visual_file: file readable by VisualEmbeddingsReader
        :param we_dim: dimensionality of the word embeddings in `model`
        :param batch_size: number of samples returned per `next()` call
        :param lemmatize: whether to lemmatize captions while reading them
        :param model: mapping word -> embedding vector
        """
        self._captions = self.read_mscococaptions(captions_file, lemmatize)
        self._visual = VisualEmbeddingsReader(visual_file)
        self._batch_size = batch_size
        self.we_dim = we_dim
        self.epoch = 0  # automatically increases when all examples have been seen
        self._model = model
        self._samples = self._get_samples_coords()
        self._offset = 0

    def _get_samples_coords(self):
        """Return all sample coordinates,
        i.e., [(img_id0, cap0), (img_id0, cap1), ..., (img_idN, cap4)]."""
        all_coords = []
        for img_id in self._visual.visual_embeddings.keys():
            all_coords += [(img_id, cap_id) for cap_id in self.caption_ids(img_id)]
        return all_coords

    def next(self):
        """Return the next batch as four parallel lists:
        (img_labels, caption_positions, pooled_caption_embeddings, visual_embeddings).

        When the sample list is exhausted, `epoch` is incremented and the
        samples are reshuffled before continuing.
        """
        batch_samples = self._samples[self._offset:self._offset + self._batch_size]
        self._offset += self._batch_size
        if self._offset > len(self._samples):
            self._offset = 0
            self.epoch += 1
            random.shuffle(self._samples)
        if not batch_samples:
            # The slice fell entirely past the end; restart from offset 0.
            return self.next()
        img_labels, caps_pos, pooled_embeddings, visual_embeddings = [], [], [], []
        for img_id, cap_pos in batch_samples:
            img_labels.append(img_id)
            caps_pos.append(cap_pos)
            pooled_embeddings.append(self.pool_sentence(self._captions[img_id][cap_pos]))
            visual_embeddings.append(self._visual.get(img_id))
        return img_labels, caps_pos, pooled_embeddings, visual_embeddings

    def get_caption_txt(self, img_id, cap_offset):
        """Return one caption string of an image."""
        return self._captions[img_id][cap_offset]

    def get_captions_txt(self, img_id):
        """Return all caption strings of an image."""
        return self._captions[img_id]

    def num_captions(self, img_id):
        """Return how many captions an image has."""
        return len(self._captions[img_id])

    def caption_ids(self, img_id):
        """Return the valid caption offsets for an image."""
        return range(self.num_captions(img_id))

    def pool_sentence(self, sentence):
        """Mean-pool the embeddings of the in-vocabulary words of `sentence`.

        Returns a zero vector of length `we_dim` when no word is covered by
        the model. (Bug fix: the original returned the scalar `1` in that
        case, breaking the expected vector shape downstream.)
        """
        pooled = np.zeros(self.we_dim)
        items = 0
        for w in sentence.split():
            if w in self._model:
                pooled += self._model[w]
                items += 1
        if not items:
            print('warning: no model found for sentence %s.' % sentence)
            return pooled  # zero vector keeps the return shape consistent
        return pooled / items

    def read_mscococaptions(self, captions_file, lemmatize=False):
        """Read a bz2-compressed TSV captions file into {imageID: [sentence, ...]}.

        Row format: imageID <TAB> <ignored> <TAB> sentence. Sentences are
        lower-cased with the trailing newline stripped.
        """
        lemmatizer = WordNetLemmatizer() if lemmatize else None
        print("Reading captions file <%s>" % captions_file)
        captions = dict()
        with bz2.BZ2File(captions_file, 'r', buffering=10000000) as fin:
            for line in fin:
                line = line.decode("utf-8")
                fields = line.split("\t")
                imageID = int(fields[0])
                sentence = fields[2][:-1].lower()
                if lemmatize:
                    # NOTE(review): lemmatize() is applied to the whole
                    # sentence, but WordNetLemmatizer lemmatizes single
                    # words — confirm intent.
                    sentence = lemmatizer.lemmatize(sentence)
                if imageID not in captions:
                    captions[imageID] = []
                captions[imageID].append(sentence)
        return captions
|
import os
import numpy as np
import pickle as pkl
from skimage import io,transform
import random
import math
# read and reshape the samples' images and save their gray values as features, meanwhile, give each of them a label
def get_img_features(path, label):
    """Load every image file under `path`, normalise it, and pair it with `label`.

    Each image is read as greyscale, resized to 48x64, cropped to the central
    37x50 window, and min-max normalised to [0, 1].

    :param path: directory (with trailing slash) containing the image files
    :param label: label array appended once per image
    :return: (list of feature arrays, list of labels), or (0, 0) on EOFError
    """
    features = []
    labels = []
    count = 1
    for entry in os.listdir(path):
        if not os.path.isfile(path + entry):
            continue  # skip sub-directories
        try:
            img = io.imread(path + entry, as_grey=True)
            img = transform.resize(img, (48, 64))
            img = img[6:43, 8:58]
            img = (img - img.min()) / (img.max() - img.min())
        except EOFError:
            # Truncated/corrupt file: abort the whole scan.
            print('EOFError happened at ', count, ' ', entry)
            return 0, 0
        features.append(np.array(img))
        labels.append(label)
        count = count + 1
        if count % 100 == 0:
            print('Samples: ', count, ' ', entry)
    print('Done!')
    return features, labels
# data of positive
# Load positive samples (label [1, 0]), shuffle, then split 80/20 train/test.
path='new_data/positive/1000_500/img/'
data,label=get_img_features(path,np.array([1,0],dtype=int))
pos_data=list(zip(data,label))
random.shuffle(pos_data)
data[:],label[:]=zip(*pos_data)
pos_data_train=tuple((data[0:math.ceil(len(data)*0.8)],label[0:math.ceil(len(data)*0.8)]))
pos_data_test=tuple((data[math.ceil(len(data)*0.8):len(data)],label[math.ceil(len(data)*0.8):len(data)]))
# data of negative
# Same pipeline for negative samples (label [0, 1]).
path='new_data/negative/1000_500/img/'
data,label=get_img_features(path,np.array([0,1],dtype=int))
neg_data=list(zip(data,label))
random.shuffle(neg_data)
data[:],label[:]=zip(*neg_data)
neg_data_train=tuple((data[0:math.ceil(len(data)*0.8)],label[0:math.ceil(len(data)*0.8)]))
neg_data_test=tuple((data[math.ceil(len(data)*0.8):len(data)],label[math.ceil(len(data)*0.8):len(data)]))
# training data
# Merge the two training halves, shuffle features/labels jointly, and pickle.
train_data=neg_data_train[0]+pos_data_train[0]
train_label=neg_data_train[1]+pos_data_train[1]
train=list(zip(train_data,train_label))
random.shuffle(train)
train_data[:],train_label[:]=zip(*train)
img_features_train=tuple((train_data,train_label))
pkl.dump(img_features_train,open('new_data/new_data_pkl/train_1000_500.pkl','wb'))
# test data
# Same merge/shuffle/pickle for the held-out 20%.
test_data=neg_data_test[0]+pos_data_test[0]
test_label=neg_data_test[1]+pos_data_test[1]
test=list(zip(test_data,test_label))
random.shuffle(test)
test_data[:],test_label[:]=zip(*test)
img_features_test=tuple((test_data,test_label))
pkl.dump(img_features_test,open('new_data/new_data_pkl/test_1000_500.pkl','wb'))
|
from segments.errors import replace
class TreeNode(object):
    """
    Private class that creates the tree data structure from the orthography profile for
    parsing.
    """

    def __init__(self, char, sentinel=False):
        self.char = char          # character held at this node
        self.children = {}        # mapping char -> child TreeNode
        self.sentinel = sentinel  # True if a grapheme ends at this node
class Tree(object):
    """Prefix tree (trie) over graphemes, used for greedy longest-match parsing."""

    def __init__(self, graphemes):
        def _multigraph(node, line):
            # Internal function to add a multigraph starting at node.
            for char in line:
                node = node.children.setdefault(char, TreeNode(char))
            # Mark the final node as a valid stopping point for this grapheme.
            node.sentinel = True

        self.root = TreeNode('', sentinel=True)
        for grapheme in graphemes:
            _multigraph(self.root, grapheme)

    def parse(self, line, error=replace):
        """Parse `line` into graphemes; each unparseable leading character is
        passed through `error` (default `segments.errors.replace`) and parsing
        resumes on the remainder."""
        res, idx = self._parse(self.root, line, 0)
        rem = line[idx:]
        while rem:
            # Chop off one character and try parsing the remainder:
            res.append(error(rem[0]))
            rem = rem[1:]
            r, i = self._parse(self.root, rem, 0)
            res.extend(r)
            rem = rem[i:]
        return res

    def _parse(self, root, line, idx):
        """
        :param root: Tree node.
        :param line: String to parse.
        :param idx: Global counter of characters parsed.
        :return: (list of parsed graphemes, incremented character count)
        """
        # Base (or degenerate..) case.
        if len(line) == 0:
            return [], idx
        parse = []
        curr = 0
        node = root
        cidx = idx
        # Walk down the trie one character at a time; every sentinel reached
        # restarts a recursive parse of the remainder.
        while curr < len(line):
            node = node.children.get(line[curr])
            curr += 1
            if not node:
                break
            if node.sentinel:
                subparse, cidx = self._parse(root, line[curr:], idx + curr)
                # Always keep the latest valid parse, which will be
                # the longest-matched (greedy match) graphemes.
                parse = [line[:curr]]
                parse.extend(subparse)
        # `idx` is only advanced when a valid parse was found.
        if parse:
            idx = cidx
        return parse, idx
|
from django.conf.urls import url
from views import *
# URL routes for this app (legacy Django `url()` style):
#   ''             -> index (named 'my_index')
#   'newword'      -> newword
#   'clearsession' -> clearsession (prefix match, no trailing $)
urlpatterns = [
    url(r'^$', index, name = 'my_index'),
    url(r'^newword$', newword ),
    url(r'^clearsession', clearsession)
]
|
"""
Handle API response data.
"""
import re
from miner_manager.models import Gas
class InvalidRecord(Exception):
    """Raised when an API response value is negative or not comparable to 0."""
    pass
class ResponseHandler:
    """Validate, transform, and remap a gas-price API response.

    The raw response maps camelCase keys to numbers; the price fields are
    reported in tenths of a unit. `to_snake_case` validates the payload,
    divides the price fields by 10, and snake_cases the keys;
    `create_record` builds an unsaved `Gas` model instance from the result.
    """

    def __init__(self, response) -> None:
        self.response = response
        # Keys that are not scalar metrics; skipped by validation and output.
        self.excluded = ['gasPriceRange']
        # Price fields reported in tenths of a unit; divided by 10 below.
        self.transform_fields = ['fast', 'fastest', 'safeLow', 'average']

    def _ensure_valid_data(self):
        """Raise InvalidRecord if any non-excluded value fails validation."""
        keys = [key for key in self.response.keys() if key not in self.excluded]
        for key in keys:
            if not self._is_valid_record(self.response[key]):
                raise InvalidRecord

    def transform_data(self):
        """Divide every price field by 10, in place."""
        for key, value in self.response.items():
            if key in self.transform_fields:
                self.response[key] = self._transform(value)

    @staticmethod
    def _transform(data):
        # Bug fix (dead code removed): dividing *by* 10 can never raise
        # ZeroDivisionError, so the original try/except around this line
        # was unreachable.
        return data / 10

    @staticmethod
    def _is_valid_record(record):
        """Return True for non-negative values; raise InvalidRecord for
        values that cannot be compared to 0 (e.g. None, strings vs int)."""
        try:
            return record >= 0
        except TypeError:
            raise InvalidRecord

    @staticmethod
    def _camel_to_snake(name):
        """Convert a camelCase `name` to snake_case."""
        name = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
        return re.sub('([a-z0-9])([A-Z])', r'\1_\2', name).lower()

    @staticmethod
    def remap():
        """Map snake_cased API field names to `Gas` model field names."""
        return {
            'fast': 'gas_price_fast',
            'fastest': 'gas_price_fastest',
            'safe_low': 'gas_price_safe_low',
            'average': 'gas_price_average',
            'avg_wait': 'average_wait',
        }

    def to_snake_case(self):
        """Validate + transform the response, return it with snake_case keys
        (excluded keys dropped)."""
        self._ensure_valid_data()
        self.transform_data()
        return {self._camel_to_snake(key): value for key, value
                in self.response.items() if key not in self.excluded}

    def create_record(self):
        """Return an unsaved `Gas` model instance built from the response."""
        remapped = {self.remap().get(key, key): value for key, value
                    in self.to_snake_case().items()}
        return Gas(**remapped)
|
import pytest
@pytest.fixture(scope='function')
def environ(request, monkeypatch):
    """
    Fixture to define environment variables before Sphinx App is created.
    The test case needs to be marked as
    ``@pytest.mark.environ(VARIABLE='value')`` with all the environment
    variables wanted to define. Also, the test has to use this fixture before
    the ``app`` once to have effect.
    This idea is borrowed from,
    https://github.com/sphinx-doc/sphinx/blob/3f6565df6323534e69d797003d8cb20e99c2c255/sphinx/testing/fixtures.py#L30
    """
    if hasattr(request.node, 'iter_markers'):  # pytest-3.6.0 or newer
        markers = request.node.iter_markers('environ')
    else:
        markers = request.node.get_marker('environ')
    pargs = {}
    kwargs = {}
    if markers is not None:
        # to avoid stacking positional args
        # Markers are applied closest-first, so iterate in reverse to let the
        # outermost marker win.
        for info in reversed(list(markers)):
            for i, a in enumerate(info.args):
                pargs[i] = a
            kwargs.update(info.kwargs)
    # NOTE(review): positional args are collected into `pargs` but never used
    # afterwards — only keyword arguments become environment variables.
    for name, value in kwargs.items():
        monkeypatch.setenv(name, value)
|
import json
import time
from collections import defaultdict, deque
from typing import Optional, Iterable, List
# Prefer a monotonic clock for measuring durations; it is unaffected by
# system clock adjustments.
if hasattr(time, "monotonic"):
    get_time = time.monotonic
else:
    # Python2 just won't have accurate time durations
    # during clock adjustments, like leap year, etc.
    get_time = time.time
from src.autoks.statistics import StatBookCollection, Statistic
from src.autoks.tracking import get_best_n_operands, get_model_scores, get_best_n_hyperparams, get_cov_dists, \
get_diversity_scores, base_kern_freq, get_n_hyperparams, get_n_operands, update_stat_book
from src.evalg.serialization import Serializable
# Adapted from Keras' callbacks
class Callback:
    """Base class for model-search callbacks (adapted from Keras).

    Subclasses override the `on_*` hooks they care about; every hook here is
    a no-op. `set_params`/`set_model` are invoked by the owning
    `CallbackList` to share the search parameters and the model selector.
    """

    def __init__(self):
        self.model = None  # the model selector; set via set_model()

    def set_params(self, params):
        self.params = params

    def set_model(self, model):
        self.model = model

    def on_generation_begin(self, gen: int, logs: Optional[dict] = None):
        """Called at the start of a generation."""

    def on_generation_end(self, gen: int, logs: Optional[dict] = None):
        """Called at the end of a generation."""

    def on_evaluate_all_begin(self, logs: Optional[dict] = None):
        """Called at the start of a call to `evaluate_models`."""

    def on_evaluate_all_end(self, logs: Optional[dict] = None):
        """Called at the end of a call to `evaluate_models`."""

    def on_evaluate_begin(self, logs: Optional[dict] = None):
        """Called before evaluating a single model."""

    def on_evaluate_end(self, logs: Optional[dict] = None):
        """Called after evaluating a single model."""

    def on_train_begin(self, logs: Optional[dict] = None):
        """Called at the beginning of training."""

    def on_train_end(self, logs: Optional[dict] = None):
        """Called at the end of training."""

    def on_propose_new_models_begin(self, logs: Optional[dict] = None):
        """Called at the beginning of new model proposals."""

    def on_propose_new_models_end(self, logs: Optional[dict] = None):
        """Called at the end of new model proposals."""
class CallbackList:
    """Wraps a list of callbacks and fans every hook out to all of them."""

    def __init__(self,
                 callbacks: Optional[Iterable[Callback]] = None,
                 queue_length: int = 10):
        """
        :param callbacks: initial callbacks to register (optional)
        :param queue_length: max length of the rolling timing queues
        """
        self.callbacks = list(callbacks) if callbacks else []
        self.queue_length = queue_length
        self.params = {}
        self.model = None
        self._reset_model_eval_timing()

    def _reset_model_eval_timing(self):
        # Clear the rolling evaluation-timing state.
        self._delta_t_model_eval = 0.
        self._delta_ts = defaultdict(lambda: deque([], maxlen=self.queue_length))

    def append(self, callback: Callback):
        self.callbacks.append(callback)

    def set_params(self, params):
        self.params = params
        for cb in self.callbacks:
            cb.set_params(params)

    def set_model(self, model):
        self.model = model
        for cb in self.callbacks:
            cb.set_model(model)

    def _broadcast(self, hook: str, *args, logs: Optional[dict] = None):
        # Invoke the named hook (with a never-None logs dict) on every callback.
        logs = logs or {}
        for cb in self.callbacks:
            getattr(cb, hook)(*args, logs=logs)

    def on_generation_begin(self, generation: int, logs: Optional[dict] = None):
        """Calls the `on_generation_begin` methods of its callbacks."""
        self._broadcast('on_generation_begin', generation, logs=logs)
        self._reset_model_eval_timing()

    def on_generation_end(self, generation: int, logs: Optional[dict] = None):
        """Calls the `on_generation_end` methods of its callbacks."""
        self._broadcast('on_generation_end', generation, logs=logs)

    def on_train_begin(self, logs: Optional[dict] = None):
        """Calls the `on_train_begin` methods of its callbacks."""
        self._broadcast('on_train_begin', logs=logs)

    def on_train_end(self, logs: Optional[dict] = None):
        """Calls the `on_train_end` methods of its callbacks."""
        self._broadcast('on_train_end', logs=logs)

    def on_evaluate_all_begin(self, logs: Optional[dict] = None):
        """Calls the `on_evaluate_all_begin` methods of its callbacks."""
        self._broadcast('on_evaluate_all_begin', logs=logs)

    def on_evaluate_all_end(self, logs: Optional[dict] = None):
        """Calls the `on_evaluate_all_end` methods of its callbacks."""
        self._broadcast('on_evaluate_all_end', logs=logs)

    def on_evaluate_begin(self, logs: Optional[dict] = None):
        """Calls the `on_evaluate_begin` methods of its callbacks."""
        self._broadcast('on_evaluate_begin', logs=logs)

    def on_evaluate_end(self, logs: Optional[dict] = None):
        """Calls the `on_evaluate_end` methods of its callbacks."""
        self._broadcast('on_evaluate_end', logs=logs)

    def on_propose_new_models_begin(self, logs: Optional[dict] = None):
        """Calls the `on_propose_new_models_begin` methods of its callbacks."""
        self._broadcast('on_propose_new_models_begin', logs=logs)

    def on_propose_new_models_end(self, logs: Optional[dict] = None):
        """Calls the `on_propose_new_models_end` methods of its callbacks."""
        self._broadcast('on_propose_new_models_end', logs=logs)

    def __iter__(self):
        return iter(self.callbacks)
class BaseLogger(Callback):
    """Callback that accumulates generational averages of metrics.
    This callback is automatically applied to every model selector.
    Attributes:
        stateful_metrics: An optional iterable of string names of metrics
            that should *not* be averaged over an epoch.
            Metrics in this list will be logged as-is in `on_generation_end`.
            All others will be averaged in `on_generation_end`.
    """

    def __init__(self, stateful_metrics: Optional[Iterable[str]] = None):
        super().__init__()
        if stateful_metrics:
            self.stateful_metrics = set(stateful_metrics)
        else:
            self.stateful_metrics = set()
        self.seen = 0     # number of models seen in the current generation
        self.totals = {}  # metric name -> running (size-weighted) total

    def on_generation_begin(self, gen: int, logs: Optional[dict] = None):
        # Reset accumulators at the start of each generation.
        self.seen = 0
        self.totals = {}

    def on_evaluate_all_end(self, logs: Optional[dict] = None):
        logs = logs or {}
        model_group_size = logs.get('size', 0)
        self.seen += model_group_size
        # NOTE(review): every log entry — including 'size' itself — is
        # accumulated, weighted by the group size; confirm that is intended.
        for k, v in logs.items():
            if k in self.stateful_metrics:
                self.totals[k] = v
            else:
                if k in self.totals:
                    self.totals[k] += v * model_group_size
                else:
                    self.totals[k] = v * model_group_size

    def on_generation_end(self, gen: int, logs: Optional[dict] = None):
        if logs is not None:
            for k in self.params['metrics']:
                if k in self.totals:
                    # Make value available to next callbacks.
                    if k in self.stateful_metrics:
                        logs[k] = self.totals[k]
                    else:
                        # Average over the models seen this generation.
                        # NOTE(review): divides by self.seen — raises
                        # ZeroDivisionError if only zero-size groups were seen.
                        logs[k] = self.totals[k] / self.seen
class History(Callback):
    """Callback that records events into a `History` object.

    Automatically applied to every model selector; the selector's `train`
    method returns this object. Afterwards `generation` holds the generation
    indices and `history` maps each metric name to one value per generation.
    """

    def on_train_begin(self, logs: Optional[dict] = None):
        # Fresh containers at the start of every training run.
        self.generation = []
        self.history = {}

    def on_generation_end(self, generation: int, logs: Optional[dict] = None):
        self.generation.append(generation)
        for key, value in (logs or {}).items():
            self.history.setdefault(key, []).append(value)
class ModelSearchLogger(Callback, Serializable):
    """Records model-search statistics into a `StatBookCollection`.

    Three stat books are maintained: per-evaluation scores (`evaluations`),
    statistics of the active model set (`active_set`), and statistics of
    newly proposed models (`expansion`).
    """

    def __init__(self):
        super().__init__()
        # statistics used for plotting
        self.n_hyperparams_name = 'n_hyperparameters'
        self.n_operands_name = 'n_operands'
        self.score_name = 'score'
        self.cov_dists_name = 'cov_dists'
        self.diversity_scores_name = 'diversity_scores'
        self.best_stat_name = 'best'
        # separate these!
        self.evaluations_name = 'evaluations'
        self.active_set_name = 'active_set'
        self.expansion_name = 'expansion'
        self.stat_book_names = [self.evaluations_name, self.expansion_name, self.active_set_name]
        self.base_kern_freq_names = []
        self.stat_book_collection = StatBookCollection()

    def set_stat_book_collection(self, base_kernel_names: List[str]):
        """Create the stat books and register the statistics they track."""
        self.base_kern_freq_names = [base_kern_name + '_frequency' for base_kern_name in base_kernel_names]
        # All stat books track these variables
        shared_multi_stat_names = [self.n_hyperparams_name, self.n_operands_name] + self.base_kern_freq_names
        # raw value statistics
        base_kern_stat_funcs = [base_kern_freq(base_kern_name) for base_kern_name in base_kernel_names]
        shared_stats = [get_n_hyperparams, get_n_operands] + base_kern_stat_funcs
        self.stat_book_collection.create_shared_stat_books(self.stat_book_names, shared_multi_stat_names, shared_stats)

        sb_active_set = self.stat_book_collection.stat_books[self.active_set_name]
        sb_active_set.add_raw_value_stat(self.score_name, get_model_scores)
        sb_active_set.add_raw_value_stat(self.cov_dists_name, get_cov_dists)
        sb_active_set.add_raw_value_stat(self.diversity_scores_name, get_diversity_scores)
        sb_active_set.multi_stats[self.n_hyperparams_name].add_statistic(Statistic(self.best_stat_name,
                                                                                   get_best_n_hyperparams))
        sb_active_set.multi_stats[self.n_operands_name].add_statistic(Statistic(self.best_stat_name,
                                                                                get_best_n_operands))

        sb_evals = self.stat_book_collection.stat_books[self.evaluations_name]
        sb_evals.add_raw_value_stat(self.score_name, get_model_scores)

    def on_evaluate_all_end(self, logs: Optional[dict] = None):
        """Update active-set statistics after a full evaluation round."""
        logs = logs or {}
        models = logs.get('gp_models', [])
        x = logs.get('x', None)
        grammar = self.model.grammar
        stat_book = self.stat_book_collection.stat_books[self.active_set_name]
        if models:
            update_stat_book(stat_book, models, x, grammar.base_kernel_names, grammar.n_dims)

    def on_evaluate_end(self, logs: Optional[dict] = None):
        """Update per-evaluation statistics for the single model just scored."""
        logs = logs or {}
        model = logs.get('gp_model', [])
        # Bug fix: the original `model = [model] or []` is always truthy (a
        # one-element list), so a missing 'gp_model' was recorded as [[]].
        # Only wrap the model in a list when one was actually provided.
        model = [model] if model else []
        x = logs.get('x', None)
        grammar = self.model.grammar
        stat_book = self.stat_book_collection.stat_books[self.evaluations_name]
        if model:
            update_stat_book(stat_book, model, x, grammar.base_kernel_names, grammar.n_dims)

    def on_propose_new_models_end(self, logs: Optional[dict] = None):
        """Update expansion statistics for newly proposed models."""
        logs = logs or {}
        models = logs.get('gp_models', [])
        x = logs.get('x', None)
        grammar = self.model.grammar
        stat_book = self.stat_book_collection.stat_books[self.expansion_name]
        if models:
            update_stat_book(stat_book, models, x, grammar.base_kernel_names, grammar.n_dims)

    def to_dict(self) -> dict:
        """Serialize, including the stat book collection and frequency names."""
        output_dict = super().to_dict()
        output_dict["stat_book_collection"] = self.stat_book_collection.to_dict()
        output_dict["base_kern_freq_names"] = self.base_kern_freq_names
        return output_dict

    @classmethod
    def _build_from_input_dict(cls, input_dict: dict):
        # Pop the fields handled here so the base class sees only its own.
        stat_book_collection = StatBookCollection.from_dict(input_dict.pop("stat_book_collection"))
        base_kern_freq_names = input_dict.pop("base_kern_freq_names")
        tracker = super()._build_from_input_dict(input_dict)
        tracker.stat_book_collection = stat_book_collection
        tracker.base_kern_freq_names = base_kern_freq_names
        return tracker

    def save(self, output_file_name: str):
        """Persist only the stat book collection."""
        self.stat_book_collection.save(output_file_name)

    @staticmethod
    def load(output_file_name: str):
        """Load a logger whose stat book collection was saved with `save`."""
        mst = ModelSearchLogger()
        sbc = StatBookCollection.load(output_file_name)
        mst.stat_book_collection = sbc
        return mst
class GCPCallback(Callback, Serializable):
    """Google Cloud Platform (GCP) callback.
    TODO: Implement experiment saving on GCP.
    """

    def __init__(self):
        super().__init__()

    def on_train_end(self, logs: Optional[dict] = None):
        # Placeholder: saving to GCP is not implemented yet.
        pass
class CometCallback(Callback, Serializable):
    """Comet ML callback.

    Streams search progress (per-evaluation fitness, generation durations)
    and the model-selector configuration to a Comet experiment object.
    """

    def __init__(self,
                 experiment,
                 log_params: bool = True,
                 log_metrics: bool = True,
                 log_graph: bool = True):
        """
        :param experiment: a comet_ml Experiment-like object
        :param log_params: log model-selector parameters at train start
        :param log_metrics: log fitness/duration metrics during the search
        :param log_graph: log the model's JSON representation as the "graph"
        """
        super().__init__()
        self.experiment = experiment
        self.log_params = log_params
        self.log_metrics = log_metrics
        self.log_graph = log_graph
        self.generation_start_time = None   # set in on_generation_begin
        self.our_step = 0                   # mirrors the model's n_evals
        self.best_fitness = float('-inf')   # best score seen so far

    def on_train_begin(self, logs: Optional[dict] = None):
        if self.log_graph:
            # Serialize the model selector itself as the experiment "graph".
            json_re = json.dumps(self.model.to_dict(), sort_keys=True, indent=4)
            self.experiment.set_model_graph(json_re)
        if self.log_params:
            # Callback doesn't set this parameter at creation by default.
            if hasattr(self, "params") and self.params:
                for k, v in self.params.items():
                    if k != "metrics":
                        self.experiment.log_parameter(k, v)
            self.experiment.log_parameters({'optimizer': self.model.optimizer,
                                            'n_restarts_optimizer': self.model.n_restarts_optimizer})

    def on_evaluate_end(self, logs: Optional[dict] = None):
        if self.log_metrics:
            logs = logs or {}
            model = logs.get('gp_model', None)
            self.our_step = self.model.n_evals
            # Only evaluated models carry a score worth logging.
            if model and model.evaluated:
                new_score = model.score
                self.best_fitness = max(self.best_fitness, new_score)
                self.experiment.log_metrics({
                    "fitness": model.score,
                    "best_fitness": self.best_fitness
                },
                    step=self.our_step)

    def on_generation_begin(self, gen: int, logs: Optional[dict] = None):
        self.experiment.set_epoch(gen)
        self.generation_start_time = get_time()

    def on_generation_end(self, gen: int, logs: Optional[dict] = None):
        if self.log_metrics:
            if self.generation_start_time is not None:
                self.experiment.log_metric("generation_duration", get_time() - self.generation_start_time,
                                           step=self.our_step)
                self.generation_start_time = None
            self.experiment.log_epoch_end(gen, step=self.our_step)
|
import willie
import random
# Wikipedia pages whose titles contain "Citation" (plus two cetacean jokes);
# one is picked at random for each citation request.
citations = [
    'http://en.wikipedia.org/wiki/Cessna_Citation',
    'http://en.wikipedia.org/wiki/Chevrolet_Citation',
    'http://en.wikipedia.org/wiki/Edsel_Citation',
    'http://en.wikipedia.org/wiki/Citation_(horse)',
    'http://en.wikipedia.org/wiki/Gibson_Citation',
    'http://en.wikipedia.org/wiki/Citation_Boulevard',
    'http://en.wikipedia.org/wiki/Citation_(album)',
    'http://en.wikipedia.org/wiki/Unit_citation',
    'http://en.wikipedia.org/wiki/Case_citation',
    'http://en.wikipedia.org/wiki/Traffic_citation',
    'http://en.wikipedia.org/wiki/Citation',
    'http://en.wikipedia.org/wiki/Cetacean',
    'http://en.wikipedia.org/wiki/List_of_cetaceans',
]

@willie.module.rule('(?:.*[^A-Za-z])?[Cc]itation needed')
@willie.module.commands('citation', 'cite')
def citation(bot, trigger):
    "Replies with a smartypants link whenever someone requests a citation"
    bot.reply(random.choice(citations))

# \x01ACTION ... \x01 is IRC's CTCP framing for /me actions.
@willie.module.rule('(?i)^(?:\x01ACTION)? *(?:stares|glares|pokes).*$')
#@willie.module.rule('^.*[KkCc]ountdown.*$')
def stare_back(bot, trigger):
    # Glare back at whoever stares/glares/pokes.
    bot.msg(trigger.sender,"\x01ACTION glares at "+trigger.nick+"\x01")

@willie.module.rule('(?i)^(?:\x01ACTION)? *(?:slaps|zaps|kicks|hits|smacks|chokes|stabs|suffocates|burns).*$')
def slap_back(bot, trigger):
    # Retaliate against violent actions.
    bot.msg(trigger.sender,"\x01ACTION zaps "+trigger.nick+"\x01")

@willie.module.rule('(?i).*(?:good bot|.*bot ?snack).*$')
def snack(bot, trigger):
    # Respond to praise or bot snacks.
    bot.reply('Thank you! Glad to be of service.')

@willie.module.rule('(?i).*(?:bad bot|bot ?smack).*$')
def bbf(bot, trigger):
    # Sulk when scolded.
    bot.say(':[')

@willie.module.rule('(?i).*(?:needs? a hug|hug me).*$')
def hug(bot,trigger):
    # Hug whoever asks for one.
    bot.msg(trigger.sender,"\x01ACTION hugs "+trigger.nick+"\x01")
|
import pytest
from bravado.swagger_model import load_file
from bravado.client import SwaggerClient, RequestsClient
from requests.utils import parse_header_links
import urlparse
class Client(object):
    """Test helper wrapping a bravado SwaggerClient for the integrations API
    (Python 2 — uses the `urlparse` module)."""

    # bravado configuration: return (result, response) pairs and validate
    # responses only.
    config = {
        'also_return_response': True,
        'validate_responses': True,
        'validate_requests': False,
        'validate_swagger_spec': False,
        'use_models': True,
    }

    def setup(self):
        # Build the client from the local spec; TLS verification is disabled
        # and the API URL is taken from the pytest --host/--api options.
        self.http_client = RequestsClient()
        self.http_client.session.verify = False
        self.client = SwaggerClient.from_spec(load_file('integrations_api.yml'), config=self.config, http_client=self.http_client)
        self.client.swagger_spec.api_url = "http://%s/api/%s/" % (pytest.config.getoption("host"), pytest.config.getoption("api"))

    def get_all_devices(self, page=1):
        """Fetch one page of devices and recurse while a rel="next" link exists.

        NOTE(review): only the FIRST entry of the Link header is inspected —
        if the first link is not rel="next" (e.g. "prev"), pagination stops
        early; and an empty Link header makes this return None. Confirm the
        API always lists "next" first.
        """
        r, h = self.client.devices.get_devices(page=page).result()
        for i in parse_header_links(h.headers["link"]):
            if i["rel"] == "next":
                # Extract the page number from the next-link's query string.
                page = int(dict(urlparse.parse_qs(urlparse.urlsplit(i["url"]).query))["page"][0])
                return r + self.get_all_devices(page=page)
            else:
                return r
|
# Generated by Django 2.2.6 on 2019-10-07 00:15
from django.db import migrations, models
# Auto-generated migration: adds a `send` flag and a required `url` field to
# the agents.RequestDetail model.
class Migration(migrations.Migration):

    dependencies = [
        ('agents', '0008_auto_20191006_2337'),
    ]

    operations = [
        migrations.AddField(
            model_name='requestdetail',
            name='send',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='requestdetail',
            name='url',
            # default=1 backfills existing rows only; dropped afterwards.
            field=models.URLField(default=1, max_length=500),
            preserve_default=False,
        ),
    ]
|
def concreta_busqueda(nombre, dic):
    """Look up a name in the phone book and print 'name=value' or 'Not found'.

    :param nombre: list of name tokens; concatenated (no separator) as the key
    :param dic: mapping from name to phone number
    """
    # Bug-fix/cleanup: the original assigned `elemento = ''` and immediately
    # overwrote it — dead statement removed.
    elemento = ''.join(map(str, nombre))
    valor = dic.get(elemento)
    if valor:
        print('{}={}'.format(elemento, valor))
    else:
        print('Not found')
# Read n name/number pairs into a phone book, then answer lookup queries
# until EOF or a blank line.
n = int(input())
dic = {}
busqueda = []  # NOTE(review): never used afterwards
for x in range(n):
    arr = list(input().rstrip().split())
    # setdefault: the first number registered for a name wins.
    dic.setdefault(arr[0],arr[1])
while True:
    try:
        nombre_buscado = input().rstrip().split()
        if(len(nombre_buscado)>0):
            concreta_busqueda(nombre_buscado, dic)
        else:
            break
    except:
        # Bare except: treats EOF (or any other error) as end of queries.
        break
|
# Define name for the player
name = str(input("What is your name? "))
# Game-state flags: what the player has obtained / agreed to so far.
hasKey = False        # warden's/boss's key to the supply room
hasUniform = False    # guard uniform from the supply room
onQuest = False       # accepted the yard boss's fetch quest
hasToothbrush = False # quest item from the bathroom
hasTeddy = False      # quest item from the cafeteria
# This is to define the talk functions and dialogue for each NPC
def beardedprisoner1(name):
    """Print the bearded prisoner's dialogue, addressing the player by `name`."""
    intro = "You approach the bearded prisoner"
    greeting = "The bearded prisoner yawns... 'What was your name again? Oh yeah...,"
    # Hints at the escape route: find guard clothes and walk out.
    story = """
, good morning. This place sucks. I wish I could get outta here like the last guy to escape. He just grabbed some guard
clothes and strolled outta here, pretending to be a guard... I don't even know where he found those clothes."
He was a genius, that guy'"""
    print(intro)
    print(greeting, name, story)
def talk():
    """Offer to approach the bearded prisoner; re-prompts on invalid input."""
    print("There are multiple prisoners in your cell. One is bearded and lying on the bed,"
          " another is sitting against the wall")
    approach = str(input("Would you like to approach the bearded prisoner? (y/n): "))
    approachCheck = approach.lower()
    if approachCheck == 'y':
        beardedprisoner1(name)
    elif approachCheck == "n":
        print("You decide not to talk to the bearded prisoner")
    else:
        print("Error: Invalid input. Only y or n is accepted")
        talk()  # recurse until the player answers y or n
def gamestarttext():
    """Print the intro text, then start the opening cell conversation."""
    print("""You wake up, startled. Shaking your head ,you slowly get up.
You are inside of a prison cell. You have been wrongly convicted of a crime,
you have to escape!""")
    talk()
gamestarttext()

# Each room is [description, north, east, south, west]: slots 1-4 hold the
# room_list index reached by moving that direction, or None for no exit.
room_list = []
# -- initial room (0)
room = ["""You are back in your cell and see the prisoners again. One is bearded and lying on the bed, another is
sitting against the wall. To the south, the door to the hallway has been left open""", None, None, 1, None]
room_list.append(room)
# -- hallway between cell and shower (1)
room = ["""You find yourself at the east end of the hallway next to your cell, and on the north side of the hallway is
your cell, on the south side of the hallway is a door, you could also move east deeper near the middle of the
hallway""", 0, 3, 2, None]
room_list.append(room)
# -- shower (2)
room = ["""You find yourself inside of the bathroom, looking around you notice the mouldy tiles and shower curtains.
On the ground you see a toothbrush with the name "Alpha" scribbled on the neck in what looks like a child's handwriting.
You also see a bar of soap sitting on the broken sink. The door to back the hallway is north""", 1, None, None, None]
room_list.append(room)
# -- hallway in front of supply room (3)
room = ["""You are now near the middle of your hallway closer to your cell. On the north side of the hallway,
there is one door with the words, "SUPPLY ROOM" in bold above it. If you go west, you will approach the hall by your
cell, towards the east you could go further into the hallway""", 4, 5, None, 1]
room_list.append(room)
# -- supply room (4)
room = ["""The supply room is locked, it seems like you need a key""", None, None, 3, None]
room_list.append(room)
# -- hallway between warden's office and cafeteria (5)
room = ["""You are now closer the west end of the hallway away from your cell, you see two doors on both sides of you.
'Wow, this prison is designed terribly', you think to yourself as you examine the doors. To the north is the
Warden's office, and on the other side is the doorway to the cafeteria, the smell of the horrible food wafting in
your direction.""", 6, 8, 7, 3]
room_list.append(room)
# -- warden's office (6)
room = ["""You step into the warden's office, at the far end of the room on his desk are some keys, directly to your
right is the warden's coat on a coat hanger""", None, None, 5, None]
room_list.append(room)
# -- cafeteria (7)
room = ["""Stepping into the cafeteria you are confronted by a sight, you see a rotten apple sitting on the table and
and a plate of food that smells like it has been there for weeks. You also see a teddy bear on the desk with its ear
worn off, there is a small name tag on the bottom that reads the name "Alpha". To the north is the exit back into the
hallway""", 5, None, None, None]
room_list.append(room)
# -- hallway between yard and exit (8)
room = ["""You are now at the west end of the hallway furthest from your cell and find that the exit is to your south.
To the north you also see the doorway that opens into the yard where the other prisoners hang around.""", 9, None, 10,
        5]
room_list.append(room)
# -- yard (9)
room = ["""You tentatively step into the yard, careful not to strut or draw attention from the other prisoners.
Some are grunting while bench pressing in the corner. Others are playing a game of ball in the middle on the
the cracked court. In the distance on the other end of the yard, you see a crowd of prisoners hanging out. The one in
the middle looks like the boss. You think to yourself, you can retreat south back into the hallway or you can go
further north into the yard to where the boss is. But is that a good idea?""", 12, None, 8, None]
room_list.append(room)
# -- alpha (12)
room = ["""You go further north into the yard towards the big boss. He looks you up and down with a smirk on his face.
You are surrounded on your left and right by the fence. And you think of fleeing back south to the first part of the
yard.""", None, None, 9, None]
room_list.append(room)
# -- exit (10)
room = ["""You near the door to the exit, but as you try to do so, a guard behind you sees you and sounds the alarm!!
The guard nears you and grabs your arm and chuckles... "Bad idea buddy". Go back inside.""", 8, None, None, None]
room_list.append(room)
# -- freedom (11) — winning room; only a description, no exits.
room = ["""Wearing the guard uniform you found, you confidently walk out the exit. The guard greets you with a nod,
and continues on with his business."""]
room_list.append(room)
# -- Starting point
current_room = 0
# -- Main Loop
finished = False
while not finished:
    print(room_list[current_room][0])
    # Yard boss ("alpha"): offers the fetch quest, then trades the key.
    if current_room == 12:
        if onQuest is False:
            approach = input("Would you like to approach him? (y/n)")
            approachcheck = approach.lower()
            if approachcheck == 'y':
                print("Hey new guy. So you wanna get out huh?")
                onQuest = True
            elif approachcheck == "n":
                pass
            else:
                print("Error: Invalid input. Only y or n is accepted")
        else:
            if hasToothbrush and hasTeddy:
                print("Thanks (text) Here's the key.")
                hasKey = True
            else:
                print("It doesn't look like you have my stuff...")
    # Ending game if prisoner comes free
    if current_room == 11:
        print("... Thanks for playing " + name + "! You've completed the game.")
    # Supply room: with the key, grab the guard uniform.
    if current_room == 4 and hasKey is True:
        print("You take the guard clothes ")
        hasUniform = True
    if hasUniform is True:
        # NOTE(review): room_list[10][3] is None, so remove(None) deletes the
        # wrong slot and list.insert(None, 11) raises TypeError (list indices
        # must be ints). Presumably the exit route should lead to room 11
        # (freedom) once the uniform is worn — confirm intent and rewrite.
        # Also note this branch re-runs every iteration while hasUniform holds.
        room_list[10].remove(room_list[10][3])
        room_list[3].insert(room_list[10][3], 11)
    if hasKey is True:
        # NOTE(review): same pattern — after remove(), room_list[4][0] is no
        # longer a valid int index, so insert() raises TypeError. Probably
        # intended: room_list[4][0] = "new description ...". `openSupply`
        # is assigned but never read.
        room_list[4].remove(room_list[4][0])
        room_list[4].insert(room_list[4][0], "new description of supply room now open")
        openSupply = True
    # Quest item pickups (only available once the quest has been accepted).
    if onQuest is True and current_room == 2:
        userTake = input("Do you want to take the toothbrush?: ")
        if userTake == "y":
            hasToothbrush = True
        else:
            print("You leave the toothbrush as it is.")
    if onQuest is True and current_room == 7:
        userTake = input("Do you want to take the teddy bear?: ")
        if userTake == "y":
            hasTeddy = True
        else:
            print("You leave the teddy bear as it is.")
    userInput = input("Which direction would you like to go?: ")
    # -- Quit
    if userInput.lower() == "q":
        print("Game over! Better luck next time.")
        break
    # -- North (room slot 1)
    elif userInput.lower() == "n":
        next_room = room_list[current_room][1]
        if next_room is None:
            print("You can't go that way.")
        else:
            current_room = next_room
    # -- East (room slot 2)
    elif userInput.lower() == "e":
        next_room = room_list[current_room][2]
        if next_room is None:
            print("You can't go that way.")
        else:
            current_room = next_room
    # -- South (room slot 3)
    elif userInput.lower() == "s":
        next_room = room_list[current_room][3]
        if next_room is None:
            print("You can't go that way.")
        else:
            current_room = next_room
    # -- West (room slot 4)
    elif userInput.lower() == "w":
        next_room = room_list[current_room][4]
        if next_room is None:
            print("You can't go that way.")
        else:
            current_room = next_room
    else:
        print("That's not a valid input.")
|
import unittest
from model.project import project
class test_project(unittest.TestCase):
    """Unit tests for the project model."""

    def test_it_has_a_name(self):
        """project.name() returns a non-empty string."""
        # isinstance is the idiomatic type check (and handles str subclasses).
        self.assertIsInstance(project.name(), str)
        self.assertTrue(len(project.name()) > 0)
# Flower names to be displayed as one comma-separated line.
flowers = [
    'Daffodil',
    'Evening Primrose',
    'Hydrangea',
    'Iris',
    'Lavender',
    'Sunflower',
    'Tiger Lily',
]
separator = ", "
# str.join iterates the list for us -- no explicit for loop needed.
# (Every item in the iterable must already be a string for join to work.)
output = separator.join(flowers)
print(output)
import unittest
import Queue
import cProfile
def chapter_four_problem_seven(projects, dependencies):
    """Return a valid build order for *projects* given (dependent, dependency) pairs.

    Returns "Error" for an empty project list and "No build case" when no
    valid order exists.

    Robustness fix: the original looped forever when a cycle longer than two
    nodes slipped past check_for_circular_dependency; a full pass that makes
    no progress now reports "No build case" instead.
    """
    if not projects:
        return "Error"
    if check_for_circular_dependency(dependencies):
        return "No build case"
    build_order = []
    while len(projects) != len(build_order):
        progressed = False
        for p in projects:
            if p not in build_order and can_build(p, dependencies):
                build(p, projects, dependencies, build_order)
                progressed = True
        if not progressed:
            # Remaining projects are mutually blocked -- unresolvable.
            return "No build case"
    return build_order
class Node():
    """A binary-tree node: a payload plus left/right child links."""

    def __init__(self, data, left, right):
        """Store the payload and both children (either may be None)."""
        self.data, self.left, self.right = data, left, right
def chapter_four_problem_three(node):
if node == None:
return
q = Queue.Queue()
ret = []
level = 0
q.put(node)
while not q.empty():
cur = q.get()
ret.append((cur.data, level))
if cur.left:
q.put(cur.left)
if cur.right:
q.put(cur.right)
return ret
def inorder_traversal(node):
ret = []
if node.left:
yield inorder_traversal(node.left)
yield (node.data)
if node.right:
yield inorder_traversal(node.right)
def chapter_four_problem_nine():
    """Placeholder -- not implemented yet."""
    pass
def chapter_eleven_problem_three():
    """Placeholder -- not implemented yet."""
    pass
def check_for_circular_dependency(dependencies):
    """Return True if the (dependent, dependency) pairs contain any cycle.

    Bug fix: the original only caught self-loops and direct two-node
    cycles; a cycle of length three or more was missed and let
    chapter_four_problem_seven loop forever. This version runs a DFS over
    the dependency graph and detects cycles of any length.
    """
    graph = {}
    for dependent, dependency in dependencies:
        graph.setdefault(dependent, []).append(dependency)

    visited = set()  # nodes fully explored with no cycle found

    def _has_cycle(node, on_path):
        # A node already on the current DFS path closes a cycle.
        if node in on_path:
            return True
        if node in visited:
            return False
        visited.add(node)
        on_path.add(node)
        for nxt in graph.get(node, ()):
            if _has_cycle(nxt, on_path):
                return True
        on_path.discard(node)
        return False

    for start in list(graph):
        if _has_cycle(start, set()):
            return True
    return False
def can_build(project, dependencies):
    """True when *project* has no outstanding dependency pair listing it first."""
    return all(pair[0] != project for pair in dependencies)
def build(project, projects, dependencies, order):
    """Adds project to order, removes from to-be-built list, removes dependencies"""
    order.append(project)
    # Filter in place (slice assignment) so callers holding a reference to
    # the same dependency list see the update.
    still_pending = [pair for pair in dependencies if pair[1] != project]
    dependencies[:] = still_pending
|
import requests
from bs4 import BeautifulSoup
import urllib.request as urllib2
import re
def count_words(url, the_word):
    """Return how many times *the_word* occurs in the raw page at *url*.

    Counts substring occurrences in the str() of the response bytes.
    Bug fix: the original had a BeautifulSoup branch sitting AFTER the
    return statement -- unreachable dead code -- which has been removed.
    """
    print(url, the_word)
    r = urllib2.urlopen(url)
    return str(r.read()).count(the_word)
def search(lasturls, url):
    """Fetch each page in *lasturls* and add every http(s) link found to *url* (a set).

    Bug fix: on a failed urlopen the original used ``pass``, which fell
    through and parsed the previous (or undefined) ``page``; ``continue``
    now skips to the next URL.
    """
    for i in lasturls:
        try:
            page = urllib2.urlopen(i)
        except Exception:
            print("Couldn't load ", i)
            continue  # was `pass`: still parsed the stale/undefined `page`
        soup = BeautifulSoup(page, "lxml")
        for link in soup.findAll('a', attrs={'href': re.compile("^http://")}):
            url.add(link.get('href'))
        for link in soup.findAll('a', attrs={'href': re.compile("^https://")}):
            url.add(link.get('href'))
def main():
    """Crawl each seed URL from websites.txt and rank it by dictionary-word hits.

    Writes "url count" lines to websites_ranked.txt for every seed whose
    pages contain at least one word from dictionary.txt.
    """
    f = open('websites.txt', "r")
    f2 = open("websites_ranked.txt", "w")
    f3 = open("dictionary.txt", "r")
    dictt = f3.readlines()
    f3.close()
    lines = []
    z = f.readlines()
    for url in z:
        urls = {url}
        lasturls = {url}
        search(lasturls, urls)
        lasturls = set()
        # NOTE(review): `i not in urls` is always False while iterating
        # `urls`, so lasturls stays empty and the two follow-up search()
        # passes are no-ops -- presumably this should collect URLs not yet
        # crawled. Confirm intent before changing.
        for i in urls:
            if i not in urls:
                lasturls.add(i)
        search(lasturls, urls)
        lasturls = set()
        for i in urls:
            if i not in urls:
                lasturls.add(i)
        search(lasturls, urls)
        count = 0
        for i in urls:
            print(i)
            # NOTE(review): counts words on the seed `url`, not on `i`, so
            # the same page is re-counted once per discovered link --
            # verify whether count_words(i, word) was intended.
            for word in dictt:
                count = count + count_words(url, word)
        if count > 0:
            lines.append(url.replace("\n", "") + " " + str(count))
        # print('\nUrl: {}\ncontains {} occurrences of word: {}'.format(url, count, word))
    f.close()
    for line in lines:
        f2.write(line + "\n")
    f2.close()
# Script entry point: only crawl when executed directly, not on import.
if __name__ == '__main__':
    main()
import corner
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from astropy import constants
from matplotlib import rcParams
# Disable TeX rendering so the plots work without a LaTeX install.
rcParams["text.usetex"] = False
outdir = "disk/"
deg = np.pi / 180.0  # radians / degree
yr = 365.25  # days / year
au_to_R_sun = (constants.au / constants.R_sun).value  # conversion constant
# Posterior samples from the fitting run.
# NOTE(review): the corner plots below also use derived columns (MA, MAa,
# Mtot, a_inner, P_inner, a_outer, P_outer, incl_*, t_periastron_outer)
# that are not in sample_pars -- assumed to be present in current.csv;
# confirm they are produced upstream.
df = pd.read_csv(f"{outdir}current.csv")
print(df.columns)
# plot the raw parameters
sample_pars = [
    "mparallax",
    "MAb",
    "a_ang_inner",
    "logP_inner",
    "e_inner",
    "omega_inner",
    "Omega_inner",
    "cos_incl_inner",
    "t_periastron_inner",
    "logP_outer",
    "omega_outer",
    "Omega_outer",
    "phi_outer",
    "cos_incl_outer",
    "e_outer",
    "gamma_outer",
    "MB",
    "offsetKeck",
    "offsetFeros",
    "offsetDupont",
    "logRhoS",
    "logThetaS",
    "logjittercfa",
    "logjitterkeck",
    "logjitterferos",
    "logjitterdupont",
]
# also choose a sample at random and use the starting position
row0 = df.sample()
for par in sample_pars:
    print("{:} : {:}".format(par, row0[par].values[0]))
fig = corner.corner(df[sample_pars])
fig.savefig(f"{outdir}corner-sample-pars.png", dpi=120)
# convert all params
# Angles: radians -> degrees; outer period: days -> years.
# NOTE(review): P_inner is plotted below but never converted -- presumably
# already in the desired unit; verify.
df["omega_inner"] /= deg
df["Omega_inner"] /= deg
df["incl_inner"] /= deg
df["omega_outer"] /= deg
df["Omega_outer"] /= deg
df["incl_outer"] /= deg
df["P_outer"] /= yr
# just the inner parameters
inner = [
    "MAb",
    "MA",
    "a_inner",
    "P_inner",
    "e_inner",
    "omega_inner",
    "Omega_inner",
    "incl_inner",
    "t_periastron_inner",
]
fig = corner.corner(df[inner])
fig.savefig(f"{outdir}corner-inner.png", dpi=120)
# just the outer parameters
outer = [
    "MA",
    "MB",
    "a_outer",
    "P_outer",
    "omega_outer",
    "Omega_outer",
    "e_outer",
    "incl_outer",
    "gamma_outer",
    "t_periastron_outer",
]
fig = corner.corner(df[outer])
fig.savefig(f"{outdir}corner-outer.png", dpi=120)
# masses
masses = ["MAa", "MAb", "MA", "MB", "Mtot"]
fig = corner.corner(df[masses])
fig.savefig(f"{outdir}corner-masses.png", dpi=120)
# mutual inclination between inner orbit and outer orbit
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib.request
import sys
def download(url, title):
    """download video from specific url"""
    # Fetch the resource at `url` and save it to the local file named `title`.
    urllib.request.urlretrieve(url, title)
# Demo run: fetch the site's front page into test.html when executed directly.
if __name__ == "__main__":
    download("http://www.dygod.net/", "test.html")
"""
This test batch makes sure the Flask API works as expected.
"""
import sys
sys.path.append('./')
from time import sleep
import io
import pytest
import app
from misc.env_vars import *
# Job id produced by test_submit and read by later tests (test_get_status,
# test_get_submissions, test_get_source) -- the tests are order-dependent.
valid_job_id: str
@pytest.fixture
def client():
    # Flask test client in TESTING mode; shared by every test below.
    app.app.config['TESTING'] = True
    with app.app.test_client() as client:
        yield client
def test_get_problem_list(client):
    """The problem list endpoint exposes the known sample problems."""
    response = client.get('/api/get_problem_list')
    assert b'sample2' in response.data
    assert b'test4' in response.data
def test_get_problem_info(client):
    """Problem info for 'test' contains its description text."""
    response = client.get('/api/get_problem_info/test')
    expected = b'This problem serves as a way for you to test the submission system.'
    assert expected in response.data
def test_submit(client):
    """Test the process of submitting code.

    Also stores the returned job id in the module-global valid_job_id for
    the later status/source tests.
    """
    rv = client.post('/api/submit', data=dict(
        problem_id='test',
        type='python',
        code=(io.BytesIO(b'N=int(input())\nprint(N)\n'), 'code.py'),
        username='test_user',
        secret_key=SECRET_KEY
    ), follow_redirects=True, content_type='multipart/form-data')
    # Make sure submission finishes to avoid issues
    sleep(2)
    assert b'success' in rv.data
    # Security/robustness fix: parse the JSON response with json.loads
    # instead of eval() -- eval executes arbitrary Python expressions.
    import json
    payload = json.loads(rv.data)
    # Save job_id to be used later in the tests
    global valid_job_id
    valid_job_id = payload['job_id']
def test_submit_no_form(client):
    """An empty POST request must be rejected with 'Empty request form'."""
    response = client.post('/api/submit', data=None,
                           follow_redirects=True,
                           content_type='multipart/form-data')
    assert b'Empty request form' in response.data
def test_submit_no_id(client):
    """A POST missing the problem id must report 'Invalid problem ID'."""
    form = dict(
        type='python',
        code=(io.BytesIO(b'N=int(input())\nprint(N)\n'), 'code.py'),
        username='test_user',
        secret_key=SECRET_KEY,
    )
    response = client.post('/api/submit', data=form,
                           follow_redirects=True,
                           content_type='multipart/form-data')
    assert b'Invalid problem ID' in response.data
def test_submit_invalid_id(client):
    """A POST with an unknown problem id must report 'Invalid problem ID'."""
    form = dict(
        problem_id='lol_no_this_is_very_invalid_id',
        type='python',
        code=(io.BytesIO(b'N=int(input())\nprint(N)\n'), 'code.py'),
        username='test_user',
        secret_key=SECRET_KEY,
    )
    response = client.post('/api/submit', data=form,
                           follow_redirects=True,
                           content_type='multipart/form-data')
    assert b'Invalid problem ID' in response.data
def test_submit_no_type(client):
    """A POST without a language type must report 'No submission language'."""
    form = dict(
        problem_id='test',
        code=(io.BytesIO(b'N=int(input())\nprint(N)\n'), 'code.py'),
        username='test_user',
        secret_key=SECRET_KEY,
    )
    response = client.post('/api/submit', data=form,
                           follow_redirects=True,
                           content_type='multipart/form-data')
    assert b'No submission language' in response.data
def test_submit_invalid_type(client):
    """An unsupported language must report 'Invalid submission language'."""
    form = dict(
        problem_id='test',
        type='brainf*ck',
        code=(io.BytesIO(b'+[-->-[>>+>-----<<]<--<---]>-.>>>+.>>..+++[.>]<<<<.+++.------.<<-.>>>>+.'), 'code.bf'),
        username='test_user',
        secret_key=SECRET_KEY,
    )
    response = client.post('/api/submit', data=form,
                           follow_redirects=True,
                           content_type='multipart/form-data')
    assert b'Invalid submission language' in response.data
def test_submit_no_code(client):
    """A POST without a code file must report 'No code file submitted'."""
    form = dict(
        problem_id='test',
        type='python',
        username='test_user',
        secret_key=SECRET_KEY,
    )
    response = client.post('/api/submit', data=form,
                           follow_redirects=True,
                           content_type='multipart/form-data')
    assert b'No code file submitted' in response.data
def test_submit_invalid_filename(client):
    """A badly named code file must report 'Invalid code filename'."""
    form = dict(
        problem_id='test',
        type='python',
        code=(io.BytesIO(b'N=int(input())\nprint(N)\n'), 'input.in.txt'),
        username='test_user',
        secret_key=SECRET_KEY,
    )
    response = client.post('/api/submit', data=form,
                           follow_redirects=True,
                           content_type='multipart/form-data')
    assert b'Invalid code filename' in response.data
def test_submit_invalid_extensions(client):
    """Each language must reject a code file with the wrong extension."""
    cases = [
        ('java', b'public class java {}\n', 'java.cpp', b'Missing .java file extension'),
        ('cpp', b'#include <iostream>\n', 'cpp.py', b'Missing .cpp file extension'),
        ('python', b'if __name__ == "__main__": main()\n', 'py.java', b'Missing .py file extension'),
    ]
    for lang, source, filename, expected in cases:
        response = client.post('/api/submit', data=dict(
            problem_id='test',
            type=lang,
            code=(io.BytesIO(source), filename),
            username='test_user',
            secret_key=SECRET_KEY
        ), follow_redirects=True, content_type='multipart/form-data')
        assert expected in response.data
def test_submit_no_username(client):
    """A POST without a username must report 'No username'."""
    form = dict(
        problem_id='test',
        type='python',
        code=(io.BytesIO(b'N=int(input())\nprint(N)\n'), 'code.py'),
        secret_key=SECRET_KEY,
    )
    response = client.post('/api/submit', data=form,
                           follow_redirects=True,
                           content_type='multipart/form-data')
    assert b'No username' in response.data
def test_get_status_invalid_job(client):
    """An unknown job id sent to get_status must yield the NO_SUCH_JOB error."""
    response = client.get('/api/get_status/abacadabra')
    assert b'NO_SUCH_JOB' in response.data
def test_get_status(client):
    """The earlier submission must be 'done' with a score of 101."""
    response = client.get('/api/get_status/{}'.format(valid_job_id))
    assert b'"status":"done"' in response.data
    assert b'101' in response.data
def test_get_submissions(client):
    """Page 1 of submissions contains the earlier job id and its score."""
    response = client.get('/api/get_submissions/1',
                          query_string=dict(secret_key=SECRET_KEY))
    assert valid_job_id.encode('utf-8') in response.data
    assert b'101' in response.data
def test_get_source(client):
    """The stored source of the earlier submission must be returned."""
    response = client.get('/api/get_submission_source/{}'.format(valid_job_id))
    assert b'N=int(input())' in response.data
|
import json

from flask import Flask, request, jsonify
from numpyencoder import NumpyEncoder
from sentence_transformers import SentenceTransformer
import scipy
import scipy.spatial  # required: `import scipy` alone does not load scipy.spatial
# Sentence-BERT model used for every embedding in this service.
embedder = SentenceTransformer('bert-base-nli-mean-tokens')
app = Flask(__name__)
baseUrl ="/sentenceTransformers"
# Fixed demo corpus that queries are matched against.
corpus = ['A man is eating food.',
          'A man is eating a piece of bread.',
          'The girl is carrying a baby.',
          'A man is riding a horse.',
          'A woman is playing violin.',
          'Two men pushed carts through the woods.',
          'A man is riding a white horse on an enclosed ground.',
          'A monkey is playing drums.',
          'A cheetah is running behind its prey.'
          ]
# Embeddings are precomputed once at startup and reused by the endpoints.
corpus_embeddings = embedder.encode(corpus)
queries = ['A man is eating pasta.', 'Someone in a gorilla costume is playing a set of drums.', 'A cheetah chases prey on across a field.']
query_embeddings = embedder.encode(queries)
# api's are below:
#base url testing
@app.route(baseUrl + '/')
def hello_world():
    """Smoke-test endpoint for the service root."""
    return "welcome to sentence transformers!!!"
# GET: the full list of corpus sentences.
@app.route(baseUrl + '/getCorpus', methods=['GET'])
def getAllCorpus():
    """Return every corpus sentence as a JSON array."""
    return jsonify(corpus)
# GET: the full list of predefined queries.
@app.route(baseUrl + '/getQueries', methods=['GET'])
def getAllQueries():
    """Return every predefined query as a JSON array."""
    return jsonify(queries)
#this api returns embeddings of corpus sentences
@app.route(baseUrl+'/getCorpusEmbedding',methods=['GET'])
def corpusEmbedding():
    """Return the corpus sentence embeddings as JSON.

    NOTE(review): json.dumps() already produces a string, so jsonify() here
    double-encodes -- clients receive a JSON string containing JSON. Confirm
    consumers rely on this before changing it.
    """
    # Re-encodes on every request; the module-level corpus_embeddings is shadowed.
    corpus_embeddings = embedder.encode(corpus)
    corpEmbedJson = json.dumps(corpus_embeddings,cls=NumpyEncoder)
    return jsonify(corpEmbedJson)
#this api gets all query embeddings
@app.route(baseUrl+'/getQueryEmbedding',methods=['GET'])
def queryEmbedding():
    """Return the query embeddings as JSON.

    NOTE(review): same double-encoding pattern as corpusEmbedding --
    jsonify() wraps the already-serialized json.dumps() string.
    """
    # Re-encodes on every request; the module-level query_embeddings is shadowed.
    query_embeddings = embedder.encode(queries)
    queryEmbedJson = json.dumps(query_embeddings,cls=NumpyEncoder)
    return jsonify(queryEmbedJson)
# this api takes number (number of sentences to be matched) and returns matched sentences for ALL QUERIES
@app.route(baseUrl+'/getForAllQueries',methods=['GET'])
def getRelatedStatements():
    """For every predefined query, return the `number` closest corpus sentences.

    Query-string parameter:
        number -- how many corpus sentences to return per query.
    """
    try:
        closest_n = (request.args.get('number'))
        outputDict = []
        for query, query_embedding in zip(queries, query_embeddings):
            queryKey = query
            sentencesValue = []
            # Cosine distance from this query to every corpus sentence.
            distances = scipy.spatial.distance.cdist([query_embedding], corpus_embeddings, "cosine")[0]
            # Rank corpus indices by ascending distance (most similar first).
            results = zip(range(len(distances)), distances)
            results = sorted(results, key=lambda x: x[1])
            for idx, distance in results[0:int(closest_n)]:
                sentencesValue.append(corpus[idx].strip())
            tempDict = { queryKey : sentencesValue }
            outputDict.append(tempDict)
        json_object = json.dumps(outputDict)
        return json_object
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        return "Error occured : Please check your input"
#this api takes index of the query required and number of sentences to be matched as input and returns matched sentences for the given query
@app.route(baseUrl+'/getForSingleQuery',methods=['GET'])
def getRelatedStatementsForQuery():
    """Return the `number` closest corpus sentences for the query at `index`.

    Query-string parameters:
        number -- how many corpus sentences to return.
        index  -- position of the query in the predefined `queries` list.
    """
    try:
        closest_n = request.args.get('number')
        index = request.args.get('index')
        queryKey = queries[int(index)]
        sentencesValue = []
        # Cosine distance from the selected query to every corpus sentence.
        distances = scipy.spatial.distance.cdist([query_embeddings[int(index)]], corpus_embeddings, "cosine")[0]
        results = zip(range(len(distances)), distances)
        results = sorted(results, key=lambda x: x[1])
        for idx, distance in results[0:int(closest_n)]:
            sentencesValue.append(corpus[idx].strip())
        tempDict = { queryKey : sentencesValue }
        json_object = json.dumps(tempDict)
        return json_object
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        return "Error occured : Please check your input"
# Start Flask's development server when executed directly.
if __name__ == '__main__':
    app.run()
|
# -*- coding: utf-8 -*-
import sys
import requests
import subprocess
from os.path import join, exists, isdir, basename, dirname
from os import environ, makedirs, listdir, replace, remove, sep, getenv, chdir
from time import sleep
import site
from shutil import rmtree
from msvcrt import getwch
from os import sep, startfile
import subprocess
from urllib.request import urlretrieve
import glob
import locale
# Use the user's default locale for any locale-aware formatting below.
locale.setlocale(locale.LC_ALL, "")
# pyunpack is optional at import time; download_player installs it on demand.
HAVE_PYUNPACK = True
try:
    from pyunpack import Archive
except ModuleNotFoundError:
    HAVE_PYUNPACK = False
''' This is also to be able to execute it manually'''
try:
    from .player import find_mpv_on_windows, find_mplayer_on_windows, find_vlc_on_windows
except ImportError:
    from player import find_mpv_on_windows, find_mplayer_on_windows, find_vlc_on_windows
def win_press_any_key_to_unintall():
    """Explain the manual uninstall steps, then launch install.py -R.

    NOTE(review): the "unintall" typo is part of the public name -- callers
    may depend on it, so it is left unchanged.
    """
    # Path of install.py living next to this module.
    the_path = __file__.split(sep)
    the_file = sep.join(the_path[:-1]) + sep + 'install.py'
    print('\nTo complete the process you will have to [red]execute a batch file[/red].')
    print('Windows Explorer will open the location of the batch file to run.')
    print('')
    print('Please double click')
    print('')
    print('    [bold green]uninstall.bat[/bold green]')
    print('')
    print('to remove [magenta]PyRadio[/magenta] from your system.')
    print('')
    print('After you are done, you can delete the folder it resides in.')
    # NOTE(review): imported but never called -- getwch() is used directly below.
    from .win import press_any_key_to_continue
    print('\nPress any key to exit...', end='', flush=True)
    getwch()
    #print('\nPress any key to exit...', end='', flush=True)
    #getwch()
    # Hand off to install.py -R, which performs the actual removal steps.
    subprocess.call('python ' + the_file + ' -R',
                    stdout=subprocess.DEVNULL,
                    stderr=subprocess.DEVNULL)
def win_print_exe_paths():
    """Print the locations of the installed pyradio EXE file(s), then wait for a key."""
    from .install import fix_pyradio_win_exe
    # exe is a pair: (system-wide exe or None, per-user exe or None).
    exe = fix_pyradio_win_exe()
    if exe[0] and exe[1]:
        print('[magenta]PyRadio[/magenta] EXE files:')
        print('  System:\n    [red]{}[/red]'.format(exe[0]))
        print('  User:\n    [green]{}[/green]'.format(exe[1]))
    else:
        print('[magenta]PyRadio[/magenta] EXE file:')
        if exe[0]:
            print('  [green]{}[/green]'.format(exe[0]))
        else:
            print('  [green]{}[/green]'.format(exe[1]))
    # doing it this way so that python2 does not break (#153)
    from .win import press_any_key_to_continue
    print('\nPress any key to exit...', end='', flush=True)
    getwch()
def press_any_key_to_continue():
    # Blocking single-keypress wait (Windows-only: msvcrt.getwch).
    print('\nPress any key to exit...', end='', flush=True)
    getwch()
def install_module(a_module, do_not_exit=False, print_msg=True):
    """pip-install (or upgrade) *a_module*, retrying up to 5 times.

    Returns True on success; on failure either returns False (when
    do_not_exit is True) or terminates with sys.exit(1).
    """
    if print_msg:
        print('Installing module: [green]' + a_module + '[/green]')
    for count in range(1,6):
        ret = subprocess.call('python -m pip install --upgrade ' + a_module,
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL)
        if ret == 0:
            break
        else:
            if count < 5:
                if print_msg:
                    print('  Download failed. Retrying [magenta]{}[/magenta]/[red]5[/red]'.format(count+1))
            else:
                if print_msg:
                    print('Failed to download module...\nPlease check your internet connection and try again...')
                else:
                    # Bug fix: the original called .format() on print()'s
                    # return value (None), raising AttributeError; the
                    # format call now applies to the message string itself.
                    print('Failed to download module "[magenta]{}[/magenta]"...\nPlease check your internet connection and try again...'.format(a_module))
                if do_not_exit:
                    return False
                sys.exit(1)
    return True
# Bootstrap `rich` (colored console markup): install it on the fly if the
# import fails for any reason, then retry the import.
try:
    from rich import print
except:
    install_module('rich')
    from rich import print
def find_pyradio_win_exe():
    ''' find pyradio EXE files
        Return (system_exe, user_exe) -- either entry is None when the
        corresponding Scripts/pyradio.exe does not exist.
    '''
    system_exe = None
    for site_dir in site.getsitepackages():
        candidate = join(site_dir, 'Scripts', 'pyradio.exe')
        if exists(candidate):
            system_exe = candidate
            break
    user_candidate = join(site.getuserbase(), 'Scripts', 'pyradio.exe')
    user_exe = user_candidate if exists(user_candidate) else None
    return [system_exe, user_exe]
def _is_player_in_path(a_player):
''' Return player's path in PATH variable
If player not in PATH, return None
Makes sure the path is local to user
and player EXE exists
Parameter:
a_player: 1=mpv, 2=mplayer
'''
a_player -= 1
in_path = None
pl = ('mpv', 'mplayer')
for a_path in environ['PATH'].split(';'):
if a_path.endswith(pl[a_player]):
in_path = a_path
break
#print('in_payh: {}'.format(in_path))
if in_path:
if not environ['USERPROFILE'] in a_path:
return None
if not exists(join(in_path, pl[a_player] + '.exe')):
return None
return in_path
def _get_output_folder(package, output_folder=None, do_not_exit=False):
    """Pick (and create if needed) the folder a player archive is saved to.

    Prefers the folder already on PATH for this player; otherwise falls
    back to %APPDATA%/pyradio. Returns the folder path, or None
    (do_not_exit=True) / exits on failure.
    """
    if output_folder is None:
        a_path = _is_player_in_path(package)
        if a_path:
            # Use the parent of the folder found on PATH.
            sp = a_path.split(sep)
            output_folder = sep.join(sp[:-1])
        else:
            output_folder = join(environ['APPDATA'], 'pyradio')
    # rename mpv if already there
    if not exists(output_folder):
        # create dir
        makedirs(output_folder, exist_ok=True)
        if not exists(output_folder):
            # Bug fix: the original referenced the undefined name
            # `pyradio_dir` here, raising NameError instead of printing.
            print('Failed to create folder: "[magenta]{}[/magenta]"'.format(output_folder))
            if do_not_exit:
                return None
            sys.exit(1)
    return output_folder
def _get_out_file(output_folder, package=1):
count = 0
p_name=('mpv-latest', 'mplayer-latest')
out_file = join(output_folder, '{}.7z'.format(p_name[package]))
while True:
if exists(out_file):
count += 1
out_file = join(output_folder, '{0}-{1}.7z'.format(p_name[package], count))
else:
break
return join(output_folder, out_file)
def download_seven_zip(output_folder):
    """Ensure 7-Zip is available; download and run its installer if missing.

    Returns immediately when 7z.exe is found in either Program Files
    location; otherwise blocks until the interactive 7-Zip setup finishes.
    """
    # Standard install locations (64-bit and 32-bit Program Files).
    PR = (
        join(getenv('PROGRAMFILES'), '7-Zip', '7z.exe'),
        join(getenv('PROGRAMFILES') + ' (x86)', '7-Zip', '7z.exe')
    )
    if exists(PR[0]) or exists(PR[1]):
        return
    url = 'https://sourceforge.net/projects/sevenzip/files/latest/download'
    out_file = join(output_folder, '7-Zip_latest.exe')
    print('[magenta]7-Zip not found...\n[green]Downloading...[/green]')
    try:
        urlretrieve(url, filename=out_file)
    except:
        print('[red]Failed to download 7-Zip...[/red]')
        print('Please check your internet connection and try again...')
        print('\nIn case you want to [green]install 7-Zip manually[/green],')
        print('go to [magenta]https://www.7-zip.org/[/magenta] to get it...')
        sys.exit(1)
    print('\n[bold]PyRadio installation will resume as soon as\nyou complete the installation of 7-Zip...[/bold]')
    # Launch the installer and wait for the user to complete it.
    subprocess.call(
        out_file,
        shell=True,
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL
    )
def download_player(output_folder=None, package=1, do_not_exit=False):
    """Download and extract a media player for Windows.

    Parameters
        output_folder : where to save files (None = auto-detect)
        package       : 1 = mpv, 2 = mplayer (decremented internally)
        do_not_exit   : return False on failure instead of sys.exit(1)
    Returns True on success.
    """
    package -= 1
    if package == 0:
        print('Downloading [magenta]MPV[/magenta] ([green]latest[/green])...')
    else:
        print('Downloading [magenta]MPlayer[/magenta] ([green]latest[/green])...')
    purl = (
        'https://sourceforge.net/projects/mpv-player-windows/files',
        'https://sourceforge.net/projects/mplayerwin/files/MPlayer-MEncoder'
    )
    url = (
        'https://sourceforge.net/projects/mpv-player-windows/files/latest/download',
        'https://sourceforge.net/projects/mplayerwin/files/MPlayer-MEncoder/r38151/mplayer-svn-38151-x86_64.7z/download'
    )
    output_folder = _get_output_folder(
        output_folder=output_folder,
        package=package,
        do_not_exit=do_not_exit)
    if output_folder is None:
        return False
    # NOTE(review): `True == False` deliberately disables the mpv
    # updater.bat shortcut branch -- confirm before re-enabling.
    if True == False and package == 0 and \
            exists(join(output_folder, 'mpv', 'updater.bat')):
        chdir(join(output_folder, 'mpv'))
        startfile('updater.bat')
    else:
        print('  from "[plum4]{}[plum4]"'.format(purl[package]))
        print('  into "[magenta]{}[/magenta]"'.format(output_folder))
        out_file = _get_out_file(output_folder, package)
        session = requests.Session()
        for count in range(1,6):
            try:
                r = session.get(url[package])
                r.raise_for_status()
                break
            except requests.exceptions.RequestException as e:
                if count < 5:
                    print('  Download failed. Retrying [magenta]{}[/magenta]/[red]5[/red]'.format(count+1))
                else:
                    print('[red]Failed to download player...[/red]\nPlease check your internet connection and try again...')
                    if do_not_exit:
                        return False
                    sys.exit(1)
        print('  Saving: "{}"'.format(out_file))
        try:
            with open(out_file, 'wb') as f:
                f.write(r.content)
        except:
            print('[red]Failed to write archive...[/red]\nPlease try again later...')
            if do_not_exit:
                return False
            sys.exit(1)
        print('Extracting archive...')
        if package == 0:
            # mpv needs 7-Zip for its archive format.
            download_seven_zip(output_folder)
        if not HAVE_PYUNPACK:
            for a_module in ('pyunpack', 'patool'):
                install_module(a_module, print_msg=False)
            from pyunpack import Archive
        patool_exec = join(site.USER_SITE.replace('site-packages', 'Scripts'), 'patool')
        if not exists(patool_exec):
            patool_exec = glob.glob(join(environ['APPDATA'], '**', 'patool.exe'), recursive=True)
            if patool_exec:
                # Bug fix: the original read `patool_exe[0]` (an undefined
                # name), raising NameError whenever the glob matched.
                patool_exec = patool_exec[0]
            else:
                patool_exec = None
        try:
            Archive(out_file).extractall(join(output_folder, 'mpv' if package==0 else ''),
                                         auto_create_dir=True,
                                         patool_path=patool_exec)
        except:
            player_name = 'mpv' if package == 0 else 'mplayer'
            print('''Failed to extract the archive...
    You will have to install the player [red]MANUALLY[/red]!!!
    PyRadio's configuration folder will open now,
    along with the archive named "{0}".'''.format(basename(out_file)))
            if player_name == 'mpv':
                if exists(join(output_folder, 'mpv')):
                    print(''' Please extract the archive in the "[dev]mpv[/red]" folder
 (overwriting any existing files).''')
                else:
                    print(''' Please create a folder named "[red]mpv[/red]" and extract
 the archive there.''')
            else:
                # mplayer
                if exists(join(output_folder, 'mplayer')):
                    print(''' Please delete the "[red]mplayer[/red]" folder, extract
 the archive and rename the resulting folder
 to "[red]mplayer[/red]".''')
                else:
                    print('''
 Please extract the archive and rename the resulting
 folder to "[red]mplayer[/red]".''')
            print('Press any key to continue...')
            getwch()
            if player_name == 'mpv':
                startfile(join(dirname(out_file), 'mpv'))
            else:
                startfile(dirname(out_file))
            startfile(out_file)
            '''
            if player_name == 'mpv':
                while not exists(join(output_folder, 'mpv', 'updater.bat')):
                    sleep(1)
                chdir(join(output_folder, 'mpv'))
                startfile('updater.bat')
            '''
            if do_not_exit:
                return False
            sys.exit(1)
    if not _post_download(package, output_folder, do_not_exit):
        return False
    try:
        remove(out_file)
    except:
        pass
    return True
def _post_download(package, output_folder, do_not_exit):
    """Post-extraction fixups; currently renames MPlayer's extracted folder.

    Returns True on success; False (do_not_exit=True) or sys.exit(1) on failure.
    """
    # rename MPlayer directory
    if package == 1:
        # Give the extractor time to finish and release file handles.
        sleep(5)
        mplayer_dir_found = False
        extracted_dirname = None
        dir_list = listdir(output_folder)
        for a_file in dir_list:
            if a_file == 'mplayer':
                mplayer_dir_found = True
            elif a_file.lower().startswith('mplayer-svn') and \
                    isdir(join(output_folder, a_file)):
                extracted_dirname = a_file
        # rename extracted dir to mplayer
        if extracted_dirname:
            extracted_dirname = join(output_folder, extracted_dirname)
            mplayer_final_dir = join(output_folder, 'mplayer')
            mplayer_old_dir = join(output_folder, 'mplayer.old')
            if mplayer_dir_found:
                # Keep the previous install as mplayer.old, replacing any
                # older backup first.
                if exists(mplayer_old_dir):
                    try:
                        rmtree(mplayer_old_dir)
                    except OSError:
                        print('Failed to remove "[green]{}[/green]"\nPlease close all programs and try again...'.format(mplayer_old_dir))
                        if do_not_exit:
                            return False
                        sys.exit(1)
                try:
                    replace(mplayer_final_dir, mplayer_old_dir)
                except:
                    print('Failed to rename folder "[green]{0}[/green]"\n  to "[magenta]{1}[/magenta]"...\nPlease close all open programs and try again...'.format(mplayer_final_dir, mplayer_old_dir))
                    if do_not_exit:
                        return False
                    sys.exit(1)
            # NOTE(review): extracted_dirname is already a full path (joined
            # above), so this join doubles the folder component when
            # output_folder is relative -- it only works because os.path.join
            # discards the first part when the second is absolute. Verify.
            try:
                replace(join(output_folder, extracted_dirname), join(output_folder, 'mplayer'))
            except:
                print('Failed to rename folder "[green]{0}[/green]" to\n  "[magenta]{1}[/magenta]"...\nPlease close all open programs and try again...'.format(extracted_dirname, mplayer_final_dir))
                if do_not_exit:
                    return False
                sys.exit(1)
        else:
            print('[red]Extracted folder not found...[/red]\nPlease try again later...')
            if do_not_exit:
                return False
            sys.exit(1)
    return True
def install_player(output_folder=None, package=0, do_not_exit=False):
    """Interactive Windows menu to install, update or uninstall MPV/MPlayer.

    Loops until the user cancels ('0'/'q'); the actual downloads are
    delegated to download_player().
    """
    while True:
        in_path = [None, None, None]
        to_do = ['[bold red]1[/bold red]. Install', '[bold red]2[/bold red]. Install', '[green]VLC[/green] media player is not installed']
        from_path = ['', '']
        # NOTE(review): _is_player_in_path documents its parameter as
        # 1=mpv / 2=mplayer, but n here is 0 or 1 -- possible off-by-one;
        # confirm which contract is correct.
        for n in range(0, 2):
            in_path[n] = _is_player_in_path(n)
            if in_path[n]:
                to_do[n] = '[bold red]{}[/bold red]. Update'.format(n+1)
                # NOTE(review): opening tag "[magenat]" is misspelled, so rich
                # will not colorize this fragment as intended.
                from_path[n] = ' (found in [magenat]PATH[/magenta])'
        if in_path[0] is None:
            in_path[0] = find_mpv_on_windows()
        if in_path[1] is None:
            in_path[1] = find_mplayer_on_windows()
        # The find_*_on_windows helpers return the bare player name when the
        # player was not located; normalize that back to None.
        if in_path[0] == 'mpv':
            in_path[0] = None
        if in_path[1] == 'mplayer':
            in_path[1] = None
        for n in range(0, 2):
            if in_path[n]:
                to_do[n] = '[bold red]{}[/bold red]. Update'.format(n+1)
        if find_vlc_on_windows():
            to_do[2] = '[green]VLC[/green] media player is already installed.\n[bold red]  It is not recommended to be used!!![/bold red]'
        #print(in_path)
        #print(to_do)
        #print(from_path)
        #print('\nDo you want to download a media player now? (Y/n): ', end='', flush=True)
        #x = getwch()
        #print(x)
        # The confirmation prompt above is disabled; 'y' is assumed.
        x = 'y'
        if in_path[0]:
            best_choise = ''
        else:
            # NOTE(review): "choise" typo appears in the user-facing string;
            # left unchanged here (doc-only pass).
            best_choise = '([yellow]best choise[/yellow])'
        if x == 'y' or x == '\n' or x == '\r':
            x = ''
            msg = '''
Please select an action:
  {0} [green]MPV[/green]{1} {2}
  {3} [green]MPlayer[/green]{4}'''
            print(msg.format(to_do[0], from_path[0],
                best_choise, to_do[1], from_path[1]
            ))
            msg ='''
[plum4]Note:[/plum4]
  {}
'''
            opts = []
            prompt = ''
            all_uninstall = False
            # Menu options depend on which players are already present.
            if in_path[0] is None and in_path[1] is None:
                opts = ['0', '1', '2', 'q']
                prompt = 'Press [bold red]1[/bold red], [bold red]2[/bold red] or [bold red]q[/bold red] to Cancel: '
            elif in_path[0] is not None and in_path[1] is not None:
                print('\n  [bold red]3[/bold red]. Uninstall [green]MPV[/green]')
                print('  [bold red]4[/bold red]. Uninstall [green]MPlayer[/green]')
                opts = ['0', '1', '2', '3', '4', 'q']
                prompt = 'Press [bold red]1[/bold red], [bold red]2[/bold red], [bold red]3,[/bold red] [bold red]4[/bold red] or [bold red]q[/bold red] to Cancel: '
            else:
                if in_path[0] is not None:
                    print('\n  [bold red]3[/bold red]. Uninstall [green]MPV[/green]')
                else:
                    print('\n  [bold red]3[/bold red]. Uninstall [green]MPlayer[/green]')
                opts = ['0', '1', '2', '3', 'q']
                prompt = 'Press [bold red]1[/bold red], [bold red]2[/bold red], [bold red]3[/bold red] or [bold red]q[/bold red] to Cancel: '
                all_uninstall = True
            print(msg.format(to_do[2]))
            while x not in opts:
                print(prompt, end='', flush=True)
                x = getwch()
                print(x)
            # ok, parse response
            if x in ('0', 'q'):
                clean_up()
                return
            if x in ('1', '2'):
                # install ot update
                download_player(package=int(x))
                print('\n\n')
            elif x == '3':
                # find out which player to wuninstall
                print('uninstall [green]mplayer[/green] or [green]mpv[/green]')
                print('\n\n')
            elif x == '4':
                # uninstall mplayer
                print('uninstall [green]mplayer[/green]')
                print('\n\n')
def install_pylnk(a_path, do_not_exit=False):
    """Download pylnk from GitHub into *a_path* and pip-install it.

    Used by create_pyradio_link() when pylnk3.exe is not yet available.
    Returns False (do_not_exit=True) or exits on download failure.
    """
    print('  Downloading [green]pylnk[/green]...')
    session = requests.Session()
    for count in range(1,6):
        try:
            r = session.get('https://github.com/strayge/pylnk/archive/refs/heads/master.zip')
            r.raise_for_status()
            break
        except requests.exceptions.RequestException as e:
            if count < 5:
                print('  Download failed. Retrying [magenta]{}[/magenta]/[red]5[/red]'.format(count+1))
            else:
                print('  Failed to download [green]pylnk[/green]...\nPlease check your internet connection and try again...')
                if do_not_exit:
                    return False
                sys.exit(1)
    try:
        with open(join(a_path, 'pylnk.zip'), 'wb') as f:
            f.write(r.content)
    except:
        print('  Failed to write archive...\nPlease try again later...')
        if do_not_exit:
            return False
        sys.exit(1)
    print('  Installing [green]pylnk...[/green]')
    # NOTE(review): `ret` is captured but never checked -- a failed pip
    # install goes unnoticed here.
    ret = subprocess.call('python -m pip install ' + join(a_path, 'pylnk.zip'),
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL)
    remove(join(a_path, 'pylnk.zip'))
def clean_up(print_msg=True):
    """Uninstall the helper modules that were only needed during install."""
    if print_msg:
        print('Cleaning up makedepend modules...')
    helper_modules = ('pyunpack', 'patool', 'pylnk3', 'EasyProcess')
    for module_name in helper_modules:
        subprocess.call(
            'python -m pip uninstall -y ' + module_name,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
def get_path(exe):
    """Search the known site-packages / Scripts folders for *exe*.

    Returns the first candidate path that exists, or '' when none does.
    """
    chk = []
    for n in site.getsitepackages():
        chk.append(join(n, exe))
        chk.append(join(n, 'Scripts', exe))
    x = site.getusersitepackages()
    if isinstance(x, str):
        chk.append(join(x, exe))
        chk.append(join(x, 'Scripts', exe))
        # Fix: raw strings for the Windows path fragment. The original's
        # '\site-packages' only worked by accident because '\s' is not a
        # recognized escape (and warns on modern Python).
        chk.append(join(x, exe).replace(r'\site-packages', ''))
        chk.append(join(x, 'Scripts', exe).replace(r'\site-packages', ''))
    else:
        for n in site.getusersitepackages():
            chk.append(join(n, exe))
            chk.append(join(n, 'Scripts', exe))
    for n in site.PREFIXES:
        chk.append(join(n, exe))
        chk.append(join(n, 'Scripts', exe))
    # Unused `out_exe` local and commented-out debug prints removed.
    for candidate in chk:
        if exists(candidate):
            return candidate
    return ''
def get_pyradio():
    ''' Locate the installed pyradio.exe; returns '' when not found. '''
    executable_name = 'pyradio.exe'
    return get_path(executable_name)
def get_pylnk():
    ''' Locate the installed pylnk3.exe; returns '' when not found. '''
    executable_name = 'pylnk3.exe'
    return get_path(executable_name)
def create_pyradio_link():
    ''' (Re)create the PyRadio shortcut (.lnk) under
        %APPDATA%\\pyradio\\help using pylnk3, installing pylnk3 first
        when it is not available.

        Exits the process (status 1) if the pyradio work directory
        cannot be created.
    '''
    pyradio_exe = get_pyradio()
    pylnk_exe = get_pylnk()
    icon = join(environ['APPDATA'], 'pyradio', 'help', 'pyradio.ico')
    link_path = join(environ['APPDATA'], 'pyradio', 'help', 'PyRadio.lnk')
    workdir = join(environ['APPDATA'], 'pyradio')
    if not exists(workdir):
        makedirs(workdir, exist_ok=True)
    if not exists(workdir):
        print('Cannot create "' + workdir + '"')
        sys.exit(1)
    if not exists(pylnk_exe):
        # pylnk3 not installed yet; fetch/install it, then re-resolve its path.
        install_pylnk(workdir)
        pylnk_exe = get_pylnk()
    # NOTE(review): Popen is fire-and-forget - the shortcut may not exist
    # yet when this function returns; confirm callers tolerate that.
    subprocess.Popen(
        [pylnk_exe, 'c', '--icon', icon, '--workdir', workdir, pyradio_exe, link_path],
        shell=True,
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL
    )
def install_pyradio_link():
    ''' Copy the generated PyRadio.lnk shortcut to the Desktop and to
        the Start Menu "Programs" folder, for each that exists.
    '''
    from shutil import copy
    desktop = getenv('DESKTOP')
    user_profile = getenv('USERPROFILE')
    appdata = getenv('APPDATA')
    if desktop is None:
        # Fall back to the conventional per-user desktop location.
        desktop = join(user_profile, 'desktop')
    start_menu = join(appdata, 'Microsoft', 'Windows', 'Start Menu', 'Programs')
    source_link = join(appdata, 'pyradio', 'help', 'PyRadio.lnk')
    for destination in (desktop, start_menu):
        if exists(destination):
            copy(source_link, join(destination, 'PyRadio.lnk'))
if __name__ == '__main__':
    # Script entry point: download and install MPV, wrapped in
    # rich-style colored banner lines.
    print('\n\n[red]----[green]==== [magenta]MPV Media Player Installation [green]====[red]----[/red]')
    download_player(package=1)
    print('[red]----[green]==== [magenta]MPV Media Player Installed [green]====[red]----[/red]')
|
# -*- coding: utf-8 -*-
from decorated.base.function import Function
from metaweb.errors import ValidationError
import doctest
import re
class Validator(Function):
    ''' Base class for parameter validators.

        Subclasses implement _validate(value), returning an error
        message string on failure or None on success. On failure the
        wrapped call raises ValidationError instead of proceeding.
    '''
    def _call(self, *args, **kw):
        # Pull out the watched parameter, validate it, then delegate.
        param_value = self._evaluate_expression(self._param, *args, **kw)
        message = self._validate(param_value)
        if message is not None:
            raise ValidationError(self._code, param=self._param, message=message)
        return super(Validator, self)._call(*args, **kw)

    def _init(self, param, code):
        super(Validator, self)._init()
        self._param = param
        self._code = code

    def _validate(self, value):
        # Must be provided by concrete subclasses.
        raise NotImplementedError()
class Type(Validator):
    ''' Validates that the parameter is an instance of the given
        type (or tuple of types).
    '''
    def _init(self, param, type_or_types, code='BAD_ARGUMENT_TYPE'):
        super(Type, self)._init(param, code)
        self._type_or_types = type_or_types

    def _validate(self, value):
        if isinstance(value, self._type_or_types):
            return None
        return 'Value should be of type %s.' % (self._type_or_types,)

# Lowercase alias - part of the module's public decorator-style API.
type = Type
class LengthRange(Type):
    ''' Validates that a string parameter's length lies within
        [lower, upper] (inclusive).
    '''
    def _init(self, param, lower, upper, code='BAD_ARGUMENT_LENGTH'):
        super(LengthRange, self)._init(param, basestring, code=code)
        self._lower = lower
        self._upper = upper

    def _validate(self, value):
        # Type check first (inherited); only then measure the length.
        type_error = super(LengthRange, self)._validate(value)
        if type_error is not None:
            return type_error
        if not (self._lower <= len(value) <= self._upper):
            return 'String length should be within [%s, %s].' % (self._lower, self._upper)

# Lowercase alias - part of the module's public decorator-style API.
length_range = LengthRange
class Range(Type):
    ''' Validates that a numeric parameter lies within
        [lower, upper] (inclusive).
    '''
    def _init(self, param, lower, upper, code='ARGUMENT_OUT_OF_RANGE'):
        super(Range, self)._init(param, (int, float), code=code)
        self._lower = lower
        self._upper = upper

    def _validate(self, value):
        # Type check first (inherited); only then compare bounds.
        type_error = super(Range, self)._validate(value)
        if type_error is not None:
            return type_error
        if not (self._lower <= value <= self._upper):
            return 'Value should be within [%s, %s].' % (self._lower, self._upper)

# Lowercase alias - part of the module's public decorator-style API.
range = Range
class Required(Validator):
    ''' Validates that the parameter is neither None nor the empty
        string.
    '''
    def _init(self, param, code='ARGUMENT_MISSING'):
        super(Required, self)._init(param, code)

    def _validate(self, value):
        missing = value is None or value == ''
        if missing:
            return 'Argument cannot be null.'

# Lowercase alias - part of the module's public decorator-style API.
required = Required
class Regex(Type):
    ''' Validates that a string parameter matches a regex pattern
        (re.match semantics, i.e. anchored at the start).
    '''
    def _init(self, param, pattern, code='BAD_ARGUMENT'):
        # BUG FIX: original called _init(param, code, basestring), which
        # put the error-code string into Type's type_or_types slot -
        # isinstance(value, 'BAD_ARGUMENT') would raise TypeError.
        # Pass the arguments in the correct positions instead.
        super(Regex, self)._init(param, basestring, code=code)
        self._pattern = re.compile(pattern)

    def _validate(self, value):
        # Type check first, consistent with the other Type subclasses,
        # so non-strings yield a clean error rather than a TypeError.
        error = super(Regex, self)._validate(value)
        if error is not None:
            return error
        if not self._pattern.match(value):
            return 'Value does not match "%s".' % self._pattern.pattern

# Lowercase alias - part of the module's public decorator-style API.
regex = Regex
if __name__ == '__main__':
    # Run any doctests embedded in this module when executed directly.
    doctest.testmod()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.