index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
999,700 | 11e0a24fd4dc2ca5d356c366abe37b78364108eb | import boto3
from botocore.exceptions import NoCredentialsError
from settings.constants import *
# Debug print removed: it wrote ACCESS_KEY and SECRET_KEY (AWS credentials)
# to stdout, leaking secrets into logs.
def upload_to_aws(local_file, bucket, s3_file):
    """Upload `local_file` to `bucket` under the key `s3_file`.

    Returns True on success, False when the local file is missing or
    AWS credentials are unavailable.
    """
    client = boto3.client('s3', aws_access_key_id=ACCESS_KEY,
                          aws_secret_access_key=SECRET_KEY)
    try:
        client.upload_file(local_file, bucket, s3_file)
    except FileNotFoundError:
        print("The file was not found")
        return False
    except NoCredentialsError:
        print("Credentials not available")
        return False
    print("Upload Successful")
    return True
file_name = './Hanma-Baki-anime.jpg'
# Upload the local image into BUCKET_FOLDER inside BUCKET_NAME on S3.
upload_to_aws(file_name, BUCKET_NAME, '%s/%s' % (BUCKET_FOLDER, 'Hanma-Baki-anime.jpg'))
|
999,701 | e0a8efaa6fe2e6ec14a214478528eb53e64f17bd | #-*- coding:utf-8 -*-
# Practice exercises for dict and set basics. Tests that would raise
# TypeError (mutable keys / unhashable set members) are kept commented out,
# following the pattern already used in tests 6 and 12.
#test 1
d = {'Yoeng':90,'justin':80,'oliver':70}
print (d['Yoeng'])
#test 2
d['oliver'] = 100
print (d['oliver'])
#test 3
a = 'Tom' in d
print (a)
#test 4
b = d.get('tom')          # missing key -> None
c = d.get('tom',-1)       # missing key with default -> -1
print(b,c)
#test 5
d.pop('justin')
print (d)
#test 6
#key = [1,2,3]
#d[key] = 'a list'
#test 7
s = set([2,1,3])
print(s)
#test 8
s = set([1,2,3,4,1,2,3])
print(s)
#test 9
s.add(5)
print(s)
s.add(5)
print(s)
#test 10
s.remove(5)
print(s)
#test 11
a = 'abc'
b = a.replace('a','A')
print (a)
print (b)
#test 12
t = (1,2,3)
d = {t:10,'yoen':90}
print (d)
#t = (1, [2,3])
#d = {t:10,'yoen':90}
#print(s)
#test 13
t = (1,2,3)
s = set(t)
# Fixed: test 13 builds the set `s`, so print it (previously printed `d`).
print (s)
# Commented out: set() over a tuple containing a list raises
# "TypeError: unhashable type: 'list'" and crashed the script here.
#t = (1, [2,3])
#s = set(t)
print(s) |
999,702 | 346be60bfc8611d24bfe9a6cce6bd8dc519226cb | # -*- coding: utf-8 -*-
#Realizar un función que reciba como parámetro una cantidad de segundos, y devuelva una tupla con la cantidad de segundos expresada en hh,mm,ss.
def conversor_horario(segundos):
    """Convert a seconds count into an (hours, minutes, seconds) tuple."""
    horas, resto = divmod(segundos, 3600)
    minutos, segundos = divmod(resto, 60)
    return (horas, minutos, segundos)
#Realizar una función que reciba como parámetros cantidades de horas, minutos y/o segundos. Y que retorne la suma de estos expresanda en segundos. (Los parámetros, son opcionales y por defecto sus valores 0.)
def conversor_segundos(horas=0, minutos=0, segundos=0):
    """Return the total seconds in horas:minutos:segundos (all optional, default 0)."""
    return horas * 3600 + minutos * 60 + segundos
|
999,703 | bcd5c1ec349571ea3f925cff0974a8a5782e6e5d | #TODO Refactor so each command is a separate function in a list. That way, the script parser can just do commands["COMMAND"]((args,))
import threading, webbrowser
from time import sleep
from functools import partial
import lp_events, lp_colors, keyboard, sound, mouse
# Pad colors shown while a script is primed (about to run).
COLOR_PRIMED = lp_colors.RED
COLOR_FUNC_KEYS_PRIMED = lp_colors.AMBER
# Delay before refreshing a pad's color after a script exits.
EXIT_UPDATE_DELAY = 0.1
# Polling slice used inside delays so kill requests are noticed quickly.
DELAY_EXIT_CHECK = 0.025
import files
# Command names accepted by run_script()/validate_script().
VALID_COMMANDS = ["STRING", "DELAY", "TAP", "PRESS", "RELEASE", "SP_TAP", "SP_PRESS", "SP_RELEASE", "WEB", "WEB_NEW", "SOUND", "WAIT_UNPRESSED", "M_MOVE", "M_SET", "M_PRESS", "M_RELEASE", "M_SCROLL", "M_TAP", "M_LINE", "M_LINE_MOVE", "M_LINE_SET"]
# Per-button worker threads, indexed [x][y] on the 9x9 grid.
threads = [[None for y in range(9)] for x in range(9)]
# True while a synchronous (non-@ASYNC) script is executing.
running = False
# Queue of (script_text, x, y) waiting for the current script to finish.
to_run = []
# Script text currently bound to each button.
text = [["" for y in range(9)] for x in range(9)]
def schedule_script(script_in, x, y):
    """Schedule (or toggle off) the script for button (x, y).

    Behaviour depends on current state:
    - a live thread for this button -> signal it to stop (acts as a toggle);
    - already queued in to_run -> remove it from the queue (toggle);
    - script starts with @ASYNC -> start immediately on its own thread;
    - nothing running -> start on a background thread that chains run_next();
    - otherwise -> append to the to_run queue.
    """
    global threads
    global to_run
    global running
    coords = "(" + str(x) + ", " + str(y) + ")"
    if threads[x][y] != None:
        if threads[x][y].is_alive():
            # Second press while running: request the script to stop.
            print("[scripts] " + coords + " Script already running, killing script....")
            threads[x][y].kill.set()
            return
    if (x, y) in [l[1:] for l in to_run]:
        # Second press while queued: drop every queued entry for this button.
        print("[scripts] " + coords + " Script already scheduled, unscheduling...")
        indexes = [i for i, v in enumerate(to_run) if ((v[1] == x) and (v[2] == y))]
        # Pop from the back so earlier indexes stay valid.
        for index in indexes[::-1]:
            temp = to_run.pop(index)
        return
    if script_in.split("\n")[0].split(" ")[0] == "@ASYNC":
        print("[scripts] " + coords + " Starting asynchronous script in background...")
        threads[x][y] = threading.Thread(target=run_script, args=(script_in,x,y))
        # Each worker thread carries its own kill Event for cooperative abort.
        threads[x][y].kill = threading.Event()
        threads[x][y].start()
    elif not running:
        print("[scripts] " + coords + " No script running, starting script in background...")
        threads[x][y] = threading.Thread(target=run_script_and_run_next, args=(script_in,x,y))
        threads[x][y].kill = threading.Event()
        threads[x][y].start()
    else:
        print("[scripts] " + coords + " A script is already running, scheduling...")
        to_run.append((script_in, x, y))
    lp_colors.updateXY(x, y)
def run_next():
    """Dequeue the oldest scheduled script (if any) and hand it back to the scheduler."""
    global to_run
    if to_run:
        queued_script, col, row = to_run.pop(0)
        schedule_script(queued_script, col, row)
def run_script_and_run_next(script_in, x_in, y_in):
    """Run a script to completion, then start the next queued script (if any).

    Thread target for synchronous (non-@ASYNC) scripts. The unused local
    `coords` and unused `global` declarations were removed.
    """
    run_script(script_in, x_in, y_in)
    run_next()
def _kill_requested(x, y):
    # True when another thread has asked the script on button (x, y) to stop.
    return threads[x][y].kill.is_set()


def _exit_script(coords, x, y, is_async, release=None):
    """Acknowledge a kill request: log it, clear the flag, mark the global
    runner idle for synchronous scripts, run an optional cleanup callback
    (e.g. releasing a held key/button), and schedule a pad-color refresh."""
    global running
    print("[scripts] " + coords + " Recieved exit flag, script exiting...")
    threads[x][y].kill.clear()
    if not is_async:
        running = False
    if release is not None:
        release()
    threading.Timer(EXIT_UPDATE_DELAY, lp_colors.updateXY, (x, y)).start()


def _sleep_interruptible(duration, coords, x, y, is_async, release=None):
    """Sleep for `duration` seconds in DELAY_EXIT_CHECK slices, honouring a
    kill request between slices. Returns True if the script must exit
    (cleanup has already been performed)."""
    remaining = duration
    while remaining > DELAY_EXIT_CHECK:
        sleep(DELAY_EXIT_CHECK)
        remaining -= DELAY_EXIT_CHECK
        if _kill_requested(x, y):
            _exit_script(coords, x, y, is_async, release)
            return True
    if remaining > 0:
        sleep(remaining)
    return False


def _tap_sequence(split_line, coords, x, y, is_async, key, label, tap, press, release):
    """Shared TAP/SP_TAP handler for all three arities: single tap, N taps,
    or N press/hold/release cycles. Returns False if killed mid-sequence."""
    if len(split_line) <= 2:
        print("[scripts] " + coords + " Tap " + label + " " + split_line[1])
        tap(key)
        return True
    release_cb = lambda: release(key)
    taps = None
    try:
        taps = int(split_line[2])
    except:
        print("[scripts] " + coords + " Invalid number of times to tap, skipping...")
    if len(split_line) <= 3:
        if taps != None:
            print("[scripts] " + coords + " Tap " + label + " " + split_line[1] + " " + split_line[2] + " times")
            for _ in range(taps):
                if _kill_requested(x, y):
                    _exit_script(coords, x, y, is_async, release_cb)
                    return False
                tap(key)
        return True
    delay = None
    try:
        delay = float(split_line[3])
    except:
        print("[scripts] " + coords + " Invalid time to tap, skipping...")
    if (taps != None) and (delay != None):
        print("[scripts] " + coords + " Tap " + label + " " + split_line[1] + " " + split_line[2] + " times for " + str(split_line[3]) + " seconds each")
        for _ in range(taps):
            if _kill_requested(x, y):
                _exit_script(coords, x, y, is_async, release_cb)
                return False
            press(key)
            if _sleep_interruptible(delay, coords, x, y, is_async, release_cb):
                return False
            release(key)
    return True


def _cmd_m_tap(split_line, coords, x, y, is_async):
    """M_TAP handler: click once, click N times, or N timed press/release
    cycles. Returns False if killed mid-sequence."""
    button = split_line[1]
    if len(split_line) <= 2:
        print("[scripts] " + coords + " Tap mouse button " + button)
        mouse.click(button)
        return True
    taps = None
    try:
        taps = int(split_line[2])
    except:
        print("[scripts] " + coords + " Invalid number of times to tap, skipping...")
    if len(split_line) <= 3:
        if (taps != None):
            print("[scripts] " + coords + " Tap mouse button " + split_line[1] + " " + split_line[2] + " times")
            # Multi-click is delegated to the mouse backend (no per-tap kill check,
            # matching the original behaviour).
            mouse.click(button, taps)
        return True
    delay = None
    try:
        delay = float(split_line[3])
    except:
        print("[scripts] " + coords + " Invalid time to tap, skipping...")
    if (taps != None) and (delay != None):
        print("[scripts] " + coords + " Tap mouse button " + split_line[1] + " " + split_line[2] + " times for " + str(split_line[3]) + " seconds each")
        release_cb = lambda: mouse.release(button)
        for _ in range(taps):
            if _kill_requested(x, y):
                _exit_script(coords, x, y, is_async, release_cb)
                return False
            mouse.press(button)
            if _sleep_interruptible(delay, coords, x, y, is_async, release_cb):
                return False
            mouse.release(button)
    return True


def _walk_line(points, skip, delay, coords, x, y, is_async):
    """Move the cursor along precomputed line points (every `skip`-th point),
    optionally waiting `delay` seconds between steps. Returns False if the
    script was killed."""
    for x_m, y_m in points[::skip]:
        if _kill_requested(x, y):
            _exit_script(coords, x, y, is_async)
            return False
        mouse.setXY(x_m, y_m)
        if (delay != None) and (delay > 0):
            if _sleep_interruptible(delay, coords, x, y, is_async):
                return False
    return True


def _run_command(line, coords, x, y, is_async):
    """Dispatch one non-empty, non-comment script line.

    Returns False when the script was killed mid-command (cleanup already
    done by _exit_script), True otherwise.
    """
    split_line = line.split(" ")
    cmd = split_line[0]
    if cmd == "STRING":
        type_string = " ".join(split_line[1:])
        print("[scripts] " + coords + " Type out string " + type_string)
        keyboard.controller.type(type_string)
    elif cmd == "DELAY":
        delay = None
        try:
            delay = float(split_line[1])
        except:
            print("[scripts] " + coords + " Invalid time to delay, skipping...")
        if delay != None:
            print("[scripts] " + coords + " Delay for " + split_line[1] + " seconds")
            if _sleep_interruptible(delay, coords, x, y, is_async):
                return False
    elif cmd == "TAP":
        return _tap_sequence(split_line, coords, x, y, is_async, split_line[1], "key",
                             keyboard.tap, keyboard.press, keyboard.release)
    elif cmd == "PRESS":
        print("[scripts] " + coords + " Press key " + split_line[1])
        keyboard.press(split_line[1])
    elif cmd == "RELEASE":
        print("[scripts] " + coords + " Release key " + split_line[1])
        keyboard.release(split_line[1])
    elif cmd == "SP_TAP":
        key = keyboard.sp(split_line[1])
        if key != None:
            return _tap_sequence(split_line, coords, x, y, is_async, key, "special key",
                                 keyboard.tap, keyboard.press, keyboard.release)
        print("[scripts] " + coords + " Invalid special character to tap: " + split_line[1] + ", skipping...")
    elif cmd == "SP_PRESS":
        if keyboard.sp(split_line[1]) != None:
            print("[scripts] " + coords + " Press special key " + split_line[1])
            keyboard.press(keyboard.sp(split_line[1]))
        else:
            print("[scripts] " + coords + " Invalid special character to press: " + split_line[1] + ", skipping...")
    elif cmd == "SP_RELEASE":
        if keyboard.sp(split_line[1]) != None:
            print("[scripts] " + coords + " Release special key " + split_line[1])
            keyboard.release(keyboard.sp(split_line[1]))
        else:
            print("[scripts] " + coords + " Invalid special character to release: " + split_line[1] + ", skipping...")
    elif cmd == "WEB":
        link = split_line[1]
        if "http" not in link:
            link = "http://" + link
        print("[scripts] " + coords + " Open website " + link + " in default browser")
        webbrowser.open(link)
    elif cmd == "WEB_NEW":
        link = split_line[1]
        if "http" not in link:
            link = "http://" + link
        print("[scripts] " + coords + " Open website " + link + " in default browser, try to make a new window")
        webbrowser.open_new(link)
    elif cmd == "SOUND":
        if len(split_line) > 2:
            print("[scripts] " + coords + " Play sound file " + split_line[1] + " at volume " + str(split_line[2]))
            sound.play(split_line[1], float(split_line[2]))
        else:
            print("[scripts] " + coords + " Play sound file " + split_line[1])
            sound.play(split_line[1])
    elif cmd == "WAIT_UNPRESSED":
        print("[scripts] " + coords + " Wait for script key to be unpressed")
        while lp_events.pressed[x][y]:
            sleep(DELAY_EXIT_CHECK)
            if _kill_requested(x, y):
                _exit_script(coords, x, y, is_async)
                return False
    elif cmd == "M_MOVE":
        if len(split_line) >= 3:
            print("[scripts] " + coords + " Relative mouse movement (" + split_line[1] + ", " + str(split_line[2]) + ")")
            mouse.moveXY(float(split_line[1]), float(split_line[2]))
        else:
            print("[scripts] " + coords + " Both X and Y are required for mouse movement, skipping...")
    elif cmd == "M_SET":
        if len(split_line) >= 3:
            print("[scripts] " + coords + " Set mouse position to (" + split_line[1] + ", " + str(split_line[2]) + ")")
            mouse.setXY(float(split_line[1]), float(split_line[2]))
        else:
            print("[scripts] " + coords + " Both X and Y are required for mouse positioning, skipping...")
    elif cmd == "M_PRESS":
        print("[scripts] " + coords + " Press mouse button " + split_line[1])
        mouse.press(split_line[1])
    elif cmd == "M_RELEASE":
        print("[scripts] " + coords + " Release mouse button " + split_line[1])
        mouse.release(split_line[1])
    elif cmd == "M_SCROLL":
        if len(split_line) > 2:
            print("[scripts] " + coords + " Scroll (" + split_line[1] + ", " + split_line[2] + ")")
            mouse.scroll(float(split_line[2]), float(split_line[1]))
        else:
            print("[scripts] " + coords + " Scroll " + split_line[1])
            mouse.scroll(0, float(split_line[1]))
    elif cmd == "M_TAP":
        return _cmd_m_tap(split_line, coords, x, y, is_async)
    elif cmd == "M_LINE":
        x1 = int(split_line[1])
        y1 = int(split_line[2])
        x2 = int(split_line[3])
        y2 = int(split_line[4])
        delay = None
        if len(split_line) > 5:
            # Optional wait is given in milliseconds.
            delay = float(split_line[5]) / 1000.0
        skip = 1
        if len(split_line) > 6:
            skip = int(split_line[6])
        if (delay == None) or (delay <= 0):
            print("[scripts] " + coords + " Mouse line from (" + split_line[1] + ", " + split_line[2] + ") to (" + split_line[3] + ", " + split_line[4] + ") by " + str(skip) + " pixels per step")
        else:
            print("[scripts] " + coords + " Mouse line from (" + split_line[1] + ", " + split_line[2] + ") to (" + split_line[3] + ", " + split_line[4] + ") by " + str(skip) + " pixels per step and wait " + split_line[5] + " milliseconds between each step")
        return _walk_line(mouse.line_coords(x1, y1, x2, y2), skip, delay, coords, x, y, is_async)
    elif cmd == "M_LINE_MOVE":
        dx = int(split_line[1])
        dy = int(split_line[2])
        delay = None
        if len(split_line) > 3:
            delay = float(split_line[3]) / 1000.0
        skip = 1
        if len(split_line) > 4:
            skip = int(split_line[4])
        if (delay == None) or (delay <= 0):
            print("[scripts] " + coords + " Mouse line move relative (" + split_line[1] + ", " + split_line[2] + ") by " + str(skip) + " pixels per step")
        else:
            print("[scripts] " + coords + " Mouse line move relative (" + split_line[1] + ", " + split_line[2] + ") by " + str(skip) + " pixels per step and wait " + split_line[3] + " milliseconds between each step")
        x_C, y_C = mouse.getXY()
        return _walk_line(mouse.line_coords(x_C, y_C, x_C + dx, y_C + dy), skip, delay, coords, x, y, is_async)
    elif cmd == "M_LINE_SET":
        x1 = int(split_line[1])
        y1 = int(split_line[2])
        delay = None
        if len(split_line) > 3:
            delay = float(split_line[3]) / 1000.0
        skip = 1
        if len(split_line) > 4:
            skip = int(split_line[4])
        if (delay == None) or (delay <= 0):
            print("[scripts] " + coords + " Mouse line set (" + split_line[1] + ", " + split_line[2] + ") by " + str(skip) + " pixels per step")
        else:
            print("[scripts] " + coords + " Mouse line set (" + split_line[1] + ", " + split_line[2] + ") by " + str(skip) + " pixels per step and wait " + split_line[3] + " milliseconds between each step")
        x_C, y_C = mouse.getXY()
        return _walk_line(mouse.line_coords(x_C, y_C, x1, y1), skip, delay, coords, x, y, is_async)
    else:
        print("[scripts] " + coords + " Invalid command: " + split_line[0] + ", skipping...")
    return True


def run_script(script_str, x, y):
    """Execute a button script line by line (commands in VALID_COMMANDS).

    Runs on a worker thread created by schedule_script(). The thread's
    `kill` event is checked between lines and inside every delay so a
    running script can be aborted promptly. The previously duplicated
    kill-check/cleanup and interruptible-delay logic now lives in the
    private helpers above (see the TODO at the top of this file).
    """
    global running
    lp_colors.updateXY(x, y)
    coords = "(" + str(x) + ", " + str(y) + ")"
    script_lines = script_str.split("\n")
    is_async = False
    if script_lines[0].split(" ")[0] == "@ASYNC":
        is_async = True
        script_lines.pop(0)
    else:
        running = True
    print("[scripts] " + coords + " Now running script...")
    for line in script_lines:
        if _kill_requested(x, y):
            _exit_script(coords, x, y, is_async)
            return
        line = line.strip()
        if line == "":
            print("[scripts] " + coords + " Empty line")
        elif line[0] == "-":
            print("[scripts] " + coords + " Comment " + line[1:])
        elif not _run_command(line, coords, x, y, is_async):
            # Killed mid-command; _exit_script already performed cleanup.
            return
    print("[scripts] (" + str(x) + ", " + str(y) + ") Script done running.")
    if not is_async:
        running = False
        threading.Timer(EXIT_UPDATE_DELAY, lp_colors.updateXY, (x, y)).start()
def bind(x, y, script_down, color):
    """Bind `script_down` to button (x, y) with the given color.

    If a run was already queued for this button, the call only unqueues it.
    """
    global to_run
    queued = [i for i, v in enumerate(to_run) if v[1] == x and v[2] == y]
    if queued:
        # Pop from the back so earlier indexes stay valid.
        for index in reversed(queued):
            to_run.pop(index)
        return
    handler = lambda a, b: schedule_script(script_down, x, y)
    lp_events.bind_func_with_colors(x, y, handler, color)
    text[x][y] = script_down
def unbind(x, y):
    """Remove the binding at (x, y): unqueue a pending run, or signal a
    previously started thread to stop."""
    global to_run
    lp_events.unbind(x, y)
    text[x][y] = ""
    pending = [i for i, v in enumerate(to_run) if v[1] == x and v[2] == y]
    if pending:
        # Pop from the back so earlier indexes stay valid.
        for index in reversed(pending):
            to_run.pop(index)
        return
    if threads[x][y] is not None:
        threads[x][y].kill.set()
def unbind_all():
    """Unbind every button, clear all queued scripts, and signal every
    live script thread to stop."""
    global threads
    global text
    global to_run
    lp_events.unbind_all()
    text = [["" for y in range(9)] for x in range(9)]
    to_run = []
    for x in range(9):
        for y in range(9):
            thread = threads[x][y]
            # Fixed: Thread.isAlive() was removed in Python 3.9 -- use
            # is_alive(), matching the call in schedule_script().
            if thread is not None and thread.is_alive():
                thread.kill.set()
def validate_script(script_str):
    """Statically validate a button script without executing it.

    Returns True when the script is valid; otherwise returns a tuple
    (error_message, offending_line). The checks mirror the commands that
    run_script() executes.
    """
    # An empty script is legal (the button simply has nothing bound).
    if script_str == "":
        return True
    script_lines = script_str.split('\n')
    first_line_split = script_lines[0].split(" ")
    # @ASYNC may only appear as a bare header on the first line.
    if first_line_split[0] == "@ASYNC":
        if len(first_line_split) > 1:
            return ("@ASYNC takes no arguments.", script_lines[0])
        temp = script_lines.pop(0)
    for line in script_lines:
        # These strings are reserved as separators by the save-file format.
        for sep in (files.ENTRY_SEPERATOR, files.BUTTON_SEPERATOR, files.NEWLINE_REPLACE):
            if sep in line:
                return ("You cannot use the string '" + sep + "' in any script.", line)
        line = line.strip()
        if line != "":
            if line[0] != "-":
                split_line = line.split(' ')
                if split_line[0] not in VALID_COMMANDS:
                    if split_line[0] == "@ASYNC":
                        return ("@ASYNC is a header and can only be used on the first line.", line)
                    else:
                        return ("Command '" + split_line[0] + "' not valid.", line)
                # --- generic minimum/maximum argument-count checks ---
                if split_line[0] in ["STRING", "DELAY", "TAP", "PRESS", "RELEASE", "SP_TAP", "SP_PRESS", "SP_RELEASE", "WEB", "WEB_NEW", "SOUND", "M_MOVE", "M_SET", "M_PRESS", "M_RELEASE", "M_SCROLL", "M_TAP"]:
                    if len(split_line) < 2:
                        return ("Too few arguments for command '" + split_line[0] + "'.", line)
                if split_line[0] in ["WAIT_UNPRESSED"]:
                    if len(split_line) > 1:
                        return ("Too many arguments for command '" + split_line[0] + "'.", line)
                if split_line[0] in ["DELAY", "WEB", "WEB_NEW", "PRESS", "RELEASE", "SP_PRESS", "SP_RELEASE", "M_PRESS", "M_RELEASE"]:
                    if len(split_line) > 2:
                        return ("Too many arguments for command '" + split_line[0] + "'.", line)
                if split_line[0] in ["SOUND", "M_MOVE", "M_SCROLL", "M_SET"]:
                    if len(split_line) > 3:
                        return ("Too many arguments for command '" + split_line[0] + "'.", line)
                if split_line[0] in ["TAP", "SP_TAP", "M_TAP"]:
                    if len(split_line) > 4:
                        return ("Too many arguments for command '" + split_line[0] + "'.", line)
                if split_line[0] in ["M_LINE"]:
                    if len(split_line) > 7:
                        return ("Too many arguments for command '" + split_line[0] + "'.", line)
                # NOTE(review): M_LINE_MOVE / M_LINE_SET have no maximum-argument
                # check, unlike every other command -- extra arguments pass silently.
                # --- per-command argument validation ---
                if split_line[0] in ["SP_TAP", "SP_PRESS", "SP_RELEASE"]:
                    if keyboard.sp(split_line[1]) == None:
                        return ("No special character named '" + split_line[1] + "'.", line)
                if split_line[0] in ["TAP", "PRESS", "RELEASE"]:
                    if len(split_line[1]) > 1:
                        return ("More than 1 character supplied.", line)
                if split_line[0] == "DELAY":
                    try:
                        temp = float(split_line[1])
                    except:
                        return ("Delay time '" + split_line[1] + "' not valid.", line)
                if split_line[0] in ["TAP", "SP_TAP", "M_TAP"]:
                    func_name = "Tap"
                    if split_line[0] == "SP_TAP":
                        func_name = "Special tap"
                    if split_line[0] == "M_TAP":
                        func_name = "Mouse tap"
                    # Optional 3rd arg = repetitions, 4th = per-tap hold time.
                    if len(split_line) > 3:
                        try:
                            temp = float(split_line[3])
                        except:
                            return (func_name + " wait time '" + split_line[3] + "' not valid.", line)
                    if len(split_line) > 2:
                        try:
                            temp = int(split_line[2])
                        except:
                            return (func_name + " repetitions '" + split_line[2] + "' not valid.", line)
                # NOTE(review): unreachable duplicate -- the WAIT_UNPRESSED arity
                # check above already rejects this case with a different message.
                if split_line[0] == "WAIT_UNPRESSED":
                    if len(split_line) > 1:
                        return ("'WAIT_UNPRESSED' takes no arguments.", line)
                if split_line[0] == "SOUND":
                    if len(split_line) > 2:
                        try:
                            # Volume argument is 0..100, scaled to 0.0..1.0.
                            vol = float(float(split_line[2]) / 100.0)
                            if (vol < 0.0) or (vol > 1.0):
                                return ("'SOUND' volume must be between 0 and 100.", line)
                        except:
                            return ("'SOUND' volume " + split_line[2] + " not valid.", line)
                if split_line[0] == "M_MOVE":
                    if len(split_line) < 3:
                        return ("'M_MOVE' requires both an X and a Y movement value.", line)
                    try:
                        temp = int(split_line[1])
                    except:
                        return ("'M_MOVE' X value '" + split_line[1] + "' not valid.", line)
                    try:
                        temp = int(split_line[2])
                    except:
                        return ("'M_MOVE' Y value '" + split_line[2] + "' not valid.", line)
                if split_line[0] == "M_SET":
                    if len(split_line) < 3:
                        return ("'M_SET' requires both an X and a Y value.", line)
                    try:
                        temp = int(split_line[1])
                    except:
                        return ("'M_SET' X value '" + split_line[1] + "' not valid.", line)
                    try:
                        temp = int(split_line[2])
                    except:
                        return ("'M_SET' Y value '" + split_line[2] + "' not valid.", line)
                if split_line[0] in ["M_PRESS", "M_RELEASE", "M_TAP"]:
                    if split_line[1] not in ["left", "middle", "right"]:
                        return ("Invalid mouse button '" + split_line[1] + "'.", line)
                if split_line[0] == "M_SCROLL":
                    try:
                        temp = float(split_line[1])
                    except:
                        return ("Invalid scroll amount '" + split_line[1] + "'.", line)
                    if len(split_line) > 2:
                        try:
                            temp = float(split_line[2])
                        except:
                            return ("Invalid scroll amount '" + split_line[2] + "'.", line)
                if split_line[0] == "M_LINE":
                    if len(split_line) < 5:
                        return ("'M_LINE' requires at least X1, Y1, X2, and Y2 arguments.", line)
                    try:
                        temp = int(split_line[1])
                    except:
                        return ("'M_LINE' X1 value '" + split_line[1] + "' not valid.", line)
                    try:
                        temp = int(split_line[2])
                    except:
                        return ("'M_LINE' Y1 value '" + split_line[2] + "' not valid.", line)
                    try:
                        temp = int(split_line[3])
                    except:
                        return ("'M_LINE' X2 value '" + split_line[3] + "' not valid.", line)
                    try:
                        temp = int(split_line[4])
                    except:
                        return ("'M_LINE' Y2 value '" + split_line[4] + "' not valid.", line)
                    # Optional 6th arg = wait (ms), 7th = pixel skip per step.
                    if len(split_line) >= 6:
                        try:
                            temp = float(split_line[5])
                        except:
                            return ("'M_LINE' wait value '" + split_line[5] + "' not valid.", line)
                    if len(split_line) >= 7:
                        try:
                            temp = int(split_line[6])
                            if temp == 0:
                                # A zero step would make the point slice empty/invalid.
                                return ("'M_LINE' skip value cannot be zero.", line)
                        except:
                            return ("'M_LINE' skip value '" + split_line[6] + "' not valid.", line)
                if split_line[0] in ["M_LINE_MOVE", "M_LINE_SET"]:
                    if len(split_line) < 3:
                        return ("'" + split_line[0] + "' requires at least X and Y arguments.", line)
                    try:
                        temp = int(split_line[1])
                    except:
                        return ("'" + split_line[0] + "' X value '" + split_line[1] + "' not valid.", line)
                    try:
                        temp = int(split_line[2])
                    except:
                        return ("'" + split_line[0] + "' Y value '" + split_line[2] + "' not valid.", line)
                    if len(split_line) >= 4:
                        try:
                            temp = float(split_line[3])
                        except:
                            return ("'" + split_line[0] + "' wait value '" + split_line[3] + "' not valid.", line)
                    if len(split_line) >= 5:
                        try:
                            temp = int(split_line[4])
                            if temp == 0:
                                return ("'" + split_line[0] + "' skip value cannot be zero.", line)
                        except:
                            return ("'" + split_line[0] + "' skip value '" + split_line[4] + "' not valid.", line)
    return True
|
999,704 | 5f495c98b83a3e07ac8bd00c8ec933df5012c353 | import random
# O(N) list creation: membership is tracked in a set instead of scanning the
# list (the previous `(x, y) not in points` test made this O(N^2)).
def make_points(points, n):
    """Append up to `n` unique random (x, y) points in [0, 1000]^2 to `points`.

    Points already present in `points` (or generated twice) are skipped, so
    in the (astronomically unlikely) float-collision case fewer than `n`
    points are added. `points` is mutated in place.
    """
    seen = set(points)
    for _ in range(n):
        x = random.uniform(0, 1000)
        y = random.uniform(0, 1000)
        if (x, y) not in seen:
            seen.add((x, y))
            points.append((x, y))
|
999,705 | f4dc9f8aecfd9a5f14e872b30489bdcff8811db5 | #coding:utf-8
"""
@file: BiomedSpider
@author: lyn
@contact: tonylu716@gmail.com
@python: 3.3
@editor: PyCharm
@create: 2016-10-13 21:36
@description:
sample_url: http://www.biomedcentral.com/bmcfampract/
"""
import sys,os
# Make the project root (up_level_N directories above this script) importable
# so the absolute package imports below resolve when run directly.
up_level_N = 1
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
root_dir = SCRIPT_DIR
for i in range(up_level_N):
    root_dir = os.path.normpath(os.path.join(root_dir, '..'))
sys.path.append(root_dir)
import requests,time,random
from Journals_Task.JournalSpider import JournalSpider
from journal_parser.Biomed_Parser import BioMedParser,BioMedArticle
from crawl_tools.request_with_proxy import request_with_random_ua
from bs4 import BeautifulSoup
class BioMedSpider(JournalSpider):
    '''
    Spider for BioMed Central journals: builds the paginated article-list
    URLs for one journal and delegates crawling to JournalSpider._run.

    Sample Url:
    http://www.biomedcentral.com/bmcbioinformatics/
    http://www.biomedcentral.com/bmcplantbiol/
    '''
    def __init__(self,JournalObj):
        # JournalObj.site_source holds the journal homepage URL (see samples above).
        JournalSpider.__init__(self,JournalObj)
        self.url = JournalObj.site_source
        self.JournalObj = JournalObj
        self.generate_volume_links()
    def generate_volume_links(self):
        """Populate self.volume_links with one article-list URL per page."""
        # Skip when the links were already collected for this journal.
        if self.JournalObj.volume_links_got:
            return
        # Journal short name, e.g. 'bmcbioinformatics' from '.../bmcbioinformatics/'.
        name = self.url[:-1].split('/')[-1]
        items_fist_page_url = 'http://{}.biomedcentral.com/articles?searchType=journalSearch&sort=PubDate&page=1'.format(name)
        print(items_fist_page_url)
        # Fetch page 1 only to learn how many result pages exist.
        pages_num = BioMedParser(
            html_source = request_with_random_ua(
                url=items_fist_page_url,timeout=3).text
        ).pages_amount
        for i in range(1,pages_num+1):
            page_url = 'http://{}.biomedcentral.com/articles?searchType=journalSearch&sort=PubDate&page={}'\
                .format(name,i)
            self.volume_links.append(page_url)
    def run(self,internal_thread_cot=8,just_init=False):
        """Crawl all volume pages via the base-class runner.

        internal_thread_cot: number of worker threads used by _run.
        just_init: when True, only initialize without crawling.
        """
        self._run(
            AllItemsPageParser = BioMedParser,
            JournalArticle = BioMedArticle,
            just_init=just_init,
            internal_thread_cot=internal_thread_cot
        )
if __name__=="__main__":
    # Smoke test: constructing the spider already fetches the volume links.
    from Journals_Task.JournalClass import Journal
    j = Journal()
    j.site_source = 'http://www.biomedcentral.com/bmcbioinformatics/'
BioMedSpider(j) |
999,706 | 1b50969f364f19e8ec7f4e3ae545180b14affc5a | from turtle import *
meet_turtel=Turtle()
def squre():
    """Draw a 100x100 square with the shared turtle.

    Three side-and-turn pairs followed by the closing side -- the same
    stroke sequence as before (no turn after the final side, so the
    turtle does not end on its starting heading).
    """
    for _ in range(3):
        meet_turtel.forward(100)
        meet_turtel.right(90)
    meet_turtel.forward(100)
# Weights used only to pick a branch; the comparison is always False here.
elephant_weight=3000
ant_weight=0.3
if elephant_weight < ant_weight:
    squre()
else:
    # Two squares; squre() ends with the turtle on a different heading,
    # so the second square is drawn rotated relative to the first.
    squre()
    squre()
done() |
999,707 | 52698dd2391a25979cd125775a5e077b8d9a4333 | """
The flask application package.
"""
from flask import Flask
import MySQLdb
app = Flask(__name__)
# NOTE(review): database credentials (including the password) are hard-coded
# and the connection is opened at import time -- consider loading them from
# configuration/environment variables and handling connection failure.
db = MySQLdb.connect(host="localhost", port=3306, user="root", passwd="230593Mayo", db="bthedge")
# Imported for their side effects: these modules register the app's routes.
import BtHedge.user
import BtHedge.btccontroller
|
999,708 | 96d1c45256496a105eecee83b1a7edf314e5eb4c | from PIL import Image, ImageOps, ImageChops, ImageDraw, ImageFont
import os
# folders = ["output_CHONK1", "output_comprimidos1", "output_comprimidos2"]
folders = ["clean"]
PATH_OUTPUT = os.getcwd() + "/output/"
SIZE = (56, 56)
# One "name|description" line per monster, indexed by file order.
monster_data = open("monster_data.txt", "r", encoding="utf-8").read().split("\n")
FONT = ImageFont.truetype(os.getcwd() + "/pokefont.ttf", 8)
print(FONT.getmetrics())
# Sprite background color to be made transparent (RGBA).
BACKGROUND_COLOR = (202, 220, 159, 255)
for filder_index, folder in enumerate(folders):
    images = []
    files = os.listdir(os.getcwd() + f"/{folder}/")
    for i, file_name in enumerate(files):
        # poke_name = file_name.split("_")[1].split(".")[0]
        img = Image.open(os.getcwd() + f"/{folder}/{file_name}")
        img = img.resize((56, 56), Image.NEAREST)
        # img = Image.alpha_composite(Image.new("RGBA", (56, 56), BACKGROUND_COLOR), img)
        # Center the sprite: make the background transparent while tracking
        # the bounding box of non-background pixels.
        pixels = img.load()
        # NOTE(review): the element names are swapped -- index [0] tracks the
        # MAXIMUM coordinate seen and [1] the MINIMUM (they start at [0, 55]
        # and grow via max/min below). The offset algebra further down still
        # reduces to 28 - (min + max) / 2, i.e. correct centering.
        width_range = [0, 55]
        height_range = [0, 55]
        for y in range(56):
            for x in range(56):
                colorin = pixels[x, y]
                # print(colorin)
                if colorin == BACKGROUND_COLOR:
                    pixels[x, y] = (0, 0, 0, 0)
                else:
                    width_range = [max(width_range[0], x),
                                   min(width_range[1], x)]
                    height_range = [max(height_range[0], y),
                                    min(height_range[1], y)]
        off_x = int((56 - (width_range[1] - width_range[0])) / 2) - width_range[0]
        off_y = int((56 - (height_range[1] - height_range[0])) / 2) - height_range[0]
        # Wrap-around shift that centers the sprite's bounding box.
        img = ImageChops.offset(img, off_x, off_y)
        img = img.resize(SIZE, Image.NEAREST)
        # Add a transparent border 3x the sprite size on every side.
        img = ImageOps.expand(img, SIZE[0]*3, fill=(0, 0, 0, 0))
        d = ImageDraw.Draw(img)
        monster_name, monster_description = monster_data[i].split("|")
        # NOTE(review): FONT.getsize was deprecated and removed in Pillow 10;
        # getbbox/getlength are the replacements if Pillow is upgraded.
        monster_name_size = FONT.getsize(monster_name, direction=None, features=None, language=None, stroke_width=0)
        monster_description_size = FONT.getsize(monster_description, direction=None, features=None, language=None, stroke_width=0)
        _y = 80
        # Horizontally centered name and description under the sprite.
        d.text((int(img.size[0]/2 - monster_name_size[0]/2), _y), monster_name, font=FONT, fill=(15, 56, 15))
        d.text((int(img.size[0]/2 - monster_description_size[0]/2), _y+14), monster_description, font=FONT, fill=(15, 56, 15))
        img.save(f"{PATH_OUTPUT}lovemones_{str(i).zfill(3)}.png")
|
999,709 | 69a08ce998e9f8cb05d35cf45b522e11df01b6af | # Copyright 2019 British Broadcasting Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# THESE CONSTANTS ARE NOT PART OF THIS LIBRARY'S PUBLIC INTERFACE
# The same values are made available by methods that are, such as
#
# TimeOffset.MAX_NANOSEC
#
# So use those instead. At some point these constants could go away without warning
# Nanoseconds per second.
MAX_NANOSEC = 1000000000
# 2**48, the seconds range of the timestamp representation.
MAX_SECONDS = 281474976710656
# The UTC leap seconds table below was extracted from the information provided at
# http://www.ietf.org/timezones/data/leap-seconds.list
#
# The order has been reversed.
# The NTP epoch seconds have been converted to Unix epoch seconds. The difference between
# the NTP epoch at 1 Jan 1900 and the Unix epoch at 1 Jan 1970 is 2208988800 seconds
# Entries are (utc_seconds, tai_seconds - 1), newest first.
UTC_LEAP = [
    # || UTC SEC  | TAI SEC - 1 ||
    (1483228800, 1483228836),  # 1 Jan 2017, 37 leap seconds
    (1435708800, 1435708835),  # 1 Jul 2015, 36 leap seconds
    (1341100800, 1341100834),  # 1 Jul 2012, 35 leap seconds
    (1230768000, 1230768033),  # 1 Jan 2009, 34 leap seconds
    (1136073600, 1136073632),  # 1 Jan 2006, 33 leap seconds
    (915148800, 915148831),  # 1 Jan 1999, 32 leap seconds
    (867715200, 867715230),  # 1 Jul 1997, 31 leap seconds
    (820454400, 820454429),  # 1 Jan 1996, 30 leap seconds
    (773020800, 773020828),  # 1 Jul 1994, 29 leap seconds
    (741484800, 741484827),  # 1 Jul 1993, 28 leap seconds
    (709948800, 709948826),  # 1 Jul 1992, 27 leap seconds
    (662688000, 662688025),  # 1 Jan 1991, 26 leap seconds
    (631152000, 631152024),  # 1 Jan 1990, 25 leap seconds
    (567993600, 567993623),  # 1 Jan 1988, 24 leap seconds
    (489024000, 489024022),  # 1 Jul 1985, 23 leap seconds
    (425865600, 425865621),  # 1 Jul 1983, 22 leap seconds
    (394329600, 394329620),  # 1 Jul 1982, 21 leap seconds
    (362793600, 362793619),  # 1 Jul 1981, 20 leap seconds
    (315532800, 315532818),  # 1 Jan 1980, 19 leap seconds
    (283996800, 283996817),  # 1 Jan 1979, 18 leap seconds
    (252460800, 252460816),  # 1 Jan 1978, 17 leap seconds
    (220924800, 220924815),  # 1 Jan 1977, 16 leap seconds
    (189302400, 189302414),  # 1 Jan 1976, 15 leap seconds
    (157766400, 157766413),  # 1 Jan 1975, 14 leap seconds
    (126230400, 126230412),  # 1 Jan 1974, 13 leap seconds
    (94694400, 94694411),  # 1 Jan 1973, 12 leap seconds
    (78796800, 78796810),  # 1 Jul 1972, 11 leap seconds
    (63072000, 63072009),  # 1 Jan 1972, 10 leap seconds
]
|
999,710 | fecf1bea91b6698a013af5cdf837a17898286b16 | import torch
import cv2
import os
from torch.utils.data import Dataset
import numpy as np
import random
import csv
from crop_keypoint import keypoint
from crop import cropFrames
from kmeans import keyframes
class VideoDataset(Dataset):
    """Cropped, key-frame-sampled video clips for sign-language classification.

    Each sample is read from ``videodir``; the matching keypoint JSON in
    ``jsondir`` is used both to crop every frame around the signer and to
    select ``frame_num`` key frames (k-means based, via ``keyframes``).

    Fixes over the original implementation:
      * ``cv2.VideoCapture`` is now always released (resource leak);
      * the module-level ``global final_keyframe`` is gone — an unmatched
        channels/mode combination used to return a stale value or raise
        ``NameError``; now an explicit ``ValueError`` is raised;
      * the split file is parsed once in ``__init__`` instead of on every
        ``__len__``/``__getitem__`` call;
      * the byte-identical train/val branches were collapsed.
    """

    def __init__(self, videodir, jsondir, channels, frame_num, mode, transform=None):
        # Count all files under videodir (kept for backward compatibility;
        # not used for indexing).
        total_number = 0
        for paths, dirs, files in os.walk(videodir):
            total_number += len(files)
        self.total_number = total_number
        self.videodir = videodir
        self.train = '/home/han006/experiment_v3/CSL5000_100class/dataset/CSL25000_500classes/train.txt'
        self.test = '/home/han006/experiment_v3/CSL5000_100class/dataset/CSL25000_500classes/test.txt'
        self.jsondir = jsondir
        self.channels = channels
        self.transform = transform
        self.frame_num = frame_num
        self.mode = mode
        # Parse the split file once; the original re-read it on every access.
        self._paths = self.get_path(self.train if mode == 'train' else self.test)

    def get_path(self, train_or_test_txt):
        """Parse a split file into (video_path, json_path, label) triples.

        Each line looks like ``<label>/<name>``; the leading directory is the
        integer class label.
        """
        all_path = []
        with open(train_or_test_txt, 'r') as f:
            for data in f.readlines():
                data_info = data.split('/')
                label = int(data_info[0])
                stem = data.split('\n')[0]
                final_path = self.videodir + '/' + stem + '.avi'
                final_json_path = self.jsondir + '/' + stem + '.json'
                all_path.append((final_path, final_json_path, label))
        return all_path

    def __len__(self):
        return len(self._paths)

    def readVideo(self, videofile, jsonfile):
        """Read, crop and key-frame-sample one video.

        Returns an ndarray of shape (frame_num, H, W, C) with C == 3 (RGB)
        or C == 1 (grayscale channel kept as a trailing axis).
        """
        keypoints_class = keypoint(jsonfile)
        cropFrame_class = cropFrames()
        xy = keypoints_class.get_x_y()
        # Bounding box over all keypoints, padded by 5 px on each side.
        min_x = cropFrame_class.get_min_x(xy)
        max_x = cropFrame_class.get_max_x(xy)
        min_y = cropFrame_class.get_min_y(xy)
        max_y = cropFrame_class.get_max_y(xy)
        cap = cv2.VideoCapture(videofile)
        frames = []
        try:
            while cap.isOpened():
                ret, frame = cap.read()
                if not ret:
                    break
                frames.append(frame[int(min_y - 5):int(max_y + 5),
                                    int(min_x - 5):int(max_x + 5)])
        finally:
            cap.release()  # the original leaked the capture handle
        # Key-frame selection is identical for train and val, so the four
        # duplicated branches of the original collapse to a single call.
        selected = keyframes(jsonfile, frames).keyframes_id(self.frame_num)
        processed = []
        if self.channels == 3:
            for f in selected:
                processed.append(cv2.cvtColor(f, cv2.COLOR_BGR2RGB))
        elif self.channels == 1:
            for f in selected:
                gray = cv2.cvtColor(f, cv2.COLOR_BGR2GRAY)
                processed.append(np.expand_dims(gray, axis=2))
        else:
            raise ValueError('channels must be 1 or 3, got %r' % (self.channels,))
        return np.array(processed)

    def __getitem__(self, index):
        """Return (video, label); video is (C, T, H, W) after transform+stack."""
        videopath, jsonpath, videolabel = self._paths[index]
        video_data = self.readVideo(videopath, jsonpath)
        video_frame_data_list = []
        for video_frame_data in video_data:
            video_frame_data_list.append(self.transform(video_frame_data))
        video = torch.stack(video_frame_data_list, dim=0)
        video = video.permute(1, 0, 2, 3)  # (T, C, H, W) -> (C, T, H, W)
        videolabel = torch.tensor(videolabel)
        return video, videolabel
999,711 | 81a2c28eb54fe06f045e5f217d3e15d03a51850b | from __future__ import print_function
from copy import copy
from functools import partial
import numpy as np
from joblib import Parallel, delayed
from numpy import exp, argsort, ceil, zeros, mod
from numpy.random import randint, rand, randn, geometric
from .RbfInter import predictRBFinter
from .utils import boundary_handling
#######################################
# Docker for different models
def KrgDocker(X, model):
    """Flatten ``X`` into a single sample row and query a Kriging model."""
    row = np.reshape(X, (1, -1))
    return model.predict_values(row)
def RBFDocker(X, rbfmodel, uncertainty):
    """Reshape ``X`` to one sample row and evaluate the RBF interpolant."""
    return predictRBFinter(rbfmodel, np.reshape(X, (1, -1)), uncertainty)
def ICDocker(X, model):
    """Call an infill-criterion callable and wrap its value in a 1-tuple."""
    return (model(X),)
def SVMDocker(X, model):
    """Reshape ``X`` into a single-row matrix and run the SVM predictor."""
    sample = np.reshape(X, (1, -1))
    return model.predict(sample)
def RFDocker(X, model):
    """Reshape ``X`` into one sample row and query the random forest (no MSE)."""
    sample = np.reshape(X, (1, -1))
    return model.predict(sample, eval_MSE=False)
#######################################
def objective(model, modelType, minimization):
    """Map a surrogate-model type name to its prediction callable.

    Returns ``(fPV, minimization)`` where ``fPV`` evaluates ``model`` on a
    single point.  Infill criteria ('MGFI'/'EI') are always maximized, so
    the minimization flag is forced to False for them.
    """
    if modelType == 'Kriging':
        fPV = partial(KrgDocker, model=model)
    elif modelType == 'RBF':
        fPV = partial(RBFDocker, rbfmodel=model, uncertainty=False)
    elif modelType in ('MGFI', 'EI'):
        fPV = partial(ICDocker, model=model)
        minimization = False
    elif modelType == 'SVM':
        fPV = partial(SVMDocker, model=model)
    elif modelType == 'RF':
        fPV = partial(RFDocker, model=model)
    else:
        raise NotImplementedError
    return fPV, minimization
"""
Information of authors regarding Mi-ES
Created on Thu Sep 7 11:10:18 2017
@author: wangronin
"""
class Individual(list):
    """Make it possible to index Python list object using the enumerables

    A genome container for MI-ES: supports fancy indexing with an iterable
    of positions (including boolean-ish masks cast through ``int``) for both
    reads and writes, and keeps its own type under ``+`` and ``*``.
    """
    def __getitem__(self, keys):
        # Single integer -> plain list indexing; iterable -> gather into a
        # new Individual.  NOTE(review): any other key type (e.g. a slice)
        # silently returns None — confirm callers never pass one.
        if isinstance(keys, int):
            return super(Individual, self).__getitem__(keys)
        elif hasattr(keys, '__iter__'):
            return Individual([super(Individual, self).__getitem__(int(key)) for key in keys])
    def __setitem__(self, index, values):
        # In python3 hasattr(values, '__iter__') returns True for string type...
        # Normalize ``values`` to a list first so scalar and vector
        # assignments share the unwrapping logic below.
        if hasattr(values, '__iter__') and not isinstance(values, str):
            values = Individual([_ for _ in values])
        else:
            values = [values]
        if not hasattr(index, '__iter__'):
            # Scalar index: unwrap a 1-element value back to a scalar.
            index = int(index)
            if hasattr(values, '__iter__'):
                if len(values) == 1:
                    values = values[0]
                else:
                    values = Individual([_ for _ in values])
            super(Individual, self).__setitem__(index, values)
        else:
            # Iterable index: element-wise scatter (lengths must match),
            # with the 1-element case unwrapped to a scalar assignment.
            index = [i for i in index]
            if len(index) == 1:
                index = index[0]
                if len(values) == 1:
                    values = values[0]
                super(Individual, self).__setitem__(index, values)
            else:
                assert len(index) == len(values)
                for i, k in enumerate(index):
                    super(Individual, self).__setitem__(k, values[i])
    def __add__(self, other):
        # Keep the Individual type instead of decaying to plain list.
        return Individual(list.__add__(self, other))
    def __mul__(self, other):
        return Individual(list.__mul__(self, other))
    def __rmul__(self, other):
        return Individual(list.__mul__(self, other))
# TODO: improve efficiency, e.g. compile it with cython
# TODO: improve efficiency, e.g. compile it with cython
class mies(object):
    """Mixed-Integer Evolution Strategy (MI-ES).

    Optimizes ``obj_func`` over a mixed search space of continuous (C),
    ordinal/integer (O) and nominal/categorical (N) variables.  Each genome
    is an ``Individual`` laid out as::

        [ variables | sigma (continuous step sizes)
                    | eta (integer step sizes) | p (nominal mutation prob.) ]

    with slice indices precomputed in ``_id_var`` / ``_id_sigma`` /
    ``_id_eta`` / ``_id_p`` / ``_id_hyperpar``.
    """
    def __init__(self, search_space, obj_func, x0=None, ftarget=None, max_eval=np.inf,
                 minimize=True, mu_=10, lambda_=70, sigma0=None, eta0=None, P0=None, plus_selection=False,
                 multiple_return=False,
                 verbose=False):
        self.verbose = verbose
        self.mu_ = mu_
        self.lambda_ = lambda_
        self.eval_count = 0
        self.iter_count = 0
        self.max_eval = max_eval
        # NOTE(review): the ``plus_selection`` argument is ignored — the
        # attribute is hard-coded to False, so selection is always (mu, lambda).
        self.plus_selection = False
        self.minimize = minimize
        self.obj_func = obj_func
        self.stop_dict = {}
        self.hist_best_x = []
        self.hist_best_y = []
        self.hist_best_y_ifmax = []
        self._space = search_space
        self.var_names = self._space.var_name.tolist()
        self.param_type = self._space.var_type
        # index of each type of variables in the dataframe
        self.id_r = self._space.id_C  # index of continuous variable
        self.id_i = self._space.id_O  # index of integer variable
        self.id_d = self._space.id_N  # index of categorical variable
        # the number of variables per each type
        self.N_r = len(self.id_r)
        self.N_i = len(self.id_i)
        self.N_d = len(self.id_d)
        self.dim = self.N_r + self.N_i + self.N_d
        # by default, we use individual step sizes for continuous and integer variables
        # and global strength for the nominal variables
        self.N_p = min(self.N_d, int(1))
        self._len = self.dim + self.N_r + self.N_i + self.N_p
        # unpack interval bounds
        self.bounds_r = np.asarray([self._space.bounds[_] for _ in self.id_r])
        self.bounds_i = np.asarray([self._space.bounds[_] for _ in self.id_i])
        self.bounds_d = np.asarray([self._space.bounds[_] for _ in self.id_d])  # actually levels...
        # step default step-sizes/mutation strength: 5% of each variable's range
        if sigma0 is None and self.N_r:
            sigma0 = 0.05 * (self.bounds_r[:, 1] - self.bounds_r[:, 0])
        if eta0 is None and self.N_i:
            eta0 = 0.05 * (self.bounds_i[:, 1] - self.bounds_i[:, 0])
        if P0 is None and self.N_d:
            P0 = 1. / self.N_d
        # column names of the dataframe: used for slicing
        self._id_var = np.arange(self.dim)
        self._id_sigma = np.arange(self.N_r) + len(self._id_var) if self.N_r else []
        self._id_eta = np.arange(self.N_i) + len(self._id_var) + len(self._id_sigma) if self.N_i else []
        self._id_p = np.arange(self.N_p) + len(self._id_var) + len(self._id_sigma) + len(
            self._id_eta) if self.N_p else []
        self._id_hyperpar = np.arange(self.dim, self._len)
        self.multiple_return = multiple_return
        # initialize the populations
        if x0 is not None:  # given x0: clone it mu_ times
            # NOTE(review): only the first applicable hyper-parameter group
            # (sigma OR eta OR P) is appended here (elif chain + par[0]);
            # mixed spaces with x0 may get an incomplete genome — confirm.
            par = []
            if self.N_r:
                par += [sigma0]
            elif self.N_i:
                par += [eta0]
            elif self.N_p:
                par += [P0] * self.N_p
            # individual0 = Individual(np.r_[x0, eta0, [P0] * self.N_p])
            # individual0 = Individual(np.r_[x0, sigma0, eta0, [P0] * self.N_p])
            individual0 = Individual(np.r_[x0, par[0]])
            self.pop_mu = Individual([individual0]) * self.mu_
            fitness0 = self.evaluate(self.pop_mu[0])
            self.f_mu = np.repeat(fitness0, self.mu_)
            self.xopt = x0
            self.fopt = sum(fitness0)
        else:
            x = np.asarray(self._space.sampling(self.mu_), dtype='object')  # uniform sampling
            par = []
            if self.N_r:
                par += [np.tile(sigma0, (self.mu_, 1))]
            if self.N_i:
                par += [np.tile(eta0, (self.mu_, 1))]
            if self.N_p:
                par += [np.tile([P0] * self.N_p, (self.mu_, 1))]
            par = np.concatenate(par, axis=1)
            self.pop_mu = Individual([Individual(_) for _ in np.c_[x, par].tolist()])
            self.f_mu = self.evaluate(self.pop_mu)
            self.fopt = min(self.f_mu) if self.minimize else max(self.f_mu)
            try:
                a = int(np.nonzero(self.fopt == self.f_mu)[0][0])
            except IndexError:
                # Defensive fallback (e.g. NaN fitness breaks the equality
                # match above): fall back to argmin/argmax.
                print(self.fopt)
                print(self.f_mu)
                print(np.nonzero(self.fopt == self.f_mu))
                a = np.argmin(self.f_mu) if self.minimize else np.argmax(self.f_mu)
                # raise IndexError
            self.xopt = self.pop_mu[a][self._id_var]
        self.pop_lambda = Individual([self.pop_mu[0]]) * self.lambda_
        self._set_hyperparameter()
        # stop criteria
        self.tolfun = 1e-5
        self.nbin = int(3 + ceil(30. * self.dim / self.lambda_))
        self.histfunval = zeros(self.nbin)
    def _set_hyperparameter(self):
        # hyperparameters: mutation strength adaptation (learning rates
        # 1/sqrt(2N) and 1/sqrt(2 sqrt(N)) per variable group)
        if self.N_r:
            self.tau_r = 1 / np.sqrt(2 * self.N_r)
            self.tau_p_r = 1 / np.sqrt(2 * np.sqrt(self.N_r))
        if self.N_i:
            self.tau_i = 1 / np.sqrt(2 * self.N_i)
            self.tau_p_i = 1 / np.sqrt(2 * np.sqrt(self.N_i))
        if self.N_d:
            self.tau_d = 1 / np.sqrt(2 * self.N_d)
            self.tau_p_d = 1 / np.sqrt(2 * np.sqrt(self.N_d))
    def recombine(self, id1, id2):
        """Recombine two parents: intermediate on step sizes, dominant on genes."""
        p1 = copy(self.pop_mu[id1])  # IMPORTANT: this copy is necessary
        if id1 != id2:
            p2 = self.pop_mu[id2]
            # intermediate recombination for the mutation strengths
            p1[self._id_hyperpar] = (np.array(p1[self._id_hyperpar]) +
                                     np.array(p2[self._id_hyperpar])) / 2
            # dominant recombination
            # NOTE(review): randn() > 0.5 is True with prob. ~0.31, not 0.5;
            # rand() may have been intended — confirm before changing.
            mask = randn(self.dim) > 0.5
            p1[mask] = p2[mask]
        return p1
    def select(self):
        """(mu, lambda) truncation selection (plus-selection path is dead, see __init__)."""
        pop = self.pop_mu + self.pop_lambda if self.plus_selection else self.pop_lambda
        fitness = np.r_[self.f_mu, self.f_lambda] if self.plus_selection else self.f_lambda
        fitness_rank = argsort(fitness)
        if not self.minimize:
            fitness_rank = fitness_rank[::-1]
        sel_id = fitness_rank[:self.mu_]
        self.pop_mu = pop[sel_id]
        self.f_mu = fitness[sel_id]
    def evaluate(self, pop):
        """Evaluate a population (or a single individual) and count evaluations."""
        if not hasattr(pop[0], '__iter__'):
            pop = [pop]
        N = len(pop)
        f = np.zeros(N)
        for i, individual in enumerate(pop):
            var = individual[self._id_var]
            f[i] = np.sum(self.obj_func(var)[0])  # in case a 1-length array is returned
            self.eval_count += 1
        return f
    def mutate(self, individual):
        """Apply the type-specific mutation operators in place; returns the individual."""
        if self.N_r:
            self._mutate_r(individual)
        if self.N_i:
            self._mutate_i(individual)
        if self.N_d:
            self._mutate_d(individual)
        return individual
    def _mutate_r(self, individual):
        """Log-normal self-adaptation + Gaussian mutation of continuous genes."""
        sigma = np.array(individual[self._id_sigma])
        if len(self._id_sigma) == 1:
            sigma = sigma * exp(self.tau_r * randn())
        else:
            sigma = sigma * exp(self.tau_r * randn() + self.tau_p_r * randn(self.N_r))
        # Gaussian mutation
        R = randn(self.N_r)
        x = np.array(individual[self.id_r])
        x_ = x + sigma * R
        # Interval Bounds Treatment
        x_ = boundary_handling(x_, self.bounds_r[:, 0], self.bounds_r[:, 1])
        # Repair the step-size if x_ is out of bounds
        individual[self._id_sigma] = np.abs((x_ - x) / R)
        individual[self.id_r] = x_
    def _mutate_i(self, individual):
        """Self-adaptive geometric-difference mutation of integer genes."""
        eta = np.array(individual[self._id_eta])
        x = np.array(individual[self.id_i])
        if len(self._id_eta) == 1:
            eta = max(1, eta * exp(self.tau_i * randn()))
            p = 1 - (eta / self.N_i) / (1 + np.sqrt(1 + (eta / self.N_i) ** 2))
            x_ = x + geometric(p, self.N_i) - geometric(p, self.N_i)
        else:
            eta = eta * exp(self.tau_i * randn() + self.tau_p_i * randn(self.N_i))
            # NOTE(review): this caps eta at 1 while the scalar branch floors
            # it at 1 (max(1, ...)) — the asymmetry looks unintended; confirm.
            eta[eta > 1] = 1
            p = 1 - (eta / self.N_i) / (1 + np.sqrt(1 + (eta / self.N_i) ** 2))
            x_ = x + np.array([geometric(p_) - geometric(p_) for p_ in p])
        # TODO: implement the same step-size repairing method here
        x_ = boundary_handling(x_, self.bounds_i[:, 0], self.bounds_i[:, 1])
        individual[self._id_eta] = eta
        individual[self.id_i] = x_
    def _mutate_d(self, individual):
        """Self-adaptive resampling mutation of nominal genes."""
        P = np.array(individual[self._id_p])
        # Logistic self-adaptation of the mutation probability, kept within
        # [1/(3 N_d), 0.5].
        P = 1 / (1 + (1 - P) / P * exp(-self.tau_d * randn()))
        individual[self._id_p] = boundary_handling(P, 1 / (3. * self.N_d), 0.5)[0].tolist()
        idx = np.nonzero(rand(self.N_d) < P)[0]
        for i in idx:
            level = self.bounds_d[i]
            individual[self.id_d[i]] = level[randint(0, len(level))]
    def stop(self):
        """Check the termination criteria; returns True when any has fired."""
        if self.eval_count > self.max_eval:
            self.stop_dict['max_eval'] = True
        if self.eval_count != 0 and self.iter_count != 0:
            fitness = self.f_lambda
            # tolerance on fitness in history
            self.histfunval[int(mod(self.eval_count / self.lambda_ - 1, self.nbin))] = fitness[0]
            if mod(self.eval_count / self.lambda_, self.nbin) == 0 and \
                    (max(self.histfunval) - min(self.histfunval)) < self.tolfun:
                self.stop_dict['tolfun'] = True
            # flat fitness within the population
            if fitness[0] == fitness[int(max(ceil(.1 + self.lambda_ / 4.), self.mu_ - 1))]:
                self.stop_dict['flatfitness'] = True
        return any(self.stop_dict.values())
    def _better(self, perf1, perf2):
        # True when perf1 improves on perf2 under the current sense.
        if self.minimize:
            return perf1 < perf2
        else:
            return perf1 > perf2
    def optimize(self):
        """Run the ES loop until a stop criterion fires.

        Returns ``(x, f, stop_dict)``; with ``multiple_return`` the first two
        are the (up to 5, or 10) best distinct candidates found.
        """
        while not self.stop():
            for i in range(self.lambda_):
                p1, p2 = randint(0, self.mu_), randint(0, self.mu_)
                individual = self.recombine(p1, p2)
                self.pop_lambda[i] = self.mutate(individual)
            self.f_lambda = self.evaluate(self.pop_lambda)
            self.select()
            curr_best = self.pop_mu[0]
            xopt_, fopt_ = curr_best[self._id_var], self.f_mu[0]
            xopt_[self.id_i] = list(map(int, xopt_[self.id_i]))
            self.iter_count += 1
            if self.multiple_return:
                # Keep a sorted top-10 history; the _ifmax list stores -f for
                # maximization so searchsorted stays ascending in both senses.
                ind = np.searchsorted(self.hist_best_y_ifmax, fopt_) \
                    if self.minimize else np.searchsorted(self.hist_best_y_ifmax, -fopt_)
                self.hist_best_y_ifmax.insert(ind, fopt_) \
                    if self.minimize else self.hist_best_y_ifmax.insert(ind, -fopt_)
                self.hist_best_y.insert(ind, fopt_)
                self.hist_best_x.insert(ind, xopt_)
                if len(self.hist_best_y) > 10:
                    self.hist_best_y = self.hist_best_y[:10]
                    self.hist_best_x = self.hist_best_x[:10]
                    self.hist_best_y_ifmax = self.hist_best_y_ifmax[:10]
            if self._better(fopt_, self.fopt):
                self.xopt, self.fopt = xopt_, fopt_
            if self.verbose:
                print('iteration ', self.iter_count + 1)
                print(self.xopt, self.fopt)
        self.stop_dict['funcalls'] = self.eval_count
        if self.multiple_return:
            if len(self.hist_best_y) > 5:
                hist_best_x = np.array(self.hist_best_x[:5], ndmin=2, dtype=int)
                hist_best_y = np.array(self.hist_best_y[:5])
                return hist_best_x[:5], hist_best_y[:5], self.stop_dict
            elif len(self.hist_best_y) < 1:
                # No history collected (loop never entered): fall back to the
                # incumbent best.
                hist_best_x = np.array(self.xopt, ndmin=2, dtype=int)
                hist_best_y = np.array([self.fopt])
                return hist_best_x, hist_best_y, self.stop_dict
            else:
                hist_best_x = np.array(self.hist_best_x, ndmin=2, dtype=int)
                hist_best_y = np.array(self.hist_best_y)
                return hist_best_x, hist_best_y, self.stop_dict
        else:
            return np.array(self.xopt, ndmin=2, dtype=int), np.array(self.fopt, ndmin=1), self.stop_dict
# End of MI-ES
##################################################################
# Call MI-ES in our context
def _single_mies(arguments):
    """Run one MI-ES (re)start; ``arguments`` is the packed parameter tuple.

    ``flag`` selects whether the provided point ``x`` seeds the population
    (warm start) or the start is purely random.
    """
    (x, input_space, nIter, acquisition_func, acquisition_type,
     minimization, flag, multi_return) = arguments
    fPV, doMin = objective(acquisition_func, acquisition_type, minimization)
    # Fixed population sizes: mu = 10, lambda = 7 * mu.
    mu_ = 10
    lambda_ = mu_ * 7
    common = dict(mu_=mu_, lambda_=lambda_, max_eval=nIter, verbose=False,
                  minimize=doMin, multiple_return=multi_return)
    if flag:
        opt = mies(input_space, fPV, x0=x, **common)
    else:
        opt = mies(input_space, fPV, **common)
    xopt, fopt, stop_dict = opt.optimize()
    return xopt, fopt, stop_dict
# Interface of multi-start MI-ES
# Interface of multi-start MI-ES
def MIES_multistart(acquisition_func, acquisition_type, nVar, space, hist_x, nRandStart,
                    nIter=None, minimization=False, multi_return=True, out_optimizer_log=None, par=None, nTotal=6):
    """Run MI-ES from the warm starts in ``hist_x`` plus ``nRandStart`` random
    starts in parallel, and return the top candidate points as an int array
    of shape (top_c, nVar).

    NOTE(review): ``out_optimizer_log`` is accepted but never used here.
    """
    if par is None:
        par = Parallel(n_jobs=nTotal)
    if nIter is None:
        nIter = int(1e3 * nVar)
    # if nIter < 25000:
    #     nIter = 25000
    _, doMin = objective(acquisition_func, acquisition_type, minimization)
    process = []
    # space = OrdinalSpace([0, 2], 'I') * nVar
    # Warm starts (flag=True) seed the ES population with the given point.
    for xx in hist_x:
        flag = True
        process.append((xx, space, nIter, acquisition_func, acquisition_type, minimization, flag, multi_return))
    # Random restarts (flag=False) sample a fresh starting population.
    for j in range(nRandStart):
        # xx = np.random.randint(2, size=nVar)
        xx = space.sampling()[0]
        flag = False
        process.append((xx, space, nIter, acquisition_func, acquisition_type, minimization, flag, multi_return))
    results = par(delayed(_single_mies)(p) for p in process)
    # Concatenate the non-empty (x, y) results from every restart.
    # NOTE(review): if every restart returned an empty x, ``new_xx``/``yy``
    # stay unbound and the code below raises NameError — confirm that at
    # least one restart always yields candidates.
    fFirst = True
    for item in results:
        if item[0].size > 0:
            if fFirst:
                new_xx = item[0]
                yy = item[1]
                fFirst = False
            else:
                tmp = new_xx
                new_xx = np.concatenate([tmp, item[0]], axis=0)
                tmp = yy
                yy = np.concatenate(([tmp, item[1]]), axis=0)
    # print(item[2])
    # new_xx = np.concatenate(, axis=0)
    # yy = np.concatenate(([item[1] for item in results]), axis=0)
    original_unique_index = np.argsort(yy)
    # NOTE(review): combined, these two lines give top_c = len for <= 10
    # candidates, 10 for 11..15, and 15 beyond — an odd step; confirm intent.
    top_c = 10 if len(original_unique_index) > 10 else len(original_unique_index)
    top_c = 15 if len(original_unique_index) > 15 else top_c
    xs = np.ones((top_c, nVar), dtype=int)
    if doMin:
        # Ascending fitness: fill best-first.
        top = original_unique_index[:top_c]
        for idx, i in enumerate(top):
            xs[idx, :] = new_xx[i]
    else:
        # Maximization: same indices, filled in reverse so the best ends last.
        top = original_unique_index[:top_c]
        for idx, i in enumerate(top):
            xs[top_c - 1 - idx, :] = new_xx[i]
    return xs
|
999,712 | 73fb5de2e9c260b1cf864978529a8d2a198902cb | # -*- coding:utf-8 -*-
import operator
from api.api import API
from pages.ios.common.superPage import SuperPage
from pages.ios.ffan.dianyinggoupiao_page_configs import dianyinggoupiaoconfigs as DYGPC
from pages.logger import logger
class dianyinggoupiaopage(SuperPage):
    """Movie ticket purchase page (电影购票).

    Author: 刘潇
    """

    def __init__(self, testcase, driver, logger):
        """Initialize the page with its test case, driver and logger."""
        super(dianyinggoupiaopage, self).__init__(testcase, driver, logger)

    def validSelf(self):
        """Verify the shopping-mall page is displayed and take a screenshot."""
        logger.info("Check 购物中心 begin")
        # NOTE(review): an XPath constant is passed as ``name`` here — confirm
        # assertElementByName accepts it, or whether assertElementByXpath was meant.
        API().assertElementByName(testCase=self.testcase, driver=self.driver,
                                  logger=self.logger, name=DYGPC.xpath_xuanzuo)
        API().screenShot(self.driver, "gouWuZhongXin")
        logger.info("Check 购物中心 end")

    def clickOnxuanzuo(self):
        """Tap the seat-selection (选座) button."""
        logger.info("Click 选座按钮 begin")
        API().clickElementByXpath(testCase=self.testcase, driver=self.driver,
                                  logger=self.logger, xpath=DYGPC.xpath_xuanzuo,
                                  timeout=DYGPC.click_on_button_timeout)
        logger.info("Click 选座按钮 end")

    def clickOnyingcheng(self):
        """Tap the cinema picker (选择影城)."""
        logger.info("Click 选择影城 begin")
        API().clickElementByXpath(testCase=self.testcase, driver=self.driver,
                                  logger=self.logger, xpath=DYGPC.xpath_yingcheng,
                                  timeout=DYGPC.click_on_button_timeout)
        logger.info("Click 选择影城 end")

    def clickOnqiehuanchengshi(self):
        """Tap the switch-city (切换城市) button."""
        logger.info("Click 切换城市按钮 begin")
        API().clickElementByXpath(testCase=self.testcase, driver=self.driver,
                                  logger=self.logger, xpath=DYGPC.xpath_qiehuanchengshi,
                                  timeout=DYGPC.click_on_button_timeout)
        logger.info("Click 切换城市按钮 end")

    def inputchengshi(self):
        """Type the target city name into the city search box."""
        logger.info("Input 输入城市名称 begin")
        API().inputStringByXpath(self.testcase, self.driver, self.logger,
                                 DYGPC.xpath_shuruchengshi, DYGPC.chengshiname,
                                 DYGPC.input_timeout)
        logger.info("Input 输入城市名称 end")

    def clickOnbaotoushi(self):
        """Select the city 包头市 from the result list."""
        logger.info("Click 包头市 begin")
        API().clickElementByName(self.testcase, self.driver, self.logger,
                                 DYGPC.baotoushi, DYGPC.click_on_button_timeout)
        logger.info("Click 包头市 end")
999,713 | 42560c2d946975b354c128057d199a428d9c61df | import torch
import torch.nn.functional as F
from torch import nn
class UNetConvBlock(nn.Module):
    """Two (conv3x3 -> ReLU [-> batch norm]) layers — the basic U-Net block.

    NOTE: ``layer_conv`` and ``layer_batchnorm`` are not imports; they are
    injected into this module's globals by ``UNet.__init__`` (2D or 3D
    variants), so a ``UNet`` must be constructed before this class is usable.
    """
    def __init__(self, in_size, out_size, padding, batch_norm):
        super(UNetConvBlock, self).__init__()
        block = []
        block.append(layer_conv(in_size, out_size, kernel_size=3,
                                padding=int(padding)))
        block.append(nn.ReLU())
        if batch_norm:
            block.append(layer_batchnorm(out_size))
        block.append(layer_conv(out_size, out_size, kernel_size=3,
                                padding=int(padding)))
        block.append(nn.ReLU())
        if batch_norm:
            block.append(layer_batchnorm(out_size))
        self.block = nn.Sequential(*block)
    def forward(self, x):
        out = self.block(x)
        return out
class UNetUpBlock(nn.Module):
    """U-Net decoder stage: upsample, concatenate the skip tensor, convolve.

    Uses the module-level ``layer_convtrans`` / ``layer_conv`` /
    ``interp_mode`` injected by ``UNet.__init__`` (2D or 3D variants).
    """
    def __init__(self, in_size, out_size, up_mode, padding, batch_norm):
        super(UNetUpBlock, self).__init__()
        if up_mode == 'upconv':
            self.up = layer_convtrans(in_size, out_size, kernel_size=2,
                                      stride=2)
        elif up_mode == 'upsample':
            self.up = nn.Sequential(nn.Upsample(mode=interp_mode, scale_factor=2),
                                    layer_conv(in_size, out_size, kernel_size=1))
        self.conv_block = UNetConvBlock(in_size, out_size, padding, batch_norm)
    def center_crop(self, layer, target_size):
        # Crop ``layer`` symmetrically to ``target_size`` (D, H, W). Currently
        # unused: forward() interpolates to the skip size instead.
        _, _, layer_depth, layer_height, layer_width = layer.size()
        diff_z = (layer_depth - target_size[0]) // 2
        diff_y = (layer_height - target_size[1]) // 2
        diff_x = (layer_width - target_size[2]) // 2
        return layer[:, :, diff_z:(diff_z + target_size[0]), diff_y:(diff_y + target_size[1]), diff_x:(diff_x + target_size[2])]
        # _, _, layer_height, layer_width = layer.size() #for 2D data
        # diff_y = (layer_height - target_size[0]) // 2
        # diff_x = (layer_width - target_size[1]) // 2
        # return layer[:, :, diff_y:(diff_y + target_size[0]), diff_x:(diff_x + target_size[1])]
    def forward(self, x, bridge):
        up = self.up(x)
        # bridge = self.center_crop(bridge, up.shape[2:]) #sending shape ignoring 2 digit, so target size start with 0,1,2
        # Resize the upsampled tensor to the skip connection's spatial size
        # so the channel-wise concat below always lines up.
        up = F.interpolate(up, size=bridge.shape[2:], mode=interp_mode)
        out = torch.cat([up, bridge], 1)
        out = self.conv_block(out)
        return out
class UNet(nn.Module):
    """
    Implementation of
    U-Net: Convolutional Networks for Biomedical Image Segmentation
    (Ronneberger et al., 2015)
    https://arxiv.org/abs/1505.04597
    Using the default arguments will yield the exact version used
    in the original paper
    Args:
        in_channels (int): number of input channels
        out_channels (int): number of output channels
        depth (int): depth of the network
        wf (int): number of filters in the first layer is 2**wf
        padding (bool): if True, apply padding such that the input shape
                        is the same as the output.
                        This may introduce artifacts
        batch_norm (bool): Use BatchNorm after layers with an
                           activation function
        up_mode (str): one of 'upconv' or 'upsample'.
                       'upconv' will use transposed convolutions for
                       learned upsampling.
                       'upsample' will use bilinear upsampling.
        droprate (float): Rate of dropout. If undesired, then 0.0
        is3D (bool): If a 3D or 2D version of U-net
        returnBlocks (bool) : If True, it will return the blocks created during downPath. If downPath is False, then it will be ignored
        downPath and upPath (bool): If only the downpath or uppath of the U-Net is needed, make the other one False
    Forward call:
        x (Tensor): Input Tensor
        blocks (list of Tensors): If only upPath is set to True, then this will be used during the forward of the uppath. If not desired, then supply blank list
    """
    def __init__(self, in_channels=1, out_channels=1, depth=3, wf=6, padding=True,
                 batch_norm=False, up_mode='upconv', droprate=0.0, is3D=False,
                 returnBlocks=False, downPath=True, upPath=True):
        super(UNet, self).__init__()
        # Select 2D vs 3D layer constructors once, then publish them as
        # module globals used by UNetConvBlock / UNetUpBlock.
        # NOTE(review): globals().update is process-wide — constructing a 2D
        # and a 3D UNet in the same process makes the later one win; confirm
        # only one variant is ever used at a time.
        layers = {}
        if is3D:
            layers["layer_conv"] = nn.Conv3d
            layers["layer_convtrans"] = nn.ConvTranspose3d
            layers["layer_batchnorm"] = nn.BatchNorm3d
            layers["layer_drop"] = nn.Dropout3d
            layers["func_avgpool"] = F.avg_pool3d
            layers["interp_mode"] = 'trilinear'
        else:
            layers["layer_conv"] = nn.Conv2d
            layers["layer_convtrans"] = nn.ConvTranspose2d
            layers["layer_batchnorm"] = nn.BatchNorm2d
            layers["layer_drop"] = nn.Dropout2d
            layers["func_avgpool"] = F.avg_pool2d
            layers["interp_mode"] = 'bilinear'
        globals().update(layers)
        self.returnBlocks = returnBlocks
        self.do_down = downPath
        self.do_up = upPath
        self.padding = padding
        self.depth = depth
        self.dropout = layer_drop(p=droprate)
        prev_channels = in_channels
        self.down_path = nn.ModuleList()
        for i in range(depth):
            # Channel count doubles each level (2**(wf+i)) even when the
            # down path itself is skipped, so the up path sizes still match.
            if self.do_down:
                self.down_path.append(UNetConvBlock(prev_channels, 2**(wf+i),
                                                    padding, batch_norm))
            prev_channels = 2**(wf+i)
        self.latent_channels = prev_channels
        self.up_path = nn.ModuleList()
        for i in reversed(range(depth - 1)):
            if self.do_up:
                self.up_path.append(UNetUpBlock(prev_channels, 2**(wf+i), up_mode,
                                                padding, batch_norm))
            prev_channels = 2**(wf+i)
        if self.do_up:
            self.last = layer_conv(prev_channels, out_channels, kernel_size=1)
    def forward(self, x, blocks=()):
        if self.do_down:
            for i, down in enumerate(self.down_path):
                x = down(x)
                # Every level except the deepest stores its output as a skip
                # connection, then downsamples.
                if i != len(self.down_path)-1:
                    blocks += (x,)
                    x = func_avgpool(x, 2)
                    x = self.dropout(x)
        if self.do_up:
            for i, up in enumerate(self.up_path):
                # Consume the stored skips deepest-first.
                x = up(x, blocks[-i-1])
            x = self.last(x)
        if self.returnBlocks and self.do_down:
            return x, blocks
        else:
            return x
if __name__ == '__main__':
    # Smoke test: push one random 2D batch through the default U-Net on GPU.
    print('#### Test Case ###')
    from torch.autograd import Variable
    x = Variable(torch.rand(2,1,64,64)).cuda()
    model = UNet().cuda()
    y = model(x)
    print(y.shape)
999,714 | e1e4d75f1743a72e302948aa957805d4567c81fd | # Generated by Django 2.2.1 on 2019-05-10 11:25
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add a nullable ``page_id`` CharField to
    the ``FacebookInfo`` model in app ``AML``."""

    dependencies = [
        ('AML', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='facebookinfo',
            name='page_id',
            # Nullable with default=None so existing rows need no backfill.
            field=models.CharField(default=None, max_length=255, null=True),
        ),
    ]
|
999,715 | 489920d99ec8282cf54fc05d04b015a2860222e3 | import math
# Intro banner
print('Programa para calcular a área de um triângulo')
print()
# Inputs: the two side lengths and the angle (in degrees) between them
lado_a = float(input('Informe a medida do lado A: '))
lado_b = float(input('Informe a medida do lado B: '))
angulo = float(input('Informe o ângulo formado entre o lado A e o lado B: '))
# Area = a * b * sin(angle) / 2; the angle is converted from degrees to radians
area = lado_a * lado_b * math.sin(math.pi * angulo / 180) / 2
# Output
print('A área desse triângulo é:', area)
999,716 | 95534c67b4863c2255949706db495f5b76959a3b | {% extends "base.html" %}
{% block content %}
You have been Logged out!
{% endblock %} |
999,717 | be7f348eee2190c08b266ff93142de2669a932c9 | import csv
import math
import mysql.connector
from mysql.connector import Error
# Database connection for the FUNDEB per-pupil import script.
# NOTE(review): credentials are hard-coded in source — move them to
# environment variables or a config file before sharing/committing.
connection = mysql.connector.connect(
    host="localhost",
    user="debian-sys-maint",
    passwd="HwearQMC4nkPeYmP",
    database="fundeb"
)
def inserirValorAluno(id_estado, id_segmento, valor, ano, tipo, educacao):
    """Insert one estimated per-pupil value row into ``VLestimadoAluno``.

    Parameters mirror the table columns; per the caller below, ``educacao``
    is 'P' (public) or 'C' (conveniada) and ``tipo`` is 'U', 'R' or ''.
    Errors are reported but not re-raised so one bad row does not abort
    the whole import.
    """
    # Build the tuple first so the error message below can always show it
    # (the original bare ``except`` could hit an unbound ``values``).
    values = (id_estado, id_segmento, valor, ano, educacao, tipo)
    sql = ("INSERT INTO VLestimadoAluno "
           "(id_estado, id_segmento, valor, ano, educacao, tipo) "
           "VALUES (%s, %s, %s, %s, %s, %s)")
    cursor = None
    try:
        cursor = connection.cursor()
        cursor.execute(sql, values)
        connection.commit()
    except Error as exc:
        # Catch only MySQL errors instead of a bare ``except`` that would
        # swallow programming mistakes too.
        print("Erro", values, exc)
    finally:
        if cursor is not None:
            cursor.close()  # the original leaked cursors
# Import the 2018 per-pupil estimates CSV: one row per state (UF), columns
# mapped positionally onto segment ids via inserirValorAluno.
line_count = 0  # NOTE(review): never incremented or read — dead variable
with open('estimativaPorAluno/2018.csv') as csv_file:
    csv_reader = csv.reader(csv_file, delimiter=',')
    ano = 2018
    for row in csv_reader:
        uf=row[0]
        # Resolve the state's primary key from its UF abbreviation.
        cursor = connection.cursor()
        cursor.execute("SELECT * FROM estados WHERE estados.uf = %s", (uf, ) )
        # old - work
        #cursor = connection.cursor()
        #cursor.execute(sql)
        result = cursor.fetchone()
        id_estado = result[0]
        print(uf , "/ id" , id_estado)
        # PUBLIC SCHOOLS (educacao='P')
        # tipo is 'R' (rural), 'U' (urban) or '' (not split)
        # NOTE(review): columns 2 and 3 map to segments 3 and 2 respectively
        # (order swapped vs the conveniada block below) — confirm intended.
        valor = row[1]
        inserirValorAluno(id_estado, 1, valor, ano, 'P', '')
        valor = row[2]
        inserirValorAluno(id_estado, 3, valor, ano, 'P', '')
        valor = row[3];
        inserirValorAluno(id_estado, 2, valor, ano, 'P', '')
        valor = row[4]
        inserirValorAluno(id_estado, 4, valor, ano, 'P', '')
        # educacao is 'P' or 'C'
        # elementary education (ensino fundamental), urban/rural split
        valor = row[5]
        inserirValorAluno(id_estado, 5, valor, ano, 'P', 'U')
        valor = row[6]
        inserirValorAluno(id_estado, 5, valor, ano, 'P', 'R')
        valor = row[7]
        inserirValorAluno(id_estado, 6, valor, ano, 'P', 'U')
        valor = row[8]
        inserirValorAluno(id_estado, 6, valor, ano, 'P', 'R')
        valor = row[9]
        inserirValorAluno(id_estado, 7, valor, ano, 'P', '')
        ## secondary education (ensino medio)
        valor = row[10]
        inserirValorAluno(id_estado, 8, valor, ano, 'P', 'U')
        valor = row[11]
        inserirValorAluno(id_estado, 8, valor, ano, 'P', 'R')
        valor = row[12]
        inserirValorAluno(id_estado, 9, valor, ano, 'P', '')
        valor = row[13]
        inserirValorAluno(id_estado, 10, valor, ano, 'P', '')
        # AEE (specialized educational assistance)
        valor = row[14]
        inserirValorAluno(id_estado, 14, valor, ano, 'P', '')
        # special education (EDUCACAO ESPECIAL)
        valor = row[15]
        inserirValorAluno(id_estado, 13, valor, ano, 'P', '')
        # indigenous education (INDIGENA)
        valor = row[16]
        inserirValorAluno(id_estado, 15, valor, ano, 'P', '')
        # EJA 1 (youth/adult education, stage 1)
        valor = row[17]
        inserirValorAluno(id_estado, 11, valor, ano, 'P', '')
        # EJA 2
        valor = row[18]
        inserirValorAluno(id_estado, 12, valor, ano, 'P', '')
        # PARTNER/AGREEMENT SCHOOLS (ESCOLAS CONVENIADAS, educacao='C')
        valor = row[19]
        inserirValorAluno(id_estado, 1, valor, ano, 'C', '')
        valor = row[20]
        inserirValorAluno(id_estado, 2, valor, ano, 'C', '')
        valor = row[21];
        inserirValorAluno(id_estado, 3, valor, ano, 'C', '')
        valor = row[22]
        inserirValorAluno(id_estado, 4, valor, ano, 'C', '')
        valor = row[23]
        inserirValorAluno(id_estado, 14, valor, ano, 'C', '')
|
999,718 | cde369efa65cb82e6a8d1d938005937424c6005f | from splinter import Browser
from bs4 import BeautifulSoup as bs
import pandas as pd
import time
def init_browser():
    """Return a non-headless Chrome splinter Browser.

    @NOTE: Replace the path with your actual path to the chromedriver
    """
    # path for MAC
    executable_path = {"executable_path": "/usr/local/bin/chromedriver"}
    # path for windows - need to complete path!!!
    #executable_path = {"executable_path": "C:/Users/Heather Bree/chromedriver_win32/chromedriver"}
    return Browser("chrome", **executable_path, headless=False)
def scrape():
    """Scrape Mars news, the JPL featured image, the facts table and the
    four hemisphere images.

    Returns a dict with keys: news_title, news_p, featured_image_url,
    mars_facts, and mars_dict_list (one {title, img_url} dict per hemisphere).
    """
    browser = init_browser()
    mars_data = {}

    # --- latest news title + teaser from the NASA Mars News site ---
    mars_url = 'http://mars.nasa.gov/news/'
    browser.visit(mars_url)
    time.sleep(1)  # give the page a moment to load
    soup = bs(browser.html, "html.parser")
    mars_data["news_title"] = soup.body.find_all('div', class_="content_title")[1].text
    mars_data["news_p"] = soup.find('div', class_="article_teaser_body").text

    # --- JPL featured image: click through to the full-resolution view ---
    jet_url = 'http://jpl.nasa.gov/spaceimages/?search=&category=Mars/'
    browser.visit(jet_url)
    time.sleep(1)
    browser.find_by_id('full_image').first.click()
    time.sleep(3)  # wait for the fancybox overlay to render
    soup = bs(browser.html, "html.parser")
    img_url = soup.find("img", class_="fancybox-image")["src"]
    mars_data["featured_image_url"] = 'http://jpl.nasa.gov' + img_url

    # --- Mars facts table: first table on the page, serialized as HTML ---
    facts_url = 'https://space-facts.com/mars/'
    browser.visit(facts_url)
    time.sleep(2)
    df = pd.read_html(facts_url)[0]
    mars_data["mars_facts"] = df.to_html().replace('\n', '')

    # --- hemisphere titles + full-resolution images (USGS Astrogeology) ---
    usgs_url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
    astro_url = 'https://astrogeology.usgs.gov'
    browser.visit(usgs_url)
    time.sleep(5)
    mars_dict_list = []
    # the scrape is identical for every hemisphere - loop instead of the
    # previous four copy-pasted sections
    for hemisphere in ("Cerberus", "Schiaparelli", "Syrtis", "Valles"):
        browser.click_link_by_partial_text(hemisphere)
        time.sleep(2)
        soup = bs(browser.html, "html.parser")
        title = soup.body.find('h2', class_="title").text
        img_url = soup.find("img", class_="wide-image")["src"]
        mars_dict_list.append({"title": title, "img_url": astro_url + img_url})
        # return to the results page so the next hemisphere link is visible
        browser.visit(usgs_url)
    mars_data["mars_dict_list"] = mars_dict_list

    # Close the browser after scraping
    browser.quit()
    return mars_data
return mars_data
|
999,719 | 025e4f402aa60054dd5f0d5be93175b7080986e2 | import os
import pytest
import testinfra.utils.ansible_runner
# Collect every host from the Molecule inventory; testinfra runs each
# test function below once per host.
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
    os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
@pytest.mark.parametrize('exe', [
    '/usr/bin/pg_dump',
    '/usr/bin/pg_restore',
    '/usr/bin/psql'
])
def test_postgresql_amazon_linux_extras_exes(host, exe):
    # Each PostgreSQL client binary must be present on the host.
    assert host.file(exe).exists
def test_postgresql_version(host):
    """pg_dump must report the expected PostgreSQL 9.6 release."""
    version_banner = host.check_output('pg_dump --version')
    assert '(PostgreSQL) 9.6' in version_banner
@pytest.mark.parametrize('file', [
    '/media/atl/jira/shared',
    '/media/atl/jira/shared/hello',
    '/media/atl/jira/shared/hello/hello.txt'
])
def test_shared_home_owner(host, file):
    """Each shared-home path exists and is owned by the jira user and group."""
    f = host.file(file)  # stat the path once instead of three remote round-trips
    assert f.exists
    assert f.user == 'jira'
    assert f.group == 'jira'
def test_file_modes(host):
    """Check the expected permission bits on the seeded demo files."""
    expected_modes = {
        '/media/atl/jira/shared/hello': 0o755,
        '/media/atl/jira/shared/hello/hello.txt': 0o640,
    }
    for path, mode in expected_modes.items():
        assert host.file(path).mode == mode
def test_version_file_owned_by_root(host):
assert host.file('/media/atl/jira/shared/jira-software.version').exists
assert host.file('/media/atl/jira/shared/jira-software.version').user == 'root'
assert host.file('/media/atl/jira/shared/jira-software.version').group == 'root' |
999,720 | 85e77729387d381328e66456bc5425d4d6e73c06 | import json, fnmatch, re, cantools
def matcher(x):
    """Compile shell-style glob pattern *x* into an equivalent regex object."""
    regex_source = fnmatch.translate(x)
    return re.compile(regex_source)
# Load the tool configuration and pre-compile the message-name globs.
# Uses the matcher() helper above (previously duplicated its body inline,
# leaving matcher() dead code) and a comprehension instead of append-in-loop.
with open("config.json") as fd:
    config = json.load(fd)
messageWhitelist = [matcher(key) for key in config["messages"]]
def signalFilter(name):
    """Return True if *name* ("Message/Signal") matches any whitelist glob.

    Previously fell off the end and returned None on no match; the falsy
    result happened to work for callers, but the False is now explicit.
    """
    for regex in messageWhitelist:
        if regex.search(name) is not None:
            return True
    return False
# Walk every signal in the CAN spec; for whitelisted ones, record where its
# bits live in the frame, merging signals that are bit-adjacent in one frame.
db = cantools.database.load_file(config["can-spec"])
extractions = []
ids = []      # frame ids containing at least one selected signal (may repeat)
bitLen = 0    # total number of selected bits (determines payload size)
for msg in db.messages:
    for signal in msg.signals:
        name = msg.name + "/" + signal.name
        if signalFilter(name):
            ids.append(msg.frame_id)
            start = cantools.database.utils.start_bit(signal)
            # merge into the previous extraction when same frame and
            # contiguous bits, so one copy operation covers both signals
            if len(extractions) > 0 \
                    and extractions[-1]["frame_id"] == msg.frame_id \
                    and extractions[-1]["start"] + extractions[-1]["length"] == start:
                previous = extractions[-1]
                previous["name"] = previous["name"] + ", " + name
                previous["length"] = previous["length"] + signal.length
            else:
                extractions.append({
                    'name': name,
                    'frame_id': msg.frame_id,
                    'start': cantools.database.utils.start_bit(signal),
                    'length': signal.length,
                })
            bitLen = bitLen + signal.length
# round the selected bit count up to whole bytes
byteLen = bitLen // 8
if bitLen % 8 != 0:
    byteLen = byteLen + 1
#TODO support more than one LoRa Message
loraMessages = [
    {
        'repetition': config["repetition"],
        'len': byteLen,
        'extractions': extractions
    },
]
# Emit C source: one CANExtraction array per LoRa message, plus the
# Message table that references those arrays.
extractionsCode = ""
messageCode = "Message messages[] = {\n"
for i, message in enumerate(loraMessages):
    extractionsName = "message" + str(i) + "Extractions"
    extractionsCode = extractionsCode + "CANExtraction " + extractionsName + "[] = {\n"
    # NOTE(review): iterates the global `extractions`, not
    # message['extractions'] - identical while there is exactly one
    # message, but will break once the TODO above is implemented. Confirm.
    for extraction in extractions:
        extractionsCode = extractionsCode + "\t{\n" + \
            "\t\t// " + extraction['name'] + ",\n" + \
            "\t\t.id = " + hex(extraction['frame_id']) + ",\n" + \
            "\t\t.pos = " + str(extraction['start']) + ",\n" + \
            "\t\t.len = " + str(extraction['length']) + ",\n" + \
            "\t},\n"
    extractionsCode = extractionsCode + "};\n"
    messageCode = messageCode + "\t{\n" + \
        "\t\t.repetition = " + str(message['repetition']) + ",\n" + \
        "\t\t.len = " + str(message['len']) + ",\n" + \
        "\t\t.extractions = " + extractionsName + ",\n" + \
        "\t\t.extractionCount = sizeof(" + extractionsName + ") / sizeof(CANExtraction)\n" + \
        "\t},\n"
messageCode = messageCode + "};\n"
# De-duplicate frame ids and print all generated C snippets to stdout.
ids = ",\n\t".join(["0x{:x}".format(x) for x in set(ids)])
print("int canIds[] = {\n\t" + ids + "\n};\n")
print(extractionsCode)
print(messageCode)
|
999,721 | 90b285b53506d03f728370b935521888a7c8446e | from python_test_case import PythonTestCase, run_tests
from unittest.mock import patch, call
import sys
from io import StringIO
from pylab import *
class Tests(PythonTestCase):
    """Exercise attempt.py for every combination of the three yes/no answers.

    attempt.py asks whether the student completed HELP1000, HELP1500 and
    HELP2001 (1 = yes, 0 = no) and must allow the course only for 1,1,0.
    The eight previously copy-pasted test bodies now share one helper.
    """

    ALLOWED = "You can take this course."
    DENIED = "You cannot take this course, sorry!"

    def setUp(self):
        # attempt.py runs its logic on import, so forget any cached copy
        # between tests to force a fresh run
        try:
            del sys.modules["attempt"]
        except KeyError:
            pass

    def _check(self, user_input, expected):
        """Import attempt.py with patched input() answers and compare output."""
        with patch("builtins.input", side_effect=user_input):
            with patch("sys.stdout", new=StringIO()) as output:
                import attempt
        self.assertEqual(output.getvalue().strip(), expected)

    def test_allowed(self):
        """HELP1000 and HELP1500 completed only (1,1,0): course is allowed."""
        self._check(["1", "1", "0"], self.ALLOWED)

    def test_000(self):
        """No courses completed: denied."""
        self._check(["0", "0", "0"], self.DENIED)

    def test_001(self):
        """Only HELP2001 completed: denied."""
        self._check(["0", "0", "1"], self.DENIED)

    def test_010(self):
        """Only HELP1500 completed: denied."""
        self._check(["0", "1", "0"], self.DENIED)

    def test_011(self):
        """HELP1500 and HELP2001 completed: denied."""
        self._check(["0", "1", "1"], self.DENIED)

    def test_100(self):
        """Only HELP1000 completed: denied."""
        self._check(["1", "0", "0"], self.DENIED)

    def test_101(self):
        """HELP1000 and HELP2001 completed: denied."""
        self._check(["1", "0", "1"], self.DENIED)

    def test_111(self):
        """All three courses completed: denied."""
        self._check(["1", "1", "1"], self.DENIED)
# Run the unit tests
if __name__ == "__main__":
run_tests(Tests)
|
999,722 | a98651ca1ecaab7faaafcb32406044976a023e69 | from fabric2 import task
from cloudify import ctx
@task
def remove_node(connection, *argc, **kargs):
    """Delete this instance's node from the Kubernetes cluster.

    The node name is derived from the Cloudify source-instance id:
    underscores become dashes and the result is lowercased.
    """
    hostname = ctx.source.instance.id.replace('_', '-').lower()
    ctx.logger.info('Remove node instance: {hostname}'.format(hostname=hostname))
    connection.run('kubectl delete node {hostname}'.format(hostname=hostname))
|
999,723 | 55c9c2021e6ffff34394603066c1fa512a893f50 | import numpy as np
import pickle
import h5py
import os
from keras.optimizers import SGD, Adagrad
from keras.models import load_model, Sequential
from keras.layers import Dense, Activation, Dropout
from keras.callbacks import TensorBoard
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'  # silence TensorFlow info/warning logs
# --- hyper-parameters ---
batch_size = 200
epochs = 10
unit = 500  # width of each hidden Dense layer
dropout = 0.001
activation = 'relu'
optimizer = SGD(lr=0.9, decay=1e-5, momentum=0.9, nesterov=True)
#optimizer = 'Adagrad'
# Load the feature matrix and the (multi-label) target matrix from disk.
X = np.loadtxt("train.txt")
y = np.loadtxt("labels.txt", dtype = int)
print ("training dataset shape: ", X.shape)
print ("label shape: ", y.shape)
n_input = X.shape[1]
n_output = y.shape[1]
# Two hidden layers, dropout, then a sigmoid output: one independent
# probability per label (multi-label set-up).
model = Sequential()
model.add(Dense(unit,input_dim=n_input, activation=activation))
model.add(Dense(unit,input_dim=n_input, activation=activation))
model.add(Dropout(dropout))
model.add(Dense(n_output, activation='sigmoid'))
# binary_crossentropy scores each output unit independently
model.compile(loss='binary_crossentropy',optimizer=optimizer,metrics=['accuracy'])
print (model.summary())
# Log training curves for TensorBoard under ./Graph
tbCallBack= TensorBoard(log_dir='./Graph', histogram_freq=0, write_graph=True, write_images=True)
hist = model.fit(X, y,batch_size=batch_size,epochs=epochs,validation_split = 0.1,callbacks=[tbCallBack])
model.save('test.h5')
|
999,724 | cd9fdfdfe9636287bc2110466657d357d532d6f9 | import time
import numpy as np
import torch
from torch import nn
from torch import optim
from config import device, label_names, print_every, hidden_size, encoder_n_layers, dropout, learning_rate, \
epochs
from data_gen import SaDataset
from models import EncoderRNN
from utils import AverageMeter, ExpoAverageMeter, accuracy, Lang, timestamp, adjust_learning_rate, save_checkpoint
def train(epoch, train_data, encoder, optimizer):
    '''
    Run one training epoch.

    Flow: input -> model -> output -> criterion -> loss.
    Feed each batch through the model, score the output with the loss
    function, then let the optimizer adjust the weights so the loss falls.

    :param epoch: current epoch number (used for logging)
    :param train_data: SaDataset holding the training set and vocabulary
    :param encoder: the model (EncoderRNN)
    :param optimizer: optimizer algorithm (sgd / adam / adagrad / rmsprop;
                      see the torch.optim docs)
    :return: None
    '''
    # make sure the model is in training mode (frameworks enable dropout
    # etc. here; models toggle between 'train' and 'eval' states)
    encoder.train()
    # loss function; stateless
    criterion = nn.CrossEntropyLoss().to(device)
    batch_time = AverageMeter()  # forward prop. + back prop. time
    losses = ExpoAverageMeter()  # loss (per word decoded)
    accs = ExpoAverageMeter()  # accuracy
    start = time.time()
    # supervised training loop over batches
    for i_batch, (input_variable, lengths, target_variable) in enumerate(train_data):
        # every tensor carries its data and a grad (gradient);
        # zero all grads before the backward pass
        optimizer.zero_grad()
        # Set device options
        input_variable = input_variable.to(device)
        lengths = lengths.to(device)
        target_variable = target_variable.to(device)
        # forward pass through the model
        outputs = encoder(input_variable, lengths)
        loss = 0
        acc = 0
        # score outputs per label, averaging loss and accuracy over labels
        for idx, _ in enumerate(label_names):
            loss += criterion(outputs[:, :, idx], target_variable[:, idx]) / len(label_names)
            acc += accuracy(outputs[:, :, idx], target_variable[:, idx]) / len(label_names)
        # backprop: compute gradients for all parameters
        loss.backward()
        # update the trainable parameters
        optimizer.step()
        # Keep track of metrics
        losses.update(loss.item())
        batch_time.update(time.time() - start)
        accs.update(acc)
        start = time.time()
        # Print status
        if i_batch % print_every == 0:
            print('[{0}] Epoch: [{1}][{2}/{3}]\t'
                  'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Accuracy {accs.val:.3f} ({accs.avg:.3f})'.format(timestamp(), epoch, i_batch, len(train_data),
                                                                    batch_time=batch_time,
                                                                    loss=losses,
                                                                    accs=accs))
def valid(val_data, encoder):
    '''
    Evaluate on the validation set to guard against overfitting
    (i.e. the model doing well only on data it was trained on).

    :param val_data: validation dataset
    :param encoder: the model
    :return:
        accs.avg, losses.avg
    '''
    encoder.eval()  # eval mode (no dropout or batchnorm)
    # Loss function
    criterion = nn.CrossEntropyLoss().to(device)
    batch_time = AverageMeter()  # forward prop. + back prop. time
    losses = AverageMeter()  # loss (per word decoded)
    accs = AverageMeter()  # accuracy
    start = time.time()
    # no gradients needed for evaluation
    with torch.no_grad():
        # Batches
        for i_batch, (input_variable, lengths, target_variable) in enumerate(val_data):
            # Set device options
            input_variable = input_variable.to(device)
            lengths = lengths.to(device)
            target_variable = target_variable.to(device)
            outputs = encoder(input_variable, lengths)
            loss = 0
            acc = 0
            # per-label loss and accuracy, averaged over all labels
            for idx, _ in enumerate(label_names):
                loss += criterion(outputs[:, :, idx], target_variable[:, idx]) / len(label_names)
                acc += accuracy(outputs[:, :, idx], target_variable[:, idx]) / len(label_names)
            # Keep track of metrics
            losses.update(loss.item())
            batch_time.update(time.time() - start)
            accs.update(acc)
            start = time.time()
            # Print status
            if i_batch % print_every == 0:
                print('Validation: [{0}/{1}]\t'
                      'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Accuracy {accs.val:.3f} ({accs.avg:.3f})'.format(i_batch, len(val_data),
                                                                        batch_time=batch_time,
                                                                        loss=losses,
                                                                        accs=accs))
    return accs.avg, losses.avg
def main():
    """Entry point: load data, build the encoder, then train with
    early stopping, LR decay and per-epoch checkpointing."""
    # load the vocabulary and the datasets
    voc = Lang('data/WORDMAP.json')
    print("词库数量 " + str(voc.n_words))  # runtime string: "vocabulary size"
    train_data = SaDataset('train', voc)
    val_data = SaDataset('valid', voc)
    # build the model
    encoder = EncoderRNN(voc.n_words, hidden_size, encoder_n_layers, dropout)
    # move the model to the compute device (GPU memory or host RAM)
    encoder = encoder.to(device)
    # create the optimizer: it drives the loss down by adjusting the model
    # parameters (Adam is one gradient-descent variant; see torch.optim)
    print('Building optimizers ...')
    '''
    arguments: the parameters to optimize, and the learning rate
    '''
    optimizer = optim.Adam(encoder.parameters(), lr=learning_rate)
    # best validation accuracy seen so far
    best_acc = 0
    epochs_since_improvement = 0
    # `epochs` = number of training passes
    for epoch in range(0, epochs):
        # Decay learning rate if there is no improvement for 8 consecutive epochs, and terminate training after 20
        if epochs_since_improvement == 20:
            break
        if epochs_since_improvement > 0 and epochs_since_improvement % 8 == 0:
            adjust_learning_rate(optimizer, 0.8)
        # run one training epoch
        train(epoch, train_data, encoder, optimizer)
        # validate to detect overfitting
        val_acc, val_loss = valid(val_data, encoder)
        print('\n * ACCURACY - {acc:.3f}, LOSS - {loss:.3f}\n'.format(acc=val_acc, loss=val_loss))
        # check whether this epoch improved on the best accuracy
        is_best = val_acc > best_acc
        best_acc = max(best_acc, val_acc)
        if not is_best:
            epochs_since_improvement += 1
            print("\nEpochs since last improvement: %d\n" % (epochs_since_improvement,))
        else:
            epochs_since_improvement = 0
        # Save checkpoint
        save_checkpoint(epoch, encoder, optimizer, val_acc, is_best)
        # Reshuffle samples (both the training and the validation set)
        np.random.shuffle(train_data.samples)
        np.random.shuffle(val_data.samples)
if __name__ == '__main__':
main()
|
999,725 | f62a9346406f8815b492ef7496832bfed43c5d1f | print("hello python,你好python!!!!")
print("你好")
print("hello") |
999,726 | a16f4f5b53fcbd99461d85a1693ad95c44de584a | class Solution:
def fractionToDecimal(self, numerator: int, denominator: int) -> str:
|
999,727 | fc4d95332123e91940feb1b1c136a9488e44fe78 | # -*- coding: utf-8 -*-
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
import datetime
class Migration(SchemaMigration):
    """South schema migration: creates the Product, Producer and
    ProductCategory tables plus their many-to-many join tables.

    NOTE(review): migration code is auto-generated and must stay stable;
    only documentation has been added.
    """
    def forwards(self, orm):
        # Adding model 'Product'
        db.create_table(u'Products_product', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=150)),
            ('description', self.gf('django.db.models.fields.TextField')()),
            ('price', self.gf('django.db.models.fields.DecimalField')(max_digits=10, decimal_places=2)),
            ('quantity', self.gf('django.db.models.fields.DecimalField')(max_digits=10, decimal_places=2)),
            ('image', self.gf('django.db.models.fields.files.ImageField')(default='product_images/product_images_placeholder.png', max_length=100, blank=True)),
            ('quantityType', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['IngredientProductMapping.QuantityType'])),
            ('producer', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['Products.Producer'])),
            ('ingredientMapping', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='products', null=True, to=orm['IngredientProductMapping.IngredientProductMapping'])),
        ))
        db.send_create_signal(u'Products', ['Product'])
        # Adding M2M table for field productCategories on 'Product'
        m2m_table_name = db.shorten_name(u'Products_product_productCategories')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('product', models.ForeignKey(orm[u'Products.product'], null=False)),
            ('productcategory', models.ForeignKey(orm[u'Products.productcategory'], null=False))
        ))
        db.create_unique(m2m_table_name, ['product_id', 'productcategory_id'])
        # Adding model 'Producer'
        db.create_table(u'Products_producer', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=150)),
        ))
        db.send_create_signal(u'Products', ['Producer'])
        # Adding model 'ProductCategory'
        db.create_table(u'Products_productcategory', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=150)),
        ))
        db.send_create_signal(u'Products', ['ProductCategory'])
        # Adding M2M table for field parents on 'ProductCategory'
        # (self-referential: a category can have multiple parent categories)
        m2m_table_name = db.shorten_name(u'Products_productcategory_parents')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('from_productcategory', models.ForeignKey(orm[u'Products.productcategory'], null=False)),
            ('to_productcategory', models.ForeignKey(orm[u'Products.productcategory'], null=False))
        ))
        db.create_unique(m2m_table_name, ['from_productcategory_id', 'to_productcategory_id'])
    def backwards(self, orm):
        # Deleting model 'Product'
        db.delete_table(u'Products_product')
        # Removing M2M table for field productCategories on 'Product'
        db.delete_table(db.shorten_name(u'Products_product_productCategories'))
        # Deleting model 'Producer'
        db.delete_table(u'Products_producer')
        # Deleting model 'ProductCategory'
        db.delete_table(u'Products_productcategory')
        # Removing M2M table for field parents on 'ProductCategory'
        db.delete_table(db.shorten_name(u'Products_productcategory_parents'))
    # Frozen ORM state used by South to reconstruct the models at this point
    # in the migration history.
    models = {
        u'IngredientProductMapping.ingredientproductmapping': {
            'Meta': {'object_name': 'IngredientProductMapping'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '150'})
        },
        u'IngredientProductMapping.quantitytype': {
            'Meta': {'object_name': 'QuantityType'},
            'category': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '25'})
        },
        u'Products.producer': {
            'Meta': {'object_name': 'Producer'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '150'})
        },
        u'Products.product': {
            'Meta': {'object_name': 'Product'},
            'description': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'default': "'product_images/product_images_placeholder.png'", 'max_length': '100', 'blank': 'True'}),
            'ingredientMapping': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'products'", 'null': 'True', 'to': u"orm['IngredientProductMapping.IngredientProductMapping']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
            'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '2'}),
            'producer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['Products.Producer']"}),
            'productCategories': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'products'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['Products.ProductCategory']"}),
            'quantity': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '2'}),
            'quantityType': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['IngredientProductMapping.QuantityType']"})
        },
        u'Products.productcategory': {
            'Meta': {'object_name': 'ProductCategory'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
            'parents': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['Products.ProductCategory']"})
        }
    }
complete_apps = ['Products'] |
999,728 | a592879bcaf051cf1b5f4c7ad153c16560dd2752 | import Caffe_AlexNet as C
import Make_activation as m
import h5_analysis_jitterer as h
import os
import shutil
import numpy as np
import set_up_caffe_net
from Test_AlexNet_on_directory import what_am_I_from_image
# ---------------------------------------------------------------------------------------------------------------------
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!1!!
# IMPORTANT! YOU NEED TO SET Make_activation to load 'prob'ability data!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!1!
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!1
# ---------------------------------------------------------------------------------------------------------------------
# !!!!! NB THIS MAKES A 'TEST' SET OF SHRUNK SQUARIFIED ORIGINALS, FROM THE IMAGENET TRAINING SET!
# !!!!! THIS IS NOT THE IMAGENET TEST SET, ALEXNET HAS SEEN THIS MANY TIMES BEFORE
local_list=[]  # activation-point tuples for the class currently being processed
acts = None  # activation store; must expose get_file_name() (set from m.acts below)
verbose = True  # chatty logging throughout the script
class_dict={}  # class code -> class line-number look-up table
net = None  # the Caffe network (set from CaffeSettings.net below)
transformer = ""  # the Caffe input transformer (set alongside net)
# -------------------------------------------------------------------------------------------------------------------- #
# FUNCTIONS!
# -------------------------------------------------------------------------------------------------------------------- #
def grab_files(local_list=local_list,
               acts=acts,
               class_dict=class_dict,
               verbose=verbose,
               imagenet_root='/storage/data/imagenet_2012/',
               in_class_sub_dirs=True):
    """Function to get the selected images and return their addresses
    in_class_sub_dirs: True if like imagenet, false if like imagenet test set"""
    # NOTE(review): the default values bind the module globals at import
    # time (when they are still empty/None) - callers must pass these in.
    selected_image_list = []
    found_classes = []  # NOTE(review): built but never returned or used
    for selected_point in local_list:
        # grab filename
        selected_file = acts.get_file_name(selected_point).decode('UTF-8')
        if verbose:
            pass
            #print(selected_file)
        # class code is the filename prefix before the first underscore
        class_dir_label = selected_file.split('_')[0]
        if in_class_sub_dirs:
            # we've assumed files are in folders labelled by class!
            selected_image_list.append(imagenet_root + class_dir_label + '/' + selected_file)
        else:
            selected_image_list.append(imagenet_root + selected_file)
        # look up the class number (raises KeyError for unknown prefixes)
        class_no = class_dict[selected_file.split('_')[0]]
        if not class_no in found_classes:
            found_classes.append(class_no)
    return selected_image_list
def check_image_correct(true_class='',
                        local_list=local_list,
                        acts=acts,
                        class_dict=class_dict,
                        verbose=verbose,
                        imagenet_root='/storage/data/imagenet_2012/',
                        net=net,
                        transformer=transformer,
                        in_class_sub_dirs=True):
    """wrapper function to check that a given image is correct in a fresh instantiation of ALexNet
    ture_class needs to be input"""
    # Resolve the image paths for the selected points, then classify each
    # one and split the results into correct / mistaken lists.
    selected_image_list = grab_files(local_list=local_list,
                                     acts=acts, class_dict=class_dict,
                                     verbose=verbose, imagenet_root=imagenet_root, in_class_sub_dirs=in_class_sub_dirs)
    image_list = selected_image_list
    image_directory=''
    mistake_list_name = []
    mistake_list_no = []
    correct_list_name = []
    correct_list_no = []
    corrected_local_list=[]
    for image_no in range(len(image_list)):
        image_name = image_list[image_no]
        try:
            image = C.caffe.io.load_image(image_directory + image_name)
            good_to_go=True
        except:
            # unreadable / missing image: skip classification entirely.
            # NOTE(review): such an image is silently dropped from *all*
            # result lists (see the commented-out else below) - confirm intended.
            good_to_go=False
        if good_to_go:
            # NOTE(review): found_labels and class_labels are module globals
            # set later in this script, not parameters of this function.
            out_list, is_correct = what_am_I_from_image(
                image=image,
                net=net,
                transformer=transformer,
                verbose=verbose,
                found_labels=found_labels,
                class_labels=class_labels,
                true_class=true_class
            )
            if is_correct == False:
                if verbose:
                    print('Error: {} is incorrect'.format(image_name))
                mistake_list_name.append(image_name)
                mistake_list_no.append(image_no)
            else:
                # if its true or the functions doesnot know
                correct_list_name.append(image_name)
                correct_list_no.append(image_no)
                corrected_local_list.append(local_list[image_no])
        #else:
            #mistake_list_name.append(image_name)
            #mistake_list_no.append(image_no)
    return corrected_local_list, correct_list_name, correct_list_no, mistake_list_name, mistake_list_no
do_check=False  # NOTE(review): appears unused in the visible part of this script
usingDocker=0  # referenced only by the commented-out legacy set-up below
no_of_guesses=1
# code!!
# Build the Caffe net + transformer, then load activations and labels.
CaffeSettings = set_up_caffe_net.main()
net = CaffeSettings.net
transformer = CaffeSettings.transformer
m.main()
acts = m.acts
class_labels = m.class_labels
# why did i changethe names? argh
#class_labels = labels # this is the readin text file
found_labels = m.s.short_labels #[label.split(' ')[0] for label in labels] # this is the list of the class codes
class_dict = h.make_class_to_line_number_look_up_table(class_labels=class_labels, verbose=False)
# this builds the look-up table between points and the class they are in
## This bit is slow, it loads the label data for all acts
label_dict, found_labels, no_files_in_label = h.build_label_dict(acts)
# from Caffe_AlexNet import Get_Model_File
# model_file=Get_Model_File('no_reg_AlexNet')
# if usingDocker:
# # new set-up with safer deployment for use on all machines
# caffe_root, image_directory, labels_file, model_def, model_weights, dir_list, labels = set_up_caffe(model_file=model_file)
# net, transformer = C.Caffe_NN_setup(imangenet_mean_image='python/caffe/imagenet/ilsvrc_2012_mean.npy',
# batch_size=50, model_def=model_def, model_weights=model_weights,
# verbose=True, root_dir=caffe_root)
# else:
# # old set-up with hardcoded links and old-style unsafe deployment
# caffe_root, image_directory, labels_file, model_def, model_weights, dir_list, labels = \
# C.set_up_caffe(image_directory='/storage/data/imagenet_2012',
# model_file=model_file,
# label_file_address='data/ilsvrc12/synset_words.txt',
# dir_file='/storage/data/imagenet_2012_class_list.txt',
# root_dir='/home/eg16993/src/caffe', verbose=True)
# net, transformer = C.Caffe_NN_setup(
# imangenet_mean_image='/home/eg16993/src/caffe/python/caffe/imagenet/ilsvrc_2012_mean.npy',
# batch_size=50, model_def=model_def, model_weights=model_weights,
# verbose=True, root_dir=caffe_root)
def simple_move_files(selected_image_list, out_dir='/command/results/top_images_test_set/'):
    """Move every file in *selected_image_list* into *out_dir*, keeping its basename."""
    for src_path in selected_image_list:
        basename = src_path.split('/')[-1]
        shutil.move(src_path, out_dir + basename)
    return
## DO NOT RUN THESE LINES WITHOUT FIRST SORTING OUT THE MAIN BIT OF GRANTOPMOSTIMAGES!
#from Grab_top_most_images import grab_files
#from Grab_top_most_images import check_image_correct
# --- run-mode switches and result accumulators for the per-class loop below ---
make_collage_pictures = False # this makes a collage of the found pictures
do_move_files= False# this moves the original pictures over -- you probably dont want this
make_square_originals = True # this copies photos over and makes a canonical set with the correct square crop
do_file_write=True # write out a file with the filenames and correct classes
top_certainty = {}
no_top_certainty = {}
do_double_check = False # check one a new instantiaion of AlexNet that the image is correctly classified
do_triple_check = False # Yes I am that paranoid
top_dir='/command/results/'
no_correct=[]
no_mistake=[]
do_pictures=True
with open('correct_classes.txt', 'w') as file:
for class_label in found_labels:
print('yo!{}'.format(class_label))
assert class_label == label_dict[class_label][0][0]
certainty_list = []
correct_list=[]
correct_point_list = []
current_directory = top_dir + class_label
true_class=class_label
if not os.path.exists(current_directory):
os.makedirs(current_directory)
## 1. sqaurify and shrink files and copy em over
# local list is the file tuples in acts
local_list=label_dict[class_label]
local_list=local_list
# selected_image_list is the file names and addresses
selected_image_list = grab_files(local_list=local_list,
acts=acts,
class_dict=class_dict,
verbose=verbose,
imagenet_root='/storage/data/imagenet_2012/',
in_class_sub_dirs=True)
if make_square_originals:
for index in range(len(local_list)):
# not that image_no is an index into local_list
h.make_collage(out_file=selected_image_list[index].split('/')[-1],
local_list=[local_list[index]],
shrink=True, do_square=True, no_of_cols=1,
acts=acts, class_dict=class_dict, class_labels=class_labels,
verbose=False, imagenet_root='/storage/data/imagenet_2012/')
# now grab the local squarified copies
new_selected_image_list=grab_files(local_list=local_list,
acts=acts,
class_dict=class_dict,
verbose=verbose,
imagenet_root=top_dir,
in_class_sub_dirs=False)
# TEST 'em!
corrected_local_list, correct_list_name, correct_list_no, mistake_list_name, mistake_list_no = check_image_correct(
true_class=true_class,
local_list=local_list,
acts=acts,
class_dict=class_dict,
verbose=verbose,
imagenet_root=top_dir,
net=net,
transformer=transformer,
in_class_sub_dirs=False)
# get some stats
no_correct.append(len(correct_list_name))
no_mistake.append(len(mistake_list_name))
# remove the mistakes!
for image in mistake_list_name:
if not 'data/imagenet_2012/' in image: # seriously, now we fucking check -- do not delete the originals!
# kill kill kill
os.remove(image)
# move the pictures to where they should be!
simple_move_files(selected_image_list=correct_list_name, out_dir=current_directory + '/')
if do_file_write:
for point in corrected_local_list:
file.write(acts.get_file_name(point).decode() + ' ' + str(class_dict[class_label]) + '\n')
# write some stats
# Write per-class accuracy stats.  Use a context manager so the handle is
# actually flushed and closed -- the original's `.close()` was commented out,
# leaking the file handle and risking an unflushed final write.
with open('correct_stats.csv', 'w') as csv_file:
    csv_file.write('Class\t, No. correct \t, No mistake\n')
    for class_no in range(len(found_labels)):
        csv_file.write('{},\t {},\t {}\n'.format(found_labels[class_no], no_correct[class_no], no_mistake[class_no]))
|
999,729 | 754f19fba1eab121458c2232678efb83c91db277 | from __future__ import unicode_literals
from django.contrib import messages
from django.db import models
import bcrypt
from bcrypt import checkpw
import re
EMAIL_REGEX = re.compile (r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
class UserManager(models.Manager):
    """Validation helpers for registration and login.

    Both methods report feedback through django.contrib.messages and
    return a boolean success flag.
    """
    def isValidRegistration(self, userInfo, request):
        """Validate registration fields; create the user when all pass.

        Returns True on success, False otherwise (with warning messages
        queued for each failed check).
        """
        passFlag = True
        if not userInfo['first_name'].isalpha():
            messages.warning(request, 'First name contains non-alpha character(s)')
            passFlag = False
        if len(userInfo['first_name']) < 2:
            messages.warning(request, 'First name is not long enough.')
            passFlag = False
        if not userInfo['last_name'].isalpha():
            messages.warning(request, 'Last name contains non-alpha character(s)')
            passFlag = False
        if len(userInfo['last_name']) < 2:
            messages.warning(request, 'Last name is not long enough.')
            passFlag = False
        if not EMAIL_REGEX.match(userInfo['email']):
            messages.warning(request, 'Email is not a valid email!')
            passFlag = False
        if len(userInfo['password']) < 7:
            messages.warning(request, 'Password is not long enough.')
            passFlag = False
        if User.objects.filter(email = userInfo['email']):
            messages.error(request, "This email already exists in our database.")
            passFlag = False
        if passFlag == True:
            messages.success(request, "Success! Welcome, " + userInfo['first_name'] + "!")
            # bcrypt operates on bytes; only the salted hash is stored.
            hashed = bcrypt.hashpw(userInfo['password'].encode(), bcrypt.gensalt())
            User.objects.create(first_name = userInfo['first_name'], last_name = userInfo['last_name'], email = userInfo['email'], password = hashed)
        return passFlag
    def val_user(self, userInfo, request):
        """Check login credentials; return True on success, False otherwise.

        BUGFIX: the original computed passFlag but never returned it (the
        method always returned None), and raised User.DoesNotExist for an
        unknown email instead of reporting a failed login.
        """
        try:
            user = User.objects.get(email=userInfo['email'])
        except User.DoesNotExist:
            # Same message as a wrong password so the response does not
            # reveal which emails are registered.
            messages.error(request, "This password is incorrect")
            return False
        if bcrypt.checkpw(userInfo['password'].encode(), user.password.encode()):
            messages.success(request, "Success! Welcome," + " " + user.first_name)
            print (user.first_name)
            return True
        messages.error(request, "This password is incorrect")
        return False
class User(models.Model):
    # Registered account record.  `password` holds a bcrypt hash produced
    # by UserManager.isValidRegistration, never the plain-text password.
    first_name = models.CharField(max_length=200)
    last_name = models.CharField(max_length=200)
    email = models.EmailField()
    password = models.CharField(max_length=200)
    created_at = models.DateTimeField(auto_now_add=True)  # set once on insert
    updated_at = models.DateTimeField(auto_now=True)  # refreshed on every save
    objects = UserManager()  # custom manager providing the validation helpers
|
999,730 | 4250021db200b46a93ca47bd17ef4b2f172406e0 |
# coding: utf-8
# In[14]:
import numpy as np
import pandas as pd
from sklearn.cross_validation import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
from sklearn import tree
from sklearn.preprocessing import LabelEncoder
train = pd.read_csv("train.csv")
test = pd.read_csv("test.csv")
#print("Train shape : ", train.shape)
#print("Test shape : ", test.shape)
PM = [2,10,25,50,75,90]

# Label-encode the categorical (object-dtype) columns once, up front.
# The original ran this inside the parameter loop below; after the first
# pass the columns are already integers so every later pass was a no-op --
# hoisting it preserves behaviour and removes the redundant work.
for c in train.columns:
    if train[c].dtype == 'object':
        lbl = LabelEncoder()
        # Fit on train+test together so both share a single label space.
        lbl.fit(list(train[c].values) + list(test[c].values))
        train[c] = lbl.transform(list(train[c].values))
        test[c] = lbl.transform(list(test[c].values))

train.y = train.y.astype(int)
X = train.values[0:, 2:377]  # feature columns X0 - X385
Y = train.y.values

# `prime` doubles as the held-out percentage, the max tree depth and the
# minimum leaf size for each run.
for prime in PM:
    X_train, X_test, y_train, y_test = train_test_split( X, Y, test_size = prime/100, random_state = 100)
    # Decision Tree Classifier with criterion gini index
    clf_gini = DecisionTreeClassifier(criterion = "gini", random_state = 100, max_depth=prime, min_samples_leaf=prime)
    clf_gini.fit(X_train, y_train)
    # Decision Tree Classifier with criterion information gain
    clf_entropy = DecisionTreeClassifier(criterion = "entropy", random_state = 100, max_depth=prime, min_samples_leaf=prime)
    clf_entropy.fit(X_train, y_train)
    y_pred = clf_gini.predict(X_test)
    y_pred_en = clf_entropy.predict(X_test)
    print("Accuracy is ", accuracy_score(y_test,y_pred)*100)
    print("Accuracy is ", accuracy_score(y_test,y_pred_en)*100)
    print(prime)
# In[ ]:
|
999,731 | a5cdf66ee78a4217cd71343ca1509497b54e8723 | from django.shortcuts import render,redirect
from .models import Todo
from .forms import TodoForm
from django.contrib import messages
# Create your views here.
def index(request):
    """List todos; on POST, create a new todo from the submitted form.

    Always returns an HttpResponse.  BUGFIX: the original returned None
    for an invalid POST, which Django turns into a 500 error.
    """
    if request.method == 'POST':
        form = TodoForm(request.POST or None)
        if form.is_valid():
            form.save()
            return redirect('index')
        # Invalid submission: log it and fall through to re-render the
        # list so the user gets a page back instead of a server error.
        print('Invalid form')
    todos = Todo.objects.all()
    return render(request,'todo.html',{'todos':todos})
def delete(request, todo_id):
    """Remove the todo with the given primary key, then return to the list."""
    Todo.objects.get(id=todo_id).delete()
    return redirect('index')
def edit(request, todo_id):
    """Edit an existing todo.

    GET renders the edit form; a valid POST saves and redirects to the
    list.  BUGFIX: the original returned None for an invalid POST (a
    Django 500); now the edit page is re-rendered instead.
    """
    todo = Todo.objects.get(id=todo_id)
    if request.method == 'POST':
        form = TodoForm(request.POST or None, instance=todo)
        if form.is_valid():
            form.save()
            return redirect('index')
    todos = Todo.objects.all()
    return render(request, 'edit_todo.html', {'todo': todo,'todos':todos})
|
999,732 | a5980694e911f0ebf2f7e81e512078a2902d94ee | # William
# Generate line graphs for comparison of the average cost of moves comparing player experience and days since release.
from helper_fns import *
import numpy as np
import matplotlib.pyplot as plt
import math, pymongo
from bson import objectid
# Some tunable constants
minimum_played = 3
bucket_count = 15
s1 = process_lookup("beta")
s2 = process_lookup("tango-2-3")
set_config("beta")
def legal_move(state, move):
    """Return True if *move* can be played from *state*.

    *move* is a compact action string: actor character code, an '_'
    separator, then target code(s); "skip" anywhere in it means pass,
    which is always legal.  *state* is the flat game vector assembled in
    get_cost below: state[1:] holds the mover's character healths indexed
    by `chars`, then a stun slot, then the opponent's healths (offset +10)
    and stun slot.  NOTE(review): the slot offsets (state[9], +10) are
    inferred from get_cost's construction -- confirm against helper_fns.
    """
    # Can always skip.
    if "skip" in move:
        return True
    if len(move) < 3:
        return False # ? (too short to contain actor + '_' + target)
    # If actor can't act: dead (health slot is 0) or currently stunned.
    if state[chars.index(move[0])+1] == 0 or state[9] == chars.index(move[0]):
        return False
    # target isn't targetable (opponent health slot is 0)
    if state[chars.index(move[2])+10] == 0:
        return False
    # Archer second target not targetable
    if move[0] == "A" and len(move) > 3:
        if state[chars.index(move[3])+10] == 0:
            return False
    # Healer heal target: must be alive and not already at full health
    if move[0] == "H" and len(move) > 3:
        if state[chars.index(move[3])+1] == 0 or state[chars.index(move[3])+1] == set_health(full_name(move[3])):
            return False
    return True
def get_cost(log, s):
    """Score one logged move against the best move available.

    Rebuilds the flat state vector from the page-hit record *log*, parses
    the move string out of log["action"], and -- when the move is legal
    and alternative actions exist -- returns the relative regret
    (max_p - v) / max_p, where 0 means the optimal move was played.
    Returns 0 when the move cannot be evaluated.  *s* is the lookup
    produced by process_lookup().
    """
    state = [1]  # leading constant slot; layout must match legal_move()
    # p1 (the mover): one health slot per character in `chars`;
    # characters not in this game stay at 0.
    for i in range(len(chars)):
        if chars[i] == log["uc1"]:
            state += [int(log["uc1_health"])]
        elif chars[i] == log["uc2"]:
            state += [int(log["uc2_health"])]
        else:
            state += [0]
    # p1 stun slot: 1-based index of the stunned character, 0 when none.
    # Log values are strings, hence the comparison with "True".
    if log["uc1_stun"] == "True":
        state += [chars.index(log["uc1"]) + 1]
    elif log["uc2_stun"] == "True":
        state += [chars.index(log["uc2"]) + 1]
    else:
        state += [0]
    # p2 states (opponent) -- same health + stun layout as p1
    for i in range(len(chars)):
        if chars[i] == log["oc1"]:
            state += [int(log["oc1_health"])]
        elif chars[i] == log["oc2"]:
            state += [int(log["oc2_health"])]
        else:
            state += [0]
    if log["oc1_stun"] == "True":
        state += [chars.index(log["oc1"]) + 1]
    elif log["oc2_stun"] == "True":
        state += [chars.index(log["oc2"]) + 1]
    else:
        state += [0]
    # Move string: keep only recognised character codes from the raw
    # action text, then make sure the actor/target separator is present.
    move = ""
    for c in log["action"]:
        if c in chars:
            move += c
    if "_" not in move:
        move += "_"
    # Canonical pair key: the mover's two characters in `chars` order.
    pair = log["uc1"]+log["uc2"]
    if chars.index(pair[0]) > chars.index(pair[1]):
        pair = pair[1] + pair[0]
    if legal_move(state,move):
        if check_actions_available(state, pair, 0.15, s):
            v, max_p = cost(state, pair, move, s, classify_mistake=True)
            return (max_p - v) / max_p
    return 0
fig, (ax0, ax1) = plt.subplots(ncols=2, nrows=1, figsize=(10,4))
"""
for p in db.players.find({"Username":{"$exists":True}}):
if p["Username"] in ["probablytom", "cptKav", "Ellen"]: # Do not process the devs, they should know better. Also Frp97 has several impossible moves logged.
continue
# If player played enough S1 games, then process those games.
if db.page_hits.count_documents({"user_move":"True", "user":p["Username"], "balance_code":{"$exists":False}, "kind":"move_viewed", "error":{"$exists":False}}) > 1000:# and db.completed_games.count_documents({"winner":{"$exists":True}, "balance_code":{"$exists":True}, "usernames":p["Username"]}) > 50 :
costs = []
results = []
for m in db.page_hits.find({"user_move":"True", "user":p["Username"], "balance_code":{"$exists":False}, "kind":"move_viewed", "error":{"$exists":False}}):
costs += [get_cost(m, s1)]
start = np.mean(costs[:100])
end = np.mean(costs[-100:])
#print(p["Username"], start, end, len(costs), "season 1")
# x = [x/len(costs) for x in range(len(costs))]
# coefs = np.polyfit(x,costs,1)
# poly = np.poly1d(coefs)
# new_x = np.linspace(x[0],x[-1])
# new_y = poly(new_x)
# clump_costs = [np.mean(costs[y*5:y*5+4]) for y in range(len(costs)/5)]
#ax0.scatter(range(len(costs)),costs)
# results[p["Username"]] = {"delta":poly(1) - poly(0),"actions":len(costs)}
# ax0.plot(new_x, new_y, label = p["Username"] + " - " + str(len(costs)))
#print(len(costs), costs.count(0))
# process costs[] and sort them into 20 buckets under results[]
for i in range(bucket_count):
bucket_values = []
for j in range(math.floor(len(costs)/bucket_count)):
bucket_values += [costs[j+(i*math.floor(len(costs)/bucket_count))]]
#print(p["Username"], len(bucket_values), sum(bucket_values))
results += [sum(bucket_values)/len(bucket_values)]
x = [x/len(results) for x in range(len(results))]
coefs = np.polyfit(x,results,1)
poly = np.poly1d(coefs)
new_x = np.linspace(x[0],x[-1])
new_y = poly(new_x)
#ax0.scatter(x, results)
ax0.plot(new_x, new_y)
if new_y[0] > new_y[1]:
print(p["Username"], "got better")
else:
print(p["Username"], "got worse")
#ax0.scatter(range(len(results)), results)
#x = [x/len(results) for x in range(len(results))]
# for p in results:
# print("{0}'s expected cost changed by {1}".format(p, results[p]))
# ax0.legend()
# results = dict(sorted(results.items(), key = lambda x: x[1]["actions"]))
# x = np.arange(len(results))
# ax0.bar(x, [results[p]["delta"] for p in results])
# ax0.set_xticks(x)
# ax0.set_xticklabels([results[p]["actions"] for p in results], rotation=90)
ax0.title.set_text("Season 1")
ax0.set_ylim([0, 0.05])
ax0.set_xlabel("Proportion of critical moves made")
ax0.set_ylabel("Average relative cost of moves in bucket")
"""
set_config("tango-2-3")
better = 0
worse = 0
print("SEASON 2")
for p in db.players.find({"Username":{"$exists":True}}):
if p["Username"] in ["probablytom", "cptKav", "Ellen"]: # Do not process the devs, they should know better.
continue
if p["Username"] not in ["Anakhand"]:
continue
# If player played enough S1 games, then process those games.
if db.page_hits.count_documents({"user_move":"True", "user":p["Username"], "balance_code":{"$exists":True}, "kind":"move_viewed", "error":{"$exists":False}}) > 10:# and db.completed_games.count_documents({"winner":{"$exists":True}, "balance_code":{"$exists":True}, "usernames":p["Username"]}) > 50 :
costs = []
results = []
for m in db.page_hits.find({"user_move":"True", "user":p["Username"], "balance_code":{"$exists":True}, "kind":"move_viewed", "error":{"$exists":False}}):
costs += [get_cost(m, s2)]
start = sum(i > 0.2 for i in costs[:200])
#mid = np.mean(costs[math.floor(len(costs)/2)-51:math.floor(len(costs)/2) + 50])
end = sum(i > 0.2 for i in costs[-200:])
# if len(costs) > 400:
# plt.plot(["start","end"], [start,end], label = p["Username"])
# if end > start:
# print("{0} got worse".format(p["Username"]))
# worse += 1
# else:
# print("{0} got better".format(p["Username"]))
# better += 1
#print(p["Username"], start, mid, end, len(costs), "season 2")
# if len(costs) > 100:
# break
# x = [x/len(costs) for x in range(len(costs))]
# coefs = np.polyfit(x,costs,1)
# poly = np.poly1d(coefs)
# new_x = np.linspace(x[0],x[-1])
# new_y = poly(new_x)
ax1.scatter(range(len(costs)),costs)
# print(len(costs), costs.count(0))
# results[p["Username"]] = {"delta":poly(1) - poly(0),"actions":len(costs)}
# ax1.plot(new_x, new_y, label = p["Username"] + " - " + str(len(costs)))
for i in range(bucket_count):
bucket_values = []
for j in range(math.floor(len(costs)/bucket_count)):
bucket_values += [costs[j+(i*math.floor(len(costs)/bucket_count))]]
results += [sum(bucket_values)/len(bucket_values)]
#ax1.scatter(range(len(results)), results)
#x = [x/len(results) for x in range(len(results))]
x = [x/len(results) for x in range(len(results))]
coefs = np.polyfit(x,results,1)
poly = np.poly1d(coefs)
new_x = np.linspace(x[0],x[-1])
new_y = poly(new_x)
#ax1.scatter(x, results)
ax1.plot(new_x, new_y)
if new_y[0] > new_y[1]:
print(p["Username"], "got better")
else:
print(p["Username"], "got worse")
# print("worse {0}".format(worse))
# print("better {0}".format(better))
# plt.legend()
# ax1.title.set_text("Season 2")
# ax1.set_ylim([0, 0.05])
# ax1.set_xlabel("Proportion of critical moves made")
# ax1.set_ylabel("Average relative cost of moves in bucket")
plt.tight_layout()
plt.savefig(r"C:\Users\bkav9\OneDrive\Pictures\figures\top_15_1d_learning.png")
plt.show()
# for p in results:
# print("{0}'s expected cost changed by {1}".format(p, results[p]))
# results = dict(sorted(results.items(), key = lambda x: x[1]["actions"]))
# x = np.arange(len(results))
# ax1.bar(x, [results[p]["delta"] for p in results])
# ax1.set_xticks(x)
# ax1.set_xticklabels([results[p]["actions"] for p in results], rotation = 90)
# ax1.set_xlabel("Users (and number of moves made, ascending order)")
# ax1.set_ylabel("")
# ax1.set_ylabel("Change in expected cost per move between first and last move")
# plt.tight_layout()
# plt.show()
# # Show number of games played. Order and colour the results?\
# TODO: Find users where there is fittable data. |
999,733 | 2041a1d97ae57e11c6fe5bb035a2712cf9525c0b | import sys
import os
import shutil
import PyQt5
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5 import uic
from physicoModule import config, PhysicoManage
from physico_main import PhysicoMain
current_dir = os.getcwd()
physicoUI = os.path.join(current_dir, 'uiFiles', 'physico_intro.ui')
class PhysicoIntro(QMainWindow):
    """Intro window: create a new physico project or open an existing one.

    The widget layout is loaded from physico_intro.ui; a directory counts
    as a valid project iff it contains a 'physico_project.psj' marker file.
    """
    def __init__(self):
        super().__init__()
        uic.loadUi(physicoUI, self)
        self.initUI()
        # Starting directory for all file dialogs (None = platform default).
        self.initial_directory = None
    def initUI(self):
        """Set the logo pixmap, wire the buttons and show the window."""
        qPixmapVar = QPixmap()
        pix_path = os.path.join(current_dir, 'iconFiles', 'parkcoach.png')
        qPixmapVar.load(pix_path)
        qPixmapVar = qPixmapVar.scaledToWidth(250)
        self.labelIntro.setPixmap(qPixmapVar)
        self.pushButtonNew.clicked.connect(self.new)
        self.pushButtonStart.clicked.connect(self.start)
        self.pushButtonQuit.clicked.connect(QCoreApplication.instance().quit)
        self.show()
    # @pyqtSlot
    def new(self):
        """Create a new project: marker file, input folders, copied data.

        Asks for a target directory, refuses one that is already a
        project, then copies user-selected workout/wellness .xlsx files
        into the generated '#01_input_data' subtree and opens the main
        window.
        """
        select_directory = QFileDialog.getExistingDirectory(
            self, "Select Folder", self.initial_directory)
        check_file_path = os.path.join(select_directory, 'physico_project.psj')
        if os.path.isfile(check_file_path):
            self.showPopupMsg(
                'Directory Error.', '{} is already physico project directory.\nPlz check again!!'.format(select_directory))
        else:
            # Touch the empty marker file that identifies a project dir.
            f = open(check_file_path, 'w')
            f.close()
            input_data_directory = os.path.join(
                select_directory, '#01_input_data')
            workout_directory = os.path.join(
                input_data_directory, '#001_workout')
            wellness_directory = os.path.join(
                input_data_directory, '#002_wellness')
            os.makedirs(input_data_directory)
            os.makedirs(workout_directory)
            os.makedirs(wellness_directory)
            select_workout_files = QFileDialog.getOpenFileNames(
                self, "Select Workout File", self.initial_directory, 'excel file (*.xlsx)')
            # getOpenFileNames returns (list_of_paths, selected_filter).
            select_workout_files_name = select_workout_files[0]
            print(type(select_workout_files_name))
            if not select_workout_files_name:
                print("0 files selected.")
            else:
                print("{} files selected".format(
                    len(select_workout_files_name)))
                for file_name in select_workout_files_name:
                    workout_basename = os.path.basename(file_name)
                    shutil.copyfile(file_name, os.path.join(
                        workout_directory, workout_basename))
            select_wellness_files = QFileDialog.getOpenFileNames(
                self, "Select wellness File", self.initial_directory, 'excel file (*.xlsx)')
            select_wellness_files_name = select_wellness_files[0]
            print(type(select_wellness_files_name))
            if not select_wellness_files_name:
                print("0 files selected.")
            else:
                print("{} files selected".format(
                    len(select_wellness_files_name)))
                for file_name in select_wellness_files_name:
                    wellness_basename = os.path.basename(file_name)
                    shutil.copyfile(file_name, os.path.join(
                        wellness_directory, wellness_basename))
            # Build/refresh the project data, then open the main window.
            pManage = PhysicoManage(select_directory)
            pManage.updateManager()
            self.ex1 = PhysicoMain(select_directory)
    def start(self):
        """Open an existing project directory in the main window."""
        select_directory = QFileDialog.getExistingDirectory(
            self, "Select Folder", self.initial_directory)
        print(select_directory)
        if os.path.isfile(os.path.join(select_directory, 'physico_project.psj')):
            pManage = PhysicoManage(select_directory)
            pManage.updateManager()
            # Keep a reference so the main window isn't garbage collected.
            self.ex1 = PhysicoMain(select_directory)
        else:
            self.showPopupMsg(
                'Directory Error.', '{} is not physico project directory.\nPlz check again!!'.format(select_directory))
    def showPopupMsg(self, title, content):
        """Show a modal message box with a single OK button."""
        msg = QMessageBox()
        msg.setWindowTitle(title)
        msg.setText(content)
        msg.setStandardButtons(QMessageBox.Ok)
        result = msg.exec_()
        if result == QMessageBox.Ok:
            pass
if __name__ == '__main__':
    # Standalone entry point: run the intro window in its own Qt app.
    app = QApplication(sys.argv)
    ex = PhysicoIntro()
    # ex.show()
    sys.exit(app.exec_())
|
999,734 | 4b810e7ca570c5d0a136f2740e214bc17126aed7 | import numpy as np
import pandas as pd
import time
# Base de dados iris
dados = pd.read_csv('iris.data', sep=",", header=None)
dados =dados.iloc[:,:].values
for vez in range(0,4):
print("\n"+str(vez+1)+" ªvez")
np.random.shuffle(dados)
################################### ENTRADA ###################################
entradas = dados[:,0:4]
normalizacao = np.zeros((entradas.shape[0],entradas.shape[1]))
# Normalização da entrada
for j in range(0,len(entradas[0])):
# media das colunas
minimo = np.min(entradas,0)
maximo = np.max(entradas,0)
#maximo = 1.0
#minimo = 0.0
for i in range(0,len(entradas)):
normalizacao[i,j] = (entradas[i,j] - minimo[j])/(maximo[j]-minimo[j]) #Media zero
# Adicionar uma coluna de 1's para o bias
X0 = np.ones((normalizacao.shape[0],1))
X = np.hstack((X0,normalizacao))
# Treino e validacao
XTreino = X[0:74,:]
XValidacao = X[74:113,:]
# Treino e teste
XTreino__ = X[0:113,:]
XTeste = X[113::,:]
#################################### ROTULOS #################################
rotulosAlfabeto = dados[:,4:]
# Saídas Desejadas transformadas para numero
dicionario = {'Iris-setosa': 0,'Iris-versicolor':1, 'Iris-virginica': 2}
Yd = np.zeros((len(dicionario),entradas.shape[0]))
rotulos = np.zeros(len(entradas))
# Saídas Desejadas transformadas para numero
for i in range(0,len(rotulosAlfabeto)):
rotulos[i] = dicionario[rotulosAlfabeto[i][0]]
if(rotulos[i] == 0):
Yd[:,i] = np.transpose(np.array([1,0,0]))
if(rotulos[i] == 1):
Yd[:,i] = np.transpose(np.array([0,1,0]))
if(rotulos[i] == 2):
Yd[:,i] = np.transpose(np.array([0,0,1]))
YdTreino = Yd[:,0:74]
YdValidacao = Yd[:,74:113]
# Treino e teste
YdTreino__ = Yd[:,0:113]
YdTeste = Yd[:,113::]
######################## PARAMETROS ##########################################
# numero de neuronios na camada oculta
ocultaNeuronios = 10
entradaNeuronios = X.shape[1]
saidaNeuronios = Yd.shape[0]
# Criar pesos aleatorios
# A primeira coluna é do neuronio 1 (bias), a segunda coluna do neuronio 2
WEntradaOculta = np.random.rand(ocultaNeuronios+1, entradaNeuronios)
WOcultaSaida = np.random.rand(saidaNeuronios,ocultaNeuronios+1)
# Definir epocas, erro e aprendizagem
epoca = 0
erroAtual = 1
aprendizagem = 0.3
###################### TREINO E VALIDACAO ####################################
def nonlin(x, deriv=False):
    """Logistic sigmoid activation.

    With deriv=True, returns the derivative *given that x is already a
    sigmoid output* (i.e. x*(1-x)), as used by the backprop passes below.
    """
    if deriv:
        return x * (1 - x)
    return 1 / (1 + np.exp(-x))
yVetorCalculado =np.zeros((YdTreino.shape[0],YdTreino.shape[1]))
while(epoca < 100 and erroAtual>0.001):
startTime = time.time()
erroAtual = 0
for i in range(0,XTreino.shape[0]):
# Foward
#
mult = np.dot(WEntradaOculta,XTreino[i,:])
# Funcao de ativacao tang hiperbolica
saidaCamadaOculta = nonlin(mult)
#Adicionar uma linha de uns
#saidaCamadaOculta = np.append([1],saidaCamadaOculta)
mult2 = np.dot(WOcultaSaida,saidaCamadaOculta)
saida = nonlin(mult2)
for j in range(0,3):
yVetorCalculado[j,i] = np.round(saida[j])
# Calculo do erro
erro = np.power(YdTreino[:,i]-np.transpose(saida),2)
erroAtual = erroAtual + np.mean(erro)
# Backward
gradiente2 = (saida-YdTreino[:,i])*saida*(1-saida)
# atualização do peso
for basico in range(0,gradiente2.shape[0]):
WOcultaSaida[basico,:] = WOcultaSaida[basico,:] - aprendizagem*gradiente2[basico]*saidaCamadaOculta
soma = np.matmul(gradiente2,WOcultaSaida)
# Atualização do peso
for k in range(0,XTreino.shape[1]):
WEntradaOculta[:,k] = WEntradaOculta[:,k] - aprendizagem*(saidaCamadaOculta*(1-saidaCamadaOculta))*soma*XTreino[i,k]
epoca = epoca + 1
# Tirar a media do vetor de erro e depois dividir pelo numero de amostras
erroAtual = erroAtual/XTreino.shape[0]
acerto = 0
erro = yVetorCalculado - YdTreino
for i in range(0,yVetorCalculado.shape[1]):
if(np.count_nonzero(erro[:,i])==0):
acerto = acerto + 1
print("Acuracia Treino : " + str(100*acerto/XTreino.shape[0])+"%")
yVetorCalculado =np.zeros((YdValidacao.shape[0],YdValidacao.shape[1]))
for i in range(0,XValidacao.shape[0]):
# Foward
#
mult = np.dot(WEntradaOculta,XValidacao[i,:])
# Funcao de ativacao tang hiperbolica
saidaCamadaOculta = nonlin(mult)
#Adicionar uma linha de uns
#saidaCamadaOculta = np.append([1],saidaCamadaOculta)
mult2 = np.dot(WOcultaSaida,saidaCamadaOculta)
saida = nonlin(mult2)
for j in range(0,3):
yVetorCalculado[j,i] =np.round(saida[j])
acerto = 0
erro = yVetorCalculado - YdValidacao
for i in range(0,yVetorCalculado.shape[1]):
if(np.count_nonzero(erro[:,i])==0):
acerto = acerto + 1
print("Acuracia Validacao: " + str(100*acerto/XValidacao.shape[0])+"%")
fim = time.time()
print("Tempo de execuçao Treino e Validação:" +str(fim - startTime))
####################### Treino e Teste ####################################
# Criar pesos aleatorios
# A primeira coluna é do neuronio 1 (bias), a segunda coluna do neuronio 2
WEntradaOculta = np.random.rand(ocultaNeuronios+1, entradaNeuronios)
WOcultaSaida = np.random.rand(saidaNeuronios,ocultaNeuronios+1)
yVetorCalculado =np.zeros((YdTreino__.shape[0],YdTreino__.shape[1]))
epoca = 0
while(epoca < 100 and erroAtual>0.001):
startTime = time.time()
erroAtual = 0
for i in range(0,XTreino__.shape[0]):
# Foward
#
mult = np.dot(WEntradaOculta,XTreino__[i,:])
# Funcao de ativacao tang hiperbolica
saidaCamadaOculta = nonlin(mult)
#Adicionar uma linha de uns
#saidaCamadaOculta = np.append([1],saidaCamadaOculta)
mult2 = np.dot(WOcultaSaida,saidaCamadaOculta)
saida = nonlin(mult2)
for j in range(0,3):
yVetorCalculado[j,i] = np.round(saida[j])
# Calculo do erro
erro = np.power(YdTreino__[:,i]-np.transpose(saida),2)
erroAtual = erroAtual + np.mean(erro)
# Backward
gradiente2 = (saida-YdTreino__[:,i])*saida*(1-saida)
# atualização do peso
for basico in range(0,gradiente2.shape[0]):
WOcultaSaida[basico,:] = WOcultaSaida[basico,:] - aprendizagem*gradiente2[basico]*saidaCamadaOculta
soma = np.matmul(gradiente2,WOcultaSaida)
# Atualização do peso
for k in range(0,XTreino__.shape[1]):
WEntradaOculta[:,k] = WEntradaOculta[:,k] - aprendizagem*(saidaCamadaOculta*(1-saidaCamadaOculta))*soma*XTreino__[i,k]
epoca = epoca + 1
# Tirar a media do vetor de erro e depois dividir pelo numero de amostras
erroAtual = erroAtual/XTreino__.shape[0]
acerto = 0
erro = yVetorCalculado - YdTreino__
for i in range(0,yVetorCalculado.shape[1]):
if(np.count_nonzero(erro[:,i])==0):
acerto = acerto + 1
print("\nAcuracia 2º Treino: " + str(100*acerto/XTreino__.shape[0])+"%")
yVetorCalculado =np.zeros((YdTeste.shape[0],YdTeste.shape[1]))
for i in range(0,XTeste.shape[0]):
# Foward
#
mult = np.dot(WEntradaOculta,XTeste[i,:])
# Funcao de ativacao tang hiperbolica
saidaCamadaOculta = nonlin(mult)
#Adicionar uma linha de uns
#saidaCamadaOculta = np.append([1],saidaCamadaOculta)
mult2 = np.dot(WOcultaSaida,saidaCamadaOculta)
saida = nonlin(mult2)
for j in range(0,3):
yVetorCalculado[j,i] =np.round(saida[j])
acerto = 0
erro = yVetorCalculado - YdTeste
for i in range(0,yVetorCalculado.shape[1]):
if(np.count_nonzero(erro[:,i])==0):
acerto = acerto + 1
print("Acuracia Teste: " + str(100*acerto/XTeste.shape[0])+"%")
fim = time.time()
print("Tempo de execuçao Treino e Teste:" +str(fim - startTime))
|
999,735 | f455ca9c163680f2f91c8eb3bcb069fd2557ff5a | '''
You are given two strings word1 and word2. Merge the strings by adding letters in alternating order, starting with word1. If a string is longer than the other, append the additional letters onto the end of the merged string.
Return the merged string.
Example 1:
Input: word1 = "abc", word2 = "pqr"
Output: "apbqcr"
Explanation: The merged string will be merged as so:
word1: a b c
word2: p q r
merged: a p b q c r
Example 2:
Input: word1 = "ab", word2 = "pqrs"
Output: "apbqrs"
Explanation: Notice that as word2 is longer, "rs" is appended to the end.
word1: a b
word2: p q r s
merged: a p b q r s
Example 3:
Input: word1 = "abcd", word2 = "pq"
Output: "apbqcd"
Explanation: Notice that as word1 is longer, "cd" is appended to the end.
word1: a b c d
word2: p q
merged: a p b q c d
Constraints:
1 <= word1.length, word2.length <= 100
word1 and word2 consist of lowercase English letters.
'''
class Solution(object):
    def mergeAlternately(self, word1, word2):
        """
        Merge word1 and word2 by alternating characters, starting with
        word1; whatever remains of the longer word is appended at the end.

        :type word1: str
        :type word2: str
        :rtype: str
        """
        # The original duplicated an identical while-loop in both branches
        # of `if l1 < l2`; one loop over the longer length covers every
        # case, and joining a list avoids quadratic string concatenation.
        merged = []
        for i in range(max(len(word1), len(word2))):
            if i < len(word1):
                merged.append(word1[i])
            if i < len(word2):
                merged.append(word2[i])
        return "".join(merged)
# Ad-hoc smoke test: word1 is longer, so expect "apbqcd" (tail "cd" appended).
test = Solution()
w1 = "abcd"
w2 = "pq"
out=test.mergeAlternately(w1,w2)
print(out)
|
999,736 | e87f00efe72e6953c83731ff1ca7966086fcf135 | import warnings
from django.conf import settings
from django.contrib.auth.models import AbstractUser
from django.contrib.auth.models import SiteProfileNotAvailable
from django.contrib.auth.models import UserManager as BaseUserManager
from django.core.exceptions import ImproperlyConfigured
from django.core.mail import send_mail
from django.db import models
from django.utils import timezone
from django.utils.http import urlquote
class Error(Exception):
    """Base error for this module, raised by User.get_or_create_remote_user."""
    pass
class UserManager(BaseUserManager):
    """Manager for the email-keyed User model below (email replaces username)."""
    def create_user(self, email, password=None, **extra_fields):
        """
        Creates and saves a User with the given email and password.
        Raises ValueError when email is empty.
        """
        now = timezone.now()
        if not email:
            raise ValueError('The given email must be set')
        # normalize_email lowercases the domain part of the address.
        email = UserManager.normalize_email(email)
        user = self.model(email=email,
                          is_staff=False, is_active=True, is_superuser=False,
                          last_login=now, date_joined=now, **extra_fields)
        # set_password hashes; the raw password is never stored.
        user.set_password(password)
        user.save(using=self._db)
        return user
    def create_superuser(self, email, password, **extra_fields):
        """Create an active staff superuser identified by email."""
        # 'username' is not part of this model's identity; drop it if a
        # caller (e.g. the createsuperuser command) passes it through.
        if 'username' in extra_fields:
            del extra_fields['username']
        u = self.create_user(email=email, password=password, **extra_fields)
        u.is_staff = True
        u.is_active = True
        u.is_superuser = True
        u.save(using=self._db)
        return u
class User(AbstractUser):
    """Site user; may mirror an account that lives on a remote domain."""
    # True for accounts auto-created on demand for non-local domains.
    remote_user = models.BooleanField(default=False)
    @classmethod
    def get_or_create_remote_user(cls, user_id):
        """Resolve *user_id* ('name@domain') to a User instance.

        For domains listed in settings.DOMAINS the user must already exist
        locally; any other domain gets a remote user created on demand.
        Raises Error for a malformed id or a missing local user.
        """
        if "@" not in user_id:
            raise Error('user_id should be of the form of an email')
        username, domain = map(lambda x: x.strip(), user_id.split('@'))
        if domain in settings.DOMAINS:
            try:
                # BUGFIX: QuerySet.get() returns a single instance, not an
                # (obj, created) tuple -- the original tuple-unpack raised
                # TypeError on every successful lookup.
                user = User.objects.get(username=username)
            except User.DoesNotExist:
                raise Error('User is not external user and doesn\'t exist on our db')
            else:
                return user
        else:
            # BUGFIX: the model field is named remote_user; the original
            # passed remote=True, which Django rejects with FieldError.
            user, created = User.objects.get_or_create(username=user_id, remote_user=True)
            return user
|
999,737 | 4f33a9ffeea939424a283694b3656ad4af8b5958 | from django.db import models
from django.urls import reverse
from django.conf import settings
from django.contrib.auth.models import User
# Create your models here.
class Type(models.Model):
    """Role label attached to a user."""
    # default=1 falls back to the first user — NOTE(review): confirm intended.
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, default=1)
    role = models.CharField(max_length=100)


class Project(models.Model):
    """A named project with many participating users."""
    user_link = models.ManyToManyField(User)
    name = models.CharField(max_length=100)


# F1: collection/logistics record (bags, vehicle, trips, ...).
# NOTE(review): IntegerField(blank=True) without null=True still requires a
# value at the database level — confirm whether null=True was intended.
class F1(models.Model):
    name = models.CharField(max_length=100)
    bags = models.IntegerField(blank=True)
    location = models.CharField(max_length=100)
    vehicle = models.IntegerField(blank=True)
    trips = models.IntegerField(blank=True)
    time = models.CharField(max_length=100)


# F2: classification flags for a garbage entry.
class F2(models.Model):
    name = models.CharField(max_length=100)
    garbage = models.CharField(max_length=100)
    bio = models.BooleanField()
    hazard = models.BooleanField()
    recycle = models.BooleanField()


class Form(models.Model):
    """A form belonging to a project."""
    name = models.CharField(max_length=100)
    project_link = models.ForeignKey(Project, on_delete=models.CASCADE, related_name="projects", null=True, blank=True)


class Question(models.Model):
    """A question inside a form."""
    name = models.CharField(max_length=100)
    form_link = models.ForeignKey(Form, on_delete=models.CASCADE, related_name='question', null=True, blank=True)


class Option(models.Model):
    """A selectable option for a question."""
    name = models.CharField(max_length=100)
    question_link = models.ForeignKey(Question, on_delete=models.CASCADE, related_name='option', null=True, blank=True)


class Answer(models.Model):
    """A user's free-text answer to a question."""
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, default=1)
    # form = models.ForeignKey(Form, on_delete=models.CASCADE)
    question_link = models.ForeignKey(Question, on_delete=models.CASCADE, related_name="question")
    answer = models.CharField(max_length=200)
|
999,738 | db937a15a6133b3f0aa17695d7d64ba6c7314f1e | no=int(input("enter no:"))
for x in range(2,no):
if no%x==0:
print(no,"is not prime number")
break
else:
print(no,"is prime number") |
999,739 | 9d69db29bb3a0085514e680182d08ea4de3577bd | from django.conf.urls import include, url
from . import views
from django.urls import path
# URL routes for the course-management views; names mirror the view names.
urlpatterns = [
    path('home', views.home, name='home'),
    # path('student', views.studenthome),
    # path('faculty', views.facultyhome),
    # path('admin', views.adminhome),
    path('loginValidate', views.loginvalidate, name='loginValidate'),
    # Services:
    # -- creation endpoints
    path('createCourse', views.createCourse, name='createCourse'),
    path('createStudent', views.createStudent, name='createStudent'),
    path('createFaculty', views.createFaculty, name='createFaculty'),
    # -- assignment / lookup endpoints
    path('loadInstructor', views.loadInstructor, name='loadInstructor'),
    path('viewMyCourses', views.viewMyCourses, name='viewMyCourses'),
    path('assignCourseToStudent', views.assignCourseToStudent, name='assignCourseToStudent'),
    path('assignTAToCourse', views.assignTAToCourse, name='assignTAToCourse'),
    # -- listing endpoints
    path('viewStudents', views.viewStudents, name='viewStudents'),
    path('viewFaculties', views.viewFaculties, name='viewFaculties'),
    path('viewAllCourses', views.viewAllCourses, name='viewAllCourses'),
]
|
999,740 | 1b42d4424827fde9b3ceabfe30ba959459f8a8fd | import argparse
import random
import jsonlines
import os
def main(args):
    """Split a jsonlines summarization dump into train/val/test splits.

    Reads ``args.train_file`` (one JSON object per line with keys
    'article_original' — a list of sentences — and 'abstractive') and writes
    paired ``*.source`` / ``*.target`` text files into ``args.output_dir``.
    """

    def _write(filename, lines):
        # Single place for the previously eight-times-copied
        # open/writelines boilerplate ("w+" read access was never used).
        with open(os.path.join(args.output_dir, filename), "w", encoding="utf-8") as fh:
            fh.writelines(lines)

    for arg in vars(args):
        print(arg, getattr(args, arg))

    random.seed(args.random_seed)  # reproducible shuffle/split
    os.makedirs(args.output_dir, exist_ok=True)

    src_docs = []
    tgt_docs = []
    with jsonlines.open(args.train_file) as f:
        for line in f.iter():
            # Flatten article sentences to one line; strip embedded newlines.
            src_docs.append(" ".join(line['article_original']).replace('\n', '') + "\n")
            tgt_docs.append(line['abstractive'].replace('\n', '') + "\n")
    print("lines", len(src_docs))

    docs = list(zip(src_docs, tgt_docs))
    random.shuffle(docs)

    # The first val_test_ratio of the shuffled docs is split evenly into
    # validation and test; the remainder is training data.
    val_test_cnt = round(len(docs) * args.val_test_ratio)
    print(val_test_cnt)
    val_test_docs = docs[:val_test_cnt]
    train_docs = docs[val_test_cnt:]
    val_docs = val_test_docs[:len(val_test_docs) // 2]
    test_docs = val_test_docs[len(val_test_docs) // 2:]

    train_source, train_target = zip(*train_docs)
    val_source, val_target = zip(*val_docs)
    test_source, test_target = zip(*test_docs)
    print("train", len(train_source))
    print("val", len(val_source))
    print("test", len(test_source))

    total_source, total_target = zip(*docs)
    _write("train.source.total", total_source)
    _write("train.target.total", total_target)
    _write("train.source", train_source)
    _write("train.target", train_target)
    _write("val.source", val_source)
    _write("val.target", val_target)
    _write("test.source", test_source)
    _write("test.target", test_target)
    print("done")
if __name__ == '__main__':
    # CLI entry point; defaults point at a local copy of the Dacon
    # abstractive-summarization dataset.
    parser = argparse.ArgumentParser()
    parser.add_argument('--train_file', type=str,
                        default='/media/irelin/data_disk/dataset/dacon_summury/abstractive/train.jsonl')
    parser.add_argument('--output_dir', type=str,
                        default='/media/irelin/data_disk/dataset/dacon_summury/abstractive/preprocessed')
    parser.add_argument('--random_seed', type=int, default=1)  # shuffle/split seed
    parser.add_argument('--val_test_ratio', type=float, default=0.2)  # held-out fraction
    main(parser.parse_args())
|
999,741 | 3abfff18f1abfeef775133dddaaba0784ad1d496 | from django.db import models
# NOTE(review): the ForeignKey fields below omit on_delete, which is
# mandatory from Django 2.0 onward — this module presumably targets
# Django < 2.0; confirm before upgrading.
class Codec(models.Model):
    """Lookup value: a video codec name."""
    name = models.CharField(max_length=256, null=False, blank=False)
    def __str__(self):
        return u'Codec :: ' + \
            self.name
    class Meta:
        verbose_name = 'Codec'
        verbose_name_plural = 'Codecs'


class Specification(models.Model):
    """Lookup value: a technical specification name."""
    name = models.CharField(max_length=256, null=False, blank=False)
    def __str__(self):
        return u'Specification :: ' + \
            self.name
    class Meta:
        verbose_name = 'Specification'
        verbose_name_plural = 'Specifications'


class ScanType(models.Model):
    """Lookup value: a video scan type."""
    name = models.CharField(max_length=256, null=False, blank=False)
    def __str__(self):
        return u'Scan Type :: ' + \
            self.name
    class Meta:
        verbose_name = 'Scan Type'
        verbose_name_plural = 'Scan Types'


class EntityType(models.Model):
    """Lookup value: an entity type name."""
    name = models.CharField(max_length=256, null=False)
    def __str__(self):
        return u"Entity Type :: " + \
            self.name
    class Meta:
        verbose_name = "Entity Type"
        verbose_name_plural = "Entity Types"


class Endpoint(models.Model):
    """Lookup value: a delivery endpoint name."""
    name = models.CharField(max_length=256, null=False)
    def __str__(self):
        return u"Endpoint " + \
            u' :: ' + \
            self.name
    class Meta:
        verbose_name = "Endpoint"
        verbose_name_plural = "Endpoints"


class ImageType(models.Model):
    """Lookup value: a static image type name."""
    name = models.CharField(max_length=256, null=False)
    def __str__(self):
        return u'Image Type :: ' + \
            self.name
    class Meta:
        verbose_name = 'Image Type'
        verbose_name_plural = 'Image Types'


class Platform(models.Model):
    """Lookup value: a target platform name."""
    name = models.CharField(max_length=256, null=False)
    def __str__(self):
        return u"Platform " + \
            u" :: " + \
            self.name
    class Meta:
        verbose_name = "Platform"
        verbose_name_plural = "Platforms"


class TechnicalVideoRequirements(models.Model):
    """Video/audio delivery requirements; all technical fields optional."""
    codec = models.ForeignKey(Codec, blank=True, null=True)
    specification = models.ForeignKey(Specification, blank=True, null=True)
    scan_type = models.ForeignKey(ScanType, blank=True, null=True)
    video_average_bitrate = models.FloatField(blank=True, null=True)
    frame_rate = models.FloatField(blank=True, null=True)
    audio_sample_rate = models.IntegerField(blank=True, null=True)
    audio_bit_depth = models.IntegerField(blank=True, null=True)
    audio_channels = models.IntegerField(blank=True, null=True)
    audio_channel_configuration = models.CharField(max_length=256, blank=True, null=True)
    x_pixels_per_inch = models.IntegerField(blank=True, null=True)
    y_pixels_per_inch = models.IntegerField(blank=True, null=True)
    audio_track_configuration = models.CharField(max_length=256, null=True, blank=True)
    description = models.CharField(max_length=256, null=False)  # only required field
    def __str__(self):
        return u"TVR :: " + \
            self.description
    class Meta:
        verbose_name = "Technical Video Requirement"
        verbose_name_plural = "Technical Video Requirements"


class StaticImageRequirements(models.Model):
    """Static image delivery requirements."""
    image_type = models.ForeignKey(ImageType, null=True, blank=True)
    image_content_relationship = models.CharField(max_length=256, blank=True, null=True)
    description = models.CharField(max_length=256, null=False)
    def __str__(self):
        return u"SIR :: " + \
            self.description
    class Meta:
        verbose_name = "Static Image Requirement"
        verbose_name_plural = "Static Image Requirements"


class MetadataSchema(models.Model):
    """Metadata schema with an optional transform reference."""
    schema = models.CharField(max_length=256, null=False)
    xform = models.CharField(max_length=256, null=True, blank=True)
    def __str__(self):
        return u"Metadata Schema :: " + \
            self.schema
    class Meta:
        verbose_name = "Metadata Schema"
        verbose_name_plural = "Metadata Schemas"


class PackageStructure(models.Model):
    """Template describing how a delivery package is laid out."""
    structure_template = models.CharField(max_length=256, null=False)
    def __str__(self):
        return u"Package Structure :: " + \
            self.structure_template
    class Meta:
        verbose_name = "Package Structure"
        verbose_name_plural = "Package Structures"


class DeliveryProcess(models.Model):
    """Delivery workflow (referenced by a BPMN identifier)."""
    bpmn = models.CharField(max_length=256, null=False)
    delivery_location = models.CharField(max_length=1024, null=True, blank=True)
    delivery_method = models.CharField(max_length=1024, null=True, blank=True)
    receipt_acknowledgement = models.BooleanField(default=False)
    def __str__(self):
        return u"Delivery Process :: " + \
            self.bpmn
    class Meta:
        verbose_name = "Delivery Process"
        verbose_name_plural = "Delivery Processes"


class DeliveryContext(models.Model):
    """Ties together every requirement needed for one delivery target."""
    endpoint = models.ForeignKey(Endpoint)
    platform = models.ForeignKey(Platform)
    entity_type = models.ForeignKey(EntityType)
    technical_requirements = models.ForeignKey(TechnicalVideoRequirements)
    image_requirements = models.ForeignKey(StaticImageRequirements)
    metadata_schema = models.ForeignKey(MetadataSchema)
    package_structure = models.ForeignKey(PackageStructure)
    delivery_process = models.ForeignKey(DeliveryProcess)
    def __str__(self):
        return u"Delivery Context ID " + \
            str(self.id)
    class Meta:
        verbose_name = "Delivery Context"
        verbose_name_plural = "Delivery Contexts"
999,742 | db22da6b09272dd1c3bfe5fcb79a7566c2a1e72c | import numpy as np
import matplotlib.pyplot as plt
import random, time, math
# Part c
def flip_coins(flips, p):
    """Flip `flips` biased coins (P(heads) = p) and return the head count."""
    outcomes = np.random.choice(2, flips, p=[(1 - p), p])
    return outcomes.sum()
# Part c: one bar chart (tails vs heads) per sample size k.
fig1 = plt.figure(1)
for flips, i in zip([10, 100, 1000, 4000], range(1, 5)):
    fig1.add_subplot(2, 2, i)
    heads = flip_coins(flips, .7)
    plt.bar([0, 1], [flips - heads, heads])
    plt.title("Part c: k=%d" % flips)
    plt.xlabel("[Tails, Heads]")
    plt.ylabel("Number of Heads or Tails")
    plt.tight_layout()
plt.show()
# Part d
def runs(flips):
    """Repeat the `flips`-coin experiment 1000 times; return head counts."""
    n_trials = 1000
    head_counts = []
    for _ in range(n_trials):
        head_counts.append(flip_coins(flips, .7))
    return head_counts
# Part d: histogram of head counts across 1000 repeated runs, per k.
fig2 = plt.figure(2)
for flips, i in zip([10, 100, 4000], range(1, 4)):
    fig2.add_subplot(3, 1, i)
    plt.hist(runs(flips))
    plt.title("Part d: k=%d" % flips)
    plt.xlabel("Number of Heads for Run")
    plt.ylabel("Frequency")
    plt.tight_layout()
plt.show()
# Part e
def plot_fractions(flips, p):
    """Return the head fraction after k flips, for k = 1..flips.

    Each entry is an independent experiment of k biased coin flips
    (P(heads) = p), normalized by k.

    Bug fix: the original ignored the `p` parameter (hard-coded .7) and
    carried an unused local `m = 1000`.
    """
    return [flip_coins(k, p) / k for k in range(1, flips + 1)]
# Part e: running head fraction vs k (law of large numbers).
fig3 = plt.figure(3)
for flips, i in zip([10, 100, 4000], range(1, 4)):
    fig3.add_subplot(3, 1, i)
    plt.plot(plot_fractions(flips, .7))
    plt.title("Part e: k=%d" % flips)
    plt.xlabel("Number of Heads for Run")
    plt.ylabel("Frequency")
    plt.tight_layout()
plt.show()
# Part f
##plt.figure(4)
def q_curve_norm(k, p):
    """Plot an empirical quantile curve of sqrt(k)-rescaled head fractions
    over 1000 independent runs of k flips with bias p."""
    RUNS = 1000

    def one_sample():
        frac = flip_coins(k, p) / float(k)
        # Re-center on p and magnify the deviation by sqrt(k).
        return p + (frac - p) * math.sqrt(k)

    samples = sorted(one_sample() for _ in range(RUNS))
    plt.plot(samples, list(range(1, RUNS + 1)))
# Part f: overlay quantile curves for several sample sizes k (fixed p=.7).
for k in [10, 50, 100, 500, 2000]:
    q_curve_norm(k, .7)
plt.title("Part f: Normalized Plot")
plt.xlabel("All Tails to All Heads")
plt.ylabel("Number of Trials")
plt.show()
# Part g
def hist_norm(k, p):
    """Return 1000 head counts, centered at k*p and scaled by sqrt(k)
    (CLT normalization)."""
    RUNS = 1000
    center = k * p
    scale = math.sqrt(k)
    return [(flip_coins(k, p) - center) / scale for _ in range(RUNS)]
# Part g: histograms of normalized head counts — approximately normal by CLT.
fig5 = plt.figure(5)
for flips, i in zip([10, 100, 1000, 4000], range(1, 5)):
    fig5.add_subplot(2, 2, i)
    plt.hist(hist_norm(flips, .7))
    plt.title("Part g: Normalized, k=%d" % flips)
    plt.tight_layout()
plt.show()
# Part h: repeat parts f and g for several biases p.
fig6 = plt.figure(6)
fig6.add_subplot(3, 2, 1)
for p, i in zip([.9, .6, .5, .4, .3], range(1, 6)):
    fig6.add_subplot(3, 2, i)
    for k in [10, 50, 100, 500, 2000]:
        q_curve_norm(k, p)
    plt.title("Part h: p=%s" % str(p))
    plt.tight_layout()
plt.show()

# Figures 7-11: one normalized-histogram grid per bias p.
# (Replaces five hand-copied figure blocks with a single loop; figure
# numbers and title strings are unchanged — `label` reproduces the
# original ".9"-style titles exactly.)
for fignum, (p, label) in zip(range(7, 12),
                              [(.9, ".9"), (.6, ".6"), (.5, ".5"), (.4, ".4"), (.3, ".3")]):
    fig = plt.figure(fignum)
    for flips, i in zip([10, 100, 1000, 4000], range(1, 5)):
        fig.add_subplot(2, 2, i)
        plt.hist(hist_norm(flips, p))
        plt.title("Part h: Normalized, k=%d p=%s" % (flips, label))
        plt.tight_layout()
    plt.show()
# Part i
|
999,743 | 883abf800a1d62a38fcec9d6dee3208d06392761 | # coding: utf-8
"""
Automated Tool for Optimized Modelling (ATOM)
Author: Mavs
Description: Unit tests for models.py
"""
# Standard packages
import pytest
import numpy as np
from sklearn.ensemble import RandomForestRegressor
# Keras
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Conv2D
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
# Own modules
from atom import ATOMClassifier, ATOMRegressor
from atom.models import MODEL_LIST
from atom.utils import ONLY_CLASS, ONLY_REG
from .utils import X_bin, y_bin, X_class2, y_class2, X_reg, y_reg, mnist
# Variables ======================================================== >>
# Per-task model selections: drop models that cannot handle the task
# (ONLY_REG / ONLY_CLASS) plus special cases — CatNB needs a categorical
# dataset (see test_CatNB); CatB is additionally excluded for multiclass
# (reason not stated here — confirm).
binary = [m for m in MODEL_LIST.keys() if m not in ["CatNB"] + ONLY_REG]
multiclass = [m for m in MODEL_LIST.keys() if m not in ["CatNB", "CatB"] + ONLY_REG]
regression = [m for m in MODEL_LIST.keys() if m not in ONLY_CLASS]
# Functions ======================================================= >>
def neural_network():
    """Create a convolutional neural network in Keras."""
    layers = [
        Conv2D(64, kernel_size=3, activation="relu", input_shape=(28, 28, 1)),
        Conv2D(64, kernel_size=3, activation="relu"),
        Flatten(),
        Dense(10, activation="softmax"),
    ]
    model = Sequential(layers)
    model.compile(optimizer='adam', loss='categorical_crossentropy')
    return model
# Test custom models =============================================== >>
@pytest.mark.parametrize("model", [RandomForestRegressor, RandomForestRegressor()])
def test_custom_models(model):
    """Assert that ATOM works with custom models."""
    # Parametrized with both a class and an instance — ATOM should accept either.
    atom = ATOMRegressor(X_reg, y_reg, random_state=1)
    atom.run(models=model, n_calls=2, n_initial_points=1)
    assert atom.rfr.fullname == "RandomForestRegressor"
    # The trainer's random_state should be propagated into the estimator.
    assert atom.rfr.estimator.get_params()["random_state"] == 1


def test_deep_learning_models():
    """Assert that ATOM works with deep learning models."""
    atom = ATOMClassifier(*mnist, n_rows=0.1, random_state=1)
    # clean() must raise PermissionError on this (image) dataset.
    pytest.raises(PermissionError, atom.clean)
    atom.run(KerasClassifier(neural_network, epochs=1, batch_size=512, verbose=0))
# Test predefined models =========================================== >>
@pytest.mark.parametrize("model", binary)
def test_models_binary(model):
    """Assert that all models work with binary classification."""
    atom = ATOMClassifier(X_bin, y_bin, test_size=0.24, random_state=1)
    atom.run(
        models=model,
        metric="auc",
        n_calls=2,  # minimal BO iterations to keep the suite fast
        n_initial_points=1,
        bo_params={"base_estimator": "rf", "cv": 1},
    )
    assert not atom.errors  # Assert that the model ran without errors
    assert hasattr(atom, model)  # Assert that the model is an attr of the trainer


@pytest.mark.parametrize("model", multiclass)
def test_models_multiclass(model):
    """Assert that all models work with multiclass classification."""
    atom = ATOMClassifier(X_class2, y_class2, test_size=0.24, random_state=1)
    atom.run(
        models=model,
        metric="f1_micro",
        n_calls=2,
        n_initial_points=1,
        bo_params={"base_estimator": "rf", "cv": 1},
    )
    assert not atom.errors
    assert hasattr(atom, model)


@pytest.mark.parametrize("model", regression)
def test_models_regression(model):
    """Assert that all models work with regression."""
    atom = ATOMRegressor(X_reg, y_reg, test_size=0.24, random_state=1)
    atom.run(
        models=model,
        metric="neg_mean_absolute_error",
        n_calls=2,
        n_initial_points=1,
        bo_params={"base_estimator": "gbrt", "cv": 1},
    )
    assert not atom.errors
    assert hasattr(atom, model)


def test_CatNB():
    """Assert that the CatNB model works. Separated because of special dataset."""
    # CatNB requires small-integer (categorical) features.
    X = np.random.randint(5, size=(100, 100))
    y = np.random.randint(2, size=100)
    atom = ATOMClassifier(X, y, random_state=1)
    atom.run(models="CatNB", n_calls=2, n_initial_points=1)
    assert not atom.errors
    assert hasattr(atom, "CatNB")
|
999,744 | 55371041e1e1724a2077e926a123ebb3706bf4cc | class Solution(object):
def dfs(self, source, target, startindex, result, resultcollection):
# inplement by in-place to save the space complexity to O(1)
if target < 0:
return
elif target == 0:
# assume the target equal to zero reprecenting find all numbers of the sum equal to target
result.append(resultcollection)
for searchindex in range(startindex, len(source), 1):
if source[searchindex] > target:
return
elif searchindex > startindex and source[searchindex] == source[searchindex-1]:
# avoid the duplicate result
continue
self.dfs(source, target - source[searchindex], searchindex + 1, result, resultcollection + [source[searchindex]])
def combinationSum(self, candidates, target):
# corner case
if not candidates:
return
# the source must be sorted
candidates.sort()
# operation variable
result = []
self.dfs(candidates, target, 0, result, [])
return result
if __name__ == '__main__' :
    # Ad-hoc smoke test; expected output: [[1, 1, 6], [1, 2, 5], [1, 7], [2, 6]].
    source = [10,1,2,7,6,1,5]
    target = 8
    result = Solution().combinationSum(source, target)
    print(result)
999,745 | d23275ad8936fb32c3950c804cfd0b6e998d1290 | import re
import subprocess, shlex
def execute_cmd(cmd_str):
    """Run a shell-style command string as a subprocess and wait for it,
    merging its stderr into stdout."""
    argv = shlex.split(cmd_str)
    subprocess.call(argv, stderr=subprocess.STDOUT)
def gen_cookie_map(cstr):
    """Parse a 'k=v; k2=v2' cookie header string into a dict
    (only the first '=' in each pair separates key from value)."""
    pairs = (item.split('=', 1) for item in cstr.split(';'))
    return {key.strip(): value.strip() for key, value in pairs}
# work for youtube video
def gen_base_fname(title):
    """Turn a free-form title into a filesystem-safe base name:
    runs of non-alphanumeric characters become single underscores.

    Bug fix: the original class `[^A-Z^a-z^0-9^]` treated the inner '^'
    characters as literals, so '^' survived into filenames.
    """
    cleaned = re.sub(r'[^A-Za-z0-9]', ' ', title)
    return re.sub(' +', '_', cleaned.strip())
def extract_speaker_info(speaker_str):
    """Split 'Name, Company' at the LAST comma into a (name, company) tuple,
    with surrounding whitespace stripped from both parts."""
    name, corp = (part.strip() for part in speaker_str.rsplit(',', 1))
    return name, corp
# work for vimeo video
def extract_basename(video_link):
    """Pull the numeric file id out of a vimeo-style .mp4 URL;
    return '' when the link does not match."""
    matches = re.findall(r'.*/(\d*).mp4.*', video_link)
    return matches[0] if matches else ''
def isYoutubeVideo(link):
    """Return a match object when `link` points at YouTube
    (youtube.com or youtu.be), else None."""
    import re
    return re.match(r'.*(youtube\.com|youtu\.be).*', link)
def isVimeoVideo(link):
    """Return a match object when `link` points at the vimeo player,
    else None."""
    import re
    return re.match(r'.*player\.vimeo\.com.*', link)
def repairSlideLink(link):
    """Prefix protocol-relative links ('//host/...') with 'https:';
    any other link is returned unchanged."""
    import re
    if re.match(r'^\/\/.*', link):
        return "https:%s" % (link)
    return link
|
999,746 | 24a585d3dd4b8d36e8ef8928a01ad2a124332857 | __code__='Gau^c$ZmB?GpZXsY(!+m!:_=#Z3V\'gRfXajU*PU#Bi07KZmc8-.8_]jL)[W+YT&R>TW1783n97#."9--k`"PH]UfP!a^cbT8S0<=aZ<a(f%FT>H(Sn2PO\\?YZ[$C_1n4$t0q6F$B;5@>s&N,Y5fiZ[S2i),eQ\'*H]lbFsrSR@aJ#IJfQ_D0,kg-l@.uI/>7ArPRW[jO?Xm!3ri`WH\'[dfk3%$>d?&IBhiWS=:(91aGrXJT%HG\'YHEB3)<]QL^s?S5ASo-Z/%WCp2[<^BM6b^O:8Is7Y4GoUE#O=W+FKpcFo/i6%-E`:qsMnB-PMZ8-0UY3`RjY3`LhXc1qC/,S`4.gPDC.gPHo/!b%S*3p*WN8*ACL[Du08*hHN\'DBAe;>]S"1&Dg>PH,a3.G@5)n#;m\\H,P/BA^[/T3GG*b\'kdHYlD?sUoY.K\\;kbE2kkEJ.61fp]HXr"Tar(`mS(`;@RreiaO#[;`Vostr3;)s\'1U-P*(jr"1:\\*<Qh*!\'=`J>?GHc80\'-ME?"intVOp/thW:X^^5\'&tg+j-uKc:)k$>W;_!t$#u2YVo:B1S:8eBU,YHR(W7Ql];``E3(YDk;Mf&)P^&GL>bokJi\\=&RN)H8_YD^lhn^ge<OuO\\XT_FelPN"%f]ct0hS/"iKS#\\k-4XWE?cA?bme+7$PS>Z,5`5fh#nSUsEO*F-!Um1UOc-M8?<:_F$l#iHZ(F\\CN\\3_WG_NYr.$1XO1Sk?[E4t5>5hFi(!i`"jJG_,;>-b`6tL_o=DLA8-C(%7p0S)Ols6enKL>N1rnM8&SEphk(U]L"&#Mt[UK;JqC\'Lf`j8(%FHH2=WtYo6t[dHq9Lcc5*#ao4!:=4s7R@3+A$<?57r[/;ZH1pR:lY.#SGgI?8&d\\N74j+o=X,6LCq5\'l*K#o-%jk8^p/!lCSln@Z>gQ`:jR6pf\'kAc\\ZdaQgPi5:Gh:kU=^#OqHX`kB&=2OB[M62HrtbXrDpj,`-9SsZM:A(F]QHCZ?BT!T68;G$:1)/;^m0FoBCtWWk[s_enSp@nZ50g^1,>n^1o6cX,],Qh7RdIneU$OW^!u\\;%DpWM`7`(DI._Yb1)n_PXV]k2V_2Ia_<+phEnb8Zd?BteS*4iki+#!9\']Lkl:mN/J#VQk?G1[:&N?6!/$<*#B8V!s2)eSeXZ1BCmEFA@8mSN"EM&_i>?`a\\jdR>hg_A;ZQa`Q*X%pQV9.(gRH=H0EduShC_P:PfUZg1\'P.ttVEP(\\;<]=99b)W0,)I65j5IVC(IP5?8YH1X^]Q1^::>U-Kq79\':S@eW\\D81ok("Kb\'.MbI$!I"pJg>.on/@;`VNdoN%bI+0^0=E\\/nEbnuG]_hA?Zg+)V4*cmSW[Zak<t_-gNVFrWoVEhr:4-kSLf&V]reJo-e]HI8pT5jR6I,jfDO*Bf1o5Ddr6)VM6](PnMn4=8e9<T]f,2B=3>d%<qH5eD1TiU^3aX*Q)Z]Cb6uKn#=YVOeF%_+>BL_Z)cT-%C:QWY2r2&PU<iHiDpb)$Mfj5T[S@o_-d+@c"j^=J]-\\:\\FpUP2,[GdD:jdCaEsDbXMl-7M9erX2PVsR2oOJ6P:B\\h;3^eH>,u>^<ctD\'EPo<JGXLYg[4h4/raQ%5e_CJdtV/"Nm7C/O6Hf_2%M0U-i;`G/-iO!am@QX1%WOhLO\'X\\CB3_UPn)]^nXe!T(M*T&i/k%D1k)?+9ImR_a$(AaH^52f(s4l"K\'?LQ`HGCiFGpN"aQI5u6#3p@\\S6]^2(6u,MpL(:dBOR`[0SN@^5W2iu].QuD>.td`\\55JWn=5R]VA?WL1an8i*HSgGPDpg5/Ll2dfjo<Z;5qh"l\\9pq4^n<pdE"C[U`f\'F\'>\'_uXcJ"+X@,A>CT=<<OhboNMV&%rdHeN5s/N*KmRX.WO\
'iQ_MeU7K8FLA6^D7.qqrO+cE<Yt/QWQ.auIccJ7TDFl$Y;,Xsp$79Mh$!""ECk\':6Dkom"jr#T06RF2VB?UJ_\\R9m>;YtVl2s?U&!R<6Mk5S[8ebo@G>H]$62(]@qmnI9%DIBBBna;G3CN]-r=+(F_YYXUP%\\Qh-[eph"6.6g\'Tq2#0.X5c\\+DrWPC=uX!^4n!E`i-4HmuD$D5s)*WiT3<.t1Z4WC9u:o758fo=5_T\'B5_kDlM3(^.nX@V-[j\'pDA*Y/*idFbr\\sSd/jZ^N_N;D>r_O)3o?G7o.8C,^/OW7hE.RGNof&=C0#2"A_qE(Ot_::pY7kGHF#l\'^Vj4hZYcI((q#o"266AU=tTU[7r>s)eCnMKPph?*nL\\sLP6q9[;?1;#SEf+h>g[LDi;Ir4fLC;F^YW#7%j?Z"Rc>!"GDM^F,\'(2Hknu`2[!m-c:!p_-Q;UG"80(dfg$T`si$>$7D5ruN5ZpjnB0,YJ=^CWDgfNQ!;Hcs8QiUB*WgqNaV5;S07HIjLql&;*U2hI2dXB8l\\K=[P7cQ(h9k.<LT!o?2JNg*@&t%LQ\'UnKD:l161h%,6YbkR1^lcZ1&-(jU%U@ps*.8lE$]-9u2UYIWlGG8!C%oYJufq]N>#A,J8o7\\WkK(+%fTf:AC^1e\'D0hK/g/*36$5qo]XK_h/1P<iYVV(;4b;3NE,7Zb0G$5*jLXmU$pUpnd\'7]RjVUa<+6M-s=9s%<q6BMK.W@V/?OU_a.$I!r[t]VdX%6#<+Xe3*\'le5VQ=Z)RD,)#E+mW&.;-HL7)<+hJ-I.`/bJ#s4I9XK<-NEi]S.r\\%k/W#F2AAU?(J1KSI#\\i>&uo!M-"g=[8qh7@k0o!RY`h7Djb^qdR$a)uM]pc:%R@gQ]"Wb7g_)X=Pj"MM;g->MB0lE4u\'9D"J>n!(M>4!gW_21f*q>@6km<S92j_%4Jc38Su3h[0%*?#VHBZ#tTeHPl<P%/Dp[1\\f7gH[;FYN.r(0dd_,2?6`.G)/<L&d?D4@MS@]`;nDjWme*;s\';EYn+ki0*:F2B,!WU?3CWSGR;k[ipSkfsBo/#>cK5dHoN@ZAf\\gIAY9U7PC&f.aC(.P/W2)K5(Z\'(KHKVVFX*A;+uTeHL9c$^Jn6M!t>*3lcO&\\CFgQPQO.Rpk\\KL-F^eR%ueC2"suYJ//X1+T3U]!>s0F"5p6:O[\'(7.$OrT&&JpP#Kicq6(i<%km!ge=u<hf4ON<p/KRUr":6Mhb6Tqu/>qrPmahQ6!C@mkC!"#f[BNb-Oeb&,#0E031BVJcD/&aP`iFZ6=h1`LG,(<0;NrunaolE^p:C-^5sjcCWG&5_N5uPkdG0f2,\\35Tma"fN1ALQkFlW(BZBCsD.#AepRhJD^>#0PI1<B`\'9\'[)bn2V<GJE3Z;=+dU<"j3Ui*_J"8-Ra,\'meW5Lg_!-$[.6MDcj=^aqH8bYcqB&<"EO+:qDeV0rh+I#Bb_P9iV,`#p<f-2hs\\#M8E:k;S]3\\R6a!\'m2;o&/pG,4Q5NinKKR8;6Z<$]-[FaYLkPMG?)Ld_a%!jii-DU`0IkWRVi%"@2"UIg2%Is$HNQ\'U9]<0Sc;X$IgXo?=#.]nK&&&b:P\\F(d@FXifSE?>\\aKO`lr.Kk#O_X6e.\\qo)la*m>BKEarUnj1"<Mim2NH\\7h>9sn-(Jq#sRg8Fc#a0;;IN*59@8kjY$=aQ_%Fc9Q$RrVj"mKY0\'(ijL.BO2(o]^D^/o=t=aqW-,ZlaN.p?s$+0^iBuC:1O)@=>i^Wc;E8IJK[$IM;VCV0B\'TXc*WZnfB69r+o\\O^bhofC@f$uu\\\':u8.]p?qAs%SVo+N0T!0XEh\\$Y+C&3+\'YHth-r^4dI./c0_b.`kN4AJU\'X"Qd(unmSufY\'>,9eoaB+_6:I>D2,f:-SO#NlROj]9dU%W$F?u%DFj/,`&B4ZXM\'oE-;=N8qM_jpgltoo,V?^t$L3g5F
_Wj!poVasE;TbDeJJ5:LlF6Vk2ElDRd:M-C8\\Eo2-m<t^[5<olK!/mTRl$3S"hXe]_0h*3[$Rnc-AFaKgRTRriI++^*M)M(o(BIpH/<7\\@G9jRn(%/Z%uhT;Q-uANA7i=H2.*P)`-)XSi]H,[5;65/AH\'[(IFt8PUp8;>eG@Hg9n7_aJinqg83#@a`FPi:KXp@RqPiDnDq\\!o;..PU7;?0Q72PTqCVEG[*^L2^2iJW(M7Sj\\U!f#$Q#GIEjMl$3PLtm3*LI@9^4?@^q@-tK_Fe(iSbhj80jW3/sIf4rhAR9Y$\\?`Ui`/8FUurAG6Y;$9,0Q^%6aU,\'p_pZK5C^mC9*]UaUjd*hRZ>gENN1L\'ACR4%sBRO_e"Y[\'t1R\'TYD4RE8>/oCl)<L^_#7(D1TK^b*d/G*jNp[Y2=;uRGFMt.QHn2q!`C6HZqa0IuC<)mZ=^`OB>FX%AgiaRR(aEka\\W?>$eMWPaYJq__=?7F+tN`!IC@^p,U)C1tVnirX.p&^05iCEa*9b02QL4QXq0>h.^V*RZ=S>`+i*Xb(/bp!%cB<l32d;d^/U94*erMYMpo%Ct3M.-f&gd/%@;e\\j$/N@*QqSkn!BE_=KjkFA%*MHX3n!_R]sM\'"o#,r?fGpOV6p1<SJ3*Oen9fdCLCq9o6NH-B-fnW3P&Z[54Oam%H@@0d$\\b2^]rF7]&+-i[@dJFI.bDRGRU\'V+eW-@>RADgF("[@J]@-8]$;(gAM.9K_AO,<e_fs?Al?$/aNR\\\'bETdd8oCS8^FAI>W\'7\\Ho:\\f&@e1!-Rf<;cu4fFBte3SRcS=FY<DNEkH_eFG,[::6JEb^>:e6Z7ncrF6OAd2>HaS?pn=<jSV;;$h:Ff9bUsJ)<pNC0e>Js.]FMri!/<BGpK9Il%j9soVrkMEh_6oZFh8Tn5)O[C4[Q+7e;O&Le$?=-:0kKU1uPW^G1LY7b,h$\'g?I.&WEb\'t#D*$9d/Au1+J/eWEin]SbpCn2#%Wf\'s7-$o[CPbb\\X87H/stUn,T.DQP@A$"P`"J_I9!4>g=8G5JsTRr?8XE&Ql\\p2:CT$M<HBJ>15REJ,kE-s+tt(r/Q1#q/\'oAO7!p^(`_:G/*#0Z.1orq^`:s+oJ$2P)mr8oT<K)]/BXg%`L8ro8n]5-!P$#FK7X(KEjUGJ!m+q-Hf^sj.ii.(8>kQme"(]jo(8_,*_7.[C43k_69?!_(RAcEjC>MT^PC1L4RnfMZa[9OZ6QI%F6cG!N;ioGI#da0(bp0ho_F$cP-)-1$JR8p)q%Lmd/X$FdV9iKB=p=3C$]r2QYU*Jlr4\'&21(p8MY9]P#d[3a;Q@+ObO\\dL8NMqH.fRo4n779#j;PT:ik66/s\'o@<CXO6Zb&V6L=2"rd:PLq*pLEedb9CK>i[i:6%9XU(X,sc;1R&m-_Wf,([h-e[8;Be/Q[i\\%3FD_E4N`*"1<c:AW_.VbnI@`&l4@=e2,rR]hVY/3f34"\\P7rBe]0K_LjVpmIn0MB2R$)(*])*n7@5]K&I@iB8aoTuGi9Gu94mQT[3&/&[q<@Cf6X,RJfnc/@6f?TJb60U[q-&eE<*V9mjC\';YOB,\'-NT&(f5hs(B;?87d@pCi!R(bB&A_NZ^9"Hs159-f4"q/\'])/eXqt)+,PjT?\\sHJKTF[N)Y?fr)?L[<n;`^2O8fX^X*ehOd+DRg-bQ$Tj#3+EWVL5p2&h3Yl(cV,>=dpcq$nK&F+;iID0\\^(uR0%,QT/n,\'bp\\Cr(FZ%gE>jnYHZ.<7>G&\'BJA(SJ!B7qOVV!"bY=)UQdgh)@hM=H>[^C_qfH3q-1=n[HDI<KMl$f%kp<pNB=r^jo;Ah7&6duU\\LZdO&?KYVj585)Tu"!-*NkVm1&anBib"VNQ*[4([--dOVZ6HP\\BapWT>D=rMa18X"W-&5ftfGFW8s0\\!=rMI5`Cc)ic/&id$\\af?A)Kg=NQ\\I?\\5pEa\\kRe?6&a
N6ihrTrf?5)YHQa_gq_iMI:t1Ok\'7mUDDt<Rauq][LBp#CJV1,7c49?eNVcf8dAN\\:dVo%VNhurBHA[9lX4ZcShDUu0OH*[e7_ed!fQ?>Ph%"-0X1KSb0VJGX?R)ob&G=q;HYVpLn7+?j0^#R27a+3j/[if%QCQ>]=l_3B3Rm9`ZnIC&WmN_TEZ<Ch7Z&uS>6(&MTDN_i@#eF^)X,2nEB.Q)3PVZ[e9l[WJ9p]a`J_&V6!4nYGFX*CE.1#`i4`\'6&sMds4.`Ij?t\\!)/^H(%Y]?.8oQ1<.^J\\q5=t.%jM(%(&:IO4Th#ho47+-No_kMVF8Qg8fRl+^V^2NgW*p/6Ps\\d9_)U.:hjp,RiV/!&IV;pI*p0A_Bgia"QXDC)./&Z&\'-*e1I7P_pWQFIhq+69QA5$Yn[`=$\\:laElgqO,T<VEkYraGgtE#eY(neDJ=mtW_oG6A62S\\-!\\<L,J;o2>ZcMajbg"Q?#XL`dC7DfEQP5,"W-ImRiJ;/qEK.DZ<>T4aE`m1Y?oT8qbLhW8UCHD",2`jidYT2<&48#Bi^-5]2n#p,ef:YegV#:RnQRMAGg>SFL8NqgX$6H2"H+MgI_SI>u4O&3HL#*7+l8?>Mu2OL.;EI_-0FaD2.Y0T"$&3_U.:fo)"fbb&C08!/n$r1]2GX+S@Q1T"T>!WJ3\\jr_o$#lW`InaC8"UU4YH-+lm$6Sgr$!WS;Zc]\\0lFML`P`5_c$P=F$/H:a;T1eRdQa&UO+ZZn:GZ%IQQ$hn^"][(f4(I(s=0\'c&_"ELPWeeF;>n>Z+M39YX@i[3UG!2H^5MgZ&0?,<7%^Blr64N9?qcOs+ZhD1W<pFlL5p?G1X`ium1hh4&(I8$o8uQNCm\\cN:b<f%+_P)KcEh+tL4D),G6CuZl25`aY\'O\'Zr/2,Uc#FTQ@L[1J<<5+86>[OmD=N6eXoC\\r&0/BFjaXh9bXK1aqKgi&d0-K(-Hb4A?i!%q^&G"m4P/V;X0Qu3j<>l;?27uN>(5ZoB,@N]-ifa3+p]P@J5al1X#7meidlA"[3o`V8<+;MV\\KWpJM?Jog[7uW/J0qh!:jk[scJal@BFp@>lPA4>#!<&i9"nMIlsP?mWJ^d<]aJntSf&>-oOBiRkF3td!c%,$ZE9[\\iV=hCO\'h30m*3:jqsClmZbMb)<kn:mSM/[Y$TpuIoF9gEem/Pd)%8gP:Hmce%&MNoJUYK7">8X&h!a;1`Yt.TM@>&kE#)*N\'%A/\'c@)7)/_!i\\ga[2_AO(:<<+Wnj+HG@F10K-oF<AE^n)=EJZCR;Y"bPo\\VLpVWaUj9a019WR<0H%W"`1!HK$m6SB.dN%eP9""pnh7o,(oFu>fLdZ#82p:21(Fj772(*o<lFTb:P!3/_9+3kdGaQ/eFVFXq@IoWo^*F+qZfgG.%c=fpN>EB^;L0[@LVl$4r/e_t@&9XsL[bl"Haq8X@EQmVtfB0%7]#F?:T)#5MMchD78(SDsMm-*gLU9[\'-f4)0<O#+Y+SfsH)B)!2h^?`pta;=[5%nLTdGI>@;]gF1[lC"q^(drp<h1HW4QmK0M1h<?t/;3u2YGIaj+kNr?#=d\\0K@=R"1W+U9_\'f4K;70q!-\\X1^=ZoDX77lG_-b`bR>5ok-cj7:4fI(`tS_HF1Pq0?1O-%r9N\\(-i"lJ5"@pkKI&bP;F1oPk(`XK84kk-Vd5OH:XcnnMLSYe:C$,:John286^Yhc0SY[2%6C8`"%USZV\'Pt)._?ctn6OJ31\\lB\'ju?]d5.[60;9Tit\'DMT_6&ptGs)"bg#I_s`,5P5T\'`#qMF3ju+Xl<MM34Xd,fs-<eY$ZN<ba-C$Tdp?O/BqZ0^Za[f&DrUs17-F\\/KqU%FpNNqA!YrC5B!WN:bfF(hR%G1Xi9oeB8-jG)uM+\'U3mc%\\M]M\'i,C1kWB.B;Qe\'=\\WgHi=T*"\'c\'KY^(2S#\'T0i>F8C$9S,N)NFsC"&NB8+[1
Ut?]k%>dX&+nV;Gfd9\\]0AAA]oTa(oD\'1bXjQp90q:c1=8VCCp=L@o=>Gk/^QimVI6=eX=t7Zl^%0<"5-seJ:F1MBpBH"QB;c[_ilXl]L-5C6jm,@\'N5WYI1hlB`Mg\'irl?\\CE>Zg?;>iID<@UH(8GRCfY`TpTmpIua=t7E3M[HmeaZSo0mC]CT$9nN]efUrT]huu;&a8E.k+Q\'MIX=YTi%<ITpMp#4l#Lo4%qbtdMB*Uc<W^Oj&ZpYtUc(V#1NlE(k^h!=kGFN;3CpC)Ee*4!%6DrCR(FX1MB[WD`]2`_[Z"%gl_cf95[@-rYL)Y#kEWGV8F@H?8]@JC9/^Fu/n,0T22[5\\)=t2=mT]@,n:u[>k1>$M,4K:f9p;fAUED@aI](L\\_C@d#mf$-i[[>Febd`__btKi8B+o@-KMKrdfQ2sb\\J,HO9iR:YP.8Z&q^#M81ArARir4.LkARHiS@K]E<%Ii6B%V\\!cdbiRV!(:(ZQ^KTIeql#D6g9qGjQ@(CBo%[ZI7/>cbP4oWtfl$>n5AAc6KGCS*h`_&?;?.o[nOd.kK%MXu1J_<CkdDer8I5HBC`h`5o5^(L/Mn(uhH\'*NsTj/f)2@^7\'D*ej:DBFChQ)l`8r<o5ZR-oM<hM90*u$#<BH%8Y28Hd?$tuYK.$De[QPn@cdc/QWN)aKCNP\'`6liC]8UU*&_k=[`]/$3Oc!AY]6$lJB^Dal_,h-7-dWY@*o]8Tn]3l*5V*Fg*`Og+O/2[uI>=MGSr30s&\\?Z07buN:BRXTI5a]?.pY&b!5qju\\fK\'o0\'rb%(B\\[eu@:?RX\\dn\'`)h\'f*@7j*"![eJcH$D9RZD5p1BIk5Zi`InO1!=_"NSe<[+dD)QRg2@4;Lu9Q4CIo\\MFn^Qg!Z5l-%Z7cVqH;@J;`,t.iBTM:2BK;&n^ne6"cA#RW&PkLa6d\';O]/4.pIj\\mOKV`i+o5N^Zh=Z/s:W?jBXiAOA,?=Co)ck&-%D,VKmieJq;UTSY=OAV7*p\'aK%V!f?,rD#+8)Lg/M`sl!9;p@#_qn$b%@W0-_(`G]-`0`?+cG?pm0q9otmFrJ\\+gLi.;c,5Jc0Ot8b/%[_]_Y+;#pptV.&M_A=tgc7?.igO;9j?uI2s"g<4-_^QIj/]lT)KBY+p@/;uZj3@Zq5Vn0MKYFSJ[m^_f(Q\\b@Vo,PeP]Y;,NM2#;lENML<J)U[^m-AEHCsW3g35l;Jb,5^.\'nG9EqtmPZEU!JM7LIi,Nr%>.Qt%RaiW\\"BV#D>OoB`(pu\\B5_i^9H_":W"(8KuB6\'5ef@k)<G/-q%D)pW_Q-K_)_Og;jS\'"5g(PV`iD2RTYTY26\\/X)b:cnkTYIYR,B]T;mq6"CEQZBb(,/HSR!)7?g,kqE?OT1gb(?V#$F:m0AA8gR"\'NE2K@VGZou>5JXhhH9#\'cJSD,^IP*2^6+Jkd?X\\_I3Th`K9hWjo/FrlH7t<gd\\!6*%&Z%N\'E[^m^`_"\\bFi,HZBQjUA@D>d>c<UP\\ZJ^(rZDl?<rt@9T_r`UEpdXHnFm(*ej)dIVcn@LMKsC9;(>Nebb8rgE:NJkjYW8!>`]>>JikQM5K@mW#!Lbr+/YLum!M]>2PRZ\\TXR`[Jt[hoaOiEkK*%#u?dM<>L@<#QCa:FEd>!qD"b0MDZ9-6Vr4Si_mgBK-ZW&jH?.<&)\\GpoS,m+1Q/TYE$g76o)/$MWe?\\_>O#N:G/216rN.>JXg7HpFQohWs8f=(+S\'8r]e7q-ITSKpX&b)l7Q,c[^r4lKZPD/O1]C7A+-C)V&^QAD:MZR8EieTRCkTDC]1+*2T<U>60iBbobs-u#6En=\'4rnkGYD/AaJ[U\'YtOGc8O7[o;a<2V:*B?\'D,,D`/fYFu"/jF^YQB-GHj/Xeut@m-Y,R:$ah&rWEl6`s@5fYs)cog-(\\*K6c5o7p99?qr6b?)cnZ+E0JQB*U-oDINFQ"(,H%XWUBe<i>Im
DO?\'QFK99d$?h`/^A.5e[42K@Zm#==*Ves>N&e"GTGc<`@<Ob74,AhhONb$qomQ,P+.+)C/XZGn=m)!b8hbO7p#!cCL<9W6?l-<*62&G7?8;rWXF6X=[EF$mOUt:"\\oj.pmqkZ-L`0-btDK1JW)O:Mk!e6\':)l(4$G?<(0RnU-4@mGs6CF8%\\<;+$MBi&B7KS2Z-=`TX*J9<P-H4SC!:+]F;.D#t:h0Ph5^OogJRFam(9/L5=!dHm_#Y-!2M;]Z++^U.Q9+7-sCpbN_114NJai"gUOPEIe/Np$%gr[I0>FT@#ZAt:m_jl)%>F/F47q4gMW3o`_p3O)4h#Ag,8]7&P"TGG=$pPS1Z.#b(UrHOKrJtHYqK\\Sd+]k*?B7*dQJuDquRO47O2X>HED(Qe@@f.ep]3ZG-Vgg+`JZYVCACdSAA6i/NA`sC^?gHuM-ab2Um\'P&jT=J(+G$(3ZC@S_A3(O$GGI>[[<%s&[D-ljOoLeGDoVUAMqg#\'jRepoVjtjtXfhXcIbi3P.n<Deg[EjU"I\'N2jVKE&O/]"Tj6o?\\7Zc"/k;BCJ]HuEMWfLfX(&^n"VGlu39RIW\\#r<9&rWE-TMRSAq5<!FtJ7*1,H-NU]o*FIph%*:^54,3A$E;=oerW&9LN/.@mE9UI?(^9mn?(1X36LuqWZ\\g1<_B4[:pM2JrrYO>)RXk>mgSai(iW>oi-WhB;/,?_m3-",e8*s[!bTGr7#D1F#.sU89Smh9AcWC+rG1B9&B9+4#_t3HV.\']7QER2pY2rBP:pZ\'+olCZ,Y(NB0b>^d<PhbYLlr3qrQA!Q,3a6;j!r*St2mC&)=,BPX`VaJEs*@R`3WDr$7Q0FO)b4bS,ZDjXoB>u_`KTga\\-Kj9`FGhjmp7F1e5U702L]T@Yn&M[U5R,VBT72%[cs))96+l)L?EJ17*Ff\'BjsAXLfA2ng(h@Qh\'H^G%%Ao^sf-(GcmU%BmXL\\sKqeG%I8=8;H_d;h1n<SDFGb;nh3Q<``&40doqe9;Yo*ERbXbt(Xl2*+4Gt2Zj@OFD7cd7$8lTb%@)>*o.[G.D#f#r$.H^7t&<\'(]\'e<qL0;X?r@()_:k,9HDud+ob*Vr*GsE=C5fq"p6<\'E(lc.p]`C11f]E@9RYkoK,$l?-T\'qS^TqTUQbg=42oK,H$UO,kPaB?h!LV<H9OmGdbV(a\'^0h4T&ot=^2X*qJ%0uTO16AT&X-2cJ29GNMj3h53%:^QA[!m3*egqP4ld#4L,267kFW<oI4j<k4mWSUM,RC5H/?`Hg*=lKV*%<uf>LVI/>\\;_Vo:[mc<b-:]m1F-;=/ejNp\\XYc`PjsSHG.0M^n2\\<c*apS*t(/WKa@b)\\X.hbZj=8h3)B.(rbJnVYB#+Hs#L\'+]S\\]<S7mZQWkuQUOFCp0,jO/1]qkNq1/DG3H(@7!&K"6kns,\'p*T,4$*7CAH!V6Qlp>?q]=3oc+QTr7ls`mJn(3`<#Q7Xac.fnXNHUV+j^sj&7S9Rp60OI*ou0AGn\\\'+?<%TcRg.>aq#e.=;5"*u4MiSsTO.QFYO"f_=0\\(cqWYsZG0K2LFN,$DJ0;?En#C<*frDQ=B\'FH^QWkb%!p9i7I?H(bbTh*f6=*5?N>Pc>>a(3OuN0^`3BI-KPp-k-K9p/^F\'UDFuhk.W^X,pRNgg)#P@m[`kV`#9Q!n/D2EF,)goE2,\\_s3#b%hfB\\dTbB04@R01<f^5\'nj\\LY?t/eGjo&N#&0rcSRi69?C$CLN&nquYPTU]iS##2+AruNZ8b,e0"!T1<%CS$CYmIroZSb9/>HT^K&&Gf2f"t7a-cg07e!grjHD+l;]<-H?j7cK?4M;aEntGnrDidINWU>2"0i64-GE$2$\'E;Wic>L9%q@8jN#HGO\'8>Bhd49kB!5b8:5>U/K&/:snXL).af+)a+b,1Gs_&FK[J))MRG]M\'[CiO0^s@@>2oO+"DHq%9p_\\D*(+ab\\AV8
Y;)8Hr,YOa0mmMG;mG0f<s7MgaqjrDH9?rl7mOVSLN0*Nm$,1]!pY+;bghRNN)Vbj:^b8.D(0]nMd=W2lQ;rmf/46r^U;pf2f*^5RO&aF)E)\'F=OcXJ4?qng"Ut9WccsZ[%1B@HmuF.6e8(!XB$dV1_Tgi\\F:hZUZ=%Q0)0.)-sH1"+q)VrY=OAq)Ad$EHHEFUoEmPE`\'\'\\BAgI@2`Y1Ij]i*q=l9>1&[Iq*qhs$\'S9ZhCNk0MdOS`[/I.+_MOrO,j9pZYb*i\\M$lAf!e&i0"!R/Ta<Z=AHf[;N9mRSt4^-geWE0l2g`M]!P\'>8a@N_CSCDTb\'.*-%dlBQ0gJm!2!m!"qQ_0c%IDsZ][W[`Jc2$A.O6AYFbhuR/s,FbOKo[.[E`+&!5%`fZ_b>"(9JrT),TO;\\Q(,GPU/X*$5!kp46\'Nf-FCK\\Ts^\'XeHY`8C9/eo%l57=)0e0JM^?q=\'Xks;lg@iQcBORn%$KsFqC,Sqh>+;33Bu:^e(EDKV>coU6!\';.AB/mBgI%?^3I?krh%rZ6W8tj^SY]M9\\3.GH"f]\\\'I$jA69$<8f\\+nf6X[[$<USoor12@*ePV<EoQ#])`D[N8fh&]Gr[:-4VmF1DGg:t%NY\\9=l2S5roe*93!LIUJtKC"$BIDm9RS2p4,\\ub&\'L=o9oD:iee]s#..gZf6R!I^6(3?K/037G>4-tT2o>kB`Ug/%0,g$Q/-0^tYF([pC1[5EpQWFPRLJDUlZ^W2/=#&-g;l$Gc`cdr)\'Ln3sgK44ED^?3.)A(D_1qN-R=K0cb4mPs[NO>\'i_)nB@D2aEB9T&335n%`sub5Sohp`eN4+e8H#.gianFB]+qoqlDg#<5bB5rC)q#H>+X\\(1-&gM4lb26kmb>["hukk&Q3ia3VAo:T*u_="1fM1*4S+NIiWV*7b;3\\qB6,keWd<9D5H*Y5Zu.*#G&Xt%=+s&NLe3\'.@L1fM\'j)uEhEJh`k%KA<JZTt;XCS9PgrFnRCZ\',/\\scppWVW;-KJDYAWKn/3q[eTuW]hI7e\'PF\\/0`(c5`(>QLaFY%NrQK\'Vl`K"lu?[qt?[qCNgK$.F%FsCCSk.u`aKMAlU:3`_iJMhCDIWE;@-71(B]Qgi0fV,[;P.M]O[9LS0C1QLT%[V),162X\']-M`$(3thQ\'Cp"rL"RN%VGLDWGssde2iB2/(VPQr;a]*_eq7jQ7\'=AWY$JJq=kC3")`R&)T1C3=Ej;\\#JF&XC6Y\\";f7"CI&aZaM[$Z3C04C\'2-#J8Ofl.<,Zpj"KC"`k0K@?YMi_<eb-Xok1\'//Kmh7o,30_;U:M::mZ<ZlL)%\\?L.U/q:QEqL!+lL]iH[-\\!)pM6,Ohr7hVnRC[`^II=h4gk$?ZF>^a4`an_4"=?b54#o]Sd@&u5RN@rg6D>tbH(C*&4**!(*!h/,<JPjA956m\'pFO<Fs#O,o0C0*\\6m;TIm?)t%d+;/Z)B=-\'_/+W0l1W`Vqi?;"S]hm-.KF,.N4*JmZ3pWXri=Ad"L?.Z[.oC/2R4Z$0`e%Ui%k.@[71H=ak4c(<\\=M06c2ps*B\\!p[_4b3<#J[Yg#UqSnP#^&Pa-c7WqGU<-.KtVsC(ZPm)q"@eZ4mllo%!-bB4983(!+3Y].0msZ4<S)on:&SjbWCJq%N0H!Z#.D6;(rkRgA\\e:#&WHm@TQ<,kgL)%]GG"]uaH-MTL\\=U@7%D-%NJS<GT_%[TjN0.OY>PP"*3)<00Y>Dj`c%tZEG3[NOaHU<,D<sWFkDOVAiGK^rZF4\'58/sU2EI);fH]QK%65rsMMhQ_a]lW,SZ%#>r\'+!%u"Z]bZ"OQ>JM\'O\\WI2c&3]c/TiF]Dio<UQ"O#8\'%a,Y\'lG##)\\BEW(Z?ip25=LmNsJ?dtCk%OFM\'VXtauj;-bCTkhm#;[T#IH.2CATtHLtY!?p%>IbLhp2cq7P`sPD&4nU*5>golO=*o:V^!Q
#<?>OC.!8n:0&Y-NETl45/d9o]K?\'*DVO(u7:QUN8?lA6bM*b-Yb0r-d8WS4%"/*7sBjnp&*g,@0>\'oINJF<^%D<kQ\'PhtnI:__u]k\\CXI:VTD!ARJ@[qE4uQ3MlXs/J[Uq/o53)WJsch50io#[/u=_^HW8<`q6E0BMT4/P?ZBm$CsAd`)[/C/ocQlLo_W,*H"N/k$LB,g!fkl`5IW&6VJoa^I*;oZ0!<:YMafF^(]6X!7l.X`B(hgj+OLKg"F0l^nO4V9Q\'pQ1MrZiZgoWRi4j<M=_p9NXgUK*$(`bT_uq%+XU1>."^2"DJ`grNAS[c66qKYO?nEk[esVt7D_T8f]s<-#BE#0:N-inZ\\BmmUA)[:r"!K,)5DT\\jE0fC14%+.K3h)n#BE8`@,IGjppEai:aQ/&SLSR"*EXM"gn7au)MA.[07&F=NPei;LpYq;eDh!D_gUM&9ZeP\\hYi)r*Oh0PVF?7g^"obZq7Mq:g6pS*-eE]qH@gF:.Z4RDJ^9Y\'\'rju9i@\\rXA,Yts"25J!1Jak\\n;j&Rk<=@d&c?:L,+mU89ZSP8E*`5sMgM$s:"9U`*Fs#gQV(R!HV.)Z_WO&otN2*Yu\\6m_;1]n_X/)b8<MHiM&hAJ)&0Y9LoFl)rq_XeWTNhUMQ\'MUq)WeK`W?ZUTr!GreSO7gP,D6,s>-m>J<Hkl3O9?4Iep4Z&Al\\oqtF%KiiT43",fJSDKA]&p[Phso7>EH:)OL/R3T:R<R`$BUrpk_7*,YA+Ga]d<9679!Jo18\'cA[*KMUdZEh&M$q_>3g0)6FiXdROi]W@<FF4@iqr%Ko@%fAr7ok$dp95_`Phr7PQ4"LbIe/(p$#tMGS`g"Ri.*pLD\'_f3*fEh:q0>a\\)T*`Vf-5k>qkeO]I<*BT+OAc/G5bZ-s*)lm?q[.iiZ>V$]*8-)Q7_#q]-DKgmR&^]^M5b6V,N+%HJ"W[&FD-Xn5i%hWMXrk&jhR*Z`;N<^+DHs<klfApec)V\\kf1U[V8MGVq9UUs#.mbrcH5^5UGinCJZG[/;L*^1U.jc3J:F_5r0hUh/2OQIdJ:6mB,V61Rp\'=]tlCseY(<j2\'^Z2t1V\'q/9s\\Y"nD93YVGbl\'smrUMSrdm!WnDsCW_XMLqGd-M`\\a6M$8Fk[m!b]j&#"F/Z::HWP;N>IX4(CKhpB]d+1FO(rORuES!$RXgOS!n-A>uKNJH-_+r-%_q_%6l!YghK$C<K[Aj_:Uc_@=Y)H.KLHOeiYADM^ZDU_W4,#_AV@dLaIf+!TW]!2+#h`@g\'Pp)T!6<:51$t1BHp=Qs$QbR\'!K\\VVfk73H0c!`m3Qck`OJm$=@mR*Dm\'oLV>dWb=\':f[RD\'mr?cUEa@!f9L+5@K!3A!gVrZ06lAg;<*RGY"=8u%ap_<(Aa7!BIgfE`W:Q5B_gVO+CPn?_9#dA5u``NkDB*-Y(WKdfi>-]rkA7>+<m8a3=R.emY9h6XRfR?s2+e9tPRHE.W-G0jXHq0M_gGPn+7!lRkDJu/hK%a?n?fo=+od-HQFP7%)^!$.n%MXHR0D3tsJE-i%\\eU/s/&jqY/m?MupfjB#hc"d"^+=c2hJqOWEQ`5rDg0I_6`-(r\'6?(hH7OIE>AZfl-/<7c0KP3GD\'Y3u=ff+BrS=\\6[6qqGTY5jrQ\']\'\'7Skq-F\\-9R&a818F/Q/KkpSi:#/>h:H:e8<C@CD5")iigo5c\\4q.Y.IEDDG+OoC*m:auZ>mE:GXdSWnQWHE\\W-$+%-h5+ojePtfrr9Fui#D\'lsRSl\\*;;EHr3rb2S]4J8Y,*cSrB9q+>Fr3NZYZc(K?gaV7"T@Tp8$0SC^C!05K"E:6oEAC^S&ZtMLV*Zf_6DFLD\\>nuOR0r,=0"8V[k^65aAJuG5sakrBS^#pmH(VWei7:a83J+f3\'dS:M#k#Ze]0]H[^AU=KCA_Dl7;ToDFFj@T14nLH:%0b1aC]KVs*[uHRuD]`W73
7;U"Z$-t4+rIZo1!hM:I<bI+sYC0BoI+"H3Kj]Y2*Ubs5(,0^d0?4QVTjV?,gbs^X32Qo3M-3$a3?s#n@.Wqf7anT+C4a:MgjN*;)d4;EE?Fb1Db%46BlrhrDbWL+8H"cuM_jF+pS&;g*h=7.TF/2PbXt&k\'r"/I\\*&[$*hkke+B31>?,`im:Fp7-?(,?Nj<?i5!UY^HV8idej]7/;]0&<b<iVmDEUp<UsT^3P+Vr^!RN-)hAA/cTL/;:(Q=M&48Y9r=sYQN)CCqWFH*@NVi5gdcG\'Un-i2"9/@Ot[PuLK!/^C9_if7@]O;REX9CLRZ^C9tFcrZm!SadnRtLVFFca1m25Tp;fNLHY\\W/Jf@(4e!I\\OA%!=<IS&4d!E(Be1.g\\JKT%Ue,%Hgb=PTq_=3.t3/drAp.?Zq)7/e_f,gX+A,hsIhR7<=d;+!G]4qiZC`^\'rqXQqT/.S)?\'p.gta>6cq4$GosXr\\SnkAnILcoQ4$DE1s4aRW%C3&R%`0,*F7>a1(rp`1+chgGI2])mZ6H/-F6>l,nZ:\'u_/(kJN\\=_s/g#47LEM</.[DLe\'HKQnDupJ>ml>iXZ\'5V1iL;Y3Y$U415&-1Cq[m\'`HO)ESuL:i*Cl;"#5I;qr/FtrA6kXIR"](K)9bCPYNm@mHKO#3K29NEo9RNg*oDA[M"QuiBECeTfD+pYso^\\<f%\\Mc.u%2=\'OF+)m8DiYV_uog2uCGi2"HgKY=8s>riQ=`a`aC^/8hnE1q.]([NYhCS#H[eR_tV]SC]L/XKQ;HAHQI1nrf1R\\=m=]>Sg]^<VLhBNs_GLphTDc7%LX$N)if5mHkMfFk5;/++;79Ju(+\'Bi_&fS#LW6],DH<uf=8Yo.1n],>7hjFQN:".Vc4gT;;*-&,a"dqUhW[C(^D`<b-el-oX3&arB2n3Da1e+U[G7!G?6e.93g^)"=2_*]=YND:YX_[M6I68:gVC\\J#W@Kdo\'a]6YL4CVa"iCJN\\:V6cOYI^H,>3[E-JR4LJL,_G%.n/n?"K6%.\'@,"\\qp9Dih[>;+XXm[`;^%e*(K)n<p"BQ..A8C6mVhu:YtHV,J=#M3K?Elmi(Zm[3CtKdS&Nj3\\.;]"&T0D4p\'"T/O9p9:0m@\\k^1q\\Jch_@U7mig1iTOJ`g!<E=%`Fe^mO?1=?Vf#^p>sK]2_@9^E_dm5Gi6M[l%D?ZdusYAR?92DVAf1_\'=ZX#gYB^?h;09b79gWC-fN-o!R\\od>V@YnS\'Y^o48dpg(N.Fl.\'QK46<`RZ[X3T-A+9Ci\'2$d,Oh4h-]M4EP7__+t\\lsR/5rOO/).\'Ku<9kU18MVaW`YeI5XcO5f+\\qn(*8"[\\lu6G+s+L+n4L1G*Be>=S)Yie;o5^]+@D\'<t0I_XU@TBll.Eo<;.8IJGnqudt/+RE84K!q:FMsLCFB#lsHW91(K,n$!PU\\Mk3\'1-"kh?-fJ>ccA+fG[pk_[fmN?`EZ06[(qE96G*+rPPe3N="aLlu2<_VhK:p*oddNH$S/dGtjW=NEbbb\'XK)OAd=n[,03r/u"%SAk4@V]>>Zt6E,TsL*84En^CTNQ\'+*O-8u^9IDVl5+lgIXZu"0=W&\\.Q!dS3\'ln5@,(uI)MOk<*odj=@ib`/.BTsg,nQ.eK:4#1B)[!o7P99&Fn0VLa@mHR!i!D3rb&K<g)AS!t!9XEoEPL4pbB:kcY^jH-j!$&HZ=qWm8]>,I_ji(18Rbn-i*-FBK=KRp;$!XA]^I]-D;[cl03NF.af=e8?<`b#&*D&>C"JPZU$Qq`_%Rl]r\'kE-I+_FV(\\p""<hLYKe*ST3@pdg0"C)l"^@to)*j^"dOrW30hU./)&ENkQ`c6.[p9<`*3]=1>`e"h%`^hWE?mS!Do$6NsS3:aL,N)m7S7jM+_(s+rp\'#^:Q6e[n<;I2H#[M2\'mU%f$]q\\"=pr8ul:)%Y/bReIcn"VTRBjB3ZnCJChJ+OEfc
22QTQJ-<9cNU&mm^?V9KYN\\?=^6T56p_ou<VeUN<!Koc,ASM<PF-_lt3_Jd/D+Vm0hsB&=\'>UM[R;dZ27HcBR?!CND;]Q2;%,a.Q+sN-mMieEg%)T!@@+M%d7Dn:j7N=t6bB+POF*F"KR29dI3QL\\2j6m_->Hf4gSJQ9n_srZ9/$HVVmWZEmVE$8?\'"5Z*;fjZqT7Iiq7i3,3r^cU1*IQXF\'$LX[=c@0DFt<ng]BqH,a[hZ8`%3hn$FhaZ<W>hKPi&mTr#<B9@7#Q/IB,1eG(tM-p:6M37^nMp"hjmi,OuUD@1S_nYrVKUQI?EN"VFS,!-g>ZD3?]6:u!!t9l>hU1?&XrK9Z()!Zuh@`j=Ecf\'=e\\[K\'[W5&C1\\_r4_r.G\'FnQ4YaN)[,eZ#kS<\'4o5mV[\'0ts<VR!2+MAouHa7XVQAYc=^`Wqm=`lqqEh;RqB[\'A=g2R2De8j"9;0_Ro`Bo1;IgL@LE)qh\\=M$rYCBiFpe+QN49+oj^><AR`j&hNmK6iEc%=i_]_)t(XqkHFo#+tQ\\9p-m_>:B6`Y2Tc$J7"tN\\!>W+l_fAokh2bcl"?eFT?%\\<W6#2XjL>ocQbJe\'O&cU]h^.XqBh"QgQsP,tQ@:L"(hgGpRb)ep4c5eP]?lHbbIUs!$G,Q5dM)!:j/4H=K2YlrS-Em8iF<8.\\(!qJZZrE-&)m(fXK9Xan`\'1pDejdZ[7?^5S<[*(6OL6k,/a8(90FiNfiM%h"G8NSj(=Aa]A)m\\N2]d3$">DCgH9GnbO/#$Y!XYWF#5+4nLWq=Jh.mq-paWj2!cIs!Cjj_klu]k%6X$X5Gn!i@c9JnKZm7;;FV%T3LplajqLN2P+buo1;WO7GL$7Z\\4.O\'4`5Gp5GO4ie?1t\'+Dc&A@*%B%%\\32A_K`t[RMCtu%#,"nq*\'N<e-OPc?)fKm+crRGR?mK!h2q-%\'l>/t4Ks<CUqpH\'42XR%\\2^%SB^?`(X(jWl7A>6:11HUH/McDU2QCG\'Ij0a`A.31a>:A]Z>;VB8`/Ic_h;""n`4e#N24"e/0nkSqhjbT;IGW+*/)=$>.ajOp#4j]`n!ea"/7Q/8)T*s>!m:f%oQ8L9\'nk8N@TJIu-qVGV_ChT<&GbY^N.hqR0+f9L,":ZL)=?\\0c9^0d0K2*8[\\V?ZVt-TELCFg6+YO:*U<M$3d@EQCnep:O3XJT`/No2%h<\\&O,c2<BVp3TS42G1.$o"Z!-.p;@DR*uHrObgtE<%=[DB)!-lK3(-cd3D\\"/8?O$2%K3o1&lR9ZLSJR$MJ,*bna90V)fD_%)DqFtRo*a42rnna%[En(DK0<>mr$fDgAQM([*TPP6\\%%mB.\\K!^;=mJ6A.#!Q)SCkVu[p%u[i@3bAU(gj]tbr#X?526$m]Hl:qq*)ptZLfQ9F^#,3%@:jjjO5hI8\\6hc(mY(7%WebL=4/j?3\'7^0M>J8iiYkc9($0m!FN1\'i+iTN^/#?gtno2)b]2s\\cKb$2\\F4X/<ksimW=MX]UbHATdW`ncc_huk<_E,\'"c,k;Y(BX=k/+SkgGeZ]Pn(9.87QP)%#RP?h^?i\\Ebd3p]Detbuj&qZjY[`]3Wh&DCj?0;/5"^tJUiBEJhf=:=TE(uJSVX*Z1DFV!NldSs[4\'TP5XN21fL"EEgK<X*mDeRu?\\V`9/`-"-gN8%`_\\C&2&C6VVr&&IW;CaOm49UnEWJt/]-;>0*0G7!J#(ej,c77s!FLc%0$ZSh4:!=/UOs#5=A"7.K9D\\e@O:3>i/AiXGoYq/],Ne;+pWLL!iVVck%jA:iF\'JLp;>.WOgp\\+nkTV%</B;HM0SjQ//e]^NWB?FP<;&9kMX\'QX8%h[#*G$9%hOa\'ib/cRjQ+K`uRR<!=Q2ue\\5i9ku]VS@o3ufIW(uq\'?([<M$;*``0/=>=FLE7scUr%l.(5uSd1`1PKD\\Em\'`r7GWqM^9fXi>\\_G,F:jD/0
NW3f9?.P,rbsAC$3g"o#kdn-;\\79u,XB9FEWH"9MW-J=DnrF+<E))uXc@#[OH8;P>lb_rcO+\'X+\'N&gdPc8^P4#J)F$ueD+]`Y`#N?PZBVQAi^!<\\4q=IG\\H9sClTm[g+;)cQ,Od!UtP33Rc33!_p0@jjrs\\-cJ\\6r0\'nY?AFt7+[]Fa`c%6p1n/^qT&Z\'X-LC<-urt);<\\V:$Z=K/a`%duW[bEq/"NT_b._!-T>kt)miI=(7-WP2?\'7PG2TC&6)iANTfO7=PE=Sl@alP26W+E0JL+\\iWB2knN59[+(\\s0\\7@eFh-p_%o4XH=;H2!)9s;e80U57(uA9@db+^kfjs@eqO/691CU[(<0Hb/P>VbdV??F@$H9h^#\'X$FQ35:-+_f0nd.bD@$f"N#3*$4^b/q2A@iG)0mnXT4861%h%&3MnDOoLHiRVSPjJ4W,"U,d(E,bh8H==nga7&m,5/\'_0]*-mQ,u`@LQcXbu+Pa!Cl]_M$d,LnZk0XUNSZ1MQptXS<[<oMS2b?TFE7$4I1,[454aaQ4B$POD7is]pVqBlA5DHW`mtp12_BUr\\8?Xn2!RM,n^`d[&#Z_UfZ(8NQK;qLs-qDNVa3b7\\bg"RAk>iV84c?jo(+2GPMFHlYBrCoUG\\^qSB5Q5%0At%s`WQR.-DLbhAm;of6#Q/<WOFW>__+"\\YS30]qX\'+nm\'<@GKY_(PTd`V%*.T#lj#82tJV.mOfS[H@;5?&q3epHqPeQLbS*XC?J/3qTg)(/2NXd[m;VM<SN8MWpN1gn!ZpH^:NKcR!ng7B*-^QXWSWG\\)K\\dkFm.Qu`rlct`*2PVahX"ga[E,\\7d@@C".s6\'r(pb?<\\`X8h:M!XVFJZ?`a.a_Z!GF`Ei<m^K<>D(E9nI6t>\\7)r<K_@A,9>=!Fc.(;),,G6F0+\'?H.Tu]0&3Fkf@CrP%19P"MdjFu<4gpGeYQn>/gXnC#>V(W*o:>+B>pThXnDfTA;42hk#lrP.Qm+Pjs<2\']A3^u/ali%1:t<D5R`oVjr-2o\\UbN[?8V!g^,B&"6sKrBf&tX$8X-\'OpdCH&WpidAfs=>8rb`pcAgVT#`g</fA>J"nlHZG9Sor.mT_VOq1gjeKUVLN\\hVrcH!"SHAL&ZYi#UG&lI$NrH,R:#AC7]Jq;6K%l@rEL-Jsq-TC<m!7!UU)9%G)sYR$@KaTp^+3-^fUl?OTco??"[GV[K*5UY-rbTc\'B+XHu%qY1BGb&OOOKPJstRDuS<f9TOmY18()rq/KaD_I1F-&#p3g_8N\\obX3).Fs.i*!AqTcp\'/.kAe%Pnn;k#7>"M<e9ue%LYbG*]A*7rF];Fo7&n*1$*6]ja7%\'2MeM=f3-c>p\':^4L(h$SjQTh%Xtb2bQ3+>PWg@+.^b,PNY&?lX2E4Q.?:B_%VO;8ddmiF"`k[5;gDe(gi4:P_Lg7^eQN$33$Hf4COs(91Y(_6io48(03Zs\'-agf[JVJG;i"&.Hoqo8O8tH8pkf"LM@1@=,)rJ:ecPWE0l\\n8VCQ9?88-0%\'?n64A-G[@hSb$\\3Y[Lag=smZrF@^\\Q4$QVj1,E`fktq<-#:0V7YS,$`,iV!>1=@gs62h,rNcgo`_=sIS.d=IIt@j@QuN?emTXR^I?YaI,hRu8G*D]*hOZ4l5eJ\\i.FJh6(`\\/h6SsYo^Q(n^Bde3]^giW`Y"4EfMB/,Iq=j)kc:4G:H64ShX(XCcoo6X%Kln3*mZ9]*q]12oOZ[X7/@>p4VJ_"I\\h4Xi*ATFS[5g6L8`>#:Js\\o5J8!?W8(Gd439^;OfAgh%W6<#i\'<j>GeZMPq=H`L;5Dcm7lUhdh484I`rg:bG6EhFruMGel9u"C?9<-5QHFC1.Q30G]I:\\>2uMec00:5Wm9=d>V^+CjidtB)1i/*1X\'77*qRah_.\\?=]NGI@i#la;SZ?+?3B04/#0kIk$RWkdZm099p^Sc1_FE>8MYP
QgE%N:>JC=m-5h-;N>a$jP%ih:OAN^u1><:V(90Am=f;_Fk_TU\\1)7`.#NUPI;-VduIcY$m0T&rE:nO57L/\\iK!%BjP!LArj&KNf5W+`b[ViMsh*8Y%VK[&=0-TpkU-Z^)SS*&@Z$RqetWd/\'hWSNnF/-)=&Ij3-UJ!XEO1pXr[-$]]h4H>mPlQ4!!ST4!&De`[U4fAGCL@WdM$:(iA0Keu/>$SZ53)Sn\'\')D]H(S*7d<6K\'N?-&+`YWY\'r);*Hl7pe@iN,1?>+4B1E/<j./K80^Jl0hpb=?].sb?L/SJK(@kT1g47?=mN.o:PCnrDnpK>\\#.<*hl)mX=W<J#\\Nt*<@<:?\'uTs(F4\'h2D53KfW%$?<e3_muge(m0%;`HbE*a#=`t&"2mpJS!500=&3\'>J8/H@79I^1$E^Gg\\k[l;`lLY&qN[&OKlT%0L>$7gjN:DD>2=-3Bk1NU3dJ:D^XO(4*\'LYNkH8#CmkFeEZ5k#kfCEnD?^20Bu%"0\\!6Ae@=s!Ip6I_G*<ug3&KRbsE1!4P/baErF-Acp<C;X>E(Xq=M_%R!Y@(=][/5J$/9(Y>W\'Ec!8#Wlg-9D;ARSO/l,o\'sgENV\'jH!r=h6n8hu\'W6H.)rcKk%g[jr\'"Y"^^C8R*T,<h"HGY1hg+Ws8#su$H9M(cClT"<[\'H9R$ZHW-UnmK2;1X/;Wc7.U*jguhqKaaq/!dHnt2K>ea<upN.]W?kSW1=Y[nE$+o%9HP4UrTRi`XIklbf]$s,+;W5oY<:SG[Rc_-dC=-F\'UYKG&Ks.&5QQI/j+=eFnaTZX\\MEJiJ(Lh7+Jj6<8HGjU#i1^FHc4)_<[3b7)IStXINM);m$I,AS8^I4Cg^[GK7S>2ff*ShAHMI\\_Ytq,&GC2dMhFCi4\'%s.D7>&8Im%njVkSp3JdJ<BJqc/E.gMgMbaJ\'TXmKW>oaO2"*KOsf\'j73m<1O9raIsomOpctUl8u)d/DX,666`.OXb6&5]%0h\\-9(,HhPDpk$GWAos#n-T"VuGRa<>IC\'BnoTBOKDAfPD[;ie.@df#!5,OQIHcSJ2\'k.9TU_1\'=XQnkkh8,e\'5?#T]lpuFu_p-=MQdgeuVH,3tKFG0b2EX:pioRfa$/eL]>G8J2gaXDsq\'j-URCnY_P@%b=,-/Xm%OGEqP[YTorHq_pN[uLoO6rIHHjD&;m7(ssYAQ+j!-!qB7#+)U4Q?!XP60bMS00om1\\.<P=WAkmn2UmY)oaruZ!uf#7eNg36Z-N(M^1[2!:uQ<_]i;otL$GSo8OlifC.#4p4GkFGH$mJuSFDEj`5k$S^O#=q8+59G2q=V^`6T5U&jVW=pe*1VVU974KUJZZlC:K]G[2B=PR@@YjZtuqe>c^hlO-+.AMAKn=g)l0"$Tl<Q\'!>9M0JZX)5#6BMuP[3+t/8"Rj$_fG)]fi3<t"DDGE*42bi"M:s0q>6_ds[-`$_^45j(si,\\0-\\0RHkm=[00;rDmT1PX/@RRarEoEkXd8P$K_BIS\\Dk27l&l>H3mXoch2672UGVPs;RX<!+_KQXIdX+J7b$gF9M/&^[O*QEk"m?4.hI^/c3;jl==K<qPnUQu\\nY$@&\\kPZ#F\'50ZIS=8W^1BMf]\\TXFk.0L(8Tk+D/\\=:!U%%#T\'8]UWuHLYl/N7KE5o4W;2a_iYC,FDtZ+Vdc#2OBJsj@/)=JA+Y-bYZD/>sQD`^;MCI\\^Qe\\5k&(DA1*p0G`b9L!;`1BW#W[RM8U?)U;[GJ&oSQ.J0*0fY*F]&LG1Z\\VGt%KJ.2ak,P76SdT;V&$dgjUb*gu5U0Ku:Zt?/(qdm!*00FEO/l-sa:k?4DLh8h32-?H8(A^TEc=AI/)$?.hP066A]Q>uq,a2WfK01.j-NlAT:a_.iHTk`KR+`Fq)A7&%T>/YR*pI"baZCf0q/"RB$L)WMJo_^<W>1N-h4N[qL\'nLNDOWVPNEC^+;bAHuTT41*LFZg
\'+P_-%]38NCXubPtNXJYPB^DNJR81)k!`2.e\']%]odb&7hUkP^hCPEntUs<\\!?VoS)W&Qgb*J2U62u%\'de..0`%`cs1l5S^M%7Kb3VFk@M[1$.phNO41KFbB)qq42\'9O>GQ2TV"sHE,N]P;Eg=i>*M6)BQ\\^+F1[ckSFH.H:c2k`)?8\'oAQNfa0jG9n1pa7r]Sm-1=nJVNU!9d"-%;/EeO?VP*c\'&D>S(pO%<?;A2"Q\'dMKWT,?D^cID5#_)$90>$.>(QI&UOX/c?<EjI6urcf.g16:D/Gbk_CLe6A[6/JD]C5LDb^Au+":oN+^\'@h4Q^f;6KoY/-W^\'?mHq<Wr%@lh!c/<A(OVX<A%>JQrmo.3Fe,!.=N56su[l>U>mu?`o^nN?W.E]:;5b*fhIPEml@$Sepg6A[IaC&!8NfgmXBP@B[$66A9le`?d\\JKHDR@95m)pfOPiGMIa51X,U*EG&=!W.BTbmk=WnYN>%h\\6!Pp1qn`/T<&nZ<FoD`8)NIb+i5Z(!>$ear/Cj;Ha#<G4E=LfKN8knJ3d/gF5iWPu-hQ#`VJSHhg8%[b*(/u>]=`\'9qWS1n(R#!k5O1tnku4LOA:+.K.PSU`c_3R9Fa*gA%sQ)%D<it/\\U1eY3tC5F:34>dId,"khU/6>48+7NasHoJZ#_!H20is-M5Qu54F?5_CQm=(fe,l]I;8>X-RKp"F_^Kk;bt\'WC2kPI.I(2@kLX[(*&?P<^b*S?:<9bMi$IN7)_N^I&/`0LDYMCu,=.tNpRi/=#$LW1%M7]_f.<GrM?Cm#(5F0(gPu]umtU1j%4@K<0%eQ:hQi<)&*n;OLP]A[N1VKFAh$-:Ft,]60@_2$Xmn]48<_<o.e?o0RIMeFpq4fkAG<_")^SljfV"N/^!RDlRag+:hFNBFl?/po;$K+c_AFP\'b<dsPY/tSB\\l[R>>a_l!\\OtUL;/cI/2N5N^#!h9gO(CAn"nfgD,[SqJI7m&;i>:TpO\\&Y\\%tiES=rd&f6@4/hVI"=MP@.tiSCS^E./cKfn!1U--(a^g,0G8qJicang\'N8$MqD&VO\\sr\\GE;NLdcG]H[:mL8BmkX,$X9Vf]iHs-2NmKM2PM,X(SP!I*Kc\\)G]/=lX<_f,8um_8M4R=CB]8dI8B3G9]g6Q_dji`!!f/&6lf3>En,)>ZqOKd6+\'YiDLa_A;4U??QA1BSBG2_X#)7`b%IedXo^?_%5PckWaNC^c,SHr0AGn9HK>kQ(EqGLOgV*p7c0g5\'J(Ai84UU&:>[0h9MJci-i)R!IR3#o)lUU%57PKg52V*stLikV^4HcKCZU/JpY=r@I)A%,aUbeM88Qf\\4gmm`W@PrMM]@iULooR45s%[ZX0(ebdlIN[d-Zs79M25R0Gcg)J)9[^Ttf5$9<gN\'8q!8ZeYH%_m3bt21Ubl#[(o*3l==7h4jK1P?[Bq*[kaB(.aUac(&TWMNa&EY_[G6ED94;%]WX<r"V"1%!25-pW\\Hb9&E[Pb>g$\'i)/`+>8@s6X\'8?2lC;+r"Mc=HE"Th;\'s"]0$!gGF@4&dFR&5>M>f$/dc&NJ/loe&b6:6NBgYOQoOZ9#Ls7l$6cNCk&-tt(].U-1Ke\'q*bD)[l>e@`2X9Yhj#\\djWhc10qNcW7N0\'D]+\'#T`4gDC0r.eK+H]En4g+`c^q=>Aas0=<&abWK@&H/HZ7`7!\'dHm%^7.J+\'](K&o(c@f`#>_u)Ka!6d^r%/fe<8k9^b%VAXG)Xf.SVi,s%!*f,/VO]>!hpj).lV`J@NZA;7ngncOPf0-#-HuT9&e60017<ADf/cP(.MJms(s./Wa9;7KCQ76R9Ef@dc5M]1ULM(QkoP9*-6ZYdOZ5FtCGD7qo^>Lh"DC)A\'r\'gQ&Io2]29F]PXY=aNo`Ed?ei:PM-F_KOjE**="J\'RbR$_>NRK;0GA<A+ko>H(g8dC2g;N#i)qGoYu::k*`S_[\\B5-ZRO$jXm
]gBH.r@t74p^6b>&)R9%RX0-1O`Ip#6rWWnd,0:h:<T<L!fG&inp,c7FZ:+OOZbA4\'?d$I"dbi70X\\S3lP.UPB%3cYW*4gG1PBm5p3!\'r@0&j\\f(e%7U*[*\'(a^hoCq^Jr9o\\%LKlg(q;9DV62)IC)g#$7eFc>lQFHLlgtSs=N5fLY*0fu-C_-nd9&1(=+g]@t8JIQ[cRXu(3$eAH\\8>u`fZ4_P0e5h@%,BQ>WMZ4dhG<mLd,/\\oW2U(N"h-endOgZR\'21XNBDpV[^8ZBboQu82ibB\\$CglE2>U8ToFADi<Bks;cWO>LD,Esf_>KT&+_mk9<-eon]FBF5>a*YZRju`KXHT4-WHF9M&im?u](m>uK):k+\'>)`e<Di;,W7h4"gSfoQWF7C5u<Z8sPJ3\\Of;=4bsT\\Vc=F7p*hn]oObUTt1Tbn8e]e`P6LhoA]"R(M"q8FGK$&^7\'b&d"Z95J0q4*eUE9\'?s":*1RMS;Yc]?ECgds%\',(]p(m2\'"&iCsFac]g,Vue5Dj,h_h_j2/hCiVnaWE-Mjfe[,5C8kQFT9Rk]aA,@5erp`(bL8.)B1;>,^CG7N%1rbTA=(<p<CPh`I"/8X6[0I^GKHPIEB[qfK/6qUM_hIXiPAH=9[PW,pS=U=^`\\)BaII$)R^d9dV:-J`rh;g7[q&e.Hqm$ZDuA=5Z+).[\\KuG-eP<hko7L$>L]FM9pGk4MYkHdp+Cr9+E^7iV&JO,hUA$*$AhE0*Ut?TcGrUO7Ci:-.UuA9l);V0O5*bS6U?V._(pSMQ,,2+Pl;CW.4cpk)Lq&`i(V?)J?VJB,/k$,=bPD0oO--n-/\\!uYAKg&Fl*+hh<GaQc&FG+CA\\.pkR6s#!"/:51$cNTe;f:h3bBo@XdkG>#n;&Y5\\WEp0KS2JIcjWkHDpeUPs)D7E.FMa3V[?-AZ4.gOUM$]N6n/eVB(.\'JQP?i)/]:_qW^W\'=+>e9.Q=^:8+]hOM)4V<rHqIDi7#.q:kR(p+gRCL;1=:ot^X+F<<cS)^0Vh`lVkX.(So0d!,e.o`X96`je^nSg0M/KP3fJit*PkQ8K?7u3,57Y?7GjSF??IJ7mY9RtMPN-nrkUkERG#i/E?kFZGTV;M:;d?UK!Un3$?=]G2NSQ\'C^`^m<efNU@*OMuu$N[U1QXUL33M0Rg6nH`gr0CfAW7O(0.G2a7"0uOX:74Y[/\'4&0KMK9:u\\SGi1JRES8r.2/T+LdCIT&MjW<:UWI2^q5\\H4kel*tXsK>:Bg@]s$#$-sO(KC$NmulI<tOXYG-KWq+fnBsCHmM6R<^f&QCM7dR%\\[D27*@*F+0lUc\'rJIDg-hpXUtljjS^"/F#ij5M2%/Xt7A#,PJT;+,nh395FNE^4Nn\'B:iJoX,1S,$92EBYr`Z_60H?oG!_u&)K7X6K8?T[\'>qY58>TZg8kNYIp`k\'dlH@`^SNt$ZjtX#.b*#ms6M)SQ#faML`#P*O2qpi<29<Pgc7+:)Q+<jR/W+&mW6XFBN`0]3;Slf)AgEo7H/PG*`s%6&2_\'.8"d49Jr/DV?qnZnBH8CEp*dcUZ/A_q\\oPN-SD&0<C),p#TNjoX;h?2Os(kR3-(5(5L3Pm7MPEf*6?"oOQr`VoCR8sLh->Fuq5-;5ep^-_fRGhgSYlc`$_)IpkCdVjT22]PHAJ.?rFb>?S_lS\'lhQ\\$OT/#@12U&E0#<tO=oT+Ce&r_Os27AM3)R6G?PScdLhUbHj%U.6BuQX!EPZ^cnS0GQpdefJ2r2Tq;/ZDbQH=!!lol:LZLp?QHZG?FM"_Qu?bDBI[SJfq2gC"u*"O)&;`\'e=pcYe`;o-T;,dmC0kmhqq\\lT0[9S=c!OSdEJ\\TKKK1me>\\LL6F3?`;BI"=Ji$Bmf70Ym-$tS7"sNVUB02S\\C.@O(B>Ge8aC,;i_IF:O95%<uX)39TEr&7ikQP?[]\'GWn+H]&no9SPuisrWRbR]e#gXmFmH^t
2lD8JK%%QrXA;[%Y)>fH9AJ)+J--^j)1)>X#fcTIh4G$LoDK=*ZVT$,;`Nj\'i9W"[GVM0NA+hb8c&,FJgN^\\eT78(F,9GSE14N\\6*h2mtlWiI$^Nh#JHJ9C>oe%_B1O*!jZoC4M$n/qPnJGQX4okL+_22#rs5nJ`1olmG&u$m0M9"LM\\%saaE7]=N_m-5i\\5"n.H+&cMRr&<b*Ts\'Ur5l?BJ]8:sEMe"*\'Qgui`XAn;G[qgt8*("?A#?fO"r/fu>9OqV,&)0ENY\'kp7$<B@\'.N&<qW^Wf%kY>)8q*SSgSVFLm*5\'WN\\D\\K`im3L0<O7#G)h2eL["C98\'SG8L8m4Z;,l<8D-Oc"l$>F:>oFl3#>Em^3:k#[Ahl2o]pTD[Y$\\(>I3ig*>^j-B3G4to_OHIe;JJgc9E\\46)tO:e+1_dI\'O<o=b[#>SbbiNYRaA[<F:j7lg!i$&.M!VEi[XA[?u=dK\\[!%4c[#?X:Xs=KRC4@n-TeRTXkff$c=odAJie.T>Z8O(^<kFa?GFoF-f.(m1*NT3Gm#+@1YJL\\.;C6JK9,Ap]2Cf*.Md:KYOi.a_tC8(\\ZIKpV^_G-D-X`&l0=bBYJ/++f](iUB&A:9aNDLoY>k0R:5\\nrKZAC\\RZ$Jb2@K3>o/*L-,5:)n>BEMtcq1TXVWm)CkMtKM4<44GYPfsMqpTaRI.J\\lkO*pAV]UM,pTeo5l0MU@.$5YEFc!42nAor-:"NOIhf\\9Kd>:ZbI_"`<Xj>V"_Yk(*Rni!ubH.hJY.@G,;n&sCEln8-lj5epT$VP";H@3jKo\\a\\L$k#\\m`XqsiA=TYY1#:"hHlkK\\9ZODNjs,a\']gX08UICGYcjAUVqgQBBjniWim\\g65<p?VHsm5GQe\\3"h1-$*g&MDLS$NQ$g@3mi^mk?.Q_.5g\\.a(K$_\'d>ph_D%2%P7ah1(!5[CXsJ<_NE4)\'@\\m:4(89#>T38iVCL?_=EXDZ5]m+bZq;XP+0c[4+5.`aBn3iEkl>)rTC\\C?_fhM$b<WNol"jj^8p?F[d82JoPfSI*Q!Mhb[/?O2@Fob)r\\3Ro?L7E#)Kap*oum\\J$[pPg888EfX>+TpGfN\'iIKZoiSSA4OKDci?E.)mY,b!#:ceaoLHOafj5ZiC"Jf`I0mDo\'E-S=PMp^4sC:1/qf%^e/ECiIF>&o<2AQ]cF,:p%XnhZX:D<@_mO4A(io!8pM\\6n>;2f!743/?\\OAs]O(:UpE,CQNfQOfU4d(T!&L1G&#JeQF;t=^78qB5BS#&PD+bg<&j0mU_FtH!W5+9)j5pj<#Ri5>K^JmVR</afn^HZaNV:.r=$EXP)jbX-T?@i>_K.[C\'b%r\\eS0SUT0Lg?P6;HB5=^qa7l4"=MI/a3Ll*[DF=D6>rqCqtkL0T-.mc4^QNVRQO_6_a<O:C+\'81e.\'r)U_&E698525mcr_0%V1X6KVegg^>(\'R#PT^9nCUbi]<s=M+20JHUCg:U6.BJ]Vi5peQZ5GNFX4*n??l%$b:?Uuj\'HK\\"dIN2itaH9(ptI2"k4UL9Lp6?*)88darQE\\/rG)cg>;k5[]9dRj1<OOibu()+c<"-lGrUUN\\&&D&4oon=KuAZ&f%C2,n;4K2B3/.E^:<R#f+%[Oi.6>-h;@7D)L?sde4iTNr7Ud5,>B1aCO!eUsh6NLF,IF`p\'j)S[N7do[iIU4l89ohHbYrS*<VcdDc+?]mK5(Hd=kLr-ZP^g^LDPH-m*%8m/qCRUDdTi1Yr;b>^7!/-;_,oQt0o`VttMUm4Xq[O/MTNq2@-/"+CcqKlK?JXr</iG9m*h!^m@X,Y)VQ\\NG?`lT8/,Ci:>$$#>G;_^!>b8_&<Z2KJ5!\\[^9q8$PneR%"eAP0#lSlt,NT&5JB2#kn]63UcY)5C?elRKP_@,l7F!+;lnEWS%!s!\\fI5bR6cn"kgDjB.<<I.LE+`cJ\\+opmecUQ,45n6&Xt
ek=oDE.jHBQm3B-Sm>./C_m!PThVV7*pb3M)iR_+j0u/Kk<k"_`+\'A0onto9P=RF?WSU\';f8%e2E9t$nIoU>:M_L`^EiAAhh94*j`kRl/j^o(SN"(Shqg&okU:A?i?Xb1\\=/-)4$+j:cdj17%,6H/*GQflgFM,=gLX]8%5j$lnKBPK/jZ0,[U5p!3keQaj7&ul=WMVp>6n?6NUR3<3"A06PQ4!PHA?B@H/VKbU4nOZF*%o[^p8R@N(Kmg[,O91lhbC>TNTo^e=`PK^<@"bQ\';fEq9doX@<kkh;EcQUu*P?D^e>5(I*E^5(MGn)j?cd(=IAUHWomt$,aDruSa-UT7oo7e!AAn"Cfi//d(MdHEG_\'dAI"IU!SA&AkedTe^p**/+REjHN:_&M-R%>(/h!]2H#@"6=C,A;8D3,hma_1mqX"kc,eYR=Gs60q]A:M7!Pc/dhg*4bVE4\\BU8Nm=m(2ZWh`O3&_C4?X445p3"]\'qA7HlL2>8]?iI&KYJk\\Gb(iDE>0?>df-[G+Nu1-"m;`fkPRCG++HhSe#1WAWX*AYB>H$k.M`T^+pPWLXN4lI4?7!aO"t[f]<]&%;+V4b\'DBnABWQf\'KI[(qT&+u/H6eW)N(ON1#P`]g94+]`bb1T@iaj3E?YTs2#W;N77*)$V;4W9pgS.`mnOJOHielImj=)@HM[a<La.$qdo.tU"<X^Hn@o%=^6.0WqicCjn"j"oV`4F?JcrSji-J)hp;<31BDZ\'#jlsP@)\'@!gF_,oH\'i,th?T.ETrsHkT,P`9(hpiAQGS7&bBnCKlP]r`M\'tKe<Gt>SdKZ"OE1t$!_J=d6:q2/;Q&!AY).-IXi&$_`*pG\'.m3*diqUV8FQ&\\$9$?"6@(P$[lu4-_YhE@RBA7WnBIks.q)npt:jYEfcm[aEf<*=G3HEBF19aa6RmLZIocI")Y99jnpP*!4X<]Uc<Mo_,qoijW-..UN`lB_EQl;MZP%W!iW40X;%j7;"rJX;BB\\6]4<l-7;\'9$8e3kAN[dt>UCXcGAR1coSq8>.uKS$PcC=K2=5.Lh1D=jMqQZ2V&ZM]KVr\'-G8,_JdYh2Q:i<,6];_mT]ir4E0\\VL&lHi]lfO9+]"I!sE(2qQ;3o"KG[fn0CTIgWN%_*M$`eEFIUs\\XG9,;A&F!>S25o#tF$P@#4_0Wm1ChYfrG+&loMCYS\\=F/@iVbpg\'LMXR-O(0pB>%?**aOXTI0;sbe6-Q[BP#>F(WT)W3Cci7cd@F2CaadR^_/%N5dV\'14nMse&Ap$VOTcTF<h7,=d>O9[<]"gjE[?-[-TR@j%,Yl<61O\\<hKLGB0lh*?IAu#\\;I!E.N\\H6MNXXeh:>s_hn._\\qBC0*$<c.3`]W;OEDnCmQ)UG!(W[qeUN)0V\\I7aIk0PYac]PWgu\\nbU8]49l!H<Y3__8LmAlC",r,Ek]\'(8qE\\73tla9<<tKr4*CG<l[EVbW*?c!<bp>qeDS.ae3.N.C&oedG26r9O98pGW]3,Z;)\'"&#ulZ\\NS?hcAms>u6SCBn$MOjnhGU3IW\\ieJ;\\&lo\'5UY&Cr<eP07tt[\'u/T4J0cNU/Dt9QS(ns!ia-OEdleHo<:Y_s3+5tp;[Yn;XnjRLM`P98VU@r9dA@E#rDfnl)5m^5\\b-a868d:1.G56/Qp2Qo&?ZVW#*+=M)0&d91u$Ue(@]Ug.DKcOQ3U=V;4=OV/sEDug"HtsZ,>fMe(f&ns48Obns0q"o&bMeCi=55013LmNI/2p+Wo[!k*;J"^l`Fe!Da&7di\'f]fVnp57A$OX2ARDfKIH5La*e]dJZVs51Atap>2-3t^NPf)16&Z6,p2#l]CS`m(Ja"bb$^l1SR[A3Q]L[\\VpWNq"uN21\\$gO3WNf]F;"6%n\\"hjql4?X@63d,-[^&sk[ME]5_Ieg1WctDK(BH/mMrmKGgH@hJ\\Lpq]-c??=kcD!==tm-RLid`BVH6"ji2OBkU
:`eDZo.NQqDpmIW,9ATZ]V*963sSjQpZctIg\'.1+Y7;AOQQt\'jW6C/W46\\.(8aq(pA"ZJdI\\MOrc@hD@X"?oD/CLBct%o$5Ib_"))<\'aQ.8`TJ3Lq6W[)24H)1SIX#SWk`8Pc+.q<t>pU23Xj]C1W3tf1Igf5HW0Qe9(dTY%@8VTf)Y7#m>%!XTm6SLBOktZMa,QujA3KAp)5N6u:HE!eSf/pf]Q9)?O$SC2ON3j.TZ1RDO,S\\:s]rJ1!Vs+.l"H-Q^!OXF#^p;:ZY,a*OgU<)O*^[:[Z7A9pNI6QW,CB;0*YgR?#!AhI65q1V9+urRfj5%na3Z2K#7-(nPKsYsPm>5r8qRhknU]R&3N`r[*H=(8Hr>q`s*Ymc(R"e>*gG\'nr&;4WRdLkO909nqS[*<aI45)G0`Zg4gk/%N"f!$p#PK`&h+oJ`gWV]`T9EUT?3\\u6$C1luKGI\'>bWV1m:O;`J\'JP^<B>fm^Nj<\\$SInji%+hs7QQRYQ*m#Gb4h0oL=@R"]-;W6O#01E7[I2P#Ruhuc<l]bkDg\'0/`*O4_LPidU,>oHm&gUUf0M%e4UL3-M3",uDXS`5_X^aBV5&tr-lIG4gK@2njO\'!rP>!S,Um#C%T$V?"V![CG>CO.Yort1DY7M9to!JH,\\\'&8NF=-22n!f#%fZ(*N[_!*dp2-pq?5-q-="5ZmpN?NPBc2E^`*ul;0`)Ha)g\'rW;CCdr2p/Dls5+DQPDlB:S/%J@fqI8I8qt/hNGUaa0Us]0?G*`tm;"j.nT#/\\@r\\MI4FsFcdoWt#sr>auN!(_a\'`W\\+*?J&25^?mqo%/o=tc\\4e@ce?Gq5k=^8)!0XF_fQ;Ni4N^1mSE=9Aut/h-SW&:s"-]0g+O3jgWu?ecC.W<]ttW)AQ%)=!_C;Q)Y_6G=[1iM-8?3AIu9:);M9@h(l-tr_\\65pG"9He[I2Y[;&;--8e5o72lBW;KDah7j%_2-Y?"fH7U\'J?FgGTf1:28-6/\'%OD8(d%HL"At^E5Ykc0d]@4BUEe%jkT<lo(JX-b"Kc9_!9@-GD;O)f$Z#PP<ZhV4FP)(tRtoB\\fEJ>0qs3hiZa-DcnJ(aZlQ)5=G\'\'P;ri8iNWkkjFIb(\'^gu:er)Z-#;QJA?cU(^/L7E_^al1s.liiK:ehKC7T9e:efZZY37$nZ3#ZFQ0^c?7nV1V&L#CR!j0Ck.ptk#%Y#%P-\'gSY;#\'A<d=J+\\b#g@[OlLA/J7INEQ<mP5!-@C8,nXp3-@Mil:eHao_*B)Bl?CF`21/N9!8qEjA0$M"@^R"?CgL\'1+r-,Sr?CD(8W8]D6F%N!Z4n,ThNHCPFjtWgJlk)2oAECm)5WT-"MQK)NM:+6BBK]G;j2u3pID71j\'HTbg$>qJ:UaE[e:Biig1g^2"^+uC:IW!ca\\$\'r;2-Gqgj\'s3QCTWUM:/`9CH5^0VQI=J::^M6%Z"(M>N*XKoqajl2OqQ)bD&Mq%de?6VI5!AYhm+;YPH*]sg(Sg3R0K`WbQacml@e.(qN"0rJE6/.mJ\\I[`g3LS15c%C`=VYQ+t?o1_GKZ6-\\K6@8[UWg-,!"t\\<>]7C-r?D8uu_#1GXi)F;q+&QS<s5C&g%HM?(mm)#GQY<!pbP\'Q0[,.[%QmMAcBTibQ9SCVFF=&t;H1RfJ;/=<33C6n<R%nSmo-qO*FGFbTn4BE\\Gl:f:`\'Ob\'3lOE(;a4Z803A1+d.B5((!d<</B#;!C)BXTPj?t*%:@sN;b)K?GcIn0YE%=oPjA<Xnj[c7!TVjd^CVP^;Egl"%gBkn^.DIBNX27<-J_rP!8XRa`B(;9+E[We)Q+k4nl(NLQ%$k$\\P=t/WQag-jb$q.O*\\;X$T3IF@#n\\#_/Z.5+N?-0;\'3XdZ\']1aXn39BBZA]<H!lo<OHoIppAX_$ubhb4Y/&a-M@/`J3:6dsWqb"GbME?emWcL5i4\'dZKg2j)>=qGH+6A1bd8Pd
2MqG515X6OZslE=9kI88D/IX"3)3[Bg[b%h@mE7tr.NLHa;jMb+"S9oLcPf:TG00D#e7F;bJ#l60q%&%nO;rFto<W>&\\o]6dd?rX2ND9LpuQrIi*en,O#a!g4U6H*\\)6[Zj@D%qW`D?<2?TgWY&"gL7STYI0^L.Q5#64P_`$%RG"-,be:Q$27aoeUOY<6?SOtO!7&#:NAiHU&h=SME]J2PB?gF4(e7]F!!#o<]8(oW\\\\b7=Zl_@Ka[\'^?F[YPjPIDNFCk!>]=>5P\\9.\\d_MoMdQe[pF:kj%OF+pK_J:DCDPh:mslrAblcmG2beL"A;=0X\'s4V2L?cS*$0XYhI1#71PVUn77V\'(ZtI7+lQ&\\X++U!d^0tEI8$>!lAGM;A>e%qs);=NVQN/U\'kg;NnAar-fQ*P_KsBcl$i-:=&5DEE!6$qeh1L=4e?+(Me]&u(qfVh;)7u:(J"\\">>ILNG+mV"=_b3=)ZaM+E^5bCK;bK-@ZBE^A$lSP;)ep!7IbUSG,p7S;oCL32=3`JDtMQF+A20>ohDND#e-X;#_B(?rYh)BhJEiFDH(fgg]kqhlQY0i1W<6V83i7+GES6rJcH6PIEJ**,1%f:XuRNUZrr#%*5mqfXe@.fbB/PtT*.\'W$0e\\B4tmtHOhh.B"t"Mi==:IcmmeU,RgR!g33r,U<g)&?;?W`@Q("#cZSYolPIPHg.IV*62E&b9+>PbFC4?+Vc&9dTV3?%q1([5M&".`/eg5N9S%CtG-B^<kY+IH#Bk?qMGqBN$X5QOLh6A.&^bHu#\'fTPl$[)DKfeCE.Dd$tS_%oY9-1O_N0h^`*F;c2A7%ps^!jh=M9/dWP8oDmuG+,>.0\\$4^]Z[I)*NrNJWX_t2;K^mFFk0Im@dt2S(tBS(*Z`8XdX$HlbGX,7@24rBc]&o<>oKOY/K(7K&8u\'J#r,29]C!:g8pqFQ"m"?"aer^R)HL.*\'UY$JR81fpbLoY$B2G=CH4OJu/XQui?RDA5k0XS#Hhsc^ofK0"4P066WPG4,4qG>]aZ/;$Lb_cJ4Jnl00ILFs#E?pH:0E"lBGVMh]Vkkr`.M\'6L1u?nM&;ZZ<7kOKs2klkK`I>AB)VfK9$HJNflL2*j+bk!kH>Ha#=#bD8Ph0SV*Cr6Zgmk;\'R.k@&-61!:4COrh.CEE9/dD=q<$cBpYs\'<PtuF6jm,RH+M*oB>[=jZNi,Cj-m$o/2r,e;Ma\'$qXW8=.4EoBTlD@\\`K&0u2"1Qh=S1%\'q=7*?^kU*,WBZ+uREk0+Kks3TYR!-N*0OK:Qg__(D$>LqP^9lH#0`9!>b+BFFgdHNNX9I+6Q`TZop/e-&cK^R)3J0V:TNe[9Q/eaUF/fq6bWE"71#+[h>%YCf3E,@HVf%cuJ\\\'lOjD9k8&[5j9r)T;bNU/.Xq,gAf;H\\,S0"p,@hl2K;n%"Psc>qgemTCpi-d7hqk9bg/`#9C/0rYr9+9^gQj0T9C"X,XMm7t@F#oYm#oOU_R<]ME5>:]t6h1mD0?D;"ekeSLj%qX6b\'^c1Cb0p>k$>q&jEsADi;-ZNu7\'#/6gBa&+XE;<][Io]&V[c*[TYL92AN\\;Y*_+i#fNtm&]Z/X4jKe0Mpj"C`h\\3@)VH,`>Z\\>D6]8IK]\'K"j0ci@dI2m(RT%,Ps"SJW3)+FV5%p#pN7(LFbWRH#*e?f[&>?QI/c*0`ni\\5t`aL.#1DD75gGjXE@"[ZoN.X?74EEY5g+Wp@J%Xn..Vl[^Ck"g=rfn(%_`V)Qq+X9f7^?+1j>n:)S<Ha#`7A-JZ:5dP!\'GGIs>k)4a.c\'DoU=_%JV;<0Z!rOaeG>k*\'h;]f!k57q6jS5%@Fm+pU+>dK6]5KP)`2^V)k^g>ZG+fG8OkSP$`7s]L;)Vm\'f`aGSu\'s<#tA*BY)PGRd8n:FPVdS$AQ9R<Aka-fY%S[hnM&7Oi$)OO+/Y-_i68F^B$[rtE<_D&-%"JfXE[q>^]5^(HTjN_
]FAhsiYK#+mpA.ja.1-<s26QEBeln138#tE06hTktRI@1-bVcFNuk3p]`fZVZlq_rQoeFj9cb68#.bMLQa&1]lT7*i0I5iO+RRuiH!*.e`7LmsWX7UYCX?N*cqRMEf(2sqEoIGVngY+OGt+NE%W$/>31nh&%1qZWD5S*]729mY-?33$okCp.g4o"Q"\'_L-IO)s9J<n$3I9U7\\?]&)o4V!<CnM\'NDN-pDuZjkuSMBad;OhRF"6e%XH[O6o[Xr%WV9*Z\'[c8iVo2]e>aKY3(G-Xj(Q2863]SHpVqR?D^C$CnfI%VX(NL8B8Ws,gj[X5F,U2`n"XO0m*57u;9^0BSg\\h*@f&K\'rH1%(Do_!=e7sY4[/;Qc[;s^fj)NO4.1[JW\'D-X\'6/#5n*;os2/\\rL3Vn>oWP+HnbH@jSe=?)S9pJ^5N7-sb05i:-+hE2`:T=gNX+/$8XH!9;Yp*P*L^&@<$BJi"EF`uF/o/@afk7@#Yk>Mf+T(5VYI%?ii,7a$i>,St3W/!S3;7Or!P28;QDuO")m\\PNQFH9<LpFMZ#>Yk\'kRr\\1$Qjb@ZrU^6T`H3p\'rH*OXE]t4VCN%oL0lh&Ve%M`P\'ga5CO[BHDU\\=bUGE@,*:MoBNPEb)Mlec`,V*.UQ3<r$#m]C#@RsLf"4d#G$dh<6$ou&l>\'68:D4Rh(SrI_eO\\!\'egmo,7-ok\'ro&Pd]!E-]pk6fC^`W&JcVH2mBgM;F1m]UK7hgeFGJc)]gN]8?CqHd=Hrp*&lBS9rRGPPCfK,S//b6Z6Q6l/!T^9_1p,MN?Z?dad^0B>*In)>],8LT)&]BA2nr$^G`2gYjC9US02T!>#9kR-3q7\'L(!.Ag\':6n+D`<W>"\'Q5G;4DgZ+\\kFe>?cT>1DnpO[VJbs_$A`<=\'=i=>P8[->&X\\=u.=RhKmTTk>TL[.G*pI7EB/5GZ<nV-`0\\n[I1Y=WLAQbW(*Bg:+"RA:`\\ePe\\uj>8MkG0>t=i`^mOI/RDmhFt)%N:\'TOE=sN:-:&ihV=kT6eSS4GXg3Ocu?`iD6G.3XnKm/U_LB?Q#OH+,2[8d6VLK^fQN!0<\\l5b6%4b5KV"KhFaK(tdIJ_uoC4;]iLYd@(AAh=Xa"Er!mJt0k#Y)\';GgIA99>m;i\\i,@a&o8"7r7pZ4Cd>l/`J_tnP]])rbi,u\'*l(*`P-*kNX$rW5oOpB5&3kiTo\\!t;(Zc@^d_q!,iG4@V*E9G`2e3/6F<9]q=Vl>!4B4m8fB/ra.)P?G+0Mh,;lIN:2aa8pQ6#UmY3!1s<VLUK#%KsE1Ac\\i#\'q8Hi/WG,6-pY@"kCKM[QC)IM1@8G[j$S3!pL@jP3kNNF$N-V).AB%?fj,TsjI/d]*dH)jb+CO8m;nVK@S.4cO.R8m`-`3m94j;CQ\\NPma\'LT3(p,D)AYIej=\',KX6`nBHV`bU[[)<+-3><VjHiM\\5[e3=\'VpXqO$PG\'(qi2=P92ELmHc+FG5T,CrS+15*Zoo^mUjC:ac`+-_UCl5)S&TSH4K/4RV9sC_$,fRl!+kI[U]HVRiH#@`L,,?)TXd27\'-`T9EdcVY"u450e3i&+\'iSW93,tq]!VVL:#1l]h*%inN;%hD(+r(u-2)R]@f<8T$^h53pC14+BrRO#,Kj07[;I$#Hfm5f0,GC[p8l@\\UTO?$(-p\'=8q7<F5=[XFgP<(f.ZrL3LI_>Qb%)Q+j`6hLJW&M;KFVM+m;i]nS]GmC[&+QoYV5<KN&otO)68U_rX:_4JmeFPTIU]LT(QCnH;7\\NBelL$;\'VkcQXS38.%<d*NDj6@1hMRrp(V(A.0LkP]%sNLFAe`pE^mZE)(kZ`6U9SOCE=%08K/@abK<4^j+Lg3@Xj@)q+:ffcd9"4_YM^HJ=E[G]=1j2>o2Q!L?eupjlMm5Y\\$WA]rlU4SdS]7ZLJ^9p#D<L_dj>V;+79>d"0%6!D_?*L%at.BO0I>;mC5WMI2,d
8#"H.Jc`#tH@lqOC;c(N@Y?8c+cB[$.E\\+Jp>7HTe?-1TK7;pHNia*tLnWEJN$op^19IMP_S]74MC_`sNKr@c"ethlpe?RnJ"\\WBqplS!\\6?u<ejqh?kl&db`3Q!kQ1gQD7G@p&W.QP<l/49.`$(,LJTF^S1S?UH.?-/e@&kA.G\'n;t![<.Rm=@Moh`L@6?WqI*K))/I3*L/=&eeJ.q!(q[V)SqmuRakk6+i>V_VX<Ce_[6C*cG-!!:t,[K30j86`!Gl.hJWs+6T]\\\\nr[8\\@3o\'33a2OCFT:+#no8"7EZ2O;3\'ocA"8e99XbD1E;rSud[,KQ#k\\.Gm#VoElh0R^NQuuW\'8!uFqh:Ck+`s:Pmkn4A,A;uPF$d7m=>\'?gtW-ds&P5bjE6I5nS`b\'"*-.7X70#Mt!90f;sL$+A7G6(<)/R676X"5hc,IK7P>?-SP$)82d9kmq>_onEdQt;7)NR/D<+dR[q25)p/R/kWYV^@E@!l80%1dNZH7&[nDW\\*N,A4^L37dI.F6r[954-g1\'^hR7PMnW#&6-X(6Ul9W%iN_9n$@pn<6i&KJ,m0$M3lTW;3kU.\\[bC<2*n/aZ!jc5-X6koCC\'6a6(^Z,dT)At!!E43Wb\'90PK+.sdiiaSKDhe!-d=kl.Hn)"Equ:NTosM$T`2B@hZs*PVX6_GpbMHI_6S]&3MQR\'G?g2CQ:anTK$99WJ#%qL;Rn,$>DUk;sG[iHCe[)^;Bu!Y^es(/Or5\'#e_24/WkW6<d"ng,dUIr;\'[;_&d@``1CH$oL7*`rh*%OlhTOY$Zje2%,\'W/0D7.6JeeQ2$Ynd:QCna!u2[#Z04D9gj\\)B&6rV2fY<]0`;H_nS\\0qFWr8oGpW4j\\*L+r&K,Sh->dP#YTFOZ_]f8c\\8ZE=ZcG7(X-eL\'*WIN,cbS.5)*[Xf8hb4e_-Z4>P!BH=F4jmRnS>BKQV?fPSQj^1C#1^acds1$rC/QRRZ!G\\m?7L98X*WOEUcrK)8:u8\\]6"%K))K_As4h,.s[m\\\'AO#YV3jb^k)$oi^ja7>>lDYtc,$ga3nrX$"q/$g]SGQ+&k2t3VM=+K-W/f7;9]#F_;PO(A_^0QA%R\\6@;"`?BiAK%Z6q]RU16Fi`]Re%\'a>=3Uj5!##a1t,<Ztn%6s_u]]OV/#AYlfZ(qf3I>r`VTe.]I<mUQ?&jnb#]!A^s6NVkIC`jTEu._sA01)DPr>khHO%^#R?pE>\\#&odLB9dJna+GR6&.MBalhEf!aRQlSg#=o]A)DQ1kVC>$Wj;=)0P:_9HE"FS=LQ[HUT4>;>$,Bf!,A-W`38k;?/7s1o_B4(!1].V*ZN)gKJ,m<9"?slmH,b:PQ(tUF82OtWh$M5>g3njL_:W6abd3]&Ap,/;Bu6U^_TI@m=`d:k/^-f]-]hknq3m[Fk;/icq`\'<V,FbSA:;AqUPI@>JG#m;u[oGt*:)/pj#`=skEd$$B$OOb\\:\'24U2=,^Nlc#)oZp/:ep9f"JpdP=7ANQfMn9<DCCera5:7=/UO4`L<UUE<gR*6k>c=-R!f!1bZoUBh-oY.[qBfjTZM,-2^Vo1p*UiSeMl0)RKqaA8h3(u>n3\\u$\'hFOR22\\OS_b+8Vmq*0)nPAC~>'
|
999,747 | b489578b017710ed899ebfad438fb3b607cf8d4b | t=list(map(int,input().split()))
# Emit the integers in reverse order of entry.
print(*reversed(t))
|
999,748 | a6052813f10d61489142ab73338efc140f7c15a1 | from scrapy.spiders import CrawlSpider, Rule
from scrapy.selector import Selector
from scrapy.conf import settings
from scrapy import http
from scrapy.shell import inspect_response # for debugging
import re
import json
import time
import logging
try:
from urllib import quote # Python 2.X
except ImportError:
from urllib.parse import quote # Python 3+
from datetime import datetime
from TweetScraper.items import Tweet, User
logger = logging.getLogger(__name__)
class TweetScraper(CrawlSpider):
    """Scrape tweets (and optionally their authors) from Twitter's legacy
    search-timeline JSON endpoint, paging via the `min_position` cursor.

    Yields Tweet items (and User items when `crawl_user` is set).
    """
    name = 'TweetScraper'
    allowed_domains = ['twitter.com']

    def __init__(self, query='', lang='', crawl_user=False, top_tweet=False):
        # NOTE(review): CrawlSpider.__init__ is never called; the spider
        # relies on start_requests() rather than crawl rules -- confirm intended.
        self.query = query
        self.url = "https://twitter.com/i/search/timeline?l={}".format(lang)

        if not top_tweet:
            self.url = self.url + "&f=tweets"

        self.url = self.url + "&q=%s&src=typed&max_position=%s"
        self.crawl_user = crawl_user

    def start_requests(self):
        """Issue the first search request with an empty pagination cursor."""
        url = self.url % (quote(self.query), '')
        yield http.Request(url, callback=self.parse_page)

    def parse_page(self, response):
        """Parse one JSON timeline page, then follow the next-page cursor."""
        # inspect_response(response, self)
        # handle current page
        data = json.loads(response.body.decode("utf-8"))
        for item in self.parse_tweets_block(data['items_html']):
            yield item

        # get next page: '+' must be percent-encoded or the cursor is rejected.
        min_position = data['min_position']
        min_position = min_position.replace("+", "%2B")
        url = self.url % (quote(self.query), min_position)
        yield http.Request(url, callback=self.parse_page)

    def parse_tweets_block(self, html_page):
        """Extract tweet <li> nodes from the rendered HTML fragment."""
        page = Selector(text=html_page)

        ### for text only tweets
        items = page.xpath('//li[@data-item-type="tweet"]/div')
        for item in self.parse_tweet_item(items):
            yield item

    def parse_tweet_item(self, items):
        """Yield one Tweet item (plus a User item when crawl_user is set)
        per tweet node; nodes without an id or any text are skipped.
        """
        for item in items:
            try:
                tweet = Tweet()

                tweet['usernameTweet'] = item.xpath('.//span[@class="username u-dir u-textTruncate"]/b/text()').extract()[0]

                ID = item.xpath('.//@data-tweet-id').extract()
                if not ID:
                    continue
                tweet['ID'] = ID[0]

                ### get text content
                tweet['text'] = ' '.join(
                    item.xpath('.//div[@class="js-tweet-text-container"]/p//text()').extract()).replace(' # ',
                                                                                                        '#').replace(
                    ' @ ', '@')
                if tweet['text'] == '':
                    # If there is not text, we ignore the tweet
                    continue

                ### get meta data
                tweet['url'] = item.xpath('.//@data-permalink-path').extract()[0]

                nbr_retweet = item.css('span.ProfileTweet-action--retweet > span.ProfileTweet-actionCount').xpath(
                    '@data-tweet-stat-count').extract()
                tweet['nbr_retweet'] = int(nbr_retweet[0]) if nbr_retweet else 0

                nbr_favorite = item.css('span.ProfileTweet-action--favorite > span.ProfileTweet-actionCount').xpath(
                    '@data-tweet-stat-count').extract()
                tweet['nbr_favorite'] = int(nbr_favorite[0]) if nbr_favorite else 0

                nbr_reply = item.css('span.ProfileTweet-action--reply > span.ProfileTweet-actionCount').xpath(
                    '@data-tweet-stat-count').extract()
                tweet['nbr_reply'] = int(nbr_reply[0]) if nbr_reply else 0

                tweet['datetime'] = datetime.fromtimestamp(int(
                    item.xpath('.//div[@class="stream-item-header"]/small[@class="time"]/a/span/@data-time').extract()[
                        0])).strftime('%Y-%m-%d %H:%M:%S')

                ### get photo
                has_cards = item.xpath('.//@data-card-type').extract()
                if has_cards and has_cards[0] == 'photo':
                    tweet['has_image'] = True
                    tweet['images'] = item.xpath('.//*/div/@data-image-url').extract()
                elif has_cards:
                    logger.debug('Not handle "data-card-type":\n%s' % item.xpath('.').extract()[0])

                ### get animated_gif
                has_cards = item.xpath('.//@data-card2-type').extract()
                if has_cards:
                    if has_cards[0] == 'animated_gif':
                        tweet['has_video'] = True
                        tweet['videos'] = item.xpath('.//*/source/@video-src').extract()
                    # these four card types carried identical handling; merged
                    elif has_cards[0] in ('player', 'summary_large_image', 'amplify', 'summary'):
                        tweet['has_media'] = True
                        tweet['medias'] = item.xpath('.//*/div/@data-card-url').extract()
                    elif has_cards[0] == '__entity_video':
                        pass  # TODO
                        # tweet['has_media'] = True
                        # tweet['medias'] = item.xpath('.//*/div/@data-src').extract()
                    else:  # there are many other types of card2 !!!!
                        logger.debug('Not handle "data-card2-type":\n%s' % item.xpath('.').extract()[0])

                is_reply = item.xpath('.//div[@class="ReplyingToContextBelowAuthor"]').extract()
                tweet['is_reply'] = is_reply != []

                is_retweet = item.xpath('.//span[@class="js-retweet-text"]').extract()
                tweet['is_retweet'] = is_retweet != []

                tweet['user_id'] = item.xpath('.//@data-user-id').extract()[0]
                yield tweet

                if self.crawl_user:
                    ### get user info
                    user = User()
                    user['ID'] = tweet['user_id']
                    user['name'] = item.xpath('.//@data-name').extract()[0]
                    user['screen_name'] = item.xpath('.//@data-screen-name').extract()[0]
                    user['avatar'] = \
                        item.xpath('.//div[@class="content"]/div[@class="stream-item-header"]/a/img/@src').extract()[0]
                    yield user
            except Exception:
                # Was a bare `except:`, which would also swallow
                # KeyboardInterrupt/SystemExit; narrowed to Exception.
                logger.error("Error tweet:\n%s" % item.xpath('.').extract()[0])
                # raise

    def extract_one(self, selector, xpath, default=None):
        """Return the first match of `xpath` under `selector`, or `default`."""
        extracted = selector.xpath(xpath).extract()
        if extracted:
            return extracted[0]
        return default
|
999,749 | 4ce601108255ef9ac896ed5e294ade9f768b5b48 | # Generated by Django 2.1.1 on 2018-12-07 16:07
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the timeline app: TimelineLine and its TimelineItem rows."""

    initial = True

    dependencies = [
    ]

    operations = [
        # A dated entry; linked to its TimelineLine below via AddField.
        migrations.CreateModel(
            name='TimelineItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('delete_flag', models.BooleanField(default=False, verbose_name='是否被删除')),
                ('delete_time', models.DateTimeField(blank=True, null=True, verbose_name='删除时间')),
                ('open_code', models.CharField(blank=True, max_length=32, null=True, unique=True)),
                ('date', models.DateField()),
                ('title', models.CharField(default='', max_length=128)),
                ('content', models.CharField(default='', max_length=128)),
            ],
            options={
                'abstract': False,
            },
        ),
        # A per-year timeline grouping items.
        migrations.CreateModel(
            name='TimelineLine',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('delete_flag', models.BooleanField(default=False, verbose_name='是否被删除')),
                ('delete_time', models.DateTimeField(blank=True, null=True, verbose_name='删除时间')),
                ('open_code', models.CharField(blank=True, max_length=32, null=True, unique=True)),
                ('year', models.PositiveSmallIntegerField()),
                ('title', models.CharField(default='', max_length=128)),
                ('content', models.CharField(default='', max_length=128)),
            ],
            options={
                'abstract': False,
            },
        ),
        # FK added after both models exist; PROTECT stops deleting a line that has items.
        migrations.AddField(
            model_name='timelineitem',
            name='timeline',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='timeline_items', to='timeline.TimelineLine'),
        ),
    ]
|
999,750 | d19eb7bc381a8fa5db90b3a4914f892f72b5043e | import click
# Module-level accumulator for every generated password candidate.
allpass = []
def other_info_callback(ctx, param, value):
    """Placeholder click callback for the --other option; ignores its input."""
    return None
# Lower-case the provided string and strip everything that is not
# alphanumeric (spaces, punctuation, etc.).
def clean_input(a):
    """Return `a` lower-cased with all non-alphanumeric characters removed."""
    return ''.join(filter(str.isalnum, a.lower()))
# Reverse the provided string.
def reverser(a):
    """Return `a` with its characters in reverse order."""
    return ''.join(reversed(a))
#@click.command()
@click.option('--firstname', prompt='Enter target\'s firstname: ',
              help='Target\'s firstname ')
@click.option('--surname', prompt='Enter target\'s surname: ',
              help='Target\'s surname' )
@click.option('--nickname', prompt='Enter target\'s nickname: ',
              help='Target\'s nickname')
@click.option('--date_of_birth',
              prompt='Enter target\'s date of birth in this format dd-mm-yyyy',
              help='Target\'s date of birth',
              type=click.DateTime(formats=['%d-%m-%Y']))
@click.option('--other',
              prompt="Do you have other info about the target? ",
              help='Other useful info about the target ie hobbies, likes, etc',
              type=click.Choice(['yes', 'no']), callback=other_info_callback)
def tooobv(firstname):
    """Add the "too obvious" firstname-only candidates to the global list.

    Bug fix: list.append takes exactly one argument, so the original
    `allpass.append(pass0, pass1, pass2, pass3)` raised TypeError;
    use extend for the batch.
    """
    pass0 = clean_input(firstname)
    pass1 = clean_input(firstname.title())
    pass2 = clean_input(firstname.upper())
    pass3 = clean_input(reverser(firstname))
    allpass.extend((pass0, pass1, pass2, pass3))
def lvl1(firstname, surname):
    """Add first/surname combination candidates (mixed- and upper-case)
    to the global list.

    Bug fix: list.append takes exactly one argument; use extend for the batch.
    """
    pass3 = clean_input(firstname + surname)
    pass4 = clean_input(surname + firstname)
    pass5 = clean_input(firstname.title() + surname.title())
    pass6 = clean_input(surname.title() + firstname.title())
    pass7 = clean_input((firstname + surname).upper())
    pass8 = clean_input((surname + firstname).upper())
    allpass.extend((pass3, pass4, pass5, pass6, pass7, pass8))
def main(firstname='wordlist'):
    """Write every generated candidate to <firstname>.txt, one per line.

    Fixes: the file was opened in binary mode ('wb'), making
    print(str, file=...) raise TypeError; it was also reopened (and
    truncated) on every loop iteration, so only the last candidate
    survived. `firstname` now has a default so the bare `main()` call
    below no longer raises TypeError.
    """
    with open(firstname + '.txt', 'w') as passfile:
        for candidate in allpass:
            print(candidate, file=passfile)

if __name__ == '__main__':
    main()
|
999,751 | 21ecaadd6d2a98c09b942af77bcde75b57683530 | #! /usr/bin/env python
import rospy
from sensor_msgs.msg import LaserScan
from geometry_msgs.msg import Twist
from std_msgs.msg import String
import time
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import numpy as np
import cv2
# Latest minimum laser ranges in metres; seeded large so motion starts enabled.
left=20
front=20
# State-machine token driving the callbacks below
# (start -> left -> front -> stop -> start_scanning_objects -> start_opencv).
command="start"
def go_left(value):
    """Strafe left at 8 m/s while the left laser range exceeds 0.75 m;
    once close enough, stop and advance the state machine to 'front'.
    """
    global command
    msg = Twist()
    state_description = 'going_left'
    if command=='left':
        if(value>0.75):
            rospy.loginfo(state_description)
            msg.linear.y = 8.0
            velocity_pub.publish(msg)
        else:
            # Close enough to the left wall: halt lateral motion, go forward next.
            msg.linear.y=0
            velocity_pub.publish(msg)
            command='front'
def go_front(value):
    """Drive forward at 10 m/s while the front laser range exceeds 1 m;
    once close enough, stop and advance the state machine to 'stop'.
    """
    global command
    msg = Twist()
    state_description = "going_forward"
    if command=='front':
        if(value>1):
            rospy.loginfo(state_description)
            msg.linear.x = 10
            velocity_pub.publish(msg)
        else:
            # Obstacle within 1 m ahead: halt forward motion.
            msg.linear.x=0
            velocity_pub.publish(msg)
            command='stop'
def callback_frontlaser(msg):
    """Laser-scan callback: refresh left/front distances, then dispatch
    the motion routine matching the current state-machine token.
    """
    global command
    global front
    global left
    # Beams 710+ look left; beams 355-365 look straight ahead
    # (assumes a ~720-sample scan -- TODO confirm sensor configuration).
    left = min(msg.ranges[710:])
    front = min(msg.ranges[355:365])
    if command=='start':
        command='left'
    if command=='left':
        go_left(left)
    if command=='front':
        go_front(front)
    if command=='stop':
        command='start_scanning_objects'
def centrefinding(cv_image):
    """Centre the robot on a bright object: threshold the frame, take the
    second contour's centroid, and strafe right (-y) until the centroid is
    within 5 px of the image's horizontal centre.
    """
    imfinal = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
    # Bright regions (>230) become the foreground mask.
    ret,thresh = cv2.threshold(imfinal,230,255,cv2.THRESH_BINARY)
    image, contours, hierarchy= cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    if(len(contours)>1):
        # Contour index 1 is assumed to be the target -- TODO confirm why
        # index 0 is skipped (possibly the full-frame border contour).
        cv_image = cv2.drawContours(cv_image, contours, 1, (0,255,0), 3)
        M = cv2.moments(contours[1])
        cx = int(M['m10']/M['m00'])
        cy = int(M['m01']/M['m00'])
        # Mark the contour centroid (blue) and the frame centre (red).
        cv_image = cv2.circle(cv_image, (cx,cy), radius=5, color=(255,0,0), thickness=-1)
        cv_image = cv2.circle(cv_image, (cv_image.shape[1]//2,cv_image.shape[0]//2), radius=10, color=(0,0,255), thickness=-1)
        a = cv_image.shape[1]//2
        b = cx
        if abs(a-b)<5:
            # Aligned: stop lateral motion.
            v=Twist()
            v.linear.y=0
            velocity_pub.publish(v)
        else:
            v=Twist()
            v.linear.y= -0.5
            velocity_pub.publish(v)
        cv2.imshow("Image window", cv_image)
        cv2.waitKey(1)
    else:
        # No candidate contour found: keep sweeping sideways.
        v=Twist()
        v.linear.y= -0.5
        velocity_pub.publish(v)
        cv2.imshow("Image window", cv_image)
        cv2.waitKey(1)
def load(img_name):
    """Load an image from disk, downscale it to 960x540, and return the
    cropped region of interest (rows 106:488, cols 245:780).
    """
    frame = cv2.imread(img_name, 1)
    frame = cv2.resize(frame, (960, 540))
    # Crop away the margins before any shape/colour analysis.
    return frame[106:488, 245:780]
#if using solid work images and want to crop out some stuff
def cv_to_detect_shape_color():
    """Classify the largest contour in a fixed test image ('cyan.png') by
    shape (via polygon/ellipse/rect heuristics) and colour (via HSV hue),
    returning the (shape, color) string pair.
    """
    img=load('cyan.png')
    #Otherwise
    #img=cv2.imread('b.png')
    img_rgb=img.copy()
    img_hsv=cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Dark-on-light assumption: invert so the object becomes foreground.
    _,thresh = cv2.threshold(gray,200,255,cv2.THRESH_BINARY_INV)
    _, contours, hierarchy= cv2.findContours(thresh,cv2.RETR_TREE,
                           cv2.CHAIN_APPROX_SIMPLE)
    #print(len(contours))
    # Pick the largest contour by area. NOTE(review): if every contour has
    # zero area, `cnt` is never assigned and the code below raises NameError.
    area=0
    for cnts in contours:
        if cv2.contourArea(cnts)>area:
            cnt=cnts
            area=cv2.contourArea(cnts)
    peri=cv2.arcLength(cnt,True)
    epsilon=0.01*peri
    approx=cv2.approxPolyDP(cnt,epsilon,True)
    img=cv2.drawContours(img,[cnt],0,(0,255,0),3)
    x,y,w,h=cv2.boundingRect(cnt)
    _,(wr,hr),_=cv2.minAreaRect(cnt)
    ellipse=cv2.fitEllipse(cnt)
    img = cv2.ellipse(img,ellipse,(0,0,255),2)
    # a, b: fitted ellipse axis lengths, used by the torus heuristic below.
    a=ellipse[1][0]
    b=ellipse[1][1]
    # Shape heuristics, evaluated in priority order.
    if len(approx)<=6 :
        shape="cuboid"
    elif 0.95<w*1.0/h<1.05 :
        shape="sphere"
    elif wr*hr>w*h+0.1*area :
        shape="cone"
    elif w*1.0/h<1.5 :
        shape="it's tall not wide"
    elif abs(0.786*a*b-area)<0.05*area:
        # 0.786 ~ pi/4: ellipse area vs contour area comparison.
        shape="torus"
    else :
        shape="I don't know"
    # Average the object's colour over a filled contour mask.
    mask=np.zeros_like(gray)
    mask=cv2.drawContours(mask,[cnt],0,255,-1)
    cv2.imshow('mask',mask)
    mask=mask/255.0
    num=np.sum(mask)
    r=np.sum(img_rgb[:,:,2]*mask)/num
    g=np.sum(img_rgb[:,:,1]*mask)/num
    b=np.sum(img_rgb[:,:,0]*mask)/num
    color="unidentified"
    if (r-g)**2+(g-b)**2+(b-r)**2<100 :
        # Channels nearly equal: achromatic -- split by brightness.
        bright=np.sum(gray*mask)/num
        if bright<10 :
            color="black"
        else :
            color="gray"
    else :
        img_hsv=img_hsv*1.0
        hue=np.sum(mask*img_hsv[:,:,0])/np.sum(mask)
        # OpenCV hue range is 0-179; red wraps around both ends.
        if hue<=5 or hue>=175 :
            color="red"
        else :
            colors=["green","navy_blue","cyan","yellow","brown"]
            hue_values=[60,120,90,30,15]
            for i in range(5) :
                if hue_values[i]-5<=hue<=hue_values[i]+5 :
                    color=colors[i]
    return shape, color
    # NOTE(review): the three lines below are unreachable (after return).
    cv2.imshow('frame',img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def callback_opencv(data):
    """Camera image callback: once the laser state machine reaches
    'start_scanning_objects', convert each ROS image to OpenCV BGR and run
    the centre-finding routine on it.
    """
    global command
    if command=='start_scanning_objects':
        command='start_opencv'
    if command=='start_opencv':
        bridge = CvBridge()
        cv_image = bridge.imgmsg_to_cv2(data, "bgr8")
        centrefinding(cv_image)
''' elif command=='torus_search':
bridge = CvBridge()
cv_image = bridge.imgmsg_to_cv2(data, "bgr8")
cv_to_detect_shape_color()[0]
def pickup():
global command
if ml_torus_identification_model() == 1:
arm_pickup_mechanism_and_put_on_bot_body()
else:
command='torus_search'
callback_opencv(data)
def see_tag_and_put():
global command
pick_up_from_bot()
cv_to_detect_color()
if cv_to_detect_shape_color()[1] == ml_tag_label():
put_in_rod()
else:
go_front(value)
continue
'''
def main():
    """Initialise the ROS node, wire up the velocity publisher and the
    laser/camera subscribers, then spin until shutdown.
    """
    global command
    global left
    command='start'
    global velocity_pub
    global sub_laser_left
    rospy.init_node('main')
    velocity_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=10)
    sub_laser_front = rospy.Subscriber('/hexbot/laser/scan', LaserScan , callback_frontlaser)
    image_sub = rospy.Subscriber("/hexbot/camera1/image_raw",Image,callback_opencv)
    #image_sub2 = rospy.Subscriber("/hexbot/camera1/image_raw",Image, pickup)
    #image_sub3 = rospy.Subscriber("/hexbot/camera1/image_raw",Image, see_tag_and_put)
    rospy.spin()

if __name__ == '__main__':
    main()
|
999,752 | 265235c666c79685a7c52c8183ceb63c486baf1c | import pandas as pd
import numpy as np
def report_diff(x):
    """Render a pair of cell values: the value itself when unchanged,
    otherwise an "old ---> new" marker string.
    """
    old, new = x[0], x[1]
    if old != new:
        return '{} ---> {}'.format(old, new)
    return old
def has_change(row):
    """Return "Y" if the rendered row contains a diff marker, else "N"."""
    # if '\033[1m + {} ---> {}' in row.to_string():
    return "Y" if '--->' in row.to_string() else "N"
# Load the two STAXI exports to compare cell-by-cell.
staxi1 = pd.read_csv("STAXIExports/STAXI_Test_Data.csv")
staxi2 = pd.read_csv("STAXIExports/STAXI_Test_Data_2.csv")

# pd.Panel was removed in pandas 1.0; build the diff frame directly by
# applying report_diff to each aligned cell pair, column by column.
# Assumes both exports share the same columns and row order -- TODO confirm.
diff_output = pd.DataFrame(
    {col: [report_diff((a, b)) for a, b in zip(staxi1[col], staxi2[col])]
     for col in staxi1.columns}
)
diff_output['has_change'] = diff_output.apply(has_change, axis=1)

# Keep only changed rows and write the score columns of interest.
diff_output[(diff_output.has_change == 'Y')].to_csv('/Users/mplazar/Desktop/Staxi_Dif.csv', index=False, columns=['record_id', 'redcap_event_name', 'staxi_s_ang_tscore', 'staxi_s_ang_f_tscore', 'staxi_s_ang_v_tscore', 'staxi_s_ang_p_tscore', 'staxi_t_ang_tscore', 'staxi_t_ang_t_tscore', 'staxi_t_ang_r_tscore', 'staxi_ax_index_tscore', 'staxi_ax_o_tscore', 'staxi_ax_i_tscore', 'staxi_ac_o_tscore', 'staxi_ac_i_tscore'])
|
999,753 | eca18b44f23cda0997a3526562fd0a0238b09b2e |
class Service:
    def get_key_from_dict(self, key, data_dict, value_else):
        """Return data_dict[key] if the key is present and its value is
        truthy; otherwise return value_else.

        Bug fix: the original indexed `data_dict[key]` directly, raising
        KeyError for a missing key even though a fallback value exists;
        dict.get makes the fallback cover missing keys too.
        """
        return data_dict.get(key) or value_else
|
999,754 | 19ae5cbd9d16665fa5714ec7844a7d6439023d9d | from django.db import models
from django.contrib.auth.models import AbstractUser
# Create your models here.
class Article(models.Model):
    """A publishable article; verbose names are Chinese:
    标题=title, 作者=author, 创建日期=created date, 修改日期=modified date.
    """
    title=models.CharField("标题",max_length=50)
    zhuozhe=models.CharField("作者",max_length=50)
    created_date=models.DateField("创建日期",auto_now_add=True)
    modify_date=models.DateField("修改日期",auto_now=True)
    content=models.TextField()
    # Whether the article is publicly visible.
    is_show=models.BooleanField()
    class Meta:
        db_table="article"
    def __str__(self):
        return self.title
class MyUser(AbstractUser):
    """Custom user extending Django's AbstractUser with a points counter
    (verbose name 积分 = points/credits).
    """
    jifen=models.IntegerField('积分',default=0)
    class Meta:
        db_table='MyUser'
    def __str__(self):
        return self.username
|
999,755 | 6ec317ccf8e54704ffd9d75f2b891f49741dadaf | import math
# Prompt for a radius, then report the circle area and sphere volume.
radius = float(input("Please enter a radius r: "))
area = math.pi * radius ** 2
vol = (4 / 3) * math.pi * radius ** 3
print("The area of a circle with radius r is {} units squared.".format(area))
print("The volume of a sphere with radius r is {} units cubed.".format(vol))
999,756 | 96ba837ab0e682eb5978941f569a7cce19396502 | from django.core.management.base import BaseCommand, CommandError
from stock.models import StockManager
class Command(BaseCommand):
    """Management command that refreshes stock data in the database."""
    help = 'Updates database (use when stocks have been bought or sold)'
    # def add_arguments(self, parser):
    #     parser.add_argument('poll_ids', nargs='+', type=int)
    def handle(self, *args, **options):
        # NOTE(review): passes this Command instance as StockManager.update's
        # first argument -- confirm against the StockManager API.
        return StockManager.update(self)
|
999,757 | 99e8cf91334b5650d7f55894d610d1672b77e310 | # Generated by Django 3.0.3 on 2020-04-10 05:59
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import insane_app.models
class Migration(migrations.Migration):
    """Initial schema for insane_app: categories, groups/memberships, sanity
    ranks, stories (with likes and images), products (with images), profiles.
    """

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # Product category tags.
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20, unique=True)),
            ],
        ),
        # Through-model linking users to groups with a role.
        migrations.CreateModel(
            name='Membership',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('role', models.CharField(choices=[('ad', 'admin'), ('mb', 'member')], max_length=2)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Named rank with a cap on a user's sanity score.
        migrations.CreateModel(
            name='SanityRank',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20, unique=True)),
                ('sanity_cap', models.PositiveIntegerField()),
            ],
        ),
        # User-authored story; author kept nullable so stories survive user deletion.
        migrations.CreateModel(
            name='Story',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=64)),
                ('body', models.TextField(max_length=600)),
                ('like_count', models.PositiveIntegerField(blank=True, default=0)),
                ('dt_created', models.DateTimeField(auto_now_add=True)),
                ('author', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Group of users connected through the Membership through-model.
        migrations.CreateModel(
            name='UserGroup',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=32)),
                ('dt_created', models.DateTimeField(auto_now_add=True)),
                ('members', models.ManyToManyField(related_name='user_group', through='insane_app.Membership', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Image attached to a story.
        migrations.CreateModel(
            name='StoryImage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.ImageField(upload_to=insane_app.models.get_story_image)),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='image', to='insane_app.Story')),
            ],
        ),
        # One-to-one user profile; rank is PROTECTed against deletion.
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('_sanity', models.PositiveIntegerField(default=0)),
                ('rank', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='insane_app.SanityRank')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'db_table': 'insane_user',
                'ordering': ['user'],
            },
        ),
        # Sellable product, optionally backed by a story.
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=64)),
                ('description', models.TextField(max_length=500)),
                ('dt_created', models.DateTimeField(auto_now_add=True)),
                ('price', models.DecimalField(decimal_places=2, max_digits=10)),
                ('categories', models.ManyToManyField(to='insane_app.Category')),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                ('story', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='insane_app.Story')),
            ],
        ),
        # Second FK of the Membership through-model, added once UserGroup exists.
        migrations.AddField(
            model_name='membership',
            name='user_group',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='insane_app.UserGroup'),
        ),
        # One like per (user, story) pair.
        migrations.CreateModel(
            name='StoryLike',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('story', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='insane_app.Story')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'unique_together': {('user', 'story')},
            },
        ),
        # Image attached to a product; each (image, product) pair unique.
        migrations.CreateModel(
            name='ProductImage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.ImageField(upload_to=insane_app.models.get_product_image)),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='insane_app.Product')),
            ],
            options={
                'unique_together': {('image', 'product')},
            },
        ),
        # A user may belong to a group at most once.
        migrations.AlterUniqueTogether(
            name='membership',
            unique_together={('user', 'user_group')},
        ),
    ]
|
999,758 | 77418339080e1cd482adbe67c40ec0209369082f | # -*- coding: UTF-8 -*-
# Code generated by lark suite oapi sdk gen
from typing import *
from ....api import Request, Response, set_timeout, set_tenant_key, set_user_access_token, set_path_params, \
set_query_params, set_response_stream, set_is_response_stream, FormData, FormDataFile
from ....config import Config
from ....consts import ACCESS_TOKEN_TYPE_TENANT, ACCESS_TOKEN_TYPE_USER, ACCESS_TOKEN_TYPE_APP
from .model import *
class Service(object):
    """Root entry point for the image API; exposes the `images` sub-service."""
    def __init__(self, conf):
        # type: (Config) -> None
        self.conf = conf
        self.images = ImageService(self)
class ImageService(object):
    """Builder of image download (get) and upload (put) request calls."""
    def __init__(self, service):
        # type: (Service) -> None
        self.service = service

    def get(self, tenant_key=None, response_stream=None, timeout=None):
        # type: (str, Union[None, IO], int) -> ImageGetReqCall
        """Build an image-download call, collecting per-request options."""
        request_opts = []  # type: List[Callable[[Any], Any]]

        if timeout is not None:
            request_opts += [set_timeout(timeout)]

        if tenant_key is not None:
            request_opts += [set_tenant_key(tenant_key)]

        if response_stream is not None:
            request_opts += [set_response_stream(response_stream)]

        return ImageGetReqCall(self, request_opts=request_opts)

    def put(self, tenant_key=None, timeout=None):
        # type: (str, int) -> ImagePutReqCall
        """Build an image-upload call, collecting per-request options."""
        request_opts = []  # type: List[Callable[[Any], Any]]

        if timeout is not None:
            request_opts += [set_timeout(timeout)]

        if tenant_key is not None:
            request_opts += [set_tenant_key(tenant_key)]

        return ImagePutReqCall(self, request_opts=request_opts)
class ImageGetReqCall(object):
    """Fluent request object for GET /open-apis/image/v4/get (streamed body)."""
    def __init__(self, service, request_opts=None):
        # type: (ImageService, List[Any]) -> None
        self.service = service
        self.query_params = {}  # type: Dict[str, Any]
        if request_opts:
            self.request_opts = request_opts
        else:
            self.request_opts = []  # type: List[Any]

    def set_image_key(self, image_key):
        # type: (str) -> ImageGetReqCall
        """Set the key of the image to download; returns self for chaining."""
        self.query_params['image_key'] = image_key
        return self

    def do(self):
        # type: () -> Response[None]
        """Execute the request with tenant auth; the image arrives as a stream."""
        root_service = self.service.service
        conf = root_service.conf

        self.request_opts += [set_query_params(self.query_params)]
        self.request_opts += [set_is_response_stream()]

        req = Request('/open-apis/image/v4/get', 'GET', [ACCESS_TOKEN_TYPE_TENANT],
                      None, request_opts=self.request_opts)
        resp = req.do(conf)
        return resp
class ImagePutReqCall(object):
    """Fluent request object for POST /open-apis/image/v4/put (multipart upload)."""
    def __init__(self, service, request_opts=None):
        # type: (ImageService, List[Any]) -> None
        self.service = service
        self.body = FormData()
        if request_opts:
            self.request_opts = request_opts
        else:
            self.request_opts = []  # type: List[Any]

    def set_image(self, image):
        # type: (IO[Any]) -> ImagePutReqCall
        """Attach the image file stream to the form body; returns self."""
        self.body.add_file('image', FormDataFile(image))
        return self

    def set_image_type(self, image_type):
        # type: (str) -> ImagePutReqCall
        """Set the image type form field; returns self for chaining."""
        self.body.add_param('image_type', image_type)
        return self

    def do(self):
        # type: () -> Response[Image]
        """Execute the upload with tenant auth; response parses into Image."""
        root_service = self.service.service
        conf = root_service.conf

        req = Request('/open-apis/image/v4/put', 'POST', [ACCESS_TOKEN_TYPE_TENANT], self.body, output_class=Image , request_opts=self.request_opts)
        resp = req.do(conf)
        return resp
|
999,759 | d19b44bf32cc3121c99f9b2fb4a2f44b98e3c56e | # Выведите все элементы списка с четными индексами
# Print every element of the list that has an even index
# (that is A[0], A[2], A[4], ...).
input_list = list(map(int, input().split(" ")))
# A slice with step 2 is the idiomatic way to take the even-indexed
# elements; it replaces the manual index-parity loop and the temporary list.
for item in input_list[::2]:
    print(item, end=' ')
|
999,760 | 755c893a4acc31987de9ddf3f207d6038e7b3f18 | #!/usr/bin/env python
import random
import sys
import optparse
from trie import Trie
from watchdog import Watchdog
# Word list used to build the lookup trie (Debian/Ubuntu wordlist package path).
DICTIONARY_FILE="/usr/share/dict/american-english-small"
# Both cases listed so membership tests work without normalising first.
VOWELS="aeiouAEIOU"
def find_match(word):
    """Return the first real word among the variants of *word*, or an
    explanatory sentinel string when nothing matches or the search times out.

    Relies on the module-level `dictionary` trie built by create_dictionary().
    """
    try:
        # Watchdog(5) bounds the variant search - presumably a 5-second
        # timeout that raises Watchdog itself; confirm in the watchdog module.
        with Watchdog(5):
            matches = [candidate for candidate in var_iterative(word)
                       if candidate in dictionary]
    except Watchdog:
        return "WORD IS TOO COMPLEX: %s" % (word)
    if matches:
        return matches[0]
    return "NO SUGGESTION: %s" % (word)
# This function is not actually used.
def var_recursive(word):
    """Recursively enumerate every variant of *word*: each letter in either
    case, any vowel swapped for any vowel, and a doubled letter optionally
    collapsed. Exponential in word length (no dictionary pruning) - kept for
    reference; var_iterative below is the live implementation.
    """
    def gen_set(char):
        # Prepend both cases of `char` to every variant of the remainder.
        words = set()
        for postfix in var_recursive(word[1:]):
            for c in (char.lower(), char.upper()):
                words.add(c + postfix)
        return words
    if not len(word):
        return set([""])
    else:
        words = set()
        char = word[0]
        next_char = word[1] if len(word) >= 2 else None
        if char == next_char:
            # Repeated letter: also try the word with this occurrence dropped.
            words |= var_recursive(word[1:])
        if char.lower() in VOWELS:
            for v in VOWELS:
                words |= gen_set(v)
        else:
            words |= gen_set(char)
        return words
def var_iterative(word):
    """Breadth-first variant search: extend the surviving prefixes one
    character at a time, keeping only prefixes recognised by the dictionary
    trie (dictionary.has_prefix), which keeps the candidate set small.
    """
    prefixes = {''}
    prev_char = ''
    for char in word:
        extensions = {char, char.swapcase()}
        if char == prev_char:
            # A repeated letter may also be dropped entirely.
            extensions.add('')
        if char.lower() in VOWELS:
            extensions.update(VOWELS)
        prefixes = {prefix + ext for prefix in prefixes for ext in extensions
                    if dictionary.has_prefix(prefix + ext)}
        prev_char = char
    return prefixes
def mangle(word, case_prob=0.6, dup_prob=0.1, vowel_prob=0.3, max_dups=3):
    """Return a misspelt variant of *word*.

    Each character may independently have its case flipped, be duplicated
    2..max_dups times, or - if it is a vowel - be replaced by a random vowel.
    Duplication and vowel substitution are mutually exclusive per character.
    The probabilities were previously hard-coded locals; they are now
    keyword parameters with the same defaults, so existing callers behave
    identically (the random-call sequence is unchanged).
    """
    mangled_chars = []
    for char in word:
        if random.random() < case_prob:
            char = char.swapcase()
        if random.random() < dup_prob:
            char = char * random.randint(2, max_dups)
        elif char in VOWELS and random.random() < vowel_prob:
            char = random.choice(list(VOWELS))
        mangled_chars.append(char)
    return ''.join(mangled_chars)
def create_dictionary():
    """(Re)build the module-level `dictionary` trie from DICTIONARY_FILE."""
    global dictionary
    with open(DICTIONARY_FILE) as word_file:
        dictionary = Trie()
        for line in word_file:
            dictionary.add(line.strip())
def spell_correct(quiet):
    """Interactive loop: read one word per line and print the best correction.

    Python 2 only (raw_input / print statements). `quiet` suppresses the
    '>' prompt so the tool can be driven through a pipe.
    """
    prompt = '' if quiet else '>'
    create_dictionary()
    while True:
        try:
            word = raw_input(prompt)
            print find_match(word)
            sys.stdout.flush()
        except (EOFError, KeyboardInterrupt):
            # Ctrl-D / Ctrl-C: print a final newline and exit cleanly.
            print ""
            break
def spell_mangle():
    """Print an endless stream of randomly misspelt dictionary words
    (useful as test input for spell_correct). Python 2 only; Ctrl-C stops.

    Builds a local word set - independent of the global trie used by the
    corrector.
    """
    with open(DICTIONARY_FILE) as f:
        dictionary = {word.strip() for word in f}
    while True:
        try:
            # random.sample of size 1 picks one word from the set.
            word = random.sample(dictionary, 1)[0]
            print mangle(word)
            sys.stdout.flush()
        except KeyboardInterrupt:
            break
def parse_args():
    """Define and parse the command-line options for this tool."""
    parser = optparse.OptionParser()
    parser.add_option(
        "-m", "--mangle",
        dest="mangle",
        action="store_true",
        default=False,
        help="generate misspelt words",
    )
    parser.add_option(
        "-q", "--quiet",
        dest="quiet",
        action="store_true",
        default=False,
        help="do not print '>' character at start of each line",
    )
    return parser.parse_args()
def main():
    """Entry point: run the mangler with -m/--mangle, else the corrector."""
    options, _ = parse_args()
    if options.mangle:
        spell_mangle()
    else:
        spell_correct(options.quiet)


if __name__ == "__main__":
    main()
|
999,761 | f06b855391a56d1bab0e1d76f89d2f97caf0ebf5 | # _*_ coding: utf-8 _*_
#!c:/Python36
#Filename: ReportPage.py
from test_case.page_obj.LoginPage import LoginPage
from test_case.page_obj.SearchPage import SearchPage
# from test_case.page_obj.ReportPage import ReportPage
from test_case.page_obj.MenuBar import MenuBar
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchAttributeException, NoSuchElementException
from selenium.webdriver.support import select
import time
class EditPage(SearchPage):
    """Page object for the record-edit screen; all behaviour is inherited
    from SearchPage. `url` is blank - the page appears to be reached by
    in-app navigation rather than a direct URL (confirm).
    """
    url = ''
if __name__ == '__main__':
    # BUG FIX: ReportPage is used below but its module import was commented
    # out at the top of the file, so this script crashed with NameError.
    # Import it here, scoped to the self-test block.
    from test_case.page_obj.ReportPage import ReportPage

    # NOTE(review): this rebinds the module name `webdriver` to a driver
    # instance; kept as-is to avoid disturbing the references below.
    webdriver = webdriver.Firefox()
    webdriver.maximize_window()
    webdriver.implicitly_wait(10)
    # Log in and walk the menu tree to the target report.
    login_page = LoginPage(webdriver)
    login_page.login()
    menu_bar = MenuBar(webdriver)
    menu_bar.wait_UI(menu_bar.menu_button_loc)
    menu_bar.action_toggle_menu()
    time.sleep(1)
    menu_bar.action_expand_app_group('Supply Chain Advantage')
    menu_bar.action_expand_menu('Advantage Dashboard')
    menu_bar.action_expand_menu('Receiving')
    menu_bar.action_expand_menu('Unknown Receipts')
    menu_bar.action_expand_menu('Resolve Unknown Receipts', False)
    reportPage = ReportPage(webdriver)
    reportPage.wait_page('Resolve Unknown Receipts')
    time.sleep(3)
    reportPage.action_cell_click(1, 1)
    editPage = SearchPage(webdriver)
    # NOTE(review): 'Resovle' looks misspelt but may deliberately match the
    # application's own page title - confirm before changing it.
    editPage.wait_page('Edit Resovle Unknown Receipt')
    # print(editPage.get_all_labels_name(2))
    editPage.action_dropdown_select('Carrier Name', 'DHL', 1)
    editPage.action_dropdown_select('Status', 'Resolved', 2)
    editPage.action_multiedit_input('Resolution Comment', 'comment', 2)
    editPage.action_page_click_button('Delete')
    print(editPage.get_infodialog_header())
    print(editPage.get_infodialog_title())
    print(editPage.get_infodialog_message())
    editPage.action_infodialog_click_button('Cancel')
|
999,762 | 3fa38c34800bdd3c28c078140bad33fee861306b | #! /bin/usr/env python3
#
# Author: Dal Whelpley
# Project: The following statement calls a function named half, which returns
# a value that is half that of the argument. (Assume the number
# variable references a float value.) Write code for the function.
# result = half(number)
# Date:5/7/2019
def half(num):
    """Return half of *num* (true division, so odd ints yield floats)."""
    return num / 2
def main():
    """Prompt for a number and display its half to two decimal places."""
    # The assignment states `number` references a float, but int() rejected
    # inputs like "7.5" - read the value as float. Also fixes the typos in
    # the user-facing strings ("nunmber", "he halfed").
    num = float(input("Enter a number to be halved: "))
    print("Your number halved is {:.2f}".format(half(num)))


if __name__ == "__main__":
    main()
999,763 | 4def274583f736600c131f95c5ec0ac67a463aca | import discord
from discord.ext import commands
import time
from synthesize import synthesize
import multiprocessing
if __name__ == '__main__':
    # BUG FIX: the original line was `TOKEN = # Stream Key Goes Here`, which
    # is a SyntaxError (nothing on the right-hand side). Keep the secret out
    # of source control and fill it in locally.
    TOKEN = ''  # Bot token goes here
    client = commands.Bot(command_prefix='!')

    @client.event
    async def on_ready():
        print('Bot online.')

    @client.command(pass_context=True)
    async def make(ctx, *, sentance):
        """Synthesize the given sentence to a wav file, then acknowledge."""
        sen = str(sentance)
        print(sen)
        # Run the synthesis in a worker process; join() waits for it to
        # finish before the bot replies.
        p1 = multiprocessing.Process(target=synthesize, args=(sen,))
        p1.start()
        p1.join()
        await ctx.send("Ready")

    @client.command(pass_context=True)
    async def say(ctx):
        """Join the caller's voice channel and play the generated wav."""
        channel = ctx.message.author.voice.channel
        vc = await channel.connect()
        guild = ctx.message.guild
        voice_client = guild.voice_client
        # NOTE(review): hard-coded user-specific ffmpeg and wav paths - move
        # these to configuration before running anywhere else.
        voice_client.play(discord.FFmpegPCMAudio(executable='C:/Users/Leighton Waters/Documents/ffmpeg/bin/ffmpeg.exe', source='C:/Users/Leighton Waters/Desktop/WOWSC/TeoBot/1.wav'), after=lambda e: print('done', e))
        while voice_client.is_playing():
            time.sleep(1)
        voice_client.stop()
        await voice_client.disconnect()

    client.run(TOKEN)
|
999,764 | 701adc24e8181461e65515164f82c8c396fddb37 | lista = list()
while True:
    continuar = str(input("Quer continuar? S ou N ")).upper()
    if continuar == "S":
        lista.append(int(input("Digite um número: ")))
    elif continuar == "N":
        # `elif` flattens the original nested `else: if` - any answer other
        # than S/N simply re-asks, exactly as before.
        break
print(f"Foram digitados: {len(lista)} números")
lista.sort(reverse=True)
print(lista)
if 5 in lista:
    print("O valor 5 foi digitado e está na lista!")
else:
    print("O valor 5 não foi digitado!")
999,765 | f91498d98e5da2d41760f6e8739d1c38da3ef719 | SIMPLE_SETTINGS = {
'OVERRIDE_BY_ENV': True
}
# Example setting; the u-prefix keeps the literal unicode under Python 2.
MY_VAR = u'Some Value'
|
999,766 | dbfcf7bc6639c1ce8497d65cd601bd2aa908b8c5 | import base128variant
import zigzag
from core import WireType, ValueType
class String(ValueType):
    """Length-delimited string field: a varint length prefix followed by the
    raw payload (protobuf-style wire format).
    """
    @classmethod
    def get_wire_type(cls):
        # NOTE(review): 'LENGHT_DELIMITED' is misspelt, but it must match the
        # member declared on core.WireType - fix both together or not at all.
        return WireType.LENGHT_DELIMITED

    @classmethod
    def new_default_value(cls):
        return ""

    @classmethod
    def dumps_value(cls, value):
        # Debug/text representation, not the wire encoding.
        return '"{0}"'.format(value)

    @classmethod
    def pack_value(cls, value):
        # Assumes `value` is a byte string (Python 2 str) so the varint
        # prefix and the payload concatenate directly - TODO confirm for py3.
        return base128variant.pack(len(value)) + value

    @classmethod
    def unpack_stream(cls, bytes, offset):
        # Read the length prefix, then slice that many bytes as the value.
        value_len, offset = base128variant.unpack(bytes, offset)
        value = bytes[offset:offset + value_len]
        offset += value_len
        return value, offset
class Integer(ValueType):
    """Signed integer field: zigzag-encoded, then packed as a base-128
    varint (so values near zero stay short on the wire).
    """
    @classmethod
    def get_wire_type(cls):
        return WireType.VARIANT

    @classmethod
    def new_default_value(cls):
        return 0

    @classmethod
    def pack_value(cls, value):
        return base128variant.pack(zigzag.encode(value))

    @classmethod
    def unpack_stream(cls, bytes, offset):
        encoded, new_offset = base128variant.unpack(bytes, offset)
        return zigzag.decode(encoded), new_offset
class Unsigned(ValueType):
    """Non-negative integer field packed directly as a base-128 varint
    (no zigzag step, unlike Integer).
    """
    @classmethod
    def get_wire_type(cls):
        return WireType.VARIANT

    @classmethod
    def new_default_value(cls):
        return 0

    @classmethod
    def pack_value(cls, value):
        # Negative input is a programming error here; note that asserts are
        # stripped under `python -O`.
        assert(value >= 0)
        return base128variant.pack(value)

    @classmethod
    def unpack_stream(cls, bytes, offset):
        unpacked, next_offset = base128variant.unpack(bytes, offset)
        return unpacked, next_offset
class Enum(Integer):
    """Enum field - wire-identical to Integer (zigzag varint); presumably a
    distinct class so schemas can declare intent (confirm with consumers).
    """
    pass
999,767 | b1c8a2cebb4b03ebadb2c60b3c1438d8bf36fb02 | import os
from datetime import date
import configparser
import requests
from bs4 import BeautifulSoup as bs
from slackclient import SlackClient
# Target Slack channel id (the commented value looks like a previous
# channel, kept for reference).
#slack_channel = "CEKB88A1Y"
slack_channel = "GDQ7JPD8U"
# Strip pages live at BASE_URL + YYYY-MM-DD.
BASE_URL = "https://dilbert.com/strip/"
# NOTE(review): never read anywhere in this module - candidate for removal.
NEWEST_COMIC = date.today()
def get_today():
    """Return today's date as a `datetime.date`."""
    return date.today()
def get_comic_strip_url(start_date):
    """Return a single-element list holding the strip URL for *start_date*
    (BASE_URL + ISO date, i.e. https://dilbert.com/strip/YYYY-MM-DD).
    """
    return [BASE_URL + str(start_date)]
def get_image_comic_url(session, response):
    """Extract the comic image URL from a fetched strip page.

    Returns the first <img src> found under
    div.img-comic-container > a.img-comic-link, or None when no such node
    exists (the loops fall through). `session` is unused here - apparently
    kept for interface symmetry with the caller; confirm before removing.
    """
    soup = bs(response.text, 'lxml')
    for div in soup.find_all('div', class_="img-comic-container"):
        for a in div.find_all('a', class_="img-comic-link"):
            for img in a.find_all('img', src=True):
                # src appears to be protocol-relative (hence the "https:"
                # prefix) - confirm against a live page.
                return "https:" + img['src']
def download_dilbert(s, u):
    """Fetch the image at URL *u* with session *s* and save it as
    comicfile.jpg in the working directory.
    """
    with open("comicfile.jpg", "wb") as out_file:
        reply = s.get(u)
        out_file.write(reply.content)
def post_to_slack(slack_client, comic):
    """Post the comic image URL as a plain message to the module-level channel."""
    slack_client.api_call("chat.postMessage", channel=slack_channel, text=comic, as_user = True)
def download_engine(fcsd):  # fcsd = first comic strip date
    """Resolve the comic strip page(s) for *fcsd* and return the image URL
    of the last one processed. Saving to disk is intentionally disabled.
    """
    strip_urls = get_comic_strip_url(fcsd)
    for strip_url in strip_urls:
        session = requests.Session()
        page = session.get(strip_url)
        download_url = get_image_comic_url(session, page)
        # download_dilbert(session, download_url)
    return download_url
def load_config(config_file, config_section):
    """Return the Slack API token.

    Reads `token` from *config_section* of *config_file* located next to
    this module; falls back to the `token` environment variable when the
    file does not exist (raising KeyError if that is unset too).
    """
    dir_path = os.path.dirname(os.path.realpath(__file__))
    config_path = os.path.join(dir_path, config_file)
    if os.path.isfile(config_path):
        config = configparser.ConfigParser()
        # BUG FIX: existence was checked on the absolute path but the file
        # was read via the bare name, so running from a different CWD made
        # the read silently find nothing. Read the exact path we checked.
        config.read(config_path)
        slack_token = config.get(config_section, 'token')
    else:
        slack_token = os.environ['token']
    return slack_token
def main():
    """Wire everything together: load credentials, resolve today's comic
    URL, and post it to Slack.
    """
    slack_token = load_config('config.ini', 'dev')
    slack_client = SlackClient(slack_token)
    comic_url = download_engine(get_today())
    post_to_slack(slack_client, comic_url)


if __name__ == '__main__':
    main()
|
999,768 | 9796ff581988fb6c8fb54231ea413b4cea23d05d | '''
produce letter-3-gram representations
'''
with open('../../dataset/chunk/train.txt') as tr:
    train_sets = tr.read().split('\n')
with open('../../dataset/chunk/test.txt') as te:
    test_sets = te.read().split('\n')

# Count letter-3-gram frequencies over the lowercased, '#'-padded words
# (the first whitespace token of each "word tag chunktag" line).
l3g = {}
for data in [train_sets, test_sets]:
    for word_pos_chunk in data:
        if word_pos_chunk:
            word, tag, chunktag = word_pos_chunk.split()
            word = '#' + word.lower() + '#'
            # dict.get collapses the original if/else counting branch.
            for i in range(len(word) - 2):
                trigram = word[i:i + 3]
                l3g[trigram] = l3g.get(trigram, 0) + 1

sort_l3g = sorted(l3g.items(), key=lambda x: x[1], reverse=True)

# BUG FIX: the two output files were opened but never closed, so buffered
# output could be lost; `with` guarantees flush-and-close.
with open('./l3g.txt', 'w') as plain_out:
    for trigram in l3g:
        plain_out.write(trigram + '\n')
with open('./sorted_l3g.txt', 'w') as sorted_out:
    for pair in sort_l3g:
        sorted_out.write(str(pair) + '\n')
|
999,769 | fbb30c3109d3a7e58e70227b5270a7fe5bd366f6 | import torch
from torch import nn
import torchvision.datasets as dset
import numpy as np
import logging
import argparse
import time
import os
from model import EDNetV2 as EDNet
from trainer import Trainer
from data import get_ds
from utils import _logger, _set_file
from my_utils import ModelTools
from torch.utils.data.sampler import SubsetRandomSampler
class Config(object):
    """Hyper-parameter bundle for the architecture-search run; read by the
    script below when building the data loaders and the Trainer.
    """
    num_cls_used = 0  # 0 -> fall back to all 10 CIFAR-10 classes (see model construction below)
    init_theta = 1.0
    alpha = 0.2
    beta = 0.6
    speed_f = './speed_cpu.txt'
    # weight-optimiser settings (w_*)
    w_lr = 0.001 #0.65 #0.1
    w_mom = 0.9
    w_wd = 3e-5 #1e-4
    # architecture-parameter optimiser settings (t_*)
    t_lr = 0.01
    t_wd = 5e-4
    t_beta = (0.9, 0.999)
    # temperature annealing used by the Trainer during the search
    init_temperature = 5.0
    temperature_decay = 0.956
    model_save_path = '/data/limingyao/model/nas/ednas/'
    total_epoch = 250
    start_w_epoch = 2  # passed to trainer.search below
    train_portion = 0.8  # fraction of the CIFAR train split used for weight updates
    width_mult = 0.75
    kernel_size = [3, 5, 7]
    target_lat = 260e3
    valid_interval = 1
    lr_scheduler_params = {
        'logger' : _logger,
        'T_max' : 400,
        'alpha' : 1e-4,
        'warmup_step' : 100,
        't_mul' : 1.5,
        'lr_mul' : 0.98,
    }
# ---- run setup: CLI arguments, dated output directory, logging ----
config = Config()
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(description="Train a model with data parallel for base net \
    and model parallel for classify net.")
parser.add_argument('--batch-size', type=int, default=256,
                    help='training batch size of all devices.')
parser.add_argument('--epochs', type=int, default=200,
                    help='number of training epochs.')
parser.add_argument('--log-frequence', type=int, default=400,
                    help='log frequence, default is 400')
parser.add_argument('--gpus', type=str, default='0',
                    help='gpus, default is 0')
parser.add_argument('--load-model-path', type=str, default=None,
                    help='re_train, default is None')
parser.add_argument('--num-workers', type=int, default=4,
                    help='number of subprocesses used to fetch data, default is 4')
args = parser.parse_args()
# Checkpoints and logs go in a per-day subdirectory of the configured path.
args.model_save_path = '%s/%s/' % \
    (config.model_save_path, time.strftime('%Y-%m-%d', time.localtime(time.time())))
if not os.path.exists(args.model_save_path):
    _logger.warn("{} not exists, create it".format(args.model_save_path))
    os.makedirs(args.model_save_path)
_set_file(args.model_save_path + 'log.log')

# ---- CIFAR-10 pipeline: augmentation plus an index-based train/val split ----
import torchvision.transforms as transforms
CIFAR_MEAN = [0.49139968, 0.48215827, 0.44653124]
CIFAR_STD = [0.24703233, 0.24348505, 0.26158768]
train_transform = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
])
train_data = dset.CIFAR10(root='/data/limingyao/.torch/datasets', train=True,
                          download=False, transform=train_transform)
num_train = len(train_data)
indices = list(range(num_train))
# The first `train_portion` of the indices feed weight updates; the
# remainder validates the architecture parameters.
split = int(np.floor(config.train_portion * num_train))
train_queue = torch.utils.data.DataLoader(
    train_data, batch_size=args.batch_size,
    sampler=SubsetRandomSampler(indices[:split]),
    pin_memory=True, num_workers=16)
val_queue = torch.utils.data.DataLoader(
    train_data, batch_size=args.batch_size,
    sampler=SubsetRandomSampler(indices[split:]),
    pin_memory=True, num_workers=8)

# ---- model construction and the search loop ----
model = EDNet(num_classes=config.num_cls_used if config.num_cls_used > 0 else 10,)
if args.load_model_path is not None:
    ModelTools.load_model(model, args.load_model_path)
trainer = Trainer(network=model,
                  w_lr=config.w_lr,
                  w_mom=config.w_mom,
                  w_wd=config.w_wd,
                  init_temperature=config.init_temperature,
                  temperature_decay=config.temperature_decay,
                  logger=_logger,
                  # BUG FIX: `lr_scheduler_params` is an attribute of Config,
                  # not a module-level name - the bare reference raised
                  # NameError before the search could even start.
                  lr_scheduler=config.lr_scheduler_params,
                  gpus=args.gpus,
                  model_save_path=args.model_save_path)
trainer.search(train_queue, val_queue,
               total_epoch=config.total_epoch,
               start_w_epoch=config.start_w_epoch,
               log_frequence=args.log_frequence)
|
999,770 | 6ff4df29b8aafe240bd60f0c3ff861bda81ca0be | # https://leetcode.com/problems/water-bottles/submissions/
class Solution:
    def numWaterBottles(self, numBottles: int, numExchange: int) -> int:
        """Total bottles drinkable when numExchange empties trade for one
        full bottle (LeetCode 1518, "Water Bottles").
        """
        consumed = numBottles
        empties = numBottles
        while empties >= numExchange:
            refilled = empties // numExchange
            consumed += refilled
            # Leftover empties carry over, plus the newly emptied refills.
            empties = refilled + empties % numExchange
        return consumed
def main():
    """Ad-hoc check: 15 bottles with 4 empties per exchange (expects 19)."""
    solver = Solution()
    print(solver.numWaterBottles(15, 4))


if __name__ == '__main__':
    main()
|
999,771 | f13427904a4672cfd442e92b0a1b1c56919fa8e0 | __author__ = 'zhaoyimeng'
# Module-level X; func() below rebinds this same name.
X = 88


def func():
    """Demonstrate `global`: the assignment rebinds the module-level X
    instead of creating a function-local name.
    """
    global X
    X = 88
|
999,772 | 8d2d7eb052591d94bac8556179aa4deb5d065690 | #------------------------------------------------------------------------------
# filename : duplicate.py
# author : Ki-Hwan Kim (kh.kim@kiaps.org)
# affilation: KIAPS (Korea Institute of Atmospheric Prediction Systems)
# update : 2014.3.25 start
# 2016.8.25 fix the relative import path
#
#
# description:
# check the duplicated points
#
# subroutines:
# duplicate_idxs()
# remove_duplicates()
#------------------------------------------------------------------------------
import numpy as np
import sys
from os.path import abspath, dirname
# Make this directory and its parent importable so the sibling `misc`
# package resolves no matter where the script is launched from.
current_dpath = dirname(abspath(__file__))
sys.path.extend([current_dpath,dirname(current_dpath)])
from misc.compare_float import feq
def duplicate_idxs(xyzs, digit=15):
    """Return indices of points in *xyzs* that duplicate an earlier point,
    comparing each coordinate with feq() to `digit` significant digits.

    An index can appear more than once when it duplicates several earlier
    points; O(n^2) pairwise comparison.
    """
    count = len(xyzs)
    found = list()
    for i in range(count):
        xi, yi, zi = xyzs[i]
        for j in range(i + 1, count):
            xj, yj, zj = xyzs[j]
            if feq(xi, xj, digit) and feq(yi, yj, digit) and feq(zi, zj, digit):
                found.append(j)
    return found
def remove_duplicates(xyzs, digit=15):
    """Return *xyzs* with duplicate points removed (first occurrence wins),
    using the feq()-based comparison from duplicate_idxs.
    """
    dup_idxs = set(duplicate_idxs(xyzs, digit))
    return [xyz for seq, xyz in enumerate(xyzs) if seq not in dup_idxs]
|
999,773 | 8934dc32c6cde06f0569d0590f761cb01ffb553f | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'AiDatasetEncryptionSpec',
'AiEndpointDeployedModel',
'AiEndpointDeployedModelAutomaticResource',
'AiEndpointDeployedModelDedicatedResource',
'AiEndpointDeployedModelDedicatedResourceAutoscalingMetricSpec',
'AiEndpointDeployedModelDedicatedResourceMachineSpec',
'AiEndpointDeployedModelPrivateEndpoint',
'AiEndpointEncryptionSpec',
'AiFeatureStoreEncryptionSpec',
'AiFeatureStoreEntityTypeIamBindingCondition',
'AiFeatureStoreEntityTypeIamMemberCondition',
'AiFeatureStoreEntityTypeMonitoringConfig',
'AiFeatureStoreEntityTypeMonitoringConfigCategoricalThresholdConfig',
'AiFeatureStoreEntityTypeMonitoringConfigImportFeaturesAnalysis',
'AiFeatureStoreEntityTypeMonitoringConfigNumericalThresholdConfig',
'AiFeatureStoreEntityTypeMonitoringConfigSnapshotAnalysis',
'AiFeatureStoreIamBindingCondition',
'AiFeatureStoreIamMemberCondition',
'AiFeatureStoreOnlineServingConfig',
'AiFeatureStoreOnlineServingConfigScaling',
'AiIndexDeployedIndex',
'AiIndexIndexStat',
'AiIndexMetadata',
'AiIndexMetadataConfig',
'AiIndexMetadataConfigAlgorithmConfig',
'AiIndexMetadataConfigAlgorithmConfigBruteForceConfig',
'AiIndexMetadataConfigAlgorithmConfigTreeAhConfig',
'AiMetadataStoreEncryptionSpec',
'AiMetadataStoreState',
'AiTensorboardEncryptionSpec',
'GetAiIndexDeployedIndexResult',
'GetAiIndexIndexStatResult',
'GetAiIndexMetadataResult',
'GetAiIndexMetadataConfigResult',
'GetAiIndexMetadataConfigAlgorithmConfigResult',
'GetAiIndexMetadataConfigAlgorithmConfigBruteForceConfigResult',
'GetAiIndexMetadataConfigAlgorithmConfigTreeAhConfigResult',
]
@pulumi.output_type
class AiDatasetEncryptionSpec(dict):
    # NOTE: generated by the Pulumi Terraform bridge (tfgen) - regenerate
    # rather than editing by hand (see the file header). The dict carries
    # camelCase wire keys; __key_warning steers callers to the snake_case
    # property getters.
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "kmsKeyName":
            suggest = "kms_key_name"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AiDatasetEncryptionSpec. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        AiDatasetEncryptionSpec.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        AiDatasetEncryptionSpec.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 kms_key_name: Optional[str] = None):
        """
        :param str kms_key_name: Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource.
               Has the form: projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key. The key needs to be in the same region as where the resource is created.
        """
        if kms_key_name is not None:
            pulumi.set(__self__, "kms_key_name", kms_key_name)

    @property
    @pulumi.getter(name="kmsKeyName")
    def kms_key_name(self) -> Optional[str]:
        """
        Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource.
        Has the form: projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key. The key needs to be in the same region as where the resource is created.
        """
        return pulumi.get(self, "kms_key_name")
@pulumi.output_type
class AiEndpointDeployedModel(dict):
    # NOTE: generated by the Pulumi Terraform bridge (tfgen) - regenerate
    # rather than editing by hand (see the file header). The dict carries
    # camelCase wire keys; __key_warning steers callers to the snake_case
    # property getters.
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "automaticResources":
            suggest = "automatic_resources"
        elif key == "createTime":
            suggest = "create_time"
        elif key == "dedicatedResources":
            suggest = "dedicated_resources"
        elif key == "displayName":
            suggest = "display_name"
        elif key == "enableAccessLogging":
            suggest = "enable_access_logging"
        elif key == "enableContainerLogging":
            suggest = "enable_container_logging"
        elif key == "modelVersionId":
            suggest = "model_version_id"
        elif key == "privateEndpoints":
            suggest = "private_endpoints"
        elif key == "serviceAccount":
            suggest = "service_account"
        elif key == "sharedResources":
            suggest = "shared_resources"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AiEndpointDeployedModel. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        AiEndpointDeployedModel.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        AiEndpointDeployedModel.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 automatic_resources: Optional[Sequence['outputs.AiEndpointDeployedModelAutomaticResource']] = None,
                 create_time: Optional[str] = None,
                 dedicated_resources: Optional[Sequence['outputs.AiEndpointDeployedModelDedicatedResource']] = None,
                 display_name: Optional[str] = None,
                 enable_access_logging: Optional[bool] = None,
                 enable_container_logging: Optional[bool] = None,
                 id: Optional[str] = None,
                 model: Optional[str] = None,
                 model_version_id: Optional[str] = None,
                 private_endpoints: Optional[Sequence['outputs.AiEndpointDeployedModelPrivateEndpoint']] = None,
                 service_account: Optional[str] = None,
                 shared_resources: Optional[str] = None):
        """
        :param Sequence['AiEndpointDeployedModelAutomaticResourceArgs'] automatic_resources: (Output)
               A description of resources that to large degree are decided by Vertex AI, and require only a modest additional configuration.
               Structure is documented below.
        :param str create_time: (Output)
               Output only. Timestamp when the DeployedModel was created.
        :param Sequence['AiEndpointDeployedModelDedicatedResourceArgs'] dedicated_resources: (Output)
               A description of resources that are dedicated to the DeployedModel, and that need a higher degree of manual configuration.
               Structure is documented below.
        :param str display_name: Required. The display name of the Endpoint. The name can be up to 128 characters long and can consist of any UTF-8 characters.
        :param bool enable_access_logging: (Output)
               These logs are like standard server access logs, containing information like timestamp and latency for each prediction request. Note that Stackdriver logs may incur a cost, especially if your project receives prediction requests at a high queries per second rate (QPS). Estimate your costs before enabling this option.
        :param bool enable_container_logging: (Output)
               If true, the container of the DeployedModel instances will send `stderr` and `stdout` streams to Stackdriver Logging. Only supported for custom-trained Models and AutoML Tabular Models.
        :param str id: (Output)
               The ID of the DeployedModel. If not provided upon deployment, Vertex AI will generate a value for this ID. This value should be 1-10 characters, and valid characters are /[0-9]/.
        :param str model: (Output)
               The name of the Model that this is the deployment of. Note that the Model may be in a different location than the DeployedModel's Endpoint.
        :param str model_version_id: (Output)
               Output only. The version ID of the model that is deployed.
        :param Sequence['AiEndpointDeployedModelPrivateEndpointArgs'] private_endpoints: (Output)
               Output only. Provide paths for users to send predict/explain/health requests directly to the deployed model services running on Cloud via private services access. This field is populated if network is configured.
               Structure is documented below.
        :param str service_account: (Output)
               The service account that the DeployedModel's container runs as. Specify the email address of the service account. If this service account is not specified, the container runs as a service account that doesn't have access to the resource project. Users deploying the Model must have the `iam.serviceAccounts.actAs` permission on this service account.
        :param str shared_resources: (Output)
               The resource name of the shared DeploymentResourcePool to deploy on. Format: projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}
        """
        if automatic_resources is not None:
            pulumi.set(__self__, "automatic_resources", automatic_resources)
        if create_time is not None:
            pulumi.set(__self__, "create_time", create_time)
        if dedicated_resources is not None:
            pulumi.set(__self__, "dedicated_resources", dedicated_resources)
        if display_name is not None:
            pulumi.set(__self__, "display_name", display_name)
        if enable_access_logging is not None:
            pulumi.set(__self__, "enable_access_logging", enable_access_logging)
        if enable_container_logging is not None:
            pulumi.set(__self__, "enable_container_logging", enable_container_logging)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if model is not None:
            pulumi.set(__self__, "model", model)
        if model_version_id is not None:
            pulumi.set(__self__, "model_version_id", model_version_id)
        if private_endpoints is not None:
            pulumi.set(__self__, "private_endpoints", private_endpoints)
        if service_account is not None:
            pulumi.set(__self__, "service_account", service_account)
        if shared_resources is not None:
            pulumi.set(__self__, "shared_resources", shared_resources)

    @property
    @pulumi.getter(name="automaticResources")
    def automatic_resources(self) -> Optional[Sequence['outputs.AiEndpointDeployedModelAutomaticResource']]:
        """
        (Output)
        A description of resources that to large degree are decided by Vertex AI, and require only a modest additional configuration.
        Structure is documented below.
        """
        return pulumi.get(self, "automatic_resources")

    @property
    @pulumi.getter(name="createTime")
    def create_time(self) -> Optional[str]:
        """
        (Output)
        Output only. Timestamp when the DeployedModel was created.
        """
        return pulumi.get(self, "create_time")

    @property
    @pulumi.getter(name="dedicatedResources")
    def dedicated_resources(self) -> Optional[Sequence['outputs.AiEndpointDeployedModelDedicatedResource']]:
        """
        (Output)
        A description of resources that are dedicated to the DeployedModel, and that need a higher degree of manual configuration.
        Structure is documented below.
        """
        return pulumi.get(self, "dedicated_resources")

    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> Optional[str]:
        """
        Required. The display name of the Endpoint. The name can be up to 128 characters long and can consist of any UTF-8 characters.
        """
        return pulumi.get(self, "display_name")

    @property
    @pulumi.getter(name="enableAccessLogging")
    def enable_access_logging(self) -> Optional[bool]:
        """
        (Output)
        These logs are like standard server access logs, containing information like timestamp and latency for each prediction request. Note that Stackdriver logs may incur a cost, especially if your project receives prediction requests at a high queries per second rate (QPS). Estimate your costs before enabling this option.
        """
        return pulumi.get(self, "enable_access_logging")

    @property
    @pulumi.getter(name="enableContainerLogging")
    def enable_container_logging(self) -> Optional[bool]:
        """
        (Output)
        If true, the container of the DeployedModel instances will send `stderr` and `stdout` streams to Stackdriver Logging. Only supported for custom-trained Models and AutoML Tabular Models.
        """
        return pulumi.get(self, "enable_container_logging")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        (Output)
        The ID of the DeployedModel. If not provided upon deployment, Vertex AI will generate a value for this ID. This value should be 1-10 characters, and valid characters are /[0-9]/.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def model(self) -> Optional[str]:
        """
        (Output)
        The name of the Model that this is the deployment of. Note that the Model may be in a different location than the DeployedModel's Endpoint.
        """
        return pulumi.get(self, "model")

    @property
    @pulumi.getter(name="modelVersionId")
    def model_version_id(self) -> Optional[str]:
        """
        (Output)
        Output only. The version ID of the model that is deployed.
        """
        return pulumi.get(self, "model_version_id")

    @property
    @pulumi.getter(name="privateEndpoints")
    def private_endpoints(self) -> Optional[Sequence['outputs.AiEndpointDeployedModelPrivateEndpoint']]:
        """
        (Output)
        Output only. Provide paths for users to send predict/explain/health requests directly to the deployed model services running on Cloud via private services access. This field is populated if network is configured.
        Structure is documented below.
        """
        return pulumi.get(self, "private_endpoints")

    @property
    @pulumi.getter(name="serviceAccount")
    def service_account(self) -> Optional[str]:
        """
        (Output)
        The service account that the DeployedModel's container runs as. Specify the email address of the service account. If this service account is not specified, the container runs as a service account that doesn't have access to the resource project. Users deploying the Model must have the `iam.serviceAccounts.actAs` permission on this service account.
        """
        return pulumi.get(self, "service_account")

    @property
    @pulumi.getter(name="sharedResources")
    def shared_resources(self) -> Optional[str]:
        """
        (Output)
        The resource name of the shared DeploymentResourcePool to deploy on. Format: projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}
        """
        return pulumi.get(self, "shared_resources")
@pulumi.output_type
class AiEndpointDeployedModelAutomaticResource(dict):
    # NOTE: tfgen-generated output type (see file header); regenerate rather
    # than editing by hand.
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "maxReplicaCount":
            suggest = "max_replica_count"
        elif key == "minReplicaCount":
            suggest = "min_replica_count"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AiEndpointDeployedModelAutomaticResource. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        AiEndpointDeployedModelAutomaticResource.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        AiEndpointDeployedModelAutomaticResource.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 max_replica_count: Optional[int] = None,
                 min_replica_count: Optional[int] = None):
        """
        :param int max_replica_count: (Output)
               The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, a no upper bound for scaling under heavy traffic will be assume, though Vertex AI may be unable to scale beyond certain replica number.
        :param int min_replica_count: (Output)
               The minimum number of replicas this DeployedModel will be always deployed on. If traffic against it increases, it may dynamically be deployed onto more replicas up to max_replica_count, and as traffic decreases, some of these extra replicas may be freed. If the requested value is too large, the deployment will error.
        """
        if max_replica_count is not None:
            pulumi.set(__self__, "max_replica_count", max_replica_count)
        if min_replica_count is not None:
            pulumi.set(__self__, "min_replica_count", min_replica_count)

    @property
    @pulumi.getter(name="maxReplicaCount")
    def max_replica_count(self) -> Optional[int]:
        """
        (Output)
        The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, a no upper bound for scaling under heavy traffic will be assume, though Vertex AI may be unable to scale beyond certain replica number.
        """
        return pulumi.get(self, "max_replica_count")

    @property
    @pulumi.getter(name="minReplicaCount")
    def min_replica_count(self) -> Optional[int]:
        """
        (Output)
        The minimum number of replicas this DeployedModel will be always deployed on. If traffic against it increases, it may dynamically be deployed onto more replicas up to max_replica_count, and as traffic decreases, some of these extra replicas may be freed. If the requested value is too large, the deployment will error.
        """
        return pulumi.get(self, "min_replica_count")
@pulumi.output_type
class AiEndpointDeployedModelDedicatedResource(dict):
    """Dedicated (user-configured) machine resources backing a deployed model."""

    @staticmethod
    def __key_warning(key: str):
        # Translate camelCase wire keys to their snake_case property names.
        suggest = {
            "autoscalingMetricSpecs": "autoscaling_metric_specs",
            "machineSpecs": "machine_specs",
            "maxReplicaCount": "max_replica_count",
            "minReplicaCount": "min_replica_count",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AiEndpointDeployedModelDedicatedResource. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        AiEndpointDeployedModelDedicatedResource.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        AiEndpointDeployedModelDedicatedResource.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 autoscaling_metric_specs: Optional[Sequence['outputs.AiEndpointDeployedModelDedicatedResourceAutoscalingMetricSpec']] = None,
                 machine_specs: Optional[Sequence['outputs.AiEndpointDeployedModelDedicatedResourceMachineSpec']] = None,
                 max_replica_count: Optional[int] = None,
                 min_replica_count: Optional[int] = None):
        """
        :param Sequence['AiEndpointDeployedModelDedicatedResourceAutoscalingMetricSpecArgs'] autoscaling_metric_specs: (Output)
               Metric specifications overriding the autoscaling target values (default 60). At most one entry per metric. With accelerator_count > 0 both CPU utilization and accelerator duty cycle drive scaling; otherwise CPU utilization alone.
               Structure is documented below.
        :param Sequence['AiEndpointDeployedModelDedicatedResourceMachineSpecArgs'] machine_specs: (Output)
               Specification of a single machine used by the prediction.
               Structure is documented below.
        :param int max_replica_count: (Output)
               Maximum replica count this DeployedModel may scale up to under load.
        :param int min_replica_count: (Output)
               Minimum replica count this DeployedModel is always deployed on.
        """
        # Only record the fields that were actually supplied.
        for field, value in (
                ("autoscaling_metric_specs", autoscaling_metric_specs),
                ("machine_specs", machine_specs),
                ("max_replica_count", max_replica_count),
                ("min_replica_count", min_replica_count)):
            if value is not None:
                pulumi.set(__self__, field, value)

    @property
    @pulumi.getter(name="autoscalingMetricSpecs")
    def autoscaling_metric_specs(self) -> Optional[Sequence['outputs.AiEndpointDeployedModelDedicatedResourceAutoscalingMetricSpec']]:
        """
        (Output)
        Metric specifications overriding the autoscaling target values (default 60). At most one entry per metric. For example, set autoscaling_metric_specs.metric_name to `aiplatform.googleapis.com/prediction/online/cpu/utilization` and autoscaling_metric_specs.target to `80` to target 80% CPU utilization.
        Structure is documented below.
        """
        return pulumi.get(self, "autoscaling_metric_specs")

    @property
    @pulumi.getter(name="machineSpecs")
    def machine_specs(self) -> Optional[Sequence['outputs.AiEndpointDeployedModelDedicatedResourceMachineSpec']]:
        """
        (Output)
        Specification of a single machine used by the prediction.
        Structure is documented below.
        """
        return pulumi.get(self, "machine_specs")

    @property
    @pulumi.getter(name="maxReplicaCount")
    def max_replica_count(self) -> Optional[int]:
        """
        (Output)
        Maximum replica count this DeployedModel may scale up to under load; scaling to this count is guaranteed once deployment succeeds.
        """
        return pulumi.get(self, "max_replica_count")

    @property
    @pulumi.getter(name="minReplicaCount")
    def min_replica_count(self) -> Optional[int]:
        """
        (Output)
        Minimum replica count this DeployedModel is always deployed on.
        """
        return pulumi.get(self, "min_replica_count")
@pulumi.output_type
class AiEndpointDeployedModelDedicatedResourceAutoscalingMetricSpec(dict):
    """An autoscaling target override for a single resource-utilization metric."""

    @staticmethod
    def __key_warning(key: str):
        # Translate camelCase wire keys to their snake_case property names.
        suggest = {
            "metricName": "metric_name",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AiEndpointDeployedModelDedicatedResourceAutoscalingMetricSpec. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        AiEndpointDeployedModelDedicatedResourceAutoscalingMetricSpec.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        AiEndpointDeployedModelDedicatedResourceAutoscalingMetricSpec.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 metric_name: Optional[str] = None,
                 target: Optional[int] = None):
        """
        :param str metric_name: (Output)
               Resource metric name. Supported metrics for Online Prediction: `aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle`, `aiplatform.googleapis.com/prediction/online/cpu/utilization`.
        :param int target: (Output)
               Target resource utilization in percent (1-100); replicas change once real usage deviates far enough from it. Defaults to 60 when unset.
        """
        # Only record the fields that were actually supplied.
        for field, value in (("metric_name", metric_name), ("target", target)):
            if value is not None:
                pulumi.set(__self__, field, value)

    @property
    @pulumi.getter(name="metricName")
    def metric_name(self) -> Optional[str]:
        """
        (Output)
        Resource metric name. Supported metrics for Online Prediction: `aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle`, `aiplatform.googleapis.com/prediction/online/cpu/utilization`.
        """
        return pulumi.get(self, "metric_name")

    @property
    @pulumi.getter
    def target(self) -> Optional[int]:
        """
        (Output)
        Target resource utilization in percent (1-100); replicas change once real usage deviates far enough from it. Defaults to 60 when unset.
        """
        return pulumi.get(self, "target")
@pulumi.output_type
class AiEndpointDeployedModelDedicatedResourceMachineSpec(dict):
    """The machine type and accelerator configuration for a prediction node."""

    @staticmethod
    def __key_warning(key: str):
        # Translate camelCase wire keys to their snake_case property names.
        suggest = {
            "acceleratorCount": "accelerator_count",
            "acceleratorType": "accelerator_type",
            "machineType": "machine_type",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AiEndpointDeployedModelDedicatedResourceMachineSpec. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        AiEndpointDeployedModelDedicatedResourceMachineSpec.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        AiEndpointDeployedModelDedicatedResourceMachineSpec.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 accelerator_count: Optional[int] = None,
                 accelerator_type: Optional[str] = None,
                 machine_type: Optional[str] = None):
        """
        :param int accelerator_count: (Output)
               Number of accelerators to attach to the machine.
        :param str accelerator_type: (Output)
               Type of accelerator(s) that may be attached as per accelerator_count. See possible values [here](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec#AcceleratorType).
        :param str machine_type: (Output)
               Machine type. See machine types supported for [prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types) and [custom training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types). Optional for DeployedModel (default `n1-standard-2`); required for BatchPredictionJob and WorkerPoolSpec.
        """
        # Only record the fields that were actually supplied.
        for field, value in (("accelerator_count", accelerator_count),
                             ("accelerator_type", accelerator_type),
                             ("machine_type", machine_type)):
            if value is not None:
                pulumi.set(__self__, field, value)

    @property
    @pulumi.getter(name="acceleratorCount")
    def accelerator_count(self) -> Optional[int]:
        """(Output) Number of accelerators to attach to the machine."""
        return pulumi.get(self, "accelerator_count")

    @property
    @pulumi.getter(name="acceleratorType")
    def accelerator_type(self) -> Optional[str]:
        """
        (Output)
        Type of accelerator(s) that may be attached as per accelerator_count. See possible values [here](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec#AcceleratorType).
        """
        return pulumi.get(self, "accelerator_type")

    @property
    @pulumi.getter(name="machineType")
    def machine_type(self) -> Optional[str]:
        """
        (Output)
        Machine type. See machine types supported for [prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types) and [custom training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types). Optional for DeployedModel (default `n1-standard-2`); required for BatchPredictionJob and WorkerPoolSpec.
        """
        return pulumi.get(self, "machine_type")
@pulumi.output_type
class AiEndpointDeployedModelPrivateEndpoint(dict):
    """Private-endpoint HTTP paths and service attachment for a deployed model."""

    @staticmethod
    def __key_warning(key: str):
        # Translate camelCase wire keys to their snake_case property names.
        suggest = {
            "explainHttpUri": "explain_http_uri",
            "healthHttpUri": "health_http_uri",
            "predictHttpUri": "predict_http_uri",
            "serviceAttachment": "service_attachment",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AiEndpointDeployedModelPrivateEndpoint. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        AiEndpointDeployedModelPrivateEndpoint.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        AiEndpointDeployedModelPrivateEndpoint.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 explain_http_uri: Optional[str] = None,
                 health_http_uri: Optional[str] = None,
                 predict_http_uri: Optional[str] = None,
                 service_attachment: Optional[str] = None):
        """
        :param str explain_http_uri: (Output)
               Output only. Http(s) path to send explain requests.
        :param str health_http_uri: (Output)
               Output only. Http(s) path to send health check requests.
        :param str predict_http_uri: (Output)
               Output only. Http(s) path to send prediction requests.
        :param str service_attachment: (Output)
               Output only. Name of the service attachment resource; populated if private service connect is enabled.
        """
        # Only record the fields that were actually supplied.
        for field, value in (("explain_http_uri", explain_http_uri),
                             ("health_http_uri", health_http_uri),
                             ("predict_http_uri", predict_http_uri),
                             ("service_attachment", service_attachment)):
            if value is not None:
                pulumi.set(__self__, field, value)

    @property
    @pulumi.getter(name="explainHttpUri")
    def explain_http_uri(self) -> Optional[str]:
        """(Output) Output only. Http(s) path to send explain requests."""
        return pulumi.get(self, "explain_http_uri")

    @property
    @pulumi.getter(name="healthHttpUri")
    def health_http_uri(self) -> Optional[str]:
        """(Output) Output only. Http(s) path to send health check requests."""
        return pulumi.get(self, "health_http_uri")

    @property
    @pulumi.getter(name="predictHttpUri")
    def predict_http_uri(self) -> Optional[str]:
        """(Output) Output only. Http(s) path to send prediction requests."""
        return pulumi.get(self, "predict_http_uri")

    @property
    @pulumi.getter(name="serviceAttachment")
    def service_attachment(self) -> Optional[str]:
        """(Output) Output only. Name of the service attachment resource; populated if private service connect is enabled."""
        return pulumi.get(self, "service_attachment")
@pulumi.output_type
class AiEndpointEncryptionSpec(dict):
    """Customer-managed encryption key (CMEK) configuration for an AI Endpoint."""

    @staticmethod
    def __key_warning(key: str):
        # Translate camelCase wire keys to their snake_case property names.
        suggest = {
            "kmsKeyName": "kms_key_name",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AiEndpointEncryptionSpec. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        AiEndpointEncryptionSpec.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        AiEndpointEncryptionSpec.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 kms_key_name: str):
        """
        :param str kms_key_name: Required. Cloud KMS resource identifier of the customer-managed encryption key, of the form `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. Must be in the same region as the compute resource.
        """
        pulumi.set(__self__, "kms_key_name", kms_key_name)

    @property
    @pulumi.getter(name="kmsKeyName")
    def kms_key_name(self) -> str:
        """
        Required. Cloud KMS resource identifier of the customer-managed encryption key, of the form `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. Must be in the same region as the compute resource.
        """
        return pulumi.get(self, "kms_key_name")
@pulumi.output_type
class AiFeatureStoreEncryptionSpec(dict):
    """Customer-managed encryption key (CMEK) configuration for a Feature Store."""

    @staticmethod
    def __key_warning(key: str):
        # Translate camelCase wire keys to their snake_case property names.
        suggest = {
            "kmsKeyName": "kms_key_name",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AiFeatureStoreEncryptionSpec. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        AiFeatureStoreEncryptionSpec.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        AiFeatureStoreEncryptionSpec.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 kms_key_name: str):
        """
        :param str kms_key_name: Cloud KMS resource identifier of the customer-managed encryption key, of the form projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key. Must be in the same region as the compute resource.
        """
        pulumi.set(__self__, "kms_key_name", kms_key_name)

    @property
    @pulumi.getter(name="kmsKeyName")
    def kms_key_name(self) -> str:
        """
        Cloud KMS resource identifier of the customer-managed encryption key, of the form projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key. Must be in the same region as the compute resource.
        """
        return pulumi.get(self, "kms_key_name")
@pulumi.output_type
class AiFeatureStoreEntityTypeIamBindingCondition(dict):
    """An IAM condition (CEL expression, title, optional description) on an entity-type binding."""

    def __init__(__self__, *,
                 expression: str,
                 title: str,
                 description: Optional[str] = None):
        """
        :param str expression: CEL expression that must evaluate true for the binding to apply.
        :param str title: Short title describing the condition's purpose.
        :param str description: Optional longer description of the condition.
        """
        # expression and title are required; description is recorded only when given.
        for field, value in (("expression", expression), ("title", title)):
            pulumi.set(__self__, field, value)
        if description is not None:
            pulumi.set(__self__, "description", description)

    @property
    @pulumi.getter
    def expression(self) -> str:
        """CEL expression that must evaluate true for the binding to apply."""
        return pulumi.get(self, "expression")

    @property
    @pulumi.getter
    def title(self) -> str:
        """Short title describing the condition's purpose."""
        return pulumi.get(self, "title")

    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        """Optional longer description of the condition."""
        return pulumi.get(self, "description")
@pulumi.output_type
class AiFeatureStoreEntityTypeIamMemberCondition(dict):
    """An IAM condition (CEL expression, title, optional description) on an entity-type member grant."""

    def __init__(__self__, *,
                 expression: str,
                 title: str,
                 description: Optional[str] = None):
        """
        :param str expression: CEL expression that must evaluate true for the grant to apply.
        :param str title: Short title describing the condition's purpose.
        :param str description: Optional longer description of the condition.
        """
        # expression and title are required; description is recorded only when given.
        for field, value in (("expression", expression), ("title", title)):
            pulumi.set(__self__, field, value)
        if description is not None:
            pulumi.set(__self__, "description", description)

    @property
    @pulumi.getter
    def expression(self) -> str:
        """CEL expression that must evaluate true for the grant to apply."""
        return pulumi.get(self, "expression")

    @property
    @pulumi.getter
    def title(self) -> str:
        """Short title describing the condition's purpose."""
        return pulumi.get(self, "title")

    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        """Optional longer description of the condition."""
        return pulumi.get(self, "description")
@pulumi.output_type
class AiFeatureStoreEntityTypeMonitoringConfig(dict):
    """Feature-monitoring configuration for a Feature Store entity type."""

    @staticmethod
    def __key_warning(key: str):
        # Translate camelCase wire keys to their snake_case property names.
        suggest = {
            "categoricalThresholdConfig": "categorical_threshold_config",
            "importFeaturesAnalysis": "import_features_analysis",
            "numericalThresholdConfig": "numerical_threshold_config",
            "snapshotAnalysis": "snapshot_analysis",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AiFeatureStoreEntityTypeMonitoringConfig. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        AiFeatureStoreEntityTypeMonitoringConfig.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        AiFeatureStoreEntityTypeMonitoringConfig.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 categorical_threshold_config: Optional['outputs.AiFeatureStoreEntityTypeMonitoringConfigCategoricalThresholdConfig'] = None,
                 import_features_analysis: Optional['outputs.AiFeatureStoreEntityTypeMonitoringConfigImportFeaturesAnalysis'] = None,
                 numerical_threshold_config: Optional['outputs.AiFeatureStoreEntityTypeMonitoringConfigNumericalThresholdConfig'] = None,
                 snapshot_analysis: Optional['outputs.AiFeatureStoreEntityTypeMonitoringConfigSnapshotAnalysis'] = None):
        """
        :param 'AiFeatureStoreEntityTypeMonitoringConfigCategoricalThresholdConfigArgs' categorical_threshold_config: Anomaly-detection threshold shared by all categorical features (BOOL or STRING value types).
               Structure is documented below.
        :param 'AiFeatureStoreEntityTypeMonitoringConfigImportFeaturesAnalysisArgs' import_features_analysis: Config for ImportFeatures Analysis Based Feature Monitoring.
               Structure is documented below.
        :param 'AiFeatureStoreEntityTypeMonitoringConfigNumericalThresholdConfigArgs' numerical_threshold_config: Anomaly-detection threshold shared by all numerical features (DOUBLE or INT64 value types).
               Structure is documented below.
        :param 'AiFeatureStoreEntityTypeMonitoringConfigSnapshotAnalysisArgs' snapshot_analysis: Config for Snapshot Analysis Based Feature Monitoring.
               Structure is documented below.
        """
        # Only record the sub-configs that were actually supplied.
        for field, value in (
                ("categorical_threshold_config", categorical_threshold_config),
                ("import_features_analysis", import_features_analysis),
                ("numerical_threshold_config", numerical_threshold_config),
                ("snapshot_analysis", snapshot_analysis)):
            if value is not None:
                pulumi.set(__self__, field, value)

    @property
    @pulumi.getter(name="categoricalThresholdConfig")
    def categorical_threshold_config(self) -> Optional['outputs.AiFeatureStoreEntityTypeMonitoringConfigCategoricalThresholdConfig']:
        """
        Anomaly-detection threshold shared by all categorical features (BOOL or STRING value types).
        Structure is documented below.
        """
        return pulumi.get(self, "categorical_threshold_config")

    @property
    @pulumi.getter(name="importFeaturesAnalysis")
    def import_features_analysis(self) -> Optional['outputs.AiFeatureStoreEntityTypeMonitoringConfigImportFeaturesAnalysis']:
        """
        Config for ImportFeatures Analysis Based Feature Monitoring.
        Structure is documented below.
        """
        return pulumi.get(self, "import_features_analysis")

    @property
    @pulumi.getter(name="numericalThresholdConfig")
    def numerical_threshold_config(self) -> Optional['outputs.AiFeatureStoreEntityTypeMonitoringConfigNumericalThresholdConfig']:
        """
        Anomaly-detection threshold shared by all numerical features (DOUBLE or INT64 value types).
        Structure is documented below.
        """
        return pulumi.get(self, "numerical_threshold_config")

    @property
    @pulumi.getter(name="snapshotAnalysis")
    def snapshot_analysis(self) -> Optional['outputs.AiFeatureStoreEntityTypeMonitoringConfigSnapshotAnalysis']:
        """
        Config for Snapshot Analysis Based Feature Monitoring.
        Structure is documented below.
        """
        return pulumi.get(self, "snapshot_analysis")
@pulumi.output_type
class AiFeatureStoreEntityTypeMonitoringConfigCategoricalThresholdConfig(dict):
    """Alert threshold for categorical-feature anomaly detection."""

    def __init__(__self__, *,
                 value: float):
        """
        :param float value: Threshold that triggers the alert; the distribution distance for categorical features is the L-infinity norm. A feature is only monitored with a non-zero threshold. Default 0.3.
        """
        pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def value(self) -> float:
        """
        Threshold that triggers the alert; the distribution distance for categorical features is the L-infinity norm. A feature is only monitored with a non-zero threshold. Default 0.3.
        """
        return pulumi.get(self, "value")
@pulumi.output_type
class AiFeatureStoreEntityTypeMonitoringConfigImportFeaturesAnalysis(dict):
    """Configuration for import-features-analysis-based feature monitoring."""

    @staticmethod
    def __key_warning(key: str):
        # Translate camelCase wire keys to their snake_case property names.
        suggest = {
            "anomalyDetectionBaseline": "anomaly_detection_baseline",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AiFeatureStoreEntityTypeMonitoringConfigImportFeaturesAnalysis. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        AiFeatureStoreEntityTypeMonitoringConfigImportFeaturesAnalysis.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        AiFeatureStoreEntityTypeMonitoringConfigImportFeaturesAnalysis.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 anomaly_detection_baseline: Optional[str] = None,
                 state: Optional[str] = None):
        """
        :param str anomaly_detection_baseline: Baseline used for anomaly detection on each [entityTypes.importFeatureValues][] operation. One of:
               * LATEST_STATS: use the later of the most recent snapshot-analysis or previous import-features-analysis statistics; if neither exists, skip detection and only generate statistics.
               * MOST_RECENT_SNAPSHOT_STATS: use the most recent snapshot-analysis statistics if they exist.
               * PREVIOUS_IMPORT_FEATURES_STATS: use the previous import-features-analysis statistics if they exist.
        :param str state: Enables / disables / inherits default behavior for import features analysis. One of:
               * DEFAULT: default monitoring behavior (EntityType-level: disabled).
               * ENABLED: explicitly enabled (EntityType-level: enables analysis for all Features under it by default).
               * DISABLED: explicitly disabled (EntityType-level: disables analysis for all Features under it by default).
        """
        # Only record the fields that were actually supplied.
        for field, v in (("anomaly_detection_baseline", anomaly_detection_baseline),
                         ("state", state)):
            if v is not None:
                pulumi.set(__self__, field, v)

    @property
    @pulumi.getter(name="anomalyDetectionBaseline")
    def anomaly_detection_baseline(self) -> Optional[str]:
        """
        Baseline used for anomaly detection on each [entityTypes.importFeatureValues][] operation. One of LATEST_STATS, MOST_RECENT_SNAPSHOT_STATS, or PREVIOUS_IMPORT_FEATURES_STATS (see __init__ param docs for details).
        """
        return pulumi.get(self, "anomaly_detection_baseline")

    @property
    @pulumi.getter
    def state(self) -> Optional[str]:
        """
        Enables / disables / inherits default behavior for import features analysis: one of DEFAULT, ENABLED, or DISABLED (see __init__ param docs for details).
        """
        return pulumi.get(self, "state")
@pulumi.output_type
class AiFeatureStoreEntityTypeMonitoringConfigNumericalThresholdConfig(dict):
    """Alert threshold for numerical-feature anomaly detection."""

    def __init__(__self__, *,
                 value: float):
        """
        :param float value: Threshold that triggers the alert; the distribution distance for numerical features is the Jensen-Shannon divergence. A feature is only monitored with a non-zero threshold. Default 0.3.
        """
        pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def value(self) -> float:
        """
        Threshold that triggers the alert; the distribution distance for numerical features is the Jensen-Shannon divergence. A feature is only monitored with a non-zero threshold. Default 0.3.
        """
        return pulumi.get(self, "value")
@pulumi.output_type
class AiFeatureStoreEntityTypeMonitoringConfigSnapshotAnalysis(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "monitoringInterval":
suggest = "monitoring_interval"
elif key == "monitoringIntervalDays":
suggest = "monitoring_interval_days"
elif key == "stalenessDays":
suggest = "staleness_days"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AiFeatureStoreEntityTypeMonitoringConfigSnapshotAnalysis. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AiFeatureStoreEntityTypeMonitoringConfigSnapshotAnalysis.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AiFeatureStoreEntityTypeMonitoringConfigSnapshotAnalysis.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
disabled: Optional[bool] = None,
monitoring_interval: Optional[str] = None,
monitoring_interval_days: Optional[int] = None,
staleness_days: Optional[int] = None):
"""
:param bool disabled: The monitoring schedule for snapshot analysis. For EntityType-level config: unset / disabled = true indicates disabled by default for Features under it; otherwise by default enable snapshot analysis monitoring with monitoringInterval for Features under it.
:param int monitoring_interval_days: Configuration of the snapshot analysis based monitoring pipeline running interval. The value indicates number of days. The default value is 1.
If both FeaturestoreMonitoringConfig.SnapshotAnalysis.monitoring_interval_days and [FeaturestoreMonitoringConfig.SnapshotAnalysis.monitoring_interval][] are set when creating/updating EntityTypes/Features, FeaturestoreMonitoringConfig.SnapshotAnalysis.monitoring_interval_days will be used.
:param int staleness_days: Customized export features time window for snapshot analysis. Unit is one day. The default value is 21 days. Minimum value is 1 day. Maximum value is 4000 days.
"""
if disabled is not None:
pulumi.set(__self__, "disabled", disabled)
if monitoring_interval is not None:
pulumi.set(__self__, "monitoring_interval", monitoring_interval)
if monitoring_interval_days is not None:
pulumi.set(__self__, "monitoring_interval_days", monitoring_interval_days)
if staleness_days is not None:
pulumi.set(__self__, "staleness_days", staleness_days)
    @property
    @pulumi.getter
    def disabled(self) -> Optional[bool]:
        """
        The monitoring schedule for snapshot analysis. For EntityType-level config: unset / disabled = true indicates disabled by default for Features under it; otherwise by default enable snapshot analysis monitoring with monitoringInterval for Features under it.
        """
        # None when the field was not set in __init__.
        return pulumi.get(self, "disabled")
    @property
    @pulumi.getter(name="monitoringInterval")
    def monitoring_interval(self) -> Optional[str]:
        """Deprecated: this field is unavailable in the GA provider and will be removed from the beta provider in a future release."""
        warnings.warn("""This field is unavailable in the GA provider and will be removed from the beta provider in a future release.""", DeprecationWarning)
        pulumi.log.warn("""monitoring_interval is deprecated: This field is unavailable in the GA provider and will be removed from the beta provider in a future release.""")
        return pulumi.get(self, "monitoring_interval")
    @property
    @pulumi.getter(name="monitoringIntervalDays")
    def monitoring_interval_days(self) -> Optional[int]:
        """
        Configuration of the snapshot analysis based monitoring pipeline running interval. The value indicates number of days. The default value is 1.
        If both FeaturestoreMonitoringConfig.SnapshotAnalysis.monitoring_interval_days and [FeaturestoreMonitoringConfig.SnapshotAnalysis.monitoring_interval][] are set when creating/updating EntityTypes/Features, FeaturestoreMonitoringConfig.SnapshotAnalysis.monitoring_interval_days will be used.
        """
        # Takes precedence over the deprecated monitoring_interval field.
        return pulumi.get(self, "monitoring_interval_days")
    @property
    @pulumi.getter(name="stalenessDays")
    def staleness_days(self) -> Optional[int]:
        """
        Customized export features time window for snapshot analysis. Unit is one day. The default value is 21 days. Minimum value is 1 day. Maximum value is 4000 days.
        """
        return pulumi.get(self, "staleness_days")
@pulumi.output_type
class AiFeatureStoreIamBindingCondition(dict):
    """IAM condition attached to a Featurestore IAM binding (CEL expression, title, optional description)."""
    def __init__(__self__, *,
                 expression: str,
                 title: str,
                 description: Optional[str] = None):
        """
        :param str expression: Textual representation of the condition expression.
        :param str title: Title identifying the purpose of the condition.
        :param str description: Optional human-readable description of the condition.
        """
        pulumi.set(__self__, "expression", expression)
        pulumi.set(__self__, "title", title)
        if description is not None:
            pulumi.set(__self__, "description", description)
    @property
    @pulumi.getter
    def expression(self) -> str:
        """Textual representation of the condition expression."""
        return pulumi.get(self, "expression")
    @property
    @pulumi.getter
    def title(self) -> str:
        """Title identifying the purpose of the condition."""
        return pulumi.get(self, "title")
    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        """Optional description of the condition; None when unset."""
        return pulumi.get(self, "description")
@pulumi.output_type
class AiFeatureStoreIamMemberCondition(dict):
    """IAM condition attached to a Featurestore IAM member (same shape as the binding condition)."""
    def __init__(__self__, *,
                 expression: str,
                 title: str,
                 description: Optional[str] = None):
        """
        :param str expression: Textual representation of the condition expression.
        :param str title: Title identifying the purpose of the condition.
        :param str description: Optional human-readable description of the condition.
        """
        pulumi.set(__self__, "expression", expression)
        pulumi.set(__self__, "title", title)
        if description is not None:
            pulumi.set(__self__, "description", description)
    @property
    @pulumi.getter
    def expression(self) -> str:
        """Textual representation of the condition expression."""
        return pulumi.get(self, "expression")
    @property
    @pulumi.getter
    def title(self) -> str:
        """Title identifying the purpose of the condition."""
        return pulumi.get(self, "title")
    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        """Optional description of the condition; None when unset."""
        return pulumi.get(self, "description")
@pulumi.output_type
class AiFeatureStoreOnlineServingConfig(dict):
    """Online serving settings for a Featurestore: either a fixed node count or autoscaling (mutually exclusive)."""
    @staticmethod
    def __key_warning(key: str):
        # Map the camelCase wire key to its snake_case property name and
        # warn callers to use the property getter instead of dict access.
        suggest = None
        if key == "fixedNodeCount":
            suggest = "fixed_node_count"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AiFeatureStoreOnlineServingConfig. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        AiFeatureStoreOnlineServingConfig.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        AiFeatureStoreOnlineServingConfig.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 fixed_node_count: Optional[int] = None,
                 scaling: Optional['outputs.AiFeatureStoreOnlineServingConfigScaling'] = None):
        """
        :param int fixed_node_count: The number of nodes for each cluster. The number of nodes will not scale automatically but can be scaled manually by providing different values when updating.
        :param 'AiFeatureStoreOnlineServingConfigScalingArgs' scaling: Online serving scaling configuration. Only one of fixedNodeCount and scaling can be set. Setting one will reset the other.
               Structure is documented below.
        """
        if fixed_node_count is not None:
            pulumi.set(__self__, "fixed_node_count", fixed_node_count)
        if scaling is not None:
            pulumi.set(__self__, "scaling", scaling)
    @property
    @pulumi.getter(name="fixedNodeCount")
    def fixed_node_count(self) -> Optional[int]:
        """
        The number of nodes for each cluster. The number of nodes will not scale automatically but can be scaled manually by providing different values when updating.
        """
        return pulumi.get(self, "fixed_node_count")
    @property
    @pulumi.getter
    def scaling(self) -> Optional['outputs.AiFeatureStoreOnlineServingConfigScaling']:
        """
        Online serving scaling configuration. Only one of fixedNodeCount and scaling can be set. Setting one will reset the other.
        Structure is documented below.
        """
        return pulumi.get(self, "scaling")
@pulumi.output_type
class AiFeatureStoreOnlineServingConfigScaling(dict):
    """Autoscaling bounds for Featurestore online serving (both node-count bounds are required)."""
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to snake_case property names and warn.
        suggest = None
        if key == "maxNodeCount":
            suggest = "max_node_count"
        elif key == "minNodeCount":
            suggest = "min_node_count"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AiFeatureStoreOnlineServingConfigScaling. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        AiFeatureStoreOnlineServingConfigScaling.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        AiFeatureStoreOnlineServingConfigScaling.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 max_node_count: int,
                 min_node_count: int):
        """
        :param int max_node_count: The maximum number of nodes to scale up to. Must be greater than minNodeCount, and less than or equal to 10 times of 'minNodeCount'.
        :param int min_node_count: The minimum number of nodes to scale down to. Must be greater than or equal to 1.
        """
        pulumi.set(__self__, "max_node_count", max_node_count)
        pulumi.set(__self__, "min_node_count", min_node_count)
    @property
    @pulumi.getter(name="maxNodeCount")
    def max_node_count(self) -> int:
        """
        The maximum number of nodes to scale up to. Must be greater than minNodeCount, and less than or equal to 10 times of 'minNodeCount'.
        """
        return pulumi.get(self, "max_node_count")
    @property
    @pulumi.getter(name="minNodeCount")
    def min_node_count(self) -> int:
        """
        The minimum number of nodes to scale down to. Must be greater than or equal to 1.
        """
        return pulumi.get(self, "min_node_count")
@pulumi.output_type
class AiIndexDeployedIndex(dict):
    """Output-only record of where an AI Index is deployed (endpoint + deployed-index id)."""
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to snake_case property names and warn.
        suggest = None
        if key == "deployedIndexId":
            suggest = "deployed_index_id"
        elif key == "indexEndpoint":
            suggest = "index_endpoint"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AiIndexDeployedIndex. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        AiIndexDeployedIndex.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        AiIndexDeployedIndex.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 deployed_index_id: Optional[str] = None,
                 index_endpoint: Optional[str] = None):
        """
        :param str deployed_index_id: (Output)
               The ID of the DeployedIndex in the above IndexEndpoint.
        :param str index_endpoint: (Output)
               A resource name of the IndexEndpoint.
        """
        if deployed_index_id is not None:
            pulumi.set(__self__, "deployed_index_id", deployed_index_id)
        if index_endpoint is not None:
            pulumi.set(__self__, "index_endpoint", index_endpoint)
    @property
    @pulumi.getter(name="deployedIndexId")
    def deployed_index_id(self) -> Optional[str]:
        """
        (Output)
        The ID of the DeployedIndex in the above IndexEndpoint.
        """
        return pulumi.get(self, "deployed_index_id")
    @property
    @pulumi.getter(name="indexEndpoint")
    def index_endpoint(self) -> Optional[str]:
        """
        (Output)
        A resource name of the IndexEndpoint.
        """
        return pulumi.get(self, "index_endpoint")
@pulumi.output_type
class AiIndexIndexStat(dict):
    """Output-only statistics of an AI Index (shard and vector counts)."""
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to snake_case property names and warn.
        suggest = None
        if key == "shardsCount":
            suggest = "shards_count"
        elif key == "vectorsCount":
            suggest = "vectors_count"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AiIndexIndexStat. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        AiIndexIndexStat.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        AiIndexIndexStat.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 shards_count: Optional[int] = None,
                 vectors_count: Optional[str] = None):
        """
        :param int shards_count: (Output)
               The number of shards in the Index.
        :param str vectors_count: (Output)
               The number of vectors in the Index.
        """
        if shards_count is not None:
            pulumi.set(__self__, "shards_count", shards_count)
        if vectors_count is not None:
            pulumi.set(__self__, "vectors_count", vectors_count)
    @property
    @pulumi.getter(name="shardsCount")
    def shards_count(self) -> Optional[int]:
        """
        (Output)
        The number of shards in the Index.
        """
        return pulumi.get(self, "shards_count")
    @property
    @pulumi.getter(name="vectorsCount")
    def vectors_count(self) -> Optional[str]:
        """
        (Output)
        The number of vectors in the Index.
        """
        return pulumi.get(self, "vectors_count")
@pulumi.output_type
class AiIndexMetadata(dict):
    """Metadata of a Matching Engine Index: its config plus the contents-delta update parameters."""
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to snake_case property names and warn.
        suggest = None
        if key == "contentsDeltaUri":
            suggest = "contents_delta_uri"
        elif key == "isCompleteOverwrite":
            suggest = "is_complete_overwrite"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AiIndexMetadata. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        AiIndexMetadata.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        AiIndexMetadata.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 config: Optional['outputs.AiIndexMetadataConfig'] = None,
                 contents_delta_uri: Optional[str] = None,
                 is_complete_overwrite: Optional[bool] = None):
        """
        :param 'AiIndexMetadataConfigArgs' config: The configuration of the Matching Engine Index.
               Structure is documented below.
        :param str contents_delta_uri: Allows inserting, updating or deleting the contents of the Matching Engine Index.
               The string must be a valid Cloud Storage directory path. If this
               field is set when calling IndexService.UpdateIndex, then no other
               Index field can be also updated as part of the same call.
               The expected structure and format of the files this URI points to is
               described at https://cloud.google.com/vertex-ai/docs/matching-engine/using-matching-engine#input-data-format
        :param bool is_complete_overwrite: If this field is set together with contentsDeltaUri when calling IndexService.UpdateIndex,
               then existing content of the Index will be replaced by the data from the contentsDeltaUri.
        """
        if config is not None:
            pulumi.set(__self__, "config", config)
        if contents_delta_uri is not None:
            pulumi.set(__self__, "contents_delta_uri", contents_delta_uri)
        if is_complete_overwrite is not None:
            pulumi.set(__self__, "is_complete_overwrite", is_complete_overwrite)
    @property
    @pulumi.getter
    def config(self) -> Optional['outputs.AiIndexMetadataConfig']:
        """
        The configuration of the Matching Engine Index.
        Structure is documented below.
        """
        return pulumi.get(self, "config")
    @property
    @pulumi.getter(name="contentsDeltaUri")
    def contents_delta_uri(self) -> Optional[str]:
        """
        Allows inserting, updating or deleting the contents of the Matching Engine Index.
        The string must be a valid Cloud Storage directory path. If this
        field is set when calling IndexService.UpdateIndex, then no other
        Index field can be also updated as part of the same call.
        The expected structure and format of the files this URI points to is
        described at https://cloud.google.com/vertex-ai/docs/matching-engine/using-matching-engine#input-data-format
        """
        return pulumi.get(self, "contents_delta_uri")
    @property
    @pulumi.getter(name="isCompleteOverwrite")
    def is_complete_overwrite(self) -> Optional[bool]:
        """
        If this field is set together with contentsDeltaUri when calling IndexService.UpdateIndex,
        then existing content of the Index will be replaced by the data from the contentsDeltaUri.
        """
        return pulumi.get(self, "is_complete_overwrite")
@pulumi.output_type
class AiIndexMetadataConfig(dict):
    """Search configuration of a Matching Engine Index: vector dimensions, algorithm, distance/normalization, shard size."""
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to snake_case property names and warn.
        suggest = None
        if key == "algorithmConfig":
            suggest = "algorithm_config"
        elif key == "approximateNeighborsCount":
            suggest = "approximate_neighbors_count"
        elif key == "distanceMeasureType":
            suggest = "distance_measure_type"
        elif key == "featureNormType":
            suggest = "feature_norm_type"
        elif key == "shardSize":
            suggest = "shard_size"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AiIndexMetadataConfig. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        AiIndexMetadataConfig.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        AiIndexMetadataConfig.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 dimensions: int,
                 algorithm_config: Optional['outputs.AiIndexMetadataConfigAlgorithmConfig'] = None,
                 approximate_neighbors_count: Optional[int] = None,
                 distance_measure_type: Optional[str] = None,
                 feature_norm_type: Optional[str] = None,
                 shard_size: Optional[str] = None):
        """
        :param int dimensions: The number of dimensions of the input vectors.
        :param 'AiIndexMetadataConfigAlgorithmConfigArgs' algorithm_config: The configuration with regard to the algorithms used for efficient search.
               Structure is documented below.
        :param int approximate_neighbors_count: The default number of neighbors to find via approximate search before exact reordering is
               performed. Exact reordering is a procedure where results returned by an
               approximate search algorithm are reordered via a more expensive distance computation.
               Required if tree-AH algorithm is used.
        :param str distance_measure_type: The distance measure used in nearest neighbor search. The value must be one of the followings:
               * SQUARED_L2_DISTANCE: Euclidean (L_2) Distance
               * L1_DISTANCE: Manhattan (L_1) Distance
               * COSINE_DISTANCE: Cosine Distance. Defined as 1 - cosine similarity.
               * DOT_PRODUCT_DISTANCE: Dot Product Distance. Defined as a negative of the dot product
        :param str feature_norm_type: Type of normalization to be carried out on each vector. The value must be one of the followings:
               * UNIT_L2_NORM: Unit L2 normalization type
               * NONE: No normalization type is specified.
        :param str shard_size: Index data is split into equal parts to be processed. These are called "shards".
               The shard size must be specified when creating an index. The value must be one of the followings:
               * SHARD_SIZE_SMALL: Small (2GB)
               * SHARD_SIZE_MEDIUM: Medium (20GB)
               * SHARD_SIZE_LARGE: Large (50GB)
        """
        # dimensions is the only required field; the rest are stored only
        # when explicitly provided.
        pulumi.set(__self__, "dimensions", dimensions)
        if algorithm_config is not None:
            pulumi.set(__self__, "algorithm_config", algorithm_config)
        if approximate_neighbors_count is not None:
            pulumi.set(__self__, "approximate_neighbors_count", approximate_neighbors_count)
        if distance_measure_type is not None:
            pulumi.set(__self__, "distance_measure_type", distance_measure_type)
        if feature_norm_type is not None:
            pulumi.set(__self__, "feature_norm_type", feature_norm_type)
        if shard_size is not None:
            pulumi.set(__self__, "shard_size", shard_size)
    @property
    @pulumi.getter
    def dimensions(self) -> int:
        """
        The number of dimensions of the input vectors.
        """
        return pulumi.get(self, "dimensions")
    @property
    @pulumi.getter(name="algorithmConfig")
    def algorithm_config(self) -> Optional['outputs.AiIndexMetadataConfigAlgorithmConfig']:
        """
        The configuration with regard to the algorithms used for efficient search.
        Structure is documented below.
        """
        return pulumi.get(self, "algorithm_config")
    @property
    @pulumi.getter(name="approximateNeighborsCount")
    def approximate_neighbors_count(self) -> Optional[int]:
        """
        The default number of neighbors to find via approximate search before exact reordering is
        performed. Exact reordering is a procedure where results returned by an
        approximate search algorithm are reordered via a more expensive distance computation.
        Required if tree-AH algorithm is used.
        """
        return pulumi.get(self, "approximate_neighbors_count")
    @property
    @pulumi.getter(name="distanceMeasureType")
    def distance_measure_type(self) -> Optional[str]:
        """
        The distance measure used in nearest neighbor search. The value must be one of the followings:
        * SQUARED_L2_DISTANCE: Euclidean (L_2) Distance
        * L1_DISTANCE: Manhattan (L_1) Distance
        * COSINE_DISTANCE: Cosine Distance. Defined as 1 - cosine similarity.
        * DOT_PRODUCT_DISTANCE: Dot Product Distance. Defined as a negative of the dot product
        """
        return pulumi.get(self, "distance_measure_type")
    @property
    @pulumi.getter(name="featureNormType")
    def feature_norm_type(self) -> Optional[str]:
        """
        Type of normalization to be carried out on each vector. The value must be one of the followings:
        * UNIT_L2_NORM: Unit L2 normalization type
        * NONE: No normalization type is specified.
        """
        return pulumi.get(self, "feature_norm_type")
    @property
    @pulumi.getter(name="shardSize")
    def shard_size(self) -> Optional[str]:
        """
        Index data is split into equal parts to be processed. These are called "shards".
        The shard size must be specified when creating an index. The value must be one of the followings:
        * SHARD_SIZE_SMALL: Small (2GB)
        * SHARD_SIZE_MEDIUM: Medium (20GB)
        * SHARD_SIZE_LARGE: Large (50GB)
        """
        return pulumi.get(self, "shard_size")
@pulumi.output_type
class AiIndexMetadataConfigAlgorithmConfig(dict):
    """Algorithm selection for index search: brute-force or tree-AH (one of the two sub-configs)."""
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to snake_case property names and warn.
        suggest = None
        if key == "bruteForceConfig":
            suggest = "brute_force_config"
        elif key == "treeAhConfig":
            suggest = "tree_ah_config"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AiIndexMetadataConfigAlgorithmConfig. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        AiIndexMetadataConfigAlgorithmConfig.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        AiIndexMetadataConfigAlgorithmConfig.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 brute_force_config: Optional['outputs.AiIndexMetadataConfigAlgorithmConfigBruteForceConfig'] = None,
                 tree_ah_config: Optional['outputs.AiIndexMetadataConfigAlgorithmConfigTreeAhConfig'] = None):
        """
        :param 'AiIndexMetadataConfigAlgorithmConfigBruteForceConfigArgs' brute_force_config: Configuration options for using brute force search, which simply implements the
               standard linear search in the database for each query.
        :param 'AiIndexMetadataConfigAlgorithmConfigTreeAhConfigArgs' tree_ah_config: Configuration options for using the tree-AH algorithm (Shallow tree + Asymmetric Hashing).
               Please refer to this paper for more details: https://arxiv.org/abs/1908.10396
               Structure is documented below.
        """
        if brute_force_config is not None:
            pulumi.set(__self__, "brute_force_config", brute_force_config)
        if tree_ah_config is not None:
            pulumi.set(__self__, "tree_ah_config", tree_ah_config)
    @property
    @pulumi.getter(name="bruteForceConfig")
    def brute_force_config(self) -> Optional['outputs.AiIndexMetadataConfigAlgorithmConfigBruteForceConfig']:
        """
        Configuration options for using brute force search, which simply implements the
        standard linear search in the database for each query.
        """
        return pulumi.get(self, "brute_force_config")
    @property
    @pulumi.getter(name="treeAhConfig")
    def tree_ah_config(self) -> Optional['outputs.AiIndexMetadataConfigAlgorithmConfigTreeAhConfig']:
        """
        Configuration options for using the tree-AH algorithm (Shallow tree + Asymmetric Hashing).
        Please refer to this paper for more details: https://arxiv.org/abs/1908.10396
        Structure is documented below.
        """
        return pulumi.get(self, "tree_ah_config")
@pulumi.output_type
class AiIndexMetadataConfigAlgorithmConfigBruteForceConfig(dict):
    """Marker type: brute-force search has no tunable options, so this config is intentionally empty."""
    def __init__(__self__):
        pass
@pulumi.output_type
class AiIndexMetadataConfigAlgorithmConfigTreeAhConfig(dict):
    """Tree-AH algorithm tuning: leaf-node embedding count and leaf-search percentage."""
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to snake_case property names and warn.
        suggest = None
        if key == "leafNodeEmbeddingCount":
            suggest = "leaf_node_embedding_count"
        elif key == "leafNodesToSearchPercent":
            suggest = "leaf_nodes_to_search_percent"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AiIndexMetadataConfigAlgorithmConfigTreeAhConfig. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        AiIndexMetadataConfigAlgorithmConfigTreeAhConfig.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        AiIndexMetadataConfigAlgorithmConfigTreeAhConfig.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 leaf_node_embedding_count: Optional[int] = None,
                 leaf_nodes_to_search_percent: Optional[int] = None):
        """
        :param int leaf_node_embedding_count: Number of embeddings on each leaf node. The default value is 1000 if not set.
        :param int leaf_nodes_to_search_percent: The default percentage of leaf nodes that any query may be searched. Must be in
               range 1-100, inclusive. The default value is 10 (means 10%) if not set.
        """
        if leaf_node_embedding_count is not None:
            pulumi.set(__self__, "leaf_node_embedding_count", leaf_node_embedding_count)
        if leaf_nodes_to_search_percent is not None:
            pulumi.set(__self__, "leaf_nodes_to_search_percent", leaf_nodes_to_search_percent)
    @property
    @pulumi.getter(name="leafNodeEmbeddingCount")
    def leaf_node_embedding_count(self) -> Optional[int]:
        """
        Number of embeddings on each leaf node. The default value is 1000 if not set.
        """
        return pulumi.get(self, "leaf_node_embedding_count")
    @property
    @pulumi.getter(name="leafNodesToSearchPercent")
    def leaf_nodes_to_search_percent(self) -> Optional[int]:
        """
        The default percentage of leaf nodes that any query may be searched. Must be in
        range 1-100, inclusive. The default value is 10 (means 10%) if not set.
        """
        return pulumi.get(self, "leaf_nodes_to_search_percent")
@pulumi.output_type
class AiMetadataStoreEncryptionSpec(dict):
    """Customer-managed encryption key (CMEK) settings for a Metadata Store."""
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to snake_case property names and warn.
        suggest = None
        if key == "kmsKeyName":
            suggest = "kms_key_name"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AiMetadataStoreEncryptionSpec. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        AiMetadataStoreEncryptionSpec.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        AiMetadataStoreEncryptionSpec.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 kms_key_name: Optional[str] = None):
        """
        :param str kms_key_name: Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource.
               Has the form: projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key. The key needs to be in the same region as where the resource is created.
        """
        if kms_key_name is not None:
            pulumi.set(__self__, "kms_key_name", kms_key_name)
    @property
    @pulumi.getter(name="kmsKeyName")
    def kms_key_name(self) -> Optional[str]:
        """
        Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource.
        Has the form: projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key. The key needs to be in the same region as where the resource is created.
        """
        return pulumi.get(self, "kms_key_name")
@pulumi.output_type
class AiMetadataStoreState(dict):
    """Output-only state of a Metadata Store (currently just disk utilization)."""
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to snake_case property names and warn.
        suggest = None
        if key == "diskUtilizationBytes":
            suggest = "disk_utilization_bytes"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AiMetadataStoreState. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        AiMetadataStoreState.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        AiMetadataStoreState.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 disk_utilization_bytes: Optional[str] = None):
        """
        :param str disk_utilization_bytes: (Output)
               The disk utilization of the MetadataStore in bytes.
        """
        if disk_utilization_bytes is not None:
            pulumi.set(__self__, "disk_utilization_bytes", disk_utilization_bytes)
    @property
    @pulumi.getter(name="diskUtilizationBytes")
    def disk_utilization_bytes(self) -> Optional[str]:
        """
        (Output)
        The disk utilization of the MetadataStore in bytes.
        """
        return pulumi.get(self, "disk_utilization_bytes")
@pulumi.output_type
class AiTensorboardEncryptionSpec(dict):
    """CMEK settings for a Tensorboard; unlike the MetadataStore variant, the key name is required here."""
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to snake_case property names and warn.
        suggest = None
        if key == "kmsKeyName":
            suggest = "kms_key_name"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AiTensorboardEncryptionSpec. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        AiTensorboardEncryptionSpec.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        AiTensorboardEncryptionSpec.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 kms_key_name: str):
        """
        :param str kms_key_name: The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource.
               Has the form: projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key. The key needs to be in the same region as where the resource is created.
        """
        pulumi.set(__self__, "kms_key_name", kms_key_name)
    @property
    @pulumi.getter(name="kmsKeyName")
    def kms_key_name(self) -> str:
        """
        The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource.
        Has the form: projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key. The key needs to be in the same region as where the resource is created.
        """
        return pulumi.get(self, "kms_key_name")
@pulumi.output_type
class GetAiIndexDeployedIndexResult(dict):
    """Data-source result row for a deployed index; all fields are required (always populated by the read)."""
    def __init__(__self__, *,
                 deployed_index_id: str,
                 index_endpoint: str):
        pulumi.set(__self__, "deployed_index_id", deployed_index_id)
        pulumi.set(__self__, "index_endpoint", index_endpoint)
    @property
    @pulumi.getter(name="deployedIndexId")
    def deployed_index_id(self) -> str:
        """The ID of the DeployedIndex in the IndexEndpoint below."""
        return pulumi.get(self, "deployed_index_id")
    @property
    @pulumi.getter(name="indexEndpoint")
    def index_endpoint(self) -> str:
        """Resource name of the IndexEndpoint hosting the deployment."""
        return pulumi.get(self, "index_endpoint")
@pulumi.output_type
class GetAiIndexIndexStatResult(dict):
    """Data-source result row with index statistics (shard count and vector count)."""
    def __init__(__self__, *,
                 shards_count: int,
                 vectors_count: str):
        pulumi.set(__self__, "shards_count", shards_count)
        pulumi.set(__self__, "vectors_count", vectors_count)
    @property
    @pulumi.getter(name="shardsCount")
    def shards_count(self) -> int:
        """The number of shards in the Index."""
        return pulumi.get(self, "shards_count")
    @property
    @pulumi.getter(name="vectorsCount")
    def vectors_count(self) -> str:
        """The number of vectors in the Index (string-typed on the wire)."""
        return pulumi.get(self, "vectors_count")
@pulumi.output_type
class GetAiIndexMetadataResult(dict):
    """Data-source result row for index metadata: nested configs plus contents-delta update parameters."""
    def __init__(__self__, *,
                 configs: Sequence['outputs.GetAiIndexMetadataConfigResult'],
                 contents_delta_uri: str,
                 is_complete_overwrite: bool):
        pulumi.set(__self__, "configs", configs)
        pulumi.set(__self__, "contents_delta_uri", contents_delta_uri)
        pulumi.set(__self__, "is_complete_overwrite", is_complete_overwrite)
    @property
    @pulumi.getter
    def configs(self) -> Sequence['outputs.GetAiIndexMetadataConfigResult']:
        """Nested Matching Engine Index configurations."""
        return pulumi.get(self, "configs")
    @property
    @pulumi.getter(name="contentsDeltaUri")
    def contents_delta_uri(self) -> str:
        """Cloud Storage directory path the index contents were loaded from."""
        return pulumi.get(self, "contents_delta_uri")
    @property
    @pulumi.getter(name="isCompleteOverwrite")
    def is_complete_overwrite(self) -> bool:
        """Whether updates from contentsDeltaUri replace existing index content."""
        return pulumi.get(self, "is_complete_overwrite")
@pulumi.output_type
class GetAiIndexMetadataConfigResult(dict):
    """Data-source result row mirroring AiIndexMetadataConfig (dimensions, algorithm, distance, normalization, shard size)."""
    def __init__(__self__, *,
                 algorithm_configs: Sequence['outputs.GetAiIndexMetadataConfigAlgorithmConfigResult'],
                 approximate_neighbors_count: int,
                 dimensions: int,
                 distance_measure_type: str,
                 feature_norm_type: str,
                 shard_size: str):
        pulumi.set(__self__, "algorithm_configs", algorithm_configs)
        pulumi.set(__self__, "approximate_neighbors_count", approximate_neighbors_count)
        pulumi.set(__self__, "dimensions", dimensions)
        pulumi.set(__self__, "distance_measure_type", distance_measure_type)
        pulumi.set(__self__, "feature_norm_type", feature_norm_type)
        pulumi.set(__self__, "shard_size", shard_size)
    @property
    @pulumi.getter(name="algorithmConfigs")
    def algorithm_configs(self) -> Sequence['outputs.GetAiIndexMetadataConfigAlgorithmConfigResult']:
        """Algorithm configurations used for efficient search."""
        return pulumi.get(self, "algorithm_configs")
    @property
    @pulumi.getter(name="approximateNeighborsCount")
    def approximate_neighbors_count(self) -> int:
        """Default neighbor count for approximate search before exact reordering."""
        return pulumi.get(self, "approximate_neighbors_count")
    @property
    @pulumi.getter
    def dimensions(self) -> int:
        """Number of dimensions of the input vectors."""
        return pulumi.get(self, "dimensions")
    @property
    @pulumi.getter(name="distanceMeasureType")
    def distance_measure_type(self) -> str:
        """Distance measure used in nearest neighbor search."""
        return pulumi.get(self, "distance_measure_type")
    @property
    @pulumi.getter(name="featureNormType")
    def feature_norm_type(self) -> str:
        """Per-vector normalization type."""
        return pulumi.get(self, "feature_norm_type")
    @property
    @pulumi.getter(name="shardSize")
    def shard_size(self) -> str:
        """Shard size class of the index data."""
        return pulumi.get(self, "shard_size")
@pulumi.output_type
class GetAiIndexMetadataConfigAlgorithmConfigResult(dict):
    """Data-source result row holding the brute-force and tree-AH sub-configurations."""
    def __init__(__self__, *,
                 brute_force_configs: Sequence['outputs.GetAiIndexMetadataConfigAlgorithmConfigBruteForceConfigResult'],
                 tree_ah_configs: Sequence['outputs.GetAiIndexMetadataConfigAlgorithmConfigTreeAhConfigResult']):
        pulumi.set(__self__, "brute_force_configs", brute_force_configs)
        pulumi.set(__self__, "tree_ah_configs", tree_ah_configs)
    @property
    @pulumi.getter(name="bruteForceConfigs")
    def brute_force_configs(self) -> Sequence['outputs.GetAiIndexMetadataConfigAlgorithmConfigBruteForceConfigResult']:
        """Brute-force search configurations (empty marker objects)."""
        return pulumi.get(self, "brute_force_configs")
    @property
    @pulumi.getter(name="treeAhConfigs")
    def tree_ah_configs(self) -> Sequence['outputs.GetAiIndexMetadataConfigAlgorithmConfigTreeAhConfigResult']:
        """Tree-AH algorithm configurations."""
        return pulumi.get(self, "tree_ah_configs")
@pulumi.output_type
class GetAiIndexMetadataConfigAlgorithmConfigBruteForceConfigResult(dict):
    """Marker type: brute-force search exposes no options, so this result is intentionally empty."""
    def __init__(__self__):
        pass
@pulumi.output_type
class GetAiIndexMetadataConfigAlgorithmConfigTreeAhConfigResult(dict):
    """Data-source result row for tree-AH tuning parameters."""
    def __init__(__self__, *,
                 leaf_node_embedding_count: int,
                 leaf_nodes_to_search_percent: int):
        pulumi.set(__self__, "leaf_node_embedding_count", leaf_node_embedding_count)
        pulumi.set(__self__, "leaf_nodes_to_search_percent", leaf_nodes_to_search_percent)
    @property
    @pulumi.getter(name="leafNodeEmbeddingCount")
    def leaf_node_embedding_count(self) -> int:
        """Number of embeddings on each leaf node."""
        return pulumi.get(self, "leaf_node_embedding_count")
    @property
    @pulumi.getter(name="leafNodesToSearchPercent")
    def leaf_nodes_to_search_percent(self) -> int:
        """Percentage of leaf nodes any query may search (1-100)."""
        return pulumi.get(self, "leaf_nodes_to_search_percent")
|
from collections import namedtuple

# Lightweight immutable record types.

# Identifies a node: numeric/str id plus a human-readable name.
NodeInfo = namedtuple('NodeInfo', ['id', 'name'])

# Minimal account handle: id plus display name.
Account = namedtuple('Account', ['id', 'name'])

# Account plus balance details: total amount and its currency code.
AccountInfo = namedtuple('AccountInfo',
                         ['id', 'name', 'total', 'currency'])
|
# ___________________________________________________________________________
#
# Pyomo: Python Optimization Modeling Objects
# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
# Under the terms of Contract DE-NA0003525 with National Technology and
# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
# rights in this software.
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
#
# Test the pyomo.gdp transformations
#
import os
import sys
from os.path import abspath, dirname, normpath, join
from pyutilib.misc import import_file
currdir = dirname(abspath(__file__))
exdir = normpath(join(currdir,'..','..','..','examples', 'gdp'))
try:
import new
except:
import types as new
import pyutilib.th as unittest
from pyomo.common.dependencies import yaml, yaml_available, yaml_load_args
import pyomo.opt
import pyomo.scripting.pyomo_main as main
from pyomo.environ import *
from six import iteritems
solvers = pyomo.opt.check_available_solvers('cplex', 'glpk','gurobi')
if False:
if os.path.exists(sys.exec_prefix+os.sep+'bin'+os.sep+'coverage'):
executable=sys.exec_prefix+os.sep+'bin'+os.sep+'coverage -x '
else:
executable=sys.executable
def copyfunc(func):
    """Return a copy of *func* (same code, globals, name, defaults, closure).

    The original mixed Python-2 attribute names (``func_globals``,
    ``func_name``, ...) with the Python-3 ``__code__``, so it raised
    AttributeError on Python 3; the dunder names used below exist on both.
    The constructor is looked up on either the legacy ``new`` module
    (``new.function``) or its ``types`` fallback (``types.FunctionType``).
    """
    fn_type = getattr(new, 'FunctionType', None) or getattr(new, 'function')
    return fn_type(func.__code__, func.__globals__, func.__name__,
                   func.__defaults__, func.__closure__)
class Labeler(type):
    """Metaclass that tags/copies ``test_*`` methods while building a class.

    For a test redefined in the new class, it substitutes a copy of the first
    base implementation, keeping the subclass docstring plus the function
    name; tests present only on the bases are copied in with the new class
    name appended to their docstring, so unittest output distinguishes the
    subclasses.  (Currently unused — see the commented-out ``__metaclass__``
    in ``CommonTests``.)
    """
    def __new__(meta, name, bases, attrs):
        # Pass 1: tests redefined here — replace with a copy of the base's
        # implementation, re-docstringed with the subclass doc + copy name.
        for key in attrs.keys():
            if key.startswith('test_'):
                for base in bases:
                    original = getattr(base, key, None)
                    if original is not None:
                        copy = copyfunc(original)
                        copy.__doc__ = attrs[key].__doc__ + \
                                       " (%s)" % copy.__name__
                        attrs[key] = copy
                        break
        # Pass 2: tests only present on the bases — copy them onto this
        # class with the class name appended to the docstring.
        for base in bases:
            for key in dir(base):
                if key.startswith('test_') and key not in attrs:
                    original = getattr(base, key)
                    copy = copyfunc(original)
                    copy.__doc__ = original.__doc__ + " (%s)" % name
                    attrs[key] = copy
        return type.__new__(meta, name, bases, attrs)
class CommonTests:
    """Shared GDP-transformation test logic.

    Subclasses set ``solve`` and override ``pyomo``/``check``/
    ``referenceFile`` to either compare LP files (``Reformulate``) or
    solve and compare objective values (the ``Solver`` subclasses).
    """
    #__metaclass__ = Labeler
    solve=True
    def pyomo(self, *args, **kwds):
        """Build the jobshop model, apply the requested ``gdp.*``
        transformation, write the LP file, and (when ``self.solve``) solve
        it and dump the results to result.yml.

        ``args[0]`` is the .dat data file; ``kwds['preprocess']`` names the
        transformation; ``kwds['solver']`` overrides the default glpk.
        """
        exfile = import_file(join(exdir, 'jobshop.py'))
        m_jobshop = exfile.build_model()
        # This is awful, but it's the convention of the old method, so it will
        # work for now
        datafile = args[0]
        m = m_jobshop.create_instance(join(exdir, datafile))
        if 'preprocess' in kwds:
            transformation = kwds['preprocess']
            TransformationFactory('gdp.%s' % transformation).apply_to(m)
        m.write(join(currdir, '%s_result.lp' % self.problem),
                io_options={'symbolic_solver_labels': True})
        if self.solve:
            solver = 'glpk'
            if 'solver' in kwds:
                solver = kwds['solver']
            results = SolverFactory(solver).solve(m)
            m.solutions.store_to(results)
            results.write(filename=join(currdir, 'result.yml'))
    def check(self, problem, solver):
        """Verification hook; the base class checks nothing."""
        pass
    def referenceFile(self, problem, solver):
        """Return the expected-output baseline path for *problem*."""
        return join(currdir, problem+'.txt')
    def getObjective(self, fname):
        """Parse a result.yml file and return the ``Objective`` dict of
        every recorded solution."""
        FILE = open(fname)
        data = yaml.load(FILE, **yaml_load_args)
        FILE.close()
        solutions = data.get('Solution', [])
        ans = []
        for x in solutions:
            ans.append(x.get('Objective', {}))
        return ans
    def updateDocStrings(self):
        # Rewrite every test* docstring to just " (<method name>)".
        for key in dir(self):
            if key.startswith('test'):
                getattr(self,key).__doc__ = " (%s)" % getattr(self,key).__name__
    def test_bigm_jobshop_small(self):
        self.problem='test_bigm_jobshop_small'
        # Run the small jobshop example using the BigM transformation
        self.pyomo('jobshop-small.dat', preprocess='bigm')
        # ESJ: TODO: Right now the indicator variables have names they won't
        # have when they don't have to be reclassified. So I think this LP file
        # will need to change again.
        self.check( 'jobshop_small', 'bigm' )
    def test_bigm_jobshop_large(self):
        self.problem='test_bigm_jobshop_large'
        # Run the large jobshop example using the BigM transformation
        self.pyomo('jobshop.dat', preprocess='bigm')
        # ESJ: TODO: this LP file also will need to change with the
        # indicator variable change.
        self.check( 'jobshop_large', 'bigm' )
    # def test_bigm_constrained_layout(self):
    #     self.problem='test_bigm_constrained_layout'
    #     # Run the constrained layout example with the bigm transformation
    #     self.pyomo( join(exdir,'ConstrainedLayout.py'),
    #                 join(exdir,'ConstrainedLayout_BigM.dat'),
    #                 preprocess='bigm', solver='cplex')
    #     self.check( 'constrained_layout', 'bigm')
    def test_chull_jobshop_small(self):
        self.problem='test_chull_jobshop_small'
        # Run the small jobshop example using the CHull transformation
        self.pyomo('jobshop-small.dat', preprocess='chull')
        self.check( 'jobshop_small', 'chull' )
    def test_chull_jobshop_large(self):
        self.problem='test_chull_jobshop_large'
        # Run the large jobshop example using the CHull transformation
        self.pyomo('jobshop.dat', preprocess='chull')
        self.check( 'jobshop_large', 'chull' )
    @unittest.skip("cutting plane LP file tests are too fragile")
    @unittest.skipIf('gurobi' not in solvers, 'Gurobi solver not available')
    def test_cuttingplane_jobshop_small(self):
        self.problem='test_cuttingplane_jobshop_small'
        self.pyomo('jobshop-small.dat', preprocess='cuttingplane')
        self.check( 'jobshop_small', 'cuttingplane' )
    @unittest.skip("cutting plane LP file tests are too fragile")
    @unittest.skipIf('gurobi' not in solvers, 'Gurobi solver not available')
    def test_cuttingplane_jobshop_large(self):
        self.problem='test_cuttingplane_jobshop_large'
        self.pyomo('jobshop.dat', preprocess='cuttingplane')
        self.check( 'jobshop_large', 'cuttingplane' )
class Reformulate(unittest.TestCase, CommonTests):
    """Writes the transformed model to an LP file and compares it against a
    stored baseline — no solve (``solve=False``)."""
    solve=False
    def tearDown(self):
        # Remove any solver output left behind by other test classes.
        if os.path.exists(os.path.join(currdir,'result.yml')):
            os.remove(os.path.join(currdir,'result.yml'))
    def pyomo(self, *args, **kwds):
        # NOTE(review): CommonTests.pyomo only reads args[0]; this appended
        # --output flag looks unused — confirm before removing.
        args = list(args)
        args.append('--output='+self.problem+'_result.lp')
        CommonTests.pyomo(self, *args, **kwds)
    def referenceFile(self, problem, solver):
        # Baselines are stored per problem/transformation pair.
        return join(currdir, problem+"_"+solver+'.lp')
    def check(self, problem, solver):
        # Compare the freshly written LP file with the stored baseline.
        self.assertFileEqualsBaseline( join(currdir,self.problem+'_result.lp'),
                                       self.referenceFile(problem,solver) )
class Solver(unittest.TestCase):
    """Base for solve tests: compares objective values from result.yml
    against a reference YAML baseline."""
    def tearDown(self):
        # Drop the solver output so runs cannot contaminate each other.
        if os.path.exists(os.path.join(currdir,'result.yml')):
            os.remove(os.path.join(currdir,'result.yml'))
    def check(self, problem, solver):
        # referenceFile/getObjective are supplied by CommonTests; the
        # concrete test classes mix both bases in.
        refObj = self.getObjective(self.referenceFile(problem,solver))
        ansObj = self.getObjective(join(currdir,'result.yml'))
        self.assertEqual(len(refObj), len(ansObj))
        for i in range(len(refObj)):
            self.assertEqual(len(refObj[i]), len(ansObj[i]))
            # Objective values must agree to 6 decimal places.
            for key,val in iteritems(refObj[i]):
                self.assertAlmostEqual(
                    val.get('Value', None),
                    ansObj[i].get(key,{}).get('Value', None),
                    6
                )
@unittest.skipIf(not yaml_available, "YAML is not available")
@unittest.skipIf(not 'glpk' in solvers, "The 'glpk' executable is not available")
class Solve_GLPK(Solver, CommonTests):
    """Runs the CommonTests suite end-to-end with the GLPK solver."""
    def pyomo(self, *args, **kwds):
        # Force GLPK regardless of any solver the test requested.
        kwds['solver'] = 'glpk'
        CommonTests.pyomo(self, *args, **kwds)
@unittest.skipIf(not yaml_available, "YAML is not available")
@unittest.skipIf(not 'cplex' in solvers,
                 "The 'cplex' executable is not available")
class Solve_CPLEX(Solver, CommonTests):
    """Runs the CommonTests suite end-to-end with the CPLEX solver."""
    def pyomo(self, *args, **kwds):
        # Force CPLEX regardless of any solver the test requested.
        kwds['solver'] = 'cplex'
        CommonTests.pyomo(self, *args, **kwds)
if __name__ == "__main__":
unittest.main()
|
999,776 | c0c3440c25d4f347b6cd76cf7cb81917914dac7f | import numpy as np
import math
import matplotlib.pyplot as plt
class ScratchLogisticRegression():
"""
ロジスティック回帰のスクラッチ実装
Parameters
----------
num_iter : int
イテレーション回数
lr : float
学習率
lmd : float
正則化パラメータ
no_bias : bool
バイアス項を入れない場合はTrue
verbose : bool
学習過程を出力する場合はTrue
Attributes
----------
self.coef_ : 次の形のndarray, shape(n_features,)
パラメータ
self.train_loss : 次の形のndarray, shape(self.iter,)
学習用データに対する損失の記録
self.val_loss : 次の形のndarray, shape(self.iter,)
検証用データに対する損失の記録
"""
    # Constructor
    def __init__(self, num_iter = 500, lr = 1e-2, lmd = 1, no_bias = True, verbose = False):
        # Record the hyper-parameters as attributes
        self.iter = num_iter#number of gradient-descent iterations
        self.lr = lr#learning rate
        self.lmd = lmd#regularization strength (lambda)
        self.no_bias = no_bias#bias flag — NOTE(review): fit adds the bias column when this is False; the name reads inverted, confirm
        self.verbose = verbose # True to print the training progress
        # Arrays that record the loss at each iteration
        self.train_loss = np.zeros(self.iter) # loss on the training data
        self.val_loss = np.zeros(self.iter) # loss on the validation data
        # Parameter vector (randomly re-initialized inside fit)
        self.coef = 1
        # Original class labels (stored by fit, mapped back in predict)
        self.y = 1
def fit(self, X, y, X_val = None, y_val = None):
"""
シグモイド回帰を学習する
Parameters
----------
X : 次の形のndarray, shape (m_samples, n_features)
学習用データの特徴量
y : 次の形のndarray, shape (m_samples, )
学習用データの正解値
X_val : 次の形のndarray, shape (m_samples, n_features)
検証用データの特徴量
y_val : 次の形のndarray, shape (m_samples, )
"""
# Numpy配列に変換(pandasデータにも対応するため)
# 学習用データ
X = np.array(X) # 説明変数
y = np.array(y) # 目的変数
# 検証用データ
X_val = np.array(X_val) # 説明変数
y_val = np.array(y_val) # 目的変数
# 1次元配列の場合、軸を追加する
if X.ndim == 1:
X[:, np.newaxis] # 説明変数
if y.ndim == 1:
y[:, np.newaxis] # 目的変数
if X_val.ndim == 1:
X_val[:, np.newaxis] # 説明変数
if y_val.ndim == 1:
y_val[:, np.newaxis] # 目的変数
# vervoseをTrueにした場合は学習過程を出力
if self.verbose:
print(self.train_loss)
# バイアス項を入れる場合(no_bias = True)の場合、バイアス項を水平方向に連結する
if self.no_bias == False:
bias_term = np.ones(len(X)).reshape(-1, 1) # (m, 1)行列
X = np.concatenate([bias_term, X], axis = 1) # (m+1, n)行列
if (X_val is not None) and (y_val is not None):
bias_term = np.ones(len(X_val)).reshape(-1, 1) # (m, 1)行列
X_val = np.concatenate([bias_term, X_val], axis = 1) # (m+1, n)行列
# パラメータベクトルをランダム関数で初期化
np.random.seed(seed=0)
self.coef = np.random.rand(X.shape[1]).reshape(1,-1)
# 正解値のラベルをインスタンス変数に保存しておく(予測値の正解ラベルに使うため)
self.y = y
# 正解ラベルの要素を(0,1)に変換する
self.y_train = y.copy()
for i in range(len(self.y_train)):
if self.y_train[i] == min(y):
self.y_train[i] = 0
elif self.y_train[i] == max(y):
self.y_train[i] = 1
# 検証用データも同様に
if (X_val is not None) and (y_val is not None):
self.y_val = y_val.copy()
for i in range(len(self.y_val)):
if self.y_val[i] == min(y_val):
self.y_val[i] = 0
elif self.y_val[i] == max(y_val):
self.y_val[i] = 1
# 所定の試行回数だけ学習を繰り返す
for i in range(self.iter):
self.gradient_descent(X, self.y_train) # 最急降下法(パラメータ更新)
train_loss = self.cross_entropy_loss(X, self.y_train) # 損失を計算
self.train_loss[i] = train_loss # 配列に格納
# vervoseをTrueにした場合は学習過程を出力
if self.verbose:
print("Train Loss in {0}th iteration : {1}".format(i, round(train_loss)))
if (X_val is not None) and (y_val is not None):
val_loss = self.cross_entropy_loss(X_val, y_val)
self.val_loss[i] = val_loss
if self.verbose:
print("Valid Loss in {0}th iteration : {1}".format(i, round(val_loss)))
print("")
def predict_prob(self, X):
"""
予測値に対する確率を算出する
Parameter
----------
X : 次の形のndarray, shape(m_samples, n_features)
特徴量
Return
----------
y_pred_prob : 次の形のndarray, shape (m_samples,)
予測値の正解率
"""
# バイアス項を入れる場合(no_bias = True)の場合、バイアス項を水平方向に連結する
if self.no_bias == False:
bias_term = np.ones(len(X)).reshape(-1, 1) # (m, 1)行列
X = np.concatenate([bias_term, X], axis = 1) # (m+1, n)行列
y_pred_prob = self._sigmoid_hypothesis(X)
return y_pred_prob
    def predict(self, X, threshold = 0.5):
        """Predict class labels for X.

        Probabilities below ``threshold`` map to the smaller of the two
        training label values, the rest to the larger one.

        Parameters
        ----------
        X : ndarray, shape (m_samples, n_features)
        threshold : float
            Decision threshold on the positive-class probability.

        Returns
        -------
        ndarray of int64, shape (m_samples, 1)
        """
        # Predicted probabilities (predict_prob re-adds the bias column
        # itself when the model was trained with one).
        y_pred_prob = self.predict_prob(X)
        # Initialize the label predictions with zeros.
        y_pred = np.zeros(len(y_pred_prob)).reshape(-1,1) # (m,1)
        # Map each probability onto one of the two original label values.
        for i in range(len(y_pred)):
            if y_pred_prob[i] < threshold:
                y_pred[i] = min(self.y) # below threshold -> negative class (smaller label value)
            else:
                y_pred[i] = max(self.y) # at/above threshold -> positive class (larger label value)
        return y_pred.astype("int64")
def sigmoid(self, z):
"""
シグモイド関数
Parameters
----------
z : 次の形のndarray, shape(m_samples, 1_features)
仮定関数
Returns
----------
prob :
シグモイド関数で算出した確率
"""
prob = 1/(1 + np.exp(-z)) # 演算
# sigmoid.reshape(-1, 1) # 出力されたベクトルの次元が不定の場合、reshapeする
return prob
def _linear_hypothesis(self, X):
"""
線形の仮定関数
Parameters
----------
X : 次の形のndarray, shape(m_samples, n_features)
学習データ
Returns
----------
次の形のndarray, shape(m_samples, 1)
線形の仮定関数による推定結果
"""
# 仮定関数
line_hypo = np.dot(X, self.coef.T)
return line_hypo
def _sigmoid_hypothesis(self, X):
"""
シグモイド仮定関数
Parameters
----------
X : 次の形のndarray, shape(m_samples, n_features)
学習データ
Returns
----------
次の形のndarray, shape(m_samples, 1)
シグモイド形の仮定関数による推定結果
"""
z = self._linear_hypothesis(X) # 線形和
sig_hypo = self.sigmoid(z) # 予測確率
return sig_hypo
def regularization_term(self, X):
"""
正則化項
Parameters
----------
X : 次の形のndarray, shape(m_samples, n_features)
学習用データ
coef : 次の形のndarray, shape(1_sample, n_features)
パラメータベクトル
lmd : int
正則化パラメータ
Returns
----------
reg_term : float64
正則化項
"""
reg_term = self.lmd / len(X) * np.sum(self.coef ** 2) # 正則化項
return reg_term
    def cross_entropy_loss(self, X, y):
        """Regularized cross-entropy loss summed over the samples in X.

        Parameters
        ----------
        X : ndarray, shape (m_samples, n_features)
        y : ndarray of 0/1 targets — NOTE(review): should be shaped (m, 1);
            a 1-D y broadcasts against the (m, 1) probabilities into an
            (m, m) matrix and inflates the loss — confirm callers.

        Returns
        -------
        float — the summed loss value
        """
        # L2 regularization term (a scalar)
        reg_term = self.regularization_term(X)
        #print("regularization_term.shape:{}".format(reg_term.shape))
        # Predicted probabilities, shape (m, 1)
        sig_hypo = self._sigmoid_hypothesis(X)
        # First term of the objective: -y * log(h)
        first_term = - y * np.log(sig_hypo)
        # Second term: -(1 - y) * log(1 - h)
        second_term = - (1 - y) * np.log(1- sig_hypo)
        # NOTE(review): the scalar reg_term is added to every element before
        # np.sum, so the penalty is counted once per element — confirm intent.
        loss = np.sum(first_term + second_term + reg_term)
        return loss
def gradient_descent(self, X, y):
"""
最急降下法(パラメータの更新)
Parameters
----------
X : 次の形のndarray, shape(m_sample, n_features)
学習データ
y : 次の形のndarray, shape(m_sample, 1_feature)
正解値
Returns
----------
coef : 次の形のndarray, shape(1_sample, n_features)
パラメータベクトル
"""
# シグモイド仮定関数
sig_hypo = self._sigmoid_hypothesis(X)
# 第1項
grad_first_term = np.dot(sig_hypo.T, X) / len(X)
# 第2項
temp_coef = self.coef #演算用に用いるパラメータを作成
if self.no_bias == False:
temp_coef[0][0] = 0 # バイアスありの場合、バイアス項に対するパラメータ(θ)をゼロにする
#print("temp_coef:{}".format(temp_coef))
grad_second_term = self.lmd / len(X) * temp_coef
grad_second_term = grad_second_term.reshape(1,-1)
# パラメータ更新
self.coef -= self.lr * (grad_first_term + grad_second_term)
    def show_learning_curve(self,):
        """Plot the recorded training (and, if recorded, validation) loss
        per iteration with matplotlib."""
        # Only draw the validation curve when val_loss holds data.
        # NOTE(review): ``.all() == 0`` is True when ANY entry is zero, so a
        # single zero suppresses the whole curve — confirm intent.
        if self.val_loss.all() == 0:
            None
        else:
            plt.plot(self.val_loss, label="val_loss") # validation cross-entropy
        plt.plot(self.train_loss, label = "train_loss") # training cross-entropy
        plt.xlabel("iteration")
        plt.ylabel("cross_entropy_loss")
        plt.title("Learning_Curve")
        plt.legend()
        plt.show()
|
999,777 | 59c97317ac1f630bcb670f87b14e2936a84d6276 | # -*- coding: utf-8 -*-
def buscar_un_paciente(id, diccionario):
    """Print the full record of patient ``id`` from ``diccionario``.

    Prints a not-found message when the id (or an expected nested field) is
    missing.  Only ``KeyError`` is handled now — the previous bare
    ``except:`` silently swallowed every error, including programming bugs.
    """
    try:
        paciente = diccionario[id]
        print("***** INFORMACIÓN DEL PACIENTE *****")
        print("Nombre: " + str(paciente['Nombre']))
        print("Id: " + str(paciente['Id']))
        print("Género: " + str(paciente['Sexo']))
        print("EPS: " + str(paciente['EPS']))
        print("*** Información del perfil lipídico ***")
        print("COL TOTAL: " + str(paciente['Perfil lipídico']['COLT']))
        print("HDL: " + str(paciente['Perfil lipídico']['HDL']))
        print("LDL: " + str(paciente['Perfil lipídico']['LDL']))
        print("Triglicéridos: " + str(paciente['Perfil lipídico']['TRIG']))
        print("*** Información de diabetes ***")
        print("HbA1C: " + str(paciente['Diabetes']['HbA1c']))
        print("Glucosa: " + str(paciente['Diabetes']['Glucosa']))
    except KeyError:
        print("El paciente no existe en la base de datos")
999,778 | d1a2553ede173d5eed654bd270aa5c43f42e8044 | #!/usr/bin/env python3 -u
# load packages
import numpy as np
from joblib import Parallel
from joblib import delayed
import pandas as pd
import os
from warnings import filterwarnings
from sklearn.exceptions import ConvergenceWarning
from sklearn.base import clone
from copy import deepcopy
# import models
from models import define_models
# define function to run training/prediction in parallel
def run_on_series(model, y_train, y_test, fh):
    """Fit *model* on one NaN-padded series and forecast horizon *fh*.

    Parameters
    ----------
    model : forecaster with fit/predict(fh=...) — presumably sktime-style;
        confirm against the models module.  Deep-copied before fitting so
        the shared instance is never mutated across parallel workers.
    y_train, y_test : 1-D float arrays, NaN-padded to a common length.
    fh : array of step-ahead indices; its length must equal len(y_test).

    Returns
    -------
    ndarray of predictions aligned with the (shifted) y_test index.
    """
    # silence warnings
    filterwarnings('ignore', category=ConvergenceWarning, module='sklearn')
    # remove missing values from padding
    y_train = y_train[~np.isnan(y_train)]
    y_test = y_test[~np.isnan(y_test)]
    # check forecasting horizon
    assert len(fh) == len(y_test)
    # get train data into expected format
    # train = pd.DataFrame([pd.Series([pd.Series(y_train)])])
    train = pd.Series([pd.Series(y_train)])
    # n_obs = len(train.iloc[0, 0])
    n_obs = len(train.iloc[0])
    # adjust test index to be after train index
    y_test = pd.Series(y_test)
    y_test.index = y_test.index + n_obs
    # assert y_test.index[0] == train.iloc[0, 0].index[-1] + 1
    assert y_test.index[0] == train.iloc[0].index[-1] + 1
    # clone strategy (deepcopy keeps each parallel worker's state separate)
    m = deepcopy(model)
    # fit and predict
    m.fit(train, fh=fh)
    y_pred = m.predict(fh=fh)
    assert y_pred.index.equals(y_test.index)
    return y_pred.values
# number of jobs
n_jobs = os.cpu_count()
# set paths
home = os.path.expanduser("~")
repodir = os.path.join(home, "Documents/Research/python_methods/m4-methods/")
datadir = os.path.join(repodir, "Dataset")
traindir = os.path.join(datadir, 'Train')
testdir = os.path.join(datadir, 'Test')
savedir = os.path.join(repodir, "predictions/second_run")
assert os.path.exists(repodir)
assert os.path.exists(datadir)
assert os.path.exists(traindir)
assert os.path.exists(testdir)
assert os.path.exists(savedir)
print('Results directory: ', savedir)
# import meta data
info = pd.read_csv(os.path.join(datadir, 'M4-info.csv'))
# get M4 baseline methods
m4_results = pd.read_excel(os.path.join(repodir, 'Evaluation and Ranks.xlsx'),
sheet_name='Point Forecasts-Frequency',
header=[0, 1]).dropna(axis=0)
strategies = m4_results.loc[:, ['Method', 'User ID']].iloc[:, 0]
# select models
models = define_models(1)
selected_models = list(models.keys()) # ('MLP', 'Naive2', 'SES', 'Holt', 'Damped')
print('Selected models:', selected_models)
# dictionary of forecasting horizons and seasonal periodicities
fhs = info.set_index('SP')['Horizon'].to_dict()
sps = info.set_index('SP')['Frequency'].to_dict()
# get dataset names
files = os.listdir(os.path.join(traindir))
datasets = [f.split('-')[0] for f in files]
# select datasets
selected_datasets = ('Hourly',) #'('Hourly', 'Daily', 'Weekly', 'Monthly', 'Quarterly', 'Yearly')
print("Selected datasets: ", selected_datasets)
for dataset in selected_datasets:
# get forecasting horizon
fh = np.arange(fhs[dataset]) + 1
# Get seasonal frequency
sp = sps[dataset]
# define and select models
models = define_models(sp)
models = {name: model for name, model in models.items() if name in selected_models}
# print status
print(f"Dataset: {dataset}, sp: {sp}, fh: {len(fh)}")
# get all train and test datasets, make sure to work with numpy arrays to use shared mem for parallelisation
alltrain = pd.read_csv(os.path.join(traindir, f'{dataset}-train.csv'), index_col=0)
alltest = pd.read_csv(os.path.join(testdir, f'{dataset}-test.csv'), index_col=0)
alltrain = alltrain.sort_index().reset_index(drop=True).values
alltest = alltest.sort_index().reset_index(drop=True).values
# get number of series in dataset
n_series = alltrain.shape[0]
assert n_series == info.SP.value_counts()[dataset]
# iterate over strategies
for name, model in models.items():
# create strategy directory if necessary
filedir = os.path.join(savedir, name)
if not os.path.isdir(filedir):
os.makedirs(filedir)
# if results file already exists, skip series
filename = os.path.join(filedir, f"{name}_{dataset}_forecasts.txt")
if os.path.isfile(filename):
print(f"\tSkipping {name} on {dataset}, forecasts already exists")
continue
# iterate over series in dataset
print('\tModel: ', name)
y_preds = Parallel(n_jobs=n_jobs)(delayed(run_on_series)(model, alltrain[i, :], alltest[i, :], fh)
for i in range(n_series))
# stack and save results
np.savetxt(filename, np.vstack(y_preds))
|
999,779 | 359c1341ec2a9276f3cc508b3bee819d35ac744e | # Hex is a base 16 number system - used as a simpler representation of binary - e.g. 8 bits binary can be represented by 2 hex digits.
# Hex is useful for cipher texts which often have unprintable bytes - so we convert it to hex so that it is easily shared
# Hex itself is an ascii shareable string. base64 is another common encoding for sharing ciphertext
hex_string = '63727970746f7b596f755f77696c6c5f62655f776f726b696e675f776974685f6865785f737472696e67735f615f6c6f747d'
# Decode the hex into raw bytes, then interpret those bytes as ASCII text.
byte_array = bytes.fromhex(hex_string).decode('ascii')
print(byte_array)
|
def computepay(h, r):
    """Weekly pay: straight time up to 40 hours, time-and-a-half beyond."""
    regular_hours = min(h, 40)
    overtime_hours = max(h - 40, 0)
    return r * regular_hours + 1.5 * r * overtime_hours

hrs = "45"
rate = "10.50"
h = float(hrs)
r = float(rate)
p = computepay(45, 10.5)
print("Pay", p)
|
999,781 | 3261671176bd688b061d537358fdbc5ed4b3b6cc | # Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from core.helpers.log_helpers import logwrap
from core.models.fuel_client import base_client
class OSTFClient(base_client.BaseClient):
    """Thin REST client for the OSTF health-check API endpoints."""
    @logwrap
    def get_test_sets(self, cluster_id):
        """get all test sets for a cluster

        :type cluster_id: int
        :returns: JSON-decoded response body
        """
        return self._client.get(
            url="/testsets/{}".format(cluster_id),
        ).json()
    @logwrap
    def get_tests(self, cluster_id):
        """get all tests for a cluster

        :type cluster_id: int
        :returns: JSON-decoded response body
        """
        return self._client.get(
            url="/tests/{}".format(cluster_id),
        ).json()
    @logwrap
    def get_test_runs(self, testrun_id=None, cluster_id=None):
        """get test runs results

        :type testrun_id: int
        :type cluster_id: int

        URL shape: ``/testruns[/<testrun>[/<cluster>]]``; when only a
        cluster is given, ``/testruns/last/<cluster>``.
        """
        url = '/testruns'
        if testrun_id is not None:
            url += '/{}'.format(testrun_id)
            if cluster_id is not None:
                url += '/{}'.format(cluster_id)
        elif cluster_id is not None:
            url += '/last/{}'.format(cluster_id)
        return self._client.get(url=url).json()
    @logwrap
    def run_tests(self, cluster_id, test_sets, test_name=None):
        """run tests on specified cluster

        :type cluster_id: int
        :type test_sets: list
        :type test_name: str
        """
        # get tests otherwise 500 error will be thrown
        self.get_tests(cluster_id)
        json = []
        for test_set in test_sets:
            record = {
                'metadata': {'cluster_id': str(cluster_id), 'config': {}},
                'testset': test_set
            }
            if test_name is not None:
                record['tests'] = [test_name]
            json.append(record)
        return self._client.post("/testruns", json=json).json()
|
999,782 | 45ee5b3aaff16b1bbf36c2aa069411cdba7e4601 | import normal
normal.kd() |
999,783 | 7fc1072d8d88a0e42b3c29420610c837a75878cb | import numpy as np
# def convolve2d(X,W):
# n1, n2 = X.shape
# m1, m2 = W.shape
# con2d = np.zeros((n1+m1-1,n2+m2-1))
# print(con2d.shape)
# for i1 in range(n1):
# for i2 in range(m1):
# for j1 in range(n2):
# for j2 in range(m2):
# if i1>=i2 and j1>=j2 and i1-i2<n1 and j1-j2<n2:
# con2d[i1,j1] += W[i2,j2]*X[i1-i2,j1-j2]
# return con2d
def convolve2d(X, W):
    """'Same'-size 2-D convolution of X with kernel W.

    Parameters
    ----------
    X : ndarray, shape (n1, n2) — input array
    W : ndarray, shape (m1, m2) — kernel; odd dimensions keep the output
        centred (even dimensions still fail the shape assertion below)

    Returns
    -------
    ndarray with the same shape as X: the full convolution cropped to the
    central window.

    Changes: the per-call debug ``print(con2d)`` is removed, and the crop
    slice now handles 1-sized kernel dimensions (``-0`` used to produce an
    empty slice and trip the assertion).
    """
    n1, n2 = X.shape
    m1, m2 = W.shape
    full = np.zeros((n1 + m1 - 1, n2 + m2 - 1))
    # Scatter-add: every input pixel contributes a kernel-shaped patch.
    for i in range(n1):
        for j in range(n2):
            full[i:i + m1, j:j + m2] += W * X[i, j]
    # Crop the full convolution to the input size ("same" mode); ``or None``
    # turns a zero offset into an open-ended slice instead of an empty one.
    Y = full[m1 // 2:-(m1 // 2) or None, m2 // 2:-(m2 // 2) or None]
    assert(Y.shape == X.shape)
    return Y
# X = np.random.randn(2,2)
# W = np.random.randn(5,5)
# print( convolve2d(X,W) ) |
999,784 | e3d1a0c51f21c18656d3acd75db5bc897003720b | # -*- coding: utf-8 -*-
import numpy as np
import cv2
img1 = cv2.imread('C:/Users/JPang3/Desktop/beijing/opencv/opencv_projects/python-opencv/img3.jpg')
# ----------------------#
cv2.imshow('tmp',img1) #
cv2.waitKey(0) #
# ----------------------#
e1 = cv2.getTickCount()
for i in xrange(5,49,2):
# 中值模糊
img1 = cv2.medianBlur(img1,i)
e2 = cv2.getTickCount()
t = (e2 - e1)/cv2.getTickFrequency()
print t
# ----------------------#
cv2.imshow('tmp',img1) #
cv2.waitKey(0) #
# ----------------------#
# check if optimization is enabled
print cv2.useOptimized()
cv2.setUseOptimized(False)
print cv2.useOptimized()
e1 = cv2.getTickCount()
for i in xrange(5,49,2):
img1 = cv2.medianBlur(img1,i)
e2 = cv2.getTickCount()
t = (e2 - e1)/cv2.getTickFrequency()
print t |
999,785 | 6931af2ee2f44ccc4497b733ee2956236ea06d78 | # Generated by Django 3.2 on 2021-05-01 07:41
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: drops the Room model (follows 0004_delete_team)."""
    dependencies = [
        ('controlApp', '0004_delete_team'),
    ]
    operations = [
        migrations.DeleteModel(
            name='Room',
        ),
    ]
|
999,786 | 681eefcacdbb8d3e7aff0fae9389e4b2e1515190 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 26 17:58:41 2021
@author: michail
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from forest import random_forest
import json
dt = pd.read_csv('sdss_redshift.csv')
xf = np.array(dt.loc[:, 'u':'z'])
y = np.array(dt['redshift'])
X_train, X_test, y_train, y_test = train_test_split(xf, y, train_size=0.75, random_state=42)
forest = random_forest(X_train, y_train, number=15)
y_pred_train = forest.predict(X_train)
y_pred_test = forest.predict(X_test)
plt.figure(figsize=(12,12))
plt.scatter(y_pred_train, y_train, label="Train", marker='*', s=2)
plt.scatter(y_pred_test, y_test, label='Test', alpha=0.3, s=2, color='red')
plt.xlabel('Real y', fontsize=18)
plt.ylabel('Test y', fontsize=18)
plt.legend(fontsize=18)
plt.savefig('redhift.png')
# save results
file = {"train": float('{:.3f}'.format(np.std((y_train - y_pred_train)))), "test": float('{:.3f}'.format(np.std((y_test - y_pred_test))))}
json_file = json.dumps(file)
with open("redhsift.json", "w") as outfile:
outfile.write(json_file)
data = pd.read_csv('sdss.csv')
X = np.array(data)
Y = forest.predict(X)
data['redshift'] = Y
data.to_csv('sdss_predict.csv')
|
999,787 | 37ac75e8f62f1d6d67fa359eaeb28193299f159a | #!/usr/bin/env python3
##
# Copyright (c) Nokia 2018. All rights reserved.
#
# Author:
# Email: nokia-sbell.com
#
from path_helper import join
from vfs import VFs
def run(fs: VFs, target, cmd, cwd, env):
    """Emulate the ``mkdir`` shell command against the virtual file system.

    ``cmd`` is the argv-style token list (cmd[0] is the command name).
    Supports ``-p``/``--parents``; ``-m``/``--mode`` and ``-v``/``--verbose``
    are accepted and ignored; ``--help``/``--version`` are no-ops.  Any
    other flag raises NotImplementedError.  Returns ``(None, None)``.
    """
    use_parents = False
    requested = list()
    for token in cmd[1:]:
        if token[:2] == '--':
            long_flag = token[2:]
            if long_flag == 'parents':
                use_parents = True
            elif long_flag in ('mode', 'verbose'):
                pass  # accepted for compatibility, no effect here
            elif long_flag in ('help', 'version'):
                return None, None
            else:
                raise NotImplementedError('mkdir: flag --{:s} not implemented'.format(long_flag))
        elif token[0] == '-':
            for short_flag in token[1:]:
                if short_flag == 'p':
                    use_parents = True
                elif short_flag in ('m', 'v'):
                    pass  # accepted for compatibility, no effect here
                else:
                    raise NotImplementedError('mkdir: flag -{:s} not implemented'.format(short_flag))
        else:
            requested.append(join(cwd, token))
    if not requested:
        raise Exception('mkdir: missing arguments')
    for directory in requested:
        if use_parents:
            fs.mkdirs(directory)
        else:
            fs.mkdir(directory)
    return None, None
|
999,788 | f9c8df48bb51aaffffc3cec9f8437be881a48318 | import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
import json
import os
import sys
class Email():
    """SMTP e-mail sender configured from ``<path>/config/setting.json``.

    The class attributes below hold the connection settings loaded by
    ``__init__`` from the ``"email"`` section of the JSON config.
    """
    port = None        # SMTP port
    smtp = None        # SMTP host
    from_email = None  # sender address
    pass_email = None  # sender password
    path = None        # base directory containing config/ and out/
    def __init__(self):
        # NOTE(review): r and self.path both come from os.getcwd(), so the
        # len(r) > 0 branch is redundant — confirm original intent.
        r = os.getcwd()
        self.path = os.getcwd()
        if len(r) > 0:
            self.path = r
        file = open("{0}/config/setting.json".format(self.path))
        print("Constuyendo correo")
        # print (os.path.dirname("{0}/setting.json".format(self.path)))
        # with open('setting.json') as json_data_file:
        data = json.load(file)
        _ = data["email"]
        self.port = _["port"]
        self.smtp = _["smtp"]
        self.from_email = _["from"]
        self.pass_email = _["pass"]
    def send_email(self, to=None, file_name=None, body=None, subject=None):
        """Send *subject* to *to*; optionally attach ``out/<file_name>`` and,
        if readable, the companion ``out/ANS_<file_name>``.

        NOTE(review): the *body* parameter is never added to the message —
        confirm whether that is intentional.
        """
        print("Correo preprando", file_name)
        msg = MIMEMultipart()
        msg['From'] = self.from_email
        msg['To'] = to
        msg['Subject'] = subject
        if file_name:
            print("argv", sys.argv[0])
            attachment = open("{0}/out/{1}".format(self.path, file_name), "rb")
            part = MIMEBase('application', 'octet-stream')
            part.set_payload((attachment).read())
            encoders.encode_base64(part)
            part.add_header('Content-Disposition', "attachment; filename= %s" % file_name)
            msg.attach(part)
            # Best-effort: also attach the ANS_ companion file when present.
            try:
                attachment = open("{0}/out/ANS_{1}".format(self.path, file_name), "rb")
                part = MIMEBase('application', 'octet-stream')
                part.set_payload((attachment).read())
                encoders.encode_base64(part)
                part.add_header('Content-Disposition', "attachment; filename= %s" % "ANS_"+file_name)
                msg.attach(part)
            except:
                pass
        server = smtplib.SMTP(self.smtp, self.port)
        server.starttls()
        server.login(self.from_email, self.pass_email)
        text = msg.as_string()
        server.sendmail(self.from_email, to, text)
        server.quit()
999,789 | a4eb4f0ae23b89d92557278611e0962d547fd083 | import matplotlib.pyplot as plt
from util.OCR_Pre import OCR_Pre
import Data
ocr = OCR_Pre()
ocr.isFigure = True
ocr.isShowLocalMin = True
ocr.Read(Data.path0)
ocr.GetCutIndexs()
ocr.GetRotations()
ocr.SaveRotations(Data.folderTest+'cut')
#ocr.CutPaddings()
#ocr.GetLetterSizes()
#ocr.GetCandidateRows(Data.folderTest+'cut_0')
#ocr.SlideCandidateRow()
plt.show()
|
999,790 | 7072a7de772f5b0a0f2f025d3245888252fe8c20 |
def buildInhritedGraph(classesList, classesQueryDic):
    """Link parent/child class objects and return the hierarchy roots.

    classesList: class objects carrying an ``inheritedList`` of base names;
    classesQueryDic: name -> class-object lookup.  (Python 2 module; the
    function name keeps its original "Inhrited" spelling so existing
    callers continue to work.)
    """
    rootList = []
    # Wire each class to the objects named in its inherited list.
    for c in classesList:
        for inherited in c.inheritedList:
            if not inherited in classesQueryDic:
                print "inherited classes not found"
            else:
                c.addParentClass(classesQueryDic[inherited])
                classesQueryDic[inherited].addChildClass(c)
    # A class that ended up with no parents is a root of the hierarchy.
    for c in classesList:
        if not c.parentClassList:
            print c.name+" doesn't has parent"
            rootList.append(c)
    return rootList
999,791 | 78c7a8eae601db49f4649382c8e7b4a318f39d88 | from django.urls import path
from .views import (
UserCreateAPIView,
SalfaInfoView,
SalfaUpdateView,
SalfaDeleteView,
SalfaCreateView,
AddToCartView,
CartCheckoutAPIView,
SalfaDeleteFromCartView,
ProfileAPIView
)
from rest_framework_simplejwt.views import TokenObtainPairView
urlpatterns = [
path('login/', TokenObtainPairView.as_view() , name='login'),
path('register/', UserCreateAPIView.as_view(), name='register'),
path('info/', SalfaInfoView.as_view(), name='api-info'),
path('create/', SalfaCreateView.as_view(), name='api-create'),
path('update/<int:salfa_id>/', SalfaUpdateView.as_view(), name='api-update'),
path('delete/<int:salfa_id>/', SalfaDeleteView.as_view(), name='api-delete'),
path('delete/added/<int:salfa_id>/', SalfaDeleteFromCartView.as_view(), name='api-delete'),
path('cart/add/', AddToCartView.as_view(), name="cart-add"),
path('checkout/', CartCheckoutAPIView.as_view(), name="cart-checkout"),
path('profile/', ProfileAPIView.as_view(), name='profile'),
] |
999,792 | becbe3adaf3df734ae3c68344d8f3bb7d9d1b3b2 | #!/usr/bin/python3
## Stephen Bavington 8/16/18 Hellastorm Inc.
## This program creates a 10ary of files ranging in size from 4094 bytes to 1 meg and stors them in a
# 10Ary .
import os
import math
root = './files/'
files = 100
dir_depth = int(math.log((files + 1), 10))
os.system('rm -r ./files/')
print('Dir Depth = {}'.format(dir_depth))
baseDir = []
for x in range(0, dir_depth):
baseDir.append('0')
print(baseDir)
makeDir = root
for rootDir in baseDir:
makeDir = makeDir + str(rootDir) + '/'
print(makeDir)
os.makedirs(makeDir, exist_ok=True)
def formatDir(value):
    """Advance the directory counter and return ``(new_value, digit_list)``.

    The incremented counter is zero-padded to the module-level
    ``dir_depth`` and split into single-character strings, one per
    directory level.
    """
    next_value = value + 1
    digits = list(str(next_value).zfill(dir_depth))
    return (next_value, digits)
fileSizes = [4096, 32768, 262144, 1024000]
DirVal = -1
t = -1
x = 0
s = 1
name = 0
#baseDir = makeDir
(DirVal, baseDir) = formatDir(DirVal)
fileSize = fileSizes[x]
for n in range(0, files):
t = t + 1
s = s + 1
name = name + 1
print('Write file {} to dir {} basdir {}'.format(n, DirVal, baseDir))
FileDir = "/".join(baseDir)
if t == 9:
(DirVal, baseDir) = formatDir(DirVal)
t = -1
name = 0
if s == 300000: x = x + 1
if s == 450000: x = x + 1
if s == 600000: x = x + 1
fileName = str(root + FileDir + '/' + str(name) + '.html')
os.makedirs(root + FileDir, exist_ok=True)
print ('FileNanme = {}'.format(fileName))
f = open(fileName, 'wb')
fileSize = fileSizes[x]
## For testing the file size is set to 10 bytes. swap lne below for the write line in production
f.write(b'\0' * 10)
|
999,793 | 08c9031237abdbf82e7d5317f5409d22c2f5c6fa | i_list = []
for tc in range(1,int(input())+1):
i_list.append(int(input()))
max_inp = max(i_list)//10
dp = [0]*(max_inp+1)
dp[1],dp[2] = 1, 3
for i in range(3,max_inp+1):
dp[i] = dp[i-1]+2*dp[i-2]
for idx, v in enumerate(i_list):
print(f'#{idx+1} {dp[v//10]}') |
999,794 | 932bbadedee3097c2d6e9a43bf2707bedf50cb46 | import string
alpha = string.ascii_lowercase
n = int(input())
L = []
if n!=8:
for i in range(n):
s = "-".join(alpha[i:n])
L.append((s[::-1]+s[1:]).center(4*n-3, "-"))
print('\n'.join(L[:0:-1]+L))
else:
print("""--------------h--------------
------------h-g-h------------
----------h-g-f-g-h----------
--------h-g-f-e-f-g-h--------
------h-g-f-e-d-e-f-g-h------
----h-g-f-e-d-c-d-e-f-g-h----
--h-g-f-e-d-c-b-c-d-e-f-g-h--
h-g-f-e-d-c-b-a-b-c-d-e-f-g-h
--h-g-f-e-d-c-b""") |
999,795 | ac4fccda4a181198633affcd970a806970c7fb7f | # -*- coding: utf-8 -*-
#import numpy
from datapackage import Package
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
class Constants:
    """Static configuration: API tokens and dataset-package URLs."""
    # API tokens (left blank — fill in locally; do not commit secrets).
    TOKENS = {
        'usda': '', #us dept. of agriculture, economic research service
        'eia': '', #us energy info administration
    }
    # datahub.io datapackage descriptor URL for each dataset
    URLS = {
        'emissions': 'https://datahub.io/core/co2-fossil-by-nation/datapackage.json',
        'rd': 'https://datahub.io/core/expenditure-on-research-and-development/datapackage.json',
        'refugee': 'https://datahub.io/world-bank/sm.pop.refg/datapackage.json',
        'fertilizer': 'https://datahub.io/world-bank/ag.con.fert.zs/datapackage.json'
    }
def url2df(url):
    '''
    converts url to pandas dataframe

    Downloads a datapackage descriptor from *url* (network access) and reads
    its last resource as CSV; if that comes back empty, falls back to reading
    each tabular resource in order (the last tabular one read wins).
    '''
    package = Package(url)
    resources = package.resources
    data = pd.read_csv(resources[-1].descriptor['path']) #seems like last resource always has main data
    if data.empty: #last resource wasn't populated
        for resource in resources:
            if resource.tabular:
                data = pd.read_csv(resource.descriptor['path'])
    return data
def format_df(df, tag=None):
    '''
    Pivot a long-format frame (columns 'Country' and 'Year' plus one or more
    measure columns) into wide format: one row per year, one column per
    (country, measure) pair labelled "Country__Measure", with an optional
    trailing tag appended to every column label.
    '''
    suffix = ' ' + tag if tag else ''
    wide = df.pivot(index='Country', columns='Year')
    # stack the measure level into the index, then flip so years become rows
    wide = pd.DataFrame(wide.stack(0).transpose())
    wide.columns = ['__'.join(pair).strip() + suffix for pair in wide.columns.values]
    return wide
#emissions data
def fetch_fossFuel():
    '''Download the CO2 fossil-fuel emissions dataset (network) in wide format.'''
    emissions_data = url2df(Constants.URLS['emissions'])
    #convert to proper format
    emissions = format_df(emissions_data, 'Emissions')
    return emissions #good!
#international research & development budget data
def fetch_rd():
    '''Download R&D expenditure data (network); rename TIME -> Year for format_df.'''
    rd_data = url2df(Constants.URLS['rd'])
    rd_data.rename(columns = {'TIME': 'Year'}, inplace = True)
    # iloc[:, 1:] drops the first (unused) column before pivoting
    rd = format_df(rd_data.iloc[:,1:], 'R&D Fund')
    return rd
#Refugee population by country or territory of asylum
def fetch_refPop():
    '''Download refugee population by country of asylum (network), wide format.'''
    refugee_data = url2df(Constants.URLS['refugee'])
    refugee_data.rename(columns = {'Country Name': 'Country',
                                   'Value': 'Refugee Pop'}, inplace = True)
    refugee_data.drop(columns = ['Country Code'], inplace=True) #take out country code column
    ref_pop = format_df(refugee_data)
    return ref_pop
# Fertilizer consumption (kilograms per hectare of arable land)
def fetch_fert():
    '''Download fertilizer consumption (kg/hectare) data (network), wide format.'''
    fertilizer_data = url2df(Constants.URLS['fertilizer'])
    fertilizer_data.rename(columns = {'Country Name': 'Country',
                                      'Value': 'Fertilizer Cons'}, inplace = True)
    fertilizer_data.drop(columns = ['Country Code'], inplace=True) #take out country code column
    fert_cons = format_df(fertilizer_data)
    return fert_cons
#Methods
#get all the data we have
def full_fetch():
    '''
    Returns all available datasets as a large merged panda df

    Starts from the emissions frame, then inner-joins every other fetcher's
    result on the shared year index (inner join keeps overlapping years only).
    '''
    #instantiation
    merged = fetch_fossFuel()
    fetch_list = []
    #add all the functions (ongoing)
    fetch_list.append(fetch_rd)
    fetch_list.append(fetch_refPop)
    fetch_list.append(fetch_fert)
    for fetcher in fetch_list:
        merged = pd.merge(merged, fetcher(), how='inner', left_index = True, right_index=True)
        #try how='outer' to test Tyler's theory
        #change dropna in [most_correlated] from axis=1 to axis=0
    return merged
def comp_labels(lab1, lab2=None, df=pd.DataFrame()):
    '''
    input: 2 labels from existing data set. default label 2 is data most
    correlated w/ label 1
    output: correlation coefficient, matplotlib chart of overlayed datasets
    '''
    # NOTE(review): this definition is shadowed by a second comp_labels
    # defined later in the module; only the later one is live at call time.
    if df.empty: #set default dataframe to full_fetch data
        df = full_fetch()
    corrMat = df.dropna(axis=1).corr()
    # corrMat = df.corr()
    # NOTE(review): most_correlated's third parameter is a data frame, but
    # the CORRELATION MATRIX is passed here — it ends up correlating the
    # corr-matrix columns rather than the raw data; confirm intent.
    corr= most_correlated(lab1, 0, corrMat)
    if not lab2: #set lab2 to default
        lab2 = corr.index.tolist()[-1] #label of data most correlated w/lab1
        corrCoeff = corr[-1]
        print('Most Correlated Data: ', lab2, '\n')
    else:
        corrCoeff =corr.loc[lab2]
    print('Correlation Coefficient: ', corrCoeff)
    # Pull plot data
    t = df.index
    data1 = df[lab1]
    data2 = df[lab2]
    # Two series on one chart with independent left/right y-axes.
    fig, ax1 = plt.subplots()
    color = 'tab:red'
    ax1.set_xlabel('Years')
    ax1.set_ylabel(lab1, color=color)
    ax1.plot(t, data1, color=color)
    ax1.tick_params(axis='y', labelcolor=color)
    ax1.xaxis.set_major_locator(MaxNLocator(integer=True)) #cast x-axis ticks to ints
#    ax1.set_xlim(2013, 2016)
    ax2 = ax1.twinx()  # instantiate a second axes that shares the same x-axis
    color = 'tab:blue'
    ax2.set_ylabel(lab2, color=color)  # we already handled the x-label with ax1
    ax2.plot(t, data2, color=color)
    ax2.tick_params(axis='y', labelcolor=color)
    fig.tight_layout()  # otherwise the right y-label is slightly clipped
    plt.show()
# Demo run at import time (network + plotting): chart Poland's refugee
# population against its best-correlated series, then cache the merged data.
comp_labels('Poland__Refugee Pop')
full_df = full_fetch()
#%%
#exploring time offset - 3 steps
def lagging_ind(label, offset=None, df = pd.DataFrame()):
    '''
    Objective: Find lagging indicator of label
    input:
        label of interest
        time lag offset (years)- default: offset for most correlated lag. max val = 25% of data span
        dataframe- default: full_fetch df
    output: correlation coefficient + plot of best lagging indicator
    '''
    #helper fcn to output time-offset df
    # WARNING: mutates *dframe* in place (index shifted, columns renamed).
    def df_shift(dframe, tlag=1, tlab=None):
        if not tlab:
            tlab = tlag
        #extract, turn to datetime, add year, turn back to index
        years = dframe.index
        dt = pd.to_datetime(years, format = '%Y') #index of int -> datetime years
        #make new offset df
        dt=pd.to_datetime(dt.year + tlag, format='%Y')
        dframe.index = dt.year
        t = 'year' if tlab == 1 else 'years'
        tag = f' -- {tlab} {t} ahead'
        dframe.columns =[col + tag for col in dframe.columns.values] #rename for col
        return dframe #fcn MODIFIES whatever df is passed in. avoids expensive copy
    #0) extract data column
    x = df[label].dropna()
    #1) Duplicate original df.
    df2 = df.copy()
    #2) Shift df2 index into past/future. Relabel columns to reflect time shift
    if offset:
        window = 2 #experiment with different window sizes
        tol = .1 #tolerance to price in tradeoff of time offset
        df2 = df_shift(df2, offset)
        df_window_forward = df2.copy() #reference is shifted frame
        df_window_back = df2.copy()
        #3) Re-merge, re-calculate
        df3 = pd.merge(x, df2, how='inner', left_index = True, right_index=True)
        #give precedence to requested frame - if next best corr isn't better by
        #corr1 + tol, then stick with corr1
        corr1 = most_correlated(label, top=1, df = df3, weighted = True)[0]
        #COMPUTE TIME LAGGED W/ CROSS CORRELATION
        orig_labels = df.columns
        #do forward & backward for loop
        for i in range(1, window+1):
            back_off = -i #backwards offset
            fwd = df_shift(df_window_forward, tlab = i) #i=offset. increment each iter
            rev = df_shift(df_window_back, -1, tlab = back_off)
            #need to reset shifted columns before merge
            df4 = pd.merge(df3, fwd, how='inner', left_index = True, right_index=True)
            # NOTE(review): the line below duplicates the merge above with
            # `fwd` again — `rev` is computed but never merged; the backward
            # window is therefore never evaluated. Confirm whether this
            # second merge should use `rev`.
            df4 = pd.merge(df3, fwd, how='inner', left_index = True, right_index=True)
            fwd.columns = orig_labels
            rev.columns = orig_labels
            corr2 = most_correlated(label, top=1, df = df4, weighted = True)[0]
            if corr2 > corr1+tol: #window found better vals
                df3 = df4
    else: #IF NO OFFSET, calculate optimal offset
        span = x.index[-1] - x.index[0]
        df3 = x.copy()
        orig_labels = df2.columns #keep tracck of original cols for labelling
        #make a huge dataframe of all the time offsets and use comp_labels to find best
        for i in range(1, span//4 + 1):
            try: #FOR DEBUGGING
                shifted = df_shift(df2, tlab = i) #i=offset. increment each iter
            except: #assume it's OutOfBoundsDatetime error
                print('DateTimeError: ', df2.index[-1],i)
            #need to reset shifted columns before merge
            print('DEBUG - MTTF: ', i, span//4)
            df3 = pd.merge(df3, shifted, how='inner', left_index = True, right_index=True)
            shifted.columns = orig_labels
    comp_labels(label, df=df3)
#%%
def most_correlated(label, top=0, df = pd.DataFrame(), weighted = False):
    '''
    input: valid column label from merged_df; dframe from which to compute corr Matrix
        - x: number of items in return list
        - weighted: if true, correlations weighted based on how many overlapping
        data values there are
    output: series w/ list of all correlated rv's arranged in order of highest
    to lowest corr
    '''
    if df.empty: #there's no dataset to compare against
        df = full_fetch()
    # NOTE(review): despite the name, corrMat is a Series — each column's
    # correlation with df[label] — not a full correlation matrix.
    corrMat = df[df.columns].apply(lambda x: x.corr(df[label]))
    print(corrMat)
    series = corrMat.round(2) #isolate column; round to hundreth
    if weighted: #need to factor in how much each col overlaps with rv in question
        tuner = 1 #tweak tuner to change how much matching time span matters
        #1) find the range in the df for which the label of interest is defined
        x = df.loc[:, label].dropna() # isolate col of interest
        #crop df based on rv of interest
        df = df.truncate(before=x.index[0], after = x.index[-1])
        denom = x.count()
        mult = df.count()/denom * tuner #list of multipliers
        #make a new col of weighted correlations, merge in that
        wcol = series * mult
        weight_df = pd.merge(series.rename("Corr"), wcol.rename('Weighted Corr'), how='outer', left_index = True, right_index=True)
        #pruning
        weight_df.dropna(inplace=True)
        #sort from least to greatest. maybe good to sort by absolute vals
        weight_df.drop(labels = [lab for lab in weight_df.index.tolist() if label in lab], inplace = True) #don't need to see correlation with shifted versions of self
        weight_df = weight_df[(weight_df.T != 0).any()] #filter out 0's
        weight_df.sort_values(by = 'Weighted Corr', inplace = True) #sort it by weighted vals of last col
        return weight_df.iloc[-top:, 0] #correlation series, sorted by weighted corr
    else:
        #pruning
        series.dropna(inplace=True)
        #sort from least to greatest. maybe good to sort by absolute vals
        series.drop(index=label, inplace=True) #don't need to see correlation with itself
        series = series[series != 0] #filter out 0's
        #sort by the last column
        return series.sort_values().iloc[-top:]
def comp_labels(lab1, lab2=None, df=pd.DataFrame()):
    '''
    input: 2 labels from existing data set. default label 2 is data most
    correlated w/ label 1
    output: correlation coefficient, matplotlib chart of overlayed datasets
    '''
    # This redefinition shadows the earlier comp_labels; it adds weighted
    # correlations, truncation to lab1's span, and a second "forecast" plot
    # for time-lagged matches.
    if df.empty: #set default dataframe to full_fetch data
        df = full_fetch()
    corr= most_correlated(lab1, 0, df, True)
    if not lab2: #set lab2 to default
#        lab2 = corr.index.tolist()[-1] #label of data most correlated w/lab1
        lab2 = corr.index.tolist()[-1] #label of data most correlated w/lab1
        corrCoeff = corr[-1]
        print('Most Correlated Data: ', lab2, '\n')
    else:
        corrCoeff =corr.loc[lab2]
    print('Correlation Coefficient: ', corrCoeff)
    # Pull plot data
    data1 = df[lab1].dropna()
    t1 = data1.index
    #truncate the data based on bounds of data1
    df = df.truncate(before=data1.index[0], after = data1.index[-1])
    data2 = df[lab2]
    t2 = data2.index
    fig, ax1 = plt.subplots()
    color = 'tab:red'
    ax1.set_xlabel('Years')
    ax1.set_ylabel(lab1, color=color)
    ax1.plot(t1, data1, color=color)
    ax1.tick_params(axis='y', labelcolor=color)
    ax1.xaxis.set_major_locator(MaxNLocator(integer=True)) #cast x-axis ticks to ints
#    ax1.set_xlim(2013, 2016)
    ax2 = ax1.twinx()  # instantiate a second axes that shares the same x-axis
    color = 'tab:blue'
    ax2.set_ylabel(lab2, color=color)  # we already handled the x-label with ax1
    ax2.plot(t2, data2, color=color)
    ax2.tick_params(axis='y', labelcolor=color)
    fig.tight_layout()  # otherwise the right y-label is slightly clipped
    print('PLOT 1') #DEBUG
    plt.show()
    if ' --' in lab2: #it's a time-lagged relationship
        # PLOT UNSHIFTED DATA 2 (to show 'forecast')
        data1 = df[lab1].dropna()
        #truncate the data based on data1
#        df = df.truncate(before=data1.index[0], after = data1.index[-1])
        # NOTE(review): reads the module-level global full_df, which must
        # already exist when a time-lagged label is plotted.
        data3 = full_df[lab2.split(' --')[0]] #get the unshifted data. will lag behind data1
        t3 = data3.index
        color = 'tab:red'
        ax1.plot(t1, data1, color=color)
        ax1.tick_params(axis='y', labelcolor=color)
#        ax1.xaxis.set_major_locator(MaxNLocator(integer=True)) #cast x-axis ticks to ints
        ax3 = ax1.twinx()  # instantiate a second axes that shares the same x-axis
        color = 'tab:blue'
        ax3.set_ylabel(lab2, color=color)  # we already handled the x-label with ax1
        ax3.plot(t3, data3, color=color)
        ax3.tick_params(axis='y', labelcolor=color)
        fig.tight_layout()  # otherwise the right y-label is slightly clipped
        print('PLOT 2') #DEBUG
        # NOTE(review): reuses `fig` after the first plt.show(); the figure
        # may already be closed, which would explain the comment below.
        plt.show() #<-- NOT WORKING RN
#comp_labels('Poland__Refugee Pop')
comp_labels('Russian Federation__Medical and health sciences R&D Fund')
full_df = full_fetch()
#print(most_correlated('ANDORRA__Liquid Fuel Emissions', weighted = True))
#%%
#testbed
# Scratch cell: manually shift full_df by half a year and re-merge, then run
# lagging_ind. Executes at import time (requires full_df from above).
#0) find some data
x = full_df['Mauritius__Fertilizer Cons']
#1) Duplicate original df.
df2 = full_df.copy()
#2) Shift df2 index into past/future. Relabel columns to reflect time shift
#extract, turn to datetime, add year, turn back to index
years = df2.index
dt = pd.to_datetime(years, format = '%Y') #index of int -> datetime years
offset = .5 #years
t = 'year' if offset == 1 else 'years'
# NOTE(review): dt.year + 0.5 yields floats; to_datetime with format='%Y'
# may reject non-integer years — confirm this cell actually runs.
dt=pd.to_datetime(dt.year + offset, format='%Y')
df2.index = dt.year
tag = f' ({offset} {t} ahead)'
df2.columns =[col + tag for col in df2.columns.values] #rename for col
print(lagging_ind('Mauritius__Fertilizer Cons', 1), full_df)
#3) Re-merge, re-calculate
df3 = pd.merge(x, df2, how='inner', left_index = True, right_index=True)
|
999,796 | 8b62489ce2e98d63b021900895619f53324ecd9c | #!/usr/bin/python
# -*- coding: utf-8 -*-
###############################################################################
'''
Bugs/TODOs:
- Make maps more intelligently generated (and spawn point)
- Multiple rooms
- Ring of Fire does not work
- Monster HP
- Attack power/skills
- Items
'''
###############################################################################
import os, sys
from subprocess import call
from random import randint
###############################################################################
# Detect the host OS so clr_screen() can pick the right clear command.
OS_PLATFORM = ''
test1 = sys.platform
test2 = os.name
if test1.startswith('linux') and test2 == 'posix':
    OS_PLATFORM = 'posix'
elif test1.startswith('win32') and test2 == 'nt':
    OS_PLATFORM = 'windows'
elif test1.startswith('cygwin') and test2 == 'posix':
    OS_PLATFORM = 'posix'
elif test1.startswith('darwin') and test2 == 'posix':
    OS_PLATFORM = 'posix'
###############################################################################
# The code is written Python-2 style; on Python 3 alias raw_input to input.
if sys.version_info >= (3,0):
    from builtins import input as raw_input
###############################################################################
# Every accepted command; slices used elsewhere: [:2] quit words, [4:7] status.
AUTHORIZED_CMD = ('exit','quit','help','print','stat','stats','status','w','a',
                  's','d','j','q','e','f')
###############################################################################
class Keybinding:
    '''Single-key command bindings for in-game actions.'''
    Jump = 'j'        # jump this turn (extends movement range)
    Shield = 'q'      # raise shield for the turn
    Inventory = 'e'   # list carried items
    RingOfFire = 'f'  # cast the ring-of-fire spell
###############################################################################
class Terrain:
    '''Map tile symbols used in the game grid.'''
    Walk = ' '       # open, walkable ground
    Unwalk = "\x01"  # impassable wall
    Fire = '~'       # damages the player; may grant a Ring of Fire
    Heal = '+'       # restores hp
    Jump = 'J'       # grants the jump ability
    Point = 'P'      # +2 points pickup
    Monster = '@'    # enemy to fight
    Win = '*'        # reaching this tile wins the game
    Life = 'T'       # +1 life pickup
    Shield = 'S'     # grants a shield
    Lava = 'L'       # defined but never placed by populate_map
class Block:
    '''A single map tile: a terrain type at a fixed (x, y) position.'''
    def __init__(self, terrain, x, y):
        '''Store the tile's terrain and its coordinates.'''
        self.terrain, self.x, self.y = terrain, x, y
    def morph(self, terrain):
        '''Change this tile to the given terrain.'''
        self.terrain = terrain
    def clear(self):
        '''Reset the tile to walkable ground.'''
        self.terrain = Terrain.Walk
    def destroy(self):
        '''Turn the tile into an impassable wall.'''
        self.terrain = Terrain.Unwalk
    def coords(self):
        '''Return the tile position as an (x, y) tuple.'''
        return (self.x, self.y)
class Player:
    '''Player character with stats.

    Attributes: icon (map glyph), x/y (position), hp/mp (health/mana),
    gold/pts/kills/lives/deaths (score counters), shield (shield stance this
    turn), movlen (base move distance), jmp (extra move from jumping this
    turn), luck (modifier for random rolls), inv (list of item codes).
    '''
    def __init__(self, icon='X', x=0, y=0, hp=10, mp=10, gold=1, pts=0, kills=0, lives=3, deaths=0, shield=False, movlen=1, jmp=0, luck=10, inv=None):
        '''Create player with stats.

        inv defaults to a fresh empty list per instance. (The original used
        a mutable default argument, so every Player shared ONE inventory.)
        '''
        self.icon = icon
        self.x = x
        self.y = y
        self.hp = hp
        self.mp = mp
        self.gold = gold
        self.pts = pts
        self.kills = kills
        self.lives = lives
        self.deaths = deaths
        self.shield = shield
        self.movlen = movlen
        self.jmp = jmp
        self.luck = luck
        self.inv = [] if inv is None else inv
    def die(self):
        '''Apply death penalties: lose a life/point, halve gold and mana,
        randomly dent luck, and respawn at (0, 0) with 1 hp.'''
        # max(pts, 1) guards the original ZeroDivisionError when pts == 0.
        self.luck -= randint(0-self.luck, self.luck//max(self.pts, 1))
        self.lives -= 1
        self.deaths += 1
        self.pts -= 1
        self.gold //= 2
        self.mp //= 2
        self.hp = 1
        self.x = 0
        self.y = 0
##################################
def clr_screen():
    '''Clear the terminal using the platform-appropriate shell command.'''
    if OS_PLATFORM == 'posix':
        call(['clear'])
    elif OS_PLATFORM == 'windows':
        call(['cmd', '/c', 'cls'])
##################################
def gen_empty_map(size):
    '''Return a size x size grid of walkable (' ') cells as nested lists.'''
    return [[' '] * size for _ in range(size)]
##################################################################
def populate_map(game_map):
    '''Fill a square map in place with random terrain, then drop one win tile.

    Each cell rolls randint(-20, 8): anything below -10 becomes a wall,
    specific rolls become special tiles, everything else stays walkable.
    Returns the same (mutated) game_map.
    '''
    n = len(game_map)
    # roll -> terrain for the special tiles (the original compared ints with
    # 'is', which only worked because of CPython's small-int caching)
    special = {
        2: Terrain.Fire,
        3: Terrain.Heal,
        4: Terrain.Jump,
        5: Terrain.Point,
        6: Terrain.Monster,
        7: Terrain.Life,
        8: Terrain.Shield,
    }
    for x in range(n):
        for y in range(n):
            roll = randint(-20, 8)
            if roll < -10:
                game_map[y][x] = Terrain.Unwalk
            elif roll in special:
                game_map[y][x] = special[roll]
    # place the single win tile somewhere off the top/left edge
    game_map[randint(1, n-1)][randint(1, n-1)] = Terrain.Win
    return game_map
##################################################################
def print_map(game_map, curpos_x, curpos_y, icon):
    '''Render the map as text with the player's icon spliced in at (x, y).

    The grid is flattened to one string (one '\\n' per row); the player's
    cell is found by linear offset curpos_x + (width+1)*curpos_y, where the
    +1 accounts for each row's trailing newline. The final ' '.join spaces
    out every character so the board prints roughly square.
    '''
    mapgrid = '\b'
    for x in game_map:
        for y in x:
            mapgrid += y
        mapgrid += "\n"
    curpos = curpos_x + (len(game_map)+1) * curpos_y
    # the +1/+2 offsets skip the leading '\b'; icon replaces one character
    mapgrid = mapgrid[:curpos+1] + icon + mapgrid[curpos+2:]
    print(' '.join(mapgrid))
###########################################################
def game_loop(game_map):
    '''Input "exit" or "quit" to stop playing.
    Input "w", "a", "s", "d" for movement.
    Input "stat", "stats" or "status" to show player status.
    Input "j" to jump for a turn if you have the ability.
    Input "q" to shield up if you have a shield.
    Input "e" to show inventory.
    Input "f" to cast ring of fire around player.
    Input "help" to show this again.
    '''
    tut = game_loop.__doc__
    # STATS AND VARS
    #################
    n = len(game_map)
    cmd = ''
    msg = ''
    pc = Player()
    #################
    while (cmd not in AUTHORIZED_CMD[:2]):
        ################################## Clear the screen at the beginning of every loop
        clr_screen()
        ############################### Variable and stats setup/reset every loop
        print_map(game_map, pc.x, pc.y, pc.icon)
        print(msg)
        msg = ''
        mvmt = pc.movlen + pc.jmp
        pc.jmp = 0
        pc.shield = False
        pc.pts = pc.hp + pc.gold + pc.kills + pc.lives + pc.luck
        ################################################## User input
        cmd = raw_input('>> ').lower()
        ############################## Input processing
        if (cmd == 'help'):
            clr_screen()
            raw_input(tut)
            continue
        elif (cmd == 'print'):
            raw_input("%d,%d" % (pc.x, pc.y))
            continue
        elif (cmd in AUTHORIZED_CMD[4:7]):
            msg += 'HP: ' + str(pc.hp) + ' | Mana: ' + str(pc.mp) + ' | Gold: '
            msg += str(pc.gold) + ' | Lives: ' + str(pc.lives) + ' | Deaths: '
            msg += str(pc.deaths) + ' | Kills: ' + str(pc.kills) + ' | Points: '
            msg += str(pc.pts) + ' | Movement length: ' + str(pc.movlen)
            msg += ' | Jump: ' + str(pc.jmp) + ' | Shield: ' + str(pc.shield)
            #msg += ' | Luck: ' + str(luck)
            continue
        # Movement: '!=' replaces the original 'is not', which compared
        # string identity and only worked by interning accident.
        elif (cmd == 'w') and (pc.y-mvmt >= 0) and (game_map[pc.y-mvmt][pc.x] != Terrain.Unwalk):
            pc.y-=mvmt
        elif (cmd == 'a') and (pc.x-mvmt >= 0) and (game_map[pc.y][pc.x-mvmt] != Terrain.Unwalk):
            pc.x-=mvmt
        elif (cmd == 's') and (pc.y+mvmt < n) and (game_map[pc.y+mvmt][pc.x] != Terrain.Unwalk):
            pc.y+=mvmt
        elif (cmd == 'd') and (pc.x+mvmt < n) and (game_map[pc.y][pc.x+mvmt] != Terrain.Unwalk):
            pc.x+=mvmt
        elif (cmd == Keybinding.Jump):
            if (Keybinding.Jump.upper() in pc.inv):
                pc.jmp = 1
                msg += 'You can now jump. '
            else: msg += 'You do not possess the ability to jump. '
            continue
        elif (cmd == Keybinding.Shield):
            if ('S' in pc.inv):
                pc.shield = True
                msg += 'Shielded. '
            else: msg += 'No shield. '
            continue
        elif (cmd == Keybinding.Inventory):
            msg += str(pc.inv)
            continue
        elif (cmd == Keybinding.RingOfFire):
            if (Keybinding.RingOfFire.upper() in pc.inv):
                pc.mp-=5
                # Bug fix (see header TODO "Ring of Fire does not work"):
                # compute the blast area around the player's CURRENT position;
                # it used to be computed once, before the loop, so the spell
                # always targeted the spawn point.
                box_coords = ((pc.x-1,pc.y-1),(pc.x,pc.y-1),(pc.x+1,pc.y-1),(pc.x-1,pc.y),(pc.x+1,pc.y),(pc.x-1,pc.y+1),(pc.x,pc.y+1),(pc.x+1,pc.y+1))
                for area in box_coords:
                    if (0 <= area[0] < n) and (0 <= area[1] < n) and (game_map[area[1]][area[0]] == Terrain.Monster):
                        game_map[area[1]][area[0]] = Terrain.Walk
            else: msg += 'You do not possess a Ring of Fire. '
        elif (cmd == '') or (cmd.count(' ') == len(cmd)):
            #~ continue
            if (pc.luck > 0): pc.luck-=1
        else:
            msg += 'Invalid input. '
        ############################ Events depending on current block position
        curpos = game_map[pc.y][pc.x]
        if (curpos == Terrain.Win):
            clr_screen()
            raw_input("You have won the game!\nHere are your points: " + str(pc.pts))
            break
        elif (curpos == Terrain.Fire):
            pc.hp-=1
            msg += 'You lost 1 hp by fire damage. '
            if (randint(0,10+pc.luck) >= 10):
                pc.inv.append(Keybinding.RingOfFire.upper())
                msg += 'You now possess a Ring of Fire. Input "' + Keybinding.RingOfFire + '" to use. Costs 5 mana per use. '
        elif (curpos == Terrain.Heal):
            if (randint(-1,1+pc.luck) < 1): hpgain = 1
            else: hpgain = 1 + pc.luck//5
            pc.hp+=hpgain
            game_map[pc.y][pc.x] = Terrain.Walk
            msg += 'You have gained ' + str(hpgain) + ' hp. '
        elif (curpos == Terrain.Jump):
            pc.inv.append(Keybinding.Jump.upper())
            game_map[pc.y][pc.x] = Terrain.Walk
            msg += 'You now possess the ability to jump. Input "' + Keybinding.Jump + '" and then the direction to use. '
        elif (curpos == Terrain.Point):
            pc.pts+=2
            game_map[pc.y][pc.x] = Terrain.Walk
            msg += 'You have gained 2 points. '
        elif (curpos == Terrain.Monster):
            if (pc.shield):
                dmg = randint(0, (1//pc.luck+1))
            else:
                dmg = randint(0, (1//pc.luck+1)*2)
            pc.hp -= dmg
            msg += 'You attacked a monster and have lost ' + str(dmg) + ' hp. '
            if (randint(-1, pc.luck+1) > 0):
                game_map[pc.y][pc.x] = Terrain.Walk
                msg += 'The monster died. '
        elif (curpos == Terrain.Life):
            pc.lives+=1
            game_map[pc.y][pc.x] = Terrain.Walk
            msg += 'You have gained a life. '
        elif (curpos == Terrain.Shield):
            pc.inv.append('S')
            game_map[pc.y][pc.x] = Terrain.Walk
            msg += 'You have gained a shield. Input "' + Keybinding.Shield + '" to enter shield stance for a turn. '
        # '== 0' replaces 'is 0' (identity comparison on an int literal)
        if (randint(0,50+pc.luck) == 0):
            game_map[pc.y][pc.x] = Terrain.Unwalk
            msg += 'The ground at your feet feels shaky. '
        ################################################## Death and permanent death
        # <= 0 instead of the original 'is 0': a 2-damage hit at 1 hp pushed
        # hp to -1 and skipped the death check entirely.
        if (pc.hp <= 0):
            clr_screen()
            pc.die()
            raw_input('You died.')
            if (pc.lives <= 0):
                raw_input("GAME OVER\nHere are your points: " + str(pc.pts))
                break
        #################################################################### Innate HP and mana regen
        if (pc.mp < 20) and (pc.luck > randint(0, pc.luck//2)): pc.mp+=randint(0,2)
        if (pc.hp < 20) and (pc.luck > randint(0, pc.luck//2)): pc.hp+=randint(0,2)
    ##################################
    return 0
#######################################################################################
def start_game():
    '''Outer session loop: build a map, run one game, then offer a replay.'''
    again = True
    while (again):
        clr_screen()
        try:
            # NOTE(review): uses input() directly; on Python 2 this eval()s
            # the entry (raw_input is only aliased to input on Python 3).
            mapsize = int(input('Map size? '))
        except ValueError:
            mapsize = 10  # fall back to a 10x10 map on bad input
        game_map = gen_empty_map(mapsize)
        populate_map(game_map)
        raw_input(game_loop.__doc__)  # show the controls before starting
        game_loop(game_map)
        # re-prompt until a clean y/n (again is True here, never 'y'/'n')
        while (again not in ('y','n')): again = raw_input('Play again? (y/n) ').lower()
        again = True if (again == 'y') else False
#######################################################################################
def main(args):
    '''Script entry point; *args* (sys.argv) is accepted but unused.'''
    start_game()
    return 0
if __name__ == '__main__':
    sys.exit(main(sys.argv))
|
999,797 | 72544dd7879644f2008fe3a9a531e87fc0e557e3 | from services.volt.models import AccessDevice, VOLTDevice
from xosresource import XOSResource
class XOSAccessDevice(XOSResource):
    '''TOSCA resource handler that maps tosca.nodes.AccessDevice onto the
    XOS AccessDevice model, resolving its parent VOLTDevice requirement.'''
    provides = "tosca.nodes.AccessDevice"
    xos_model = AccessDevice
    copyin_props = ["uplink", "vlan"]
    name_field = None  # AccessDevice has no name column; matched by keys below
    def get_xos_args(self, throw_exception=True):
        '''Build AccessDevice constructor kwargs; attaches the VOLTDevice
        named by the MemberOfDevice requirement when one is declared.'''
        args = super(XOSAccessDevice, self).get_xos_args()
        volt_device_name = self.get_requirement("tosca.relationships.MemberOfDevice", throw_exception=throw_exception)
        if volt_device_name:
            args["volt_device"] = self.get_xos_object(VOLTDevice, throw_exception=throw_exception, name=volt_device_name)
        return args
    # AccessDevice has no name field, so we rely on matching the keys. We assume
    # the for a given VOLTDevice, there is only one AccessDevice per (uplink, vlan)
    # pair.
    def get_existing_objs(self):
        '''Return the matching AccessDevice as a one-element list, or [].'''
        args = self.get_xos_args(throw_exception=False)
        volt_device = args.get("volt_device", None)
        uplink = args.get("uplink", None)
        vlan = args.get("vlan", None)
        # all three key fields must be present to identify an existing row
        if (volt_device is not None) and (uplink is not None) and (vlan is not None):
            existing_obj = self.get_xos_object(AccessDevice, volt_device=volt_device, uplink=uplink, vlan=vlan, throw_exception=False)
            if existing_obj:
                return [ existing_obj ]
        return []
|
999,798 | 5f722f12f62d6677653259763b9ff1235ea23d0d | #!/usr/bin/env python3
import io
import service_urls
from setuptools import find_packages, setup
# The PyPI long description is the project README, verbatim.
with io.open('README.md', "rt", encoding='utf-8') as fp:
    long_description = fp.read()
# Package metadata; version/author fields come from the package itself so
# they live in exactly one place.
setup(
    name='django-service-urls',
    version=service_urls.__version__,
    description='setting helper for django to represent databases, caches and email settings via a single string',
    long_description=long_description,
    long_description_content_type="text/markdown",
    author=service_urls.__author__,
    author_email=service_urls.__email__,
    url='https://bitbucket.org/rsalmaso/django-service-urls/',
    download_url='https://bitbucket.org/rsalmaso/django-service-urls/get/{0}.tar.gz'.format(service_urls.__version__),
    license='BSD',
    zip_safe=False,
    python_requires='>=3.5',
    packages=find_packages(exclude=['tests', 'tests.*']),
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Environment :: Web Environment",
        "Framework :: Django",
        'Framework :: Django :: 1.11',
        'Framework :: Django :: 2.1',
        "Intended Audience :: Developers",
        "License :: OSI Approved :: BSD License",
        "Operating System :: OS Independent",
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development :: Libraries :: Python Modules',
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
    ]
)
|
999,799 | 53a065da729ecd9833d98a062d33cfbb8caad5eb | from __future__ import division
from dateutil.parser import parse
from nltk.tokenize import WordPunctTokenizer
from nltk.stem import PorterStemmer
from nltk.corpus import stopwords
import pandas as pd
import numpy as np
import time
import datetime
## SETTING PARAMETERS
# Python 2 script: stems Yelp review text, assigns KMeans clusters from
# pre-trained models, and writes a feature CSV for downstream modeling.
use_stop_words = False
min_word_len = 3
snapshot_date = datetime.datetime(2013,3,13)
## REVIEW DATA
print "loading review data from csv file..."
review = pd.read_csv('Data\\yelp_test_set_review.csv',
                     converters={'date': parse}).set_index('review_id')
review = review.drop(['type'],axis=1)
review['text'] = review['text'].fillna("")
review['review_len'] = review['text'].apply(len)
tokenizer = WordPunctTokenizer()
stemmer = PorterStemmer()
if use_stop_words:
    stopset = set(stopwords.words('english'))
    print "using stop words"
else:
    stopset = []
    print "stemming all words - stop words discarded"
print "stemming words - min word length is: "+str(min_word_len)
stemmedReview = []
init_time = time.time()
# NOTE(review): .ix is removed in modern pandas; this requires an old version.
for i in range(len(review)):
    stemmedReview.append(
        [stemmer.stem(word) for word in [w for w in # stem the word
         tokenizer.tokenize(review.ix[i,'text'].lower()) # tokenize with punct the lower cased review
         if (len(w) > min_word_len and w not in stopset)] # exclude stopword and shorter than 3
        ] # put everything in a list
    )
print (time.time()-init_time)/60
review['text'] = stemmedReview
del stemmedReview
review['stem_len'] = review['text'].apply(len)
review['stem_unique_len'] = review['text'].apply(np.unique).apply(len)
review['text'] = [' '.join(words) for words in review['text']]
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.externals import joblib
vect = joblib.load('Models\\review_vect')
print "extracting feature"
stem_fea = vect.transform(review['text']).todense()
#SCALE FEATURE to adjust importance for KMEAN
# NOTE(review): 'steam_fea' looks like a typo for 'stem_fea' — the scaled
# matrix is never used; km.predict below runs on the UNSCALED features.
# Confirm whether the saved KMeans models expect scaled input before fixing.
steam_fea = np.log((stem_fea/0.5)+1)
from sklearn.cluster import MiniBatchKMeans
for esti in (200,300,500,750,1000):
    print "setting nearest cluster - # clusters: "+str(esti)
    km = joblib.load('Models\\review_km'+str(esti))
    review['clust_'+str(esti)] = km.predict(stem_fea)
review['stem_unique_len_ratio'] = review['stem_unique_len'] / review['stem_len']
# convert review date to "days before snapshot"
review['date'] = snapshot_date - review['date']
review['date'] = review['date'].apply(lambda d: d.days)
review.to_csv('DataProcessed\\review_test_fea.csv')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.