# v12/rhba_utils.py, from the gavinIRL/RHBotArray repo (MIT license)
import os
import cv2
import time
import math
import ctypes
import random
import win32ui
import win32gui
import warnings
import win32con
import threading
import subprocess
import pytesseract
import numpy as np
import pydirectinput
from fuzzywuzzy import process
from custom_input import CustomInput
from win32api import GetSystemMetrics
os.chdir(os.path.dirname(os.path.abspath(__file__)))
warnings.simplefilter("ignore", DeprecationWarning)
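# Plain value container for HSV thresholds plus saturation/value shifts,
# consumed by BotUtils.apply_hsv_filter and filter_blackwhite_invert below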
class HsvFilter:
def __init__(self, hMin=None, sMin=None, vMin=None, hMax=None, sMax=None, vMax=None,
sAdd=None, sSub=None, vAdd=None, vSub=None):
self.hMin = hMin
self.sMin = sMin
self.vMin = vMin
self.hMax = hMax
self.sMax = sMax
self.vMax = vMax
self.sAdd = sAdd
self.sSub = sSub
self.vAdd = vAdd
self.vSub = vSub
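# Captures frames of a single window through the win32 GDI API.
# A minimal usage sketch (the window title here is only an example):
#   wincap = WindowCapture("Rusty Hearts")
#   frame = wincap.get_screenshot()  # BGR numpy array, alpha dropped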
class WindowCapture:
w = 0
h = 0
hwnd = None
cropped_x = 0
cropped_y = 0
offset_x = 0
offset_y = 0
def __init__(self, window_name=None, custom_rect=None):
self.custom_rect = custom_rect
if window_name is None:
self.hwnd = win32gui.GetDesktopWindow()
else:
self.hwnd = win32gui.FindWindow(None, window_name)
if not self.hwnd:
raise Exception('Window not found: {}'.format(window_name))
        # w, h, cropped_x/y and offset_x/y are populated by
        # update_window_position() below
self.update_window_position()
def get_screenshot(self):
# get the window image data
wDC = win32gui.GetWindowDC(self.hwnd)
dcObj = win32ui.CreateDCFromHandle(wDC)
cDC = dcObj.CreateCompatibleDC()
dataBitMap = win32ui.CreateBitmap()
dataBitMap.CreateCompatibleBitmap(dcObj, self.w, self.h)
cDC.SelectObject(dataBitMap)
cDC.BitBlt((0, 0), (self.w, self.h), dcObj,
(self.cropped_x, self.cropped_y), win32con.SRCCOPY)
# convert the raw data into a format opencv can read
signedIntsArray = dataBitMap.GetBitmapBits(True)
        # np.fromstring is deprecated for binary input; use frombuffer
        img = np.frombuffer(signedIntsArray, dtype='uint8')
img.shape = (self.h, self.w, 4)
# free resources
dcObj.DeleteDC()
cDC.DeleteDC()
win32gui.ReleaseDC(self.hwnd, wDC)
win32gui.DeleteObject(dataBitMap.GetHandle())
# drop the alpha channel
img = img[..., :3]
# make image C_CONTIGUOUS
img = np.ascontiguousarray(img)
return img
def focus_window(self):
win32gui.SetForegroundWindow(self.hwnd)
def update_window_position(self, border=True):
self.window_rect = win32gui.GetWindowRect(self.hwnd)
self.w = self.window_rect[2] - self.window_rect[0]
self.h = self.window_rect[3] - self.window_rect[1]
border_pixels = 8
titlebar_pixels = 30
if self.custom_rect is None:
if border:
self.w = self.w - (border_pixels * 2)
self.h = self.h - titlebar_pixels - border_pixels
self.cropped_x = border_pixels
self.cropped_y = titlebar_pixels
else:
self.cropped_x = 0
self.cropped_y = 0
self.w += 3
else:
self.w = self.custom_rect[2] - self.custom_rect[0]
self.h = self.custom_rect[3] - self.custom_rect[1]
self.cropped_x = self.custom_rect[0]
self.cropped_y = self.custom_rect[1]
self.offset_x = self.window_rect[0] + self.cropped_x
self.offset_y = self.window_rect[1] + self.cropped_y
    # WARNING: call update_window_position() again if the game window may
    # have moved since the bot started, otherwise the offsets go stale
def get_screen_position(self, pos):
return (pos[0] + self.offset_x, pos[1] + self.offset_y)
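# Grab-bag of navigation, OCR and pixel-detection helpers. The methods take
# no self and are invoked directly on the class, e.g.
#   BotUtils.move_to(gamename, x, y)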
class BotUtils:
def grab_online_servers():
output = subprocess.run("arp -a", capture_output=True).stdout.decode()
list_ips = []
with open("servers.txt", "r") as f:
lines = f.readlines()
for ip in lines:
if ip.strip() in output:
list_ips.append(ip.strip())
return list_ips
def grab_current_lan_ip():
output = subprocess.run(
"ipconfig", capture_output=True).stdout.decode()
        # Assumes a single adapter with a 169.x.x.x address
        _, output = output.split(
            "IPv4 Address. . . . . . . . . . . : 169", maxsplit=1)
output, _ = output.split("Subnet Mask", maxsplit=1)
current_lan_ip = "169" + output.strip()
return current_lan_ip
def start_server_threads(list_servers):
for server in list_servers:
t = threading.Thread(target=server.main_loop)
t.start()
def grab_closest(rel_list: list):
closest_index = False
smallest_dist = 100000
for i, pair in enumerate(rel_list):
x = abs(pair[0])
y = abs(pair[1])
hypot = math.hypot(x, y)
if hypot < smallest_dist:
smallest_dist = hypot
closest_index = i
return closest_index
def grab_order_closeness(relatives):
dists = []
for x, y in relatives:
dists.append(math.hypot(x, y))
return sorted(range(len(dists)), key=dists.__getitem__)
def grab_order_lowest_y(coords):
y_only = []
for _, y in coords:
y_only.append(y)
return sorted(range(len(y_only)), key=y_only.__getitem__)
    # angle is the room's left-to-right travel direction, with north as 0deg
def move_diagonal(gamename, x, y, angle=90, speed=20, rel=False):
# If not a direct relative move command
if not rel:
if not BotUtils.detect_bigmap_open(gamename):
BotUtils.try_toggle_map()
player_pos = BotUtils.grab_player_pos(gamename)
start_time = time.time()
            # grab_player_pos returns (False, False) when the player marker
            # is not found, so test the first element explicitly
            while player_pos[0] is False:
time.sleep(0.05)
if not BotUtils.detect_bigmap_open(gamename):
BotUtils.try_toggle_map()
time.sleep(0.05)
player_pos = BotUtils.grab_player_pos(gamename)
if time.time() - start_time > 5:
print("Error with finding player")
os._exit(1)
BotUtils.close_map_and_menu(gamename)
relx = player_pos[0] - int(x)
rely = int(y) - player_pos[1]
            while abs(relx) > 100 or abs(rely) > 100:
CustomInput.press_key(CustomInput.key_map["right"], "right")
CustomInput.release_key(CustomInput.key_map["right"], "right")
time.sleep(0.02)
player_pos = BotUtils.grab_player_pos(gamename)
relx = player_pos[0] - int(x)
rely = int(y) - player_pos[1]
# Otherwise treat x,y as direct commands
else:
relx = x
rely = y
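        # 1/sqrt(2): with two arrow keys held, each axis advances at
        # roughly speed * 0.707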
mult = 0.707
if relx > 0:
keyx = "left"
CustomInput.press_key(CustomInput.key_map["left"], "left")
timeleftx = float("{:.4f}".format(abs(relx/(speed*mult))))
elif relx < 0:
keyx = "right"
CustomInput.press_key(CustomInput.key_map["right"], "right")
timeleftx = float("{:.4f}".format(abs(relx/(speed*mult))))
        else:
            keyx = None  # no x movement
            timeleftx = 0
            mult = 1
if rely > 0:
keyy = "down"
CustomInput.press_key(CustomInput.key_map["down"], "down")
timelefty = float("{:.4f}".format(abs(rely/(speed*mult))))
elif rely < 0:
keyy = "up"
CustomInput.press_key(CustomInput.key_map["up"], "up")
timelefty = float("{:.4f}".format(abs(rely/(speed*mult))))
        else:
            keyy = None  # no y movement
            timelefty = 0
if relx != 0:
timeleftx = float("{:.4f}".format(abs(relx/speed)))
first_sleep = min([timeleftx, timelefty])
second_sleep = max([timeleftx, timelefty])
first_key = [keyx, keyy][[timeleftx, timelefty].index(first_sleep)]
second_key = [keyx, keyy][[timeleftx, timelefty].index(second_sleep)]
if first_sleep < 0.009:
if second_sleep < 0.009:
pass
else:
time.sleep(second_sleep-0.009)
CustomInput.release_key(
CustomInput.key_map[second_key], second_key)
elif timelefty == timeleftx:
time.sleep(first_sleep-0.009)
CustomInput.release_key(CustomInput.key_map[first_key], first_key)
CustomInput.release_key(
CustomInput.key_map[second_key], second_key)
else:
time.sleep(first_sleep - 0.009)
CustomInput.release_key(CustomInput.key_map[first_key], first_key)
time.sleep((second_sleep-first_sleep-0.009)*mult)
CustomInput.release_key(
CustomInput.key_map[second_key], second_key)
def move_towards(value, dir):
if dir == "x":
if value > 0:
key = "left"
else:
key = "right"
elif dir == "y":
if value > 0:
key = "down"
else:
key = "up"
CustomInput.press_key(CustomInput.key_map[key], key)
def move_to(gamename, x, y, angle=90, yfirst=True, speed=22.5, loot=False, plyr=False, rel=False):
if not rel:
if not BotUtils.detect_bigmap_open(gamename):
BotUtils.try_toggle_map()
player_pos = BotUtils.grab_player_pos(gamename)
start_time = time.time()
            # grab_player_pos returns (False, False) when the player marker
            # is not found, so test the first element explicitly
            while player_pos[0] is False:
time.sleep(0.05)
if not BotUtils.detect_bigmap_open(gamename):
BotUtils.try_toggle_map()
time.sleep(0.05)
player_pos = BotUtils.grab_player_pos(gamename)
if time.time() - start_time > 5:
print("Error with finding player")
os._exit(1)
BotUtils.close_map_and_menu(gamename)
relx = player_pos[0] - int(x)
rely = int(y) - player_pos[1]
            while abs(relx) > 100 or abs(rely) > 100:
CustomInput.press_key(CustomInput.key_map["right"], "right")
CustomInput.release_key(CustomInput.key_map["right"], "right")
time.sleep(0.02)
player_pos = BotUtils.grab_player_pos(gamename)
relx = player_pos[0] - int(x)
rely = int(y) - player_pos[1]
else:
relx = x
rely = y
if not yfirst:
if not loot:
BotUtils.resolve_dir_v2(relx, "x", speed)
BotUtils.resolve_dir_v2(rely, "y", speed)
else:
lootfound = BotUtils.resolve_dir_with_looting(
relx, "x", speed, gamename)
if lootfound:
Looting.grab_all_visible_loot(gamename, plyr)
# Continue to destination without further looting (prevent stuck)
BotUtils.move_to(gamename, x, y, angle, yfirst, speed)
# When at destination check for loot again
if Looting.check_for_loot(gamename):
Looting.grab_all_visible_loot(gamename, plyr)
# If needs be return to destination
BotUtils.move_to(gamename, x, y, angle, yfirst, speed)
else:
lootfound = BotUtils.resolve_dir_with_looting(
rely, "y", speed, gamename)
if lootfound:
Looting.grab_all_visible_loot(gamename, plyr)
# Continue to destination without further looting (prevent stuck)
BotUtils.move_to(gamename, x, y, angle, yfirst, speed)
# When at destination check for loot again
if Looting.check_for_loot(gamename):
Looting.grab_all_visible_loot(gamename, plyr)
# If needs be return to destination
BotUtils.move_to(
gamename, x, y, angle, yfirst, speed)
else:
if not loot:
BotUtils.resolve_dir_v2(rely, "y", speed)
BotUtils.resolve_dir_v2(relx, "x", speed)
else:
lootfound = BotUtils.resolve_dir_with_looting(
rely, "y", speed, gamename)
if lootfound:
Looting.grab_all_visible_loot(gamename, plyr)
# Continue to destination without further looting (prevent stuck)
BotUtils.move_to(gamename, x, y, angle, yfirst, speed)
# When at destination check for loot again
if Looting.check_for_loot(gamename):
Looting.grab_all_visible_loot(gamename, plyr)
# If needs be return to destination
BotUtils.move_to(gamename, x, y, angle, yfirst, speed)
else:
lootfound = BotUtils.resolve_dir_with_looting(
relx, "x", speed, gamename)
if lootfound:
Looting.grab_all_visible_loot(gamename, plyr)
# Continue to destination without further looting (prevent stuck)
BotUtils.move_to(gamename, x, y, angle, yfirst, speed)
# When at destination check for loot again
if Looting.check_for_loot(gamename):
Looting.grab_all_visible_loot(gamename, plyr)
# If needs be return to destination
BotUtils.move_to(
gamename, x, y, angle, yfirst, speed)
def resolve_dir_v2(value, dir, speed):
if dir == "x":
if value > 0:
key = "left"
else:
key = "right"
elif dir == "y":
if value > 0:
key = "down"
else:
key = "up"
time_reqd = abs(value/speed)
if time_reqd > 0.003:
CustomInput.press_key(CustomInput.key_map[key], key)
time.sleep(time_reqd-0.003)
CustomInput.release_key(CustomInput.key_map[key], key)
def resolve_dir_with_looting(value, dir, speed, gamename):
if dir == "x":
if value > 0:
key = "left"
else:
key = "right"
elif dir == "y":
if value > 0:
key = "down"
else:
key = "up"
time_reqd = abs(value/speed)
start_time = time.time()
if time_reqd > 0.003:
CustomInput.press_key(CustomInput.key_map[key], key)
# Maximum lootcheck time is about 0.3secs worst case
# Nominal is about 0.2s
if time_reqd < 2:
time.sleep(time_reqd-0.003)
CustomInput.release_key(CustomInput.key_map[key], key)
else:
BotUtils.close_map(gamename)
loops = math.floor(time_reqd/2)
for i in range(loops):
time.sleep(1.65)
result = Looting.check_for_loot(gamename)
if result:
CustomInput.release_key(CustomInput.key_map[key], key)
return True
time_left = start_time+time_reqd-time.time()
time.sleep(time_left)
CustomInput.release_key(CustomInput.key_map[key], key)
return Looting.check_for_loot(gamename)
def resolve_single_direction(speed, value, dir, PAG=False):
if not PAG:
sleep_time = 0.003
else:
sleep_time = 0.1
if dir == "x":
if value > 0:
key = "left"
else:
key = "right"
elif dir == "y":
if value > 0:
key = "down"
else:
key = "up"
time_reqd = abs(value/speed)
key_map = CustomInput.grab_key_dict()
if not PAG:
CustomInput.press_key(key_map[key], key)
else:
pydirectinput.keyDown(key)
        try:
            time.sleep(time_reqd-sleep_time)
        except ValueError:
            # time_reqd can be smaller than sleep_time
            pass
        if not PAG:
            CustomInput.release_key(key_map[key], key)
        else:
            pydirectinput.keyUp(key)
def list_window_names():
def winEnumHandler(hwnd, ctx):
if win32gui.IsWindowVisible(hwnd):
print(hex(hwnd), win32gui.GetWindowText(hwnd))
win32gui.EnumWindows(winEnumHandler, None)
def grab_hpbar_locations(gamename=False):
if gamename:
wincap = WindowCapture(gamename, [100, 135, 1223, 688])
original_image = wincap.get_screenshot()
else:
original_image = cv2.imread(os.path.dirname(
os.path.abspath(__file__)) + "/testimages/healthbars.jpg")
filter = HsvFilter(20, 174, 245, 26, 193, 255, 0, 0, 0, 0)
output_image = BotUtils.filter_blackwhite_invert(
filter, original_image, True)
output_image = cv2.blur(output_image, (2, 2))
_, thresh = cv2.threshold(output_image, 127, 255, 0)
contours, _ = cv2.findContours(
thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = sorted(contours, key=cv2.contourArea, reverse=True)
if len(contours) < 2:
return False
contours.pop(0)
rectangles = []
for contour in contours:
(x, y), _ = cv2.minEnclosingCircle(contour)
rectangles.append([x-10, y, 20, 5])
rectangles.append([x-10, y, 20, 5])
rectangles, _ = cv2.groupRectangles(
rectangles, groupThreshold=1, eps=0.8)
points = []
for (x, y, w, h) in rectangles:
center_x = x + int(w/2)
center_y = y + int(h/2)
points.append((center_x, center_y))
return points
def grab_character_location(player_name, gamename=False):
player_chars = "".join(set(player_name))
if gamename:
wincap = WindowCapture(gamename, [200, 235, 1123, 688])
original_image = wincap.get_screenshot()
else:
original_image = cv2.imread(os.path.dirname(
os.path.abspath(__file__)) + "/testimages/test_sensitive.jpg")
filter = HsvFilter(0, 0, 119, 179, 49, 255, 0, 0, 0, 0)
output_image = BotUtils.filter_blackwhite_invert(
filter, original_image, return_gray=True)
rgb = cv2.cvtColor(output_image, cv2.COLOR_GRAY2RGB)
tess_config = '--psm 6 --oem 3 -c tessedit_char_whitelist=' + player_chars
results = pytesseract.image_to_data(
rgb, output_type=pytesseract.Output.DICT, lang='eng', config=tess_config)
try:
            # fuzzywuzzy scores run 0-100, so the cutoff is 80, not 0.8
            best_match, _ = process.extractOne(
                player_name, results["text"], score_cutoff=80)
i = results["text"].index(best_match)
x = int(results["left"][i] + (results["width"][i]/2))
y = int(results["top"][i] + (results["height"][i]/2))
# Account for the rect
x += 200
y += 235
return x, y
        except:
            # Fall back to roughly the centre of the playfield
            return 640, 382
def shift_channel(c, amount):
if amount > 0:
lim = 255 - amount
c[c >= lim] = 255
c[c < lim] += amount
elif amount < 0:
amount = -amount
lim = amount
c[c <= lim] = 0
c[c > lim] -= amount
return c
    def filter_blackwhite_invert(filter: HsvFilter, existing_image, return_gray=False, threshold=67, max_value=255):
hsv = cv2.cvtColor(existing_image, cv2.COLOR_BGR2HSV)
hsv_filter = filter
# add/subtract saturation and value
h, s, v = cv2.split(hsv)
s = BotUtils.shift_channel(s, hsv_filter.sAdd)
s = BotUtils.shift_channel(s, -hsv_filter.sSub)
v = BotUtils.shift_channel(v, hsv_filter.vAdd)
v = BotUtils.shift_channel(v, -hsv_filter.vSub)
hsv = cv2.merge([h, s, v])
# Set minimum and maximum HSV values to display
lower = np.array([hsv_filter.hMin, hsv_filter.sMin, hsv_filter.vMin])
upper = np.array([hsv_filter.hMax, hsv_filter.sMax, hsv_filter.vMax])
# Apply the thresholds
mask = cv2.inRange(hsv, lower, upper)
result = cv2.bitwise_and(hsv, hsv, mask=mask)
# convert back to BGR
img = cv2.cvtColor(result, cv2.COLOR_HSV2BGR)
# now change it to greyscale
grayImage = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# now change it to black and white
        (thresh, blackAndWhiteImage) = cv2.threshold(
            grayImage, threshold, max_value, cv2.THRESH_BINARY)
# now invert it
inverted = (255-blackAndWhiteImage)
if return_gray:
return inverted
inverted = cv2.cvtColor(inverted, cv2.COLOR_GRAY2BGR)
return inverted
def convert_pynput_to_pag(button):
PYNPUT_SPECIAL_CASE_MAP = {
'alt_l': 'altleft',
'alt_r': 'altright',
'alt_gr': 'altright',
'caps_lock': 'capslock',
'ctrl_l': 'ctrlleft',
'ctrl_r': 'ctrlright',
'page_down': 'pagedown',
'page_up': 'pageup',
'shift_l': 'shiftleft',
'shift_r': 'shiftright',
'num_lock': 'numlock',
'print_screen': 'printscreen',
'scroll_lock': 'scrolllock',
}
        # example: 'Key.F9' returns 'F9'; 'w' returns 'w' unchanged
cleaned_key = button.replace('Key.', '')
if cleaned_key in PYNPUT_SPECIAL_CASE_MAP:
return PYNPUT_SPECIAL_CASE_MAP[cleaned_key]
return cleaned_key
def detect_player_name(gamename):
plyrname_rect = [165, 45, 320, 65]
plyrname_wincap = WindowCapture(gamename, plyrname_rect)
plyrname_filt = HsvFilter(0, 0, 103, 89, 104, 255, 0, 0, 0, 0)
# get an updated image of the game
image = plyrname_wincap.get_screenshot()
# pre-process the image
image = BotUtils.apply_hsv_filter(
image, plyrname_filt)
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
results = pytesseract.image_to_data(
rgb, output_type=pytesseract.Output.DICT, lang='eng')
biggest = 0
name = False
for entry in results["text"]:
if len(entry) > biggest:
name = entry
biggest = len(entry)
return name
def detect_level_name(gamename):
wincap = WindowCapture(gamename, [1121, 31, 1248, 44])
existing_image = wincap.get_screenshot()
filter = HsvFilter(0, 0, 0, 169, 34, 255, 0, 0, 0, 0)
save_image = BotUtils.apply_hsv_filter(existing_image, filter)
gray_image = cv2.cvtColor(save_image, cv2.COLOR_BGR2GRAY)
(thresh, blackAndWhiteImage) = cv2.threshold(
gray_image, 129, 255, cv2.THRESH_BINARY)
# now invert it
inverted = (255-blackAndWhiteImage)
save_image = cv2.cvtColor(inverted, cv2.COLOR_GRAY2BGR)
rgb = cv2.cvtColor(save_image, cv2.COLOR_BGR2RGB)
tess_config = '--psm 7 --oem 3 -c tessedit_char_whitelist=01234567890ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
result = pytesseract.image_to_string(
rgb, lang='eng', config=tess_config)[:-2]
return result
def apply_hsv_filter(original_image, hsv_filter: HsvFilter):
# convert image to HSV
hsv = cv2.cvtColor(original_image, cv2.COLOR_BGR2HSV)
# add/subtract saturation and value
h, s, v = cv2.split(hsv)
s = BotUtils.shift_channel(s, hsv_filter.sAdd)
s = BotUtils.shift_channel(s, -hsv_filter.sSub)
v = BotUtils.shift_channel(v, hsv_filter.vAdd)
v = BotUtils.shift_channel(v, -hsv_filter.vSub)
hsv = cv2.merge([h, s, v])
# Set minimum and maximum HSV values to display
lower = np.array([hsv_filter.hMin, hsv_filter.sMin, hsv_filter.vMin])
upper = np.array([hsv_filter.hMax, hsv_filter.sMax, hsv_filter.vMax])
# Apply the thresholds
mask = cv2.inRange(hsv, lower, upper)
result = cv2.bitwise_and(hsv, hsv, mask=mask)
# convert back to BGR for imshow() to display it properly
img = cv2.cvtColor(result, cv2.COLOR_HSV2BGR)
return img
def detect_sect_clear(gamename=False):
if not gamename:
with open("gamename.txt") as f:
gamename = f.readline()
wincap = WindowCapture(gamename, custom_rect=[
464+156, 640, 464+261, 641])
image = wincap.get_screenshot()
a, b, c = [int(i) for i in image[0][0]]
d, e, f = [int(i) for i in image[0][-1]]
if a+b+c > 700:
if d+e+f > 700:
return True
return False
def detect_boss_healthbar(gamename=False):
if not gamename:
with open("gamename.txt") as f:
gamename = f.readline()
wincap = WindowCapture(gamename, custom_rect=[
415+97, 105+533, 415+98, 105+534])
image = wincap.get_screenshot()
# bgr
a, b, c = [int(i) for i in image[0][0]]
d, e, f = [int(i) for i in image[0][-1]]
if c+f > 440:
if a+b+d+e < 80:
return True
return False
def detect_xprompt(gamename=False):
if not gamename:
with open("gamename.txt") as f:
gamename = f.readline()
wincap = WindowCapture(gamename, custom_rect=[
1137, 694, 1163, 695])
image = wincap.get_screenshot()
a, b, c = [int(i) for i in image[0][0]]
d, e, f = [int(i) for i in image[0][-1]]
if a+b+d+e > 960 and c+f == 140:
return True
else:
return False
def grab_player_pos(gamename=False, map_rect=None, rect_rel=False):
if not gamename:
with open("gamename.txt") as f:
gamename = f.readline()
if not map_rect:
wincap = WindowCapture(gamename, [561, 282, 1111, 666])
else:
wincap = WindowCapture(gamename, map_rect)
filter = HsvFilter(34, 160, 122, 50, 255, 255, 0, 0, 0, 0)
image = wincap.get_screenshot()
save_image = BotUtils.filter_blackwhite_invert(filter, image)
vision = Vision('plyr.jpg')
rectangles = vision.find(
save_image, threshold=0.31, epsilon=0.5)
if len(rectangles) < 1:
return False, False
points = vision.get_click_points(rectangles)
x, y = points[0]
if not map_rect:
x += 561
y += 282
return x, y
elif rect_rel:
x += map_rect[0]
y += map_rect[1]
return x, y
else:
x += wincap.window_rect[0]
y += wincap.window_rect[1]
return x, y
def grab_level_rects():
rects = {}
# Load the translation from name to num
with open("lvl_name_num.txt") as f:
num_names = f.readlines()
for i, entry in enumerate(num_names):
num_names[i] = entry.split("-")
# Load the num to rect catalogue
with open("catalogue.txt") as f:
nums_rects = f.readlines()
for i, entry in enumerate(nums_rects):
nums_rects[i] = entry.split("-")
# Then add each rect to the rects dict against name
for number, name in num_names:
for num, area, rect in nums_rects:
if area == "FM" and num == number:
rects[name.rstrip().replace(" ", "")] = rect.rstrip()
if "1" in name:
rects[name.rstrip().replace(
" ", "").replace("1", "L")] = rect.rstrip()
if "ri" in name:
rects[name.rstrip().replace(
" ", "").replace("ri", "n").replace("1", "L")] = rect.rstrip()
break
return rects
def grab_level_rects_and_speeds():
rects = {}
speeds = {}
# Load the translation from name to num
with open("lvl_name_num.txt") as f:
num_names = f.readlines()
for i, entry in enumerate(num_names):
num_names[i] = entry.split("-")
# Load the num to rect catalogue
with open("catalogue.txt") as f:
nums_rects = f.readlines()
for i, entry in enumerate(nums_rects):
nums_rects[i] = entry.split("-")
# Finally load the level speeds
with open("lvl_speed.txt") as f:
num_speeds = f.readlines()
for i, entry in enumerate(num_speeds):
num_speeds[i] = entry.split("|")
# Then add each rect to the rects dict against name
# Also add each speed to the speed dict against name
for number, name in num_names:
for num, area, rect in nums_rects:
if area == "FM" and num == number:
rects[name.rstrip().replace(" ", "")] = rect.rstrip()
if "1" in name:
rects[name.rstrip().replace(
" ", "").replace("1", "L")] = rect.rstrip()
if "ri" in name:
rects[name.rstrip().replace(
" ", "").replace("ri", "n").replace("1", "L")] = rect.rstrip()
break
for num, speed in num_speeds:
if num == number:
speeds[name.rstrip().replace(
" ", "")] = float(speed.rstrip())
if "1" in name:
speeds[name.rstrip().replace(
" ", "").replace("1", "L")] = float(speed.rstrip())
if "ri" in name:
speeds[name.rstrip().replace(
" ", "").replace("ri", "n").replace("1", "L")] = float(speed.rstrip())
break
return rects, speeds
def string_to_rect(string: str):
# This converts the rect from catalogue into int list
return [int(i) for i in string.split(',')]
def move_mouse_centre(gamename=False):
if not gamename:
with open("gamename.txt") as f:
gamename = f.readline()
wincap = WindowCapture(gamename)
centre_x = int(0.5 * wincap.w +
wincap.window_rect[0])
centre_y = int(0.5 * wincap.h +
wincap.window_rect[1])
ctypes.windll.user32.SetCursorPos(centre_x, centre_y)
def detect_bigmap_open(gamename=False):
if not gamename:
with open("gamename.txt") as f:
gamename = f.readline()
wincap = WindowCapture(gamename, custom_rect=[819, 263, 855, 264])
image = wincap.get_screenshot()
a, b, c = [int(i) for i in image[0][0]]
d, e, f = [int(i) for i in image[0][-2]]
if a+b+c < 30:
if d+e+f > 700:
return True
return False
def detect_menu_open(gamename=False):
if not gamename:
with open("gamename.txt") as f:
gamename = f.readline()
wincap = WindowCapture(gamename, custom_rect=[595, 278, 621, 281])
image = wincap.get_screenshot()
a, b, c = [int(i) for i in image[0][0]]
d, e, f = [int(i) for i in image[0][-1]]
if a+b+c > 700:
if d+e+f > 700:
return True
return False
def convert_list_to_rel(item_list, playerx, playery, yoffset=0):
return_list = []
for item in item_list:
relx = playerx - item[0]
rely = item[1] - playery - yoffset
return_list.append((relx, rely))
return return_list
def close_map_and_menu(gamename=False):
if not gamename:
with open("gamename.txt") as f:
gamename = f.readline()
game_wincap = WindowCapture(gamename)
if BotUtils.detect_menu_open(gamename):
BotUtils.close_esc_menu(game_wincap)
if BotUtils.detect_bigmap_open(gamename):
BotUtils.close_map(game_wincap)
def try_toggle_map():
pydirectinput.keyDown("m")
time.sleep(0.05)
pydirectinput.keyUp("m")
time.sleep(0.08)
def try_toggle_map_clicking(gamename=False):
if not gamename:
with open("gamename.txt") as f:
gamename = f.readline()
game_wincap = WindowCapture(gamename)
pydirectinput.click(
int(1262+game_wincap.window_rect[0]), int(64+game_wincap.window_rect[1]))
def close_map(game_wincap=False):
if not game_wincap:
with open("gamename.txt") as f:
gamename = f.readline()
game_wincap = WindowCapture(gamename)
pydirectinput.click(
int(859+game_wincap.window_rect[0]), int(260+game_wincap.window_rect[1]))
def close_esc_menu(game_wincap=False):
if not game_wincap:
with open("gamename.txt") as f:
gamename = f.readline()
game_wincap = WindowCapture(gamename)
pydirectinput.click(
int(749+game_wincap.window_rect[0]), int(280+game_wincap.window_rect[1]))
def get_monitor_scaling():
scaleFactor = ctypes.windll.shcore.GetScaleFactorForDevice(0) / 100
return float(scaleFactor)
def grab_res_scroll_left(gamename):
wincap = WindowCapture(gamename, [112, 130, 125, 143])
image = wincap.get_screenshot()
filter = HsvFilter(0, 0, 0, 179, 18, 255, 0, 0, 0, 0)
image = BotUtils.apply_hsv_filter(image, filter)
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
tess_config = '--psm 7 --oem 3 -c tessedit_char_whitelist=1234567890'
result = pytesseract.image_to_string(
rgb, lang='eng', config=tess_config)[:-2]
return int(result)
def read_mission_name(gamename):
wincap = WindowCapture(gamename, [749, 152, 978, 170])
image = wincap.get_screenshot()
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
tess_config = '--psm 7 --oem 3 -c tessedit_char_whitelist=ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
result = pytesseract.image_to_string(
rgb, lang='eng', config=tess_config)[:-2]
return result
def convert_click_to_ratio(gamename, truex, truey):
wincap = WindowCapture(gamename)
wincap.update_window_position(border=False)
scaling = BotUtils.get_monitor_scaling()
# print(scaling)
relx = (truex - (wincap.window_rect[0] * scaling))
rely = (truey - (wincap.window_rect[1] * scaling))
# print("relx, rely, w, h: {},{},{},{}".format(
# relx, rely, wincap.w, wincap.h))
ratx = relx/(wincap.w * scaling)
raty = rely/(wincap.h * scaling)
return ratx, raty
def convert_ratio_to_click(ratx, raty, gamename=False):
if not gamename:
with open("gamename.txt") as f:
gamename = f.readline()
wincap = WindowCapture(gamename)
relx = int(ratx * wincap.w)
rely = int(raty * wincap.h)
truex = int((relx + wincap.window_rect[0]))
truey = int((rely + wincap.window_rect[1]))
return truex, truey
def convert_true_to_window(gamename, truex, truey):
scaling = BotUtils.get_monitor_scaling()
wincap = WindowCapture(gamename)
relx = (truex/scaling) - wincap.window_rect[0]
rely = (truey/scaling) - wincap.window_rect[1]
return relx, rely
def convert_window_to_true(gamename, relx, rely):
wincap = WindowCapture(gamename)
truex = int(relx + wincap.window_rect[0])
truey = int(rely + wincap.window_rect[1])
return truex, truey
def find_other_player(gamename, all=False):
othr_plyr_vision = Vision("otherplayerinvert.jpg")
othr_plyr_wincap = WindowCapture(gamename, [1100, 50, 1260, 210])
image = othr_plyr_wincap.get_screenshot()
filter = HsvFilter(24, 194, 205, 31, 255, 255, 0, 0, 0, 0)
image = cv2.blur(image, (4, 4))
image = BotUtils.filter_blackwhite_invert(filter, image)
rectangles = othr_plyr_vision.find(
image, threshold=0.61, epsilon=0.5)
points = othr_plyr_vision.get_click_points(rectangles)
if len(points) >= 1:
if not all:
relx = points[0][0] - 0
rely = 0 - points[0][1]
return relx, rely
else:
return points
return False
def find_enemy(gamename, all=False):
othr_plyr_vision = Vision("otherplayerinvert.jpg")
othr_plyr_wincap = WindowCapture(gamename, [1100, 50, 1260, 210])
image = othr_plyr_wincap.get_screenshot()
filter = HsvFilter(0, 198, 141, 8, 255, 255, 0, 0, 0, 0)
image = cv2.blur(image, (4, 4))
image = BotUtils.filter_blackwhite_invert(filter, image)
rectangles = othr_plyr_vision.find(
image, threshold=0.41, epsilon=0.5)
points = othr_plyr_vision.get_click_points(rectangles)
if len(points) >= 1:
if not all:
relx = points[0][0] - 0
rely = 0 - points[0][1]
return relx, rely
else:
return points
return False
def find_midlevel_event(gamename=False, playerx=False, playery=False):
if not gamename:
with open("gamename.txt") as f:
gamename = f.readline()
if not playerx:
playerx, playery = BotUtils.grab_player_pos(
gamename, [1100, 50, 1260, 210], True)
filter = HsvFilter(76, 247, 170, 100, 255, 255, 0, 0, 0, 0)
vision = Vision("otherplayerinvert.jpg")
wincap = WindowCapture(gamename, [1100, 50, 1260, 210])
image = wincap.get_screenshot()
image = cv2.blur(image, (4, 4))
image = BotUtils.filter_blackwhite_invert(filter, image)
rectangles = vision.find(
image, threshold=0.61, epsilon=0.5)
points = vision.get_click_points(rectangles)
if len(points) >= 1:
relx = points[0][0] - playerx
rely = playery - points[0][1]
return relx, rely
return False, False
def stop_movement(follower=False):
if follower:
follower.pressed_keys = []
for key in ["up", "down", "left", "right"]:
CustomInput.release_key(CustomInput.key_map[key], key)
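# Loot detection and pickup routines, driven by OCR of on-screen loot
# labels. Typical entry point:
#   Looting.loot_current_room(gamename, player_name)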
class Looting:
def loot_current_room(gamename, player_name, search_points=False):
# Start by picking up loot already in range
BotUtils.close_map_and_menu(gamename)
Looting.grab_nearby_loot(gamename)
# Then try grabbing all visible far loot
Looting.grab_all_visible_loot(gamename, player_name)
# Then once that is exhausted cycle through the searchpoints
if search_points:
for point in search_points:
x, y, first_dir = point
BotUtils.move_to(gamename, x, y, yfirst=first_dir == "y")
Looting.grab_nearby_loot(gamename)
BotUtils.close_map_and_menu(gamename)
Looting.grab_all_visible_loot(gamename, player_name)
def grab_nearby_loot(gamename):
count = 0
while BotUtils.detect_xprompt(gamename):
if count > 12:
break
pydirectinput.press("x")
count += 1
time.sleep(0.09)
CustomInput.press_key(CustomInput.key_map["right"], "right")
CustomInput.release_key(CustomInput.key_map["right"], "right")
def grab_all_visible_loot(gamename, player_name):
start_time = time.time()
while True:
if time.time() - start_time > 20:
break
outcome = Looting.try_find_and_grab_loot(
gamename, player_name)
if outcome == "noloot":
break
elif outcome == "noplayer":
pydirectinput.press("right")
outcome = Looting.try_find_and_grab_loot(
gamename, player_name)
if outcome == "noplayer":
break
elif outcome == "falsepos":
break
elif outcome == True:
count = 0
while BotUtils.detect_xprompt(gamename):
if count > 12:
break
pydirectinput.press("x")
count += 1
time.sleep(0.09)
def check_for_loot(gamename):
# This will be a lightweight check for any positive loot ident
# Meant to be used when moving and normal looting has ceased
# i.e. opportunistic looting
data = Looting.grab_farloot_locations(
gamename, return_image=True)
if not data:
return False
else:
loot_list, image, xoff, yoff = data
            try:
                for x, y in loot_list:
x -= xoff
y -= yoff
rgb = image[y-22:y+22, x-75:x+75]
filter = HsvFilter(0, 0, 131, 151, 255, 255, 0, 0, 0, 0)
rgb = BotUtils.apply_hsv_filter(rgb, filter)
tess_config = '--psm 7 --oem 3 -c tessedit_char_whitelist=ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
result = pytesseract.image_to_string(
rgb, lang='eng', config=tess_config)[:-2]
if len(result) > 3:
return True
except:
return False
            # No candidate produced readable loot text
            return False
def try_find_and_grab_loot(gamename, player_name, loot_lowest=True, printout=False):
# First need to close anything that might be in the way
BotUtils.close_map_and_menu(gamename)
# Then grab loot locations
loot_list = Looting.grab_farloot_locations(gamename)
if not loot_list:
# print("No loot found")
return "noloot"
# else:
# print("Loot found")
playerx, playery = BotUtils.grab_character_location(
player_name, gamename)
# If didn't find player then try once more
if not playerx:
playerx, playery = BotUtils.grab_character_location(
player_name, gamename)
if not playerx:
return "noplayer"
        # If looting the nearest item first is preferred, despite the CPU cost
if not loot_lowest:
# Then convert lootlist to rel_pos list
relatives = BotUtils.convert_list_to_rel(
loot_list, playerx, playery, 275)
            # Grab the indexes in ascending order of closeness
order = BotUtils.grab_order_closeness(relatives)
# Then reorder the lootlist to match
loot_list = [x for _, x in sorted(zip(order, loot_list))]
        # Otherwise loot from the bottom of the screen to the top.
        # Typically better, since all loot stays visible when moving in y,
        # though loot can be missed in the x direction
else:
# Grab the indexes in ascending order of distance from
# bottom of the screen
order = BotUtils.grab_order_lowest_y(loot_list)
# Then reorder the lootlist to match
loot_list = [x for _, x in sorted(zip(order, loot_list))]
# print(len(loot_list))
confirmed = False
for index, coords in enumerate(loot_list):
x, y = coords
wincap = WindowCapture(gamename, [x-95, y-50, x+95, y+50])
rgb = wincap.get_screenshot()
filter = HsvFilter(0, 0, 131, 151, 255, 255, 0, 0, 0, 0)
rgb = BotUtils.apply_hsv_filter(rgb, filter)
# cv2.imwrite("testytest.jpg", rgb)
tess_config = '--psm 5 --oem 3 -c tessedit_char_whitelist=ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
result = pytesseract.image_to_string(
rgb, lang='eng', config=tess_config)[:-2]
if len(result) > 3:
if printout:
print(result)
confirmed = loot_list[index]
break
if not confirmed:
# print("Lootname not confirmed or detected")
return "noloot"
relx = playerx - confirmed[0]
rely = confirmed[1] - playery - 275
rect = [confirmed[0]-100, confirmed[1] -
30, confirmed[0]+100, confirmed[1]+30]
BotUtils.move_towards(relx, "x")
loop_time = time.time()
time_remaining = 0.1
time.sleep(0.01)
while time_remaining > 0:
time.sleep(0.003)
if BotUtils.detect_xprompt(gamename):
break
try:
newx, newy = Looting.grab_farloot_locations(gamename, rect)[
0]
time_taken = time.time() - loop_time
movementx = confirmed[0] - newx
speed = movementx/time_taken
if speed != 0:
time_remaining = abs(
relx/speed) - time_taken
rect = [newx-100, newy-30, newx+100, newy+30]
except:
try:
time.sleep(time_remaining)
break
except:
return False
for key in ["left", "right"]:
CustomInput.release_key(CustomInput.key_map[key], key)
BotUtils.move_towards(rely, "y")
start_time = time.time()
if rely < 0:
expected_time = abs(rely/7.5)
else:
expected_time = abs(rely/5.5)
while not BotUtils.detect_xprompt(gamename):
time.sleep(0.005)
            # Direction has already been reversed (start_time pushed back)
            if time.time() - start_time > 10:
                # Reversed for as long as the original move with no result
                if time.time() - start_time > 10 + 2*(1 + expected_time):
                    for key in ["up", "down"]:
                        CustomInput.release_key(CustomInput.key_map[key], key)
                    # Return falsepos so that it will ignore this detection
                    return "falsepos"
            # No prompt within the expected travel time plus a margin
            elif time.time() - start_time > 1 + expected_time:
# Try moving in the opposite direction
for key in ["up", "down"]:
CustomInput.release_key(CustomInput.key_map[key], key)
BotUtils.move_towards(-1*rely, "y")
start_time -= 8.5
for key in ["up", "down"]:
CustomInput.release_key(CustomInput.key_map[key], key)
pydirectinput.press("x")
return True
def grab_farloot_locations(gamename=False, rect=False, return_image=False):
if gamename:
if not rect:
rect1 = [100, 160, 1223, 688]
wincap = WindowCapture(gamename, rect1)
else:
wincap = WindowCapture(gamename, rect)
original_image = wincap.get_screenshot()
else:
original_image = cv2.imread(os.path.dirname(
os.path.abspath(__file__)) + "/testimages/lootscene.jpg")
filter = HsvFilter(15, 180, 0, 20, 255, 63, 0, 0, 0, 0)
output_image = BotUtils.filter_blackwhite_invert(
filter, original_image, True, 0, 180)
output_image = cv2.blur(output_image, (8, 1))
output_image = cv2.blur(output_image, (8, 1))
output_image = cv2.blur(output_image, (8, 1))
_, thresh = cv2.threshold(output_image, 127, 255, 0)
contours, _ = cv2.findContours(
thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = sorted(contours, key=cv2.contourArea, reverse=True)
if len(contours) < 2:
return False
contours.pop(0)
rectangles = []
for contour in contours:
(x, y), _ = cv2.minEnclosingCircle(contour)
rectangles.append([x-50, y, 100, 5])
rectangles.append([x-50, y, 100, 5])
rectangles, _ = cv2.groupRectangles(
rectangles, groupThreshold=1, eps=0.9)
if len(rectangles) < 1:
return False
points = []
for (x, y, w, h) in rectangles:
            if rect:
                # Account for the custom rect offset
x += rect[0]
y += rect[1]
else:
x += 100
y += 135
center_x = x + int(w/2)
center_y = y + int(h/2)
points.append((center_x, center_y))
if return_image:
if rect:
return points, original_image, rect[0], rect[1]
else:
return points, original_image, rect1[0], rect1[1]
return points
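# Pixel-probe detectors for in-game popups and screens. Each check samples
# one or two pixels from a hard-coded rect and compares channel sums, so
# they are resolution- and layout-dependent.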
class Events:
def choose_random_reward(gamename):
wincap = WindowCapture(gamename)
posx = wincap.window_rect[0] + (460+(180*random.randint(0, 2)))
posy = wincap.window_rect[1] + (200+(132*random.randint(0, 3)))
pydirectinput.click(int(posx), int(posy))
time.sleep(0.1)
# Now accept the reward
pydirectinput.click(
wincap.window_rect[0]+750, wincap.window_rect[1]+720)
def detect_reward_choice_open(gamename):
wincap = WindowCapture(gamename, [503, 90, 535, 92])
image = wincap.get_screenshot()
a, b, c = [int(i) for i in image[0][0]]
d, e, f = [int(i) for i in image[0][-1]]
if a + d > 400:
if b + e > 500:
if c + f < 105:
return True
return False
def detect_move_reward_screen(gamename):
wincap = WindowCapture(gamename, [581, 270, 593, 272])
image = wincap.get_screenshot()
a, b, c = [int(i) for i in image[0][0]]
d, e, f = [int(i) for i in image[0][-1]]
if a + d > 360 and a + d < 400:
if b + e > 360 and b + e < 400:
if c + f < 10:
return True
return False
def detect_endlevel_chest(gamename):
wincap = WindowCapture(gamename, [454, 250, 525, 252])
image = wincap.get_screenshot()
a, b, c = [int(i) for i in image[0][0]]
d, e, f = [int(i) for i in image[0][-1]]
if a + d < 50:
if b + e > 480:
if c + f > 290 and c+f < 320:
return True
return False
def detect_endlevel_bonus_area(gamename):
wincap = WindowCapture(gamename, [503, 487, 514, 589])
image = wincap.get_screenshot()
a, b, c = [int(i) for i in image[0][0]]
d, e, f = [int(i) for i in image[0][-1]]
if a + d > 400:
if b + e > 400:
if c + f > 400:
return True
return False
def detect_in_dungeon(wincap=False):
if not wincap:
with open("gamename.txt") as f:
gamename = f.readline()
wincap = WindowCapture(gamename, [1090, 331, 1092, 353])
image = wincap.get_screenshot()
a, b, c = [int(i) for i in image[0][0]]
d, e, f = [int(i) for i in image[-1][0]]
if d < 20:
if a + b + e > 400 and a+b+e < 500:
if c + f > 480:
return True
return False
def detect_go(gamename):
wincap = WindowCapture(gamename, [623, 247, 628, 249])
image = wincap.get_screenshot()
a, b, c = [int(i) for i in image[0][0]]
if a < 30:
if b > 240:
if c > 140:
return True
return False
def detect_one_card(gamename):
# Cards only show up once one has been picked
# Therefore need to check against bronze, gold, silver
wincap = WindowCapture(gamename, [833, 44, 835, 46])
image = wincap.get_screenshot()
a, b, c = [int(i) for i in image[0][0]]
# Bronze
if a == 27:
if b == 48:
if c == 87:
return True
# Silver
if a == 139:
if b == 139:
if c == 139:
return True
# Gold
if a == 38:
if b == 129:
if c == 160:
return True
return False
def detect_yes_no(gamename):
wincap = WindowCapture(gamename, [516, 426, 541, 441])
image = wincap.get_screenshot()
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
tess_config = '--psm 7 --oem 3 -c tessedit_char_whitelist=Yes'
result = pytesseract.image_to_string(
rgb, lang='eng', config=tess_config)[:-2]
if result == "Yes":
return True
return False
def detect_resurrect_prompt(gamename):
wincap = WindowCapture(gamename, [763, 490, 818, 492])
image = wincap.get_screenshot()
a, b, c = [int(i) for i in image[0][0]]
d, e, f = [int(i) for i in image[-1][0]]
if a + d > 500:
if b + e > 500:
if c + f > 500:
return True
return False
def detect_store(gamename=False):
if not gamename:
with open("gamename.txt") as f:
gamename = f.readline()
wincap = WindowCapture(gamename, [1084, 265, 1099, 267])
image = wincap.get_screenshot()
a, b, c = [int(i) for i in image[0][0]]
d, e, f = [int(i) for i in image[-1][0]]
if a + d > 500:
if b + e > 500:
if c + f > 500:
return True
return False
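# Hard-coded click targets for menu navigation, expressed as offsets from
# the game window's top-left corner.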
class RHClick:
def click_yes(gamename):
wincap = WindowCapture(gamename)
pydirectinput.click(
wincap.window_rect[0]+528, wincap.window_rect[1]+433)
def click_no(gamename):
wincap = WindowCapture(gamename)
pydirectinput.click(
wincap.window_rect[0]+763, wincap.window_rect[1]+433)
def click_otherworld_ok(gamename):
wincap = WindowCapture(gamename)
pydirectinput.click(
wincap.window_rect[0]+503, wincap.window_rect[1]+487)
def click_otherworld_no(gamename):
wincap = WindowCapture(gamename)
pydirectinput.click(
wincap.window_rect[0]+778, wincap.window_rect[1]+487)
def click_choose_map(gamename):
wincap = WindowCapture(gamename)
pydirectinput.click(
wincap.window_rect[0]+1150, wincap.window_rect[1]+210)
def click_explore_again(gamename):
wincap = WindowCapture(gamename)
pydirectinput.click(
wincap.window_rect[0]+1150, wincap.window_rect[1]+152)
def click_back_to_town(gamename):
wincap = WindowCapture(gamename)
pydirectinput.click(
wincap.window_rect[0]+1150, wincap.window_rect[1]+328)
def click_map_number(gamename, mapnum):
wincap = WindowCapture(gamename)
map_to_clickpoints = {
5: (728, 521),
6: (640, 631),
7: (605, 455),
8: (542, 350),
9: (293, 297),
10: (777, 406),
11: (140, 370),
12: (500, 246),
13: (500, 672),
14: (419, 478),
15: (423, 263),
16: (563, 562),
17: (642, 432),
18: (249, 325)
}
x, y = map_to_clickpoints[mapnum]
pydirectinput.click(wincap.window_rect[0]+x, wincap.window_rect[1]+y)
def choose_difficulty_and_enter(gamename, diff):
wincap = WindowCapture(gamename)
num_clicks = 0
if diff == "N":
num_clicks = 0
elif diff == "H":
num_clicks = 1
elif diff == "VH":
num_clicks == 2
elif diff == "BM":
num_clicks == 3
for i in range(num_clicks):
pydirectinput.click(
wincap.window_rect[0]+618, wincap.window_rect[1]+333)
time.sleep(0.3)
# Then click on enter dungeon
pydirectinput.click(
wincap.window_rect[0]+1033, wincap.window_rect[1]+736)
def go_to_change_character(gamename):
if not BotUtils.detect_menu_open(gamename):
pydirectinput.press('esc')
wincap = WindowCapture(gamename)
pydirectinput.click(
wincap.window_rect[0]+640, wincap.window_rect[1]+363)
def exit_game(gamename):
if not BotUtils.detect_menu_open(gamename):
pydirectinput.press('esc')
wincap = WindowCapture(gamename)
pydirectinput.click(
wincap.window_rect[0]+640, wincap.window_rect[1]+480)
time.sleep(0.2)
pydirectinput.click(
wincap.window_rect[0]+640, wincap.window_rect[1]+428)
def choose_character(gamename, charnum):
wincap = WindowCapture(gamename)
char_clickpoints = {
1: (1100, 140),
2: (1100, 210),
3: (1100, 280),
4: (1100, 350),
5: (1100, 420),
6: (1100, 490),
7: (1100, 560),
8: (1100, 630)
}
if charnum > 8:
pydirectinput.click(
wincap.window_rect[0]+1165, wincap.window_rect[1]+680)
x, y = char_clickpoints[charnum-8]
else:
pydirectinput.click(
wincap.window_rect[0]+1035, wincap.window_rect[1]+680)
x, y = char_clickpoints[charnum]
time.sleep(0.2)
pydirectinput.click(wincap.window_rect[0]+x, wincap.window_rect[1]+y)
time.sleep(0.2)
pydirectinput.click(
wincap.window_rect[0]+640, wincap.window_rect[1]+765)
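# Template matcher wrapping cv2.matchTemplate. A minimal usage sketch
# (the needle image path is only an example):
#   vision = Vision("plyr.jpg")
#   rects = vision.find(frame, threshold=0.7, epsilon=0.5)
#   points = vision.get_click_points(rects)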
class Vision:
def __init__(self, needle_img_path, method=cv2.TM_CCOEFF_NORMED):
self.needle_img = cv2.imread(needle_img_path, cv2.IMREAD_UNCHANGED)
self.needle_w = self.needle_img.shape[1]
self.needle_h = self.needle_img.shape[0]
# TM_CCOEFF, TM_CCOEFF_NORMED, TM_CCORR, TM_CCORR_NORMED, TM_SQDIFF, TM_SQDIFF_NORMED
self.method = method
def find(self, haystack_img, threshold=0.7, max_results=15, epsilon=0.5):
result = cv2.matchTemplate(haystack_img, self.needle_img, self.method)
locations = np.where(result >= threshold)
locations = list(zip(*locations[::-1]))
if not locations:
return np.array([], dtype=np.int32).reshape(0, 4)
rectangles = []
for loc in locations:
rect = [int(loc[0]), int(loc[1]), self.needle_w, self.needle_h]
rectangles.append(rect)
rectangles.append(rect)
rectangles, weights = cv2.groupRectangles(
rectangles, groupThreshold=1, eps=epsilon)
return rectangles
def get_click_points(self, rectangles):
points = []
for (x, y, w, h) in rectangles:
center_x = x + int(w/2)
center_y = y + int(h/2)
points.append((center_x, center_y))
return points
def draw_rectangles(self, haystack_img, rectangles):
# BGR
line_color = (0, 255, 0)
line_type = cv2.LINE_4
for (x, y, w, h) in rectangles:
top_left = (x, y)
bottom_right = (x + w, y + h)
cv2.rectangle(haystack_img, top_left, bottom_right,
line_color, lineType=line_type)
return haystack_img
def draw_crosshairs(self, haystack_img, points):
# BGR
marker_color = (255, 0, 255)
marker_type = cv2.MARKER_CROSS
for (center_x, center_y) in points:
cv2.drawMarker(haystack_img, (center_x, center_y),
marker_color, marker_type)
return haystack_img
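# Interactive variant of Vision that adds OpenCV trackbars for tuning an
# HsvFilter in real time: call init_control_gui() once, then read
# get_hsv_filter_from_controls() each frame.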
class DynamicFilter:
TRACKBAR_WINDOW = "Trackbars"
# create gui window with controls for adjusting arguments in real-time
def __init__(self, needle_img_path, method=cv2.TM_CCOEFF_NORMED):
self.needle_img = cv2.imread(needle_img_path, cv2.IMREAD_UNCHANGED)
self.needle_w = self.needle_img.shape[1]
self.needle_h = self.needle_img.shape[0]
# TM_CCOEFF, TM_CCOEFF_NORMED, TM_CCORR, TM_CCORR_NORMED, TM_SQDIFF, TM_SQDIFF_NORMED
self.method = method
def find(self, haystack_img, threshold=0.7, epsilon=0.5):
result = cv2.matchTemplate(haystack_img, self.needle_img, self.method)
locations = np.where(result >= threshold)
locations = list(zip(*locations[::-1]))
if not locations:
return np.array([], dtype=np.int32).reshape(0, 4)
rectangles = []
for loc in locations:
rect = [int(loc[0]), int(loc[1]), self.needle_w, self.needle_h]
rectangles.append(rect)
rectangles.append(rect)
rectangles, weights = cv2.groupRectangles(
rectangles, groupThreshold=1, eps=epsilon)
return rectangles
def get_click_points(self, rectangles):
points = []
for (x, y, w, h) in rectangles:
center_x = x + int(w/2)
center_y = y + int(h/2)
points.append((center_x, center_y))
return points
def draw_rectangles(self, haystack_img, rectangles):
# BGR
line_color = (0, 255, 0)
line_type = cv2.LINE_4
for (x, y, w, h) in rectangles:
top_left = (x, y)
bottom_right = (x + w, y + h)
cv2.rectangle(haystack_img, top_left, bottom_right,
line_color, lineType=line_type)
return haystack_img
def draw_crosshairs(self, haystack_img, points):
# BGR
marker_color = (255, 0, 255)
marker_type = cv2.MARKER_CROSS
for (center_x, center_y) in points:
cv2.drawMarker(haystack_img, (center_x, center_y),
marker_color, marker_type)
return haystack_img
def init_control_gui(self):
cv2.namedWindow(self.TRACKBAR_WINDOW, cv2.WINDOW_NORMAL)
cv2.resizeWindow(self.TRACKBAR_WINDOW, 350, 700)
# required callback. we'll be using getTrackbarPos() to do lookups
# instead of using the callback.
def nothing(position):
pass
# create trackbars for bracketing.
# OpenCV scale for HSV is H: 0-179, S: 0-255, V: 0-255
cv2.createTrackbar('HMin', self.TRACKBAR_WINDOW, 0, 179, nothing)
cv2.createTrackbar('SMin', self.TRACKBAR_WINDOW, 0, 255, nothing)
cv2.createTrackbar('VMin', self.TRACKBAR_WINDOW, 0, 255, nothing)
cv2.createTrackbar('HMax', self.TRACKBAR_WINDOW, 0, 179, nothing)
cv2.createTrackbar('SMax', self.TRACKBAR_WINDOW, 0, 255, nothing)
cv2.createTrackbar('VMax', self.TRACKBAR_WINDOW, 0, 255, nothing)
# Set default value for Max HSV trackbars
cv2.setTrackbarPos('HMax', self.TRACKBAR_WINDOW, 179)
cv2.setTrackbarPos('SMax', self.TRACKBAR_WINDOW, 255)
cv2.setTrackbarPos('VMax', self.TRACKBAR_WINDOW, 255)
# trackbars for increasing/decreasing saturation and value
cv2.createTrackbar('SAdd', self.TRACKBAR_WINDOW, 0, 255, nothing)
cv2.createTrackbar('SSub', self.TRACKBAR_WINDOW, 0, 255, nothing)
cv2.createTrackbar('VAdd', self.TRACKBAR_WINDOW, 0, 255, nothing)
cv2.createTrackbar('VSub', self.TRACKBAR_WINDOW, 0, 255, nothing)
# returns an HSV filter object based on the control GUI values
def get_hsv_filter_from_controls(self):
# Get current positions of all trackbars
hsv_filter = HsvFilter()
hsv_filter.hMin = cv2.getTrackbarPos('HMin', self.TRACKBAR_WINDOW)
hsv_filter.sMin = cv2.getTrackbarPos('SMin', self.TRACKBAR_WINDOW)
hsv_filter.vMin = cv2.getTrackbarPos('VMin', self.TRACKBAR_WINDOW)
hsv_filter.hMax = cv2.getTrackbarPos('HMax', self.TRACKBAR_WINDOW)
hsv_filter.sMax = cv2.getTrackbarPos('SMax', self.TRACKBAR_WINDOW)
hsv_filter.vMax = cv2.getTrackbarPos('VMax', self.TRACKBAR_WINDOW)
hsv_filter.sAdd = cv2.getTrackbarPos('SAdd', self.TRACKBAR_WINDOW)
hsv_filter.sSub = cv2.getTrackbarPos('SSub', self.TRACKBAR_WINDOW)
hsv_filter.vAdd = cv2.getTrackbarPos('VAdd', self.TRACKBAR_WINDOW)
hsv_filter.vSub = cv2.getTrackbarPos('VSub', self.TRACKBAR_WINDOW)
return hsv_filter
def apply_hsv_filter(self, original_image, hsv_filter=None):
hsv = cv2.cvtColor(original_image, cv2.COLOR_BGR2HSV)
if not hsv_filter:
hsv_filter = self.get_hsv_filter_from_controls()
h, s, v = cv2.split(hsv)
s = BotUtils.shift_channel(s, hsv_filter.sAdd)
s = BotUtils.shift_channel(s, -hsv_filter.sSub)
v = BotUtils.shift_channel(v, hsv_filter.vAdd)
v = BotUtils.shift_channel(v, -hsv_filter.vSub)
hsv = cv2.merge([h, s, v])
lower = np.array([hsv_filter.hMin, hsv_filter.sMin, hsv_filter.vMin])
upper = np.array([hsv_filter.hMax, hsv_filter.sMax, hsv_filter.vMax])
mask = cv2.inRange(hsv, lower, upper)
result = cv2.bitwise_and(hsv, hsv, mask=mask)
img = cv2.cvtColor(result, cv2.COLOR_HSV2BGR)
return img
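# Automates selling junk and repairing gear at the in-game shop by reading
# inventory slot pixels; the entry point is ident_sell_repair().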
class SellRepair:
def __init__(self, rarity_cutoff=1, last_row_protect=True) -> None:
# rarities are as follows:
# nocolour=0, green=1, blue=2
self.cutoff = rarity_cutoff
# this is for whether lastrow in equip is protected
# useful for characters levelling with next upgrades ready
self.last_row_protect = last_row_protect
with open("gamename.txt") as f:
self.gamename = f.readline()
self.inventory_wincap = WindowCapture(
self.gamename, [512, 277, 775, 430])
# This is for correct mouse positioning
self.game_wincap = WindowCapture(self.gamename)
self.shop_check_wincap = WindowCapture(
self.gamename, [274, 207, 444, 208])
        # These hold reference pixel channel values; stored as sets so
        # channel order is irrelevant when comparing against screenshots
self.empty = {41, 45, 50}
self.rar_green = {2, 204, 43}
self.rar_blue = {232, 144, 5}
self.rar_none = {24, 33, 48}
self.junk_list = self.grab_junk_list()
def grab_junk_list(self):
jl = []
with open("itemrgb.txt") as f:
lines = f.readlines()
for line in lines:
_, rgb = line.split("|")
r, g, b = rgb.split(",")
jl.append({int(r), int(g), int(b)})
return jl
def ident_sell_repair(self):
self.game_wincap.update_window_position(border=False)
self.shop_check_wincap.update_window_position(border=False)
self.open_store_if_necessary()
# First go through all the equipment
self.change_tab("Equipment")
# time.sleep(0.2)
# self.hover_mouse_all()
time.sleep(0.3)
screenshot = self.inventory_wincap.get_screenshot()
non_empty = self.remove_empty(screenshot)
junk_list = self.identify_rarities_equip(non_empty, screenshot)
self.sell(junk_list, "Equipment")
# Then go through all the other loot
self.change_tab("Other")
# time.sleep(0.2)
# self.hover_mouse_all()
time.sleep(0.3)
screenshot = self.inventory_wincap.get_screenshot()
non_empty = self.remove_empty(screenshot)
junk_list = self.identify_items_other(non_empty, screenshot)
self.sell(junk_list)
# and finally repair gear
self.repair()
# and now go through all the steps again minus repair to make sure
self.change_tab("Equipment")
time.sleep(0.3)
screenshot = self.inventory_wincap.get_screenshot()
non_empty = self.remove_empty(screenshot)
junk_list = self.identify_rarities_equip(non_empty, screenshot)
self.sell(junk_list, "Equipment")
self.change_tab("Other")
time.sleep(0.3)
screenshot = self.inventory_wincap.get_screenshot()
non_empty = self.remove_empty(screenshot)
junk_list = self.identify_items_other(non_empty, screenshot)
self.sell(junk_list)
def open_store_if_necessary(self):
# This will search to see if the inventory is open
# in the correct spot and then click shop if not
screenshot = self.shop_check_wincap.get_screenshot()
pix1 = screenshot[0, 0]
pix1 = int(pix1[0]) + int(pix1[1]) + int(pix1[2])
pix2 = screenshot[0, 169]
pix2 = int(pix2[0]) + int(pix2[1]) + int(pix2[2])
        # the store is already open when the two reference pixels match
        if not (pix1 == 103 and pix2 == 223):
            # need to open the store
self.game_wincap.update_window_position(border=False)
offsetx = self.game_wincap.window_rect[0] + 534
offsety = self.game_wincap.window_rect[1] + 277
            ctypes.windll.user32.SetCursorPos(offsetx+610, offsety-10)
            # 0x0002 / 0x0004 are MOUSEEVENTF_LEFTDOWN / MOUSEEVENTF_LEFTUP
            ctypes.windll.user32.mouse_event(
                0x0002, 0, 0, 0, 0)
            ctypes.windll.user32.mouse_event(
                0x0004, 0, 0, 0, 0)
def change_tab(self, name):
self.game_wincap.update_window_position(border=False)
x = self.game_wincap.window_rect[0] + 534-60
if name == "Equipment":
y = self.game_wincap.window_rect[1] + 277 - 15
elif name == "Other":
y = self.game_wincap.window_rect[1] + 277 + 44
ctypes.windll.user32.SetCursorPos(x, y)
ctypes.windll.user32.mouse_event(
0x0002, 0, 0, 0, 0)
ctypes.windll.user32.mouse_event(
0x0004, 0, 0, 0, 0)
def hover_mouse_all(self):
self.game_wincap.update_window_position(border=False)
offsetx = self.game_wincap.window_rect[0] + 534
offsety = self.game_wincap.window_rect[1] + 277
for i in range(4):
for j in range(6):
x = offsetx+j*44
y = offsety+i*44
ctypes.windll.user32.SetCursorPos(x-10, y)
time.sleep(0.03)
ctypes.windll.user32.SetCursorPos(x, y)
time.sleep(0.03)
ctypes.windll.user32.SetCursorPos(x+10, y)
ctypes.windll.user32.SetCursorPos(offsetx, offsety-70)
# ctypes.windll.user32.SetCursorPos(offsetx+610, offsety-10)
def remove_empty(self, screenshot):
non_empty = []
for i in range(4):
for j in range(6):
colour = set(screenshot[i*44, 22+j*44])
if colour != self.empty:
non_empty.append([i, j])
        # each entry of the returned list is a [row, col] pair
        return non_empty
def identify_rarities_equip(self, rowcol_list, screenshot):
junk = []
for rowcol in rowcol_list:
colour = set(screenshot[rowcol[0]*44, rowcol[1]*44])
if colour == self.rar_none:
junk.append([rowcol[0], rowcol[1]])
elif colour == self.rar_green:
if self.cutoff >= 1:
junk.append([rowcol[0], rowcol[1]])
            elif colour == self.rar_blue:
if self.cutoff >= 2:
junk.append([rowcol[0], rowcol[1]])
        # each entry of the returned list is a [row, col] pair
return junk
def identify_items_other(self, rowcol_list, screenshot):
junk = []
for rowcol in rowcol_list:
colour = set(screenshot[rowcol[0]*44, 22+rowcol[1]*44])
if colour in self.junk_list:
junk.append([rowcol[0], rowcol[1]])
        # each entry of the returned list is a [row, col] pair
return junk
def sell(self, rowcol_list, tab="Other"):
offsetx = self.game_wincap.window_rect[0] + 534
offsety = self.game_wincap.window_rect[1] + 277
for item in rowcol_list:
if tab == "Equipment":
if self.last_row_protect:
if item[0] == 3:
continue
x = offsetx+item[1]*44
y = offsety+item[0]*44
ctypes.windll.user32.SetCursorPos(x, y)
            time.sleep(0.1)
            # 0x0008 / 0x0010 are MOUSEEVENTF_RIGHTDOWN / MOUSEEVENTF_RIGHTUP;
            # a right-click sells the hovered item
            ctypes.windll.user32.mouse_event(
                0x0008, 0, 0, 0, 0)
            time.sleep(0.01)
            ctypes.windll.user32.mouse_event(
                0x0010, 0, 0, 0, 0)
# Then click a second time to be sure
time.sleep(0.01)
ctypes.windll.user32.mouse_event(
0x0008, 0, 0, 0, 0)
time.sleep(0.01)
ctypes.windll.user32.mouse_event(
0x0010, 0, 0, 0, 0)
def repair(self):
self.game_wincap.update_window_position(border=False)
offsetx = self.game_wincap.window_rect[0] + 534
offsety = self.game_wincap.window_rect[1] + 277
ctypes.windll.user32.SetCursorPos(offsetx-310, offsety+325)
ctypes.windll.user32.mouse_event(
0x0002, 0, 0, 0, 0)
ctypes.windll.user32.mouse_event(
0x0004, 0, 0, 0, 0)
ctypes.windll.user32.SetCursorPos(offsetx+0, offsety+180)
ctypes.windll.user32.mouse_event(
0x0002, 0, 0, 0, 0)
ctypes.windll.user32.mouse_event(
0x0004, 0, 0, 0, 0)
# this is if everything is already repaired
ctypes.windll.user32.SetCursorPos(offsetx+100, offsety+180)
ctypes.windll.user32.mouse_event(
0x0002, 0, 0, 0, 0)
ctypes.windll.user32.mouse_event(
0x0004, 0, 0, 0, 0)
class QuestHandle():
def __init__(self) -> None:
with open("gamename.txt") as f:
gamename = f.readline()
self.game_wincap = WindowCapture(gamename)
self.white_text_filter = HsvFilter(
0, 0, 102, 45, 65, 255, 0, 0, 0, 0)
self.yellow_text_filter = HsvFilter(
16, 71, 234, 33, 202, 255, 0, 0, 0, 0)
self.blue_text_filter = HsvFilter(
83, 126, 85, 102, 255, 255, 0, 0, 0, 0)
self.all_text_filter = HsvFilter(
0, 0, 61, 78, 255, 255, 0, 255, 0, 0)
self.vision = Vision('xprompt67filtv2.jpg')
self.accept_rect = [725, 525, 925, 595]
self.accept_wincap = WindowCapture(gamename, self.accept_rect)
self.skip_rect = [730, 740, 890, 780]
self.skip_wincap = WindowCapture(gamename, self.skip_rect)
self.next_rect = [880, 740, 1040, 780]
self.next_wincap = WindowCapture(gamename, self.next_rect)
self.quest_rect = [310, 160, 1055, 650]
self.quest_wincap = WindowCapture(gamename, self.quest_rect)
self.questlist_rect = [740, 240, 1050, 580]
self.questlist_wincap = WindowCapture(gamename, self.questlist_rect)
self.complete_wincap = WindowCapture(gamename, self.next_rect)
self.xprompt_rect = [1130, 670, 1250, 720]
self.xprompt_wincap = WindowCapture(gamename, self.xprompt_rect)
def start_quest_handle(self):
start_time = time.time()
while time.time() < start_time + 2:
if self.check_for_accept():
break
def convert_and_click(self, x, y, rect):
self.game_wincap.update_window_position(border=False)
truex = int(x + self.game_wincap.window_rect[0] + rect[0])
truey = int(y + self.game_wincap.window_rect[1] + rect[1])
ctypes.windll.user32.SetCursorPos(truex, truey)
ctypes.windll.user32.mouse_event(
0x0002, 0, 0, 0, 0)
ctypes.windll.user32.mouse_event(
0x0004, 0, 0, 0, 0)
    def check_for_accept(self):
        # each check_for_* method falls through to the next check in the
        # chain (accept -> skip -> next -> quest -> questlist -> complete
        # -> xprompt) when its target text is not found
        image = self.accept_wincap.get_screenshot()
image = self.vision.apply_hsv_filter(
image, self.white_text_filter)
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
results = pytesseract.image_to_data(
rgb, output_type=pytesseract.Output.DICT, lang='eng')
detection = False
for i in range(0, len(results["text"])):
if "Accept" in results["text"][i]:
x = results["left"][i] + (results["width"][i]/2)
y = results["top"][i] + (results["height"][i]/2)
self.convert_and_click(x, y, self.accept_rect)
detection = True
break
if not detection:
return self.check_for_skip()
else:
return True
def check_for_skip(self):
image = self.skip_wincap.get_screenshot()
image = self.vision.apply_hsv_filter(
image, self.white_text_filter)
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
results = pytesseract.image_to_data(
rgb, output_type=pytesseract.Output.DICT, lang='eng')
detection = False
for i in range(0, len(results["text"])):
if "Skip" in results["text"][i]:
x = results["left"][i] + (results["width"][i]/2)
y = results["top"][i] + (results["height"][i]/2)
self.convert_and_click(x, y, self.skip_rect)
detection = True
break
if not detection:
return self.check_for_next()
else:
return True
def check_for_next(self):
image = self.next_wincap.get_screenshot()
image = self.vision.apply_hsv_filter(
image, self.white_text_filter)
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
results = pytesseract.image_to_data(
rgb, output_type=pytesseract.Output.DICT, lang='eng')
detection = False
for i in range(0, len(results["text"])):
if "Next" in results["text"][i]:
x = results["left"][i] + (results["width"][i]/2)
y = results["top"][i] + (results["height"][i]/2)
self.convert_and_click(x, y, self.next_rect)
detection = True
break
if not detection:
return self.check_for_quest()
else:
return True
def check_for_quest(self):
image = self.quest_wincap.get_screenshot()
image = self.vision.apply_hsv_filter(
image, self.white_text_filter)
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
tess_config = '--psm 6 --oem 3 -c tessedit_char_whitelist=Quest'
results = pytesseract.image_to_data(
rgb, output_type=pytesseract.Output.DICT, lang='eng', config=tess_config)
detection = False
for i in range(0, len(results["text"])):
if "Quest" in results["text"][i]:
x = results["left"][i] + (results["width"][i]/2)
y = results["top"][i] + (results["height"][i]/2)
self.convert_and_click(x, y, self.quest_rect)
detection = True
break
if not detection:
return self.check_for_questlist()
else:
return True
def check_for_questlist(self):
image = self.questlist_wincap.get_screenshot()
image = self.vision.apply_hsv_filter(
image, self.all_text_filter)
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
results = pytesseract.image_to_data(
rgb, output_type=pytesseract.Output.DICT, lang='eng')
detection = False
for i in range(0, len(results["text"])):
if "LV" in results["text"][i]:
# at this point need to grab the centre of the rect
x = results["left"][i] + (results["width"][i]/2)
y = results["top"][i] + (results["height"][i]/2)
# and then click at this position
self.convert_and_click(x, y, self.questlist_rect)
detection = True
break
if not detection:
return self.check_for_complete()
else:
return True
def check_for_complete(self):
image = self.complete_wincap.get_screenshot()
image = self.vision.apply_hsv_filter(
image, self.white_text_filter)
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
results = pytesseract.image_to_data(
rgb, output_type=pytesseract.Output.DICT, lang='eng')
detection = False
for i in range(0, len(results["text"])):
if "Com" in results["text"][i]:
x = results["left"][i] + (results["width"][i]/2)
y = results["top"][i] + (results["height"][i]/2)
self.convert_and_click(x, y, self.next_rect)
detection = True
break
if not detection:
return self.check_for_xprompt()
else:
return True
def check_for_xprompt(self):
image = self.xprompt_wincap.get_screenshot()
image = self.vision.apply_hsv_filter(
image, self.blue_text_filter)
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
results = pytesseract.image_to_data(
rgb, output_type=pytesseract.Output.DICT, lang='eng')
detection = False
for i in range(0, len(results["text"])):
if "Press" in results["text"][i]:
pydirectinput.keyDown("x")
time.sleep(0.1)
pydirectinput.keyUp("x")
detection = True
break
if not detection:
return False
else:
return True
class Follower():
def __init__(self) -> None:
self.pressed_keys = []
self.relx = 0
self.rely = 0
def navigate_towards(self, x, y):
self.relx = x
self.rely = y
if self.relx > 1:
# Check if opposite key held down
if "left" in self.pressed_keys:
self.pressed_keys.remove("left")
CustomInput.release_key(CustomInput.key_map["left"], "left")
# Check that not already being held down
if "right" not in self.pressed_keys:
self.pressed_keys.append("right")
# Hold the key down
CustomInput.press_key(CustomInput.key_map["right"], "right")
elif self.relx < -1:
# Check if opposite key held down
if "right" in self.pressed_keys:
self.pressed_keys.remove("right")
CustomInput.release_key(CustomInput.key_map["right"], "right")
# Check that not already being held down
if "left" not in self.pressed_keys:
self.pressed_keys.append("left")
# Hold the key down
CustomInput.press_key(CustomInput.key_map["left"], "left")
else:
# Handling for case where = 0, need to remove both keys
if "right" in self.pressed_keys:
self.pressed_keys.remove("right")
CustomInput.release_key(CustomInput.key_map["right"], "right")
if "left" in self.pressed_keys:
self.pressed_keys.remove("left")
CustomInput.release_key(CustomInput.key_map["left"], "left")
# Handling for y-dir next
if self.rely > 1:
# Check if opposite key held down
if "down" in self.pressed_keys:
self.pressed_keys.remove("down")
CustomInput.release_key(CustomInput.key_map["down"], "down")
# Check that not already being held down
if "up" not in self.pressed_keys:
self.pressed_keys.append("up")
# Hold the key down
CustomInput.press_key(CustomInput.key_map["up"], "up")
elif self.rely < -1:
# Check if opposite key held down
if "up" in self.pressed_keys:
self.pressed_keys.remove("up")
CustomInput.release_key(CustomInput.key_map["up"], "up")
# Check that not already being held down
if "down" not in self.pressed_keys:
self.pressed_keys.append("down")
# Hold the key down
CustomInput.press_key(CustomInput.key_map["down"], "down")
else:
# Handling for case where = 0, need to remove both keys
if "up" in self.pressed_keys:
self.pressed_keys.remove("up")
CustomInput.release_key(CustomInput.key_map["up"], "up")
if "down" in self.pressed_keys:
self.pressed_keys.remove("down")
CustomInput.release_key(CustomInput.key_map["down"], "down")
if __name__ == "__main__":
time.sleep(2)
with open("gamename.txt") as f:
gamename = f.readline()
# start = time.time()
# BotUtils.detect_xprompt(gamename)
# print("Time taken: {}s".format(time.time()-start))
BotUtils.close_map_and_menu(gamename)
| 39.910107 | 130 | 0.562536 | 10,401 | 85,687 | 4.47707 | 0.10124 | 0.005369 | 0.023708 | 0.01675 | 0.640081 | 0.603294 | 0.56668 | 0.540137 | 0.505949 | 0.483894 | 0 | 0.045521 | 0.332151 | 85,687 | 2,146 | 131 | 39.928705 | 0.7682 | 0.067396 | 0 | 0.561884 | 0 | 0 | 0.030656 | 0.007185 | 0 | 0 | 0.001204 | 0 | 0 | 1 | 0.066265 | false | 0.002191 | 0.009858 | 0.001095 | 0.152793 | 0.003834 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fab81a4ff4643ede4d0508d54ffa75a9959d0825 | 2,072 | py | Python | 03-basic-collections-and-for/07s-solution-parsing-reddit.py | Tebs-Lab/python-for-scripting-workshop | 8e3aeb99f95112143701926aa7ab495358c4e3ee | [
"Unlicense"
] | null | null | null | 03-basic-collections-and-for/07s-solution-parsing-reddit.py | Tebs-Lab/python-for-scripting-workshop | 8e3aeb99f95112143701926aa7ab495358c4e3ee | [
"Unlicense"
] | null | null | null | 03-basic-collections-and-for/07s-solution-parsing-reddit.py | Tebs-Lab/python-for-scripting-workshop | 8e3aeb99f95112143701926aa7ab495358c4e3ee | [
"Unlicense"
] | null | null | null | import json
import pathlib
import ssl
from urllib.request import Request, urlopen
# Change this to False to use the file data.
use_live_data = True
if use_live_data:
# Fetching the live data from reddit.
url = "http://www.reddit.com/r/aww.json"
request = Request(
url,
headers={
'User-Agent': 'TebsLabPythonExercise/0.0.1' # setting the user agent decreases throttling by Reddit
}
)
# Context is for MacOS users related to SSL certificates. Details: https://clay-atlas.com/us/blog/2021/09/26/python-en-urllib-error-ssl-certificate/
response = urlopen(request, context=ssl._create_unverified_context())
listing = json.load(response)
else:
# Alternatively, loading the data from the provided json file.
containing_dir = pathlib.Path(__file__).parent.resolve()
with open(pathlib.Path(containing_dir / 'supplemental-materials' / 'reddit-aww.json')) as json_file:
listing = json.load(json_file)
# Extract the posts to loop over them
posts = listing['data']['children']
# For our fact finding mission
posts_by_user = {}
sum_of_upvote_ratio = 0
# Iterate over the posts, extract the data, print
for post in posts:
post_data = post['data']
title = post_data['title']
username = post_data['author']
upvote_ratio = post_data['upvote_ratio']
post_url = post_data['url']
# Check if the user is already in there
if username not in posts_by_user:
posts_by_user[username] = 0
# Then increase their post count.
posts_by_user[username] += 1
sum_of_upvote_ratio += upvote_ratio
print('================')
print(f'Title: {title}\nUser: {username}\nUpvote Ratio: {upvote_ratio}\nURL: {post_url}')
print()
# Display which users posted multiple times (if any)
for username, post_count in posts_by_user.items():
if post_count > 1:
print(f'{username} posted {post_count} times!')
# Compute the avg upvote ratio
avg_upvote_ratio = sum_of_upvote_ratio / len(posts)
print(f'The average upvote ratio was {avg_upvote_ratio}')
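
# The fields read above live at listing['data']['children'][i]['data']; a
# trimmed, hypothetical post record for reference:
#
#   {"data": {"title": "A very good boy", "author": "someuser",
#             "upvote_ratio": 0.98, "url": "https://i.redd.it/abc.jpg"}}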
| 32.375 | 152 | 0.696911 | 296 | 2,072 | 4.702703 | 0.408784 | 0.086925 | 0.039511 | 0.034483 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008977 | 0.193533 | 2,072 | 63 | 153 | 32.888889 | 0.824057 | 0.291023 | 0 | 0 | 0 | 0.025 | 0.224588 | 0.033654 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1 | 0 | 0.1 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
faba9d7e91fbeec825bc33f3a7bfbbe7e75ec534 | 501 | py | Python | src/topology/all_gates/qft.py | Dreamonic/shor-algorithm | 19a4d95f0f19809cd3fe1db4d834ff3a02fba68d | [
"MIT"
] | null | null | null | src/topology/all_gates/qft.py | Dreamonic/shor-algorithm | 19a4d95f0f19809cd3fe1db4d834ff3a02fba68d | [
"MIT"
] | null | null | null | src/topology/all_gates/qft.py | Dreamonic/shor-algorithm | 19a4d95f0f19809cd3fe1db4d834ff3a02fba68d | [
"MIT"
] | null | null | null | from projectq.meta import Dagger
from projectq.ops import H, CRz
from src.shared.rotate import calculate_phase
def qft(eng, circuit, qubits):
m = len(qubits)
for i in range(m - 1, -1, -1):
circuit.apply_single_qubit_gate(H, qubits[i])
for j in range(2, i + 2):
circuit.apply_ld_two_qubit_gate(CRz(calculate_phase(j)), qubits[i - j + 1], qubits[i])
def qft_inverse(eng, circuit, qubits):
with Dagger(eng):
qft(eng, circuit, qubits)
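
# A minimal usage sketch (untested; assumes the caller owns a projectq
# MainEngine plus a circuit wrapper exposing the apply_* methods used above):
#
#   from projectq import MainEngine
#   eng = MainEngine()
#   qubits = eng.allocate_qureg(3)
#   qft(eng, circuit, qubits)          # forward transform
#   qft_inverse(eng, circuit, qubits)  # inverse, applied under Dagger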
| 27.833333 | 99 | 0.636727 | 78 | 501 | 3.961538 | 0.448718 | 0.097087 | 0.15534 | 0.122977 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015873 | 0.245509 | 501 | 17 | 100 | 29.470588 | 0.801587 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.25 | 0 | 0.416667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fabb5ec294332474767da7f125ccef5a1b6a0eb2 | 1,777 | py | Python | src/chapter_3/report.py | gm672/memoire | 8d8c288e7996119aba89ff7c61a78641840a1206 | [
"RSA-MD"
] | null | null | null | src/chapter_3/report.py | gm672/memoire | 8d8c288e7996119aba89ff7c61a78641840a1206 | [
"RSA-MD"
] | null | null | null | src/chapter_3/report.py | gm672/memoire | 8d8c288e7996119aba89ff7c61a78641840a1206 | [
"RSA-MD"
] | null | null | null | import dl
import dlm
import conll3
import csv
import argparse
import textwrap
def create(fileName1):
# store the dependency tree in a dict
T = conll3.conllFile2trees(fileName1)
stats = []
c=0
for tree in T:
c+=1
l = len(tree) # length
d = dl.DL_T(tree) # observed sentence
#true random
r = dl.true_random(tree)
dr = dl.DL_L(r,tree)
#optimal
linearization = dlm.optimal_linearization(tree)
dmin = dl.DL_L(linearization,tree)
omega = dl.omega(dmin,dr,d,l) # Omega
gamma = dl.gamma(dmin,d) # Gamma
mdd = dl.MDD(d,l) # MDD
stats.append([l,d,dr,dmin,omega,gamma,mdd]) # Create the csv line
return stats
parser = argparse.ArgumentParser(description='Get a csv file with DL measure from a CONLL file. --help for more information',formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('file',
help= textwrap.dedent('''\
The file to be analysed
'''))
parser.add_argument('output',
help= textwrap.dedent('''\
Output file.
The CSV file does not have headers.
It is formated as :
length, actual dependency length, random DL , minimum DL, omega, gamma, MDD
'''))
args = parser.parse_args()
print(args.file)
print(args.output)
stats = create(args.file)
with open(args.output, 'w+',newline='') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=',')
for line in stats:
spamwriter.writerow(line)
csvfile.close()
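
# A hypothetical output row, in the column order described in the --help
# text above (length, observed DL, random DL, minimum DL, omega, gamma, MDD):
#   12,34,56.0,28,0.79,0.18,2.83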
| 26.522388 | 172 | 0.549803 | 208 | 1,777 | 4.653846 | 0.418269 | 0.012397 | 0.010331 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006082 | 0.352279 | 1,777 | 66 | 173 | 26.924242 | 0.834926 | 0.064716 | 0 | 0.090909 | 0 | 0 | 0.272785 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022727 | false | 0 | 0.136364 | 0 | 0.181818 | 0.045455 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fabb6360664af2d6876a95c3edbd58b72fce2dcc | 3,323 | py | Python | scripts/inventory.py | Otus-DevOps-2019-08/ntikhomirov_infra | 428121d5b4ff13c508441d147d738fce72ca5590 | [
"MIT"
] | 1 | 2019-09-27T10:14:04.000Z | 2019-09-27T10:14:04.000Z | scripts/inventory.py | Otus-DevOps-2019-08/ntikhomirov_infra | 428121d5b4ff13c508441d147d738fce72ca5590 | [
"MIT"
] | 9 | 2019-10-02T09:29:18.000Z | 2019-11-22T12:43:41.000Z | scripts/inventory.py | Otus-DevOps-2019-08/ntikhomirov_infra | 428121d5b4ff13c508441d147d738fce72ca5590 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3.6
import os
import sys
import argparse
try:
import json
except ImportError:
import simplejson as json
gce = True
env = 'prod'
count = 0
# Import the modules needed to use the GCE API
try:
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
except Exception as e:
import yaml
gce = False
class Inventory(object):
gce = ""
def __init__(self):
self.inventory = {}
self.read_cli_args()
        if self.args.list:
            self.inventory = self.dynamic_inventory()
        else:
            # --host is not supported, so it also falls back to the empty inventory
            self.inventory = self.empty_inventory()
        print(json.dumps(self.inventory))
def empty_inventory(self):
return {'_meta': {'hostvars': {}}}
def dynamic_inventory(self):
counta = 0
inventory = {
'app': {
'hosts': [],
'vars': {}
},
'db': {
'hosts': [],
'vars': {}
},
'proxy': {
'hosts': [],
'vars': {}
},
'_meta': {
}
}
if gce:
credentials = GoogleCredentials.get_application_default()
service = discovery.build('compute', 'v1', credentials=credentials)
            # TODO: these should perhaps be moved to a config file
            project = 'indigo-almanac-254221'
            zone = 'europe-west4-a'
request = service.instances().list(project=project, zone=zone)
while request is not None:
response = request.execute()
                # filter the gcloud instances by their tags
for instance in response['items']:
if 'items' in instance['tags']:
t = instance['tags']['items']
for i in t:
if str(i)== 'db' :
inventory['db']['hosts'].append(instance['name'])
for j in instance['networkInterfaces'] :
inventory['app']['vars']['db_url'] = str(j['networkIP'])
elif str(i) == 'app':
inventory['app']['hosts'].append(instance['name'])
counta += 1
elif str(i) == 'proxy':
inventory['proxy']['hosts'].append(instance['name'])
elif str(i) == 'prod' or str(i) == 'test':
inventory['app']['vars']['env'] = str(i)
inventory['proxy']['vars']['env'] = str(i)
inventory['db']['vars']['env'] = str(i)
request = service.instances().list_next(previous_request=request, previous_response=response)
inventory['proxy']['vars']['count'] = str(counta)
return inventory
else:
with open('./inventory.yml') as f:
print(json.dumps(yaml.load(f)))
def read_cli_args(self):
parser = argparse.ArgumentParser()
parser.add_argument('--list', action = 'store_true')
parser.add_argument('--host', action = 'store')
self.args = parser.parse_args()
Inventory()
| 29.936937 | 106 | 0.504664 | 322 | 3,323 | 5.130435 | 0.406832 | 0.01937 | 0.041162 | 0.041768 | 0.09201 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006629 | 0.36443 | 3,323 | 110 | 107 | 30.209091 | 0.775568 | 0.057177 | 0 | 0.141176 | 0 | 0 | 0.098146 | 0.006714 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047059 | false | 0 | 0.105882 | 0.011765 | 0.2 | 0.023529 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fabbe3032884a2ee4ff62a5a09e013558b6d077f | 3,689 | py | Python | tests/test.py | ecmwf/pyfdb | 90716ddcaa8b3d981e695b47a1690123e0c230ba | [
"Apache-2.0"
] | null | null | null | tests/test.py | ecmwf/pyfdb | 90716ddcaa8b3d981e695b47a1690123e0c230ba | [
"Apache-2.0"
] | null | null | null | tests/test.py | ecmwf/pyfdb | 90716ddcaa8b3d981e695b47a1690123e0c230ba | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# (C) Copyright 1996- ECMWF.
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation nor
# does it submit to any jurisdiction.
import shutil
from pyeccodes import Reader
import pyfdb
fdb = pyfdb.FDB()
# Archive #
key = {
"domain": "g",
"stream": "oper",
"levtype": "pl",
"levelist": "300",
"date": "20191110",
"time": "0000",
"step": "0",
"param": "138",
"class": "rd",
"type": "an",
"expver": "xxxx",
}
filename = "x138-300.grib"
fdb.archive(open(filename, "rb").read(), key)
key["levelist"] = "400"
filename = "x138-400.grib"
pyfdb.archive(open(filename, "rb").read())
key["expver"] = "xxxy"
filename = "y138-400.grib"
fdb.archive(open(filename, "rb").read())
fdb.flush()
# List #
request = {
"class": "rd",
"expver": "xxxx",
"stream": "oper",
"date": "20191110",
"time": "0000",
"domain": "g",
"type": "an",
"levtype": "pl",
"step": 0,
"levelist": [300, "500"],
"param": ["138", 155, "t"],
}
print("direct function, request as dictionary:", request)
for el in pyfdb.list(request):
print(el)
request["levelist"] = ["100", "200", "300", "400", "500", "700", "850", "1000"]
request["param"] = "138"
print("")
print("direct function, updated dictionary:", request)
for el in pyfdb.list(request):
print(el)
# as an alternative, create a FDB instance and start queries from there
request["levelist"] = ["400", "500", "700", "850", "1000"]
print("")
print("fdb object, request as dictionary:", request)
for el in fdb.list(request):
print(el)
#
# print('')
# print('list ALL:')
# for el in fdb.list():
# print(el)
# Retrieve #
request = {
"domain": "g",
"stream": "oper",
"levtype": "pl",
"step": "0",
"expver": "xxxx",
"date": "20191110",
"class": "rd",
"levelist": "300",
"param": "138",
"time": "0000",
"type": "an",
}
filename = "x138-300bis.grib"
print("")
print("save to file ", filename)
with open(filename, "wb") as o, fdb.retrieve(request) as i:
shutil.copyfileobj(i, o)
request["levelist"] = "400"
filename = "x138-400bis.grib"
print("save to file ", filename)
with open(filename, "wb") as o, fdb.retrieve(request) as i:
shutil.copyfileobj(i, o)
request["expver"] = "xxxy"
filename = "y138-400bis.grib"
print("save to file ", filename)
with open(filename, "wb") as o, pyfdb.retrieve(request) as i:
shutil.copyfileobj(i, o)
# request = {
# 'class': 'od',
# 'expver': '0001',
# 'stream': 'oper',
# 'date': '20040118',
# 'time': '0000',
# 'domain': 'g',
# 'type': 'an',
# 'levtype': 'sfc',
# 'step': 0,
# 'param': 151
# }
print("")
print("FDB retrieve")
print("direct function, retrieve from request:", request)
datareader = pyfdb.retrieve(request)
print("")
print("reading a small chunk")
chunk = datareader.read(10)
print(chunk)
print("tell()", datareader.tell())
print("go back (partially) - seek(2)")
datareader.seek(2)
print("tell()", datareader.tell())
print("reading a larger chunk")
chunk = datareader.read(40)
print(chunk)
print("go back - seek(0)")
datareader.seek(0)
print("")
print("decode GRIB")
reader = Reader(datareader)
grib = next(reader)
grib.dump()
request["levelist"] = [300, "400"]
request["expver"] = "xxxx"
filename = "foo.grib"
print("")
print("save to file ", filename)
with open(filename, "wb") as o, fdb.retrieve(request) as i:
shutil.copyfileobj(i, o)
| 21.7 | 80 | 0.612632 | 486 | 3,689 | 4.650206 | 0.308642 | 0.035398 | 0.012389 | 0.026549 | 0.371681 | 0.323451 | 0.286726 | 0.215044 | 0.215044 | 0.195575 | 0 | 0.063434 | 0.18379 | 3,689 | 169 | 81 | 21.828402 | 0.687147 | 0.204934 | 0 | 0.536364 | 0 | 0 | 0.289456 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.027273 | 0 | 0.027273 | 0.254545 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fabd3410ff9a1457ff4d27dc4da7e340b30ed724 | 3,907 | py | Python | announcer/pref.py | dokipen/trac-announcer-plugin | 7ef4123a7508c5395c8008fa2a8478b1888b4f63 | [
"BSD-3-Clause"
] | null | null | null | announcer/pref.py | dokipen/trac-announcer-plugin | 7ef4123a7508c5395c8008fa2a8478b1888b4f63 | [
"BSD-3-Clause"
] | 1 | 2018-06-11T14:48:06.000Z | 2018-06-11T14:48:06.000Z | announcer/pref.py | dokipen/trac-announcer-plugin | 7ef4123a7508c5395c8008fa2a8478b1888b4f63 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (c) 2008, Stephen Hansen
# Copyright (c) 2009, Robert Corsaro
# Copyright (c) 2010, Robert Corsaro
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <ORGANIZATION> nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
from trac.core import Component, implements, ExtensionPoint
from trac.prefs.api import IPreferencePanelProvider
from trac.web.chrome import ITemplateProvider, add_stylesheet, Chrome
from trac.web import IRequestHandler
from pkg_resources import resource_filename
from announcer.api import IAnnouncementPreferenceProvider, \
_, tag_, N_
def truth(v):
if v in (False, 'False', 'false', 0, '0', ''):
return None
return True
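
# For example, truth('0') and truth('false') return None, while truth('on')
# or truth(1) return True.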
class AnnouncerPreferences(Component):
implements(IPreferencePanelProvider, ITemplateProvider)
preference_boxes = ExtensionPoint(IAnnouncementPreferenceProvider)
def get_htdocs_dirs(self):
return [('announcer', resource_filename(__name__, 'htdocs'))]
def get_templates_dirs(self):
resource_dir = resource_filename(__name__, 'templates')
return [resource_dir]
def get_preference_panels(self, req):
yield ('announcer', _('Announcements'))
yield ('exp-announcer', 'Exp.Announcements')
def _get_boxes(self, req):
for pr in self.preference_boxes:
boxes = pr.get_announcement_preference_boxes(req)
boxdata = {}
if boxes:
for boxname, boxlabel in boxes:
if boxname == 'general_wiki' and not req.perm.has_permission('WIKI_VIEW'):
continue
if (boxname == 'legacy' or boxname == 'joinable_groups') and not req.perm.has_permission('TICKET_VIEW'):
continue
yield ((boxname, boxlabel) +
pr.render_announcement_preference_box(req, boxname))
def render_preference_panel(self, req, panel, path_info=None):
streams = []
chrome = Chrome(self.env)
for name, label, template, data in self._get_boxes(req):
streams.append((label, chrome.render_template(
req, template, data, content_type='text/html', fragment=True
)))
add_stylesheet(req, 'announcer/css/announcer_prefs.css')
return 'prefs_announcer.html', {"boxes": streams}
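
# A minimal sketch of a preference box provider this component can consume
# (hypothetical component; the two methods are exactly the ones _get_boxes
# calls above, yielding (name, label) pairs and returning (template, data)):
#
#   class MyPrefBox(Component):
#       implements(IAnnouncementPreferenceProvider)
#
#       def get_announcement_preference_boxes(self, req):
#           yield 'mybox', _('My Box')
#
#       def render_announcement_preference_box(self, req, panel):
#           return 'prefs_mybox.html', {}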
| 43.898876 | 124 | 0.675966 | 457 | 3,907 | 5.667396 | 0.455142 | 0.012355 | 0.013127 | 0.017761 | 0.09112 | 0.072587 | 0.05251 | 0.05251 | 0.05251 | 0.05251 | 0 | 0.004987 | 0.2301 | 3,907 | 88 | 125 | 44.397727 | 0.856051 | 0.432045 | 0 | 0.046512 | 0 | 0 | 0.094694 | 0.015096 | 0 | 0 | 0 | 0 | 0 | 1 | 0.139535 | false | 0 | 0.139535 | 0.023256 | 0.44186 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fabea44e446c06b8ed7bd4cbe6c687ec876e43a7 | 5,946 | py | Python | sher_scrapper.py | iammhk/AI_Poet | 7676cd5b7740925857a4dd4923755e66c62e49f5 | [
"Apache-2.0"
] | null | null | null | sher_scrapper.py | iammhk/AI_Poet | 7676cd5b7740925857a4dd4923755e66c62e49f5 | [
"Apache-2.0"
] | null | null | null | sher_scrapper.py | iammhk/AI_Poet | 7676cd5b7740925857a4dd4923755e66c62e49f5 | [
"Apache-2.0"
] | null | null | null | import requests
from bs4 import BeautifulSoup
global json_content
import sqlite3
conn = sqlite3.connect("khusrau.db")
cur = conn.cursor()
blues_words=["","",""]
tag_strip=['wafa', 'Wahm', 'Wahshat', 'Waiz', 'Wajood', 'Waqt', 'Welcome', 'Yaad', 'Yaad-e-Raftagan', 'Zindagi', 'zindan', 'Zulf']
top100=['https://rekhta.org/tags/wafa-shayari', 'https://rekhta.org/tags/wahm-shayari', 'https://rekhta.org/tags/wahshat-shayari', 'https://rekhta.org/tags/waiz-shayari', 'https://rekhta.org/tags/wajood-shayari', 'https://rekhta.org/tags/waqt-shayari', 'https://rekhta.org/tags/welcome-shayari', 'https://rekhta.org/tags/yaad-shayari', 'https://rekhta.org/tags/yaad-e-raftagan-shayari', 'https://rekhta.org/tags/zindagi-shayari', 'https://rekhta.org/tags/zindan-shayari', 'https://rekhta.org/tags/zulf-shayari']
url="https://rekhta.org/Top-20-Ishq-Sher"
for m in range(0,len(top100)):
net_url=url+top100[m]
page = requests.get(top100[m])
soup = BeautifulSoup(page.content, 'lxml')
#tree = html.fromstring(page.content)
meaning_api="https://rekhta.org/Api_ShowMeaning/?id="
x=0
poem_title=soup.find("title").get_text()
print(poem_title)
#poem_author = soup.find(class_="ghazalAuthor")
#print(poem_author.get_text())
word_id=[]
desc=[]
word_meaning=""
sentence1=""
sentence0=""
trans=""
trans_trim=""
list1=""
list0=""
tags_net=""
poem_raw = soup.find(class_="left_pan pageContentContainer")
poem_couplet = poem_raw.find_all(class_=" nw_ghazalCard")
#cur.execute("INSERT INTO ghazal VALUES (?, ?, ?, ?, ?, ?);", (poem_title,poem_author.get_text(),net_url,None,None,None))
#cur.execute("INSERT INTO author VALUES (?, ?, ?, ?, ?);", (poem_author.get_text(), None, None, None, None))
for y in range(0,len(poem_couplet)): #finds sher
print(y)
tag_container = poem_couplet[y].find(class_="tagContainingList")
if tag_container is not None:
tags = tag_container.find_all("li")
else: tags=["yo"]
lower=poem_couplet[y].find(class_="OptContainingList")
blues=lower.find_all(class_="skyblue")
full_g=lower.find(class_="ReadFull")
        # drop any leading 'TRANSLATION' labels so blues[0] is the author name
        for z in range(0, len(blues)):
            blues_words = blues[0].get_text()
            # print(blues_words)
            if blues_words == 'TRANSLATION':
                blues.pop(0)
print(blues[0].get_text())
author_id=blues[0].get_text()
full_ghazal="https://rekhta.org"+blues[-1]["href"]
print(full_ghazal)
couplet = poem_couplet[y]
poem_line = couplet.find(class_="PContainer")
for q in range(1, len(tags)):
tag_sing=tags[q].get_text()
#tag_length = len(tag_single)
#print(tag_single[-3])
if tag_sing[-3] is ",":
tag_list=list(tag_sing)
tag_list[-3]=""
tag_s = ''.join(tag_list)
tag_single = tag_s.strip()
else: tag_single=tag_sing
tags_net+= tag_single + " + "
tags_net += tag_strip[m]
print(tags_net)
#print(poem_line)
#for g in range(0,len(poem_line)):
line = poem_line.find_all(class_="DivLine")
#print(line)
for h in range(0, len(line)): #finds line
single_line=line[h]
#print(h)
poem_text = single_line.find_all(class_="WM")
for x in range(0, len(poem_text)): #finds words
poem_word=poem_text[x]
word=poem_word.get_text()
word_id=poem_word['data-key']
meaning_url= meaning_api + word_id + "&lang=0"
#print(meaning_url)
meaning_page = requests.get(meaning_url)
meaning_soup = BeautifulSoup(meaning_page.content, 'lxml')
meaning_panel= meaning_soup.find(class_="MeaningBoxWrap")
#print(meaning_panel)
meaning_lang= meaning_panel.find_all('li')
#print(meaning_lang[2].get_text())
word_meaning+= meaning_lang[0].get_text() + " | " +meaning_lang[1].get_text() + " | "+ meaning_lang[2].get_text()
#print(word_meaning)
                if h == 0:
sentence0 += word
sentence0 += " "
list0 += word_id
list0 += " "
else:
sentence1+=word
sentence1+=" "
list1 += word_id
list1 += " "
#print(word, end=' ')
cur.execute("INSERT INTO words VALUES (?, ?, ?);", (word_id, word, word_meaning)) # word inserter
word_meaning = ""
#conn.commit()
#cur.execute("INSERT INTO sher2ghazal VALUES (?, ?, ?);", (poem_title,y,sentence0)) #sher2ghazal inserter
trans_couplet = couplet.find(class_="DivLineSmall PoemImageHost ImageTranslationHost")
if (trans_couplet is not None):
trans = trans_couplet.contents[0] + " + " + trans_couplet.contents[-1]
trans_trim=trans.strip()
print(sentence0)
print(top100[m])
print(sentence1)
#print(trans_couplet.contents[2])
cur.execute("INSERT INTO t20_sher VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);",(sentence0, sentence0, list0, sentence1, list1, author_id, tags_net, full_ghazal, trans_trim, y, None)) #sher inserter
sentence1 = ""
sentence0 = ""
trans = ""
trans_trim = ""
tags_net = ""
list0 = ""
list1 = ""
#page=None
conn.commit()
conn.close()
| 46.093023 | 512 | 0.546418 | 676 | 5,946 | 4.602071 | 0.210059 | 0.053038 | 0.067502 | 0.069431 | 0.15172 | 0.018644 | 0 | 0 | 0 | 0 | 0 | 0.017714 | 0.306929 | 5,946 | 128 | 513 | 46.453125 | 0.7372 | 0.134544 | 0 | 0.15534 | 0 | 0 | 0.193348 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.029126 | 0 | 0.029126 | 0.07767 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fabec2036d476b356cb17a4315ed1f417bd3a26e | 2,818 | py | Python | portfolio_api/db/user/operations.py | kkiyama117/fastAPI_template | b56b3184d7217bc33aabc1c1a40174e06c80e2f3 | [
"MIT"
] | null | null | null | portfolio_api/db/user/operations.py | kkiyama117/fastAPI_template | b56b3184d7217bc33aabc1c1a40174e06c80e2f3 | [
"MIT"
] | null | null | null | portfolio_api/db/user/operations.py | kkiyama117/fastAPI_template | b56b3184d7217bc33aabc1c1a40174e06c80e2f3 | [
"MIT"
] | null | null | null | import sqlite3
from typing import List
from sqlalchemy import Table
from portfolio_api.domains import user
from portfolio_api import exceptions
from .schema import UserSchema
from .. import connection
async def get_users() -> List[user.UserGet]:
users = UserSchema().get_table()
query = users.select()
return await connection.fetch_all(query)
async def get_user(user_id: int) -> user.UserGet:
"""Get User domain by id.
Args:
user_id (): id of user
Returns:
user.UserGet: user domain
"""
users: Table = UserSchema().get_table()
query = users.select().where(users.columns.id == user_id).limit(1)
return await connection.fetch_one(query)
async def get_user_by_email(email: str) -> user.UserGet:
"""Get User domain by id.
Args:
email(str): email of user
Returns:
user.UserGet: user domain
"""
users: Table = UserSchema().get_table()
query = users.select().where(users.columns.email == email).limit(1)
return await connection.fetch_one(query)
async def create_user(user_data: user.UserCreate) -> user.UserGet:
users: Table = UserSchema().get_table()
query = users.insert().values(
email=user_data.email,
first_name=user_data.first_name,
last_name=user_data.last_name,
is_active=user_data.is_active,
is_admin=False,
)
try:
record_id = await connection.execute(query)
return await get_user(record_id)
except sqlite3.IntegrityError:
raise exceptions.UserAlreadyExistException(f"{user_data.email} data is already exists")
except Exception as e:
raise exceptions.DatabaseException(str(e))
async def update_user(user_id: int, user_data: user.UserUpdate) -> user.UserGet:
users: Table = UserSchema().get_table()
query = (
users.update()
.where(users.columns.id == user_id)
.values(
email=user_data.email,
first_name=user_data.first_name,
last_name=user_data.last_name,
is_active=user_data.is_active,
is_admin=False,
)
)
try:
result = await connection.execute(query)
if not result:
raise exceptions.UserNotExistException(f"user_{user_id} does not exist")
return await get_user(user_id)
except sqlite3.IntegrityError as e:
raise exceptions.BadRequestException(str(e))
async def delete_user(user_id: int):
users: Table = UserSchema().get_table()
query = users.delete().where(users.columns.id == user_id)
try:
return await connection.execute(query)
except sqlite3.IntegrityError:
raise exceptions.UserNotExistException(f"user_{user_id} does not exist")
except Exception as e:
raise exceptions.DatabaseException(str(e))
| 29.663158 | 95 | 0.671398 | 359 | 2,818 | 5.103064 | 0.211699 | 0.048035 | 0.058952 | 0.075328 | 0.628821 | 0.562227 | 0.512009 | 0.491266 | 0.456332 | 0.343886 | 0 | 0.002757 | 0.227821 | 2,818 | 94 | 96 | 29.978723 | 0.839154 | 0 | 0 | 0.4375 | 0 | 0 | 0.038162 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.109375 | 0 | 0.203125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fabef270eb13994f8a08403379a335113eae92a3 | 657 | py | Python | test/test_engine.py | 0xflotus/cutecharts | 1e68616481099b89c777104fc8bd00518165a487 | [
"MIT"
] | 1 | 2019-10-14T02:55:27.000Z | 2019-10-14T02:55:27.000Z | test/test_engine.py | XksA-me/cutecharts | 844a8910d6a96e3f2e6c688a2c350e763c3d394d | [
"MIT"
] | null | null | null | test/test_engine.py | XksA-me/cutecharts | 844a8910d6a96e3f2e6c688a2c350e763c3d394d | [
"MIT"
] | null | null | null | from nose.tools import assert_equal, assert_in
from cutecharts.charts.basic import BasicChart
from cutecharts.faker import Faker
from cutecharts.globals import AssetsHost
def test_engine_render():
basic = BasicChart()
html = basic.render()
assert_in(AssetsHost.DEFAULT_HOST, html)
assert_in("chartXkcd", html)
def test_engine_render_notebook():
basic = BasicChart()
html = basic.render_notebook().__html__()
assert_in(AssetsHost.DEFAULT_HOST, html)
assert_in("chartXkcd", html)
def test_faker():
attrs = Faker.choose()
values = Faker.values()
assert_equal(len(attrs), len(values))
| 25.269231 | 47 | 0.707763 | 80 | 657 | 5.5625 | 0.3375 | 0.089888 | 0.080899 | 0.085393 | 0.408989 | 0.274157 | 0.274157 | 0.274157 | 0.274157 | 0.274157 | 0 | 0 | 0.193303 | 657 | 25 | 48 | 26.28 | 0.839623 | 0 | 0 | 0.333333 | 0 | 0 | 0.028481 | 0 | 0 | 0 | 0 | 0 | 0.333333 | 1 | 0.166667 | false | 0 | 0.222222 | 0 | 0.388889 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fac10195498b6e1b77a6635ec6e48a1790c2cc27 | 1,532 | py | Python | recognise_in_live_video.py | mrdarwin4921/Face-Detection- | 151532db85b70f97192421349e629ad9c548d302 | [
"Apache-2.0"
] | 1 | 2020-10-01T07:57:31.000Z | 2020-10-01T07:57:31.000Z | recognise_in_live_video.py | mrdarwin4921/Face-Detection- | 151532db85b70f97192421349e629ad9c548d302 | [
"Apache-2.0"
] | null | null | null | recognise_in_live_video.py | mrdarwin4921/Face-Detection- | 151532db85b70f97192421349e629ad9c548d302 | [
"Apache-2.0"
] | 1 | 2020-10-01T06:02:24.000Z | 2020-10-01T06:02:24.000Z | from face_normalisation import get_normalised_faces
from train_model import *
import cv2
cap = cv2.VideoCapture(0)
faceCascade = cv2.CascadeClassifier('P:\\GIT_FILE\\Face_Recognition\\OpenCVDemo\\haarcascade_frontalface_default.xml')
while True:
ret, frame = cap.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
face_coord = faceCascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=5)
if len(face_coord):
faces = get_normalised_faces(gray, face_coord)
for i, face in enumerate(faces):
pred, conf = rec_fisher.predict(face)
image, labels, labels_dict = collect_dataset()
            threshold = 1000
            if conf < threshold:
                # map the recogniser distance to a rough confidence percentage
                per = int((threshold - conf) / threshold * 100)
cv2.putText(frame, labels_dict[pred].capitalize() + str(per),
(face_coord[i][0], face_coord[i][1] - 10),
cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1)
else:
cv2.putText(frame, "Unknown",
(face_coord[i][0], face_coord[i][1] - 10),
cv2.FONT_HERSHEY_DUPLEX, 1, (0, 255, 0), 1)
cv2.rectangle(frame, (face_coord[i][0], face_coord[i][1]), (face_coord[i][0] + face_coord[i][2], face_coord[i][1] + face_coord[i][3]), (255, 0, 0), 1)
cv2.imshow('frame', frame)
    k = cv2.waitKey(30) & 0xff
    if k == 27:  # ESC quits the loop
        break
cap.release()
cv2.destroyAllWindows()
| 38.3 | 163 | 0.575718 | 191 | 1,532 | 4.450262 | 0.434555 | 0.137647 | 0.117647 | 0.051765 | 0.181176 | 0.164706 | 0.164706 | 0.115294 | 0.089412 | 0.089412 | 0 | 0.05915 | 0.293734 | 1,532 | 39 | 164 | 39.282051 | 0.726433 | 0 | 0 | 0.064516 | 0 | 0 | 0.060951 | 0.052914 | 0 | 0 | 0.002679 | 0 | 0 | 1 | 0 | false | 0 | 0.096774 | 0 | 0.096774 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fac16197f12780b2bd3a10bf8f76be34cd94385f | 721 | py | Python | main.py | ZhouBo20171229/- | 56c9d859d6931cd971419c5225199acca6c189e5 | [
"MIT"
] | null | null | null | main.py | ZhouBo20171229/- | 56c9d859d6931cd971419c5225199acca6c189e5 | [
"MIT"
] | null | null | null | main.py | ZhouBo20171229/- | 56c9d859d6931cd971419c5225199acca6c189e5 | [
"MIT"
] | null | null | null | from RoiMatching import *
def Roimatching(RoiZipPath1, RoiZipPath2):
# [Dic1, DirPath1] = DicBuild('C:\Result\JR1.zip')
# [Dic2, DirPath2] = DicBuild('C:\Result\JR4.zip')
[Dic1, DirPath1] = DicBuild(RoiZipPath1)
[Dic2, DirPath2] = DicBuild(RoiZipPath2)
    # compute the matching once instead of calling Match twice
    # (assumes Match is a pure lookup with no side effects)
    matches = Match(Dic1, Dic2)
    Rename(matches[0], DirPath1)
    Rename(matches[1], DirPath2)
if __name__ == '__main__':
# print(os.path.dirname(os.path.realpath(__file__)))
CurrentProjectPath = os.path.dirname(os.path.realpath(__file__))
RoiZipPath1 = os.path.join(CurrentProjectPath, 'test1.zip')
RoiZipPath2 = os.path.join(CurrentProjectPath, 'test2.zip')
    Roimatching(RoiZipPath1, RoiZipPath2)  # both arguments are ROI .zip files
| 36.05 | 69 | 0.682386 | 79 | 721 | 6.025316 | 0.443038 | 0.07563 | 0.138655 | 0.079832 | 0.130252 | 0.130252 | 0.130252 | 0 | 0 | 0 | 0 | 0.046434 | 0.163662 | 721 | 19 | 70 | 37.947368 | 0.742952 | 0.224688 | 0 | 0 | 0 | 0 | 0.048689 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.090909 | 0 | 0.181818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fac17ff2df44015d75bec1cba8ddd57067048691 | 861 | py | Python | Python/lc_695_max_area_of_island.py | cmattey/leetcode_problems | fe57e668db23f7c480835c0a10f363d718fbaefd | [
"MIT"
] | 6 | 2019-07-01T22:03:25.000Z | 2020-04-06T15:17:46.000Z | Python/lc_695_max_area_of_island.py | cmattey/leetcode_problems | fe57e668db23f7c480835c0a10f363d718fbaefd | [
"MIT"
] | null | null | null | Python/lc_695_max_area_of_island.py | cmattey/leetcode_problems | fe57e668db23f7c480835c0a10f363d718fbaefd | [
"MIT"
] | 1 | 2020-04-01T22:31:41.000Z | 2020-04-01T22:31:41.000Z | # 695. Max Area of Island
# Time: O(size(grid))
# Space: O(1) except recursion stack, since modifying grid in-place else O(size(grid))
from typing import List
class Solution:
def maxAreaOfIsland(self, grid: List[List[int]]) -> int:
max_count = 0
for row in range(len(grid)):
for col in range(len(grid[0])):
if grid[row][col]==1:
count = self.dfs(grid, row, col)
max_count = max(max_count, count)
return max_count
def dfs(self, grid, row, col):
if row not in range(len(grid)) or col not in range(len(grid[0])):
return 0
if grid[row][col]==1:
grid[row][col]='#'
return 1 + self.dfs(grid, row+1, col) + self.dfs(grid, row, col+1) + self.dfs(grid, row-1, col) + self.dfs(grid, row, col-1)
else:
return 0
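
# Worked example (hypothetical grid): the three connected 1s form the largest
# island, so the expected answer is 3.
#
#   Solution().maxAreaOfIsland([[0, 1, 1],
#                               [0, 1, 0]])  # -> 3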
| 25.323529 | 136 | 0.535424 | 130 | 861 | 3.515385 | 0.292308 | 0.137856 | 0.153173 | 0.153173 | 0.365427 | 0.221007 | 0.159737 | 0.159737 | 0.159737 | 0.159737 | 0 | 0.027491 | 0.324042 | 861 | 33 | 137 | 26.090909 | 0.757732 | 0.148664 | 0 | 0.235294 | 0 | 0 | 0.001372 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0 | 0 | 0.411765 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fac57ace8db8b61ce13a568229153f9e64dbb9ce | 1,293 | py | Python | marshmallow_pagination/pages.py | fecgov/marshmallow-pagination | 3180169f93680ee5e07baf97d28d0913d9a3f036 | [
"MIT"
] | 5 | 2016-02-15T19:51:53.000Z | 2020-07-02T16:25:07.000Z | marshmallow_pagination/pages.py | jmcarp/marshmallow-pagination | 626a6a97e71874565d453286ec211662ee226335 | [
"MIT"
] | 3 | 2015-11-29T01:23:59.000Z | 2019-10-18T20:32:03.000Z | marshmallow_pagination/pages.py | fecgov/marshmallow-pagination | 3180169f93680ee5e07baf97d28d0913d9a3f036 | [
"MIT"
] | 4 | 2015-08-31T04:23:09.000Z | 2020-10-02T08:52:38.000Z | # -*- coding: utf-8 -*-
import abc
import collections
import six
class BasePage(six.with_metaclass(abc.ABCMeta, collections.Sequence)):
"""A page of results.
"""
def __init__(self, paginator, results):
self.paginator = paginator
self.results = results
def __len__(self):
return len(self.results)
def __getitem__(self, index):
return self.results[index]
@abc.abstractproperty
def info(self):
pass
class OffsetPage(BasePage):
def __init__(self, paginator, page, results):
self.page = page
super(OffsetPage, self).__init__(paginator, results)
@property
def info(self):
return {
'page': self.page,
'count': self.paginator.count,
'pages': self.paginator.pages,
'per_page': self.paginator.per_page,
}
class SeekPage(BasePage):
@property
def last_indexes(self):
if self.results:
return self.paginator._get_index_values(self.results[-1])
return None
@property
def info(self):
return {
'count': self.paginator.count,
'pages': self.paginator.pages,
'per_page': self.paginator.per_page,
'last_indexes': self.last_indexes,
}
| 23.089286 | 70 | 0.598608 | 140 | 1,293 | 5.307143 | 0.3 | 0.174966 | 0.044415 | 0.053836 | 0.263795 | 0.196501 | 0.196501 | 0.196501 | 0.196501 | 0.196501 | 0 | 0.002176 | 0.28925 | 1,293 | 55 | 71 | 23.509091 | 0.806311 | 0.035576 | 0 | 0.35 | 0 | 0 | 0.041902 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0.025 | 0.075 | 0.1 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fac6a2160ecfd9f8b512cd53db2ca3d7435549a9 | 11,805 | py | Python | venv/lib/python2.7/site-packages/ansible/modules/storage/netapp/na_ontap_snapshot.py | haind27/test01 | 7f86c0a33eb0874a6c3f5ff9a923fd0cfc8ef852 | [
"MIT"
] | 37 | 2017-08-15T15:02:43.000Z | 2021-07-23T03:44:31.000Z | venv/lib/python2.7/site-packages/ansible/modules/storage/netapp/na_ontap_snapshot.py | haind27/test01 | 7f86c0a33eb0874a6c3f5ff9a923fd0cfc8ef852 | [
"MIT"
] | 12 | 2018-01-10T05:25:25.000Z | 2021-11-28T06:55:48.000Z | venv/lib/python2.7/site-packages/ansible/modules/storage/netapp/na_ontap_snapshot.py | haind27/test01 | 7f86c0a33eb0874a6c3f5ff9a923fd0cfc8ef852 | [
"MIT"
] | 49 | 2017-08-15T09:52:13.000Z | 2022-03-21T17:11:54.000Z | #!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: na_ontap_snapshot
short_description: Manage NetApp Snapshots
extends_documentation_fragment:
- netapp.na_ontap
version_added: '2.6'
author:
- Chris Archibald (carchi@netapp.com), Kevin Hutton (khutton@netapp.com)
description:
- Create/Modify/Delete ONTAP snapshots
options:
state:
description:
- If you want to create/modify a snapshot, or delete it.
choices: ['present', 'absent']
default: present
snapshot:
description:
Name of the snapshot to be managed.
The maximum string length is 256 characters.
required: true
volume:
description:
- Name of the volume on which the snapshot is to be created.
required: true
async_bool:
description:
- If true, the snapshot is to be created asynchronously.
type: bool
comment:
description:
A human readable comment attached with the snapshot.
The size of the comment can be at most 255 characters.
snapmirror_label:
description:
A human readable SnapMirror Label attached with the snapshot.
Size of the label can be at most 31 characters.
ignore_owners:
description:
- if this field is true, snapshot will be deleted
even if some other processes are accessing it.
type: bool
snapshot_instance_uuid:
description:
- The 128 bit unique snapshot identifier expressed in the form of UUID.
vserver:
description:
- The Vserver name
new_comment:
description:
A human readable comment attached with the snapshot.
The size of the comment can be at most 255 characters.
This will replace the existing comment
'''
EXAMPLES = """
- name: create SnapShot
tags:
- create
na_ontap_snapshot:
state=present
snapshot={{ snapshot name }}
volume={{ vol name }}
comment="i am a comment"
vserver={{ vserver name }}
username={{ netapp username }}
password={{ netapp password }}
hostname={{ netapp hostname }}
- name: delete SnapShot
tags:
- delete
na_ontap_snapshot:
state=absent
snapshot={{ snapshot name }}
volume={{ vol name }}
vserver={{ vserver name }}
username={{ netapp username }}
password={{ netapp password }}
hostname={{ netapp hostname }}
- name: modify SnapShot
tags:
- modify
na_ontap_snapshot:
state=present
snapshot={{ snapshot name }}
new_comment="New comments are great"
volume={{ vol name }}
vserver={{ vserver name }}
username={{ netapp username }}
password={{ netapp password }}
hostname={{ netapp hostname }}
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppOntapSnapshot(object):
"""
Creates, modifies, and deletes a Snapshot
"""
def __init__(self):
self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=False, choices=[
'present', 'absent'], default='present'),
snapshot=dict(required=True, type="str"),
volume=dict(required=True, type="str"),
async_bool=dict(required=False, type="bool", default=False),
comment=dict(required=False, type="str"),
snapmirror_label=dict(required=False, type="str"),
ignore_owners=dict(required=False, type="bool", default=False),
snapshot_instance_uuid=dict(required=False, type="str"),
vserver=dict(required=True, type="str"),
new_comment=dict(required=False, type="str"),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True
)
parameters = self.module.params
# set up state variables
# These are the required variables
self.state = parameters['state']
self.snapshot = parameters['snapshot']
self.vserver = parameters['vserver']
# these are the optional variables for creating a snapshot
self.volume = parameters['volume']
self.async_bool = parameters['async_bool']
self.comment = parameters['comment']
self.snapmirror_label = parameters['snapmirror_label']
        # these are the optional variables for deleting a snapshot
self.ignore_owners = parameters['ignore_owners']
self.snapshot_instance_uuid = parameters['snapshot_instance_uuid']
        # These are the optional variables for modify.
        # You can NOT change a snapshot's name.
self.new_comment = parameters['new_comment']
if HAS_NETAPP_LIB is False:
self.module.fail_json(
msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_na_ontap_zapi(
module=self.module, vserver=self.vserver)
return
def create_snapshot(self):
"""
Creates a new snapshot
"""
snapshot_obj = netapp_utils.zapi.NaElement("snapshot-create")
# set up required variables to create a snapshot
snapshot_obj.add_new_child("snapshot", self.snapshot)
snapshot_obj.add_new_child("volume", self.volume)
# Set up optional variables to create a snapshot
if self.async_bool:
snapshot_obj.add_new_child("async", self.async_bool)
if self.comment:
snapshot_obj.add_new_child("comment", self.comment)
if self.snapmirror_label:
snapshot_obj.add_new_child(
"snapmirror-label", self.snapmirror_label)
try:
self.server.invoke_successfully(snapshot_obj, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error creating snapshot %s: %s' %
(self.snapshot, to_native(error)),
exception=traceback.format_exc())
def delete_snapshot(self):
"""
Deletes an existing snapshot
"""
snapshot_obj = netapp_utils.zapi.NaElement("snapshot-delete")
# Set up required variables to delete a snapshot
snapshot_obj.add_new_child("snapshot", self.snapshot)
snapshot_obj.add_new_child("volume", self.volume)
# set up optional variables to delete a snapshot
        if self.ignore_owners:
            # ZAPI element content must be text, so stringify the boolean
            snapshot_obj.add_new_child("ignore-owners", str(self.ignore_owners))
if self.snapshot_instance_uuid:
snapshot_obj.add_new_child(
"snapshot-instance-uuid", self.snapshot_instance_uuid)
try:
self.server.invoke_successfully(snapshot_obj, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error deleting snapshot %s: %s' %
(self.snapshot, to_native(error)),
exception=traceback.format_exc())
def modify_snapshot(self):
"""
Modify an existing snapshot
:return:
"""
snapshot_obj = netapp_utils.zapi.NaElement("snapshot-modify-iter")
# Create query object, this is the existing object
query = netapp_utils.zapi.NaElement("query")
snapshot_info_obj = netapp_utils.zapi.NaElement("snapshot-info")
snapshot_info_obj.add_new_child("name", self.snapshot)
query.add_child_elem(snapshot_info_obj)
snapshot_obj.add_child_elem(query)
# this is what we want to modify in the snapshot object
attributes = netapp_utils.zapi.NaElement("attributes")
snapshot_info_obj = netapp_utils.zapi.NaElement("snapshot-info")
snapshot_info_obj.add_new_child("name", self.snapshot)
snapshot_info_obj.add_new_child("comment", self.new_comment)
attributes.add_child_elem(snapshot_info_obj)
snapshot_obj.add_child_elem(attributes)
try:
self.server.invoke_successfully(snapshot_obj, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error modifying snapshot %s: %s' %
(self.snapshot, to_native(error)),
exception=traceback.format_exc())
def does_snapshot_exist(self):
"""
Checks to see if a snapshot exists or not
        :return: dict with the snapshot's current comment if it exists, None if it doesn't
"""
snapshot_obj = netapp_utils.zapi.NaElement("snapshot-get-iter")
desired_attr = netapp_utils.zapi.NaElement("desired-attributes")
snapshot_info = netapp_utils.zapi.NaElement('snapshot-info')
comment = netapp_utils.zapi.NaElement('comment')
# add more desired attributes that are allowed to be modified
snapshot_info.add_child_elem(comment)
desired_attr.add_child_elem(snapshot_info)
snapshot_obj.add_child_elem(desired_attr)
# compose query
query = netapp_utils.zapi.NaElement("query")
snapshot_info_obj = netapp_utils.zapi.NaElement("snapshot-info")
snapshot_info_obj.add_new_child("name", self.snapshot)
snapshot_info_obj.add_new_child("volume", self.volume)
query.add_child_elem(snapshot_info_obj)
snapshot_obj.add_child_elem(query)
result = self.server.invoke_successfully(snapshot_obj, True)
return_value = None
# TODO: Snapshot with the same name will mess this up,
# need to fix that later
if result.get_child_by_name('num-records') and \
int(result.get_child_content('num-records')) == 1:
attributes_list = result.get_child_by_name('attributes-list')
snap_info = attributes_list.get_child_by_name('snapshot-info')
return_value = {'comment': snap_info.get_child_content('comment')}
return return_value
def apply(self):
"""
Check to see which play we should run
"""
changed = False
comment_changed = False
netapp_utils.ems_log_event("na_ontap_snapshot", self.server)
existing_snapshot = self.does_snapshot_exist()
if existing_snapshot is not None:
if self.state == 'absent':
changed = True
elif self.state == 'present' and self.new_comment:
if existing_snapshot['comment'] != self.new_comment:
comment_changed = True
changed = True
else:
if self.state == 'present':
changed = True
if changed:
if self.module.check_mode:
pass
else:
if self.state == 'present':
if not existing_snapshot:
self.create_snapshot()
elif comment_changed:
self.modify_snapshot()
elif self.state == 'absent':
if existing_snapshot:
self.delete_snapshot()
self.module.exit_json(changed=changed)
def main():
"""
Creates, modifies, and deletes a Snapshot
"""
obj = NetAppOntapSnapshot()
obj.apply()
if __name__ == '__main__':
main()
| 36.890625 | 78 | 0.631173 | 1,362 | 11,805 | 5.269457 | 0.19163 | 0.033719 | 0.03344 | 0.027309 | 0.449352 | 0.374808 | 0.310854 | 0.28034 | 0.253031 | 0.253031 | 0 | 0.003173 | 0.279288 | 11,805 | 319 | 79 | 37.00627 | 0.840386 | 0.095553 | 0 | 0.356275 | 0 | 0 | 0.325267 | 0.009337 | 0 | 0 | 0 | 0.003135 | 0 | 1 | 0.02834 | false | 0.016194 | 0.020243 | 0 | 0.060729 | 0.004049 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fac822218bfb438f9537b18947c9a3111177db0b | 81,054 | py | Python | cloudbackup/client/agents.py | rackerlabs/cbu-sdk-python | 76647cdb2be19310f173b49a38e6a9b8077ee97d | [
"Apache-2.0"
] | 4 | 2015-02-10T14:28:12.000Z | 2016-12-26T22:52:07.000Z | cloudbackup/client/agents.py | rackerlabs/python-cloudbackup-sdk | 76647cdb2be19310f173b49a38e6a9b8077ee97d | [
"Apache-2.0"
] | 17 | 2015-01-22T21:58:36.000Z | 2018-01-25T19:47:43.000Z | cloudbackup/client/agents.py | rackerlabs/cbu-sdk-python | 76647cdb2be19310f173b49a38e6a9b8077ee97d | [
"Apache-2.0"
] | 9 | 2015-01-26T19:25:45.000Z | 2018-11-01T20:14:12.000Z | """
Rackspace Cloud Backup Agent API
"""
from __future__ import print_function
import datetime
import gzip
import hashlib
import json
import logging
import os
import requests
import time
import threading
import six
from cloudbackup.common.command import Command
requests.packages.urllib3.disable_warnings()
class ParameterError(Exception):
"""
Parameter Error Exception
"""
pass
# function for Agents class to use to keep a given agent awake
def _keep_agent_awake_thread_fn(my_notifier=None, userid=None, usertype=None,
credentials=None, method=None,
rse_app=None, rse_version=None,
rse_agentkey=None, rse_log=None,
rse_apihost=None, rse_period=None, apihost=None,
agent_id=None, api_version=None,
project_id=None):
"""
(Internal) Thread function that will periodically post the wake agent message and look for the specified agent
Aside from my_notifier, the function maintains its own objects internally in thread local data storage for thread-safety purposes
    Required parameters:
my_notifier - threading.Event object instance that signals thread termination
userid - username for Keystone/Identity authentication
usertype - user type see cloudbackup.client.auth.Authentication for details
credentials - apikey for Keystone/Identity authentication
method - authentication method see cloudbackup.client.auth.Authentication for details
rse_app - RSE Application Name
rse_version - RSE Application Version
rse_agentkey - RSE Channel to listen to
rse_period - period between wake agent calls
apihost - Rackspace Cloud Backup API URL
    api_version - Rackspace Cloud Backup API version
agent_id - machine agent identifier for the agent to monitor for
project_id - for Rackspace Cloud Backup API version 2 and newer the tenantid (aka project_id) is required
    Optional parameters:
rse_log - Base log file name, the thread will append data to create a unique RSE log file name for the thread's RSE queries. If not desired, specify None
rse_apihost - RSE API URL See cloudbackup.clients.rse.Rse for details
"""
if None in (my_notifier, userid, usertype, credentials, method, rse_app, rse_version, rse_agentkey, rse_period, apihost, agent_id, api_version):
msg_missing = []
if my_notifier is None:
msg_missing.append('my_notifier')
if userid is None:
msg_missing.append('userid')
if usertype is None:
msg_missing.append('usertype')
if credentials is None:
msg_missing.append('credentials')
if method is None:
msg_missing.append('method')
if rse_app is None:
msg_missing.append('rse_app')
if rse_version is None:
msg_missing.append('rse_version')
if rse_agentkey is None:
msg_missing.append('rse_agentkey')
if rse_period is None:
msg_missing.append('rse_period')
if apihost is None:
msg_missing.append('apihost')
if agent_id is None:
msg_missing.append('agent_id')
if api_version is None:
msg_missing.append('api_version')
raise RuntimeError('Invalid parameters. Some required parameters were not properly specified. Missing parameters: {0}'.format(msg_missing))
if api_version > 1:
if project_id is None:
raise RuntimeError('Invalid parameters. api_version = {0} and project_id is missing.'
.format(api_version))
log = logging.getLogger(__name__)
# For threading simplicity we are going to create thread local version of each of the required objects
import cloudbackup.client.auth
import cloudbackup.client.rse
data = threading.local()
data.thread_id = threading.current_thread().ident
data.log_prefix = 'RSE Wakeup Thread[{0:}] Log'.format(data.thread_id)
data.auth_engine = cloudbackup.client.auth.Authentication(
userid,
credentials,
usertype=usertype,
method=method
)
data.agent_engine = cloudbackup.client.agents.Agents(True, data.auth_engine,
apihost, api_version,
project_id)
data.logfile = None
if rse_log is not None:
data.logfile = '{0:}.thread_{1:}'.format(rse_log, data.thread_id)
log.debug('{0:}: {1:}'.format(data.log_prefix, data.logfile))
log.debug('{0:}: Agent Id - {1:}'.format(data.log_prefix, agent_id))
log.debug('{0:}: RSE Period - {1:}'.format(data.log_prefix, rse_period))
data.rse_engine = cloudbackup.client.rse.Rse(rse_app, rse_version,
data.auth_engine, data.agent_engine,
rse_agentkey, logfile=data.logfile,
apihost=rse_apihost,
api_version=api_version,
project_id=project_id)
def __check_notifier(notifier):
"""
Simple wrapper to check the notifier and return whether or not the loop should exit
Parameters:
notifier - threading.Event object instance
Returns:
True if the loop should continue (event is not set)
False if the loop should terminate (event is set)
"""
if notifier.is_set():
notifier.clear()
log.debug('{0:}: Detected termination.'.format(data.log_prefix))
return False
return True
# 10 second timeout
rse_timeout = 10000
continue_loop = True
while continue_loop:
# Check the thread status before we try to wake the agent
continue_loop = __check_notifier(my_notifier)
if not continue_loop:
break
if data.agent_engine.WakeSpecificAgent(agent_id, data.rse_engine, rse_timeout):
# Agent is awake, so wait for the period before checking again
start_time = int(round(time.time() * 1000))
finish_time = start_time + rse_period
while ((int(round(time.time() * 1000))) < finish_time) and continue_loop:
# check the thread status every 1 second throughout the entire period wait
continue_loop = __check_notifier(my_notifier)
time.sleep(1)
else:
# Failed to wake the agent
log.debug('{0:}: Failed to wake agent - {1:}'.format(data.log_prefix, agent_id))
log.debug('{0:}: Terminating'.format(data.log_prefix))
class AgentDetailsNotAvailable(Exception):
"""
Agent Details are not available
"""
pass
class AgentConfigurationNotAvailable(Exception):
"""
    Agent Configuration is not available
"""
pass
class AgentLogLevel(Command):
"""
Object controlling the log levels for agents
"""
def __init__(self, sslenabled, authenticator, apihost, api_version=1, project_id=None):
        super(AgentLogLevel, self).__init__(sslenabled, apihost, '/')
self.log = logging.getLogger(__name__)
# save the ssl status for the various reinits done for each API call supported
self.sslenabled = sslenabled
self.authenticator = authenticator
self.loglevel = {}
        if isinstance(api_version, int):
            self.api_version = api_version
        else:
            self.api_version = 1
self.project_id = project_id
def __del__(self):
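        # Best-effort cleanup: pop any log levels still stacked so agents are
        # left at the verbosity they had before any PushLogLevel() calls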
try:
if len(self.loglevel):
for machine_agent in self.loglevel.keys():
while self.HasLogLevels(machine_agent):
self.PopLogLevel(machine_agent)
        except Exception:
            pass
def GetLogLevel(self, machine_agent_id):
"""
Retrieve the current log level for the agent from the API
The returned value will be one of the following:
Fatal
Error
Warn
Info
Debug
Trace
All
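        Example (sketch; assumes an authenticated `auth` engine, and an
        illustrative agent id and API host):
            loglevel = AgentLogLevel(True, auth, 'backup.example.com')
            current = loglevel.GetLogLevel(123456)   # e.g. 'Warn'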
"""
if self.api_version == 1:
self.ReInit(self.sslenabled,
'/v1.0/agent/logging/{0}'.format(machine_agent_id))
self.headers['X-Auth-Token'] = self.authenticator.AuthToken
self.headers['Content-Type'] = 'application/json; charset=utf-8'
else:
self.ReInit(self.sslenabled,
'/v{0}/{1}/agents/{2}'.format(self.api_version,
self.project_id,
machine_agent_id))
self.headers['X-Auth-Token'] = self.authenticator.AuthToken
self.headers['Content-Type'] = 'application/json; charset=utf-8'
self.headers['X-Project-Id'] = self.project_id
res = requests.get(self.Uri, headers=self.Headers)
if res.status_code == 200:
if self.api_version == 1:
# the text will be data like "Warn" (with quotes) so remove the quotes.
return res.text.replace('"', '')
else:
return res.json()['log_level']
else:
self.log.error('Unable to retrieve agent log level for machine agent id ' + str(machine_agent_id) + '. Server returned ' + str(res.status_code) + ': ' + res.text + ' Reason: ' + res.reason)
return ''
def SetLogLevel(self, machine_agent_id, level):
"""
Set the log level for the agent via the API
'level' must be one of the following:
Fatal
Error
Warn
Info
Debug
Trace
All
'level' may also be a numeric value inclusively between 1 and 7.
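        Example (sketch; `loglevel` as above, agent id illustrative):
            loglevel.SetLogLevel(123456, 'Debug')
            loglevel.SetLogLevel(123456, 5)   # equivalent numeric form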
"""
if self.api_version == 1:
self.log.info('v1 Set Log Level')
            if level not in ('Fatal', 'Error', 'Warn', 'Info', 'Debug', 'Trace', 'All', 1, 2, 3, 4, 5, 6, 7):
                raise ValueError('Log Level (' + str(level) + ') is not valid.')
self.ReInit(self.sslenabled, "/v1.0/agent/logging")
self.headers['X-Auth-Token'] = self.authenticator.AuthToken
self.headers['Content-Type'] = 'application/json; charset=utf-8'
o = {}
o['MachineAgentId'] = machine_agent_id
levels = {
'Fatal': 1,
'Error': 2,
'Warn': 3,
'Info': 4,
'Debug': 5,
'Trace': 6,
'All': 7
}
if level in levels:
o['LoggingLevelid'] = levels[level]
else:
o['LoggingLevelid'] = level
self.body = json.dumps(o)
res = requests.put(self.Uri, headers=self.Headers, data=self.Body)
else:
self.log.info('v{0} Set Log Level'.format(self.api_version))
# TODO: Need to rework this whole function
self.ReInit(self.sslenabled,
'/v{0}/{1}/agents/{2}'.format(self.api_version,
self.project_id,
machine_agent_id))
self.headers['X-Auth-Token'] = self.authenticator.AuthToken
self.headers['Content-Type'] = 'application/json; charset=utf-8'
self.headers['X-Project-Id'] = self.project_id
o = {}
o['op'] = 'replace'
o['path'] = '/log_level'
o['value'] = level.lower()
            patch_doc = [o]
            self.body = json.dumps(patch_doc)
self.log.debug('Updating Log Level: {0}'.format(o))
res = requests.patch(self.Uri, headers=self.Headers, data=self.Body)
if res.status_code == 204:
self.log.info('Updated log level to {0}'.format(level))
return True
else:
self.log.error('Unable to set the log level. Server returned ' + str(res.status_code) + ': ' + res.text + ' Reason: ' + res.reason)
return False
def PushLogLevel(self, machine_agent_id, level):
"""
Save the current log level and set 'level' as the new log level.
See SetLogLevel() for valid values of 'level'
Note: Log Levels are stored as a Stack. Use PopLogLevel() to restore the log level to the value prior to calling PushLogLevel().
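        Example (sketch; temporarily raise verbosity, then restore it):
            loglevel.PushLogLevel(123456, 'Trace')
            ...  # gather diagnostics at Trace level
            loglevel.PopLogLevel(123456)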
"""
        if machine_agent_id not in self.loglevel:
self.loglevel[machine_agent_id] = list()
current = self.GetLogLevel(machine_agent_id)
self.loglevel[machine_agent_id].append(current)
self.SetLogLevel(machine_agent_id, level)
def HasLogLevels(self, machine_agent_id):
"""
Returns whether or not there are any log levels for the given machine agent id
"""
if machine_agent_id in self.loglevel:
if len(self.loglevel[machine_agent_id]):
return True
else:
return False
else:
return False
def PopLogLevel(self, machine_agent_id):
"""
Restore the previous log level if it exists.
        If no log level has been saved, then it does nothing.
Note: Log Levels are stored as a Stack. Log Levels are added to the stack by calling PushLogLevel().
"""
if machine_agent_id in self.loglevel:
if len(self.loglevel[machine_agent_id]):
index = len(self.loglevel[machine_agent_id]) - 1
level = self.loglevel[machine_agent_id][index]
if self.SetLogLevel(machine_agent_id, level):
self.loglevel[machine_agent_id].pop(index)
self.log.info('Restored Machine Agent Id (' + str(machine_agent_id) + ') Log Level to ' + level)
else:
self.log.error('Error while resetting the log level for Machine Agent Id (' + str(machine_agent_id) + ') to ' + level)
else:
self.log.error('Machine Agent Id (' + str(machine_agent_id) + ') is already at the root log level. Nothing left to pop.')
else:
self.log.error('Machine Agent Id (' + str(machine_agent_id) + ') does not have any stacked log levels')
class AgentDetails(object):
"""
    Object describing a given Agent instance as reported by the Agent Details API endpoint
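    Example (sketch; `payload` is the parsed JSON body returned by the agent
    details endpoint, as fetched by Agents.GetAgentDetails()):
        info = AgentDetails(details=payload, version=2)
        print(info.agent_id, info.MachineName, info.IsEnabled)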
"""
def __init__(self, details, version=1):
# TODO: Replace this verification and use JSON Schema
self.version = version
if self.version == 1:
# Verify the details are at least what we expect before doing anything else
for prop in ('MachineAgentId', 'AgentVersion', 'Architecture', 'Flavor', 'BackupVaultSize', 'CleanupAllowed', 'Datacenter', 'IPAddress', 'IsDisabled', 'IsEncrypted', 'MachineName', 'OperatingSystem', 'OperatingSystemVersion', 'PublicKey', 'Status', 'TimeOfLastSuccessfulBackup', 'UseServiceNet', 'HostServerId'):
x = details[prop]
# TODO: Add JSON Schema validation for API v2
# Some cached data needed
self._details = details
@property
def agent_id(self):
"""
Agent ID
"""
if self.version == 1:
return self._details['MachineAgentId']
else:
return self._details['id']
@property
def AgentVersion(self):
"""
Agent Version
"""
if self.version == 1:
return self._details['AgentVersion']
else:
return self._details['version']
@property
def Architecture(self):
"""
System Architecture
"""
if self.version == 1:
return self._details['Architecture']
else:
return self._details['host']['os']['architecture']
@property
def Flavor(self):
"""
System Flavor
"""
if self.version == 1:
return self._details['Flavor']
else:
return self._details['host']['flavor']
@property
def BackupVaultSize(self):
"""
Current size of the Backup Vault
"""
# TODO: v2 does not have Backup Vault Size
return self._details['BackupVaultSize']
@property
def CleanupAllowed(self):
"""
Can Cleanup the Vault?
"""
# TODO: v2 does not have CleanupAllowed
return self._details['CleanupAllowed']
@property
def Datacenter(self):
"""
Which Datacenter does the system live in?
"""
if self.version == 1:
return self._details['Datacenter']
else:
return self._details['host']['region']
@property
def IPAddress(self):
"""
IP Address the agent registered with
"""
if self.version == 1:
return self._details['IPAddress']
else:
return next(address for address in
self._details['host']['addresses']
if address['version'] == 4)['addr']
@property
def IsDisabled(self):
"""
Is the Agent Disabled?
"""
if self.version == 1:
return self._details['IsDisabled']
else:
return not self._details['enabled']
@property
def IsEnabled(self):
"""
Is the Agent Enabled?
"""
return not self.IsDisabled
@property
def IsEncrypted(self):
"""
Are the backups encrypted?
"""
if self.version == 1:
return self._details['IsEncrypted']
else:
return self._details['vault']['encrypted']
@property
def MachineName(self):
"""
System Name as registered with Cloud Servers (Nova)
"""
if self.version == 1:
return self._details['MachineName']
else:
return self._details['name']
@property
def OperatingSystem(self):
"""
System Operating System
"""
if self.version == 1:
return self._details['OperatingSystem']
else:
return self._details['host']['os']['name']
@property
def OperatingSystemVersion(self):
"""
System Operating System Version
"""
if self.version == 1:
return self._details['OperatingSystemVersion']
else:
return self._details['host']['os']['version']
@property
def PublicKey(self):
"""
Public Key for encrypted backups
"""
if self.version == 1:
return self._details['PublicKey']
else:
            # TODO: Content of rsa_public_key is different from that of PublicKey. Is this function being used?
return self._details['rsa_public_key']
@property
def Status(self):
"""
Agent Status
"""
if self.version == 1:
return self._details['Status']
# TODO: API v2 provides http://docs.cloudbackupapi.apiary.io/#reference/agents/v2agentsidstatus/get-an-agent's-status
# to get a real-time status of the agent
@property
def TimeOfLastSuccessfulBackup(self):
"""
        When was the agent last successful with its backup?
"""
if self.version == 1:
return self._details['TimeOfLastSuccessfulBackup']
# TODO: API v2 provides this a little differently:
# First call http://docs.cloudbackupapi.apiary.io/#reference/configurations/v2configurationsid/get-details-about-a-configuration
# to get the last time a given configuration was backed up, then retrieve the details via
# http://docs.cloudbackupapi.apiary.io/#reference/backups/v2backupsid/get-details-about-a-backup to find the time of that backup
@property
def DateTimeOfLastSuccessfulBackup(self):
"""
        When was the agent last successful with its backup?
"""
if (not self.TimeOfLastSuccessfulBackup):
return None
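        # v1 reports timestamps in the ASP.NET JSON date format,
        # e.g. "/Date(1446586121000)/"; extract the millisecond Unix epoch
        # between the parentheses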
a = self.TimeOfLastSuccessfulBackup.split('(')
b = a[1].split(')')
unix_epoch = b[0]
return datetime.datetime.utcfromtimestamp(float(unix_epoch) / 1000.0)
@property
def UseServiceNet(self):
"""
Use RAX ServiceNet?
"""
if self.version == 1:
return self._details['UseServiceNet']
else:
return self._details['vault']['use_internal']
@property
def HostServerId(self):
"""
System Host Server Identifier for Cloud Servers (Nova)
"""
if self.version == 1:
return self._details['HostServerId']
else:
return self._details['host']['machine']['id']
class AgentConfiguration(object):
"""
Object describing the various Agent configurations
"""
def __init__(self, configuration, version=1):
# TODO: Replace this verification and use JSON Schema
self.version = version
if self.version == 1:
# Verify the configurations are at least what we expect before doing anything else
for prop in ('Volumes', 'SystemPreferences', 'UserPreferences', 'BackupConfigurations'):
x = configuration[prop]
# TODO: Add JSON Schema validation for API v2
self.log = logging.getLogger(__name__)
# some cached data needed
self._configuration = configuration
# Volumes[]
# -> DataServices
# -> Uri
# -> FailoverUri
# -> EncryptionEnabled
# -> Password
# -> NetworkDrives
# -> BackupVaultId
@property
def Volumes(self):
if self.version == 1:
return self._configuration['Volumes']
else:
# TODO: This is not a one to one mapping. The key/values are different
return self._configuration['vaults']
# SystemPreferences See SystemPreferences
# ->RateLimit
# ->AutoUpdate
# --> Enabled
# --> LatestVersion
# -> Environment
# --> MinimumDiskSpaceMb
# ---> Backup See MinimumBackupDiskSpaceMb()
# ---> Restore See MinimumRestoreDiskSpaceMb()
# ---> Cleanup See MinimumCleanupDiskSpaceMb()
# -> Logging
# --> Level See ConfigLogLevel()
@property
def SystemPreferences(self):
if self.version == 1:
return self._configuration['SystemPreferences']
else:
# TODO: This is not a one to one mapping. The key/values are different
return self._configuration['system_preferences']
@property
def ConfigLogLevel(self):
if self.version == 1:
return self.SystemPreferences['Logging']['Level']
else:
return self._configuration['system_preferences']['logging']['level']
@property
def MinimumBackupDiskSpaceMb(self):
if self.version == 1:
return self.SystemPreferences['Environment']['MinimumDiskSpaceMb']['Backup']
else:
return self._configuration['system_preferences']['environment']['minimum_disk_space_mb']['backup']
@property
def MinimumRestoreDiskSpaceMb(self):
if self.version == 1:
return self.SystemPreferences['Environment']['MinimumDiskSpaceMb']['Restore']
else:
return self._configuration['system_preferences']['environment']['minimum_disk_space_mb']['restore']
@property
def MinimumCleanupDiskSpaceMb(self):
if self.version == 1:
return self.SystemPreferences['Environment']['MinimumDiskSpaceMb']['Cleanup']
else:
return self._configuration['system_preferences']['environment']['minimum_disk_space_mb']['cleanup']
# UserPreferences
# -> CacheDirectory
# -> ThrottleBandwidth
@property
def UserPreferences(self):
if self.version == 1:
return self._configuration['UserPreferences']
else:
# TODO: Need to look into the equivalent value/object
return None
# BackupConfigurations[] See GetBackupConfigurationById(), GetBackupConfigurationByName()
# -> BackupPrescript
# -> BackupPostscript
# -> Id See GetBackupIds(), GetBackupIdNameMap()
# -> VolumeUri
# -> VolumeFailoverUri
# -> Name See GetBackupNames(), GetBackupNameIdMap()
# -> IsEnabled
# -> DaysToKeepOldFileVersions
# -> KeepOldFileVersionsIndefinitely
# -> Schedules[]
# --> Start
# --> End
# --> InitialScheduledTime
# --> Frequency
# --> TimeOfDay
# --> DayOfWeek
# --> HourlyInterval
# --> IsDST
# --> Offset
# -> Inclusions[]
# --> Pattern
# --> Type
# --> Module
# --> Args
# -> Exclusions[]
# --> Pattern
# --> Type
# --> Module
# --> Args
@property
def BackupConfigurations(self):
if self.version == 1:
return self._configuration['BackupConfigurations']
else:
# TODO: This is not a one to one mapping. The key/values are different
return self._configuration['configurations']
# Rse See GetRse()
# -> Channel See GetRseChannel()
# -> HostName See GetRseHost()
# -> Polling See GetRsePollingConfig()
# --> Interval
# ---> Idle
# ---> Active
# ---> RealTime
# --> Timeout
# ---> Idle
# ---> Active
# ---> RealTime
# -> Heartbeat See GetRseHeartbeatConfig()
# --> Interval
# ---> Idle
# ---> Active
# ---> RealTime
# --> Timeout
# ---> Idle
# ---> Active
# ---> RealTime
@property
def Rse(self):
if self.version == 1:
return self.SystemPreferences['Rse']
else:
# TODO: This is not a one to one mapping. The key/values are different
return self._configuration['system_preferences']['events']['rse']
@property
def RseChannel(self):
if self.version == 1:
return self.Rse['Channel']
else:
return self._configuration['system_preferences']['events']['rse']['channel']
@property
def RseHost(self):
if self.version == 1:
return self.Rse['HostName']
else:
return self._configuration['system_preferences']['events']['rse']['host']
@property
def RsePollingConfig(self):
if self.version == 1:
return self.Rse['Polling']
else:
return self._configuration['system_preferences']['events']['rse']['polling']
@property
def RseHeartbeatConfig(self):
if self.version == 1:
return self.Rse['Heartbeat']
else:
return self._configuration['system_preferences']['events']['rse']['heartbeat']
def GetBackupIds(self):
"""
Retrieve the list of Backup Configuration Ids for the agent as reported by GetAgentConfiguration()
"""
if self.version == 1:
backup_id = 'Id'
else:
backup_id = 'id'
backupids = set()
for backupconfig in self.BackupConfigurations:
backupids.add(backupconfig[backup_id])
return backupids
def GetBackupNames(self):
"""
Retrieve the list of Backup Configuration Names for the agent as reported by GetAgentConfiguration()
"""
if self.version == 1:
backup_name = 'Name'
else:
backup_name = 'name'
backupnames = set()
for backupconfig in self.BackupConfigurations:
backupnames.add(backupconfig[backup_name])
return backupnames
def GetBackupNameIdMap(self):
"""
Retrieve the list of Backup Configuration Names for the agent as reported by GetAgentConfiguration()
"""
if self.version == 1:
backup_name = 'Name'
backup_id = 'Id'
else:
backup_name = 'name'
backup_id = 'id'
backupnamemap = {}
for backupconfig in self.BackupConfigurations:
backupnamemap[backupconfig[backup_name]] = backupconfig[backup_id]
return backupnamemap
def GetBackupIdNameMap(self):
"""
Retrieve the list of Backup Configuration Names for the agent as reported by GetAgentConfiguration()
"""
if self.version == 1:
backup_name = 'Name'
backup_id = 'Id'
else:
backup_name = 'name'
backup_id = 'id'
backupidmap = {}
for backupconfig in self.BackupConfigurations:
backupidmap[backupconfig[backup_id]] = backupconfig[backup_name]
return backupidmap
def GetBackupIdFromName(self, backup_name):
"""
Translate the backup name into a backup id based on the agent data reported by GetAgentConfiguration()
Note: It would be more performant to simply retrieve the configuration by the name instead of doing the translation
"""
backupnamemap = self.GetBackupNameIdMap()
return backupnamemap[backup_name]
def GetBackupNameFromId(self, backup_id):
"""
Translate the backup id into a backup name based on the agent data reported by GetAgentConfiguration()
Note: It would be more performant to simply retrieve the configuration by the id instead of doing the translation
"""
backupidmap = self.GetBackupIdNameMap()
return backupidmap[backup_id]
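    # Example (sketch; `config` is an AgentConfiguration returned by
    # Agents.AgentConfiguration() after GetAgentConfiguration() succeeded):
    #
    #     for backup_id in config.GetBackupIds():
    #         print(backup_id, config.GetBackupNameFromId(backup_id))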
def GetBackupConfigurationById(self, backup_id):
"""
Retrieve the entire backup configuration for the agent given a backup id, data as reported by GetAgentConfiguration()
"""
if self.version == 1:
b_id = 'Id'
else:
b_id = 'id'
return next((backupconfig for backupconfig in self.BackupConfigurations if backupconfig[b_id] == backup_id), {})
def GetBackupConfigurationByName(self, backup_name):
"""
Retrieve the entire backup configuration for the agent given a backup id, data as reported by GetAgentConfiguration()
"""
if self.version == 1:
b_name = 'Name'
else:
b_name = 'name'
return next((backupconfig for backupconfig in self.BackupConfigurations if backupconfig[b_name] == backup_name), {})
def GetVaultDbContainer(self, backup_name=None):
"""
Retrieve the URI for the VaultDB, data as reported by GetAgentConfiguration()
"""
if self.version == 1:
container = None
if backup_name is not None:
backupconfig = self.GetBackupConfigurationByName(backup_name)
container = backupconfig['VolumeUri']
else:
container = self.Volumes[0]['Uri']
self.log.debug('VaultDB Container: ' + container)
return container[6:]
else:
vault_info = self._configuration['vaults'][0]
if vault_info['use_internal']:
vault_url = next(url for url in vault_info['links']
if url['rel'] == 'internalURL')['href']
else:
vault_url = next(url for url in vault_info['links']
if url['rel'] == 'publicURL')['href']
self.log.debug('VaultDB Container: ' + vault_url)
# strip the https:// section
return vault_url[8:]
def GetVaultDbPath(self, backup_name=None):
"""
Retrieve the URI for the VaultDB, data as reported by GetAgentConfiguration()
"""
try:
if self.version == 1:
vaultvolume = {}
if backup_name is not None:
backupconfig = self.GetBackupConfigurationByName(backup_name)
volumeuri = backupconfig['VolumeUri']
vaultvolume = {}
# As there may be numerous volumes we match it up against the backup configuration we are looking for
# Don't know if there is a better way or not...but this will work for now
for volume in self.Volumes:
if volume['Uri'] == volumeuri:
vaultvolume = volume
else:
vaultvolume = self.Volumes[0]
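                # Vault DBs live under a fixed prefix inside the vault
                # container, e.g. 'BACKUPS/v2.0/<BackupVaultId>'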
vaultdburi = 'BACKUPS/v2.0/' + vaultvolume['BackupVaultId']
self.log.debug('VaultDB Path: ' + vaultdburi)
return vaultdburi
else:
backupconfig = self.GetBackupConfigurationByName(backup_name)
vault_id = backupconfig['vault_id']
vaultdburi = 'BACKUPS/v2.0/{0}'.format(vault_id)
self.log.debug('VaultDB Path: ' + vaultdburi)
return vaultdburi
except LookupError:
self.log.error('Unable to access the Volume URI. Did GetAgentConfiguration get called first?')
return ''
def GetBundlePath(self, backup_name, bundle_id):
"""
Retrieve the URI for the Bundle
Depends on GetAgentConfiguration() to have already been called
"""
try:
if self.version == 1:
backupconfig = self.GetBackupConfigurationByName(backup_name)
volumeuri = backupconfig['VolumeUri']
vaultvolume = {}
# As there may be numerous volumes we match it up against the backup configuration we are looking for
# Don't know if there is a better way or not...but this will work for now
for volume in self.Volumes:
if volume['Uri'] == volumeuri:
vaultvolume = volume
vaultdburi = 'BACKUPS/v2.0/' + vaultvolume['BackupVaultId'] + '/BUNDLES/' + '{0:010}'.format(bundle_id)
self.log.debug('VaultDB Path: ' + vaultdburi)
return vaultdburi
else:
vault_db_url = self.GetVaultDbPath(backup_name)
vaultdburi = vault_db_url + '/BUNDLES/' + \
'{0:010}'.format(bundle_id)
self.log.debug('VaultDB Path: ' + vaultdburi)
return vaultdburi
except LookupError:
self.log.error('Unable to access the Volume URI. Did GetAgentConfiguration get called first?')
return ''
class Agents(Command):
"""
    Object defining HTTP REST API calls for interacting with the Rackspace Cloud Backup Agent
    Supports both the RAX v1.0 API and the v2 API
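    Example (sketch; `auth` is a cloudbackup.client.auth.Authentication
    instance, host and project id are illustrative):
        agents = Agents(True, auth, 'backup.example.com', api_version=2,
                        project_id='123456')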
"""
def __init__(self, sslenabled, authenticator, apihost, api_version=1, project_id=None):
"""
Initialize the Agent access
sslenabled - True if using HTTPS; otherwise False
authenticator - instance of cloudbackup.client.auth.Authentication to use
apihost - server to use for API calls
api_version - version of the API
project_id - Project Id used by API v2
"""
        super(Agents, self).__init__(sslenabled, apihost, '/')
self.log = logging.getLogger(__name__)
# save the ssl status for the various reinits done for each API call supported
self.sslenabled = sslenabled
self.authenticator = authenticator
# Some cached data needed, set to invalid values by default
self.agents = {}
self.configurations = {}
self.o = {}
self.snapshot_id = -1
self.wake_agent_threads = []
self.loglevel = AgentLogLevel(sslenabled, authenticator, apihost,
api_version, project_id)
        if isinstance(api_version, int):
            self.api_version = api_version
        else:
            self.api_version = 1
self.project_id = project_id
def __del__(self):
del self.loglevel
# Loop through and tell all threads to terminate
# Do not wait for them to terminate here so that all get the
# message in a timely manner
for a_thread in self.wake_agent_threads:
self.log.debug('Telling RSE Wakeup Thread {0:} to terminate'.format(a_thread['id']))
a_thread['terminator'].set()
# Now repeat and wait for them to terminate
for a_thread in self.wake_agent_threads:
self.log.debug('Waiting for RSE Wakeup Thread {0:} to rejoin'.format(a_thread['id']))
a_thread['thread'].join()
def WakeAgents(self):
"""
Using the API move all agents to active poll mode
Note: This may require up to 60 seconds for the agents to respond.
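        Example (sketch; the return value is the raw HTTP status code):
            status = agents.WakeAgents()   # 200 (v1) or 202 (v2) on success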
"""
if self.api_version == 1:
self.ReInit(self.sslenabled, "/v1.0/user/wakeupagents")
self.headers['X-Auth-Token'] = self.authenticator.AuthToken
self.headers['Content-Type'] = 'application/json; charset=utf-8'
self.log.debug('headers: %s', self.Headers)
res = requests.post(self.Uri, headers=self.Headers)
else:
self.ReInit(self.sslenabled,
'/v{0}/{1}/events'.format(self.api_version,
self.project_id))
self.headers['X-Auth-Token'] = self.authenticator.AuthToken
self.headers['Content-Type'] = 'application/json; charset=utf-8'
self.headers['X-Project-Id'] = self.project_id
self.o = {}
self.o['event'] = 'agent_activate'
self.o['mode'] = 'active'
self.body = json.dumps(self.o)
self.log.debug('headers: %s', self.Headers)
res = requests.post(self.Uri, headers=self.Headers, data=self.Body)
self.log.debug('Wake Agent: code = {0:}, reason = {1:}'.format(res.status_code, res.reason))
return res.status_code
def WakeSpecificAgent(self, machine_agent_id, rse, timeoutMilliseconds, keep_agent_awake=False, wake_period=None):
"""
Using the API to move all agents to active poll mode and then check that a specific agent is polling.
machine_agent_id - agent id for the specific agent to look for
rse - instance of the cloudbackup.client.rse.Rse class to use for listening to RSE
timeoutMilliseconds - maximum time to check RSE for the data
keep_agent_awake - whether or not to start a thread to keep posting the wake agent
wake_period - period between wake agent calls, should be less than the timeout interval for the current state of the agent
normally 70 seconds should be fine. If set to None, use the Real-Time Timeout as a basis and set appropriately defaulting to 70 if too small
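        Example (sketch; `agents` and `rse` are pre-configured client objects,
        agent id and timeout are illustrative):
            if agents.WakeSpecificAgent(123456, rse, 30000, keep_agent_awake=True):
                ...  # agent is polling; do work, then stop the wake-up thread
                agents.StopKeepAgentWake(123456)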
"""
# For up to timeoutMilliseconds try to wake all the agents on the account in use
start_time = int(round(time.time() * 1000))
finish_time = start_time + timeoutMilliseconds
wokeall = False
wakeup_status_code = 0
while ((int(round(time.time() * 1000))) < finish_time):
wakeup_status_code = self.WakeAgents()
if self.api_version == 1:
valid_code = 200
else:
valid_code = 202
if wakeup_status_code == valid_code:
wokeall = True
break
if wokeall:
            # For up to timeoutMilliseconds look for the specified agent's heart beat
start_time = int(round(time.time() * 1000))
finish_time = start_time + timeoutMilliseconds
woke_agent = False
while ((int(round(time.time() * 1000))) < finish_time):
if rse.MonitorForHeartBeat(machine_agent_id):
woke_agent = True
break
if not woke_agent:
# Unable to find the agent's heart beat within the timeout period
self.log.error('Unable to locate agent id (' + str(machine_agent_id) + ') in RSE Heartbeats')
if woke_agent:
if keep_agent_awake:
if wake_period is None:
self.GetAgentConfiguration(machine_agent_id)
agent_config = self.AgentConfiguration(machine_agent_id)
rse_heartbeat_config = agent_config.RseHeartbeatConfig
self.log.debug('Rse config: {0:}'.format(rse_heartbeat_config))
if self.api_version == 1:
wake_period = (rse_heartbeat_config['Timeout']
['RealTime'] / 1000)
else:
wake_period = (rse_heartbeat_config['timeout_ms']
['real_time'] / 1000)
# create a buffer
if wake_period > 6:
wake_period = wake_period - 5
elif wake_period > 2:
wake_period = wake_period - 1
else:
# if it's too small then default to a reasonable time frame
# UX uses approximately 70 seconds
wake_period = 70
self.KeepAgentAwake(machine_agent_id, rse, wake_period)
return woke_agent
else:
# Unable to use the API to wake the agents within the timeout period
self.log.error('Unable to wake all agents. Status Code = ' + str(wakeup_status_code))
return False
def KeepAgentAwake(self, machine_agent_id, rse, period):
"""
Start a thread that will periodically post Wake Agent and check that the agent is alive
Parameters:
machine_agent_id - machine agent id of the agent to monitor for heart beats
rse - RSE instance configured for the agent
period - period between posting wake agent messages
        Note: the period starts after a successful detection of the agent heartbeat
"""
wake_agent_thread = {}
wake_agent_thread['id'] = machine_agent_id
self.wake_agent_threads.append(wake_agent_thread)
user_credentials = self.authenticator.InitialAuthCredentials
for a_thread in self.wake_agent_threads:
if a_thread['id'] == machine_agent_id:
self.log.debug('Starting RSE Wakeup Thread for agent: {0:}'.format(machine_agent_id))
a_thread['terminator'] = threading.Event()
a_thread_kwargs = {
'my_notifier': wake_agent_thread['terminator'],
'userid': user_credentials['userid'],
'credentials': user_credentials['credentials'],
'usertype': user_credentials['usertype'],
'method': user_credentials['method'],
'rse_app': rse.rsedata.app,
'rse_version': rse.rsedata.appVersion,
'rse_agentkey': rse.agentkey,
'rse_period': period,
'apihost': self.apihost,
'api_version': self.api_version,
'agent_id': machine_agent_id,
'project_id': self.project_id,
'rse_log': rse.rselogfile,
'rse_apihost': rse.apihost
}
a_thread['thread'] = threading.Thread(target=_keep_agent_awake_thread_fn,
kwargs=a_thread_kwargs
)
a_thread['thread'].start()
break
def StopKeepAgentWake(self, machine_agent_id):
"""
Stop the thread that is posting the wake agents and monitoring for the given machine agent id
Parameters:
machine_agent_id - the machine agent identifier that is being monitored for
"""
for a_thread in self.wake_agent_threads:
if a_thread['id'] == machine_agent_id:
self.log.debug('Telling for RSE Wakeup Thread {0:} for agent {1:} to terminate'.format(a_thread['id'], machine_agent_id))
a_thread['terminator'].set()
self.log.debug('Waiting for RSE Wakeup Thread {0:} to rejoin'.format(a_thread['id']))
a_thread['thread'].join()
self.wake_agent_threads.remove(a_thread)
break
#
# Agent Details
#
def GetAgentDetails(self, machine_agent_id):
"""
Retrieve all the information regarding the specified Agent ID
"""
self.agents = {}
if self.api_version == 1:
self.ReInit(self.sslenabled,
'/v1.0/agent/{0}'.format(machine_agent_id))
self.headers['X-Auth-Token'] = self.authenticator.AuthToken
self.headers['Content-Type'] = 'application/json; charset=utf-8'
else:
self.ReInit(self.sslenabled,
'/v{0}/{1}/agents/{2}'.format(self.api_version,
self.project_id,
machine_agent_id))
self.headers['X-Auth-Token'] = self.authenticator.AuthToken
self.headers['Content-Type'] = 'application/json; charset=utf-8'
self.headers['X-Project-Id'] = self.project_id
res = requests.get(self.Uri, headers=self.Headers)
if res.status_code == 200:
self.log.debug('Agent Details(id: {0:}) - {1:}'.format(machine_agent_id, res.json()))
self.agents[machine_agent_id] = AgentDetails(details=res.json(), version=self.api_version)
return True
else:
self.log.error('Unable to retrieve agent details for agent id ' + str(machine_agent_id) + ' system return code ' + str(res.status_code) + ' reason = ' + res.reason)
return False
def GetAgentsFromApi(self):
"""
Lookup the associated agents and return a list of their IDs
"""
if self.api_version == 1:
self.ReInit(self.sslenabled,
'/v1.0/user/agents')
self.headers['X-Auth-Token'] = self.authenticator.AuthToken
self.headers['Content-Type'] = 'application/json; charset=utf-8'
res = requests.get(self.Uri, headers=self.Headers)
if res.status_code == 200:
result_list = []
results = res.json()
for agent in results:
result_list.append(agent['MachineAgentId'])
return result_list
else:
self.log.error('Unable to retrieve agent list system return code ' + str(res.status_code) + ' reason = ' + res.reason)
return []
else:
self.ReInit(self.sslenabled,
'/v{0}/{1}/agents'.format(
self.api_version,
self.project_id
))
self.headers['X-Auth-Token'] = self.authenticator.AuthToken
self.headers['Content-Type'] = 'application/json; charset=utf-8'
self.headers['X-Project-Id'] = self.project_id
res = requests.get(self.Uri, headers=self.Headers)
if res.status_code == 200:
result_list = []
results = res.json()
for agent in results['agents']:
result_list.append(agent['id'])
return result_list
else:
self.log.error('Unable to retrieve agent list system return code ' + str(res.status_code) + ' reason = ' + res.reason)
return []
@property
def GetAgentIds(self):
"""
Return a list of known agent ids for agents details retrieved by GetAgentDetails()
"""
return self.agents.keys()
def AgentDetails(self, machine_agent_id):
"""
The AgentDetails object describing the agent with the given machine_agent_id
"""
try:
return self.agents[machine_agent_id]
except LookupError:
msg = 'Machine Agent Id ({0:}) not available. Did you call GetAgentDetails() for that agent?'.format(machine_agent_id)
self.log.error(msg)
raise AgentDetailsNotAvailable(msg)
#
# Agent Logs
#
def GetAgentLogFile(self, machine_agent_id):
"""
Request a log file upload from the agent
"""
if self.api_version == 1:
self.ReInit(self.sslenabled,
'/v1.0/agent/requestlog/{0}'.format(
machine_agent_id
))
self.headers['X-Auth-Token'] = self.authenticator.AuthToken
self.headers['Content-Type'] = 'application/json; charset=utf-8'
res = requests.post(self.Uri, headers=self.Headers)
if res.status_code == 200:
logfile_request_id = res.json()
return logfile_request_id
else:
self.log.error('Unable to request agent log file upload for agent id ' + str(machine_agent_id) + '. Server returned ' + str(res.status_code) + ': ' + res.text + ' Reason: ' + res.reason)
return None
else:
self.ReInit(self.sslenabled,
'/v{0}/{1}/agents/{2}/logfiles'.format(
self.api_version,
self.project_id,
machine_agent_id
))
self.headers['X-Auth-Token'] = self.authenticator.AuthToken
self.headers['Content-Type'] = 'application/json; charset=utf-8'
self.headers['X-Project-Id'] = self.project_id
res = requests.post(self.Uri, headers=self.Headers)
if res.status_code == 202:
logfile_request_id = res.json()['id']
return logfile_request_id
else:
self.log.error('Unable to request agent log file upload for agent id ' + str(machine_agent_id) + '. Server returned ' + str(res.status_code) + ': ' + res.text + ' Reason: ' + res.reason)
return None
def GetExistingAgentLogFiles(self, machine_agent_id):
"""
List the existing agent log files
"""
if self.api_version == 1:
self.ReInit(self.sslenabled,
'/v1.0/agent/logfiles/{0}'.format(
machine_agent_id
))
self.headers['X-Auth-Token'] = self.authenticator.AuthToken
self.headers['Content-Type'] = 'application/json; charset=utf-8'
else:
self.ReInit(self.sslenabled,
'/v{0}/{1}/agents/{2}/logfiles'.format(
self.api_version,
self.project_id,
machine_agent_id
))
self.headers['X-Auth-Token'] = self.authenticator.AuthToken
self.headers['Content-Type'] = 'application/json; charset=utf-8'
self.headers['X-Project-Id'] = self.project_id
res = requests.get(self.Uri, headers=self.Headers)
result = []
if res.status_code == 200:
if self.api_version == 1:
for logfile_entry in res.json():
e = {
'id': logfile_entry['DisplayName'],
'date': logfile_entry['DisplayName'],
'status': logfile_entry['CurrentState'],
'link': logfile_entry['FilePath']
}
result.append(e)
else:
for logfile_entry in res.json()['logfiles']:
e = {
'id': logfile_entry['id'],
'date': logfile_entry['date'],
'status': logfile_entry['state'],
'link': None
}
# Note: rel == logfile_temp_url is only present
# if the file has been uploaded
for url_set in logfile_entry['links']:
if url_set['rel'] == 'logfile_temp_url':
e['link'] = url_set['href']
result.append(e)
else:
self.log.error('Unable to list uploaded agent log files for agent id ' + str(machine_agent_id) + '. Server returned ' + str(res.status_code) + ': ' + res.text + ' Reason: ' + res.reason)
return result
def DownloadAgentLogFile(self, logfile_data, target_filename):
try:
try:
headers = {
'X-Auth-Token': self.authenticator.AuthToken
}
res = requests.get(
logfile_data['link'],
stream=True,
                    headers=headers  # use the freshly built auth header
)
except requests.exceptions.SSLError as ex:
self.log.error('Requests SSLError: {0}'.format(str(ex)))
res = requests.get(logfile_data['link'], verify=False, stream=True)
if res.status_code == 404:
raise UserWarning('Temp URL invalid')
elif res.status_code >= 300:
raise UserWarning('Server responded unexpectedly during download (Code: ' + str(res.status_code) + ' )')
file_chunk_size = 4 * 1024 * 1024
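            # Swift-style temp-URL downloads expose the MD5 of the stored
            # (still gzipped) object in the Etag header; capture it so the
            # download can be verified after streaming completes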
etag_match = None
if 'Etag' in res.headers:
etag_match = res.headers['Etag']
meter = {}
meter['bytes-total'] = int(res.headers['Content-Length'])
meter['bytes-remaining'] = int(res.headers['Content-Length'])
meter['bar-count'] = 50
meter['bytes-per-bar'] = meter['bytes-remaining'] // meter['bar-count']
meter['block-size'] = min(file_chunk_size, meter['bytes-per-bar'])
meter['chunks-per-bar'] = meter['bytes-per-bar'] // meter['block-size']
meter['chunks'] = 0
meter['bars-remaining'] = meter['bar-count']
meter['bars-completed'] = 0
self.log.info('Downloading logfile(gz): {0} bytes...'.format(meter['bytes-remaining']))
self.log.info('[' + ' ' * meter['bar-count'] + ']')
gzip_file = target_filename + '.gz'
compressed_md5_hash = hashlib.md5()
with open(gzip_file, 'wb') as gzipped_db:
for lf_chunk in res.iter_content(chunk_size=meter['block-size']):
gzipped_db.write(lf_chunk)
compressed_md5_hash.update(lf_chunk)
gzipped_db.flush()
os.fsync(gzipped_db.fileno())
meter['chunks'] += 1
if meter['chunks'] == meter['chunks-per-bar']:
meter['chunks'] = 0
meter['bars-completed'] += 1
meter['bars-remaining'] -= 1
self.log.info('[' + '-' * meter['bars-completed'] + ' ' * meter['bars-remaining'] + ']')
if etag_match is not None:
if etag_match.upper() != compressed_md5_hash.hexdigest().upper():
raise UserWarning(
'Failed to download. {0} != {1}'.format(
etag_match.upper(),
compressed_md5_hash.hexdigest().upper()
)
)
self.log.info('Decompressing the file...')
gz_lf_file = gzip.open(gzip_file, 'rb')
with open(target_filename, 'wb') as lf_file:
decompress_continue_loop = True
while decompress_continue_loop:
filechunk = gz_lf_file.read(file_chunk_size)
if len(filechunk) == 0:
decompress_continue_loop = False
else:
lf_file.write(filechunk)
gz_lf_file.close()
return True
except Exception as ex:
self.log.error('Failed to download file: {0}'.format(ex))
return False
#
# Agent Configurations
#
def GetAgentConfiguration(self, machine_agent_id):
"""
Retrieve the Configuration for the given agent
"""
if self.api_version == 1:
self.ReInit(self.sslenabled,
'/v1.0/agent/configuration/{0}'.format(
machine_agent_id
)
)
self.headers['X-Auth-Token'] = self.authenticator.AuthToken
self.headers['Content-Type'] = 'application/json; charset=utf-8'
else:
self.ReInit(self.sslenabled,
'/v{0}/{1}/agents/{2}/configuration'.format(
self.api_version,
self.project_id,
machine_agent_id
)
)
self.headers['X-Auth-Token'] = self.authenticator.AuthToken
self.headers['Content-Type'] = 'application/json; charset=utf-8'
self.headers['X-Project-Id'] = self.project_id
res = requests.get(self.Uri, headers=self.Headers)
if res.status_code == 200:
self.configurations[machine_agent_id] = AgentConfiguration(
configuration=res.json(), version=self.api_version)
return True
else:
self.log.error('Unable to retrieve agent configuration for agent id ' + str(machine_agent_id) + '. Server returned ' + str(res.status_code) + ': ' + res.text + ' Reason: ' + res.reason)
return False
@property
def AgentConfigurationIds(self):
"""
Return a list of known agent ids for agent configurations retrieved by GetAgentConfiguration()
"""
return self.configurations.keys()
def AgentConfiguration(self, machine_agent_id):
"""
Return the AgentConfiguration object containing the configuration for the agent with the given machine_agent_id
"""
try:
return self.configurations[machine_agent_id]
except LookupError:
msg = 'Machine Agent Id ({0:}) not available. Did you call GetAgentConfiguration() for that agent?'.format(machine_agent_id)
self.log.error(msg)
raise AgentConfigurationNotAvailable(msg)
#
# Agent Activity
#
def GetAgentLatestActivity(self, machine_agent_id):
"""
Retrieve the current activities of the agent
"""
# Get the agent configuration so that we know we can lookup the backup configs in order
# to display a useful name about the activity to the user
self.GetAgentConfiguration(machine_agent_id)
agent_config = self.AgentConfiguration(machine_agent_id)
if self.api_version == 1:
self.ReInit(self.sslenabled,
                        '/v1.0/{0}/system/activity/{1}'.format(
self.authenticator.AuthTenantId,
machine_agent_id
)
)
self.headers['X-Auth-Token'] = self.authenticator.AuthToken
self.headers['Content-Type'] = 'application/json; charset=utf-8'
else:
self.ReInit(self.sslenabled,
'/v{0}/{1}/agents/{2}/activities'.format(
self.api_version,
self.project_id,
machine_agent_id
)
)
self.headers['X-Auth-Token'] = self.authenticator.AuthToken
self.headers['Content-Type'] = 'application/json; charset=utf-8'
self.headers['X-Project-Id'] = self.project_id
res = requests.get(self.Uri, headers=self.Headers)
if res.status_code == 200:
results = []
if self.api_version == 1:
for activity in res.json():
activity_name = ''
if activity['ParentId'] != 0:
try:
activity_name = '{0} - {1}'.format(
activity['Type'],
agent_config.GetBackupNameFromId(
activity['ParentId']
)
)
                        except Exception:
activity_name = '{0} - UNKNOWN({1})'.format(
activity['Type'],
activity['ParentId']
)
else:
activity_name = '{0} - {1}'.format(
activity['Type'],
activity['DisplayName']
)
results.append(
{
'id': activity['Id'],
'name': activity_name,
'type': activity['Type'],
'state': activity['CurrentState'],
'time': activity['TimeOfActivity']
}
)
else:
activities = res.json()['activities']
activities.reverse()
for activity in activities:
activity_name = ''
                    if 'configuration' in activity:
try:
activity_name = '{0} - {1}'.format(
activity['type'],
agent_config.GetBackupNameFromId(
activity['configuration']['id']
)
)
                        except Exception:
activity_name = '{0} - UNKNOWN({1})'.format(
activity['type'],
activity['configuration']['id']
)
else:
activity_name = activity['type']
results.append(
{
'id': activity['id'],
'name': activity_name,
'type': activity['type'],
'state': activity['state'],
'time': activity['last_updated_time']
}
)
return results
else:
self.log.error('Unable to retrieve latest agent activities for agent id ' + str(machine_agent_id) + '. Server returned ' + str(res.status_code) + ': ' + res.text + ' Reason: ' + res.reason)
return []
    def GetAgentEventsSince(self, machine_agent_id, last_event_id, event_limit=100, results=None):
"""
        Retrieve the events of the agent since the specified last event id

        The accumulated results take the form:
            {
                'heartbeats': [<event>, ...],
                'backup <id>': [<event>, ...],
                'restore <id>': [<event>, ...],
                ...
            }
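        Example (sketch; a v2 polling loop, agent id and sleep interval are
        illustrative):
            marker = None
            events = {}
            while True:
                events, marker = agents.GetAgentEventsSince(123456, marker,
                                                            results=events)
                time.sleep(10)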
"""
        if results is None:
            # avoid a shared mutable default argument; the v1 path accumulates
            # a list while the v2 path groups events into a dict
            results = [] if self.api_version == 1 else {}
        # Get the agent configuration so that we know we can lookup the backup configs in order
        # to display a useful name about the activity to the user
        self.GetAgentConfiguration(machine_agent_id)
        agent_config = self.AgentConfiguration(machine_agent_id)
flip_activity_list_due_to_api_inconsistency = False
if self.api_version == 1:
self.ReInit(self.sslenabled,
                        '/v1.0/{0}/system/activity/{1}'.format(
self.authenticator.AuthTenantId,
machine_agent_id
)
)
self.headers['X-Auth-Token'] = self.authenticator.AuthToken
self.headers['Content-Type'] = 'application/json; charset=utf-8'
else:
if last_event_id is None:
self.ReInit(self.sslenabled,
'/v{0}/{1}/agents/{2}/events'.format(
self.api_version,
self.project_id,
machine_agent_id
)
)
else:
self.ReInit(self.sslenabled,
'/v{0}/{1}/agents/{2}/events?marker={3}&limit={4}&sort_dir=asc'.format(
self.api_version,
self.project_id,
machine_agent_id,
last_event_id,
event_limit
)
)
flip_activity_list_due_to_api_inconsistency = True
self.headers['X-Auth-Token'] = self.authenticator.AuthToken
self.headers['Content-Type'] = 'application/json; charset=utf-8'
self.headers['X-Project-Id'] = self.project_id
res = requests.get(self.Uri, headers=self.Headers)
if res.status_code == 200:
new_last_event_id = None
if self.api_version == 1:
# TODO: v1 doesn't have Events like V2 does...
                # v1 API: we look at the Id as it is monotonically increasing
if last_event_id is None:
new_last_event_id = 0
else:
new_last_event_id = last_event_id
for activity in res.json():
activity_name = ''
if activity['ParentId'] != 0:
try:
activity_name = '{0} - {1}'.format(
activity['Type'],
agent_config.GetBackupNameFromId(
activity['ParentId']
)
)
                        except Exception:
activity_name = '{0} - UNKNOWN({1})'.format(
activity['Type'],
activity['ParentId']
)
else:
activity_name = '{0} - {1}'.format(
activity['Type'],
activity['DisplayName']
)
                    # guard against a None marker, which would raise TypeError
                    # when compared to an int on Python 3
                    if activity['Id'] > (last_event_id or 0):
results.append(
{
'id': activity['Id'],
'name': activity_name,
'type': activity['Type'],
'state': activity['CurrentState'],
'time': activity['TimeOfActivity']
}
)
                        new_last_event_id = max(new_last_event_id, activity['Id'])
# Reverse the order so that the newest is at the start
results.reverse()
else:
events = res.json()['events']
if flip_activity_list_due_to_api_inconsistency:
events.reverse()
new_last_event_id = last_event_id
if new_last_event_id is None:
new_last_event_id = 0
                if 'heartbeats' not in results:
                    results['heartbeats'] = []
for event in events:
# update the last event
if event['id'] > new_last_event_id:
new_last_event_id = event['id']
# filter heart beats
                    if 'event' in event:
if 'heartbeat' in event['event']:
results['heartbeats'].append(event)
else:
event_name = None
# filter backups
                        if 'backup' in event:
                            if 'id' in event['backup']:
event_name = 'Backup {0}'.format(
event['backup']['id']
)
# filter restores
                        elif 'restore' in event:
                            if 'id' in event['restore']:
event_name = 'Restore {0}'.format(
event['restore']['id']
)
                        if event_name is not None:
                            if event_name not in results:
results[event_name] = []
results[event_name].append(event)
else:
# Dump everything else based on the size of the results
# so that they enter in order
results[len(results)] = event
return (results, new_last_event_id)
else:
self.log.error('Unable to retrieve latest agent events for agent id ' + str(machine_agent_id) + '. Server returned ' + str(res.status_code) + ': ' + res.text + ' Reason: ' + res.reason)
            return (results, last_event_id)  # return what was accumulated so far
#
# Agent Cleanup
#
def GetAllAgentsForHost(self, cloud_server_name=None, cloud_server_id=None, cloud_server_ips=None):
"""
Retrieve a list (set) of agent identifiers for a given cloud server
cloud_server_name - the name of the cloud server from Rackspace ControlPanel, also available via the bootstrap details and GetAgentDetails()
Returns a set of dictionaries containing the following data:
AgentVersion
Architecture
Flavor
BackupVaultSize
CleanupAllowed
Datacenter
IPAddress
IsDisabled
IsEncrypted
MachineAgentId
MachineName
OperatingSystem
OperatingSystemVersion
PublicKey
Status
TimeOfLastSuccessfulBackup
UseServiceNet
HostServerId
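        Example (sketch; matching by the Nova host server id, value illustrative):
            matches = agents.GetAllAgentsForHost(cloud_server_id='a1b2c3d4')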
"""
if cloud_server_name is None and cloud_server_id is None and cloud_server_ips is None:
raise ParameterError('Neither Cloud Server Name nor Cloud Server Id (HostServerId) nor Cloud Server IPs were specified. Unable to match a server.')
if self.api_version == 1:
self.ReInit(self.sslenabled, "/v1.0/user/agents")
self.headers['X-Auth-Token'] = self.authenticator.AuthToken
self.headers['Content-Type'] = 'application/json; charset=utf-8'
res = requests.get(self.Uri, headers=self.Headers)
if res.status_code == 200:
agentlist = list()
try:
usersagentlist = res.json()
for agent in usersagentlist:
self.log.debug('Agent: ' + str(agent))
if (cloud_server_id is not None and
'HostServerId' in agent):
self.log.debug(
'Checking Id Match: {0:} == {1:}'
.format(cloud_server_id,
agent['HostServerId']))
if agent['HostServerId'] == cloud_server_id:
self.log.debug('Id Matched: Adding ' +
str(agent))
agentlist.append(agent)
continue
if (cloud_server_name is not None and
'MachineName' in agent):
self.log.debug(
'Checking Name Match: {0:} == {1:}'
.format(cloud_server_name,
agent['MachineName']))
if agent['MachineName'] == cloud_server_name:
self.log.debug('Name Matched: Adding ' +
str(agent))
agentlist.append(agent)
continue
if (cloud_server_ips is not None and
'IPAddress' in agent):
self.log.debug(
'Checking IP Match: {0:} in {1:}'
.format(agent['IPAddress'],
cloud_server_ips))
if agent['IPAddress'] in cloud_server_ips:
self.log.debug('IP Matched: Adding ' +
str(agent))
agentlist.append(agent)
continue
except LookupError:
self.log.error('Unable to retrieve all agents from the '
'returned agent list')
self.log.error('system response: ' + res.text)
self.log.error('system reason: ' + res.reason)
return agentlist
else:
if cloud_server_name is not None:
self.log.error('Unable to retrieve all agents for cloud '
'server (name: ' + cloud_server_name +
') system return code ' +
str(res.status_code))
if cloud_server_id is not None:
self.log.error('Unable to retrieve all agents for cloud '
'server (id: ' + str(cloud_server_id) +
') system return code ' +
str(res.status_code))
self.log.error('system response: ' + res.text)
self.log.error('system reason: ' + res.reason)
return list()
else:
self.ReInit(self.sslenabled,
'/v{0}/{1}/agents'.format(self.api_version,
self.project_id))
self.headers['X-Auth-Token'] = self.authenticator.AuthToken
self.headers['Content-Type'] = 'application/json; charset=utf-8'
res = requests.get(self.Uri, headers=self.Headers)
if res.status_code == 200:
resp_body = res.json()
agentlist = list()
for entry in resp_body['agents']:
if cloud_server_id:
if entry['host']['machine']['id'] == cloud_server_id:
agentlist.append(entry)
continue
if cloud_server_name:
if entry['name'] == cloud_server_name:
agentlist.append(entry)
continue
if cloud_server_ips:
for address in entry['host']['addresses']:
if address['addr'] in cloud_server_ips:
agentlist.append(entry)
break  # stop after the first matching address so the entry is added only once
return agentlist
else:
if cloud_server_name is not None:
self.log.error('Unable to retrieve all agents for cloud '
'server (name: ' + cloud_server_name +
') system return code ' +
str(res.status_code))
if cloud_server_id is not None:
self.log.error('Unable to retrieve all agents for cloud '
'server (id: ' + str(cloud_server_id) +
') system return code ' +
str(res.status_code))
self.log.error('system response: ' + res.text)
self.log.error('system reason: ' + res.reason)
return list()
def RemoveAgent(self, machine_agent_id):
"""
De-register the agent from the Rackspace Cloud Backup API
"""
if self.api_version == 1:
self.ReInit(self.sslenabled, '/v1.0/agent/delete')
self.headers['X-Auth-Token'] = self.authenticator.AuthToken
self.headers['Content-Type'] = 'application/json; charset=utf-8'
self.o = {}
self.o['MachineAgentId'] = machine_agent_id
self.body = json.dumps(self.o)
res = requests.post(self.Uri, headers=self.Headers, data=self.Body)
if res.status_code == 204:
self.log.info('Removed agent id ' + str(machine_agent_id))
self.log.warning('Please restart the process to lookup this agent again as the agent id may have changed.')
return True
else:
self.log.error('Unable to remove agent id ' + str(machine_agent_id) + ' system return code ' + str(res.status_code) + ' Reason: ' + res.reason)
return False
else:
self.ReInit(self.sslenabled,
'/v{0}/{1}/agents/{2}'.format(self.api_version,
self.project_id,
machine_agent_id))
self.headers['X-Auth-Token'] = self.authenticator.AuthToken
self.headers['Content-Type'] = 'application/json; charset=utf-8'
res = requests.delete(self.Uri, headers=self.Headers)
if res.status_code == 204:
self.log.info('Removed agent id ' + str(machine_agent_id))
self.log.warning('Please restart the process to lookup this '
'agent again as the agent id may have changed.')
return True
else:
self.log.error('Unable to remove agent id ' +
str(machine_agent_id) + ' system return code ' +
str(res.status_code) + ' Reason: ' + res.reason)
return False
def RemoveAllAgentsForHost(self, agent_list):
"""
Remove all agents registered to the same user for a given host server
agent_list - the list of agent entries to remove, e.g. as returned by GetAllAgentsForHost()
"""
agents_removed = []
for agent in agent_list:
if self.RemoveAgent(agent['MachineAgentId']):
agents_removed.append(agent['MachineAgentId'])
return agents_removed
def EnableDisableAgent(self, machine_agent_id, enabled=True):
"""
Enable or Disable an agent
"""
# TODO: update for v2 API
self.ReInit(self.sslenabled, "/v1.0/agent/enable")
self.headers['X-Auth-Token'] = self.authenticator.AuthToken
self.headers['Content-Type'] = 'application/json; charset=utf-8'
self.o = {}
self.o['MachineAgentId'] = machine_agent_id
self.o['Enable'] = enabled
self.body = json.dumps(self.o)
res = requests.post(self.Uri, headers=self.Headers, data=self.Body)
if res.status_code == 204:
# success
self.log.info('Changed Agent Status - Machine Agent Id: {0:}, Enabled: {1:}'.format(machine_agent_id, enabled))
return True
elif res.status_code == 401:
# bad credentials
self.log.warn('Invalid AuthToken')
return False
elif res.status_code == 403:
# no permissions
self.log.warn('User does not have permission to enable/disable this system.')
return False
else:
# other issue - 400, 500, 503, or something else
self.log.error('Error (code: {0:}): {1:}'.format(res.status_code, res.text))
return False
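# Illustrative usage sketch (not part of the original module; the class name
# `Agents` and the constructor arguments below are assumptions):
#
#     agents = Agents(sslenabled=True, authenticator=auth, apihost=api_host)
#     matched = agents.GetAllAgentsForHost(cloud_server_name='web01')
#     removed = agents.RemoveAllAgentsForHost(matched)
#     print('Removed agent ids:', removed)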
| 40.915699 | 324 | 0.530412 | 8,202 | 81,054 | 5.118264 | 0.098756 | 0.022511 | 0.03535 | 0.01334 | 0.547904 | 0.498333 | 0.453168 | 0.426298 | 0.388566 | 0.374083 | 0 | 0.008946 | 0.375231 | 81,054 | 1,980 | 325 | 40.936364 | 0.820043 | 0.171503 | 0 | 0.492378 | 0 | 0.001524 | 0.142773 | 0.0095 | 0.000762 | 0 | 0 | 0.005051 | 0 | 1 | 0.058689 | false | 0.003049 | 0.010671 | 0 | 0.170732 | 0.000762 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fac87337edea7980e0ccd0e7caedefd311d386c3 | 987 | py | Python | tests/test_graphql.py | timfeirg/flask-graphene-boilerplate | fea654f76bd38ed46effe36fb644d4b2ce27bd0f | [
"MIT"
] | 8 | 2018-04-06T12:50:10.000Z | 2021-07-09T11:50:28.000Z | tests/test_graphql.py | timfeirg/flask-graphene-boilerplate | fea654f76bd38ed46effe36fb644d4b2ce27bd0f | [
"MIT"
] | null | null | null | tests/test_graphql.py | timfeirg/flask-graphene-boilerplate | fea654f76bd38ed46effe36fb644d4b2ce27bd0f | [
"MIT"
] | 2 | 2021-01-29T14:43:24.000Z | 2021-06-24T07:54:27.000Z | import json
def dumpdump(s):
"""double json.dumps string"""
return json.dumps(json.dumps(s))
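# Worked example (illustrative): dumpdump({"foo": "bar"}) returns the string
# '"{\"foo\": \"bar\"}"', i.e. a JSON value that itself encodes a JSON string,
# which is the form the createItem mutation below expects for its `value` argument.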
def test_crud(graphene_client):
# test create object
sample_key = 'whatever'
sample_value = {'foo': 'bar'}
query = '''
mutation testCreateItem {
createItem(key: %s, value: %s) {
ok
item {
key
value
}
}
}
''' % (json.dumps(sample_key), dumpdump(sample_value))
res = graphene_client.execute(query)
assert res['data']['createItem']['ok'] is True
created_item = res['data']['createItem']['item']
assert created_item['key'] == sample_key
assert json.loads(created_item['value']) == sample_value
# test delete object
query = '''
mutation testDeleteItem {
deleteItem(key: %s) {
ok
}
}
''' % (json.dumps(sample_key))
res = graphene_client.execute(query)
assert res['data']['deleteItem']['ok'] is True
| 24.675 | 60 | 0.56231 | 107 | 987 | 5.056075 | 0.35514 | 0.083179 | 0.055453 | 0.066543 | 0.155268 | 0.155268 | 0.155268 | 0.155268 | 0 | 0 | 0 | 0 | 0.29382 | 987 | 39 | 61 | 25.307692 | 0.776184 | 0.06383 | 0 | 0.193548 | 0 | 0 | 0.381679 | 0 | 0 | 0 | 0 | 0 | 0.129032 | 1 | 0.064516 | false | 0 | 0.032258 | 0 | 0.129032 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fac919db5169d18f0fdc7a8a783370177da14510 | 2,226 | py | Python | src/resize.py | MikeLrUC/IA-Project | 12d1ec80a8253b5493ea928809e3357dfe5d596e | [
"MIT"
] | null | null | null | src/resize.py | MikeLrUC/IA-Project | 12d1ec80a8253b5493ea928809e3357dfe5d596e | [
"MIT"
] | null | null | null | src/resize.py | MikeLrUC/IA-Project | 12d1ec80a8253b5493ea928809e3357dfe5d596e | [
"MIT"
] | 1 | 2022-02-19T23:43:19.000Z | 2022-02-19T23:43:19.000Z | import os
import cv2 # OpenCV lib for image manipulation
RAW_ROOT = "../data/PVTL_dataset/"
PCD_ROOT = "../data/processed/"
TRAIN_PATH = "train/"
VAL_PATH = "val/"
def get_image_names(path_type):
return [f"{path_type}{img_class}/{image}" for img_class in os.listdir(path_type) for image in os.listdir(f"{path_type}{img_class}")]
def get_sizes(images):
widths, heights = [], []
for image in images:
img = cv2.imread(image, cv2.IMREAD_UNCHANGED)
widths.append(img.shape[1])
heights.append(img.shape[0])
return widths, heights
def resize_and_pad_images(images, w_target, h_target):
for image in images:
# Image
img = cv2.imread(image, cv2.IMREAD_UNCHANGED)
# Getting minimum ratio (to maintain image format)
ratio_w = w_target / img.shape[1]
ratio_h = h_target / img.shape[0]
desired_ratio = min(ratio_w, ratio_h)
# Resizing
desired_size = [int(desired_ratio * img.shape[0]), int(desired_ratio * img.shape[1])]
resized = cv2.resize(img, (desired_size[1], desired_size[0]))
# Padding and Centering Image
delta_w = w_target - desired_size[1]
delta_h = h_target - desired_size[0]
top, bottom = delta_h // 2, delta_h - (delta_h // 2)
left, right = delta_w // 2, delta_w - (delta_w // 2)
padded = cv2.copyMakeBorder(resized, top, bottom, left, right, cv2.BORDER_CONSTANT, value=[0,0,0])
# Writing Image to Processed Path
cv2.imwrite(PCD_ROOT + image[len(RAW_ROOT):], padded)
if __name__ == "__main__":
# Getting Training and Validation Images
train_images = get_image_names(RAW_ROOT + TRAIN_PATH)
validation_images = get_image_names(RAW_ROOT + VAL_PATH)
total_images = train_images + validation_images
# Getting Maximum sizes
widths, heights = get_sizes(total_images)
w_max, h_max = max(widths), max(heights)
w_max, h_max = 224, 224  # Hardcoded target size; overrides the computed maxima above
# Resizing and Padding Images to maximum Width and Height, maintaining picture ratio
resize_and_pad_images(total_images, w_max, h_max)
print(f"Resized and Padded Images to w,h = ({w_max}, {h_max})") # (388, 884)
| 37.1 | 136 | 0.657682 | 320 | 2,226 | 4.3125 | 0.278125 | 0.034783 | 0.014493 | 0.023188 | 0.173913 | 0.115942 | 0.050725 | 0 | 0 | 0 | 0 | 0.022157 | 0.22956 | 2,226 | 59 | 137 | 37.728814 | 0.782507 | 0.144654 | 0 | 0.105263 | 0 | 0 | 0.085624 | 0.038584 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078947 | false | 0 | 0.052632 | 0.026316 | 0.184211 | 0.026316 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
facbc305d420d17489ac86c0fe411bc028552bc2 | 2,622 | py | Python | roughtime/timeserver/gps_time_daemon/ntptimesource.py | matzf/scion-apps | 5927b00b3f97697b268a34cada5d683d36ed5dae | [
"Apache-2.0"
] | 3 | 2018-02-22T16:50:42.000Z | 2018-06-15T12:50:23.000Z | roughtime/timeserver/gps_time_daemon/ntptimesource.py | matzf/scion-apps | 5927b00b3f97697b268a34cada5d683d36ed5dae | [
"Apache-2.0"
] | 21 | 2017-11-24T16:22:19.000Z | 2018-08-30T06:27:22.000Z | roughtime/timeserver/gps_time_daemon/ntptimesource.py | matzf/scion-apps | 5927b00b3f97697b268a34cada5d683d36ed5dae | [
"Apache-2.0"
] | 12 | 2017-11-23T08:20:10.000Z | 2018-07-26T14:37:58.000Z | import threading
import ntplib
from datetime import datetime
from dateutil.tz import tzlocal
def query_ntp_server(server_url, request_timeout, result_handler):
client = ntplib.NTPClient()
try:
response = client.request(server_url, version=3, timeout=request_timeout)
result_handler(response)
except Exception:
print("Error getting time from ntp server: %s" % (server_url))
class TimeResult:
def __init__(self):
self.responses=[]
self.response_lock=threading.Lock()
def response_received(self, response):
with self.response_lock:
self.responses.append(response)
def get_time(self):
times=[]
for r in self.responses:
times.append(r.tx_time)
return self._find_max_window_time(times)
def _find_max_window_time(self, obtained_times):
""" We take time that has largest number of occurrences, within delta """
if not obtained_times:
return 0, 0
obtained_times.sort()
start=0
end=0
max_window=0
t=0
while end<len(obtained_times):
delta=obtained_times[end]-obtained_times[start]
if delta <= NTPTimeSource.MAX_DELTA_SEC:
end=end+1
window_size=end-start
if window_size>max_window:
max_window=window_size
t=obtained_times[start]
else:
start=start+1
return t, max_window
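# Worked example (illustrative): with MAX_DELTA_SEC = 2 and times
# [10.0, 10.5, 11.2, 40.0], the largest window within the delta covers
# [10.0, 10.5, 11.2], so the method returns (10.0, 3).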
class NTPTimeSource:
MAX_DELTA_SEC=2
def __init__(self, ntp_servers, request_timeout):
self.servers=ntp_servers
self.timeout=request_timeout
def get_ntp_time(self):
""" Query all ntp servers in different threads, take the time that has most occurrences """
response=TimeResult()
workers=[]
for server in self.servers:
t=threading.Thread(target=query_ntp_server, args=(server, self.timeout, response.response_received, ))
t.start()
workers.append(t)
for worker in workers:
worker.join()
timestamp, server_num = response.get_time()
return datetime.fromtimestamp(timestamp).replace(tzinfo=tzlocal()), server_num
if __name__ == "__main__":
print("Sending query to NTP servers")
ntp_servers=["0.pool.ntp.org", "3.ch.pool.ntp.org", "3.europe.pool.ntp.org", "europe.pool.ntp.org"]
ntp_source=NTPTimeSource(ntp_servers, 5)
t, server_num = ntp_source.get_ntp_time()
print(t)
| 29.133333 | 114 | 0.638063 | 328 | 2,622 | 4.868902 | 0.310976 | 0.056982 | 0.025047 | 0.033813 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007829 | 0.26926 | 2,622 | 89 | 115 | 29.460674 | 0.825679 | 0.057208 | 0 | 0 | 0 | 0 | 0.058991 | 0.008544 | 0 | 0 | 0 | 0 | 0 | 1 | 0.104478 | false | 0 | 0.089552 | 0 | 0.298507 | 0.044776 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
facd051b66ae50bb7afdfaf52a9dd8b00737ecd8 | 3,707 | py | Python | binary_tree/functions.py | RakitinDen/pytorch-recursive-gumbel-max-trick | 44f9854020e727946a074a6e53b20dd593f96cc1 | [
"Apache-2.0"
] | 20 | 2021-12-03T13:20:17.000Z | 2022-03-20T18:58:06.000Z | binary_tree/functions.py | RakitinDen/pytorch-recursive-gumbel-max-trick | 44f9854020e727946a074a6e53b20dd593f96cc1 | [
"Apache-2.0"
] | null | null | null | binary_tree/functions.py | RakitinDen/pytorch-recursive-gumbel-max-trick | 44f9854020e727946a074a6e53b20dd593f96cc1 | [
"Apache-2.0"
] | null | null | null | import torch
import sys
sys.path.append('../')
from binary_tree.utils import BinaryTree, build_tree
from estimators import uniform_to_exp
def bin_tree_struct(exp, lengths=None, **kwargs):
'''
Defines F_struct for binary tree
Applies the divide and conquer algorithm from the paper
Input
--------------------
exp : torch.Tensor | batch_size x dim |
Contains a batch of arrays
lengths : torch.Tensor | batch_size |
Contains lengths of arrays in the batch (lengths[i] <= dim)
**kwargs : Needed to support usage of different F_struct in the estimators' implementation
Output
--------------------
struct_var : BinaryTree (defined in binary_tree.utils)
'''
batch_size = exp.shape[0]
dim = exp.shape[1]
masks = -torch.log(torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1))
trees = []
heights = torch.zeros(batch_size)
for batch_idx in range(batch_size):
if lengths is None:
right = dim
else:
right = lengths[batch_idx].item()
left = 0
level = 0
tree = build_tree(batch_idx, exp, left, right, level, masks, heights)
trees.append(tree)
struct_var = BinaryTree(masks, trees, heights)
return struct_var
def bin_tree_log_prob(struct_var, logits, **kwargs):
'''
Defines F_log_prob for binary tree
Calculates the log probability log(p(X)) of the binary tree
Note: here the execution trace is in one-to-one correspondance with the binary tree itself
Input
--------------------
struct_var : BinaryTree (defined in binary_tree.utils)
logits : torch.Tensor | batch_size x dim |
Contains parameters (log(mean)) of the exponential distributions of elements in arrays
**kwargs : Needed to support usage of different F_log_prob in the estimators' implementation
Output
--------------------
log_prob : torch.Tensor | batch_size |
Contains log probabilities of the binary trees
'''
batch_size = logits.shape[0]
dim = logits.shape[1]
logits_expanded = logits.unsqueeze(1).repeat((1, dim, 1))
masked_logits = struct_var.masks + logits_expanded
log_probs = -logits - torch.logsumexp(-masked_logits, dim=-1)
return log_probs.sum(dim=-1)
def bin_tree_cond(struct_var, logits, uniform, **kwargs):
'''
Defines F_cond for binary tree
Samples from the conditional distribution p(E | T) of exponentials given the execution trace
Input
--------------------
struct_var : BinaryTree (defined in binary_tree.utils)
logits : torch.Tensor | batch_size x dim |
Contains parameters (log(mean)) of the exponential distributions of elements in arrays
uniform : torch.Tensor | batch_size x dim |
Contains realizations of the independent uniform variables, that will be transformed to conditional samples
**kwargs : Needed to support usage of different F_cond in the estimators' implementation
Output
--------------------
cond_exp : torch.Tensor | batch_size x dim |
Contains conditional samples from p(E | X) = p(E | T)
'''
batch_size = logits.shape[0]
dim = logits.shape[1]
logits_expanded = logits.unsqueeze(1).repeat((1, dim, 1))
masked_logits = struct_var.masks + logits_expanded
min_logits = -torch.logsumexp(-masked_logits, dim=-1)
minimums = uniform_to_exp(logits=min_logits, uniform=uniform)
bin_mask = torch.exp(-struct_var.masks)
cond_exp = (minimums.unsqueeze(-1).repeat((1, 1, dim)) * bin_mask).sum(dim=1)
return cond_exp
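# Minimal end-to-end sketch (illustrative; shapes and values are made up):
#
#     logits = torch.zeros(4, 8)                        # batch_size=4, dim=8
#     exp = uniform_to_exp(logits=logits, uniform=torch.rand(4, 8))
#     struct_var = bin_tree_struct(exp)                 # T = F_struct(E)
#     log_prob = bin_tree_log_prob(struct_var, logits)  # log p(T)
#     cond_exp = bin_tree_cond(struct_var, logits, torch.rand(4, 8))  # E | T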
| 33.098214 | 125 | 0.641489 | 482 | 3,707 | 4.790456 | 0.238589 | 0.050671 | 0.048506 | 0.060632 | 0.444348 | 0.374621 | 0.374621 | 0.32958 | 0.231269 | 0.231269 | 0 | 0.008587 | 0.246021 | 3,707 | 111 | 126 | 33.396396 | 0.817531 | 0.517939 | 0 | 0.205128 | 0 | 0 | 0.001879 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.102564 | 0 | 0.25641 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
facd77efc7419405e7d0573742a3ad604a3fd07b | 569 | py | Python | python365/dash_app.py | JonasPC/WebApp_dummy | d12fc804eda42dec495d828c85fe504861fd903b | [
"MIT"
] | null | null | null | python365/dash_app.py | JonasPC/WebApp_dummy | d12fc804eda42dec495d828c85fe504861fd903b | [
"MIT"
] | 2 | 2021-03-31T18:55:10.000Z | 2021-12-13T19:49:21.000Z | python365/dash_app.py | JonasPC/WebApp_dummy | d12fc804eda42dec495d828c85fe504861fd903b | [
"MIT"
] | null | null | null | import dash
import dash_core_components as dcc
import dash_html_components as html
from flask import Flask
server = Flask(__name__)
app = dash.Dash(server=server)
app.css.append_css({
'external_url': (
'https://cdn.rawgit.com/chriddyp/0247653a7c52feb4c48437e1c1837f75'
'/raw/a68333b876edaf62df2efa7bac0e9b3613258851/dash.css'
)
})
app.layout = html.Div([
html.H1(children='Hello Dash'),
html.Div(children='Dash: A web application framework for Python.')
])
if __name__ == '__main__':
app.run_server(debug=True, host='0.0.0.0')
| 22.76 | 74 | 0.718805 | 74 | 569 | 5.27027 | 0.567568 | 0.076923 | 0.015385 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.108108 | 0.154657 | 569 | 24 | 75 | 23.708333 | 0.702703 | 0 | 0 | 0 | 0 | 0 | 0.351494 | 0.094903 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.222222 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
faceab5bf72239cbfa42db14f25288736d2a6601 | 1,022 | py | Python | examples/drawing/draw_many.py | xxao/pero | a7f0c84fae0b21fe120204e798bd61cdab3a125d | [
"MIT"
] | 13 | 2019-07-15T17:51:21.000Z | 2022-03-15T06:13:43.000Z | examples/drawing/draw_many.py | xxao/pero | a7f0c84fae0b21fe120204e798bd61cdab3a125d | [
"MIT"
] | 1 | 2021-12-29T00:46:44.000Z | 2022-01-21T16:18:48.000Z | examples/drawing/draw_many.py | xxao/pero | a7f0c84fae0b21fe120204e798bd61cdab3a125d | [
"MIT"
] | 3 | 2020-09-27T14:31:45.000Z | 2022-01-22T14:28:15.000Z | # Created byMartin.cz
# Copyright (c) Martin Strohalm. All rights reserved.
import pero
import numpy
# init size
width = 400
height = 300
padding = 50
# init data
x_data = numpy.linspace(-numpy.pi, numpy.pi, 50)
y_data = numpy.sin(x_data)
# init scales
x_scale = pero.LinScale(
in_range = (min(x_data), max(x_data)),
out_range = (padding, width-padding))
y_scale = pero.LinScale(
in_range = (-1, 1),
out_range = (height-padding, padding))
color_scale = pero.GradientScale(
in_range = (-1, 1),
out_range = pero.colors.Spectral)
# init marker
marker = pero.Circle(
size = 8,
x = lambda d: x_scale.scale(d[0]),
y = lambda d: y_scale.scale(d[1]),
line_color = lambda d: color_scale.scale(d[1]).darker(.2),
fill_color = lambda d: color_scale.scale(d[1]))
# init image
image = pero.Image(width=width, height=height)
# fill
image.fill("w")
# draw points
marker.draw_many(image, zip(x_data, y_data))
# show image
image.show()
| 21.291667 | 63 | 0.641879 | 155 | 1,022 | 4.083871 | 0.367742 | 0.039494 | 0.06951 | 0.056872 | 0.210111 | 0.14534 | 0.091627 | 0.091627 | 0 | 0 | 0 | 0.025126 | 0.221135 | 1,022 | 47 | 64 | 21.744681 | 0.770101 | 0.151663 | 0 | 0.076923 | 0 | 0 | 0.001238 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.076923 | 0 | 0.076923 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fad681be96329f06b12884a3b3c5e3a09957a82d | 703 | py | Python | SuperGlue/AttentionalPropagation.py | fun-math/SLAM_with_ML | be5cc996baac3d67e2c65e60fadc6bada3f80b42 | [
"MIT"
] | 1 | 2021-08-19T06:55:53.000Z | 2021-08-19T06:55:53.000Z | SuperGlue/AttentionalPropagation.py | fun-math/SLAM_with_ML | be5cc996baac3d67e2c65e60fadc6bada3f80b42 | [
"MIT"
] | null | null | null | SuperGlue/AttentionalPropagation.py | fun-math/SLAM_with_ML | be5cc996baac3d67e2c65e60fadc6bada3f80b42 | [
"MIT"
] | null | null | null | import tensorflow as tf
from MultiHeadAttention import *
from MLP import *
class AttentionalPropagation(tf.keras.layers.Layer):
def __init__(self, feature_dim, num_heads):
super(AttentionalPropagation,self).__init__()
self.attention = MultiHeadAttention(num_heads, feature_dim)
self.mlp = MLP([feature_dim*2, feature_dim*2, feature_dim])
# Optionally zero-initialize the last MLP layer's bias, e.g. tf.zeros_like(self.mlp[-1].bias)
def call(self, x, source):
msg = self.attention(x, source, source)
return self.mlp(tf.concat([x,msg], axis=1))
if __name__=='__main__':
layer = AttentionalPropagation(256, 4)
x = tf.random.normal(shape=(2, 256, 4))
y = tf.random.normal(shape=(2, 256, 5))
print(layer(x,y).shape)
| 30.565217 | 63 | 0.719772 | 104 | 703 | 4.634615 | 0.451923 | 0.103734 | 0.045643 | 0.074689 | 0.161826 | 0.095436 | 0 | 0 | 0 | 0 | 0 | 0.029703 | 0.13798 | 703 | 22 | 64 | 31.954545 | 0.765677 | 0.069701 | 0 | 0 | 0 | 0 | 0.012289 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.1875 | 0 | 0.4375 | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fad77b7c1138a226935346eeaa3c8ba842dc09b3 | 1,683 | py | Python | dynamodb_doctor/relationships/test_many.py | c-py/dynamodb_doctor | 966a5844b92206b2a66e139cdbe4a274579be650 | [
"MIT"
] | null | null | null | dynamodb_doctor/relationships/test_many.py | c-py/dynamodb_doctor | 966a5844b92206b2a66e139cdbe4a274579be650 | [
"MIT"
] | null | null | null | dynamodb_doctor/relationships/test_many.py | c-py/dynamodb_doctor | 966a5844b92206b2a66e139cdbe4a274579be650 | [
"MIT"
] | null | null | null | import pytest
import aioboto3
from dynamodb_doctor import Model, String, Many
from dynamodb_doctor.exceptions import MissingAttributeException
ENDPOINT_URL = "http://localhost:58000"
@pytest.mark.asyncio
async def test_model_with_many_to_model_relationship(table_fixture):
class TestModelA(Model):
name = String()
class Meta:
table = table_fixture
class TestModelB(Model):
relation = Many(TestModelA)
class Meta:
table = table_fixture
test_model = TestModelB()
await test_model.save()
session = aioboto3.Session()
async with session.resource('dynamodb', endpoint_url=ENDPOINT_URL) as resource:
table = await resource.Table(table_fixture._name)
item = await table.get_item(Key={"pk": test_model._pk, "sk": test_model._sk})
assert("Item" in item)
@pytest.mark.asyncio
async def test_can_add_to_model_with_many_relationship(table_fixture):
class TestModelA(Model):
name = String()
class Meta:
table = table_fixture
class TestModelB(Model):
relation = Many(TestModelA)
class Meta:
table = table_fixture
test_model = TestModelB()
test_model.relation.add(name="number1")
test_model.relation.add({"name": "number2"})
await test_model.save()
test_models = await TestModelB.all()
assert(len(test_models) == 1)
assert(len(test_models[0].relation) == 2)
assert test_models[0].relation[0].name == "number1"
assert test_models[0].relation[1].name == "number2"
@pytest.mark.asyncio
async def test_model_with_many_to_attribute_relationship_fails(table_fixture):
... | 25.5 | 85 | 0.688057 | 206 | 1,683 | 5.378641 | 0.271845 | 0.081227 | 0.076715 | 0.068592 | 0.495487 | 0.40704 | 0.380866 | 0.380866 | 0.380866 | 0.380866 | 0 | 0.013564 | 0.211527 | 1,683 | 66 | 86 | 25.5 | 0.821402 | 0 | 0 | 0.522727 | 0 | 0 | 0.041568 | 0 | 0 | 0 | 0 | 0 | 0.113636 | 1 | 0 | false | 0 | 0.090909 | 0 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fad888a3651e1b2e3993ef5bd617082eb49b208a | 1,179 | py | Python | Back-End/Python/MultiProcessing-Threading/MultiProcessing/process_name_2.py | ASHISHKUMAR2411/Programming-CookBook | 9c60655d64d21985ccb4196360858d98344701f9 | [
"MIT"
] | 25 | 2021-04-28T02:51:26.000Z | 2022-03-24T13:58:04.000Z | Back-End/Python/MultiProcessing-Threading/MultiProcessing/process_name_2.py | ASHISHKUMAR2411/Programming-CookBook | 9c60655d64d21985ccb4196360858d98344701f9 | [
"MIT"
] | 1 | 2022-03-03T23:33:41.000Z | 2022-03-03T23:35:41.000Z | Back-End/Python/MultiProcessing-Threading/MultiProcessing/process_name_2.py | ASHISHKUMAR2411/Programming-CookBook | 9c60655d64d21985ccb4196360858d98344701f9 | [
"MIT"
] | 15 | 2021-05-30T01:35:20.000Z | 2022-03-25T12:38:25.000Z | from multiprocessing import Process, current_process
import time
import os
def worker():
name = current_process().name
print('==='*15 + ' < ' + f'{name}' + ' > ' + '==='*15)
time.sleep(1)
print(f'{name} Exiting...')
def worker_1():
name = current_process().name
print('===' * 15 + ' < ' + f'{name}' + ' > ' + '===' * 15)
time.sleep(1)
print(f'{name} Exiting...')
def service_a():
name = current_process().name
print('===' * 15 + ' < ' + f'{name}' + ' > ' + '===' * 15)
time.sleep(1)
print(f'{name} Exiting...')
def service_b():
name = current_process().name
print('===' * 15 + ' < ' + f'{name}' + ' > ' + '===' * 15)
time.sleep(1)
print(f'{name} Exiting...')
if __name__ == '__main__':
serviceA = Process(name='Service A', target=service_a)
serviceB = Process(name='Service B', target=service_b)
worker_one = Process(name='Worker 1', target=worker)
worker_two = Process(name='Worker 2', target=worker_1)
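# Note: the start()/join() pairs below run the processes one at a time.
# A sketch of running them concurrently instead (illustrative):
#
#     processes = [serviceA, serviceB, worker_one, worker_two]
#     for p in processes:
#         p.start()
#     for p in processes:
#         p.join()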
serviceA.start()
serviceA.join()
serviceB.start()
serviceB.join()
worker_one.start()
worker_one.join()
worker_two.start()
worker_two.join() | 21.053571 | 62 | 0.557252 | 141 | 1,179 | 4.48227 | 0.205674 | 0.139241 | 0.113924 | 0.139241 | 0.435127 | 0.435127 | 0.435127 | 0.435127 | 0.435127 | 0.435127 | 0 | 0.026461 | 0.230704 | 1,179 | 56 | 63 | 21.053571 | 0.670342 | 0 | 0 | 0.444444 | 0 | 0 | 0.154237 | 0 | 0.111111 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.083333 | 0 | 0.194444 | 0.222222 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fad8908e9fc92fffbd4a6f5fdab2d8f88c32a5eb | 1,238 | py | Python | main.py | Goneiross/PDFtools | 1625b422a1f5f0fc436c22d36d3d72d232d1c40c | [
"MIT"
] | null | null | null | main.py | Goneiross/PDFtools | 1625b422a1f5f0fc436c22d36d3d72d232d1c40c | [
"MIT"
] | null | null | null | main.py | Goneiross/PDFtools | 1625b422a1f5f0fc436c22d36d3d72d232d1c40c | [
"MIT"
] | null | null | null | import PyPDF4
import slate3k as slate
import os
import logging
logging.propagate = False
logging.getLogger().setLevel(logging.ERROR)
def findWords(documentName) :
with open(documentName, 'rb') as f:
extracted_text = slate.PDF(f)
with open("names.txt") as n:
names = [name.rstrip() for name in n]
pageNumber = len(extracted_text)
print ("Document :", documentName)
print ("Nombre de pages :", pageNumber)
print("-------------------------------------------------")
for name in names:
pages = []
for pageIndex in range(len(extracted_text)):
if extracted_text[pageIndex].find(name) != -1:
pages.append(pageIndex + 1)
if pages:
print("Word found:", name)
print(pages)
def main():
documentNames = []
dir_path = os.path.dirname(os.path.realpath(__file__))
for root, dirs, files in os.walk(dir_path):
for file in files:
if file.endswith('.pdf'):
print(file)
documentNames.append(file)
for documentName in documentNames :
print ("Document :", documentName)
findWords(documentName)
main() | 29.47619 | 62 | 0.571082 | 138 | 1,238 | 5.050725 | 0.42029 | 0.074605 | 0.025825 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005556 | 0.273021 | 1,238 | 42 | 63 | 29.47619 | 0.768889 | 0 | 0 | 0.054054 | 0 | 0 | 0.091203 | 0.039548 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054054 | false | 0 | 0.108108 | 0 | 0.162162 | 0.189189 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fad8e5dd6c90574808d1e135153f4d55747e2069 | 624 | py | Python | demo.py | nspin/cloudpiercer | 5bfe05f2950d62999db93e980de0c52b99fddfbe | [
"MIT"
] | 2 | 2020-03-20T22:50:00.000Z | 2020-06-12T21:13:37.000Z | demo.py | nspin/cloudpiercer | 5bfe05f2950d62999db93e980de0c52b99fddfbe | [
"MIT"
] | 8 | 2020-04-06T17:43:22.000Z | 2022-02-17T08:25:06.000Z | demo.py | nspin/cloudpiercer | 5bfe05f2950d62999db93e980de0c52b99fddfbe | [
"MIT"
] | 1 | 2020-03-20T22:50:01.000Z | 2020-03-20T22:50:01.000Z | import asyncio
from argparse import ArgumentParser
from aiohttp import ClientSession
from cloudpiercer import CloudPiercer
SOLVER_ENDPOINT = 'http://localhost:8081/solve'
def main():
parser = ArgumentParser()
parser.add_argument('url')
args = parser.parse_args()
cloudpiercer = CloudPiercer(SOLVER_ENDPOINT)
async def go():
async with ClientSession() as sess:
resp, text = await cloudpiercer.fetch(sess, args.url, with_text=True)
print(resp)
print(text)
loop = asyncio.get_event_loop()
loop.run_until_complete(go())
if __name__ == '__main__':
main()
| 24 | 81 | 0.695513 | 73 | 624 | 5.712329 | 0.561644 | 0.086331 | 0.1247 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008081 | 0.206731 | 624 | 25 | 82 | 24.96 | 0.834343 | 0 | 0 | 0 | 0 | 0 | 0.060897 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.210526 | 0 | 0.263158 | 0.105263 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fadb4c879e499931d60f72ed2be27736908649d8 | 4,074 | py | Python | pyramid_swagger/ingest.py | gchin/pyramid_swagger | 97bd662e3731bda0e29677915457ec2e3b697495 | [
"BSD-3-Clause"
] | null | null | null | pyramid_swagger/ingest.py | gchin/pyramid_swagger | 97bd662e3731bda0e29677915457ec2e3b697495 | [
"BSD-3-Clause"
] | null | null | null | pyramid_swagger/ingest.py | gchin/pyramid_swagger | 97bd662e3731bda0e29677915457ec2e3b697495 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os.path
import simplejson
from .load_schema import load_schema
from .model import SwaggerSchema
from .spec import validate_swagger_schemas
API_DOCS_FILENAME = 'api_docs.json'
class ResourceListingNotFoundError(Exception):
pass
class ApiDeclarationNotFoundError(Exception):
pass
def find_resource_names(api_docs_json):
return [
api['path'].lstrip('/')
for api in api_docs_json['apis']
]
def build_schema_mapping(schema_dir):
"""Discovers schema file locations and relations.
:param schema_dir: the directory schema files live inside
:type schema_dir: string
:returns: A tuple of (resource listing filepath, mapping) where the mapping
is between resource name and file path
:rtype: (string, dict)
"""
def resource_name_to_filepath(name):
return os.path.join(schema_dir, '{0}.json'.format(name))
listing, listing_json = _load_resource_listing(schema_dir)
return (
listing,
dict(
(resource, resource_name_to_filepath(resource))
for resource in find_resource_names(listing_json)
)
)
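# Illustrative example (file names assumed): for a schema_dir 'schemas' whose
# api_docs.json lists the apis '/pet' and '/store', this returns:
#
#     ('schemas/api_docs.json',
#      {'pet': 'schemas/pet.json', 'store': 'schemas/store.json'})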
def _load_resource_listing(schema_dir):
"""Load the resource listing from file, handling errors.
:param schema_dir: the directory schema files live inside
:type schema_dir: string
:returns: (resource listing filepath, resource listing json)
"""
resource_listing = os.path.join(schema_dir, API_DOCS_FILENAME)
try:
with open(resource_listing) as resource_listing_file:
resource_listing_json = simplejson.load(resource_listing_file)
# If not found, raise a more user-friendly error.
except IOError:
raise ResourceListingNotFoundError(
'No resource listing found at {0}. Note that your json file '
'must be named {1}'.format(resource_listing, API_DOCS_FILENAME)
)
return resource_listing, resource_listing_json
def compile_swagger_schema(schema_dir, should_validate_schemas):
"""Build a SwaggerSchema from various files.
:param schema_dir: the directory schema files live inside
:type schema_dir: string
:param should_validate_schemas: if True, check schemas for correctness
:type should_validate_schemas: boolean
:returns: a SwaggerSchema object
"""
listing, mapping = build_schema_mapping(schema_dir)
schema_resolvers = ingest_resources(
listing,
mapping,
schema_dir,
should_validate_schemas,
)
return SwaggerSchema(
listing,
mapping,
schema_resolvers,
)
def ingest_resources(listing, mapping, schema_dir, should_validate_schemas):
"""Consume the Swagger schemas and produce a queryable datastructure.
:param listing: Filepath to a resource listing
:type listing: string
:param mapping: Map from resource name to filepath of its api declaration
:type mapping: dict
:param schema_dir: the directory schema files live inside
:type schema_dir: string
:param should_validate_schemas: if True, check schemas for correctness
:type should_validate_schemas: boolean
:returns: A list of SchemaAndResolver objects
"""
resource_filepaths = mapping.values()
ingested_resources = []
for name, filepath in mapping.items():
try:
ingested_resources.append(load_schema(filepath))
# If we have trouble reading any files, raise a more user-friendly
# error.
except IOError:
raise ApiDeclarationNotFoundError(
'No api declaration found at {0}. Attempted to load the `{1}` '
'resource relative to the schema_directory `{2}`. Perhaps '
'your resource name and API declaration file do not '
'match?'.format(filepath, name, schema_dir)
)
if should_validate_schemas:
validate_swagger_schemas(
listing,
resource_filepaths
)
return ingested_resources
| 30.863636 | 79 | 0.691213 | 481 | 4,074 | 5.640333 | 0.272349 | 0.059713 | 0.061924 | 0.025065 | 0.316992 | 0.251382 | 0.251382 | 0.251382 | 0.251382 | 0.174714 | 0 | 0.002262 | 0.240304 | 4,074 | 131 | 80 | 31.099237 | 0.874313 | 0.332106 | 0 | 0.171429 | 0 | 0 | 0.108536 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085714 | false | 0.028571 | 0.085714 | 0.028571 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fadd44baffd42de642862a0a9c740b7396f48977 | 4,038 | py | Python | pxr/usd/plugin/usdAbc/testenv/testUsdAbcConversionHermiteCurves.py | DougRogers-DigitalFish/USD | d8a405a1344480f859f025c4f97085143efacb53 | [
"BSD-2-Clause"
] | 3,680 | 2016-07-26T18:28:11.000Z | 2022-03-31T09:55:05.000Z | pxr/usd/plugin/usdAbc/testenv/testUsdAbcConversionHermiteCurves.py | DougRogers-DigitalFish/USD | d8a405a1344480f859f025c4f97085143efacb53 | [
"BSD-2-Clause"
] | 1,759 | 2016-07-26T19:19:59.000Z | 2022-03-31T21:24:00.000Z | pxr/usd/plugin/usdAbc/testenv/testUsdAbcConversionHermiteCurves.py | DougRogers-DigitalFish/USD | d8a405a1344480f859f025c4f97085143efacb53 | [
"BSD-2-Clause"
] | 904 | 2016-07-26T18:33:40.000Z | 2022-03-31T09:55:16.000Z | #!/pxrpythonsubst
#
# Copyright 2020 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
import unittest
from pxr import Gf
from pxr import Usd
from pxr import UsdAbc
from pxr import UsdGeom
class TestUsdAbcConversionHermiteCurves(unittest.TestCase):
@classmethod
def setUpClass(cls):
usdFile = 'original.usda'
abcFile = 'converted.abc'
UsdAbc._WriteAlembic(usdFile, abcFile)
cls.stage = Usd.Stage.Open(abcFile)
def _assertElementsAlmostEqual(self, seq1, seq2):
self.assertTrue(all(Gf.IsClose(e1, e2, 1e-5)
for e1, e2 in zip(seq1, seq2)))
def _assertEmpty(self, sequence):
self.assertFalse(sequence)
def test_RoundTripHermite(self):
time = Usd.TimeCode.EarliestTime()
prim = self.stage.GetPrimAtPath('/Cubic/Ribbons/VaryingWidth')
schema = UsdGeom.HermiteCurves(prim)
# Interpolation metadata
normalsInterpolation = schema.GetNormalsInterpolation()
widthsInterpolation = schema.GetWidthsInterpolation()
self.assertEqual(normalsInterpolation, UsdGeom.Tokens.varying)
self.assertEqual(widthsInterpolation, UsdGeom.Tokens.varying)
# These attributes may be varying time sampled
curveVertexCounts = schema.GetCurveVertexCountsAttr().Get(time)
points = schema.GetPointsAttr().Get(time)
tangents = schema.GetTangentsAttr().Get(time)
widths = schema.GetWidthsAttr().Get(time)
normals = schema.GetNormalsAttr().Get(time)
self._assertElementsAlmostEqual(
points, [(0, 0, 0), (1, 1, 0), (2, 0, 0)])
self._assertElementsAlmostEqual(
tangents, [(0, 1, 0), (1, 0, 0), (0, -1, 0)])
self._assertElementsAlmostEqual(widths, [0, .5, 0])
self._assertElementsAlmostEqual(
normals, [(0, 0, 1), (0, 0, 1), (0, 0, 1)])
self.assertEqual(list(curveVertexCounts), [3])
def test_RoundTripHermiteWithVelocities(self):
"""Round tripping velocities is ambiguous"""
time = Usd.TimeCode.EarliestTime()
prim = self.stage.GetPrimAtPath('/Cubic/Tubes/WithVelocities')
schema = UsdGeom.HermiteCurves(prim)
# Interpolation metadata
widthsInterpolation = schema.GetWidthsInterpolation()
self.assertEqual(widthsInterpolation, UsdGeom.Tokens.varying)
# These attributes may be varying time sampled
curveVertexCounts = schema.GetCurveVertexCountsAttr().Get(time)
points = schema.GetPointsAttr().Get(time)
velocities = schema.GetVelocitiesAttr().Get(time)
tangents = schema.GetTangentsAttr().Get(time)
widths = schema.GetWidthsAttr().Get(time)
self._assertElementsAlmostEqual(points, [(0, 0, 0), (1, 1, 0)])
self._assertElementsAlmostEqual(tangents, [(0, 1, 0), (1, 0, 0)])
self._assertElementsAlmostEqual(widths, [0, .5])
self._assertEmpty(velocities)
self.assertEqual(list(curveVertexCounts), [2])
if __name__ == '__main__':
unittest.main()
| 39.588235 | 74 | 0.689946 | 455 | 4,038 | 6.076923 | 0.391209 | 0.007957 | 0.040506 | 0.005787 | 0.409403 | 0.353345 | 0.286076 | 0.286076 | 0.286076 | 0.244123 | 0 | 0.02101 | 0.210253 | 4,038 | 101 | 75 | 39.980198 | 0.846033 | 0.298167 | 0 | 0.339286 | 0 | 0 | 0.031406 | 0.019272 | 0 | 0 | 0 | 0 | 0.303571 | 1 | 0.089286 | false | 0 | 0.089286 | 0 | 0.196429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fade7ee830d7c849f1246e21fdd7444efaa21bfb | 3,218 | py | Python | testsuite/tests/Q226-006__UT_bad_file_checker/run_test.py | AdaCore/style_checker | 17108ebfc44375498063ecdad6c6e4430458e60a | [
"CNRI-Python"
] | 2 | 2017-10-22T18:04:26.000Z | 2020-03-06T11:07:41.000Z | testsuite/tests/Q226-006__UT_bad_file_checker/run_test.py | AdaCore/style_checker | 17108ebfc44375498063ecdad6c6e4430458e60a | [
"CNRI-Python"
] | null | null | null | testsuite/tests/Q226-006__UT_bad_file_checker/run_test.py | AdaCore/style_checker | 17108ebfc44375498063ecdad6c6e4430458e60a | [
"CNRI-Python"
] | 4 | 2018-05-22T12:08:54.000Z | 2020-12-14T15:25:27.000Z | import pytest
def test_bad_file_checker(style_checker):
"""Check behavior when pep8 is missing
"""
style_checker.enable_unit_test()
# Derive the TypificChecker class without providing the mandatory
# methods which are otherwise abstract.
from asclib.checkers.typific import TypificChecker, TypificCheckerInfo
from asclib.checkers.rulific.all_checkers import ALL_RULIFIC_CHECKERS
class BadFileChecker(TypificChecker):
# Do provide a complete rulific_decision_map attribute, though,
# as the contents of that dictionary is checked during
# the object's initialization.
rulific_decision_map = dict(
(checker.RULE_CONFIG_NAME, False)
for checker in ALL_RULIFIC_CHECKERS)
# Same the typific_info attribute...
typific_info = TypificCheckerInfo(comment_line_re='#',
ada_RM_spec_p=False,
copyright_box_r_edge_re=None)
bad_checker = BadFileChecker('src/simple.py', None)
# Now verify that calling those methods cause an exception.
from asclib.checkers import FileCheckerError
with pytest.raises(FileCheckerError) as cm:
print(bad_checker.file_type)
expected_output = \
'abstract TypificChecker.file_type property unexpectedly called.'
style_checker.assertOutputEqual(expected_output, str(cm.value))
with pytest.raises(FileCheckerError) as cm:
bad_checker.run_external_checker()
expected_output = \
'abstract TypificChecker.run_external_checker method' \
' unexpectedly called.'
style_checker.assertOutputEqual(expected_output, str(cm.value))
def test_missing_entry_in_rulific_decision_map(style_checker):
"""Test when missing an entry in rulific_decision_map.
"""
style_checker.enable_unit_test()
# Derive the TypificChecker class only providing some of
# the mandatory overrides. In particular, only provide
# an incomplete rulific_decision_map attribute, so as to
# make sure we get an error when trying to instantiate
# that broken class.
#
# We also need to provide the file_type attribute as it is used
# to produce a human-readable error message.
from asclib.checkers.typific import TypificChecker, TypificCheckerInfo
from asclib.checkers.rulific.all_checkers import ALL_RULIFIC_CHECKERS
class IncompleteFileChecker(TypificChecker):
rulific_decision_map = dict(
(checker.RULE_CONFIG_NAME, False)
for checker in ALL_RULIFIC_CHECKERS[:-1])
typific_info = TypificCheckerInfo(comment_line_re='#',
ada_RM_spec_p=False,
copyright_box_r_edge_re=None)
file_type = 'Python script'
# Now verify that calling those methods cause a failed
# assertion.
with pytest.raises(AssertionError) as cm:
IncompleteFileChecker('src/simple.py', None)
expected_output = \
'Python script checker missing config about' \
' %s rule' % ALL_RULIFIC_CHECKERS[-1].RULE_CONFIG_NAME
style_checker.assertOutputEqual(expected_output, str(cm.value))
| 41.792208 | 74 | 0.697017 | 374 | 3,218 | 5.76738 | 0.358289 | 0.038943 | 0.05007 | 0.05146 | 0.502086 | 0.502086 | 0.468707 | 0.405656 | 0.381085 | 0.331015 | 0 | 0.001227 | 0.240211 | 3,218 | 76 | 75 | 42.342105 | 0.880982 | 0.261342 | 0 | 0.55814 | 0 | 0 | 0.09617 | 0.025106 | 0 | 0 | 0 | 0 | 0.093023 | 1 | 0.046512 | false | 0 | 0.139535 | 0 | 0.348837 | 0.023256 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fae0351c02946227f272f5e18e6321f3f13a21fe | 10,980 | py | Python | train.py | francois-rozet/amsi | 0eb1bdfdf2fec37568fb03189d3a51cb794dcac8 | [
"MIT"
] | 1 | 2021-01-27T17:34:20.000Z | 2021-01-27T17:34:20.000Z | train.py | francois-rozet/amsi | 0eb1bdfdf2fec37568fb03189d3a51cb794dcac8 | [
"MIT"
] | null | null | null | train.py | francois-rozet/amsi | 0eb1bdfdf2fec37568fb03189d3a51cb794dcac8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import json
import os

import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim

from datetime import datetime
from tqdm import tqdm
import amnre
from amnre.simulators.slcp import SLCP
from amnre.simulators.gw import GW
from amnre.simulators.hh import HH
def build_embedding(input_size: torch.Size, arch: str = None, **kwargs) -> tuple:
flatten = nn.Flatten(-len(input_size))
if arch == 'MLP':
net = amnre.MLP(input_size.numel(), **kwargs)
return nn.Sequential(flatten, net), net.output_size
elif arch == 'ResNet':
net = amnre.ResNet(input_size.numel(), **kwargs)
return nn.Sequential(flatten, net), net.output_size
else:
return flatten, input_size.numel()
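# Minimal sketch of how this helper is used (illustrative; extra kwargs are
# forwarded to the amnre.MLP / amnre.ResNet constructors):
#
#     embedding, x_size = build_embedding(torch.Size([2, 128]), arch='MLP')
#     # `embedding` flattens the (2, 128) input and feeds it to the MLP;
#     # `x_size` is the MLP's output size.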
def build_instance(settings: dict) -> tuple:
# Simulator
live = None
if settings['simulator'] == 'GW':
simulator = GW()
if settings.get('live', True):
live = simulator.noise
elif settings['simulator'] == 'HH':
simulator = HH()
else:  # settings['simulator'] == 'SLCP'
simulator = SLCP()
simulator.to(settings['device'])
# Dataset
if settings['samples'] is None:
dataset = amnre.OnlineDataset(simulator, batch_size=settings['bs'])
theta, x = simulator.joint()
else:
dataset = amnre.OfflineDataset(settings['samples'], batch_size=settings['bs'], device=settings['device'], live=live)
theta, x = dataset[0]
if theta is None:
theta = simulator.prior.sample()
theta_size = theta.numel()
x_size = x.numel()
# Moments
if settings['weights'] is None:
theta = simulator.prior.sample((2 ** 18,))
moments = torch.mean(theta, dim=0), torch.std(theta, dim=0)
else:
moments = torch.zeros(theta_size), torch.ones(theta_size)
# Model & embedding
embedding, x_size = build_embedding(x.shape, **settings['embedding'])
model_args = settings['model'].copy()
model_args['embedding'] = embedding
model_args['moments'] = moments
if settings['arbitrary']:
model_args['hyper'] = settings['hyper']
model = amnre.AMNRE(theta_size, x_size, **model_args)
else:
masks = amnre.list2masks(settings['masks'], theta_size, settings['filter'])
if len(masks) == 0:
if settings['flow']:
model = amnre.NPE(theta_size, x_size, prior=simulator.prior, **model_args)
else:
model = amnre.NRE(theta_size, x_size, **model_args)
else:
if settings['flow']:
model = amnre.MNPE(masks, x_size, priors=[simulator.masked_prior(m) for m in masks], **model_args)
else:
model = amnre.MNRE(masks, x_size, **model_args)
## Weights
if settings['weights'] is not None:
weights = torch.load(settings['weights'], map_location='cpu')
model.load_state_dict(weights)
model.to(settings['device'])
# Adversary
if os.path.isfile(settings['adversary']) and type(model) in [amnre.NRE, amnre.MNRE]:
adversary = load_model(settings['adversary'])
adversary.to(settings['device'])
adversary.eval()
if type(adversary) in [amnre.NPE, amnre.MNPE]:
adversary.ratio()
if type(model) is amnre.MNRE:
if type(adversary) in [amnre.MNRE, amnre.MNPE]:
adversary.filter(model.masks)
elif type(adversary) in [amnre.AMNRE]:
adversary[model.masks]
else:
adversary = amnre.Dummy()
for p in adversary.parameters():
p.requires_grad = False
return simulator, dataset, model, adversary
def load_settings(filename: str) -> dict:
with open(filename) as f:
settings = json.load(f)
return settings
def load_model(filename: str) -> nn.Module:
settings = load_settings(filename.replace('.pth', '.json'))
settings['weights'] = filename
_, _, model, _ = build_instance(settings)
return model
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Training')
parser.add_argument('-device', default='cpu', choices=['cpu', 'cuda'])
parser.add_argument('-simulator', default='SLCP', choices=['SLCP', 'GW', 'HH'])
parser.add_argument('-samples', default=None, help='samples file (H5)')
parser.add_argument('-live', default=False, action='store_true', help='live samples') # only GW
parser.add_argument('-model', type=json.loads, default={}, help='model architecture')
parser.add_argument('-hyper', type=json.loads, default=None, help='hypernet architecture')
parser.add_argument('-embedding', type=json.loads, default={}, help='embedding architecture')
parser.add_argument('-flow', default=False, action='store_true', help='normalizing flow')
parser.add_argument('-arbitrary', default=False, action='store_true', help='arbitrary architecture')
parser.add_argument('-masks', nargs='+', default=[], help='marginalzation masks')
parser.add_argument('-filter', default=None, help='mask filter')
parser.add_argument('-weights', default=None, help='warm-start weights')
parser.add_argument('-criterion', default='NLL', choices=['NLL', 'FL', 'PL', 'QS'], help='optimization criterion')
parser.add_argument('-adversary', default='notafile.pth', help='adversary network file (PTH)')
parser.add_argument('-inverse', default=False, action='store_true', help='inverse adversary')
parser.add_argument('-epochs', type=int, default=256, help='number of epochs')
parser.add_argument('-descents', type=int, default=256, help='descents per epoch')
parser.add_argument('-bs', type=int, default=1024, help='batch size')
parser.add_argument('-lr', type=float, default=1e-3, help='initial learning rate')
parser.add_argument('-wd', type=float, default=1e-4, help='weight decay')
parser.add_argument('-amsgrad', type=bool, default=False, help='AMS gradient')
parser.add_argument('-scheduler', default='plateau', choices=['plateau', 'exp', 'cosine'], help='learning rate scheduler')
parser.add_argument('-patience', type=int, default=7, help='scheduler patience')
parser.add_argument('-threshold', type=float, default=1e-2, help='scheduler threshold')
parser.add_argument('-factor', type=float, default=5e-1, help='scheduler factor')
parser.add_argument('-min-lr', type=float, default=1e-6, help='minimum learning rate')
parser.add_argument('-clip', type=float, default=1e1, help='gradient norm')
parser.add_argument('-valid', default=None, help='validation samples file (H5)')
parser.add_argument('-o', '--output', default='products/models/out.pth', help='output file (PTH)')
args = parser.parse_args()
args.date = datetime.now().strftime(r'%Y-%m-%d %H:%M:%S')
# Output directory
if os.path.dirname(args.output):
os.makedirs(os.path.dirname(args.output), exist_ok=True)
# Simulator & Model
settings = vars(args)
simulator, dataset, model, adversary = build_instance(settings)
## Arbitrary masks
if args.arbitrary:
theta_size = simulator.prior.sample().numel()
if not args.masks:
args.masks.append('uniform')
if args.masks[0] == 'poisson':
mask_sampler = amnre.PoissonMask(theta_size, args.filter)
elif args.masks[0] == 'uniform':
mask_sampler = amnre.UniformMask(theta_size, args.filter)
else:
masks = amnre.list2masks(args.masks, theta_size, args.filter)
mask_sampler = amnre.SelectionMask(masks)
mask_sampler.to(args.device)
else:
mask_sampler = None
# Criterion(s)
if args.flow:
criterion = amnre.NLL()
elif args.criterion == 'FL':
criterion = amnre.FocalWithLogitsLoss()
elif args.criterion == 'PL':
criterion = amnre.PeripheralWithLogitsLoss()
elif args.criterion == 'QS':
criterion = amnre.QSWithLogitsLoss()
else:  # args.criterion == 'NLL'
criterion = amnre.NLLWithLogitsLoss()
# Optimizer & Scheduler
optimizer = optim.AdamW(
model.parameters(),
lr=args.lr,
weight_decay=args.wd,
amsgrad=args.amsgrad,
)
if args.scheduler == 'cosine':
scheduler = amnre.CosineAnnealingLR(
optimizer,
T_max=args.epochs,
eta_min=args.min_lr,
)
elif args.scheduler == 'exp':
scheduler = amnre.ExponentialLR(
optimizer,
gamma=(args.lr / args.min_lr) ** (-1 / args.epochs),
)
else:  # args.scheduler == 'plateau'
scheduler = amnre.ReduceLROnPlateau(
optimizer,
factor=args.factor,
patience=args.patience,
threshold=args.threshold,
min_lr=args.min_lr,
)
# Datasets
trainset = amnre.LTEDataset(dataset)
if args.valid is not None:
validset = amnre.OfflineDataset(args.valid, batch_size=args.bs, device=args.device)
validset = amnre.LTEDataset(validset)
# Training
stats = []
for epoch in tqdm(range(1, args.epochs + 1)):
model.train()
duration, losses = amnre.routine(
model,
trainset,
criterion,
optimizer=optimizer,
adversary=adversary,
inverse=args.inverse,
descents=args.descents,
flow=args.flow,
mask_sampler=mask_sampler,
clip=args.clip,
)
stats.append({
'epoch': epoch,
'time': duration,
'lr': scheduler.lr,
'mean': losses.mean(dim=0).tolist(),
'std': losses.std(dim=0).tolist(),
})
if args.valid is not None:
with torch.no_grad():
model.eval()
_, v_losses = amnre.routine(
model,
validset,
criterion,
optimizer=None,
adversary=adversary,
inverse=args.inverse,
flow=args.flow,
mask_sampler=mask_sampler,
)
stats[-1].update({
'v_mean': v_losses.mean(dim=0).tolist(),
'v_std': v_losses.std(dim=0).tolist(),
})
scheduler.step(v_losses.mean())
else:
scheduler.step(losses.mean())
df = pd.DataFrame(stats)
df.to_csv(args.output.replace('.pth', '.csv'), index=False)
if scheduler.bottom:
break
# Outputs
## Weights
if hasattr(model, 'clear'):
model.clear()
torch.save(model.cpu().state_dict(), args.output)
## Settings
with open(args.output.replace('.pth', '.json'), 'w') as f:
json.dump(settings, f, indent=4)
| 33.577982 | 126 | 0.613479 | 1,275 | 10,980 | 5.186667 | 0.213333 | 0.039468 | 0.07455 | 0.013912 | 0.159081 | 0.080145 | 0.036897 | 0.018449 | 0.018449 | 0.018449 | 0 | 0.00557 | 0.247814 | 10,980 | 326 | 127 | 33.680982 | 0.795133 | 0.027778 | 0 | 0.153527 | 0 | 0 | 0.108732 | 0.00216 | 0 | 0 | 0 | 0 | 0 | 1 | 0.016598 | false | 0 | 0.06639 | 0 | 0.107884 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fae1a1fa4ccde0721ac41c806e9039d82ded972b | 1,039 | py | Python | templates and examples/announcements example/GET_announcements_example.py | elmiguel/Bb_rest_helper | 15b951bd629586fd672ddb4a9e68c29fb4e77709 | [
"BSD-3-Clause"
] | null | null | null | templates and examples/announcements example/GET_announcements_example.py | elmiguel/Bb_rest_helper | 15b951bd629586fd672ddb4a9e68c29fb4e77709 | [
"BSD-3-Clause"
] | null | null | null | templates and examples/announcements example/GET_announcements_example.py | elmiguel/Bb_rest_helper | 15b951bd629586fd672ddb4a9e68c29fb4e77709 | [
"BSD-3-Clause"
] | null | null | null | #imports
from Bb_rest_helper import Get_Config
from Bb_rest_helper import Auth_Helper
from Bb_rest_helper import Bb_Requests
from Bb_rest_helper import Bb_Utils
def main():
#Initialize an instance of the Get_Config class, passing the file path of the configuration file as an argument.
config=Get_Config("./learn_config.json")
#Get configuration values from learn_config.json.
url=config.get_url()
key=config.get_key()
secret=config.get_secret()
#Set logging
utils = Bb_Utils()
utils.set_logging()
#Authentication
auth=Auth_Helper(url,key,secret)
token=auth.learn_auth()
#Prepare the request
GET_announcements_endpoint=f'{url}/learn/api/public/v1/announcements'
params={
"limit":"10",
"fields":"id,title,body"
}
#Request
req = Bb_Requests()
GET_announcements = req.Bb_GET(GET_announcements_endpoint, token, params)
#Pretty print results to the console.
utils.pretty_printer(GET_announcements)
if __name__ == "__main__":
main()
| 25.341463 | 112 | 0.710298 | 141 | 1,039 | 4.957447 | 0.425532 | 0.034335 | 0.057225 | 0.091559 | 0.131617 | 0.06867 | 0 | 0 | 0 | 0 | 0 | 0.003597 | 0.197305 | 1,039 | 40 | 113 | 25.975 | 0.834532 | 0.237729 | 0 | 0 | 0 | 0 | 0.117347 | 0.049745 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.173913 | 0 | 0.217391 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fae3a6d7d10b9c8097dabd75c90ca0b71987eba6 | 1,376 | py | Python | numba/roc/tests/hsapy/test_positioning.py | tolysz/numba | d7953a18dbf5ea231dc16e967ce8e9b754578ea6 | [
"Apache-2.0",
"BSD-2-Clause"
] | null | null | null | numba/roc/tests/hsapy/test_positioning.py | tolysz/numba | d7953a18dbf5ea231dc16e967ce8e9b754578ea6 | [
"Apache-2.0",
"BSD-2-Clause"
] | 1 | 2019-02-11T13:46:30.000Z | 2019-02-11T13:46:30.000Z | numba/roc/tests/hsapy/test_positioning.py | asodeur/numba | d7953a18dbf5ea231dc16e967ce8e9b754578ea6 | [
"Apache-2.0",
"BSD-2-Clause"
] | null | null | null | import numpy as np
from numba import roc
import numba.unittest_support as unittest
class TestPositioning(unittest.TestCase):
def test_kernel_jit(self):
@roc.jit
def udt(output):
global_id = roc.get_global_id(0)
global_size = roc.get_global_size(0)
local_id = roc.get_local_id(0)
group_id = roc.get_group_id(0)
num_groups = roc.get_num_groups(0)
workdim = roc.get_work_dim()
local_size = roc.get_local_size(0)
output[0, group_id, local_id] = global_id
output[1, group_id, local_id] = global_size
output[2, group_id, local_id] = local_id
output[3, group_id, local_id] = local_size
output[4, group_id, local_id] = group_id
output[5, group_id, local_id] = num_groups
output[6, group_id, local_id] = workdim
out = np.zeros((7, 2, 3), dtype=np.intp)
udt[2, 3](out)
np.testing.assert_equal([[0, 1, 2], [3, 4, 5]], out[0])
np.testing.assert_equal(6, out[1])
np.testing.assert_equal([[0, 1, 2]] * 2, out[2])
np.testing.assert_equal(3, out[3])
np.testing.assert_equal([[0, 0, 0], [1, 1, 1]], out[4])
np.testing.assert_equal(2, out[5])
np.testing.assert_equal(1, out[6])
if __name__ == '__main__':
unittest.main()
| 32.761905 | 63 | 0.587936 | 210 | 1,376 | 3.57619 | 0.228571 | 0.093209 | 0.095872 | 0.130493 | 0.193076 | 0.061252 | 0.061252 | 0 | 0 | 0 | 0 | 0.045593 | 0.282703 | 1,376 | 41 | 64 | 33.560976 | 0.715299 | 0 | 0 | 0 | 0 | 0 | 0.005818 | 0 | 0 | 0 | 0 | 0 | 0.21875 | 1 | 0.0625 | false | 0 | 0.09375 | 0 | 0.1875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fae926464e0fc8eaff483735b546273b89790232 | 6,563 | py | Python | sprocket/model/diagGMM.py | kan-bayashi/sprocket | b87e1aa3822ed23019b5373660e0f977cbc6c996 | [
"MIT"
] | 3 | 2020-06-03T08:29:49.000Z | 2022-03-23T02:29:01.000Z | sprocket/model/diagGMM.py | kan-bayashi/sprocket | b87e1aa3822ed23019b5373660e0f977cbc6c996 | [
"MIT"
] | 1 | 2020-06-07T23:06:10.000Z | 2020-06-07T23:06:10.000Z | sprocket/model/diagGMM.py | kan-bayashi/sprocket | b87e1aa3822ed23019b5373660e0f977cbc6c996 | [
"MIT"
] | 1 | 2020-06-03T09:41:42.000Z | 2020-06-03T09:41:42.000Z | # -*- coding: utf-8 -*-
import numpy as np
import sklearn.mixture
from sklearn.mixture.gaussian_mixture import _compute_precision_cholesky
class BlockDiagonalGaussianMixture(sklearn.mixture.GaussianMixture):
"""GMM with block diagonal covariance matrix
This class offers the training of GMM with block diagonal covariance matrix.
    Note that the parent class (GaussianMixture) is trained with a full covariance matrix.
Parameters
----------
n_mix : int, optional
The number of mixture components of the GMM
Default set to 32.
n_iter : int, optional
The number of iteration for EM algorithm.
Default set to 100.
    floor : float, optional
        Flooring (regularization) added to the covariance matrix.
        Default set to 1e-6.
Attributes
----------
param :
        Sklearn-based model parameters of the GMM
"""
def __init__(self, n_mix=32, n_iter=100, floor=1e-6):
super().__init__(n_components=n_mix, reg_covar=floor, max_iter=n_iter,
covariance_type='full')
self.n_mix = n_mix
self.n_iter = n_iter
self.floor = floor
# seed for random in sklearn
self.random_state = np.random.mtrand._rand
def fit(self, X):
"""Fit GMM parameters to X
Parameters
----------
X : array-like, shape (n_samples, n_features)
"""
# initialize
self._initialize_parameters(X, self.random_state)
lower_bound = -np.infty
for n in range(self.n_iter):
# E-step
log_prob_norm, log_resp = self._e_step(X)
# M-step
self._m_step(X, log_resp)
            # check convergence: stop early once the lower bound stops improving
            back_lower_bound = lower_bound
            lower_bound = self._compute_lower_bound(
                log_resp, log_prob_norm)
            if abs(lower_bound - back_lower_bound) < self.tol:
                break
def _m_step(self, X, log_resp):
"""M step.
Parameters
----------
X : array-like, shape (n_samples, n_features)
log_resp : array-like, shape (n_samples, n_components)
Logarithm of the posterior probabilities (or responsibilities) of
the point of each sample in X.
"""
n_samples, _ = X.shape
self.weights_, self.means_, self.covariances_ = (
self._estimate_gaussian_parameters(X, np.exp(log_resp), self.reg_covar,
self.covariance_type))
self.weights_ /= n_samples
self.precisions_cholesky_ = _compute_precision_cholesky(
self.covariances_, self.covariance_type)
def _estimate_gaussian_parameters(self, X, resp, reg_covar, covariance_type):
"""Estimate the Gaussian distribution parameters.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The input data array.
resp : array-like, shape (n_samples, n_components)
The responsibilities for each data sample in X.
reg_covar : float
The regularization added to the diagonal of the covariance matrices.
covariance_type : {'full', 'tied', 'diag', 'spherical'}
The type of precision matrices.
Returns
-------
nk : array-like, shape (n_components,)
The numbers of data samples in the current components.
means : array-like, shape (n_components, n_features)
The centers of the current components.
covariances : array-like (n_components, n_features, n_features)
The covariance matrix of the current components.
The shape depends of the covariance_type.
"""
# estimate weight and mean
nk = resp.sum(axis=0) + 10 * np.finfo(resp.dtype).eps
means = np.dot(resp.T, X) / nk[:, np.newaxis]
# estimate covariance
n_components, n_features = means.shape
diagcov = self._calculate_diag_covariances(resp, nk, X, X,
means, means)
xycov = self._calculate_diag_covariances(resp, nk,
X[:, :n_features // 2],
X[:, n_features // 2:],
means[:, :n_features // 2],
means[:, n_features // 2:])
# block_diag to full
covariances = self._block_diag_to_full(diagcov, xycov)
return nk, means, covariances
def _block_diag_to_full(self, diagcov, xycov):
"""Transform diagonal covariance to full covariance
Parameters
----------
diagcov : array-like, shape (n_components, n_features)
Diagonal covariance
xycov : array-like, shape (n_components, n_features // 2)
Variance-covariance
Returns
-------
covariance : array-like, shape (n_components, n_features, n_features)
            Full covariance consisting of xxcov, xycov, yxcov, yycov
"""
n_components, n_features = diagcov.shape
covariances = np.empty((n_components, n_features, n_features))
for m in range(n_components):
covariances[m] = np.diag(diagcov[m])
covariances[m, n_features // 2:,
:n_features // 2] = np.diag(xycov[m])
covariances[m, :n_features // 2,
n_features // 2:] = np.diag(xycov[m])
return covariances
def _calculate_diag_covariances(self, resp, nk, x, y, xmeans, ymeans):
"""Calculate diagonal covariance in each portion
Parameters
----------
resp : array-like, shape (n_samples, n_components)
The responsibilities for each data sample in X.
nk : array-like, shape (n_components,)
The numbers of data samples in the current components.
        x, y : array-like, shape (n_samples, n_features)
            The input data arrays of the source and the target.
xmeans, ymeans : array-like, shape (n_components, n_features)
Mean of x and y
Returns
-------
diag_covariances : array-like, shape (n_components, n_features)
"""
avg_XY = np.dot(resp.T, x * y) / nk[:, np.newaxis]
avg_xymeans = xmeans * ymeans
avg_x_ymeans = ymeans * np.dot(resp.T, x) / nk[:, np.newaxis]
avg_y_xmeans = xmeans * np.dot(resp.T, y) / nk[:, np.newaxis]
diag_covariances = avg_XY - \
(avg_x_ymeans + avg_y_xmeans) + avg_xymeans + self.floor
return diag_covariances
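
# Minimal usage sketch (not part of the original module). The shapes and the
# mixture size are illustrative assumptions, and it presumes the pre-0.22
# scikit-learn layout that the private import at the top of this file relies on.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X = rng.randn(500, 8)  # joint features: first 4 dims source, last 4 target
    gmm = BlockDiagonalGaussianMixture(n_mix=2, n_iter=10)
    gmm.fit(X)
    print(gmm.means_.shape)        # (2, 8)
    print(gmm.covariances_.shape)  # (2, 8, 8), block-diagonal per component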
| 35.668478 | 85 | 0.580832 | 769 | 6,563 | 4.743823 | 0.209363 | 0.061678 | 0.057566 | 0.061678 | 0.319353 | 0.304276 | 0.266173 | 0.180099 | 0.157895 | 0.131853 | 0 | 0.00565 | 0.325766 | 6,563 | 183 | 86 | 35.863388 | 0.818757 | 0.419016 | 0 | 0 | 0 | 0 | 0.00123 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.101695 | false | 0 | 0.050847 | 0 | 0.220339 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
faea7e9591c0e3b9bf385325c1b31a252b6e8852 | 1,168 | py | Python | fft/first_idft.py | mherbert7/dsp | 611da522ff2c659bf4e8d1f124999ed39937f9d9 | [
"Unlicense"
] | null | null | null | fft/first_idft.py | mherbert7/dsp | 611da522ff2c659bf4e8d1f124999ed39937f9d9 | [
"Unlicense"
] | null | null | null | fft/first_idft.py | mherbert7/dsp | 611da522ff2c659bf4e8d1f124999ed39937f9d9 | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 11 23:31:51 2018
@author: Marcus
"""
import numpy as np
import time
N = 2**4
samples = np.random.normal(0, 1, N) + 1j * np.random.normal(0, 1, N)
pre_calc = np.zeros(N, dtype=complex)  # np.complex was removed in NumPy 1.24; use the builtin
pre_m = np.zeros((N, N), dtype=complex)
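# IDFT definition: x[m] = (1/N) * sum_{n=0..N-1} X[n] * exp(1j*2*pi*n*m/N);
# precomputing the exponentials in pre_m turns the double loop below into
# pure multiply-adds, at the cost of O(N^2) memory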
for n in range(N):
pre_calc[n] = (1j * 2 * np.pi * n) / N
for m in range(N):
pre_m[m] = np.exp(pre_calc * m)
def my_idft(x, pre_calc_vals):
N = len(x)
    output = np.zeros(N, dtype=complex)
for m in range(N):
intermediate_sum = 0
for n in range(N):
intermediate_sum += x[n] * pre_calc_vals[m][n]
output[m] = intermediate_sum
return output / N
my_t0 = time.perf_counter()  # time.clock() was removed in Python 3.8
my_result = my_idft(samples, pre_m)
my_t1 = time.perf_counter()
np_t0 = time.perf_counter()
np_result = np.fft.ifft(samples)
np_t1 = time.perf_counter()
if(np.allclose(my_result, np_result)):
print("Results match!")
else:
print("Error: Results do not match!")
my_time = my_t1 - my_t0
np_time = np_t1 - np_t0
print("My IDFT:", my_time, "s")
print("NP IDFT:", np_time, "s")
print("NP is", my_time / np_time, "times faster.") | 20.491228 | 68 | 0.599315 | 206 | 1,168 | 3.237864 | 0.330097 | 0.052474 | 0.047976 | 0.067466 | 0.256372 | 0.116942 | 0 | 0 | 0 | 0 | 0 | 0.034949 | 0.240582 | 1,168 | 57 | 69 | 20.491228 | 0.717024 | 0.064212 | 0 | 0.117647 | 0 | 0 | 0.071823 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029412 | false | 0 | 0.058824 | 0 | 0.117647 | 0.147059 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
faeab8b66f91683aa6f90e52431fcd0991331853 | 3,348 | py | Python | shambler.py | bruno-chavez/shambler | 5ec0fac191b332938fd0ce9f94b55b9b4ba5f762 | [
"MIT"
] | null | null | null | shambler.py | bruno-chavez/shambler | 5ec0fac191b332938fd0ce9f94b55b9b4ba5f762 | [
"MIT"
] | 1 | 2018-04-17T03:09:46.000Z | 2018-04-27T11:38:54.000Z | shambler.py | bruno-chavez/shambler | 5ec0fac191b332938fd0ce9f94b55b9b4ba5f762 | [
"MIT"
] | 2 | 2018-04-26T05:09:09.000Z | 2018-11-02T18:50:41.000Z | import os
from itertools import chain
JSON_EXT = '.json'
JSON_FOLDER = 'JSON_Files'
def shambler(source_file, target_file_path, json_key):
    # Resolve user-supplied paths; bare file names are resolved relative to
    # the script's JSON_Files directory by _resolve_path.
source_file = _resolve_path(source_file)
if not os.path.exists(source_file):
raise FileNotFoundError('%s was not found.' % source_file)
target_file_path = _resolve_path(target_file_path, extension=JSON_EXT)
    # Read all lines of the source file into a list.
    with open(source_file, 'r') as f:
        source_file_lines = f.readlines()
# Replaces all the double quotes to single quotes and strips trailing whitespace and removes empty lines
source_file_lines = [line.rstrip().replace('\"', '\'')
for line in source_file_lines]
source_file_lines = [line for line in source_file_lines if line]
with open(target_file_path, 'w') as json_file:
json_file.write("[\n")
num_lines = len(source_file_lines)
keys = _resolve_key_list(json_key, num_lines)
for i, (line, key) in enumerate(zip(source_file_lines, keys)):
json_file.write("\t{\n")
json_file.write('\t"{K}": "{V}"\n'.format(K=key, V=line))
json_file.write('\t}\n' if i == num_lines - 1 else '\t},\n')
json_file.write("]")
return target_file_path
def shambler_interactive():
# Necessary inputs for shambler to work with.
file_path = input("Enter your input plain text file: ")
json_file_name = input("Enter a name for the output JSON file: ")
# User has the option of inputting json_key in the format "key, number, key, number"
# So that they can specify keys and the number of uses of each key in order.
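    # e.g. "title,2,body,1" would use 'title' for the first two lines and
    # 'body' for all remaining lines (illustrative key names, not required ones).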
json_key = input("Enter a key to use in the JSON file: ")
output_file = shambler(file_path, json_file_name, json_key)
print("%s created successfully." % output_file)
def _resolve_key_list(json_key_user_input, num_lines):
if ',' in json_key_user_input:
# Split user input by commas
keys = json_key_user_input.strip().split(',')
keys = zip(keys[::2], keys[1::2])
# Creating a list of each key entry multiplied by the number following it.
# Added max in case user enters negative/zero value to default to 1
keys = list(chain.from_iterable(
[[key_pair[0]] * max(int(key_pair[1]), 1) for key_pair in keys]))
# Now filling out the list with the last entry if it does not match the number of lines in the source file.
if len(keys) < num_lines:
keys = keys + [keys[-1]] * abs(len(keys) - num_lines)
else:
keys = [json_key_user_input] * num_lines
return keys
def _resolve_path(file_path, extension=''):
if not file_path:
raise IOError('Missing input. Please try again.')
script_path = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
# Resolves the json file path to an absolute path within the JSON_Files folder if not already a path
if os.path.sep not in file_path:
file_path = os.path.join(script_path, JSON_FOLDER, file_path)
return file_path + extension
if __name__ == '__main__':
shambler_interactive()
| 38.930233 | 115 | 0.670848 | 512 | 3,348 | 4.164063 | 0.3125 | 0.070356 | 0.056285 | 0.030019 | 0.142589 | 0.045028 | 0 | 0 | 0 | 0 | 0 | 0.003499 | 0.23178 | 3,348 | 85 | 116 | 39.388235 | 0.825428 | 0.258961 | 0 | 0 | 0 | 0.039216 | 0.101297 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078431 | false | 0 | 0.039216 | 0 | 0.176471 | 0.019608 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
faec0617cec52d2af368906d78d2dfaae3d250b7 | 5,920 | py | Python | ionoscloud/models/kubernetes_auto_scaling.py | ionos-cloud/ionos-cloud-sdk-python | 3c5804697c262898e6f6a438dc40e1b45a4bb5c9 | [
"Apache-2.0"
] | null | null | null | ionoscloud/models/kubernetes_auto_scaling.py | ionos-cloud/ionos-cloud-sdk-python | 3c5804697c262898e6f6a438dc40e1b45a4bb5c9 | [
"Apache-2.0"
] | null | null | null | ionoscloud/models/kubernetes_auto_scaling.py | ionos-cloud/ionos-cloud-sdk-python | 3c5804697c262898e6f6a438dc40e1b45a4bb5c9 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
CLOUD API
IONOS Enterprise-grade Infrastructure as a Service (IaaS) solutions can be managed through the Cloud API, in addition or as an alternative to the \"Data Center Designer\" (DCD) browser-based tool. Both methods employ consistent concepts and features, deliver similar power and flexibility, and can be used to perform a multitude of management tasks, including adding servers, volumes, configuring networks, and so on. # noqa: E501
The version of the OpenAPI document: 6.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from ionoscloud.configuration import Configuration
class KubernetesAutoScaling(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'min_node_count': 'int',
'max_node_count': 'int',
}
attribute_map = {
'min_node_count': 'minNodeCount',
'max_node_count': 'maxNodeCount',
}
def __init__(self, min_node_count=None, max_node_count=None, local_vars_configuration=None): # noqa: E501
"""KubernetesAutoScaling - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._min_node_count = None
self._max_node_count = None
self.discriminator = None
self.min_node_count = min_node_count
self.max_node_count = max_node_count
@property
def min_node_count(self):
"""Gets the min_node_count of this KubernetesAutoScaling. # noqa: E501
        The minimum number of worker nodes that the managed node group can scale in. Should be set together with 'maxNodeCount'. The value must be at least 1 and not greater than maxNodeCount. # noqa: E501
:return: The min_node_count of this KubernetesAutoScaling. # noqa: E501
:rtype: int
"""
return self._min_node_count
@min_node_count.setter
def min_node_count(self, min_node_count):
"""Sets the min_node_count of this KubernetesAutoScaling.
        The minimum number of worker nodes that the managed node group can scale in. Should be set together with 'maxNodeCount'. The value must be at least 1 and not greater than maxNodeCount. # noqa: E501
:param min_node_count: The min_node_count of this KubernetesAutoScaling. # noqa: E501
:type min_node_count: int
"""
if self.local_vars_configuration.client_side_validation and min_node_count is None: # noqa: E501
raise ValueError("Invalid value for `min_node_count`, must not be `None`") # noqa: E501
self._min_node_count = min_node_count
@property
def max_node_count(self):
"""Gets the max_node_count of this KubernetesAutoScaling. # noqa: E501
        The maximum number of worker nodes that the managed node pool can scale out. Should be set together with 'minNodeCount'. The value must be at least 1 and not less than minNodeCount. # noqa: E501
:return: The max_node_count of this KubernetesAutoScaling. # noqa: E501
:rtype: int
"""
return self._max_node_count
@max_node_count.setter
def max_node_count(self, max_node_count):
"""Sets the max_node_count of this KubernetesAutoScaling.
        The maximum number of worker nodes that the managed node pool can scale out. Should be set together with 'minNodeCount'. The value must be at least 1 and not less than minNodeCount. # noqa: E501
:param max_node_count: The max_node_count of this KubernetesAutoScaling. # noqa: E501
:type max_node_count: int
"""
if self.local_vars_configuration.client_side_validation and max_node_count is None: # noqa: E501
raise ValueError("Invalid value for `max_node_count`, must not be `None`") # noqa: E501
self._max_node_count = max_node_count
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, KubernetesAutoScaling):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, KubernetesAutoScaling):
return True
return self.to_dict() != other.to_dict()
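
# Minimal usage sketch (not part of the generated code; the node counts below
# are illustrative assumptions):
if __name__ == '__main__':
    scaling = KubernetesAutoScaling(min_node_count=1, max_node_count=3)
    print(scaling.to_dict())  # {'min_node_count': 1, 'max_node_count': 3}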
| 37.707006 | 438 | 0.651689 | 770 | 5,920 | 4.823377 | 0.232468 | 0.101777 | 0.067851 | 0.03231 | 0.542811 | 0.512924 | 0.506193 | 0.400108 | 0.378568 | 0.303177 | 0 | 0.014839 | 0.271453 | 5,920 | 156 | 439 | 37.948718 | 0.846279 | 0.426014 | 0 | 0.085714 | 0 | 0 | 0.074138 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.057143 | 0 | 0.371429 | 0.028571 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
faec3f0c82aaf05f81a0627d474207469cf7fa10 | 1,169 | py | Python | Leetcode/Python Solutions/Dynamic Programming/UniqueBinarySearchTrees.py | Mostofa-Najmus-Sakib/Applied-Algorithm | bc656fd655617407856e0ce45b68585fa81c5035 | [
"MIT"
] | 1 | 2020-01-06T02:21:56.000Z | 2020-01-06T02:21:56.000Z | Leetcode/Python Solutions/Dynamic Programming/UniqueBinarySearchTrees.py | Mostofa-Najmus-Sakib/Applied-Algorithm | bc656fd655617407856e0ce45b68585fa81c5035 | [
"MIT"
] | null | null | null | Leetcode/Python Solutions/Dynamic Programming/UniqueBinarySearchTrees.py | Mostofa-Najmus-Sakib/Applied-Algorithm | bc656fd655617407856e0ce45b68585fa81c5035 | [
"MIT"
] | 3 | 2021-02-22T17:41:01.000Z | 2022-01-13T05:03:19.000Z | """
LeetCode Problem: 96. Unique Binary Search Trees
Link: https://leetcode.com/problems/unique-binary-search-trees/
Video Link: https://www.youtube.com/watch?v=CMaZ69P1bAc
Resources: https://en.wikipedia.org/wiki/Catalan_number
Written by: Mostofa Adib Shakib
Language: Python
For Catalan(3)
Catalan(2)
/ \
Catalan(1) Catalan(1)
(LST) (RST)
LeftSubTree(LST): Value increases upto the given catalan number
RightSubTree(RST): Value decreases until 0
"""
# Dynamic Programming
# Time Complexity: O(n*m)
# Space Complexity: O(n)
class Solution:
def numTrees(self, n: int) -> int:
# Assume the 0th case
# The catalan number for Catalan(1) = 1
dp = [1, 1] + [0] * (n-1)
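        # Catalan recurrence: C(i) = sum_{j=1..i} C(j-1) * C(i-j), where value
        # j is the root, C(j-1) counts left subtrees and C(i-j) right subtrees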
        for i in range(2, n+1): # The outer loop computes the Catalan number for each tree size i
            for j in range(1, i+1): # The inner loop makes each value j the root and sums over all root choices
                dp[i] += dp[j-1] * dp[i-j] # j-1 nodes form the left subtree, i-j nodes the right subtree
return dp[-1] | 31.594595 | 142 | 0.620188 | 165 | 1,169 | 4.387879 | 0.533333 | 0.089779 | 0.049724 | 0.063536 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.027219 | 0.27716 | 1,169 | 37 | 143 | 31.594595 | 0.829586 | 0.717707 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
faeee7998bd569d00da26eb3e5219811d8da8001 | 2,252 | py | Python | shellpython/shellpy.py | wujuguang/shellpy | 54a41b7bfc6a7b6b10fd19419ce058bddd96e5bd | [
"BSD-3-Clause"
] | 706 | 2016-01-18T12:45:55.000Z | 2022-03-31T05:06:14.000Z | shellpython/shellpy.py | wujuguang/shellpy | 54a41b7bfc6a7b6b10fd19419ce058bddd96e5bd | [
"BSD-3-Clause"
] | 61 | 2016-01-17T08:08:40.000Z | 2022-02-13T19:18:01.000Z | shellpython/shellpy.py | wujuguang/shellpy | 54a41b7bfc6a7b6b10fd19419ce058bddd96e5bd | [
"BSD-3-Clause"
] | 78 | 2016-02-13T14:56:33.000Z | 2022-03-15T22:01:16.000Z | #!/usr/bin/env python
import sys
import os
import re
import subprocess
import shellpython.config as config
from shellpython.preprocessor import preprocess_file
from argparse import ArgumentParser
from shellpython.constants import *
def main2():
main(python_version=2)
def main3():
main(python_version=3)
def main(python_version):
custom_usage = '''%(prog)s [SHELLPY ARGS] file [SCRIPT ARGS]
For arguments help use:
%(prog)s --help
'''
custom_epilog = '''github : github.com/lamerman/shellpy'''
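    # Split argv at the first *.spy file: arguments before it belong to
    # shellpy itself, arguments after it belong to the script.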
try:
        spy_file_index = next(index for index, arg in enumerate(sys.argv) if re.match(r'.+\.spy$', arg))
shellpy_args = sys.argv[1:spy_file_index]
script_args = sys.argv[spy_file_index + 1:]
except StopIteration:
shellpy_args = sys.argv[1:]
spy_file_index = None
parser = ArgumentParser(description='A tool for convenient shell scripting in python',
usage=custom_usage, epilog=custom_epilog)
parser.add_argument('-v', '--verbose', help='increase output verbosity. Always print the command being executed',
action="store_true")
parser.add_argument('-vv', help='even bigger output verbosity. All stdout and stderr of executed commands is '
'printed', action="store_true")
shellpy_args, _ = parser.parse_known_args(shellpy_args)
if spy_file_index is None:
exit('No *.spy file was specified. Only *.spy files are supported by the tool.')
if shellpy_args.verbose or shellpy_args.vv:
config.PRINT_ALL_COMMANDS = True
if shellpy_args.vv:
config.PRINT_STDOUT_ALWAYS = True
config.PRINT_STDERR_ALWAYS = True
filename = sys.argv[spy_file_index]
processed_file = preprocess_file(filename, is_root_script=True, python_version=python_version)
    # add the directory of the script to PYTHONPATH so it can import its sibling modules
new_env = os.environ.copy()
new_env['PYTHONPATH'] = new_env.get("PYTHONPATH", '') + os.pathsep + os.path.dirname(filename)
new_env[SHELLPY_PARAMS] = config.dumps()
retcode = subprocess.call(processed_file + ' ' + ' '.join(script_args), shell=True, env=new_env)
exit(retcode)
if __name__ == '__main__':
main()
| 32.171429 | 117 | 0.682504 | 295 | 2,252 | 4.99661 | 0.410169 | 0.059701 | 0.048847 | 0.024423 | 0.100407 | 0.042062 | 0.042062 | 0.042062 | 0 | 0 | 0 | 0.00395 | 0.213144 | 2,252 | 69 | 118 | 32.637681 | 0.827878 | 0.029307 | 0 | 0 | 0 | 0 | 0.214286 | 0.012363 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.166667 | 0 | 0.229167 | 0.041667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
faef057ae695587c91a977897400ae054181fd10 | 3,833 | py | Python | tordatahub/auth/AliyunAccount.py | jasonz93/python-tordatahub | 3a9a497d5a0bebf915d7e24049dd8b06099e3c04 | [
"Apache-2.0"
] | null | null | null | tordatahub/auth/AliyunAccount.py | jasonz93/python-tordatahub | 3a9a497d5a0bebf915d7e24049dd8b06099e3c04 | [
"Apache-2.0"
] | null | null | null | tordatahub/auth/AliyunAccount.py | jasonz93/python-tordatahub | 3a9a497d5a0bebf915d7e24049dd8b06099e3c04 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import hmac
import base64
from hashlib import sha1
from collections import OrderedDict
from ..thirdparty import six
from ..thirdparty.six.moves.urllib.parse import urlparse, unquote, parse_qsl
from ..models import Headers
from ..utils import Logger, hmac_sha1
from .core import Account, AccountType
class AliyunAccount(Account):
"""
Aliyun account implement base from :class:`tordatahub.auth.Account`
"""
__slots__ = '_access_id', '_access_key'
def __init__(self, *args, **kwds):
self._access_id = kwds.get('access_id', '')
self._access_key = kwds.get('access_key', '')
super(AliyunAccount, self).__init__(*args, **kwds)
@property
def access_id(self):
return self._access_id
@access_id.setter
def access_id(self, value):
self._access_id = value
@property
def access_key(self):
return self._access_key
@access_key.setter
def access_key(self, value):
self._access_key = value
def get_type(self):
"""
Get account type.
:return: the account type
        :rtype: :class:`tordatahub.auth.AccountType`
"""
return AccountType.ALIYUN
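
    # The canonical string built below stacks, one item per line: the HTTP
    # method, the Content-Type and Date headers, every 'x-datahub-*' header as
    # 'key:value' (sorted by key), and finally the resource path.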
def _build_canonical_str(self, url_components, req):
# Build signing string
lines = [req.method, req.headers[Headers.CONTENT_TYPE], req.headers[Headers.DATE], ]
headers_to_sign = dict()
# req headers
headers = req.headers
for k, v in six.iteritems(headers):
k = k.lower()
if k.startswith('x-datahub-'):
headers_to_sign[k] = v
        # url params (parse_qsl keeps blank values; pairs are sorted by key)
        if url_components.query:
            params_list = sorted(parse_qsl(url_components.query, True),
                                 key=lambda it: it[0])
            for k, v in params_list:
                if k.startswith('x-datahub-'):
                    headers_to_sign[k] = v
headers_to_sign = OrderedDict([(k, headers_to_sign[k])
for k in sorted(headers_to_sign)])
Logger.logger.debug('headers to sign: %s' % headers_to_sign)
for k, v in six.iteritems(headers_to_sign):
lines.append('%s:%s' % (k, v))
lines.append(url_components.path)
return '\n'.join(lines)
def sign_request(self, req, endpoint):
"""
Generator signature for request.
:param req: request object
:param endpoint: tordatahub server endpoint
:return: none
"""
url = req.url[len(endpoint):]
url_components = urlparse(unquote(url))
canonical_str = self._build_canonical_str(url_components, req)
Logger.logger.debug('canonical string: ' + canonical_str)
sign = hmac_sha1(self._access_key, canonical_str).decode()
auth_str = 'DATAHUB %s:%s' %(self._access_id, sign)
req.headers[Headers.AUTHORIZATION] = auth_str
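
# Minimal usage sketch (illustrative values; the endpoint and credentials are
# assumptions, not real ones):
#   account = AliyunAccount(access_id='my-id', access_key='my-secret')
#   account.sign_request(req, 'https://dh-cn-hangzhou.aliyuncs.com')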
| 31.941667 | 92 | 0.642317 | 487 | 3,833 | 4.87885 | 0.35729 | 0.030303 | 0.049242 | 0.008838 | 0.049663 | 0.049663 | 0.049663 | 0.027778 | 0 | 0 | 0 | 0.00388 | 0.26037 | 3,833 | 119 | 93 | 32.210084 | 0.834215 | 0.291156 | 0 | 0.066667 | 0 | 0 | 0.045087 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.166667 | 0.033333 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
faef3e33175a1546c06dd449b662bb75945e709e | 963 | py | Python | Searching/binary_maze.py | kimjiwook0129/Coding-Interivew-Cheatsheet | 574e6acecdb617b9c3cef7ec3b154ab183d8b99a | [
"MIT"
] | 3 | 2022-01-09T04:33:04.000Z | 2022-02-04T17:40:43.000Z | Searching/binary_maze.py | kimjiwook0129/Coding-Interivew-Cheatsheet | 574e6acecdb617b9c3cef7ec3b154ab183d8b99a | [
"MIT"
] | null | null | null | Searching/binary_maze.py | kimjiwook0129/Coding-Interivew-Cheatsheet | 574e6acecdb617b9c3cef7ec3b154ab183d8b99a | [
"MIT"
] | null | null | null | # 이것이 코딩테스트다 p.152
# Sample Input:
# 5 6
# 101010
# 111111
# 000001
# 111111
# 111111
# Output : 10
from collections import deque
# Complexities : Time O(NM) | Space O(NM)
if __name__ == "__main__":
N, M = map(int, input().split())
maze = []
for _ in range(N):
maze.append(list(map(int, input())))
q = deque([(0, 0, 1)])
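    # BFS over the maze; each queue entry is (row, col, path length so far,
    # counting the start cell as 1)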
run = True
    drow = [0, 0, -1, 1]
    dcol = [1, -1, 0, 0]
    while q and run:
        row, col, move = q.popleft()
for i in range(4):
nrow, ncol = row + drow[i], col + dcol[i]
if nrow >= 0 and nrow < N and ncol >= 0 and ncol < M:
if nrow == N - 1 and ncol == M - 1:
print(move + 1)
run = False
break
if maze[nrow][ncol] == 0: continue
if maze[nrow][ncol] == 1:
q.append((nrow, ncol, move + 1))
maze[nrow][ncol] = -1
| 24.692308 | 65 | 0.450675 | 132 | 963 | 3.219697 | 0.431818 | 0.094118 | 0.084706 | 0.065882 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.101222 | 0.404984 | 963 | 38 | 66 | 25.342105 | 0.640489 | 0.125649 | 0 | 0 | 0 | 0 | 0.009639 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.043478 | 0 | 0.043478 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
faef5a2fc458ea2788ff3ffcc456892ea4c595bf | 2,014 | py | Python | VISinger/vsinging_edit.py | ishine/VITS_Singing | 0343074855d049a5ab1ac4b48d436c6cc623d552 | [
"Apache-2.0"
] | 10 | 2022-03-17T03:46:13.000Z | 2022-03-29T16:53:24.000Z | VISinger/vsinging_edit.py | ishine/VITS_Singing | 0343074855d049a5ab1ac4b48d436c6cc623d552 | [
"Apache-2.0"
] | 1 | 2022-03-18T09:28:58.000Z | 2022-03-18T09:28:58.000Z | VISinger/vsinging_edit.py | ishine/VITS_Singing | 0343074855d049a5ab1ac4b48d436c6cc623d552 | [
"Apache-2.0"
] | 3 | 2022-03-17T03:46:15.000Z | 2022-03-18T13:55:14.000Z | import os
import sys
import numpy as np
from scipy.io import wavfile
from time import time  # only time() is used; avoid the star import
import torch
import utils
from models import SynthesizerTrn
def save_wav(wav, path, rate):
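    # Peak-normalize to ~60% of int16 full scale before writing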
wav *= 32767 / max(0.01, np.max(np.abs(wav))) * 0.6
wavfile.write(path, rate, wav.astype(np.int16))
# define model and load checkpoint
hps = utils.get_hparams_from_file("./configs/singing_base.json")
net_g = SynthesizerTrn(
hps.data.filter_length // 2 + 1,
hps.train.segment_size // hps.data.hop_length,
**hps.model).cuda()
_ = utils.load_checkpoint("./logs/singing_base/G_140000.pth", net_g, None)
net_g.eval()
# net_g.remove_weight_norm()
idx = "2044001628"
text_norm = np.load(f"midis/singing_label.npy")
text_tone = np.load(f"midis/singing_pitch.npy")
input_ids = torch.LongTensor(text_norm)
tune_ids = torch.LongTensor(text_tone)
input_f0 = torch.load(f"../VISinger_data/wav_dump_16k/{idx}_bits16.f0.pt")
len_text = input_ids.size()[0]
len_tone = tune_ids.size()[0]
len_spec = input_f0.size()[-1]
assert len_text == len_tone
if (len_text != len_spec):
len_min = min(len_text, len_spec)
input_ids = input_ids[:len_min]
tune_ids = tune_ids[:len_min]
input_f0 = input_f0[:len_min]
begin_time = time()
with torch.no_grad():
x_tst = input_ids.cuda().unsqueeze(0)
x_tst_lengths = torch.LongTensor([input_ids.size(0)]).cuda()
t_tst = tune_ids.cuda().unsqueeze(0)
t_tst_lengths = torch.LongTensor([tune_ids.size(0)]).cuda()
f0_tst = input_f0.cuda().unsqueeze(0)
audio = net_g.infer(x_tst, x_tst_lengths, t_tst, t_tst_lengths, f0_tst, t_tst_lengths, noise_scale=0, noise_scale_w=0, length_scale=1)[0][0,0].data.cpu().float().numpy()
end_time = time()
run_time = end_time - begin_time
print('Syth Time (Seconds):', run_time)
data_len = len(audio) / 16000
print('Wave Time (Seconds):', data_len)
print('Real time Rate (%):', run_time/data_len)
save_wav(audio, f"./midis/singing_edit.wav", hps.data.sampling_rate)
| 33.566667 | 174 | 0.701589 | 334 | 2,014 | 3.952096 | 0.335329 | 0.036364 | 0.024242 | 0.018182 | 0.028788 | 0 | 0 | 0 | 0 | 0 | 0 | 0.035673 | 0.150943 | 2,014 | 59 | 175 | 34.135593 | 0.736257 | 0.029295 | 0 | 0 | 0 | 0 | 0.129952 | 0.093502 | 0 | 0 | 0 | 0 | 0.020833 | 1 | 0.020833 | false | 0 | 0.166667 | 0 | 0.1875 | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
faef5dce83ddbe787319a52370f717747e1b49e1 | 2,891 | py | Python | dao/mysql/localhost/pool.py | 17621192638/flaskfd | e73ea0c98bb37ac92ff28b91d1e92d0d7edff41d | [
"MIT"
] | 1 | 2020-11-03T02:05:15.000Z | 2020-11-03T02:05:15.000Z | dao/mysql/localhost/pool.py | 17621192638/flaskfd | e73ea0c98bb37ac92ff28b91d1e92d0d7edff41d | [
"MIT"
] | null | null | null | dao/mysql/localhost/pool.py | 17621192638/flaskfd | e73ea0c98bb37ac92ff28b91d1e92d0d7edff41d | [
"MIT"
] | 1 | 2022-02-27T12:28:53.000Z | 2022-02-27T12:28:53.000Z | import utils.mysql.mysql_common_util as mysql_util
import configparser,os,time
cf = configparser.ConfigParser()
cf.read(os.path.dirname(__file__)+"/../conf.ini")
# Database key in conf.ini for this connection
key = "localhost"
# Build the connection pool object for the current database
class service(object):
def __init__(self):
environment = cf.get(key,"environment")
for i in range(3):
print("当前mysql运行环境: {} !!!!".format(environment))
time.sleep(0.1)
self.pool = mysql_util.get_mysql_pool(
host=cf.get(key,"host"),
port=cf.get(key,"port"),
user=cf.get(key,"user"),
password=cf.get(key,"passwd"),
database=cf.get(key,"db"),
charset="utf8mb4"
)
self.conn, self.cursor = mysql_util.get_db_from_pool(pool=self.pool)
s = service()
run_sql = mysql_util.get_wrapper(s.pool)
run_sql_v2 = mysql_util.get_wrapper_v2(s.pool)
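# run_sql / run_sql_v2 below wrap DAO methods so their SQL runs against the
# shared pool; fetch_type presumably selects fetchone/fetchall/no fetch
# (implemented in utils.mysql.mysql_common_util)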
from utils.mysql.common_dao import common_dao as common
# Attach the pool-bound decorators to the shared DAO methods; the decorators cannot be passed in through __init__
class common_dao(common):
def __init__(self,table_name):
super().__init__(table_name=table_name)
escape_none_keys = ["status","email","phone","text","sentiment","img_url","dms_name","twords"]
def move_none_keys(self,**kwargs):
"""移除不接受None的key"""
model = kwargs.get("model",None)
if model:
escape_keys = [k for k,v in model.items() if (v ==None or v=="") and k in self.escape_none_keys]
for k in escape_keys: del model[k]
@mysql_util.pymysql_time_deal
@run_sql(fetch_type="all")
def select(self,*args, **kwargs):
self.move_none_keys(**kwargs)
return super().select(*args, **kwargs)
@mysql_util.pymysql_time_deal
@run_sql(fetch_type="one")
def select_one(self,*args, **kwargs):
self.move_none_keys(**kwargs)
return super().select_one(*args, **kwargs)
@run_sql(fetch_type=None)
def update_by_id(self,*args, **kwargs):
self.move_none_keys(**kwargs)
return super().update_by_id(*args, **kwargs)
@run_sql_v2(fetch_type=None)
def update_by_id_v2(self,*args, **kwargs):
self.move_none_keys(**kwargs)
return super().update_by_id_v2(*args, **kwargs)
@run_sql(fetch_type=None)
def insert(self,*args, **kwargs):
return super().insert(*args, **kwargs)
@run_sql(fetch_type="one")
def select_total(self,*args, **kwargs):
self.move_none_keys(**kwargs)
return super().select_total(*args, **kwargs)
@run_sql(fetch_type=None)
def delete_by_id(self,*args, **kwargs):
return super().delete_by_id(*args, **kwargs)
@run_sql(fetch_type=None)
def delete(self,*args, **kwargs):
return super().delete(*args, **kwargs)
@run_sql_v2(fetch_type="all")
def insert_v2(self,*args, **kwargs):
return super().insert_v2(*args, **kwargs)
if __name__ == '__main__':
pass
| 30.755319 | 108 | 0.638533 | 398 | 2,891 | 4.351759 | 0.258794 | 0.103926 | 0.072748 | 0.060624 | 0.434758 | 0.432448 | 0.352771 | 0.283487 | 0.246536 | 0.157621 | 0 | 0.005674 | 0.207541 | 2,891 | 93 | 109 | 31.086022 | 0.750327 | 0.025943 | 0 | 0.188406 | 0 | 0 | 0.054843 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.173913 | false | 0.028986 | 0.043478 | 0.057971 | 0.391304 | 0.014493 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
faefef229f2918a8bcb2cd4cb755f12c13ba9263 | 2,967 | py | Python | code/at_offer/finding_sorting/coding_interview39.py | zhangrong1722/interview | 187a485de0774561eb843d8ee640236adda97b90 | [
"Apache-2.0"
] | 2 | 2020-01-05T07:46:20.000Z | 2020-04-17T02:58:13.000Z | code/at_offer/finding_sorting/coding_interview39.py | zhangrong1722/interview | 187a485de0774561eb843d8ee640236adda97b90 | [
"Apache-2.0"
] | 1 | 2020-01-05T07:50:26.000Z | 2020-04-28T03:50:08.000Z | code/at_offer/finding_sorting/coding_interview39.py | zhangrong1722/interview | 187a485de0774561eb843d8ee640236adda97b90 | [
"Apache-2.0"
] | 1 | 2020-04-18T03:58:26.000Z | 2020-04-18T03:58:26.000Z | """
Problem: find the number that appears more than half the time in an array.
Some number appears more than half of the array's length; find it. For example, in the
length-9 array {1,2,3,2,2,2,5,4,2} the number 2 appears 5 times, which is more than
half of the length, so the output is 2. If no such number exists, output 0.
Approach 1: traverse the array and count each element's frequency in a dict, then scan
the dict and return the element whose count exceeds half the length, or 0 otherwise.
This trades space for time: O(n) time and O(n) space.
Approach 2: if an element appears more than half the time, it must sit at the middle
index after sorting. Reuse quicksort's Partition function: each call puts one pivot at
its final sorted position, and once that position is the middle index the pivot is the
answer (after validating it really appears more than n/2 times).
Approach 3 (Boyer-Moore voting): if an element appears more than half the time, its
count exceeds the counts of all other elements combined. Keep a candidate `key` and a
counter `times`; traverse the array, incrementing `times` on a match and decrementing
on a mismatch; when `times` drops to 0, switch the candidate to the current element
and reset the counter.
"""
class Solution:
def MoreThanHalfNum_Solution1(self, numbers):
if len(numbers) == 0:
return 0
freq = dict()
for e in numbers:
if e not in freq.keys():
freq[e] = 1
else:
freq[e] += 1
for key, value in freq.items():
if value > len(numbers)//2:
return key
return 0
def MoreThanHalfNum_Solution2(self, numbers):
if len(numbers) == 0:
return 0
start, end = 0, len(numbers) - 1
index = self.Partition(numbers, start, end)
while index != len(numbers) // 2:
if index > len(numbers) // 2:
index = self.Partition(numbers, start, index - 1)
else:
index = self.Partition(numbers, index + 1, end)
if self.CheckValid(numbers, numbers[index]):
return numbers[index]
else:
return 0
def MoreThanHalfNum_Solution3(self, numbers):
if len(numbers) == 0:
return 0
results, times = numbers[0], 1
for i in range(1, len(numbers)):
if times == 0:
results, times = numbers[i], 1
elif results == numbers[i]:
times += 1
else:
times -= 1
if self.CheckValid(numbers, results):
return results
else:
return 0
def Partition(self, numbers, start, end):
key = numbers[start]
while start < end:
while start < end and key <= numbers[end]:
end -= 1
if key > numbers[end]:
numbers[end], numbers[start] = numbers[start], numbers[end]
while start < end and key >= numbers[start]:
start += 1
if key < numbers[start]:
numbers[end], numbers[start] = numbers[start], numbers[end]
return start
def CheckValid(self, numbers, element):
times = 0
for e in numbers:
if e == element:
times += 1
return times > len(numbers) // 2
s = Solution()
print(s.MoreThanHalfNum_Solution1([1, 2, 3, 2, 2, 2, 5, 4, 2]))
print(s.MoreThanHalfNum_Solution2([1, 2, 3, 2, 2, 2, 5, 4, 2]))
print(s.MoreThanHalfNum_Solution2([1, 2, 3, 6, 7, 9, 5, 4, 2]))
print(s.MoreThanHalfNum_Solution3([1, 2, 3, 2, 2, 2, 5, 4, 2]))
print(s.MoreThanHalfNum_Solution3([1, 2, 3, 6, 7, 9, 5, 4, 2])) | 33.715909 | 77 | 0.557802 | 353 | 2,967 | 4.665722 | 0.252125 | 0.07286 | 0.010929 | 0.009715 | 0.304797 | 0.268367 | 0.248937 | 0.215543 | 0.105647 | 0.094718 | 0 | 0.051062 | 0.333333 | 2,967 | 88 | 78 | 33.715909 | 0.781598 | 0.166835 | 0 | 0.298507 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074627 | false | 0 | 0 | 0 | 0.253731 | 0.074627 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
faf069762fe4a3817284e4a54927ba09ab79e1ed | 9,559 | py | Python | src/Processing/pipeline.py | hobbitsyfeet/3DMeasure | 829dbc4e9a1974064ed7baa221c765c3c9123834 | [
"MIT"
] | 6 | 2020-01-14T14:37:31.000Z | 2021-12-16T19:45:29.000Z | src/Processing/pipeline.py | hobbitsyfeet/3DMeasure | 829dbc4e9a1974064ed7baa221c765c3c9123834 | [
"MIT"
] | null | null | null | src/Processing/pipeline.py | hobbitsyfeet/3DMeasure | 829dbc4e9a1974064ed7baa221c765c3c9123834 | [
"MIT"
] | null | null | null | import pcl
import open3d as o3d
import numpy as np
import load
import plane_segmentation
import eulcidian_cluster
import manual_registration
import filter_outliers
import measure_cloud
import global_registration
import reg_grow_segmentation
import convert
import resample
import voxel_grid
import time
import uuid
from sklearn.decomposition import PCA
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from copy import deepcopy
if __name__ =="__main__":
pre_clustered = False
start_time = ""
save_path = "F:/Data/Pipeline/" + start_time
cloud_paths = load.get_files()
registered_cloud = None
manual_reg = False
cloud_list = []
#pre-load all files for processing
for path in cloud_paths:
#cloud_list.append(pcl.load(path,format="ply"))
cloud_list.append(o3d.io.read_point_cloud(path))
#target_cloud = pcl.load(cloud_paths[0],format="ply")
target_cloud = o3d.io.read_point_cloud(path)
#process each cloud
counter = 0
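    # Per-cloud flow: optional clustering; from the second cloud on, global
    # registration plus ICP refinement against the accumulated target, then
    # outlier filtering, smoothing and voxel downsampling before measuring.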
for point_cloud in cloud_list:
print(point_cloud)
#point_cloud = convert.pcl_to_o3d(point_cloud)
if pre_clustered == False:
eulcidian_cluster.cluster_and_select(point_cloud,point_cloud)
#print("Eliminating horizontal planes...")
#point_cloud = plane_segmentation.segment(point_cloud)
#print("Clustering Clouds...")
#point_cloud = eulcidian_cluster.cluster_and_select(point_cloud, point_cloud)
# point_cloud = convert.pcl_to_o3d(point_cloud)
# registered_cloud = manual_registration.register(cloud_cluster_1,cloud_cluster_2)
if counter > 0:
# NOTE automatic registration relies on downsizing for accurate results.
print("Preparing clusters for registration")
#point_cloud_current = convert.pcl_to_o3d(cloud_list[counter])
#point_cloud_previous = convert.pcl_to_o3d(target_cloud)
point_cloud_current = cloud_list[counter]
point_cloud_previous = target_cloud
# measure_cloud.manual_measure(point_cloud_current)
# measure_cloud.manual_measure(point_cloud_previous)
if manual_reg == False:
                voxel_size = 0.01  # average voxel size, in the point cloud's units
source, target, source_down, target_down, source_fpfh, target_fpfh = \
global_registration.prepare_dataset(point_cloud_current, point_cloud_previous,voxel_size)
print("Calculating Global Registration...")
globally_registered_cloud = global_registration.execute_global_registration(source_down,target_down,
source_fpfh,target_fpfh,
voxel_size)
print("Calculating ICP Registration...")
result_icp = global_registration.refine_registration(source, target,
source_fpfh, target_fpfh,
globally_registered_cloud.transformation,
voxel_size)
print("Transforming Source...")
print(result_icp.transformation)
else:
result_icp = manual_registration.register(point_cloud_current,point_cloud_previous)
point_cloud_current.transform(result_icp.transformation)
registered_cloud = point_cloud_current + point_cloud_previous
print(registered_cloud)
print(point_cloud_current)
print(point_cloud_previous)
# o3d.io.write_point_cloud((save_path + "registered_cloud.ply"),registered_cloud, write_ascii=True, compressed=True)
# if len(registered_cloud.points) > 100:
o3d.io.write_point_cloud((save_path + "regeristered_prefilter_cloud.ply"),registered_cloud,write_ascii=True)
target_cloud = o3d.io.read_point_cloud((save_path + "regeristered_prefilter_cloud.ply"),format="ply")
registered_cloud = convert.o3d_to_pcl(registered_cloud)
# registered_cloud = convert.o3d_to_pcl(registered_cloud)
# pcl.save(registered_cloud,(save_path + "regeristered_prefilter_cloud.ply"), format="ply")
# target_cloud = pcl.load(save_path + "regeristered_prefilter_cloud.ply")
#remove outliers
registered_cloud = filter_outliers.statistical_filter(registered_cloud)
#smooth data
registered_cloud = resample.smooth(registered_cloud,0.01)
# average and reduce point size
            registered_cloud = voxel_grid.filter(registered_cloud, leaf_size=0.01)  # voxel leaf size in cloud units
# target_cloud = deepcopy(point_cloud_current + point_cloud_previous)
print("Saving...")
pcl.save(registered_cloud,(save_path + "registered_cloud.ply"), format="ply")
print("Measuring")
measure_cloud.manual_measure(convert.pcl_to_o3d(registered_cloud))
'''
smoothness = ""
PCA_results = []
#Perform PCA on entire object
df = pd.DataFrame(convert.pcl_to_numpy(registered_cloud))
pca = PCA(n_components=3)
pca.fit(df)
# Store results of PCA in a data frame
result = pd.DataFrame(pca.transform(df), columns=['PCA%i' % i for i in range(3)], index=df.index)
print (result)
PCA_results.append(deepcopy(result))
#perform PCA on each cluster
while(str(smoothness) is not ""):
clusters = reg_grow_segmentation.segment(registered_cloud, float(smoothness),min_cluster=5, view=True)
for cluster in clusters:
print("Starting Statistical Analysis...")
df = pd.DataFrame(convert.pcl_to_numpy(cluster))
pca = PCA(n_components=3)
pca.fit(df)
# Store results of PCA in a data frame
result = pd.DataFrame(pca.transform(df), columns=['PCA%i' % i for i in range(3)], index=df.index)
PCA_results.append(deepcopy(result))
print (result)
smoothness = input()
show_fig_flag = 0
while show_fig_flag is not -1:
# Plot initialisation
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(PCA_results[show_fig_flag]['PCA0'], PCA_results[show_fig_flag]['PCA1'], PCA_results[show_fig_flag]['PCA2'], cmap="Set2_r", s=60)
# make simple, bare axis lines through space:
xAxisLine = ((min(PCA_results[show_fig_flag]['PCA0']), max(PCA_results[show_fig_flag]['PCA0'])), (0, 0), (0,0))
ax.plot(xAxisLine[0], xAxisLine[1], xAxisLine[2], 'r')
yAxisLine = ((0, 0), (min(PCA_results[show_fig_flag]['PCA1']), max(PCA_results[show_fig_flag]['PCA1'])), (0,0))
ax.plot(yAxisLine[0], yAxisLine[1], yAxisLine[2], 'r')
zAxisLine = ((0, 0), (0,0), (min(PCA_results[show_fig_flag]['PCA2']), max(PCA_results[show_fig_flag]['PCA2'])))
ax.plot(zAxisLine[0], zAxisLine[1], zAxisLine[2], 'r')
# label the axes
ax.set_xlabel("X")
ax.set_ylabel("Y")
ax.set_zlabel("Z")
ax.set_title("")
plt.show(block=False)
print("Which cluster would you like to see? Enter -1 to continue")
show_fig_flag = int(input())
#convert to o3d to measure
# registered_cloud = convert.pcl_to_o3d(registered_cloud)
print("Starting Statistical Analysis...")
df = pd.DataFrame(convert.pcl_to_numpy(registered_cloud))
pca = PCA(n_components=3)
pca.fit(df)
# Store results of PCA in a data frame
result = pd.DataFrame(pca.transform(df), columns=['PCA%i' % i for i in range(3)], index=df.index)
print (result)
# Plot initialisation
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(result['PCA0'], result['PCA1'], result['PCA2'], cmap="Set2_r", s=60)
# make simple, bare axis lines through space:
xAxisLine = ((min(result['PCA0']), max(result['PCA0'])), (0, 0), (0,0))
ax.plot(xAxisLine[0], xAxisLine[1], xAxisLine[2], 'r')
yAxisLine = ((0, 0), (min(result['PCA1']), max(result['PCA1'])), (0,0))
ax.plot(yAxisLine[0], yAxisLine[1], yAxisLine[2], 'r')
zAxisLine = ((0, 0), (0,0), (min(result['PCA2']), max(result['PCA2'])))
ax.plot(zAxisLine[0], zAxisLine[1], zAxisLine[2], 'r')
# label the axes
ax.set_xlabel("Width")
ax.set_ylabel("Height")
ax.set_zlabel("Depth")
ax.set_title("")
plt.show(block=False)
'''
counter += 1
| 39.6639 | 155 | 0.57642 | 1,064 | 9,559 | 4.924812 | 0.205827 | 0.066794 | 0.025191 | 0.029198 | 0.512595 | 0.500191 | 0.388931 | 0.333969 | 0.283588 | 0.246183 | 0 | 0.019037 | 0.324092 | 9,559 | 240 | 156 | 39.829167 | 0.791983 | 0.13809 | 0 | 0.054054 | 0 | 0 | 0.062854 | 0.015775 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.283784 | 0 | 0.283784 | 0.148649 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
faf21fdb1a2f9c963f6c5f900fde995eea67ab14 | 14,673 | py | Python | pymoldflow/studyrlt.py | tianyikillua/pymoldflow | 93a0d4a1ee6dcd07584d8bf4227f618f5333f05b | [
"MIT"
] | 1 | 2020-11-22T16:04:48.000Z | 2020-11-22T16:04:48.000Z | pymoldflow/studyrlt.py | tianyikillua/pymoldflow | 93a0d4a1ee6dcd07584d8bf4227f618f5333f05b | [
"MIT"
] | null | null | null | pymoldflow/studyrlt.py | tianyikillua/pymoldflow | 93a0d4a1ee6dcd07584d8bf4227f618f5333f05b | [
"MIT"
] | null | null | null | import os
import shutil
import subprocess
import numpy as np
import meshio
from .base import MoldflowAutomation
from .data_io import PatranMesh, convert_to_time_series_xdmf, read_moldflow_xml
class MoldflowResultsExporter(MoldflowAutomation):
"""
Export Autodesk Moldflow simulation results
Args:
moldflow_path (str): Path to Autodesk Moldflow Insight
sdyfile (str): Autodesk Moldflow ``.sdy`` file containing simulation results
outdir (str): Output directory
outfile (str): Output file with a format compatible with `meshio <https://github.com/nschloe/meshio>`_
use_metric_units (bool): Use Metric units (mm for length for instance)
verbose (bool): Print out progress information
stdout (obj): Redirect progress information
"""
def __init__(
self,
moldflow_path,
sdyfile=None,
outdir=None,
outfile=None,
use_metric_units=True,
verbose=True,
stdout=None,
):
super().__init__(moldflow_path, use_metric_units, verbose, stdout)
self.sdyfile = sdyfile
self.outdir = outdir
self.outfile = outfile
self.mesh = None
def check(self):
"""
Check if the provided ``studyrlt.exe`` program works
"""
self._print("Checking that studyrlt works fine...")
success, _ = self._run_studyrlt(None)
return success
def export_log(self):
"""
Export analysis log to ``log.txt``
Returns:
bool: Success indicator
"""
log = os.path.join(self._export_dir(), "log.txt")
# Run studyrlt
self._print("Exporting log file...")
success, log_ = self._run_studyrlt("exportoutput")
if success:
shutil.move(log_, log)
return success
def export_mesh(
self, output_formats=[], only_export_rawdata=False, return_mesh=False
):
"""
Export and optionally process mesh information
Args:
            output_formats (list): List of `meshio <https://github.com/nschloe/meshio>`_-compatible mesh formats (MED, XDMF, ...) to export
            only_export_rawdata (bool): Whether to only export the raw ``.pat`` Patran mesh, without processing
            return_mesh (bool): Whether to also return the ``meshio`` mesh object
Returns:
bool: Success indicator
"""
mesh = os.path.join(self._rawdata_dir(), "mesh.pat")
# Run studyrlt if mesh doesn't exist
if not os.path.isfile(mesh):
self._print("Mesh: running studyrlt...")
success, mesh_ = self._run_studyrlt("exportpatran")
if success:
shutil.move(mesh_, mesh)
else:
return False
else:
self._print("Mesh: Patran file already generated")
if only_export_rawdata:
return True
# Read and process mesh
self._print("Mesh: reading Patran file...")
self.mesh = PatranMesh(mesh, read_celltypes=["triangle", "tetra"])
if self.use_metric_units:
self.mesh.scale() # convert to mm
# Only keep one cell type (2d triangular or 3d tetra)
if "tetra" in self.mesh.cells:
self.mesh.cell_type = "tetra"
elif "triangle" in self.mesh.cells:
self.mesh.cell_type = "triangle"
self.mesh.cells = {self.mesh.cell_type: self.mesh.cells[self.mesh.cell_type]}
self.mesh.cellsID = self.mesh.cellsID[self.mesh.cell_type]
self.mesh.cellsID = dict(
zip(self.mesh.cellsID, np.arange(len(self.mesh.cellsID)))
)
self.mesh.point_data = {}
self.mesh.cell_data[self.mesh.cell_type] = {}
# Export to specified formats
def _output_mesh(ext):
os.makedirs(self._interfaces_dir(), exist_ok=True)
out = os.path.join(self._interfaces_dir(), "mesh." + ext)
meshio.write(out, self.mesh)
for ext in output_formats:
_output_mesh(ext)
if return_mesh:
return True, self.mesh
else:
return True
def export_result(
self,
resultID,
name,
only_last_step=True,
export_npy=False,
only_export_rawdata=False,
return_array=False,
):
"""
Export and optionally process simulation results
Args:
resultID (int): Identifier of the simulation result (refer to ``results.dat``)
name (str): Name of the provided simulation result
only_last_step (bool): Only process the last time-step
            export_npy (bool): Whether to also export the raw numerical values
            only_export_rawdata (bool): Whether to only export the raw ``.xml`` file, without processing
            return_array (bool): Whether to also return the ``numpy`` array for fields defined at a single time step
Returns:
int: Success indicator, (1) success; (-1) run_studyrlt error; (-2) read_moldflow_xml error
"""
xml = os.path.join(self._rawdata_dir(), "{}.xml".format(self._io_name(name)))
# Run studyrlt if xml doesn't exist
if not os.path.isfile(xml):
self._print("{}: running studyrlt...".format(name))
success, xml_ = self._run_studyrlt(resultID)
if success:
shutil.move(xml_, xml)
else:
if return_array:
return -1, None
else:
return -1
else:
self._print("{}: XML file already generated".format(name))
if only_export_rawdata:
return 1
self._print("{}: parsing XML...".format(name))
success, data = read_moldflow_xml(xml, only_last_step=only_last_step)
if not success:
if return_array:
return -2, None
else:
return -2
# Process and export data
if data["type"] == "NMDT(Non-mesh data)":
array = self._process_nmdt_result(data, name, return_array=return_array)
elif data["time"] is None:
array = self._process_single_result(
data, name, export_npy=export_npy, return_array=return_array
)
else:
self._process_time_series_result(data, name)
if return_array:
return 1, array
else:
return 1
def finalize(self):
"""
Post-process the output file
Currently it will generate a time-series XDMF file
"""
# Convert to a time-series XDMF file
if os.path.isfile(self.outfile) and ".xdmf" in self.outfile:
convert_to_time_series_xdmf(self.outfile, backup=False)
def _export_dir(self):
if self.outdir is None and self.outfile is not None:
return os.path.dirname(self.outfile)
elif self.outdir is None:
return os.path.dirname(self.sdyfile)
else:
return self.outdir
def _rawdata_dir(self):
return os.path.join(self._export_dir(), "rawdata")
def _interfaces_dir(self):
return os.path.join(self._export_dir(), "interfaces")
def _io_name(self, name):
return name.lower().replace(" ", "_").replace("/", "").replace(",", "")
def _run_studyrlt(self, action):
sdy = self.sdyfile
check_mode = False # flag to verify if studyrlt works fine
command = [self.studyrlt_exe, self.sdyfile]
if action == "exportpatran":
command.append("-exportpatran")
out_ = sdy.replace(".sdy", ".pat")
elif action == "exportoutput":
command.append("-exportoutput")
out_ = sdy.replace(".sdy", ".txt")
elif type(action) == int:
command.append("-xml")
command.append("{:d}".format(action))
out_ = sdy.replace(".sdy", ".xml")
else:
check_mode = True
if not check_mode and self.use_metric_units:
command.append("-unit")
command.append("Metric")
if not check_mode:
assert os.path.isfile(sdy)
        # Execute the command; if launching it fails, the provided
        # studyrlt.exe cannot be used at all
try:
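            # Windows-only creation flag (CREATE_NO_WINDOW): hide the console
            # window of the spawned studyrlt process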
CREATE_NO_WINDOW = 0x08000000
proc = subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
creationflags=CREATE_NO_WINDOW,
)
output, _ = proc.communicate()
proc.wait()
except subprocess.SubprocessError:
print("Unable to run {}".format(" ".join(command)))
return False, None
# If the output does not contain Autodesk, then problem with studyrlt
output = output.decode("windows-1252").strip()
if "Autodesk" not in output:
print("Verify that the given studyrlt.exe works")
return False, None
# Directly return for check mode, since no output is expected
if check_mode:
return True, None
# Cleanups
tmps = [sdy.replace(".sdy", ".out"), sdy.replace(".sdy", ".err")]
for tmp in tmps:
if os.path.isfile(tmp):
os.remove(tmp)
# If can not find the output file, then we have a problem with studyrlt
if os.path.isfile(out_):
os.makedirs(self._rawdata_dir(), exist_ok=True)
out = os.path.join(self._rawdata_dir(), os.path.basename(out_))
shutil.move(out_, out)
return True, out
else:
print("Unable to retrieve outputs for {}".format(" ".join(command)))
return False, None
def _prepare_data_structure(self, data):
if data["type"] == "NDDT(Node data)":
num = len(self.mesh.points)
locate = self.mesh.pointsID
else:
num = len(self.mesh.cells[self.mesh.cell_type])
locate = self.mesh.cellsID
if data["dim"] == 1:
values = np.full(num, np.nan)
else:
values = np.full((num, data["dim"]), np.nan)
return locate, values
def _process_nmdt_result(self, data, name, return_array=False):
# TODO: not necessarily time in fact
# TODO: multidimensional values?
import xlsxwriter
x = data["time"]
y = data["val"]
assert len(x) == len(y)
length = len(x)
if self.outfile is not None:
# Open an Excel file
out = os.path.join(self._export_dir(), self._io_name(name) + ".xlsx")
workbook = xlsxwriter.Workbook(out)
worksheet = workbook.add_worksheet()
bold = workbook.add_format({"bold": 1})
# Dump data
name = f"{name} ({data['unit']})"
worksheet.write_row("A1", ["Time (s)", name], bold)
worksheet.write_column("A2", x)
worksheet.write_column("B2", y)
# Plot chart
chart = workbook.add_chart({"type": "scatter", "subtype": "straight"})
chart.add_series(
{
"name": ["Sheet1", 0, 1],
"categories": ["Sheet1", 1, 0, length, 0],
"values": ["Sheet1", 1, 1, length, 1],
}
)
chart.set_x_axis({"name": "Time (s)", "major_gridlines": {"visible": True}})
chart.set_y_axis({"name": name})
chart.set_size({"x_scale": 2, "y_scale": 2})
chart.set_legend({"none": True})
worksheet.insert_chart("E2", chart)
workbook.close()
if return_array:
return x, y
def _process_single_result(self, data, name, export_npy=False, return_array=False):
# Prepare data structure
locate, values = self._prepare_data_structure(data)
# Read data
val = data["val"]
for identifier, value in val.items():
try:
values[locate[identifier]] = value
            except Exception:
                # identifier missing from the mesh mapping; skip this value
                pass
# For 6-dimensional values, reverse 13 and 23
if data["dim"] == 6:
values = values[:, [0, 1, 2, 3, 5, 4]]
# Export to the output file
if self.outfile is not None:
if data["type"] == "NDDT(Node data)":
self.mesh.point_data[name] = values
else:
self.mesh.cell_data[self.mesh.cell_type][name] = values
meshio.write(self.outfile, self.mesh)
# Export raw values
if export_npy:
os.makedirs(self._interfaces_dir(), exist_ok=True)
out = os.path.join(self._interfaces_dir(), f"{self._io_name(name)}.npy")
np.save(out, values)
# Export array
if return_array:
return values
else:
return None
def _process_time_series_result(self, data, name):
# Prepare PVD information
timestep = data["time"]
nsteps = len(timestep)
name_step = [f"{name}__{t:.4f}" for t in timestep]
# Read each time-step
locate, values_ = self._prepare_data_structure(data)
for i in range(nsteps):
self._print(f"{name}: reading time-step #{i + 1:d}/{nsteps:d}...")
# Read data
values = np.copy(values_)
for identifier, value in data["val"][i].items():
try:
values[locate[identifier]] = value
                except Exception:
                    # identifier missing from the mesh mapping; skip this value
                    pass
# For 6-dimensional values, reverse 13 and 23
if data["dim"] == 6:
values = values[:, [0, 1, 2, 3, 5, 4]]
# Save to the mesh data structure
if self.outfile is not None:
if data["type"] == "NDDT(Node data)":
self.mesh.point_data[name_step[i]] = values
else:
self.mesh.cell_data[self.mesh.cell_type][name_step[i]] = values
# Final write
if self.outfile is not None:
meshio.write(self.outfile, self.mesh)
| 34.852732 | 145 | 0.545696 | 1,663 | 14,673 | 4.667468 | 0.188214 | 0.036073 | 0.018552 | 0.016233 | 0.259856 | 0.208709 | 0.155372 | 0.128704 | 0.109379 | 0.076269 | 0 | 0.00733 | 0.349145 | 14,673 | 420 | 146 | 34.935714 | 0.805445 | 0.184625 | 0 | 0.237226 | 0 | 0 | 0.082285 | 0.002231 | 0 | 0 | 0.000892 | 0.002381 | 0.007299 | 1 | 0.058394 | false | 0.007299 | 0.029197 | 0.010949 | 0.193431 | 0.043796 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
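# --- Illustrative sketch: driving export_result ---
# A minimal sketch of how the exporter above might be used. `exporter` stands
# for an instance of the exporter class (its constructor is not shown here),
# and `export_or_raise` is a hypothetical helper; only the status codes
# 1 / -1 / -2 come from the code itself.
def export_or_raise(exporter, result_id, name):
    status, array = exporter.export_result(result_id, name, return_array=True)
    if status == -1:
        raise RuntimeError("{}: studyrlt failed to produce an XML file".format(name))
    if status == -2:
        raise RuntimeError("{}: could not parse the Moldflow XML output".format(name))
    return array  # None for time-series results, an ndarray otherwise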
faf5650e8a3f52def370becc66cede60c369028e | 2,738 | py | Python | test/integ_tests/test_integration.py | antalszava/amazon-braket-pennylane-plugin-python-1 | 0228fd38dee5a586807b8a2b32b3bfa0f0360669 | [
"Apache-2.0"
] | 16 | 2021-01-11T20:59:39.000Z | 2022-03-04T14:18:20.000Z | test/integ_tests/test_integration.py | antalszava/amazon-braket-pennylane-plugin-python-1 | 0228fd38dee5a586807b8a2b32b3bfa0f0360669 | [
"Apache-2.0"
] | 43 | 2020-12-09T00:19:38.000Z | 2022-03-29T19:52:55.000Z | test/integ_tests/test_integration.py | aws/amazon-braket-pennylane-plugin-python | dbb8f4ae6d82778a9efb0a7f635100be6e323024 | [
"Apache-2.0"
] | 11 | 2021-01-11T21:01:42.000Z | 2021-11-01T08:46:11.000Z | # Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Tests that plugin devices are accessible and integrate with PennyLane"""
import numpy as np
import pennylane as qml
import pkg_resources
import pytest
from conftest import shortname_and_backends
ENTRY_POINTS = {entry.name: entry for entry in pkg_resources.iter_entry_points("pennylane.plugins")}
class TestDeviceIntegration:
"""Test the devices work correctly from the PennyLane frontend."""
@pytest.mark.parametrize("d", shortname_and_backends)
def test_load_device(self, d, extra_kwargs):
"""Test that the device loads correctly"""
dev = TestDeviceIntegration._device(d, 2, extra_kwargs)
assert dev.num_wires == 2
assert dev.shots is None
assert dev.short_name == d[0]
def test_args_aws(self):
"""Test that BraketAwsDevice requires correct arguments"""
with pytest.raises(TypeError, match="missing 3 required positional arguments"):
qml.device("braket.aws.qubit")
def test_args_local(self):
"""Test that BraketLocalDevice requires correct arguments"""
with pytest.raises(TypeError, match="missing 1 required positional argument"):
qml.device("braket.local.qubit")
@pytest.mark.parametrize("d", shortname_and_backends)
@pytest.mark.parametrize("shots", [None, 8192])
def test_one_qubit_circuit(self, shots, d, tol, extra_kwargs):
"""Test that devices provide correct result for a simple circuit"""
dev = TestDeviceIntegration._device(d, 1, extra_kwargs)
a = 0.543
b = 0.123
c = 0.987
@qml.qnode(dev)
def circuit(x, y, z):
"""Reference QNode"""
qml.BasisState(np.array([1]), wires=0)
qml.Hadamard(wires=0)
qml.Rot(x, y, z, wires=0)
return qml.expval(qml.PauliZ(0))
assert np.allclose(circuit(a, b, c), np.cos(a) * np.sin(b), **tol)
@staticmethod
def _device(shortname_and_backend, wires, extra_kwargs):
device_name, backend = shortname_and_backend
device_class = ENTRY_POINTS[device_name].load()
return qml.device(device_name, wires=wires, **extra_kwargs(device_class, backend))
| 38.56338 | 100 | 0.688459 | 371 | 2,738 | 4.97035 | 0.412399 | 0.032538 | 0.032538 | 0.023861 | 0.111714 | 0.111714 | 0.111714 | 0.066161 | 0.066161 | 0 | 0 | 0.014339 | 0.210373 | 2,738 | 70 | 101 | 39.114286 | 0.838575 | 0.32469 | 0 | 0.052632 | 0 | 0 | 0.074917 | 0 | 0 | 0 | 0 | 0 | 0.105263 | 1 | 0.157895 | false | 0 | 0.131579 | 0 | 0.368421 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
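# --- Illustrative sketch: using the plugin outside the test harness ---
# The tests above load devices through entry points; in day-to-day use the
# plugin is driven directly through PennyLane. A minimal local-simulator
# circuit, assuming the plugin is installed so "braket.local.qubit" is
# registered:
import numpy as np
import pennylane as qml

dev = qml.device("braket.local.qubit", wires=1)

@qml.qnode(dev)
def circuit(theta):
    qml.RX(theta, wires=0)
    return qml.expval(qml.PauliZ(0))

# In analytic mode <Z> after RX(theta) equals cos(theta)
assert np.isclose(circuit(0.3), np.cos(0.3))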
faf7987b61f1f17233d706e7eace5e40be0ca94e | 792 | py | Python | HMIN320/Stereovision/stereovision.py | Eikins/M2-Imagina | 6e37ef755bd5312fc808ec599b3bd76084d35568 | [
"MIT"
] | null | null | null | HMIN320/Stereovision/stereovision.py | Eikins/M2-Imagina | 6e37ef755bd5312fc808ec599b3bd76084d35568 | [
"MIT"
] | null | null | null | HMIN320/Stereovision/stereovision.py | Eikins/M2-Imagina | 6e37ef755bd5312fc808ec599b3bd76084d35568 | [
"MIT"
] | null | null | null | from enum import Enum
import cv2
class Camera(Enum):
    LEFT = 0
    RIGHT = 1

def OnClick(event, x, y, flags, params):
    # params identifies which camera window was clicked
    if event == cv2.EVENT_LBUTTONDOWN:
        if params == Camera.LEFT:
            print("Left image clicked at ({}, {})".format(x, y))
        elif params == Camera.RIGHT:
            print("Right image clicked at ({}, {})".format(x, y))
leftImage = cv2.imread("images/TurtleG.tif")
rightImage = cv2.imread("images/TurtleD.tif")
cv2.namedWindow("Left Image", cv2.WINDOW_NORMAL)
cv2.namedWindow("Right Image", cv2.WINDOW_NORMAL)
cv2.setMouseCallback("Left Image", OnClick, Camera.LEFT) # Left
cv2.setMouseCallback("Right Image", OnClick, Camera.RIGHT) # Right
cv2.imshow("Left Image", leftImage)
cv2.imshow("Right Image", rightImage)
while True:
key = cv2.waitKey(0)
if key == 27:
cv2.destroyAllWindows()
break
| 24 | 66 | 0.660354 | 101 | 792 | 5.148515 | 0.425743 | 0.051923 | 0.057692 | 0.076923 | 0.088462 | 0 | 0 | 0 | 0 | 0 | 0 | 0.030111 | 0.203283 | 792 | 32 | 67 | 24.75 | 0.793978 | 0.012626 | 0 | 0 | 0 | 0 | 0.134961 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.083333 | 0 | 0.25 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
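# --- Illustrative sketch: from clicks to depth ---
# The script above only echoes clicks; the usual next step in a stereo
# exercise is to turn the horizontal disparity between corresponding
# left/right clicks into depth. The focal length f (pixels) and baseline B
# (metres) below are made-up values, not calibration data for the Turtle pair.
def depth_from_disparity(x_left, x_right, f=700.0, B=0.12):
    """Pinhole stereo relation for rectified images: Z = f * B / disparity."""
    disparity = x_left - x_right
    if disparity <= 0:
        raise ValueError("disparity must be positive for a standard rig")
    return f * B / disparity

print(depth_from_disparity(420, 390))  # ~2.8 m for a 30-pixel disparity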
fafa000b315bfac007911e703fb6b8b09f2b8a9d | 6,162 | py | Python | stackexchange/web.py | lizmat/soiqbot | eb1c922418a7fc58bdbbb3573c48f90cdd155667 | [
"MIT"
] | 9 | 2016-03-24T19:47:53.000Z | 2022-01-13T19:07:07.000Z | stackexchange/web.py | lizmat/soiqbot | eb1c922418a7fc58bdbbb3573c48f90cdd155667 | [
"MIT"
] | null | null | null | stackexchange/web.py | lizmat/soiqbot | eb1c922418a7fc58bdbbb3573c48f90cdd155667 | [
"MIT"
] | 4 | 2016-04-18T14:38:19.000Z | 2020-01-14T15:42:09.000Z | # stackweb.py - Core classes for web-request stuff
from __future__ import print_function
from stackexchange.core import StackExchangeError
from six.moves import urllib
import datetime, io, gzip, time
try:
import json
except ImportError:
import simplejson as json
class TooManyRequestsError(Exception):
def __str__(self):
return "More than 30 requests have been made in the last five seconds."
class WebRequest(object):
data = ''
info = None
def __init__(self, data, info):
self.data = data
self.info = info
def __str__(self):
return str(self.data)
class WebRequestManager(object):
debug = False
cache = {}
def __init__(self, impose_throttling = False, throttle_stop = True, cache = True, cache_age = 1800):
# Whether to monitor requests for overuse of the API
self.impose_throttling = impose_throttling
# Whether to throw an error (when True) if the limit is reached, or wait until another request
# can be made (when False).
self.throttle_stop = throttle_stop
# Whether to use request caching.
self.do_cache = cache
# The time, in seconds, for which to cache a response
self.cache_age = cache_age
# The time at which we should resume making requests after receiving a 'backoff' for each method
self.backoff_expires = {}
# When we last made a request
window = datetime.datetime.now()
# Number of requests since last throttle window
num_requests = 0
def debug_print(self, *p):
if WebRequestManager.debug:
print(' '.join([x if isinstance(x, str) else repr(x) for x in p]))
def canon_method_name(self, url):
# Take the URL relative to the domain, without initial / or parameters
parsed = urllib.parse.urlparse(url)
return '/'.join(parsed.path.split('/')[1:])
def request(self, url, params):
now = datetime.datetime.now()
# Quote URL fields (mostly for 'c#'), but not : in http://
components = url.split('/')
url = components[0] + '/' + ('/'.join(urllib.parse.quote(path) for path in components[1:]))
done = False
for k, v in params.items():
if not done:
url += '?'
done = True
else:
url += '&'
url += '%s=%s' % (k, urllib.parse.quote(str(v).encode('utf-8')))
# Now we have the `proper` URL, we can check the cache
if self.do_cache and url in self.cache:
timestamp, data = self.cache[url]
self.debug_print('C>', url, '@', timestamp)
if (now - timestamp).seconds <= self.cache_age:
self.debug_print('Hit>', url)
return data
# Before we do the actual request, are we going to be throttled?
def halt(wait_time):
if self.throttle_stop:
raise TooManyRequestsError()
else:
# Wait the required time, plus a bit of extra padding time.
time.sleep(wait_time + 0.1)
if self.impose_throttling:
# We need to check if we've been told to back off
method = self.canon_method_name(url)
backoff_time = self.backoff_expires.get(method, None)
if backoff_time is not None and backoff_time >= now:
self.debug_print('backoff: %s until %s' % (method, backoff_time))
                halt((backoff_time - now).seconds)
if (now - WebRequestManager.window).seconds >= 5:
WebRequestManager.window = now
WebRequestManager.num_requests = 0
WebRequestManager.num_requests += 1
if WebRequestManager.num_requests > 30:
halt(5 - (now - WebRequestManager.window).seconds)
# We definitely do need to go out to the internet, so make the real request
self.debug_print('R>', url)
request = urllib.request.Request(url)
request.add_header('Accept-encoding', 'gzip')
req_open = urllib.request.build_opener()
try:
conn = req_open.open(request)
info = conn.info()
req_data = conn.read()
error_code = 200
except urllib.error.HTTPError as e:
# we'll handle the error response later
error_code = e.code
# a hack (headers is an undocumented property), but there's no sensible way to get them
info = getattr(e, 'headers', {})
req_data = e.read()
# Handle compressed responses.
# (Stack Exchange's API sends its responses compressed but intermediary
# proxies may send them to us decompressed.)
if info.get('Content-Encoding') == 'gzip':
data_stream = io.BytesIO(req_data)
gzip_stream = gzip.GzipFile(fileobj = data_stream)
actual_data = gzip_stream.read()
else:
actual_data = req_data
# Check for errors
if error_code != 200:
try:
error_ob = json.loads(actual_data.decode('utf8'))
            except ValueError:
raise StackExchangeError()
else:
raise StackExchangeError(error_ob.get('error_id', StackExchangeError.UNKNOWN), error_ob.get('error_name'), error_ob.get('error_message'))
conn.close()
req_object = WebRequest(actual_data, info)
# Let's store the response in the cache
if self.do_cache:
self.cache[url] = (now, req_object)
self.debug_print('Store>', url)
return req_object
def json_request(self, to, params):
req = self.request(to, params)
parsed_result = json.loads(req.data.decode('utf8'))
# In API v2.x we now need to respect the 'backoff' warning
if 'backoff' in parsed_result:
method = self.canon_method_name(to)
self.backoff_expires[method] = datetime.datetime.now() + datetime.timedelta(seconds = parsed_result['backoff'])
return (parsed_result, req.info)
| 36.898204 | 153 | 0.599481 | 764 | 6,162 | 4.709424 | 0.312827 | 0.019455 | 0.019455 | 0.012507 | 0.02557 | 0.011673 | 0 | 0 | 0 | 0 | 0 | 0.006557 | 0.307043 | 6,162 | 166 | 154 | 37.120482 | 0.836066 | 0.206102 | 0 | 0.081081 | 0 | 0 | 0.043979 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081081 | false | 0 | 0.072072 | 0.018018 | 0.288288 | 0.072072 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
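# --- Illustrative sketch: the request throttle in isolation ---
# WebRequestManager allows at most 30 requests per 5-second window. The same
# sliding-window idea, stripped of the HTTP machinery, for clarity:
import time

class WindowThrottle:
    def __init__(self, limit=30, window=5.0):
        self.limit, self.window = limit, window
        self.start, self.count = time.monotonic(), 0

    def tick(self):
        now = time.monotonic()
        if now - self.start >= self.window:  # a new window begins: reset the counter
            self.start, self.count = now, 0
        self.count += 1
        if self.count > self.limit:          # over budget: sleep out the remainder
            time.sleep(self.window - (now - self.start) + 0.1)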
fafc0e34e7de3b181121e76478e0612c34963839 | 1,484 | py | Python | setup.py | timgates42/try | 785798471ae753968053641b6407ac7be9f91309 | [
"MIT"
] | 691 | 2016-03-21T04:44:02.000Z | 2022-03-30T03:59:31.000Z | setup.py | timgates42/try | 785798471ae753968053641b6407ac7be9f91309 | [
"MIT"
] | 16 | 2016-03-21T11:27:10.000Z | 2021-12-01T01:51:32.000Z | setup.py | timgates42/try | 785798471ae753968053641b6407ac7be9f91309 | [
"MIT"
] | 47 | 2016-03-21T05:04:56.000Z | 2022-03-09T04:45:26.000Z | # -*- coding: utf-8 -*-
"""
Setup try package.
"""
import ast
import re
from setuptools import setup, find_packages
def get_version():
"""Gets the current version"""
_version_re = re.compile(r"__VERSION__\s+=\s+(.*)")
with open("trypackage/__init__.py", "rb") as init_file:
version = str(ast.literal_eval(_version_re.search(
init_file.read().decode("utf-8")).group(1)))
return version
setup(
name="trypackage",
version=get_version(),
license="MIT",
description="Awesome cli tool to try out python packages",
author="Timo Furrer",
author_email="tuxtimo@gmail.com",
url="https://github.com/timofurrer/try",
packages=find_packages(),
include_package_data=True,
install_requires=["click"],
entry_points={
"console_scripts": [
"try=trypackage.__main__:main",
]
},
keywords=[
"try", "python", "packages",
"pypi", "github",
"interactive", "console",
"ipython", "versions",
"virtualenv"
],
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Operating System :: OS Independent",
"Environment :: Console",
"License :: OSI Approved :: MIT License",
"Intended Audience :: End Users/Desktop",
"Topic :: Utilities",
],
)
| 23.1875 | 62 | 0.587601 | 154 | 1,484 | 5.474026 | 0.62987 | 0.090154 | 0.118624 | 0.092527 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00726 | 0.257412 | 1,484 | 63 | 63 | 23.555556 | 0.757713 | 0.044474 | 0 | 0.045455 | 0 | 0 | 0.410421 | 0.051392 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022727 | false | 0 | 0.068182 | 0 | 0.113636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
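# --- Illustrative sketch: the version-extraction pattern on its own ---
# get_version() above parses __VERSION__ out of the package source so setup.py
# never has to import the package. The same pattern on an in-memory string:
import ast
import re

source = '__VERSION__ = "0.1.0"'
match = re.search(r"__VERSION__\s+=\s+(.*)", source)
version = str(ast.literal_eval(match.group(1)))
assert version == "0.1.0"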
fafc3c938fdc7c34c445e7c2c07bfa0509f1663a | 5,470 | py | Python | mbed_host_tests/host_tests_registry/host_registry.py | screamerbg/htrun | 1c570ec77d8c5673dae55dc790302d86d712c36b | [
"Apache-2.0"
] | null | null | null | mbed_host_tests/host_tests_registry/host_registry.py | screamerbg/htrun | 1c570ec77d8c5673dae55dc790302d86d712c36b | [
"Apache-2.0"
] | null | null | null | mbed_host_tests/host_tests_registry/host_registry.py | screamerbg/htrun | 1c570ec77d8c5673dae55dc790302d86d712c36b | [
"Apache-2.0"
] | null | null | null | """
mbed SDK
Copyright (c) 2011-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author: Przemyslaw Wirkus <Przemyslaw.Wirkus@arm.com>
"""
try:
from imp import load_source
except ImportError:
import importlib
import sys
def load_source(module_name, file_path):
spec = importlib.util.spec_from_file_location(module_name, file_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
sys.modules[module_name] = module
return module
from inspect import getmembers, isclass
from os import listdir
from os.path import abspath, exists, isdir, isfile, join
from ..host_tests.base_host_test import BaseHostTest
class HostRegistry:
""" Class stores registry with host tests and objects representing them
"""
HOST_TESTS = {} # Map between host_test_name -> host_test_object
def register_host_test(self, ht_name, ht_object):
"""! Registers host test object by name
@param ht_name Host test unique name
@param ht_object Host test class object
"""
if ht_name not in self.HOST_TESTS:
self.HOST_TESTS[ht_name] = ht_object
def unregister_host_test(self, ht_name):
"""! Unregisters host test object by name
@param ht_name Host test unique name
"""
if ht_name in self.HOST_TESTS:
del self.HOST_TESTS[ht_name]
def get_host_test(self, ht_name):
"""! Fetches host test object by name
@param ht_name Host test unique name
@return Host test callable object or None if object is not found
"""
return self.HOST_TESTS[ht_name] if ht_name in self.HOST_TESTS else None
def is_host_test(self, ht_name):
"""! Checks (by name) if host test object is registered already
@param ht_name Host test unique name
@return True if ht_name is registered (available), else False
"""
return (ht_name in self.HOST_TESTS and
self.HOST_TESTS[ht_name] is not None)
def table(self, verbose=False):
"""! Prints list of registered host test classes (by name)
@Detail For devel & debug purposes
"""
from prettytable import PrettyTable, HEADER
column_names = ['name', 'class', 'origin']
pt = PrettyTable(column_names, junction_char="|", hrules=HEADER)
for column in column_names:
pt.align[column] = 'l'
for name, host_test in sorted(self.HOST_TESTS.items()):
cls_str = str(host_test.__class__)
if host_test.script_location:
src_path = host_test.script_location
else:
src_path = 'mbed-host-tests'
pt.add_row([name, cls_str, src_path])
return pt.get_string()
def register_from_path(self, path, verbose=False):
""" Enumerates and registers locally stored host tests
Host test are derived from mbed_host_tests.BaseHostTest classes
"""
if path:
path = path.strip('"')
if verbose:
print("HOST: Inspecting '%s' for local host tests..." % path)
if exists(path) and isdir(path):
python_modules = [
f for f in listdir(path)
if isfile(join(path, f)) and f.endswith(".py")
]
for module_file in python_modules:
self._add_module_to_registry(path, module_file, verbose)
def _add_module_to_registry(self, path, module_file, verbose):
module_name = module_file[:-3]
try:
mod = load_source(module_name, abspath(join(path, module_file)))
except Exception as e:
print(
"HOST: Error! While loading local host test module '%s'"
% join(path, module_file)
)
print("HOST: %s" % str(e))
return
if verbose:
print("HOST: Loading module '%s': %s" % (module_file, str(mod)))
for name, obj in getmembers(mod):
if (
isclass(obj) and
issubclass(obj, BaseHostTest) and
str(obj) != str(BaseHostTest)
):
if obj.name:
host_test_name = obj.name
else:
host_test_name = module_name
host_test_cls = obj
host_test_cls.script_location = join(path, module_file)
if verbose:
print(
"HOST: Found host test implementation: %s -|> %s"
% (str(obj), str(BaseHostTest))
)
print(
"HOST: Registering '%s' as '%s'"
% (str(host_test_cls), host_test_name)
)
self.register_host_test(
host_test_name, host_test_cls()
)
| 35.290323 | 79 | 0.596161 | 686 | 5,470 | 4.572886 | 0.279883 | 0.086707 | 0.034428 | 0.017851 | 0.141218 | 0.082563 | 0.075869 | 0.075869 | 0.04686 | 0.04686 | 0 | 0.003516 | 0.324132 | 5,470 | 154 | 80 | 35.519481 | 0.845009 | 0.263985 | 0 | 0.10989 | 0 | 0 | 0.064726 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.087912 | false | 0 | 0.120879 | 0 | 0.285714 | 0.065934 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
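# --- Illustrative sketch: registering and looking up a host test ---
# Minimal use of HostRegistry. The absolute import path below is inferred from
# the module's relative import (..host_tests.base_host_test) and may differ in
# practice; EchoTest is a made-up host test that only sets a name.
from mbed_host_tests.host_tests.base_host_test import BaseHostTest

class EchoTest(BaseHostTest):
    name = 'echo_test'

registry = HostRegistry()
registry.register_host_test('echo_test', EchoTest())
assert registry.is_host_test('echo_test')
assert registry.get_host_test('echo_test').name == 'echo_test'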
fafd5c22c2e8d5d921e379d038c9cca8e51e0945 | 845 | py | Python | proxypool/crawlers/public/ip3366.py | wp4969/ProxyPool | b51c08466f208aedd823105bf1c82bc22d890b58 | [
"MIT"
] | null | null | null | proxypool/crawlers/public/ip3366.py | wp4969/ProxyPool | b51c08466f208aedd823105bf1c82bc22d890b58 | [
"MIT"
] | null | null | null | proxypool/crawlers/public/ip3366.py | wp4969/ProxyPool | b51c08466f208aedd823105bf1c82bc22d890b58 | [
"MIT"
] | null | null | null | from proxypool.crawlers.base import BaseCrawler
PROXY_TYPE = range(1, 3)
MAX_PAGE = 7
BASE_URL = 'http://www.ip3366.net/free/?stype={stype}&page={page}'
class Ip3366Crawl(BaseCrawler):
"""
ip3366 http://www.ip3366.net
    If you don't want the fetcher to run this crawler, set: ignore = True
"""
urls = [BASE_URL.format(stype=stype, page=page) for stype in PROXY_TYPE for page in range(1, MAX_PAGE+1)]
ignore = False
def parse(self, response):
trs = response.xpath('//div[@id="list"]/table/tbody/tr')
for tr in trs:
ip = tr.xpath('.//td[1]/text()')[0]
port = tr.xpath('.//td[2]/text()')[0]
proxy = '{}:{}'.format(ip, port)
            # elite = '高匿' in tr.xpath('.//td[3]/text()').get()  # '高匿' = "elite" (high anonymity)
#https = 'HTTPS' in tr.xpath('.//td[4]/text()').get()
yield proxy
| 32.5 | 110 | 0.551479 | 115 | 845 | 4 | 0.486957 | 0.06087 | 0.078261 | 0.069565 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.043062 | 0.257988 | 845 | 25 | 111 | 33.8 | 0.69059 | 0.192899 | 0 | 0 | 0 | 0 | 0.188976 | 0.050394 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.071429 | 0 | 0.357143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
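# --- Illustrative sketch: what parse() extracts ---
# parse() yields plain "host:port" strings from the site's table rows. A tiny
# offline stand-in using lxml on hypothetical HTML shows the same XPath logic:
from lxml import etree

html = ('<div id="list"><table><tbody>'
        '<tr><td>1.2.3.4</td><td>8080</td></tr>'
        '</tbody></table></div>')
tree = etree.HTML(html)
for tr in tree.xpath('//div[@id="list"]/table/tbody/tr'):
    ip = tr.xpath('./td[1]/text()')[0]
    port = tr.xpath('./td[2]/text()')[0]
    print('{}:{}'.format(ip, port))  # -> 1.2.3.4:8080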
fafeb0eba344c4cb37e2d3495da38b5e73a365ff | 10,982 | py | Python | visualization/dashapp/layouts.py | Lchuang/yews | 254c1d3887b812a94421bd6ccef4a51a7ef330e0 | [
"Apache-2.0"
] | null | null | null | visualization/dashapp/layouts.py | Lchuang/yews | 254c1d3887b812a94421bd6ccef4a51a7ef330e0 | [
"Apache-2.0"
] | null | null | null | visualization/dashapp/layouts.py | Lchuang/yews | 254c1d3887b812a94421bd6ccef4a51a7ef330e0 | [
"Apache-2.0"
] | null | null | null | #!/Users/lindsaychuang/miniconda3/envs/obspy/bin/python
# this file defines all the layouts used in apps
import dash_core_components as dcc
import dash_html_components as html
import dash_table
import dash_daq as daq
from dash_table.Format import Format
import datetime
# ---- 01. map layout ------
map_layout = {
'mapbox': {
'accesstoken': 'pk.eyJ1IjoieWNodWFuZzM1IiwiYSI6ImNqeGtzZDluZzFkcWgzem12ZDY2cWpoemsifQ.1_ZAhhpXtE2hnkSCtKIDZw',
'style': 'mapbox://styles/ychuang35/cjxlwvwur15fj1cousoa18kju',
'center': {'lat': 23.60, 'lon': 121.0},
'zoom': 7,
},
'hovermode': 'closest',
'height': 800,
'yaxis': {'title': 'Latitude'},
'xaxis': {'title': 'Longitude'},
'margin': {"r": 0, "t": 0, "l": 5, "b": 0}
}
map_data = [
{'name': "events",
'marker': {
'opacity': 0.8,
'colorscale': 'Portland',
'cmax': 10,
'cmin': 0,
'colorbar': {'title': 'phase number'},
'showscale': True,
'sizeref': 0.4
},
'type': 'scattermapbox',
'showlegend': False,
'hovertemplate': '(%{lat:.2f}, %{lon:.2f})'
'<br><b>%{text}</b>'
'<br><b>M: %{marker.size:.f}</b>'
'<br><b>Phase: %{marker.color:.f}</b>'
},
{'name': "stations",
'marker': {
'color': 'rgb(0, 0, 0)',
'size': 9,
'opacity': 1,
'symbol': 'triangle',
'line': {'color': 'rgb(0, 0, 0)'},
},
'type': 'scattermapbox',
'showlegend': False,
'hovertemplate': '(%{lat:.2f}, %{lon:.2f})'
'<br><b>%{text}</b>'
},
]
geomap_layout = dcc.Graph(id='map', figure={
'layout': map_layout,
'data': map_data
})
# ---- 02. earthquake catalog drop-down list
eqs_loc_layout = dcc.Dropdown(id='eq_loc_dw',
options=[],
placeholder='select a standard earthquake catalog',
multi=True
)
# ---- 03. earthquake catalog table
eqs_table_layout = dash_table.DataTable(id='table',
data=[],
style_header={
'backgroundColor': 'rgb(230,230,230)',
'fontWeight': 'bold',
'textAlign': 'center'
},
style_data_conditional=[{
'if': {'row_index': 'odd'},
'backgroundColor': 'rgb(2489,248,248)'
}],
style_table={
#'maxHeight': '400px',
'overflowY': 'scroll',
'overflowX': 'scroll',
#'maxWidth': '200px',
'textAlign': 'center'
},
columns=[
{'id': 'otime', 'name': 'otime'},
{'id': 'evla', 'name': 'lat'},
{'id': 'evlo', 'name': 'lon'},
{'id': 'evdp', 'name': 'dep'},
{'id': 'mag', 'name': 'mag'},
],
fixed_rows={'headers': True, 'data': 0},
style_cell={'width': '70px'},
sort_action='native',
sort_mode='multi',
row_selectable='single',
row_deletable=False,
filter_action='native',
page_size=50,
)
# ---- 04. phase catalog drop-down list
phase_loc_layout = dcc.Dropdown(id='phase_loc_dw',
options=[],
placeholder='select an earthquake phase catalog',
multi=True
)
# ---- 04. station catalog drop-down list
sta_loc_layout = dcc.Dropdown(id='sta_loc_dw',
options=[],
placeholder='select a station catalog',
multi=True
)
# ---- 06. continuous waveform path
deployment_path = dcc.Dropdown(id='deployment_pt',
options=[],
placeholder='select deployment output path',
multi=False
)
# ---- 05. mode radio buttons
mode_button = dcc.RadioItems(
options=[
{'label': 'simultaneous', 'value': 'sim'},
{'label': 'manual', 'value': 'manual'}
]
)
# ---- 06. mode switch
mode_switch = daq.BooleanSwitch(
id='mode_switch',
on=False,
label='Instant Update',
labelPosition='top',
)
# ---- 07. Icon nob
icon_nob = daq.Knob(
label="Display length (mins)",
size=100,
value=5,
max=5,
scale={'start': 0, 'labelInterval': 1, 'interval': 0.5},
id="win_nob"
)
# ---- 08. Tabs
tab_style = {
'borderBottom': '1px solid #d6d6d6',
'padding': '6px',
'fontWeight': 'bold'
}
tab_selected_style = {
'borderTop': '1px solid #d6d6d6',
'borderBottom': '1px solid #d6d6d6',
'backgroundColor': '#119DFF',
'color': 'white',
'padding': '6px'
}
idx_tabs = dcc.Tabs(id="index_page_tab", value='earthquakes', children=[
dcc.Tab(label='earthquakes', value='earthquakes', style=tab_style, selected_style=tab_selected_style),
dcc.Tab(label='phases', value='phases', style=tab_style, selected_style=tab_selected_style),
dcc.Tab(label='stations', value='stations', style=tab_style, selected_style=tab_selected_style)
])
# ---- 09. date-time picker
cont_date_picker = dcc.DatePickerSingle(
id='cont_dtp',
display_format='M-D-Y',
)
# ---- 10. time input
cont_hours_input = daq.NumericInput(
id='hour_inout',
max=23,
value=8,
min=0,
label='Hour'
)
cont_minutes_input = daq.NumericInput(
id='min_inout',
max=59,
value=20,
min=0,
label='Min'
)
cont_seconds_input = daq.NumericInput(
id='sec_inout',
max=59,
value=30,
min=0,
label='Sec'
)
# ---- 11. Time range slider
time_slider = dcc.RangeSlider(
id="time_slider",
min=0,
max=86400,
step=86400,
value=[0, 86400],
marks={i: f'{i}'
for i in range(0, 86400, 10000)
}
)
# ---- button css
# ---- 10.analysis button
view_waveform_evt = html.A(
id='view_event_wf',
children=html.Button('View Event Waveform', type='submit', id='evtwf_button'),
href='http://www.yahoo.com',
target='_blank'
)
# ---- 11. catalog analysis button
view_events_sta = html.A(
children=html.Button(
'Analyse Catalog', type='submit', id='evt_button'),
id='ana_eq_cata',
href='http://www.google.com',
target='_blank'
)
# ---- 12. view continuous data
view_waveform_cont = html.A(
children=html.Button(
'View All Waveform', type='submit', id='cont_button'),
id='view_cont_wf',
href='/apps/Continuous_WF',
target='_blank',
)
# ---- 13. radio item
cont_filter_radio = dcc.RadioItems(
options=[
{'label': 'raw', 'value': 'raw'},
{'label': 'bandpass', 'value': 'bandpass'},
{'label': 'highpass', 'value': 'highpass'},
{'label': 'lowpass', 'value': 'lowpass'},
],
value='bandpass',
id='filter_type'
)
# ---- 14. filer
cont_filter_low = daq.NumericInput(
id='filter_low',
max=50,
value=2,
min=0.001,
label='Low F',
labelPosition='top',
)
cont_filter_high = daq.NumericInput(
id='filter_high',
max=100,
value=8,
min=0.001,
label='High F',
labelPosition='top',
)
# ---- normalization option
cont_control_norm = dcc.Dropdown(
id='norm_control',
options=[
{'label': 'Original Scale', 'value': 'Original Scale'},
{'label': 'Normalize', 'value': 'Normalize'},
],
placeholder='select normalization style',
multi=False,
value="Normalize"
)
# ---- waveform source option
cont_wf_path = dcc.Dropdown(id='cont_wf_pt',
options=[],
placeholder='select continuous waveform path',
multi=False
)
# ---- 17. Tab control content
Cont_control_tab = html.Div([
html.P("Settings for waveform display", className="cont_control_display_title"),
html.Div([
html.P("Waveform Normalization Mode"),
html.Div(cont_control_norm),
html.P("Select Continuous Waveform Source"),
html.Div(cont_wf_path),
html.P('Select CPIC Deployment Source'),
html.Div(deployment_path),
], className="cont_control_display_left")
])
# ---- 15. Continuous waveform display
cont_wf_tabs = dcc.Tabs(id="cont_wf_tabs", value='Control',
children=[
dcc.Tab(label='N', value='N', style=tab_style, selected_style=tab_selected_style),
dcc.Tab(label='E', value='E', style=tab_style, selected_style=tab_selected_style),
dcc.Tab(label='Z', value='Z', style=tab_style, selected_style=tab_selected_style),
dcc.Tab(label='NEZ', value='NEZ', style=tab_style, selected_style=tab_selected_style),
dcc.Tab(Cont_control_tab, label='Control', value='Control', style=tab_style, selected_style=tab_selected_style)
])
# ---- 16. cont joy stick
browse_wf = daq.Joystick(
id='move_handle',
label="Browse waveform",
angle=0,
size=60,
)
# ---- 18. Tab N control content
Cont_N_tab = dcc.Loading(
id='loadN', children=html.Div([dcc.Graph(id='N_comp_wfs')]))
Cont_E_tab = dcc.Loading(
id='loadE', children=html.Div([dcc.Graph(id='E_comp_wfs')]))
Cont_Z_tab = dcc.Loading(
id='loadZ', children=html.Div([dcc.Graph(id='Z_comp_wfs')]))
Cont_NEZ_tab = dcc.Loading(
id='loadNEZ', children=html.Div([dcc.Graph(id='NEZ_comp_wfs')])) | 34.974522 | 139 | 0.477782 | 1,061 | 10,982 | 4.782281 | 0.306315 | 0.043555 | 0.02838 | 0.03311 | 0.166732 | 0.134608 | 0.103075 | 0.103075 | 0.08652 | 0.08652 | 0 | 0.030754 | 0.37525 | 10,982 | 314 | 140 | 34.974522 | 0.708789 | 0.075123 | 0 | 0.190476 | 0 | 0 | 0.23102 | 0.023398 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.014652 | 0.021978 | 0 | 0.021978 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
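# --- Illustrative sketch: wiring the fragments into an app ---
# The objects above are layout fragments; they only render once attached to a
# Dash app. A minimal wiring if this code is appended to the module above
# (dash itself is the only extra import, assuming the same legacy
# dash_core_components/dash_html_components stack):
import dash

app = dash.Dash(__name__)
app.layout = html.Div([geomap_layout, eqs_loc_layout, eqs_table_layout])

if __name__ == '__main__':
    app.run_server(debug=True)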
faffb57b7651a65639929342efa031a9632c53ea | 1,342 | py | Python | Stable_baselines3/ppo_main2.py | FitMachineLearning/FitML | a60f49fce1799ca4b11b48307441325b6272719a | [
"MIT"
] | 171 | 2017-11-07T09:59:20.000Z | 2022-03-29T13:59:18.000Z | Stable_baselines3/ppo_main2.py | FitMachineLearning/FitML | a60f49fce1799ca4b11b48307441325b6272719a | [
"MIT"
] | 1 | 2017-12-24T20:08:18.000Z | 2018-01-31T22:26:49.000Z | Stable_baselines3/ppo_main2.py | FitMachineLearning/FitML | a60f49fce1799ca4b11b48307441325b6272719a | [
"MIT"
] | 44 | 2017-11-07T12:08:05.000Z | 2022-01-04T15:53:12.000Z | import gym
import pybullet, pybullet_envs
import torch as th
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy
# Create environment
# env = gym.make('LunarLanderContinuous-v2')
env = gym.make('BipedalWalker-v3')
# env.render(mode="human")
policy_kwargs = dict(activation_fn=th.nn.LeakyReLU, net_arch=[512, 512])
# Instantiate the agent
model = PPO('MlpPolicy', env,learning_rate=0.0003,policy_kwargs=policy_kwargs, verbose=1)
# Train the agent
for i in range(8000):
print("Training itteration ",i)
model.learn(total_timesteps=10000)
# Save the agent
model.save("ppo_Ant")
mean_reward, std_reward = evaluate_policy(model, model.get_env(), n_eval_episodes=5)
print("mean_reward ", mean_reward)
if mean_reward >= 270:
print("***Agent Trained with average reward ", mean_reward)
break
del model # delete trained model to demonstrate loading
# Load the trained agent
# model = PPO.load("ppo_Ant")
# Evaluate the agent
# mean_reward, std_reward = evaluate_policy(model, model.get_env(), n_eval_episodes=10)
# Enjoy trained agent
# obs = env.reset()
# for i in range(100):
# action, _states = model.predict(obs, deterministic=True)
# obs, rewards, dones, info = env.step(action)
# env.render()
| 31.209302 | 90 | 0.707899 | 185 | 1,342 | 4.972973 | 0.513514 | 0.065217 | 0.043478 | 0.023913 | 0.134783 | 0.134783 | 0.134783 | 0.134783 | 0.134783 | 0.134783 | 0 | 0.030994 | 0.182563 | 1,342 | 42 | 91 | 31.952381 | 0.807657 | 0.391207 | 0 | 0 | 0 | 0 | 0.133421 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.277778 | 0 | 0.277778 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
faffca2d14779955ce62c87fc83b42efbcaf3c11 | 5,886 | py | Python | examples/_bak/testServer2.py | ProkopHapala/ProbeParticleModel | 1afbd32cbf68440d71c2ee53f2066c898a00ae23 | [
"MIT"
] | 26 | 2015-10-21T21:02:03.000Z | 2021-11-17T11:40:28.000Z | examples/_bak/testServer2.py | ProkopHapala/ProbeParticleModel | 1afbd32cbf68440d71c2ee53f2066c898a00ae23 | [
"MIT"
] | 9 | 2015-10-26T14:11:25.000Z | 2021-06-23T10:04:11.000Z | examples/_bak/testServer2.py | ProkopHapala/ProbeParticleModel | 1afbd32cbf68440d71c2ee53f2066c898a00ae23 | [
"MIT"
] | 20 | 2015-07-13T14:39:59.000Z | 2020-12-03T12:52:36.000Z | #!/usr/bin/python
import matplotlib
matplotlib.use('Agg') # Force matplotlib to not use any Xwindows backend.
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import elements
#print dir( elements )
import basUtils
print(" # ========== make & load ProbeParticle C++ library ")
def makeclean( ):
import os
[ os.remove(f) for f in os.listdir(".") if f.endswith(".so") ]
[ os.remove(f) for f in os.listdir(".") if f.endswith(".o") ]
[ os.remove(f) for f in os.listdir(".") if f.endswith(".pyc") ]
#makeclean( ) # force to recompile
import ProbeParticle as PP
print(" # ========== server interface file I/O ")
PP.loadParams( 'params.ini' )
print(" # ============ define atoms ")
#bas = basUtils.loadBas('surf.bas')[0]
#bas = basUtils.loadBas('PTCDA_Ruslan_1x1.bas')[0]
#bas = basUtils.loadBas('GrN6x6.bas')[0]
atoms = basUtils.loadAtoms('input.xyz')
Rs = np.array([atoms[1],atoms[2],atoms[3]]);
iZs = np.array( atoms[0])
if not PP.params['PBC' ]:
print(" NO PBC => autoGeom ")
PP.autoGeom( Rs, shiftXY=True, fitCell=True, border=3.0 )
print(" NO PBC => params[ 'gridA' ] ", PP.params[ 'gridA' ])
print(" NO PBC => params[ 'gridB' ] ", PP.params[ 'gridB' ])
print(" NO PBC => params[ 'gridC' ] ", PP.params[ 'gridC' ])
print(" NO PBC => params[ 'scanMin' ] ", PP.params[ 'scanMin' ])
print(" NO PBC => params[ 'scanMax' ] ", PP.params[ 'scanMax' ])
#Rs[0] += PP.params['moleculeShift' ][0] # shift molecule so that we sample reasonable part of potential
#Rs[1] += PP.params['moleculeShift' ][1]
#Rs[2] += PP.params['moleculeShift' ][2]
Rs = np.transpose( Rs, (1,0) ).copy()
Qs = np.array( atoms[4] )
if PP.params['PBC' ]:
iZs,Rs,Qs = PP.PBCAtoms( iZs, Rs, Qs, avec=PP.params['gridA'], bvec=PP.params['gridB'] )
print("shape( Rs )", np.shape( Rs ));
#print "Rs : ",Rs
print(" # ============ define Scan and allocate arrays - do this before simulation, in case it will crash ")
dz = PP.params['scanStep'][2]
zTips = np.arange( PP.params['scanMin'][2], PP.params['scanMax'][2]+0.00001, dz )[::-1];
ntips = len(zTips);
print(" zTips : ",zTips)
rTips = np.zeros((ntips,3))
rs = np.zeros((ntips,3))
fs = np.zeros((ntips,3))
rTips[:,0] = 1.0
rTips[:,1] = 1.0
rTips[:,2] = zTips
PP.setTip()
xTips = np.arange( PP.params['scanMin'][0], PP.params['scanMax'][0]+0.00001, 0.1 )
yTips = np.arange( PP.params['scanMin'][1], PP.params['scanMax'][1]+0.00001, 0.1 )
extent=( xTips[0], xTips[-1], yTips[0], yTips[-1] )
fzs = np.zeros(( len(zTips), len(yTips ), len(xTips ) ));
nslice = 10;
atomTypesFile = os.path.dirname(sys.argv[0]) + '/../code/defaults/atomtypes.ini'
FFparams = PP.loadSpecies( atomTypesFile )
C6,C12 = PP.getAtomsLJ( PP.params['probeType'], iZs, FFparams )
print(" # ============ define Grid ")
cell =np.array([
PP.params['gridA'],
PP.params['gridB'],
PP.params['gridC'],
]).copy()
gridN = PP.params['gridN']
FF = np.zeros( (gridN[2],gridN[1],gridN[0],3) )
#quit()
# ==============================================
# The costly part of simulation starts here
# ==============================================
print(" # =========== Sample LenardJones ")
PP.setFF( FF, cell )
PP.setFF_Pointer( FF )
PP.getLenardJonesFF( Rs, C6, C12 )
plt.figure(figsize=( 5*nslice,5 )); plt.title( ' FF LJ ' )
'''
for i in range(nslice):
plt.subplot( 1, nslice, i+1 )
plt.imshow( FF[i,:,:,2], origin='lower', interpolation='nearest' )
'''
withElectrostatics = ( abs( PP.params['charge'] )>0.001 )
if withElectrostatics:
print(" # =========== Sample Coulomb ")
FFel = np.zeros( np.shape( FF ) )
CoulombConst = -14.3996448915; # [ e^2 eV/A ]
Qs *= CoulombConst
#print Qs
PP.setFF_Pointer( FFel )
PP.getCoulombFF ( Rs, Qs )
plt.figure(figsize=( 5*nslice,5 )); plt.title( ' FFel ' )
'''
for i in range(nslice):
plt.subplot( 1, nslice, i+1 )
plt.imshow( FFel[i,:,:,2], origin='lower', interpolation='nearest' )
'''
FF += FFel*PP.params['charge']
PP.setFF_Pointer( FF )
del FFel
'''
plt.figure(figsize=( 5*nslice,5 )); plt.title( ' FF total ' )
for i in range(nslice):
plt.subplot( 1, nslice, i+1 )
plt.imshow( FF[i,:,:,2], origin='lower', interpolation='nearest' )
'''
print(" # ============ Relaxed Scan 3D ")
for ix,x in enumerate( xTips ):
print("relax ix:", ix)
rTips[:,0] = x
for iy,y in enumerate( yTips ):
rTips[:,1] = y
itrav = PP.relaxTipStroke( rTips, rs, fs ) / float( len(zTips) )
fzs[:,iy,ix] = fs[:,2].copy()
#print itrav
#if itrav > 100:
# print " bad convergence > %i iterations per pixel " % itrav
# print " exiting "
# break
print(" # ============ convert Fz -> df ")
dfs = PP.Fz2df( fzs, dz = dz, k0 = PP.params['kCantilever'], f0=PP.params['f0Cantilever'], n=int(PP.params['Amplitude']/dz) )
print(" # ============ Plot Relaxed Scan 3D ")
#slices = range( PP.params['plotSliceFrom'], PP.params['plotSliceTo'], PP.params['plotSliceBy'] )
#print "plotSliceFrom, plotSliceTo, plotSliceBy : ", PP.params['plotSliceFrom'], PP.params['plotSliceTo'], PP.params['plotSliceBy']
#print slices
#nslice = len( slices )
slices = list(range( 0, len(dfs)))
for ii,i in enumerate(slices):
print(" plotting ", i)
plt.figure( figsize=( 10,10 ) )
plt.imshow( dfs[i], origin='lower', interpolation=PP.params['imageInterpolation'], cmap=PP.params['colorscale'], extent=extent )
# z = zTips[i] - PP.params['moleculeShift' ][2]
z = zTips[i]
plt.colorbar();
plt.xlabel(r' Tip_x $\AA$')
plt.ylabel(r' Tip_y $\AA$')
plt.title( r"df Tip_z = %2.2f $\AA$" %z )
plt.savefig( 'df_%04i.png' %i, bbox_inches='tight' )
print(" ***** ALL DONE ***** ")
#plt.show()
| 30.030612 | 132 | 0.578661 | 803 | 5,886 | 4.229141 | 0.298879 | 0.089517 | 0.017668 | 0.023557 | 0.201413 | 0.168139 | 0.158422 | 0.158422 | 0.148999 | 0.128975 | 0 | 0.028101 | 0.195889 | 5,886 | 195 | 133 | 30.184615 | 0.689415 | 0.185525 | 0 | 0.039216 | 0 | 0 | 0.23345 | 0.007251 | 0 | 0 | 0 | 0 | 0 | 1 | 0.009804 | false | 0 | 0.088235 | 0 | 0.098039 | 0.205882 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
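# --- Illustrative sketch: force to frequency shift ---
# PP.Fz2df converts the relaxed force curves into an AFM frequency shift. In
# the small-amplitude limit this reduces to df = -(f0 / (2*k)) * dFz/dz, which
# can be checked with a plain finite difference (f0 and k below are arbitrary
# stand-ins, not the params.ini defaults):
import numpy as np

f0, k = 30300.0, 1800.0           # cantilever frequency and stiffness
z = np.linspace(5.0, 10.0, 200)   # tip heights
Fz = -1.0 / z**2                  # toy force curve
df = -(f0 / (2.0 * k)) * np.gradient(Fz, z)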
4f019e58d978897b9b1cc1a2f377de443998fabc | 820 | py | Python | create_table.py | juliachn/astronaut-db | ac1e1df5d109b5975548d8578f535ca8b2bedf39 | [
"MIT"
] | null | null | null | create_table.py | juliachn/astronaut-db | ac1e1df5d109b5975548d8578f535ca8b2bedf39 | [
"MIT"
] | null | null | null | create_table.py | juliachn/astronaut-db | ac1e1df5d109b5975548d8578f535ca8b2bedf39 | [
"MIT"
] | null | null | null | """Create the astronauts_in_space table in PostgreSQL."""
from config import config
import psycopg2
def create_tables():
sql = """CREATE TABLE astronauts_in_space (name VARCHAR(255) NOT NULL,
craft VARCHAR(20) NOT NULL)"""
conn = None
try:
# read connection parameters
params = config()
# connect to the PostgreSQL server
conn = psycopg2.connect(**params)
# create a new cursor
cur = conn.cursor()
# execute the create_table statement
cur.execute(sql)
# close communication with the db
cur.close()
# commit the changes
conn.commit()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
if __name__ == '__main__':
create_tables() | 23.428571 | 75 | 0.608537 | 94 | 820 | 5.170213 | 0.56383 | 0.067901 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014011 | 0.303659 | 820 | 35 | 76 | 23.428571 | 0.837128 | 0.235366 | 0 | 0 | 0 | 0 | 0.165316 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.1 | 0 | 0.15 | 0.05 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
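# --- Illustrative sketch: inserting rows into the new table ---
# Once create_tables() has run, rows can be added with the same config()
# pattern. insert_astronaut is a hypothetical helper, not part of the file:
import psycopg2
from config import config

def insert_astronaut(name, craft):
    sql = "INSERT INTO astronauts_in_space (name, craft) VALUES (%s, %s)"
    conn = psycopg2.connect(**config())
    try:
        with conn.cursor() as cur:
            cur.execute(sql, (name, craft))
        conn.commit()
    finally:
        conn.close()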
4f02a1b01f31096708b76acaf22f14eb038ba6d3 | 2,025 | py | Python | tests/fitness/observation_fitness_test.py | auxein/auxein | 5388cb572b65aecc282f915515c35dc3b987154c | [
"Apache-2.0"
] | 1 | 2019-05-08T14:53:27.000Z | 2019-05-08T14:53:27.000Z | tests/fitness/observation_fitness_test.py | auxein/auxein | 5388cb572b65aecc282f915515c35dc3b987154c | [
"Apache-2.0"
] | 2 | 2020-08-26T09:16:47.000Z | 2020-10-30T16:47:03.000Z | tests/fitness/observation_fitness_test.py | auxein/auxein | 5388cb572b65aecc282f915515c35dc3b987154c | [
"Apache-2.0"
] | null | null | null | import numpy as np
from auxein.population import build_individual
from auxein.fitness.observation_based import ObservationBasedFitness, MultipleLinearRegression, MaximumLikelihood
def test_multiple_linear_regression():
xs = np.array([[23], [26], [30], [34], [43], [48], [52], [57], [58]])
y = np.array([651, 762, 856, 1063, 1190, 1298, 1421, 1440, 1518])
i = build_individual([23.42, 167.68], [])
fitness_function = MultipleLinearRegression(xs, y)
assert np.isclose(fitness_function.fitness(i), -18804)
def test_fitness_landscape():
class TestFitnessFunction(ObservationBasedFitness):
def fitness(self, individual):
return individual.genotype.dna[0] + individual.genotype.dna[1]
def value(self, individual, x):
pass
fitness_function = TestFitnessFunction()
landscape = fitness_function.get_landscape([[-1, 1], [0, 1]], 3)
assert len(landscape) == 9
for e in landscape:
assert len(e) == 3
expected = [[-1, 0, -1], [0, 0, 0], [1, 0, 1], [-1, 0.5, -0.5], [0, 0.5, 0.5], [1, 0.5, 1.5], [-1, 1, 0], [0, 1, 1], [1, 1, 2]]
assert np.array_equal(landscape, expected)
# Classic example with students and time spent studying
# from: https://en.wikipedia.org/wiki/Logistic_regression
def test_maximum_likelihood_value():
xs = np.array([[0.50], [0.75], [1.00], [1.25], [1.50], [1.75], [1.75], [2.00], [2.25], [2.50], [2.75], [3.00], [3.25], [3.50], [4.00], [4.25], [4.50], [4.75], [5.00], [5.50]])
y = np.array([0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1])
fitness_function = MaximumLikelihood(xs, y)
i = build_individual([-4.0777, 1.5046])
assert np.isclose(fitness_function.value(i, [1]), 0.07, atol=0.01)
assert np.isclose(fitness_function.value(i, [2]), 0.26, atol=0.01)
assert np.isclose(fitness_function.value(i, [3]), 0.61, atol=0.01)
assert np.isclose(fitness_function.value(i, [4]), 0.87, atol=0.01)
assert np.isclose(fitness_function.value(i, [5]), 0.97, atol=0.01)
| 42.1875 | 179 | 0.628642 | 323 | 2,025 | 3.863777 | 0.312694 | 0.020833 | 0.016827 | 0.105769 | 0.217147 | 0.184295 | 0.184295 | 0.155449 | 0.137821 | 0.137821 | 0 | 0.139005 | 0.175802 | 2,025 | 47 | 180 | 43.085106 | 0.608748 | 0.053827 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.28125 | 1 | 0.15625 | false | 0.03125 | 0.09375 | 0.03125 | 0.3125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
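# --- Illustrative sketch: checking the expected values by hand ---
# The constants in test_maximum_likelihood_value are just the logistic function
# evaluated at beta0 + beta1 * hours, so they can be reproduced directly:
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

beta0, beta1 = -4.0777, 1.5046
for hours, expected in [(1, 0.07), (2, 0.26), (3, 0.61), (4, 0.87), (5, 0.97)]:
    assert np.isclose(sigmoid(beta0 + beta1 * hours), expected, atol=0.01)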
4f03bab4ed15742dd70074c8d48e92b23ae5cb42 | 418 | py | Python | server/accounts/urls.py | Vamshi399/project_management_tool | 658af1c87b18d5e76e34c0b48ccf418c9f353423 | [
"Apache-2.0"
] | null | null | null | server/accounts/urls.py | Vamshi399/project_management_tool | 658af1c87b18d5e76e34c0b48ccf418c9f353423 | [
"Apache-2.0"
] | null | null | null | server/accounts/urls.py | Vamshi399/project_management_tool | 658af1c87b18d5e76e34c0b48ccf418c9f353423 | [
"Apache-2.0"
] | 1 | 2021-05-12T19:08:52.000Z | 2021-05-12T19:08:52.000Z | from django.conf.urls import url
from . import views
urlpatterns = [
    url(r'^api/users$', views.UserCreate.as_view(), name='account-create'),
    url(r'^tasks$', views.TaskCreate.as_view(), name='tasks-create'),
    url(r'^tasks2$', views.Task2Create.as_view(), name='tasks2-create'),
    url(r'^project$', views.ProjectCreate.as_view(), name='project-create'),
    url(r'^role$', views.RoleView.as_view(), name='role-view'),
]
| 32.153846 | 73 | 0.684211 | 56 | 418 | 5.017857 | 0.428571 | 0.106762 | 0.177936 | 0.106762 | 0.170819 | 0.170819 | 0 | 0 | 0 | 0 | 0 | 0.005435 | 0.119617 | 418 | 12 | 74 | 34.833333 | 0.758152 | 0 | 0 | 0 | 0 | 0 | 0.220096 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.222222 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
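# --- Illustrative sketch: the same table with path() ---
# On Django >= 2.0 the equivalent routes are usually written with path(), which
# matches exact URLs and avoids the prefix pitfalls of unanchored regexes
# (reusing the `views` import from the module above):
from django.urls import path

urlpatterns = [
    path('api/users', views.UserCreate.as_view(), name='account-create'),
    path('tasks', views.TaskCreate.as_view(), name='tasks-create'),
    path('tasks2', views.Task2Create.as_view(), name='tasks2-create'),
    path('project', views.ProjectCreate.as_view(), name='project-create'),
    path('role', views.RoleView.as_view(), name='role-view'),
]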
4f06558b33e9c97342f7c648ce6deb597ed13453 | 1,739 | py | Python | highbrow/posts/routes.py | ishmamt/highbrow | 4fb8523d8c4cbc7d66bfecb61f40b75beca55eb3 | [
"MIT"
] | null | null | null | highbrow/posts/routes.py | ishmamt/highbrow | 4fb8523d8c4cbc7d66bfecb61f40b75beca55eb3 | [
"MIT"
] | null | null | null | highbrow/posts/routes.py | ishmamt/highbrow | 4fb8523d8c4cbc7d66bfecb61f40b75beca55eb3 | [
"MIT"
] | null | null | null | from flask import render_template, url_for, redirect, request, Blueprint
from highbrow.posts.forms import PostForm
from highbrow.posts.utils import fetch_post, create_comment, fetch_comments, like_unlike_post
from highbrow.utils import fetch_notifications, if_is_liked, if_is_saved
from flask_login import current_user
posts = Blueprint('posts', __name__) # similar to app = Flask(__name__)
@posts.route("/post/<string:post_id>", methods=["GET", "POST"])
def post(post_id):
post = fetch_post(post_id)
comments = fetch_comments(post_id)
notifications = fetch_notifications(current_user.username)
is_liked = if_is_liked(current_user.username, post_id)
is_saved = if_is_saved(current_user.username, post_id)
profile_picture = url_for('static', filename='profile_pictures/' + current_user.profile_picture)
comment_form = PostForm()
if comment_form.validate_on_submit() and request.method == "POST":
create_comment(current_user.username, post["link"], post["username"], comment_form.comment.data)
return redirect(url_for('posts.post', post_id=post_id))
return render_template("post.html", comment_form=comment_form, comments=comments,
number_of_comments=post["comments"], post_details=post, notifications=notifications,
current_user=current_user.username, is_liked=is_liked, is_saved=is_saved, profile_picture=profile_picture)
@posts.route("/post/like/<string:notified_user>/<string:notifying_user>/<string:post_id>/<string:is_liked>")
def like_post(notifying_user, notified_user, post_id, is_liked):
like_unlike_post(notifying_user, notified_user, post_id, is_liked)
return redirect(url_for("posts.post", post_id=post_id))
| 56.096774 | 133 | 0.761357 | 240 | 1,739 | 5.154167 | 0.254167 | 0.063056 | 0.076799 | 0.033953 | 0.216653 | 0.134196 | 0.134196 | 0.134196 | 0.134196 | 0.066289 | 0 | 0 | 0.130535 | 1,739 | 30 | 134 | 57.966667 | 0.818122 | 0.018401 | 0 | 0 | 0 | 0.04 | 0.118475 | 0.066862 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.2 | 0 | 0.4 | 0.08 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
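# --- Illustrative sketch: registering the blueprint ---
# The `posts` blueprint only takes effect once registered on the application,
# typically inside an app factory (the module path follows the imports above):
from flask import Flask
from highbrow.posts.routes import posts

app = Flask(__name__)
app.register_blueprint(posts)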
4f0bb6996176372b6cd900111be1f18d7f46c073 | 2,319 | py | Python | configs/ufld/resnet18_tusimple.py | AbdulFMS/lanedet | 32f258437f1852a115d194a3c195913a7d65240c | [
"Apache-2.0"
] | 277 | 2021-04-16T02:49:52.000Z | 2022-03-31T06:25:11.000Z | configs/ufld/resnet18_tusimple.py | ibaiGorordo/lanedet | 210709066886e4906c8d3ab4d9173785ef07c65d | [
"Apache-2.0"
] | 40 | 2021-05-17T08:31:48.000Z | 2022-03-31T11:39:21.000Z | configs/ufld/resnet18_tusimple.py | ibaiGorordo/lanedet | 210709066886e4906c8d3ab4d9173785ef07c65d | [
"Apache-2.0"
] | 47 | 2021-04-16T07:18:53.000Z | 2022-03-17T03:13:21.000Z | net = dict(
type='Detector',
)
backbone = dict(
type='ResNetWrapper',
resnet='resnet18',
pretrained=True,
replace_stride_with_dilation=[False, False, False],
out_conv=False,
)
featuremap_out_channel = 512
griding_num = 100
num_classes = 6
heads = dict(type='LaneCls',
dim = (griding_num + 1, 56, num_classes))
trainer = dict(
type='LaneCls'
)
evaluator = dict(
type='Tusimple',
)
optimizer = dict(
type = 'SGD',
lr = 0.025,
weight_decay = 1e-4,
momentum = 0.9
)
epochs = 150
batch_size = 4
total_iter = (3616 // batch_size + 1) * epochs
import math
scheduler = dict(
type = 'LambdaLR',
lr_lambda = lambda _iter : math.pow(1 - _iter/total_iter, 0.9)
)
img_norm = dict(
mean=[103.939, 116.779, 123.68],
std=[1., 1., 1.]
)
ori_img_h = 720
ori_img_w = 1280
img_h = 288
img_w = 800
cut_height=0
sample_y = range(710, 150, -10)
dataset_type = 'TuSimple'
dataset_path = './data/tusimple'
row_anchor = 'tusimple_row_anchor'
train_process = [
dict(type='RandomRotation', degree=(-6, 6)),
dict(type='RandomUDoffsetLABEL', max_offset=100),
dict(type='RandomLROffsetLABEL', max_offset=200),
dict(type='GenerateLaneCls', row_anchor=row_anchor,
num_cols=griding_num, num_classes=num_classes),
dict(type='Resize', size=(img_w, img_h)),
dict(type='Normalize', img_norm=img_norm),
dict(type='ToTensor', keys=['img', 'cls_label']),
]
val_process = [
dict(type='Resize', size=(img_w, img_h)),
dict(type='Normalize', img_norm=img_norm),
dict(type='ToTensor', keys=['img']),
]
dataset = dict(
train=dict(
type=dataset_type,
data_root=dataset_path,
split='trainval',
processes=train_process,
),
val=dict(
type=dataset_type,
data_root=dataset_path,
split='test',
processes=val_process,
),
test=dict(
type=dataset_type,
data_root=dataset_path,
split='test',
processes=val_process,
)
)
workers = 12
ignore_label = 255
log_interval = 100
eval_ep = 1
save_ep = epochs
row_anchor='tusimple_row_anchor'
test_json_file='data/tusimple/test_label.json'
lr_update_by_epoch = False
| 20.522124 | 66 | 0.651574 | 317 | 2,319 | 4.514196 | 0.388013 | 0.1174 | 0.023061 | 0.039832 | 0.37666 | 0.340321 | 0.340321 | 0.340321 | 0.340321 | 0.310273 | 0 | 0.05117 | 0.207848 | 2,319 | 112 | 67 | 20.705357 | 0.727817 | 0 | 0 | 0.270833 | 0 | 0 | 0.131091 | 0.012505 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.020833 | 0 | 0.020833 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
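# --- Illustrative sketch: the LambdaLR decay curve ---
# The scheduler applies polynomial decay, lr(i) = lr0 * (1 - i/total_iter)**0.9.
# Its shape can be inspected without building the model:
import math

lr0 = 0.025
total = (3616 // 4 + 1) * 150  # total_iter for batch_size=4, epochs=150
for it in (0, total // 2, total - 1):
    print(it, lr0 * math.pow(1 - it / total, 0.9))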
4f0c86d2283f684984e36164a05189394bd02f11 | 334 | py | Python | Experiencing-Python/HackerRank/Capitalize!.py | ar-pavel/Code-Library | 2d1b952231c1059bbf98d85d2c23fd8fb21b455c | [
"MIT"
] | null | null | null | Experiencing-Python/HackerRank/Capitalize!.py | ar-pavel/Code-Library | 2d1b952231c1059bbf98d85d2c23fd8fb21b455c | [
"MIT"
] | null | null | null | Experiencing-Python/HackerRank/Capitalize!.py | ar-pavel/Code-Library | 2d1b952231c1059bbf98d85d2c23fd8fb21b455c | [
"MIT"
] | null | null | null | import os


def solve(s):
t=" "
c = ""
for i in s:
if t == ' ':
c += i.upper()
else:
c += i
t = i
return c
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
s = input()
result = solve(s)
fptr.write(result + '\n')
fptr.close()
| 15.181818 | 47 | 0.398204 | 42 | 334 | 2.952381 | 0.619048 | 0.096774 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.434132 | 334 | 21 | 48 | 15.904762 | 0.656085 | 0 | 0 | 0 | 0 | 0 | 0.071856 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
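# --- Illustrative sketch: an equivalent one-pass variant ---
# The same result without tracking the previous character by hand. Note that
# str.title() would not work here: it also lowercases the rest of each word.
def solve_alt(s):
    return ''.join(
        ch.upper() if (i == 0 or s[i - 1] == ' ') else ch
        for i, ch in enumerate(s)
    )

assert solve_alt("hello   world") == "Hello   World"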
4f0f3484a16e736e3a84e167b7d1c89961787b7d | 4,429 | py | Python | overlay.py | blazejmanczak/AoM-LineMatching | 1c81fd1dd396e3cc120d5bab388acc92181e2881 | [
"Apache-2.0"
] | 2 | 2021-03-01T06:18:13.000Z | 2021-05-18T11:33:30.000Z | overlay.py | blazejmanczak/ArtifactsOfMemory | 1c81fd1dd396e3cc120d5bab388acc92181e2881 | [
"Apache-2.0"
] | null | null | null | overlay.py | blazejmanczak/ArtifactsOfMemory | 1c81fd1dd396e3cc120d5bab388acc92181e2881 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020-present, Netherlands Institute for Sound and Vision (Blazej Manczak)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from matchingMethods import all_in_one
import argparse
import os
import pandas as pd
from PIL import Image
import time
def print_config():
"""
Prints all entries in config variable.
"""
print("[INFO]: Overlaying with follwoing parameters ...")
for key, value in vars(config).items():
print(key + ' : ' + str(value))
def overlay(config):
    """Performs the overlaying for every image in the input directory."""
    print("[INFO]: Loading in the pickled data ... ")
    img_names = os.listdir(config.path_dir)
    img_paths = [os.path.join(config.path_dir, name) for name in img_names]
data = pd.read_pickle(config.data_directory)
non_zero_objects_dic = pd.read_pickle(config.non_zero_objects_dic_directory)
threshold, minLineLength, maxLineGap = [int(param) for param in config.hough_params.split(",")] # parse hough parameters
start_time = time.time()
count = 0
for img_path in img_paths:
try:
img_array = all_in_one(path = img_path, data = data, non_zero_objects_dic = non_zero_objects_dic ,num_lines = config.num_lines, normalizing_stats=[71.73, 26.70, 254.71, 94.19],
params_hough={"threshold": threshold, "minLineLength": minLineLength, "maxLineGap": maxLineGap})
im = Image.fromarray(img_array)
            im.save(os.path.join(config.save_dir, "overlayed_" + img_path.split("/")[-1]))
count += 1
        except Exception as e:
            print("Overlaying failed for path {} with exception {}".format(img_path, e))
end_time = time.time()
print("[INFO]: overalying and saving took on average {} seconds per query image".format(round((end_time-start_time)/count,4)))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--path_dir",
required=False,
type=str,
default = "frames/contemporary" ,
help="Directory containing images on which the matching should be done. All images in the directory will be matched. The directory should contain only images.")
parser.add_argument("--save_dir",
required=False,
type=str,
default="frames/outputs",
help="Directory where the overlayed images should be stored.")
parser.add_argument("--data_directory",
required = False,
type = str,
default = "data/data.pkl",
help = "Diectory to a pickle file of the processed archives")
parser.add_argument("--non_zero_objects_dic_directory",
required=False,
type=str,
default="data/non_zero_object_dic.pickle",
help="Diectory to a pickle file of the processed matchingObjects that contain a line")
parser.add_argument("--num_lines",
required=False,
type=int,
default=1,
help="How many lines should be overlayed? If num_lines bigger than matches, all matches are overlayed.")
parser.add_argument("--hough_params",
required=False,
type=str,
default="200,150,25",
help="What parameters to use for line detection? Argument is expected to be a string of integers seperated by a comma. \
Consecutive ints stand for threshold, minLineLength and maxLineGap respectively.")
config = parser.parse_args()
print_config()
    overlay(config)
print("[INFO]: Overlaying successful!") | 43 | 188 | 0.611199 | 528 | 4,429 | 4.988636 | 0.405303 | 0.022779 | 0.038724 | 0.03227 | 0.113895 | 0.08732 | 0.08732 | 0.029613 | 0.029613 | 0 | 0 | 0.011931 | 0.280876 | 4,429 | 103 | 189 | 43 | 0.815071 | 0.156017 | 0 | 0.166667 | 0 | 0.030303 | 0.249793 | 0.01737 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030303 | false | 0 | 0.090909 | 0 | 0.121212 | 0.121212 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
877c1d4a6e53fdc3a180cca9ff5ee364a4423ed1 | 2,392 | py | Python | mabel/data/formats/json.py | mabel-dev/mabel | ee1fdfcfe5fb87d2c5ce4f24b4b7113478ba1b8a | [
"Apache-2.0"
] | null | null | null | mabel/data/formats/json.py | mabel-dev/mabel | ee1fdfcfe5fb87d2c5ce4f24b4b7113478ba1b8a | [
"Apache-2.0"
] | 287 | 2021-05-14T21:25:26.000Z | 2022-03-30T12:02:51.000Z | mabel/data/formats/json.py | mabel-dev/mabel | ee1fdfcfe5fb87d2c5ce4f24b4b7113478ba1b8a | [
"Apache-2.0"
] | 1 | 2021-04-29T18:18:20.000Z | 2021-04-29T18:18:20.000Z | """
Create .serialize and .parse methods to handle json operations
Where orjson is installed, the performance impact is nil, without orjson, parsing is
about as fast as ujson, however serialization is slower, although still faster than the
native json library.
"""
from typing import Any, Union
import datetime
try:
# if orjson is available, use it
import orjson
parse = orjson.loads
def serialize(
obj: Any, indent: bool = False, as_bytes: bool = False
) -> Union[str, bytes]:
if as_bytes:
if indent and isinstance(obj, dict):
return orjson.dumps(
obj, option=orjson.OPT_INDENT_2 + orjson.OPT_SORT_KEYS
)
else:
return orjson.dumps(obj, option=orjson.OPT_SORT_KEYS)
# return a string
if indent and isinstance(obj, dict):
return orjson.dumps(
obj, option=orjson.OPT_INDENT_2 + orjson.OPT_SORT_KEYS
).decode()
else:
return orjson.dumps(obj, option=orjson.OPT_SORT_KEYS).decode()
except ImportError: # pragma: no cover
# orjson doesn't install on 32bit systems so we need a backup plan
# however, orjson and ujson have functional differences so we can't
# just swap the references.
import ujson
def serialize(
obj: Any, indent: bool = False, as_bytes: bool = False
) -> Union[str, bytes]: # type:ignore
def fix_fields(dt: Any) -> str:
"""
orjson and ujson handles some fields differently,
if one of those fields is detected, fix it.
"""
if isinstance(dt, (datetime.date, datetime.datetime)):
return dt.isoformat()
if isinstance(dt, dict):
return fix_fields(dt)
return dt
if isinstance(obj, dict):
obj_copy = {k: fix_fields(v) for k, v in obj.items()}
else:
obj_copy = obj
if as_bytes:
if indent:
return ujson.dumps(obj_copy, sort_keys=True, indent=2).encode()
else:
return ujson.dumps(obj_copy, sort_keys=True).encode()
if indent:
return ujson.dumps(obj_copy, sort_keys=True, indent=2)
else:
return ujson.dumps(obj_copy, sort_keys=True)
parse = ujson.loads # type:ignore
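

# Minimal usage sketch: round-trip a record through serialize/parse. The sample
# record is illustrative; `.decode()` keeps the check backend-agnostic, since
# parse() is orjson.loads or ujson.loads depending on what is installed.
if __name__ == "__main__":
    record = {"b": 2, "a": 1}
    as_text = serialize(record, indent=True)
    as_bytes = serialize(record, as_bytes=True)
    assert parse(as_text) == record
    assert parse(as_bytes.decode()) == record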
| 32.324324 | 87 | 0.5949 | 303 | 2,392 | 4.60066 | 0.359736 | 0.045911 | 0.04878 | 0.057389 | 0.430416 | 0.408895 | 0.408895 | 0.408895 | 0.408895 | 0.352941 | 0 | 0.003706 | 0.323161 | 2,392 | 73 | 88 | 32.767123 | 0.857319 | 0.25 | 0 | 0.456522 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.065217 | false | 0 | 0.108696 | 0 | 0.413043 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
877cb1febc5d5fc104596b7224e3703948c81dbb | 15,191 | py | Python | autograder.py | B1ACK917/2021HWAutoGrader | a76663bc596a1e6b190a29d7aa38c974dd536819 | [
"MIT"
] | 81 | 2021-03-13T04:16:27.000Z | 2021-05-13T03:17:48.000Z | autograder.py | wslyw/2021HWAutoGrader | a76663bc596a1e6b190a29d7aa38c974dd536819 | [
"MIT"
] | 12 | 2021-03-14T11:32:06.000Z | 2021-03-26T05:28:26.000Z | autograder.py | wslyw/2021HWAutoGrader | a76663bc596a1e6b190a29d7aa38c974dd536819 | [
"MIT"
] | 30 | 2021-03-14T15:28:18.000Z | 2022-03-26T01:47:08.000Z | """
@Version 0.3.3
@Author B1ACK917
@Contributor YuanWind
"""
import json
import os
from tqdm import tqdm
import time
import copy
import matplotlib.pyplot as plt
from genHTML import gen
import platform
def check_bomb(server, VMList, serverDict, VMDict, serverIDMap, VMIDMap, vmid2node):
"""
判断一个服务器是否发生资源溢出
:param server: 服务器ID
:param VMList: 服务器上挂载的虚拟机
:param serverDict: 服务器型号到服务器详细信息的映射
:param VMDict: 虚拟机ID到虚拟机详细信息的映射
:param serverIDMap: 从服务器ID到服务器型号的映射
:param VMIDMap: 虚拟机ID到虚拟机型号的映射
:param vmid2node: 虚拟机节点信息
:return:
"""
serverCPU, serverMEM = serverDict[serverIDMap[server]]['cpu'], serverDict[serverIDMap[server]]['memory']
serverCPU_A, serverMEM_A = serverCPU / 2, serverMEM / 2
serverCPU_B, serverMEM_B = serverCPU / 2, serverMEM / 2
for VM in VMList:
node = vmid2node[VM]
if node == 'A':
serverCPU_A -= VMDict[VMIDMap[VM]]['cpu']
serverMEM_A -= VMDict[VMIDMap[VM]]['memory']
elif node == 'B':
serverCPU_B -= VMDict[VMIDMap[VM]]['cpu']
serverMEM_B -= VMDict[VMIDMap[VM]]['memory']
        elif node is None:
serverCPU_A -= VMDict[VMIDMap[VM]]['cpu'] / 2
serverMEM_A -= VMDict[VMIDMap[VM]]['memory'] / 2
serverCPU_B -= VMDict[VMIDMap[VM]]['cpu'] / 2
serverMEM_B -= VMDict[VMIDMap[VM]]['memory'] / 2
if serverCPU_A < 0 or serverMEM_A < 0 or serverCPU_B < 0 or serverMEM_B < 0:
return False
return True
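
# Illustrative call (hypothetical data): server 0 is an 'S1' with 8 CPU / 16 GB
# split evenly between nodes A and B; VM 7 is a 'V1' (2 CPU / 4 GB) on node A:
# check_bomb(0, [7], {'S1': {'cpu': 8, 'memory': 16}},
#            {'V1': {'cpu': 2, 'memory': 4}}, {0: 'S1'}, {7: 'V1'}, {7: 'A'})
# -> True (neither node overflows)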
def grader(testCmd, ioData):
vmid2node = {}
serverDict = {}
VMDict = {}
operateInfo = []
"""
所有处理后的数据将存放在下文中的fullInfo中,每一个字段在后面的注释都有说明。
"""
with open(ioData) as file:
"""
从输入文件中获取服务器信息和每一天的操作序列
operateInfo中按天存储了当天的操作序列
"""
serverNums = eval(file.readline())
for i in range(serverNums):
_type, _cpus, _mem, _hardCost, _energyCost = file.readline()[1:-2].split(',')
serverDict.update({_type: {'cpu': eval(_cpus),
'memory': eval(_mem),
'hardCost': eval(_hardCost),
'energyCost': eval(_energyCost)}})
VMNums = eval(file.readline())
for i in range(VMNums):
_type, _cpus, _mem, doubleNode = file.readline()[1:-2].split(',')
VMDict.update({_type: {'cpu': eval(_cpus),
'memory': eval(_mem),
'double': eval(doubleNode)}})
days = eval(file.readline())
for i in range(days):
dayOperateInfo = {'operate': []}
nums = eval(file.readline())
for j in range(nums):
tmp = file.readline().strip()[1:-1]
if tmp[:3] == 'add':
_op, _, _id = tmp.split(',')
dayOperateInfo['operate'].append((_op, eval(_id), _))
else:
_op, _id = tmp.split(',')
dayOperateInfo['operate'].append((_op, eval(_id)))
operateInfo.append(dayOperateInfo)
beginTime = time.perf_counter()
result = os.popen(testCmd)
result = result.read().strip().split('\n')
    endTime = time.perf_counter()  # timer
fullInfo = []
for i in range(len(result)):
        if 'purchase' in result[i]:  # gather the day's purchase info
singleDayInfo = {'purchase': {}}
_, serverBought = result[i][1:-1].split(',')
serverBought = eval(serverBought.strip())
for j in range(i + 1, i + serverBought + 1):
serverName, serverNum = result[j][1:-1].split(',')
singleDayInfo['purchase'].update({serverName: eval(serverNum)})
fullInfo.append(singleDayInfo)
        elif 'migration' in result[i]:  # gather the day's migration info
migrationInfo = {'migration': []}
_, migrationNum = result[i][1:-1].split(',')
migrationNum = eval(migrationNum.strip())
for j in range(i + 1, i + migrationNum + 1):
sp = result[j][1:-1].split(',')
if len(sp) == 2:
sourceID, targetID = sp
migrationInfo['migration'].append((eval(sourceID), (eval(targetID), None)))
else:
sourceID, targetID, targetNode = sp
migrationInfo['migration'].append((eval(sourceID), (eval(targetID), targetNode)))
fullInfo[-1].update(migrationInfo)
requestInfo = {'request': []}
            for j in range(i + migrationNum + 1, len(result)):  # lines past the migrations are deployment info, until the next 'purchase'
if 'purchase' in result[j]:
break
sp = result[j][1:-1].split(',')
if len(sp) == 1:
serverID, serverNode = sp[0], None
requestInfo['request'].append((eval(serverID), serverNode))
else:
serverID, serverNode = sp
requestInfo['request'].append((eval(serverID), serverNode.strip()))
fullInfo[-1].update(requestInfo)
for i in range(len(fullInfo)):
fullInfo[i].update(operateInfo[i])
serverIDMap = {}
IDInd = 0
dayServerInfo = {}
VMIDMap = {}
VMIDTypeMap = {}
migTot = 0
migHappenTime = []
bombInfo = []
migOverInfo = []
for day_i in range(len(fullInfo)):
day = fullInfo[day_i]
for server in day['purchase']:
for i in range(day['purchase'][server]):
serverIDMap.update({IDInd: server})
dayServerInfo[IDInd] = []
IDInd += 1
cnt = 0
for mig in day['migration']:
try:
cnt += 1
source = mig[0]
target = mig[1][0]
vmid2node[source] = mig[1][1]
dayServerInfo[VMIDMap[source]].remove(source)
dayServerInfo[target].append(source)
VMIDMap[source] = target
migTot += 1
if cnt > int(len(VMIDMap) / 200):
migOverInfo.append(('migOverflow', day_i + 1, cnt, mig, int(len(VMIDMap) / 200)))
if not check_bomb(target, dayServerInfo[target], serverDict, VMDict, serverIDMap, VMIDTypeMap,
vmid2node):
bombInfo.append(
('Migration', day_i + 1, cnt, mig, target, serverIDMap[target], serverDict[serverIDMap[target]],
dayServerInfo[target][-5 if len(
dayServerInfo[target]) > 5 else -len(dayServerInfo[target]):]))
except KeyError:
raise RuntimeError(('migration error', mig))
migHappenTime.append(cnt)
opInd = 0
for op in day['operate']:
if op[0] == 'add':
try:
dayServerInfo[day['request'][opInd][0]].append(op[1])
VMIDMap[op[1]] = day['request'][opInd][0]
vmid2node[op[1]] = day['request'][opInd][1]
VMIDTypeMap[op[1]] = op[2].strip()
if not check_bomb(day['request'][opInd][0], dayServerInfo[day['request'][opInd][0]], serverDict,
VMDict, serverIDMap, VMIDTypeMap, vmid2node):
bombInfo.append(('Add', day_i + 1, opInd + 1, op, day['request'][opInd][0],
serverIDMap[day['request'][opInd][0]],
serverDict[serverIDMap[day['request'][opInd][0]]],
dayServerInfo[day['request'][opInd][0]][-5 if len(
dayServerInfo[day['request'][opInd][0]]) > 5 else -len(
dayServerInfo[day['request'][opInd][0]]):]))
opInd += 1
except KeyError:
raise RuntimeError(('server plant error', day['request'][opInd][0], (op[0], op[2], op[1])))
except IndexError:
raise RuntimeError(('req error', (op[0], op[2], op[1])))
else:
dayServerInfo[VMIDMap[op[1]]].remove(op[1])
vmid2node[op[1]] = 'del'
fullInfo[day_i].update({'info': copy.deepcopy(dayServerInfo)})
"""
经过以上操作后,所有的信息都将被存储到fullInfo中,获取信息可以按照以下方式:
fullInfo['purchase']:长度为天数的一个列表,每一个元素是当天的购买信息,以键值对存储。
比如{'SERVER1':40}表示购买了40台SERVER1型号服务器。
fullInfo['migration']:长度为天数的一个列表,每一个元素是当天的迁移信息,迁移信息存储在列表中,每一个迁移操作以元组形式存储。
比如(12345,10,None)表示将12345号虚拟机迁移到10号服务器上,以双节点部署。
又比如(12345,10,A)则表示将12345号虚拟机迁移到10号服务器上的A节点。
fullInfo['operate']:长度为天数的一个列表,每一个元素是当天的操作信息,包括add和delete,存储在列表中,每一个操作信息以元组形式存储
如果元组第一个元素为'add',则元组长度为3,分别为('add',虚拟机ID,虚拟机型号)
如果元组第一个元素为'del',则元组长度为2,分别为('del',虚拟机ID)
fullInfo['request']:长度为天数的一个列表,每一个元素是当天的部署情况,以元组形式存储。
比如(4,A)表示将对应请求add的虚拟机部署到4号服务器的A节点上。
如果元组第二个元素即tuple[1]为None,则表示虚拟机双节点部署到该服务器上。
fullInfo['info']:长度为天数的一个列表,每一个元素是当天结束时服务器以及挂载在其上的虚拟机,以键值对形式存储。
比如{5:[123,321,345,654]}表示5号服务器上挂载了4台虚拟机,ID分别为123,321,345,654
"""
energyCost = []
emptyRate = []
hardCost = 0.0
serverNums = {}
for serverID, _ in dayServerInfo.items():
"""
计算硬件成本,从最后一天的服务器信息中统计。
"""
hardCost += serverDict[serverIDMap[serverID]]['hardCost']
if serverIDMap[serverID] not in serverNums:
serverNums[serverIDMap[serverID]] = 1
else:
serverNums[serverIDMap[serverID]] += 1
for day in fullInfo:
"""
计算每日运行成本。
计算方式是每天结束时如果一台服务器上有虚拟机在挂载状态,就计算一天的运行费用。
同时统计闲置率。
"""
info = day['info']
c = 0.0
inUse, empty = 0, 0
for serverID, _ in info.items():
if _:
c += serverDict[serverIDMap[serverID]]['energyCost']
inUse += 1
else:
empty += 1
energyCost.append(c)
emptyRate.append((empty / (inUse + empty)) if inUse + empty else 0)
timeFormat = '%m_%d_%H_%M_%S'
folderName = os.path.join('./resource', time.strftime(timeFormat, time.localtime(time.time())))
os.mkdir(folderName)
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
plt.plot(range(len(fullInfo)), emptyRate, label='Empty Ratio')
plt.xlabel('Days')
plt.ylabel('Ratio')
plt.title('Empty Ratio (How many servers are not in use)')
plt.legend()
plt.savefig(os.path.join(folderName, '1.png'))
plt.clf()
plt.plot(range(len(fullInfo)), energyCost, label='Energy Cost')
plt.xlabel('Days')
plt.ylabel('Money')
plt.title('Energy Cost (When a server is in use, it leads to energy cost)')
plt.legend()
plt.savefig(os.path.join(folderName, '2.png'))
plt.clf()
plt.plot(range(len(fullInfo)), migHappenTime, label='Migration Times')
plt.xlabel('Days')
plt.ylabel('Times')
plt.title('Migration Times')
plt.legend()
plt.savefig(os.path.join(folderName, '3.png'))
plt.clf()
labels = ['{}\n{}cpu\n{}mem'.format(s, serverDict[s]['cpu'], serverDict[s]['memory']) for s in serverNums.keys()]
sizes = list(serverNums.values())
plt.pie(sizes, labels=labels, autopct='%1.2f%%')
plt.title('Server Types')
plt.savefig(os.path.join(folderName, '4.png'))
plt.clf()
return os.path.split(ioData)[-1], hardCost, sum(energyCost), endTime - beginTime, sum(emptyRate) / len(
emptyRate), sum(energyCost) / len(energyCost), folderName, migTot, bombInfo, migOverInfo
if __name__ == '__main__':
if not os.path.exists('./resource'):
os.mkdir('./resource')
    results = []
with open('config.json') as file:
config = json.load(file)
language = config['language']
pypyPath = config['pythonInterpreter']
exe = config['executable']
sourceCode = config['sourceCode']
javaPath = config['javaPath']
javaJARFile = config['buildJARPath']
        ioDataList = config['ioData']  # read parameters from config
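        # A matching config.json might look like this (values are illustrative):
        # {
        #     "language": "python",
        #     "pythonInterpreter": "",
        #     "executable": "",
        #     "sourceCode": "main.py",
        #     "javaPath": "",
        #     "buildJARPath": "",
        #     "ioData": ["training-1.txt", "training-2.txt"]
        # }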
print('AutoGrader Running with args: {}'.format(
[language, pypyPath, exe, sourceCode, javaPath, javaJARFile, ioDataList]))
for d in tqdm(ioDataList, ncols=40):
"""
根据语言生成测试指令testCmd
"""
if language == 'c' or language == 'c++':
testCmd = '\"{}\"<\"{}\"'.format(exe, d)
elif language == 'python':
if pypyPath:
testCmd = '\"{}\" \"{}\"<\"{}\"'.format(pypyPath, sourceCode, d)
else:
testCmd = 'python \"{}\"<\"{}\"'.format(sourceCode, d)
elif language == 'java':
filePath, JARPath = os.path.split(javaJARFile)
if javaPath:
testCmd = '\"{}\" -Djava.library.path=\"{}\" -classpath \"{}\" \"com.huawei.java.main.Main\"<\"{}\"'.format(
javaPath, filePath, javaJARFile, d)
else:
testCmd = 'java -Djava.library.path=\"{}\" -classpath \"{}\" \"com.huawei.java.main.Main\"<\"{}\"'.format(
filePath, javaJARFile, d)
else:
            raise ValueError('unsupported language')
        try:
            results.append(grader(testCmd, d))
except RuntimeError as e:
if e.args[0][0] == 'server plant error':
                print('Server or VM info error: the server ID does not exist')
                print('The offending server ID is {}, your output operation was {}'.format(e.args[0][1], e.args[0][2]))
            elif e.args[0][0] == 'migration error':
                print('VM migration error: the server ID does not exist')
                print('Your output operation was {}'.format(e.args[0][1]))
            elif e.args[0][0] == 'req error':
                print('Request error')
                print('No server deployment operation matches {}'.format(e.args[0][1]))
            print('This error prevents the analyzer from continuing; this run will not appear in the report')
    res = gen(results)
sys = platform.system()
if sys == 'Windows' or sys == 'windows':
os.popen('start chrome.exe {}'.format(os.path.join(os.path.dirname(os.path.abspath(__file__)), res)))
elif sys == 'Linux' or sys == 'linux':
os.popen('google-chrome {}'.format(os.path.join(os.path.dirname(os.path.abspath(__file__)), res)))
        print('You are on Linux; the page may fail to open or raise an error. Try opening the newest generated html with your default browser, or copy the html and resource folders to Windows.\n')
elif sys == 'Darwin' or sys == 'darwin':
os.popen('open -a Safari {}'.format(os.path.join(os.path.dirname(os.path.abspath(__file__)), res)))
        print('You are on macOS; the page may fail to open or raise an error. Try opening the newest generated html with your default browser, or copy the html and resource folders to Windows.\n')
| 42.314763 | 128 | 0.529853 | 1,489 | 15,191 | 5.348556 | 0.222968 | 0.012808 | 0.022602 | 0.022099 | 0.262557 | 0.211326 | 0.148543 | 0.111251 | 0.061652 | 0.044576 | 0 | 0.020516 | 0.32618 | 15,191 | 358 | 129 | 42.432961 | 0.757522 | 0.029425 | 0 | 0.101083 | 0 | 0.00722 | 0.116326 | 0.024787 | 0 | 0 | 0 | 0 | 0 | 1 | 0.00722 | false | 0 | 0.028881 | 0 | 0.046931 | 0.036101 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
877dbaa8ca2f80ac5b136fa5a55cc2df0dab1b58 | 1,215 | py | Python | my_life/lifeServerApiApp/App/DiaryService/diaryApi.py | CLAY-zhao/MyLife | 994e1f4b2cb20b0bd09edc95ea5ed0b09010a1e3 | [
"bzip2-1.0.6"
] | null | null | null | my_life/lifeServerApiApp/App/DiaryService/diaryApi.py | CLAY-zhao/MyLife | 994e1f4b2cb20b0bd09edc95ea5ed0b09010a1e3 | [
"bzip2-1.0.6"
] | 1 | 2022-01-15T05:36:51.000Z | 2022-01-15T05:36:51.000Z | my_life/lifeServerApiApp/App/DiaryService/diaryApi.py | CLAY-zhao/MyLife | 994e1f4b2cb20b0bd09edc95ea5ed0b09010a1e3 | [
"bzip2-1.0.6"
] | null | null | null | from ...settings.config import data
from rest_framework.views import APIView
from django.http import JsonResponse
from .order.diary_serializer import HomeDiarySerializer
from ...utils.AppFunctools import modelObject
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
class DiaryApi(APIView):
    PAGE_COUNT = 6  # number of entries shown per page
data = {
"appStatus": {
"errorCode": 0,
"errorParameter": "",
"message": "操作成功!"
},
"content": {}
}
def get(self, request):
"""首页获取日记简介的部分内容/首页不展示日记所有内容"""
diary_list = modelObject.diary_model.all()
paginator = Paginator(diary_list, self.PAGE_COUNT)
        page = request.GET.get('page', 1)  # current page number; defaults to 1 when absent
        try:
            diary = paginator.page(page)
        except PageNotAnInteger:
            # If page is not an integer, fall back to the first page
            diary = paginator.page(1)
        except EmptyPage:
            # If page is out of range, return the last page
            diary = paginator.page(paginator.num_pages)
diary_data = HomeDiarySerializer(instance=diary, many=True).data
self.data['content']['list'] = diary_data
return JsonResponse(self.data)
| 27.613636 | 72 | 0.634568 | 120 | 1,215 | 6.333333 | 0.5 | 0.035526 | 0.071053 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005593 | 0.264198 | 1,215 | 43 | 73 | 28.255814 | 0.844519 | 0.07572 | 0 | 0 | 0 | 0 | 0.059246 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.206897 | 0 | 0.37931 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8782be8e1dfda01d65552854454e5ce24b5f9726 | 637 | py | Python | .config/polybar/scripts/mail/mail.py | XECortex/dots | ce07f010b2ba80b8105b5bf7786f54df9048ec81 | [
"MIT"
] | 3 | 2021-02-18T17:59:17.000Z | 2021-02-19T19:54:18.000Z | .config/polybar/scripts/mail/mail.py | XECortex/dots | ce07f010b2ba80b8105b5bf7786f54df9048ec81 | [
"MIT"
] | null | null | null | .config/polybar/scripts/mail/mail.py | XECortex/dots | ce07f010b2ba80b8105b5bf7786f54df9048ec81 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import imaplib
path = os.path.dirname(os.path.realpath(__file__))
mail_client = imaplib.IMAP4_SSL('imap.gmail.com', 993)
if not os.path.isfile(f"{path}/config.py"):
print(f"⚠ No mail config found. Check out \"{path}/config.py\"")
exit()
else:
from config import *
mail_client.login(user, password)
def check_mails():
mail_client.select()
unread = mail_client.search(None, 'UnSeen')
return len(unread[1][0].split())
unread = check_mails()
if unread > 0 or not quiet:
if not hide_unreads:
print(prefix, unread)
else:
print(prefix)
else:
print('') | 21.233333 | 68 | 0.66248 | 95 | 637 | 4.326316 | 0.568421 | 0.097324 | 0.058394 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013487 | 0.185243 | 637 | 30 | 69 | 21.233333 | 0.776493 | 0.031397 | 0 | 0.136364 | 0 | 0 | 0.119935 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0.045455 | 0.136364 | 0 | 0.227273 | 0.181818 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8783c7b5deca87958f9de465f6600ca4c5858f14 | 1,069 | py | Python | silence.py | ColesonWelles/SilencePy | 7c650d9349dd6a708c94bce9e3b69e58895dc21a | [
"MIT"
] | null | null | null | silence.py | ColesonWelles/SilencePy | 7c650d9349dd6a708c94bce9e3b69e58895dc21a | [
"MIT"
] | null | null | null | silence.py | ColesonWelles/SilencePy | 7c650d9349dd6a708c94bce9e3b69e58895dc21a | [
"MIT"
] | null | null | null | #!/usr/bin/python
'''
sys is needed for argv.
pilsuc tracks whether Pillow has been imported successfully; the try block
imports Pillow and, if that fails, installs it via pip and retries.
Pillow handles the image manipulation: Image.open loads the base image and
ImageFont.truetype loads the font file.
If command-line arguments are given, join builds the meme text from them;
otherwise a usage message is printed and the script exits.
ImageDraw.Draw creates a drawing object on the base image, the text is drawn
onto it, and save writes the result to export.png.
'''
import sys
pilsuc = False
while not pilsuc:
try:
from PIL import Image, ImageDraw, ImageFont
pilsuc = True
except ImportError:
import subprocess
subprocess.call([sys.executable, "-m", "pip", "install", "Pillow"])
img = Image.open("img.png")
font = ImageFont.truetype("LiberationSans-Regular.ttf", 68)
if len(sys.argv) > 1:
text = "\n".join(sys.argv[1:])
else:
print("Usage: py silence.py [text for meme] - generates \"silence crab\" meme with input text")
sys.exit()
draw = ImageDraw.Draw(img)
draw.text((6,60), text, fill="white", font=font)
img.save('export.png')
| 22.744681 | 99 | 0.698784 | 163 | 1,069 | 4.582822 | 0.558282 | 0.044177 | 0.032129 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00814 | 0.19551 | 1,069 | 46 | 100 | 23.23913 | 0.860465 | 0.40131 | 0 | 0 | 0 | 0 | 0.219937 | 0.041139 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.210526 | 0 | 0.210526 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8783c800b214430bdf292410b4ff190c797fe5e9 | 271 | py | Python | server.py | jackyin68/pyprofiling | 3f4180c735b80b978028cad776d30a6b99af1547 | [
"Apache-2.0"
] | 3 | 2022-01-10T13:09:45.000Z | 2022-02-27T23:26:32.000Z | server.py | jackyin68/pyprofiling | 3f4180c735b80b978028cad776d30a6b99af1547 | [
"Apache-2.0"
] | null | null | null | server.py | jackyin68/pyprofiling | 3f4180c735b80b978028cad776d30a6b99af1547 | [
"Apache-2.0"
] | 1 | 2022-02-21T15:21:24.000Z | 2022-02-21T15:21:24.000Z | import os
import sys
path = os.path.dirname(sys.path[0])
if path and path not in sys.path:
sys.path.append(path)
from flask import Flask
app = Flask("Product")
@app.route("/")
def welcome():
return "欢迎来到通达信数据分析的世界"
if __name__ == '__main__':
app.run()
| 13.55 | 35 | 0.667897 | 41 | 271 | 4.219512 | 0.560976 | 0.16185 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004545 | 0.188192 | 271 | 19 | 36 | 14.263158 | 0.781818 | 0 | 0 | 0 | 0 | 0 | 0.110701 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.25 | 0.083333 | 0.416667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8784d47e997e461bc2117484da1a0b3ef1cc901a | 1,995 | py | Python | src/predectorutils/subcommands/analysis_tables.py | ccdmb/predector-utils | 68e9e72682dd73fff8d1c53969870a2e9628556e | [
"Apache-2.0"
] | null | null | null | src/predectorutils/subcommands/analysis_tables.py | ccdmb/predector-utils | 68e9e72682dd73fff8d1c53969870a2e9628556e | [
"Apache-2.0"
] | 7 | 2020-06-17T02:37:21.000Z | 2021-11-22T02:18:54.000Z | src/predectorutils/subcommands/analysis_tables.py | ccdmb/predector-utils | 68e9e72682dd73fff8d1c53969870a2e9628556e | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import os
import argparse
from typing import Iterator
from typing import Set
import sqlite3
import pandas as pd
from predectorutils.database import (
load_db,
ResultsTable,
ResultRow,
TargetRow
)
def cli(parser: argparse.ArgumentParser) -> None:
parser.add_argument(
"db",
type=str,
help="Where to store the database"
)
parser.add_argument(
"-t", "--template",
type=str,
default="{analysis}.tsv",
help=(
"A template for the output filenames. Can use python `.format` "
"style variable analysis. Directories will be created."
)
)
parser.add_argument(
"--mem",
type=float,
default=1.0,
help=(
"The amount of RAM in gibibytes to let "
"SQLite use for cache."
)
)
return
def inner(
con: sqlite3.Connection,
cur: sqlite3.Cursor,
args: argparse.Namespace
) -> None:
from ..analyses import Analyses
tab = ResultsTable(con, cur)
targets = list(tab.fetch_targets())
seen: Set[Analyses] = set()
for target in targets:
if target.analysis in seen:
raise ValueError(
"There are multiple versions of the same analysis."
)
else:
seen.add(target.analysis)
records = tab.select_target(target, checksums=False)
df = pd.DataFrame(map(lambda x: x.as_analysis().as_series(), records))
fname = args.template.format(analysis=str(target.analysis))
dname = os.path.dirname(fname)
if dname != '':
os.makedirs(dname, exist_ok=True)
df.to_csv(fname, sep="\t", index=False, na_rep=".")
def runner(args: argparse.Namespace) -> None:
    # Open the database outside the try block so the finally clause never
    # touches an undefined connection if load_db fails.
    con, cur = load_db(args.db, args.mem)
    try:
        inner(con, cur, args)
    finally:
        con.commit()
        con.close()
    return
| 21.684783 | 78 | 0.582456 | 234 | 1,995 | 4.91453 | 0.521368 | 0.023478 | 0.044348 | 0.043478 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00436 | 0.310276 | 1,995 | 91 | 79 | 21.923077 | 0.831395 | 0.010526 | 0 | 0.128571 | 0 | 0 | 0.144957 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042857 | false | 0 | 0.114286 | 0 | 0.185714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
878755049296809773d0780b76a7eb056449007e | 1,740 | py | Python | qmmm_neuralnets/files/h5_file_ops.py | adamduster/qmmm_neuralnets | 70f35ec0659e8a424cb66ad874d22232c22fcba5 | [
"MIT"
] | null | null | null | qmmm_neuralnets/files/h5_file_ops.py | adamduster/qmmm_neuralnets | 70f35ec0659e8a424cb66ad874d22232c22fcba5 | [
"MIT"
] | 1 | 2021-09-17T18:19:48.000Z | 2021-09-17T18:19:48.000Z | qmmm_neuralnets/files/h5_file_ops.py | lin-compchem/qmmm_neuralnets | 70f35ec0659e8a424cb66ad874d22232c22fcba5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Operations over generic quantities of H5 files
"""
import h5py as h5
import sys
def mean_over_files(fnames, key):
"""
Calculate the mean of of a value over a list of h5 files
Parameters
----------
fnames: list of str
list of hdf5 files with identical keys
key: str
Name of key to calculate mean over
Returns
-------
mean: float or int
The mean of the quantity
"""
num_files = check_file_list(fnames)
mean = 0
    for fname in fnames:
        with h5.File(fname, 'r') as ifi:
            mean += ifi[key][:].mean()
    return mean / num_files
def check_file_list(fnames):
"""
Check to see if we have a list of files to do error-checking for various
subroutines
Parameters
----------
fnames: list of str
file names to check if list
Returns
-------
num_files: int
number of files in list
"""
    try:
        num_files = len(fnames)
        assert num_files > 0
    except (TypeError, AssertionError):
        # len() raises TypeError for non-sequences; the assert catches empty lists
        sys.stderr.write("Please pass a non-empty list of filenames as an argument")
        raise
return num_files
def check_for_keys(fname, *keys):
"""
Check if the key(s) exists in the h5 file
Parameters
----------
fname: str
The name of the h5 file
*keys:
keys to check
Returns
-------
"""
with h5.File(fname, 'r') as ifi:
all_keys = list(ifi.keys())
for key in keys:
if key not in all_keys:
sys.stderr.write("Error, key {} not in hdf5 file {}\n".format(
key, fname))
raise KeyError
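

# Hedged usage sketch (file names are hypothetical):
# check_for_keys("run1.h5", "energy", "coords")
# avg_energy = mean_over_files(["run1.h5", "run2.h5"], "energy")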
| 21.75 | 78 | 0.551724 | 234 | 1,740 | 4.034188 | 0.333333 | 0.038136 | 0.022246 | 0.04661 | 0.097458 | 0.044492 | 0.044492 | 0 | 0 | 0 | 0 | 0.010591 | 0.348851 | 1,740 | 79 | 79 | 22.025316 | 0.822595 | 0.395977 | 0 | 0.285714 | 0 | 0 | 0.093363 | 0 | 0 | 0 | 0 | 0 | 0.035714 | 1 | 0.107143 | false | 0.035714 | 0.071429 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8789b39379bf81890eef79642c63e5f745f80aaf | 1,455 | py | Python | exercicios/exercicio096.py | Helton-Rubens/Python-3 | eb6d5ee71bcb2a2a80de4eaea942bd0c41d846b7 | [
"MIT"
] | null | null | null | exercicios/exercicio096.py | Helton-Rubens/Python-3 | eb6d5ee71bcb2a2a80de4eaea942bd0c41d846b7 | [
"MIT"
] | null | null | null | exercicios/exercicio096.py | Helton-Rubens/Python-3 | eb6d5ee71bcb2a2a80de4eaea942bd0c41d846b7 | [
"MIT"
] | null | null | null | jogador = dict()
jogadores = list()
dec = ' '
while dec not in 'Nn':
    jogador['nome'] = str(input("What is the player's name? ")).strip().title()
    partidas = int(input(f'How many matches did {jogador["nome"]} play? '))
    gols = []
    for i in range(1, partidas + 1):
        gols.append(int(input(f'How many goals did {jogador["nome"]} score in match {i}? ')))
    jogador['gols'] = gols[:]
    jogador['total'] = sum(gols)
    jogadores.append(jogador.copy())
    dec = str(input('Do you want to continue? [Y/N] '))[0]
    while dec not in 'YyNn':
        print('Error! Type only "Yes" or "No"')
        dec = str(input('Do you want to continue? [Y/N] '))[0]
print('-=' * 40)
print(f'{"Cod":>3} {"Name":<15}{"Goals":<15}{"Total":<15}')
print('-=' * 40)
for n, i in enumerate(jogadores):
    print(f'{n:>3}', end=' ')
    for u in i.values():
        print(f'{str(u):<15}', end='')
    print()
print()
while True:
    print('--' * 30)
    busca = int(input('Which player do you want to look up? (999 to stop) '))
    while busca != 999 and (busca >= len(jogadores) or busca < 0):
        print(f'Invalid. There is no player with code {busca}!')
        print('--' * 30)
        busca = int(input('Which player do you want to look up? (999 to stop) '))
    if busca == 999:
        break
    else:
        print(f'Stats for player {jogadores[busca]["nome"]}')
        for i, c in enumerate(jogadores[busca]['gols']):
            print(f'    In match {i + 1}, {jogadores[busca]["nome"]} scored {c} goals.')
| 37.307692 | 85 | 0.569072 | 212 | 1,455 | 3.90566 | 0.367925 | 0.043478 | 0.021739 | 0.036232 | 0.219807 | 0.219807 | 0.219807 | 0.219807 | 0.154589 | 0.154589 | 0 | 0.030009 | 0.221306 | 1,455 | 38 | 86 | 38.289474 | 0.700794 | 0 | 0 | 0.263158 | 0 | 0.026316 | 0.38488 | 0.061856 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.342105 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
878aef6bb4057ee6f517c4833149a1d1ed15902f | 25,168 | py | Python | amical/data_processing.py | tomasstolker/AMICAL | c9bbf8e4a468313efff3b349fffea7648c411a51 | [
"MIT"
] | null | null | null | amical/data_processing.py | tomasstolker/AMICAL | c9bbf8e4a468313efff3b349fffea7648c411a51 | [
"MIT"
] | null | null | null | amical/data_processing.py | tomasstolker/AMICAL | c9bbf8e4a468313efff3b349fffea7648c411a51 | [
"MIT"
] | null | null | null | """
@author: Anthony Soulain (University of Sydney)
-------------------------------------------------------------------------
AMICAL: Aperture Masking Interferometry Calibration and Analysis Library
-------------------------------------------------------------------------
Function related to data cleaning (ghost, background correction,
centering, etc.) and data selection (sigma-clipping, centered flux,).
--------------------------------------------------------------------
"""
import sys
import warnings
import numpy as np
from astropy.convolution import Gaussian2DKernel
from astropy.convolution import interpolate_replace_nans
from astropy.io import fits
from matplotlib import pyplot as plt
from matplotlib.colors import PowerNorm
from termcolor import cprint
from tqdm import tqdm
from amical.tools import apply_windowing
from amical.tools import crop_max
from amical.tools import find_max
def _apply_patch_ghost(cube, xc, yc, radius=20, dx=0, dy=-200, method="bg"):
"""Apply a patch on an eventual artifacts/ghosts on the spectral filter (i.e.
K1 filter of SPHERE presents an artifact/ghost at (392, 360)).
Arguments:
----------
`cube` {array} -- Data cube,\n
`xc` {int} -- x-axis position of the artifact,\n
`yc` {int} -- y-axis position of the artifact.
Keyword Arguments:
----------
`radius` {int} -- Radius to apply the patch in a circle (default: {10}),\n
`dy` {int} -- Offset pixel number to compute background values (default: {0}),\n
`dx` {int} -- Same along y-axis (default: {0}),\n
`method` {str} -- If 'bg', the replacement values are the background computed at
    xc+dx, yc+dy, else zero is applied (default: {'bg'}).
"""
cube_corrected = []
for i in range(len(cube)):
imA = cube[i].copy()
isz = imA.shape[0]
xc_off, yc_off = xc + dx, yc + dy
xx, yy = np.arange(isz), np.arange(isz)
xx_c = xx - xc
yy_c = yc - yy
xx_off = xx - xc_off
yy_off = yc_off - yy
distance = np.sqrt(xx_c**2 + yy_c[:, np.newaxis] ** 2)
distance_off = np.sqrt(xx_off**2 + yy_off[:, np.newaxis] ** 2)
cond_patch = distance <= radius
cond_bg = distance_off <= radius
if method == "bg":
imA[cond_patch] = np.mean(imA[cond_bg])
elif method == "zero":
imA[cond_patch] = 0
cube_corrected.append(imA)
cube_corrected = np.array(cube_corrected)
return cube_corrected
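
# Illustrative call, using the SPHERE K1 ghost position quoted in the docstring:
# cube_fixed = _apply_patch_ghost(cube, xc=392, yc=360, radius=20, dx=0, dy=-200, method="bg")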
def select_data(cube, clip_fact=0.5, clip=False, verbose=True, display=True):
"""Check the cleaned data cube using the position of the maximum in the
    fft image (supposed to be zero). If not in zero position, the frame is
rejected. It can apply a sigma-clipping to select only the frames with the
highest total fluxes.
Parameters:
-----------
`cube` {array} -- Data cube,\n
    `clip_fact` {float} -- Relative sigma used when rejecting frames by
    sigma-clipping (default: 0.5),\n
    `clip` {bool} -- If True, sigma-clipping is used (default: False),\n
    `verbose` {bool} -- If True, print information in the terminal,\n
`display` {bool} -- If True, plot figures.
"""
fft_fram = abs(np.fft.fft2(cube))
fluxes, flag_fram, good_fram = [], [], []
for i in range(len(fft_fram)):
fluxes.append(fft_fram[i][0, 0])
pos_max = np.argmax(fft_fram[i])
if pos_max != 0:
flag_fram.append(i)
else:
good_fram.append(cube[i])
fluxes = np.array(fluxes)
flag_fram = np.array(flag_fram)
best_fr = np.argmax(fluxes)
worst_fr = np.argmin(fluxes)
std_flux = np.std(fluxes)
med_flux = np.median(fluxes)
if verbose:
if (med_flux / std_flux) <= 5.0:
cprint(
"\nStd of the fluxes along the cube < 5 (%2.1f):\n -> sigma clipping is suggested (clip=True)."
% (med_flux / std_flux),
"cyan",
)
limit_flux = med_flux - clip_fact * std_flux
if clip:
cond_clip = fluxes > limit_flux
cube_cleaned_checked = cube[cond_clip]
ind_clip = np.where(fluxes <= limit_flux)[0]
else:
ind_clip = []
cube_cleaned_checked = np.array(good_fram)
ind_clip2 = np.where(fluxes <= limit_flux)[0]
if ((worst_fr in ind_clip2) and clip) or (worst_fr in flag_fram):
ext = "(rejected)"
else:
ext = ""
diffmm = 100 * abs(np.max(fluxes) - np.min(fluxes)) / med_flux
if display:
plt.figure(figsize=(10, 5))
plt.plot(
fluxes,
label=r"|$\Delta F$|/$\sigma_F$=%2.0f (%2.2f %%)"
% (med_flux / std_flux, diffmm),
lw=1,
)
if len(flag_fram) > 0:
plt.scatter(
flag_fram,
fluxes[flag_fram],
s=52,
facecolors="none",
edgecolors="r",
label="Rejected frames (maximum fluxes)",
)
if clip:
if len(ind_clip) > 0:
plt.plot(
ind_clip,
fluxes[ind_clip],
"x",
color="crimson",
label="Rejected frames (clipping)",
)
plt.axhline(
limit_flux,
lw=3,
color="#00b08b",
ls="--",
label="Clipping threshold",
zorder=10,
)
plt.legend(loc="best", fontsize=9)
plt.ylabel("Flux [counts]")
plt.xlabel("# frames")
plt.grid(alpha=0.2)
plt.tight_layout()
plt.figure(figsize=(7, 7))
plt.subplot(2, 2, 1)
plt.title("Best fram (%i)" % best_fr)
plt.imshow(cube[best_fr], norm=PowerNorm(0.5, vmin=0), cmap="afmhot")
plt.subplot(2, 2, 2)
plt.imshow(np.fft.fftshift(fft_fram[best_fr]), cmap="gist_stern")
plt.subplot(2, 2, 3)
plt.title("Worst fram (%i) %s" % (worst_fr, ext))
plt.imshow(cube[worst_fr], norm=PowerNorm(0.5, vmin=0), cmap="afmhot")
plt.subplot(2, 2, 4)
plt.imshow(np.fft.fftshift(fft_fram[worst_fr]), cmap="gist_stern")
plt.tight_layout()
plt.show(block=False)
if verbose:
n_good = len(cube_cleaned_checked)
n_bad = len(cube) - n_good
if clip:
cprint("\n---- σ-clip + centered fluxes selection ---", "cyan")
else:
cprint("\n---- centered fluxes selection ---", "cyan")
print(
"%i/%i (%2.1f%%) are flagged as bad frames"
% (n_bad, len(cube), 100 * float(n_bad) / len(cube))
)
return cube_cleaned_checked
def _get_ring_mask(r1, dr, isz, center=None):
if center is None:
xc, yc = isz // 2, isz // 2
else:
xc, yc = center
xx, yy = np.arange(isz), np.arange(isz)
xx2 = xx - xc
yy2 = yc - yy
distance = np.sqrt(xx2**2 + yy2[:, np.newaxis] ** 2)
inner_cond = r1 <= distance
if dr is not None:
r2 = r1 + dr
outer_cond = distance <= r2
else:
outer_cond = True
cond_bg = inner_cond & outer_cond
if dr is not None and np.all(outer_cond):
warnings.warn(
"The outer radius is out of the image, using everything beyond r1 as background",
RuntimeWarning,
)
return cond_bg
def sky_correction(imA, r1=None, dr=None, verbose=False, *, center=None, mask=None):
"""
Perform background sky correction to be as close to zero as possible.
This requires either a radius (r1) to define the background boundary, optionally with a
ring width dr, or a boolean mask with the same shape as the image.
"""
# FUTURE: Future AMICAL release should raise error
if r1 is None and mask is None:
warnings.warn(
"The default value of r1 and dr is now None. Either mask or r1 must be set"
" explicitely. In the future, this will result in an error."
" Setting r1=100 and dr=20",
PendingDeprecationWarning,
)
r1 = 100
dr = 20
if r1 is not None and mask is not None:
raise TypeError("Only one of mask and r1 can be specified")
elif r1 is None and dr is not None:
raise TypeError("dr cannot be set when r1 is None")
elif r1 is not None:
isz = imA.shape[0]
cond_bg = _get_ring_mask(r1, dr, isz, center=center)
elif mask is not None:
if mask.shape != imA.shape:
raise ValueError("mask should have the same shape as image")
elif not mask.any():
warnings.warn(
"Background not computed because mask has no True values",
RuntimeWarning,
)
cond_bg = mask
do_bg = cond_bg.any()
if do_bg:
try:
minA = imA.min()
imB = imA + 1.01 * abs(minA)
backgroundB = np.mean(imB[cond_bg])
imC = imB - backgroundB
backgroundC = np.mean(imC[cond_bg])
except IndexError:
do_bg = False
# Not using else because do_bg can change in except above
if not do_bg:
imC = imA.copy()
backgroundC = 0
warnings.warn(
"Background not computed, likely because specified radius is out of bounds",
RuntimeWarning,
)
elif verbose:
print(
f"Sky correction of {backgroundB} was subtracted,"
f" remaining background is {backgroundC}."
)
return imC, backgroundC
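
# Hedged usage sketch (synthetic frame; radii are illustrative):
# import numpy as np
# frame = np.random.poisson(100, (256, 256)).astype(float)
# corrected, bg = sky_correction(frame, r1=100, dr=20)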
def fix_bad_pixels(image, bad_map, add_bad=None, x_stddev=1):
"""Replace bad pixels with values interpolated from their neighbors (interpolation
is made with a gaussian kernel convolution)."""
if add_bad is None:
add_bad = []
if len(add_bad) != 0:
bad_map = bad_map.copy() # Don't modify input bad pixel map, use a copy
for j in range(len(add_bad)):
bad_map[add_bad[j][1], add_bad[j][0]] = 1
img_nan = image.copy()
img_nan[bad_map == 1] = np.nan
kernel = Gaussian2DKernel(x_stddev=x_stddev)
fixed_image = interpolate_replace_nans(img_nan, kernel)
return fixed_image
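
# Hedged usage sketch (synthetic arrays; add_bad entries are (x, y) pairs):
# import numpy as np
# img = np.random.rand(64, 64)
# bad = np.zeros((64, 64), dtype=int); bad[10, 12] = 1
# cleaned = fix_bad_pixels(img, bad, add_bad=[(5, 20)], x_stddev=1)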
def _get_3d_bad_pixels(bad_map, add_bad, data):
"""
Format 3d bad pixel cube from arbitrary bad pixel input
Parameters
----------
`bad_map` {np.ndarray}: Bad pixel map in 2d or 3d (can also be None)\n
`add_bad` {list}: list of bad pixel coordinates\n
`data` {np.ndarray}: Array with the data corresponding to the bad pixel map\n
Returns:
--------
`bad_map` {np.array}: 3d bad map with same shape as data cube
`add_bad` {list}: add_bad list compatible with 3d dataset
"""
n_im = data.shape[0]
    # Build a default add_bad list here (avoids a mutable default argument)
if add_bad is None or len(add_bad) == 0:
# Reshape add_bad to simplify indexing in loop
add_bad = [
[],
] * n_im
else:
add_bad = np.array(add_bad)
if add_bad.ndim == 2 and len(add_bad[0]) != 0:
add_bad = np.repeat(add_bad[np.newaxis, :], n_im, axis=0)
elif add_bad.ndim == 3:
if add_bad.shape[0] != n_im:
raise ValueError("3D add_bad should have one list per frame")
if (bad_map is None) and (len(add_bad) != 0):
# If we have extra bad pixels, define bad_map with same shape as image
bad_map = np.zeros_like(data, dtype=bool)
elif bad_map is not None:
# Shape should match data
if bad_map.ndim == 2 and bad_map.shape != data[0].shape:
raise ValueError(
f"2D bad_map should have the same shape as a frame ({data[0].shape}),"
f" but has shape {bad_map.shape}"
)
elif bad_map.ndim == 3 and bad_map.shape != data.shape:
raise ValueError(
f"3D bad_map should have the same shape as data cube ({data.shape}),"
f" but has shape {bad_map.shape}"
)
elif bad_map.ndim == 2:
bad_map = np.repeat(bad_map[np.newaxis, :], n_im, axis=0)
return bad_map, add_bad
def show_clean_params(
filename,
isz,
r1=None,
dr=None,
bad_map=None,
add_bad=None,
edge=0,
remove_bad=True,
nframe=0,
ihdu=0,
f_kernel=3,
offx=0,
offy=0,
apod=False,
window=None,
*,
mask=None,
):
"""Display the input parameters for the cleaning.
Parameters:
-----------
`filename` {str}: filename containing the datacube,\n
`isz` {int}: Size of the cropped image (default: 256)\n
    `r1` {int}: Radius of the ring used to compute the background sky (default: None)\n
    `dr` {int}: Ring width used to compute the sky (default: None)\n
`bad_map` {array}: Bad pixel map with 0 and 1 where 1 set for a bad pixel (default: None),\n
`add_bad` {list}: List of 2d coordinates of bad pixels/cosmic rays (default: []),\n
`edge` {int}: Number of pixel to be removed on the edge of the image (SPHERE),\n
`remove_bad` {bool}: If True, the bad pixels are removed using a gaussian interpolation,\n
`nframe` {int}: Frame number to be shown (default: 0),\n
`ihdu` {int}: Hdu number of the fits file. Normally 1 for NIRISS and 0 for SPHERE (default: 0).
"""
with fits.open(filename) as fd:
data = fd[ihdu].data
img0 = data[nframe]
dims = img0.shape
if isz is None:
print(
"Warning: isz not found (None by default). isz is set to the original image size (%i)"
% (dims[0]),
file=sys.stderr,
)
isz = dims[0]
bad_map, add_bad = _get_3d_bad_pixels(bad_map, add_bad, data)
bmap0 = bad_map[nframe]
ab0 = add_bad[nframe]
if edge != 0:
img0[:, 0:edge] = 0
img0[:, -edge:-1] = 0
img0[0:edge, :] = 0
img0[-edge:-1, :] = 0
if (bad_map is not None) & (remove_bad):
img1 = fix_bad_pixels(img0, bmap0, add_bad=ab0)
else:
img1 = img0.copy()
cropped_infos = crop_max(img1, isz, offx=offx, offy=offy, f=f_kernel)
pos = cropped_infos[1]
noBadPixel = False
bad_pix_x, bad_pix_y = [], []
if np.any(bmap0):
if len(ab0) != 0:
for j in range(len(ab0)):
bmap0[ab0[j][1], ab0[j][0]] = 1
bad_pix = np.where(bmap0 == 1)
bad_pix_x = bad_pix[0]
bad_pix_y = bad_pix[1]
else:
noBadPixel = True
theta = np.linspace(0, 2 * np.pi, 100)
x0 = pos[0]
y0 = pos[1]
if r1 is not None:
x1 = r1 * np.cos(theta) + x0
y1 = r1 * np.sin(theta) + y0
if dr is not None:
r2 = r1 + dr
x2 = r2 * np.cos(theta) + x0
y2 = r2 * np.sin(theta) + y0
sky_method = "ring"
elif mask is not None:
bg_coords = np.where(mask == 1)
bg_x = bg_coords[0]
bg_y = bg_coords[1]
sky_method = "mask"
if window is not None:
r3 = window
x3 = r3 * np.cos(theta) + x0
y3 = r3 * np.sin(theta) + y0
xs1, ys1 = x0 + isz // 2, y0 + isz // 2
xs2, ys2 = x0 - isz // 2, y0 + isz // 2
xs3, ys3 = x0 - isz // 2, y0 - isz // 2
xs4, ys4 = x0 + isz // 2, y0 - isz // 2
max_val = img1[y0, x0]
fig = plt.figure(figsize=(5, 5))
plt.title("--- CLEANING PARAMETERS ---")
plt.imshow(img1, norm=PowerNorm(0.5, vmin=0, vmax=max_val), cmap="afmhot")
if sky_method == "ring":
if dr is not None:
plt.plot(x1, y1, label="Inner radius for sky subtraction")
plt.plot(x2, y2, label="Outer radius for sky subtraction")
else:
plt.plot(x1, y1, label="Boundary for sky subtraction")
elif sky_method == "mask":
plt.scatter(
bg_y,
bg_x,
color="None",
marker="s",
edgecolors="C0",
s=20,
label="Pixels used for sky subtraction",
)
if apod:
if window is not None:
plt.plot(x3, y3, "--", label="Super-gaussian windowing")
plt.plot(x0, y0, "+", color="c", ms=10, label="Centering position")
plt.plot(
[xs1, xs2, xs3, xs4, xs1],
[ys1, ys2, ys3, ys4, ys1],
"w--",
label="Resized image",
)
plt.xlim((0, dims[0] - 1))
plt.ylim((0, dims[1] - 1))
if not noBadPixel:
if remove_bad:
label = "Fixed hot/bad pixels"
else:
label = "Hot/bad pixels"
plt.scatter(
bad_pix_y,
bad_pix_x,
color="None",
marker="s",
edgecolors="r",
facecolors="None",
s=20,
label=label,
)
plt.xlabel("X [pix]")
plt.ylabel("Y [pix]")
plt.legend(fontsize=8, loc=1)
plt.tight_layout()
return fig
def _apply_edge_correction(img0, edge=0):
"""Remove the bright edges (set to 0) observed for
some detectors (SPHERE)."""
if edge != 0:
img0[:, 0:edge] = 0
img0[:, -edge:-1] = 0
img0[0:edge, :] = 0
img0[-edge:-1, :] = 0
return img0
def _remove_dark(img1, darkfile=None, ihdu=0, verbose=False):
if darkfile is not None:
with fits.open(darkfile) as hdu:
dark = hdu[ihdu].data
if verbose:
print("Dark cube shape is:", dark.shape)
master_dark = np.mean(dark, axis=0)
img1 -= master_dark
return img1
def clean_data(
data,
isz=None,
r1=None,
dr=None,
edge=0,
bad_map=None,
add_bad=None,
apod=True,
offx=0,
offy=0,
sky=True,
window=None,
darkfile=None,
f_kernel=3,
verbose=False,
*,
mask=None,
):
"""Clean data.
Parameters:
-----------
`data` {np.array} -- datacube containing the NRM data\n
`isz` {int} -- Size of the cropped image (default: {None})\n
`r1` {int} -- Radius of the rings to compute background sky (default: {None})\n
`dr` {int} -- Outer radius to compute sky (default: {None})\n
    `edge` {int} -- Patch the edges of the image (VLT/SPHERE artifact, default: {0}),\n
    `sky` {bool} -- If True, apply the background/sky subtraction (default: {True})\n
Returns:
--------
`cube` {np.array} -- Cleaned datacube.
"""
n_im = data.shape[0]
cube_cleaned = [] # np.zeros([n_im, isz, isz])
l_bad_frame = []
bad_map, add_bad = _get_3d_bad_pixels(bad_map, add_bad, data)
for i in tqdm(range(n_im), ncols=100, desc="Cleaning", leave=False):
img0 = data[i]
img0 = _apply_edge_correction(img0, edge=edge)
if bad_map is not None:
img1 = fix_bad_pixels(img0, bad_map[i], add_bad=add_bad[i])
else:
img1 = img0.copy()
img1 = _remove_dark(img1, darkfile=darkfile, verbose=verbose)
if isz is not None:
# Get expected center for sky correction
filtmed = f_kernel is not None
center = find_max(img1, filtmed=filtmed, f=f_kernel)
else:
center = None
if sky and (r1 is not None or mask is not None):
img_biased = sky_correction(
img1, r1=r1, dr=dr, verbose=verbose, center=center, mask=mask
)[0]
elif sky:
warnings.warn(
"sky is set to True, but r1 and mask are set to None. Skipping sky correction",
RuntimeWarning,
)
img_biased = img1.copy()
else:
img_biased = img1.copy()
img_biased[img_biased < 0] = 0 # Remove negative pixels
if isz is not None:
# Get expected center for sky correction
filtmed = f_kernel is not None
im_rec_max = crop_max(
img_biased, isz, offx=offx, offy=offy, filtmed=filtmed, f=f_kernel
)[0]
else:
im_rec_max = img_biased.copy()
if (
(im_rec_max.shape[0] != im_rec_max.shape[1])
or (isz is not None and im_rec_max.shape[0] != isz)
or (isz is None and im_rec_max.shape[0] != img0.shape[0])
):
l_bad_frame.append(i)
else:
if apod and window is not None:
img = apply_windowing(im_rec_max, window=window)
elif apod:
warnings.warn(
"apod is set to True, but window is None. Skipping apodisation",
RuntimeWarning,
)
img = im_rec_max.copy()
else:
img = im_rec_max.copy()
cube_cleaned.append(img)
if verbose:
print("Bad centering frame number:", l_bad_frame)
cube_cleaned = np.array(cube_cleaned)
return cube_cleaned
def select_clean_data(
filename,
isz=256,
r1=None,
dr=None,
edge=0,
clip=True,
bad_map=None,
add_bad=None,
offx=0,
offy=0,
clip_fact=0.5,
apod=True,
sky=True,
window=None,
darkfile=None,
f_kernel=3,
verbose=False,
ihdu=0,
display=False,
*,
remove_bad=True,
nframe=0,
mask=None,
):
"""Clean and select good datacube (sigma-clipping using fluxes variations).
Parameters:
-----------
`filename` {str}: filename containing the datacube,\n
`isz` {int}: Size of the cropped image (default: {256})\n
`r1` {int}: Radius of the rings to compute background sky (default: {100})\n
`dr` {int}: Outer radius to compute sky (default: {10})\n
`edge` {int}: Patch the edges of the image (VLT/SPHERE artifact, default: {0}),\n
`clip` {bool}: If True, sigma-clipping is used to reject frames with low integrated flux,\n
`clip_fact` {float}: Relative sigma if rejecting frames by sigma-clipping,\n
`apod` {bool}: If True, apodisation is performed in the image plan using a super-gaussian
function (known as windowing). The gaussian FWHM is set by the parameter `window`,\n
`window` {float}: FWHM of the super-gaussian to apodise the image (smoothly go to zero
on the edges),\n
    `sky` {bool}: If True, the sky is removed using the annulus technique (computed between `r1`
    and `r1` + `dr`),\n
    `darkfile` {str}: If specified (default: None), the input dark (master_dark averaged if
    multiple integrations) is subtracted from the raw image,\n
`f_kernel` {float}: kernel size used in the applied median filter (to find the center).
`remove_bad` {bool}: If True, the bad pixels are removed in the cleaning parameter
plots using a gaussian interpolation (default: {True}),\n
`nframe` {int}: Frame number used to show cleaning parameters (default: {0}),\n
Returns:
--------
`cube_final` {np.array}: Cleaned and selected datacube.
"""
with fits.open(filename) as hdu:
cube = hdu[ihdu].data
hdr = hdu[0].header
ins = hdr.get("INSTRUME", None)
if ins == "SPHERE":
seeing_start = float(hdr["HIERARCH ESO TEL AMBI FWHM START"])
seeing = float(hdr["HIERARCH ESO TEL IA FWHM"])
seeing_end = float(hdr["HIERARCH ESO TEL AMBI FWHM END"])
if verbose:
print("\n----- Seeing conditions -----")
print(
"%2.2f (start), %2.2f (end), %2.2f (Corrected AirMass)"
% (seeing_start, seeing_end, seeing)
)
    # Build a default add_bad list here (avoids a mutable default argument)
if add_bad is None:
add_bad = []
if r1 is None and mask is None and sky:
warnings.warn(
"The default value of r1 is now None. Either r1 or mask should be set explicitely. This will raise an error in the future.",
PendingDeprecationWarning,
)
r1 = 100
if dr is None:
dr = 10
elif r1 is not None and dr is None and mask is None and sky:
warnings.warn(
"The default value of dr is now None. dr must be set explicitely to be used.",
PendingDeprecationWarning,
)
dr = 10
if display:
show_clean_params(
filename,
isz,
r1,
dr,
bad_map=bad_map,
add_bad=add_bad,
edge=edge,
remove_bad=remove_bad,
nframe=nframe,
ihdu=ihdu,
f_kernel=f_kernel,
offx=offx,
offy=offy,
apod=apod,
window=window,
)
cube_cleaned = clean_data(
cube,
isz=isz,
r1=r1,
edge=edge,
bad_map=bad_map,
add_bad=add_bad,
dr=dr,
sky=sky,
apod=apod,
window=window,
f_kernel=f_kernel,
offx=offx,
offy=offy,
darkfile=darkfile,
verbose=verbose,
mask=mask,
)
if cube_cleaned is None:
return None
cube_final = select_data(
cube_cleaned, clip=clip, clip_fact=clip_fact, verbose=verbose, display=display
)
return cube_final
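
# Hedged end-to-end sketch (file name and cleaning radii are illustrative):
# cube = select_clean_data("target_NRM.fits", isz=256, r1=100, dr=20, clip=True,
#                          apod=True, window=80, sky=True, display=False)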
| 32.061146 | 136 | 0.562738 | 3,488 | 25,168 | 3.948681 | 0.148796 | 0.020039 | 0.01699 | 0.008713 | 0.276919 | 0.20061 | 0.167792 | 0.147245 | 0.124519 | 0.112249 | 0 | 0.025568 | 0.313136 | 25,168 | 784 | 137 | 32.102041 | 0.771158 | 0.238914 | 0 | 0.328671 | 0 | 0.006993 | 0.119012 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.019231 | false | 0 | 0.022727 | 0 | 0.062937 | 0.020979 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
878fec7820fb97d9b9711929385ca95004c7e7c9 | 3,547 | py | Python | saltpad/core.py | novapost/saltpad | 863e79e6eef7f36d16050170f26203854208283a | [
"Apache-2.0"
] | 1 | 2016-01-08T20:56:20.000Z | 2016-01-08T20:56:20.000Z | saltpad/core.py | novapost/saltpad | 863e79e6eef7f36d16050170f26203854208283a | [
"Apache-2.0"
] | null | null | null | saltpad/core.py | novapost/saltpad | 863e79e6eef7f36d16050170f26203854208283a | [
"Apache-2.0"
] | null | null | null | import os
import sys
import salt.config
import salt.client
import salt.runner
import salt.key
import pymongo
from salt.output import highstate
from functools import wraps
def mproperty(fn):
attribute = "_memo_%s" % fn.__name__
@property
@wraps(fn)
def _property(self):
if not hasattr(self, attribute):
setattr(self, attribute, fn(self))
return getattr(self, attribute)
return _property
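
# Minimal usage sketch (illustrative; no class in this module uses it directly):
# class Example(object):
#     @mproperty
#     def data(self):
#         return expensive_lookup()   # body runs once; result memoized on the instance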
class SaltStackClient(object):
def __init__(self, collection_name="saltpad"):
master_opts = salt.config.master_config(
os.environ.get('SALT_MASTER_CONFIG', '/etc/salt/master'))
if not 'color' in master_opts:
master_opts['color'] = True
# Inject master_opts
highstate.__opts__ = master_opts
minion_opts = salt.config.client_config(
os.environ.get('SALT_MINION_CONFIG', '/etc/salt/minion'))
self.local = salt.client.LocalClient()
self.runner = salt.runner.RunnerClient(master_opts)
self.key = salt.key.Key(master_opts)
self.collection_name = collection_name
self.con = pymongo.MongoClient()
self.db = self.con[self.collection_name]
self._minions = None
self.highstate_cache = {}
@property
def minions(self):
if self._minions is None:
minions = self.local.cmd('*', 'test.ping', timeout=0)
keys = self.key.list_keys()
ret = {}
ret['up'] = sorted(minions)
ret['down'] = sorted(set(keys['minions']) - set(minions))
self._minions = ret
return self._minions
def get_minion_status(self, minion_name):
if minion_name in self.minions["up"]:
return "up"
elif minion_name in self.minions["down"]:
return "down"
else:
return "Bad minion_name"
def _reload_roles(self):
self._minions_roles = {}
self._roles_minions = {}
for minion in self.minions["up"]:
roles = self.local.cmd(minion, 'grains.get', ['roles'])[minion]
self._minions_roles[minion] = roles
for role in roles:
self._roles_minions.setdefault(role, []).append(minion)
def minions_roles(self):
self._reload_roles()
return self._minions_roles
def roles_minions(self):
self._reload_roles()
return self._roles_minions
    def get_job_id(self, minion, jid):
        # Use the saltpad database (self.db), consistent with get_job_status below.
        return self.db[minion].find_one({'jid': jid})
def get_multiple_job_status(self, minion, key=None, max=5):
query = {}
if key:
query['key'] = key
return list(self.db[minion].find(query).sort('_id', -1).limit(max))
def get_job_status(self, minion, jid, key=None):
query = {'jid': jid}
if key:
query['key'] = key
return self.db[minion].find_one(query)
def run_job(self, minion, fun, key=None, *args, **kwargs):
result = self.local.run_job(minion, fun,
timeout=99999999999999, ret='nova_mongo_return', arg=args, kwarg=kwargs)
if key is None:
key = fun
self.db[minion].insert({'jid': result['jid'], 'key': key})
return result['jid']
def cmd(self, target, fun, timeout=None, *args, **kwargs):
return self.local.cmd(target, fun, arg=args, timeout=timeout,
kwarg=kwargs)
def cmd_iter(self, target, fun, *args, **kwargs):
return self.local.cmd_iter(target, fun, arg=args, kwarg=kwargs)
| 29.07377 | 84 | 0.605018 | 441 | 3,547 | 4.680272 | 0.23356 | 0.053295 | 0.023256 | 0.017442 | 0.120155 | 0.07655 | 0 | 0 | 0 | 0 | 0 | 0.006584 | 0.272061 | 3,547 | 121 | 85 | 29.31405 | 0.792796 | 0.005075 | 0 | 0.088889 | 0 | 0 | 0.05784 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.155556 | false | 0 | 0.1 | 0.033333 | 0.422222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8793d15bb2a0283134868ae0228a8a20d4336c28 | 2,633 | py | Python | source_pytorch/predict.py | koljamaier/cnn-classifier | 176cb5992ac90564d2ce74cfebda7bb0827edbb8 | [
"MIT"
] | null | null | null | source_pytorch/predict.py | koljamaier/cnn-classifier | 176cb5992ac90564d2ce74cfebda7bb0827edbb8 | [
"MIT"
] | 9 | 2021-03-19T02:33:43.000Z | 2022-03-11T23:55:09.000Z | source_pytorch_style_transfer/predict.py | koljamaier/cnn-classifier | 176cb5992ac90564d2ce74cfebda7bb0827edbb8 | [
"MIT"
] | null | null | null | # import libraries
import os
import numpy as np
import torch
from six import BytesIO
from torchvision import datasets, models, transforms
import torch.nn as nn
# default content type is numpy array
NP_CONTENT_TYPE = 'application/x-npy'
def model_fn(model_dir):
"""Load the PyTorch model from the `model_dir` directory."""
print("Loading model.")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = models.vgg16(pretrained=True) # models.resnet50(pretrained=True)
for param in model.parameters():
param.requires_grad = False
# vgg16
n_inputs = model.classifier[6].in_features
last_layer = nn.Linear(n_inputs, 133)
model.classifier[6] = last_layer
# Load the stored model parameters.
model_path = os.path.join(model_dir, 'model.pth')
with open(model_path, 'rb') as f:
model.load_state_dict(torch.load(f))
# set to eval mode, could use no_grad
model.to(device).eval()
print("Done loading model.")
return model
# Provided input data loading
def input_fn(serialized_input_data, content_type):
"""
    We assume that data will be passed in as a numpy array. With this
    information we can deserialize the data straightforwardly.
"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print('Deserializing the input data.')
if content_type == NP_CONTENT_TYPE:
stream = BytesIO(serialized_input_data)
return np.load(stream)
raise Exception('Requested unsupported ContentType in content_type: ' + content_type)
# Provided output data handling
def output_fn(prediction_output, accept):
print('Serializing the generated output.')
if accept == NP_CONTENT_TYPE:
stream = BytesIO()
np.save(stream, prediction_output)
return stream.getvalue(), accept
raise Exception('Requested unsupported ContentType in Accept: ' + accept)
# this function gets called after our model is deployed. It gives back the prediction label for the dog breed
def predict_fn(input_data, model):
print('Predicting class labels for the input data...')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
data = torch.from_numpy(input_data.astype('float32'))
data = data.to(device)
# Put the model into evaluation mode
model.eval()
# Compute the result of applying the model to the input data
    # The variable `out_label` should hold the predicted class index between
    # 0 and 133 (our dog breed classes); the model emits one score per class,
    # so the label comes from an argmax over the class dimension rather than
    # rounding the raw scores.
    out = model(data)
    out_np = out.cpu().detach().numpy()
    out_label = np.argmax(out_np, axis=1)
return out_label | 30.264368 | 109 | 0.706039 | 370 | 2,633 | 4.902703 | 0.391892 | 0.048512 | 0.021499 | 0.03473 | 0.163175 | 0.134509 | 0.08269 | 0.08269 | 0.08269 | 0.08269 | 0 | 0.008076 | 0.200532 | 2,633 | 87 | 110 | 30.264368 | 0.853682 | 0.262438 | 0 | 0.065217 | 0 | 0 | 0.15312 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.130435 | 0 | 0.304348 | 0.108696 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8793efeb9b152a5d597ac63b008fb86ba1a50fa4 | 2,799 | py | Python | tasky/tasks/queue.py | jreese/tasky | 681f4e5a9a60a0eb838b89f320309cfb45a56242 | [
"MIT"
] | 10 | 2016-09-24T12:08:13.000Z | 2022-03-25T12:53:45.000Z | tasky/tasks/queue.py | jreese/tasky | 681f4e5a9a60a0eb838b89f320309cfb45a56242 | [
"MIT"
] | null | null | null | tasky/tasks/queue.py | jreese/tasky | 681f4e5a9a60a0eb838b89f320309cfb45a56242 | [
"MIT"
] | null | null | null | # Copyright 2016 John Reese
# Licensed under the MIT license
import asyncio
import logging
from concurrent.futures import CancelledError
from typing import Any
from .task import Task
Log = logging.getLogger('tasky.tasks')
class QueueTask(Task):
'''Run a method on the asyncio event loop for each item inserted into this
task's work queue. Can use multiple "workers" to process the work queue.
Failed work items (those generating exceptions) will be dropped -- workers
must manually requeue any work items that need to be reprocessed.'''
WORKERS = 1
MAXSIZE = 0
QUEUE = None
OPEN = True
def __init__(self, id: int=0):
'''Initialize the shared work queue for all workers.'''
super().__init__()
if self.__class__.QUEUE is None:
self.__class__.QUEUE = asyncio.Queue(self.MAXSIZE)
self.id = max(0, id)
@property
def name(self):
return '{0}({1})'.format(self.__class__.__name__, self.id)
@classmethod
def close(cls):
'''Mark the queue as being "closed". Once closed, workers will stop
running once the work queue becomes empty.'''
Log.debug('closing %s work queue', cls.__name__)
cls.OPEN = False
async def init(self) -> None:
if self.id == 0:
Log.debug('initializing %s', self.name)
for task_id in range(1, self.WORKERS):
task = self.__class__(id=task_id)
Log.debug('spawning %s', task.name)
await self.tasky.insert(task)
async def run(self, item: Any) -> None:
'''Override this method to define what happens when your task runs.'''
await self.sleep(1.0)
async def run_task(self) -> None:
        '''Initialize the queue and spawn extra worker tasks if this is the
first task. Then wait for work items to enter the task queue, and
execute the `run()` method with the current work item.'''
while self.running:
try:
item = self.QUEUE.get_nowait()
Log.debug('%s processing work item', self.name)
await self.run(item)
Log.debug('%s completed work item', self.name)
self.QUEUE.task_done()
except asyncio.QueueEmpty:
if self.OPEN:
await self.sleep(0.05)
else:
Log.debug('%s queue closed and empty, stopping', self.name)
return
except CancelledError:
                Log.debug('%s cancelled, dropping work item', self.name)
self.QUEUE.task_done()
raise
except Exception:
Log.exception('%s failed work item', self.name)
self.QUEUE.task_done()
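# --- Editor's usage sketch (not part of the original module) ---
# A minimal subclass showing the intended pattern; `UrlFetcher` and its work
# items are assumptions, not part of tasky itself.
class UrlFetcher(QueueTask):
    WORKERS = 4  # the first task spawns the three extra workers in init()

    async def run(self, item: Any) -> None:
        Log.info('fetching %s', item)

# Once a task instance exists (i.e. after it has been inserted into the tasky
# loop), producers enqueue work with `await UrlFetcher.QUEUE.put(url)` and
# call `UrlFetcher.close()` when no further items will arrive, letting the
# workers drain the queue and stop.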
| 30.096774 | 79 | 0.589139 | 357 | 2,799 | 4.509804 | 0.389356 | 0.034783 | 0.02236 | 0.029814 | 0.040994 | 0.040994 | 0.040994 | 0.040994 | 0 | 0 | 0 | 0.008905 | 0.317971 | 2,799 | 92 | 80 | 30.423913 | 0.834468 | 0.17935 | 0 | 0.056604 | 0 | 0 | 0.099395 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.056604 | false | 0 | 0.09434 | 0.018868 | 0.283019 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87954da61956d763541972b56cae90b64dbf3274 | 2,076 | py | Python | codenames/routes.py | Schluggi/codenames | 094f3621ad17597fede8d6438f41e60dbb1f7057 | [
"MIT"
] | 3 | 2020-12-13T18:16:06.000Z | 2021-04-13T09:25:23.000Z | codenames/routes.py | Schluggi/codenames | 094f3621ad17597fede8d6438f41e60dbb1f7057 | [
"MIT"
] | 12 | 2020-06-17T18:23:05.000Z | 2022-03-12T00:52:43.000Z | codenames/routes.py | Schluggi/codenames | 094f3621ad17597fede8d6438f41e60dbb1f7057 | [
"MIT"
] | null | null | null | from flask import render_template, redirect, url_for, flash, make_response, session, json
from . import app, models, helper, websocket
from .forms import IndexForm, GameForm
@app.route('/', methods=['GET', 'POST'])
def index():
form = IndexForm()
if form.validate_on_submit():
#: check if the game already exists
if not models.Game.query.filter_by(name=form.game_name.data).first():
#: create a new game
helper.new_game(form.game_name.data, form.game_mode.data)
flash('New game created', category='success')
return redirect(url_for('games', game_name=form.game_name.data))
return render_template('index.html', form=form)
@app.route('/g/<game_name>', methods=['GET', 'POST'])
@app.route('/g/')
def games(game_name=None):
form = GameForm()
game = models.Game.query.filter_by(name=game_name).first()
if not game:
flash('Game not found', category='error')
return redirect(url_for('index'))
session['game_id'] = game.id
if form.validate_on_submit():
#: start a new round
if form.game_mode.data:
game_mode = form.game_mode.data
else:
game_mode = game.mode
helper.new_game(game_name, game_mode, new_round=True)
flash('New round started', category='success')
#: all clients have to reload the website
websocket.reload(game.id)
#: get the field image chunks from database
image_chunks = json.loads(game.images)
return render_template('game.html', rows=image_chunks, game=game, form=form, game_modes=app.game_modes)
@app.route('/static/js/game.js')
def game():
resp = make_response(render_template('js/game.js'))
resp.headers['Content-type'] = 'text/javascript;charset=UTF-8'
return resp
@app.errorhandler(500)
def error_500(_):
flash('Game error occurred (500)', category='error')
return redirect(url_for('index'))
@app.errorhandler(404)
def error_404(_):
flash('Page not found (404)', category='error')
return redirect(url_for('index'))
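# --- Editor's usage sketch (not part of the original module) ---
# Exercising the index route with Flask's test client; run from the package
# context, since this module relies on relative imports:
#
#   with app.test_client() as client:
#       assert client.get('/').status_code == 200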
| 30.086957 | 107 | 0.663295 | 288 | 2,076 | 4.631944 | 0.322917 | 0.047976 | 0.052474 | 0.05997 | 0.185907 | 0.125937 | 0.085457 | 0 | 0 | 0 | 0 | 0.011405 | 0.197495 | 2,076 | 68 | 108 | 30.529412 | 0.789316 | 0.074181 | 0 | 0.113636 | 0 | 0 | 0.139802 | 0.015128 | 0 | 0 | 0 | 0 | 0 | 1 | 0.113636 | false | 0 | 0.068182 | 0 | 0.340909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87962fb6b6502f3e6b62209e2b99cbed8a8f548f | 1,976 | py | Python | Homework 1/question_solutions/question_3_trunc_error.py | rukmal/FE-621-Homework | 9c7cef7931b58aed54867acd8e8cf1928bc6d2dd | [
"MIT"
] | 4 | 2020-04-29T04:34:50.000Z | 2021-11-11T07:49:08.000Z | Homework 1/question_solutions/question_3_trunc_error.py | rukmal/FE-621-Homework | 9c7cef7931b58aed54867acd8e8cf1928bc6d2dd | [
"MIT"
] | null | null | null | Homework 1/question_solutions/question_3_trunc_error.py | rukmal/FE-621-Homework | 9c7cef7931b58aed54867acd8e8cf1928bc6d2dd | [
"MIT"
] | 1 | 2020-04-23T07:32:44.000Z | 2020-04-23T07:32:44.000Z | from context import fe621
import numpy as np
import pandas as pd
def truncationErrorAnalysis():
"""Function to analyze the truncation error of the Trapezoidal and Simpson's
    quadrature rules.
"""
# Objective function
def f(x: float) -> float:
return np.where(x == 0.0, 1.0, np.sin(x) / x)
# Setting values for N
N = np.power(10, np.arange(3, 8))
# Setting values for a
a = np.power(10, np.arange(2, 7))
trapezoidal_vals = np.ndarray((N.size, a.size))
simpsons_vals = np.ndarray((N.size, a.size))
# Building function approximation table, varying N and A
for i in range(0, N.size):
for j in range(0, a.size):
# Trapezoidal rule approximation
trapezoidal_vals[i, j] = fe621.numerical_integration \
.trapezoidalRule(f=f, N=N[i], start=-a[j], stop=a[j])
# Simpsons rule trunc approximation
simpsons_vals[i, j] = fe621.numerical_integration \
.simpsonsRule(f=f, N=N[i], start=-a[j], stop=a[j])
# Computing the absolute difference from Pi (i.e. trunc error)
# and casting to DataFrame
trapezoidal_df = pd.DataFrame(np.abs(trapezoidal_vals - np.pi))
simpsons_df = pd.DataFrame(np.abs(simpsons_vals - np.pi))
# Setting row and column names
trapezoidal_df.columns = ['N = ' + str(i) for i in N]
trapezoidal_df.index = ['a = ' + str(i) for i in a]
simpsons_df.columns = ['N = ' + str(i) for i in N]
simpsons_df.index = ['a = ' + str(i) for i in a]
# Saving to CSV
trapezoidal_df.to_csv(
'Homework 1/bin/numerical_integration/trapezoidal_trunc_error.csv',
header=True, index=True, float_format='%.8e'
)
simpsons_df.to_csv(
'Homework 1/bin/numerical_integration/simpsons_trunc_error.csv',
header=True, index=True, float_format='%.8e'
)
if __name__ == '__main__':
# Part 2 - Truncation Error Analysis
truncationErrorAnalysis()
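    # --- Editor's note (addition) ---
    # With step size h = 2a/N, the composite trapezoidal rule carries an
    # O(h^2) truncation error while composite Simpson's rule is O(h^4), so
    # for a fixed window [-a, a] the Simpson error should shrink roughly
    # 100x faster per 10x increase in N, until the O(1/a) error from
    # truncating the infinite sin(x)/x integral to a finite interval
    # dominates both rules.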
| 32.933333 | 80 | 0.630567 | 285 | 1,976 | 4.252632 | 0.326316 | 0.016502 | 0.024752 | 0.026403 | 0.381188 | 0.323432 | 0.272277 | 0.234323 | 0.169967 | 0.10396 | 0 | 0.018817 | 0.246964 | 1,976 | 59 | 81 | 33.491525 | 0.795699 | 0.220648 | 0 | 0.0625 | 0 | 0 | 0.10363 | 0.070627 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.09375 | 0.03125 | 0.1875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
879809aa9a3d9ab1577184c4594c4a71184e8e70 | 8,244 | py | Python | lombscargle/core.py | jakevdp/nfftls | 01aebd51189a6ed96e44c58cd55b74b8691cbe77 | [
"BSD-3-Clause"
] | 2 | 2017-07-29T13:11:34.000Z | 2019-11-12T01:41:42.000Z | lombscargle/core.py | jakevdp/nfftls | 01aebd51189a6ed96e44c58cd55b74b8691cbe77 | [
"BSD-3-Clause"
] | null | null | null | lombscargle/core.py | jakevdp/nfftls | 01aebd51189a6ed96e44c58cd55b74b8691cbe77 | [
"BSD-3-Clause"
] | null | null | null | from astropy.stats import LombScargle as astropy_LombScargle
from astropy.stats.lombscargle.core import strip_units
from astropy.stats.lombscargle.implementations.main import _get_frequency_grid
from .nfftls import lombscargle_nfft
class LombScargle(astropy_LombScargle):
__doc__ = astropy_LombScargle.__doc__
def power(self, frequency, normalization='standard', method='auto',
assume_regular_frequency=False, method_kwds=None):
"""Compute the Lomb-Scargle power at the given frequencies
Parameters
----------
frequency : array_like or Quantity
frequencies (not angular frequencies) at which to evaluate the
periodogram. Note that in order to use method='fast', frequencies
must be regularly-spaced.
method : string (optional)
specify the lomb scargle implementation to use. Options are:
- 'auto': choose the best method based on the input
- 'nfft': use the O[N log N] nfft library.
- 'fast': use the O[N log N] fast method. Note that this requires
evenly-spaced frequencies: by default this will be checked unless
``assume_regular_frequency`` is set to True.
- 'slow': use the O[N^2] pure-python implementation
- 'cython': use the O[N^2] cython implementation. This is slightly
faster than method='slow', but much more memory efficient.
- 'chi2': use the O[N^2] chi2/linear-fitting implementation
- 'fastchi2': use the O[N log N] chi2 implementation. Note that this
requires evenly-spaced frequencies: by default this will be checked
unless ``assume_regular_frequency`` is set to True.
- 'scipy': use ``scipy.signal.lombscargle``, which is an O[N^2]
implementation written in C. Note that this does not support
heteroskedastic errors.
assume_regular_frequency : bool (optional)
if True, assume that the input frequency is of the form
freq = f0 + df * np.arange(N). Only referenced if method is 'auto'
or 'fast'.
normalization : string (optional, default='standard')
Normalization to use for the periodogram.
Options are 'standard', 'model', 'log', or 'psd'.
fit_mean : bool (optional, default=True)
if True, include a constant offset as part of the model at each
frequency. This can lead to more accurate results, especially in
the case of incomplete phase coverage.
center_data : bool (optional, default=True)
if True, pre-center the data by subtracting the weighted mean of
the input data. This is especially important if fit_mean = False
method_kwds : dict (optional)
additional keywords to pass to the lomb-scargle method
Returns
-------
power : ndarray
The Lomb-Scargle power at the specified frequency
"""
if method == 'nfft':
if self.nterms != 1:
raise ValueError("nfft method only works for nterms=1")
f0, df, Nf = _get_frequency_grid(strip_units(frequency),
assume_regular_frequency)
if method_kwds and 'use_fft' in method_kwds:
use_fft = method_kwds.pop('use_fft')
if use_fft:
method_kwds['exponential_sum_method'] = 'nfft'
else:
method_kwds['exponential_sum_method'] = 'slow'
power = lombscargle_nfft(*strip_units(self.t, self.y, self.dy),
f0, df, Nf,
center_data=self.center_data,
fit_mean=self.fit_mean,
normalization=normalization,
**(method_kwds or {}))
return power * self._power_unit(normalization)
else:
return super(LombScargle, self).power(frequency=frequency,
normalization=normalization,
method=method,
assume_regular_frequency=assume_regular_frequency,
method_kwds=method_kwds)
def autopower(self, method='auto', method_kwds=None,
normalization='standard', samples_per_peak=5,
nyquist_factor=5, minimum_frequency=None,
maximum_frequency=None):
"""Compute Lomb-Scargle power at automatically-determined frequencies
Parameters
----------
method : string (optional)
specify the lomb scargle implementation to use. Options are:
- 'auto': choose the best method based on the input
- 'nfft': use the O[N log N] nfft library.
- 'fast': use the O[N log N] fast method. Note that this requires
evenly-spaced frequencies: by default this will be checked unless
``assume_regular_frequency`` is set to True.
- 'slow': use the O[N^2] pure-python implementation
- 'cython': use the O[N^2] cython implementation. This is slightly
faster than method='slow', but much more memory efficient.
- 'chi2': use the O[N^2] chi2/linear-fitting implementation
- 'fastchi2': use the O[N log N] chi2 implementation. Note that this
requires evenly-spaced frequencies: by default this will be checked
unless ``assume_regular_frequency`` is set to True.
- 'scipy': use ``scipy.signal.lombscargle``, which is an O[N^2]
implementation written in C. Note that this does not support
heteroskedastic errors.
method_kwds : dict (optional)
additional keywords to pass to the lomb-scargle method
normalization : string (optional, default='standard')
Normalization to use for the periodogram.
Options are 'standard', 'model', or 'psd'.
samples_per_peak : float (optional, default=5)
The approximate number of desired samples across the typical peak
nyquist_factor : float (optional, default=5)
The multiple of the average nyquist frequency used to choose the
maximum frequency if maximum_frequency is not provided.
minimum_frequency : float (optional)
If specified, then use this minimum frequency rather than one
chosen based on the size of the baseline.
maximum_frequency : float (optional)
If specified, then use this maximum frequency rather than one
chosen based on the average nyquist frequency.
Returns
-------
frequency, power : ndarrays
The frequency and Lomb-Scargle power
"""
if method == 'nfft':
frequency = self.autofrequency(samples_per_peak=samples_per_peak,
nyquist_factor=nyquist_factor,
minimum_frequency=minimum_frequency,
maximum_frequency=maximum_frequency)
power = self.power(frequency,
normalization=normalization,
method=method, method_kwds=method_kwds,
assume_regular_frequency=True)
return frequency, power
else:
return super(LombScargle, self).autopower(method=method,
method_kwds=method_kwds,
normalization=normalization,
samples_per_peak=samples_per_peak,
nyquist_factor=nyquist_factor,
minimum_frequency=minimum_frequency,
maximum_frequency=maximum_frequency)
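# --- Editor's usage sketch (not part of the original module; the synthetic
# signal below is an assumption) ---
#
#   import numpy as np
#   rng = np.random.default_rng(0)
#   t = np.sort(rng.uniform(0, 100, 300))
#   y = np.sin(2 * np.pi * 0.17 * t) + 0.1 * rng.normal(size=300)
#   freq, power = LombScargle(t, y).autopower(method='nfft')
#   print('peak frequency:', freq[np.argmax(power)])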
| 54.596026 | 100 | 0.573023 | 881 | 8,244 | 5.240636 | 0.22815 | 0.034655 | 0.018194 | 0.020793 | 0.555339 | 0.501191 | 0.464371 | 0.464371 | 0.42885 | 0.42885 | 0 | 0.004749 | 0.361475 | 8,244 | 150 | 101 | 54.96 | 0.87234 | 0.513828 | 0 | 0.277778 | 0 | 0 | 0.040024 | 0.013241 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0.074074 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87993e598fc65ff1079e82a8b6905aa0e27e9001 | 1,188 | py | Python | models/accent_model.py | guyeshet/Keras-Project-Template | 4b324aea4a923ca0ceb1610487bf7139706fae33 | [
"Apache-2.0"
] | null | null | null | models/accent_model.py | guyeshet/Keras-Project-Template | 4b324aea4a923ca0ceb1610487bf7139706fae33 | [
"Apache-2.0"
] | null | null | null | models/accent_model.py | guyeshet/Keras-Project-Template | 4b324aea4a923ca0ceb1610487bf7139706fae33 | [
"Apache-2.0"
] | null | null | null | import librosa
from base.base_model import BaseModel
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten
class AccentModel(BaseModel):
def __init__(self, config):
super(AccentModel, self).__init__(config)
self.build_model()
def build_model(self):
self.model = Sequential()
self.model.add(Conv2D(32, kernel_size=(3, 3), activation='relu',
data_format="channels_last",
input_shape=self.config.model.input_shape))
self.model.add(MaxPooling2D(pool_size=(2, 2)))
self.model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
self.model.add(MaxPooling2D(pool_size=(2, 2)))
self.model.add(Dropout(rate=0.25))
self.model.add(Flatten())
self.model.add(Dense(128, activation='relu'))
self.model.add(Dropout(rate=0.5))
self.model.add(Dense(self.config.model.num_classes, activation='softmax'))
self.model.compile(loss=self.config.model.loss,
optimizer=self.config.model.optimizer,
metrics=['accuracy'])
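# --- Editor's usage sketch (not part of the original module) ---
# The nested `config` object is an assumption mirroring the attributes read
# above, and it presumes BaseModel.__init__ simply stores the config.
if __name__ == '__main__':
    from types import SimpleNamespace
    cfg = SimpleNamespace(model=SimpleNamespace(
        input_shape=(128, 128, 1), num_classes=10,
        loss='categorical_crossentropy', optimizer='adam'))
    AccentModel(cfg).model.summary()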
| 33 | 82 | 0.625421 | 144 | 1,188 | 5.020833 | 0.361111 | 0.136929 | 0.149378 | 0.049793 | 0.268326 | 0.248963 | 0.127248 | 0.127248 | 0.127248 | 0.127248 | 0 | 0.029018 | 0.245791 | 1,188 | 35 | 83 | 33.942857 | 0.777902 | 0 | 0 | 0.083333 | 0 | 0 | 0.033698 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.166667 | 0 | 0.291667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
879ac40f2728ea332bcf4325e96c15745d65d3e6 | 5,480 | py | Python | recipesAPI/users/views.py | rainsha/polsl_mgr_obierki | cc5ddb3df7b6c75f385f64029a10bd4919827545 | [
"MIT"
] | null | null | null | recipesAPI/users/views.py | rainsha/polsl_mgr_obierki | cc5ddb3df7b6c75f385f64029a10bd4919827545 | [
"MIT"
] | null | null | null | recipesAPI/users/views.py | rainsha/polsl_mgr_obierki | cc5ddb3df7b6c75f385f64029a10bd4919827545 | [
"MIT"
] | null | null | null | from django.core.serializers.json import DjangoJSONEncoder
from django.http import JsonResponse, HttpResponse, Http404
from drf_yasg.openapi import Schema, TYPE_STRING, TYPE_OBJECT
from drf_yasg.utils import swagger_auto_schema
from rest_framework import status, serializers, permissions
from rest_framework.generics import GenericAPIView
import json
from rest_framework.views import APIView
from rest_framework_simplejwt.tokens import RefreshToken
from rest_framework_simplejwt.views import TokenRefreshView
from users.models import User
from users.serializers import *
from django.contrib import auth
class RegisterView(GenericAPIView):
serializer_class = UserSerializer
permission_classes = [permissions.AllowAny]
@swagger_auto_schema(tags=["user"])
def post(self, request):
serializer = UserSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
data = {'isCreated': True}
return JsonResponse(data, status=status.HTTP_201_CREATED)
data = {'isCreated': False, 'errorMessage': serializer.errors}
return JsonResponse(data, status=status.HTTP_400_BAD_REQUEST)
class LoginView(GenericAPIView):
serializer_class = LoginSerializer
permission_classes = [permissions.AllowAny]
@swagger_auto_schema(tags=["user"],
request_body=Schema(
type=TYPE_OBJECT,
properties={
'username': Schema(type=TYPE_STRING),
'password': Schema(type=TYPE_STRING),
}
)
)
def post(self, request):
data = request.data
username = data.get('username', '')
password = data.get('password', '')
user = auth.authenticate(username=username, password=password)
if user:
token = RefreshToken.for_user(user)
data = {'id': user.id, 'name': user.username, 'accessToken': str(token.access_token),
'refreshToken': str(token)}
return JsonResponse(data, status=status.HTTP_200_OK)
return JsonResponse({'errorMessage': 'Invalid credentials'}, status=status.HTTP_401_UNAUTHORIZED)
class UserDetail(APIView):
"""
Retrieve, update or delete a user instance.
"""
def get_object(self, pk):
try:
return User.objects.get(pk=pk)
except User.DoesNotExist:
raise Http404
def get(self, request, pk, format=None):
try:
user = self.get_object(pk)
except Http404:
return JsonResponse({'isDeleted': False, 'errorMessage': "User does not exist"}, safe=False,
status=status.HTTP_404_NOT_FOUND)
return JsonResponse({'id': user.id, 'username': user.username}, safe=False, status=status.HTTP_200_OK)
def put(self, request, pk, format=None):
try:
user = self.get_object(pk)
except Http404:
            return JsonResponse({'isUpdated': False, 'errorMessage': "User does not exist"}, safe=False,
status=status.HTTP_404_NOT_FOUND)
user_serializer = UserSerializer(user, data=request.data)
try:
if user_serializer.is_valid(raise_exception=True):
user_serializer.save()
return JsonResponse({'isUpdated': True, 'errorMessage': ""}, safe=False, status=status.HTTP_200_OK)
except serializers.ValidationError as valEr:
return JsonResponse({'isUpdated': False, 'errorMessage': valEr.detail}, safe=False,
status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, pk, format=None):
try:
user = self.get_object(pk)
except Http404:
return JsonResponse({'isDeleted': False, 'errorMessage': "User does not exist"}, safe=False,
status=status.HTTP_404_NOT_FOUND)
user.delete()
return JsonResponse({'isDeleted': True, 'errorMessage': ""}, safe=False,
status=status.HTTP_204_NO_CONTENT)
class UserList(APIView):
"""
Create or get user instance.
"""
def user_exists_by_name(self, name):
return User.objects.filter(username=name).exists()
def get(self, request, format=None):
users = User.objects.values('id', 'username')
data = json.dumps(list(users), cls=DjangoJSONEncoder)
return HttpResponse(data, content_type="application/json")
    def post(self, request, format=None):
user_serializer = UserSerializer(data=request.data)
try:
if user_serializer.is_valid(raise_exception=True):
if not self.user_exists_by_name(user_serializer.validated_data['username']):
user_serializer.save()
return JsonResponse({'isCreated': True, 'errorMessage': ""}, safe=False,
status=status.HTTP_201_CREATED)
return JsonResponse({'isCreated': False, 'errorMessage': user_serializer.errors},
status=status.HTTP_400_BAD_REQUEST)
except serializers.ValidationError as valEr:
return JsonResponse({'isCreated': False, 'errorMessage': valEr.detail}, safe=False,
status=status.HTTP_400_BAD_REQUEST)
| 41.515152 | 115 | 0.622993 | 566 | 5,480 | 5.876325 | 0.227915 | 0.075767 | 0.067348 | 0.056825 | 0.453999 | 0.369212 | 0.322309 | 0.240529 | 0.240529 | 0.190619 | 0 | 0.014423 | 0.278832 | 5,480 | 131 | 116 | 41.832061 | 0.827176 | 0.013139 | 0 | 0.288462 | 0 | 0 | 0.078296 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086538 | false | 0.028846 | 0.125 | 0.009615 | 0.451923 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
879bb779932f5bf1086d8b00e1c10c5d98c48288 | 5,267 | py | Python | setup.py | shengdexiang/Office16CustomInstaller | 9032620470aa44beee6be1e55e2f47abecb9b7e1 | [
"Apache-2.0"
] | null | null | null | setup.py | shengdexiang/Office16CustomInstaller | 9032620470aa44beee6be1e55e2f47abecb9b7e1 | [
"Apache-2.0"
] | null | null | null | setup.py | shengdexiang/Office16CustomInstaller | 9032620470aa44beee6be1e55e2f47abecb9b7e1 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This script enable custom installation of Microsoft Office suite.
You can install/uninstall specific product.
"""
import argparse
import os
import sys
import xml.etree.ElementTree as ET
ALL_PRODUCTS = ['Word', 'Excel', 'PowerPoint', 'Access',
'Groove', 'InfoPath', 'Lync', 'OneNote', 'Project', 'Outlook',
'Publisher', 'Visio', 'SharePointDesigner', 'OneDrive']
class Setup(object):
"""Microsoft Office 2016 custom installer wrapper."""
def __init__(self, args):
self.args = args
self.config_file = 'configuration.xml'
self.lang = self._get_product_lang()
self.all_products = ALL_PRODUCTS
self.product_list_to_install = self._get_product_list()
self.product_edition = self._get_product_edition()
self._init_config_file()
self._gen_config_file()
def _get_product_list(self):
"""Get products to be installed/uninstalled..
Get products to be installed/uninstalled.
Args:
None.
Returns:
None.
"""
product_list = []
if ',' in self.args.product:
product_list.extend(self.args.product.split(','))
else:
product_list.append(self.args.product)
return product_list
def _get_product_edition(self):
"""Get product edition to be used.
Get product edition to be used.
Args:
None.
Returns:
None.
"""
return self.args.edition
def _get_product_lang(self):
"""Get product language to be used.
Get product language to be used.
Args:
None.
Returns:
None.
"""
return self.args.lang
def _init_config_file(self):
"""Initialize configuration file.
Initialize configuration file template.
Args:
None.
Returns:
None.
"""
print('Initializing Configuration File'.center(60, '='))
init_xml_str = ('<Configuration>\n'
' <Add SourcePath="Office" Branch="Current" OfficeClientEdition="64">\n'
' <Product ID="ProPlusRetail">\n'
' <Language ID="zh-cn" />\n'
' </Product>\n'
' </Add>\n'
'</Configuration>\n')
if os.path.exists(self.config_file):
os.unlink(self.config_file)
open(self.config_file, 'w').write(init_xml_str + '\n')
def _gen_config_file(self):
"""Generate configuration file.
        Generate the configuration file, which will be used to customize
        the installation.
Args:
None.
Returns:
None.
"""
print('Generating Configuration File'.center(60, '='))
tree = ET.parse(self.config_file)
root = tree.getroot()
# update product edition
root[0].set('OfficeClientEdition', self.product_edition)
# update product language
for lang in root.iter('Language'):
lang.set('ID', self.lang)
# update product that will not be installed
for product in root.iter('Product'):
for item in self.all_products:
if item not in self.product_list_to_install:
app = ET.SubElement(product, 'ExcludeApp')
app.set('ID', item)
ET.dump(root)
tree.write(self.config_file)
def run(self):
"""Class entry point.
Args:
None.
Returns:
None.
"""
if self.args.action == 'download':
os.system('.\setup.exe /download {0}'.format(self.config_file))
elif self.args.action == 'install':
os.system('.\setup.exe /configure {0}'.format(self.config_file))
else:
pass
os.unlink(self.config_file)
def get_argparser():
"""Generate a command line argument parser."""
parser = argparse.ArgumentParser(
prog=sys.argv[0],
description='Microsoft Office 2016 downloader/installer',
        epilog=('e.g.: python setup.py --action install --product Word '
'--edition 64 --lang zh-cn'))
parser.add_argument('-a', '--action', action='store',
default='install', help='install | download')
    # NOTE: --product accepts a comma-separated list (see _get_product_list),
    # so argparse `choices` cannot be used to validate it directly.
    parser.add_argument('-p', '--product', action='store', required=True,
                        help='comma-separated product(s) to install, e.g. '
                             'Word,Excel; valid names: ' + ', '.join(ALL_PRODUCTS))
parser.add_argument('-e', '--edition', action='store', default='64',
help='product edition, e.g. 64/32')
parser.add_argument('-l', '--lang', action='store', default='zh-cn',
help='install language, e.g. en-us/zh-cn')
return parser
def main():
"""Program entry point."""
parser = get_argparser()
args = parser.parse_args()
if (args.action is None or args.product is None or
args.lang is None or args.edition is None):
parser.print_usage()
sys.exit(1)
setup = Setup(args)
setup.run()
if __name__ == '__main__':
main()
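# --- Editor's usage examples (addition) ---
# Download the installer payload for Word and Excel only:
#   python setup.py --action download --product Word,Excel --edition 64 --lang en-us
# Then install the same selection:
#   python setup.py --action install --product Word,Excel --edition 64 --lang en-us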
| 28.47027 | 99 | 0.555534 | 575 | 5,267 | 4.949565 | 0.295652 | 0.045678 | 0.044273 | 0.040056 | 0.166198 | 0.102249 | 0.028812 | 0.028812 | 0.028812 | 0.028812 | 0 | 0.008126 | 0.322385 | 5,267 | 184 | 100 | 28.625 | 0.789297 | 0.187013 | 0 | 0.045455 | 0 | 0 | 0.208047 | 0.012223 | 0 | 0 | 0 | 0 | 0 | 1 | 0.102273 | false | 0.011364 | 0.045455 | 0 | 0.204545 | 0.034091 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
879d115e1b51db4348e7e20329e0ed60fbefd5d2 | 954 | py | Python | arike/visits/urls.py | iamsdas/arike | ab76f48f49cd794dd4b77172b347e260a03413b2 | [
"MIT"
] | null | null | null | arike/visits/urls.py | iamsdas/arike | ab76f48f49cd794dd4b77172b347e260a03413b2 | [
"MIT"
] | null | null | null | arike/visits/urls.py | iamsdas/arike | ab76f48f49cd794dd4b77172b347e260a03413b2 | [
"MIT"
] | null | null | null | from django.urls import path
from arike.visits.views import (
ScheduleCreateView,
ScheduleDeleteView,
ScheduleDetailView,
ScheduleListVeiw,
ScheduleUpdateView,
TreatmentNoteCreateView,
TreatmentsListVeiw,
VisitDetailsCreateView,
)
app_name = "visits"
urlpatterns = [
path("schedule/", view=ScheduleListVeiw.as_view(), name="list"),
path("create/", view=ScheduleCreateView.as_view(), name="create"),
path("<pk>/visit/", view=VisitDetailsCreateView.as_view(), name="visit"),
path("<pk>/", view=ScheduleDetailView.as_view(), name="view"),
path("<pk>/update/", view=ScheduleUpdateView.as_view(), name="update"),
path("<pk>/delete/", view=ScheduleDeleteView.as_view(), name="delete"),
path("<pk>/treatments/", view=TreatmentsListVeiw.as_view(), name="treatments"),
path(
"<pk>/treatments/<id>/addnote",
view=TreatmentNoteCreateView.as_view(),
name="add_note",
),
]
| 32.896552 | 83 | 0.681342 | 94 | 954 | 6.808511 | 0.351064 | 0.075 | 0.125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.150943 | 954 | 28 | 84 | 34.071429 | 0.790123 | 0 | 0 | 0 | 0 | 0 | 0.162474 | 0.02935 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.076923 | 0 | 0.076923 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
879f5cb728abbf0e5820bf60a16e817ad084aa07 | 1,790 | py | Python | sketchify/xdog_blend.py | DBSAMOR/sketch-i2v | 222f06a1983c5b5bb87ce8c74f1d987c2b3d99ed | [
"MIT"
] | 232 | 2019-08-19T01:11:24.000Z | 2022-03-21T13:53:09.000Z | sketchify/xdog_blend.py | DBSAMOR/sketch-i2v | 222f06a1983c5b5bb87ce8c74f1d987c2b3d99ed | [
"MIT"
] | 4 | 2019-09-02T03:20:00.000Z | 2020-02-04T05:27:30.000Z | sketchify/xdog_blend.py | DBSAMOR/sketch-i2v | 222f06a1983c5b5bb87ce8c74f1d987c2b3d99ed | [
"MIT"
] | 34 | 2019-08-19T10:14:26.000Z | 2022-03-23T01:05:43.000Z | import cv2
import numpy as np
from scipy import ndimage
def dog(img, size=(0,0), k=1.6, sigma=0.5, gamma=1):
img1 = cv2.GaussianBlur(img, size, sigma)
img2 = cv2.GaussianBlur(img, size, sigma * k)
return (img1 - gamma * img2)
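# XDoG (eXtended Difference-of-Gaussians), editor's comment: the DoG response
# is soft-thresholded so that values below `epsilon` saturate to white, while
# the remainder pass through a tanh ramp whose steepness is set by `phi`,
# producing the characteristic sketch-like binarisation.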
def xdog(img, sigma=0.5, k=1.6, gamma=1, epsilon=1, phi=1):
aux = dog(img, sigma=sigma, k=k, gamma=gamma) / 255
for i in range(0, aux.shape[0]):
for j in range(0, aux.shape[1]):
if(aux[i, j] < epsilon):
aux[i, j] = 1*255
else:
aux[i, j] = 255*(1 + np.tanh(phi * (aux[i, j])))
return aux
def get_xdog_image(img, sigma=0.4, k=2.5, gamma=0.95, epsilon=-0.5, phi=10**9):
xdog_image = xdog(img, sigma=sigma, k=k, gamma=gamma, epsilon=epsilon, phi=phi).astype(np.uint8)
return xdog_image
def add_intensity(img, intensity):
if intensity == 1:
return img
inten_const = 255.0 ** (1 - intensity)
return (inten_const * (img ** intensity)).astype(np.uint8)
def blend_xdog_and_sketch(illust, sketch, intensity=1.7, degamma=(1/1.5), blend=0, **kwargs):
gray_image = cv2.cvtColor(illust, cv2.COLOR_BGR2GRAY)
gamma_sketch = add_intensity(sketch, intensity)
if blend > 0:
xdog_image = get_xdog_image(gray_image, **kwargs)
xdog_blurred = cv2.GaussianBlur(xdog_image, (5, 5), 1)
xdog_residual_blur = cv2.addWeighted(xdog_blurred, 0.75, xdog_image, 0.25, 0)
if gamma_sketch.shape != xdog_residual_blur.shape:
gamma_sketch = cv2.resize(gamma_sketch, xdog_residual_blur.shape, interpolation=cv2.INTER_AREA)
blended_image = cv2.addWeighted(xdog_residual_blur, blend, gamma_sketch, (1-blend), 0)
else:
blended_image = gamma_sketch
return add_intensity(blended_image, degamma) | 38.913043 | 107 | 0.643575 | 281 | 1,790 | 3.957295 | 0.256228 | 0.056655 | 0.017986 | 0.039568 | 0.122302 | 0.044964 | 0.044964 | 0 | 0 | 0 | 0 | 0.059243 | 0.217318 | 1,790 | 46 | 108 | 38.913043 | 0.734475 | 0 | 0 | 0.054054 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.135135 | false | 0 | 0.081081 | 0 | 0.378378 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87a08ee0b9a91c3600f9993ce0331c05385252cf | 3,033 | py | Python | tests/setup_parser.py | jcao1022/tiflash | 841d4d2c6eeba35ef62c7a8feefc476182175e3d | [
"MIT"
] | 1 | 2019-04-11T05:52:56.000Z | 2019-04-11T05:52:56.000Z | tests/setup_parser.py | jcao1022/tiflash | 841d4d2c6eeba35ef62c7a8feefc476182175e3d | [
"MIT"
] | null | null | null | tests/setup_parser.py | jcao1022/tiflash | 841d4d2c6eeba35ef62c7a8feefc476182175e3d | [
"MIT"
] | null | null | null | import os
import platform
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
class TestSetupError(Exception):
"""Generic Error with parsing Test Setup configuration"""
pass
class TestSetup(object):
"""Class used for accessing various settings in test setup configuartion
file: setup.cfg
"""
def __init__(self):
self.cfg = ConfigParser(allow_no_value=True)
self.cfg.optionxform = str
self.cfg.read("./setup.cfg")
def get_ccs_prefix(self):
"""Returns the set ccs_prefix
Returns:
str: ccs_prefix variable set in setup.cfg
"""
return self.cfg.get('environment', 'ccs_prefix')
def get_ccs_versions(self):
"""Returns a tuple of CCS versions installed
Returns:
tuple: a tuple of ints representing CCS versions installed in test
setup
"""
versions = map(str.strip, self.cfg.get('environment', 'ccs_versions').split(','))
return tuple(versions)
def get_ccs_installs(self):
"""Returns a tuple of all CCS install paths
Returns:
tuple: a tuple of strs being the full paths to each CCS
installation
"""
system = platform.system()
versions = self.get_ccs_versions()
ccs_paths = map(str.strip, self.cfg.get('environment', 'ccs_installs').split(','))
ccs_paths = tuple(ccs_paths)
for path in ccs_paths:
if not os.path.exists(path):
raise TestSetupError("CCS Install: %s could not be found. "
"Remove this ccs version from setup.cfg"
% path)
return tuple(ccs_paths)
def get_target_config_directory(self):
"""Returns the target configuation directory
Returns:
str: Path to target configuration directory
"""
ccxml_dir = self.cfg.get("environment", "ccxml_dir")
if not os.path.exists(ccxml_dir):
raise TestSetupError("Target Config Directory: %s could not"
" be found." % ccxml_dir)
return ccxml_dir
def get_devices(self):
"""Returns a dict of devices with specified configurations (devices.cfg)
Returns:
dict: dict of device dicts in format:
{ devicename:
{
serno: SERNO,
connection: CONN,
devicetype: DEVTYPE
}
}
"""
devices = dict()
device_list = [ dev for dev in self.cfg.options('devices') if
self.cfg.getboolean('devices', dev) ]
for devname in device_list:
dev = dict()
options = self.cfg.options(devname)
for o in options:
dev[o] = self.cfg.get(devname, o)
devices[devname] = dev
return devices
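# --- Editor's usage sketch (not part of the original module) ---
# The setup.cfg layout below is inferred from the accessors above; all
# concrete values are placeholders.
#
#   [environment]
#   ccs_prefix = C:/ti
#   ccs_versions = 7, 8
#   ccs_installs = C:/ti/ccsv7, C:/ti/ccsv8
#   ccxml_dir = C:/ti/ccxml
#
#   [devices]
#   cc3220 = yes
#
#   [cc3220]
#   serno = 00000000
#   connection = Stellaris ICDI
#   devicetype = CC3220SF
#
#   setup = TestSetup()
#   print(setup.get_devices())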
| 28.885714 | 90 | 0.563798 | 333 | 3,033 | 5.03003 | 0.321321 | 0.04597 | 0.029851 | 0.050149 | 0.14209 | 0.041791 | 0.041791 | 0.041791 | 0 | 0 | 0 | 0 | 0.351137 | 3,033 | 104 | 91 | 29.163462 | 0.851118 | 0.285856 | 0 | 0 | 0 | 0 | 0.122141 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.130435 | false | 0.021739 | 0.108696 | 0 | 0.391304 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87a0b5c529797765dcec85fd5ef6d46f2a931cfb | 3,238 | py | Python | vrs/resolver.py | open-voice-network/pyvrs | 73898c53f67d1ad5798e4c39c366a2c168108bfc | [
"Apache-2.0"
] | null | null | null | vrs/resolver.py | open-voice-network/pyvrs | 73898c53f67d1ad5798e4c39c366a2c168108bfc | [
"Apache-2.0"
] | 2 | 2022-01-18T16:18:12.000Z | 2022-03-28T16:08:43.000Z | vrs/resolver.py | open-voice-network/pyvrs | 73898c53f67d1ad5798e4c39c366a2c168108bfc | [
"Apache-2.0"
] | null | null | null | import base64
import dns.resolver
import json
import logging
import requests
import shlex
from vrs import is_base64, is_json
from configparser import ConfigParser
logger = logging.getLogger('pyvrs')
class VRSDecodeError(Exception):
"""Catchall VRS error for decoding issues."""
class Resolver:
"""Base resolver class."""
class RESTResolver(Resolver):
def __init__(self, conf):
self.conf = conf
self.url = self.conf['url']
self.email = self.conf['email']
self.password = self.conf['password']
self.session = requests.Session()
def login(self):
login_url = f"{self.url}/api/login"
login_data = {'email': self.email, 'password': self.password}
logger.debug(f'{login_url} {login_data}')
response = self.session.post(login_url, json=login_data)
response.raise_for_status()
def resolve(self, name):
"""Resolve keywords against known ReST APIs."""
try:
self.login()
records_url = f"{self.url}/api/records/{name}"
logger.debug(f'querying: {records_url}')
response = self.session.get(records_url)
logger.debug(f'response: {response}')
response.raise_for_status()
yield response.text
except Exception as e:
logger.warn(f"{e}")
yield
class DNSResolver(Resolver):
def __init__(self, conf):
self.conf = conf
def resolve(self, name):
"""Resolve any TXT records in <subdomain>.<domain>"""
try:
concat = name + "." + self.conf["hostname"]
answers = dns.resolver.resolve(concat, 'TXT')
logger.debug(f'querying: {answers.qname}')
for a in answers:
yield self.decode(a)
except Exception as e:
            logger.warning(e)
yield
def decode(self, rdata):
logger.debug(f"rdata: '{rdata}'")
try:
txt = (rdata.to_text().encode('raw_unicode_escape')
.decode('unicode_escape').strip("'\""))
logger.debug(f"txt: '{txt}'")
if is_base64(txt):
return str(base64.b64decode(txt), 'utf8').strip()
elif is_json(txt):
return json.loads(txt)
elif all([r in txt for r in ('dest', 'name', 'country')]):
# this is in plaintext, not encoded
d = {}
for i in shlex.split(txt):
d.update([i.split("=")])
return d
else:
return rdata.strings
except Exception as ex:
raise VRSDecodeError(ex)
def GetResolver(conf):
if 'password' in conf:
return RESTResolver(conf)
elif 'hostname' in conf:
return DNSResolver(conf)
else:
raise Exception(f"Invalid config block: {conf}")
def resolve(name, conf):
"""Resolve the name via each resolver config block."""
cp = ConfigParser()
cp.read(conf)
for section in cp.sections():
resolver = GetResolver(cp[section])
logger.debug(f"resolving with section [{section}] ==> {resolver}")
for record in resolver.resolve(name):
yield record
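# --- Editor's usage sketch (not part of the original module) ---
# The config layout is an assumption matching GetResolver's checks: a block
# containing a 'hostname' key selects DNS resolution, one containing a
# 'password' key selects the REST resolver.
#
#   # vrs.conf
#   [dns]
#   hostname = example.com
#
#   for record in resolve('pizza', 'vrs.conf'):
#       print(record)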
| 29.171171 | 74 | 0.57134 | 372 | 3,238 | 4.895161 | 0.306452 | 0.035146 | 0.046129 | 0.020868 | 0.112026 | 0.069193 | 0.03844 | 0.03844 | 0 | 0 | 0 | 0.004895 | 0.306053 | 3,238 | 110 | 75 | 29.436364 | 0.805518 | 0.072267 | 0 | 0.204819 | 0 | 0 | 0.122565 | 0.009738 | 0 | 0 | 0 | 0 | 0 | 1 | 0.096386 | false | 0.036145 | 0.096386 | 0 | 0.313253 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87a167db0b2bc93064d8e1008b52e1b7af27bbdd | 1,166 | py | Python | test/writing/test_template.py | patricksanders/policy_sentry | 3559ce8d3a19728b4f64dfff4cbdf075e7629b39 | [
"MIT"
] | null | null | null | test/writing/test_template.py | patricksanders/policy_sentry | 3559ce8d3a19728b4f64dfff4cbdf075e7629b39 | [
"MIT"
] | 20 | 2020-03-20T06:13:09.000Z | 2022-02-10T18:15:35.000Z | test/writing/test_template.py | ssmbct-netops/policy_sentry | c9f4752c633fe229220b7f476aa766ea65330489 | [
"MIT"
] | null | null | null | import unittest
from policy_sentry.writing.template import create_actions_template, create_crud_template
class TemplateTestCase(unittest.TestCase):
def test_actions_template(self):
desired_msg = """# Generate my policy when I know the Actions
mode: actions
name: myrole
description: '' # For human auditability
role_arn: '' # For human auditability
actions:
- ''"""
actions_template = create_actions_template("myrole")
self.assertEqual(desired_msg, actions_template)
def test_crud_template(self):
desired_msg = """# Generate my policy when I know the access levels and ARNs
mode: crud
name: myrole
description: '' # For human auditability
role_arn: '' # For human auditability
# Insert ARNs under each access level below
# If you do not need to use certain access levels, delete them.
read:
- ''
write:
- ''
list:
- ''
tagging:
- ''
permissions-management:
- ''
# If the policy needs to use IAM actions that cannot be restricted to ARNs,
# like ssm:DescribeParameters, specify those actions here.
wildcard:
- ''"""
crud_template = create_crud_template("myrole")
self.assertEqual(desired_msg, crud_template)
| 28.439024 | 88 | 0.734991 | 149 | 1,166 | 5.597315 | 0.483221 | 0.089928 | 0.095923 | 0.06235 | 0.376499 | 0.376499 | 0.282974 | 0.282974 | 0.282974 | 0.282974 | 0 | 0 | 0.174099 | 1,166 | 40 | 89 | 29.15 | 0.866044 | 0 | 0 | 0.351351 | 0 | 0 | 0.577187 | 0.039451 | 0 | 0 | 0 | 0 | 0.054054 | 1 | 0.054054 | false | 0 | 0.054054 | 0 | 0.135135 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87a60c95cfcb01535bd33a02ec71b3ce88f7fdc1 | 2,566 | py | Python | examples/graph/test_dice.py | HenryKenlay/DeepRobust | ea8871d970257a9c11715cd059a5331177a00395 | [
"MIT"
] | 1 | 2020-06-12T07:45:06.000Z | 2020-06-12T07:45:06.000Z | examples/graph/test_dice.py | lorenzobasile/DeepRobust | 3f56dcc45f1fed788423d32cc179c26513416e2e | [
"MIT"
] | null | null | null | examples/graph/test_dice.py | lorenzobasile/DeepRobust | 3f56dcc45f1fed788423d32cc179c26513416e2e | [
"MIT"
] | null | null | null | import torch
import numpy as np
import torch.nn.functional as F
import torch.optim as optim
from deeprobust.graph.defense import GCN
from deeprobust.graph.global_attack import DICE
from deeprobust.graph.utils import *
from deeprobust.graph.data import Dataset
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=15, help='Random seed.')
parser.add_argument('--dataset', type=str, default='citeseer', choices=['cora', 'cora_ml', 'citeseer', 'polblogs', 'pubmed'], help='dataset')
parser.add_argument('--ptb_rate', type=float, default=0.05, help='pertubation rate')
args = parser.parse_args()
args.cuda = torch.cuda.is_available()
print('cuda: %s' % args.cuda)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
data = Dataset(root='/tmp/', name=args.dataset)
adj, features, labels = data.adj, data.features, data.labels
idx_train, idx_val, idx_test = data.idx_train, data.idx_val, data.idx_test
idx_unlabeled = np.union1d(idx_val, idx_test)
# Setup Attack Model
model = DICE()
n_perturbations = int(args.ptb_rate * (adj.sum()//2))
modified_adj = model.attack(adj, labels, n_perturbations)
adj, features, labels = preprocess(adj, features, labels, preprocess_adj=False, sparse=True, device=device)
modified_adj = normalize_adj(modified_adj)
modified_adj = sparse_mx_to_torch_sparse_tensor(modified_adj)
modified_adj = modified_adj.to(device)
def test(adj):
''' test on GCN '''
# adj = normalize_adj_tensor(adj)
gcn = GCN(nfeat=features.shape[1],
nhid=16,
nclass=labels.max().item() + 1,
dropout=0.5, device=device)
gcn = gcn.to(device)
optimizer = optim.Adam(gcn.parameters(),
lr=0.01, weight_decay=5e-4)
gcn.fit(features, adj, labels, idx_train) # train without model picking
# gcn.fit(features, adj, labels, idx_train, idx_val) # train with validation model picking
output = gcn.output
loss_test = F.nll_loss(output[idx_test], labels[idx_test])
acc_test = accuracy(output[idx_test], labels[idx_test])
print("Test set results:",
"loss= {:.4f}".format(loss_test.item()),
"accuracy= {:.4f}".format(acc_test.item()))
return acc_test.item()
def main():
print('=== testing GCN on original(clean) graph ===')
test(adj)
print('=== testing GCN on perturbed graph ===')
test(modified_adj)
if __name__ == '__main__':
main()
| 32.075 | 141 | 0.697973 | 367 | 2,566 | 4.711172 | 0.340599 | 0.050896 | 0.043956 | 0.038172 | 0.142857 | 0.065934 | 0.035859 | 0 | 0 | 0 | 0 | 0.009731 | 0.159002 | 2,566 | 79 | 142 | 32.481013 | 0.791474 | 0.070148 | 0 | 0 | 0 | 0 | 0.108677 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.036364 | false | 0 | 0.163636 | 0 | 0.218182 | 0.072727 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87a93beab125fd0c5d8d7106fb3c4f852c610456 | 3,742 | py | Python | mzmlripper/logger.py | croningp/mzmlripper | f8c75a3380c9502815a8df4bdf45d372c31275ed | [
"MIT"
] | 1 | 2021-06-15T03:04:04.000Z | 2021-06-15T03:04:04.000Z | mzmlripper/logger.py | croningp/mzmlripper | f8c75a3380c9502815a8df4bdf45d372c31275ed | [
"MIT"
] | null | null | null | mzmlripper/logger.py | croningp/mzmlripper | f8c75a3380c9502815a8df4bdf45d372c31275ed | [
"MIT"
] | 2 | 2021-05-08T06:31:47.000Z | 2021-06-15T03:03:20.000Z | """
.. module:: prototools.logger
    :platform: Unix
:synopsis: Custom logger with ANSI coloring
.. moduleauthor:: Graham Keenan 2020
"""
# System imports
import time
import logging
from typing import Optional
ANSI_COLORS = {
'black': '\u001b[30m',
'red': '\u001b[31m',
'green': '\u001b[32m',
'yellow': '\u001b[33m',
'blue': '\u001b[34m',
'magenta': '\u001b[35m',
'cyan': '\u001b[36m',
'white': '\u001b[37m',
'bold': '\u001b[1m',
'reset': '\u001b[0m'
}
def colour_item(
msg: str, color: Optional[str] = '', bold: Optional[bool] = False
) -> str:
"""Colours a message with an ANSI color and escapes it at the end.
Options for bold text.
Args:
msg (str): Message to colour
color (str): Colour of the text
bold (Optional[bool], optional): Bold the message. Defaults to False.
Returns:
str: ANSI formatted message
"""
color = ANSI_COLORS[color] if color in ANSI_COLORS else ''
return (
f'{color}{ANSI_COLORS["bold"]}{msg}{ANSI_COLORS["reset"]}' if bold
else f'{color}{msg}{ANSI_COLORS["reset"]}'
)
def make_logger(
name: str, filename: Optional[str] = '', debug: Optional[bool] = False
) -> logging.Logger:
"""Creates a logger using the custom ProtoFormatter with options for
file output.
Args:
name (str): Name of the logger
filename (Optional[str], optional): Output log file. Defaults to ''.
debug (Optional[bool], optional): Debug mode. Defaults to False.
Returns:
logging.Logger: Logger
"""
# Get logger and set level
logger = logging.getLogger(name)
level = logging.DEBUG if debug else logging.INFO
logger.setLevel(level)
# Custom ANSI colour formatter
formatter = ProtoFormatter()
# Using file logging, add FileHandler
if filename:
fh = logging.FileHandler(filename=filename)
fh.setLevel(level)
fh.setFormatter(formatter)
logger.addHandler(fh)
# Setup stream handler
sh = logging.StreamHandler()
sh.setLevel(level)
sh.setFormatter(formatter)
logger.addHandler(sh)
logger.propagate = False
return logger
class ProtoFormatter(logging.Formatter):
"""Custom Formatter to support ANSI colouring
Inherits:
logging.Formatter: Base Formatter
"""
def __init__(self):
super().__init__()
def format(self, record: logging.LogRecord) -> str:
"""Formats the LogRecord with custom formatting
Args:
record (logging.LogRecord): Record to format
Returns:
str: Formatted Text
"""
# Get level and level number
        level, levelno = record.levelname, record.levelno
        msg = record.getMessage()  # applies any %-style args to the message
# Colour level name depending on level severity
if levelno == logging.DEBUG:
level = colour_item(level, color='red')
elif levelno == logging.INFO:
level = colour_item(level, color='green')
elif levelno == logging.WARN:
level = colour_item(level, color='yellow', bold=True)
msg = colour_item(msg, color='yellow')
elif levelno == logging.ERROR:
level = colour_item(level, color='red', bold=True)
msg = colour_item(msg, color='red', bold=True)
elif levelno == logging.CRITICAL:
level = colour_item(level, color='red', bold=True)
msg = colour_item(msg, color='red')
# Log the current time
timestamp = time.strftime('%d-%m-%Y|%H:%M:%S')
# Colour the logger name
name = colour_item(record.name, color='cyan')
# Formatted message
return f'[{timestamp}] - {name}::{level} -- {msg}'
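# --- Editor's usage sketch (not part of the original module) ---
if __name__ == '__main__':
    log = make_logger('demo', debug=True)
    log.info('green INFO message')
    log.warning('bold yellow warning')
    log.error('bold red error')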
| 27.313869 | 77 | 0.611972 | 438 | 3,742 | 5.171233 | 0.312785 | 0.04415 | 0.033113 | 0.04415 | 0.100221 | 0.078146 | 0.065784 | 0.05298 | 0.05298 | 0.05298 | 0 | 0.018895 | 0.264564 | 3,742 | 136 | 78 | 27.514706 | 0.804142 | 0.321753 | 0 | 0.032258 | 0 | 0 | 0.137815 | 0.037395 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064516 | false | 0 | 0.048387 | 0 | 0.177419 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87a9a06fcaf0be41d44e4b1eb59937be34e9e629 | 3,236 | py | Python | ros/fetch_vr/scripts/camera_transform_publisher.py | scottwillmoore/fetch_vr | 22c28e6c1d95655806ea2667a4397556bbddd580 | [
"MIT"
] | 1 | 2022-03-06T15:24:38.000Z | 2022-03-06T15:24:38.000Z | ros/fetch_vr/scripts/camera_transform_publisher.py | scottwillmoore/fetch_vr | 22c28e6c1d95655806ea2667a4397556bbddd580 | [
"MIT"
] | null | null | null | ros/fetch_vr/scripts/camera_transform_publisher.py | scottwillmoore/fetch_vr | 22c28e6c1d95655806ea2667a4397556bbddd580 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import aruco_msgs.msg
import geometry_msgs.msg
import rospy
import tf2_ros
from tf import transformations as t
class ExternalCamera():
def __init__(self):
rospy.init_node("camera_transform_publisher")
one_time = rospy.get_param("~one_time", False)
rate = rospy.get_param("~rate", 10)
time_out = rospy.get_param("~time_out", 10.0)
assert isinstance(one_time, bool)
assert isinstance(rate, int)
assert isinstance(time_out, float)
rospy.loginfo("one_time: %s", one_time)
rospy.loginfo("Rate: %f" % rate)
rospy.loginfo("Time out: %d" % time_out)
first_time = True
rate = rospy.Rate(rate)
self.transform_broadcaster = tf2_ros.StaticTransformBroadcaster()
self.marker_subscriber = rospy.Subscriber("external_camera_marker_publisher/markers", aruco_msgs.msg.MarkerArray, self.callback)
def callback(self, marker_array):
# while not rospy.is_shutdown():
# # if not one_time or first_time:
# first_time = False
# try:
# marker_array = rospy.wait_for_message("external_camera_marker_publisher/markers", aruco_msgs.msg.MarkerArray, time_out)
# except rospy.ROSException:
# rospy.logerr("Timed out!")
# raise
for marker in marker_array.markers:
if marker.id == 200:
transform = geometry_msgs.msg.TransformStamped()
transform.header.stamp = rospy.Time.now()
transform.header.frame_id = "marker_200"
transform.child_frame_id = "camera_link"
trans = (marker.pose.pose.position.x, marker.pose.pose.position.y, marker.pose.pose.position.z)
rot = (marker.pose.pose.orientation.x, marker.pose.pose.orientation.y, marker.pose.pose.orientation.z, marker.pose.pose.orientation.w)
transform_mat = t.concatenate_matrices(t.translation_matrix(trans), t.quaternion_matrix(rot))
inv_transform_mat = t.inverse_matrix(transform_mat)
inv_trans = t.translation_from_matrix(inv_transform_mat)
inv_rot = t.quaternion_from_matrix(inv_transform_mat)
transform.transform.translation.x = inv_trans[0]
transform.transform.translation.y = inv_trans[1]
transform.transform.translation.z = inv_trans[2]
transform.transform.rotation.x = inv_rot[0]
transform.transform.rotation.y = inv_rot[1]
transform.transform.rotation.z = inv_rot[2]
transform.transform.rotation.w = inv_rot[3]
try:
self.transform_broadcaster.sendTransform(transform)
print("Published transform from marker to external camera\n")
                except Exception:
print("Marker not yet detected by camera\n")
# self.rate.sleep()
if __name__ == "__main__":
try:
ExternalCamera()
rospy.spin()
except rospy.ROSInterruptException:
pass
| 40.45 | 154 | 0.604759 | 361 | 3,236 | 5.188366 | 0.31856 | 0.037373 | 0.052322 | 0.05339 | 0.089696 | 0.063001 | 0.063001 | 0.063001 | 0.063001 | 0 | 0 | 0.00885 | 0.301607 | 3,236 | 79 | 155 | 40.962025 | 0.819912 | 0.105377 | 0 | 0.038462 | 0 | 0 | 0.082178 | 0.022885 | 0 | 0 | 0 | 0 | 0.057692 | 1 | 0.038462 | false | 0.019231 | 0.096154 | 0 | 0.153846 | 0.038462 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87aa4958b8bdc3ba6a551199bf30e998942db846 | 2,418 | py | Python | src/clusterfuzz/_internal/tests/core/metrics/fuzzer_logs_test.py | mspectorgoogle/clusterfuzz | 44df69cbcb94efc212f27758d45d6ff0f36061e5 | [
"Apache-2.0"
] | 5,023 | 2019-02-07T16:57:56.000Z | 2022-03-31T01:08:05.000Z | src/clusterfuzz/_internal/tests/core/metrics/fuzzer_logs_test.py | mspectorgoogle/clusterfuzz | 44df69cbcb94efc212f27758d45d6ff0f36061e5 | [
"Apache-2.0"
] | 2,303 | 2019-02-07T17:36:36.000Z | 2022-03-31T15:44:38.000Z | src/clusterfuzz/_internal/tests/core/metrics/fuzzer_logs_test.py | mspectorgoogle/clusterfuzz | 44df69cbcb94efc212f27758d45d6ff0f36061e5 | [
"Apache-2.0"
] | 564 | 2019-02-07T17:34:24.000Z | 2022-03-26T09:25:44.000Z | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""fuzzer_logs test."""
import datetime
import unittest
import mock
from clusterfuzz._internal.metrics import fuzzer_logs
from clusterfuzz._internal.system import environment
from clusterfuzz._internal.tests.test_libs import helpers as test_helpers
class FuzzerLogsTest(unittest.TestCase):
  """Tests for logs uploading."""

  def setUp(self):
    test_helpers.patch_environ(self)
    environment.set_value('FUZZER_NAME', 'fuzzer_1')
    environment.set_value('JOB_NAME', 'fake_job')

    # To be used for generation of date and time when uploading a log.
    self.fake_utcnow = datetime.datetime(2017, 3, 21, 11, 15, 13, 666666)
    self.fake_log_time = datetime.datetime(2017, 4, 22, 12, 16, 14, 777777)

    test_helpers.patch(self, [
        'datetime.datetime',
        'clusterfuzz._internal.google_cloud_utils.storage.write_data',
    ])
    self.mock.datetime.utcnow.return_value = self.fake_utcnow

  def test_upload_to_logs(self):
    """Test a simple call to upload_to_logs."""
    mock_gsutil = mock.MagicMock()
    self.mock.write_data.return_value = mock_gsutil

    fuzzer_logs.upload_to_logs('fake-gcs-bucket', 'fake content')
    self.mock.write_data.assert_called_once_with(
        'fake content',
        'gs://fake-gcs-bucket/fuzzer_1/fake_job/2017-03-21/11:15:13:666666.log')

  def test_upload_to_logs_with_all_arguments(self):
    """Test a call to upload_to_logs with all arguments being passed."""
    mock_gsutil = mock.MagicMock()
    self.mock.write_data.return_value = mock_gsutil

    fuzzer_logs.upload_to_logs(
        'gcs-bucket',
        'fake content',
        time=self.fake_log_time,
        fuzzer_name='fuzzer_2',
        job_type='another_job')
    self.mock.write_data.assert_called_once_with(
        'fake content',
        'gs://gcs-bucket/fuzzer_2/another_job/2017-04-22/12:16:14:777777.log')
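# --- Illustrative sketch (not part of the original test): the assertions above
# imply the log path layout that upload_to_logs builds, namely
#   gs://<bucket>/<fuzzer_name>/<job_type>/<YYYY-MM-DD>/<HH:MM:SS:ffffff>.log
# A hypothetical helper mirroring that layout, checked against the first test:
import datetime

def log_path(bucket, fuzzer, job, t):
  """Hypothetical helper reconstructing the asserted path format."""
  return 'gs://%s/%s/%s/%s/%s.log' % (
      bucket, fuzzer, job, t.strftime('%Y-%m-%d'), t.strftime('%H:%M:%S:%f'))

assert (log_path('fake-gcs-bucket', 'fuzzer_1', 'fake_job',
                 datetime.datetime(2017, 3, 21, 11, 15, 13, 666666)) ==
        'gs://fake-gcs-bucket/fuzzer_1/fake_job/2017-03-21/11:15:13:666666.log')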
| 36.636364 | 80 | 0.729942 | 356 | 2,418 | 4.761236 | 0.396067 | 0.035398 | 0.042478 | 0.040118 | 0.252507 | 0.189971 | 0.156932 | 0.156932 | 0.156932 | 0.156932 | 0 | 0.044754 | 0.168321 | 2,418 | 65 | 81 | 37.2 | 0.79811 | 0.313896 | 0 | 0.243243 | 0 | 0.054054 | 0.208615 | 0.12 | 0 | 0 | 0 | 0 | 0.054054 | 1 | 0.081081 | false | 0 | 0.162162 | 0 | 0.27027 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87aab1ba0bcf39dd2d9b04cea80403f3afe84bdf | 2,192 | py | Python | test_unitest_fool.py | HeyArtem/python_less_10_HW | addb7d9467acfa14035542cf020482a403af9543 | [
"MIT"
] | null | null | null | test_unitest_fool.py | HeyArtem/python_less_10_HW | addb7d9467acfa14035542cf020482a403af9543 | [
"MIT"
] | null | null | null | test_unitest_fool.py | HeyArtem/python_less_10_HW | addb7d9467acfa14035542cf020482a403af9543 | [
"MIT"
] | null | null | null | import unittest
from game_fool import Card
from game_fool import Deck
class Test_game_fool(unittest.TestCase):
def test_1_fool_init(self):
'''
Тест1
Создал карту с мастью (suit)=2
и достоинством (rank)=3
Здесь сравниваю suit
'''
game_foll_unitest = Card(2,3)
self.assertEqual(2, game_foll_unitest.suit)
def test_2_fool_rank(self):
'''
Тест 2
Создал карту с мастью (suit)=2
и достоинством (rank)=3
Здесь сравниваю rank
'''
game_foll_unitest = Card(2, 3)
self.assertEqual(3, game_foll_unitest.rank)
def test_3_fool_str(self):
'''
Тест 3
Тестирую строчное представление карты
Создал карту Card(2(масть-Черви), 3(ранк-8))
'''
game_foll_unitest = Card(2, 3)
self.assertEqual('8 of Черви ♡', str(game_foll_unitest))
def test_4_fool_init_Deck(self):
'''
Тест 4
Тестирую количество карт в созданной колоде
Их должно быть 36
'''
game_foll_unitest = Deck()
self.assertEqual(36, (len(game_foll_unitest.cards)))
def test_5_fool_init_shuffle(self):
'''
Тест 5
Тестирую метод shuffle
тасую карты в колоде
'''
game_foll_unitest = Deck() # создали колоду game_foll_unitest -> колода версия 1
cards_before = game_foll_unitest.cards[:] # создали перемннную cards_before которая равна колода версия 1 (сохранение последовательности добиваемя срезом)
swich = False # создали переменную Ложь
game_foll_unitest.shuffle() # тасуем колоду
cards_after = game_foll_unitest.cards # создали пременную которой присвоено перетасованная колоду версии 1, теперь это колода -> Версия 2
for i,j in zip(cards_before, cards_after): # сравниваем по позициям колоды Версия 1 и версия 2 если есть расхождения, то переменной sich присваиватся True
if i != j:
swich = True
self.assertEqual(True, swich) # Спавниваем два значения True и swich (тоже будет True, если карты перетасованы)
if __name__ == '__main__':
unittest.main()
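# --- Illustrative note (not part of the original tests): Test 5 could be
# written more compactly. Assuming Card objects compare by identity or value,
# a hedged alternative is:
#
#     deck = Deck()
#     before = deck.cards[:]
#     deck.shuffle()
#     self.assertNotEqual(before, deck.cards)
#
# This keeps the intent (the order must change) without the manual flag loop,
# though with any shuffle there is a tiny chance the original order comes back
# and either version of the test fails spuriously.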
| 31.768116 | 162 | 0.639599 | 282 | 2,192 | 4.762411 | 0.386525 | 0.077439 | 0.145197 | 0.042442 | 0.202532 | 0.162323 | 0.162323 | 0.162323 | 0.081906 | 0.081906 | 0 | 0.022959 | 0.284672 | 2,192 | 68 | 163 | 32.235294 | 0.832908 | 0.394161 | 0 | 0.178571 | 0 | 0 | 0.018018 | 0 | 0 | 0 | 0 | 0 | 0.178571 | 1 | 0.178571 | false | 0 | 0.107143 | 0 | 0.321429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87af438c04c9d0f92c9bbe504b18984d32f16dff | 13,009 | py | Python | causalml/inference/meta/base.py | rainfireliang/causalml | d58024d8de4ab6136c5519949b58a22dd885df29 | [
"Apache-2.0"
] | 2,919 | 2019-08-12T23:02:10.000Z | 2022-03-31T21:59:34.000Z | causalml/inference/meta/base.py | rainfireliang/causalml | d58024d8de4ab6136c5519949b58a22dd885df29 | [
"Apache-2.0"
] | 317 | 2019-08-13T14:16:22.000Z | 2022-03-26T08:44:06.000Z | causalml/inference/meta/base.py | rainfireliang/causalml | d58024d8de4ab6136c5519949b58a22dd885df29 | [
"Apache-2.0"
] | 466 | 2019-08-18T01:45:14.000Z | 2022-03-31T08:11:53.000Z | from abc import ABCMeta, abstractclassmethod
import logging
import numpy as np
import pandas as pd
from causalml.inference.meta.explainer import Explainer
from causalml.inference.meta.utils import check_p_conditions, convert_pd_to_np
from causalml.propensity import compute_propensity_score
logger = logging.getLogger('causalml')
class BaseLearner(metaclass=ABCMeta):

    @abstractmethod
    def fit(self, X, treatment, y, p=None):
        pass

    @abstractmethod
    def predict(self, X, treatment=None, y=None, p=None, return_components=False, verbose=True):
        pass

    def fit_predict(self, X, treatment, y, p=None, return_ci=False, n_bootstraps=1000, bootstrap_size=10000,
                    return_components=False, verbose=True):
        self.fit(X, treatment, y, p)
        return self.predict(X, treatment, y, p, return_components, verbose)

    @abstractmethod
    def estimate_ate(self, X, treatment, y, p=None, bootstrap_ci=False, n_bootstraps=1000, bootstrap_size=10000):
        pass
    def bootstrap(self, X, treatment, y, p=None, size=10000):
        """Runs a single bootstrap. Fits on a bootstrapped sample, then predicts on the whole population."""
        idxs = np.random.choice(np.arange(0, X.shape[0]), size=size)
        X_b = X[idxs]

        if p is not None:
            p_b = {group: _p[idxs] for group, _p in p.items()}
        else:
            p_b = None

        treatment_b = treatment[idxs]
        y_b = y[idxs]
        self.fit(X=X_b, treatment=treatment_b, y=y_b, p=p_b)
        return self.predict(X=X, p=p)

    @staticmethod
    def _format_p(p, t_groups):
        """Format propensity scores into a dictionary of {treatment group: propensity scores}.

        Args:
            p (np.ndarray, pd.Series, or dict): propensity scores
            t_groups (list): treatment group names.

        Returns:
            dict of {treatment group: propensity scores}
        """
        check_p_conditions(p, t_groups)

        if isinstance(p, (np.ndarray, pd.Series)):
            treatment_name = t_groups[0]
            p = {treatment_name: convert_pd_to_np(p)}
        elif isinstance(p, dict):
            p = {treatment_name: convert_pd_to_np(_p) for treatment_name, _p in p.items()}

        return p

    def _set_propensity_models(self, X, treatment, y):
        """Set self.propensity and self.propensity_models.

        It trains propensity models for all treatment groups, saves them in self.propensity_models, and
        saves the propensity scores in self.propensity, in dictionaries with treatment groups as keys.

        It will use self.model_p if available to train propensity models. Otherwise, it will use a default
        PropensityModel (i.e. ElasticNetPropensityModel).

        Args:
            X (np.matrix or np.array or pd.Dataframe): a feature matrix
            treatment (np.array or pd.Series): a treatment vector
            y (np.array or pd.Series): an outcome vector
        """
        logger.info('Generating propensity score')
        p = dict()
        p_model = dict()

        for group in self.t_groups:
            mask = (treatment == group) | (treatment == self.control_name)
            treatment_filt = treatment[mask]
            X_filt = X[mask]
            w_filt = (treatment_filt == group).astype(int)
            w = (treatment == group).astype(int)
            propensity_model = self.model_p if hasattr(self, 'model_p') else None
            p[group], p_model[group] = compute_propensity_score(X=X_filt, treatment=w_filt,
                                                                p_model=propensity_model,
                                                                X_pred=X, treatment_pred=w)
        self.propensity_model = p_model
        self.propensity = p

    def get_importance(self, X=None, tau=None, model_tau_feature=None, features=None, method='auto', normalize=True,
                       test_size=0.3, random_state=None):
        """
        Builds a model (using X to predict estimated/actual tau), and then calculates feature importances
        based on a specified method.

        Currently supported methods are:
            - auto (calculates importance based on the estimator's default implementation of feature importance;
                    the estimator must be tree-based)
                    Note: if none is provided, it uses lightgbm's LGBMRegressor as the estimator, and "gain" as
                    the importance type
            - permutation (calculates importance based on the mean decrease in accuracy when a feature column is
                           permuted; the estimator can be any form)
        Hint: for permutation, downsample the data for better performance, especially if X.shape[1] is large

        Args:
            X (np.matrix or np.array or pd.Dataframe): a feature matrix
            tau (np.array): a treatment effect vector (estimated/actual)
            model_tau_feature (sklearn/lightgbm/xgboost model object): an unfitted model object
            features (np.array): list/array of feature names. If None, an enumerated list will be used
            method (str): auto, permutation
            normalize (bool): normalize by the sum of importances if method=auto (defaults to True)
            test_size (float/int): if float, represents the proportion of the dataset to include in the test split.
                                   If int, represents the absolute number of test samples (used for estimating
                                   permutation importance)
            random_state (int/RandomState instance/None): random state used in permutation importance estimation
        """
        explainer = Explainer(method=method, control_name=self.control_name,
                              X=X, tau=tau, model_tau=model_tau_feature,
                              features=features, classes=self._classes, normalize=normalize,
                              test_size=test_size, random_state=random_state)
        return explainer.get_importance()

    def get_shap_values(self, X=None, model_tau_feature=None, tau=None, features=None):
        """
        Builds a model (using X to predict estimated/actual tau), and then calculates shapley values.

        Args:
            X (np.matrix or np.array or pd.Dataframe): a feature matrix
            tau (np.array): a treatment effect vector (estimated/actual)
            model_tau_feature (sklearn/lightgbm/xgboost model object): an unfitted model object
            features (optional, np.array): list/array of feature names. If None, an enumerated list will be used.
        """
        explainer = Explainer(method='shapley', control_name=self.control_name,
                              X=X, tau=tau, model_tau=model_tau_feature,
                              features=features, classes=self._classes)
        return explainer.get_shap_values()

    def plot_importance(self, X=None, tau=None, model_tau_feature=None, features=None, method='auto', normalize=True,
                        test_size=0.3, random_state=None):
        """
        Builds a model (using X to predict estimated/actual tau), and then plots feature importances
        based on a specified method.

        Currently supported methods are:
            - auto (calculates importance based on the estimator's default implementation of feature importance;
                    the estimator must be tree-based)
                    Note: if none is provided, it uses lightgbm's LGBMRegressor as the estimator, and "gain" as
                    the importance type
            - permutation (calculates importance based on the mean decrease in accuracy when a feature column is
                           permuted; the estimator can be any form)
        Hint: for permutation, downsample the data for better performance, especially if X.shape[1] is large

        Args:
            X (np.matrix or np.array or pd.Dataframe): a feature matrix
            tau (np.array): a treatment effect vector (estimated/actual)
            model_tau_feature (sklearn/lightgbm/xgboost model object): an unfitted model object
            features (optional, np.array): list/array of feature names. If None, an enumerated list will be used
            method (str): auto, permutation
            normalize (bool): normalize by the sum of importances if method=auto (defaults to True)
            test_size (float/int): if float, represents the proportion of the dataset to include in the test split.
                                   If int, represents the absolute number of test samples (used for estimating
                                   permutation importance)
            random_state (int/RandomState instance/None): random state used in permutation importance estimation
        """
        explainer = Explainer(method=method, control_name=self.control_name,
                              X=X, tau=tau, model_tau=model_tau_feature,
                              features=features, classes=self._classes, normalize=normalize,
                              test_size=test_size, random_state=random_state)
        explainer.plot_importance()

    def plot_shap_values(self, X=None, tau=None, model_tau_feature=None, features=None, shap_dict=None, **kwargs):
        """
        Plots the distribution of shapley values.

        If shapley values have been pre-computed, pass them through the shap_dict parameter.
        If shap_dict is not provided, this builds a new model (using X to predict estimated/actual tau),
        and then calculates shapley values.

        Args:
            X (np.matrix or np.array or pd.Dataframe): a feature matrix. Required if shap_dict is None.
            tau (np.array): a treatment effect vector (estimated/actual)
            model_tau_feature (sklearn/lightgbm/xgboost model object): an unfitted model object
            features (optional, np.array): list/array of feature names. If None, an enumerated list will be used.
            shap_dict (optional, dict): a dict of shapley value matrices. If None, shap_dict will be computed.
        """
        override_checks = False if shap_dict is None else True
        explainer = Explainer(method='shapley', control_name=self.control_name,
                              X=X, tau=tau, model_tau=model_tau_feature,
                              features=features, override_checks=override_checks, classes=self._classes)
        explainer.plot_shap_values(shap_dict=shap_dict)

    def plot_shap_dependence(self, treatment_group, feature_idx, X, tau, model_tau_feature=None, features=None,
                             shap_dict=None, interaction_idx='auto', **kwargs):
        """
        Plots the dependency of shapley values for a specified feature, colored by an interaction feature.

        If shapley values have been pre-computed, pass them through the shap_dict parameter.
        If shap_dict is not provided, this builds a new model (using X to predict estimated/actual tau),
        and then calculates shapley values.

        This plots the value of the feature on the x-axis and the SHAP value of the same feature
        on the y-axis. This shows how the model depends on the given feature, and is like a
        richer extension of the classical partial dependence plots. Vertical dispersion of the
        data points represents interaction effects.

        Args:
            treatment_group (str or int): name of the treatment group to create the dependency plot on
            feature_idx (str or int): feature index / name to create the dependency plot on
            X (np.matrix or np.array or pd.Dataframe): a feature matrix
            tau (np.array): a treatment effect vector (estimated/actual)
            model_tau_feature (sklearn/lightgbm/xgboost model object): an unfitted model object
            features (optional, np.array): list/array of feature names. If None, an enumerated list will be used.
            shap_dict (optional, dict): a dict of shapley value matrices. If None, shap_dict will be computed.
            interaction_idx (optional, str or int): feature index / name used in the coloring scheme as the
                interaction feature. If "auto" then shap.common.approximate_interactions is used to pick what
                seems to be the strongest interaction (note that to find the true strongest interaction you
                need to compute the SHAP interaction values).
        """
        override_checks = False if shap_dict is None else True
        explainer = Explainer(method='shapley', control_name=self.control_name,
                              X=X, tau=tau, model_tau=model_tau_feature,
                              features=features, override_checks=override_checks,
                              classes=self._classes)
        explainer.plot_shap_dependence(treatment_group=treatment_group,
                                       feature_idx=feature_idx,
                                       shap_dict=shap_dict,
                                       interaction_idx=interaction_idx,
                                       **kwargs)
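# --- Illustrative sketch (not part of the original module): BaseLearner is
# abstract, so `fit`, `predict`, and `estimate_ate` must be supplied by a
# subclass. A minimal hypothetical subclass, just to show the contract (a real
# meta-learner in causalml does far more); `np` comes from the module imports:
class ConstantEffectLearner(BaseLearner):
    """Toy learner that estimates a single constant treatment effect."""

    def __init__(self, control_name=0):
        self.control_name = control_name
        self._classes = {}
        self.ate = 0.0

    def fit(self, X, treatment, y, p=None):
        # Naive difference in means between treated and control units.
        mask = treatment != self.control_name
        self.ate = y[mask].mean() - y[~mask].mean()

    def predict(self, X, treatment=None, y=None, p=None, return_components=False, verbose=True):
        # Every unit gets the same estimated effect.
        return np.full((X.shape[0], 1), self.ate)

    def estimate_ate(self, X, treatment, y, p=None, bootstrap_ci=False, n_bootstraps=1000, bootstrap_size=10000):
        self.fit(X, treatment, y, p)
        return self.ate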
| 54.890295 | 120 | 0.639019 | 1,648 | 13,009 | 4.926578 | 0.165049 | 0.019707 | 0.027713 | 0.010839 | 0.654884 | 0.612268 | 0.596502 | 0.596502 | 0.579751 | 0.572977 | 0 | 0.003468 | 0.290722 | 13,009 | 236 | 121 | 55.122881 | 0.87645 | 0.50711 | 0 | 0.252632 | 0 | 0 | 0.013453 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.126316 | false | 0.031579 | 0.115789 | 0 | 0.305263 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87b274375a58a74d68ee6f91801f5f5d232a7bb2 | 16,887 | py | Python | archive/preprocess.py | whashi44/uscis-analysis | 975402c77cb8e74654f2568117c08af190116ce1 | [
"MIT"
] | null | null | null | archive/preprocess.py | whashi44/uscis-analysis | 975402c77cb8e74654f2568117c08af190116ce1 | [
"MIT"
] | null | null | null | archive/preprocess.py | whashi44/uscis-analysis | 975402c77cb8e74654f2568117c08af190116ce1 | [
"MIT"
] | null | null | null | """
Preprocess the uscis I485 Adjustment of status data
1. Download the csv files from the website
2. Rename the file and omit unnecessary files
3. Extract information from csv and save it into csv file
Currently works from 2014 qtr 1 through 2019 qtr 2
"""
# standard library
from os.path import basename
import os
from urllib.parse import urljoin
import re
import shutil
import csv
# External library
import requests
from bs4 import BeautifulSoup
from natsort import natsorted
import numpy as np
import pandas as pd
from fiscalyear import FiscalQuarter
def main():
data_path = "data"
raw_path = "raw"
# download(raw_path)
# rename(data_path, raw_path)
data, header = extract(data_path)
modify(data, header)
# -----------------------------------------------------------------------------------------------------
def download(path="raw"):
    """
    Automatically download all the csv files from the USCIS website with the specified url.
    """
    try:
        print(f"Making folder: {path}")
        os.mkdir(path)
    except FileExistsError:
        raise
    # finally:
    #     os.chdir(path)
    #     print(f"Changed directory to: {path}")

    base = "https://www.uscis.gov/tools/reports-studies/immigration-forms-data?topic_id=20658&field_native_doc_issue_date_value%5Bvalue%5D%5Bmonth%5D=&field_native_doc_issue_date_value_1%5Bvalue%5D%5Byear%5D=&combined=&items_per_page=100"
    with requests.Session() as s:
        # stream=True makes this lazy and parses faster
        url = s.get(base, stream=True).text
        # Specifying the parser is necessary to avoid a warning
        soup = BeautifulSoup(url, "html.parser")
        # creating a generator with all the urls that end with .csv;
        # soup.select would include other unnecessary html parameters, hence urljoin is used to extract just the href
        for link in (urljoin("", a["href"]) for a in soup.select("a[href$='.csv']")):
            # Since the link still has a value such as "https://....", basename() is used to extract only the csv file name to open
            file = basename(link)
            # Creating a path to the raw folder to save the file
            file_path = f"{path}/{file}"
            with open(file_path, "wb") as write_file:
                print(f"saving .csv file: {link} to path")
                write_file.write(requests.get(link).content)
# -----------------------------------------------------------------------------------------------------
def rename(data_path="data", raw_path="raw"):
    """
    Rename files because the original file names had inconsistencies.
    Remove other files that are not useful for analysis (i.e. ones with inconsistent formats, especially before 2014).
    Remove the fy 2013 csv because its format was completely different.
    """
    # data_path = "data"
    # raw_path = "raw"
    # make data deposit directory
    try:
        print(f"Making folder: {data_path}")
        os.mkdir(data_path)
    except FileExistsError:
        raise
    # finally:
    #     # Change directory to raw so that raw files can be retrieved
    #     os.chdir(raw_path)
    #     print(f"Changed directory to: {raw_path}")

    # Get all the files from the raw folder
    all_files = os.listdir(raw_path)
    # Extracting useful files
    files = [file for file in all_files if "fy" in file]
    # Extracting unuseful files
    remove_files = [file for file in all_files if "fy" not in file]
    years = []
    quarters = []
    # Find year and quarter information from the file name, e.g. "I485_data_fy2014_qtr3.csv"
    for file in files:
        numbers = re.findall(
            r"\d+", file  # for numbers
        )  # 0th is I485, 1st is fiscal year, 2nd is quarter
        years.append(numbers[1])
        quarters.append(numbers[2])
    # Rename files to a uniform format, and save a copy to the "data" folder
    for file, year, quarter in zip(files, years, quarters):
        print(f"Copying and renaming the filename from: \n{file}")
        new_name = f"I485_data_fy{year}_qtr{quarter}.csv"
        shutil.copyfile(f"{raw_path}/{file}", f"{data_path}/{new_name}")
        print(f"To: {new_name}")
    # remove the special file, which has an inconsistent format
    print("Removing special file, the 2013 quarter 3, due to its inconsistent format")
    os.remove(f"{data_path}/I485_data_fy2013_qtr3.csv")
# -----------------------------------------------------------------------------------------------------
def extract(data_path="data"):
    """Read csv files from the data folder and perform the following:

    1. Extract header information from the first file, assuming all other files have the same/similar header information
    2. Extract state and city application information
    3. Convert them to numpy arrays and return
    """
    # # Change directory to data folder
    # try:
    #     os.chdir("data")
    # except FileNotFoundError:
    #     raise FileNotFoundError

    # Grab all the files so you can iterate through
    files = os.listdir(data_path)
    # I want to use the newest report to extract the basic header because it has the most information.
    files = natsorted(files, reverse=True)

    # Grab header information
    with open(f"{data_path}/{files[0]}", "r") as read_file:
        # read the file as csv; the reader automatically skips commas inside quotes
        csv_file = csv.reader(read_file, delimiter=",")
        header = []
        for row in csv_file:
            # removing leading and trailing white space
            row = list(map(str.strip, row))
            # lower casing all the items to avoid word mismatches
            row = list(map(str.lower, row))
            # for the special case of 2019 qtr 1 and qtr 2
            other_count = 1
            # Looking at the file structure, there is a category for the green card application,
            # as well as the result of the application for each category, hence concatenation is
            # appropriate to increase consistency and uniformity.
            # pre-2017qtr1 has family-based1, instead of family-based
            if "family-based" in row or "family-based1" in row:
                category_name = ""  # for "family-based", "employment"
                categories = row  # for green card category
                results = next(csv_file)  # for application status
                found_family = False  # flag
                found_other = False
                # For concatenating the category and result
                for category, status in zip(categories, results):
                    # Checking the condition: if a category name appears, store the category name.
                    # If not, then use the previous category name.
                    # Then, concatenate the category name and the status with ":".
                    # For family based green card
                    if "family" in category:
                        category_name = "Family"
                        # family should come first, hence the flag is true
                        found_family = True
                    # For employment based green card
                    elif "employment" in category:
                        category_name = "Employment"
                    # For humanitarian based green card
                    elif "humanitarian" in category:
                        category_name = "Humanitarian"
                    # For other category
                    elif "other" in category:
                        category_name = "Other"
                        found_other = True
                    # For total count of applications
                    elif "total" in category:
                        category_name = "Total"
                    # For 2019 qtr 1 and qtr 2 with a shifted "total"
                    elif other_count == 4:
                        category_name = "Total"
                    # For keeping track of "other" to make sure "total" is included
                    elif found_other:
                        other_count += 1
                    # For the first couple of empty cases
                    elif not found_family:
                        pass
                    # There are some numbers after the result (i.e. Application2), so strip those
                    status = "".join(i for i in status if not i.isdigit())
                    # concatenate to create a better category
                    value = category_name + ":" + status
                    header.append(value)

    # Fill those empty header entries
    header[0:3] = ["State", "City", "Abbreviation"]
    # eliminate the empty strings at the end
    header = header[0:23]
    # Add year and quarter
    header.append("Year")
    header.append("Quarter")

    # Grab state and city information
    city_cases = []
    for file in files:
        print(f"working on file: {file}")
        # Find the year and quarter from the file name
        numbers = re.findall(
            r"\d+", file  # for numbers
        )  # 0th is I485, 1st is fiscal year, 2nd is quarter
        year = numbers[1]
        quarter = numbers[2]
        with open(f"{data_path}/{file}", "r") as read_file:
            # csv_file is a list of lists
            csv_file = csv.reader(read_file, delimiter=",")
            # Each row is a list
            for row in csv_file:
                # removing leading and trailing white space
                row = list(map(str.strip, row))
                # lower casing for case-insensitive comparison
                row = list(map(str.lower, row))
                # If it finds the section with the total number of cases, store that
                if "total" in row[0]:
                    total_case_numbers = row
                # For looping through states:
                # if the first column (state) is alabama, or alaska (before 2017_qtr3)
                if row[0] == "alabama" or row[0] == "alaska":
                    # Loop until the final city, vermont
                    while row[1] != "vermont":
                        # if the 1st column is not empty, this row is a state
                        if row[0] != "":
                            # grab the state name
                            state_name = row[0].title()
                            # Special case in 2017, 1st quarter, for guam: the row is shifted, so we need to grab it now
                            if (
                                (year == "2017")
                                and (quarter == "1")
                                and (state_name == "Guam")
                            ):
                                # Grab the current line because it has all the information
                                row_with_state = row
                                row_with_state[0] = state_name
                                # check the next line, which has the city name
                                row = next(csv_file)
                                # removing leading and trailing white space
                                row = list(map(str.strip, row))
                                # create lower case
                                row = list(map(str.lower, row))
                                # Grab the city name
                                row_with_state[1] = row[1].title()
                                # capitalize the state abbreviation
                                row_with_state[2] = row[2].upper()
                                row_with_state[23] = year
                                row_with_state[24] = quarter
                        # if the 1st column is empty, this row is a city.
                        # Some years repeat the header in the middle of the file, hence the 2nd condition (see 2018 qtr 1, Kentucky)
                        elif row[0] == "" and row[1] != "":
                            row_with_state = row
                            # adding the state name to the initial part
                            row_with_state[0] = state_name
                            # initialize the city name
                            row_with_state[1] = row_with_state[1].title()
                            # capitalize the state abbreviation
                            row_with_state[2] = row_with_state[2].upper()
                            # Some years have empty strings at the end of the row, hence simply substitute
                            try:
                                row_with_state[23] = year
                                row_with_state[24] = quarter
                            # Some rows do not have empty strings at the end, hence handle that
                            except IndexError:
                                # the fy 2014 files have no city abbreviation; insert an empty string to avoid an index error later on
                                if year == "2014":
                                    row_with_state.insert(2, "")
                                # otherwise append instead of inject
                                row_with_state.append(year)
                                row_with_state.append(quarter)
                            # Add to the city list
                            city_cases.append(row_with_state)
                        # keep checking the next row
                        row = next(csv_file)
                        # removing leading and trailing white space
                        row = list(map(str.strip, row))
                        row = list(map(str.lower, row))  # create lower case

    # convert the list of lists to an array of arrays
    city_cases = np.array([np.array(x)[0:25] for x in city_cases])
    # print(np.shape(city_cases))
    # convert the list to an array
    header = np.array(header)

    print("Converting empty strings, 'd' and 'D' to NaN")
    # Convert the empty strings and d or D to NaN
    city_cases[city_cases == ""] = np.NaN
    city_cases[city_cases == "d"] = np.NaN
    city_cases[city_cases == "D"] = np.NaN
    # Convert the hyphen to 0
    print("Converting '-' to 0")
    city_cases[city_cases == "-"] = 0

    return city_cases, header
def modify(data, header):
    """Take the data and convert it to a pandas data frame."""
    # Dataframe for easier manipulation
    df_original = pd.DataFrame(data=data, columns=header)
    # We don't need abbreviations for cities, so drop those
    df_original = df_original.drop(columns="Abbreviation")
    # Currently, there are excess columns, so simplify by adding a category column,
    # so there will be only columns for State, City, Received, Approved, Denied, Pending, Category.
    # First, let's slice the data frame into location (state & city), family, employment, humanitarian, other, total, and time (year & quarter).
    # Create a copy to avoid SettingWithCopyWarning
    location = df_original.iloc[:, 0:2].copy()
    family = df_original.iloc[:, 2:6].copy()
    employment = df_original.iloc[:, 6:10].copy()
    humanitarian = df_original.iloc[:, 10:14].copy()
    other = df_original.iloc[:, 14:18].copy()
    total = df_original.iloc[:, 18:22].copy()
    time = df_original.iloc[:, 22:24].copy()

    # Using the fiscalyear package, we can identify the date and year.
    # The start attribute indicates the start of the fiscal quarter; date() is simply to return only dates, not times
    time["Start_date"] = time.apply(
        lambda row: FiscalQuarter(row.Year, row.Quarter).start.date(), axis=1
    )
    time["End_date"] = time.apply(
        lambda row: FiscalQuarter(row.Year, row.Quarter).end.date(), axis=1
    )

    # Store all data frames in a list to loop through
    all_df = [family, employment, humanitarian, other, total]
    # Let's rename the column names, and then add a "Category" column with the corresponding names
    category_list = ["Family", "Employment", "Humanitarian", "Other", "Total"]
    # I also want to change the names of the columns
    new_names = ["Received", "Approved", "Denied", "Pending"]
    for i, df in enumerate(all_df):
        # Grab column names
        col_names = list(df.columns.values)
        # Create a dictionary: keys = col_names, values = new_names
        name_dict = dict(zip(col_names, new_names))
        # Rename the columns, i.e. Family:application_received -> Received
        df = df.rename(columns=name_dict)
        # Keep the category, i.e. Family, Employment
        df["Category"] = category_list[i]
        # concatenate location (state, city), this data frame, and time
        df = pd.concat([location, df, time], axis=1, sort=False)
        all_df[i] = df

    df_final = pd.concat(all_df)
    # There are annoying commas in the numbers, so remove those
    for col in new_names:
        df_final[col] = df_final[col].str.replace(",", "")

    save_file = "I485_data_all.csv"
    # save to a csv file, without the index
    df_final.to_csv(save_file, index=False)
    print(f"saved to {save_file}")
    # print(df_final)


if __name__ == "__main__":
    main()
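# --- Illustrative sketch (not part of the original script): modify() relies on
# the fiscalyear package to turn (Year, Quarter) pairs into calendar dates. A
# quick standalone check of that behavior (fiscalyear defaults to the US
# federal fiscal calendar, which is what the USCIS reports use):
from fiscalyear import FiscalQuarter

q = FiscalQuarter(2014, 1)
print(q.start.date(), q.end.date())  # fiscal Q1 2014 runs 2013-10-01 through 2013-12-31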
| 43.523196 | 238 | 0.561142 | 2,076 | 16,887 | 4.473988 | 0.214836 | 0.013566 | 0.023256 | 0.011197 | 0.177864 | 0.138351 | 0.102175 | 0.089901 | 0.089901 | 0.077735 | 0 | 0.018608 | 0.341268 | 16,887 | 387 | 239 | 43.635659 | 0.816343 | 0.394327 | 0 | 0.192708 | 0 | 0.005208 | 0.108208 | 0.011569 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026042 | false | 0.005208 | 0.0625 | 0 | 0.09375 | 0.052083 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87b44680c68879d718bb0507b7b071f19f8ece93 | 4,709 | py | Python | mdbuild.py | c0d3z3r0/mdBuild | b1b2e3cf90bc3d3636b8d1e35babf75ed2f5409a | [
"MIT"
] | null | null | null | mdbuild.py | c0d3z3r0/mdBuild | b1b2e3cf90bc3d3636b8d1e35babf75ed2f5409a | [
"MIT"
] | null | null | null | mdbuild.py | c0d3z3r0/mdBuild | b1b2e3cf90bc3d3636b8d1e35babf75ed2f5409a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
__author__ = 'Michael Niewoehner <c0d3z3r0>'
__email__ = 'mniewoeh@stud.hs-offenburg.de'
import os
import sys
import re
import base64
import subprocess
import argparse
def checkDependencies():
dep = ['hoedown', 'wkhtmltopdf']
missing = []
for d in dep:
if subprocess.getstatusoutput('which ' + d)[0]:
missing.append(d)
if missing:
print("Please install missing dependencies: " + ', '.join(missing))
sys.exit(1)
def readFileToList(file):
f = open(file, 'r')
return f.readlines()
def file2base64(file):
try:
f = open(file, 'rb')
return base64.b64encode(f.read()).decode()
except FileNotFoundError:
return ""
def writeListToFile(file, lines):
f = open(file, 'w')
f.writelines(lines)
f.close()
def readStyles(styles):
st = []
for style in styles:
s = readFileToList(style)
s.insert(0, '<style type="text/css">\n')
s.append('</style>\n')
st.extend(s)
return st
def getHeader():
header = []
header_html = """\
<!DOCTYPE html><html>
<head>
<meta charset="utf-8">
<title>%s</title>
</head>
<body>""" % out_fname
header.extend(header_html.splitlines(keepends=True))
styles = readStyles([
'style/GitHub2.css', 'style/prism.css', 'style/custom.css'
])
for s in styles:
header.insert(-2, s)
return header
def getFooter():
footer = []
footer.append('<script type="text/javascript">\n')
footer.extend(readFileToList('style/prism.js'))
footer.extend("""\
</script>
</body>
</html>""".splitlines(keepends=True))
return footer
def markdown2Html(file):
md = ['\n\n']
md.extend(subprocess.getoutput(
'hoedown --all-block --all-flags --all-negative --all-span %s' % file
).splitlines(keepends=True))
for m in md:
if args.docs.index(file) == 0 and '<h1>' in m:
md[md.index(m)] = re.sub(
'<h1>', '<h1 style=\'page-break-before: avoid;\'>', m)
if 'img src' in m:
src = re.search('src="(.*?)"', m).group(1)
ext = re.search('(?<=\.).{1,4}?$', src).group(0)
b64 = file2base64(os.path.dirname(file) + '/' + src)
newimg = 'data:image/' + ext + ';base64,' + b64
md[md.index(m)] = re.sub(src, newimg, m)
if 'language-sh' in m:
md[md.index(m)] = re.sub('language-sh', 'language-bash', m)
return md
def html2pdf(input, title, output):
subprocess.getoutput(
'wkhtmltopdf --dpi 150 --print-media-type --title ' +
' '.join([title, input, output])
)
def main():
checkDependencies()
viewer = {
'linux': {'pdf': 'evince', 'html': 'firefox'},
'darwin': {'pdf': 'open', 'html': 'open'}}[sys.platform]
output = []
output.extend(getHeader())
for doc in args.docs:
output.extend(markdown2Html(doc))
output.extend(getFooter())
if not args.html and not args.both:
writeListToFile('/tmp/%s.html' % out_fname, output)
html2pdf('/tmp/%s.html' % out_fname, out_fname, '%s.pdf' % out_fname)
if not args.no_open:
subprocess.Popen('%s %s.pdf' % (viewer['pdf'], out_fname),
shell=True)
if args.both:
writeListToFile('%s.html' % out_fname, output)
html2pdf('%s.html' % out_fname, out_fname, '%s.pdf' % out_fname)
if not args.no_open:
subprocess.Popen('%s %s.html' % (viewer['html'], out_fname),
shell=True)
subprocess.Popen('%s %s.pdf' % (viewer['pdf'], out_fname),
shell=True)
elif args.html:
writeListToFile('%s.html' % out_fname, output)
if not args.no_open:
subprocess.Popen('%s %s.html' % (viewer['html'], out_fname),
shell=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='mdbuild')
htmlpdf= parser.add_mutually_exclusive_group()
htmlpdf.add_argument('-t', '--html', action='store_true',
help='create html only')
htmlpdf.add_argument('-b', '--both', action='store_true',
help='create pdf and html')
parser.add_argument('-o', '--output', help='output filename')
parser.add_argument('-n', '--no-open', action='store_true',
help='do not open file after build')
parser.add_argument('docs', nargs='+', help='documents to include')
args = parser.parse_args()
if args.output:
out_fname = args.output
else:
out_fname = re.search('[^\.]+', args.docs[0]).group(0)
main()
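# --- Illustrative sketch (not part of the original script): markdown2Html()
# inlines images as data URIs so the generated HTML is self-contained for
# wkhtmltopdf. The core of that trick in isolation; the helper name is
# hypothetical:
import base64

def to_data_uri(path, ext):
    """Hypothetical helper: embed an image file as a data URI."""
    with open(path, 'rb') as f:
        return 'data:image/%s;base64,%s' % (ext, base64.b64encode(f.read()).decode())

# e.g. <img src="logo.png"> would become <img src="data:image/png;base64,iVBOR...">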
| 27.7 | 77 | 0.5653 | 570 | 4,709 | 4.584211 | 0.319298 | 0.048986 | 0.032147 | 0.024876 | 0.196326 | 0.173364 | 0.128205 | 0.128205 | 0.114428 | 0.114428 | 0 | 0.013545 | 0.263113 | 4,709 | 169 | 78 | 27.863905 | 0.739481 | 0.00446 | 0 | 0.098485 | 0 | 0 | 0.199915 | 0.011521 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075758 | false | 0 | 0.045455 | 0 | 0.174242 | 0.015152 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87b601d038e77ca7cab9f2c9d3e4980c8347af3b | 1,416 | py | Python | test/test_fd.py | Xiang-cd/realsafe | 39f632e950562fa00ac26d34d13b2691c9c5f013 | [
"MIT"
] | 2 | 2021-01-27T06:14:50.000Z | 2021-10-30T08:23:48.000Z | test/test_fd.py | Xiang-cd/realsafe | 39f632e950562fa00ac26d34d13b2691c9c5f013 | [
"MIT"
] | 2 | 2021-08-25T16:14:37.000Z | 2022-02-10T02:26:07.000Z | test/test_fd.py | Xiang-cd/realsafe | 39f632e950562fa00ac26d34d13b2691c9c5f013 | [
"MIT"
] | 1 | 2022-01-05T04:36:22.000Z | 2022-01-05T04:36:22.000Z | #!/usr/bin/env python3
import os
import tensorflow as tf
import numpy as np
from realsafe import CrossEntropyLoss, BIM
from realsafe.model.loader import load_model_from_path
from realsafe.dataset import imagenet, dataset_to_iterator
batch_size = 25
session = tf.Session()
model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../example/imagenet/resnet152_fd.py')
rs_model = load_model_from_path(model_path)
model = rs_model.load(session)
xs_ph = tf.placeholder(model.x_dtype, shape=(batch_size, *model.x_shape))
lgs, lbs = model.logits_and_labels(xs_ph)
dataset = imagenet.load_dataset_for_classifier(model, load_target=True)
dataset = dataset.batch(batch_size).take(10)
loss = CrossEntropyLoss(model)
attack = BIM(
    model=model,
    batch_size=batch_size,
    loss=loss,
    goal='ut',
    distance_metric='l_inf',
    session=session
)
attack.config(
    iteration=50,
    magnitude=8.0 / 255.0,
    alpha=0.5 / 255.0,
)
accs, adv_accs = [], []
for filenames, xs, ys, ys_target in dataset_to_iterator(dataset, session):
    xs_adv = attack.batch_attack(xs, ys=ys)
    lbs_pred = session.run(lbs, feed_dict={xs_ph: xs})
    lbs_adv = session.run(lbs, feed_dict={xs_ph: xs_adv})
    accs.append(np.equal(ys, lbs_pred).astype(np.float).mean())
    adv_accs.append(np.equal(ys, lbs_adv).astype(np.float).mean())
    print(accs[-1], adv_accs[-1])

print(np.mean(accs), np.mean(adv_accs))
| 27.764706 | 108 | 0.731638 | 226 | 1,416 | 4.353982 | 0.380531 | 0.045732 | 0.026423 | 0.034553 | 0.105691 | 0.105691 | 0.105691 | 0.054878 | 0 | 0 | 0 | 0.019512 | 0.131356 | 1,416 | 51 | 109 | 27.764706 | 0.780488 | 0.014831 | 0 | 0 | 0 | 0 | 0.030108 | 0.02509 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.157895 | 0 | 0.157895 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87b623971f6ce4c2d43cab30e8be7cf30931d68c | 3,692 | py | Python | Sketches/JMB/mysite/settings.py | sparkslabs/kamaelia_orig | 24b5f855a63421a1f7c6c7a35a7f4629ed955316 | [
"Apache-2.0"
] | 12 | 2015-10-20T10:22:01.000Z | 2021-07-19T10:09:44.000Z | Sketches/JMB/mysite/settings.py | sparkslabs/kamaelia_orig | 24b5f855a63421a1f7c6c7a35a7f4629ed955316 | [
"Apache-2.0"
] | 2 | 2015-10-20T10:22:55.000Z | 2017-02-13T11:05:25.000Z | Sketches/JMB/mysite/settings.py | sparkslabs/kamaelia_orig | 24b5f855a63421a1f7c6c7a35a7f4629ed955316 | [
"Apache-2.0"
] | 6 | 2015-03-09T12:51:59.000Z | 2020-03-01T13:06:21.000Z | # -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Django settings for mysite project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASE_ENGINE = 'sqlite3' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'ado_mssql'.
DATABASE_NAME = '/home/jason/mysite/mysite.db' # Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
# Local time zone for this installation. Choices can be found here:
# http://www.postgresql.org/docs/8.1/static/datetime-keywords.html#DATETIME-TIMEZONE-SET-TABLE
# although not all variations may be possible on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
# http://blogs.law.harvard.edu/tech/stories/storyReader$15
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'bbfgxp&2+t&=yo!0@wey-_n4fcxhx8gdllmp%1s#%z85w_opv5'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.load_template_source',
    'django.template.loaders.app_directories.load_template_source',
    # 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.middleware.doc.XViewMiddleware',
)
ROOT_URLCONF = 'mysite.urls'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.admin',
    'mysite.polls',
)
| 36.196078 | 111 | 0.728061 | 512 | 3,692 | 5.181641 | 0.496094 | 0.034301 | 0.016585 | 0.027139 | 0.094233 | 0.050509 | 0.021108 | 0 | 0 | 0 | 0 | 0.012033 | 0.167118 | 3,692 | 101 | 112 | 36.554455 | 0.850732 | 0.656826 | 0 | 0 | 0 | 0 | 0.448105 | 0.352554 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.025 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87b8637f693d0b8229b1b49f56096e77a5c26a9c | 5,584 | py | Python | mdns/Phidget22Python/Phidget22/Devices/IR.py | rabarar/phidget_docker | ceca56c86d27f291a4300a1257c02096862335ec | [
"MIT"
] | null | null | null | mdns/Phidget22Python/Phidget22/Devices/IR.py | rabarar/phidget_docker | ceca56c86d27f291a4300a1257c02096862335ec | [
"MIT"
] | null | null | null | mdns/Phidget22Python/Phidget22/Devices/IR.py | rabarar/phidget_docker | ceca56c86d27f291a4300a1257c02096862335ec | [
"MIT"
] | null | null | null | import sys
import ctypes
from Phidget22.PhidgetSupport import PhidgetSupport
from Phidget22.Async import *
from Phidget22.CodeInfo import CodeInfo
from Phidget22.IRCodeEncoding import IRCodeEncoding
from Phidget22.IRCodeLength import IRCodeLength
from Phidget22.PhidgetException import PhidgetException
from Phidget22.Phidget import Phidget
class IR(Phidget):
def __init__(self):
Phidget.__init__(self)
self.handle = ctypes.c_void_p()
if sys.platform == 'win32':
self._CodeFactory = ctypes.WINFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_char_p, ctypes.c_uint32, ctypes.c_int)
else:
self._CodeFactory = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_char_p, ctypes.c_uint32, ctypes.c_int)
self._Code = None
self._onCode = None
if sys.platform == 'win32':
self._LearnFactory = ctypes.WINFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_char_p, ctypes.POINTER(CodeInfo))
else:
self._LearnFactory = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_char_p, ctypes.POINTER(CodeInfo))
self._Learn = None
self._onLearn = None
if sys.platform == 'win32':
self._RawDataFactory = ctypes.WINFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.POINTER(ctypes.c_int32), ctypes.c_int32)
else:
self._RawDataFactory = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.POINTER(ctypes.c_int32), ctypes.c_int32)
self._RawData = None
self._onRawData = None
__func = PhidgetSupport.getDll().PhidgetIR_create
__func.restype = ctypes.c_int32
res = __func(ctypes.byref(self.handle))
if res > 0:
raise PhidgetException(res)
def __del__(self):
Phidget.__del__(self)
def _localCodeEvent(self, handle, userPtr, code, bitCount, isRepeat):
if self._Code == None:
return
code = code.decode('utf-8')
self._Code(self, code, bitCount, isRepeat)
def setOnCodeHandler(self, handler):
if handler == None:
self._Code = None
self._onCode = None
else:
self._Code = handler
self._onCode = self._CodeFactory(self._localCodeEvent)
try:
__func = PhidgetSupport.getDll().PhidgetIR_setOnCodeHandler
__func.restype = ctypes.c_int32
res = __func(self.handle, self._onCode, None)
except RuntimeError:
self._Code = None
self._onCode = None
def _localLearnEvent(self, handle, userPtr, code, codeInfo):
if self._Learn == None:
return
code = code.decode('utf-8')
if codeInfo != None:
codeInfo = codeInfo.contents
codeInfo.toPython()
self._Learn(self, code, codeInfo)
def setOnLearnHandler(self, handler):
if handler == None:
self._Learn = None
self._onLearn = None
else:
self._Learn = handler
self._onLearn = self._LearnFactory(self._localLearnEvent)
try:
__func = PhidgetSupport.getDll().PhidgetIR_setOnLearnHandler
__func.restype = ctypes.c_int32
res = __func(self.handle, self._onLearn, None)
except RuntimeError:
self._Learn = None
self._onLearn = None
def _localRawDataEvent(self, handle, userPtr, data, dataLen):
if self._RawData == None:
return
data = [data[i] for i in range(dataLen)]
self._RawData(self, data)
def setOnRawDataHandler(self, handler):
if handler == None:
self._RawData = None
self._onRawData = None
else:
self._RawData = handler
self._onRawData = self._RawDataFactory(self._localRawDataEvent)
try:
__func = PhidgetSupport.getDll().PhidgetIR_setOnRawDataHandler
__func.restype = ctypes.c_int32
res = __func(self.handle, self._onRawData, None)
except RuntimeError:
self._RawData = None
self._onRawData = None
def getLastCode(self):
_code = (ctypes.c_char * 33)()
_codeLen = ctypes.c_int32(33)
_bitCount = ctypes.c_uint32()
__func = PhidgetSupport.getDll().PhidgetIR_getLastCode
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_code), _codeLen, ctypes.byref(_bitCount))
if result > 0:
raise PhidgetException(result)
return _code.value.decode('utf-8'), _bitCount.value
def getLastLearnedCode(self):
_code = (ctypes.c_char * 33)()
_codeLen = ctypes.c_int32(33)
_codeInfo = CodeInfo()
__func = PhidgetSupport.getDll().PhidgetIR_getLastLearnedCode
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_code), _codeLen, ctypes.byref(_codeInfo))
if result > 0:
raise PhidgetException(result)
return _code.value.decode('utf-8'), _codeInfo.toPython()
def transmit(self, code, codeInfo):
_code = ctypes.create_string_buffer(code.encode('utf-8'))
_codeInfo = codeInfo.fromPython()
__func = PhidgetSupport.getDll().PhidgetIR_transmit
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_code), ctypes.byref(_codeInfo))
if result > 0:
raise PhidgetException(result)
def transmitRaw(self, data, carrierFrequency, dutyCycle, gap):
_data = (ctypes.c_uint32 * len(data))(*data)
_dataLen = ctypes.c_int32(len(data))
_carrierFrequency = ctypes.c_uint32(carrierFrequency)
_dutyCycle = ctypes.c_double(dutyCycle)
_gap = ctypes.c_uint32(gap)
__func = PhidgetSupport.getDll().PhidgetIR_transmitRaw
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_data), _dataLen, _carrierFrequency, _dutyCycle, _gap)
if result > 0:
raise PhidgetException(result)
def transmitRepeat(self):
__func = PhidgetSupport.getDll().PhidgetIR_transmitRepeat
__func.restype = ctypes.c_int32
result = __func(self.handle)
if result > 0:
raise PhidgetException(result)
RAW_DATA_LONG_SPACE = 4294967295
IR_MAX_CODE_BIT_COUNT = 128
IR_MAX_CODE_STR_LENGTH = 33
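# --- Illustrative sketch (not part of the generated binding): typical use of
# the class above is to register a code handler, open the channel, and let the
# Phidget library invoke the callback. `openWaitForAttachment` is inherited
# from Phidget; this requires the Phidget22 native library and an attached IR
# device, and the 5000 ms timeout is an arbitrary choice here:
def on_code(ir, code, bit_count, is_repeat):
    print("IR code:", code, "bits:", bit_count, "repeat:", is_repeat)

ir = IR()
ir.setOnCodeHandler(on_code)
ir.openWaitForAttachment(5000)  # raises PhidgetException if no device attaches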
| 29.860963 | 132 | 0.744807 | 714 | 5,584 | 5.501401 | 0.147059 | 0.078411 | 0.04888 | 0.039715 | 0.470978 | 0.437882 | 0.333503 | 0.300917 | 0.300917 | 0.273931 | 0 | 0.020584 | 0.147385 | 5,584 | 186 | 133 | 30.021505 | 0.804453 | 0 | 0 | 0.438356 | 0 | 0 | 0.007163 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.089041 | false | 0 | 0.061644 | 0 | 0.212329 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87b8e5f7f61ff1ffcca34bab7f115ef927c9aaf8 | 6,016 | py | Python | mux_python/models/metric.py | moaazsidat/mux-python | 3f03b9dd0761fa1a0cd5bdbeac85ccf4f326508c | [
"MIT"
] | 36 | 2019-02-28T21:18:39.000Z | 2022-03-04T19:58:45.000Z | mux_python/models/metric.py | moaazsidat/mux-python | 3f03b9dd0761fa1a0cd5bdbeac85ccf4f326508c | [
"MIT"
] | 7 | 2019-04-01T14:48:34.000Z | 2022-03-04T16:31:34.000Z | mux_python/models/metric.py | moaazsidat/mux-python | 3f03b9dd0761fa1a0cd5bdbeac85ccf4f326508c | [
"MIT"
] | 9 | 2019-11-29T03:57:58.000Z | 2022-03-02T17:29:25.000Z | # coding: utf-8
"""
Mux API
Mux is how developers build online video. This API encompasses both Mux Video and Mux Data functionality to help you build your video-related projects better and faster than ever before. # noqa: E501
The version of the OpenAPI document: v1
Contact: devex@mux.com
Generated by: https://openapi-generator.tech
"""
import inspect
import pprint
import re # noqa: F401
import six
from mux_python.configuration import Configuration
class Metric(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'value': 'float',
        'type': 'str',
        'name': 'str',
        'metric': 'str',
        'measurement': 'str'
    }

    attribute_map = {
        'value': 'value',
        'type': 'type',
        'name': 'name',
        'metric': 'metric',
        'measurement': 'measurement'
    }

    def __init__(self, value=None, type=None, name=None, metric=None, measurement=None, local_vars_configuration=None):  # noqa: E501
        """Metric - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration.get_default_copy()
        self.local_vars_configuration = local_vars_configuration

        self._value = None
        self._type = None
        self._name = None
        self._metric = None
        self._measurement = None
        self.discriminator = None

        if value is not None:
            self.value = value
        if type is not None:
            self.type = type
        if name is not None:
            self.name = name
        if metric is not None:
            self.metric = metric
        if measurement is not None:
            self.measurement = measurement

    @property
    def value(self):
        """Gets the value of this Metric.  # noqa: E501

        :return: The value of this Metric.  # noqa: E501
        :rtype: float
        """
        return self._value

    @value.setter
    def value(self, value):
        """Sets the value of this Metric.

        :param value: The value of this Metric.  # noqa: E501
        :type value: float
        """
        self._value = value

    @property
    def type(self):
        """Gets the type of this Metric.  # noqa: E501

        :return: The type of this Metric.  # noqa: E501
        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """Sets the type of this Metric.

        :param type: The type of this Metric.  # noqa: E501
        :type type: str
        """
        self._type = type

    @property
    def name(self):
        """Gets the name of this Metric.  # noqa: E501

        :return: The name of this Metric.  # noqa: E501
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this Metric.

        :param name: The name of this Metric.  # noqa: E501
        :type name: str
        """
        self._name = name

    @property
    def metric(self):
        """Gets the metric of this Metric.  # noqa: E501

        :return: The metric of this Metric.  # noqa: E501
        :rtype: str
        """
        return self._metric

    @metric.setter
    def metric(self, metric):
        """Sets the metric of this Metric.

        :param metric: The metric of this Metric.  # noqa: E501
        :type metric: str
        """
        self._metric = metric

    @property
    def measurement(self):
        """Gets the measurement of this Metric.  # noqa: E501

        :return: The measurement of this Metric.  # noqa: E501
        :rtype: str
        """
        return self._measurement

    @measurement.setter
    def measurement(self, measurement):
        """Sets the measurement of this Metric.

        :param measurement: The measurement of this Metric.  # noqa: E501
        :type measurement: str
        """
        self._measurement = measurement

    def to_dict(self, serialize=False):
        """Returns the model properties as a dict"""
        result = {}

        def convert(x):
            if hasattr(x, "to_dict"):
                args = inspect.getfullargspec(x.to_dict).args
                if len(args) == 1:
                    return x.to_dict()
                else:
                    return x.to_dict(serialize)
            else:
                return x

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            attr = self.attribute_map.get(attr, attr) if serialize else attr
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: convert(x),
                    value
                ))
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], convert(item[1])),
                    value.items()
                ))
            else:
                result[attr] = convert(value)

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, Metric):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, Metric):
            return True

        return self.to_dict() != other.to_dict()
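# --- Illustrative sketch (not part of the generated model): models like Metric
# are plain data holders, and round-tripping through to_dict() is the usual way
# to inspect them. The field values below are made up for illustration:
m = Metric(value=95.0, type='score', name='Example Metric',
           metric='example_metric', measurement='avg')
print(m.to_dict())                 # {'value': 95.0, 'type': 'score', ...}
print(m == Metric(**m.to_dict()))  # True: equality is defined via to_dict()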
| 25.709402 | 204 | 0.553358 | 699 | 6,016 | 4.672389 | 0.194564 | 0.036742 | 0.073484 | 0.073484 | 0.288732 | 0.256583 | 0.248928 | 0.091855 | 0.06981 | 0.02327 | 0 | 0.015869 | 0.350565 | 6,016 | 233 | 205 | 25.819742 | 0.820067 | 0.301695 | 0 | 0.110092 | 0 | 0 | 0.032049 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.155963 | false | 0 | 0.045872 | 0 | 0.366972 | 0.018349 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87b93feaa8699f8cd81fb2e0b56446953fa173b5 | 1,816 | py | Python | loglog.py | nmillerns/affine_invariant_functions | 0d5091d67fe8949341328b0324870b44358321bc | [
"MIT"
] | 2 | 2020-06-03T04:30:48.000Z | 2020-06-03T04:34:36.000Z | loglog.py | nmillerns/affine_invariant_functions | 0d5091d67fe8949341328b0324870b44358321bc | [
"MIT"
] | null | null | null | loglog.py | nmillerns/affine_invariant_functions | 0d5091d67fe8949341328b0324870b44358321bc | [
"MIT"
] | 1 | 2020-06-03T04:30:56.000Z | 2020-06-03T04:30:56.000Z | import cv2
import numpy as np
import sys
from utils import *
def logsawtooth(x: float) -> float:
return (x+1)/(2**np.floor(np.log2(x+1))) - 1.
class LogLogImagePattern(ImageSurface):
def __init__(self, img: np.array):
super().__init__(img)
self.domain = SurfaceDomain(-1, -1, 15, 15, False, False, False, False)
def __call__(self, x: float, y: float) -> typing.Tuple[int, int, int]:
u, v = logsawtooth(x), 1. - logsawtooth(y)
return self.img[int(v*(self.height-1)),int(u*(self.width-1)),:]
def main(args: typing.List[str]) -> int:
if len(args) != 1:
print("Usage: loglog.py (imgfile.png)")
return 1
plotter = ColorSurfacePlotter(900, 900, show_axis = True, axis_thickness = .02)
f = LogLogImagePattern(crop_max_square_from_img(cv2.imread(args[0])))
scale = 1.
translation = 0.
frame = 0
while scale > 0.5:
A, b = A_b_from_params(rotation_angle=0, scale=scale, b=np.array([[1],[1]]), b_scale=translation)
plotter.plot_affine(f, A=A, b=b, window=SurfaceDomain(-1.99, -1.99, 7, 7, False, False, False, False))
print(frame, scale, translation)
plotter.save(f'animation{frame}.png')
scale -= .031250
frame += 1
while translation >= -0.5:
A, b = A_b_from_params(rotation_angle=0, scale=scale, b=np.array([[1],[1]]), b_scale=translation)
plotter.plot_affine(f, A=A, b=b, window=SurfaceDomain(-1.99, -1.99, 7, 7, False, False, False, False))
print(frame, scale, translation)
plotter.save(f'animation{frame}.png')
translation -= 0.0625
frame += 1
print("See results in animation*.png and pattern.png")
plotter.save('pattern.png')
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
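# --- Illustrative note (not part of the original script): logsawtooth is what
# makes the pattern affine-invariant. It satisfies
#     logsawtooth(2*x + 1) == logsawtooth(x),
# because x -> 2x + 1 maps x + 1 to 2*(x + 1), and the 2**floor(log2(x + 1))
# denominator absorbs the extra factor of two. A quick numerical check:
for x in [0.0, 0.3, 2.7, 9.9]:
    assert abs(logsawtooth(2 * x + 1) - logsawtooth(x)) < 1e-12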
| 36.32 | 110 | 0.616189 | 267 | 1,816 | 4.048689 | 0.340824 | 0.083256 | 0.083256 | 0.055504 | 0.353377 | 0.353377 | 0.353377 | 0.353377 | 0.353377 | 0.353377 | 0 | 0.050177 | 0.220815 | 1,816 | 49 | 111 | 37.061224 | 0.713781 | 0 | 0 | 0.243902 | 0 | 0 | 0.073789 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.097561 | false | 0 | 0.097561 | 0.02439 | 0.317073 | 0.097561 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87ba26c6db22b72a61c66f9d345fd9b49a972171 | 1,042 | py | Python | api_builder/configuration.py | zmiller91/aws-lambda-api-builder | 86026b5c134faa33eaa1e1268e0206cb074e3285 | [
"MIT"
] | null | null | null | api_builder/configuration.py | zmiller91/aws-lambda-api-builder | 86026b5c134faa33eaa1e1268e0206cb074e3285 | [
"MIT"
] | null | null | null | api_builder/configuration.py | zmiller91/aws-lambda-api-builder | 86026b5c134faa33eaa1e1268e0206cb074e3285 | [
"MIT"
] | null | null | null | import json
import os
import inspect
import api_builder
import time
EXECUTABLE_NAME = 'api_builder'
APPLICATION_DESCRIPTION = '''
This is a CLI to build, package, and release AWS APIs using API Gateway and Lambda.
'''
ZLAB_CONF_FILE = "zlab-conf.json"
STATIC_DIR = os.path.join(os.path.dirname(inspect.getfile(api_builder)), "static")
_base_dir = os.getcwd()
_build_dir = os.path.join(_base_dir, "build")
_private_dir = os.path.join(_build_dir, "private")
_deps_dir = os.path.join(_private_dir, "deps")
_zip_dir = os.path.join(_private_dir, "lib")
_project_name = os.path.basename(_base_dir)
_cf_dir = os.path.join(_base_dir, "cloudformation")
def check_bootstrap():
if not os.path.exists(ZLAB_CONF_FILE):
raise ValueError("Application not bootstrapped. Run `zlab bootstrap --name {ApplicationName}` to boostrap")
def get_zlab_conf():
with open(ZLAB_CONF_FILE) as conf:
return json.load(conf)
def write_zlab_conf(conf):
f = open(os.path.join(ZLAB_CONF_FILE), 'w')
f.write(json.dumps(conf))
f.close() | 30.647059 | 116 | 0.738964 | 162 | 1,042 | 4.475309 | 0.41358 | 0.082759 | 0.096552 | 0.107586 | 0.118621 | 0.118621 | 0 | 0 | 0 | 0 | 0 | 0 | 0.134357 | 1,042 | 34 | 117 | 30.647059 | 0.803769 | 0 | 0 | 0 | 0 | 0 | 0.228188 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107143 | false | 0 | 0.178571 | 0 | 0.321429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
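# --- Illustrative sketch (not part of the original module): intended usage
# from a hypothetical CLI command is to guard with check_bootstrap(), then
# read or update the JSON config:
check_bootstrap()                 # raises ValueError if zlab-conf.json is missing
conf = get_zlab_conf()
conf['last_build'] = time.time()  # `time` is imported at the top of the module
write_zlab_conf(conf)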