max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
snake/hc.py | Wenbing-Yao/aigames | 2 | 6620951 | <gh_stars>1-10
# -*- coding: utf-8 -*-
from copy import deepcopy
from itertools import combinations
from os.path import expandvars
import ipdb
import numpy as np
import pygame
from snake_env import SnakeEnv
from snake_env import GRID_WIDTH_NUM
from snake_env import GRID_HEIGHT_NUM
from time import sleep
import numpy as np
from snake_env import SnakeEnv
from collections import deque
import pickle
MAX_DEPTH = 18
def show_graph(graph, flags, env, update=True, width=2, extra=None):
    """Render the environment and overlay every edge whose flag is set.

    When *update* is True the pygame display is flushed here instead of
    inside env.render(); *extra* is optional text passed through to render.
    """
    if extra:
        env.render(extra, update=not update)
    else:
        env.render(update=not update)
    for sp, neighbours in graph.items():
        for ep in neighbours:
            if flags[(sp, ep)]:
                env.draw_connection(sp, ep, color=(0, 0xff, 0), width=width)
    if update:
        pygame.display.update()
def deletion(graph, env):
    """Randomly delete edges from *graph* while keeping every degree >= 2.

    Builds a flag dict marking each directed edge as present (1) or removed
    (0), visits the edges in a random order and removes one only when both
    endpoints would still keep more than two neighbours afterwards.
    (A dead duplicate `np.random.permutation(len(graph))` call that was
    immediately overwritten has been removed.)

    Returns the pruned graph and the edge-flag dict.
    """
    flags = {}
    edges = set()
    for sp in graph:
        for ep in graph[sp]:
            flags[(sp, ep)] = 1
            edges.add((sp, ep))
    show_graph(graph, flags, env)
    sleep(1)
    perm = np.random.permutation(len(edges))
    edges = list(edges)
    for i in perm:
        sp, ep = edges[i]
        # Only delete when both endpoints keep degree > 2 afterwards.
        if len(graph[sp]) > 2 and len(graph[ep]) > 2:
            if ep in graph[sp]:
                graph[sp].remove(ep)
                flags[(sp, ep)] = 0
            if sp in graph[ep]:
                graph[ep].remove(sp)
                flags[(ep, sp)] = 0
        if i % 10 == 0:  # redraw occasionally to animate the pruning
            show_graph(graph, flags, env)
    return graph, flags
def destroy_path(graph, total_graph, flags, start, end, w, visited, deps=1):
    """DFS for an alternating path of edges from *start* towards *end*.

    Only edges whose flag equals *w* are followed, and *w* flips (1 -> 0 ->
    1) at each level, so present and deleted edges alternate along the path.
    Returns the path as a list of vertices (in reverse order, excluding
    *start*), or a falsy value (0 or []) when nothing is found within
    MAX_DEPTH levels.
    """
    if deps > MAX_DEPTH or start in visited:
        visited.add(start)
        return 0
    visited.add(start)
    edges = []
    # collect the edges leaving *start* whose flag matches the wanted parity
    for ep in total_graph[start]:
        if flags[(start, ep)] == w:
            edges.append((start, ep))
    for edge in edges:
        ep = edge[1]
        if ep in visited:
            continue
        # a present edge reaching *end* on a present-parity step closes the path
        if flags[edge] == 1 and w == 1 and end == ep:
            return [ep]
        res = destroy_path(graph,
                           total_graph,
                           flags,
                           start=ep,
                           end=end,
                           w=1 - w,
                           visited=visited,
                           deps=deps + 1)
        if res:
            res.append(ep)
            return res
        else:
            # NOTE(review): ep is deliberately left in *visited* on failure,
            # so failed branches are never re-explored — confirm intended.
            pass
    return []
def reflect(e, graph, flags):
    """Toggle directed edge *e*: drop it when present, insert it when absent."""
    u, v = e
    if not flags[e]:
        flags[e] = 1
        graph[u].add(v)
    else:
        flags[e] = 0
        graph[u].remove(v)
def reflect_path(path, graph, flags):
    """Toggle every edge along *path*, in both directions of each segment."""
    for a, b in zip(path, path[1:]):
        reflect((a, b), graph, flags)
        reflect((b, a), graph, flags)
def get_sd(graph):
    """Return the vertices whose degree exceeds two (the remaining defects)."""
    return [v for v in graph if len(graph[v]) > 2]
def destroy(graph, total_graph, flags, env=None):
    """Try to eliminate one degree>2 vertex by flipping an alternating path.

    Searches for an alternating path between a pair of high-degree vertices
    and toggles its edges.  Returns the number of degree>2 vertices that
    remain (0 once the graph is 2-regular).

    Fix: show_graph() was called unconditionally although *env* defaults to
    None (env.render would raise AttributeError); both calls are now guarded.
    """
    SD = get_sd(graph)
    if not SD:
        return 0
    w = 1
    for start, end in combinations(SD, 2):
        visited = set()
        path = destroy_path(graph, total_graph, flags, start, end, w, visited)
        if env:
            show_graph(graph, flags, env)
        if path:
            path.append(start)
            reflect_path(path, graph, flags)
            break
    else:
        print('not found')
    if env:
        show_graph(graph, flags, env)
    return len(get_sd(graph))
def connecting_path(start,
                    end,
                    w,
                    f,
                    graph,
                    total_graph,
                    flags,
                    visited,
                    deps=1):
    """DFS for an alternating cycle through *start* (callers pass end==start).

    Follows edges whose flag equals *w*, flipping *w* at each level; the
    cycle closes on an absent edge (flag 0) that enters *end*.  *f* is
    flipped in lock-step with *w* but is otherwise unused here.  Returns the
    vertex list in reverse order, or [] when nothing is found.
    """
    edges = []
    visited.add(start)
    global MAX_DEPTH
    # hard cap of 15 visited vertices on top of the adaptive MAX_DEPTH cap
    if len(visited) > 15:
        return []
    if len(visited) > MAX_DEPTH:
        return []
    for ep in total_graph[start]:
        if flags[(start, ep)] == w:
            edges.append((start, ep))
    for edge in edges:
        sp, ep = edge
        if ep != end and ep in visited:
            continue
        # an absent edge into *end* closes the alternating cycle
        if flags[edge] == 0 and end == edge[1]:
            return [edge[1]]
        if end == edge[1]:
            continue
        res = connecting_path(ep, end, 1 - w, 1 - f, graph, total_graph, flags,
                              visited, deps + 1)
        if not res:
            # unlike destroy_path, failed branches are re-opened here
            visited.remove(ep)
            continue
        res.append(ep)
        return res
    return []
def get_circle(graph, origin=0):
    """Greedy walk from *origin*, one unvisited neighbour at a time.

    Returns the set of vertices reached before the walk loops back or runs
    out of unvisited neighbours.
    """
    seen = set()
    node = origin
    while node not in seen:
        seen.add(node)
        # step to the first unvisited neighbour; stay put when there is none
        node = next((v for v in graph[node] if v not in seen), node)
    return seen
def get_list_circle(graph, origin=0):
    """Like get_circle(), but return the visit order as a list."""
    seen = set()
    seq = []
    node = origin
    while node not in seen:
        seen.add(node)
        seq.append(node)
        node = next((v for v in graph[node] if v not in seen), node)
    return seq
def get_all_sets(graph):
    """Partition the vertices into greedy-walk components, smallest first."""
    remaining = set(graph)
    components = []
    while remaining:
        seed = remaining.pop()
        component = get_circle(graph, origin=seed)
        components.append(component)
        remaining.difference_update(component)
    return sorted(components, key=len)
def get_smallest_circle(graph):
    """Return an arbitrary vertex taken from the smallest component."""
    smallest = get_all_sets(graph)[0]
    return smallest.pop()
def num_of_circle(graph):
    """Count the greedy-walk components of *graph*."""
    components = get_all_sets(graph)
    return len(components)
def sorted_vetexes(graph):
    """All vertices, listed component by component (smallest first)."""
    return [v for batch in get_all_sets(graph) for v in batch]
def has_one_circle(graph, origin=0):
    """True when the greedy walk from *origin* reaches every vertex, i.e.
    the graph consists of a single cycle.

    Fix: a pure `get_all_sets(graph)` call whose result was discarded has
    been removed, and the previously-ignored *origin* parameter is now
    forwarded to get_circle() (same default, so callers are unaffected).
    """
    return len(get_circle(graph, origin=origin)) == len(graph)
def connector(graph, total_graph, flags, env=None):
    """Merge the disjoint sub-cycles of *graph* into one Hamiltonian cycle.

    For each vertex (smallest components first) searches for an alternating
    cycle and flips it, keeping the flip only when it does not increase the
    number of components.  Returns True once a single cycle remains.

    NOTE(review): show_graph() is called with *env* even though it defaults
    to None — confirm callers always pass a live environment.
    """
    if has_one_circle(graph):
        print('match')
        show_graph(graph, flags, env)
        return True
    for v in sorted_vetexes(graph):
        w = 1
        visited = set()
        path = connecting_path(v, v, w, w, graph, total_graph, flags, visited)
        if path:
            path.append(v)
            path = list(reversed(path))
            n_circles = num_of_circle(graph)
            reflect_path(path, graph, flags)
            post_n_circles = num_of_circle(graph)
            if post_n_circles > n_circles:
                # the flip made things worse — undo it
                reflect_path(path, graph, flags)
            else:
                show_graph(graph, flags, env)
                print(post_n_circles)
                if post_n_circles == 1:
                    return True
    return False
def build_graph(row, col):
    """Build the 4-neighbour grid graph for a row x col board.

    Cells are numbered r * col + c; the result maps each cell id to the set
    of ids of its horizontal/vertical neighbours.  Cells are created lazily,
    only when they participate in at least one edge.
    """
    graphs = {}

    def _link(a, b):
        # create both endpoints on demand, then connect in both directions
        graphs.setdefault(a, set()).add(b)
        graphs.setdefault(b, set()).add(a)

    for r in range(row):
        for c in range(col - 1):
            _link(r * col + c, r * col + c + 1)
    for r in range(row - 1):
        for c in range(col):
            _link(r * col + c, (r + 1) * col + c)
    return graphs
def bfs(grid, start, dst):
    """Breadth-first shortest path on *grid* from *start* (an (x, y) pair).

    NOTE(review): *dst* is compared against the (y, x) transpose of the
    current cell — the caller passes it row-major; confirm before changing.
    Cells holding 1 or 2 are treated as walls.  Returns the list of (x, y)
    steps, or None (implicitly) when *dst* is unreachable.
    """
    height, width = grid.shape
    blocked = (1, 2)
    frontier = deque([[start]])
    seen = {start}
    while frontier:
        path = frontier.popleft()
        x, y = path[-1]
        if (y, x) == dst:
            return path
        for nxt in ((x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)):
            nx, ny = nxt
            if not (0 <= nx < width and 0 <= ny < height):
                continue
            if grid[ny][nx] in blocked or nxt in seen:
                continue
            seen.add(nxt)
            frontier.append(path + [nxt])
def dfs_policy(obs, env):
    """BFS (despite the name) shortest-path policy towards the food cell.

    *obs* is a grid where 2 marks the snake head and -1 the food; values 1/2
    are walls for bfs().  Returns (action, next_cell) or (None, None) when
    either marker is missing or no path exists.
    """
    # maps the (head - next) displacement to the matching env action
    directions = {
        (-1, 0): env.right,
        (1, 0): env.left,
        (0, -1): env.down,
        (0, 1): env.up
    }
    try:
        # np.where returns (rows, cols); src is built as (x, y) while dst is
        # kept as (y, x) — bfs() compares dst against the transposed cell.
        src = np.where(obs == 2)
        src = int(src[1]), int(src[0])
        dst = np.where(obs == -1)
        dst = int(dst[0]), int(dst[1])
    except Exception:
        # int() raises when a marker is absent from the observation
        return None, None
    paths = bfs(obs, start=src, dst=dst)
    if paths is None or len(paths) <= 1:
        return None, None
    dst = paths[1]
    dire = src[0] - dst[0], src[1] - dst[1]
    action = directions[dire]
    return action, dst
def rel_pos(pos, rel, n_max):
    """Position of *pos* relative to *rel* on a cycle of length *n_max*."""
    offset = pos - rel
    return offset % n_max
def draw_graph():
    """Build a Hamiltonian cycle over the grid, then play snake along it.

    Pipeline: build the full grid graph -> randomly delete edges (deletion)
    -> remove degree>2 vertices (destroy) -> merge sub-cycles into one
    Hamiltonian cycle (connector) -> follow the cycle, taking BFS shortcuts
    to the food when they provably cannot trap the snake.
    """
    print(GRID_HEIGHT_NUM, GRID_WIDTH_NUM)
    graph = build_graph(row=GRID_HEIGHT_NUM, col=GRID_WIDTH_NUM)
    total_graph = deepcopy(graph)
    env = SnakeEnv(set_life=100000, alg='HC + BFS', no_sight_disp=True)
    env.reset()
    sleep(1)
    graph, flags = deletion(graph, env)
    for sp in graph:
        for ep in graph[sp]:
            if flags[(sp, ep)]:
                env.draw_connection(sp, ep, width=4)
    import pygame
    pygame.display.update()
    # Phase 1: repeatedly destroy high-degree vertices; widen the search
    # depth whenever an iteration makes no progress.
    pre_len = None
    while True:
        sd_len = destroy(graph, total_graph, flags, env=env)
        print('sd: ', sd_len)
        if pre_len is not None and pre_len == sd_len:
            global MAX_DEPTH
            print('+1')
            MAX_DEPTH += 1
        pre_len = sd_len
        show_graph(graph, flags, env)
        if not sd_len:
            break
    sleep(1)
    show_graph(graph, flags, env)
    # Phase 2: merge the remaining disjoint cycles into a single cycle.
    counter = 0
    while not connector(graph, total_graph, flags, env):
        counter += 1
        print('counter: ', counter)
    sleep(1)
    for sp in graph:
        for ep in graph[sp]:
            if flags[(sp, ep)]:
                env.draw_connection(sp, ep, color=(0xff, 0xff, 0), width=4)
    import pygame
    show_graph(graph, flags, env)
    # Index every cell along the Hamiltonian cycle, both by cell id and by
    # (x, y) coordinates.
    circle = get_list_circle(graph)
    print(circle)
    pos_encoder = {pos: i for i, pos in enumerate(circle)}
    pos_xy_decoder = {
        i: (pos % GRID_WIDTH_NUM, pos // GRID_WIDTH_NUM)
        for i, pos in enumerate(circle)
    }
    pos_xy_encoder = {(pos % GRID_WIDTH_NUM, pos // GRID_WIDTH_NUM): i
                      for i, pos in enumerate(circle)}
    obs = env.reset()
    c = 0
    # Phase 3: game loop — default to the next cell on the cycle, but take a
    # BFS shortcut when food/target/head/tail stay ordered along the cycle.
    while True:
        c += 1
        # *remainder* throttles how often the frame is redrawn (render speed)
        if len(env.status.snake_body) < 15:
            remainder = 20
        elif len(env.status.snake_body) < 30:
            remainder = 20
        elif len(env.status.snake_body) < 60:
            remainder = 30
        elif len(env.status.snake_body) < 90:
            remainder = 30
        elif len(env.status.snake_body) < 120:
            remainder = 40
        elif len(env.status.snake_body) < 150:
            remainder = 80
        elif len(env.status.snake_body) < 300:
            remainder = 100
        elif len(env.status.snake_body) < (GRID_WIDTH_NUM * GRID_HEIGHT_NUM -
                                           10):
            remainder = 30
        else:
            remainder = 5
        bfs_action, dst = dfs_policy(obs, env)
        bfs_dst_idx = 100000000
        if dst:
            bfs_dst_idx = pos_xy_encoder[dst]
        head = env.status.snake_body[0]
        head_pos, tail_pos = pos_xy_encoder[head], pos_xy_encoder[
            env.status.snake_body[-1]]
        head_idx, tail_idx = pos_xy_encoder[head], pos_xy_encoder[
            env.status.snake_body[-1]]
        hc_next_pos = pos_xy_decoder[(head_pos + 1) % len(graph)]
        directions = {
            (-1, 0): env.right,
            (1, 0): env.left,
            (0, -1): env.down,
            (0, 1): env.up
        }
        dire = head[0] - hc_next_pos[0], head[1] - hc_next_pos[1]
        print(head, hc_next_pos, dst, dst not in env.status.snake_body[:-1])
        print(head_idx, tail_idx, bfs_dst_idx)
        action = directions[dire]
        if not env.status.food_pos:
            # board full — no food left to place
            show_graph(graph,
                       flags,
                       env,
                       update=True,
                       width=1,
                       extra='倍速: {} X'.format(remainder * 5))
            break
        food_idx = pos_xy_encoder[env.status.food_pos]
        if bfs_action:
            print(food_idx, bfs_dst_idx, head_idx, tail_idx)
            print(rel_pos(food_idx, tail_idx, len(graph)),
                  rel_pos(bfs_dst_idx, tail_idx, len(graph)),
                  rel_pos(head_idx, tail_idx, len(graph)),
                  rel_pos(tail_idx, tail_idx, len(graph)))
            # Shortcut is safe only when, measured from the tail along the
            # cycle, the order is tail <= head <= bfs target <= food.
            if rel_pos(food_idx, tail_idx, len(graph)) >= rel_pos(
                    bfs_dst_idx, tail_idx, len(graph)) >= rel_pos(
                        head_idx, tail_idx, len(graph)) >= rel_pos(
                            tail_idx, tail_idx, len(graph)):
                action = bfs_action
                pass
        reward, obs, done, _ = env(action)
        if done:
            show_graph(graph,
                       flags,
                       env,
                       update=True,
                       width=1,
                       extra='倍速: {} X'.format(remainder * 5))
            print(done)
            break
        if c % remainder == 0:
            show_graph(graph,
                       flags,
                       env,
                       update=True,
                       width=1,
                       extra='倍速: {} X'.format(remainder * 5))
    show_graph(graph, flags, env, update=True, width=1)
    sleep(10)
    input()
# Script entry point: build the Hamiltonian cycle and run the snake agent.
if __name__ == '__main__':
    draw_graph()
| # -*- coding: utf-8 -*-
from copy import deepcopy
from itertools import combinations
from os.path import expandvars
import ipdb
import numpy as np
import pygame
from snake_env import SnakeEnv
from snake_env import GRID_WIDTH_NUM
from snake_env import GRID_HEIGHT_NUM
from time import sleep
import numpy as np
from snake_env import SnakeEnv
from collections import deque
import pickle
MAX_DEPTH = 18
def show_graph(graph, flags, env, update=True, width=2, extra=None):
# env.reset()
if not extra:
env.render(update=not update)
else:
env.render(extra, update=not update)
for sp in graph:
for ep in graph[sp]:
if flags[(sp, ep)]:
env.draw_connection(sp, ep, color=(0, 0xff, 0), width=width)
if update:
pygame.display.update()
def deletion(graph, env):
flags = {}
edges = set()
perm = np.random.permutation(len(graph))
for sp in graph:
for ep in graph[sp]:
flags[(sp, ep)] = 1
edges.add((sp, ep))
show_graph(graph, flags, env)
sleep(1)
perm = np.random.permutation(len(edges))
edges = list(edges)
for i in perm:
sp, ep = edges[i]
# for sp, ep in edges:
if len(graph[sp]) > 2 and len(graph[ep]) > 2:
if ep in graph[sp]:
graph[sp].remove(ep)
flags[(sp, ep)] = 0
if sp in graph[ep]:
graph[ep].remove(sp)
flags[(ep, sp)] = 0
if i % 10 == 0:
show_graph(graph, flags, env)
return graph, flags
def destroy_path(graph, total_graph, flags, start, end, w, visited, deps=1):
if deps > MAX_DEPTH or start in visited:
visited.add(start)
return 0
visited.add(start)
edges = []
for ep in total_graph[start]:
# print('-' * 4 * deps, 'flags', (
# start,
# ep,
# ), flags[(start, ep)])
if flags[(start, ep)] == w:
edges.append((start, ep))
# print('-' * 4 * deps, 'start at', start, w)
# print('-' * 4 * deps, 'found', edges)
for edge in edges:
ep = edge[1]
if ep in visited:
continue
# print('-' * 4 * deps, ep)
if flags[edge] == 1 and w == 1 and end == ep:
# print('-' * 4 * deps, 'found 1', start, ep)
return [ep]
res = destroy_path(graph,
total_graph,
flags,
start=ep,
end=end,
w=1 - w,
visited=visited,
deps=deps + 1)
if res:
res.append(ep)
return res
else:
# visited.remove(ep)
# print('-' * 4 * deps, 'ends 0', ep)
pass
return []
def reflect(e, graph, flags):
if flags[e]:
flags[e] = 0
graph[e[0]].remove(e[1])
else:
flags[e] = 1
graph[e[0]].add(e[1])
def reflect_path(path, graph, flags):
for i in range(len(path) - 1):
e1 = (path[i], path[i + 1])
e2 = (path[i + 1], path[i])
reflect(e1, graph, flags)
reflect(e2, graph, flags)
def get_sd(graph):
SD = []
for v in graph:
if len(graph[v]) > 2:
SD.append(v)
return SD
def destroy(graph, total_graph, flags, env=None):
SD = get_sd(graph)
if not SD:
return 0
w = 1
for start, end in combinations(SD, 2):
# print(start, end)
visited = set()
path = destroy_path(graph, total_graph, flags, start, end, w, visited)
show_graph(graph, flags, env)
if path:
path.append(start)
# print(paths)
reflect_path(path, graph, flags)
break
else:
print('not found')
if env:
show_graph(graph, flags, env)
return len(get_sd(graph))
def connecting_path(start,
end,
w,
f,
graph,
total_graph,
flags,
visited,
deps=1):
edges = []
visited.add(start)
global MAX_DEPTH
if len(visited) > 15:
return []
if len(visited) > MAX_DEPTH:
return []
for ep in total_graph[start]:
if flags[(start, ep)] == w:
edges.append((start, ep))
# print('-' * deps * 2, edges)
for edge in edges:
sp, ep = edge
if ep != end and ep in visited:
# print('-' * deps * 2, edge, end, flags[edge], 'visited', visited)
continue
# print('-' * deps * 2, edge, end, flags[edge])
if flags[edge] == 0 and end == edge[1]:
# print('-' * deps, 'found: ', edge)
return [edge[1]]
if end == edge[1]:
continue
res = connecting_path(ep, end, 1 - w, 1 - f, graph, total_graph, flags,
visited, deps + 1)
if not res:
visited.remove(ep)
continue
res.append(ep)
# print(res)
return res
return []
def get_circle(graph, origin=0):
visited = set()
start = origin
while True:
if start in visited:
return visited
visited.add(start)
for v in graph[start]:
if v in visited:
continue
start = v
break
def get_list_circle(graph, origin=0):
visited = set()
start = origin
seq = []
while True:
if start in visited:
return seq
visited.add(start)
seq.append(start)
for v in graph[start]:
if v in visited:
continue
start = v
break
def get_all_sets(graph):
total = set(graph.keys())
batch = []
while total:
v = total.pop()
batch.append(get_circle(graph, origin=v))
total -= batch[-1]
batch = sorted(batch, key=lambda b: len(b))
return batch
def get_smallest_circle(graph):
return get_all_sets(graph)[0].pop()
def num_of_circle(graph):
return len(get_all_sets(graph))
def sorted_vetexes(graph):
vetexes = []
for batch in get_all_sets(graph):
vetexes.extend(batch)
return vetexes
def has_one_circle(graph, origin=0):
get_all_sets(graph)
return len(get_circle(graph)) == len(graph)
def connector(graph, total_graph, flags, env=None):
if has_one_circle(graph):
print('match')
show_graph(graph, flags, env)
return True
for v in sorted_vetexes(graph):
w = 1
visited = set()
path = connecting_path(v, v, w, w, graph, total_graph, flags, visited)
if path:
path.append(v)
path = list(reversed(path))
n_circles = num_of_circle(graph)
reflect_path(path, graph, flags)
post_n_circles = num_of_circle(graph)
if post_n_circles > n_circles:
reflect_path(path, graph, flags)
else:
show_graph(graph, flags, env)
print(post_n_circles)
if post_n_circles == 1:
return True
return False
def build_graph(row, col):
# print(row, col)
graphs = {}
for r in range(row):
for c in range(0, col - 1):
start = r * col + c
if start not in graphs:
graphs[start] = set()
if start + 1 not in graphs:
graphs[start + 1] = set()
graphs[start + 1].add(start)
graphs[start].add(start + 1)
for r in range(0, row - 1):
for c in range(col):
start = r * col + c
if start not in graphs:
graphs[start] = set()
if start + col not in graphs:
graphs[start + col] = set()
graphs[start].add(start + col)
graphs[start + col].add(start)
return graphs
def bfs(grid, start, dst):
queue = deque([[start]])
seen = set([start])
height, width = grid.shape
wall = [1, 2]
while queue:
path = queue.popleft()
x, y = path[-1]
if (y, x) == dst:
return path
for x2, y2 in ((x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)):
if 0 <= x2 < width and 0 <= y2 < height and grid[y2][
x2] not in wall and (x2, y2) not in seen:
queue.append(path + [(x2, y2)])
seen.add((x2, y2))
def dfs_policy(obs, env):
directions = {
(-1, 0): env.right,
(1, 0): env.left,
(0, -1): env.down,
(0, 1): env.up
}
try:
src = np.where(obs == 2)
src = int(src[1]), int(src[0])
dst = np.where(obs == -1)
dst = int(dst[0]), int(dst[1])
except Exception:
return None, None
paths = bfs(obs, start=src, dst=dst)
if paths is None or len(paths) <= 1:
return None, None
dst = paths[1]
dire = src[0] - dst[0], src[1] - dst[1]
action = directions[dire]
return action, dst
def rel_pos(pos, rel, n_max):
return (pos - rel) % n_max
def draw_graph():
print(GRID_HEIGHT_NUM, GRID_WIDTH_NUM)
# input()
graph = build_graph(row=GRID_HEIGHT_NUM, col=GRID_WIDTH_NUM)
total_graph = deepcopy(graph)
env = SnakeEnv(set_life=100000, alg='HC + BFS', no_sight_disp=True)
env.reset()
sleep(1)
graph, flags = deletion(graph, env)
for sp in graph:
for ep in graph[sp]:
if flags[(sp, ep)]:
# print(sp, ep)
env.draw_connection(sp, ep, width=4)
# env.render()
import pygame
pygame.display.update()
pre_len = None
while True:
sd_len = destroy(graph, total_graph, flags, env=env)
print('sd: ', sd_len)
if pre_len is not None and pre_len == sd_len:
global MAX_DEPTH
print('+1')
MAX_DEPTH += 1
pre_len = sd_len
show_graph(graph, flags, env)
if not sd_len:
break
sleep(1)
show_graph(graph, flags, env)
counter = 0
while not connector(graph, total_graph, flags, env):
counter += 1
print('counter: ', counter)
sleep(1)
for sp in graph:
for ep in graph[sp]:
if flags[(sp, ep)]:
env.draw_connection(sp, ep, color=(0xff, 0xff, 0), width=4)
import pygame
show_graph(graph, flags, env)
circle = get_list_circle(graph)
print(circle)
pos_encoder = {pos: i for i, pos in enumerate(circle)}
# pos_decoder = {i: pos for i, pos in enumerate(circle)}
pos_xy_decoder = {
i: (pos % GRID_WIDTH_NUM, pos // GRID_WIDTH_NUM)
for i, pos in enumerate(circle)
}
pos_xy_encoder = {(pos % GRID_WIDTH_NUM, pos // GRID_WIDTH_NUM): i
for i, pos in enumerate(circle)}
obs = env.reset()
c = 0
while True:
c += 1
if len(env.status.snake_body) < 15:
remainder = 20
elif len(env.status.snake_body) < 30:
remainder = 20
elif len(env.status.snake_body) < 60:
remainder = 30
elif len(env.status.snake_body) < 90:
remainder = 30
elif len(env.status.snake_body) < 120:
remainder = 40
elif len(env.status.snake_body) < 150:
remainder = 80
elif len(env.status.snake_body) < 300:
remainder = 100
elif len(env.status.snake_body) < (GRID_WIDTH_NUM * GRID_HEIGHT_NUM -
10):
remainder = 30
else:
remainder = 5
bfs_action, dst = dfs_policy(obs, env)
bfs_dst_idx = 100000000
if dst:
bfs_dst_idx = pos_xy_encoder[dst]
head = env.status.snake_body[0]
head_pos, tail_pos = pos_xy_encoder[head], pos_xy_encoder[
env.status.snake_body[-1]]
head_idx, tail_idx = pos_xy_encoder[head], pos_xy_encoder[
env.status.snake_body[-1]]
hc_next_pos = pos_xy_decoder[(head_pos + 1) % len(graph)]
directions = {
(-1, 0): env.right,
(1, 0): env.left,
(0, -1): env.down,
(0, 1): env.up
}
dire = head[0] - hc_next_pos[0], head[1] - hc_next_pos[1]
print(head, hc_next_pos, dst, dst not in env.status.snake_body[:-1])
print(head_idx, tail_idx, bfs_dst_idx)
action = directions[dire]
if not env.status.food_pos:
show_graph(graph,
flags,
env,
update=True,
width=1,
extra='倍速: {} X'.format(remainder * 5))
break
food_idx = pos_xy_encoder[env.status.food_pos]
if bfs_action:
print(food_idx, bfs_dst_idx, head_idx, tail_idx)
print(rel_pos(food_idx, tail_idx, len(graph)),
rel_pos(bfs_dst_idx, tail_idx, len(graph)),
rel_pos(head_idx, tail_idx, len(graph)),
rel_pos(tail_idx, tail_idx, len(graph)))
if rel_pos(food_idx, tail_idx, len(graph)) >= rel_pos(
bfs_dst_idx, tail_idx, len(graph)) >= rel_pos(
head_idx, tail_idx, len(graph)) >= rel_pos(
tail_idx, tail_idx, len(graph)):
action = bfs_action
pass
reward, obs, done, _ = env(action)
if done:
show_graph(graph,
flags,
env,
update=True,
width=1,
extra='倍速: {} X'.format(remainder * 5))
print(done)
break
# env.screen.blit(env.background, (0, 0))
if c % remainder == 0:
show_graph(graph,
flags,
env,
update=True,
width=1,
extra='倍速: {} X'.format(remainder * 5))
# env.render(blit=False)
show_graph(graph, flags, env, update=True, width=1)
sleep(10)
input()
if __name__ == '__main__':
draw_graph() | en | 0.327114 | # -*- coding: utf-8 -*- # env.reset() # for sp, ep in edges: # print('-' * 4 * deps, 'flags', ( # start, # ep, # ), flags[(start, ep)]) # print('-' * 4 * deps, 'start at', start, w) # print('-' * 4 * deps, 'found', edges) # print('-' * 4 * deps, ep) # print('-' * 4 * deps, 'found 1', start, ep) # visited.remove(ep) # print('-' * 4 * deps, 'ends 0', ep) # print(start, end) # print(paths) # print('-' * deps * 2, edges) # print('-' * deps * 2, edge, end, flags[edge], 'visited', visited) # print('-' * deps * 2, edge, end, flags[edge]) # print('-' * deps, 'found: ', edge) # print(res) # print(row, col) # input() # print(sp, ep) # env.render() # pos_decoder = {i: pos for i, pos in enumerate(circle)} # env.screen.blit(env.background, (0, 0)) # env.render(blit=False) | 2.656873 | 3 |
app.py | sooftware/KoSpeech-Flask | 3 | 6620952 | import os
import os.path
import torch
import librosa
import soundfile as sf
from modules.const import *
from flask import Flask, request, render_template
from werkzeug.utils import secure_filename
from kospeech.utils import label_to_string, id2char, EOS_token
from modules.parser import parse_audio
from modules.converter import Pcm2Wav, Wav2Pcm
# Basic setting
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['AUDIO_FOLDER'] = './audio_to_play/'
# Single fixed playback file, overwritten on every upload.
AUDIO_TO_PLAY_PATH = os.path.join(app.config['AUDIO_FOLDER'], 'uploaded_audio.wav')
show_graph = False
# Load weight file once at import time.  NOTE(review): `.module` presumably
# unwraps a DataParallel container — confirm against the training code.
model = torch.load('./weight_file/KsponSpeech_87.44%.pt', map_location=DEVICE).module
model.listener.device = DEVICE
model.speller.device = DEVICE
model.speller.max_length = 150
model.eval()  # inference mode
# Create object
# PCM => WAV, WAV => PCM
pcm2wav = Pcm2Wav()
wav2pcm = Wav2Pcm()
def convert2pcm(wave_path, pcm_path):
    """Normalise a recorded wav (mono, 16 kHz PCM16) in place, then emit a pcm copy."""
    samples, _ = librosa.load(wave_path, sr=32000)
    samples = librosa.to_mono(samples)
    samples = librosa.resample(samples, 32000, 16000)
    sf.write(wave_path, samples, 16000, format='wav', endian='little', subtype='PCM_16')
    wav2pcm(wave_path, pcm_path)
def allowed_file(filename):
    """Check the upload's file extension.

    Returns (is_allowed, extension).  Fixes two defects in the original:
    it raised IndexError for filenames without a dot, and for multi-dot
    names it validated with rsplit but returned the split(...) piece, so
    'a.b.wav' was accepted while reporting extension 'b'.
    """
    if '.' not in filename:
        return False, ''
    extension = filename.rsplit('.', 1)[1]
    return extension in ALLOWED_EXTENSIONS, extension
@app.route("/", methods=['GET', 'POST'])
def index():
    """Upload page: on POST, save the audio, run ASR inference, show result."""
    global show_graph
    # If hit play button
    if request.method == 'POST':
        # drop the previous playback file before writing a new one
        if os.path.isfile(AUDIO_TO_PLAY_PATH):
            os.remove(AUDIO_TO_PLAY_PATH)
        file = request.files['file']
        uploaded_file_path = UPLOAD_FOLDER + file.filename
        is_valid, extension = allowed_file(file.filename)  # check condition
        if is_valid:
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            # Convert format: normalise either input kind to both wav + pcm
            if extension.lower() == 'pcm':
                pcm2wav(uploaded_file_path, AUDIO_TO_PLAY_PATH)
            elif extension.lower() == 'wav':
                convert2pcm(uploaded_file_path, AUDIO_TO_PLAY_PATH)
            # Extract feature & Inference by model (greedy decode, no teacher forcing)
            spectrogram = parse_audio('./uploaded_audio/%s' % filename)
            output = model(spectrogram.unsqueeze(0), torch.IntTensor([len(spectrogram)]), teacher_forcing_ratio=0.0)[0]
            logit = torch.stack(output, dim=1).to(DEVICE)
            y_hat = logit.max(-1)[1]
            prediction = str(label_to_string(y_hat, id2char, EOS_token)[0])
            os.remove(uploaded_file_path)
            return render_template('uploaded.html',
                                   audio_path='.%s' % AUDIO_TO_PLAY_PATH,
                                   prediction=prediction)
    # Root page
    return render_template('homepage.html')
if __name__ == "__main__":
    # NOTE(review): debug=True together with host 0.0.0.0 exposes the
    # Werkzeug debugger to the network — disable debug outside development.
    app.run(host='0.0.0.0', port=5000, debug=True)
| import os
import os.path
import torch
import librosa
import soundfile as sf
from modules.const import *
from flask import Flask, request, render_template
from werkzeug.utils import secure_filename
from kospeech.utils import label_to_string, id2char, EOS_token
from modules.parser import parse_audio
from modules.converter import Pcm2Wav, Wav2Pcm
# Basic setting
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['AUDIO_FOLDER'] = './audio_to_play/'
AUDIO_TO_PLAY_PATH = os.path.join(app.config['AUDIO_FOLDER'], 'uploaded_audio.wav')
show_graph = False
# Load weight file
model = torch.load('./weight_file/KsponSpeech_87.44%.pt', map_location=DEVICE).module
model.listener.device = DEVICE
model.speller.device = DEVICE
model.speller.max_length = 150
model.eval()
# Create object
# PCM => WAV, WAV => PCM
pcm2wav = Pcm2Wav()
wav2pcm = Wav2Pcm()
def convert2pcm(wave_path, pcm_path):
""" Convert recorded files to pcm """
y, sr = librosa.load(wave_path, sr=32000)
y = librosa.to_mono(y)
y = librosa.resample(y, 32000, 16000)
sf.write(wave_path, y, 16000, format='wav', endian='little', subtype='PCM_16')
wav2pcm(wave_path, pcm_path)
def allowed_file(filename):
""" Check file format """
return '.' in filename and \
filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS, filename.split('.')[1]
@app.route("/", methods=['GET', 'POST'])
def index():
global show_graph
# If hit play button
if request.method == 'POST':
if os.path.isfile(AUDIO_TO_PLAY_PATH):
os.remove(AUDIO_TO_PLAY_PATH)
file = request.files['file']
uploaded_file_path = UPLOAD_FOLDER + file.filename
is_valid, extension = allowed_file(file.filename) # check condition
if is_valid:
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
# Convert format
if extension.lower() == 'pcm':
pcm2wav(uploaded_file_path, AUDIO_TO_PLAY_PATH)
elif extension.lower() == 'wav':
convert2pcm(uploaded_file_path, AUDIO_TO_PLAY_PATH)
# Extract feature & Inference by model
spectrogram = parse_audio('./uploaded_audio/%s' % filename)
output = model(spectrogram.unsqueeze(0), torch.IntTensor([len(spectrogram)]), teacher_forcing_ratio=0.0)[0]
logit = torch.stack(output, dim=1).to(DEVICE)
y_hat = logit.max(-1)[1]
prediction = str(label_to_string(y_hat, id2char, EOS_token)[0])
os.remove(uploaded_file_path)
return render_template('uploaded.html',
audio_path='.%s' % AUDIO_TO_PLAY_PATH,
prediction=prediction)
# Root page
return render_template('homepage.html')
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5000, debug=True)
| en | 0.708539 | # Basic setting # Load weight file # Create object # PCM => WAV, WAV => PCM Convert recorded files to pcm Check file format # If hit play button # check condition # Convert format # Extract feature & Inference by model # Root page | 2.333569 | 2 |
packs/processor/finescale_processing.py | CiceroAraujo/NU_ADM | 0 | 6620953 | import numpy as np
import scipy.sparse as sp
from scipy.sparse import linalg
def newton_iteration_finescale(F_Jacobian, Ts, adjs, p, s, time_step, wells,all_ids, rel_tol=1e-3):
    """Fully-implicit Newton loop for fine-scale pressure/saturation.

    Solves J * delta = -q at each iteration and updates pressure (first half
    of the unknowns) and water saturation (second half).  Returns
    (converged, iterations, pressure, saturation).

    NOTE(review): *converged* is never set to True — the convergence test is
    commented out below, so the loop always runs to the 20-iteration cap and
    returns False.  Re-enable the rel_tol check if that is unintended.
    """
    pressure = p.copy()
    swns = s.copy()
    swn1s = s.copy()
    converged=False
    count=0
    dt=time_step
    while not converged:
        swns[wells['ws_inj']]=1  # injector cells are fully water-saturated
        J, q=get_jacobian_matrix(Ts, adjs, swns, swn1s, time_step, wells, F_Jacobian, all_ids, p)
        sol=-linalg.spsolve(J, q)
        n=int(len(q)/2)
        pressure+=sol[0:n]
        swns+=sol[n:]
        swns[wells['ws_inj']]=1
        # converged=max(abs(sol[n:][not_prod]))<rel_tol
        # NOTE(review): the same value is printed twice — probably meant to
        # report the pressure and saturation residuals separately.
        print(max(abs(sol)),max(abs(sol)),'fs')
        count+=1
        if count>20:
            print('excedded maximum number of iterations finescale')
            return False, count, pressure, swns
    return True, count, pressure, swns
def get_jacobian_matrix(Ts, Adjs, Swns, Swn1s, time_step, wells, F_Jacobian, ID_vol, p):
    """Assemble the two-phase FIM Jacobian and residual in COO triplets.

    Unknown ordering: [pressure (0..n-1), water saturation (n..2n-1)].
    Accumulation terms are added per cell, flux terms per internal face with
    upwinding of the saturation, then well boundary conditions are imposed
    by apply_BC().  Returns (J, q) with J a 2n x 2n CSC matrix.

    NOTE(review): Swns is clipped to [0, 1] in place — the caller's array is
    mutated; confirm that is intended.
    """
    n=len(ID_vol)
    count=0
    Swns[Swns<0]=0
    Swns[Swns>1]=1
    lines=[]
    cols=[]
    data=[]
    # diagonal accumulation derivatives (oil row / water row wrt saturation)
    lines.append(ID_vol)
    cols.append(n+ID_vol)
    data.append(F_Jacobian.c_o(0.3,np.repeat(time_step,n)))
    lines.append(n+ID_vol)
    cols.append(n+ID_vol)
    data.append(F_Jacobian.c_w(0.3,np.repeat(time_step,n)))
    # residual accumulation terms (porosity hard-coded at 0.3)
    linesq=[]
    dataq=[]
    linesq.append(ID_vol)
    dataq.append(F_Jacobian.acum_o(0.3,time_step,Swns,Swn1s))
    linesq.append(n+ID_vol)
    dataq.append(F_Jacobian.acum_w(0.3,time_step,Swns,Swn1s))
    # face-wise quantities: endpoints, saturations and pressures per face
    adj0=np.array(Adjs[:,0])
    adj1=np.array(Adjs[:,1])
    ids0=ID_vol[adj0]
    ids1=ID_vol[adj1]
    ID_vol=ids0
    id_j=ids1
    swns0=Swns[ids0]
    swns1=Swns[ids1]
    press0=p[adj0]
    press1=p[adj1]
    pf0=press0
    pf1=press1
    # upwind the face saturation from the higher-pressure side
    up0=pf0>pf1
    up1=pf0<=pf1
    nfi=len(Adjs)
    swf=np.zeros(nfi)
    swf[up0]=swns0[up0]
    swf[up1]=swns1[up1]
    id_up=np.zeros(nfi,dtype=np.int32)
    id_up[up0]=ids0[up0]
    id_up[up1]=ids1[up1]
    # flux derivatives wrt pressure (J00/J10) and upwind saturation (J01/J11)
    J00=F_Jacobian.J[0][0](Ts,swf)
    J01=F_Jacobian.J[0][1](Ts,swf, pf0, pf1)
    J10=F_Jacobian.J[1][0](Ts,swf)
    J11=F_Jacobian.J[1][1](Ts,swf, pf0, pf1)
    # residual flux contributions (antisymmetric between the two cells)
    linesq.append(ID_vol)
    dataq.append(-F_Jacobian.F_o(Ts,swf, pf0, pf1))
    linesq.append(id_j)
    dataq.append(-F_Jacobian.F_o(Ts,swf, pf1, pf0))
    linesq.append(n+ID_vol)
    dataq.append(-F_Jacobian.F_w(Ts,swf, pf0, pf1))
    linesq.append(n+id_j)
    dataq.append(-F_Jacobian.F_w(Ts,swf, pf1, pf0))
    # pressure-block entries: -J00 on the diagonals, +J00 off-diagonal
    lines.append(ID_vol)
    cols.append(ID_vol)
    data.append(-J00)
    lines.append(id_j)
    cols.append(id_j)
    data.append(-J00)
    lines.append(ID_vol)
    cols.append(id_j)
    data.append(J00)
    lines.append(id_j)
    cols.append(ID_vol)
    data.append(J00)
    # water-row pressure derivatives
    lines.append(n+ID_vol)
    cols.append(ID_vol)
    data.append(-J10)
    lines.append(n+id_j)
    cols.append(id_j)
    data.append(-J10)
    lines.append(n+ID_vol)
    cols.append(id_j)
    data.append(J10)
    lines.append(n+id_j)
    cols.append(ID_vol)
    data.append(J10)
    # saturation derivatives hit only the upwind cell's saturation column
    lines.append(ID_vol)
    cols.append(n+id_up)
    data.append(-J01)
    lines.append(id_j)
    cols.append(n+id_up)
    data.append(J01)
    lines.append(n+ID_vol)
    cols.append(n+id_up)
    data.append(-J11)
    lines.append(n+id_j)
    cols.append(n+id_up)
    data.append(J11)
    # collapse triplets; duplicate (row) residual entries are summed by bincount
    lines=np.concatenate(lines)
    cols=np.concatenate(cols)
    data=np.concatenate(data)
    linesq=np.concatenate(linesq)
    dataq=np.concatenate(dataq)
    q=np.bincount(linesq, weights=dataq)
    lines, cols, data, q = apply_BC(lines, cols, data, q, wells)
    J=sp.csc_matrix((data,(lines,cols)),shape=(2*n,2*n))
    return(J, q)
def apply_BC(lines, cols, data, q, wells):
    """Impose well boundary conditions on the COO triplets and residual.

    Residual entries at pressure-controlled wells (``ws_p``) and at injector
    saturation rows (``ws_inj`` + n) are zeroed *in place*; prescribed rates
    (``values_q``) are added on the first call (``wells['count'] == 0``).
    Each constrained Jacobian row is cleared and replaced by a unit diagonal.

    Returns the (possibly re-allocated) ``lines, cols, data`` arrays and ``q``.
    """
    half = len(q) // 2
    # Zero the residual at constrained rows (mutates the caller's array).
    q[wells['ws_p']] = 0
    q[wells['ws_inj'] + half] = 0
    if wells['count'] == 0 and len(wells['values_q']) > 0:
        q[wells['ws_q']] += wells['values_q']
    # Pressure rows first, then the saturation rows of rate-free injectors,
    # exactly mirroring the original processing order.
    constrained = [(w, w) for w in wells['ws_p']]
    constrained += [(w + half, w + half) for w in np.setdiff1d(wells['ws_inj'], wells['ws_q'])]
    for row, diag in constrained:
        # Wipe every existing entry in this row, then put a 1 on the diagonal.
        data[lines == row] = 0
        lines = np.append(lines, row)
        cols = np.append(cols, diag)
        data = np.append(data, 1)
    return lines, cols, data, q
| import numpy as np
import scipy.sparse as sp
from scipy.sparse import linalg
def newton_iteration_finescale(F_Jacobian, Ts, adjs, p, s, time_step, wells, all_ids, rel_tol=1e-3):
    """Solve one fully-implicit fine-scale time step with Newton's method.

    Parameters
    ----------
    F_Jacobian : object
        Holds the callables used by :func:`get_jacobian_matrix` to assemble
        the Jacobian and residual.
    Ts : array_like
        Face transmissibilities.
    adjs : array_like
        Face adjacencies, one (i, j) pair of volume indices per face.
    p, s : numpy.ndarray
        Initial pressure and water-saturation fields (copied, not modified).
    time_step : float
        Time-step size.
    wells : dict
        Well description (``ws_inj``, ``ws_p``, ``ws_q``, ``values_q``, ``count``).
    all_ids : numpy.ndarray
        Global ids of all volumes.
    rel_tol : float, optional
        Convergence tolerance on the maximum absolute Newton update.

    Returns
    -------
    tuple
        ``(converged, iterations, pressure, saturation)``.
    """
    pressure = p.copy()
    swns = s.copy()
    swn1s = s.copy()  # saturation at the previous time level, frozen during the iteration
    converged = False
    count = 0
    while not converged:
        swns[wells['ws_inj']] = 1  # injectors are held fully water-saturated
        J, q = get_jacobian_matrix(Ts, adjs, swns, swn1s, time_step, wells, F_Jacobian, all_ids, p)
        sol = -linalg.spsolve(J, q)
        n = int(len(q) / 2)
        pressure += sol[0:n]   # first half of the unknowns: pressure updates
        swns += sol[n:]        # second half: saturation updates
        swns[wells['ws_inj']] = 1
        # Bug fix: `converged` was never updated, so the success return below
        # was unreachable and `rel_tol` was ignored; converge on the Newton
        # update norm instead (restores the intent of the commented-out check).
        converged = max(abs(sol)) < rel_tol
        # Report pressure and saturation update norms separately (the original
        # printed the same combined value twice).
        print(max(abs(sol[:n])), max(abs(sol[n:])), 'fs')
        count += 1
        if count > 20:
            print('exceeded maximum number of iterations finescale')
            return False, count, pressure, swns
    return True, count, pressure, swns
def get_jacobian_matrix(Ts, Adjs, Swns, Swn1s, time_step, wells, F_Jacobian, ID_vol, p):
    """Assemble the fully-implicit Jacobian ``J`` and residual ``q`` in sparse form.

    The unknown vector is ordered ``[pressure (rows 0..n-1), water saturation
    (rows n..2n-1)]`` with ``n = len(ID_vol)``. The matrix is accumulated as
    COO triplets (``lines``/``cols``/``data``) and the residual via
    ``np.bincount``; well boundary conditions are applied by :func:`apply_BC`.

    Notes
    -----
    * Porosity is hard-coded to 0.3 in the accumulation terms.
    * ``Swns`` is clipped to [0, 1] *in place* (mutates the caller's array).
    * ``F_Jacobian`` must expose callables ``J[i][j]``, ``F_o``, ``F_w``,
      ``c_o``, ``c_w``, ``acum_o`` and ``acum_w``.

    Returns
    -------
    tuple
        ``(J, q)`` — a ``scipy.sparse.csc_matrix`` of shape (2n, 2n) and the
        residual vector.
    """
    # Ts, adjs, swns, swn1s, time_step, wells, F_Jacobian
    n=len(ID_vol)
    count=0
    # Swns=self.swns
    # Swn1s=self.swn1s
    # Clip saturations to the physical range [0, 1] (in place).
    Swns[Swns<0]=0
    Swns[Swns>1]=1
    # Swn1s[Swn1s<0]=0
    # Swn1s[Swn1s>1]=1
    # ID_vol=self.ids
    # --- accumulation (diagonal) terms: one entry per volume -----------------
    lines=[]
    cols=[]
    data=[]
    lines.append(ID_vol)
    cols.append(n+ID_vol)
    data.append(F_Jacobian.c_o(0.3,np.repeat(time_step,n)))
    # J[ID_vol][n+ID_vol]+=float(F_Jacobian().c_o.subs({Dx:self.Dx, Dy:self.Dy, phi:0.3, Dt:self.dt}))
    lines.append(n+ID_vol)
    cols.append(n+ID_vol)
    data.append(F_Jacobian.c_w(0.3,np.repeat(time_step,n)))
    # J[n+ID_vol][n+ID_vol]+=float(F_Jacobian().c_w.subs({Dx:self.Dx, Dy:self.Dy, phi:0.3, Dt:self.dt}))
    # Residual accumulation terms (same row layout: oil rows, then water rows).
    linesq=[]
    dataq=[]
    linesq.append(ID_vol)
    dataq.append(F_Jacobian.acum_o(0.3,time_step,Swns,Swn1s))
    # q[ID_vol]+=float(F_Jacobian().acum_o.subs({Dx:self.Dx, Dy:self.Dy, phi:0.3, Dt:self.dt, Sw:Swns[count], Swn:Swn1s[count]}))
    linesq.append(n+ID_vol)
    dataq.append(F_Jacobian.acum_w(0.3,time_step,Swns,Swn1s))
    # q[n+ID_vol]+=float(F_Jacobian().acum_w.subs({Dx:self.Dx, Dy:self.Dy, phi:0.3, Dt:self.dt, Sw:Swns[count], Swn:Swn1s[count]}))
    # Adjs=np.array(self.adjs)
    # --- per-face quantities: endpoint ids, pressures, upwinded saturation ---
    adj0=np.array(Adjs[:,0])
    adj1=np.array(Adjs[:,1])
    ids0=ID_vol[adj0]
    ids1=ID_vol[adj1]
    # NOTE: from here on `ID_vol` / `id_j` are reused as the per-face endpoint
    # ids (the original per-volume ID_vol array is no longer needed).
    ID_vol=ids0
    id_j=ids1
    swns0=Swns[ids0]
    swns1=Swns[ids1]
    press0=p[adj0]
    press1=p[adj1]
    pf0=press0
    pf1=press1
    # Upwinding: take the saturation (and the saturation column id_up) from
    # the higher-pressure side of each face.
    up0=pf0>pf1
    up1=pf0<=pf1
    nfi=len(Adjs)
    swf=np.zeros(nfi)
    swf[up0]=swns0[up0]
    swf[up1]=swns1[up1]
    id_up=np.zeros(nfi,dtype=np.int32)
    id_up[up0]=ids0[up0]
    id_up[up1]=ids1[up1]
    # Ts=self.Ts
    # Flux derivatives at the upwinded face state: J00 = dF_o/dp, J01 = dF_o/dSw,
    # J10 = dF_w/dp, J11 = dF_w/dSw (as provided by F_Jacobian).
    J00=F_Jacobian.J[0][0](Ts,swf)
    # J00=float(self.F_Jacobian[0][0].subs({T:1, Sw:swf}))
    J01=F_Jacobian.J[0][1](Ts,swf, pf0, pf1)
    # J01=float(self.F_Jacobian[0][1].subs({T:1, Sw:swf, p_i:pv, p_j:pj}))
    J10=F_Jacobian.J[1][0](Ts,swf)
    # J10=float(self.F_Jacobian[1][0].subs({T:1, Sw:swf}))
    J11=F_Jacobian.J[1][1](Ts,swf, pf0, pf1)
    # J11=float(self.F_Jacobian[1][1].subs({T:1, Sw:swf, p_i:pv, p_j:pj}))
    # --- flux contributions to the residual (antisymmetric per face) ---------
    linesq.append(ID_vol)
    dataq.append(-F_Jacobian.F_o(Ts,swf, pf0, pf1))
    linesq.append(id_j)
    dataq.append(-F_Jacobian.F_o(Ts,swf, pf1, pf0))
    # q[ID_vol]-=float(F_Jacobian().F_o.subs({T:1.0, Sw:Swns1[count_fac], p_i:pv, p_j:pj}))
    linesq.append(n+ID_vol)
    dataq.append(-F_Jacobian.F_w(Ts,swf, pf0, pf1))
    linesq.append(n+id_j)
    dataq.append(-F_Jacobian.F_w(Ts,swf, pf1, pf0))
    # q[n+ID_vol]-=float(F_Jacobian().F_w.subs({T:1.0, Sw:Swns1[count_fac], p_i:pv, p_j:pj}))
    # --- flux contributions to the Jacobian (two-point stencil per face) -----
    lines.append(ID_vol)
    cols.append(ID_vol)
    data.append(-J00)
    lines.append(id_j)
    cols.append(id_j)
    data.append(-J00)
    # J[ID_vol][ID_vol]-=J00
    lines.append(ID_vol)
    cols.append(id_j)
    data.append(J00)
    lines.append(id_j)
    cols.append(ID_vol)
    data.append(J00)
    # J[ID_vol][id_j]+=J00
    lines.append(n+ID_vol)
    cols.append(ID_vol)
    data.append(-J10)
    lines.append(n+id_j)
    cols.append(id_j)
    data.append(-J10)
    # J[n+ID_vol][ID_vol]-=J10
    lines.append(n+ID_vol)
    cols.append(id_j)
    data.append(J10)
    lines.append(n+id_j)
    cols.append(ID_vol)
    data.append(J10)
    # J[n+ID_vol][id_j]+=J10
    # Saturation derivatives couple both endpoints to the upwind column only.
    lines.append(ID_vol)
    cols.append(n+id_up)
    data.append(-J01)
    lines.append(id_j)
    cols.append(n+id_up)
    data.append(J01)
    # J[ID_vol][n+id_up]-=J01
    lines.append(n+ID_vol)
    cols.append(n+id_up)
    data.append(-J11)
    lines.append(n+id_j)
    cols.append(n+id_up)
    data.append(J11)
    # J[n+ID_vol][n+id_up]-=J11
    # --- assembly: flatten triplets, sum duplicate residual entries ----------
    lines=np.concatenate(lines)
    cols=np.concatenate(cols)
    data=np.concatenate(data)
    linesq=np.concatenate(linesq)
    dataq=np.concatenate(dataq)
    # bincount sums the weights of duplicate row indices into a dense vector.
    q=np.bincount(linesq, weights=dataq)
    lines, cols, data, q = apply_BC(lines, cols, data, q, wells)
    # csc_matrix sums duplicate (row, col) triplets during construction.
    J=sp.csc_matrix((data,(lines,cols)),shape=(2*n,2*n))
    return(J, q)
def apply_BC(lines, cols, data, q, wells):
    """Impose well boundary conditions on the COO triplets and residual.

    Zeroes the residual at pressure-controlled wells (``ws_p``) and at the
    saturation rows of injectors (``ws_inj`` + n) *in place*, adds prescribed
    rates (``values_q``) on the first call (``wells['count'] == 0``), and
    replaces each constrained Jacobian row by a unit diagonal.

    Returns the (re-allocated by ``np.append``) ``lines, cols, data`` and ``q``.
    """
    n=int(len(q)/2)
    # Zero the residual at constrained rows (mutates the caller's array).
    q[wells['ws_p']]=0
    q[wells['ws_inj']+n]=0
    if (wells['count']==0) and (len(wells['values_q'])>0):
        q[wells['ws_q']]+=wells['values_q']
    # Pressure-controlled wells: clear the row, put a 1 on the diagonal.
    for l in wells['ws_p']:
        data[lines==l]=0
        lines=np.append(lines,l)
        cols=np.append(cols,l)
        data=np.append(data,1)
    # Saturation rows of injectors that are not also rate wells.
    for l in np.setdiff1d(wells['ws_inj'],wells['ws_q']):
        data[lines==l+n]=0
        lines=np.append(lines,l+n)
        cols=np.append(cols,l+n)
        data=np.append(data,1)
    return lines, cols, data, q
| en | 0.251126 | # data_impress['swn1s']=data_impress['swns'].copy() # all_ids=GID_0 # not_prod=np.setdiff1d(all_ids,wells['all_wells']) # Ts, adjs, Swns, Swn1s, F_Jacobian, ID_vol # J=FIM.J # q=FIM.q # converged=max(abs(sol[n:][not_prod]))<rel_tol # saturation[wells['ws_prod']]=saturation[wells['viz_prod']].sum()/len(wells['viz_prod']) # Ts, adjs, swns, swn1s, time_step, wells, F_Jacobian # Swns=self.swns # Swn1s=self.swn1s # Swn1s[Swn1s<0]=0 # Swn1s[Swn1s>1]=1 # ID_vol=self.ids # J[ID_vol][n+ID_vol]+=float(F_Jacobian().c_o.subs({Dx:self.Dx, Dy:self.Dy, phi:0.3, Dt:self.dt})) # J[n+ID_vol][n+ID_vol]+=float(F_Jacobian().c_w.subs({Dx:self.Dx, Dy:self.Dy, phi:0.3, Dt:self.dt})) # q[ID_vol]+=float(F_Jacobian().acum_o.subs({Dx:self.Dx, Dy:self.Dy, phi:0.3, Dt:self.dt, Sw:Swns[count], Swn:Swn1s[count]})) # q[n+ID_vol]+=float(F_Jacobian().acum_w.subs({Dx:self.Dx, Dy:self.Dy, phi:0.3, Dt:self.dt, Sw:Swns[count], Swn:Swn1s[count]})) # Adjs=np.array(self.adjs) # Ts=self.Ts # J00=float(self.F_Jacobian[0][0].subs({T:1, Sw:swf})) # J01=float(self.F_Jacobian[0][1].subs({T:1, Sw:swf, p_i:pv, p_j:pj})) # J10=float(self.F_Jacobian[1][0].subs({T:1, Sw:swf})) # J11=float(self.F_Jacobian[1][1].subs({T:1, Sw:swf, p_i:pv, p_j:pj})) # q[ID_vol]-=float(F_Jacobian().F_o.subs({T:1.0, Sw:Swns1[count_fac], p_i:pv, p_j:pj})) # q[n+ID_vol]-=float(F_Jacobian().F_w.subs({T:1.0, Sw:Swns1[count_fac], p_i:pv, p_j:pj})) # J[ID_vol][ID_vol]-=J00 # J[ID_vol][id_j]+=J00 # J[n+ID_vol][ID_vol]-=J10 # J[n+ID_vol][id_j]+=J10 # J[ID_vol][n+id_up]-=J01 # J[n+ID_vol][n+id_up]-=J11 | 2.239981 | 2 |
sir/amqp/message.py | jsanzmex/sir | 11 | 6620954 | #!/usr/bin/env python
# coding: utf-8
# Copyright (c) 2014 <NAME>
# License: MIT, see LICENSE for details
"""
This module contains functions and classes to parse and represent the content
of an AMQP message.
"""
from sir.trigger_generation.sql_generator import MSG_JSON_TABLE_NAME_KEY, MSG_JSON_OPERATION_TYPE
from enum import Enum
import ujson
#: The kinds of messages this module understands.
MESSAGE_TYPES = Enum("MESSAGE_TYPES", "delete index")
#: Maps an AMQP queue name to the corresponding message type.
QUEUE_TO_TYPE = {
    "search.delete": MESSAGE_TYPES.delete,
    "search.index": MESSAGE_TYPES.index,
}
class Message(object):
    """
    A parsed message from AMQP.
    """
    def __init__(self, message_type, table_name, columns, operation):
        """
        Construct a new message object.
        A message contains a set of columns (dict) which can be used to determine
        which row has been updated. In case of messages from the `index` queue
        it will be a set of PK columns, and `gid` column for `delete` queue messages.
        :param message_type: Type of the message. A member of :class:`MESSAGE_TYPES`.
        :param str table_name: Name of the table the message is associated with.
        :param dict columns: Dictionary mapping columns of the table to their values.
        :param str operation: Operation that triggered the message (may be empty).
        """
        self.message_type = message_type
        self.table_name = table_name
        self.columns = columns
        self.operation = operation
    @classmethod
    def from_amqp_message(cls, queue_name, amqp_message):
        """
        Parses an AMQP message.
        :param str queue_name: Name of the queue where the message originated from.
        :param amqp.basic_message.Message amqp_message: Message object from the queue.
        :raises ValueError: If `queue_name` is not a known queue.
        :raises InvalidMessageContentException: If the body is not valid JSON
            or required keys are missing.
        :rtype: :class:`sir.amqp.message.Message`
        """
        # Guard clause; the dict itself supports membership tests, so the
        # previous `.keys()` call and `else` branch were redundant.
        if queue_name not in QUEUE_TO_TYPE:
            raise ValueError("Unknown queue: %s" % queue_name)
        message_type = QUEUE_TO_TYPE[queue_name]
        try:
            data = ujson.loads(amqp_message.body)
        except ValueError as e:
            raise InvalidMessageContentException("Invalid message format (expected JSON): %s" % e)
        table_name = data.pop(MSG_JSON_TABLE_NAME_KEY, None)
        if not table_name:
            raise InvalidMessageContentException("Table name is missing")
        # Bug fix: pop the operation *before* validating that reference values
        # remain. Previously a body containing only the operation key slipped
        # past the check below and produced a Message with empty `columns`.
        operation = data.pop(MSG_JSON_OPERATION_TYPE, "")
        # After the table name and operation are extracted, only PK(s) should
        # be left: a set of PKs for the `index` queue, a GID value for `delete`.
        if not data:
            raise InvalidMessageContentException("Reference values are not specified")
        return cls(message_type, table_name, data, operation)
class InvalidMessageContentException(ValueError):
    """
    Exception indicating an error with the content of an AMQP message.

    Raised when the message body is not valid JSON, or when required keys
    (table name, reference values) are missing from it.
    """
    pass
| #!/usr/bin/env python
# coding: utf-8
# Copyright (c) 2014 <NAME>
# License: MIT, see LICENSE for details
"""
This module contains functions and classes to parse and represent the content
of an AMQP message.
"""
from sir.trigger_generation.sql_generator import MSG_JSON_TABLE_NAME_KEY, MSG_JSON_OPERATION_TYPE
from enum import Enum
import ujson
#: The kinds of messages this module understands.
MESSAGE_TYPES = Enum("MESSAGE_TYPES", "delete index")
#: Maps an AMQP queue name to the corresponding message type.
QUEUE_TO_TYPE = {
    "search.delete": MESSAGE_TYPES.delete,
    "search.index": MESSAGE_TYPES.index,
}
class Message(object):
    """
    A parsed message from AMQP.
    """
    def __init__(self, message_type, table_name, columns, operation):
        """
        Construct a new message object.
        A message contains a set of columns (dict) which can be used to determine
        which row has been updated. In case of messages from the `index` queue
        it will be a set of PK columns, and `gid` column for `delete` queue messages.
        :param message_type: Type of the message. A member of :class:`MESSAGE_TYPES`.
        :param str table_name: Name of the table the message is associated with.
        :param dict columns: Dictionary mapping columns of the table to their values.
        :param str operation: Operation that triggered the message (may be empty).
        """
        self.message_type = message_type
        self.table_name = table_name
        self.columns = columns
        self.operation = operation
    @classmethod
    def from_amqp_message(cls, queue_name, amqp_message):
        """
        Parses an AMQP message.
        :param str queue_name: Name of the queue where the message originated from.
        :param amqp.basic_message.Message amqp_message: Message object from the queue.
        :raises ValueError: If `queue_name` is not a known queue.
        :raises InvalidMessageContentException: If the body is not valid JSON
            or required keys are missing.
        :rtype: :class:`sir.amqp.message.Message`
        """
        if queue_name not in QUEUE_TO_TYPE.keys():
            raise ValueError("Unknown queue: %s" % queue_name)
        else:
            message_type = QUEUE_TO_TYPE[queue_name]
        try:
            data = ujson.loads(amqp_message.body)
        except ValueError as e:
            raise InvalidMessageContentException("Invalid message format (expected JSON): %s" % e)
        table_name = data.pop(MSG_JSON_TABLE_NAME_KEY, None)
        if not table_name:
            raise InvalidMessageContentException("Table name is missing")
        # After table name is extracted from the message only PK(s) should be left.
        if not data:
            # For the `index` queue the data will be a set of PKs, and for `delete`
            # queue it will be a GID value.
            raise InvalidMessageContentException("Reference values are not specified")
        operation = data.pop(MSG_JSON_OPERATION_TYPE, "")
        return cls(message_type, table_name, data, operation)
class InvalidMessageContentException(ValueError):
    """
    Exception indicating an error with the content of an AMQP message.

    Raised when the message body is not valid JSON, or when required keys
    (table name, reference values) are missing from it.
    """
    pass
| en | 0.702803 | #!/usr/bin/env python # coding: utf-8 # Copyright (c) 2014 <NAME> # License: MIT, see LICENSE for details This module contains functions and classes to parse and represent the content of an AMQP message. A parsed message from AMQP. Construct a new message object. A message contains a set of columns (dict) which can be used to determine which row has been updated. In case of messages from the `index` queue it will be a set of PK columns, and `gid` column for `delete` queue messages. :param message_type: Type of the message. A member of :class:`MESSAGE_TYPES`. :param str table_name: Name of the table the message is associated with. :param dict columns: Dictionary mapping columns of the table to their values. Parses an AMQP message. :param str queue_name: Name of the queue where the message originated from. :param amqp.basic_message.Message amqp_message: Message object from the queue. :rtype: :class:`sir.amqp.message.Message` # After table name is extracted from the message only PK(s) should be left. # For the `index` queue the data will be a set of PKs, and for `delete` # queue it will be a GID value. Exception indicating an error with the content of an AMQP message. | 2.737389 | 3 |
snippet_fmt/__init__.py | python-formate/snippet-fmt | 0 | 6620955 | #!/usr/bin/env python3
#
# __init__.py
"""
Format and validate code snippets in reStructuredText files.
"""
#
# Copyright © 2021 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
#
# Parts based on https://github.com/asottile/blacken-docs
# Copyright (c) 2018 <NAME>
# MIT Licensed
#
# stdlib
import contextlib
import re
import textwrap
from typing import Dict, Iterator, List, Match, NamedTuple, Optional
# 3rd party
import click
import entrypoints # type: ignore
from consolekit.terminal_colours import ColourTrilean, resolve_color_default
from consolekit.utils import coloured_diff
from domdf_python_tools.paths import PathPlus
from domdf_python_tools.stringlist import StringList
from domdf_python_tools.typing import PathLike
from formate.utils import syntaxerror_for_file
# this package
from snippet_fmt.config import SnippetFmtConfigDict
from snippet_fmt.formatters import Formatter, format_ini, format_json, format_python, format_toml, noformat
__author__: str = "<NAME>"
__copyright__: str = "2021 <NAME>"
__license__: str = "MIT License"
__version__: str = "0.1.4"
__email__: str = "<EMAIL>"
__all__ = ["CodeBlockError", "RSTReformatter", "reformat_file"]
TRAILING_NL_RE = re.compile(r'\n+\Z', re.MULTILINE)
class CodeBlockError(NamedTuple):
"""
Represents an exception raised when parsing and reformatting a code block.
"""
#: The character offset where the exception was raised.
offset: int
#: The exception itself.
exc: Exception
class RSTReformatter:
"""
Reformat code snippets in a reStructuredText file.
:param filename: The filename to reformat.
:param config: The ``snippet_fmt`` configuration, parsed from a TOML file (or similar).
"""
#: The filename being reformatted.
filename: str
#: The filename being reformatted, as a POSIX-style path.
file_to_format: PathPlus
#: The ``formate`` configuration, parsed from a TOML file (or similar).
config: SnippetFmtConfigDict
errors: List[CodeBlockError]
def __init__(self, filename: PathLike, config: SnippetFmtConfigDict):
self.file_to_format = PathPlus(filename)
self.filename = self.file_to_format.as_posix()
self.config = config
self._unformatted_source = self.file_to_format.read_text()
self._reformatted_source: Optional[str] = None
self.errors = []
self._formatters: Dict[str, Formatter] = {
"bash": noformat,
"python": format_python,
"python3": format_python,
"toml": format_toml,
"ini": format_ini,
"json": format_json,
}
self.load_extra_formatters()
def run(self) -> bool:
"""
Run the reformatter.
:return: Whether the file was changed.
"""
content = StringList(self._unformatted_source)
content.blankline(ensure_single=True)
directives = '|'.join(self.config["directives"])
pattern = re.compile(
rf'(?P<before>'
rf'^(?P<indent>[ \t]*)\.\.[ \t]*('
rf'({directives})::\s*(?P<lang>[A-Za-z0-9-_]+)?)\n'
rf'((?P=indent)[ \t]+:.*\n)*' # Limitation: should be `(?P=body_indent)` rather than `[ \t]+`
rf'\n*'
rf')'
rf'(?P<code>^((?P=indent)(?P<body_indent>[ \t]+).*)?\n(^((?P=indent)(?P=body_indent).*)?\n)*)',
re.MULTILINE,
)
self._reformatted_source = pattern.sub(self.process_match, str(content))
for error in self.errors:
lineno = self._unformatted_source[:error.offset].count('\n') + 1
click.echo(f"{self.filename}:{lineno}: {error.exc.__class__.__name__}: {error.exc}", err=True)
return self._reformatted_source != self._unformatted_source
def process_match(self, match: Match[str]) -> str:
"""
Process a :meth:`re.Match <re.Match.expand>` for a single code block.
:param match:
"""
lang = match.group("lang")
if lang in self.config["languages"]:
lang_config = self.config["languages"][lang]
# TODO: show warning if not found and in "strict" mode
formatter = self._formatters.get(lang.lower(), noformat)
else:
lang_config = {}
formatter = noformat
trailing_ws_match = TRAILING_NL_RE.search(match["code"])
assert trailing_ws_match
trailing_ws = trailing_ws_match.group()
code = textwrap.dedent(match["code"])
with self._collect_error(match):
with syntaxerror_for_file(self.filename):
code = formatter(code, **lang_config)
code = textwrap.indent(code, match["indent"] + match["body_indent"])
return f'{match["before"]}{code.rstrip()}{trailing_ws}'
def get_diff(self) -> str:
"""
Returns the diff between the original and reformatted file content.
"""
# Based on yapf
# Apache 2.0 License
after = self.to_string().split('\n')
before = self._unformatted_source.split('\n')
return coloured_diff(
before,
after,
self.filename,
self.filename,
"(original)",
"(reformatted)",
lineterm='',
)
def to_string(self) -> str:
"""
Return the reformatted file as a string.
"""
if self._reformatted_source is None:
raise ValueError("'Reformatter.run()' must be called first!")
return self._reformatted_source
def to_file(self) -> None:
"""
Write the reformatted source to the original file.
"""
self.file_to_format.write_text(self.to_string())
@contextlib.contextmanager
def _collect_error(self, match: Match[str]) -> Iterator[None]:
try:
yield
except Exception as e:
self.errors.append(CodeBlockError(match.start(), e))
def load_extra_formatters(self) -> None:
"""
Load custom formatters defined via entry points.
"""
group = "snippet_fmt.formatters"
for distro_config, _ in entrypoints.iter_files_distros():
if group in distro_config:
for name, epstr in distro_config[group].items():
with contextlib.suppress(entrypoints.BadEntryPoint, ImportError):
# TODO: show warning for bad entry point if verbose, or "strict"?
ep = entrypoints.EntryPoint.from_string(epstr, name)
self._formatters[name] = ep.load()
def reformat_file(
filename: PathLike,
config: SnippetFmtConfigDict,
colour: ColourTrilean = None,
):
"""
Reformat the given reStructuredText file, and show the diff if changes were made.
:param filename: The filename to reformat.
:param config: The ``snippet-fmt`` configuration, parsed from a TOML file (or similar).
:param colour: Whether to force coloured output on (:py:obj:`True`) or off (:py:obj:`False`).
"""
r = RSTReformatter(filename, config)
ret = r.run()
if ret:
click.echo(r.get_diff(), color=resolve_color_default(colour))
r.to_file()
return ret
| #!/usr/bin/env python3
#
# __init__.py
"""
Format and validate code snippets in reStructuredText files.
"""
#
# Copyright © 2021 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
#
# Parts based on https://github.com/asottile/blacken-docs
# Copyright (c) 2018 <NAME>
# MIT Licensed
#
# stdlib
import contextlib
import re
import textwrap
from typing import Dict, Iterator, List, Match, NamedTuple, Optional
# 3rd party
import click
import entrypoints # type: ignore
from consolekit.terminal_colours import ColourTrilean, resolve_color_default
from consolekit.utils import coloured_diff
from domdf_python_tools.paths import PathPlus
from domdf_python_tools.stringlist import StringList
from domdf_python_tools.typing import PathLike
from formate.utils import syntaxerror_for_file
# this package
from snippet_fmt.config import SnippetFmtConfigDict
from snippet_fmt.formatters import Formatter, format_ini, format_json, format_python, format_toml, noformat
__author__: str = "<NAME>"
__copyright__: str = "2021 <NAME>"
__license__: str = "MIT License"
__version__: str = "0.1.4"
__email__: str = "<EMAIL>"
__all__ = ["CodeBlockError", "RSTReformatter", "reformat_file"]
TRAILING_NL_RE = re.compile(r'\n+\Z', re.MULTILINE)
class CodeBlockError(NamedTuple):
    """
    Represents an exception raised when parsing and reformatting a code block.

    Collected in :attr:`RSTReformatter.errors` so that all failures in a file
    can be reported with line numbers after processing finishes.
    """
    #: The character offset (into the unformatted source) where the exception was raised.
    offset: int
    #: The exception itself.
    exc: Exception
class RSTReformatter:
    """
    Reformat code snippets in a reStructuredText file.
    :param filename: The filename to reformat.
    :param config: The ``snippet_fmt`` configuration, parsed from a TOML file (or similar).
    """
    #: The filename being reformatted.
    filename: str
    #: The filename being reformatted, as a POSIX-style path.
    file_to_format: PathPlus
    #: The ``formate`` configuration, parsed from a TOML file (or similar).
    config: SnippetFmtConfigDict
    #: Errors collected while formatting code blocks; reported by :meth:`run`.
    errors: List[CodeBlockError]
    def __init__(self, filename: PathLike, config: SnippetFmtConfigDict):
        self.file_to_format = PathPlus(filename)
        self.filename = self.file_to_format.as_posix()
        self.config = config
        # Original content is kept to compute the diff and error line numbers.
        self._unformatted_source = self.file_to_format.read_text()
        self._reformatted_source: Optional[str] = None
        self.errors = []
        # Built-in language -> formatter mapping; extended via entry points below.
        self._formatters: Dict[str, Formatter] = {
                "bash": noformat,
                "python": format_python,
                "python3": format_python,
                "toml": format_toml,
                "ini": format_ini,
                "json": format_json,
                }
        self.load_extra_formatters()
    def run(self) -> bool:
        """
        Run the reformatter.
        :return: Whether the file was changed.
        """
        content = StringList(self._unformatted_source)
        content.blankline(ensure_single=True)
        directives = '|'.join(self.config["directives"])
        # Matches a configured directive (with optional language), any option
        # lines (`:option: value`), and the indented code body that follows.
        pattern = re.compile(
                rf'(?P<before>'
                rf'^(?P<indent>[ \t]*)\.\.[ \t]*('
                rf'({directives})::\s*(?P<lang>[A-Za-z0-9-_]+)?)\n'
                rf'((?P=indent)[ \t]+:.*\n)*'  # Limitation: should be `(?P=body_indent)` rather than `[ \t]+`
                rf'\n*'
                rf')'
                rf'(?P<code>^((?P=indent)(?P<body_indent>[ \t]+).*)?\n(^((?P=indent)(?P=body_indent).*)?\n)*)',
                re.MULTILINE,
                )
        self._reformatted_source = pattern.sub(self.process_match, str(content))
        # Report collected errors with 1-based line numbers derived from offsets.
        for error in self.errors:
            lineno = self._unformatted_source[:error.offset].count('\n') + 1
            click.echo(f"{self.filename}:{lineno}: {error.exc.__class__.__name__}: {error.exc}", err=True)
        return self._reformatted_source != self._unformatted_source
    def process_match(self, match: Match[str]) -> str:
        """
        Process a :meth:`re.Match <re.Match.expand>` for a single code block.
        :param match:
        """
        lang = match.group("lang")
        if lang in self.config["languages"]:
            lang_config = self.config["languages"][lang]
            # TODO: show warning if not found and in "strict" mode
            formatter = self._formatters.get(lang.lower(), noformat)
        else:
            # Languages absent from the configuration are passed through unchanged.
            lang_config = {}
            formatter = noformat
        trailing_ws_match = TRAILING_NL_RE.search(match["code"])
        assert trailing_ws_match
        trailing_ws = trailing_ws_match.group()
        # Dedent before formatting, re-indent afterwards to the original level.
        code = textwrap.dedent(match["code"])
        with self._collect_error(match):
            with syntaxerror_for_file(self.filename):
                code = formatter(code, **lang_config)
        code = textwrap.indent(code, match["indent"] + match["body_indent"])
        return f'{match["before"]}{code.rstrip()}{trailing_ws}'
    def get_diff(self) -> str:
        """
        Returns the diff between the original and reformatted file content.
        """
        # Based on yapf
        # Apache 2.0 License
        after = self.to_string().split('\n')
        before = self._unformatted_source.split('\n')
        return coloured_diff(
                before,
                after,
                self.filename,
                self.filename,
                "(original)",
                "(reformatted)",
                lineterm='',
                )
    def to_string(self) -> str:
        """
        Return the reformatted file as a string.
        """
        if self._reformatted_source is None:
            raise ValueError("'Reformatter.run()' must be called first!")
        return self._reformatted_source
    def to_file(self) -> None:
        """
        Write the reformatted source to the original file.
        """
        self.file_to_format.write_text(self.to_string())
    @contextlib.contextmanager
    def _collect_error(self, match: Match[str]) -> Iterator[None]:
        # Record (rather than raise) any formatter error, tagged with the
        # match's character offset so `run` can later report a line number.
        try:
            yield
        except Exception as e:
            self.errors.append(CodeBlockError(match.start(), e))
    def load_extra_formatters(self) -> None:
        """
        Load custom formatters defined via entry points.
        """
        group = "snippet_fmt.formatters"
        for distro_config, _ in entrypoints.iter_files_distros():
            if group in distro_config:
                for name, epstr in distro_config[group].items():
                    with contextlib.suppress(entrypoints.BadEntryPoint, ImportError):
                        # TODO: show warning for bad entry point if verbose, or "strict"?
                        ep = entrypoints.EntryPoint.from_string(epstr, name)
                        self._formatters[name] = ep.load()
def reformat_file(
        filename: PathLike,
        config: SnippetFmtConfigDict,
        colour: ColourTrilean = None,
        ):
    """
    Reformat the given reStructuredText file, and show the diff if changes were made.
    :param filename: The filename to reformat.
    :param config: The ``snippet-fmt`` configuration, parsed from a TOML file (or similar).
    :param colour: Whether to force coloured output on (:py:obj:`True`) or off (:py:obj:`False`).
    :return: Whether the file was changed.
    """
    r = RSTReformatter(filename, config)
    ret = r.run()
    if ret:
        # Only show the diff and rewrite the file when something changed.
        click.echo(r.get_diff(), color=resolve_color_default(colour))
        r.to_file()
    return ret
| en | 0.780825 | #!/usr/bin/env python3 # # __init__.py Format and validate code snippets in reStructuredText files. # # Copyright © 2021 <NAME> <<EMAIL>> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, # DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE # OR OTHER DEALINGS IN THE SOFTWARE. # # Parts based on https://github.com/asottile/blacken-docs # Copyright (c) 2018 <NAME> # MIT Licensed # # stdlib # 3rd party # type: ignore # this package Represents an exception raised when parsing and reformatting a code block. #: The character offset where the exception was raised. #: The exception itself. Reformat code snippets in a reStructuredText file. :param filename: The filename to reformat. :param config: The ``snippet_fmt`` configuration, parsed from a TOML file (or similar). #: The filename being reformatted. #: The filename being reformatted, as a POSIX-style path. #: The ``formate`` configuration, parsed from a TOML file (or similar). Run the reformatter. :return: Whether the file was changed. 
# Limitation: should be `(?P=body_indent)` rather than `[ \t]+` Process a :meth:`re.Match <re.Match.expand>` for a single code block. :param match: # TODO: show warning if not found and in "strict" mode Returns the diff between the original and reformatted file content. # Based on yapf # Apache 2.0 License Return the reformatted file as a string. Write the reformatted source to the original file. Load custom formatters defined via entry points. # TODO: show warning for bad entry point if verbose, or "strict"? Reformat the given reStructuredText file, and show the diff if changes were made. :param filename: The filename to reformat. :param config: The ``snippet-fmt`` configuration, parsed from a TOML file (or similar). :param colour: Whether to force coloured output on (:py:obj:`True`) or off (:py:obj:`False`). | 1.580421 | 2 |
ffws/parser/SpectraMaxM2.py | mojaie/flashflood-workspace-sample | 0 | 6620956 | <gh_stars>0
import csv
def content_loader(lines):
    """Parse a SpectraMax M2 export into plate records.

    TODO: multilayer

    Args:
        lines: lines of input file (or file object)

    Returns:
        parsed data: ``{"plates": [...]}`` where each plate dict holds its
        ``plateId``, a ``layerIndex`` of 0, and ``wellValues`` — a flat,
        row-major list of 16 x 24 = 384 values.
    """
    BLOCK_SIZE = 20    # lines per plate block
    BLOCK_OFFSET = 1   # lines skipped before the first block
    N_ROWS = 16        # data rows per plate
    ROW_OFFSET = 2     # block lines preceding the data rows
    N_COLS = 24        # data columns per row
    COL_OFFSET = 2     # leading cells on each data row

    def to_value(cell):
        # Cells that do not parse as numbers become the string "NaN".
        try:
            return float(cell)
        except ValueError:
            return "NaN"

    raw = list(csv.reader(lines, delimiter="\t"))
    parsed = {"plates": []}
    for i in range(len(raw) // BLOCK_SIZE):
        start = i * BLOCK_SIZE + BLOCK_OFFSET
        block = raw[start:start + BLOCK_SIZE]
        well_values = [
            to_value(cell)
            for row in block[ROW_OFFSET:ROW_OFFSET + N_ROWS]
            for cell in row[COL_OFFSET:COL_OFFSET + N_COLS]
        ]
        parsed["plates"].append({
            "plateId": block[0][1],
            "layerIndex": 0,
            "wellValues": well_values,
        })
    return parsed
def file_loader(path):
    """Parse a SpectraMax M2 datafile on disk.

    Args:
        path: path to the exported datafile (opened as Shift-JIS text).

    Returns:
        parsed data, as produced by :func:`content_loader`.
    """
    with open(path, encoding="shift-jis", newline="") as f:
        results = content_loader(f)
    return results
| import csv
def content_loader(lines):
    """Load datafile from SpectraMax M2.
    TODO: multilayer
    Args:
        lines: lines of input file (or file object)
    Returns:
        parsed data: ``{"plates": [...]}``; each plate dict holds ``plateId``,
        ``layerIndex`` (always 0 here) and ``wellValues`` — a flat, row-major
        list of 16 x 24 = 384 values; non-numeric cells become the string "NaN".
    """
    bs = 20 # block_size
    bo = 1 # block_offset
    pr = 16 # plate_rows
    ro = 2 # row_offset
    pc = 24 # plate_cols
    co = 2 # col_offset
    raw = [row for row in csv.reader(lines, delimiter="\t")]
    # Split the raw lines into fixed-size plate blocks, skipping the offset line.
    blocks = [raw[i*bs+bo:(i + 1)*bs+bo] for i in range(len(raw) // bs)]
    parsed = {"plates": []}
    for block in blocks:
        plate = {}
        # The plate id is the second cell of the block's first line.
        plate["plateId"] = block[0][1]
        plate["layerIndex"] = 0
        plate["wellValues"] = []
        for row in block[ro:ro+pr]:
            for cell in row[co:co+pc]:
                try:
                    value = float(cell)
                except ValueError:
                    # Cells that do not parse as numbers become the string "NaN".
                    value = "NaN"
                plate["wellValues"].append(value)
        parsed["plates"].append(plate)
    return parsed
def file_loader(path):
with open(path, encoding="shift-jis", newline="") as f:
results = content_loader(f)
return results | en | 0.298939 | Load datafile from SpectraMax M2. TODO: multilayer Args: lines: lines of input file (or file object) Returns: parsed data # block_size # block_offset # plate_rows # row_offset # plate_cols # col_offset | 2.759207 | 3 |
pipelitools/preprocessing/features.py | nastiag67/tools | 1 | 6620957 | <filename>pipelitools/preprocessing/features.py
import pandas as pd
import numpy as np
from datetime import datetime
from sklearn.feature_selection import VarianceThreshold, RFE, SelectFromModel
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.preprocessing import StandardScaler
def test_features():
    """Smoke check: prints a marker to confirm the module imported and ran."""
    print('test_features: ok')
class FeatureSelectionPipeline:
    """ A pipeline for feature selection
    Note: Assumes the response is the last column in df.
    Parameters
    ----------
    df : pd.DataFrame
        Dataframe used for feature selection (includes features and the response).
    """
    def __init__(self, df):
        # Keep the full frame plus positional views of the features (X)
        # and the response (y, assumed to be the last column).
        self.df = df
        self.X = df.iloc[:, :-1]
        self.y = df.iloc[:, -1]
    def low_variance(self, threshold):
        """ Feature selection based on low variance.
        Parameters
        ----------
        threshold : float
            Threshold against which the variance is calculated.
        Example
        -------
        reduced_df = low_variance(df, 0.01)
        X_test_new_reduced = low_variance(X_test_new, 0.01)
        Returns
        -------
        Dataframe with selected features.
        """
        # Normalize the data
        # NOTE(review): this divides every column (including the response) by
        # its mean, so all columns are assumed numeric and the response itself
        # can be dropped if its variance is low — confirm that is intended.
        normalized_df = self.df / self.df.mean()
        # Create a VarianceThreshold feature selector
        sel = VarianceThreshold(threshold=threshold)
        # Fit the selector to normalized df
        # because higher values may have higher variances => need to adjust for that
        sel.fit(normalized_df)
        # Create a boolean mask: gives True/False value on if each feature’s Var > threshold
        mask = sel.get_support()
        # Apply the mask to create a reduced dataframe
        reduced_df = self.df.loc[:, mask]
        print(f"Dimensionality reduced from {self.df.shape[1]} to {reduced_df.shape[1]-1}.")
        return reduced_df
    def RFE_selection(self, n_features_to_select, step, mask=None):
        """Recursive Feature Elimination based on random forest classifier.
        Parameters
        ----------
        n_features_to_select : int
            Number of features to be selected.
        step : int
            How many features to remove at each step.
        mask : default=None
            Existing feature selection filter, which can be used to select features on testing dataset.
        Example
        -------
        n_features_to_select=300
        reduced_df, mask = RFE_selection(df, n_features_to_select=n_features_to_select, step=1, mask=None)
        X_test_new_reduced = RFE_selection(X_test_new, n_features_to_select=n_features_to_select, step=1, mask=mask)
        Returns
        -------
        If mask is None:
            reduced_df : pd.DataFrame
                Dataframe with selected features.
        If mask is not None:
            reduced_df : pd.DataFrame
                Dataframe with selected features.
            mask :
                Feature selection filter.
        """
        if mask is not None:
            # Apply the mask to the feature dataset X
            # NOTE(review): the stored mask covers feature columns only, so in
            # this branch the pipeline is expected to have been built on a
            # features-only frame (e.g. a test set) — confirm with callers.
            reduced_df = self.df.loc[:, mask]
            return reduced_df
        # DROP THE LEAST IMPORTANT FEATURES ONE BY ONE
        # Set the feature eliminator to remove 2 features on each step
        rfe = RFE(estimator=RandomForestClassifier(random_state=42),
                  n_features_to_select=n_features_to_select,
                  step=step,
                  verbose=0)
        # Fit the model to the training data
        rfe.fit(self.X, self.y)
        # Create a mask: remaining column names
        mask = rfe.support_
        # Apply the mask to the feature dataset X
        reduced_X = self.X.loc[:, mask]
        reduced_df = pd.concat([reduced_X, self.y], axis=1)
        print(f"Dimensionality reduced from {self.df.shape[1]} to {reduced_df.shape[1]-1}.")
        return reduced_df, mask
    def ensemble(self, models, n_features_to_select, mask=None):
        """ Feature selection method which uses ensembles to select features.
        Parameters
        ----------
        models : list of sklearn models
            List of models.
        n_features_to_select : int
            Number of features to be selected.
        mask : default=None
            Existing feature selection filter, which can be used to select features on testing dataset.
        Example
        -------
        # MODEL1
        gbc = GradientBoostingClassifier()
        # MODEL2
        lda = LinearDiscriminantAnalysis(n_components=2)
        models={'GBC': gbc, 'LDA': lda}
        reduced_df, mask = f.ensemble(df, models, n_features_to_select=493, mask=None)
        X_test_new_reduced = f.ensemble(X_test_new, n_features_to_select=493, mask=mask)
        Returns
        -------
        If mask is None:
            reduced_df : pd.DataFrame
                Dataframe with selected features.
        If mask is not None:
            reduced_df : pd.DataFrame
                Dataframe with selected features.
            mask :
                Feature selection filter.
        """
        if mask is not None:
            # Apply the mask to the feature dataset X
            reduced_df = self.df.loc[:, mask]
            return reduced_df
        rfe_masks = []
        # Run RFE once per model and collect each model's boolean support mask.
        for modelname, model in zip(models.keys(), models.values()):
            print(f"RFE using the model: {modelname}")
            # Select n_features_to_selec with RFE on a GradientBoostingRegressor, drop 3 features on each step
            rfe_modelname = RFE(estimator=model, n_features_to_select=n_features_to_select, step=1, verbose=0)
            rfe_modelname.fit(self.X, self.y)
            # Assign the support array to gb_mask
            mask_modelname = rfe_modelname.support_
            rfe_masks.append(mask_modelname)
        # Sum the votes of the models
        n_models = len(rfe_masks)
        votes = np.sum(rfe_masks, axis=0)
        # Create a mask for features selected by all 2 models
        # (votes >= n_models keeps only features every model voted for)
        meta_mask = votes >= n_models
        # Apply the dimensionality reduction on X
        reduced_X = self.X.loc[:, meta_mask]
        reduced_df = pd.concat([reduced_X, self.y], axis=1)
        print(f"Dimensionality reduced from {self.df.shape[1]} to {reduced_df.shape[1]-1}.")
        return reduced_df, meta_mask
    def tree_based(self, threshold, mask=None):
        """ Feature selection method which uses trees to select features.
        Parameters
        ----------
        threshold : int
            Threshold based on which the features will be selected.
        mask : default=None
            Existing feature selection filter, which can be used to select features on testing dataset.
        Example
        -------
        threshold=0.0016
        reduced_df, mask = tree_based(df, threshold=threshold, mask=None)
        X_test_new_reduced = tree_based(X_test_new, threshold=threshold, mask=mask)
        Returns
        -------
        If mask is None:
            reduced_df : pd.DataFrame
                Dataframe with selected features.
        If mask is not None:
            reduced_df : pd.DataFrame
                Dataframe with selected features.
            mask :
                Feature selection filter.
        """
        if mask is not None:
            # Apply the mask to the feature dataset X
            reduced_df = self.df.loc[:, mask]
            return reduced_df
        # Fit the random forest model to the training data
        rf = RandomForestClassifier(random_state=42)
        rf.fit(self.X, self.y)
        # Print the importances per feature
        # for unimportant features – almost 0
        # better than RFE, since the resulting values here r comparable bn features by default, cuz always sum to 1
        # => DON’T NEED TO SCALE THE DATA
        # Create a mask for features importances above the threshold
        mask = rf.feature_importances_ >= threshold
        # Apply the mask to the feature dataset X to implement the feature selection
        reduced_X = self.X.loc[:, mask]
        reduced_df = pd.concat([reduced_X, self.y], axis=1)
        # MUST BE CAREFUL WITH DROPPING SEVERAL FEATURES AT ONCE, BETTER DO IT ONE BY ONE USING RFE
        print(f"Dimensionality reduced from {self.df.shape[1]} to {reduced_df.shape[1]-1}.")
        return reduced_df, mask
    def extra_trees(self, st_scaler=True, mask=None):
        """ Ferature selection method based on extra trees classifier.
        Parameters
        ----------
        st_scaler : default=True
            True if standard scaler should be used.
        mask : default=None
            Existing feature selection filter, which can be used to select features on testing dataset.
        Returns
        -------
        If mask is None:
            reduced_df : pd.DataFrame
                Dataframe with selected features.
        If mask is not None:
            reduced_df : pd.DataFrame
                Dataframe with selected features.
            mask :
                Feature selection filter.
        """
        if mask is not None:
            # Apply the mask to the feature dataset X
            # NOTE(review): here the mask is an Index of column *names*
            # (unlike the boolean masks above), hence plain [] indexing.
            reduced_df = self.df[mask]
            return reduced_df
        if st_scaler:
            # scaling
            scaler = StandardScaler()
            X_scaled = scaler.fit_transform(self.X)
        else:
            X_scaled = self.X
        # extract feature importances
        model = ExtraTreesClassifier(random_state=42)
        model.fit(X_scaled, self.y)
        importances = pd.DataFrame(model.feature_importances_)
        # Select only the features which have an importance bigger than the mean importance of the whole dataset
        sfm = SelectFromModel(model, threshold=importances.mean())
        sfm.fit(X_scaled, self.y)
        # Create a mask for features importances
        feature_idx = sfm.get_support()
        mask = self.X.columns[feature_idx]
        # Apply the mask to the feature dataset X to implement the feature selection
        reduced_X = self.X.loc[:, mask]
        reduced_df = pd.concat([reduced_X, self.y], axis=1)
        print(f"Dimensionality reduced from {self.df.shape[1]} to {reduced_df.shape[1]-1}.")
        return reduced_df, mask
# def L1(self):
# """ """
# pass
# class FeatureEngineeringPipeline:
# """ """
# def __init__(self, df):
# self.df = df
#
# def normalization(self):
# """ """
# pass
#
# def standartization(self):
# """ """
# pass
#
# def _imputation(self, how):
# """how - one of the following:
# - average,
# - same value outside of normal range,
# - value from the middle of the range,
# - use the missing value as target for regression problem,
# - increase dimensionality by adding a binary indicator feature for each feature with missing values
#
# Parameters
# ----------
# how :
#
#
# Returns
# -------
#
# """
#
# pass
#
# def missing(self, remove=True, impute=False, learn=False):
# """
#
# Parameters
# ----------
# remove :
# (Default value = True)
# impute :
# (Default value = False)
# learn :
# (Default value = False)
#
# Returns
# -------
#
# """
#
# if impute:
# result = self._imputation(how='method')
#
# return result
#
# def normalization(self):
# """ """
# pass
if __name__ == '__main__':
    # Run the module smoke check when executed directly.
    test_features()
| <filename>pipelitools/preprocessing/features.py
import pandas as pd
import numpy as np
from datetime import datetime
from sklearn.feature_selection import VarianceThreshold, RFE, SelectFromModel
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.preprocessing import StandardScaler
def test_features():
print('test_features: ok')
class FeatureSelectionPipeline:
""" A pipeline for feature selection
Note: Assumes the response is the last column in df.
Parameters
----------
df : pd.DataFrame
Dataframe used for feature selection (includes features and the response).
"""
def __init__(self, df):
self.df = df
self.X = df.iloc[:, :-1]
self.y = df.iloc[:, -1]
def low_variance(self, threshold):
""" Feature selection based on low variance.
Parameters
----------
threshold : float
Threshold against which the variance is calculated.
Example
-------
reduced_df = low_variance(df, 0.01)
X_test_new_reduced = low_variance(X_test_new, 0.01)
Returns
-------
Dataframe with selected features.
"""
# Normalize the data
normalized_df = self.df / self.df.mean()
# Create a VarianceThreshold feature selector
sel = VarianceThreshold(threshold=threshold)
# Fit the selector to normalized df
# because higher values may have higher variances => need to adjust for that
sel.fit(normalized_df)
# Create a boolean mask: gives True/False value on if each feature’s Var > threshold
mask = sel.get_support()
# Apply the mask to create a reduced dataframe
reduced_df = self.df.loc[:, mask]
print(f"Dimensionality reduced from {self.df.shape[1]} to {reduced_df.shape[1]-1}.")
return reduced_df
def RFE_selection(self, n_features_to_select, step, mask=None):
"""Recursive Feature Elimination based on random forest classifier.
Parameters
----------
n_features_to_select : int
Number of features to be selected.
step : int
How many features to remove at each step.
mask : default=None
Existing feature selection filter, which can be used to select features on testing dataset.
Example
-------
n_features_to_select=300
reduced_df, mask = RFE_selection(df, n_features_to_select=n_features_to_select, step=1, mask=None)
X_test_new_reduced = RFE_selection(X_test_new, n_features_to_select=n_features_to_select, step=1, mask=mask)
Returns
-------
If mask is None:
reduced_df : pd.DataFrame
Dataframe with selected features.
If mask is not None:
reduced_df : pd.DataFrame
Dataframe with selected features.
mask :
Feature selection filter.
"""
if mask is not None:
# Apply the mask to the feature dataset X
reduced_df = self.df.loc[:, mask]
return reduced_df
# DROP THE LEAST IMPORTANT FEATURES ONE BY ONE
# Set the feature eliminator to remove 2 features on each step
rfe = RFE(estimator=RandomForestClassifier(random_state=42),
n_features_to_select=n_features_to_select,
step=step,
verbose=0)
# Fit the model to the training data
rfe.fit(self.X, self.y)
# Create a mask: remaining column names
mask = rfe.support_
# Apply the mask to the feature dataset X
reduced_X = self.X.loc[:, mask]
reduced_df = pd.concat([reduced_X, self.y], axis=1)
print(f"Dimensionality reduced from {self.df.shape[1]} to {reduced_df.shape[1]-1}.")
return reduced_df, mask
def ensemble(self, models, n_features_to_select, mask=None):
""" Feature selection method which uses ensembles to select features.
Parameters
----------
models : list of sklearn models
List of models.
n_features_to_select : int
Number of features to be selected.
mask : default=None
Existing feature selection filter, which can be used to select features on testing dataset.
Example
-------
# MODEL1
gbc = GradientBoostingClassifier()
# MODEL2
lda = LinearDiscriminantAnalysis(n_components=2)
models={'GBC': gbc, 'LDA': lda}
reduced_df, mask = f.ensemble(df, models, n_features_to_select=493, mask=None)
X_test_new_reduced = f.ensemble(X_test_new, n_features_to_select=493, mask=mask)
Returns
-------
If mask is None:
reduced_df : pd.DataFrame
Dataframe with selected features.
If mask is not None:
reduced_df : pd.DataFrame
Dataframe with selected features.
mask :
Feature selection filter.
"""
if mask is not None:
# Apply the mask to the feature dataset X
reduced_df = self.df.loc[:, mask]
return reduced_df
rfe_masks = []
for modelname, model in zip(models.keys(), models.values()):
print(f"RFE using the model: {modelname}")
# Select n_features_to_selec with RFE on a GradientBoostingRegressor, drop 3 features on each step
rfe_modelname = RFE(estimator=model, n_features_to_select=n_features_to_select, step=1, verbose=0)
rfe_modelname.fit(self.X, self.y)
# Assign the support array to gb_mask
mask_modelname = rfe_modelname.support_
rfe_masks.append(mask_modelname)
# Sum the votes of the models
n_models = len(rfe_masks)
votes = np.sum(rfe_masks, axis=0)
# Create a mask for features selected by all 2 models
meta_mask = votes >= n_models
# Apply the dimensionality reduction on X
reduced_X = self.X.loc[:, meta_mask]
reduced_df = pd.concat([reduced_X, self.y], axis=1)
print(f"Dimensionality reduced from {self.df.shape[1]} to {reduced_df.shape[1]-1}.")
return reduced_df, meta_mask
def tree_based(self, threshold, mask=None):
""" Feature selection method which uses trees to select features.
Parameters
----------
threshold : int
Threshold based on which the features will be selected.
mask : default=None
Existing feature selection filter, which can be used to select features on testing dataset.
Example
-------
threshold=0.0016
reduced_df, mask = tree_based(df, threshold=threshold, mask=None)
X_test_new_reduced = tree_based(X_test_new, threshold=threshold, mask=mask)
Returns
-------
If mask is None:
reduced_df : pd.DataFrame
Dataframe with selected features.
If mask is not None:
reduced_df : pd.DataFrame
Dataframe with selected features.
mask :
Feature selection filter.
"""
if mask is not None:
# Apply the mask to the feature dataset X
reduced_df = self.df.loc[:, mask]
return reduced_df
# Fit the random forest model to the training data
rf = RandomForestClassifier(random_state=42)
rf.fit(self.X, self.y)
# Print the importances per feature
# for unimportant features – almost 0
# better than RFE, since the resulting values here r comparable bn features by default, cuz always sum to 1
# => DON’T NEED TO SCALE THE DATA
# Create a mask for features importances above the threshold
mask = rf.feature_importances_ >= threshold
# Apply the mask to the feature dataset X to implement the feature selection
reduced_X = self.X.loc[:, mask]
reduced_df = pd.concat([reduced_X, self.y], axis=1)
# MUST BE CAREFUL WITH DROPPING SEVERAL FEATURES AT ONCE, BETTER DO IT ONE BY ONE USING RFE
print(f"Dimensionality reduced from {self.df.shape[1]} to {reduced_df.shape[1]-1}.")
return reduced_df, mask
def extra_trees(self, st_scaler=True, mask=None):
""" Ferature selection method based on extra trees classifier.
Parameters
----------
st_scaler : default=True
True if standard scaler should be used.
mask : default=None
Existing feature selection filter, which can be used to select features on testing dataset.
Returns
-------
If mask is None:
reduced_df : pd.DataFrame
Dataframe with selected features.
If mask is not None:
reduced_df : pd.DataFrame
Dataframe with selected features.
mask :
Feature selection filter.
"""
if mask is not None:
# Apply the mask to the feature dataset X
reduced_df = self.df[mask]
return reduced_df
if st_scaler:
# scaling
scaler = StandardScaler()
X_scaled = scaler.fit_transform(self.X)
else:
X_scaled = self.X
# extract feature importances
model = ExtraTreesClassifier(random_state=42)
model.fit(X_scaled, self.y)
importances = pd.DataFrame(model.feature_importances_)
# Select only the features which have an importance bigger than the mean importance of the whole dataset
sfm = SelectFromModel(model, threshold=importances.mean())
sfm.fit(X_scaled, self.y)
# Create a mask for features importances
feature_idx = sfm.get_support()
mask = self.X.columns[feature_idx]
# Apply the mask to the feature dataset X to implement the feature selection
reduced_X = self.X.loc[:, mask]
reduced_df = pd.concat([reduced_X, self.y], axis=1)
print(f"Dimensionality reduced from {self.df.shape[1]} to {reduced_df.shape[1]-1}.")
return reduced_df, mask
# def L1(self):
# """ """
# pass
# class FeatureEngineeringPipeline:
# """ """
# def __init__(self, df):
# self.df = df
#
# def normalization(self):
# """ """
# pass
#
# def standartization(self):
# """ """
# pass
#
# def _imputation(self, how):
# """how - one of the following:
# - average,
# - same value outside of normal range,
# - value from the middle of the range,
# - use the missing value as target for regression problem,
# - increase dimensionality by adding a binary indicator feature for each feature with missing values
#
# Parameters
# ----------
# how :
#
#
# Returns
# -------
#
# """
#
# pass
#
# def missing(self, remove=True, impute=False, learn=False):
# """
#
# Parameters
# ----------
# remove :
# (Default value = True)
# impute :
# (Default value = False)
# learn :
# (Default value = False)
#
# Returns
# -------
#
# """
#
# if impute:
# result = self._imputation(how='method')
#
# return result
#
# def normalization(self):
# """ """
# pass
if __name__ == '__main__':
test_features()
| en | 0.73475 | A pipeline for feature selection Note: Assumes the response is the last column in df. Parameters ---------- df : pd.DataFrame Dataframe used for feature selection (includes features and the response). Feature selection based on low variance. Parameters ---------- threshold : float Threshold against which the variance is calculated. Example ------- reduced_df = low_variance(df, 0.01) X_test_new_reduced = low_variance(X_test_new, 0.01) Returns ------- Dataframe with selected features. # Normalize the data # Create a VarianceThreshold feature selector # Fit the selector to normalized df # because higher values may have higher variances => need to adjust for that # Create a boolean mask: gives True/False value on if each feature’s Var > threshold # Apply the mask to create a reduced dataframe Recursive Feature Elimination based on random forest classifier. Parameters ---------- n_features_to_select : int Number of features to be selected. step : int How many features to remove at each step. mask : default=None Existing feature selection filter, which can be used to select features on testing dataset. Example ------- n_features_to_select=300 reduced_df, mask = RFE_selection(df, n_features_to_select=n_features_to_select, step=1, mask=None) X_test_new_reduced = RFE_selection(X_test_new, n_features_to_select=n_features_to_select, step=1, mask=mask) Returns ------- If mask is None: reduced_df : pd.DataFrame Dataframe with selected features. If mask is not None: reduced_df : pd.DataFrame Dataframe with selected features. mask : Feature selection filter. # Apply the mask to the feature dataset X # DROP THE LEAST IMPORTANT FEATURES ONE BY ONE # Set the feature eliminator to remove 2 features on each step # Fit the model to the training data # Create a mask: remaining column names # Apply the mask to the feature dataset X Feature selection method which uses ensembles to select features. Parameters ---------- models : list of sklearn models List of models. 
n_features_to_select : int Number of features to be selected. mask : default=None Existing feature selection filter, which can be used to select features on testing dataset. Example ------- # MODEL1 gbc = GradientBoostingClassifier() # MODEL2 lda = LinearDiscriminantAnalysis(n_components=2) models={'GBC': gbc, 'LDA': lda} reduced_df, mask = f.ensemble(df, models, n_features_to_select=493, mask=None) X_test_new_reduced = f.ensemble(X_test_new, n_features_to_select=493, mask=mask) Returns ------- If mask is None: reduced_df : pd.DataFrame Dataframe with selected features. If mask is not None: reduced_df : pd.DataFrame Dataframe with selected features. mask : Feature selection filter. # Apply the mask to the feature dataset X # Select n_features_to_selec with RFE on a GradientBoostingRegressor, drop 3 features on each step # Assign the support array to gb_mask # Sum the votes of the models # Create a mask for features selected by all 2 models # Apply the dimensionality reduction on X Feature selection method which uses trees to select features. Parameters ---------- threshold : int Threshold based on which the features will be selected. mask : default=None Existing feature selection filter, which can be used to select features on testing dataset. Example ------- threshold=0.0016 reduced_df, mask = tree_based(df, threshold=threshold, mask=None) X_test_new_reduced = tree_based(X_test_new, threshold=threshold, mask=mask) Returns ------- If mask is None: reduced_df : pd.DataFrame Dataframe with selected features. If mask is not None: reduced_df : pd.DataFrame Dataframe with selected features. mask : Feature selection filter. 
# Apply the mask to the feature dataset X # Fit the random forest model to the training data # Print the importances per feature # for unimportant features – almost 0 # better than RFE, since the resulting values here r comparable bn features by default, cuz always sum to 1 # => DON’T NEED TO SCALE THE DATA # Create a mask for features importances above the threshold # Apply the mask to the feature dataset X to implement the feature selection # MUST BE CAREFUL WITH DROPPING SEVERAL FEATURES AT ONCE, BETTER DO IT ONE BY ONE USING RFE Ferature selection method based on extra trees classifier. Parameters ---------- st_scaler : default=True True if standard scaler should be used. mask : default=None Existing feature selection filter, which can be used to select features on testing dataset. Returns ------- If mask is None: reduced_df : pd.DataFrame Dataframe with selected features. If mask is not None: reduced_df : pd.DataFrame Dataframe with selected features. mask : Feature selection filter. 
# Apply the mask to the feature dataset X # scaling # extract feature importances # Select only the features which have an importance bigger than the mean importance of the whole dataset # Create a mask for features importances # Apply the mask to the feature dataset X to implement the feature selection # def L1(self): # """ """ # pass # class FeatureEngineeringPipeline: # """ """ # def __init__(self, df): # self.df = df # # def normalization(self): # """ """ # pass # # def standartization(self): # """ """ # pass # # def _imputation(self, how): # """how - one of the following: # - average, # - same value outside of normal range, # - value from the middle of the range, # - use the missing value as target for regression problem, # - increase dimensionality by adding a binary indicator feature for each feature with missing values # # Parameters # ---------- # how : # # # Returns # ------- # # """ # # pass # # def missing(self, remove=True, impute=False, learn=False): # """ # # Parameters # ---------- # remove : # (Default value = True) # impute : # (Default value = False) # learn : # (Default value = False) # # Returns # ------- # # """ # # if impute: # result = self._imputation(how='method') # # return result # # def normalization(self): # """ """ # pass | 3.609996 | 4 |
CursoemVideo/ex027.py | arthxvr/coding--python | 0 | 6620958 | <gh_stars>0
# Read the user's full name, then report its first and last words.
nome = str(input('Nome completo: ')).strip()
partes = nome.split()
print(f'Primeiro nome = {partes[0]}')
print(f'Último nome = {partes[-1]}')
| nome = str(input('Nome completo: ')).strip()
print(f'Primeiro nome = {nome.split()[0]}')
print(f'Último nome = {nome.split()[-1]}') | none | 1 | 3.704049 | 4 | |
code/utils/scripts/outliers_script.py | berkeley-stat159/project-alpha | 4 | 6620959 | """
Script to identify outliers for each subject.
Compares the mean MRSS values from running GLM on the basic np.convolve convolved time course,
before and after dropping the outliers.
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import nibabel as nib
import os
import sys
import pandas as pd
# Relative paths to project and data.
project_path = "../../../"
path_to_data = project_path+"data/ds009/"
location_of_functions = project_path+"code/utils/functions/"
behav_suffix = "/behav/task001_run001/behavdata.txt"
sys.path.append(location_of_functions)
from stimuli import events2neural
from event_related_fMRI_functions import hrf_single
from outliers import *
sub_list = os.listdir(path_to_data)
sub_list = [i for i in sub_list if 'sub' in i]
# List to store the mean MRSS values before and after outlier removal
MRSSvals = []
# saving to compare number of cuts in the beginning
num_cut=np.zeros(len(sub_list))
i=0
# Loop through all the subjects.
for name in sub_list:
# amount of beginning TRs not standardized at 6
behav=pd.read_table(path_to_data+name+behav_suffix,sep=" ")
num_TR = float(behav["NumTRs"])
# Load image data.
img = nib.load(path_to_data+ name+ "/BOLD/task001_run001/bold.nii.gz")
data = img.get_data()
# Load conditions.
condition_location = path_to_data+ name+ "/model/model001/onsets/task001_run001/"
cond1=np.loadtxt(condition_location+"cond001.txt")
cond2=np.loadtxt(condition_location+"cond002.txt")
cond3=np.loadtxt(condition_location+"cond003.txt")
# Drop the appropriate number of volumes from the beginning.
first_n_vols=data.shape[-1]
num_TR_cut=int(first_n_vols-num_TR)
num_cut[i]=num_TR_cut
i+=1
data = data[...,num_TR_cut:]
# initial needed values
TR = 2
tr_times = np.arange(0, 30, TR)
hrf_at_trs = np.array([hrf_single(x) for x in tr_times])
n_vols=data.shape[-1]
# creating the .txt file for the events2neural function
cond_all=np.row_stack((cond1,cond2,cond3))
cond_all=sorted(cond_all,key= lambda x:x[0])
np.savetxt(condition_location+"cond_all.txt",cond_all)
# Get the convolved time course from np.convolve
neural_prediction = events2neural(condition_location+"cond_all.txt",TR,n_vols)
convolved = np.convolve(neural_prediction, hrf_at_trs) # hrf_at_trs sample data
np_hrf=convolved[:n_vols]
# mean MRSS values before and after dropping outliers.
MRSSvals.append((name,) + compare_outliers(data, np_hrf))
#np.savetxt("outlierMRSSvals.txt",MRSSvals)
print(MRSSvals)
'''
By and large, mean MRSS doesn't seem to shift much before and after dropping outliers.
'''
| """
Script to identify outliers for each subject.
Compares the mean MRSS values from running GLM on the basic np.convolve convolved time course,
before and after dropping the outliers.
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import nibabel as nib
import os
import sys
import pandas as pd
# Relative paths to project and data.
project_path = "../../../"
path_to_data = project_path+"data/ds009/"
location_of_functions = project_path+"code/utils/functions/"
behav_suffix = "/behav/task001_run001/behavdata.txt"
sys.path.append(location_of_functions)
from stimuli import events2neural
from event_related_fMRI_functions import hrf_single
from outliers import *
sub_list = os.listdir(path_to_data)
sub_list = [i for i in sub_list if 'sub' in i]
# List to store the mean MRSS values before and after outlier removal
MRSSvals = []
# saving to compare number of cuts in the beginning
num_cut=np.zeros(len(sub_list))
i=0
# Loop through all the subjects.
for name in sub_list:
# amount of beginning TRs not standardized at 6
behav=pd.read_table(path_to_data+name+behav_suffix,sep=" ")
num_TR = float(behav["NumTRs"])
# Load image data.
img = nib.load(path_to_data+ name+ "/BOLD/task001_run001/bold.nii.gz")
data = img.get_data()
# Load conditions.
condition_location = path_to_data+ name+ "/model/model001/onsets/task001_run001/"
cond1=np.loadtxt(condition_location+"cond001.txt")
cond2=np.loadtxt(condition_location+"cond002.txt")
cond3=np.loadtxt(condition_location+"cond003.txt")
# Drop the appropriate number of volumes from the beginning.
first_n_vols=data.shape[-1]
num_TR_cut=int(first_n_vols-num_TR)
num_cut[i]=num_TR_cut
i+=1
data = data[...,num_TR_cut:]
# initial needed values
TR = 2
tr_times = np.arange(0, 30, TR)
hrf_at_trs = np.array([hrf_single(x) for x in tr_times])
n_vols=data.shape[-1]
# creating the .txt file for the events2neural function
cond_all=np.row_stack((cond1,cond2,cond3))
cond_all=sorted(cond_all,key= lambda x:x[0])
np.savetxt(condition_location+"cond_all.txt",cond_all)
# Get the convolved time course from np.convolve
neural_prediction = events2neural(condition_location+"cond_all.txt",TR,n_vols)
convolved = np.convolve(neural_prediction, hrf_at_trs) # hrf_at_trs sample data
np_hrf=convolved[:n_vols]
# mean MRSS values before and after dropping outliers.
MRSSvals.append((name,) + compare_outliers(data, np_hrf))
#np.savetxt("outlierMRSSvals.txt",MRSSvals)
print(MRSSvals)
'''
By and large, mean MRSS doesn't seem to shift much before and after dropping outliers.
'''
| en | 0.851229 | Script to identify outliers for each subject. Compares the mean MRSS values from running GLM on the basic np.convolve convolved time course, before and after dropping the outliers. # Relative paths to project and data. # List to store the mean MRSS values before and after outlier removal # saving to compare number of cuts in the beginning # Loop through all the subjects. # amount of beginning TRs not standardized at 6 # Load image data. # Load conditions. # Drop the appropriate number of volumes from the beginning. # initial needed values # creating the .txt file for the events2neural function # Get the convolved time course from np.convolve # hrf_at_trs sample data # mean MRSS values before and after dropping outliers. #np.savetxt("outlierMRSSvals.txt",MRSSvals) By and large, mean MRSS doesn't seem to shift much before and after dropping outliers. | 2.69921 | 3 |
Advanced_algorithm/oj_homework3/4.py | mndream/MyOJ | 1 | 6620960 | """
棋盘覆盖问题:
给定一个大小为2^n * 2^n个小方格的棋盘,其中有一个位置已经被填充,
现在要用一个L型(2 * 2个小方格组成的大方格中去掉其中一个小方格)形状去覆盖剩下的小方格。
求出覆盖方案,即哪些坐标下的小方格使用同一个L型格子覆盖。注意:坐标从0开始。
左上方的第一个格子坐标为(0,0),第一行第二个坐标为(0,1),第二行第一个为(1,0),以此类推。
输入: 第一行为测试用例个数,后面每一个用例有两行,
第一行为n值和特殊的格子的坐标(用空格隔开),
第二行为需要查找其属于同一个L型格子的格子坐标。
输出: 每一行为一个用例的解,先按照行值从小到大、
再按照列值从小到大的顺序输出每一个用例的两个坐标;用逗号隔开。
输入样例
1
1 1 1
0 0
输出样例
0 1,1 0
"""
'''
将2^k * 2^k问题分为4个规模为2^(k-1) * 2^(k-1)的子问题
'''
def chess(tr, tc, dr, dc, size, arr):
    """Tile the ``size`` x ``size`` sub-board anchored at (tr, tc) with L-trominoes.

    (dr, dc) is the one cell of this sub-board that is already covered.
    Each recursion level numbers one L-tromino (three cells sharing the
    same value) around the board centre, then solves the four quadrants.
    Tromino numbers come from the module-level counter ``num``.
    """
    if size == 1:
        return
    global num
    num += 1
    tile = num
    half = size // 2
    # For each quadrant (in the fixed order top-left, top-right,
    # bottom-left, bottom-right): its own anchor plus the cell of that
    # quadrant touching the board centre.
    quadrants = [
        (tr, tc, tr + half - 1, tc + half - 1),
        (tr, tc + half, tr + half - 1, tc + half),
        (tr + half, tc, tr + half, tc + half - 1),
        (tr + half, tc + half, tr + half, tc + half),
    ]
    for top, left, corner_r, corner_c in quadrants:
        if top <= dr < top + half and left <= dc < left + half:
            # The pre-covered cell lies in this quadrant: recurse directly.
            chess(top, left, dr, dc, half, arr)
        else:
            # Cover this quadrant's centre corner with the current tromino,
            # then treat that corner as the covered cell of the sub-board.
            arr[corner_r][corner_c] = tile
            chess(top, left, corner_r, corner_c, half, arr)
if __name__ == '__main__':
    # One test case per line pair: "n special_row special_col" then
    # "query_row query_col".  For each case, tile the 2^n x 2^n board and
    # print the other two cells of the L-tromino covering the query cell.
    num_cases = int(input())
    for _ in range(num_cases):
        # NOTE: the original code reused the name N for both the case count
        # and the board exponent; distinct names avoid that shadowing.
        first_line = list(map(int, input().split()))
        exponent = first_line[0]
        special = first_line[1:]                     # pre-covered cell (row, col)
        query = list(map(int, input().split()))      # cell whose tromino partners we report
        size = pow(2, exponent)
        num = 0                                      # tromino counter read by chess()
        arr = [[0 for col in range(size)] for row in range(size)]
        chess(0, 0, special[0], special[1], size, arr)
        tile = arr[query[0]][query[1]]
        # Scan row-major so partners come out ordered by row, then column.
        partners = []
        for i in range(size):
            for j in range(size):
                if arr[i][j] == tile and not (i == query[0] and j == query[1]):
                    partners.append('{} {}'.format(i, j))
        # An L-tromino has three cells, so at most two partners are printed
        # (zero if the query cell is the special, untiled cell).
        print(','.join(partners[:2]))
| """
棋盘覆盖问题:
给定一个大小为2^n * 2^n个小方格的棋盘,其中有一个位置已经被填充,
现在要用一个L型(2 * 2个小方格组成的大方格中去掉其中一个小方格)形状去覆盖剩下的小方格。
求出覆盖方案,即哪些坐标下的小方格使用同一个L型格子覆盖。注意:坐标从0开始。
左上方的第一个格子坐标为(0,0),第一行第二个坐标为(0,1),第二行第一个为(1,0),以此类推。
输入: 第一行为测试用例个数,后面每一个用例有两行,
第一行为n值和特殊的格子的坐标(用空格隔开),
第二行为需要查找其属于同一个L型格子的格子坐标。
输出: 每一行为一个用例的解,先按照行值从小到大、
再按照列值从小到大的顺序输出每一个用例的两个坐标;用逗号隔开。
输入样例
1
1 1 1
0 0
输出样例
0 1,1 0
"""
'''
将2^k * 2^k问题分为4个规模为2^(k-1) * 2^(k-1)的子问题
'''
def chess(tr, tc, dr, dc, size, arr):
if size == 1:
return
global num
num += 1
t = num
s = int(size / 2)
# 左上
if dr < tr + s and dc < tc + s: # 特殊点在左上方
chess(tr, tc, dr, dc, s, arr)
else: # 将右下角涂t
arr[tr + s - 1][tc + s - 1] = t
chess(tr, tc, tr + s - 1, tc + s - 1, s, arr)
# 右上
if dr < tr + s and dc >= tc + s: # 特殊点在右上方
chess(tr, tc + s, dr, dc, s, arr)
else: # 将左下角涂t
arr[tr + s - 1][tc + s] = t
chess(tr, tc + s, tr + s - 1, tc + s, s, arr)
# 左下
if dr >= tr + s and dc < tc + s: # 特殊点在左下方
chess(tr + s, tc, dr, dc, s, arr)
else: # 将右上角涂t
arr[tr + s][tc + s - 1] = t
chess(tr + s, tc, tr + s, tc + s - 1, s, arr)
# 右下
if dr >= tr + s and dc >= tc + s: # 特殊点在右下方
chess(tr + s, tc + s, dr, dc, s, arr)
else: # 将左上角涂t
arr[tr + s][tc + s] = t
chess(tr + s, tc + s, tr + s, tc + s, s, arr)
if __name__ == '__main__':
N = int(input())
for k in range(N):
inputList = list(map(int, input().split()))
N = inputList[0]
s_node = inputList[1:] # 特殊格子坐标
f_node = list(map(int, input().split())) # 查找格子坐标
size = pow(2, N)
num = 0
arr = [[0 for col in range(size)] for row in range(size)]
chess(0, 0, s_node[0], s_node[1], size, arr)
count = 0
res = ""
for i in range(size):
for j in range(size):
if arr[i][j] == arr[f_node[0]][f_node[1]]:
if not (i == f_node[0] and j == f_node[1]):
if count == 0:
res += str(i) + " " + str(j) + ","
elif count == 1:
res += str(i) + " " + str(j)
count += 1
print(res)
| zh | 0.993872 | 棋盘覆盖问题: 给定一个大小为2^n * 2^n个小方格的棋盘,其中有一个位置已经被填充, 现在要用一个L型(2 * 2个小方格组成的大方格中去掉其中一个小方格)形状去覆盖剩下的小方格。 求出覆盖方案,即哪些坐标下的小方格使用同一个L型格子覆盖。注意:坐标从0开始。 左上方的第一个格子坐标为(0,0),第一行第二个坐标为(0,1),第二行第一个为(1,0),以此类推。 输入: 第一行为测试用例个数,后面每一个用例有两行, 第一行为n值和特殊的格子的坐标(用空格隔开), 第二行为需要查找其属于同一个L型格子的格子坐标。 输出: 每一行为一个用例的解,先按照行值从小到大、 再按照列值从小到大的顺序输出每一个用例的两个坐标;用逗号隔开。 输入样例 1 1 1 1 0 0 输出样例 0 1,1 0 将2^k * 2^k问题分为4个规模为2^(k-1) * 2^(k-1)的子问题 # 左上 # 特殊点在左上方 # 将右下角涂t # 右上 # 特殊点在右上方 # 将左下角涂t # 左下 # 特殊点在左下方 # 将右上角涂t # 右下 # 特殊点在右下方 # 将左上角涂t # 特殊格子坐标 # 查找格子坐标 | 4.150178 | 4 |
features/steps/user_steps.py | gekkeharry13/api-python | 1 | 6620961 | from behave import when, then
import conjur
@when("I create a user")
def step_create_user(context):
context.user_id = context.random_string('api-python-user')
context.user = context.api.create_user(context.user_id)
@when('I create a user "{name}"')
def step_creaet_user_named(context, name):
context.user_id = context.random_string(name)
context.user = context.api.create_user(context.user_id)
@then("I can login as the user using the api key")
def step_login_as_user(context):
user_api = context.user_api = conjur.new_from_key(
context.user_id, context.user.api_key, context.api.config)
user_api.authenticate(False)
@when(u'I create a user with a password')
def step_create_user_with_password(context):
context.user_id = context.random_string("api-python-user")
context.password = context.random_string('<PASSWORD>', 30)
context.user = context.api.create_user(context.user_id, context.password)
@then(u'I can login as the user using the password')
def step_login_as_user_with_password(context):
user_api = conjur.new_from_key(context.user_id, context.password,
context.api.config)
user_api.authenticate(False)
| from behave import when, then
import conjur
@when("I create a user")
def step_create_user(context):
context.user_id = context.random_string('api-python-user')
context.user = context.api.create_user(context.user_id)
@when('I create a user "{name}"')
def step_creaet_user_named(context, name):
context.user_id = context.random_string(name)
context.user = context.api.create_user(context.user_id)
@then("I can login as the user using the api key")
def step_login_as_user(context):
user_api = context.user_api = conjur.new_from_key(
context.user_id, context.user.api_key, context.api.config)
user_api.authenticate(False)
@when(u'I create a user with a password')
def step_create_user_with_password(context):
context.user_id = context.random_string("api-python-user")
context.password = context.random_string('<PASSWORD>', 30)
context.user = context.api.create_user(context.user_id, context.password)
@then(u'I can login as the user using the password')
def step_login_as_user_with_password(context):
user_api = conjur.new_from_key(context.user_id, context.password,
context.api.config)
user_api.authenticate(False)
| none | 1 | 2.67096 | 3 | |
main.py | fablab-wue/MicroPythinShow | 1 | 6620962 | import network
import time
# load WLAN credentials
from WLAN_PW import SSID, PASSWORD
# define function to start network
def net():
    """Connect the WLAN station interface using the credentials from WLAN_PW.

    Blocks, printing one dot per second, until the connection is
    established, then prints the interface's IP configuration.
    """
    sta_if = network.WLAN(network.STA_IF)  # station (client) interface
    sta_if.active(True)
    if not sta_if.isconnected():  # Check if connected
        print('Connecting to WiFi "{}"'.format(SSID))
        sta_if.connect(SSID, PASSWORD)  # Connect to an AP
        while not sta_if.isconnected():  # Check for successful connection
            print(".", end='')
            time.sleep(1)
        print()
    print(sta_if.ifconfig())
# load CLI commands
from upysh import *
import os, time, machine, onewire, ds18x20, network
from machine import PWM, Pin, ADC
import urequests as requests | import network
import time
# load WLAN credentials
from WLAN_PW import SSID, PASSWORD
# define function to start network
def net():
sta_if = network.WLAN(network.STA_IF)
sta_if.active(True)
if not sta_if.isconnected(): # Check if connected
print('Connecting to WiFi "{}"'.format(SSID))
sta_if.connect(SSID, PASSWORD) # Connect to an AP
while not sta_if.isconnected(): # Check for successful connection
print(".", end='')
time.sleep(1)
print()
print(sta_if.ifconfig())
# load CLI commands
from upysh import *
import os, time, machine, onewire, ds18x20, network
from machine import PWM, Pin, ADC
import urequests as requests | en | 0.7921 | # load WLAN credentials # define function to start network # Check if connected # Connect to an AP # Check for successful connection # load CLI commands | 2.932592 | 3 |
cloudmersive_nlp_api_client/models/word_position.py | Cloudmersive/Cloudmersive.APIClient.Python.NLP | 1 | 6620963 | # coding: utf-8
"""
nlpapiv2
The powerful Natural Language Processing APIs (v2) let you perform part of speech tagging, entity identification, sentence parsing, and much more to help you understand the meaning of unstructured text. # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class WordPosition(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> declared Swagger type, used by to_dict() recursion.
    swagger_types = {
        'word': 'str',
        'word_index': 'int',
        'start_position': 'int',
        'end_position': 'int'
    }

    # Attribute name -> JSON key in the API payload.
    attribute_map = {
        'word': 'Word',
        'word_index': 'WordIndex',
        'start_position': 'StartPosition',
        'end_position': 'EndPosition'
    }

    def __init__(self, word=None, word_index=None, start_position=None, end_position=None):  # noqa: E501
        """WordPosition - a model defined in Swagger"""  # noqa: E501
        self._word = None
        self._word_index = None
        self._start_position = None
        self._end_position = None
        # No polymorphic subtypes for this model.
        self.discriminator = None
        if word is not None:
            self.word = word
        if word_index is not None:
            self.word_index = word_index
        if start_position is not None:
            self.start_position = start_position
        if end_position is not None:
            self.end_position = end_position

    @property
    def word(self):
        """Gets the word of this WordPosition.  # noqa: E501

        Word as a string  # noqa: E501

        :return: The word of this WordPosition.  # noqa: E501
        :rtype: str
        """
        return self._word

    @word.setter
    def word(self, word):
        """Sets the word of this WordPosition.

        Word as a string  # noqa: E501

        :param word: The word of this WordPosition.  # noqa: E501
        :type: str
        """
        self._word = word

    @property
    def word_index(self):
        """Gets the word_index of this WordPosition.  # noqa: E501

        Zero-based index of the word; first word has index 0, second word has index 1 and so on  # noqa: E501

        :return: The word_index of this WordPosition.  # noqa: E501
        :rtype: int
        """
        return self._word_index

    @word_index.setter
    def word_index(self, word_index):
        """Sets the word_index of this WordPosition.

        Zero-based index of the word; first word has index 0, second word has index 1 and so on  # noqa: E501

        :param word_index: The word_index of this WordPosition.  # noqa: E501
        :type: int
        """
        self._word_index = word_index

    @property
    def start_position(self):
        """Gets the start_position of this WordPosition.  # noqa: E501

        Zero-based character offset at which the word begins in the input string  # noqa: E501

        :return: The start_position of this WordPosition.  # noqa: E501
        :rtype: int
        """
        return self._start_position

    @start_position.setter
    def start_position(self, start_position):
        """Sets the start_position of this WordPosition.

        Zero-based character offset at which the word begins in the input string  # noqa: E501

        :param start_position: The start_position of this WordPosition.  # noqa: E501
        :type: int
        """
        self._start_position = start_position

    @property
    def end_position(self):
        """Gets the end_position of this WordPosition.  # noqa: E501

        Zero-based character offset at which the word ends in the input string  # noqa: E501

        :return: The end_position of this WordPosition.  # noqa: E501
        :rtype: int
        """
        return self._end_position

    @end_position.setter
    def end_position(self, end_position):
        """Sets the end_position of this WordPosition.

        Zero-based character offset at which the word ends in the input string  # noqa: E501

        :param end_position: The end_position of this WordPosition.  # noqa: E501
        :type: int
        """
        self._end_position = end_position

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Generated boilerplate: only relevant if the model subclassed dict.
        if issubclass(WordPosition, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, WordPosition):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| # coding: utf-8
"""
nlpapiv2
The powerful Natural Language Processing APIs (v2) let you perform part of speech tagging, entity identification, sentence parsing, and much more to help you understand the meaning of unstructured text. # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class WordPosition(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'word': 'str',
'word_index': 'int',
'start_position': 'int',
'end_position': 'int'
}
attribute_map = {
'word': 'Word',
'word_index': 'WordIndex',
'start_position': 'StartPosition',
'end_position': 'EndPosition'
}
def __init__(self, word=None, word_index=None, start_position=None, end_position=None): # noqa: E501
"""WordPosition - a model defined in Swagger""" # noqa: E501
self._word = None
self._word_index = None
self._start_position = None
self._end_position = None
self.discriminator = None
if word is not None:
self.word = word
if word_index is not None:
self.word_index = word_index
if start_position is not None:
self.start_position = start_position
if end_position is not None:
self.end_position = end_position
@property
def word(self):
"""Gets the word of this WordPosition. # noqa: E501
Word as a string # noqa: E501
:return: The word of this WordPosition. # noqa: E501
:rtype: str
"""
return self._word
@word.setter
def word(self, word):
"""Sets the word of this WordPosition.
Word as a string # noqa: E501
:param word: The word of this WordPosition. # noqa: E501
:type: str
"""
self._word = word
@property
def word_index(self):
"""Gets the word_index of this WordPosition. # noqa: E501
Zero-based index of the word; first word has index 0, second word has index 1 and so on # noqa: E501
:return: The word_index of this WordPosition. # noqa: E501
:rtype: int
"""
return self._word_index
@word_index.setter
def word_index(self, word_index):
"""Sets the word_index of this WordPosition.
Zero-based index of the word; first word has index 0, second word has index 1 and so on # noqa: E501
:param word_index: The word_index of this WordPosition. # noqa: E501
:type: int
"""
self._word_index = word_index
@property
def start_position(self):
"""Gets the start_position of this WordPosition. # noqa: E501
Zero-based character offset at which the word begins in the input string # noqa: E501
:return: The start_position of this WordPosition. # noqa: E501
:rtype: int
"""
return self._start_position
@start_position.setter
def start_position(self, start_position):
"""Sets the start_position of this WordPosition.
Zero-based character offset at which the word begins in the input string # noqa: E501
:param start_position: The start_position of this WordPosition. # noqa: E501
:type: int
"""
self._start_position = start_position
@property
def end_position(self):
"""Gets the end_position of this WordPosition. # noqa: E501
Zero-based character offset at which the word ends in the input string # noqa: E501
:return: The end_position of this WordPosition. # noqa: E501
:rtype: int
"""
return self._end_position
@end_position.setter
def end_position(self, end_position):
"""Sets the end_position of this WordPosition.
Zero-based character offset at which the word ends in the input string # noqa: E501
:param end_position: The end_position of this WordPosition. # noqa: E501
:type: int
"""
self._end_position = end_position
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(WordPosition, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, WordPosition):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| en | 0.745192 | # coding: utf-8 nlpapiv2 The powerful Natural Language Processing APIs (v2) let you perform part of speech tagging, entity identification, sentence parsing, and much more to help you understand the meaning of unstructured text. # noqa: E501 OpenAPI spec version: v1 Generated by: https://github.com/swagger-api/swagger-codegen.git # noqa: F401 NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. # noqa: E501 WordPosition - a model defined in Swagger # noqa: E501 Gets the word of this WordPosition. # noqa: E501 Word as a string # noqa: E501 :return: The word of this WordPosition. # noqa: E501 :rtype: str Sets the word of this WordPosition. Word as a string # noqa: E501 :param word: The word of this WordPosition. # noqa: E501 :type: str Gets the word_index of this WordPosition. # noqa: E501 Zero-based index of the word; first word has index 0, second word has index 1 and so on # noqa: E501 :return: The word_index of this WordPosition. # noqa: E501 :rtype: int Sets the word_index of this WordPosition. Zero-based index of the word; first word has index 0, second word has index 1 and so on # noqa: E501 :param word_index: The word_index of this WordPosition. # noqa: E501 :type: int Gets the start_position of this WordPosition. # noqa: E501 Zero-based character offset at which the word begins in the input string # noqa: E501 :return: The start_position of this WordPosition. # noqa: E501 :rtype: int Sets the start_position of this WordPosition. Zero-based character offset at which the word begins in the input string # noqa: E501 :param start_position: The start_position of this WordPosition. # noqa: E501 :type: int Gets the end_position of this WordPosition. 
# noqa: E501 Zero-based character offset at which the word ends in the input string # noqa: E501 :return: The end_position of this WordPosition. # noqa: E501 :rtype: int Sets the end_position of this WordPosition. Zero-based character offset at which the word ends in the input string # noqa: E501 :param end_position: The end_position of this WordPosition. # noqa: E501 :type: int Returns the model properties as a dict Returns the string representation of the model For `print` and `pprint` Returns true if both objects are equal Returns true if both objects are not equal | 2.681377 | 3 |
config/flask_config.py | iqDF/receiptbook-service | 1 | 6620964 | import os
SQLALCHEMY_DATABASE_URI = 'sqlite:////home/iqdf/Development/OSSHackathon/test.db' | import os
SQLALCHEMY_DATABASE_URI = 'sqlite:////home/iqdf/Development/OSSHackathon/test.db' | none | 1 | 1.207754 | 1 | |
xdgconfig/__init__.py | Dogeek/pyconf | 1 | 6620965 | # flake8: noqa
from platform import system
import xdgconfig.mixins as mixins
__version__ = '1.2.1'
if system() == 'Windows':
from xdgconfig.config_win import WinConfig as Config
elif system() in ('Darwin', 'Linux') or system().startswith('CYGWIN'):
from xdgconfig.config_unix import UnixConfig as Config
else:
raise ImportError(
"xdgconfig is not available on this platform : %s" % system()
)
from xdgconfig.config import LocalConfig
class JsonConfig(mixins.JsonMixin, Config):
...
class LocalJsonConfig(mixins.JsonMixin, LocalConfig):
...
class IniConfig(mixins.IniMixin, Config):
...
class LocalIniConfig(mixins.IniMixin, LocalConfig):
...
if hasattr(mixins, 'XmlMixin'):
class XmlConfig(mixins.XmlMixin, Config):
...
class LocalXmlConfig(mixins.XmlMixin, LocalConfig):
...
if hasattr(mixins, 'YamlMixin'):
class YamlConfig(mixins.YamlMixin, Config):
...
class LocalYamlConfig(mixins.YamlMixin, LocalConfig):
...
if hasattr(mixins, 'TomlMixin'):
class TomlConfig(mixins.TomlMixin, Config):
...
class LocalTomlConfig(mixins.TomlMixin, LocalConfig):
...
| # flake8: noqa
from platform import system
import xdgconfig.mixins as mixins
__version__ = '1.2.1'
if system() == 'Windows':
from xdgconfig.config_win import WinConfig as Config
elif system() in ('Darwin', 'Linux') or system().startswith('CYGWIN'):
from xdgconfig.config_unix import UnixConfig as Config
else:
raise ImportError(
"xdgconfig is not available on this platform : %s" % system()
)
from xdgconfig.config import LocalConfig
class JsonConfig(mixins.JsonMixin, Config):
...
class LocalJsonConfig(mixins.JsonMixin, LocalConfig):
...
class IniConfig(mixins.IniMixin, Config):
...
class LocalIniConfig(mixins.IniMixin, LocalConfig):
...
if hasattr(mixins, 'XmlMixin'):
class XmlConfig(mixins.XmlMixin, Config):
...
class LocalXmlConfig(mixins.XmlMixin, LocalConfig):
...
if hasattr(mixins, 'YamlMixin'):
class YamlConfig(mixins.YamlMixin, Config):
...
class LocalYamlConfig(mixins.YamlMixin, LocalConfig):
...
if hasattr(mixins, 'TomlMixin'):
class TomlConfig(mixins.TomlMixin, Config):
...
class LocalTomlConfig(mixins.TomlMixin, LocalConfig):
...
| it | 0.238973 | # flake8: noqa | 2.234133 | 2 |
src/genie/libs/parser/iosxe/tests/ShowProcessesCpu/cli/equal/golden_output_1_expected.py | balmasea/genieparser | 204 | 6620966 | <filename>src/genie/libs/parser/iosxe/tests/ShowProcessesCpu/cli/equal/golden_output_1_expected.py
# Golden parsed output for "show processes cpu": eight processes, all with
# zero CPU utilization across the 5-second / 1-minute / 5-minute windows.
# Per process: (pid, name, runtime, invoked, usecs).
_PROCESSES = [
    (100, 'cpf_process_tpQ', 0, 1, 0),
    (189, 'ADJ NSF process', 0, 1, 0),
    (244, 'cdp init process', 2930, 365006, 8),
    (309, 'XDR FOF process', 0, 2, 0),
    (355, 'Inspect process', 9385, 712805, 13),
    (429, 'LDAP process', 0, 3, 0),
    (547, 'VDC process', 592, 73010, 8),
    (615, 'SBC main process', 19084, 730005, 26),
]

expected_output = {
    'sort': {
        index: {
            'five_min_cpu': 0.0,
            'five_sec_cpu': 0.0,
            'invoked': invoked,
            'one_min_cpu': 0.0,
            'pid': pid,
            'process': process,
            'runtime': runtime,
            'tty': 0,
            'usecs': usecs,
        }
        for index, (pid, process, runtime, invoked, usecs)
        in enumerate(_PROCESSES, start=1)
    },
    'zero_cpu_processes': [process for _, process, _, _, _ in _PROCESSES],
}
| <filename>src/genie/libs/parser/iosxe/tests/ShowProcessesCpu/cli/equal/golden_output_1_expected.py
expected_output = {
"sort": {
1: {
"five_min_cpu": 0.0,
"five_sec_cpu": 0.0,
"invoked": 1,
"one_min_cpu": 0.0,
"pid": 100,
"process": "cpf_process_tpQ",
"runtime": 0,
"tty": 0,
"usecs": 0,
},
2: {
"five_min_cpu": 0.0,
"five_sec_cpu": 0.0,
"invoked": 1,
"one_min_cpu": 0.0,
"pid": 189,
"process": "ADJ NSF process",
"runtime": 0,
"tty": 0,
"usecs": 0,
},
3: {
"five_min_cpu": 0.0,
"five_sec_cpu": 0.0,
"invoked": 365006,
"one_min_cpu": 0.0,
"pid": 244,
"process": "cdp init process",
"runtime": 2930,
"tty": 0,
"usecs": 8,
},
4: {
"five_min_cpu": 0.0,
"five_sec_cpu": 0.0,
"invoked": 2,
"one_min_cpu": 0.0,
"pid": 309,
"process": "XDR FOF process",
"runtime": 0,
"tty": 0,
"usecs": 0,
},
5: {
"five_min_cpu": 0.0,
"five_sec_cpu": 0.0,
"invoked": 712805,
"one_min_cpu": 0.0,
"pid": 355,
"process": "Inspect process",
"runtime": 9385,
"tty": 0,
"usecs": 13,
},
6: {
"five_min_cpu": 0.0,
"five_sec_cpu": 0.0,
"invoked": 3,
"one_min_cpu": 0.0,
"pid": 429,
"process": "LDAP process",
"runtime": 0,
"tty": 0,
"usecs": 0,
},
7: {
"five_min_cpu": 0.0,
"five_sec_cpu": 0.0,
"invoked": 73010,
"one_min_cpu": 0.0,
"pid": 547,
"process": "VDC process",
"runtime": 592,
"tty": 0,
"usecs": 8,
},
8: {
"five_min_cpu": 0.0,
"five_sec_cpu": 0.0,
"invoked": 730005,
"one_min_cpu": 0.0,
"pid": 615,
"process": "SBC main process",
"runtime": 19084,
"tty": 0,
"usecs": 26,
},
},
"zero_cpu_processes": [
"cpf_process_tpQ",
"ADJ NSF process",
"cdp init process",
"XDR FOF process",
"Inspect process",
"LDAP process",
"VDC process",
"SBC main process",
],
}
| none | 1 | 1.583647 | 2 | |
vmodel/dataset.py | lis-epfl/vmodel | 0 | 6620967 | import pickle
from datetime import datetime
import numpy as np
import pandas as pd
import xarray as xr
import yaml
from vmodel.util.util import clean_attrs
def generate_filename(args):
    """Build a timestamped output file name encoding the key run parameters.

    The name has the form
    ``<timestamp>_agents_<n>_..._rngstd_<s>.states.<ext>`` where the
    extension is chosen by ``args.format`` ('netcdf' -> nc, 'pickle' -> pkl).
    """
    timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    params = {
        'agents': args.num_agents,
        'runs': args.num_runs,
        'times': args.num_timesteps,
        'dist': args.ref_distance,
        'perc': args.perception_radius,
        'topo': args.max_agents,
        'rngstd': args.range_std,
    }
    extension = {'netcdf': 'nc', 'pickle': 'pkl'}[args.format]
    parts = [timestamp] + [f'{key}_{value}' for key, value in params.items()]
    return '_'.join(parts) + f'.states.{extension}'
def create_dataset(datas, args):
    """Bundle per-run simulation results into a single xarray Dataset.

    Args:
        datas: per-run result objects exposing ``time``, ``pos``, ``vel``
            and (unless precomputed data is skipped) ``vis`` sequences.
        args: parsed CLI namespace; stored (cleaned) as dataset attrs.

    Returns:
        xr.Dataset with ``position`` and ``velocity`` variables, plus
        ``visibility`` unless ``args.no_save_precomputed`` is set.
    """
    ds = xr.Dataset()
    # Clean up attrs dict to be compatible with YAML and NETCDF
    ds.attrs = clean_attrs(vars(args))
    # All runs share one time vector, so take it from the first run.
    time = np.array(datas[0].time)
    # Stack per-run arrays along a new leading 'run' axis.
    pos = np.array([d.pos for d in datas])
    vel = np.array([d.vel for d in datas])
    # Run and agent coordinates are 1-based.
    coord_run = np.arange(args.num_runs, dtype=int) + 1
    coord_time = pd.to_timedelta(time, unit='s')
    coord_agent = np.arange(args.num_agents, dtype=int) + 1
    coord_space = np.array(['x', 'y'])
    # Coordinates for (run, time, agent, space) variables.
    coords_rtas = {
        'run': coord_run,
        'time': coord_time,
        'agent': coord_agent,
        'space': coord_space
    }
    dapos = xr.DataArray(pos, dims=coords_rtas.keys(), coords=coords_rtas)
    dapos.attrs['units'] = 'meters'
    dapos.attrs['long_name'] = 'position'
    ds['position'] = dapos
    davel = xr.DataArray(vel, dims=coords_rtas.keys(), coords=coords_rtas)
    davel.attrs['units'] = 'meters/second'
    davel.attrs['long_name'] = 'velocity'
    ds['velocity'] = davel
    ds = ds.transpose('run', 'agent', 'space', 'time')
    # Return only state (position and velocity)
    if args.no_save_precomputed:
        return ds
    # Coordinates for the pairwise (run, time, agent, agent2) visibility.
    coords_rtaa = {
        'run': coord_run,
        'time': coord_time,
        'agent': coord_agent,
        'agent2': coord_agent
    }
    vis = np.array([d.vis for d in datas])
    davis = xr.DataArray(vis, dims=coords_rtaa.keys(), coords=coords_rtaa)
    davis.attrs['units'] = 'boolean'
    davis.attrs['long_name'] = 'visibility'
    ds['visibility'] = davis
    # Transpose to match data generated from Gazebo
    ds = ds.transpose('run', 'agent', 'agent2', 'space', 'time')
    return ds
def save_dataset(ds, fname, args):
    """Write the dataset to disk as pickle or (optionally compressed) NetCDF.

    A ``<fname>.yaml`` sidecar containing the dataset attributes is
    written in all cases.  NOTE(review): an unrecognized ``args.format``
    silently writes only the YAML sidecar -- confirm this is intended.
    """
    if args.format == 'pickle':
        with open(fname, 'wb') as f:
            pickle.dump(ds, f, protocol=pickle.HIGHEST_PROTOCOL)
    elif args.format == 'netcdf':
        # Per-variable zlib level-5 compression unless disabled via CLI.
        comp = dict(zlib=True, complevel=5)
        encoding = None if args.no_compress else {v: comp for v in ds.data_vars}
        ds.to_netcdf(fname, encoding=encoding)
    with open(f'{fname}.yaml', 'w') as f:
        yaml.dump(ds.attrs, f)
| import pickle
from datetime import datetime
import numpy as np
import pandas as pd
import xarray as xr
import yaml
from vmodel.util.util import clean_attrs
def generate_filename(args):
# Construct output file name
time_str = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
fnamedict = {
'agents': args.num_agents,
'runs': args.num_runs,
'times': args.num_timesteps,
'dist': args.ref_distance,
'perc': args.perception_radius,
'topo': args.max_agents,
'rngstd': args.range_std,
}
formatexts = {'netcdf': 'nc', 'pickle': 'pkl'}
args_str = '_'.join(f'{k}_{v}' for k, v in fnamedict.items())
return f'{time_str}_{args_str}.states.{formatexts[args.format]}'
def create_dataset(datas, args):
ds = xr.Dataset()
# Clean up attrs dict to be compatible with YAML and NETCDF
ds.attrs = clean_attrs(vars(args))
time = np.array(datas[0].time)
pos = np.array([d.pos for d in datas])
vel = np.array([d.vel for d in datas])
coord_run = np.arange(args.num_runs, dtype=int) + 1
coord_time = pd.to_timedelta(time, unit='s')
coord_agent = np.arange(args.num_agents, dtype=int) + 1
coord_space = np.array(['x', 'y'])
coords_rtas = {
'run': coord_run,
'time': coord_time,
'agent': coord_agent,
'space': coord_space
}
dapos = xr.DataArray(pos, dims=coords_rtas.keys(), coords=coords_rtas)
dapos.attrs['units'] = 'meters'
dapos.attrs['long_name'] = 'position'
ds['position'] = dapos
davel = xr.DataArray(vel, dims=coords_rtas.keys(), coords=coords_rtas)
davel.attrs['units'] = 'meters/second'
davel.attrs['long_name'] = 'velocity'
ds['velocity'] = davel
ds = ds.transpose('run', 'agent', 'space', 'time')
# Return only state (position and velocity)
if args.no_save_precomputed:
return ds
coords_rtaa = {
'run': coord_run,
'time': coord_time,
'agent': coord_agent,
'agent2': coord_agent
}
vis = np.array([d.vis for d in datas])
davis = xr.DataArray(vis, dims=coords_rtaa.keys(), coords=coords_rtaa)
davis.attrs['units'] = 'boolean'
davis.attrs['long_name'] = 'visibility'
ds['visibility'] = davis
# Tranpose to match data generated from Gazebo
ds = ds.transpose('run', 'agent', 'agent2', 'space', 'time')
return ds
def save_dataset(ds, fname, args):
if args.format == 'pickle':
with open(fname, 'wb') as f:
pickle.dump(ds, f, protocol=pickle.HIGHEST_PROTOCOL)
elif args.format == 'netcdf':
comp = dict(zlib=True, complevel=5)
encoding = None if args.no_compress else {v: comp for v in ds.data_vars}
ds.to_netcdf(fname, encoding=encoding)
with open(f'{fname}.yaml', 'w') as f:
yaml.dump(ds.attrs, f)
| en | 0.796745 | # Construct output file name # Clean up attrs dict to be compatible with YAML and NETCDF # Return only state (position and velocity) # Tranpose to match data generated from Gazebo | 2.265958 | 2 |
tests/component/records/targets/test_data_url.py | cwegrzyn/records-mover | 36 | 6620968 | import unittest
from records_mover.url.filesystem import FilesystemFileUrl
from records_mover.records.targets.data_url import DataUrlTarget
from records_mover.records.records_format import DelimitedRecordsFormat
class TestDataUrlTarget(unittest.TestCase):
    """DataUrlTarget should infer the 'compression' hint from the target
    URL's file extension, but never override an explicitly customized
    records format."""

    def test_gzip_compression_inferred_no_records_format(self):
        # A .gz suffix implies GZIP when no records format is supplied.
        url = FilesystemFileUrl('file:///foo/bar/baz.csv.gz')
        target = DataUrlTarget(url, records_format=None)
        self.assertEqual(target.records_format.hints['compression'], 'GZIP')

    def test_no_compression_inferred_no_records_format(self):
        # A plain .csv suffix implies no compression.
        url = FilesystemFileUrl('file:///foo/bar/baz.csv')
        target = DataUrlTarget(url, records_format=None)
        self.assertIsNone(target.records_format.hints['compression'])

    def test_no_compression_inferred_bluelabs_records_format(self):
        # Inference also applies when a stock variant (no hint overrides)
        # is supplied.
        url = FilesystemFileUrl('file:///foo/bar/baz.csv')
        target = DataUrlTarget(url,
                               records_format=DelimitedRecordsFormat(variant='bluelabs'))
        self.assertIsNone(target.records_format.hints['compression'])

    def test_gzip_compression_inferred_bluelabs_records_format(self):
        url = FilesystemFileUrl('file:///foo/bar/baz.csv.gz')
        target = DataUrlTarget(url,
                               records_format=DelimitedRecordsFormat(variant='bluelabs'))
        self.assertEqual(target.records_format.hints['compression'], 'GZIP')

    def test_no_compression_not_inferred_customized_records_format(self):
        # An explicit compression hint wins over the extension-based guess.
        url = FilesystemFileUrl('file:///foo/bar/baz.csv')
        target = DataUrlTarget(url,
                               records_format=DelimitedRecordsFormat(variant='bluelabs',
                                                                     hints={'compression': 'GZIP'}))
        self.assertEqual(target.records_format.hints['compression'], 'GZIP')
| import unittest
from records_mover.url.filesystem import FilesystemFileUrl
from records_mover.records.targets.data_url import DataUrlTarget
from records_mover.records.records_format import DelimitedRecordsFormat
class TestDataUrlTarget(unittest.TestCase):
    """Compression-hint inference when building a DataUrlTarget from a file URL."""

    @staticmethod
    def _build(url_str, records_format=None):
        # Helper: construct a target for the given file URL.
        return DataUrlTarget(FilesystemFileUrl(url_str),
                             records_format=records_format)

    def test_gzip_compression_inferred_no_records_format(self):
        target = self._build('file:///foo/bar/baz.csv.gz')
        self.assertEqual(target.records_format.hints['compression'], 'GZIP')

    def test_no_compression_inferred_no_records_format(self):
        target = self._build('file:///foo/bar/baz.csv')
        self.assertIsNone(target.records_format.hints['compression'])

    def test_no_compression_inferred_bluelabs_records_format(self):
        target = self._build('file:///foo/bar/baz.csv',
                             DelimitedRecordsFormat(variant='bluelabs'))
        self.assertIsNone(target.records_format.hints['compression'])

    def test_gzip_compression_inferred_bluelabs_records_format(self):
        target = self._build('file:///foo/bar/baz.csv.gz',
                             DelimitedRecordsFormat(variant='bluelabs'))
        self.assertEqual(target.records_format.hints['compression'], 'GZIP')

    def test_no_compression_not_inferred_customized_records_format(self):
        fmt = DelimitedRecordsFormat(variant='bluelabs',
                                     hints={'compression': 'GZIP'})
        target = self._build('file:///foo/bar/baz.csv', fmt)
        self.assertEqual(target.records_format.hints['compression'], 'GZIP')
| none | 1 | 2.69883 | 3 | |
template.py | jo3-l/advent | 0 | 6620969 | from collections import defaultdict, Counter, deque
from functools import cache
import math
import re
import itertools
import os
from heapq import heappush, heappop
adj4 = ((0, -1), (0, 1), (1, 0), (-1, 0))
adj8 = ((1, 0), (-1, 0), (0, 1), (0, -1), (1, -1), (1, 1), (-1, 1), (-1, -1))
def lmap(f, it):
    """Like ``map``, but eagerly materialized into a list."""
    return [f(x) for x in it]
def ints(it):
    """Convert every element of *it* to ``int``, returning a list."""
    return [int(x) for x in it]
def make_indexer(lst, default=None):
    """Return a getter over the nested sequence *lst*.

    The getter takes a path of indices and returns the element at that path,
    or *default* as soon as any index along the path is out of range
    (negative indices are treated as out of range too).
    """
    def get(*indices):
        node = lst
        for idx in indices:
            if not (0 <= idx < len(node)):
                return default
            node = node[idx]
        return node
    return get
def solve(input):
    # Puzzle-specific logic goes here; ``input`` is the normalized puzzle
    # text (whole-string stripped, each line stripped).
    # NOTE: the parameter shadows the ``input`` builtin — template convention.
    ...
cur_dir = os.path.dirname(os.path.realpath(__file__))


def normalize_str(s):
    """Strip the text as a whole and left/right-trim every line."""
    return "\n".join(line.strip() for line in s.strip().splitlines())


# Run the solver on the sample first, then on the real input.
for header, fname in (("SAMPLE OUTPUT", "sample.txt"), ("OUTPUT", "input.txt")):
    print(header)
    with open(os.path.join(cur_dir, fname)) as fh:
        print(solve(normalize_str(fh.read())))
    if fname == "sample.txt":
        print("---")
| from collections import defaultdict, Counter, deque
from functools import cache
import math
import re
import itertools
import os
from heapq import heappush, heappop
adj4 = ((0, -1), (0, 1), (1, 0), (-1, 0))
adj8 = ((1, 0), (-1, 0), (0, 1), (0, -1), (1, -1), (1, 1), (-1, 1), (-1, -1))
def lmap(f, it):
    """Like ``map``, but eagerly materialized into a list."""
    return [f(x) for x in it]
def ints(it):
    """Convert every element of *it* to ``int``, returning a list."""
    return [int(x) for x in it]
def make_indexer(lst, default=None):
    """Return a getter over the nested sequence *lst*.

    The getter takes a path of indices and returns the element at that path,
    or *default* as soon as any index along the path is out of range
    (negative indices are treated as out of range too).
    """
    def get(*indices):
        node = lst
        for idx in indices:
            if not (0 <= idx < len(node)):
                return default
            node = node[idx]
        return node
    return get
def solve(input):
    # Puzzle-specific logic goes here; ``input`` is the normalized puzzle
    # text (whole-string stripped, each line stripped).
    # NOTE: the parameter shadows the ``input`` builtin — template convention.
    ...
cur_dir = os.path.dirname(os.path.realpath(__file__))


def normalize_str(s):
    """Strip the text as a whole and left/right-trim every line."""
    return "\n".join(line.strip() for line in s.strip().splitlines())


# Run the solver on the sample first, then on the real input.
for header, fname in (("SAMPLE OUTPUT", "sample.txt"), ("OUTPUT", "input.txt")):
    print(header)
    with open(os.path.join(cur_dir, fname)) as fh:
        print(solve(normalize_str(fh.read())))
    if fname == "sample.txt":
        print("---")
| none | 1 | 2.999292 | 3 | |
maestral/gui/utils.py | michaelbjames/maestral-dropbox | 0 | 6620970 | <filename>maestral/gui/utils.py
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 31 16:23:13 2018
@author: samschott
"""
# system imports
import sys
import os
import platform
from subprocess import Popen
from traceback import format_exception
# external packages
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import Qt, QRect
from PyQt5.QtGui import QBrush, QImage, QPainter, QPixmap, QWindow
# maestral modules
from maestral.gui.resources import APP_ICON_PATH, rgb_to_luminance
from maestral.utils import is_macos_bundle
THEME_DARK = "dark"
THEME_LIGHT = "light"
LINE_COLOR_DARK = (95, 104, 104)
LINE_COLOR_LIGHT = (205, 203, 205)
def elide_string(string, font=None, pixels=200, side="right"):
"""
Elide a string to fit into the given width.
:param str string: String to elide.
:param font: Font to calculate size. If not given, the current style's default font
for a QLabel is used.
:param int pixels: Maximum width in pixels.
:param str side: Side to truncate. Can be "right" or "left", defaults to "right".
:return: Truncated string.
:rtype: str
"""
if not font:
font = QtWidgets.QLabel().font()
metrics = QtGui.QFontMetrics(font)
mode = Qt.ElideRight if side is "right" else Qt.ElideLeft
return metrics.elidedText(string, mode, pixels)
def get_scaled_font(scaling=1.0, bold=False, italic=False):
    """
    Return the style's default ``QLabel`` font with its point size scaled.

    :param float scaling: Factor applied to the default point size.
    :param bool bold: Make the returned font bold (defaults to ``False``).
    :param bool italic: Make the returned font italic (defaults to ``False``).
    :return: ``QFont`` instance.
    """
    font = QtWidgets.QLabel().font()
    font.setBold(bold)
    font.setItalic(italic)
    # noinspection PyTypeChecker
    font.setPointSize(round(font.pointSize() * scaling))
    return font
def icon_to_pixmap(icon, width, height=None):
    """
    Convert *icon* to a pixmap, compensating for high-DPI scaling.

    :param icon: Icon to convert.
    :param int width: Target width in points.
    :param int height: Target height in points; defaults to *width*.
    :return: ``QPixmap`` instance.
    """
    height = height or width

    hidpi = QtCore.QCoreApplication.testAttribute(Qt.AA_UseHighDpiPixmaps)
    ratio = QWindow().devicePixelRatio()

    if hidpi:
        # Qt handles device-pixel scaling for us.
        return icon.pixmap(width, height)

    # Qt will not scale: request a physically larger pixmap and tag it with
    # the device pixel ratio ourselves.
    pixmap = icon.pixmap(width * ratio, height * ratio)
    pixmap.setDevicePixelRatio(ratio)
    return pixmap
def windowTheme():
    """
    Return ``THEME_LIGHT`` or ``THEME_DARK``, matching the current UI theme.

    The decision is based on the luminance of the default widget background
    colour: luminance >= 0.4 counts as a light theme.
    """
    bg = QtWidgets.QWidget().palette().color(QtGui.QPalette.Background)
    if rgb_to_luminance(bg.red(), bg.green(), bg.blue()) >= 0.4:
        return THEME_LIGHT
    return THEME_DARK
def isDarkWindow():
    """Return ``True`` when the current window theme is the dark one."""
    # windowTheme() only ever returns THEME_LIGHT or THEME_DARK.
    return windowTheme() != THEME_LIGHT
def get_gnome_scaling_factor():
    """Return the Gnome UI scaling factor as a string, or ``None``.

    ``None`` is returned when gsettings is unavailable, the output cannot be
    parsed, or the factor is not greater than 1.
    """
    if not __command_exists("gsettings"):
        return None

    output = os.popen("gsettings get org.gnome.desktop.interface scaling-factor").read()
    parts = output.split()
    # Expected output shape: "uint32 <factor>".
    if len(parts) > 1 and parts[0] == "uint32":
        try:
            if float(parts[1]) > 1:
                return parts[1]
        except ValueError:
            pass
    return None
def __command_exists(command):
    """Return ``True`` if *command* is an executable file on the PATH."""
    for directory in os.environ["PATH"].split(os.pathsep):
        if os.access(os.path.join(directory, command), os.X_OK):
            return True
    return False
class MaestralWorker(QtCore.QObject):
    """A worker object for Maestral, intended to live inside a QThread.

    :meth:`start` runs ``target(*args, **kwargs)`` and emits the result
    through :attr:`sig_done`.
    """

    sig_done = QtCore.pyqtSignal(object)

    def __init__(self, target=None, args=None, kwargs=None):
        QtCore.QObject.__init__(self)
        self._target = target
        self._args = args if args is not None else ()
        self._kwargs = kwargs if kwargs is not None else {}

    def start(self):
        """Execute the target callable and emit its return value."""
        self.sig_done.emit(self._target(*self._args, **self._kwargs))
class MaestralBackgroundTask(QtCore.QObject):
    """Run a callable on a dedicated QThread and re-emit its result.

    A :class:`MaestralWorker` is moved onto a fresh thread; its result is
    forwarded through :attr:`sig_done` and the thread is quit once the
    callable returns.
    """

    sig_done = QtCore.pyqtSignal(object)

    def __init__(self, parent=None, target=None, args=None, kwargs=None, autostart=True):
        QtCore.QObject.__init__(self, parent)
        self._target = target
        self._args = args if args is not None else ()
        self._kwargs = kwargs if kwargs is not None else {}
        if autostart:
            self.start()

    def start(self):
        """Create the thread/worker pair and begin execution."""
        # Keep references on self so the pair is not garbage collected.
        self.thread = QtCore.QThread(self)
        self.worker = MaestralWorker(
            target=self._target, args=self._args, kwargs=self._kwargs)
        # Forward the worker's result, then shut the thread down.
        self.worker.sig_done.connect(self.sig_done.emit)
        self.worker.sig_done.connect(self.thread.quit)
        self.worker.moveToThread(self.thread)
        self.thread.started.connect(self.worker.start)
        self.thread.start()

    def wait(self, timeout=None):
        """Block until the worker thread finishes, optionally up to *timeout* ms."""
        if not timeout:
            self.thread.wait()
        else:
            self.thread.wait(msecs=timeout)
class UserDialog(QtWidgets.QDialog):
    """A template user dialog for Maestral. Shows a traceback if given in constructor.

    Layout: app icon on the left, bold title and word-wrapped message on the
    right, an optional traceback box below, and a button box at the bottom.
    """
    def __init__(self, title, message, exc_info=None, parent=None):
        # :param title: bold headline shown next to the app icon
        # :param message: word-wrapped body text
        # :param exc_info: optional sys.exc_info() tuple; when given, the
        #     formatted traceback is shown in an extra text box
        super(self.__class__, self).__init__(parent=parent)
        # Modal, sheet-style window without close/minimize decorations.
        self.setModal(True)
        self.setWindowModality(Qt.WindowModal)
        self.setWindowFlags(Qt.WindowStaysOnTopHint | Qt.Sheet | Qt.WindowTitleHint | Qt.CustomizeWindowHint)
        self.setWindowTitle("")
        self.setFixedWidth(450)
        self.gridLayout = QtWidgets.QGridLayout()
        self.setLayout(self.gridLayout)
        self.iconLabel = QtWidgets.QLabel(self)
        self.titleLabel = QtWidgets.QLabel(self)
        self.infoLabel = QtWidgets.QLabel(self)
        # App icon is pinned to a fixed 70x70 square.
        icon_size = 70
        self.iconLabel.setMinimumSize(icon_size, icon_size)
        self.iconLabel.setMaximumSize(icon_size, icon_size)
        self.titleLabel.setFont(get_scaled_font(bold=True))
        self.infoLabel.setFont(get_scaled_font(scaling=0.9))
        self.infoLabel.setWordWrap(True)
        icon = QtGui.QIcon(APP_ICON_PATH)
        self.iconLabel.setPixmap(icon_to_pixmap(icon, icon_size))
        self.titleLabel.setText(title)
        self.infoLabel.setText(message)
        if exc_info:
            # Read-only text box with the formatted traceback.
            self.details = QtWidgets.QTextEdit(self)
            self.details.setText("".join(format_exception(*exc_info)))
        self.buttonBox = QtWidgets.QDialogButtonBox(QtWidgets.QDialogButtonBox.Ok)
        self.buttonBox.accepted.connect(self.accept)
        # Grid: icon spans two rows; the traceback (if any) spans both columns.
        self.gridLayout.addWidget(self.iconLabel, 0, 0, 2, 1)
        self.gridLayout.addWidget(self.titleLabel, 0, 1, 1, 1)
        self.gridLayout.addWidget(self.infoLabel, 1, 1, 1, 1)
        if exc_info:
            self.gridLayout.addWidget(self.details, 2, 0, 1, 2)
        self.gridLayout.addWidget(self.buttonBox, 3, 1, -1, -1)
    def setAcceptButtonName(self, name):
        """Rename the default OK button."""
        self.buttonBox.buttons()[0].setText(name)
    def addCancelButton(self, name="Cancel"):
        """Add a Cancel button (closes the dialog) with the given label."""
        self._cancelButton = self.buttonBox.addButton(QtWidgets.QDialogButtonBox.Cancel)
        self._cancelButton.setText(name)
        self._cancelButton.clicked.connect(self.close)
    def setCancelButtonName(self, name):
        """Rename the Cancel button; call addCancelButton() first."""
        self._cancelButton.setText(name)
    def addSecondAcceptButton(self, name, icon="dialog-ok"):
        """Add a second accept button; accepting through it sets result code 2.

        :param name: button label
        :param icon: a ``QIcon`` or an icon theme name (default "dialog-ok")
        """
        self._acceptButton2 = self.buttonBox.addButton(QtWidgets.QDialogButtonBox.Ignore)
        self._acceptButton2.setText(name)
        if isinstance(icon, QtGui.QIcon):
            self._acceptButton2.setIcon(icon)
        elif isinstance(icon, str):
            self._acceptButton2.setIcon(QtGui.QIcon.fromTheme(icon))
        self._acceptButton2.clicked.connect(lambda: self.setResult(2))
        self._acceptButton2.clicked.connect(self.close)
    def setSecondAcceptButtonName(self, name):
        """Rename the second accept button; call addSecondAcceptButton() first."""
        self._acceptButton2.setText(name)
def quit_and_restart_maestral():
    """
    Quit and schedule a restart of Maestral with the current configuration.

    A helper shell command waits for the current process to exit (``lsof``
    on macOS, ``tail --pid`` on Linux) and then launches Maestral again.
    macOS app bundles are relaunched through the bundled executable instead
    of the ``maestral`` command line script.
    """
    pid = os.getpid()  # the watcher command waits on this PID
    config_name = os.getenv("MAESTRAL_CONFIG", "maestral")

    # The branches must be mutually exclusive: a macOS app bundle also
    # reports platform "Darwin", and the original fall-through started a
    # second copy via the CLI after relaunching the bundle.
    if is_macos_bundle:
        launch_command = os.path.join(sys._MEIPASS, "main")
        # Bug fix: lsof must be given the PID to wait on, not the launch
        # command (the original formatted ``launch_command`` into both slots).
        Popen("lsof -p {0} +r 1 &>/dev/null; {1}".format(pid, launch_command),
              shell=True)
    elif platform.system() == "Darwin":
        Popen("lsof -p {0} +r 1 &>/dev/null; maestral gui --config-name='{1}'".format(
            pid, config_name), shell=True)
    elif platform.system() == "Linux":
        Popen("tail --pid={0} -f /dev/null; maestral gui --config-name='{1}'".format(
            pid, config_name), shell=True)

    QtCore.QCoreApplication.quit()
    sys.exit(0)
def get_masked_image(path, size=64, overlay_text=""):
    """
    Returns a ``QPixmap`` from an image file masked with a smooth circle.
    The returned pixmap will have a size of *size* × *size* pixels.

    :param str path: Path to image file.
    :param int size: Target size. Will be the diameter of the masked image.
    :param overlay_text: Overlay text. This will be shown in white sans-serif on top of
        the image.
    :return: `QPixmap`` instance.
    """
    with open(path, "rb") as f:
        imgdata = f.read()
    imgtype = path.split(".")[-1]

    # Load image and convert to 32-bit ARGB (adds an alpha channel).
    # Bug fix: QImage.convertToFormat() returns a *new* image; the original
    # discarded the return value, so the conversion never took effect.
    image = QImage.fromData(imgdata, imgtype)
    image = image.convertToFormat(QImage.Format_ARGB32)

    # Crop image to a square. QRect takes int coordinates, so use floor
    # division (true division yields floats and a TypeError on newer PyQt5).
    imgsize = min(image.width(), image.height())
    rect = QRect(
        (image.width() - imgsize) // 2,
        (image.height() - imgsize) // 2,
        imgsize,
        imgsize,
    )
    image = image.copy(rect)

    # Create the output image with the same dimensions and an alpha channel
    # and make it completely transparent:
    out_img = QImage(imgsize, imgsize, QImage.Format_ARGB32)
    out_img.fill(Qt.transparent)

    # Create a texture brush and paint a circle with the original image onto
    # the output image:
    brush = QBrush(image)  # Create texture brush
    painter = QPainter(out_img)  # Paint the output image
    painter.setBrush(brush)  # Use the image texture brush
    painter.setPen(Qt.NoPen)  # Don't draw an outline
    painter.setRenderHint(QPainter.Antialiasing, True)  # Use AA
    painter.drawEllipse(0, 0, imgsize, imgsize)  # Actually draw the circle

    if overlay_text:
        # Draw the overlay text; setPointSize() requires an int.
        font = QtGui.QFont("Arial Rounded MT Bold")
        font.setPointSize(round(imgsize * 0.4))
        painter.setFont(font)
        painter.setPen(Qt.white)
        painter.drawText(QRect(0, 0, imgsize, imgsize), Qt.AlignCenter, overlay_text)

    painter.end()  # We are done (segfault if you forget this)

    # Convert the image to a pixmap and rescale it. Take pixel ratio into
    # account to get a sharp image on retina displays:
    pr = QWindow().devicePixelRatio()
    pm = QPixmap.fromImage(out_img)
    pm.setDevicePixelRatio(pr)
    # devicePixelRatio() is a float; scaled() needs int target dimensions.
    size = int(size * pr)
    pm = pm.scaled(size, size, Qt.KeepAspectRatio, Qt.SmoothTransformation)

    return pm
class FaderWidget(QtWidgets.QWidget):
    """Overlay widget that cross-fades from *old_widget* to *new_widget*.

    A snapshot of the old widget is painted over the new one with opacity
    animated from 1 to 0 by a ``QTimeLine``; the overlay closes itself when
    the fade finishes.
    """
    # Current opacity of the old-widget snapshot (1.0 = fully opaque).
    pixmap_opacity = 1.0
    def __init__(self, old_widget, new_widget, duration=300):
        # Parent onto the new widget so the overlay sits on top of it.
        QtWidgets.QWidget.__init__(self, new_widget)
        # Snapshot the old widget at the physical (device-pixel) resolution.
        pr = QWindow().devicePixelRatio()
        self.old_pixmap = QPixmap(new_widget.size()*pr)
        self.old_pixmap.setDevicePixelRatio(pr)
        old_widget.render(self.old_pixmap)
        # Timeline drives the fade: valueChanged updates the opacity,
        # finished closes this overlay.
        self.timeline = QtCore.QTimeLine()
        self.timeline.valueChanged.connect(self.animate)
        self.timeline.finished.connect(self.close)
        self.timeline.setDuration(duration)
        self.timeline.start()
        self.resize(new_widget.size())
        self.show()
    def paintEvent(self, event):
        """Paint the old-widget snapshot at the current fade opacity."""
        painter = QPainter()
        painter.begin(self)
        painter.setOpacity(self.pixmap_opacity)
        painter.drawPixmap(0, 0, self.old_pixmap)
        painter.end()
    def animate(self, value):
        """Timeline callback: *value* runs 0 -> 1, opacity runs 1 -> 0."""
        self.pixmap_opacity = 1.0 - value
        self.repaint()
class AnimatedStackedWidget(QtWidgets.QStackedWidget):
    """
    A subclass of ``QStackedWidget`` with sliding or fading animations between stacks.
    """
    def __init__(self, parent=None):
        super(AnimatedStackedWidget, self).__init__(parent)
        # Animation settings: slide axis, duration (ms) and easing curve.
        self.m_direction = Qt.Horizontal
        self.m_speed = 300
        self.m_animationtype = QtCore.QEasingCurve.OutCubic
        # Indices of the outgoing / incoming widgets of the current slide.
        self.m_now = 0
        self.m_next = 0
        # Whether slideInPrev/slideInNext wrap around at the ends.
        self.m_wrap = False
        # Original position of the outgoing widget (restored after the slide).
        self.m_pnow = QtCore.QPoint(0, 0)
        # True while a slide animation is in flight (re-entry guard).
        self.m_active = False
    def setDirection(self, direction):
        """Set the slide axis (``Qt.Horizontal`` or ``Qt.Vertical``)."""
        self.m_direction = direction
    def setSpeed(self, speed):
        """Set the animation duration in milliseconds."""
        self.m_speed = speed
    def setAnimation(self, animationtype):
        """Set the ``QEasingCurve`` used for sliding."""
        self.m_animationtype = animationtype
    def setWrap(self, wrap):
        """Enable or disable wrap-around for slideInPrev/slideInNext."""
        self.m_wrap = wrap
    @QtCore.pyqtSlot()
    def slideInPrev(self):
        """Slide to the previous page (wraps if enabled)."""
        now = self.currentIndex()
        if self.m_wrap or now > 0:
            self.slideInIdx(now - 1)
    @QtCore.pyqtSlot()
    def slideInNext(self):
        """Slide to the next page (wraps if enabled)."""
        now = self.currentIndex()
        if self.m_wrap or now < (self.count() - 1):
            self.slideInIdx(now + 1)
    def slideInIdx(self, idx):
        """Slide to page *idx*; out-of-range indices wrap modulo count()."""
        if idx > (self.count() - 1):
            idx = idx % self.count()
        elif idx < 0:
            idx = (idx + self.count()) % self.count()
        self.slideInWgt(self.widget(idx))
    def slideInWgt(self, newwidget):
        """Animate from the current widget to *newwidget*.

        Both widgets move in parallel by one widget-size offset along the
        configured axis. No-op while another slide is running or when the
        target is already the current widget.
        """
        if self.m_active:
            return
        self.m_active = True
        _now = self.currentIndex()
        _next = self.indexOf(newwidget)
        if _now == _next:
            self.m_active = False
            return
        # Offset starts as the full frame size; the branch below zeroes the
        # unused axis and fixes the sign by slide direction
        # (note: "not Horizontal" means a vertical slide).
        offsetx, offsety = self.frameRect().width(), self.frameRect().height()
        self.widget(_next).setGeometry(self.frameRect())
        if not self.m_direction == Qt.Horizontal:
            if _now < _next:
                offsetx, offsety = 0, -offsety
            else:
                offsetx = 0
        else:
            if _now < _next:
                offsetx, offsety = -offsetx, 0
            else:
                offsety = 0
        pnext = self.widget(_next).pos()
        pnow = self.widget(_now).pos()
        self.m_pnow = pnow
        # Park the incoming widget one offset away and reveal it on top.
        offset = QtCore.QPoint(offsetx, offsety)
        self.widget(_next).move(pnext - offset)
        self.widget(_next).show()
        self.widget(_next).raise_()
        # Animate both widgets to their final positions in parallel.
        anim_group = QtCore.QParallelAnimationGroup(
            self, finished=self.animationDoneSlot
        )
        for index, start, end in zip(
            (_now, _next), (pnow, pnext - offset), (pnow + offset, pnext)
        ):
            animation = QtCore.QPropertyAnimation(
                self.widget(index),
                b"pos",
                duration=self.m_speed,
                easingCurve=self.m_animationtype,
                startValue=start,
                endValue=end,
            )
            anim_group.addAnimation(animation)
        self.m_next = _next
        self.m_now = _now
        self.m_active = True
        anim_group.start(QtCore.QAbstractAnimation.DeleteWhenStopped)
    @QtCore.pyqtSlot()
    def animationDoneSlot(self):
        """Finalize the slide: commit the new index and reset the old widget."""
        self.setCurrentIndex(self.m_next)
        self.widget(self.m_now).hide()
        self.widget(self.m_now).move(self.m_pnow)
        self.m_active = False
    def fadeInIdx(self, index):
        """Switch to page *index* with a cross-fade instead of a slide."""
        self.fader_widget = FaderWidget(self.currentWidget(), self.widget(index),
                                        self.m_speed)
        self.setCurrentIndex(index)
class QProgressIndicator(QtWidgets.QWidget):
    """
    A macOS style spinning progress indicator. ``QProgressIndicator``
    automatically detects and adjusts to "dark mode" appearances.
    """

    # Class-level defaults; the real values are assigned in __init__.
    m_angle = None                  # current rotation angle in degrees
    m_timerId = None                # QObject timer id, -1 while stopped
    m_delay = None                  # timer interval in ms
    m_displayedWhenStopped = None   # paint capsules even when not animating
    m_color = None                  # current capsule colour
    m_light_color = QtGui.QColor(170, 170, 170)  # used on dark backgrounds
    m_dark_color = QtGui.QColor(40, 40, 40)      # used on light backgrounds

    def __init__(self, parent=None):
        # Call parent class constructor first
        super(QProgressIndicator, self).__init__(parent)

        # Initialize instance variables
        self.m_angle = 0
        self.m_timerId = -1
        self.m_delay = 40
        self.m_displayedWhenStopped = False
        self.m_color = self.m_dark_color
        self.update_dark_mode()

        # Set size and focus policy
        self.setSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        self.setFocusPolicy(Qt.NoFocus)

    def animationDelay(self):
        """Return the animation timer interval in milliseconds."""
        # Bug fix: previously returned the non-existent ``self.delay``.
        return self.m_delay

    def isAnimated(self):
        """Return ``True`` while the spin timer is running."""
        return self.m_timerId != -1

    def isDisplayedWhenStopped(self):
        """Return whether the indicator is painted while not animating."""
        # Bug fix: previously returned the non-existent
        # ``self.displayedWhenStopped``.
        return self.m_displayedWhenStopped

    def getColor(self):
        """Return the current capsule colour."""
        # Bug fix: previously returned the non-existent ``self.color``.
        return self.m_color

    def sizeHint(self):
        return QtCore.QSize(20, 20)

    def startAnimation(self):
        """Start spinning (no-op if already running)."""
        self.m_angle = 0
        if self.m_timerId == -1:
            self.m_timerId = self.startTimer(self.m_delay)

    def stopAnimation(self):
        """Stop spinning and repaint."""
        if self.m_timerId != -1:
            self.killTimer(self.m_timerId)
        self.m_timerId = -1
        self.update()

    def setAnimationDelay(self, delay):
        """Change the timer interval, restarting the timer if it is running."""
        if self.m_timerId != -1:
            self.killTimer(self.m_timerId)
        self.m_delay = delay
        if self.m_timerId != -1:
            self.m_timerId = self.startTimer(self.m_delay)

    def setDisplayedWhenStopped(self, state):
        """Choose whether the capsules stay visible while stopped."""
        # Bug fix: previously assigned ``self.displayedWhenStopped``, an
        # attribute that paintEvent() never reads (it checks
        # ``m_displayedWhenStopped``), so the setter had no visible effect.
        self.m_displayedWhenStopped = state
        self.update()

    def setColor(self, color):
        """Set the capsule colour and repaint."""
        self.m_color = color
        self.update()

    def timerEvent(self, event):
        # Advance by one capsule (360 deg / 12) per tick.
        self.m_angle = (self.m_angle + 30) % 360
        self.update()

    def paintEvent(self, event):
        """Draw 12 capsules around the centre, fading along the rotation."""
        if (not self.m_displayedWhenStopped) and (not self.isAnimated()):
            return

        width = min(self.width(), self.height())

        painter = QPainter(self)
        painter.setRenderHint(QPainter.Antialiasing)

        outerRadius = (width - 1) * 0.5
        innerRadius = (width - 1) * 0.5 * 0.5

        capsuleHeight = outerRadius - innerRadius
        capsuleWidth = max(1.2, 1.0 + capsuleHeight*0.19, 0.35 + capsuleHeight*0.28)
        capsuleRadius = capsuleWidth / 2

        for i in range(0, 12):
            color = QtGui.QColor(self.m_color)
            if self.isAnimated():
                # Fade capsules trailing the current angle.
                color.setAlphaF(1.0 - (i / 12.0))
            else:
                color.setAlphaF(0.2)
            painter.setPen(Qt.NoPen)
            painter.setBrush(color)
            painter.save()
            painter.translate(self.rect().center())
            painter.rotate(self.m_angle - (i * 30.0))
            painter.drawRoundedRect(capsuleWidth * -0.5,
                                    (innerRadius + capsuleHeight) * -1, capsuleWidth,
                                    capsuleHeight, capsuleRadius, capsuleRadius)
            painter.restore()

    def changeEvent(self, event):
        # Parameter renamed: it used to shadow the QEvent class.
        if event.type() == QtCore.QEvent.PaletteChange:
            self.update_dark_mode()

    def update_dark_mode(self):
        """Pick a light or dark capsule colour to contrast the background."""
        # the system may provide a different appearance in dark mode
        if isDarkWindow():
            self.setColor(self.m_light_color)
        else:
            self.setColor(self.m_dark_color)
| <filename>maestral/gui/utils.py
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 31 16:23:13 2018
@author: samschott
"""
# system imports
import sys
import os
import platform
from subprocess import Popen
from traceback import format_exception
# external packages
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import Qt, QRect
from PyQt5.QtGui import QBrush, QImage, QPainter, QPixmap, QWindow
# maestral modules
from maestral.gui.resources import APP_ICON_PATH, rgb_to_luminance
from maestral.utils import is_macos_bundle
THEME_DARK = "dark"
THEME_LIGHT = "light"
LINE_COLOR_DARK = (95, 104, 104)
LINE_COLOR_LIGHT = (205, 203, 205)
def elide_string(string, font=None, pixels=200, side="right"):
    """
    Elide a string to fit into the given width.

    :param str string: String to elide.
    :param font: Font to calculate size. If not given, the current style's default font
        for a QLabel is used.
    :param int pixels: Maximum width in pixels.
    :param str side: Side to truncate. Can be "right" or "left", defaults to "right".
    :return: Truncated string.
    :rtype: str
    """
    if not font:
        font = QtWidgets.QLabel().font()

    metrics = QtGui.QFontMetrics(font)
    # Bug fix: compare with '==' rather than 'is'. Identity checks against a
    # string literal only work by accident of CPython interning and raise a
    # SyntaxWarning on Python 3.8+.
    mode = Qt.ElideRight if side == "right" else Qt.ElideLeft

    return metrics.elidedText(string, mode, pixels)
def get_scaled_font(scaling=1.0, bold=False, italic=False):
"""
Returns the styles default font for QLabels, but scaled.
:param float scaling: Scaling factor.
:param bool bold: Sets the returned font to bold (defaults to ``False``)
:param bool italic: Sets the returned font to italic (defaults to ``False``)
:return: `QFont`` instance.
"""
label = QtWidgets.QLabel()
font = label.font()
font.setBold(bold)
font.setItalic(italic)
font_size = round(font.pointSize()*scaling)
# noinspection PyTypeChecker
font.setPointSize(font_size)
return font
def icon_to_pixmap(icon, width, height=None):
"""
Converts a given icon to a pixmap. Automatically adjusts to high-DPI scaling.
:param icon: Icon to convert.
:param int width: Target point height.
:param int height: Target point height.
:return: ``QPixmap`` instance.
"""
if not height:
height = width
is_hidpi = QtCore.QCoreApplication.testAttribute(Qt.AA_UseHighDpiPixmaps)
pr = QWindow().devicePixelRatio()
if not is_hidpi:
width = width*pr
height = height*pr
px = icon.pixmap(width, height)
if not is_hidpi:
px.setDevicePixelRatio(pr)
return px
def windowTheme():
"""
Returns one of gui.utils.THEME_LIGHT or gui.utils.THEME_DARK, corresponding to
current user's UI theme.
"""
# getting color of a pixel on a top bar, and identifying best-fitting color
# theme based on its luminance
w = QtWidgets.QWidget()
bg_color = w.palette().color(QtGui.QPalette.Background)
bg_color_rgb = [bg_color.red(), bg_color.green(), bg_color.blue()]
luminance = rgb_to_luminance(*bg_color_rgb)
return THEME_LIGHT if luminance >= 0.4 else THEME_DARK
def isDarkWindow():
return windowTheme() == THEME_DARK
def get_gnome_scaling_factor():
"""Returns gnome scaling factor as str or None."""
if __command_exists("gsettings"):
res = os.popen("gsettings get org.gnome.desktop.interface scaling-factor").read()
if res and res.split()[0] == "uint32" and len(res.split()) > 1:
scaling_factor_str = res.split()[1]
try:
scaling_factor_float = float(scaling_factor_str)
if scaling_factor_float > 1:
return scaling_factor_str
except ValueError:
pass
return None
def __command_exists(command):
return any(
os.access(os.path.join(path, command), os.X_OK)
for path in os.environ["PATH"].split(os.pathsep)
)
class MaestralWorker(QtCore.QObject):
"""A worker object for Maestral. To be used in QThreads."""
sig_done = QtCore.pyqtSignal(object)
def __init__(self, target=None, args=None, kwargs=None):
QtCore.QObject.__init__(self)
self._target = target
self._args = args or ()
self._kwargs = kwargs or {}
def start(self):
res = self._target(*self._args, **self._kwargs)
self.sig_done.emit(res)
class MaestralBackgroundTask(QtCore.QObject):
"""A utility class to manage a worker thread."""
sig_done = QtCore.pyqtSignal(object)
def __init__(self, parent=None, target=None, args=None, kwargs=None, autostart=True):
QtCore.QObject.__init__(self, parent)
self._target = target
self._args = args or ()
self._kwargs = kwargs or {}
if autostart:
self.start()
def start(self):
self.thread = QtCore.QThread(self)
self.worker = MaestralWorker(
target=self._target, args=self._args, kwargs=self._kwargs)
self.worker.sig_done.connect(self.sig_done.emit)
self.worker.sig_done.connect(self.thread.quit)
self.worker.moveToThread(self.thread)
self.thread.started.connect(self.worker.start)
self.thread.start()
def wait(self, timeout=None):
if timeout:
self.thread.wait(msecs=timeout)
else:
self.thread.wait()
class UserDialog(QtWidgets.QDialog):
"""A template user dialog for Maestral. Shows a traceback if given in constructor."""
def __init__(self, title, message, exc_info=None, parent=None):
super(self.__class__, self).__init__(parent=parent)
self.setModal(True)
self.setWindowModality(Qt.WindowModal)
self.setWindowFlags(Qt.WindowStaysOnTopHint | Qt.Sheet | Qt.WindowTitleHint | Qt.CustomizeWindowHint)
self.setWindowTitle("")
self.setFixedWidth(450)
self.gridLayout = QtWidgets.QGridLayout()
self.setLayout(self.gridLayout)
self.iconLabel = QtWidgets.QLabel(self)
self.titleLabel = QtWidgets.QLabel(self)
self.infoLabel = QtWidgets.QLabel(self)
icon_size = 70
self.iconLabel.setMinimumSize(icon_size, icon_size)
self.iconLabel.setMaximumSize(icon_size, icon_size)
self.titleLabel.setFont(get_scaled_font(bold=True))
self.infoLabel.setFont(get_scaled_font(scaling=0.9))
self.infoLabel.setWordWrap(True)
icon = QtGui.QIcon(APP_ICON_PATH)
self.iconLabel.setPixmap(icon_to_pixmap(icon, icon_size))
self.titleLabel.setText(title)
self.infoLabel.setText(message)
if exc_info:
self.details = QtWidgets.QTextEdit(self)
self.details.setText("".join(format_exception(*exc_info)))
self.buttonBox = QtWidgets.QDialogButtonBox(QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.accepted.connect(self.accept)
self.gridLayout.addWidget(self.iconLabel, 0, 0, 2, 1)
self.gridLayout.addWidget(self.titleLabel, 0, 1, 1, 1)
self.gridLayout.addWidget(self.infoLabel, 1, 1, 1, 1)
if exc_info:
self.gridLayout.addWidget(self.details, 2, 0, 1, 2)
self.gridLayout.addWidget(self.buttonBox, 3, 1, -1, -1)
def setAcceptButtonName(self, name):
self.buttonBox.buttons()[0].setText(name)
def addCancelButton(self, name="Cancel"):
self._cancelButton = self.buttonBox.addButton(QtWidgets.QDialogButtonBox.Cancel)
self._cancelButton.setText(name)
self._cancelButton.clicked.connect(self.close)
def setCancelButtonName(self, name):
self._cancelButton.setText(name)
def addSecondAcceptButton(self, name, icon="dialog-ok"):
self._acceptButton2 = self.buttonBox.addButton(QtWidgets.QDialogButtonBox.Ignore)
self._acceptButton2.setText(name)
if isinstance(icon, QtGui.QIcon):
self._acceptButton2.setIcon(icon)
elif isinstance(icon, str):
self._acceptButton2.setIcon(QtGui.QIcon.fromTheme(icon))
self._acceptButton2.clicked.connect(lambda: self.setResult(2))
self._acceptButton2.clicked.connect(self.close)
def setSecondAcceptButtonName(self, name):
self._acceptButton2.setText(name)
def quit_and_restart_maestral():
    """
    Quit and schedule a restart of Maestral with the current configuration.

    A helper shell command waits for the current process to exit (``lsof``
    on macOS, ``tail --pid`` on Linux) and then launches Maestral again.
    macOS app bundles are relaunched through the bundled executable instead
    of the ``maestral`` command line script.
    """
    pid = os.getpid()  # the watcher command waits on this PID
    config_name = os.getenv("MAESTRAL_CONFIG", "maestral")

    # The branches must be mutually exclusive: a macOS app bundle also
    # reports platform "Darwin", and the original fall-through started a
    # second copy via the CLI after relaunching the bundle.
    if is_macos_bundle:
        launch_command = os.path.join(sys._MEIPASS, "main")
        # Bug fix: lsof must be given the PID to wait on, not the launch
        # command (the original formatted ``launch_command`` into both slots).
        Popen("lsof -p {0} +r 1 &>/dev/null; {1}".format(pid, launch_command),
              shell=True)
    elif platform.system() == "Darwin":
        Popen("lsof -p {0} +r 1 &>/dev/null; maestral gui --config-name='{1}'".format(
            pid, config_name), shell=True)
    elif platform.system() == "Linux":
        Popen("tail --pid={0} -f /dev/null; maestral gui --config-name='{1}'".format(
            pid, config_name), shell=True)

    QtCore.QCoreApplication.quit()
    sys.exit(0)
def get_masked_image(path, size=64, overlay_text=""):
    """
    Returns a ``QPixmap`` from an image file masked with a smooth circle.
    The returned pixmap will have a size of *size* × *size* pixels.

    :param str path: Path to image file.
    :param int size: Target size. Will be the diameter of the masked image.
    :param overlay_text: Overlay text. This will be shown in white sans-serif on top of
        the image.
    :return: `QPixmap`` instance.
    """
    with open(path, "rb") as f:
        imgdata = f.read()
    imgtype = path.split(".")[-1]

    # Load image and convert to 32-bit ARGB (adds an alpha channel).
    # Bug fix: QImage.convertToFormat() returns a *new* image; the original
    # discarded the return value, so the conversion never took effect.
    image = QImage.fromData(imgdata, imgtype)
    image = image.convertToFormat(QImage.Format_ARGB32)

    # Crop image to a square. QRect takes int coordinates, so use floor
    # division (true division yields floats and a TypeError on newer PyQt5).
    imgsize = min(image.width(), image.height())
    rect = QRect(
        (image.width() - imgsize) // 2,
        (image.height() - imgsize) // 2,
        imgsize,
        imgsize,
    )
    image = image.copy(rect)

    # Create the output image with the same dimensions and an alpha channel
    # and make it completely transparent:
    out_img = QImage(imgsize, imgsize, QImage.Format_ARGB32)
    out_img.fill(Qt.transparent)

    # Create a texture brush and paint a circle with the original image onto
    # the output image:
    brush = QBrush(image)  # Create texture brush
    painter = QPainter(out_img)  # Paint the output image
    painter.setBrush(brush)  # Use the image texture brush
    painter.setPen(Qt.NoPen)  # Don't draw an outline
    painter.setRenderHint(QPainter.Antialiasing, True)  # Use AA
    painter.drawEllipse(0, 0, imgsize, imgsize)  # Actually draw the circle

    if overlay_text:
        # Draw the overlay text; setPointSize() requires an int.
        font = QtGui.QFont("Arial Rounded MT Bold")
        font.setPointSize(round(imgsize * 0.4))
        painter.setFont(font)
        painter.setPen(Qt.white)
        painter.drawText(QRect(0, 0, imgsize, imgsize), Qt.AlignCenter, overlay_text)

    painter.end()  # We are done (segfault if you forget this)

    # Convert the image to a pixmap and rescale it. Take pixel ratio into
    # account to get a sharp image on retina displays:
    pr = QWindow().devicePixelRatio()
    pm = QPixmap.fromImage(out_img)
    pm.setDevicePixelRatio(pr)
    # devicePixelRatio() is a float; scaled() needs int target dimensions.
    size = int(size * pr)
    pm = pm.scaled(size, size, Qt.KeepAspectRatio, Qt.SmoothTransformation)

    return pm
class FaderWidget(QtWidgets.QWidget):
pixmap_opacity = 1.0
def __init__(self, old_widget, new_widget, duration=300):
QtWidgets.QWidget.__init__(self, new_widget)
pr = QWindow().devicePixelRatio()
self.old_pixmap = QPixmap(new_widget.size()*pr)
self.old_pixmap.setDevicePixelRatio(pr)
old_widget.render(self.old_pixmap)
self.timeline = QtCore.QTimeLine()
self.timeline.valueChanged.connect(self.animate)
self.timeline.finished.connect(self.close)
self.timeline.setDuration(duration)
self.timeline.start()
self.resize(new_widget.size())
self.show()
def paintEvent(self, event):
painter = QPainter()
painter.begin(self)
painter.setOpacity(self.pixmap_opacity)
painter.drawPixmap(0, 0, self.old_pixmap)
painter.end()
def animate(self, value):
self.pixmap_opacity = 1.0 - value
self.repaint()
class AnimatedStackedWidget(QtWidgets.QStackedWidget):
    """
    A subclass of ``QStackedWidget`` with sliding or fading animations between stacks.

    Sliding is implemented by animating the ``pos`` property of the current and
    the next widget in parallel (:meth:`slideInWgt`); fading delegates to
    :class:`FaderWidget` (:meth:`fadeInIdx`).
    """
    def __init__(self, parent=None):
        super(AnimatedStackedWidget, self).__init__(parent)
        self.m_direction = Qt.Horizontal  # slide axis (Qt.Horizontal / Qt.Vertical)
        self.m_speed = 300  # animation duration in milliseconds
        self.m_animationtype = QtCore.QEasingCurve.OutCubic  # easing curve for slides
        self.m_now = 0  # index of the widget being animated out
        self.m_next = 0  # index of the widget being animated in
        self.m_wrap = False  # whether prev/next wrap around at the ends
        self.m_pnow = QtCore.QPoint(0, 0)  # original position of the outgoing widget
        self.m_active = False  # True while a slide animation is running
    def setDirection(self, direction):
        """Set the slide axis (``Qt.Horizontal`` or ``Qt.Vertical``)."""
        self.m_direction = direction
    def setSpeed(self, speed):
        """Set the animation duration in milliseconds."""
        self.m_speed = speed
    def setAnimation(self, animationtype):
        """Set the ``QEasingCurve`` used for slide animations."""
        self.m_animationtype = animationtype
    def setWrap(self, wrap):
        """Enable or disable wrap-around for :meth:`slideInPrev` / :meth:`slideInNext`."""
        self.m_wrap = wrap
    @QtCore.pyqtSlot()
    def slideInPrev(self):
        """Slide to the previous stack index (wrapping if enabled)."""
        now = self.currentIndex()
        if self.m_wrap or now > 0:
            self.slideInIdx(now - 1)
    @QtCore.pyqtSlot()
    def slideInNext(self):
        """Slide to the next stack index (wrapping if enabled)."""
        now = self.currentIndex()
        if self.m_wrap or now < (self.count() - 1):
            self.slideInIdx(now + 1)
    def slideInIdx(self, idx):
        """Slide to the widget at ``idx``, normalising out-of-range indices."""
        # Wrap the index into [0, count) in both directions.
        if idx > (self.count() - 1):
            idx = idx % self.count()
        elif idx < 0:
            idx = (idx + self.count()) % self.count()
        self.slideInWgt(self.widget(idx))
    def slideInWgt(self, newwidget):
        """Animate the transition from the current widget to ``newwidget``.

        No-op when an animation is already running or when ``newwidget`` is
        already the current widget.
        """
        if self.m_active:
            return

        self.m_active = True

        _now = self.currentIndex()
        _next = self.indexOf(newwidget)

        if _now == _next:
            self.m_active = False
            return

        # Choose the slide offset: full frame width/height, signed by whether
        # we move forward or backward through the stack.
        offsetx, offsety = self.frameRect().width(), self.frameRect().height()
        self.widget(_next).setGeometry(self.frameRect())

        if not self.m_direction == Qt.Horizontal:
            if _now < _next:
                offsetx, offsety = 0, -offsety
            else:
                offsetx = 0
        else:
            if _now < _next:
                offsetx, offsety = -offsetx, 0
            else:
                offsety = 0

        pnext = self.widget(_next).pos()
        pnow = self.widget(_now).pos()
        # Remember the outgoing widget's position so animationDoneSlot can
        # restore it after the slide.
        self.m_pnow = pnow

        offset = QtCore.QPoint(offsetx, offsety)
        # Park the incoming widget just outside the frame, then animate both
        # widgets by the same offset in parallel.
        self.widget(_next).move(pnext - offset)
        self.widget(_next).show()
        self.widget(_next).raise_()

        anim_group = QtCore.QParallelAnimationGroup(
            self, finished=self.animationDoneSlot
        )

        for index, start, end in zip(
            (_now, _next), (pnow, pnext - offset), (pnow + offset, pnext)
        ):
            animation = QtCore.QPropertyAnimation(
                self.widget(index),
                b"pos",
                duration=self.m_speed,
                easingCurve=self.m_animationtype,
                startValue=start,
                endValue=end,
            )
            anim_group.addAnimation(animation)

        self.m_next = _next
        self.m_now = _now
        self.m_active = True
        anim_group.start(QtCore.QAbstractAnimation.DeleteWhenStopped)
    @QtCore.pyqtSlot()
    def animationDoneSlot(self):
        """Finalize a slide: commit the new index and restore the old widget."""
        self.setCurrentIndex(self.m_next)
        self.widget(self.m_now).hide()
        self.widget(self.m_now).move(self.m_pnow)
        self.m_active = False
    def fadeInIdx(self, index):
        """Cross-fade to the widget at ``index`` using a :class:`FaderWidget`."""
        # Keep a reference so the fader is not garbage-collected mid-animation.
        self.fader_widget = FaderWidget(self.currentWidget(), self.widget(index),
                                        self.m_speed)
        self.setCurrentIndex(index)
class QProgressIndicator(QtWidgets.QWidget):
    """
    A macOS style spinning progress indicator. ``QProgressIndicator`` automatically
    detects and adjusts to "dark mode" appearances.
    """

    m_angle = None  # current rotation angle in degrees
    m_timerId = None  # id of the running animation timer, -1 when stopped
    m_delay = None  # delay between animation frames in ms
    m_displayedWhenStopped = None  # whether to paint the indicator while stopped
    m_color = None  # current capsule color
    m_light_color = QtGui.QColor(170, 170, 170)  # color used in dark mode
    m_dark_color = QtGui.QColor(40, 40, 40)  # color used in light mode

    def __init__(self, parent=None):
        # Call parent class constructor first
        super(QProgressIndicator, self).__init__(parent)

        # Initialize instance variables
        self.m_angle = 0
        self.m_timerId = -1
        self.m_delay = 40
        self.m_displayedWhenStopped = False
        self.m_color = self.m_dark_color
        self.update_dark_mode()

        # Set size and focus policy
        self.setSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        self.setFocusPolicy(Qt.NoFocus)

    def animationDelay(self):
        """Return the delay between animation frames in milliseconds."""
        # Fixed: previously returned the non-existent attribute ``self.delay``,
        # which raised AttributeError. The state lives in ``self.m_delay``.
        return self.m_delay

    def isAnimated(self):
        """Return ``True`` while the spin animation timer is running."""
        return self.m_timerId != -1

    def isDisplayedWhenStopped(self):
        """Return whether the indicator is painted while not animating."""
        # Fixed: previously returned the non-existent attribute
        # ``self.displayedWhenStopped`` (AttributeError).
        return self.m_displayedWhenStopped

    def getColor(self):
        """Return the current capsule color."""
        # Fixed: previously returned the non-existent attribute ``self.color``
        # (AttributeError). The state lives in ``self.m_color``.
        return self.m_color

    def sizeHint(self):
        """Preferred size of the indicator (a 20x20 square)."""
        return QtCore.QSize(20, 20)

    def startAnimation(self):
        """Start spinning; no-op if the animation timer is already running."""
        self.m_angle = 0

        if self.m_timerId == -1:
            self.m_timerId = self.startTimer(self.m_delay)

    def stopAnimation(self):
        """Stop spinning and repaint (the indicator may disappear)."""
        if self.m_timerId != -1:
            self.killTimer(self.m_timerId)

        self.m_timerId = -1
        self.update()

    def setAnimationDelay(self, delay):
        """Set the frame delay in ms, restarting the timer if it is running."""
        if self.m_timerId != -1:
            self.killTimer(self.m_timerId)

        self.m_delay = delay

        if self.m_timerId != -1:
            self.m_timerId = self.startTimer(self.m_delay)

    def setDisplayedWhenStopped(self, state):
        """Choose whether the indicator stays visible while not animating."""
        # Fixed: previously assigned ``self.displayedWhenStopped``, which
        # paintEvent never reads, so the setting was silently ignored.
        self.m_displayedWhenStopped = state
        self.update()

    def setColor(self, color):
        """Set the capsule color and repaint."""
        self.m_color = color
        self.update()

    def timerEvent(self, event):
        """Advance the rotation by 30 degrees (12 steps per revolution)."""
        self.m_angle = (self.m_angle + 30) % 360
        self.update()

    def paintEvent(self, event):
        """Draw 12 rounded capsules arranged in a circle with fading alpha."""
        if (not self.m_displayedWhenStopped) and (not self.isAnimated()):
            return

        width = min(self.width(), self.height())

        painter = QPainter(self)
        painter.setRenderHint(QPainter.Antialiasing)

        # Capsule geometry scales with the widget's smaller dimension.
        outerRadius = (width - 1) * 0.5
        innerRadius = (width - 1) * 0.5 * 0.5

        capsuleHeight = outerRadius - innerRadius
        capsuleWidth = max(1.2, 1.0 + capsuleHeight*0.19, 0.35 + capsuleHeight*0.28)
        capsuleRadius = capsuleWidth / 2

        for i in range(0, 12):
            color = QtGui.QColor(self.m_color)

            # While animated, fade the capsules around the circle; while
            # stopped (but displayed), draw all of them at low alpha.
            if self.isAnimated():
                color.setAlphaF(1.0 - (i / 12.0))
            else:
                color.setAlphaF(0.2)

            painter.setPen(Qt.NoPen)
            painter.setBrush(color)
            painter.save()
            painter.translate(self.rect().center())
            painter.rotate(self.m_angle - (i * 30.0))
            painter.drawRoundedRect(capsuleWidth * -0.5,
                                    (innerRadius + capsuleHeight) * -1, capsuleWidth,
                                    capsuleHeight, capsuleRadius, capsuleRadius)
            painter.restore()

    def changeEvent(self, event):
        """React to palette changes, e.g. when the system switches dark mode."""
        # Parameter renamed from ``QEvent`` (which shadowed the QtCore.QEvent
        # class); Qt invokes this handler positionally, so the rename is safe.
        if event.type() == QtCore.QEvent.PaletteChange:
            self.update_dark_mode()

    def update_dark_mode(self):
        # update folder icons: the system may provide different icons in dark mode
        if isDarkWindow():
            self.setColor(self.m_light_color)
        else:
            self.setColor(self.m_dark_color)
| en | 0.736732 | # !/usr/bin/env python3 # -*- coding: utf-8 -*- Created on Wed Oct 31 16:23:13 2018 @author: samschott # system imports # external packages # maestral modules Elide a string to fit into the given width. :param str string: String to elide. :param font: Font to calculate size. If not given, the current style's default font for a QLabel is used. :param int pixels: Maximum width in pixels. :param str side: Side to truncate. Can be "right" or "left", defaults to "right". :return: Truncated string. :rtype: str Returns the styles default font for QLabels, but scaled. :param float scaling: Scaling factor. :param bool bold: Sets the returned font to bold (defaults to ``False``) :param bool italic: Sets the returned font to italic (defaults to ``False``) :return: `QFont`` instance. # noinspection PyTypeChecker Converts a given icon to a pixmap. Automatically adjusts to high-DPI scaling. :param icon: Icon to convert. :param int width: Target point height. :param int height: Target point height. :return: ``QPixmap`` instance. Returns one of gui.utils.THEME_LIGHT or gui.utils.THEME_DARK, corresponding to current user's UI theme. # getting color of a pixel on a top bar, and identifying best-fitting color # theme based on its luminance Returns gnome scaling factor as str or None. A worker object for Maestral. To be used in QThreads. A utility class to manage a worker thread. A template user dialog for Maestral. Shows a traceback if given in constructor. Quits and restarts Maestral. This chooses the right command to restart Maestral, running with the previous configuration. It also handles restarting macOS app bundles. # get ID of current process # wait for current process to quit and then restart Maestral Returns a ``QPixmap`` from an image file masked with a smooth circle. The returned pixmap will have a size of *size* × *size* pixels. :param str path: Path to image file. :param int size: Target size. Will be the diameter of the masked image. 
:param overlay_text: Overlay text. This will be shown in white sans-serif on top of the image. :return: `QPixmap`` instance. # Load image and convert to 32-bit ARGB (adds an alpha channel): # Crop image to a square: # Create the output image with the same dimensions and an alpha channel # and make it completely transparent: # Create a texture brush and paint a circle with the original image onto # the output image: # Create texture brush # Paint the output image # Use the image texture brush # Don't draw an outline # Use AA # Actually draw the circle # draw text # We are done (segfault if you forget this) # Convert the image to a pixmap and rescale it. Take pixel ratio into # account to get a sharp image on retina displays: A subclass of ``QStackedWidget`` with sliding or fading animations between stacks. A macOS style spinning progress indicator. ``QProgressIndicator`` automatically detects and adjusts to "dark mode" appearances. # Call parent class constructor first # Initialize instance variables # Set size and focus policy # update folder icons: the system may provide different icons in dark mode | 2.233829 | 2 |
aiosvc/db/__init__.py | acsnem/aiosvc | 0 | 6620971 | from .pg import Pool as PgPool | from .pg import Pool as PgPool | none | 1 | 1.075379 | 1 | |
test_framework/show_versions.py | jamescooke/factory_djoy | 26 | 6620972 | <gh_stars>10-100
import sys
from django import get_version
def versions():
return '''
=== VERSIONS ======================
Python = {python}
Django = {django}
===================================
'''.format(
python=sys.version.replace('\n', ' '),
django=get_version(),
)
if __name__ == '__main__':
print(versions())
| import sys
from django import get_version
def versions():
return '''
=== VERSIONS ======================
Python = {python}
Django = {django}
===================================
'''.format(
python=sys.version.replace('\n', ' '),
django=get_version(),
)
if __name__ == '__main__':
print(versions()) | en | 0.345826 | === VERSIONS ====================== Python = {python} Django = {django} =================================== | 2.24897 | 2 |
Microassembly/CPU-16 Micro Assembler.py | techno-sorcery/CPU-16 | 0 | 6620973 | <filename>Microassembly/CPU-16 Micro Assembler.py
##########################################
# ATLAS CPU-16 MICRO-ASSEMBLER #
# WRITTEN BY <NAME>. - 2022 #
##########################################
import re
import sys
import os
mcode = {
'PC_ST' :'0b000000000000000000000000000010000000000',
'OP1_ST' :'0b000000000000000000000000000100000000000',
'OP2_ST' :'0b000000000000000000000000000110000000000',
'IR_ST' :'0b000000000000000000000000001000000000000',
'MDR_ST' :'0b000000000000000000000000001010000000000',
'MEM_ST' :'0b000000000000000000000000001100000000000',
'REG1_ST' :'0b000000000000000000000000001110000000000',
'REG2_ST' :'0b000000000000000000000000010000000000000',
'SP_ST' :'0b000000000000000000000000010010000000000',
'STAT_ST' :'0b000000000000000000000000010100000000000',
'F_DOUT' :'0b000000000000000000000000100000000000000',
'PC_DOUT' :'0b000000000000000000000001000000000000000',
'SWP_DOUT' :'0b000000000000000000000011000000000000000',
'WRD_DOUT' :'0b000000000000000000000011100000000000000',
'MDR_DOUT' :'0b000000000000000000000100000000000000000',
'MEM_DOUT' :'0b000000000000000000000100100000000000000',
'VECT_DOUT' :'0b000000000000000000000101000000000000000',
'REG1_DOUT' :'0b000000000000000000000101100000000000000',
'REG2_DOUT' :'0b000000000000000000000110000000000000000',
'SP_DOUT' :'0b000000000000000000000110100000000000000',
'PC_AOUT' :'0b000000000000000000001000000000000000000',
'MAR_AOUT' :'0b000000000000000000010000000000000000000',
'COND_N' :'0b000000000000000000100000000000000000000',
'COND_Z' :'0b000000000000000001000000000000000000000',
'COND_V' :'0b000000000000000001100000000000000000000',
'COND_C' :'0b000000000000000010000000000000000000000',
'ALU_ADD' :'0b000000000100100100000001100000000000000',
'ALU_ADC' :'0b000000000010100100000001100000000000000',
'ALU_SUB' :'0b000000000000011000000001100000000000000',
'ALU_SBB' :'0b000000000010011000000001100000000000000',
'ALU_AND' :'0b000000000001101100000001100000000000000',
'ALU_OR' :'0b000000000001111000000001100000000000000',
'ALU_XOR' :'0b000000000001011000000001100000000000000',
'ALU_NOT' :'0b000000000001000000000001100000000000000',
'ALU_LSH' :'0b000000000100110000000001100000000000000',
'ALU_RSH' :'0b000000000000000000000010000000000000000',
'ALU_INC' :'0b000000000000000000000001100000000000000',
'ALU_DEC' :'0b000000000100111100000001100000000000000',
'ALU_SEX' :'0b000000000000000000000010100000000000000',
'PC_INC' :'0b000000001000000000000000000000000000000',
'COND_NEG' :'0b000000010000000000000000000000000000000',
'F_ALUIN' :'0b000000100000000000000000000000000000000',
'F_ST' :'0b000001000000000000000000000000000000000',
'MAR_ST' :'0b000010000000000000000000000000000000000',
'MODE_RST' :'0b000100000000000000000000000000000000000',
'MODE_DMA' :'0b001000000000000000000000000000000000000',
'MODE_FLT' :'0b001100000000000000000000000000000000000',
'MODE_FETCH':'0b010000000000000000000000000000000000000',
'IRQ_EN' :'0b100000000000000000000000000000000000000'
}
#path = sys.argv[1]
path = 'mcode.asm'
labels = {}
words = {}
labels2 = {}
lineNum = 0
if path.rsplit('.',1)[1].upper() != 'ASM':
print('Invalid file extension .',path.rsplit('.',1)[1],sep='')
wait = input('Press enter to exit')
exit()
#First pass - Find labels & parse instructions
with open(path) as f:
for line in f:
line = line.strip()
#Find labels
if line != '' and line[0] == '.':
line = line.split('.')[1]
labels[line.split(' ')[0]] = lineNum
print('Label ',line.split(' ')[0],' found @ $',hex(lineNum),sep='')
line = line.split(' ', 1)
if len(line) > 1:
line = line[1]
else:
line = ''
else:
line = line.replace(',',' ')
line = line.split()
#print(line)
currentLine = 0;
instruction = False
label = ''
for word in line:
if word in mcode:
currentLine = int(mcode[word],2)|currentLine
instruction = True
elif word[0] == '$':
word = word.split('$')
lineNum = int(word[1],16)
elif word[0] == ';':
break
else:
if word[0] == '+':
word = word.split('+')
if currentLine >= 1024:
word[1] = int(word[1]) - 1024
currentLine = (int(lineNum)+(int(word[1])))|currentLine
else:
label = word
if instruction == True or label != '':
words[lineNum] = currentLine
if label != '':
labels2[lineNum] = label
lineNum = lineNum + 1
#print()
#print(labels)
#print()
#print(words)
#print()
#print (labels2)
print()
#Create output hex file
path = path.rsplit('.',1)[0] + '.hex'
if os.path.exists(path):
os.remove(path)
f = open(path,'x')
f.write('v2.0 raw\n')
#Second pass - Fill in labels, write to hex file
for line in range(0,2048):
currentLine = 0
if line in words:
#if words[line][0] == '@':
# words[line] = (words[line].split('@'))[1]
# if currentLine:
if line in labels2:
#print(labels[labels2[line]])
if int(labels[labels2[line]]) >= 1024:
words[line] = words[line]|labels[labels2[line]]-1024
else:
words[line] = words[line]|labels[labels2[line]]
currentLine = words[line]
f.write(hex(currentLine))
f.write('\n')
f.close()
wait = input('Press enter to exit')
exit()
| <filename>Microassembly/CPU-16 Micro Assembler.py
##########################################
# ATLAS CPU-16 MICRO-ASSEMBLER #
# WRITTEN BY <NAME>. - 2022 #
##########################################
import re
import sys
import os
mcode = {
'PC_ST' :'0b000000000000000000000000000010000000000',
'OP1_ST' :'0b000000000000000000000000000100000000000',
'OP2_ST' :'0b000000000000000000000000000110000000000',
'IR_ST' :'0b000000000000000000000000001000000000000',
'MDR_ST' :'0b000000000000000000000000001010000000000',
'MEM_ST' :'0b000000000000000000000000001100000000000',
'REG1_ST' :'0b000000000000000000000000001110000000000',
'REG2_ST' :'0b000000000000000000000000010000000000000',
'SP_ST' :'0b000000000000000000000000010010000000000',
'STAT_ST' :'0b000000000000000000000000010100000000000',
'F_DOUT' :'0b000000000000000000000000100000000000000',
'PC_DOUT' :'0b000000000000000000000001000000000000000',
'SWP_DOUT' :'0b000000000000000000000011000000000000000',
'WRD_DOUT' :'0b000000000000000000000011100000000000000',
'MDR_DOUT' :'0b000000000000000000000100000000000000000',
'MEM_DOUT' :'0b000000000000000000000100100000000000000',
'VECT_DOUT' :'0b000000000000000000000101000000000000000',
'REG1_DOUT' :'0b000000000000000000000101100000000000000',
'REG2_DOUT' :'0b000000000000000000000110000000000000000',
'SP_DOUT' :'0b000000000000000000000110100000000000000',
'PC_AOUT' :'0b000000000000000000001000000000000000000',
'MAR_AOUT' :'0b000000000000000000010000000000000000000',
'COND_N' :'0b000000000000000000100000000000000000000',
'COND_Z' :'0b000000000000000001000000000000000000000',
'COND_V' :'0b000000000000000001100000000000000000000',
'COND_C' :'0b000000000000000010000000000000000000000',
'ALU_ADD' :'0b000000000100100100000001100000000000000',
'ALU_ADC' :'0b000000000010100100000001100000000000000',
'ALU_SUB' :'0b000000000000011000000001100000000000000',
'ALU_SBB' :'0b000000000010011000000001100000000000000',
'ALU_AND' :'0b000000000001101100000001100000000000000',
'ALU_OR' :'0b000000000001111000000001100000000000000',
'ALU_XOR' :'0b000000000001011000000001100000000000000',
'ALU_NOT' :'0b000000000001000000000001100000000000000',
'ALU_LSH' :'0b000000000100110000000001100000000000000',
'ALU_RSH' :'0b000000000000000000000010000000000000000',
'ALU_INC' :'0b000000000000000000000001100000000000000',
'ALU_DEC' :'0b000000000100111100000001100000000000000',
'ALU_SEX' :'0b000000000000000000000010100000000000000',
'PC_INC' :'0b000000001000000000000000000000000000000',
'COND_NEG' :'0b000000010000000000000000000000000000000',
'F_ALUIN' :'0b000000100000000000000000000000000000000',
'F_ST' :'0b000001000000000000000000000000000000000',
'MAR_ST' :'0b000010000000000000000000000000000000000',
'MODE_RST' :'0b000100000000000000000000000000000000000',
'MODE_DMA' :'0b001000000000000000000000000000000000000',
'MODE_FLT' :'0b001100000000000000000000000000000000000',
'MODE_FETCH':'0b010000000000000000000000000000000000000',
'IRQ_EN' :'0b100000000000000000000000000000000000000'
}
#path = sys.argv[1]
path = 'mcode.asm'
labels = {}
words = {}
labels2 = {}
lineNum = 0
if path.rsplit('.',1)[1].upper() != 'ASM':
print('Invalid file extension .',path.rsplit('.',1)[1],sep='')
wait = input('Press enter to exit')
exit()
#First pass - Find labels & parse instructions
with open(path) as f:
for line in f:
line = line.strip()
#Find labels
if line != '' and line[0] == '.':
line = line.split('.')[1]
labels[line.split(' ')[0]] = lineNum
print('Label ',line.split(' ')[0],' found @ $',hex(lineNum),sep='')
line = line.split(' ', 1)
if len(line) > 1:
line = line[1]
else:
line = ''
else:
line = line.replace(',',' ')
line = line.split()
#print(line)
currentLine = 0;
instruction = False
label = ''
for word in line:
if word in mcode:
currentLine = int(mcode[word],2)|currentLine
instruction = True
elif word[0] == '$':
word = word.split('$')
lineNum = int(word[1],16)
elif word[0] == ';':
break
else:
if word[0] == '+':
word = word.split('+')
if currentLine >= 1024:
word[1] = int(word[1]) - 1024
currentLine = (int(lineNum)+(int(word[1])))|currentLine
else:
label = word
if instruction == True or label != '':
words[lineNum] = currentLine
if label != '':
labels2[lineNum] = label
lineNum = lineNum + 1
#print()
#print(labels)
#print()
#print(words)
#print()
#print (labels2)
print()
#Create output hex file
path = path.rsplit('.',1)[0] + '.hex'
if os.path.exists(path):
os.remove(path)
f = open(path,'x')
f.write('v2.0 raw\n')
#Second pass - Fill in labels, write to hex file
for line in range(0,2048):
currentLine = 0
if line in words:
#if words[line][0] == '@':
# words[line] = (words[line].split('@'))[1]
# if currentLine:
if line in labels2:
#print(labels[labels2[line]])
if int(labels[labels2[line]]) >= 1024:
words[line] = words[line]|labels[labels2[line]]-1024
else:
words[line] = words[line]|labels[labels2[line]]
currentLine = words[line]
f.write(hex(currentLine))
f.write('\n')
f.close()
wait = input('Press enter to exit')
exit()
| en | 0.333783 | ########################################## # ATLAS CPU-16 MICRO-ASSEMBLER # # WRITTEN BY <NAME>. - 2022 # ########################################## #path = sys.argv[1] #First pass - Find labels & parse instructions #Find labels #print(line) #print() #print(labels) #print() #print(words) #print() #print (labels2) #Create output hex file #Second pass - Fill in labels, write to hex file #if words[line][0] == '@': # words[line] = (words[line].split('@'))[1] # if currentLine: #print(labels[labels2[line]]) | 1.917695 | 2 |
nobrainer/bayesian_utils.py | richford/nobrainer | 2 | 6620974 | <gh_stars>1-10
import numpy as np
import tensorflow as tf
import tensorflow.compat.v2 as tf2
from tensorflow.python.ops import nn_impl
import tensorflow_probability as tfp
from tensorflow_probability.python import util as tfp_util
from tensorflow_probability.python.distributions import (
deterministic as deterministic_lib,
)
from tensorflow_probability.python.distributions import independent as independent_lib
from tensorflow_probability.python.distributions import normal as normal_lib
tfd = tfp.distributions
def default_loc_scale_fn(
is_singular=True,
loc_initializer=tf.keras.initializers.he_normal(),
untransformed_scale_initializer=tf.constant_initializer(0.0001),
loc_regularizer=None,
untransformed_scale_regularizer=None,
loc_constraint=None,
untransformed_scale_constraint=None,
weightnorm=False,
):
def _fn(dtype, shape, name, trainable, add_variable_fn):
"""Creates `loc`, `scale` parameters."""
loc = add_variable_fn(
name=name + "_loc",
shape=shape,
initializer=loc_initializer,
regularizer=loc_regularizer,
constraint=loc_constraint,
dtype=dtype,
trainable=trainable,
)
if weightnorm:
g = add_variable_fn(
name=name + "_wn",
shape=shape,
initializer=tf.constant_initializer(1.4142),
constraint=loc_constraint,
regularizer=loc_regularizer,
dtype=dtype,
trainable=trainable,
)
loc_wn = tfp_util.DeferredTensor(
loc, lambda x: (tf.multiply(nn_impl.l2_normalize(x), g))
)
# loc = tfp_util.DeferredTensor(loc, lambda x: (nn_impl.l2_normalize(x)))
if is_singular:
if weightnorm:
return loc_wn, None
else:
return loc, None
untransformed_scale = add_variable_fn(
name=name + "_untransformed_scale",
shape=shape,
initializer=untransformed_scale_initializer,
regularizer=untransformed_scale_regularizer,
constraint=untransformed_scale_constraint,
dtype=dtype,
trainable=trainable,
)
scale = tfp_util.DeferredTensor(
untransformed_scale,
lambda x: (np.finfo(dtype.as_numpy_dtype).eps + tf.nn.softplus(x)),
)
if weightnorm:
return loc_wn, scale
else:
return loc, scale
return _fn
def default_mean_field_normal_fn(
is_singular=False,
loc_initializer=tf.keras.initializers.he_normal(),
untransformed_scale_initializer=tf.constant_initializer(0.0001),
loc_regularizer=None, # tf.keras.regularizers.l2(), #None
untransformed_scale_regularizer=None, # tf.keras.regularizers.l2(), #None
loc_constraint=None, # tf.keras.constraints.UnitNorm(axis = [0, 1, 2,3]),
untransformed_scale_constraint=None,
weightnorm=False,
):
loc_scale_fn = default_loc_scale_fn(
is_singular=is_singular,
loc_initializer=loc_initializer,
untransformed_scale_initializer=untransformed_scale_initializer,
loc_regularizer=loc_regularizer,
untransformed_scale_regularizer=untransformed_scale_regularizer,
loc_constraint=loc_constraint,
untransformed_scale_constraint=untransformed_scale_constraint,
weightnorm=weightnorm,
)
def _fn(dtype, shape, name, trainable, add_variable_fn):
loc, scale = loc_scale_fn(dtype, shape, name, trainable, add_variable_fn)
if scale is None:
dist = deterministic_lib.Deterministic(loc=loc)
else:
dist = normal_lib.Normal(loc=loc, scale=scale)
batch_ndims = tf2.size(dist.batch_shape_tensor())
return independent_lib.Independent(dist, reinterpreted_batch_ndims=batch_ndims)
return _fn
def divergence_fn_bayesian(prior_std, examples_per_epoch):
def divergence_fn(q, p, _):
log_probs = tfd.LogNormal(0.0, prior_std).log_prob(p.stddev())
out = tfd.kl_divergence(q, p) - tf.reduce_sum(log_probs)
return out / examples_per_epoch
return divergence_fn
def prior_fn_for_bayesian(init_scale_mean=-1, init_scale_std=0.1):
def prior_fn(dtype, shape, name, _, add_variable_fn):
untransformed_scale = add_variable_fn(
name=name + "_untransformed_scale",
shape=(1,),
initializer=tf.compat.v1.initializers.random_normal(
mean=init_scale_mean, stddev=init_scale_std
),
dtype=dtype,
trainable=True,
)
loc = add_variable_fn(
name=name + "_loc",
initializer=tf.keras.initializers.Zeros(),
shape=shape,
dtype=dtype,
trainable=True,
)
scale = 1e-4 + tf.nn.softplus(untransformed_scale)
dist = tfd.Normal(loc=loc, scale=scale)
batch_ndims = tf.size(input=dist.batch_shape_tensor())
return tfd.Independent(dist, reinterpreted_batch_ndims=batch_ndims)
return prior_fn
def normal_prior(prior_std=1.0):
"""Defines normal distributions prior for Bayesian neural network."""
def prior_fn(dtype, shape, name, trainable, add_variable_fn):
dist = tfd.Normal(
loc=tf.zeros(shape, dtype), scale=dtype.as_numpy_dtype((prior_std))
)
batch_ndims = tf.size(input=dist.batch_shape_tensor())
return tfd.Independent(dist, reinterpreted_batch_ndims=batch_ndims)
return prior_fn
| import numpy as np
import tensorflow as tf
import tensorflow.compat.v2 as tf2
from tensorflow.python.ops import nn_impl
import tensorflow_probability as tfp
from tensorflow_probability.python import util as tfp_util
from tensorflow_probability.python.distributions import (
deterministic as deterministic_lib,
)
from tensorflow_probability.python.distributions import independent as independent_lib
from tensorflow_probability.python.distributions import normal as normal_lib
tfd = tfp.distributions
def default_loc_scale_fn(
is_singular=True,
loc_initializer=tf.keras.initializers.he_normal(),
untransformed_scale_initializer=tf.constant_initializer(0.0001),
loc_regularizer=None,
untransformed_scale_regularizer=None,
loc_constraint=None,
untransformed_scale_constraint=None,
weightnorm=False,
):
def _fn(dtype, shape, name, trainable, add_variable_fn):
"""Creates `loc`, `scale` parameters."""
loc = add_variable_fn(
name=name + "_loc",
shape=shape,
initializer=loc_initializer,
regularizer=loc_regularizer,
constraint=loc_constraint,
dtype=dtype,
trainable=trainable,
)
if weightnorm:
g = add_variable_fn(
name=name + "_wn",
shape=shape,
initializer=tf.constant_initializer(1.4142),
constraint=loc_constraint,
regularizer=loc_regularizer,
dtype=dtype,
trainable=trainable,
)
loc_wn = tfp_util.DeferredTensor(
loc, lambda x: (tf.multiply(nn_impl.l2_normalize(x), g))
)
# loc = tfp_util.DeferredTensor(loc, lambda x: (nn_impl.l2_normalize(x)))
if is_singular:
if weightnorm:
return loc_wn, None
else:
return loc, None
untransformed_scale = add_variable_fn(
name=name + "_untransformed_scale",
shape=shape,
initializer=untransformed_scale_initializer,
regularizer=untransformed_scale_regularizer,
constraint=untransformed_scale_constraint,
dtype=dtype,
trainable=trainable,
)
scale = tfp_util.DeferredTensor(
untransformed_scale,
lambda x: (np.finfo(dtype.as_numpy_dtype).eps + tf.nn.softplus(x)),
)
if weightnorm:
return loc_wn, scale
else:
return loc, scale
return _fn
def default_mean_field_normal_fn(
is_singular=False,
loc_initializer=tf.keras.initializers.he_normal(),
untransformed_scale_initializer=tf.constant_initializer(0.0001),
loc_regularizer=None, # tf.keras.regularizers.l2(), #None
untransformed_scale_regularizer=None, # tf.keras.regularizers.l2(), #None
loc_constraint=None, # tf.keras.constraints.UnitNorm(axis = [0, 1, 2,3]),
untransformed_scale_constraint=None,
weightnorm=False,
):
loc_scale_fn = default_loc_scale_fn(
is_singular=is_singular,
loc_initializer=loc_initializer,
untransformed_scale_initializer=untransformed_scale_initializer,
loc_regularizer=loc_regularizer,
untransformed_scale_regularizer=untransformed_scale_regularizer,
loc_constraint=loc_constraint,
untransformed_scale_constraint=untransformed_scale_constraint,
weightnorm=weightnorm,
)
def _fn(dtype, shape, name, trainable, add_variable_fn):
loc, scale = loc_scale_fn(dtype, shape, name, trainable, add_variable_fn)
if scale is None:
dist = deterministic_lib.Deterministic(loc=loc)
else:
dist = normal_lib.Normal(loc=loc, scale=scale)
batch_ndims = tf2.size(dist.batch_shape_tensor())
return independent_lib.Independent(dist, reinterpreted_batch_ndims=batch_ndims)
return _fn
def divergence_fn_bayesian(prior_std, examples_per_epoch):
def divergence_fn(q, p, _):
log_probs = tfd.LogNormal(0.0, prior_std).log_prob(p.stddev())
out = tfd.kl_divergence(q, p) - tf.reduce_sum(log_probs)
return out / examples_per_epoch
return divergence_fn
def prior_fn_for_bayesian(init_scale_mean=-1, init_scale_std=0.1):
def prior_fn(dtype, shape, name, _, add_variable_fn):
untransformed_scale = add_variable_fn(
name=name + "_untransformed_scale",
shape=(1,),
initializer=tf.compat.v1.initializers.random_normal(
mean=init_scale_mean, stddev=init_scale_std
),
dtype=dtype,
trainable=True,
)
loc = add_variable_fn(
name=name + "_loc",
initializer=tf.keras.initializers.Zeros(),
shape=shape,
dtype=dtype,
trainable=True,
)
scale = 1e-4 + tf.nn.softplus(untransformed_scale)
dist = tfd.Normal(loc=loc, scale=scale)
batch_ndims = tf.size(input=dist.batch_shape_tensor())
return tfd.Independent(dist, reinterpreted_batch_ndims=batch_ndims)
return prior_fn
def normal_prior(prior_std=1.0):
"""Defines normal distributions prior for Bayesian neural network."""
def prior_fn(dtype, shape, name, trainable, add_variable_fn):
dist = tfd.Normal(
loc=tf.zeros(shape, dtype), scale=dtype.as_numpy_dtype((prior_std))
)
batch_ndims = tf.size(input=dist.batch_shape_tensor())
return tfd.Independent(dist, reinterpreted_batch_ndims=batch_ndims)
return prior_fn | en | 0.424684 | Creates `loc`, `scale` parameters. # loc = tfp_util.DeferredTensor(loc, lambda x: (nn_impl.l2_normalize(x))) # tf.keras.regularizers.l2(), #None # tf.keras.regularizers.l2(), #None # tf.keras.constraints.UnitNorm(axis = [0, 1, 2,3]), Defines normal distributions prior for Bayesian neural network. | 2.18167 | 2 |
tests/models/test_group.py | SungardAS/porper-core | 0 | 6620975 |
import sys
sys.path.append('../../porper')
import os
region = os.environ.get('AWS_DEFAULT_REGION')
import boto3
dynamodb = boto3.resource('dynamodb',region_name=region)
from models.group import Group
group = Group(dynamodb)
params = {'name': 'public'}
group.create(params)
params = {'id': '1234', 'name': 'new'}
group.create(params)
params = {'id': 'abcd', 'name': 'old'}
group.create(params)
params = {'id': '1234'}
group.find(params)
params = {'ids': ['1234', 'tt']}
group.find(params)
params = {}
group.find(params)
|
import sys
sys.path.append('../../porper')
import os
region = os.environ.get('AWS_DEFAULT_REGION')
import boto3
dynamodb = boto3.resource('dynamodb',region_name=region)
from models.group import Group
group = Group(dynamodb)
params = {'name': 'public'}
group.create(params)
params = {'id': '1234', 'name': 'new'}
group.create(params)
params = {'id': 'abcd', 'name': 'old'}
group.create(params)
params = {'id': '1234'}
group.find(params)
params = {'ids': ['1234', 'tt']}
group.find(params)
params = {}
group.find(params)
| none | 1 | 2.16917 | 2 | |
app/libs/face_detection/tests/test_face_detection_facade.py | guischroeder/hackattic-basic-face-detection | 1 | 6620976 | <reponame>guischroeder/hackattic-basic-face-detection
import requests
from io import BytesIO
from app.libs.hackattic.hackattic_service import HackatticService
from app.libs.aws.aws_client import AWSClient
from app.libs.aws.s3_service import S3Service
from app.libs.aws.rekognition_service import RekognitionService
from app.libs.face_detection.positions_service import PositionsService
from app.libs.face_detection.image_service import ImageService
from app.libs.face_detection.face_detection_facade import FaceDetectionFacade
from app.helpers.create_test_image import create_test_image
def build_facade(s3, rekognition):
hackattic_service = HackatticService({"access_token": ""})
s3_service = S3Service(s3)
rekognition_service = RekognitionService(rekognition)
positions_service = PositionsService()
image_service = ImageService(positions_service, s3_service)
return FaceDetectionFacade(
hackattic_service=hackattic_service,
s3_service=s3_service,
rekognition_service=rekognition_service,
positions_service=positions_service,
image_service=image_service,
)
def mock_s3(mocker, image):
return mocker.Mock(
put_object=lambda Bucket, Key, Body: {},
get_object=lambda Bucket, Key: {"Body": image},
)
def mock_rekognition(mocker, data):
return mocker.Mock(detect_faces=lambda Image: data)
def mock_requests_get(mocker):
mocker.patch(
"requests.get", return_value=mocker.Mock(json=lambda: {"image_url": ""},),
)
def test_show_detect_faces(mocker, data):
mock_requests_get(mocker)
image = create_test_image()
s3_client = mocker.Mock(get_instance=lambda: mock_s3(mocker, image))
rekognition_client = mocker.Mock(
get_instance=lambda: mock_rekognition(mocker, data)
)
face_detection_facade = build_facade(s3_client, rekognition_client)
get_image_url = mocker.spy(
face_detection_facade._hackattic_service, "get_image_url"
)
upload_image_from_url = mocker.spy(
face_detection_facade._s3_service, "upload_image_from_url"
)
detect_faces = mocker.spy(
face_detection_facade._rekognition_service, "detect_faces"
)
find_positions = mocker.spy(
face_detection_facade._positions_service, "find_positions"
)
final_image = face_detection_facade.show_detected_faces()
assert isinstance(final_image, BytesIO)
assert final_image != image
assert find_positions.spy_return == [[2, 3], [0, 6], [4, 7]]
get_image_url.assert_called_once()
upload_image_from_url.assert_called_once()
detect_faces.assert_called_once()
def test_solve_problem(mocker, data):
mock_requests_get(mocker)
s3_client = mocker.Mock(get_instance=lambda: mock_s3(mocker, {}))
rekognition_client = mocker.Mock(
get_instance=lambda: mock_rekognition(mocker, data)
)
face_detection_facade = build_facade(s3_client, rekognition_client)
send_result = mocker.spy(face_detection_facade._hackattic_service, "send_result")
face_detection_facade.solve_problem()
send_result.assert_called_once_with([[2, 3], [0, 6], [4, 7]])
| import requests
from io import BytesIO
from app.libs.hackattic.hackattic_service import HackatticService
from app.libs.aws.aws_client import AWSClient
from app.libs.aws.s3_service import S3Service
from app.libs.aws.rekognition_service import RekognitionService
from app.libs.face_detection.positions_service import PositionsService
from app.libs.face_detection.image_service import ImageService
from app.libs.face_detection.face_detection_facade import FaceDetectionFacade
from app.helpers.create_test_image import create_test_image
def build_facade(s3, rekognition):
hackattic_service = HackatticService({"access_token": ""})
s3_service = S3Service(s3)
rekognition_service = RekognitionService(rekognition)
positions_service = PositionsService()
image_service = ImageService(positions_service, s3_service)
return FaceDetectionFacade(
hackattic_service=hackattic_service,
s3_service=s3_service,
rekognition_service=rekognition_service,
positions_service=positions_service,
image_service=image_service,
)
def mock_s3(mocker, image):
return mocker.Mock(
put_object=lambda Bucket, Key, Body: {},
get_object=lambda Bucket, Key: {"Body": image},
)
def mock_rekognition(mocker, data):
return mocker.Mock(detect_faces=lambda Image: data)
def mock_requests_get(mocker):
mocker.patch(
"requests.get", return_value=mocker.Mock(json=lambda: {"image_url": ""},),
)
def test_show_detect_faces(mocker, data):
mock_requests_get(mocker)
image = create_test_image()
s3_client = mocker.Mock(get_instance=lambda: mock_s3(mocker, image))
rekognition_client = mocker.Mock(
get_instance=lambda: mock_rekognition(mocker, data)
)
face_detection_facade = build_facade(s3_client, rekognition_client)
get_image_url = mocker.spy(
face_detection_facade._hackattic_service, "get_image_url"
)
upload_image_from_url = mocker.spy(
face_detection_facade._s3_service, "upload_image_from_url"
)
detect_faces = mocker.spy(
face_detection_facade._rekognition_service, "detect_faces"
)
find_positions = mocker.spy(
face_detection_facade._positions_service, "find_positions"
)
final_image = face_detection_facade.show_detected_faces()
assert isinstance(final_image, BytesIO)
assert final_image != image
assert find_positions.spy_return == [[2, 3], [0, 6], [4, 7]]
get_image_url.assert_called_once()
upload_image_from_url.assert_called_once()
detect_faces.assert_called_once()
def test_solve_problem(mocker, data):
mock_requests_get(mocker)
s3_client = mocker.Mock(get_instance=lambda: mock_s3(mocker, {}))
rekognition_client = mocker.Mock(
get_instance=lambda: mock_rekognition(mocker, data)
)
face_detection_facade = build_facade(s3_client, rekognition_client)
send_result = mocker.spy(face_detection_facade._hackattic_service, "send_result")
face_detection_facade.solve_problem()
send_result.assert_called_once_with([[2, 3], [0, 6], [4, 7]]) | none | 1 | 2.173446 | 2 | |
unitconv/__init__.py | facundobatista/unitconv | 2 | 6620977 | # Copyright 2010-2018 Canonical Ltd.
# Copyright 2020 <NAME>
# All Rights Reserved
"""A units converter."""
import collections
import itertools
import logging
import math
import random
import re
import sys
import pint
__all__ = ['convert']
logger = logging.getLogger(__name__)
_ureg = pint.UnitRegistry()
UnitInfo = collections.namedtuple("UnitInfo", "mult unit human_single human_plural")
# crazy regex to match a number; this comes from the Python's Decimal code,
# adapted to support also commas
RE_NUMBER = r""" # A numeric string consists of:
(?=\d|\.\d|\,\d) # starts with a number or a point/comma
(?P<int>\d*) # having a (possibly empty) integer part
((\.|\,)(?P<frac>\d*))? # followed by an optional fractional part
((e|E)(?P<exp>[-+]?\d+))? # followed by an optional exponent, or...
"""
# supported units by the system; the key is the reference name, its
# multiplier (if any) and the pint unit
SUPPORTED_UNITS = {
'are': (None, _ureg.are),
'celsius': (None, _ureg.degC),
'centimeter': (None, _ureg.centimeter),
'cubic_centimeter': (None, _ureg.centimeter ** 3),
'cubic_foot': (None, _ureg.feet ** 3),
'cubic_inch': (None, _ureg.inch ** 3),
'cubic_kilometer': (None, _ureg.kilometer ** 3),
'cubic_meter': (None, _ureg.meter ** 3),
'cubic_mile': (None, _ureg.mile ** 3),
'cubic_yard': (None, _ureg.yard ** 3),
'cup': (None, _ureg.cup),
'day': (None, _ureg.day),
'fahrenheit': (None, _ureg.degF),
'fluid_ounce': (None, _ureg.floz),
'foot': (None, _ureg.feet),
'gallon': (None, _ureg.gallon),
'gram': (None, _ureg.grams),
'hectare': (100, _ureg.are),
'hour': (None, _ureg.hour),
'inch': (None, _ureg.inch),
'kelvin': (None, _ureg.degK),
'kilogram': (None, _ureg.kilogram),
'kilometer': (None, _ureg.kilometer),
'litre': (None, _ureg.litres),
'meter': (None, _ureg.meter),
'metric_ton': (None, _ureg.metric_ton),
'mile': (None, _ureg.mile),
'milligram': (.001, _ureg.gram),
'millilitre': (.001, _ureg.litre),
'minute': (None, _ureg.minute),
'month': (None, _ureg.month),
'ounce': (None, _ureg.oz),
'pint': (None, _ureg.pint),
'pound': (None, _ureg.pound),
'quart': (None, _ureg.quart),
'second': (None, _ureg.second),
'short_ton': (None, _ureg.ton),
'square_centimeter': (None, _ureg.centimeter ** 2),
'square_foot': (None, _ureg.feet ** 2),
'square_inch': (None, _ureg.inch ** 2),
'square_kilometer': (None, _ureg.kilometer ** 2),
'square_meter': (None, _ureg.meter ** 2),
'square_mile': (None, _ureg.mile ** 2),
'square_yard': (None, _ureg.yard ** 2),
'tablespoon': (None, _ureg.tablespoon),
'teaspoon': (None, _ureg.teaspoon),
'week': (None, _ureg.week),
'yard': (None, _ureg.yard),
'year': (None, _ureg.year),
}
# unit symbols (not to be translated), indicating the symbol, the supported
# unit name, and if it's linear (so we add area and volume postfixes)
UNIT_SYMBOLS = [
('c', 'celsius', False),
('c', 'cup', False),
('cc', 'cubic_centimeter', False),
('cm', 'centimeter', True),
('d', 'day', False),
('f', 'fahrenheit', False),
('f', 'foot', True),
('ft', 'foot', True),
('g', 'gram', False),
('h', 'hour', False),
('in', 'inch', True),
('k', 'kelvin', False),
('kg', 'kilogram', False),
('km', 'kilometer', True),
('l', 'litre', False),
('m', 'meter', True),
('m', 'month', False),
('mg', 'milligram', False),
('mi', 'mile', True),
('ml', 'millilitre', False),
('s', 'second', False),
('t', 'metric_ton', False),
('w', 'week', False),
('y', 'yard', True),
('y', 'year', False),
('°c', 'celsius', False),
('°f', 'fahrenheit', False),
]
# synonyms, abbreviations, and other names for same unit; and also
# multi-word conversions
EXTRA_UNITS_INPUT = [
('ares', 'are'),
('centimeters', 'centimeter'),
('cubic centimeter', 'cubic_centimeter'),
('cubic centimeters', 'cubic_centimeter'),
('cubic cm', 'cubic_centimeter'),
('cubic feet', 'cubic_foot'),
('cubic foot', 'cubic_foot'),
('cubic ft', 'cubic_foot'),
('cubic in', 'cubic_inch'),
('cubic inch', 'cubic_inch'),
('cubic inches', 'cubic_inch'),
('cubic kilometer', 'cubic_kilometer'),
('cubic kilometers', 'cubic_kilometer'),
('cubic km', 'cubic_kilometer'),
('cubic m', 'cubic_meter'),
('cubic meter', 'cubic_meter'),
('cubic meters', 'cubic_meter'),
('cubic mi', 'cubic_mile'),
('cubic mile', 'cubic_mile'),
('cubic miles', 'cubic_mile'),
('cubic y', 'cubic_yard'),
('cubic yard', 'cubic_yard'),
('cubic yards', 'cubic_yard'),
('cups', 'cup'),
('days', 'day'),
('feet', 'foot'),
('floz', 'fluid_ounce'),
('flozs', 'fluid_ounce'),
('fluid ounce', 'fluid_ounce'),
('fluid ounces', 'fluid_ounce'),
('gal', 'gallon'),
('gallons', 'gallon'),
('grams', 'gram'),
('hectares', 'hectare'),
('hours', 'hour'),
('inches', 'inch'),
('kilograms', 'kilogram'),
('kilometers', 'kilometer'),
('lb', 'pound'),
('lbs', 'pound'),
('liter', 'litre'),
('liters', 'litre'),
('litres', 'litre'),
('meters', 'meter'),
('metric ton', 'metric_ton'),
('metric tons', 'metric_ton'),
('miles', 'mile'),
('milligrams', 'milligram'),
('milliliter', 'millilitre'),
('milliliters', 'millilitre'),
('millilitres', 'millilitre'),
('min', 'minute'),
('minutes', 'minute'),
('months', 'month'),
('ounce', 'fluid_ounce'),
('ounces', 'fluid_ounce'),
('ounces', 'ounce'),
('oz', 'fluid_ounce'),
('oz', 'ounce'),
('ozs', 'fluid_ounce'),
('ozs', 'ounce'),
('pints', 'pint'),
('pounds', 'pound'),
('qt', 'quart'),
('qts', 'quart'),
('quarts', 'quart'),
('sec', 'second'),
('seconds', 'second'),
('short ton', 'short_ton'),
('short tons', 'short_ton'),
('sq centimeter', 'square_centimeter'),
('sq centimeters', 'square_centimeter'),
('sq cm', 'square_centimeter'),
('sq feet', 'square_foot'),
('sq foot', 'square_foot'),
('sq ft', 'square_foot'),
('sq in', 'square_inch'),
('sq inch', 'square_inch'),
('sq inches', 'square_inch'),
('sq kilometer', 'square_kilometer'),
('sq kilometers', 'square_kilometer'),
('sq km', 'square_kilometer'),
('sq m', 'square_meter'),
('sq meter', 'square_meter'),
('sq meters', 'square_meter'),
('sq mi', 'square_mile'),
('sq mile', 'square_mile'),
('sq miles', 'square_mile'),
('sq y', 'square_yard'),
('sq yard', 'square_yard'),
('sq yards', 'square_yard'),
('square centimeter', 'square_centimeter'),
('square centimeters', 'square_centimeter'),
('square cm', 'square_centimeter'),
('square feet', 'square_foot'),
('square foot', 'square_foot'),
('square ft', 'square_foot'),
('square in', 'square_inch'),
('square inch', 'square_inch'),
('square inches', 'square_inch'),
('square kilometer', 'square_kilometer'),
('square kilometers', 'square_kilometer'),
('square km', 'square_kilometer'),
('square m', 'square_meter'),
('square meter', 'square_meter'),
('square meters', 'square_meter'),
('square mi', 'square_mile'),
('square mile', 'square_mile'),
('square miles', 'square_mile'),
('square y', 'square_yard'),
('square yard', 'square_yard'),
('square yards', 'square_yard'),
('tablespoons', 'tablespoon'),
('tbs', 'tablespoon'),
('tbsp', 'tablespoon'),
('teaspoons', 'teaspoon'),
('ton', 'short_ton'),
('tonne', 'metric_ton'),
('ts', 'teaspoon'),
('tsp', 'teaspoon'),
('weeks', 'week'),
('yards', 'yard'),
('years', 'year'),
]
# human unit representation for outputs to the user
UNITS_OUTPUT = {
'are': ('{} are', '{} ares'),
'celsius': ('{}°C', '{}°C'),
'centimeter': ('{} centimeter', '{} centimeters'),
'cubic_centimeter': ('{} cubic centimeter', '{} cubic centimeters'),
'cubic_foot': ('{} cubic foot', '{} cubic feet'),
'cubic_inch': ('{} cubic inch', '{} cubic inches'),
'cubic_kilometer': ('{} cubic kilometer', '{} cubic kilometers'),
'cubic_meter': ('{} cubic meter', '{} cubic meters'),
'cubic_mile': ('{} cubic mile', '{} cubic miles'),
'cubic_yard': ('{} cubic yard', '{} cubic yards'),
'cup': ('{} US cup', '{} US cups'),
'day': ('{} day', '{} days'),
'fahrenheit': ('{}°F', '{}°F'),
'fluid_ounce': ('{} US fluid ounce', '{} US fluid ounces'),
'foot': ('{} foot', '{} feet'),
'gallon': ('{} US gallon', '{} US gallons'),
'gram': ('{} gram', '{} grams'),
'hectare': ('{} hectare', '{} hectares'),
'hour': ('{} hour', '{} hours'),
'inch': ('{} inch', '{} inches'),
'kelvin': ('{}K', '{}K'),
'kilogram': ('{} kilogram', '{} kilograms'),
'kilometer': ('{} kilometer', '{} kilometers'),
'litre': ('{} litre', '{} litres'),
'meter': ('{} meter', '{} meters'),
'metric_ton': ('{} metric ton', '{} metric tons'),
'mile': ('{} mile', '{} miles'),
'milligram': ('{} milligram', '{} milligrams'),
'millilitre': ('{} millilitre', '{} millilitres'),
'minute': ('{} minute', '{} minutes'),
'month': ('{} month', '{} months'),
'ounce': ('{} ounce', '{} ounces'),
'pint': ('{} US pint', '{} US pints'),
'pound': ('{} pound', '{} pounds'),
'quart': ('{} quart', '{} quarts'),
'second': ('{} second', '{} seconds'),
'square_centimeter': ('{} square centimeter', '{} square centimeters'),
'square_foot': ('{} square foot', '{} square feet'),
'square_inch': ('{} square inch', '{} square inches'),
'square_kilometer': ('{} square kilometer', '{} square kilometers'),
'square_meter': ('{} square meter', '{} square meters'),
'square_mile': ('{} square mile', '{} square miles'),
'square_yard': ('{} square yard', '{} square yards'),
'tablespoon': ('{} US tablespoon', '{} US tablespoons'),
'teaspoon': ('{} US teaspoon', '{} US teaspoons'),
'short_ton': ('{} short ton', '{} short tons'),
'week': ('{} week', '{} weeks'),
'yard': ('{} yard', '{} yards'),
'year': ('{} year', '{} years'),
}
# normal connectors in user input
CONNECTORS = [
'to',
'in',
]
# facts list to provide useful/fun information about numbers
NUMBERS_INFO = [
(3.2, 'meters', 'wingspan', 'a large andean condor'),
(5.5, 'meters', 'length', 'a white wale'),
(41, 'centimeters', 'height', 'a blue penguin'),
(146, 'meters', 'height', 'the Great Pyramid of Giza'),
(113, 'km/h', 'top speed', 'a cheetah'),
(3475, 'kilometers', 'diameter', 'the Moon'),
(1600, 'kilograms', 'weight', ' a white wale'),
(8850, 'meters', 'height', 'Mount Everest'),
(5500, '°C', 'temperature', 'the surface of the Sun'),
(12756, 'kilometers', 'diameter', 'the Earth'),
(6430, 'kilometers', 'lenght', 'the Great Wall of China'),
(100, '°C', 'temperature', 'boiling water'),
]
# we will not always select the best match for number info (as it will be
# too repeated), but will select randomly between the top N:
NUMBERS_UNCERTAINTY = 3
# table to suggest a second unit; general rules are:
# - if it's temperature, just go celsius<->fahrenheit
# - if it's time, go to a lower unit, but not immediate one (which is
# so easy that user shouldn't need it the unit conversor)
# - for the rest, just go imperial<->metric, using a similar size unit
SUGGESTED_SECOND_UNIT = {
'are': 'square_yard',
'celsius': 'fahrenheit',
'centimeter': 'inch',
'cubic_centimeter': 'fluid_ounce',
'cubic_foot': 'litre',
'cubic_inch': 'millilitre',
'cubic_kilometer': 'cubic_mile',
'cubic_meter': 'cubic_yard',
'cubic_mile': 'cubic_kilometer',
'cubic_yard': 'cubic_meter',
'cup': 'millilitre',
'day': 'hour',
'fahrenheit': 'celsius',
'fluid_ounce': 'millilitre',
'foot': 'meter',
'gallon': 'litre',
'gram': 'ounce',
'hectare': 'square_mile',
'hour': 'second',
'inch': 'centimeter',
'kilogram': 'pound',
'kilometer': 'mile',
'litre': 'gallon',
'meter': 'yard',
'mile': 'kilometer',
'minute': 'second',
'month': 'day',
'ounce': 'gram',
'pint': 'litre',
'pound': 'kilogram',
'quart': 'litre',
'square_centimeter': 'square_inch',
'square_foot': 'square_meter',
'square_inch': 'square_centimeter',
'square_kilometer': 'square_mile',
'square_meter': 'square_foot',
'square_mile': 'square_kilometer',
'square_yard': 'square_meter',
'tablespoon': 'millilitre',
'teaspoon': 'millilitre',
'week': 'hour',
'yard': 'meter',
'year': 'day',
}
class _UnitManager(object):
"""A unique class to hold all units mambo jambo."""
def __init__(self):
# generate the main unit conversion structure
self._units = _u = {k: [k] for k in SUPPORTED_UNITS}
for name, syn in EXTRA_UNITS_INPUT:
_u.setdefault(name, []).append(syn)
for symbol, unit, linear in UNIT_SYMBOLS:
_u.setdefault(symbol, []).append(unit)
if linear:
_u[symbol + 'SUPERSCRIPT_TWO'] = _u['square_' + unit]
_u[symbol + 'SUPERSCRIPT_THREE'] = _u['cubic_' + unit]
# generate the useful tokens
_all_tokens = set(itertools.chain(_u.keys(), CONNECTORS))
self.useful_tokens = sorted(_all_tokens, key=len, reverse=True)
# generate the complex units conversion
_c = ((k, v) for k, v in EXTRA_UNITS_INPUT if ' ' in k)
self.complex_units = sorted(_c, key=lambda x: len(x[0]), reverse=True)
# the connectors
self.connectors = CONNECTORS
def get_units_info(self, unit_token_from, unit_token_to):
"""Return the info for the unit."""
base_units_from = self._units[unit_token_from]
base_units_to = self._units[unit_token_to]
useful = []
for b_u_from in base_units_from:
for b_u_to in base_units_to:
mult_from, u_from = SUPPORTED_UNITS[b_u_from]
mult_to, u_to = SUPPORTED_UNITS[b_u_to]
if u_from.dimensionality == u_to.dimensionality:
h_from_s, h_from_p = UNITS_OUTPUT[b_u_from]
h_to_s, h_to_p = UNITS_OUTPUT[b_u_to]
useful.append((UnitInfo(mult_from, u_from, h_from_s, h_from_p),
UnitInfo(mult_to, u_to, h_to_s, h_to_p)))
# return units info if there's a nice crossing and no ambiguity
if len(useful) == 1:
return useful[0]
def suggest(self, unit_token_from):
"""Suggest a second destination unit."""
base_units_from = self._units[unit_token_from]
for b_u_from in base_units_from:
if b_u_from in SUGGESTED_SECOND_UNIT:
return SUGGESTED_SECOND_UNIT[b_u_from]
unit_manager = _UnitManager()
def _numbers_info(number):
"""Provide useful/fun info about some numbers."""
results = []
for value, unit, dimension, target in NUMBERS_INFO:
msg = None
vals = locals()
if value * .4 <= number <= value * .6:
msg = "{number} {unit} is about half of the {dimension} of {target}"
elif value * .9 <= number <= value * 1.1:
msg = "{number} {unit} is close to the {dimension} of {target}"
elif value * 1.7 <= number <= value * 100:
vals['mult'] = int(round(number / value))
msg = "{number} {unit} is around {mult} times the {dimension} of {target}"
if msg is not None:
text = msg.format(**vals)
distance = abs(math.log10(number) - math.log10(value))
results.append((distance, text))
if results:
return random.choice([x[1] for x in sorted(results)[:NUMBERS_UNCERTAINTY]])
def parse_number(m):
"""Return a float from a match of the regex above."""
intpart, fracpart, expart = m.group('int', 'frac', 'exp')
if intpart:
result = int(intpart)
else:
result = 0
if fracpart:
result += float(fracpart) / 10 ** len(fracpart)
if expart:
result *= 10 ** int(expart)
return result
def convert(source):
"""Parse and convert the units found in the source text."""
logger.debug("Input: %r", source)
text = source.strip().lower()
# normalize square and cubic combinations
text = re.sub(r" *?\*\* *?2| *?\^ *?2|(?<=[a-zA-Z])2|²", 'SUPERSCRIPT_TWO', text)
text = re.sub(r" *?\*\* *?3| *?\^ *?3|(?<=[a-zA-Z])3|³", 'SUPERSCRIPT_THREE', text)
# replace the complex units to something useful
for cu, real in unit_manager.complex_units:
text = re.sub(cu, real, text)
logger.debug("Preconverted: %r", text)
m = re.search(RE_NUMBER, text, re.VERBOSE)
if not m:
logger.debug("OOPS, not number found")
return
number = parse_number(m)
num_start, num_end = m.span()
logger.debug("Number: %r (limit=%s)", number, m.span())
tokens = []
found_tokens_before = False
for part in re.split(r'\W', text[:num_start], re.UNICODE):
for token in unit_manager.useful_tokens:
if part == token:
found_tokens_before = True
tokens.append(token)
break
for part in re.split(r'\W', text[num_end:], re.UNICODE):
for token in unit_manager.useful_tokens:
if part == token:
tokens.append(token)
logger.debug("Tokens found: %s", tokens)
if len(tokens) == 0:
# only give number info if the number is alone
if num_end - num_start == len(text.strip()):
ni = _numbers_info(number)
logger.debug("Numbers info: %r", ni)
return ni
else:
return
if len(tokens) == 1:
# suggest the second unit
suggested = unit_manager.suggest(tokens[0])
if suggested is None:
return
# use suggested unit and assure it's the destination one
logger.debug("Suggesting 2nd unit: %r", suggested)
tokens.append(suggested)
found_tokens_before = False
if len(tokens) > 2:
for conn in unit_manager.connectors:
if conn in tokens:
tokens.remove(conn)
if len(tokens) == 2:
break
else:
logger.debug("OOPS, not enough tokens")
return
logger.debug("Tokens filtered: %s", tokens)
if not found_tokens_before:
# everything is after the number
t_from_pos = 0
t_to_pos = 1
else:
t_from_pos = 1
t_to_pos = 0
logger.debug("Token selector: from=%s to=%s", t_from_pos, t_to_pos)
t_from = tokens[t_from_pos]
t_to = tokens[t_to_pos]
units_info = unit_manager.get_units_info(t_from, t_to)
if units_info is None:
logger.debug("OOPS, no matching units")
return
unit_from, unit_to = units_info
to_convert = _ureg.Quantity(number, unit_from.unit)
if unit_from.mult is not None:
to_convert *= unit_from.mult
try:
converted = to_convert.to(unit_to.unit)
except pint.unit.DimensionalityError:
logger.debug("OOPS, dimensionality error")
return
if unit_to.mult is not None:
converted /= unit_to.mult
logger.debug("Converted: %r", converted)
rounded = round(converted.magnitude, 4)
human_from, human_to = unit_from.human_plural, unit_to.human_plural
# care about result formatting
if isinstance(rounded, int) or rounded.is_integer():
if rounded == 1:
human_to = unit_to.human_single
nicer_res = str(int(rounded))
else:
nicer_res = "%.4f" % rounded
# as it's not an integer, remove extra 0s at the right
while nicer_res[-1] == '0':
nicer_res = nicer_res[:-1]
logger.debug("Nicer number: %r", nicer_res)
# care about source formatting
if number == 1:
human_from = unit_from.human_single
if isinstance(number, float) and number.is_integer():
nicer_orig = str(int(number))
else:
nicer_orig = str(number)
return human_from.format(nicer_orig) + ' = ' + human_to.format(nicer_res)
USAGE = """
Usage: unitconv <expression>
ej: unitconv 42 km to miles
"""
def main():
"""Main entry point to run as script. Use `convert` instead if as module."""
params = sys.argv[1:]
if params:
print(convert(" ".join(params)))
else:
print(USAGE)
if __name__ == '__main__':
# set up logging so it's easier to debug
logger.setLevel(logging.DEBUG)
h = logging.StreamHandler()
h.setLevel(logging.DEBUG)
logger.addHandler(h)
print("Response:", convert(" ".join(sys.argv[1:])))
| # Copyright 2010-2018 Canonical Ltd.
# Copyright 2020 <NAME>
# All Rights Reserved
"""A units converter."""
import collections
import itertools
import logging
import math
import random
import re
import sys
import pint
__all__ = ['convert']
logger = logging.getLogger(__name__)
_ureg = pint.UnitRegistry()
UnitInfo = collections.namedtuple("UnitInfo", "mult unit human_single human_plural")
# crazy regex to match a number; this comes from the Python's Decimal code,
# adapted to support also commas
RE_NUMBER = r""" # A numeric string consists of:
(?=\d|\.\d|\,\d) # starts with a number or a point/comma
(?P<int>\d*) # having a (possibly empty) integer part
((\.|\,)(?P<frac>\d*))? # followed by an optional fractional part
((e|E)(?P<exp>[-+]?\d+))? # followed by an optional exponent, or...
"""
# supported units by the system; the key is the reference name, its
# multiplier (if any) and the pint unit
SUPPORTED_UNITS = {
'are': (None, _ureg.are),
'celsius': (None, _ureg.degC),
'centimeter': (None, _ureg.centimeter),
'cubic_centimeter': (None, _ureg.centimeter ** 3),
'cubic_foot': (None, _ureg.feet ** 3),
'cubic_inch': (None, _ureg.inch ** 3),
'cubic_kilometer': (None, _ureg.kilometer ** 3),
'cubic_meter': (None, _ureg.meter ** 3),
'cubic_mile': (None, _ureg.mile ** 3),
'cubic_yard': (None, _ureg.yard ** 3),
'cup': (None, _ureg.cup),
'day': (None, _ureg.day),
'fahrenheit': (None, _ureg.degF),
'fluid_ounce': (None, _ureg.floz),
'foot': (None, _ureg.feet),
'gallon': (None, _ureg.gallon),
'gram': (None, _ureg.grams),
'hectare': (100, _ureg.are),
'hour': (None, _ureg.hour),
'inch': (None, _ureg.inch),
'kelvin': (None, _ureg.degK),
'kilogram': (None, _ureg.kilogram),
'kilometer': (None, _ureg.kilometer),
'litre': (None, _ureg.litres),
'meter': (None, _ureg.meter),
'metric_ton': (None, _ureg.metric_ton),
'mile': (None, _ureg.mile),
'milligram': (.001, _ureg.gram),
'millilitre': (.001, _ureg.litre),
'minute': (None, _ureg.minute),
'month': (None, _ureg.month),
'ounce': (None, _ureg.oz),
'pint': (None, _ureg.pint),
'pound': (None, _ureg.pound),
'quart': (None, _ureg.quart),
'second': (None, _ureg.second),
'short_ton': (None, _ureg.ton),
'square_centimeter': (None, _ureg.centimeter ** 2),
'square_foot': (None, _ureg.feet ** 2),
'square_inch': (None, _ureg.inch ** 2),
'square_kilometer': (None, _ureg.kilometer ** 2),
'square_meter': (None, _ureg.meter ** 2),
'square_mile': (None, _ureg.mile ** 2),
'square_yard': (None, _ureg.yard ** 2),
'tablespoon': (None, _ureg.tablespoon),
'teaspoon': (None, _ureg.teaspoon),
'week': (None, _ureg.week),
'yard': (None, _ureg.yard),
'year': (None, _ureg.year),
}
# unit symbols (not to be translated), indicating the symbol, the supported
# unit name, and if it's linear (so we add area and volume postfixes)
UNIT_SYMBOLS = [
('c', 'celsius', False),
('c', 'cup', False),
('cc', 'cubic_centimeter', False),
('cm', 'centimeter', True),
('d', 'day', False),
('f', 'fahrenheit', False),
('f', 'foot', True),
('ft', 'foot', True),
('g', 'gram', False),
('h', 'hour', False),
('in', 'inch', True),
('k', 'kelvin', False),
('kg', 'kilogram', False),
('km', 'kilometer', True),
('l', 'litre', False),
('m', 'meter', True),
('m', 'month', False),
('mg', 'milligram', False),
('mi', 'mile', True),
('ml', 'millilitre', False),
('s', 'second', False),
('t', 'metric_ton', False),
('w', 'week', False),
('y', 'yard', True),
('y', 'year', False),
('°c', 'celsius', False),
('°f', 'fahrenheit', False),
]
# synonyms, abbreviations, and other names for same unit; and also
# multi-word conversions
EXTRA_UNITS_INPUT = [
('ares', 'are'),
('centimeters', 'centimeter'),
('cubic centimeter', 'cubic_centimeter'),
('cubic centimeters', 'cubic_centimeter'),
('cubic cm', 'cubic_centimeter'),
('cubic feet', 'cubic_foot'),
('cubic foot', 'cubic_foot'),
('cubic ft', 'cubic_foot'),
('cubic in', 'cubic_inch'),
('cubic inch', 'cubic_inch'),
('cubic inches', 'cubic_inch'),
('cubic kilometer', 'cubic_kilometer'),
('cubic kilometers', 'cubic_kilometer'),
('cubic km', 'cubic_kilometer'),
('cubic m', 'cubic_meter'),
('cubic meter', 'cubic_meter'),
('cubic meters', 'cubic_meter'),
('cubic mi', 'cubic_mile'),
('cubic mile', 'cubic_mile'),
('cubic miles', 'cubic_mile'),
('cubic y', 'cubic_yard'),
('cubic yard', 'cubic_yard'),
('cubic yards', 'cubic_yard'),
('cups', 'cup'),
('days', 'day'),
('feet', 'foot'),
('floz', 'fluid_ounce'),
('flozs', 'fluid_ounce'),
('fluid ounce', 'fluid_ounce'),
('fluid ounces', 'fluid_ounce'),
('gal', 'gallon'),
('gallons', 'gallon'),
('grams', 'gram'),
('hectares', 'hectare'),
('hours', 'hour'),
('inches', 'inch'),
('kilograms', 'kilogram'),
('kilometers', 'kilometer'),
('lb', 'pound'),
('lbs', 'pound'),
('liter', 'litre'),
('liters', 'litre'),
('litres', 'litre'),
('meters', 'meter'),
('metric ton', 'metric_ton'),
('metric tons', 'metric_ton'),
('miles', 'mile'),
('milligrams', 'milligram'),
('milliliter', 'millilitre'),
('milliliters', 'millilitre'),
('millilitres', 'millilitre'),
('min', 'minute'),
('minutes', 'minute'),
('months', 'month'),
('ounce', 'fluid_ounce'),
('ounces', 'fluid_ounce'),
('ounces', 'ounce'),
('oz', 'fluid_ounce'),
('oz', 'ounce'),
('ozs', 'fluid_ounce'),
('ozs', 'ounce'),
('pints', 'pint'),
('pounds', 'pound'),
('qt', 'quart'),
('qts', 'quart'),
('quarts', 'quart'),
('sec', 'second'),
('seconds', 'second'),
('short ton', 'short_ton'),
('short tons', 'short_ton'),
('sq centimeter', 'square_centimeter'),
('sq centimeters', 'square_centimeter'),
('sq cm', 'square_centimeter'),
('sq feet', 'square_foot'),
('sq foot', 'square_foot'),
('sq ft', 'square_foot'),
('sq in', 'square_inch'),
('sq inch', 'square_inch'),
('sq inches', 'square_inch'),
('sq kilometer', 'square_kilometer'),
('sq kilometers', 'square_kilometer'),
('sq km', 'square_kilometer'),
('sq m', 'square_meter'),
('sq meter', 'square_meter'),
('sq meters', 'square_meter'),
('sq mi', 'square_mile'),
('sq mile', 'square_mile'),
('sq miles', 'square_mile'),
('sq y', 'square_yard'),
('sq yard', 'square_yard'),
('sq yards', 'square_yard'),
('square centimeter', 'square_centimeter'),
('square centimeters', 'square_centimeter'),
('square cm', 'square_centimeter'),
('square feet', 'square_foot'),
('square foot', 'square_foot'),
('square ft', 'square_foot'),
('square in', 'square_inch'),
('square inch', 'square_inch'),
('square inches', 'square_inch'),
('square kilometer', 'square_kilometer'),
('square kilometers', 'square_kilometer'),
('square km', 'square_kilometer'),
('square m', 'square_meter'),
('square meter', 'square_meter'),
('square meters', 'square_meter'),
('square mi', 'square_mile'),
('square mile', 'square_mile'),
('square miles', 'square_mile'),
('square y', 'square_yard'),
('square yard', 'square_yard'),
('square yards', 'square_yard'),
('tablespoons', 'tablespoon'),
('tbs', 'tablespoon'),
('tbsp', 'tablespoon'),
('teaspoons', 'teaspoon'),
('ton', 'short_ton'),
('tonne', 'metric_ton'),
('ts', 'teaspoon'),
('tsp', 'teaspoon'),
('weeks', 'week'),
('yards', 'yard'),
('years', 'year'),
]
# human unit representation for outputs to the user
UNITS_OUTPUT = {
'are': ('{} are', '{} ares'),
'celsius': ('{}°C', '{}°C'),
'centimeter': ('{} centimeter', '{} centimeters'),
'cubic_centimeter': ('{} cubic centimeter', '{} cubic centimeters'),
'cubic_foot': ('{} cubic foot', '{} cubic feet'),
'cubic_inch': ('{} cubic inch', '{} cubic inches'),
'cubic_kilometer': ('{} cubic kilometer', '{} cubic kilometers'),
'cubic_meter': ('{} cubic meter', '{} cubic meters'),
'cubic_mile': ('{} cubic mile', '{} cubic miles'),
'cubic_yard': ('{} cubic yard', '{} cubic yards'),
'cup': ('{} US cup', '{} US cups'),
'day': ('{} day', '{} days'),
'fahrenheit': ('{}°F', '{}°F'),
'fluid_ounce': ('{} US fluid ounce', '{} US fluid ounces'),
'foot': ('{} foot', '{} feet'),
'gallon': ('{} US gallon', '{} US gallons'),
'gram': ('{} gram', '{} grams'),
'hectare': ('{} hectare', '{} hectares'),
'hour': ('{} hour', '{} hours'),
'inch': ('{} inch', '{} inches'),
'kelvin': ('{}K', '{}K'),
'kilogram': ('{} kilogram', '{} kilograms'),
'kilometer': ('{} kilometer', '{} kilometers'),
'litre': ('{} litre', '{} litres'),
'meter': ('{} meter', '{} meters'),
'metric_ton': ('{} metric ton', '{} metric tons'),
'mile': ('{} mile', '{} miles'),
'milligram': ('{} milligram', '{} milligrams'),
'millilitre': ('{} millilitre', '{} millilitres'),
'minute': ('{} minute', '{} minutes'),
'month': ('{} month', '{} months'),
'ounce': ('{} ounce', '{} ounces'),
'pint': ('{} US pint', '{} US pints'),
'pound': ('{} pound', '{} pounds'),
'quart': ('{} quart', '{} quarts'),
'second': ('{} second', '{} seconds'),
'square_centimeter': ('{} square centimeter', '{} square centimeters'),
'square_foot': ('{} square foot', '{} square feet'),
'square_inch': ('{} square inch', '{} square inches'),
'square_kilometer': ('{} square kilometer', '{} square kilometers'),
'square_meter': ('{} square meter', '{} square meters'),
'square_mile': ('{} square mile', '{} square miles'),
'square_yard': ('{} square yard', '{} square yards'),
'tablespoon': ('{} US tablespoon', '{} US tablespoons'),
'teaspoon': ('{} US teaspoon', '{} US teaspoons'),
'short_ton': ('{} short ton', '{} short tons'),
'week': ('{} week', '{} weeks'),
'yard': ('{} yard', '{} yards'),
'year': ('{} year', '{} years'),
}
# normal connectors in user input
CONNECTORS = [
'to',
'in',
]
# facts list to provide useful/fun information about numbers
NUMBERS_INFO = [
(3.2, 'meters', 'wingspan', 'a large andean condor'),
(5.5, 'meters', 'length', 'a white wale'),
(41, 'centimeters', 'height', 'a blue penguin'),
(146, 'meters', 'height', 'the Great Pyramid of Giza'),
(113, 'km/h', 'top speed', 'a cheetah'),
(3475, 'kilometers', 'diameter', 'the Moon'),
(1600, 'kilograms', 'weight', ' a white wale'),
(8850, 'meters', 'height', 'Mount Everest'),
(5500, '°C', 'temperature', 'the surface of the Sun'),
(12756, 'kilometers', 'diameter', 'the Earth'),
(6430, 'kilometers', 'lenght', 'the Great Wall of China'),
(100, '°C', 'temperature', 'boiling water'),
]
# we will not always select the best match for number info (as it will be
# too repeated), but will select randomly between the top N:
NUMBERS_UNCERTAINTY = 3
# table to suggest a second unit; general rules are:
# - if it's temperature, just go celsius<->fahrenheit
# - if it's time, go to a lower unit, but not immediate one (which is
# so easy that user shouldn't need it the unit conversor)
# - for the rest, just go imperial<->metric, using a similar size unit
SUGGESTED_SECOND_UNIT = {
'are': 'square_yard',
'celsius': 'fahrenheit',
'centimeter': 'inch',
'cubic_centimeter': 'fluid_ounce',
'cubic_foot': 'litre',
'cubic_inch': 'millilitre',
'cubic_kilometer': 'cubic_mile',
'cubic_meter': 'cubic_yard',
'cubic_mile': 'cubic_kilometer',
'cubic_yard': 'cubic_meter',
'cup': 'millilitre',
'day': 'hour',
'fahrenheit': 'celsius',
'fluid_ounce': 'millilitre',
'foot': 'meter',
'gallon': 'litre',
'gram': 'ounce',
'hectare': 'square_mile',
'hour': 'second',
'inch': 'centimeter',
'kilogram': 'pound',
'kilometer': 'mile',
'litre': 'gallon',
'meter': 'yard',
'mile': 'kilometer',
'minute': 'second',
'month': 'day',
'ounce': 'gram',
'pint': 'litre',
'pound': 'kilogram',
'quart': 'litre',
'square_centimeter': 'square_inch',
'square_foot': 'square_meter',
'square_inch': 'square_centimeter',
'square_kilometer': 'square_mile',
'square_meter': 'square_foot',
'square_mile': 'square_kilometer',
'square_yard': 'square_meter',
'tablespoon': 'millilitre',
'teaspoon': 'millilitre',
'week': 'hour',
'yard': 'meter',
'year': 'day',
}
class _UnitManager(object):
    """A unique class to hold all units mambo jambo.

    Builds, from the module-level unit tables, everything `convert` needs:
    a token -> candidate-canonical-units mapping, the token list used to
    scan the input text, and the multi-word unit replacements.
    """
    def __init__(self):
        # generate the main unit conversion structure: every token (canonical
        # name, synonym, or symbol) maps to the list of canonical units it
        # may refer to
        self._units = _u = {k: [k] for k in SUPPORTED_UNITS}
        for name, syn in EXTRA_UNITS_INPUT:
            _u.setdefault(name, []).append(syn)
        for symbol, unit, linear in UNIT_SYMBOLS:
            _u.setdefault(symbol, []).append(unit)
            if linear:
                # linear units also get area/volume variants, keyed by the
                # SUPERSCRIPT markers produced by convert()'s normalization
                _u[symbol + 'SUPERSCRIPT_TWO'] = _u['square_' + unit]
                _u[symbol + 'SUPERSCRIPT_THREE'] = _u['cubic_' + unit]
        # generate the useful tokens, longest first so scans prefer the
        # most specific token
        _all_tokens = set(itertools.chain(_u.keys(), CONNECTORS))
        self.useful_tokens = sorted(_all_tokens, key=len, reverse=True)
        # generate the complex (multi-word) units conversion, longest first
        _c = ((k, v) for k, v in EXTRA_UNITS_INPUT if ' ' in k)
        self.complex_units = sorted(_c, key=lambda x: len(x[0]), reverse=True)
        # the connectors (filler tokens removed later by convert())
        self.connectors = CONNECTORS
    def get_units_info(self, unit_token_from, unit_token_to):
        """Return the info for the unit.

        Crosses every candidate canonical unit of both tokens and keeps
        the dimensionally compatible pairs; a (UnitInfo, UnitInfo) tuple
        is returned only when exactly one pair matches, otherwise None
        (implicitly, when the crossing is empty or ambiguous).
        Raises KeyError if a token is unknown.
        """
        base_units_from = self._units[unit_token_from]
        base_units_to = self._units[unit_token_to]
        useful = []
        for b_u_from in base_units_from:
            for b_u_to in base_units_to:
                mult_from, u_from = SUPPORTED_UNITS[b_u_from]
                mult_to, u_to = SUPPORTED_UNITS[b_u_to]
                if u_from.dimensionality == u_to.dimensionality:
                    h_from_s, h_from_p = UNITS_OUTPUT[b_u_from]
                    h_to_s, h_to_p = UNITS_OUTPUT[b_u_to]
                    useful.append((UnitInfo(mult_from, u_from, h_from_s, h_from_p),
                                   UnitInfo(mult_to, u_to, h_to_s, h_to_p)))
        # return units info if there's a nice crossing and no ambiguity
        if len(useful) == 1:
            return useful[0]
    def suggest(self, unit_token_from):
        """Suggest a second destination unit.

        Returns the SUGGESTED_SECOND_UNIT entry for the first candidate
        canonical unit that has one, or None (implicitly).
        """
        base_units_from = self._units[unit_token_from]
        for b_u_from in base_units_from:
            if b_u_from in SUGGESTED_SECOND_UNIT:
                return SUGGESTED_SECOND_UNIT[b_u_from]
# module-level singleton: built once at import time, shared by convert()
unit_manager = _UnitManager()
def _numbers_info(number):
    """Provide useful/fun info about some numbers.

    Compare *number* against the facts in NUMBERS_INFO; when it is roughly
    half of / close to / a small multiple of a fact's value, build a
    human-readable sentence.  Returns one sentence chosen at random among
    the NUMBERS_UNCERTAINTY facts closest in order of magnitude to
    *number*, or None when nothing matches.
    """
    results = []
    for value, unit, dimension, target in NUMBERS_INFO:
        # Explicit format arguments: the original grabbed locals(), which is
        # fragile (captures unrelated names) and hides what the templates use.
        vals = {'number': number, 'unit': unit,
                'dimension': dimension, 'target': target}
        msg = None
        if value * .4 <= number <= value * .6:
            msg = "{number} {unit} is about half of the {dimension} of {target}"
        elif value * .9 <= number <= value * 1.1:
            msg = "{number} {unit} is close to the {dimension} of {target}"
        elif value * 1.7 <= number <= value * 100:
            vals['mult'] = int(round(number / value))
            msg = "{number} {unit} is around {mult} times the {dimension} of {target}"
        if msg is not None:
            text = msg.format(**vals)
            # rank matches by distance in order of magnitude
            distance = abs(math.log10(number) - math.log10(value))
            results.append((distance, text))
    if results:
        # pick randomly among the closest matches so answers don't repeat
        return random.choice([x[1] for x in sorted(results)[:NUMBERS_UNCERTAINTY]])
def parse_number(m):
    """Return a numeric value from a match exposing int/frac/exp groups."""
    whole, frac, exp = m.group('int', 'frac', 'exp')
    # integer part may be empty (e.g. ".25")
    value = int(whole) if whole else 0
    if frac:
        # fractional digits scaled down by their count
        value += float(frac) / 10 ** len(frac)
    if exp:
        # optional scientific-notation exponent (may be signed)
        value *= 10 ** int(exp)
    return value
def convert(source):
    """Parse and convert the units found in the source text.

    Returns a human-readable conversion string, a fun fact when the text is
    a lone number, or None when the input cannot be interpreted.
    """
    logger.debug("Input: %r", source)
    text = source.strip().lower()
    # normalize square and cubic combinations
    text = re.sub(r" *?\*\* *?2| *?\^ *?2|(?<=[a-zA-Z])2|²", 'SUPERSCRIPT_TWO', text)
    text = re.sub(r" *?\*\* *?3| *?\^ *?3|(?<=[a-zA-Z])3|³", 'SUPERSCRIPT_THREE', text)
    # replace the complex units to something useful
    for cu, real in unit_manager.complex_units:
        text = re.sub(cu, real, text)
    logger.debug("Preconverted: %r", text)
    m = re.search(RE_NUMBER, text, re.VERBOSE)
    if not m:
        logger.debug("OOPS, not number found")
        return
    number = parse_number(m)
    num_start, num_end = m.span()
    logger.debug("Number: %r (limit=%s)", number, m.span())
    tokens = []
    found_tokens_before = False
    # BUG FIX: re.split's third positional argument is *maxsplit*, not
    # flags; the original passed re.UNICODE (== 32) there, silently capping
    # the number of splits.  Pass it through the `flags` keyword instead.
    for part in re.split(r'\W', text[:num_start], flags=re.UNICODE):
        for token in unit_manager.useful_tokens:
            if part == token:
                found_tokens_before = True
                tokens.append(token)
                break
    for part in re.split(r'\W', text[num_end:], flags=re.UNICODE):
        for token in unit_manager.useful_tokens:
            if part == token:
                tokens.append(token)
    logger.debug("Tokens found: %s", tokens)
    if len(tokens) == 0:
        # only give number info if the number is alone
        if num_end - num_start == len(text.strip()):
            ni = _numbers_info(number)
            logger.debug("Numbers info: %r", ni)
            return ni
        else:
            return
    if len(tokens) == 1:
        # suggest the second unit
        suggested = unit_manager.suggest(tokens[0])
        if suggested is None:
            return
        # use suggested unit and assure it's the destination one
        logger.debug("Suggesting 2nd unit: %r", suggested)
        tokens.append(suggested)
        found_tokens_before = False
    if len(tokens) > 2:
        # drop connector words until only the two unit tokens remain
        for conn in unit_manager.connectors:
            if conn in tokens:
                tokens.remove(conn)
                if len(tokens) == 2:
                    break
        else:
            logger.debug("OOPS, not enough tokens")
            return
    logger.debug("Tokens filtered: %s", tokens)
    if not found_tokens_before:
        # everything is after the number
        t_from_pos = 0
        t_to_pos = 1
    else:
        t_from_pos = 1
        t_to_pos = 0
    logger.debug("Token selector: from=%s to=%s", t_from_pos, t_to_pos)
    t_from = tokens[t_from_pos]
    t_to = tokens[t_to_pos]
    units_info = unit_manager.get_units_info(t_from, t_to)
    if units_info is None:
        logger.debug("OOPS, no matching units")
        return
    unit_from, unit_to = units_info
    to_convert = _ureg.Quantity(number, unit_from.unit)
    if unit_from.mult is not None:
        to_convert *= unit_from.mult
    try:
        converted = to_convert.to(unit_to.unit)
    except pint.unit.DimensionalityError:
        logger.debug("OOPS, dimensionality error")
        return
    if unit_to.mult is not None:
        converted /= unit_to.mult
    logger.debug("Converted: %r", converted)
    rounded = round(converted.magnitude, 4)
    human_from, human_to = unit_from.human_plural, unit_to.human_plural
    # care about result formatting
    if isinstance(rounded, int) or rounded.is_integer():
        if rounded == 1:
            human_to = unit_to.human_single
        nicer_res = str(int(rounded))
    else:
        nicer_res = "%.4f" % rounded
        # as it's not an integer, remove extra 0s at the right
        while nicer_res[-1] == '0':
            nicer_res = nicer_res[:-1]
    logger.debug("Nicer number: %r", nicer_res)
    # care about source formatting
    if number == 1:
        human_from = unit_from.human_single
    if isinstance(number, float) and number.is_integer():
        nicer_orig = str(int(number))
    else:
        nicer_orig = str(number)
    return human_from.format(nicer_orig) + ' = ' + human_to.format(nicer_res)
USAGE = """
Usage: unitconv <expression>
ej: unitconv 42 km to miles
"""
def main():
    """Main entry point to run as script. Use `convert` instead if as module."""
    arguments = sys.argv[1:]
    # guard clause: nothing to convert -> show usage help
    if not arguments:
        print(USAGE)
    else:
        print(convert(" ".join(arguments)))
if __name__ == '__main__':
    # set up logging so it's easier to debug: both the logger and the
    # stream handler must be at DEBUG for the messages to come through
    logger.setLevel(logging.DEBUG)
    h = logging.StreamHandler()
    h.setLevel(logging.DEBUG)
    logger.addHandler(h)
    print("Response:", convert(" ".join(sys.argv[1:])))
| en | 0.806722 | # Copyright 2010-2018 Canonical Ltd. # Copyright 2020 <NAME> # All Rights Reserved A units converter. # crazy regex to match a number; this comes from the Python's Decimal code, # adapted to support also commas # A numeric string consists of: (?=\d|\.\d|\,\d) # starts with a number or a point/comma (?P<int>\d*) # having a (possibly empty) integer part ((\.|\,)(?P<frac>\d*))? # followed by an optional fractional part ((e|E)(?P<exp>[-+]?\d+))? # followed by an optional exponent, or... # supported units by the system; the key is the reference name, its # multiplier (if any) and the pint unit # unit symbols (not to be translated), indicating the symbol, the supported # unit name, and if it's linear (so we add area and volume postfixes) # synonyms, abbreviations, and other names for same unit; and also # multi-word conversions # human unit representation for outputs to the user # normal connectors in user input # facts list to provide useful/fun information about numbers # we will not always select the best match for number info (as it will be # too repeated), but will select randomly between the top N: # table to suggest a second unit; general rules are: # - if it's temperature, just go celsius<->fahrenheit # - if it's time, go to a lower unit, but not immediate one (which is # so easy that user shouldn't need it the unit conversor) # - for the rest, just go imperial<->metric, using a similar size unit A unique class to hold all units mambo jambo. # generate the main unit conversion structure # generate the useful tokens # generate the complex units conversion # the connectors Return the info for the unit. # return units info if there's a nice crossing and no ambiguity Suggest a second destination unit. Provide useful/fun info about some numbers. Return a float from a match of the regex above. Parse and convert the units found in the source text. 
# normalize square and cubic combinations # replace the complex units to something useful # only give number info if the number is alone # suggest the second unit # use suggested unit and assure it's the destination one # everything is after the number # care about result formatting # as it's not an integer, remove extra 0s at the right # care about source formatting Usage: unitconv <expression> ej: unitconv 42 km to miles Main entry point to run as script. Use `convert` instead if as module. # set up logging so it's easier to debug | 3.373235 | 3 |
tests/resources/config/default_config_2.py | elijahbenizzy/hamilton | 298 | 6620978 | <filename>tests/resources/config/default_config_2.py<gh_stars>100-1000
def some_key() -> str:
    """Return the fixed configuration value."""
    value = 'some_value'
    return value
| <filename>tests/resources/config/default_config_2.py<gh_stars>100-1000
def some_key() -> str:
return 'some_value'
| none | 1 | 1.494413 | 1 | |
rainy/envs/ext.py | alexmlamb/blocks_rl_gru_setup | 0 | 6620979 | <gh_stars>0
from abc import ABC
import gym
from gym import spaces
from numpy import ndarray
from typing import Any, Generic, Tuple
from ..prelude import Action, State
class EnvSpec:
    """Shape/space description shared by environment wrappers."""

    def __init__(
            self,
            state_dim: Tuple[int, ...],
            action_space: gym.Space,
            use_reward_monitor: bool = False
    ) -> None:
        """Properties which are common both in EnvExt and ParallelEnv
        """
        self.state_dim = state_dim
        self.action_space = action_space
        self.use_reward_monitor = use_reward_monitor
        # Derive the flat action dimension from the space type.
        if isinstance(action_space, spaces.Discrete):
            self.action_dim = action_space.n
        elif isinstance(action_space, spaces.Box):
            # only rank-1 Box spaces are supported
            if len(action_space.shape) != 1:
                raise RuntimeError('Box space with shape >= 2 is not supported')
            self.action_dim = action_space.shape[0]
        else:
            # fixed typo in the error message ("supportd" -> "supported")
            raise RuntimeError('{} is not supported'.format(type(action_space)))

    def random_action(self) -> Action:
        """Sample one action uniformly from the action space."""
        return self.action_space.sample()

    def is_discrete(self) -> bool:
        """Return True when the action space is a Discrete space."""
        return isinstance(self.action_space, spaces.Discrete)
class EnvExt(gym.Env, ABC, Generic[Action, State]):
    """Wrapper around ``gym.Env`` adding convenience accessors.

    All standard gym calls are delegated to the wrapped environment; its
    dimensions are exposed through ``self.spec`` (an :class:`EnvSpec`).
    """

    def __init__(self, env: gym.Env) -> None:
        self._env = env
        self.spec = EnvSpec(self._env.observation_space.shape, self._env.action_space)

    def close(self):
        """
        Inherited from gym.Env.
        """
        # BUG FIX: the original evaluated `self._env.close` without calling
        # it, so the wrapped environment was never actually closed.
        self._env.close()

    def reset(self) -> State:
        """
        Inherited from gym.Env.
        """
        return self._env.reset()

    def render(self, mode: str = 'human') -> None:
        """
        Inherited from gym.Env.
        """
        self._env.render(mode=mode)

    def seed(self, seed: int) -> None:
        """
        Inherited from gym.Env.
        """
        self._env.seed(seed)

    def step(self, action: Action) -> Tuple[State, float, bool, Any]:
        """
        Inherited from gym.Env.
        """
        return self._env.step(action)

    def step_and_reset(self, action: Action) -> Tuple[State, float, bool, Any]:
        """Step the env, resetting it automatically when the episode ends.

        Note: on episode end the returned state is the *reset* state, while
        reward/done/info still describe the final step.
        """
        state, reward, done, info = self.step(action)
        if done:
            state = self.reset()
        return state, reward, done, info

    @property
    def unwrapped(self) -> gym.Env:
        """
        Inherited from gym.Env.
        """
        return self._env.unwrapped

    @property
    def action_dim(self) -> int:
        """
        Extended method.
        Returns a ndim of action space.
        """
        return self.spec.action_dim

    @property
    def state_dim(self) -> Tuple[int, ...]:
        """
        Extended method.
        Returns a shape of observation space.
        """
        return self.spec.state_dim

    @property
    def use_reward_monitor(self) -> bool:
        """Atari wrappers need RewardMonitor for evaluation.
        """
        return self.spec.use_reward_monitor

    @property
    def observation_space(self) -> gym.Space:
        """Observation space of the wrapped environment."""
        return self._env.observation_space

    @property
    def action_space(self) -> gym.Space:
        """Action space of the wrapped environment."""
        return self._env.action_space

    @staticmethod
    def extract(state: State) -> ndarray:
        """
        Extended method.
        Convert state to ndarray.
        It's useful for the cases where numpy.ndarray representation is too large to
        throw it to replay buffer directly.
        """
        return state

    def save_history(self, file_name: str) -> None:
        """
        Extended method.
        Save agent's action history to file.
        """
        pass

    def __repr__(self) -> str:
        return 'EnvExt({})'.format(self._env)
| from abc import ABC
import gym
from gym import spaces
from numpy import ndarray
from typing import Any, Generic, Tuple
from ..prelude import Action, State
class EnvSpec:
def __init__(
self,
state_dim: Tuple[int, ...],
action_space: gym.Space,
use_reward_monitor: bool = False
) -> None:
"""Properties which are common both in EnvExt and ParallelEnv
"""
self.state_dim = state_dim
self.action_space = action_space
self.use_reward_monitor = use_reward_monitor
if isinstance(action_space, spaces.Discrete):
self.action_dim = action_space.n
elif isinstance(action_space, spaces.Box):
if len(action_space.shape) != 1:
raise RuntimeError('Box space with shape >= 2 is not supportd')
self.action_dim = action_space.shape[0]
else:
raise RuntimeError('{} is not supported'.format(type(action_space)))
def random_action(self) -> Action:
return self.action_space.sample()
def is_discrete(self) -> bool:
return isinstance(self.action_space, spaces.Discrete)
class EnvExt(gym.Env, ABC, Generic[Action, State]):
def __init__(self, env: gym.Env) -> None:
self._env = env
self.spec = EnvSpec(self._env.observation_space.shape, self._env.action_space)
def close(self):
"""
Inherited from gym.Env.
"""
self._env.close
def reset(self) -> State:
"""
Inherited from gym.Env.
"""
return self._env.reset()
def render(self, mode: str = 'human') -> None:
"""
Inherited from gym.Env.
"""
self._env.render(mode=mode)
def seed(self, seed: int) -> None:
"""
Inherited from gym.Env.
"""
self._env.seed(seed)
def step(self, action: Action) -> Tuple[State, float, bool, Any]:
"""
Inherited from gym.Env.
"""
return self._env.step(action)
def step_and_reset(self, action: Action) -> Tuple[State, float, bool, Any]:
state, reward, done, info = self.step(action)
if done:
state = self.reset()
return state, reward, done, info
@property
def unwrapped(self) -> gym.Env:
"""
Inherited from gym.Env.
"""
return self._env.unwrapped
@property
def action_dim(self) -> int:
"""
Extended method.
Returns a ndim of action space.
"""
return self.spec.action_dim
@property
def state_dim(self) -> Tuple[int, ...]:
"""
Extended method.
Returns a shape of observation space.
"""
return self.spec.state_dim
@property
def use_reward_monitor(self) -> bool:
"""Atari wrappers need RewardMonitor for evaluation.
"""
return self.spec.use_reward_monitor
@property
def observation_space(self) -> gym.Space:
return self._env.observation_space
@property
def action_space(self) -> gym.Space:
return self._env.action_space
@staticmethod
def extract(state: State) -> ndarray:
"""
Extended method.
Convert state to ndarray.
It's useful for the cases where numpy.ndarray representation is too large to
throw it to replay buffer directly.
"""
return state
def save_history(self, file_name: str) -> None:
"""
Extended method.
Save agent's action history to file.
"""
pass
def __repr__(self) -> str:
return 'EnvExt({})'.format(self._env) | en | 0.855157 | Properties which are common both in EnvExt and ParallelEnv Inherited from gym.Env. Inherited from gym.Env. Inherited from gym.Env. Inherited from gym.Env. Inherited from gym.Env. Inherited from gym.Env. Extended method. Returns a ndim of action space. Extended method. Returns a shape of observation space. Atari wrappers need RewardMonitor for evaluation. Extended method. Convert state to ndarray. It's useful for the cases where numpy.ndarray representation is too large to throw it to replay buffer directly. Extended method. Save agent's action history to file. | 2.43961 | 2 |
TexasCovidLast40Days.py | gregwa1953/FCM-171 | 0 | 6620980 | <filename>TexasCovidLast40Days.py<gh_stars>0
import plotext as plt

# Monotonically increasing daily totals for the last 40 days (per the plot
# title below); presumably cumulative Texas case counts -- confirm source.
data = [
    2520151, 2522016, 2523617, 2524212, 2524688, 2525991, 2527350, 2529068,
    2530176, 2530658, 2530910, 2531194, 2532112, 2533123, 2534149, 2534730,
    2536217, 2536646, 2536691, 2538120, 2538908, 2540318, 2541586, 2542877,
    2543158, 2543253, 2544966, 2546040, 2547411, 2548878, 2549911, 2550317,
    2550615, 2551284, 2552598, 2555504, 2557363, 2559192, 2559734, 2560137
]
print(data)
plt.plot(data)
plt.plotsize(100, 40)  # terminal plot: 100 columns x 40 rows
plt.title('Covid-19 numbers last 40 days')
plt.show()
| <filename>TexasCovidLast40Days.py<gh_stars>0
import plotext as plt
data = [
2520151, 2522016, 2523617, 2524212, 2524688, 2525991, 2527350, 2529068,
2530176, 2530658, 2530910, 2531194, 2532112, 2533123, 2534149, 2534730,
2536217, 2536646, 2536691, 2538120, 2538908, 2540318, 2541586, 2542877,
2543158, 2543253, 2544966, 2546040, 2547411, 2548878, 2549911, 2550317,
2550615, 2551284, 2552598, 2555504, 2557363, 2559192, 2559734, 2560137
]
print(data)
plt.plot(data)
plt.plotsize(100, 40)
plt.title('Covid-19 numbers last 40 days')
plt.show()
| none | 1 | 3.092186 | 3 | |
dist-packages/deepin_utils/multithread.py | Jianwei-Wang/python2.7_lib | 0 | 6620981 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 ~ 2012 Deepin, Inc.
# 2011 ~ 2012 Wang Yong
#
# Author: <NAME> <<EMAIL>>
# Maintainer: <NAME> <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import threading
def create_thread(target, args=()):
    """Build (without starting) a daemon thread running *target* with *args*."""
    worker = threading.Thread(target=target, args=args)
    # daemon threads don't block interpreter shutdown
    worker.daemon = True
    return worker
| #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 ~ 2012 Deepin, Inc.
# 2011 ~ 2012 Wang Yong
#
# Author: <NAME> <<EMAIL>>
# Maintainer: <NAME> <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import threading
def create_thread(target, args=()):
thread = threading.Thread(target=target, args=args)
thread.setDaemon(True)
return thread
| en | 0.818903 | #! /usr/bin/env python # -*- coding: utf-8 -*- # Copyright (C) 2011 ~ 2012 Deepin, Inc. # 2011 ~ 2012 Wang Yong # # Author: <NAME> <<EMAIL>> # Maintainer: <NAME> <<EMAIL>> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. | 3.286192 | 3 |
tests/test_responses/test_response_spec.py | adriangb/xpresso | 75 | 6620982 | from typing import Any, Dict, List, Tuple, Union
from pydantic import BaseModel
from xpresso import App, Operation, Path
from xpresso.responses import FileResponse, JSONResponse, ResponseSpec
from xpresso.testclient import TestClient
def test_default_response_spec_merge_with_top_level_parameters() -> None:
    """An Operation-level response spec is merged into the OpenAPI schema."""
    async def endpoint() -> None:
        ...
    app = App(
        routes=[
            Path(
                "/",
                post=Operation(
                    endpoint,
                    response_status_code=201,
                    responses={201: ResponseSpec(description="Item created")},
                ),
            )
        ]
    )
    expected_openapi: Dict[str, Any] = {
        "openapi": "3.0.3",
        "info": {"title": "API", "version": "0.1.0"},
        "paths": {
            "/": {
                "post": {
                    "responses": {
                        "201": {
                            "description": "Item created",
                            "content": {
                                "application/json": {},
                            },
                        }
                    }
                }
            }
        },
    }
    client = TestClient(app)
    # the endpoint must honour the configured status code...
    resp = client.post("/")
    assert resp.status_code == 201, resp.content
    # ...and the OpenAPI document must carry the custom description
    resp = client.get("/openapi.json")
    assert resp.status_code == 200, resp.content
    assert resp.json() == expected_openapi
def test_response_spec_merged_from_router() -> None:
    """A response spec declared on the App propagates to each operation."""
    async def endpoint() -> None:
        ...
    app = App(
        routes=[
            Path(
                "/",
                post=endpoint,
            )
        ],
        responses={200: ResponseSpec(description="All good!")},
    )
    expected_openapi: Dict[str, Any] = {
        "openapi": "3.0.3",
        "info": {"title": "API", "version": "0.1.0"},
        "paths": {
            "/": {
                "post": {
                    "responses": {
                        "200": {
                            "description": "All good!",
                            "content": {
                                "application/json": {},
                            },
                        }
                    }
                }
            }
        },
    }
    client = TestClient(app)
    resp = client.get("/openapi.json")
    assert resp.status_code == 200, resp.content
    assert resp.json() == expected_openapi
def test_default_response_spec_response_model_inferred() -> None:
    """Response schemas are inferred from each endpoint's return annotation."""
    # builtin generic -> array schema
    def returns_builtin() -> List[str]:
        ...
    # no annotation / None -> bare JSON content, no schema
    def no_return():
        ...
    def returns_none() -> None:
        ...
    # Response types (alone or in a union) disable schema inference
    def returns_response() -> JSONResponse:
        ...
    def returns_response_union() -> Union[str, FileResponse]:
        ...
    # union of plain models -> anyOf schema
    def returns_model_union() -> Union[str, int]:
        ...
    class Model(BaseModel):
        foo: int
    def returns_pydantic_model() -> Model:
        ...
    app = App(
        routes=[
            Path(
                "/returns_builtin",
                get=returns_builtin,
            ),
            Path(
                "/returns_builtin-overriden",
                get=Operation(returns_builtin, response_model=Tuple[str, str]),
            ),
            Path(
                "/no_return",
                get=no_return,
            ),
            Path(
                "/returns_none",
                get=returns_none,
            ),
            Path(
                "/returns_response",
                get=returns_response,
            ),
            Path(
                "/returns_response_union",
                get=returns_response_union,
            ),
            Path(
                "/returns_model_union",
                get=returns_model_union,
            ),
            Path(
                "/returns_pydantic_model",
                get=returns_pydantic_model,
            ),
        ]
    )
    expected_openapi: Dict[str, Any] = {
        "openapi": "3.0.3",
        "info": {"title": "API", "version": "0.1.0"},
        "paths": {
            "/no_return": {
                "get": {
                    "responses": {
                        "200": {
                            "description": "OK",
                            "content": {"application/json": {}},
                        }
                    }
                }
            },
            "/returns_builtin": {
                "get": {
                    "responses": {
                        "200": {
                            "description": "OK",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "type": "array",
                                        "items": {"type": "string"},
                                    }
                                }
                            },
                        }
                    }
                }
            },
            "/returns_builtin-overriden": {
                "get": {
                    "responses": {
                        "200": {
                            "description": "OK",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "maxItems": 2,
                                        "minItems": 2,
                                        "type": "array",
                                        "items": [
                                            {"type": "string"},
                                            {"type": "string"},
                                        ],
                                    }
                                }
                            },
                        }
                    }
                }
            },
            "/returns_model_union": {
                "get": {
                    "responses": {
                        "200": {
                            "description": "OK",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "anyOf": [
                                            {"type": "string"},
                                            {"type": "integer"},
                                        ],
                                    }
                                }
                            },
                        }
                    }
                }
            },
            "/returns_none": {
                "get": {
                    "responses": {
                        "200": {
                            "description": "OK",
                            "content": {"application/json": {}},
                        }
                    }
                }
            },
            "/returns_pydantic_model": {
                "get": {
                    "responses": {
                        "200": {
                            "description": "OK",
                            "content": {
                                "application/json": {
                                    "schema": {"$ref": "#/components/schemas/Model"}
                                }
                            },
                        }
                    }
                }
            },
            "/returns_response": {
                "get": {
                    "responses": {
                        "200": {
                            "description": "OK",
                            "content": {"application/json": {}},
                        }
                    }
                }
            },
            "/returns_response_union": {
                "get": {
                    "responses": {
                        "200": {
                            "description": "OK",
                            "content": {"application/json": {}},
                        }
                    }
                }
            },
        },
        "components": {
            "schemas": {
                "Model": {
                    "title": "Model",
                    "required": ["foo"],
                    "type": "object",
                    "properties": {"foo": {"title": "Foo", "type": "integer"}},
                }
            }
        },
    }
    client = TestClient(app)
    resp = client.get("/openapi.json")
    assert resp.status_code == 200, resp.content
    assert resp.json() == expected_openapi
def test_response_description_from_status_code() -> None:
    """Missing descriptions default to the HTTP status reason phrase."""
    async def endpoint() -> None:
        ...
    app = App(
        routes=[
            Path(
                "/",
                get=Operation(
                    endpoint,
                    response_status_code=201,
                    responses={429: ResponseSpec(), "5XX": ResponseSpec()},
                ),
            )
        ]
    )
    expected_openapi: Dict[str, Any] = {
        "openapi": "3.0.3",
        "info": {"title": "API", "version": "0.1.0"},
        "paths": {
            "/": {
                "get": {
                    "responses": {
                        "429": {"description": "Too Many Requests"},
                        "5XX": {"description": "Server Error"},
                        "201": {
                            "description": "Created",
                            "content": {"application/json": {}},
                        },
                    }
                }
            }
        },
    }
    client = TestClient(app)
    resp = client.get("/openapi.json")
    assert resp.status_code == 200, resp.content
    assert resp.json() == expected_openapi
| from typing import Any, Dict, List, Tuple, Union
from pydantic import BaseModel
from xpresso import App, Operation, Path
from xpresso.responses import FileResponse, JSONResponse, ResponseSpec
from xpresso.testclient import TestClient
def test_default_response_spec_merge_with_top_level_parameters() -> None:
async def endpoint() -> None:
...
app = App(
routes=[
Path(
"/",
post=Operation(
endpoint,
response_status_code=201,
responses={201: ResponseSpec(description="Item created")},
),
)
]
)
expected_openapi: Dict[str, Any] = {
"openapi": "3.0.3",
"info": {"title": "API", "version": "0.1.0"},
"paths": {
"/": {
"post": {
"responses": {
"201": {
"description": "Item created",
"content": {
"application/json": {},
},
}
}
}
}
},
}
client = TestClient(app)
resp = client.post("/")
assert resp.status_code == 201, resp.content
resp = client.get("/openapi.json")
assert resp.status_code == 200, resp.content
assert resp.json() == expected_openapi
def test_response_spec_merged_from_router() -> None:
async def endpoint() -> None:
...
app = App(
routes=[
Path(
"/",
post=endpoint,
)
],
responses={200: ResponseSpec(description="All good!")},
)
expected_openapi: Dict[str, Any] = {
"openapi": "3.0.3",
"info": {"title": "API", "version": "0.1.0"},
"paths": {
"/": {
"post": {
"responses": {
"200": {
"description": "All good!",
"content": {
"application/json": {},
},
}
}
}
}
},
}
client = TestClient(app)
resp = client.get("/openapi.json")
assert resp.status_code == 200, resp.content
assert resp.json() == expected_openapi
def test_default_response_spec_response_model_inferred() -> None:
def returns_builtin() -> List[str]:
...
def no_return():
...
def returns_none() -> None:
...
def returns_response() -> JSONResponse:
...
def returns_response_union() -> Union[str, FileResponse]:
...
def returns_model_union() -> Union[str, int]:
...
class Model(BaseModel):
foo: int
def returns_pydantic_model() -> Model:
...
app = App(
routes=[
Path(
"/returns_builtin",
get=returns_builtin,
),
Path(
"/returns_builtin-overriden",
get=Operation(returns_builtin, response_model=Tuple[str, str]),
),
Path(
"/no_return",
get=no_return,
),
Path(
"/returns_none",
get=returns_none,
),
Path(
"/returns_response",
get=returns_response,
),
Path(
"/returns_response_union",
get=returns_response_union,
),
Path(
"/returns_model_union",
get=returns_model_union,
),
Path(
"/returns_pydantic_model",
get=returns_pydantic_model,
),
]
)
expected_openapi: Dict[str, Any] = {
"openapi": "3.0.3",
"info": {"title": "API", "version": "0.1.0"},
"paths": {
"/no_return": {
"get": {
"responses": {
"200": {
"description": "OK",
"content": {"application/json": {}},
}
}
}
},
"/returns_builtin": {
"get": {
"responses": {
"200": {
"description": "OK",
"content": {
"application/json": {
"schema": {
"type": "array",
"items": {"type": "string"},
}
}
},
}
}
}
},
"/returns_builtin-overriden": {
"get": {
"responses": {
"200": {
"description": "OK",
"content": {
"application/json": {
"schema": {
"maxItems": 2,
"minItems": 2,
"type": "array",
"items": [
{"type": "string"},
{"type": "string"},
],
}
}
},
}
}
}
},
"/returns_model_union": {
"get": {
"responses": {
"200": {
"description": "OK",
"content": {
"application/json": {
"schema": {
"anyOf": [
{"type": "string"},
{"type": "integer"},
],
}
}
},
}
}
}
},
"/returns_none": {
"get": {
"responses": {
"200": {
"description": "OK",
"content": {"application/json": {}},
}
}
}
},
"/returns_pydantic_model": {
"get": {
"responses": {
"200": {
"description": "OK",
"content": {
"application/json": {
"schema": {"$ref": "#/components/schemas/Model"}
}
},
}
}
}
},
"/returns_response": {
"get": {
"responses": {
"200": {
"description": "OK",
"content": {"application/json": {}},
}
}
}
},
"/returns_response_union": {
"get": {
"responses": {
"200": {
"description": "OK",
"content": {"application/json": {}},
}
}
}
},
},
"components": {
"schemas": {
"Model": {
"title": "Model",
"required": ["foo"],
"type": "object",
"properties": {"foo": {"title": "Foo", "type": "integer"}},
}
}
},
}
client = TestClient(app)
resp = client.get("/openapi.json")
assert resp.status_code == 200, resp.content
assert resp.json() == expected_openapi
def test_response_description_from_status_code() -> None:
async def endpoint() -> None:
...
app = App(
routes=[
Path(
"/",
get=Operation(
endpoint,
response_status_code=201,
responses={429: ResponseSpec(), "5XX": ResponseSpec()},
),
)
]
)
expected_openapi: Dict[str, Any] = {
"openapi": "3.0.3",
"info": {"title": "API", "version": "0.1.0"},
"paths": {
"/": {
"get": {
"responses": {
"429": {"description": "Too Many Requests"},
"5XX": {"description": "Server Error"},
"201": {
"description": "Created",
"content": {"application/json": {}},
},
}
}
}
},
}
client = TestClient(app)
resp = client.get("/openapi.json")
assert resp.status_code == 200, resp.content
assert resp.json() == expected_openapi
| none | 1 | 2.213365 | 2 | |
deploy.py | unit9/gae-flex-deploy | 0 | 6620983 | #! /usr/bin/env python3
import argparse
import os
import shutil
import subprocess
from hashlib import sha256

from yaml import dump, load, safe_load

from settings import PROJECT_NAME
def rreplace(s, old, new, occurrence=1):
    """Replace the last *occurrence* occurrences of *old* in *s* with *new*."""
    pieces = s.rsplit(old, occurrence)
    return new.join(pieces)
# GCS bucket holding per-environment config files for this project
CONFIGS_BUCKET = 'gs://{}-configs/'.format(PROJECT_NAME)
current_directory = os.path.dirname(os.path.realpath(__file__))
parser = argparse.ArgumentParser(
    description='Deploy script, usage \n'
                'deploy.py environment --version default --app {} --appyaml app.dist.yaml'.format(PROJECT_NAME)
)
parser.add_argument('action', choices=['deploy', 'pull_config', 'push_config'],
                    help='what to do?')
parser.add_argument('environment', default='live',
                    help='env, looking for file env_{NAME}.conf in deploy directory')
parser.add_argument('--promote', default='no-promote',
                    help='promote')
parser.add_argument('--version', dest='version', default='default',
                    help='appengine flex env version')
parser.add_argument('--app', dest='app', default=PROJECT_NAME,
                    help='appengine flex env version')
parser.add_argument('--appyaml', dest='appyaml', default='app.dist.yaml',
                    help='base app yaml file default app.dist.yaml')
parser.add_argument('--force', '-f', action='store_true',
                    help='force config update')
# Load the sha256 sync cache; create an empty one on first run.
try:
    with open('.cache', 'r') as f:
        # BUG FIX: yaml.load() without a Loader is unsafe and a TypeError on
        # PyYAML >= 6; safe_load parses plain data only.  An empty file
        # yields None, so fall back to an empty dict.
        cache = safe_load(f) or {}
except FileNotFoundError:
    with open('.cache', 'w') as f:
        cache = {}
        dump(cache, f)
print(cache)
args = parser.parse_args()
env_file = "{}.env".format(args.environment)
print(args)
# get full paths
env_file_path = os.path.join(current_directory, env_file)
appyaml_file_path = os.path.join(current_directory, args.appyaml)
def check_gsutil():
    """Exit the program when the gsutil CLI is missing or not working."""
    try:
        subprocess.check_call(['gsutil', '--version'])
    except Exception as error:
        print("Failed to run gsutil: {}\n"
              "Make sure it's installed and working properly.".format(error))
        exit(1)
def check_bucket():
    """Ensure the configs bucket exists, creating it when it is missing."""
    params = ['gsutil', 'ls', '-p', PROJECT_NAME, CONFIGS_BUCKET]
    try:
        subprocess.check_output(params, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        print(e)
        # gsutil signals a missing bucket via its output text, not a
        # dedicated exit code, so inspect the captured output
        if 'BucketNotFoundException' in e.output.decode():
            print('Bucket {} doesn\'t exist, creating...'
                  .format(CONFIGS_BUCKET))
            # regional bucket in europe-west2 -- presumably the project's
            # home region; confirm before reusing elsewhere
            params = ['gsutil', 'mb', '-p', PROJECT_NAME, '-c', 'regional',
                      '-l', 'europe-west2', CONFIGS_BUCKET]
            subprocess.check_call(params)
            print('Created bucket {}'.format(CONFIGS_BUCKET))
        else:
            raise
def load_env(input_file):
result = {}
for line in input_file.readlines():
key, value = line.split('=', 1)
result[key.strip()] = value.strip()
return result
def pull_config(env_file_path, env_file):
check_gsutil()
check_bucket()
env_file_path_remote = env_file_path + '.remote'
print("Downloading {} using gsutil..."
.format(env_file))
params = ['gsutil', 'cp', os.path.join(CONFIGS_BUCKET, env_file),
env_file_path_remote]
subprocess.check_call(params)
with open(env_file_path_remote, 'r') as f:
remote_sha = sha256(f.read().encode('utf-8')).hexdigest()
try:
with open(env_file, 'r') as f:
local_sha = sha256(f.read().encode('utf-8')).hexdigest()
if remote_sha == local_sha:
print("No changes")
cache[env_file] = local_sha
return
if env_file in cache and cache[env_file] == remote_sha:
print("No new changes")
return
if env_file in cache and cache[env_file] != remote_sha and \
cache[env_file] != local_sha:
print("{} was modified both locally and on remote.\n"
"The remote config was retained in {}.\n"
"Merge it with local config and push using "
"'push_config -f'".format(env_file, env_file_path_remote))
return
except FileNotFoundError:
print("Local {} doesn't exist.".format(env_file))
shutil.move(env_file_path_remote, env_file_path)
try:
os.remove(env_file_path_remote)
except OSError:
pass
cache[env_file] = remote_sha
def push_config(env_file_path, env_file):
check_gsutil()
env_file_path_remote = env_file_path + '.remote'
try:
with open(env_file, 'r') as f:
local_sha = sha256(f.read().encode('utf-8')).hexdigest()
except FileNotFoundError:
print("Local {} doesn't exist.".format(env_file))
exit(1)
if not args.force:
print("Downloading {} using gsutil..."
.format(env_file))
params = ['gsutil', 'cp', os.path.join(CONFIGS_BUCKET, env_file),
env_file_path_remote]
subprocess.check_call(params)
with open(env_file_path_remote, 'r') as f:
remote_sha = sha256(f.read().encode('utf-8')).hexdigest()
if remote_sha == local_sha:
print("No changes")
return
if env_file in cache and cache[env_file] != remote_sha:
print("{} was modified both locally and on remote.\n"
"The remote config was retained in {}.\n"
"Merge it with local config and push using "
"'push_config -f'".format(env_file, env_file_path_remote))
return
params = ['gsutil', 'cp', env_file_path,
os.path.join(CONFIGS_BUCKET, env_file)]
subprocess.check_call(params)
try:
os.remove(env_file_path_remote)
except OSError:
pass
cache[env_file] = local_sha
def deploy(env_file_path, env_file):
# checking if files exist
if not os.path.isfile(appyaml_file_path):
print("{} does not exist, please use default app.dist.yaml"
.format(env_file_path))
exit(1)
# load files
with open(os.path.join(current_directory, env_file), 'r') as file:
env = load_env(file)
with open(os.path.join(current_directory, args.appyaml), 'r') as file:
appyaml = load(file)
if 'env_variables' not in appyaml:
appyaml['env_variables'] = env
else:
for key in env.keys():
appyaml['env_variables'][key] = env[key]
none_entry = False
for key in appyaml['env_variables'].keys():
if appyaml['env_variables'][key] is None and key not in env:
print("Warning {} is set to null and not overwritten, ensure it's correct!".format(key))
none_entry = True
if none_entry and input("Continue? [Y/n]?") != 'Y':
print("Quitting ...")
exit()
if 'service' in env and env['service'] != 'default':
print("using custom service: {}".format(env['service']))
appyaml['service'] = env['service']
if 'version' in env:
args.version = env['version']
promote = True if args.promote == 'promote' else False
# cuz of windows file access
working_directory = rreplace(current_directory, 'deploy', '')
app_file_path = os.path.join(current_directory, '..', 'app.yaml')
try:
print("dumping data to app.yaml")
with open(app_file_path, 'w') as file:
dump(appyaml, file, default_flow_style=False)
print("running deployment, ensure gcloud SDK is configured")
params = ['gcloud', 'app', 'deploy', '--project', args.app, '--quiet']
if not promote:
params.append('--no-promote')
print("using --no-promote flag")
if args.version != 'default':
params.append('--version')
params.append(args.version)
print(params)
subprocess.call(params, cwd=working_directory)
finally:
print("removing app files with secrets")
os.remove(app_file_path)
if args.action == 'pull_config':
pull_config(env_file_path, env_file)
elif args.action == 'push_config':
push_config(env_file_path, env_file)
elif args.action == 'deploy':
deploy(env_file_path, env_file)
with open('.cache', 'w') as f:
dump(cache, f)
exit(0)
| #! /usr/bin/env python3
from settings import PROJECT_NAME
from yaml import dump, load
import argparse
import os
import subprocess
import shutil
from hashlib import sha256
def rreplace(s, old, new, occurrence=1):
    """Return *s* with up to `occurrence` right-most copies of `old` replaced by `new`."""
    return new.join(s.rsplit(old, occurrence))
# Bucket that stores per-environment config files for this project.
CONFIGS_BUCKET = 'gs://{}-configs/'.format(PROJECT_NAME)
current_directory = os.path.dirname(os.path.realpath(__file__))

# Command line: a required action and environment, plus deployment tweaks.
parser = argparse.ArgumentParser(
    description='Deploy script, usage \n'
                'deploy.py environment --version default --app {} --appyaml app.dist.yaml'.format(PROJECT_NAME)
)
parser.add_argument('action', choices=['deploy', 'pull_config', 'push_config'],
                    help='what to do?')
parser.add_argument('environment', default='live',
                    help='env, looking for file env_{NAME}.conf in deploy directory')
parser.add_argument('--promote', default='no-promote',
                    help='promote')
parser.add_argument('--version', dest='version', default='default',
                    help='appengine flex env version')
parser.add_argument('--app', dest='app', default=PROJECT_NAME,
                    help='appengine flex env version')
parser.add_argument('--appyaml', dest='appyaml', default='app.dist.yaml',
                    help='base app yaml file default app.dist.yaml')
parser.add_argument('--force', '-f', action='store_true',
                    help='force config update')

# Local sha cache (.cache) used to detect concurrent local/remote config edits.
# NOTE(review): yaml.load() without an explicit Loader is deprecated (and an
# error in recent PyYAML); consider safe_load -- confirm the pinned PyYAML version.
try:
    with open('.cache', 'r') as f:
        cache = load(f)
except FileNotFoundError:
    # First run: create an empty cache file.
    with open('.cache', 'w') as f:
        cache = {}
        dump(cache, f)
print(cache)
args = parser.parse_args()
# Config file name derived from the chosen environment, e.g. "live.env".
env_file = "{}.env".format(args.environment)
print(args)
# get full paths
env_file_path = os.path.join(current_directory, env_file)
appyaml_file_path = os.path.join(current_directory, args.appyaml)
def check_gsutil():
    """Verify the gsutil CLI is runnable; exit(1) with a hint otherwise."""
    try:
        subprocess.check_call(['gsutil', '--version'])
    except Exception as e:
        # Any failure (missing binary, broken install) is fatal for config sync.
        print("Failed to run gsutil: {}\n"
              "Make sure it's installed and working properly.".format(e))
        exit(1)
def check_bucket():
    """Ensure the configs GCS bucket exists, creating it when missing."""
    params = ['gsutil', 'ls', '-p', PROJECT_NAME, CONFIGS_BUCKET]
    try:
        subprocess.check_output(params, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        print(e)
        if 'BucketNotFoundException' in e.output.decode():
            print('Bucket {} doesn\'t exist, creating...'
                  .format(CONFIGS_BUCKET))
            # Regional bucket in europe-west2, matching the deployment region.
            params = ['gsutil', 'mb', '-p', PROJECT_NAME, '-c', 'regional',
                      '-l', 'europe-west2', CONFIGS_BUCKET]
            subprocess.check_call(params)
            print('Created bucket {}'.format(CONFIGS_BUCKET))
        else:
            # Some other gsutil failure: propagate it.
            raise
def load_env(input_file):
    """Parse a KEY=VALUE env file into a dict.

    Blank lines and '#' comment lines are skipped (the previous version
    raised ValueError on them). Only the first '=' splits, so values may
    themselves contain '='. Keys and values are whitespace-stripped.

    Arguments:
        input_file -- an open text file (or any iterable of lines).
    Returns:
        dict mapping stripped keys to stripped values.
    """
    result = {}
    for line in input_file:
        stripped = line.strip()
        # BUGFIX: tolerate blank lines and comments, common in .env files.
        if not stripped or stripped.startswith('#'):
            continue
        key, value = stripped.split('=', 1)
        result[key.strip()] = value.strip()
    return result
def pull_config(env_file_path, env_file):
    """Download the remote env config and sync it with the local copy.

    Uses sha256 digests plus the module-level `cache` (.cache file) to tell
    whether the local copy, the remote copy, or both changed since the last
    sync. The remote file replaces the local one unless a conflicting local
    edit is detected, in which case the download is kept as
    <env_file>.remote for a manual merge.
    """
    check_gsutil()
    check_bucket()
    env_file_path_remote = env_file_path + '.remote'
    print("Downloading {} using gsutil..."
          .format(env_file))
    params = ['gsutil', 'cp', os.path.join(CONFIGS_BUCKET, env_file),
              env_file_path_remote]
    subprocess.check_call(params)
    with open(env_file_path_remote, 'r') as f:
        remote_sha = sha256(f.read().encode('utf-8')).hexdigest()
    try:
        # NOTE(review): opens the bare file name (CWD-relative) while the
        # copy target uses the full env_file_path -- confirm the script is
        # always run from the deploy directory.
        with open(env_file, 'r') as f:
            local_sha = sha256(f.read().encode('utf-8')).hexdigest()
        if remote_sha == local_sha:
            # Local and remote identical: just refresh the cache entry.
            print("No changes")
            cache[env_file] = local_sha
            return
        if env_file in cache and cache[env_file] == remote_sha:
            # Remote unchanged since the last sync; keep local edits.
            print("No new changes")
            return
        if env_file in cache and cache[env_file] != remote_sha and \
                cache[env_file] != local_sha:
            # Both sides diverged from the cached sha: manual merge needed.
            print("{} was modified both locally and on remote.\n"
                  "The remote config was retained in {}.\n"
                  "Merge it with local config and push using "
                  "'push_config -f'".format(env_file, env_file_path_remote))
            return
    except FileNotFoundError:
        # No local copy yet: fall through and adopt the remote file.
        print("Local {} doesn't exist.".format(env_file))
    shutil.move(env_file_path_remote, env_file_path)
    try:
        os.remove(env_file_path_remote)
    except OSError:
        pass
    cache[env_file] = remote_sha
def push_config(env_file_path, env_file):
    """Upload the local env config to the configs bucket.

    Unless --force is given, the remote copy is downloaded first and the
    push aborts when a conflicting remote edit is detected (via sha256
    digests and the module-level `cache`).
    """
    check_gsutil()
    env_file_path_remote = env_file_path + '.remote'
    try:
        # NOTE(review): opens the bare file name (CWD-relative) while other
        # paths use env_file_path -- confirm the intended working directory.
        with open(env_file, 'r') as f:
            local_sha = sha256(f.read().encode('utf-8')).hexdigest()
    except FileNotFoundError:
        print("Local {} doesn't exist.".format(env_file))
        exit(1)
    if not args.force:
        print("Downloading {} using gsutil..."
              .format(env_file))
        params = ['gsutil', 'cp', os.path.join(CONFIGS_BUCKET, env_file),
                  env_file_path_remote]
        subprocess.check_call(params)
        with open(env_file_path_remote, 'r') as f:
            remote_sha = sha256(f.read().encode('utf-8')).hexdigest()
        if remote_sha == local_sha:
            print("No changes")
            return
        if env_file in cache and cache[env_file] != remote_sha:
            # Remote changed since our last sync: require a manual merge.
            print("{} was modified both locally and on remote.\n"
                  "The remote config was retained in {}.\n"
                  "Merge it with local config and push using "
                  "'push_config -f'".format(env_file, env_file_path_remote))
            return
    params = ['gsutil', 'cp', env_file_path,
              os.path.join(CONFIGS_BUCKET, env_file)]
    subprocess.check_call(params)
    try:
        os.remove(env_file_path_remote)
    except OSError:
        pass
    cache[env_file] = local_sha
def deploy(env_file_path, env_file):
    """Merge env vars into the base app yaml, write app.yaml and deploy.

    Builds app.yaml from args.appyaml plus KEY=VALUE pairs from env_file,
    then runs `gcloud app deploy`. The generated app.yaml contains secrets,
    so it is removed afterwards whether or not the deployment succeeds.
    """
    # checking if files exist
    if not os.path.isfile(appyaml_file_path):
        # BUGFIX: report the file that is actually missing (the app yaml,
        # not the env file).
        print("{} does not exist, please use default app.dist.yaml"
              .format(appyaml_file_path))
        exit(1)
    # load files
    with open(os.path.join(current_directory, env_file), 'r') as file:
        env = load_env(file)
    with open(os.path.join(current_directory, args.appyaml), 'r') as file:
        appyaml = load(file)
    # Env-file values override (or create) the yaml's env_variables section.
    if 'env_variables' not in appyaml:
        appyaml['env_variables'] = env
    else:
        for key in env.keys():
            appyaml['env_variables'][key] = env[key]
    # Warn about null entries the env file did not override.
    none_entry = False
    for key in appyaml['env_variables'].keys():
        if appyaml['env_variables'][key] is None and key not in env:
            print("Warning {} is set to null and not overwritten, ensure it's correct!".format(key))
            none_entry = True
    if none_entry and input("Continue? [Y/n]?") != 'Y':
        print("Quitting ...")
        exit()
    # Optional per-environment overrides for service name and version.
    if 'service' in env and env['service'] != 'default':
        print("using custom service: {}".format(env['service']))
        appyaml['service'] = env['service']
    if 'version' in env:
        args.version = env['version']
    promote = args.promote == 'promote'
    # cuz of windows file access
    working_directory = rreplace(current_directory, 'deploy', '')
    app_file_path = os.path.join(current_directory, '..', 'app.yaml')
    try:
        print("dumping data to app.yaml")
        with open(app_file_path, 'w') as file:
            dump(appyaml, file, default_flow_style=False)
        print("running deployment, ensure gcloud SDK is configured")
        params = ['gcloud', 'app', 'deploy', '--project', args.app, '--quiet']
        if not promote:
            params.append('--no-promote')
            print("using --no-promote flag")
        if args.version != 'default':
            params.append('--version')
            params.append(args.version)
        print(params)
        subprocess.call(params, cwd=working_directory)
    finally:
        print("removing app files with secrets")
        os.remove(app_file_path)
# Dispatch the requested action, then persist the sha cache for next run.
if args.action == 'pull_config':
    pull_config(env_file_path, env_file)
elif args.action == 'push_config':
    push_config(env_file_path, env_file)
elif args.action == 'deploy':
    deploy(env_file_path, env_file)
with open('.cache', 'w') as f:
    dump(cache, f)
exit(0)
| en | 0.479036 | #! /usr/bin/env python3 # get full paths # checking if files exist # load files # cuz of windows file access | 2.251936 | 2 |
src/C2B_Simulate.py | KahigaKiguru/Daraja-API-Test | 2 | 6620984 | <filename>src/C2B_Simulate.py
import authenticate_app
import user_credentials
import requests
def register_url():
    """Register the C2B confirmation/validation callback URLs for the short code.

    CONSISTENCY FIX: obtain a fresh OAuth token via
    authenticate_application() (as simulate_c2bTransaction does) instead of
    reading a possibly-stale module attribute, and print the API response
    instead of discarding it.
    """
    access_token = authenticate_app.authenticate_application()
    api_url = user_credentials.register_api_url
    headers = {"Authorization": "Bearer %s" % access_token}
    request = {
        "ShortCode": user_credentials.shortcode_1,
        "ResponseType": "Confirmed",
        "ConfirmationURL": user_credentials.confirmation_url,
        "ValidationURL": user_credentials.validation_url
    }
    response = requests.post(api_url, json=request, headers=headers)
    print(response.text)
def simulate_c2bTransaction():
    """Simulate a customer-to-business PayBill payment on the sandbox and
    print the API response body."""
    token = authenticate_app.authenticate_application()
    headers = {"Authorization": "Bearer %s" % token}
    payload = {
        "ShortCode": user_credentials.shortcode_1,
        "CommandID": "CustomerPayBillOnline",
        "Amount": user_credentials.transaction_amount,
        "Msisdn": user_credentials.test_msisdn,
        "BillRefNumber": user_credentials.account_reference,
    }
    response = requests.post(user_credentials.c2b_simulate_url,
                             json=payload, headers=headers)
    print(response.text)
simulate_c2bTransaction() | <filename>src/C2B_Simulate.py
import authenticate_app
import user_credentials
import requests
def register_url():
access_token = authenticate_app.access_token
api_url = user_credentials.register_api_url
headers = {"Authorization": "Bearer %s" %access_token}
request = {
"ShortCode": user_credentials.shortcode_1,
"ResponseType": "Confirmed",
"ConfirmationURL": user_credentials.confirmation_url,
"ValidationURL": user_credentials.validation_url
}
response = requests.post(api_url, json = request, headers=headers)
def simulate_c2bTransaction():
access_token = authenticate_app.authenticate_application()
api_url = user_credentials.c2b_simulate_url
headers = {"Authorization": "Bearer %s" % access_token}
request = { "ShortCode": user_credentials.shortcode_1,
"CommandID":"CustomerPayBillOnline",
"Amount": user_credentials.transaction_amount,
"Msisdn": user_credentials.test_msisdn,
"BillRefNumber": user_credentials.account_reference
}
response = requests.post(api_url, json = request, headers=headers)
print (response.text)
simulate_c2bTransaction() | none | 1 | 2.711134 | 3 | |
lib/search/priorityqueue.py | timofurrer/aoc-2020 | 0 | 6620985 | import heapq
class PriorityQueue:
    """A Queue in which the minimum (or maximum) element (as determined by f and
    order) is returned first.

    If order is 'min', the item with minimum f(x) is returned first; if order
    is 'max', then it is the item with maximum f(x). Also supports dict-like
    lookup: ``item in q``, ``q[item]`` and ``del q[item]`` compare stored
    items by equality.
    """

    def __init__(self, order='min', f=lambda x: x):
        """Create an empty queue ordered by *f*; *order* is 'min' or 'max'."""
        self.heap = []
        if order == 'min':
            self.f = f
        elif order == 'max':  # now item with max f(x)
            self.f = lambda x: -f(x)  # will be popped first
        else:
            raise ValueError("order must be either 'min' or max'.")

    def append(self, item, priority=None):
        """Insert item at its correct position (priority defaults to f(item))."""
        priority = priority if priority is not None else self.f(item)
        heapq.heappush(self.heap, (priority, item))

    def extend(self, items):
        """Insert each item in items at its correct position."""
        for item in items:
            self.append(item)

    def pop(self):
        """Pop and return the item (with min or max f(x) value
        depending on the order."""
        if self.heap:
            return heapq.heappop(self.heap)[1]
        else:
            raise Exception('Trying to pop from empty PriorityQueue.')

    def __len__(self):
        """Return current capacity of PriorityQueue."""
        return len(self.heap)

    def __bool__(self):
        return len(self) != 0

    def __contains__(self, key):
        """Return True if an equal item is in the PriorityQueue."""
        # BUGFIX: the old check reconstructed (f(item), item) tuples, which
        # misses entries appended with an explicit priority argument.
        return any(item == key for _, item in self.heap)

    def __getitem__(self, key):
        # Preserved behaviour: returns None when no equal item is present.
        for _, item in self.heap:
            if item == key:
                return item

    def __delitem__(self, key):
        """Delete the first occurrence of key (matched by item equality)."""
        # BUGFIX: removing the reconstructed (f(key), key) tuple failed for
        # entries appended with an explicit priority; remove by item instead.
        for i, (_, item) in enumerate(self.heap):
            if item == key:
                del self.heap[i]
                heapq.heapify(self.heap)
                return
        raise ValueError('{!r} is not in the PriorityQueue'.format(key))
class PriorityQueue:
"""A Queue in which the minimum (or maximum) element (as determined by f and
order) is returned first.
If order is 'min', the item with minimum f(x) is
returned first; if order is 'max', then it is the item with maximum f(x).
Also supports dict-like lookup."""
def __init__(self, order='min', f=lambda x: x):
self.heap = []
if order == 'min':
self.f = f
elif order == 'max': # now item with max f(x)
self.f = lambda x: -f(x) # will be popped first
else:
raise ValueError("order must be either 'min' or max'.")
def append(self, item, priority=None):
"""Insert item at its correct position."""
priority = priority if priority is not None else self.f(item)
heapq.heappush(self.heap, (priority, item))
def extend(self, items):
"""Insert each item in items at its correct position."""
for item in items:
self.append(item)
def pop(self):
"""Pop and return the item (with min or max f(x) value
depending on the order."""
if self.heap:
return heapq.heappop(self.heap)[1]
else:
raise Exception('Trying to pop from empty PriorityQueue.')
def __len__(self):
"""Return current capacity of PriorityQueue."""
return len(self.heap)
def __bool__(self):
return len(self) != 0
def __contains__(self, item):
"""Return True if item in PriorityQueue."""
return (self.f(item), item) in self.heap
def __getitem__(self, key):
for _, item in self.heap:
if item == key:
return item
def __delitem__(self, key):
"""Delete the first occurrence of key."""
self.heap.remove((self.f(key), key))
heapq.heapify(self.heap) | en | 0.877411 | A Queue in which the minimum (or maximum) element (as determined by f and order) is returned first. If order is 'min', the item with minimum f(x) is returned first; if order is 'max', then it is the item with maximum f(x). Also supports dict-like lookup. # now item with max f(x) # will be popped first Insert item at its correct position. Insert each item in items at its correct position. Pop and return the item (with min or max f(x) value depending on the order. Return current capacity of PriorityQueue. Return True if item in PriorityQueue. Delete the first occurrence of key. | 4.349817 | 4 |
app/http/controllers/PlateController.py | zhaozhentao/MasoniteML | 0 | 6620986 | """A PlateController Module."""
from masonite import Upload
from masonite.controllers import Controller
from masonite.request import Request
from app.providers.DLModelProvider import DLModelProvider
class PlateController(Controller):
    """Handles licence-plate recognition requests."""

    def __init__(self, request: Request):
        """Keep a reference to the incoming Masonite request.

        Arguments:
            request {masonite.request.Request} -- The Masonite Request class.
        """
        self.request = request

    def store(self, request: Request, upload: Upload, ai: DLModelProvider):
        """Persist the uploaded image to disk and return the predicted plate."""
        target = 'img.png'
        upload.driver('disk').store(request.input('image'), filename=target)
        return {'plate': ai.predict(target)}
| """A PlateController Module."""
from masonite import Upload
from masonite.controllers import Controller
from masonite.request import Request
from app.providers.DLModelProvider import DLModelProvider
class PlateController(Controller):
"""PlateController Controller Class."""
def __init__(self, request: Request):
"""PlateController Initializer
Arguments:
request {masonite.request.Request} -- The Masonite Request class.
"""
self.request = request
def store(self, request: Request, upload: Upload, ai: DLModelProvider):
filename = 'img.png'
upload.driver('disk').store(request.input('image'), filename=filename)
plate = ai.predict(filename)
return {'plate': plate}
| en | 0.330413 | A PlateController Module. PlateController Controller Class. PlateController Initializer Arguments: request {masonite.request.Request} -- The Masonite Request class. | 2.853762 | 3 |
Menu.py | AStox/tankbot | 0 | 6620987 | <gh_stars>0
'''
Copyright 2014 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from bge import logic
from bge import events
from bge import render
import time
class Button(object):
    """Shared behaviour for the menu buttons.

    Each method is intended to run once per logic tick from the button
    object's Python controller: `mouseOver` drives the hover animation,
    the remaining methods perform the button's click action.
    """
    def getName(self):
        # NOTE(review): `self.name` is never assigned in this class --
        # presumably set from outside (or dead code); confirm before use.
        return self.name
    def mouseOver(self):
        """Animate the hover effect: frames 1->5 on enter, 5->1 otherwise."""
        cont = logic.getCurrentController()
        obj = cont.owner
        mouse = cont.sensors['Mouse']
        if mouse.positive:
            if obj['var'] == 0:
                # First hovered frame: play the highlight animation once.
                obj.playAction('PlayAction',1.0,5.0,1,1,1.0,0,0.0,1,1.0)
                obj['var'] += 1
        else:
            # NOTE(review): this branch runs on every non-hovered frame, so
            # 'var' can drift below zero -- confirm whether a `var == 1`
            # guard was intended here.
            obj.playAction('PlayAction',5.0,1.0,1,1,1.0,0,0.0,1,1.0)
            obj['var'] -= 1
    def Play(self):
        """On left click: add the first level and the music scene."""
        cont = logic.getCurrentController()
        scene = logic.getCurrentScene()
        mouse = cont.sensors['Mouse']
        mouseEvents = logic.mouse.events
        click = mouseEvents[events.LEFTMOUSE]
        if click:
            logic.addScene('Level0')
            logic.addScene('Music')
    def Quit(self):
        """On left click: exit the game."""
        cont = logic.getCurrentController()
        scene = logic.getCurrentScene()
        mouse = cont.sensors['Mouse']
        mouseEvents = logic.mouse.events
        click = mouseEvents[events.LEFTMOUSE]
        if click:
            logic.endGame()
    def Next(self):
        """On left click: swap LevelN for Level(N+1) and close this menu."""
        cont = logic.getCurrentController()
        scene = logic.getCurrentScene()
        scenes = logic.getSceneList()
        mouse = cont.sensors['Mouse']
        mouseEvents = logic.mouse.events
        click = mouseEvents[events.LEFTMOUSE]
        dict = logic.globalDict
        # 'level' in globalDict tracks the level currently being played.
        nextLevel = dict['level'] + 1
        if click:
            for i in scenes:
                if 'Level%s' % (nextLevel - 1) in i.name:
                    i.replace('Level%s' % nextLevel)
            scene.end()
    def Gameover(self):
        """On left click (after a short delay): restart the whole game."""
        cont = logic.getCurrentController()
        obj = cont.owner
        scene = logic.getCurrentScene()
        mouse = cont.sensors['Mouse']
        mouseEvents = logic.mouse.events
        click = mouseEvents[events.LEFTMOUSE]
        # 'time' debounces the click that opened this menu: ignore clicks
        # during the first few ticks.
        if obj['time'] > 3:
            if click:
                logic.restartGame()
        obj['time'] += 1
    def Menu(self):
        """On left click (after a short delay): restart the game, which
        returns to the main menu. Currently identical to Gameover()."""
        cont = logic.getCurrentController()
        obj = cont.owner
        scene = logic.getCurrentScene()
        mouse = cont.sensors['Mouse']
        mouseEvents = logic.mouse.events
        click = mouseEvents[events.LEFTMOUSE]
        if obj['time'] > 3:
            if click:
                logic.restartGame()
        obj['time'] += 1
    def Again(self):
        """On left click: restart the current level scene and close this menu."""
        cont = logic.getCurrentController()
        scene = logic.getCurrentScene()
        scenes = logic.getSceneList()
        mouse = cont.sensors['Mouse']
        mouseEvents = logic.mouse.events
        click = mouseEvents[events.LEFTMOUSE]
        dict = logic.globalDict
        level = dict['level']
        if click:
            for i in scenes:
                if 'Level%s' % (level) in i.name:
                    i.restart()
            scene.end()
def CameraMain():  # sets the cursor to visible while in the main menu.
    """Main-menu camera controller: keep the mouse cursor visible each tick."""
    cont = logic.getCurrentController()
    obj = cont.owner
    scene = logic.getCurrentScene()
    dict = logic.globalDict
    if 'init' not in obj:
        obj['init'] = 1
    logic.mouse.visible = True
def Camera():  # Sets the cursor to visible while in the "Next level" and "GameOver" menus.
    """Menu camera controller: show the cursor once, on the first frame."""
    cont = logic.getCurrentController()
    obj = cont.owner
    scene = logic.getCurrentScene()
    dict = logic.globalDict
    if 'init' not in obj:
        obj['init'] = 1
        logic.mouse.visible = True
def Play():
    """Per-tick controller for the 'Play' button: hover animation + click."""
    cont = logic.getCurrentController()
    obj = cont.owner
    if 'init' not in obj:
        obj['init'] = 1
        obj['var'] = 0
    btn = Button()
    btn.mouseOver()
    btn.Play()
def Quit():
    """Per-tick controller for the 'Quit' button: hover animation + click."""
    cont = logic.getCurrentController()
    obj = cont.owner
    if 'init' not in obj:
        obj['init'] = 1
        obj['var'] = 0
    btn = Button()
    btn.mouseOver()
    btn.Quit()
def Next():
    """Per-tick controller for the 'Next level' button: hover + click."""
    cont = logic.getCurrentController()
    obj = cont.owner
    if 'init' not in obj:
        obj['init'] = 1
        obj['var'] = 0
    btn = Button()
    btn.mouseOver()
    btn.Next()
def Gameover():
    """Per-tick controller for the 'Game over' button: hover + click."""
    cont = logic.getCurrentController()
    obj = cont.owner
    if 'init' not in obj:
        obj['init'] = 1
        obj['var'] = 0
        obj['time'] = 0
    btn = Button()
    btn.mouseOver()
    btn.Gameover()
def Menu():
    """Per-tick controller for the 'Menu' button.

    Initialises the button's animation ('var') and debounce ('time')
    properties, then runs the hover animation and click handling.
    """
    cont = logic.getCurrentController()
    obj = cont.owner
    if 'init' not in obj:
        obj['init'] = 1
        obj['var'] = 0
        obj['time'] = 0
    menu = Button()
    menu.mouseOver()
    # BUGFIX: dispatch to Button.Menu() rather than Button.Gameover().
    # The two methods are currently identical so behaviour is unchanged,
    # but the intent (and any future divergence) is now correct.
    menu.Menu()
def Again():
    """Per-tick controller for the 'Play again' button: hover + click."""
    cont = logic.getCurrentController()
    obj = cont.owner
    if 'init' not in obj:
        obj['init'] = 1
        obj['var'] = 0
    btn = Button()
    btn.mouseOver()
    btn.Again()
def Score():  # Keeps track of score, and instantiates it on screen during "Next Level" and "GameOver" menus.
    """Spawn digit objects that render the score breakdown on screen.

    Anchor objects flagged with properties tank1..tank4, time1..time4,
    rocket1..rocket4 and digit1..digit4 mark positions for the thousands
    down to the units digit; a Num_<d> object is added at each anchor that
    has a digit to show, with Num_0 shown when the value is zero.
    """
    cont = logic.getCurrentController()
    obj = cont.owner
    scene = logic.getCurrentScene()
    dict = logic.globalDict
    # BUGFIX: removed an unused `levelScore` local that was computed from
    # dict['levelScore'] but never read.
    score = str(int(dict['score']))

    def Init():
        if 'init' not in obj:
            obj['init'] = 1

    def Update():
        tankPoints = dict['tank_kills'] * 100
        rocketPoints = dict['rocket_kills'] * 10
        timeBonus = 50 - int(dict['levelClock'])
        # No time bonus on the menu/tutorial level (0) or past 50 seconds.
        if dict['level'] == 0:
            timePoints = 0
        elif timeBonus >= 1:
            timePoints = timeBonus
        else:
            timePoints = 0
        for i in range(1, 5):
            # i = 1..4 selects the 10**3 .. 10**0 digit of each value.
            if 'tank%s' % i in obj and tankPoints >= 10**(4-i):
                scene.addObject('Num_%s' % (str(tankPoints)[len(str(tankPoints))-(5-i)]), obj)
            if tankPoints == 0:
                if 'tank4' in obj:
                    scene.addObject('Num_0', obj)
            if 'time%s' % i in obj and timePoints >= 10**(4-i):
                scene.addObject('Num_%s' % (str(timePoints)[len(str(timePoints))-(5-i)]), obj)
            elif timePoints == 0:
                if 'time4' in obj:
                    scene.addObject('Num_0', obj)
            if 'rocket%s' % i in obj and rocketPoints >= 10**(4-i):
                scene.addObject('Num_%s' % (str(rocketPoints)[len(str(rocketPoints))-(5-i)]), obj)
            elif rocketPoints == 0:
                if 'rocket4' in obj:
                    scene.addObject('Num_0', obj)
            if 'digit%s' % i in obj and dict['score'] >= 10**(4-i):
                scene.addObject('Num_%s' % (score[len(score)-(5-i)]), obj)
            if dict['score'] == 0:
                if 'digit4' in obj:
                    scene.addObject('Num_0', obj)

    Init()
    Update()
def Paused():  # Simple Pause function
    """Pause-menu controller: keep gameplay overlays suspended until unpause.

    While this scene runs, the Score overlay scene is suspended every tick.
    When the keyboard sensor fires, the Score and current Level scenes are
    resumed, the shared 'paused' flag is cleared and this scene ends.
    """
    cont = logic.getCurrentController()
    obj = cont.owner
    scene = logic.getCurrentScene()
    pause = cont.sensors['Keyboard']
    dict = logic.globalDict
    level = dict['level']
    scenes = logic.getSceneList()
    def Init():
        if not 'init' in obj:
            obj['init'] = 1
    def Update():
        # Keep the score HUD frozen while paused.
        for i in scenes:
            if 'Score' in i.name:
                i.suspend()
        if pause.positive:
            dict['paused'] = False
            # Resume the HUD and the level that was paused, then close
            # this pause-menu scene.
            for i in scenes:
                if 'Score' in i.name:
                    i.resume()
                if 'Level%s' % level in i.name:
                    i.resume()
            scene.end()
    Init()
    Update()
def ScoreCounter():  # Adds individual level score to total score. This needed to be done so that during a GameOver, the level score could be shown, but not added to the total score.
    """Fold the finished level's score into the running total, exactly once.

    The 'init' property makes this a one-shot, so a Game Over screen can
    still display the level score without re-adding it to the total.
    """
    cont = logic.getCurrentController()
    obj = cont.owner
    scene = logic.getCurrentScene()
    dict = logic.globalDict
    if 'init' not in obj:
        obj['init'] = 1
        dict['score'] += dict['levelScore']
def Template():
    """Boilerplate copy/paste skeleton for new per-object controllers."""
    cont = logic.getCurrentController()
    obj = cont.owner
    scene = logic.getCurrentScene()
    def Init():
        # One-shot initialisation guard.
        if not 'init' in obj:
            obj['init'] = 1
    def Update():
        # Per-frame behaviour goes here.
        pass
    Init()
Update() | '''
Copyright 2014 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from bge import logic
from bge import events
from bge import render
import time
class Button(object):
def getName(self):
return self.name
def mouseOver(self):
cont = logic.getCurrentController()
obj = cont.owner
mouse = cont.sensors['Mouse']
if mouse.positive:
if obj['var'] == 0:
obj.playAction('PlayAction',1.0,5.0,1,1,1.0,0,0.0,1,1.0)
obj['var'] += 1
else:
obj.playAction('PlayAction',5.0,1.0,1,1,1.0,0,0.0,1,1.0)
obj['var'] -= 1
def Play(self):
cont = logic.getCurrentController()
scene = logic.getCurrentScene()
mouse = cont.sensors['Mouse']
mouseEvents = logic.mouse.events
click = mouseEvents[events.LEFTMOUSE]
if click:
logic.addScene('Level0')
logic.addScene('Music')
def Quit(self):
cont = logic.getCurrentController()
scene = logic.getCurrentScene()
mouse = cont.sensors['Mouse']
mouseEvents = logic.mouse.events
click = mouseEvents[events.LEFTMOUSE]
if click:
logic.endGame()
def Next(self):
cont = logic.getCurrentController()
scene = logic.getCurrentScene()
scenes = logic.getSceneList()
mouse = cont.sensors['Mouse']
mouseEvents = logic.mouse.events
click = mouseEvents[events.LEFTMOUSE]
dict = logic.globalDict
nextLevel = dict['level'] + 1
if click:
for i in scenes:
if 'Level%s' % (nextLevel - 1) in i.name:
i.replace('Level%s' % nextLevel)
scene.end()
def Gameover(self):
cont = logic.getCurrentController()
obj = cont.owner
scene = logic.getCurrentScene()
mouse = cont.sensors['Mouse']
mouseEvents = logic.mouse.events
click = mouseEvents[events.LEFTMOUSE]
if obj['time'] > 3:
if click:
logic.restartGame()
obj['time'] += 1
def Menu(self):
cont = logic.getCurrentController()
obj = cont.owner
scene = logic.getCurrentScene()
mouse = cont.sensors['Mouse']
mouseEvents = logic.mouse.events
click = mouseEvents[events.LEFTMOUSE]
if obj['time'] > 3:
if click:
logic.restartGame()
obj['time'] += 1
def Again(self):
cont = logic.getCurrentController()
scene = logic.getCurrentScene()
scenes = logic.getSceneList()
mouse = cont.sensors['Mouse']
mouseEvents = logic.mouse.events
click = mouseEvents[events.LEFTMOUSE]
dict = logic.globalDict
level = dict['level']
if click:
for i in scenes:
if 'Level%s' % (level) in i.name:
i.restart()
scene.end()
def CameraMain(): #sets the cursor to visible while in the main menu.
cont = logic.getCurrentController()
obj = cont.owner
scene = logic.getCurrentScene()
dict = logic.globalDict
def Init():
if not 'init' in obj:
obj['init'] = 1
def Update():
logic.mouse.visible = True
Init()
Update()
def Camera(): #Sets the cursor to visible while in the "Next level" and "GameOver" menus.
cont = logic.getCurrentController()
obj = cont.owner
scene = logic.getCurrentScene()
dict = logic.globalDict
def Init():
if not 'init' in obj:
obj['init'] = 1
logic.mouse.visible = True
def Update():
pass
Init()
Update()
def Play():
def Init():
cont = logic.getCurrentController()
obj = cont.owner
if not 'init' in obj:
obj['init'] = 1
obj['var'] = 0
def Update():
play = Button()
play.mouseOver()
play.Play()
Init()
Update()
def Quit():
def Init():
cont = logic.getCurrentController()
obj = cont.owner
if not 'init' in obj:
obj['init'] = 1
obj['var'] = 0
def Update():
quit = Button()
quit.mouseOver()
quit.Quit()
Init()
Update()
def Next():
def Init():
cont = logic.getCurrentController()
obj = cont.owner
if not 'init' in obj:
obj['init'] = 1
obj['var'] = 0
def Update():
next = Button()
next.mouseOver()
next.Next()
Init()
Update()
def Gameover():
def Init():
cont = logic.getCurrentController()
obj = cont.owner
if not 'init' in obj:
obj['init'] = 1
obj['var'] = 0
obj['time'] = 0
def Update():
gameover = Button()
gameover.mouseOver()
gameover.Gameover()
Init()
Update()
def Menu():
def Init():
cont = logic.getCurrentController()
obj = cont.owner
if not 'init' in obj:
obj['init'] = 1
obj['var'] = 0
obj['time'] = 0
def Update():
menu = Button()
menu.mouseOver()
menu.Gameover()
Init()
Update()
def Again():
def Init():
cont = logic.getCurrentController()
obj = cont.owner
if not 'init' in obj:
obj['init'] = 1
obj['var'] = 0
def Update():
again = Button()
again.mouseOver()
again.Again()
Init()
Update()
def Score(): #Keeps track of score, and instantiates it on screen during "Next Level" and "GameOver" menus.
cont = logic.getCurrentController()
obj = cont.owner
scene = logic.getCurrentScene()
dict = logic.globalDict
levelScore = str(int(dict['levelScore']))
score = str(int(dict['score']))
def Init():
if not 'init' in obj:
obj['init'] = 1
def Update():
tankPoints = dict['tank_kills'] * 100
rocketPoints = dict['rocket_kills'] * 10
timeBonus = 50 - int(dict['levelClock'])
if dict['level'] == 0:
timePoints = 0
elif timeBonus >= 1:
timePoints = timeBonus
else:
timePoints = 0
for i in range(1,5):
if 'tank%s' % i in obj and tankPoints >= 10**(4-i):
scene.addObject('Num_%s' % (str(tankPoints)[len(str(tankPoints))-(5-i)]),obj)
if tankPoints == 0:
if 'tank4' in obj:
scene.addObject('Num_0', obj)
if 'time%s' % i in obj and timePoints >= 10**(4-i):
scene.addObject('Num_%s' % (str(timePoints)[len(str(timePoints))-(5-i)]),obj)
elif timePoints == 0:
if 'time4' in obj:
scene.addObject('Num_0', obj)
if 'rocket%s' % i in obj and rocketPoints >= 10**(4-i):
scene.addObject('Num_%s' % (str(rocketPoints)[len(str(rocketPoints))-(5-i)]),obj)
elif rocketPoints == 0:
if 'rocket4' in obj:
scene.addObject('Num_0', obj)
if 'digit%s' % i in obj and dict['score'] >= 10**(4-i):
scene.addObject('Num_%s' % (score[len(score)-(5-i)]),obj)
if dict['score'] == 0:
if 'digit4' in obj:
scene.addObject('Num_0', obj)
Init()
Update()
def Paused(): #Simple Pause function
    cont = logic.getCurrentController()
    obj = cont.owner
    scene = logic.getCurrentScene()  # the pause overlay scene itself
    pause = cont.sensors['Keyboard']  # keyboard sensor wired to the un-pause key
    dict = logic.globalDict
    level = dict['level']
    scenes = logic.getSceneList()
    def Init():
        if not 'init' in obj:
            obj['init'] = 1
    def Update():
        # While paused, keep any 'Score' HUD scene suspended.
        for i in scenes:
            if 'Score' in i.name:
                i.suspend()
        if pause.positive:
            # Un-pause: clear the flag, resume the HUD and the current
            # level scene, then close this pause-overlay scene.
            dict['paused'] = False
            for i in scenes:
                if 'Score' in i.name:
                    i.resume()
                if 'Level%s' % level in i.name:
                    i.resume()
            scene.end()
    Init()
    Update()
def ScoreCounter(): #Adds individual level score to total score. This needed to be done so that during a GameOver, the level score could be shown, but not added to the total score.
    cont = logic.getCurrentController()
    obj = cont.owner
    scene = logic.getCurrentScene()
    dict = logic.globalDict
    def Init():
        if not 'init' in obj:
            # Accumulate exactly once per object lifetime: the 'init' guard
            # prevents the level score being added on every frame.
            obj['init'] = 1
            dict['score'] += dict['levelScore']
    def Update():
        pass
    Init()
    Update()
def Template():  # Skeleton for new logic-brick entry points in this file.
    cont = logic.getCurrentController()
    obj = cont.owner
    scene = logic.getCurrentScene()  # fetched for parity with the other handlers; unused here
    def Init():
        # One-shot setup: mark the owner so Init's body runs only once.
        if not 'init' in obj:
            obj['init'] = 1
    def Update():
        # Per-frame work goes here.
        pass
    Init()
Update() | en | 0.876396 | Copyright 2014 <NAME> Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. #sets the cursor to visible while in the main menu. #Sets the cursor to visible while in the "Next level" and "GameOver" menus. #Keeps track of score, and instantiates it on screen during "Next Level" and "GameOver" menus. #Simple Pause function #Adds individual level score to total score. This needed to be done so that during a GameOver, the level score could be shown, but not added to the total score. | 2.405761 | 2 |
python/project-euler/euler_2.py | indeshan/code | 0 | 6620988 | <filename>python/project-euler/euler_2.py
# Project Euler problem 2: sum of the even-valued Fibonacci terms below 4,000,000.
prev, cur = 1, 1  # one step into the sequence: 1, 1, 2, 3, 5, ...
total = 0
while cur < 4000000:
    if cur % 2 == 0:
        total += cur
    prev, cur = cur, prev + cur
print(total)
| <filename>python/project-euler/euler_2.py
prev, cur = 0, 1
total = 0
while True:
prev, cur = cur, prev + cur
if cur >= 4000000:
break
if cur % 2 == 0:
total += cur
print(total)
| none | 1 | 3.148696 | 3 | |
odoo-13.0/addons/l10n_in_sale_stock/models/stock_warehouse.py | VaibhavBhujade/Blockchain-ERP-interoperability | 0 | 6620989 | <filename>odoo-13.0/addons/l10n_in_sale_stock/models/stock_warehouse.py
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import models, fields, api
class Stock(models.Model):
    """Extend ``stock.warehouse`` with an Indian-localisation sale journal."""
    _inherit = 'stock.warehouse'
    # Accounting journal to use for sales from this warehouse (l10n_in).
    l10n_in_sale_journal_id = fields.Many2one('account.journal', string="Sale Journal")
| <filename>odoo-13.0/addons/l10n_in_sale_stock/models/stock_warehouse.py
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import models, fields, api
class Stock(models.Model):
_inherit = 'stock.warehouse'
l10n_in_sale_journal_id = fields.Many2one('account.journal', string="Sale Journal")
| en | 0.860833 | # -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. | 1.239346 | 1 |
MLTL_Compiler/__init__.py | zpcore/Web_MLTL_Compiler | 0 | 6620990 | from .postgraph import Postgraph
from .Observer import * | from .postgraph import Postgraph
from .Observer import * | none | 1 | 1.020479 | 1 | |
tests/test_fuzz_lib.py | NESCAU-UFLA/FuzzyingTool | 0 | 6620991 | import unittest
from unittest.mock import Mock, patch
from src.fuzzingtool.fuzz_lib import FuzzLib
from src.fuzzingtool.conn.requesters import Requester, SubdomainRequester
from src.fuzzingtool.utils.consts import PluginCategory, FUZZING_MARK
from src.fuzzingtool.exceptions import FuzzLibException, WordlistCreationError
from src.fuzzingtool.core.defaults.scanners import DataScanner, PathScanner, SubdomainScanner
from src.fuzzingtool.core.plugins.scanners import Reflected
from src.fuzzingtool.core.plugins.encoders import Html
from .mock_utils.wordlist_mock import WordlistMock
class TestFuzzController(unittest.TestCase):
    """Unit tests for FuzzLib's setup pipeline.

    Covers requester selection, matcher defaults, scanner selection,
    encoder building, Payloader configuration and wordlist construction.
    External collaborators (factories, Payloader, Matcher) are patched with
    unittest.mock so each test exercises only FuzzLib's wiring logic.
    """

    # --- requester initialisation -------------------------------------
    def test_init_requester_with_common_requester(self):
        test_url = "http://test-url.com/"
        test_fuzz_lib = FuzzLib(url=test_url)
        test_fuzz_lib._init_requester()
        self.assertIsInstance(test_fuzz_lib.requester, Requester)

    def test_init_requester_with_subdomain_requester(self):
        # A FUZZING_MARK in the host part selects the subdomain requester.
        test_url = f"http://{FUZZING_MARK}.test-url.com/"
        test_fuzz_lib = FuzzLib(url=test_url)
        test_fuzz_lib._init_requester()
        self.assertIsInstance(test_fuzz_lib.requester, SubdomainRequester)

    @patch("src.fuzzingtool.fuzz_lib.build_target_from_raw_http")
    def test_init_requester_with_raw_http(
        self,
        mock_build_target_from_raw_http: Mock
    ):
        return_target = {
            'url': "http://test-url.com/",
            'method': 'GET',
            'body': '',
            'header': {
                'test-key': "test-value"
            }
        }
        test_raw_filename = "/home/test/test_raw.txt"
        mock_build_target_from_raw_http.return_value = return_target
        test_fuzz_lib = FuzzLib(raw_http=test_raw_filename)
        test_fuzz_lib._init_requester()
        mock_build_target_from_raw_http.assert_called_once_with(test_raw_filename, None)
        self.assertIsInstance(test_fuzz_lib.requester, Requester)

    def test_init_requester_with_raise_exception(self):
        # No url/raw_http given: requester init must fail loudly.
        with self.assertRaises(FuzzLibException) as e:
            FuzzLib(wordlist="test")._init_requester()
        self.assertEqual(str(e.exception), "A target is needed to make the fuzzing")

    # --- matcher ------------------------------------------------------
    @patch("src.fuzzingtool.fuzz_lib.Matcher.set_status_code")
    def test_init_matcher(self, mock_set_status_code: Mock):
        test_fuzz_lib = FuzzLib(url=f"http://test-url.com/{FUZZING_MARK}")
        test_fuzz_lib._init_requester()
        test_fuzz_lib._init_matcher()
        mock_set_status_code.assert_called_once_with("200-399,401,403")

    # --- default scanner selection ------------------------------------
    def test_get_default_scanner_with_path_scanner(self):
        test_fuzz_lib = FuzzLib(url=f"http://test-url.com/{FUZZING_MARK}")
        test_fuzz_lib._init_requester()
        returned_scanner = test_fuzz_lib._FuzzLib__get_default_scanner()
        self.assertIsInstance(returned_scanner, PathScanner)

    def test_get_default_scanner_with_subdomain_scanner(self):
        test_fuzz_lib = FuzzLib(url=f"http://{FUZZING_MARK}.test-url.com/")
        test_fuzz_lib._init_requester()
        returned_scanner = test_fuzz_lib._FuzzLib__get_default_scanner()
        self.assertIsInstance(returned_scanner, SubdomainScanner)

    def test_get_default_scanner_with_data_scanner(self):
        test_fuzz_lib = FuzzLib(url=f"http://test-url.com/", data=f"a={FUZZING_MARK}")
        test_fuzz_lib._init_requester()
        returned_scanner = test_fuzz_lib._FuzzLib__get_default_scanner()
        self.assertIsInstance(returned_scanner, DataScanner)

    # --- scanner initialisation ---------------------------------------
    @patch("src.fuzzingtool.fuzz_lib.PluginFactory.object_creator")
    def test_init_scanners_with_plugin_scanner(self, mock_object_creator: Mock):
        mock_object_creator.return_value = Reflected()
        test_fuzz_lib = FuzzLib(url=f"http://test-url.com/", scanner="Reflected")
        test_fuzz_lib._init_requester()
        test_fuzz_lib._init_scanners()
        mock_object_creator.assert_called_once_with(PluginCategory.scanner, "Reflected", '')

    @patch("src.fuzzingtool.fuzz_lib.FuzzLib._FuzzLib__get_default_scanner")
    def test_init_scanners_with_default_scanner(self, mock_get_default_scanner: Mock):
        FuzzLib(url=f"http://test-url.com/{FUZZING_MARK}")._init_scanners()
        mock_get_default_scanner.assert_called_once()

    # --- encoder building ---------------------------------------------
    @patch("src.fuzzingtool.fuzz_lib.PluginFactory.object_creator")
    def test_build_encoders_with_encoders(self, mock_object_creator: Mock):
        expected_encoder = Html()
        # __build_encoders returns (plain_encoders, chained_encoders).
        return_expected = ([expected_encoder], [])
        mock_object_creator.return_value = expected_encoder
        returned_encoders = FuzzLib(encoder="Html")._FuzzLib__build_encoders()
        mock_object_creator.assert_called_once_with(PluginCategory.encoder, "Html", '')
        self.assertEqual(returned_encoders, return_expected)

    @patch("src.fuzzingtool.fuzz_lib.PluginFactory.object_creator")
    def test_build_encoders_with_chain_encoders(self, mock_object_creator: Mock):
        expected_encoder = Html()
        # "Html@Html" means a chain: both encoders applied in sequence.
        return_expected = ([], [[expected_encoder, expected_encoder]])
        mock_object_creator.return_value = expected_encoder
        returned_encoders = FuzzLib(encoder="Html@Html")._FuzzLib__build_encoders()
        mock_object_creator.assert_called_with(PluginCategory.encoder, "Html", '')
        self.assertEqual(returned_encoders, return_expected)

    @patch("src.fuzzingtool.fuzz_lib.Payloader.encoder.set_regex")
    @patch("src.fuzzingtool.fuzz_lib.PluginFactory.object_creator")
    def test_build_encoders_with_encode_only(self,
                                             mock_object_creator: Mock,
                                             mock_set_regex: Mock):
        test_encode_only = "<|>|;"
        mock_object_creator.return_value = Html()
        FuzzLib(encoder="Html", encode_only=test_encode_only)._FuzzLib__build_encoders()
        mock_set_regex.assert_called_once_with(test_encode_only)

    # --- payloader configuration --------------------------------------
    @patch("src.fuzzingtool.fuzz_lib.Payloader.set_prefix")
    def test_configure_payloader_with_prefix(self, mock_set_prefix: Mock):
        # Comma-separated option strings are split into lists.
        FuzzLib(prefix="test,test2")._FuzzLib__configure_payloader()
        mock_set_prefix.assert_called_once_with(["test", "test2"])

    @patch("src.fuzzingtool.fuzz_lib.Payloader.set_suffix")
    def test_configure_payloader_with_suffix(self, mock_set_suffix: Mock):
        FuzzLib(suffix="test,test2")._FuzzLib__configure_payloader()
        mock_set_suffix.assert_called_once_with(["test", "test2"])

    @patch("src.fuzzingtool.fuzz_lib.Payloader.set_lowercase")
    def test_configure_payloader_with_lowercase(self, mock_set_lowercase: Mock):
        FuzzLib(lower=True)._FuzzLib__configure_payloader()
        mock_set_lowercase.assert_called_once()

    @patch("src.fuzzingtool.fuzz_lib.Payloader.set_uppercase")
    def test_configure_payloader_with_uppercase(self, mock_set_uppercase: Mock):
        FuzzLib(upper=True)._FuzzLib__configure_payloader()
        mock_set_uppercase.assert_called_once()

    @patch("src.fuzzingtool.fuzz_lib.Payloader.set_capitalize")
    def test_configure_payloader_with_capitalize(self, mock_set_capitalize: Mock):
        FuzzLib(capitalize=True)._FuzzLib__configure_payloader()
        mock_set_capitalize.assert_called_once()

    @patch("src.fuzzingtool.fuzz_lib.FuzzLib._FuzzLib__build_encoders")
    @patch("src.fuzzingtool.fuzz_lib.Payloader.encoder.set_encoders")
    def test_configure_payloader_with_encoders(self,
                                               mock_set_encoders: Mock,
                                               mock_build_encoders: Mock):
        build_encoders_return = ([Html()], [])
        mock_build_encoders.return_value = build_encoders_return
        FuzzLib(encoder="Html")._FuzzLib__configure_payloader()
        mock_set_encoders.assert_called_once_with(build_encoders_return)

    # --- wordlist / dictionary ----------------------------------------
    @patch("src.fuzzingtool.fuzz_lib.WordlistFactory.creator")
    def test_build_wordlist(self, mock_creator: Mock):
        test_wordlist = WordlistMock('1')
        mock_creator.return_value = test_wordlist
        returned_wordlist = FuzzLib(
            url="http://test-url.com/", wordlist="test=1"
        )._FuzzLib__build_wordlist([("test", '1')])
        mock_creator.assert_called_once_with("test", '1', None)
        self.assertIsInstance(returned_wordlist, list)
        self.assertEqual(returned_wordlist, test_wordlist._build())

    @patch("src.fuzzingtool.fuzz_lib.WordlistFactory.creator")
    def test_build_wordlist_with_blank_wordlist(self, mock_creator: Mock):
        mock_creator.side_effect = WordlistCreationError()
        test_fuzz_lib = FuzzLib(url="http://test-url.com/", wordlist="test")
        with self.assertRaises(FuzzLibException) as e:
            test_fuzz_lib._FuzzLib__build_wordlist([("test", '')])
        self.assertEqual(str(e.exception), "The wordlist is empty")

    @patch("src.fuzzingtool.fuzz_lib.FuzzLib._FuzzLib__build_wordlist")
    def test_init_dictionary(self, mock_build_wordlist: Mock):
        # unique=True: duplicates are removed and counted in the metadata.
        mock_build_wordlist.return_value = ["test", "test", "test2"]
        test_fuzz_lib = FuzzLib(wordlist="test", unique=True)
        test_fuzz_lib._init_dictionary()
        self.assertEqual(test_fuzz_lib.dict_metadata["removed"], 1)
        self.assertEqual(test_fuzz_lib.dict_metadata["len"], 2)
| import unittest
from unittest.mock import Mock, patch
from src.fuzzingtool.fuzz_lib import FuzzLib
from src.fuzzingtool.conn.requesters import Requester, SubdomainRequester
from src.fuzzingtool.utils.consts import PluginCategory, FUZZING_MARK
from src.fuzzingtool.exceptions import FuzzLibException, WordlistCreationError
from src.fuzzingtool.core.defaults.scanners import DataScanner, PathScanner, SubdomainScanner
from src.fuzzingtool.core.plugins.scanners import Reflected
from src.fuzzingtool.core.plugins.encoders import Html
from .mock_utils.wordlist_mock import WordlistMock
class TestFuzzController(unittest.TestCase):
def test_init_requester_with_common_requester(self):
test_url = "http://test-url.com/"
test_fuzz_lib = FuzzLib(url=test_url)
test_fuzz_lib._init_requester()
self.assertIsInstance(test_fuzz_lib.requester, Requester)
def test_init_requester_with_subdomain_requester(self):
test_url = f"http://{FUZZING_MARK}.test-url.com/"
test_fuzz_lib = FuzzLib(url=test_url)
test_fuzz_lib._init_requester()
self.assertIsInstance(test_fuzz_lib.requester, SubdomainRequester)
@patch("src.fuzzingtool.fuzz_lib.build_target_from_raw_http")
def test_init_requester_with_raw_http(
self,
mock_build_target_from_raw_http: Mock
):
return_target = {
'url': "http://test-url.com/",
'method': 'GET',
'body': '',
'header': {
'test-key': "test-value"
}
}
test_raw_filename = "/home/test/test_raw.txt"
mock_build_target_from_raw_http.return_value = return_target
test_fuzz_lib = FuzzLib(raw_http=test_raw_filename)
test_fuzz_lib._init_requester()
mock_build_target_from_raw_http.assert_called_once_with(test_raw_filename, None)
self.assertIsInstance(test_fuzz_lib.requester, Requester)
def test_init_requester_with_raise_exception(self):
with self.assertRaises(FuzzLibException) as e:
FuzzLib(wordlist="test")._init_requester()
self.assertEqual(str(e.exception), "A target is needed to make the fuzzing")
@patch("src.fuzzingtool.fuzz_lib.Matcher.set_status_code")
def test_init_matcher(self, mock_set_status_code: Mock):
test_fuzz_lib = FuzzLib(url=f"http://test-url.com/{FUZZING_MARK}")
test_fuzz_lib._init_requester()
test_fuzz_lib._init_matcher()
mock_set_status_code.assert_called_once_with("200-399,401,403")
def test_get_default_scanner_with_path_scanner(self):
test_fuzz_lib = FuzzLib(url=f"http://test-url.com/{FUZZING_MARK}")
test_fuzz_lib._init_requester()
returned_scanner = test_fuzz_lib._FuzzLib__get_default_scanner()
self.assertIsInstance(returned_scanner, PathScanner)
def test_get_default_scanner_with_subdomain_scanner(self):
test_fuzz_lib = FuzzLib(url=f"http://{FUZZING_MARK}.test-url.com/")
test_fuzz_lib._init_requester()
returned_scanner = test_fuzz_lib._FuzzLib__get_default_scanner()
self.assertIsInstance(returned_scanner, SubdomainScanner)
def test_get_default_scanner_with_data_scanner(self):
test_fuzz_lib = FuzzLib(url=f"http://test-url.com/", data=f"a={FUZZING_MARK}")
test_fuzz_lib._init_requester()
returned_scanner = test_fuzz_lib._FuzzLib__get_default_scanner()
self.assertIsInstance(returned_scanner, DataScanner)
@patch("src.fuzzingtool.fuzz_lib.PluginFactory.object_creator")
def test_init_scanners_with_plugin_scanner(self, mock_object_creator: Mock):
mock_object_creator.return_value = Reflected()
test_fuzz_lib = FuzzLib(url=f"http://test-url.com/", scanner="Reflected")
test_fuzz_lib._init_requester()
test_fuzz_lib._init_scanners()
mock_object_creator.assert_called_once_with(PluginCategory.scanner, "Reflected", '')
@patch("src.fuzzingtool.fuzz_lib.FuzzLib._FuzzLib__get_default_scanner")
def test_init_scanners_with_default_scanner(self, mock_get_default_scanner: Mock):
FuzzLib(url=f"http://test-url.com/{FUZZING_MARK}")._init_scanners()
mock_get_default_scanner.assert_called_once()
@patch("src.fuzzingtool.fuzz_lib.PluginFactory.object_creator")
def test_build_encoders_with_encoders(self, mock_object_creator: Mock):
expected_encoder = Html()
return_expected = ([expected_encoder], [])
mock_object_creator.return_value = expected_encoder
returned_encoders = FuzzLib(encoder="Html")._FuzzLib__build_encoders()
mock_object_creator.assert_called_once_with(PluginCategory.encoder, "Html", '')
self.assertEqual(returned_encoders, return_expected)
@patch("src.fuzzingtool.fuzz_lib.PluginFactory.object_creator")
def test_build_encoders_with_chain_encoders(self, mock_object_creator: Mock):
expected_encoder = Html()
return_expected = ([], [[expected_encoder, expected_encoder]])
mock_object_creator.return_value = expected_encoder
returned_encoders = FuzzLib(encoder="Html@Html")._FuzzLib__build_encoders()
mock_object_creator.assert_called_with(PluginCategory.encoder, "Html", '')
self.assertEqual(returned_encoders, return_expected)
@patch("src.fuzzingtool.fuzz_lib.Payloader.encoder.set_regex")
@patch("src.fuzzingtool.fuzz_lib.PluginFactory.object_creator")
def test_build_encoders_with_encode_only(self,
mock_object_creator: Mock,
mock_set_regex: Mock):
test_encode_only = "<|>|;"
mock_object_creator.return_value = Html()
FuzzLib(encoder="Html", encode_only=test_encode_only)._FuzzLib__build_encoders()
mock_set_regex.assert_called_once_with(test_encode_only)
@patch("src.fuzzingtool.fuzz_lib.Payloader.set_prefix")
def test_configure_payloader_with_prefix(self, mock_set_prefix: Mock):
FuzzLib(prefix="test,test2")._FuzzLib__configure_payloader()
mock_set_prefix.assert_called_once_with(["test", "test2"])
@patch("src.fuzzingtool.fuzz_lib.Payloader.set_suffix")
def test_configure_payloader_with_suffix(self, mock_set_suffix: Mock):
FuzzLib(suffix="test,test2")._FuzzLib__configure_payloader()
mock_set_suffix.assert_called_once_with(["test", "test2"])
@patch("src.fuzzingtool.fuzz_lib.Payloader.set_lowercase")
def test_configure_payloader_with_lowercase(self, mock_set_lowercase: Mock):
FuzzLib(lower=True)._FuzzLib__configure_payloader()
mock_set_lowercase.assert_called_once()
@patch("src.fuzzingtool.fuzz_lib.Payloader.set_uppercase")
def test_configure_payloader_with_uppercase(self, mock_set_uppercase: Mock):
FuzzLib(upper=True)._FuzzLib__configure_payloader()
mock_set_uppercase.assert_called_once()
@patch("src.fuzzingtool.fuzz_lib.Payloader.set_capitalize")
def test_configure_payloader_with_capitalize(self, mock_set_capitalize: Mock):
FuzzLib(capitalize=True)._FuzzLib__configure_payloader()
mock_set_capitalize.assert_called_once()
@patch("src.fuzzingtool.fuzz_lib.FuzzLib._FuzzLib__build_encoders")
@patch("src.fuzzingtool.fuzz_lib.Payloader.encoder.set_encoders")
def test_configure_payloader_with_encoders(self,
mock_set_encoders: Mock,
mock_build_encoders: Mock):
build_encoders_return = ([Html()], [])
mock_build_encoders.return_value = build_encoders_return
FuzzLib(encoder="Html")._FuzzLib__configure_payloader()
mock_set_encoders.assert_called_once_with(build_encoders_return)
@patch("src.fuzzingtool.fuzz_lib.WordlistFactory.creator")
def test_build_wordlist(self, mock_creator: Mock):
test_wordlist = WordlistMock('1')
mock_creator.return_value = test_wordlist
returned_wordlist = FuzzLib(
url="http://test-url.com/", wordlist="test=1"
)._FuzzLib__build_wordlist([("test", '1')])
mock_creator.assert_called_once_with("test", '1', None)
self.assertIsInstance(returned_wordlist, list)
self.assertEqual(returned_wordlist, test_wordlist._build())
@patch("src.fuzzingtool.fuzz_lib.WordlistFactory.creator")
def test_build_wordlist_with_blank_wordlist(self, mock_creator: Mock):
mock_creator.side_effect = WordlistCreationError()
test_fuzz_lib = FuzzLib(url="http://test-url.com/", wordlist="test")
with self.assertRaises(FuzzLibException) as e:
test_fuzz_lib._FuzzLib__build_wordlist([("test", '')])
self.assertEqual(str(e.exception), "The wordlist is empty")
@patch("src.fuzzingtool.fuzz_lib.FuzzLib._FuzzLib__build_wordlist")
def test_init_dictionary(self, mock_build_wordlist: Mock):
mock_build_wordlist.return_value = ["test", "test", "test2"]
test_fuzz_lib = FuzzLib(wordlist="test", unique=True)
test_fuzz_lib._init_dictionary()
self.assertEqual(test_fuzz_lib.dict_metadata["removed"], 1)
self.assertEqual(test_fuzz_lib.dict_metadata["len"], 2)
| none | 1 | 2.396577 | 2 | |
chapter05/bar.py | teleported/ds-fromscratch | 0 | 6620992 | <filename>chapter05/bar.py
#!/usr/bin/python
# Histogram demo: bar chart of simulated "number of friends" counts.
import matplotlib
matplotlib.use('Qt4Agg')  # backend selected before pyplot is imported
import random
from collections import Counter
from matplotlib import pyplot as plt
# Simulate 200 people, each with a uniform-random friend count in [0, 19].
num_friends = [random.choice(range(20)) for _ in range(200)]
friend_counter = Counter(num_friends)
# x positions 0..20; Counter returns 0 for values that never occurred.
xs = range(21)
ys = [friend_counter[x] for x in xs]
plt.bar(xs, ys)
# NOTE(review): with 200 draws over 20 buckets the expected bar height is
# ~10, but an individual count can exceed the fixed y-limit of 30 - confirm.
plt.axis([0, 25, 0, 30])
# plt.xticks(xs , xs)
plt.show()
| <filename>chapter05/bar.py
#!/usr/bin/python
import matplotlib
matplotlib.use('Qt4Agg')
import random
from collections import Counter
from matplotlib import pyplot as plt
num_friends = [random.choice(range(20)) for _ in range(200)]
friend_counter = Counter(num_friends)
xs = range(21)
ys = [friend_counter[x] for x in xs]
plt.bar(xs, ys)
plt.axis([0, 25, 0, 30])
# plt.xticks(xs , xs)
plt.show()
| en | 0.370167 | #!/usr/bin/python # plt.xticks(xs , xs) | 3.231181 | 3 |
good_spot/populartimes/admin.py | jasmine92122/NightClubBackend | 0 | 6620993 | <reponame>jasmine92122/NightClubBackend
from django.contrib import admin
from django.contrib.postgres.fields import JSONField
from prettyjson import PrettyJSONWidget
from good_spot.populartimes.models import Populartimes
@admin.register(Populartimes)
class PopulartimesAdmin(admin.ModelAdmin):
    """Admin page for Populartimes; renders JSON fields with a pretty widget."""
    formfield_overrides = {
        JSONField: {
            'widget': PrettyJSONWidget(attrs={'initial': 'parsed'}),
        },
    }
from django.contrib.postgres.fields import JSONField
from prettyjson import PrettyJSONWidget
from good_spot.populartimes.models import Populartimes
@admin.register(Populartimes)
class PopulartimesAdmin(admin.ModelAdmin):
formfield_overrides = {
JSONField: {'widget': PrettyJSONWidget(attrs={'initial': 'parsed'})}
} | none | 1 | 1.513608 | 2 | |
pymodaq_plugins/hardware/Newport/esp100.py | Cdriko/pymodaq_plugins | 0 | 6620994 | import pyvisa
import numpy as np
class ESP100(object):
    """Minimal driver for a Newport ESP100 motion controller over VISA.

    Commands are ASCII strings of the form '<axis><mnemonic><value>'
    sent through a pyvisa resource; positions are reported in mm
    (see get_position).
    """

    def __init__(self):
        super().__init__()
        self._controller = None  # pyvisa resource; set in init_communication
        self._VISA_rm = pyvisa.ResourceManager()
        self.com_ports = self.get_ressources()

    @property
    def timeout(self):
        # VISA I/O timeout (pyvisa expresses timeouts in milliseconds).
        return self._timeout

    @timeout.setter
    def timeout(self, to):
        # Keep the cached value and the live resource's timeout in sync.
        self._timeout = to
        self._controller.timeout = to

    def get_ressources(self):
        # Return the aliases of all VISA resources known to the manager.
        infos=self._VISA_rm.list_resources_info()
        com_ports = [infos[key].alias for key in infos.keys()]
        return com_ports

    def init_communication(self, com_port, axis=1):
        """Open the serial VISA resource, configure it and power the axis motor.

        Raises IOError when com_port is not among the discovered resources.
        """
        if com_port in self.com_ports:
            self._controller = self._VISA_rm.open_resource(com_port)
            #set attributes
            self._controller.baud_rate = 19200
            self._controller.data_bits = 8
            self._controller.stop_bits = pyvisa.constants.StopBits['one']
            self._controller.parity = pyvisa.constants.Parity['none']
            #self._controller.flow_control = 0
            # self._controller.read_termination=self._controller.LF
            # self._controller.write_termination=self._controller.LF
            self.timeout = 2000
            self.turn_motor_on(axis)
        else:
            raise IOError('{:s} is not a valid port'.format(com_port))

    def turn_motor_on(self, axis=1):
        # Query motor-on state ('MO?'); enable the motor ('MO') if it is off.
        self._write_command(f'{axis}MO?')
        status = self._controller.read_ascii_values()[0]
        if not status:
            self._write_command(f'{axis}MO')

    def turn_motor_off(self, axis=1):
        # Query motor-off state ('MF?'); disable the motor ('MF') if needed.
        self._write_command(f'{axis}MF?')
        status = self._controller.read_ascii_values()[0]
        if status:
            self._write_command(f'{axis}MF')

    def close_communication(self, axis=1):
        # NOTE(review): this also closes the shared ResourceManager, which
        # makes the driver single-use - confirm that is intended.
        self.turn_motor_off(axis=axis)
        self._controller.close()
        self._VISA_rm.close()

    def get_controller_infos(self):
        # 'ID?' identification query; drains the multi-line reply.
        self._write_command('1ID?')
        return self._get_read()

    def _query(self, command):
        # Thin wrapper around pyvisa's write+read query.
        ret = self._controller.query(command)
        return ret

    def _write_command(self, command):
        # Send one raw ASCII command; no reply is read here.
        self._controller.write(command)

    def _get_read(self):
        # Drain all pending reply lines: read with a short 50 ms timeout
        # until the VISA read times out, then restore the normal timeout.
        self._controller.timeout = 50
        info = ''
        try:
            while True:
                info += self._controller.read()+'\n'
        except pyvisa.errors.VisaIOError as e:
            pass
        self._controller.timeout = self._timeout
        return info

    def move_axis(self, move_type='ABS', axis=1, pos=0.):
        """Move the axis: 'ABS' -> 'PA' absolute, 'REL' -> 'PR' relative.

        Raises Exception for any other move_type.
        NOTE(review): _write_command returns None, so ret is always None.
        """
        if move_type == 'ABS':
            ret = self._write_command(f'{axis}PA{pos}')
        elif move_type == 'REL':
            ret = self._write_command(f'{axis}PR{pos}')
        else:
            raise Exception('{:s} is not a valid displacement type'.format(move_type))
        return ret

    def get_position(self, axis=1):
        """ return the given axis position always in mm
        """
        self._write_command(f'{axis}TP')
        pos = self._controller.read_ascii_values()[0]
        return pos

    def get_velocity(self, axis=1):
        # 'VA?' queries the current velocity setting.
        self._write_command(f'{axis}VA?')
        pos = self._controller.read_ascii_values()[0]
        return pos

    def get_velocity_max(self, axis=1):
        # 'VU?' queries the maximum velocity setting.
        self._write_command(f'{axis}VU?')
        pos = self._controller.read_ascii_values()[0]
        return pos

    def set_velocity(self, velocity, axis=1):
        # 'VA<value>' sets the axis velocity; no readback is performed.
        self._write_command(f'{axis}VA{velocity}')

    def move_home(self, axis=1):
        # 'OR1' starts the homing search for the axis.
        self._write_command(f'{axis}OR1')
def stop_motion(self, axis=1):
self._write_command(f'{axis}ST') | import pyvisa
import numpy as np
class ESP100(object):
def __init__(self):
super().__init__()
self._controller = None
self._VISA_rm = pyvisa.ResourceManager()
self.com_ports = self.get_ressources()
@property
def timeout(self):
return self._timeout
@timeout.setter
def timeout(self, to):
self._timeout = to
self._controller.timeout = to
def get_ressources(self):
infos=self._VISA_rm.list_resources_info()
com_ports = [infos[key].alias for key in infos.keys()]
return com_ports
def init_communication(self, com_port, axis=1):
if com_port in self.com_ports:
self._controller = self._VISA_rm.open_resource(com_port)
#set attributes
self._controller.baud_rate = 19200
self._controller.data_bits = 8
self._controller.stop_bits = pyvisa.constants.StopBits['one']
self._controller.parity = pyvisa.constants.Parity['none']
#self._controller.flow_control = 0
# self._controller.read_termination=self._controller.LF
# self._controller.write_termination=self._controller.LF
self.timeout = 2000
self.turn_motor_on(axis)
else:
raise IOError('{:s} is not a valid port'.format(com_port))
def turn_motor_on(self, axis=1):
self._write_command(f'{axis}MO?')
status = self._controller.read_ascii_values()[0]
if not status:
self._write_command(f'{axis}MO')
def turn_motor_off(self, axis=1):
self._write_command(f'{axis}MF?')
status = self._controller.read_ascii_values()[0]
if status:
self._write_command(f'{axis}MF')
def close_communication(self, axis=1):
self.turn_motor_off(axis=axis)
self._controller.close()
self._VISA_rm.close()
def get_controller_infos(self):
self._write_command('1ID?')
return self._get_read()
def _query(self, command):
ret = self._controller.query(command)
return ret
def _write_command(self, command):
self._controller.write(command)
def _get_read(self):
self._controller.timeout = 50
info = ''
try:
while True:
info += self._controller.read()+'\n'
except pyvisa.errors.VisaIOError as e:
pass
self._controller.timeout = self._timeout
return info
def move_axis(self, move_type='ABS', axis=1, pos=0.):
if move_type == 'ABS':
ret = self._write_command(f'{axis}PA{pos}')
elif move_type == 'REL':
ret = self._write_command(f'{axis}PR{pos}')
else:
raise Exception('{:s} is not a valid displacement type'.format(move_type))
return ret
def get_position(self, axis=1):
""" return the given axis position always in mm
"""
self._write_command(f'{axis}TP')
pos = self._controller.read_ascii_values()[0]
return pos
def get_velocity(self, axis=1):
self._write_command(f'{axis}VA?')
pos = self._controller.read_ascii_values()[0]
return pos
def get_velocity_max(self, axis=1):
self._write_command(f'{axis}VU?')
pos = self._controller.read_ascii_values()[0]
return pos
def set_velocity(self, velocity, axis=1):
self._write_command(f'{axis}VA{velocity}')
def move_home(self, axis=1):
self._write_command(f'{axis}OR1')
def stop_motion(self, axis=1):
self._write_command(f'{axis}ST') | en | 0.227171 | #set attributes #self._controller.flow_control = 0 # self._controller.read_termination=self._controller.LF # self._controller.write_termination=self._controller.LF return the given axis position always in mm | 2.576441 | 3 |
build/PureCloudPlatformClientV2/models/create_web_chat_conversation_request.py | cjohnson-ctl/platform-client-sdk-python | 10 | 6620995 | <reponame>cjohnson-ctl/platform-client-sdk-python<gh_stars>1-10
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class CreateWebChatConversationRequest(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
CreateWebChatConversationRequest - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'organization_id': 'str',
'deployment_id': 'str',
'routing_target': 'WebChatRoutingTarget',
'member_info': 'GuestMemberInfo',
'member_auth_token': 'str',
'journey_context': 'JourneyContext'
}
self.attribute_map = {
'organization_id': 'organizationId',
'deployment_id': 'deploymentId',
'routing_target': 'routingTarget',
'member_info': 'memberInfo',
'member_auth_token': 'memberAuthToken',
'journey_context': 'journeyContext'
}
self._organization_id = None
self._deployment_id = None
self._routing_target = None
self._member_info = None
self._member_auth_token = None
self._journey_context = None
@property
def organization_id(self):
"""
Gets the organization_id of this CreateWebChatConversationRequest.
The organization identifier.
:return: The organization_id of this CreateWebChatConversationRequest.
:rtype: str
"""
return self._organization_id
@organization_id.setter
def organization_id(self, organization_id):
"""
Sets the organization_id of this CreateWebChatConversationRequest.
The organization identifier.
:param organization_id: The organization_id of this CreateWebChatConversationRequest.
:type: str
"""
self._organization_id = organization_id
@property
def deployment_id(self):
"""
Gets the deployment_id of this CreateWebChatConversationRequest.
The web chat Deployment ID which contains the appropriate settings for this chat conversation.
:return: The deployment_id of this CreateWebChatConversationRequest.
:rtype: str
"""
return self._deployment_id
@deployment_id.setter
def deployment_id(self, deployment_id):
"""
Sets the deployment_id of this CreateWebChatConversationRequest.
The web chat Deployment ID which contains the appropriate settings for this chat conversation.
:param deployment_id: The deployment_id of this CreateWebChatConversationRequest.
:type: str
"""
self._deployment_id = deployment_id
@property
def routing_target(self):
"""
Gets the routing_target of this CreateWebChatConversationRequest.
The routing information to use for the new chat conversation.
:return: The routing_target of this CreateWebChatConversationRequest.
:rtype: WebChatRoutingTarget
"""
return self._routing_target
@routing_target.setter
def routing_target(self, routing_target):
"""
Sets the routing_target of this CreateWebChatConversationRequest.
The routing information to use for the new chat conversation.
:param routing_target: The routing_target of this CreateWebChatConversationRequest.
:type: WebChatRoutingTarget
"""
self._routing_target = routing_target
@property
def member_info(self):
"""
Gets the member_info of this CreateWebChatConversationRequest.
The guest member info to use for the new chat conversation.
:return: The member_info of this CreateWebChatConversationRequest.
:rtype: GuestMemberInfo
"""
return self._member_info
@member_info.setter
def member_info(self, member_info):
    """
    Sets the member_info of this CreateWebChatConversationRequest.
    The guest member info to use for the new chat conversation.

    :param member_info: The member_info of this CreateWebChatConversationRequest.
    :type: GuestMemberInfo
    """
    self._member_info = member_info
@property
def member_auth_token(self):
    """
    Gets the member_auth_token of this CreateWebChatConversationRequest.
    If the guest member is an authenticated member (ie, not anonymous) his JWT is provided here. The token will have been previously generated with the \"POST /api/v2/signeddata\" resource.

    :return: The member_auth_token of this CreateWebChatConversationRequest.
    :rtype: str
    """
    return self._member_auth_token
@member_auth_token.setter
def member_auth_token(self, member_auth_token):
    """
    Sets the member_auth_token of this CreateWebChatConversationRequest.
    If the guest member is an authenticated member (ie, not anonymous) his JWT is provided here. The token will have been previously generated with the \"POST /api/v2/signeddata\" resource.

    :param member_auth_token: The member_auth_token of this CreateWebChatConversationRequest.
    :type: str
    """
    self._member_auth_token = member_auth_token
@property
def journey_context(self):
    """
    Gets the journey_context of this CreateWebChatConversationRequest.
    A subset of the Journey System's data relevant to this conversation/session request (for external linkage and internal usage/context).

    :return: The journey_context of this CreateWebChatConversationRequest.
    :rtype: JourneyContext
    """
    return self._journey_context
@journey_context.setter
def journey_context(self, journey_context):
    """
    Sets the journey_context of this CreateWebChatConversationRequest.
    A subset of the Journey System's data relevant to this conversation/session request (for external linkage and internal usage/context).

    :param journey_context: The journey_context of this CreateWebChatConversationRequest.
    :type: JourneyContext
    """
    self._journey_context = journey_context
def to_dict(self):
    """
    Returns the model properties as a dict.

    Nested models (anything exposing a ``to_dict`` method), lists and
    dicts are serialized recursively.
    """
    result = {}

    for attr, _ in iteritems(self.swagger_types):
        value = getattr(self, attr)
        if isinstance(value, list):
            # Serialize each list element that is itself a model.
            result[attr] = list(map(
                lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                value
            ))
        elif hasattr(value, "to_dict"):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            # Serialize dict values that are models; keys are kept as-is.
            result[attr] = dict(map(
                lambda item: (item[0], item[1].to_dict())
                if hasattr(item[1], "to_dict") else item,
                value.items()
            ))
        else:
            result[attr] = value
    return result
def to_json(self):
    """
    Returns the model as raw JSON (a str), after sanitizing the
    to_dict() output for serialization.
    """
    return json.dumps(sanitize_for_serialization(self.to_dict()))
def to_str(self):
    """
    Returns the pretty-printed string representation of the model.
    """
    return pformat(self.to_dict())
def __repr__(self):
    """
    For `print` and `pprint`; delegates to to_str().
    """
    return self.to_str()
def __eq__(self, other):
    """
    Returns true if both objects are equal.

    NOTE(review): comparing against an operand without a ``__dict__``
    (e.g. an int or str) raises AttributeError here; callers should only
    compare model instances.
    """
    return self.__dict__ == other.__dict__
def __ne__(self, other):
    """
    Returns true if both objects are not equal (negation of __eq__).
    """
    return not self == other
| # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class CreateWebChatConversationRequest(object):
    """
    Request body for creating a web chat conversation.

    NOTE: This class is auto generated by the swagger code generator program.
    Edited only to make ``__eq__`` safe against non-model operands and to
    drop the runtime dependency on ``six`` inside ``to_dict``.
    """

    def __init__(self):
        """
        CreateWebChatConversationRequest - a model defined in Swagger.

        All attributes start as None; ``swagger_types`` and
        ``attribute_map`` describe the model schema.
        """
        # Attribute name -> declared Swagger type (consumed by to_dict()).
        self.swagger_types = {
            'organization_id': 'str',
            'deployment_id': 'str',
            'routing_target': 'WebChatRoutingTarget',
            'member_info': 'GuestMemberInfo',
            'member_auth_token': 'str',
            'journey_context': 'JourneyContext'
        }

        # Attribute name -> JSON key used on the wire.
        self.attribute_map = {
            'organization_id': 'organizationId',
            'deployment_id': 'deploymentId',
            'routing_target': 'routingTarget',
            'member_info': 'memberInfo',
            'member_auth_token': 'memberAuthToken',
            'journey_context': 'journeyContext'
        }

        self._organization_id = None
        self._deployment_id = None
        self._routing_target = None
        self._member_info = None
        self._member_auth_token = None
        self._journey_context = None

    @property
    def organization_id(self):
        """str: The organization identifier."""
        return self._organization_id

    @organization_id.setter
    def organization_id(self, organization_id):
        """Sets the organization identifier (str)."""
        self._organization_id = organization_id

    @property
    def deployment_id(self):
        """str: The web chat Deployment ID which contains the appropriate
        settings for this chat conversation."""
        return self._deployment_id

    @deployment_id.setter
    def deployment_id(self, deployment_id):
        """Sets the web chat Deployment ID (str)."""
        self._deployment_id = deployment_id

    @property
    def routing_target(self):
        """WebChatRoutingTarget: The routing information to use for the
        new chat conversation."""
        return self._routing_target

    @routing_target.setter
    def routing_target(self, routing_target):
        """Sets the routing information (WebChatRoutingTarget)."""
        self._routing_target = routing_target

    @property
    def member_info(self):
        """GuestMemberInfo: The guest member info to use for the new chat
        conversation."""
        return self._member_info

    @member_info.setter
    def member_info(self, member_info):
        """Sets the guest member info (GuestMemberInfo)."""
        self._member_info = member_info

    @property
    def member_auth_token(self):
        """str: JWT of an authenticated (non-anonymous) guest member,
        previously generated with the \"POST /api/v2/signeddata\" resource."""
        return self._member_auth_token

    @member_auth_token.setter
    def member_auth_token(self, member_auth_token):
        """Sets the guest member JWT (str)."""
        self._member_auth_token = member_auth_token

    @property
    def journey_context(self):
        """JourneyContext: A subset of the Journey System's data relevant
        to this conversation/session request (for external linkage and
        internal usage/context)."""
        return self._journey_context

    @journey_context.setter
    def journey_context(self, journey_context):
        """Sets the journey context (JourneyContext)."""
        self._journey_context = journey_context

    def to_dict(self):
        """
        Returns the model properties as a dict, recursively serializing
        nested models, lists and dicts via their own to_dict().
        """
        result = {}

        # dict.items() replaces six.iteritems(): identical behavior on
        # Python 2 and 3, and avoids the six dependency here.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result

    def to_json(self):
        """
        Returns the model as raw JSON.
        """
        return json.dumps(sanitize_for_serialization(self.to_dict()))

    def to_str(self):
        """
        Returns the string representation of the model.
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`.
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are models with equal attributes.
        """
        # Guard: the previous implementation compared __dict__ directly,
        # which raised AttributeError for operands without a __dict__
        # (e.g. ints or strings). Non-model operands now compare unequal.
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal.
        """
        return not self == other
The routing information to use for the new chat conversation. :return: The routing_target of this CreateWebChatConversationRequest. :rtype: WebChatRoutingTarget Sets the routing_target of this CreateWebChatConversationRequest. The routing information to use for the new chat conversation. :param routing_target: The routing_target of this CreateWebChatConversationRequest. :type: WebChatRoutingTarget Gets the member_info of this CreateWebChatConversationRequest. The guest member info to use for the new chat conversation. :return: The member_info of this CreateWebChatConversationRequest. :rtype: GuestMemberInfo Sets the member_info of this CreateWebChatConversationRequest. The guest member info to use for the new chat conversation. :param member_info: The member_info of this CreateWebChatConversationRequest. :type: GuestMemberInfo Gets the member_auth_token of this CreateWebChatConversationRequest. If the guest member is an authenticated member (ie, not anonymous) his JWT is provided here. The token will have been previously generated with the \"POST /api/v2/signeddata\" resource. :return: The member_auth_token of this CreateWebChatConversationRequest. :rtype: str Sets the member_auth_token of this CreateWebChatConversationRequest. If the guest member is an authenticated member (ie, not anonymous) his JWT is provided here. The token will have been previously generated with the \"POST /api/v2/signeddata\" resource. :param member_auth_token: The member_auth_token of this CreateWebChatConversationRequest. :type: str Gets the journey_context of this CreateWebChatConversationRequest. A subset of the Journey System's data relevant to this conversation/session request (for external linkage and internal usage/context). :return: The journey_context of this CreateWebChatConversationRequest. :rtype: JourneyContext Sets the journey_context of this CreateWebChatConversationRequest. 
A subset of the Journey System's data relevant to this conversation/session request (for external linkage and internal usage/context). :param journey_context: The journey_context of this CreateWebChatConversationRequest. :type: JourneyContext Returns the model properties as a dict Returns the model as raw JSON Returns the string representation of the model For `print` and `pprint` Returns true if both objects are equal Returns true if both objects are not equal | 1.540727 | 2 |
tests/unit/test_toolchain.py | ericsonj/pymaketool | 6 | 6620996 | <gh_stars>1-10
import unittest
from pymakelib import toolchain
class TestToolchain(unittest.TestCase):
def test_confgcc(self):
res = toolchain.confGCC(binLocation="/usr/bin/")
self.assertEqual("/usr/bin/gcc", res['CC'])
res = toolchain.confGCC(binLocation="/usr/bin")
self.assertEqual("/usr/bin/gcc", res['CC'])
if __name__ == '__main__':
unittest.main() | import unittest
from pymakelib import toolchain
class TestToolchain(unittest.TestCase):
def test_confgcc(self):
res = toolchain.confGCC(binLocation="/usr/bin/")
self.assertEqual("/usr/bin/gcc", res['CC'])
res = toolchain.confGCC(binLocation="/usr/bin")
self.assertEqual("/usr/bin/gcc", res['CC'])
if __name__ == '__main__':
unittest.main() | none | 1 | 2.328085 | 2 | |
jupyterlab_powerpoint/tests/test_init.py | timkpaine/jupyterlab_powerpoint | 19 | 6620997 | <reponame>timkpaine/jupyterlab_powerpoint<gh_stars>10-100
# for Coverage
from jupyterlab_powerpoint import _jupyter_server_extension_paths
class TestInit:
def test__jupyter_server_extension_paths(self):
assert _jupyter_server_extension_paths() == [
{"module": "jupyterlab_powerpoint.extension"}
]
| # for Coverage
from jupyterlab_powerpoint import _jupyter_server_extension_paths
class TestInit:
def test__jupyter_server_extension_paths(self):
assert _jupyter_server_extension_paths() == [
{"module": "jupyterlab_powerpoint.extension"}
] | en | 0.887398 | # for Coverage | 1.784336 | 2 |
src/baseline/exnn/setup.py | fau-is/gam_comparison | 1 | 6620998 | from setuptools import setup
# Package metadata gathered in one mapping so it is easy to scan and extend.
_METADATA = dict(
    name='exnn',
    version='0.1',
    description='The enhanced explainable neural network with sparse, orthogonal and smooth constraints',
    url='https://github.com/ZebinYang/exnn',
    author='<NAME>',
    author_email='<EMAIL>',
    license='GPL',
    packages=['exnn'],
    install_requires=[
        'matplotlib>=2.2.2', 'tensorflow>=2.0.0', 'numpy>=1.15.2'],
    zip_safe=False,
)

setup(**_METADATA)
| from setuptools import setup
setup(name='exnn',
version='0.1',
description='The enhanced explainable neural network with sparse, orthogonal and smooth constraints',
url='https://github.com/ZebinYang/exnn',
author='<NAME>',
author_email='<EMAIL>',
license='GPL',
packages=['exnn'],
install_requires=[
'matplotlib>=2.2.2','tensorflow>=2.0.0', 'numpy>=1.15.2'],
zip_safe=False)
| none | 1 | 1.129175 | 1 | |
logger.py | SadoP/DiscordRolesFromSheets | 0 | 6620999 | <filename>logger.py
import logging

# Single shared application logger. The format prefixes every record with
# timestamp, logger name and level.
_LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'

logger = logging.getLogger("logger")
logging.basicConfig(format=_LOG_FORMAT)
logger.setLevel(logging.INFO)
| <filename>logger.py
import logging

# Single shared application logger. The format prefixes every record with
# timestamp, logger name and level.
_LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'

logger = logging.getLogger("logger")
logging.basicConfig(format=_LOG_FORMAT)
logger.setLevel(logging.INFO)
| none | 1 | 2.567354 | 3 | |
src/golem.py | ignavier/golem | 10 | 6621000 | import os
from models import GolemModel
from trainers import GolemTrainer
# For logging of tensorflow messages
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
def golem(X, lambda_1, lambda_2, equal_variances=True,
          num_iter=1e+5, learning_rate=1e-3, seed=1,
          checkpoint_iter=None, output_dir=None, B_init=None):
    """Solve the unconstrained optimization problem of GOLEM.

    Builds a GolemModel from the centered data and fits it with
    GolemTrainer.

    Args:
        X (numpy.ndarray): [n, d] data matrix.
        lambda_1 (float): Coefficient of L1 penalty.
        lambda_2 (float): Coefficient of DAG penalty.
        equal_variances (bool): Whether to assume equal noise variances
            for the likelihood objective. Default: True.
        num_iter (int): Number of iterations for training.
        learning_rate (float): Learning rate of Adam optimizer. Default: 1e-3.
        seed (int): Random seed. Default: 1.
        checkpoint_iter (int): Number of iterations between each checkpoint.
            Set to None to disable. Default: None.
        output_dir (str): Output directory to save training outputs.
        B_init (numpy.ndarray or None): [d, d] weighted matrix for
            initialization. Set to None to disable. Default: None.

    Returns:
        numpy.ndarray: [d, d] estimated weighted matrix (not thresholded).

    Hyperparameters:
        (1) GOLEM-NV: equal_variances=False, lambda_1=2e-3, lambda_2=5.0.
        (2) GOLEM-EV: equal_variances=True, lambda_1=2e-2, lambda_2=5.0.
    """
    # Center each column so the likelihood needs no intercept term.
    X_centered = X - X.mean(axis=0, keepdims=True)

    num_samples, num_vars = X_centered.shape
    model = GolemModel(num_samples, num_vars, lambda_1, lambda_2,
                       equal_variances, seed, B_init)

    # Fit and return the raw estimate; thresholding happens downstream.
    trainer = GolemTrainer(learning_rate)
    return trainer.train(model, X_centered, num_iter, checkpoint_iter,
                         output_dir)
if __name__ == '__main__':
    # Minimal code to run GOLEM: generate a synthetic dataset, fit
    # GOLEM-EV and report accuracy of the post-processed estimate.
    import logging

    from data_loader import SyntheticDataset
    from utils.train import postprocess
    from utils.utils import count_accuracy, set_seed

    # Setup for logging
    # Required for printing histories if checkpointing is activated
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s %(levelname)s - %(name)s - %(message)s'
    )

    # Reproducibility
    set_seed(1)

    # Load dataset
    n, d = 1000, 20
    graph_type, degree = 'ER', 4    # ER2 graph
    B_scale = 1.0
    noise_type = 'gaussian_ev'
    dataset = SyntheticDataset(n, d, graph_type, degree,
                               noise_type, B_scale, seed=1)

    # GOLEM-EV hyperparameters (see the golem() docstring)
    B_est = golem(dataset.X, lambda_1=2e-2, lambda_2=5.0,
                  equal_variances=True, checkpoint_iter=5000)

    # Post-process estimated solution and compute results
    B_processed = postprocess(B_est, graph_thres=0.3)
    results = count_accuracy(dataset.B != 0, B_processed != 0)
    logging.info("Results (after post-processing): {}.".format(results))
| import os
from models import GolemModel
from trainers import GolemTrainer
# For logging of tensorflow messages
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
def golem(X, lambda_1, lambda_2, equal_variances=True,
          num_iter=1e+5, learning_rate=1e-3, seed=1,
          checkpoint_iter=None, output_dir=None, B_init=None):
    """Solve the unconstrained optimization problem of GOLEM.

    Builds a GolemModel from the centered data and fits it with
    GolemTrainer.

    Args:
        X (numpy.ndarray): [n, d] data matrix.
        lambda_1 (float): Coefficient of L1 penalty.
        lambda_2 (float): Coefficient of DAG penalty.
        equal_variances (bool): Whether to assume equal noise variances
            for the likelihood objective. Default: True.
        num_iter (int): Number of iterations for training.
        learning_rate (float): Learning rate of Adam optimizer. Default: 1e-3.
        seed (int): Random seed. Default: 1.
        checkpoint_iter (int): Number of iterations between each checkpoint.
            Set to None to disable. Default: None.
        output_dir (str): Output directory to save training outputs.
        B_init (numpy.ndarray or None): [d, d] weighted matrix for
            initialization. Set to None to disable. Default: None.

    Returns:
        numpy.ndarray: [d, d] estimated weighted matrix (not thresholded).

    Hyperparameters:
        (1) GOLEM-NV: equal_variances=False, lambda_1=2e-3, lambda_2=5.0.
        (2) GOLEM-EV: equal_variances=True, lambda_1=2e-2, lambda_2=5.0.
    """
    # Center each column so the likelihood needs no intercept term.
    X_centered = X - X.mean(axis=0, keepdims=True)

    num_samples, num_vars = X_centered.shape
    model = GolemModel(num_samples, num_vars, lambda_1, lambda_2,
                       equal_variances, seed, B_init)

    # Fit and return the raw estimate; thresholding happens downstream.
    trainer = GolemTrainer(learning_rate)
    return trainer.train(model, X_centered, num_iter, checkpoint_iter,
                         output_dir)
if __name__ == '__main__':
    # Minimal code to run GOLEM: generate a synthetic dataset, fit
    # GOLEM-EV and report accuracy of the post-processed estimate.
    import logging

    from data_loader import SyntheticDataset
    from utils.train import postprocess
    from utils.utils import count_accuracy, set_seed

    # Setup for logging
    # Required for printing histories if checkpointing is activated
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s %(levelname)s - %(name)s - %(message)s'
    )

    # Reproducibility
    set_seed(1)

    # Load dataset
    n, d = 1000, 20
    graph_type, degree = 'ER', 4    # ER2 graph
    B_scale = 1.0
    noise_type = 'gaussian_ev'
    dataset = SyntheticDataset(n, d, graph_type, degree,
                               noise_type, B_scale, seed=1)

    # GOLEM-EV hyperparameters (see the golem() docstring)
    B_est = golem(dataset.X, lambda_1=2e-2, lambda_2=5.0,
                  equal_variances=True, checkpoint_iter=5000)

    # Post-process estimated solution and compute results
    B_processed = postprocess(B_est, graph_thres=0.3)
    results = count_accuracy(dataset.B != 0, B_processed != 0)
    logging.info("Results (after post-processing): {}.".format(results))
| en | 0.70785 | # For logging of tensorflow messages Solve the unconstrained optimization problem of GOLEM, which involves GolemModel and GolemTrainer. Args: X (numpy.ndarray): [n, d] data matrix. lambda_1 (float): Coefficient of L1 penalty. lambda_2 (float): Coefficient of DAG penalty. equal_variances (bool): Whether to assume equal noise variances for likelibood objective. Default: True. num_iter (int): Number of iterations for training. learning_rate (float): Learning rate of Adam optimizer. Default: 1e-3. seed (int): Random seed. Default: 1. checkpoint_iter (int): Number of iterations between each checkpoint. Set to None to disable. Default: None. output_dir (str): Output directory to save training outputs. B_init (numpy.ndarray or None): [d, d] weighted matrix for initialization. Set to None to disable. Default: None. Returns: numpy.ndarray: [d, d] estimated weighted matrix. Hyperparameters: (1) GOLEM-NV: equal_variances=False, lambda_1=2e-3, lambda_2=5.0. (2) GOLEM-EV: equal_variances=True, lambda_1=2e-2, lambda_2=5.0. # Center the data # Set up model # Training # Not thresholded yet # Minimal code to run GOLEM. # Setup for logging # Required for printing histories if checkpointing is activated # Reproducibility # Load dataset # ER2 graph # GOLEM-EV # Post-process estimated solution and compute results | 2.410259 | 2 |
2020/5.1/main.py | kylixalex/Advent-of-Code | 0 | 6621001 | <reponame>kylixalex/Advent-of-Code<filename>2020/5.1/main.py<gh_stars>0
inp = open("input.txt").read().splitlines()
li = []
for line in inp:
binary = str.maketrans("FBLR", "0101")
translated = int(line.translate(binary), 2)
li.append(translated)
print(max(li))
| inp = open("input.txt").read().splitlines()
li = []
for line in inp:
binary = str.maketrans("FBLR", "0101")
translated = int(line.translate(binary), 2)
li.append(translated)
print(max(li)) | none | 1 | 3.129653 | 3 | |
dexp/utils/robocopy.py | haesleinhuepf/dexp | 16 | 6621002 | import os
import subprocess
from arbol import aprint, asection
def robocopy(
    source_folder: str,
    dest_folder: str,
    move_files: bool = False,
    nb_threads: int = 8,
    large_files: bool = False,
    wait_to_finish: bool = True,
):
    """
    Start a generic robocopy job on Windows to copy files from one folder to another.

    Args
    ---------
    source_folder : str
        Folder to copy from (forward slashes are converted to backslashes).
    dest_folder : str
        Folder to copy to.
    move_files : bool
        If True, pass ``/move`` so files are moved instead of copied.
    nb_threads : int
        Number of robocopy worker threads (``/MT``).
    large_files : bool
        If True, pass ``/j`` (unbuffered I/O, recommended for large files).
    wait_to_finish : bool
        If True, block until the robocopy process exits.

    Returns
    ---------
    copyProcess (Popen Object) :
        > this is the Popen object that's running the copy process. This can be
        > used to check or kill the process if necessary.
    """
    with asection(
        f"Starting a Windows Robocopy job: copying all files and folders from {source_folder} "
        + f"to {dest_folder} with {nb_threads} threads."
    ):
        # checks for network file paths in source and destination paths
        # NOTE(review): this tests whether the path starts with a single
        # backslash and prepends another to form a UNC '\\server\share'
        # path — assumes callers may pass '\server\share'; confirm.
        if "\\" in source_folder[:1]:
            source_folder = "\\" + source_folder
        if "\\" in dest_folder[:1]:
            dest_folder = "\\" + dest_folder

        # replaces slashes (robocopy expects Windows-style separators):
        source = (source_folder).replace("/", "\\")
        dest = (dest_folder).replace("/", "\\")

        # logfile: remove any stale log from a previous run
        log_file = "robocopy_log.txt"
        if os.path.exists(log_file):
            os.remove(log_file)

        # format the command list for Popen
        copyCommand = [
            "ROBOCOPY",
            source,
            dest,
            "/e",
            "/R:10",
            "/W:5",
            "/TBD",
            "/NP",
            # '/V',
            "/eta",
            "/tee",
            f"/MT:{nb_threads}",
            f"/log:{log_file}",
        ]
        if move_files:
            copyCommand.append("/move")
        if large_files:
            copyCommand.append("/j")

        # use subprocess to start a copy command
        aprint(f"Robocopy command: {copyCommand}")
        copy_process = subprocess.Popen(copyCommand)

        if wait_to_finish:
            copy_process.wait()

        return copy_process
| import os
import subprocess
from arbol import aprint, asection
def robocopy(
    source_folder: str,
    dest_folder: str,
    move_files: bool = False,
    nb_threads: int = 8,
    large_files: bool = False,
    wait_to_finish: bool = True,
):
    """
    Start a generic robocopy job on Windows to copy files from one folder to another.

    Args
    ---------
    source_folder : str
        Folder to copy from (forward slashes are converted to backslashes).
    dest_folder : str
        Folder to copy to.
    move_files : bool
        If True, pass ``/move`` so files are moved instead of copied.
    nb_threads : int
        Number of robocopy worker threads (``/MT``).
    large_files : bool
        If True, pass ``/j`` (unbuffered I/O, recommended for large files).
    wait_to_finish : bool
        If True, block until the robocopy process exits.

    Returns
    ---------
    copyProcess (Popen Object) :
        > this is the Popen object that's running the copy process. This can be
        > used to check or kill the process if necessary.
    """
    with asection(
        f"Starting a Windows Robocopy job: copying all files and folders from {source_folder} "
        + f"to {dest_folder} with {nb_threads} threads."
    ):
        # checks for network file paths in source and destination paths
        # NOTE(review): this tests whether the path starts with a single
        # backslash and prepends another to form a UNC '\\server\share'
        # path — assumes callers may pass '\server\share'; confirm.
        if "\\" in source_folder[:1]:
            source_folder = "\\" + source_folder
        if "\\" in dest_folder[:1]:
            dest_folder = "\\" + dest_folder

        # replaces slashes (robocopy expects Windows-style separators):
        source = (source_folder).replace("/", "\\")
        dest = (dest_folder).replace("/", "\\")

        # logfile: remove any stale log from a previous run
        log_file = "robocopy_log.txt"
        if os.path.exists(log_file):
            os.remove(log_file)

        # format the command list for Popen
        copyCommand = [
            "ROBOCOPY",
            source,
            dest,
            "/e",
            "/R:10",
            "/W:5",
            "/TBD",
            "/NP",
            # '/V',
            "/eta",
            "/tee",
            f"/MT:{nb_threads}",
            f"/log:{log_file}",
        ]
        if move_files:
            copyCommand.append("/move")
        if large_files:
            copyCommand.append("/j")

        # use subprocess to start a copy command
        aprint(f"Robocopy command: {copyCommand}")
        copy_process = subprocess.Popen(copyCommand)

        if wait_to_finish:
            copy_process.wait()

        return copy_process
| en | 0.744428 | Start a generic robocopy job on Windows to copy files from one folder to another Args --------- Returns --------- copyProcess (Popen Object) : > this is the Popen object that's running the copy process. This can be > used to check or kill the process if necessary. # checks for network file paths in source and destination paths # replaces slashes: # logfile: # format the command list for Popen # '/V', # use subprocess to start a copy command | 3.043097 | 3 |
pipot/services/ServiceModelsManager.py | VertexC/pipot-server | 4 | 6621003 | <filename>pipot/services/ServiceModelsManager.py<gh_stars>1-10
# The models is stored in models.txt as
# [serviceName1].[tableName1]
# [serviceName1].[tableName2]
# [serviceName2].[tableName1]
from __future__ import print_function
import os
import sys
import importlib
import inspect
from database import Base
models_storage = './pipot/services/models.txt'
def add_models(service):
    """Register all model classes declared by *service*.

    Imports ``pipot.services.<service>.<service>``, collects every class
    that inherits from the project ``Base`` class (excluding the
    ``IModel``/``IModelIP`` helpers) and appends them to the registry
    file as "<service>.<ClassName>" entries.
    """
    models = get_models()
    cls_members = inspect.getmembers(importlib.import_module('pipot.services' + '.' + service + '.' + service),
                                     inspect.isclass)
    # Keep only concrete model classes: subclasses of Base, minus the
    # IModel/IModelIP interface helpers.
    cls_info = list(filter(lambda x: Base in inspect.getmro(x[1]) and x[0] not in ('IModel', 'IModelIP'), cls_members))
    models.extend([service + '.' + name for name, _ in cls_info])
    save_models(models)
def rm_models(service):
    """Remove all models belonging to *service* from the registry file.

    :param service: service name whose models should be dropped.
    :return: list of the removed "<service>.<Model>" entries.
    """
    models = get_models()
    # NOTE(review): startswith() also matches services whose name merely
    # begins with *service* (e.g. 'foo' would match 'foobar.X'); matching
    # on service + '.' would be stricter — behavior kept as-is here.
    removed_models = [m for m in models if m.startswith(service)]
    kept_models = [m for m in models if not m.startswith(service)]
    # Reuse save_models() instead of duplicating its file-writing logic.
    save_models(kept_models)
    return removed_models
def get_models():
    """Return all registered "<service>.<Model>" entries, one per line
    of the models file, with trailing newlines stripped."""
    with open(models_storage, 'r') as handle:
        return [entry.rstrip('\n') for entry in handle]
def save_models(models):
    """Overwrite the models file with one "<service>.<Model>" entry per line."""
    with open(models_storage, 'w') as handle:
        handle.writelines('%s\n' % entry for entry in models)
def import_models(services=None):
    """
    Import the model modules registered in the models file.

    When *services* is falsy (None or empty), every registered model
    module is imported; otherwise only entries whose service name appears
    in *services* are imported.
    """
    entries = get_models()
    if services:
        entries = [entry for entry in entries if entry.split('.')[0] in services]
    for entry in entries:
        service_name = entry.split('.')[0]
        importlib.import_module('pipot.services' + '.' + service_name + '.' + service_name)
| <filename>pipot/services/ServiceModelsManager.py<gh_stars>1-10
# The models is stored in models.txt as
# [serviceName1].[tableName1]
# [serviceName1].[tableName2]
# [serviceName2].[tableName1]
from __future__ import print_function
import os
import sys
import importlib
import inspect
from database import Base
models_storage = './pipot/services/models.txt'
def add_models(service):
    """Register all model classes declared by *service*.

    Imports ``pipot.services.<service>.<service>``, collects every class
    that inherits from the project ``Base`` class (excluding the
    ``IModel``/``IModelIP`` helpers) and appends them to the registry
    file as "<service>.<ClassName>" entries.
    """
    models = get_models()
    cls_members = inspect.getmembers(importlib.import_module('pipot.services' + '.' + service + '.' + service),
                                     inspect.isclass)
    # Keep only concrete model classes: subclasses of Base, minus the
    # IModel/IModelIP interface helpers.
    cls_info = list(filter(lambda x: Base in inspect.getmro(x[1]) and x[0] not in ('IModel', 'IModelIP'), cls_members))
    models.extend([service + '.' + name for name, _ in cls_info])
    save_models(models)
def rm_models(service):
    """Remove all models belonging to *service* from the registry file.

    :param service: service name whose models should be dropped.
    :return: list of the removed "<service>.<Model>" entries.
    """
    models = get_models()
    # NOTE(review): startswith() also matches services whose name merely
    # begins with *service* (e.g. 'foo' would match 'foobar.X'); matching
    # on service + '.' would be stricter — behavior kept as-is here.
    removed_models = [m for m in models if m.startswith(service)]
    kept_models = [m for m in models if not m.startswith(service)]
    # Reuse save_models() instead of duplicating its file-writing logic.
    save_models(kept_models)
    return removed_models
def get_models():
    """Return all registered "<service>.<Model>" entries, one per line
    of the models file, with trailing newlines stripped."""
    with open(models_storage, 'r') as handle:
        return [entry.rstrip('\n') for entry in handle]
def save_models(models):
    """Overwrite the models file with one "<service>.<Model>" entry per line."""
    with open(models_storage, 'w') as handle:
        handle.writelines('%s\n' % entry for entry in models)
def import_models(services=None):
    """
    Import the model modules registered in the models file.

    When *services* is falsy (None or empty), every registered model
    module is imported; otherwise only entries whose service name appears
    in *services* are imported.
    """
    entries = get_models()
    if services:
        entries = [entry for entry in entries if entry.split('.')[0] in services]
    for entry in entries:
        service_name = entry.split('.')[0]
        importlib.import_module('pipot.services' + '.' + service_name + '.' + service_name)
| en | 0.649429 | # The models is stored in models.txt as # [serviceName1].[tableName1] # [serviceName1].[tableName2] # [serviceName2].[tableName1] when services is None, import all models otherwise import models specified in services only | 2.648012 | 3 |
generate-essay.py | chaaklau/appledaily-frequency | 1 | 6621004 | <filename>generate-essay.py
import pandas as pd

# Character-frequency table from the base Cantonese essay list.
essay_cantonese = pd.read_csv(
    "source/essay-cantonese.txt",
    header=None,
    names=["char", "freq"],
    sep="\t",
)
# Newly counted character frequencies.
found = pd.read_csv(
    "output/found.tsv",
    header=None,
    names=["char", "freq"],
    sep="\t",
)
# Fix: the original opened output/essay-new.txt here and never used or
# closed the handle (a leaked file object); to_csv() below opens the
# file itself, so the stray open() has been removed.

# All words from essay_cantonese will be kept; newly found characters are
# merged in only when their frequency exceeds 10, and frequencies for
# characters present in both tables are summed.
combined = (
    pd.merge(essay_cantonese, found[found.freq > 10], on=["char"], how="outer")
    .set_index(["char"])
    .sum(axis=1)
)
combined.to_csv("output/essay-new.txt", sep="\t", header=None, float_format="%u")
| <filename>generate-essay.py
import pandas as pd
essay_cantonese = pd.read_csv(
"source/essay-cantonese.txt",
header=None,
names=["char", "freq"],
sep="\t",
)
found = pd.read_csv(
"output/found.tsv",
header=None,
names=["char", "freq"],
sep="\t",
)
file = open("output/essay-new.txt", "w")
# All words from essay_cantonese will be kept.
combined = (
pd.merge(essay_cantonese, found[found.freq > 10], on=["char"], how="outer")
.set_index(["char"])
.sum(axis=1)
)
combined.to_csv("output/essay-new.txt", sep="\t", header=None, float_format="%u")
| en | 0.764305 | # All words from essay_cantonese will be kept. | 3.191111 | 3 |
src/roi/collection.py | BeastyBlacksmith/pyama | 1 | 6621005 | <reponame>BeastyBlacksmith/pyama<gh_stars>1-10
from threading import RLock
from .base import Roi
from ..listener import Listeners
class RoiCollection:
IDX_TYPE = 0
IDX_VERSION = 1
def __init__(self, key=None, type_=None, version=None,
parameters=None, name=None, color=None, stroke_width=None):
if key is None and isinstance(type_, str) and isinstance(version, str):
self.__key = (type_, version)
elif isinstance(key, tuple) and len(key) == 2 and \
isinstance(key[RoiCollection.IDX_TYPE], str) and \
isinstance(key[RoiCollection.IDX_VERSION], str):
self.__key = key
else:
raise TypeError(f"Invalid ROI type identifier given: {key}")
self.__parameters = parameters
self.__name = name
self.__color = color
self.__stroke_width = stroke_width
self.__rois = {}
self.__listeners = Listeners()
self.__lock = RLock()
@property
def key(self):
return self.__key
@property
def type(self):
return self.__key[RoiCollection.IDX_TYPE]
@property
def version(self):
return self.__key[RoiCollection.IDX_VERSION]
def __len__(self):
with self.__lock:
return self.__rois.__len__()
def __contains__(self, frame):
with self.__lock:
return self.__rois.__contains__(frame)
def set(self, frame, roi):
print("[RoiCollection.set] DEPRECATED, use __setitem__ instead")
self[frame] = roi
def add(self, frame, roi):
if frame not in self:
self[frame] = roi
return
if isinstance(roi, list) and all(isinstance(r, Roi) for r in roi):
if any(r.key() != self.__key for r in roi):
raise TypeError("incomaptible ROI type")
with self.__lock:
self.__rois[frame].extend(roi)
elif isinstance(roi, Roi):
if roi.key() != self.__key:
raise TypeError(f"incomaptible ROI type: expected '{self.__key}', got '{roi.key()}'")
with self.__lock:
self.__rois[frame].append(roi)
else:
raise TypeError(f"expected type 'Roi', got '{type(roi)}')")
self.__listeners.notify()
def __getitem__(self, frame):
with self.__lock:
return self.__rois.get(frame)
def __setitem__(self, frame, rois):
if isinstance(rois, list) and all(isinstance(r, Roi) for r in rois):
if any(r.key() != self.__key for r in rois):
raise TypeError("incomaptible ROI type")
with self.__lock:
self.__rois[frame] = rois
elif isinstance(rois, Roi):
if rois.key() != self.__key:
raise TypeError(f"incomaptible ROI type: expected '{self.__key}', got '{rois.key()}'")
with self.__lock:
self.__rois[frame] = [rois]
else:
raise TypeError(f"expected type 'Roi', got '{type(rois)}'")
self.__listeners.notify()
def __delitem__(self, frame):
with self.__lock:
self.__rois.__delitem__(frame)
def __iter__(self):
return self.__rois.__iter__()
def items(self):
with self.__lock:
return self.__rois.items()
def frames(self):
with self.__lock:
return self.__rois.keys()
def rois(self):
with self.__lock:
return self.__rois.values()
@property
def parameters(self):
with self.__lock:
return self.__parameters
@parameters.setter
def parameters(self, params):
with self.__lock:
self.__parameters = params
@property
def name(self):
with self.__lock:
return self.__name
@name.setter
def name(self, n):
with self.__lock:
self.__name = n
@property
def color(self):
with self.__lock:
return self.__color
@color.setter
def color(self, c):
with self.__lock:
self.__color = c
@property
def stroke_width(self):
with self.__lock:
return self.__stroke_width
@stroke_width.setter
def stroke_width(self, sw):
with self.__lock:
self.__stroke_width = sw
def register_listener(self, fun):
return self.__listeners.register(fun)
def delete_listener(self, lid):
self.__listeners.delete(lid)
| from threading import RLock
from .base import Roi
from ..listener import Listeners
class RoiCollection:
IDX_TYPE = 0
IDX_VERSION = 1
def __init__(self, key=None, type_=None, version=None,
parameters=None, name=None, color=None, stroke_width=None):
if key is None and isinstance(type_, str) and isinstance(version, str):
self.__key = (type_, version)
elif isinstance(key, tuple) and len(key) == 2 and \
isinstance(key[RoiCollection.IDX_TYPE], str) and \
isinstance(key[RoiCollection.IDX_VERSION], str):
self.__key = key
else:
raise TypeError(f"Invalid ROI type identifier given: {key}")
self.__parameters = parameters
self.__name = name
self.__color = color
self.__stroke_width = stroke_width
self.__rois = {}
self.__listeners = Listeners()
self.__lock = RLock()
@property
def key(self):
return self.__key
@property
def type(self):
return self.__key[RoiCollection.IDX_TYPE]
@property
def version(self):
return self.__key[RoiCollection.IDX_VERSION]
def __len__(self):
with self.__lock:
return self.__rois.__len__()
def __contains__(self, frame):
with self.__lock:
return self.__rois.__contains__(frame)
def set(self, frame, roi):
print("[RoiCollection.set] DEPRECATED, use __setitem__ instead")
self[frame] = roi
def add(self, frame, roi):
if frame not in self:
self[frame] = roi
return
if isinstance(roi, list) and all(isinstance(r, Roi) for r in roi):
if any(r.key() != self.__key for r in roi):
raise TypeError("incomaptible ROI type")
with self.__lock:
self.__rois[frame].extend(roi)
elif isinstance(roi, Roi):
if roi.key() != self.__key:
raise TypeError(f"incomaptible ROI type: expected '{self.__key}', got '{roi.key()}'")
with self.__lock:
self.__rois[frame].append(roi)
else:
raise TypeError(f"expected type 'Roi', got '{type(roi)}')")
self.__listeners.notify()
def __getitem__(self, frame):
with self.__lock:
return self.__rois.get(frame)
def __setitem__(self, frame, rois):
if isinstance(rois, list) and all(isinstance(r, Roi) for r in rois):
if any(r.key() != self.__key for r in rois):
raise TypeError("incomaptible ROI type")
with self.__lock:
self.__rois[frame] = rois
elif isinstance(rois, Roi):
if rois.key() != self.__key:
raise TypeError(f"incomaptible ROI type: expected '{self.__key}', got '{rois.key()}'")
with self.__lock:
self.__rois[frame] = [rois]
else:
raise TypeError(f"expected type 'Roi', got '{type(rois)}'")
self.__listeners.notify()
def __delitem__(self, frame):
with self.__lock:
self.__rois.__delitem__(frame)
def __iter__(self):
return self.__rois.__iter__()
def items(self):
with self.__lock:
return self.__rois.items()
def frames(self):
with self.__lock:
return self.__rois.keys()
def rois(self):
with self.__lock:
return self.__rois.values()
@property
def parameters(self):
with self.__lock:
return self.__parameters
@parameters.setter
def parameters(self, params):
with self.__lock:
self.__parameters = params
@property
def name(self):
with self.__lock:
return self.__name
@name.setter
def name(self, n):
with self.__lock:
self.__name = n
@property
def color(self):
with self.__lock:
return self.__color
@color.setter
def color(self, c):
with self.__lock:
self.__color = c
@property
def stroke_width(self):
with self.__lock:
return self.__stroke_width
@stroke_width.setter
def stroke_width(self, sw):
with self.__lock:
self.__stroke_width = sw
def register_listener(self, fun):
return self.__listeners.register(fun)
def delete_listener(self, lid):
self.__listeners.delete(lid) | none | 1 | 2.44981 | 2 | |
psana/psana/peakFinder/examples/ex03-localextrema.py | JBlaschke/lcls2 | 16 | 6621006 | #!/usr/bin/env python
""" test of psalg_ext.local_minimums, local_maximums, threshold_maximums, local_maximums_rank1_cross
"""
#----------
import sys
import psalg_ext as algos
import numpy as np
#----------
def test01(tname='1', NUMBER_OF_EVENTS=5, DO_PRINT=True) :
print('local extrema : %s' % ('minimums' if tname in ('1','2')\
else 'maximums' if tname in ('3','4')\
else 'maximums runk=1 cross' if tname in ('5','6')\
else 'two-threshold maximums' if tname == '7'\
else 'unknown test'))
from time import time #, sleep
from psana.pyalgos.generic.NDArrUtils import print_ndarr
import psana.pyalgos.generic.Graphics as gr
sh, fs = (50,50), (7,6)
fig1, axim1, axcb1 = gr.fig_img_cbar_axes(gr.figure(figsize=fs))
fig2, axim2, axcb2 = gr.fig_img_cbar_axes(gr.figure(figsize=fs))
imsh1 = None
imsh2 = None
print('Image shape: %s' % str(sh))
mu, sigma = 200, 25
for evnum in range(NUMBER_OF_EVENTS) :
data = 10.*np.ones(sh, dtype=np.float64) if tname in ('2','4','6') else\
np.array(mu + sigma*np.random.standard_normal(sh), dtype=np.float64)
mask = np.ones(sh, dtype=np.uint16)
extrema = np.zeros(sh, dtype=np.uint16)
rank=5
thr_low = mu+3*sigma
thr_high = mu+4*sigma
nmax = 0
if DO_PRINT : print_ndarr(data, ' input data')
t0_sec = time()
#----------
if tname in ('1','2') : nmax = algos.local_minimums(data, mask, rank, extrema)
elif tname in ('3','4') : nmax = algos.local_maximums(data, mask, rank, extrema)
elif tname in ('5','6') : nmax = algos.local_maximums_rank1_cross(data, mask, extrema)
elif tname == '7' : nmax = algos.threshold_maximums(data, mask, rank, thr_low, thr_high, extrema)
elif tname == '8' : nmax = algos.local_maximums_rank1_cross(data, mask, extrema)
else : contunue
#----------
print('Event: %2d, consumed time = %10.6f(sec), nmax = %d' % (evnum, time()-t0_sec, nmax))
if DO_PRINT : print_ndarr(extrema, ' output extrema')
img1 = data
img2 = extrema
axim1.clear()
axcb1.clear()
if imsh1 is not None : del imsh1
imsh1 = None
axim2.clear()
axcb2.clear()
if imsh2 is not None : del imsh2
imsh2 = None
#ave, rms = img1.mean(), img1.std()
#amin, amax = ave-1*rms, ave+5*rms
amin, amax = img1.min(), img1.max()
#imsh1,cbar1=\
gr.imshow_cbar(fig1, axim1, axcb1, img1, amin=amin, amax=amax, extent=None,\
interpolation='nearest', aspect='auto', origin='upper',\
orientation='vertical', cmap='inferno')
fig1.canvas.set_window_title('Event: %d Random data'%evnum)
gr.move_fig(fig1, x0=560, y0=30)
#imsh2,cbar2=\
gr.imshow_cbar(fig2, axim2, axcb2, img2, amin=0, amax=img2.max(), extent=None,\
interpolation='nearest', aspect='auto', origin='upper',\
orientation='vertical', cmap='inferno')
fig2.canvas.set_window_title('Event: %d Local extrema'%evnum)
gr.move_fig(fig2, x0=0, y0=30)
gr.show(mode='DO_NOT_HOLD')
gr.show()
#----------
def test02(rank=6) :
algos.print_matrix_of_diag_indexes(rank)
algos.print_vector_of_diag_indexes(rank)
#----------
def usage() :
msg = 'Usage: python examples/ex02-localextrema.py <test-number>'\
'\n where <test-number> ='\
'\n 1 - local_minimums for random image'\
'\n 2 - local_minimums for const image'\
'\n 3 - local_maximums for random image'\
'\n 4 - local_maximums for const image'\
'\n 5 - local_maxima_rank1_cross for random image'\
'\n 6 - local_maxima_rank1_cross for const image'\
'\n 7 - threshold_maximums for random image'\
'\n 8 - local_maximums_rank1_cross for random image'\
'\n 9 - print_matrix_of_diag_indexes, print_vector_of_diag_indexes'
print(msg)
#----------
#----------
#----------
#----------
if __name__ == "__main__" :
tname = sys.argv[1] if len(sys.argv) > 1 else '0'
print(50*'_', '\nTest %s:' % tname)
if tname in ('1','2','3','4','5','6','7','8') : test01(tname)
elif tname == '9' : test02()
else : usage(); sys.exit('Test %s is not implemented' % tname)
sys.exit('End of test %s' % tname)
#----------
| #!/usr/bin/env python
""" test of psalg_ext.local_minimums, local_maximums, threshold_maximums, local_maximums_rank1_cross
"""
#----------
import sys
import psalg_ext as algos
import numpy as np
#----------
def test01(tname='1', NUMBER_OF_EVENTS=5, DO_PRINT=True) :
print('local extrema : %s' % ('minimums' if tname in ('1','2')\
else 'maximums' if tname in ('3','4')\
else 'maximums runk=1 cross' if tname in ('5','6')\
else 'two-threshold maximums' if tname == '7'\
else 'unknown test'))
from time import time #, sleep
from psana.pyalgos.generic.NDArrUtils import print_ndarr
import psana.pyalgos.generic.Graphics as gr
sh, fs = (50,50), (7,6)
fig1, axim1, axcb1 = gr.fig_img_cbar_axes(gr.figure(figsize=fs))
fig2, axim2, axcb2 = gr.fig_img_cbar_axes(gr.figure(figsize=fs))
imsh1 = None
imsh2 = None
print('Image shape: %s' % str(sh))
mu, sigma = 200, 25
for evnum in range(NUMBER_OF_EVENTS) :
data = 10.*np.ones(sh, dtype=np.float64) if tname in ('2','4','6') else\
np.array(mu + sigma*np.random.standard_normal(sh), dtype=np.float64)
mask = np.ones(sh, dtype=np.uint16)
extrema = np.zeros(sh, dtype=np.uint16)
rank=5
thr_low = mu+3*sigma
thr_high = mu+4*sigma
nmax = 0
if DO_PRINT : print_ndarr(data, ' input data')
t0_sec = time()
#----------
if tname in ('1','2') : nmax = algos.local_minimums(data, mask, rank, extrema)
elif tname in ('3','4') : nmax = algos.local_maximums(data, mask, rank, extrema)
elif tname in ('5','6') : nmax = algos.local_maximums_rank1_cross(data, mask, extrema)
elif tname == '7' : nmax = algos.threshold_maximums(data, mask, rank, thr_low, thr_high, extrema)
elif tname == '8' : nmax = algos.local_maximums_rank1_cross(data, mask, extrema)
else : contunue
#----------
print('Event: %2d, consumed time = %10.6f(sec), nmax = %d' % (evnum, time()-t0_sec, nmax))
if DO_PRINT : print_ndarr(extrema, ' output extrema')
img1 = data
img2 = extrema
axim1.clear()
axcb1.clear()
if imsh1 is not None : del imsh1
imsh1 = None
axim2.clear()
axcb2.clear()
if imsh2 is not None : del imsh2
imsh2 = None
#ave, rms = img1.mean(), img1.std()
#amin, amax = ave-1*rms, ave+5*rms
amin, amax = img1.min(), img1.max()
#imsh1,cbar1=\
gr.imshow_cbar(fig1, axim1, axcb1, img1, amin=amin, amax=amax, extent=None,\
interpolation='nearest', aspect='auto', origin='upper',\
orientation='vertical', cmap='inferno')
fig1.canvas.set_window_title('Event: %d Random data'%evnum)
gr.move_fig(fig1, x0=560, y0=30)
#imsh2,cbar2=\
gr.imshow_cbar(fig2, axim2, axcb2, img2, amin=0, amax=img2.max(), extent=None,\
interpolation='nearest', aspect='auto', origin='upper',\
orientation='vertical', cmap='inferno')
fig2.canvas.set_window_title('Event: %d Local extrema'%evnum)
gr.move_fig(fig2, x0=0, y0=30)
gr.show(mode='DO_NOT_HOLD')
gr.show()
#----------
def test02(rank=6) :
algos.print_matrix_of_diag_indexes(rank)
algos.print_vector_of_diag_indexes(rank)
#----------
def usage() :
msg = 'Usage: python examples/ex02-localextrema.py <test-number>'\
'\n where <test-number> ='\
'\n 1 - local_minimums for random image'\
'\n 2 - local_minimums for const image'\
'\n 3 - local_maximums for random image'\
'\n 4 - local_maximums for const image'\
'\n 5 - local_maxima_rank1_cross for random image'\
'\n 6 - local_maxima_rank1_cross for const image'\
'\n 7 - threshold_maximums for random image'\
'\n 8 - local_maximums_rank1_cross for random image'\
'\n 9 - print_matrix_of_diag_indexes, print_vector_of_diag_indexes'
print(msg)
#----------
#----------
#----------
#----------
if __name__ == "__main__" :
tname = sys.argv[1] if len(sys.argv) > 1 else '0'
print(50*'_', '\nTest %s:' % tname)
if tname in ('1','2','3','4','5','6','7','8') : test01(tname)
elif tname == '9' : test02()
else : usage(); sys.exit('Test %s is not implemented' % tname)
sys.exit('End of test %s' % tname)
#----------
| pt | 0.191628 | #!/usr/bin/env python test of psalg_ext.local_minimums, local_maximums, threshold_maximums, local_maximums_rank1_cross #---------- #---------- #, sleep #---------- #---------- #ave, rms = img1.mean(), img1.std() #amin, amax = ave-1*rms, ave+5*rms #imsh1,cbar1=\ #imsh2,cbar2=\ #---------- #---------- #---------- #---------- #---------- #---------- #---------- | 2.299759 | 2 |
montlake/utils/utils.py | sjkoelle/montlake | 8 | 6621007 | <reponame>sjkoelle/montlake
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/utils.utils.ipynb (unless otherwise specified).
__all__ = ['get_index_matching', 'get_234_indices', 'get_atoms4_full', 'get_atoms3_full', 'data_stream_custom_range',
'get_cosines', 'cosine_similarity']
# Cell
import numpy as np
from einops import rearrange
def get_index_matching(probe, superset):
probe_permuted = np.asarray([probe[[0, 1, 2, 3]],
probe[[3,2,1,0]],
# combos[c][[0,2,1,3]],
probe[[0,2,1,3]],
probe[[3,1,2,0]]])
output = np.asarray([])
for p in range(4):
#print(p)
output = np.append(output,np.where((superset==tuple(probe_permuted[p])).all(1))[0])
return(int(output))
def get_234_indices(selected_indices, natoms4, natoms2, natoms3, order234 = [2,0,1]):
'''
Get indices in dictionary of each functions set ordered by order234'''
lens = [natoms2, natoms3,natoms4]
combostart = [0, lens[order234[0]], lens[order234[0]] + lens[order234[1]], lens[order234[0]] + lens[order234[1]]+ lens[order234[2]]]
nsel = len(selected_indices)
functionset_id = np.zeros(nsel)
for j in range(nsel):
for i in range(len(combostart) - 1):
if selected_indices[j] > combostart[i] and selected_indices[j] < combostart[i+1] :
#print('here')
functionset_id[j] = i
return(np.asarray(functionset_id , dtype = int),combostart)
def get_atoms4_full(atoms4):
combos4 = np.asarray([[0, 1, 2,3],
[1,2,3,0],
[2,3,0,1],
[3,0,1,2],
[0, 1,3,2],
[1,0,2,3] ])
atoms4full = np.asarray([atoms4[:,c] for c in combos4])
atoms4full = rearrange(atoms4full,'i j k -> (j i) k')
return(atoms4full)
def get_atoms3_full(atoms3):
combos3 = np.asarray([[0, 1, 2],
[1,2,0],
[2,0,1]])
atoms3full = np.asarray([atoms3[:,c] for c in combos3])
atoms3full = rearrange(atoms3full,'i j k -> (j i ) k')
return(atoms3full)
def data_stream_custom_range(selind):
for i in range(len(selind)):
yield i
# Cell
import numpy as np
# def get_cosines(dg):
# n = dg.shape[0]
# p = dg.shape[1]
# d = dg.shape[2]
# coses = np.zeros((n, p, p))
# for i in range(n):
# for j in range(p):
# for k in range(p):
# coses[i, j, k] = cosine_similarity(dg[i, j, :], dg[i, k,:]) # sklearn.metrics.pairwise.cosine_similarity(X = np.reshape(dg[:,i,:], (1,d*n)),Y = np.reshape(dg[:,j,:], (1,d*n)))[0][0]
# return (coses)
def get_cosines(dg):
n = dg.shape[0]
p = dg.shape[2]
d = dg.shape[1]
coses = np.zeros((n, p, p))
for i in range(n):
for j in range(p):
for k in range(p):
coses[i, j, k] = cosine_similarity(dg[i, :, j], dg[i, :,k]) # sklearn.metrics.pairwise.cosine_similarity(X = np.reshape(dg[:,i,:], (1,d*n)),Y = np.reshape(dg[:,j,:], (1,d*n)))[0][0]
return (coses)
def cosine_similarity(a, b):
output = np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
return (output) | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/utils.utils.ipynb (unless otherwise specified).
__all__ = ['get_index_matching', 'get_234_indices', 'get_atoms4_full', 'get_atoms3_full', 'data_stream_custom_range',
'get_cosines', 'cosine_similarity']
# Cell
import numpy as np
from einops import rearrange
def get_index_matching(probe, superset):
probe_permuted = np.asarray([probe[[0, 1, 2, 3]],
probe[[3,2,1,0]],
# combos[c][[0,2,1,3]],
probe[[0,2,1,3]],
probe[[3,1,2,0]]])
output = np.asarray([])
for p in range(4):
#print(p)
output = np.append(output,np.where((superset==tuple(probe_permuted[p])).all(1))[0])
return(int(output))
def get_234_indices(selected_indices, natoms4, natoms2, natoms3, order234 = [2,0,1]):
'''
Get indices in dictionary of each functions set ordered by order234'''
lens = [natoms2, natoms3,natoms4]
combostart = [0, lens[order234[0]], lens[order234[0]] + lens[order234[1]], lens[order234[0]] + lens[order234[1]]+ lens[order234[2]]]
nsel = len(selected_indices)
functionset_id = np.zeros(nsel)
for j in range(nsel):
for i in range(len(combostart) - 1):
if selected_indices[j] > combostart[i] and selected_indices[j] < combostart[i+1] :
#print('here')
functionset_id[j] = i
return(np.asarray(functionset_id , dtype = int),combostart)
def get_atoms4_full(atoms4):
combos4 = np.asarray([[0, 1, 2,3],
[1,2,3,0],
[2,3,0,1],
[3,0,1,2],
[0, 1,3,2],
[1,0,2,3] ])
atoms4full = np.asarray([atoms4[:,c] for c in combos4])
atoms4full = rearrange(atoms4full,'i j k -> (j i) k')
return(atoms4full)
def get_atoms3_full(atoms3):
combos3 = np.asarray([[0, 1, 2],
[1,2,0],
[2,0,1]])
atoms3full = np.asarray([atoms3[:,c] for c in combos3])
atoms3full = rearrange(atoms3full,'i j k -> (j i ) k')
return(atoms3full)
def data_stream_custom_range(selind):
for i in range(len(selind)):
yield i
# Cell
import numpy as np
# def get_cosines(dg):
# n = dg.shape[0]
# p = dg.shape[1]
# d = dg.shape[2]
# coses = np.zeros((n, p, p))
# for i in range(n):
# for j in range(p):
# for k in range(p):
# coses[i, j, k] = cosine_similarity(dg[i, j, :], dg[i, k,:]) # sklearn.metrics.pairwise.cosine_similarity(X = np.reshape(dg[:,i,:], (1,d*n)),Y = np.reshape(dg[:,j,:], (1,d*n)))[0][0]
# return (coses)
def get_cosines(dg):
n = dg.shape[0]
p = dg.shape[2]
d = dg.shape[1]
coses = np.zeros((n, p, p))
for i in range(n):
for j in range(p):
for k in range(p):
coses[i, j, k] = cosine_similarity(dg[i, :, j], dg[i, :,k]) # sklearn.metrics.pairwise.cosine_similarity(X = np.reshape(dg[:,i,:], (1,d*n)),Y = np.reshape(dg[:,j,:], (1,d*n)))[0][0]
return (coses)
def cosine_similarity(a, b):
output = np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
return (output) | en | 0.431901 | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/utils.utils.ipynb (unless otherwise specified). # Cell # combos[c][[0,2,1,3]], #print(p) Get indices in dictionary of each functions set ordered by order234 #print('here') # Cell # def get_cosines(dg): # n = dg.shape[0] # p = dg.shape[1] # d = dg.shape[2] # coses = np.zeros((n, p, p)) # for i in range(n): # for j in range(p): # for k in range(p): # coses[i, j, k] = cosine_similarity(dg[i, j, :], dg[i, k,:]) # sklearn.metrics.pairwise.cosine_similarity(X = np.reshape(dg[:,i,:], (1,d*n)),Y = np.reshape(dg[:,j,:], (1,d*n)))[0][0] # return (coses) # sklearn.metrics.pairwise.cosine_similarity(X = np.reshape(dg[:,i,:], (1,d*n)),Y = np.reshape(dg[:,j,:], (1,d*n)))[0][0] | 1.942297 | 2 |
src/api/futures.py | newlyedward/datascinece | 2 | 6621008 | # -*- coding: utf-8 -*-
import pandas as pd
from datetime import datetime
from src.api import conn
from src.util import connect_mongo
from log import LogHandler
log = LogHandler('api.log')
def get_dominant(code, start_date=None, end_date=None):
"""
获取某一期货品种一段时间的主力合约列表。合约首次上市时,以当日收盘同品种持仓量最大者作为从第二个交易日开始的主力合约。
当同品种其他合约持仓量在收盘后超过当前主力合约时,从第二个交易日开始进行主力合约的切换。日内不会进行主力合约的切换。
:param code: 期货合约品种,例如沪深300股指期货为'IF'
:param start_date: datetime.datetime 开始日期,默认为期货品种最早上市日期后一交易日
:param end_date: 结束日期,默认为当前日期
:return: pd.DataFrame
"""
# 连接数据库
# conn = connect_mongo(db='quote')
cursor = conn['index']
filter_dict = {'code': code, 'symbol': code + '99'}
if start_date is not None: # 使用前一个交易日
filter_dict['datetime'] = {'$gte': start_date}
if end_date is not None:
if 'datetime' in filter_dict:
filter_dict['datetime']['$lte'] = end_date
else:
filter_dict['datetime'] = {'$lte': end_date}
contract = cursor.find(filter_dict, {'_id': 0, 'datetime': 1, 'contract': 1})
contract_df = pd.DataFrame(list(contract))
contract_df.set_index('datetime', inplace=True)
return contract_df
def get_contracts(code, date=None):
"""
获取某一期货品种在策略当前日期的可交易合约symbol列表。
按照到期月份,下标从小到大排列,返回列表中第一个合约对应的就是该品种的近月合约。
:param code: 期货合约品种,例如沪深300股指期货为'IF'
:param date: datetime.datetime 查询日期,默认为当日
:return: list
"""
# 连接数据库
# conn = connect_mongo(db='quote')
cursor = conn['future']
if date is None:
date = datetime.today()
filter_dict = {'code': code, 'datetime': date}
contract = cursor.find(filter_dict, {'_id': 0, 'symbol': 1})
return pd.DataFrame(list(contract))
def get_member_rank(symbol, trading_date, rank_by):
"""
获取期货某合约的会员排名数据
:param symbol: 可以是期货的具体合约或者品种,合约代码,如CU1903,品种 CU
:param trading_date: datetime.datetime 交易日期,默认为当日
:param rank_by: 排名依据,默认为volume即根据持仓量统计排名,另外可选'long'和'short',分别对应持买仓量统计和持卖仓量统计。
:return:
-pandas DataFrame
commodity code/symbol 期货品种代码或期货合约代码
member_name 期货商名称
rank 排名
volume 交易量或持仓量视乎参数rank_by的设定
volume_change 交易量或持仓量较之前的变动
"""
pass
def get_warehouse_stocks(code, start_date=None, end_date=None):
"""
获取期货某品种的注册仓单数据
:param code: 合约代码,如CU1903
:param start_date: 开始日期,必须指定
:param end_date: 结束日期,默认为策略当天日期的前一天
:return:
-pandas DataFrame
on_warrant 注册仓单量
market 期货品种对应交易所
"""
def get_roll_yield(code=None, start_date=datetime(1970, 1, 1), end_date=None):
"""
返回价格数据和展期收益率
:param code:
:param start_date:
:param end_date:
:return:
"""
assert isinstance(code, str)
index_cursor = conn['index']
filter_dict = {
'symbol': {"$regex": "^" + code + "[7-9]{2}$"},
'datetime': {
'$gte': start_date
}
}
if end_date:
filter_dict['datetime']['$lte'] = end_date
projection = {
"_id": 0,
"symbol": 1,
"datetime": 1,
"close": 1
}
hq = index_cursor.find(filter_dict, projection=projection)
hq_df = pd.DataFrame(list(hq))
hq_df = hq_df.pivot(index='datetime', columns='symbol', values='close')
spot_cursor = conn['spot_price']
filter_dict = {"code": code}
projection = {"_id": 0, "datetime": 1, "spot": 1}
spot = spot_cursor.find(filter_dict, projection=projection)
spot_df = pd.DataFrame(list(spot))
name = {'deliver': code + '77',
'domain': code + '88',
'far_month': code + '99'}
if spot_df.empty:
yield_df = hq_df
else:
spot_df.set_index('datetime', inplace=True)
yield_df = pd.concat([spot_df, hq_df], axis=1)
yield_df = yield_df.dropna()
yield_df['deliver_basis'] = (yield_df[name['deliver']] / yield_df['spot'] - 1)
yield_df['domain_basis'] = (yield_df[name['domain']] / yield_df['spot'] - 1)
yield_df['far_month_basis'] = (yield_df[name['far_month']] / yield_df['spot'] - 1)
yield_df['nearby_yield'] = (yield_df[name['domain']] / yield_df[name['deliver']] - 1)
yield_df['far_month_yield'] = (yield_df[name['far_month']] / yield_df[name['domain']] - 1)
return yield_df
if __name__ == '__main__':
start = datetime(2019, 1, 1)
end = datetime(2006, 8, 3)
# contracts = get_contracts('CU')
# contracts = get_contracts('CU', end)
# df = get_dominant('CU')
# df = get_dominant('CU', start_date=start)
# df = get_dominant('CU', start_date=start, end_date=end)
# df = get_dominant('CU', end_date=start)
# df = get_price(['CU88', 'M88'], start_date=start, end_date=end, fields=['open', 'close'])
| # -*- coding: utf-8 -*-
import pandas as pd
from datetime import datetime
from src.api import conn
from src.util import connect_mongo
from log import LogHandler
log = LogHandler('api.log')
def get_dominant(code, start_date=None, end_date=None):
"""
获取某一期货品种一段时间的主力合约列表。合约首次上市时,以当日收盘同品种持仓量最大者作为从第二个交易日开始的主力合约。
当同品种其他合约持仓量在收盘后超过当前主力合约时,从第二个交易日开始进行主力合约的切换。日内不会进行主力合约的切换。
:param code: 期货合约品种,例如沪深300股指期货为'IF'
:param start_date: datetime.datetime 开始日期,默认为期货品种最早上市日期后一交易日
:param end_date: 结束日期,默认为当前日期
:return: pd.DataFrame
"""
# 连接数据库
# conn = connect_mongo(db='quote')
cursor = conn['index']
filter_dict = {'code': code, 'symbol': code + '99'}
if start_date is not None: # 使用前一个交易日
filter_dict['datetime'] = {'$gte': start_date}
if end_date is not None:
if 'datetime' in filter_dict:
filter_dict['datetime']['$lte'] = end_date
else:
filter_dict['datetime'] = {'$lte': end_date}
contract = cursor.find(filter_dict, {'_id': 0, 'datetime': 1, 'contract': 1})
contract_df = pd.DataFrame(list(contract))
contract_df.set_index('datetime', inplace=True)
return contract_df
def get_contracts(code, date=None):
"""
获取某一期货品种在策略当前日期的可交易合约symbol列表。
按照到期月份,下标从小到大排列,返回列表中第一个合约对应的就是该品种的近月合约。
:param code: 期货合约品种,例如沪深300股指期货为'IF'
:param date: datetime.datetime 查询日期,默认为当日
:return: list
"""
# 连接数据库
# conn = connect_mongo(db='quote')
cursor = conn['future']
if date is None:
date = datetime.today()
filter_dict = {'code': code, 'datetime': date}
contract = cursor.find(filter_dict, {'_id': 0, 'symbol': 1})
return pd.DataFrame(list(contract))
def get_member_rank(symbol, trading_date, rank_by):
"""
获取期货某合约的会员排名数据
:param symbol: 可以是期货的具体合约或者品种,合约代码,如CU1903,品种 CU
:param trading_date: datetime.datetime 交易日期,默认为当日
:param rank_by: 排名依据,默认为volume即根据持仓量统计排名,另外可选'long'和'short',分别对应持买仓量统计和持卖仓量统计。
:return:
-pandas DataFrame
commodity code/symbol 期货品种代码或期货合约代码
member_name 期货商名称
rank 排名
volume 交易量或持仓量视乎参数rank_by的设定
volume_change 交易量或持仓量较之前的变动
"""
pass
def get_warehouse_stocks(code, start_date=None, end_date=None):
"""
获取期货某品种的注册仓单数据
:param code: 合约代码,如CU1903
:param start_date: 开始日期,必须指定
:param end_date: 结束日期,默认为策略当天日期的前一天
:return:
-pandas DataFrame
on_warrant 注册仓单量
market 期货品种对应交易所
"""
def get_roll_yield(code=None, start_date=datetime(1970, 1, 1), end_date=None):
"""
返回价格数据和展期收益率
:param code:
:param start_date:
:param end_date:
:return:
"""
assert isinstance(code, str)
index_cursor = conn['index']
filter_dict = {
'symbol': {"$regex": "^" + code + "[7-9]{2}$"},
'datetime': {
'$gte': start_date
}
}
if end_date:
filter_dict['datetime']['$lte'] = end_date
projection = {
"_id": 0,
"symbol": 1,
"datetime": 1,
"close": 1
}
hq = index_cursor.find(filter_dict, projection=projection)
hq_df = pd.DataFrame(list(hq))
hq_df = hq_df.pivot(index='datetime', columns='symbol', values='close')
spot_cursor = conn['spot_price']
filter_dict = {"code": code}
projection = {"_id": 0, "datetime": 1, "spot": 1}
spot = spot_cursor.find(filter_dict, projection=projection)
spot_df = pd.DataFrame(list(spot))
name = {'deliver': code + '77',
'domain': code + '88',
'far_month': code + '99'}
if spot_df.empty:
yield_df = hq_df
else:
spot_df.set_index('datetime', inplace=True)
yield_df = pd.concat([spot_df, hq_df], axis=1)
yield_df = yield_df.dropna()
yield_df['deliver_basis'] = (yield_df[name['deliver']] / yield_df['spot'] - 1)
yield_df['domain_basis'] = (yield_df[name['domain']] / yield_df['spot'] - 1)
yield_df['far_month_basis'] = (yield_df[name['far_month']] / yield_df['spot'] - 1)
yield_df['nearby_yield'] = (yield_df[name['domain']] / yield_df[name['deliver']] - 1)
yield_df['far_month_yield'] = (yield_df[name['far_month']] / yield_df[name['domain']] - 1)
return yield_df
if __name__ == '__main__':
start = datetime(2019, 1, 1)
end = datetime(2006, 8, 3)
# contracts = get_contracts('CU')
# contracts = get_contracts('CU', end)
# df = get_dominant('CU')
# df = get_dominant('CU', start_date=start)
# df = get_dominant('CU', start_date=start, end_date=end)
# df = get_dominant('CU', end_date=start)
# df = get_price(['CU88', 'M88'], start_date=start, end_date=end, fields=['open', 'close'])
| zh | 0.71366 | # -*- coding: utf-8 -*- 获取某一期货品种一段时间的主力合约列表。合约首次上市时,以当日收盘同品种持仓量最大者作为从第二个交易日开始的主力合约。 当同品种其他合约持仓量在收盘后超过当前主力合约时,从第二个交易日开始进行主力合约的切换。日内不会进行主力合约的切换。 :param code: 期货合约品种,例如沪深300股指期货为'IF' :param start_date: datetime.datetime 开始日期,默认为期货品种最早上市日期后一交易日 :param end_date: 结束日期,默认为当前日期 :return: pd.DataFrame # 连接数据库 # conn = connect_mongo(db='quote') # 使用前一个交易日 获取某一期货品种在策略当前日期的可交易合约symbol列表。 按照到期月份,下标从小到大排列,返回列表中第一个合约对应的就是该品种的近月合约。 :param code: 期货合约品种,例如沪深300股指期货为'IF' :param date: datetime.datetime 查询日期,默认为当日 :return: list # 连接数据库 # conn = connect_mongo(db='quote') 获取期货某合约的会员排名数据 :param symbol: 可以是期货的具体合约或者品种,合约代码,如CU1903,品种 CU :param trading_date: datetime.datetime 交易日期,默认为当日 :param rank_by: 排名依据,默认为volume即根据持仓量统计排名,另外可选'long'和'short',分别对应持买仓量统计和持卖仓量统计。 :return: -pandas DataFrame commodity code/symbol 期货品种代码或期货合约代码 member_name 期货商名称 rank 排名 volume 交易量或持仓量视乎参数rank_by的设定 volume_change 交易量或持仓量较之前的变动 获取期货某品种的注册仓单数据 :param code: 合约代码,如CU1903 :param start_date: 开始日期,必须指定 :param end_date: 结束日期,默认为策略当天日期的前一天 :return: -pandas DataFrame on_warrant 注册仓单量 market 期货品种对应交易所 返回价格数据和展期收益率 :param code: :param start_date: :param end_date: :return: # contracts = get_contracts('CU') # contracts = get_contracts('CU', end) # df = get_dominant('CU') # df = get_dominant('CU', start_date=start) # df = get_dominant('CU', start_date=start, end_date=end) # df = get_dominant('CU', end_date=start) # df = get_price(['CU88', 'M88'], start_date=start, end_date=end, fields=['open', 'close']) | 2.450505 | 2 |
accelerator/tests/test_application_answer.py | masschallenge/django-accelerator | 6 | 6621009 | <reponame>masschallenge/django-accelerator
# MIT License
# Copyright (c) 2017 MassChallenge, Inc.
from __future__ import unicode_literals
from django.test import TestCase
from accelerator.tests.factories import ApplicationAnswerFactory
class TestApplicationAnswer(TestCase):
def test_str(self):
application_answer = ApplicationAnswerFactory()
assert (application_answer.application.startup.name in
application_answer.__str__())
assert (str(application_answer.application_question.question_number) in
application_answer.__str__())
| # MIT License
# Copyright (c) 2017 MassChallenge, Inc.
from __future__ import unicode_literals
from django.test import TestCase
from accelerator.tests.factories import ApplicationAnswerFactory
class TestApplicationAnswer(TestCase):
def test_str(self):
application_answer = ApplicationAnswerFactory()
assert (application_answer.application.startup.name in
application_answer.__str__())
assert (str(application_answer.application_question.question_number) in
application_answer.__str__()) | en | 0.640174 | # MIT License # Copyright (c) 2017 MassChallenge, Inc. | 2.436134 | 2 |
src/dataclass_bakery/generators/generators_exceptions.py | miguelFLG13/dataclass-bakery | 1 | 6621010 | class TypeNotAllow(Exception):
def __init__(self, *args, **kwargs):
super().__init__("TypeNotAllow: Creating a dataclass object")
| class TypeNotAllow(Exception):
def __init__(self, *args, **kwargs):
super().__init__("TypeNotAllow: Creating a dataclass object")
| none | 1 | 2.386627 | 2 | |
Pipeline/run_dock6_to_nc.py | CCBatIIT/AlGDock | 15 | 6621011 | <gh_stars>10-100
job_block = 50
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--job_block', default=job_block, type=int, \
help='Number of dockings per job')
parser.add_argument('--dry', action='store_true', default=False, \
help='Does not actually submit the job to the queue')
args = parser.parse_args()
# Find dock6_to_nc.py
import os, inspect
dirs = {}
dirs['script'] = os.path.dirname(os.path.abspath(\
inspect.getfile(inspect.currentframe())))
execfile(os.path.join(dirs['script'],'_external_paths.py'))
dock6_to_nc_script = os.path.join(dirs['script'], 'dock6_to_nc.py')
command_paths = findPaths(['qsub_command'])
import glob
# Convert from mol2 to netcdf files
command_list = []
FNs = [FN for FN in glob.glob('dock6/*/*/*.mol2.gz') \
if os.path.getsize(FN)>0]
FNs_c = []
outFNs_c = []
for FN in FNs:
if not os.path.isfile(FN[:-8]+'.nc'):
command_list.append('python {0} {1}'.format(dock6_to_nc_script, FN))
FNs_c.append(FN)
outFNs_c.append(FN[:-8]+'.nc')
ncommands = len(command_list)
if ncommands==args.job_block or ((ncommands>0) and (FN==FNs[-1])):
command = '; '.join(command_list)
print command
os.system(' '.join(['python',command_paths['qsub_command'],\
'dock6_to_nc', "'"+command+"'", \
'--input_files', dock6_to_nc_script, ' '.join(FNs_c) + \
'--output_files', ' '.join(outFNs_c), \
{True:'--dry',False:''}[args.dry]]))
command_list = []
FNs_c = []
outFNs_c = []
| job_block = 50
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--job_block', default=job_block, type=int, \
help='Number of dockings per job')
parser.add_argument('--dry', action='store_true', default=False, \
help='Does not actually submit the job to the queue')
args = parser.parse_args()
# Find dock6_to_nc.py
import os, inspect
dirs = {}
dirs['script'] = os.path.dirname(os.path.abspath(\
inspect.getfile(inspect.currentframe())))
execfile(os.path.join(dirs['script'],'_external_paths.py'))
dock6_to_nc_script = os.path.join(dirs['script'], 'dock6_to_nc.py')
command_paths = findPaths(['qsub_command'])
import glob
# Convert from mol2 to netcdf files
command_list = []
FNs = [FN for FN in glob.glob('dock6/*/*/*.mol2.gz') \
if os.path.getsize(FN)>0]
FNs_c = []
outFNs_c = []
for FN in FNs:
if not os.path.isfile(FN[:-8]+'.nc'):
command_list.append('python {0} {1}'.format(dock6_to_nc_script, FN))
FNs_c.append(FN)
outFNs_c.append(FN[:-8]+'.nc')
ncommands = len(command_list)
if ncommands==args.job_block or ((ncommands>0) and (FN==FNs[-1])):
command = '; '.join(command_list)
print command
os.system(' '.join(['python',command_paths['qsub_command'],\
'dock6_to_nc', "'"+command+"'", \
'--input_files', dock6_to_nc_script, ' '.join(FNs_c) + \
'--output_files', ' '.join(outFNs_c), \
{True:'--dry',False:''}[args.dry]]))
command_list = []
FNs_c = []
outFNs_c = [] | en | 0.538521 | # Find dock6_to_nc.py # Convert from mol2 to netcdf files | 2.387426 | 2 |
leetcode/0-250/145-198. House Robber.py | palash24/algorithms-and-data-structures | 23 | 6621012 | # 198. House Robber
class Solution(object):
def rob(self, nums):
l = len(nums)
robs = [0] * (l + 2)
for i in range(l):
robs[i + 2] = max(robs[i] + nums[i], robs[i + 1])
return robs[l + 1]
| # 198. House Robber
class Solution(object):
def rob(self, nums):
l = len(nums)
robs = [0] * (l + 2)
for i in range(l):
robs[i + 2] = max(robs[i] + nums[i], robs[i + 1])
return robs[l + 1]
| en | 0.688933 | # 198. House Robber | 2.890374 | 3 |
2020-03-26-Python-Object-Model/examples/set-name.py | s3rvac/talks | 2 | 6621013 | <reponame>s3rvac/talks<filename>2020-03-26-Python-Object-Model/examples/set-name.py
# An example of using __set_name__. It can be used for descriptors to
# automatically detect the name of the attribute they have been assigned to.
# Available since Python 3.6.
class X:
def __set_name__(self, owner, name):
print(f'owner: {owner}, name: {name!r}')
class A:
a = X() # owner: <class '__main__.A'>, name: 'a'
b = X() # owner: <class '__main__.A'>, name: 'b'
c = X() # owner: <class '__main__.A'>, name: 'c'
| # An example of using __set_name__. It can be used for descriptors to
# automatically detect the name of the attribute they have been assigned to.
# Available since Python 3.6.
class X:
def __set_name__(self, owner, name):
print(f'owner: {owner}, name: {name!r}')
class A:
a = X() # owner: <class '__main__.A'>, name: 'a'
b = X() # owner: <class '__main__.A'>, name: 'b'
c = X() # owner: <class '__main__.A'>, name: 'c' | en | 0.681022 | # An example of using __set_name__. It can be used for descriptors to # automatically detect the name of the attribute they have been assigned to. # Available since Python 3.6. # owner: <class '__main__.A'>, name: 'a' # owner: <class '__main__.A'>, name: 'b' # owner: <class '__main__.A'>, name: 'c' | 3.605058 | 4 |
Chromatography_Simulator.py | Tocha4/-Displacement--Chromatography | 2 | 6621014 | <filename>Chromatography_Simulator.py
from PyQt5.QtWidgets import QApplication
from Graphs.fig_01 import PlotCanvas
from Graphs.graphChromaCanvas import PlotCanvas_Chrom
from Graphs.Popup_ai import popup
from fa_02 import App
import sys
import numpy as np
import seaborn as sns; sns.set()
class main(App):
def __init__(self):
super().__init__()
self.ui.resetButton.clicked.connect(self.reset_Data)
self.m = PlotCanvas(self.ui.graphColumn_2, width=7, height=4.5, dpi=100)
self.mm = PlotCanvas_Chrom(self.ui.graphChromatogram, width=7, height=4.5)
self.ui.horizontalScrollBar.valueChanged.connect(self.changeGraphColumn)
self.ui.adsoptionIsotherm.clicked.connect(self.show_AI)
self.show()
self.rezise_graph()
def rezise_graph(self):
geom = self.ui.graphChromatogram.geometry()
self.mm.resize(geom.width(),geom.height())
self.m.resize(geom.width(),geom.height())
def show_AI(self):
self.get_settings()
self.ui_graph = popup(self.sample)
def changeChromatogram(self):
geom = self.ui.graphChromatogram.geometry()
self.mm.resize(geom.width(),geom.height())
showAll = self.ui.showAllC.checkState()
n = self.ui.horizontalScrollBar.value()
if n >= self.C.shape[0]-1:
n = self.C.shape[0]-1
concentration = self.ui.doubleSpin_Concentration_Column.value()
x_axes = np.linspace(0,n*self.column.dt, n)
dz = self.C.shape[0]*self.column.dt
self.mm.plot(range(len(self.C[0,0,:])),x_axes,self.C,self.q,n, concentration, dz,showAll)
def changeGraphColumn(self):
geom = self.ui.graphColumn_2.geometry()
self.m.resize(geom.width(),geom.height())
n = self.ui.horizontalScrollBar.value()
if n >= self.C.shape[0]-1:
n = self.C.shape[0]-1
concentration = self.ui.doubleSpin_Concentration_Column.value()
col = np.linspace(1,self.column.geometry[1]-1,int(self.column.geometry[1]//self.column.dz)-1)
self.m.plot(range(len(self.C[0,0,:])),col,self.C,self.q,n,concentration)
self.changeChromatogram()
def reset_Data(self):
self.C = None
self.q = None
if self.simu != None:
self.simu.C = None
self.simu.q = None
else: pass
if __name__ =='__main__':
# lets test some stuff
app = QApplication.instance()
if app == None:
app = QApplication(sys.argv)
ex = main()
sys.exit(app.exec_())
| <filename>Chromatography_Simulator.py
from PyQt5.QtWidgets import QApplication
from Graphs.fig_01 import PlotCanvas
from Graphs.graphChromaCanvas import PlotCanvas_Chrom
from Graphs.Popup_ai import popup
from fa_02 import App
import sys
import numpy as np
import seaborn as sns; sns.set()
class main(App):
def __init__(self):
super().__init__()
self.ui.resetButton.clicked.connect(self.reset_Data)
self.m = PlotCanvas(self.ui.graphColumn_2, width=7, height=4.5, dpi=100)
self.mm = PlotCanvas_Chrom(self.ui.graphChromatogram, width=7, height=4.5)
self.ui.horizontalScrollBar.valueChanged.connect(self.changeGraphColumn)
self.ui.adsoptionIsotherm.clicked.connect(self.show_AI)
self.show()
self.rezise_graph()
def rezise_graph(self):
geom = self.ui.graphChromatogram.geometry()
self.mm.resize(geom.width(),geom.height())
self.m.resize(geom.width(),geom.height())
def show_AI(self):
self.get_settings()
self.ui_graph = popup(self.sample)
def changeChromatogram(self):
geom = self.ui.graphChromatogram.geometry()
self.mm.resize(geom.width(),geom.height())
showAll = self.ui.showAllC.checkState()
n = self.ui.horizontalScrollBar.value()
if n >= self.C.shape[0]-1:
n = self.C.shape[0]-1
concentration = self.ui.doubleSpin_Concentration_Column.value()
x_axes = np.linspace(0,n*self.column.dt, n)
dz = self.C.shape[0]*self.column.dt
self.mm.plot(range(len(self.C[0,0,:])),x_axes,self.C,self.q,n, concentration, dz,showAll)
def changeGraphColumn(self):
geom = self.ui.graphColumn_2.geometry()
self.m.resize(geom.width(),geom.height())
n = self.ui.horizontalScrollBar.value()
if n >= self.C.shape[0]-1:
n = self.C.shape[0]-1
concentration = self.ui.doubleSpin_Concentration_Column.value()
col = np.linspace(1,self.column.geometry[1]-1,int(self.column.geometry[1]//self.column.dz)-1)
self.m.plot(range(len(self.C[0,0,:])),col,self.C,self.q,n,concentration)
self.changeChromatogram()
def reset_Data(self):
self.C = None
self.q = None
if self.simu != None:
self.simu.C = None
self.simu.q = None
else: pass
if __name__ =='__main__':
# lets test some stuff
app = QApplication.instance()
if app == None:
app = QApplication(sys.argv)
ex = main()
sys.exit(app.exec_())
| en | 0.798035 | # lets test some stuff | 2.649259 | 3 |
setup.py | jjnp/traefik-eval | 0 | 6621015 | <filename>setup.py
import os
import setuptools
try:
with open("README.md", "r") as fh:
long_description = fh.read()
except FileNotFoundError:
long_description = ''
try:
with open("requirements-dev.txt", "r") as fh:
tests_require = [line for line in fh.read().split(os.linesep) if line]
except FileNotFoundError:
tests_require = []
try:
with open("requirements.txt", "r") as fh:
install_requires = [line for line in fh.read().split(os.linesep) if line and not line.startswith('git')]
except FileNotFoundError:
install_requires = []
setuptools.setup(
name="galileo-jupyter",
version="0.0.1.dev1",
author="<NAME>",
author_email="<EMAIL>",
description="Galileo Jupyter: Tools for analyzing galileo experiments",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://git.dsg.tuwien.ac.at/mc2/galileo-jupyter",
packages=setuptools.find_packages(),
test_suite="tests",
tests_require=tests_require,
install_requires=install_requires,
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
],
)
| <filename>setup.py
import os
import setuptools
try:
with open("README.md", "r") as fh:
long_description = fh.read()
except FileNotFoundError:
long_description = ''
try:
with open("requirements-dev.txt", "r") as fh:
tests_require = [line for line in fh.read().split(os.linesep) if line]
except FileNotFoundError:
tests_require = []
try:
with open("requirements.txt", "r") as fh:
install_requires = [line for line in fh.read().split(os.linesep) if line and not line.startswith('git')]
except FileNotFoundError:
install_requires = []
setuptools.setup(
name="galileo-jupyter",
version="0.0.1.dev1",
author="<NAME>",
author_email="<EMAIL>",
description="Galileo Jupyter: Tools for analyzing galileo experiments",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://git.dsg.tuwien.ac.at/mc2/galileo-jupyter",
packages=setuptools.find_packages(),
test_suite="tests",
tests_require=tests_require,
install_requires=install_requires,
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
],
)
| none | 1 | 1.737867 | 2 | |
OpenCV/Section2-Advanced/bitwise.py | CChCheChen/OpenCV-Course | 0 | 6621016 | import cv2 as cv
import numpy as np
blank = np.zeros((400,400), dtype='uint8')
rectangle = cv.rectangle(blank.copy(), (30,30), (370,370), 255, -1)
circle = cv.circle(blank.copy(), (200,200), 200, 255, -1)
cv.imshow('rectangle', rectangle)
cv.imshow('circle', circle)
# Bitwise AND --> intersecting regions
bitwise_and = cv.bitwise_and(rectangle,circle)
cv.imshow('Bitwise AND', bitwise_and)
# Bitwise OR --> non-intersecting and intersecting regions
bitwise_or = cv.bitwise_or(rectangle,circle)
cv.imshow('Bitwise OR', bitwise_or)
# Bitwise XOR --> non-intersecting regions
bitwise_xor = cv.bitwise_xor(rectangle,circle)
cv.imshow('Bitwise XOR', bitwise_xor)
# Bitwise NOT
bitwise_not_circle = cv.bitwise_not(circle)
cv.imshow('Bitwise NOT for circle', bitwise_not_circle)
bitwise_not_rectangle = cv.bitwise_not(rectangle)
cv.imshow('Bitwise NOT for rectangle', bitwise_not_rectangle)
cv.waitKey(0) | import cv2 as cv
import numpy as np
blank = np.zeros((400,400), dtype='uint8')
rectangle = cv.rectangle(blank.copy(), (30,30), (370,370), 255, -1)
circle = cv.circle(blank.copy(), (200,200), 200, 255, -1)
cv.imshow('rectangle', rectangle)
cv.imshow('circle', circle)
# Bitwise AND --> intersecting regions
bitwise_and = cv.bitwise_and(rectangle,circle)
cv.imshow('Bitwise AND', bitwise_and)
# Bitwise OR --> non-intersecting and intersecting regions
bitwise_or = cv.bitwise_or(rectangle,circle)
cv.imshow('Bitwise OR', bitwise_or)
# Bitwise XOR --> non-intersecting regions
bitwise_xor = cv.bitwise_xor(rectangle,circle)
cv.imshow('Bitwise XOR', bitwise_xor)
# Bitwise NOT
bitwise_not_circle = cv.bitwise_not(circle)
cv.imshow('Bitwise NOT for circle', bitwise_not_circle)
bitwise_not_rectangle = cv.bitwise_not(rectangle)
cv.imshow('Bitwise NOT for rectangle', bitwise_not_rectangle)
cv.waitKey(0) | en | 0.718984 | # Bitwise AND --> intersecting regions # Bitwise OR --> non-intersecting and intersecting regions # Bitwise XOR --> non-intersecting regions # Bitwise NOT | 3.095028 | 3 |
include/HydrusNATPunch.py | antonpaquin/hydrus | 0 | 6621017 | <filename>include/HydrusNATPunch.py
import HydrusConstants as HC
import HydrusData
import HydrusExceptions
import HydrusText
import os
import shlex
import socket
import subprocess
import threading
import traceback
# new stuff starts here
if HC.PLATFORM_LINUX:
upnpc_path = os.path.join( HC.BIN_DIR, 'upnpc_linux' )
elif HC.PLATFORM_OSX:
upnpc_path = os.path.join( HC.BIN_DIR, 'upnpc_osx' )
elif HC.PLATFORM_WINDOWS:
upnpc_path = os.path.join( HC.BIN_DIR, 'upnpc_win32.exe' )
EXTERNAL_IP = {}
EXTERNAL_IP[ 'ip' ] = None
EXTERNAL_IP[ 'time' ] = 0
def GetExternalIP():
if 'external_host' in HC.options and HC.options[ 'external_host' ] is not None:
return HC.options[ 'external_host' ]
if HydrusData.TimeHasPassed( EXTERNAL_IP[ 'time' ] + ( 3600 * 24 ) ):
cmd = '"' + upnpc_path + '" -l'
p = subprocess.Popen( shlex.split( cmd ), stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.PIPE, startupinfo = HydrusData.GetHideTerminalSubprocessStartupInfo() )
HydrusData.WaitForProcessToFinish( p, 30 )
( output, error ) = p.communicate()
if error is not None and len( error ) > 0:
raise Exception( 'Problem while trying to fetch External IP:' + os.linesep * 2 + HydrusData.ToUnicode( error ) )
else:
try:
lines = HydrusText.DeserialiseNewlinedTexts( output )
i = lines.index( 'i protocol exPort->inAddr:inPort description remoteHost leaseTime' )
'''ExternalIPAddress = ip'''
( gumpf, external_ip_address ) = lines[ i - 1 ].split( ' = ' )
except ValueError:
raise Exception( 'Could not parse external IP!' )
if external_ip_address == '0.0.0.0':
raise Exception( 'Your UPnP device returned your external IP as 0.0.0.0! Try rebooting it, or overwrite it in options!' )
EXTERNAL_IP[ 'ip' ] = external_ip_address
EXTERNAL_IP[ 'time' ] = HydrusData.GetNow()
return EXTERNAL_IP[ 'ip' ]
def GetLocalIP(): return socket.gethostbyname( socket.gethostname() )
def AddUPnPMapping( internal_client, internal_port, external_port, protocol, description, duration = 3600 ):
cmd = '"' + upnpc_path + '" -e "' + description + '" -a ' + internal_client + ' ' + str( internal_port ) + ' ' + str( external_port ) + ' ' + protocol + ' ' + str( duration )
p = subprocess.Popen( shlex.split( cmd ), stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.PIPE, startupinfo = HydrusData.GetHideTerminalSubprocessStartupInfo() )
HydrusData.WaitForProcessToFinish( p, 30 )
( output, error ) = p.communicate()
if 'x.x.x.x:' + str( external_port ) + ' TCP is redirected to internal ' + internal_client + ':' + str( internal_port ) in output:
raise HydrusExceptions.FirewallException( 'The UPnP mapping of ' + internal_client + ':' + internal_port + '->external:' + external_port + ' already exists as a port forward. If this UPnP mapping is automatic, please disable it.' )
if output is not None and 'failed with code' in output:
if 'UnknownError' in output:
raise HydrusExceptions.FirewallException( 'Problem while trying to add UPnP mapping:' + os.linesep * 2 + HydrusData.ToUnicode( output ) )
else:
raise Exception( 'Problem while trying to add UPnP mapping:' + os.linesep * 2 + HydrusData.ToUnicode( output ) )
if error is not None and len( error ) > 0:
raise Exception( 'Problem while trying to add UPnP mapping:' + os.linesep * 2 + HydrusData.ToUnicode( error ) )
def GetUPnPMappings():
external_ip_address = GetExternalIP()
cmd = '"' + upnpc_path + '" -l'
p = subprocess.Popen( shlex.split( cmd ), stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.PIPE, startupinfo = HydrusData.GetHideTerminalSubprocessStartupInfo() )
HydrusData.WaitForProcessToFinish( p, 30 )
( output, error ) = p.communicate()
if error is not None and len( error ) > 0:
raise Exception( 'Problem while trying to fetch UPnP mappings:' + os.linesep * 2 + HydrusData.ToUnicode( error ) )
else:
try:
lines = HydrusText.DeserialiseNewlinedTexts( output )
i = lines.index( 'i protocol exPort->inAddr:inPort description remoteHost leaseTime' )
data_lines = []
i += 1
while i < len( lines ):
if not lines[ i ][0] in ( ' ', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9' ): break
data_lines.append( lines[ i ] )
i += 1
processed_data = []
for line in data_lines:
''' 0 UDP 65533->192.168.0.197:65533 'Skype UDP at 192.168.0.197:65533 (2665)' '' 0'''
while ' ' in line: line = line.replace( ' ', ' ' )
if line.startswith( ' ' ): ( empty, number, protocol, mapping_data, rest_of_line ) = line.split( ' ', 4 )
else: ( number, protocol, mapping_data, rest_of_line ) = line.split( ' ', 3 )
( external_port, rest_of_mapping_data ) = mapping_data.split( '->' )
external_port = int( external_port )
( internal_client, internal_port ) = rest_of_mapping_data.split( ':' )
internal_port = int( internal_port )
( empty, description, space, remote_host, rest_of_line ) = rest_of_line.split( '\'', 4 )
lease_time = int( rest_of_line[1:] )
processed_data.append( ( description, internal_client, internal_port, external_ip_address, external_port, protocol, lease_time ) )
return processed_data
except Exception as e:
HydrusData.Print( 'UPnP problem:' )
HydrusData.Print( traceback.format_exc() )
HydrusData.Print( 'Full response follows:' )
HydrusData.Print( output )
raise Exception( 'Problem while trying to parse UPnP mappings:' + os.linesep * 2 + HydrusData.ToUnicode( e ) )
def RemoveUPnPMapping( external_port, protocol ):
cmd = '"' + upnpc_path + '" -d ' + str( external_port ) + ' ' + protocol
p = subprocess.Popen( shlex.split( cmd ), stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.PIPE, startupinfo = HydrusData.GetHideTerminalSubprocessStartupInfo() )
HydrusData.WaitForProcessToFinish( p, 30 )
( output, error ) = p.communicate()
if error is not None and len( error ) > 0: raise Exception( 'Problem while trying to remove UPnP mapping:' + os.linesep * 2 + HydrusData.ToUnicode( error ) )
| <filename>include/HydrusNATPunch.py
import HydrusConstants as HC
import HydrusData
import HydrusExceptions
import HydrusText
import os
import shlex
import socket
import subprocess
import threading
import traceback
# new stuff starts here
if HC.PLATFORM_LINUX:
upnpc_path = os.path.join( HC.BIN_DIR, 'upnpc_linux' )
elif HC.PLATFORM_OSX:
upnpc_path = os.path.join( HC.BIN_DIR, 'upnpc_osx' )
elif HC.PLATFORM_WINDOWS:
upnpc_path = os.path.join( HC.BIN_DIR, 'upnpc_win32.exe' )
EXTERNAL_IP = {}
EXTERNAL_IP[ 'ip' ] = None
EXTERNAL_IP[ 'time' ] = 0
def GetExternalIP():
if 'external_host' in HC.options and HC.options[ 'external_host' ] is not None:
return HC.options[ 'external_host' ]
if HydrusData.TimeHasPassed( EXTERNAL_IP[ 'time' ] + ( 3600 * 24 ) ):
cmd = '"' + upnpc_path + '" -l'
p = subprocess.Popen( shlex.split( cmd ), stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.PIPE, startupinfo = HydrusData.GetHideTerminalSubprocessStartupInfo() )
HydrusData.WaitForProcessToFinish( p, 30 )
( output, error ) = p.communicate()
if error is not None and len( error ) > 0:
raise Exception( 'Problem while trying to fetch External IP:' + os.linesep * 2 + HydrusData.ToUnicode( error ) )
else:
try:
lines = HydrusText.DeserialiseNewlinedTexts( output )
i = lines.index( 'i protocol exPort->inAddr:inPort description remoteHost leaseTime' )
'''ExternalIPAddress = ip'''
( gumpf, external_ip_address ) = lines[ i - 1 ].split( ' = ' )
except ValueError:
raise Exception( 'Could not parse external IP!' )
if external_ip_address == '0.0.0.0':
raise Exception( 'Your UPnP device returned your external IP as 0.0.0.0! Try rebooting it, or overwrite it in options!' )
EXTERNAL_IP[ 'ip' ] = external_ip_address
EXTERNAL_IP[ 'time' ] = HydrusData.GetNow()
return EXTERNAL_IP[ 'ip' ]
def GetLocalIP(): return socket.gethostbyname( socket.gethostname() )
def AddUPnPMapping( internal_client, internal_port, external_port, protocol, description, duration = 3600 ):
cmd = '"' + upnpc_path + '" -e "' + description + '" -a ' + internal_client + ' ' + str( internal_port ) + ' ' + str( external_port ) + ' ' + protocol + ' ' + str( duration )
p = subprocess.Popen( shlex.split( cmd ), stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.PIPE, startupinfo = HydrusData.GetHideTerminalSubprocessStartupInfo() )
HydrusData.WaitForProcessToFinish( p, 30 )
( output, error ) = p.communicate()
if 'x.x.x.x:' + str( external_port ) + ' TCP is redirected to internal ' + internal_client + ':' + str( internal_port ) in output:
raise HydrusExceptions.FirewallException( 'The UPnP mapping of ' + internal_client + ':' + internal_port + '->external:' + external_port + ' already exists as a port forward. If this UPnP mapping is automatic, please disable it.' )
if output is not None and 'failed with code' in output:
if 'UnknownError' in output:
raise HydrusExceptions.FirewallException( 'Problem while trying to add UPnP mapping:' + os.linesep * 2 + HydrusData.ToUnicode( output ) )
else:
raise Exception( 'Problem while trying to add UPnP mapping:' + os.linesep * 2 + HydrusData.ToUnicode( output ) )
if error is not None and len( error ) > 0:
raise Exception( 'Problem while trying to add UPnP mapping:' + os.linesep * 2 + HydrusData.ToUnicode( error ) )
def GetUPnPMappings():
external_ip_address = GetExternalIP()
cmd = '"' + upnpc_path + '" -l'
p = subprocess.Popen( shlex.split( cmd ), stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.PIPE, startupinfo = HydrusData.GetHideTerminalSubprocessStartupInfo() )
HydrusData.WaitForProcessToFinish( p, 30 )
( output, error ) = p.communicate()
if error is not None and len( error ) > 0:
raise Exception( 'Problem while trying to fetch UPnP mappings:' + os.linesep * 2 + HydrusData.ToUnicode( error ) )
else:
try:
lines = HydrusText.DeserialiseNewlinedTexts( output )
i = lines.index( 'i protocol exPort->inAddr:inPort description remoteHost leaseTime' )
data_lines = []
i += 1
while i < len( lines ):
if not lines[ i ][0] in ( ' ', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9' ): break
data_lines.append( lines[ i ] )
i += 1
processed_data = []
for line in data_lines:
''' 0 UDP 65533->192.168.0.197:65533 'Skype UDP at 192.168.0.197:65533 (2665)' '' 0'''
while ' ' in line: line = line.replace( ' ', ' ' )
if line.startswith( ' ' ): ( empty, number, protocol, mapping_data, rest_of_line ) = line.split( ' ', 4 )
else: ( number, protocol, mapping_data, rest_of_line ) = line.split( ' ', 3 )
( external_port, rest_of_mapping_data ) = mapping_data.split( '->' )
external_port = int( external_port )
( internal_client, internal_port ) = rest_of_mapping_data.split( ':' )
internal_port = int( internal_port )
( empty, description, space, remote_host, rest_of_line ) = rest_of_line.split( '\'', 4 )
lease_time = int( rest_of_line[1:] )
processed_data.append( ( description, internal_client, internal_port, external_ip_address, external_port, protocol, lease_time ) )
return processed_data
except Exception as e:
HydrusData.Print( 'UPnP problem:' )
HydrusData.Print( traceback.format_exc() )
HydrusData.Print( 'Full response follows:' )
HydrusData.Print( output )
raise Exception( 'Problem while trying to parse UPnP mappings:' + os.linesep * 2 + HydrusData.ToUnicode( e ) )
def RemoveUPnPMapping( external_port, protocol ):
cmd = '"' + upnpc_path + '" -d ' + str( external_port ) + ' ' + protocol
p = subprocess.Popen( shlex.split( cmd ), stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.PIPE, startupinfo = HydrusData.GetHideTerminalSubprocessStartupInfo() )
HydrusData.WaitForProcessToFinish( p, 30 )
( output, error ) = p.communicate()
if error is not None and len( error ) > 0: raise Exception( 'Problem while trying to remove UPnP mapping:' + os.linesep * 2 + HydrusData.ToUnicode( error ) )
| en | 0.33493 | # new stuff starts here ExternalIPAddress = ip 0 UDP 65533->192.168.0.197:65533 'Skype UDP at 192.168.0.197:65533 (2665)' '' 0 | 1.976149 | 2 |
binary_image_processing.py | Geo-Joy/opencv-basic-hacks | 0 | 6621018 | import cv2
import numpy as np
### THRESHOLDING
# reading the image as greyscale
org_img = cv2.imread("data/images/threshold.png", cv2.IMREAD_GRAYSCALE)
threshold = 0 # set value above which thresholding works
maxValue = 255 # min value below which thresholding should be applied
#retval, dst = cv.threshold(src, thresh, maxval, type[, dst])
ret, th_img = cv2.threshold(org_img, threshold, maxValue, cv2.THRESH_BINARY)
# display the images
cv2.imshow("Original Image", org_img)
cv2.imshow("Thresholded Image", th_img)
# write the new image to disk
cv2.imwrite("data/images/threshold_out.png", th_img)
### Dilation
org_img_dilation = cv2.imread("data/images/truth.png", cv2.IMREAD_GRAYSCALE)
# We create the kernel/structuring element that is used for dilation operation.
dilationSize = 5
kernal = cv2.getStructuringElement(cv2.MORPH_CROSS, (2*dilationSize+1, 2*dilationSize+1),(dilationSize, dilationSize))
print(kernal)
imageDilated = cv2.dilate(org_img_dilation, kernal)
cv2.imshow("Original Image", org_img_dilation)
cv2.imshow("Dilated Image", imageDilated)
### Erosion
org_img_dilation = cv2.imread("data/images/truth.png", cv2.IMREAD_GRAYSCALE)
# We create the kernel/structuring element that is used for dilation operation.
dilationSize = 5
kernal = cv2.getStructuringElement(cv2.MORPH_CROSS, (2*dilationSize+1, 2*dilationSize+1),(dilationSize, dilationSize))
print(kernal)
imageDilated = cv2.erode(org_img_dilation, kernal)
cv2.imshow("Original Image", org_img_dilation)
cv2.imshow("Eroded Image", imageDilated)
### Morphological Open
org_img_morph = cv2.imread("data/images/opening.png", cv2.IMREAD_GRAYSCALE)
# We create the kernel/structuring element that is used for dilation operation.
openingSize = 5
kernal = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2*openingSize+1, 2*openingSize+1),(openingSize, openingSize))
print(kernal)
imageMorphOpen = cv2.morphologyEx(org_img_morph, cv2.MORPH_OPEN, kernal, iterations=2)
cv2.imshow("Original Image", org_img_morph)
cv2.imshow("Open Image", imageMorphOpen)
### Morphological Close
org_img_morph = cv2.imread("data/images/closing.png", cv2.IMREAD_GRAYSCALE)
# We create the kernel/structuring element that is used for dilation operation.
openingSize = 5
kernal = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2*openingSize+1, 2*openingSize+1),(openingSize, openingSize))
print(kernal)
imageMorphOpen = cv2.morphologyEx(org_img_morph, cv2.MORPH_CLOSE, kernal, iterations=2)
cv2.imshow("Original Image", org_img_morph)
cv2.imshow("Open Image", imageMorphOpen)
### Connected Component Analysis (CCA)
cca_image = cv2.imread('data/images/truth.png', cv2.IMREAD_GRAYSCALE)
threshold = 0
maxValue = 255
ret, imThresh = cv2.threshold(cca_image, threshold, maxValue, cv2.THRESH_BINARY)
_, imLabels = cv2.connectedComponents(imThresh)
def displayConnectedComponents(im):
imlabels = im
# find min max pixel value and their locations
(minVal, maxVal, minLoc, maxLoc) = cv2.minMaxLoc(imlabels)
print(minVal, maxVal, minLoc, maxLoc)
#normalizing the pixel values in range of 0-255
imLabels = 255 * (imlabels - minVal)/(maxVal - minVal)
#convert the image to 8bit unsigned type
imLabels = np.uint8(imLabels)
# Apply a color map
imColorMap = cv2.applyColorMap(imLabels, cv2.COLORMAP_RAINBOW)
#Display
cv2.imshow("Labels", imColorMap)
cv2.waitKey(0)
displayConnectedComponents(imLabels)
| import cv2
import numpy as np
### THRESHOLDING
# reading the image as greyscale
org_img = cv2.imread("data/images/threshold.png", cv2.IMREAD_GRAYSCALE)
threshold = 0 # set value above which thresholding works
maxValue = 255 # min value below which thresholding should be applied
#retval, dst = cv.threshold(src, thresh, maxval, type[, dst])
ret, th_img = cv2.threshold(org_img, threshold, maxValue, cv2.THRESH_BINARY)
# display the images
cv2.imshow("Original Image", org_img)
cv2.imshow("Thresholded Image", th_img)
# write the new image to disk
cv2.imwrite("data/images/threshold_out.png", th_img)
### Dilation
org_img_dilation = cv2.imread("data/images/truth.png", cv2.IMREAD_GRAYSCALE)
# We create the kernel/structuring element that is used for dilation operation.
dilationSize = 5
kernal = cv2.getStructuringElement(cv2.MORPH_CROSS, (2*dilationSize+1, 2*dilationSize+1),(dilationSize, dilationSize))
print(kernal)
imageDilated = cv2.dilate(org_img_dilation, kernal)
cv2.imshow("Original Image", org_img_dilation)
cv2.imshow("Dilated Image", imageDilated)
### Erosion
org_img_dilation = cv2.imread("data/images/truth.png", cv2.IMREAD_GRAYSCALE)
# We create the kernel/structuring element that is used for dilation operation.
dilationSize = 5
kernal = cv2.getStructuringElement(cv2.MORPH_CROSS, (2*dilationSize+1, 2*dilationSize+1),(dilationSize, dilationSize))
print(kernal)
imageDilated = cv2.erode(org_img_dilation, kernal)
cv2.imshow("Original Image", org_img_dilation)
cv2.imshow("Eroded Image", imageDilated)
### Morphological Open
org_img_morph = cv2.imread("data/images/opening.png", cv2.IMREAD_GRAYSCALE)
# We create the kernel/structuring element that is used for dilation operation.
openingSize = 5
kernal = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2*openingSize+1, 2*openingSize+1),(openingSize, openingSize))
print(kernal)
imageMorphOpen = cv2.morphologyEx(org_img_morph, cv2.MORPH_OPEN, kernal, iterations=2)
cv2.imshow("Original Image", org_img_morph)
cv2.imshow("Open Image", imageMorphOpen)
### Morphological Close
org_img_morph = cv2.imread("data/images/closing.png", cv2.IMREAD_GRAYSCALE)
# We create the kernel/structuring element that is used for dilation operation.
openingSize = 5
kernal = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2*openingSize+1, 2*openingSize+1),(openingSize, openingSize))
print(kernal)
imageMorphOpen = cv2.morphologyEx(org_img_morph, cv2.MORPH_CLOSE, kernal, iterations=2)
cv2.imshow("Original Image", org_img_morph)
cv2.imshow("Open Image", imageMorphOpen)
### Connected Component Analysis (CCA)
cca_image = cv2.imread('data/images/truth.png', cv2.IMREAD_GRAYSCALE)
threshold = 0
maxValue = 255
ret, imThresh = cv2.threshold(cca_image, threshold, maxValue, cv2.THRESH_BINARY)
_, imLabels = cv2.connectedComponents(imThresh)
def displayConnectedComponents(im):
    """Stretch a label image to 0-255, colorize it, and display it.

    Args:
        im: integer label image, e.g. the output of ``cv2.connectedComponents``.
    """
    labels = im
    # Find min/max label values (and their locations) so we can rescale.
    (minVal, maxVal, minLoc, maxLoc) = cv2.minMaxLoc(labels)
    print(minVal, maxVal, minLoc, maxLoc)
    # Normalize the pixel values into the range 0-255.
    # Bug fix: guard against a constant image (maxVal == minVal), which
    # previously caused a division by zero.
    if maxVal > minVal:
        scaled = 255 * (labels - minVal) / (maxVal - minVal)
    else:
        scaled = labels - minVal
    # Convert to 8-bit unsigned, as required by cv2.applyColorMap.
    scaled = np.uint8(scaled)
    # Apply a color map so distinct components get distinct colors.
    imColorMap = cv2.applyColorMap(scaled, cv2.COLORMAP_RAINBOW)
    # Display and block until a key is pressed.
    cv2.imshow("Labels", imColorMap)
    cv2.waitKey(0)
# Visualize the connected-component labels computed above.
displayConnectedComponents(imLabels)
# (removed: dataset-export metadata row accidentally embedded in the file)
# src_gpu/preprocessing/breast_detection/crop_main_file.py (repo: rogov-dvp/medical-imaging-matching)
#What packages were installed:
#
# python 3.9.7
# pip 21.2.4
# tensorflow 2.8.0
# opencv
# https://tensorflow-object-detection-api-tutorial.readthedocs.io/en/latest/install.html
# (CPU):
# - download protobuf version: https://github.com/protocolbuffers/protobuf/releases/tag/v3.19.4
# - Add to environment path <PROTOBUF_PATH>/bin
# - run:
# # From within TensorFlow/models/research/
# protoc object_detection/protos/*.proto --python_out=.
# cp object_detection/packages/tf2/setup.py . //may need to run this file seperately
# python -m pip install --use-feature=2020-resolver .
# python object_detection/builders/model_builder_tf2_test.py
#
import tensorflow as tf
import os
import math
import cv2
import numpy as np
from object_detection.utils import config_util
from object_detection.utils import label_map_util
from object_detection.builders import model_builder
# Filesystem layout for the trained detector (all paths relative to the workspace).
WORKSPACE_PATH = 'workspace'
ANNOTATION_PATH = WORKSPACE_PATH+'/annotations'
IMAGE_PATH = WORKSPACE_PATH+'/cropped_images'
MODEL_PATH = WORKSPACE_PATH+'/models'
CHECKPOINT_PATH = MODEL_PATH+'/my_ssd_mob'
CUSTOM_MODEL_NAME = 'my_ssd_mob'
CONFIG_PATH = MODEL_PATH+'/'+CUSTOM_MODEL_NAME+'/pipeline.config'
# NOTE(review): `config` is never used again and duplicates the `configs`
# load two lines below — candidate for removal.
config = config_util.get_configs_from_pipeline_file(CONFIG_PATH)
# Load pipeline config and build a detection model
configs = config_util.get_configs_from_pipeline_file(CONFIG_PATH)
detection_model = model_builder.build(model_config=configs['model'], is_training=False)
# Restore checkpoint
# `expect_partial()` — presumably used to silence warnings about variables
# (e.g. optimizer slots) not needed for inference; confirm against TF docs.
ckpt = tf.compat.v2.train.Checkpoint(model=detection_model)
ckpt.restore(os.path.join(CHECKPOINT_PATH, 'ckpt-6')).expect_partial()
# FUNCTIONS
@tf.function
def detect_fn(image):
    """Run the restored detection model on a batched image tensor."""
    preprocessed, shapes = detection_model.preprocess(image)
    raw_predictions = detection_model.predict(preprocessed, shapes)
    return detection_model.postprocess(raw_predictions, shapes)
def crop_breasts(image):
    """Crop a grayscale mammogram to the detected breast region.

    Runs the SSD detector on ``image`` (2-D grayscale array). If the top
    detection scores at least 0.75 the cropped region is returned,
    otherwise the image is returned unchanged.
    """
    #category_index = label_map_util.create_category_index_from_labelmap(ANNOTATION_PATH + '/label_map.pbtxt')
    image_out = np.copy(image)
    # The detector expects a 3-channel input, so replicate the gray channel.
    img = np.zeros((image.shape[0], image.shape[1], 3))
    img[:,:,0] = image
    img[:,:,1] = image
    img[:,:,2] = image
    in_tensor = tf.convert_to_tensor(np.expand_dims(img,0), dtype=tf.float32)
    detections = detect_fn(in_tensor)
    num_detections = int(detections.pop('num_detections'))
    detections = {key: value[0, :num_detections].numpy()
                  for key, value in detections.items()}
    # Fall back to the full image when the detector is not confident enough.
    if detections['detection_scores'][0] < 0.75:
        return image
    # TF Object Detection API boxes are [ymin, xmin, ymax, xmax], normalized.
    box = detections['detection_boxes'][0]
    # Bug fix: row coordinates (box[0], box[2]) must be scaled by the image
    # height (shape[0]) and column coordinates (box[1], box[3]) by the width
    # (shape[1]); the original code had the two scale factors swapped, which
    # produced wrong crops on non-square images.
    top = math.floor(box[0] * image.shape[0])
    bottom = math.ceil(box[2] * image.shape[0])
    left = math.floor(box[1] * image.shape[1])
    right = math.ceil(box[3] * image.shape[1])
    cropped_img = image_out[top:bottom, left:right]
    return cropped_img
# Set images and run function
# img = cv2.imread("test_images_kaggle/images/2016_BC003122_ CC_L.jpg") #test_images_kaggle/images
# crop_breasts(np.asarray([img])) #np.asarray([img,img])
#What packages were installed:
#
# python 3.9.7
# pip 21.2.4
# tensorflow 2.8.0
# opencv
# https://tensorflow-object-detection-api-tutorial.readthedocs.io/en/latest/install.html
# (CPU):
# - download protobuf version: https://github.com/protocolbuffers/protobuf/releases/tag/v3.19.4
# - Add to environment path <PROTOBUF_PATH>/bin
# - run:
# # From within TensorFlow/models/research/
# protoc object_detection/protos/*.proto --python_out=.
# cp object_detection/packages/tf2/setup.py . //may need to run this file seperately
# python -m pip install --use-feature=2020-resolver .
# python object_detection/builders/model_builder_tf2_test.py
#
import tensorflow as tf
import os
import math
import cv2
import numpy as np
from object_detection.utils import config_util
from object_detection.utils import label_map_util
from object_detection.builders import model_builder
# Filesystem layout for the trained detector (all paths relative to the workspace).
WORKSPACE_PATH = 'workspace'
ANNOTATION_PATH = WORKSPACE_PATH+'/annotations'
IMAGE_PATH = WORKSPACE_PATH+'/cropped_images'
MODEL_PATH = WORKSPACE_PATH+'/models'
CHECKPOINT_PATH = MODEL_PATH+'/my_ssd_mob'
CUSTOM_MODEL_NAME = 'my_ssd_mob'
CONFIG_PATH = MODEL_PATH+'/'+CUSTOM_MODEL_NAME+'/pipeline.config'
# NOTE(review): `config` is never used again and duplicates the `configs`
# load two lines below — candidate for removal.
config = config_util.get_configs_from_pipeline_file(CONFIG_PATH)
# Load pipeline config and build a detection model
configs = config_util.get_configs_from_pipeline_file(CONFIG_PATH)
detection_model = model_builder.build(model_config=configs['model'], is_training=False)
# Restore checkpoint
# `expect_partial()` — presumably used to silence warnings about variables
# (e.g. optimizer slots) not needed for inference; confirm against TF docs.
ckpt = tf.compat.v2.train.Checkpoint(model=detection_model)
ckpt.restore(os.path.join(CHECKPOINT_PATH, 'ckpt-6')).expect_partial()
# FUNCTIONS
@tf.function
def detect_fn(image):
    """Run the restored detection model on a batched image tensor."""
    preprocessed, shapes = detection_model.preprocess(image)
    raw_predictions = detection_model.predict(preprocessed, shapes)
    return detection_model.postprocess(raw_predictions, shapes)
def crop_breasts(image):
    """Crop a grayscale mammogram to the detected breast region.

    Runs the SSD detector on ``image`` (2-D grayscale array). If the top
    detection scores at least 0.75 the cropped region is returned,
    otherwise the image is returned unchanged.
    """
    #category_index = label_map_util.create_category_index_from_labelmap(ANNOTATION_PATH + '/label_map.pbtxt')
    image_out = np.copy(image)
    # The detector expects a 3-channel input, so replicate the gray channel.
    img = np.zeros((image.shape[0], image.shape[1], 3))
    img[:,:,0] = image
    img[:,:,1] = image
    img[:,:,2] = image
    in_tensor = tf.convert_to_tensor(np.expand_dims(img,0), dtype=tf.float32)
    detections = detect_fn(in_tensor)
    num_detections = int(detections.pop('num_detections'))
    detections = {key: value[0, :num_detections].numpy()
                  for key, value in detections.items()}
    # Fall back to the full image when the detector is not confident enough.
    if detections['detection_scores'][0] < 0.75:
        return image
    # TF Object Detection API boxes are [ymin, xmin, ymax, xmax], normalized.
    box = detections['detection_boxes'][0]
    # Bug fix: row coordinates (box[0], box[2]) must be scaled by the image
    # height (shape[0]) and column coordinates (box[1], box[3]) by the width
    # (shape[1]); the original code had the two scale factors swapped, which
    # produced wrong crops on non-square images.
    top = math.floor(box[0] * image.shape[0])
    bottom = math.ceil(box[2] * image.shape[0])
    left = math.floor(box[1] * image.shape[1])
    right = math.ceil(box[3] * image.shape[1])
    cropped_img = image_out[top:bottom, left:right]
    return cropped_img
# Set images and run function
# img = cv2.imread("test_images_kaggle/images/2016_BC003122_ CC_L.jpg") #test_images_kaggle/images
# crop_breasts(np.asarray([img])) #np.asarray([img,img])
# minigrid_and_pd_experiments/base.py (repo: allenai/advisor)
import abc
import math
import os
from typing import (
Optional,
List,
Any,
Dict,
cast,
Sequence,
Callable,
Union,
NamedTuple,
)
import gym
import torch
from gym_minigrid.minigrid import Lava, WorldObj, Wall
from torch import nn, optim
from torch.optim.lr_scheduler import LambdaLR
from allenact.algorithms.onpolicy_sync.losses import PPO, A2C
from allenact.algorithms.onpolicy_sync.losses.a2cacktr import A2CConfig
from allenact.algorithms.onpolicy_sync.losses.imitation import Imitation
from allenact.algorithms.onpolicy_sync.losses.ppo import PPOConfig
from allenact.base_abstractions.experiment_config import ExperimentConfig, MachineParams
from allenact.base_abstractions.misc import Loss
from allenact.base_abstractions.sensor import SensorSuite, Sensor, ExpertActionSensor
from allenact.embodiedai.models.basic_models import LinearActorCritic
from allenact.utils.experiment_utils import (
LinearDecay,
Builder,
PipelineStage,
TrainingPipeline,
)
from allenact_plugins.lighthouse_plugin.lighthouse_models import (
LinearAdvisorActorCritic,
)
from allenact_plugins.minigrid_plugin.minigrid_environments import (
FastCrossing,
AskForHelpSimpleCrossing,
)
from allenact_plugins.minigrid_plugin.minigrid_models import MiniGridSimpleConvRNN
from allenact_plugins.minigrid_plugin.minigrid_offpolicy import (
create_minigrid_offpolicy_data_iterator,
)
from allenact_plugins.minigrid_plugin.minigrid_sensors import EgocentricMiniGridSensor
from allenact_plugins.minigrid_plugin.minigrid_tasks import (
MiniGridTaskSampler,
MiniGridTask,
AskForHelpSimpleCrossingTask,
)
from poisoneddoors_plugin.poisoneddoors_models import RNNActorCriticWithEmbed
from poisoneddoors_plugin.poisoneddoors_offpolicy import (
create_poisoneddoors_offpolicy_data_iterator,
)
from poisoneddoors_plugin.poisoneddoors_sensors import PoisonedDoorCurrentStateSensor
from poisoneddoors_plugin.poisoneddoors_tasks import (
PoisonedDoorsEnvironment,
PoisonedDoorsTask,
PoisonedDoorsTaskSampler,
)
from projects.advisor.minigrid_constants import MINIGRID_EXPERT_TRAJECTORIES_DIR
class MiniGridAndPDExperimentParams(NamedTuple):
    """Tunable parameters shared by the MiniGrid and PoisonedDoors experiments."""
    TASK_NAME: str
    # Default MiniGrid values
    MG_AGENT_VIEW_SIZE: int = 7
    MG_AGENT_VIEW_CHANNELS: int = 3
    # Default Poisoned Doors values
    PD_MAX_STEPS: int = 100
    # Training params
    NUM_TRAIN_SAMPLERS: int = 20  # if torch.cuda.is_available() else 1
    ROLLOUT_STEPS: int = 100
    # NOTE: the unannotated names in this class (MG_TOTAL_TRAIN_STEPS,
    # PD_TOTAL_TRAIN_STEPS, SHOULD_LOG, METRIC_ACCUMULATE_INTERVAL,
    # CKPTS_TO_SAVE) are plain class attributes, not NamedTuple fields —
    # only annotated names become fields. Do not annotate them without
    # checking callers, since that would change the tuple's layout.
    MG_TOTAL_TRAIN_STEPS = int(1e6)
    PD_TOTAL_TRAIN_STEPS = int(3e5)
    NUM_TRAIN_TASKS: Optional[int] = None  # None => no cap on training tasks
    NUM_TEST_TASKS: int = 1000
    GPU_ID: Optional[int] = 1 if torch.cuda.is_available() else None
    USE_EXPERT: bool = False  # whether to attach an ExpertActionSensor
    RNN_TYPE: str = "LSTM"
    CACHE_GRAPHS: bool = False
    SHOULD_LOG = True
    TEST_SEED_OFFSET: int = 0
    # Hyperparameters
    LR: Optional[float] = None
    TF_RATIO: Optional[float] = None
    FIXED_ALPHA: Optional[float] = None
    ALPHA_START: Optional[float] = None
    ALPHA_STOP: Optional[float] = None
    # Auxiliary head parameters
    INCLUDE_AUXILIARY_HEAD: bool = False
    SAME_INIT_VALS_FOR_ADVISOR_HEAD: bool = False
    # Logging / saving
    METRIC_ACCUMULATE_INTERVAL = 10000 if torch.cuda.is_available() else 1000
    CKPTS_TO_SAVE = 4
class BaseExperimentConfig(ExperimentConfig):
    """Base experiment.
    Shared AllenAct experiment configuration for the MiniGrid and
    PoisonedDoors tasks. Concrete experiments pick a task by name and
    implement :meth:`extra_tag` to identify their training method.
    """
    def __init__(self, task_name: str, **kwargs):
        # Extra keyword arguments override the defaults declared on
        # MiniGridAndPDExperimentParams.
        self.exp_params = MiniGridAndPDExperimentParams(TASK_NAME=task_name, **kwargs)
    @property
    def task_name(self) -> str:
        """Name of the task this experiment was configured with."""
        return self.exp_params.TASK_NAME
    def total_train_steps(self) -> int:
        """Total number of training steps for the configured task."""
        task_info = self.task_info()
        return task_info["total_train_steps"]
    def task_info(self):
        """All information needed about the underlying task.
        # Returns
        Dictionary of useful information:
            - env_info: used to initialize the environment
            - tag: string to use for logging
            - env_class: callable of the underlying mini-grid / poisoned doors environment class
            - task_class: callable of the corresponding task class
        """
        name = self.task_name
        output_data = dict()
        if name == "PoisonedDoors":
            # Specific base parameters
            num_doors = 4
            combination_length = 10
            extra_tag = self.extra_tag()
            # Parameters needed for other functions
            output_data["env_info"] = {
                "num_doors": num_doors,
                "combination_length": combination_length,
            }
            output_data["task_sampler_args"] = {
                **output_data["env_info"],
                "max_steps": self.exp_params.PD_MAX_STEPS,
            }
            output_data["tag"] = "PoisonedDoorsN{}{}".format(num_doors, extra_tag,)
            output_data["env_class"] = PoisonedDoorsEnvironment
            output_data["task_class"] = PoisonedDoorsTask
            output_data["task_sampler_class"] = PoisonedDoorsTaskSampler
        elif name == "CrossingS25N10":
            # Specific base parameters
            grid_size = 25
            num_crossings = 10
            obstacle_type: Callable[[], WorldObj] = Lava
            # Parameters needed for other functions
            output_data["env_info"] = {
                "size": grid_size,
                "num_crossings": num_crossings,
                "obstacle_type": obstacle_type,
            }
            output_data["tag"] = "Crossing{}S{}N{}{}".format(
                obstacle_type().__class__.__name__,
                grid_size,
                num_crossings,
                self.extra_tag(),
            )
            output_data["task_sampler_args"] = {
                "repeat_failed_task_for_min_steps": 1000
            }
            output_data["env_class"] = FastCrossing
            output_data["task_class"] = MiniGridTask
            output_data["task_sampler_class"] = MiniGridTaskSampler
        elif name == "WallCrossingS25N10":
            # Specific base parameters
            grid_size = 25
            num_crossings = 10
            obstacle_type: Callable[[], WorldObj] = Wall
            # Parameters needed for other functions
            output_data["env_info"] = {
                "size": grid_size,
                "num_crossings": num_crossings,
                "obstacle_type": obstacle_type,
            }
            output_data["tag"] = "Crossing{}S{}N{}{}".format(
                obstacle_type().__class__.__name__,
                grid_size,
                num_crossings,
                self.extra_tag(),
            )
            # # Each episode takes 4 * 25 * 25 = 2500 steps already, so no need to set
            # # repeat_failed_task_for_min_steps
            # output_data["task_sampler_args"] = {
            #     "repeat_failed_task_for_min_steps": 1000
            # }
            output_data["env_class"] = FastCrossing
            output_data["task_class"] = MiniGridTask
            output_data["task_sampler_class"] = MiniGridTaskSampler
        elif name == "WallCrossingCorruptExpertS25N10":
            # Specific base parameters
            grid_size = 25
            num_crossings = 10
            corrupt_expert_within_actions_of_goal = 15
            obstacle_type: Callable[[], WorldObj] = Wall
            # Parameters needed for other functions
            output_data["env_info"] = {
                "size": grid_size,
                "num_crossings": num_crossings,
                "obstacle_type": obstacle_type,
            }
            output_data["tag"] = "WallCrossingCorruptExpert{}S{}N{}C{}{}".format(
                obstacle_type().__class__.__name__,
                grid_size,
                num_crossings,
                corrupt_expert_within_actions_of_goal,
                self.extra_tag(),
            )
            # # Each episode takes 4 * 25 * 25 = 2500 steps already, so no need to set
            # # repeat_failed_task_for_min_steps
            output_data["task_sampler_args"] = {
                "extra_task_kwargs": {
                    "corrupt_expert_within_actions_of_goal": corrupt_expert_within_actions_of_goal
                }
            }
            # output_data["task_sampler_args"] = {
            #     "repeat_failed_task_for_min_steps": 1000
            # }
            output_data["env_class"] = FastCrossing
            output_data["task_class"] = MiniGridTask
            output_data["task_sampler_class"] = MiniGridTaskSampler
        elif name == "LavaCrossingCorruptExpertS15N7":
            # Specific base parameters
            grid_size = 15
            num_crossings = 7
            corrupt_expert_within_actions_of_goal = 10
            obstacle_type: Callable[[], WorldObj] = Lava
            # Parameters needed for other functions
            output_data["env_info"] = {
                "size": grid_size,
                "num_crossings": num_crossings,
                "obstacle_type": obstacle_type,
            }
            output_data["tag"] = "LavaCrossingCorruptExpert{}S{}N{}C{}{}".format(
                obstacle_type().__class__.__name__,
                grid_size,
                num_crossings,
                corrupt_expert_within_actions_of_goal,
                self.extra_tag(),
            )
            # # Each episode takes 4 * 25 * 25 = 2500 steps already, so no need to set
            # # repeat_failed_task_for_min_steps
            output_data["task_sampler_args"] = {
                "extra_task_kwargs": {
                    "corrupt_expert_within_actions_of_goal": corrupt_expert_within_actions_of_goal
                },
                "repeat_failed_task_for_min_steps": 1000,
            }
            output_data["env_class"] = FastCrossing
            output_data["task_class"] = MiniGridTask
            output_data["task_sampler_class"] = MiniGridTaskSampler
        elif name == "AskForHelpSimpleCrossing":
            # Specific base parameters
            grid_size = 15
            num_crossings = 7
            obstacle_type: Callable[[], WorldObj] = Wall
            # Parameters needed for other functions
            output_data["env_info"] = {
                "size": grid_size,
                "num_crossings": num_crossings,
                "obstacle_type": obstacle_type,
            }
            output_data["tag"] = "AskForHelpSimpleCrossing{}S{}N{}{}".format(
                obstacle_type().__class__.__name__,
                grid_size,
                num_crossings,
                self.extra_tag(),
            )
            # output_data["task_sampler_args"] = {
            #     "repeat_failed_task_for_min_steps": 1000
            # }
            output_data["env_class"] = AskForHelpSimpleCrossing
            output_data["task_class"] = AskForHelpSimpleCrossingTask
            output_data["task_sampler_class"] = MiniGridTaskSampler
        elif name == "AskForHelpSimpleCrossingOnce":
            # Specific base parameters
            grid_size = 25
            num_crossings = 10
            toggle_is_permanent = True
            obstacle_type: Callable[[], WorldObj] = Wall
            # Parameters needed for other functions
            output_data["env_info"] = {
                "size": grid_size,
                "num_crossings": num_crossings,
                "obstacle_type": obstacle_type,
                # NOTE(review): "permenant" spelling presumably matches the
                # kwarg consumed by AskForHelpSimpleCrossing — verify before
                # fixing the typo.
                "toggle_is_permenant": toggle_is_permanent,
            }
            output_data["tag"] = "AskForHelpSimpleCrossingOnce{}S{}N{}{}".format(
                obstacle_type().__class__.__name__,
                grid_size,
                num_crossings,
                self.extra_tag(),
            )
            output_data["task_sampler_args"] = {
                "repeat_failed_task_for_min_steps": 1000
            }
            output_data["env_class"] = AskForHelpSimpleCrossing
            output_data["task_class"] = AskForHelpSimpleCrossingTask
            output_data["task_sampler_class"] = MiniGridTaskSampler
        elif name == "AskForHelpLavaCrossingOnce":
            # Specific base parameters
            grid_size = 15
            num_crossings = 7
            toggle_is_permanent = True
            obstacle_type: Callable[[], WorldObj] = Lava
            # Parameters needed for other functions
            output_data["env_info"] = {
                "size": grid_size,
                "num_crossings": num_crossings,
                "obstacle_type": obstacle_type,
                # NOTE(review): same "permenant" spelling as above — verify
                # before fixing the typo.
                "toggle_is_permenant": toggle_is_permanent,
            }
            output_data["tag"] = "AskForHelpLavaCrossingOnce{}S{}N{}{}".format(
                obstacle_type().__class__.__name__,
                grid_size,
                num_crossings,
                self.extra_tag(),
            )
            output_data["task_sampler_args"] = {
                "repeat_failed_task_for_min_steps": 1000
            }
            output_data["env_class"] = AskForHelpSimpleCrossing
            output_data["task_class"] = AskForHelpSimpleCrossingTask
            output_data["task_sampler_class"] = MiniGridTaskSampler
        elif name == "AskForHelpLavaCrossingSmall":
            # Specific base parameters
            grid_size = 9
            num_crossings = 4
            obstacle_type: Callable[[], WorldObj] = Lava
            # Parameters needed for other functions
            output_data["env_info"] = {
                "size": grid_size,
                "num_crossings": num_crossings,
                "obstacle_type": obstacle_type,
            }
            output_data["tag"] = "AskForHelpLavaCrossingSmall{}S{}N{}{}".format(
                obstacle_type().__class__.__name__,
                grid_size,
                num_crossings,
                self.extra_tag(),
            )
            output_data["task_sampler_args"] = {
                "repeat_failed_task_for_min_steps": 1000
            }
            output_data["env_class"] = AskForHelpSimpleCrossing
            output_data["task_class"] = AskForHelpSimpleCrossingTask
            output_data["task_sampler_class"] = MiniGridTaskSampler
        else:
            raise NotImplementedError("Haven't implemented {}".format(name))
        if name == "PoisonedDoors":
            output_data["total_train_steps"] = self.exp_params.PD_TOTAL_TRAIN_STEPS
        else:
            # MiniGrid total train steps
            output_data["total_train_steps"] = self.exp_params.MG_TOTAL_TRAIN_STEPS
        output_data["name"] = name
        return output_data
    def tag(self):
        """Logging tag for this experiment (task tag plus method suffix)."""
        return self.task_info()["tag"]
    @abc.abstractmethod
    def extra_tag(self):
        # Subclasses return a short suffix identifying their training method;
        # it is appended to the task tag above.
        raise NotImplementedError
    def get_sensors(self) -> Sequence[Sensor]:
        """Sensors for the configured task; an expert-action sensor is
        appended when ``USE_EXPERT`` is set."""
        task_info = self.task_info()
        if task_info["name"] == "PoisonedDoors":
            action_space = gym.spaces.Discrete(
                len(
                    task_info["task_class"].class_action_names(
                        num_doors=task_info["env_info"]["num_doors"]
                    )
                )
            )
            return [PoisonedDoorCurrentStateSensor()] + (
                [ExpertActionSensor(action_space=action_space)]
                if self.exp_params.USE_EXPERT
                else []
            )
        else:
            # Sensors for MiniGrid tasks
            action_space = gym.spaces.Discrete(
                len(task_info["task_class"].class_action_names())
            )
            return [
                EgocentricMiniGridSensor(
                    agent_view_size=self.exp_params.MG_AGENT_VIEW_SIZE,
                    view_channels=self.exp_params.MG_AGENT_VIEW_CHANNELS,
                )
            ] + (
                [ExpertActionSensor(action_space=action_space)]
                if self.exp_params.USE_EXPERT
                else []
            )
    def machine_params(self, mode="train", gpu_id="default", **kwargs):
        """Process/GPU configuration per mode.
        Note: ``gpu_id`` is accepted for API compatibility but unused here;
        the GPU is taken from ``exp_params.GPU_ID``.
        """
        if mode == "train":
            nprocesses = self.exp_params.NUM_TRAIN_SAMPLERS
        elif mode == "valid":
            nprocesses = 0
        elif mode == "test":
            nprocesses = min(
                self.exp_params.NUM_TEST_TASKS, 500 if torch.cuda.is_available() else 50
            )
        else:
            raise NotImplementedError("mode must be 'train', 'valid', or 'test'.")
        gpu_ids = [] if self.exp_params.GPU_ID is None else [self.exp_params.GPU_ID]
        return MachineParams(nprocesses=nprocesses, devices=gpu_ids)
    def create_model(self, **kwargs) -> nn.Module:
        """Build the actor-critic model for the configured task, optionally
        with an auxiliary (advisor) head."""
        sensors = self.get_sensors()
        task_info = self.task_info()
        if task_info["name"] == "PoisonedDoors":
            return RNNActorCriticWithEmbed(
                input_uuid=sensors[0].uuid,
                num_embeddings=4,
                embedding_dim=128,
                input_len=1,
                action_space=gym.spaces.Discrete(
                    3 + task_info["env_info"]["num_doors"]
                ),
                observation_space=SensorSuite(sensors).observation_spaces,
                rnn_type=self.exp_params.RNN_TYPE,
                head_type=LinearActorCritic
                if not self.exp_params.INCLUDE_AUXILIARY_HEAD
                else Builder(  # type: ignore
                    LinearAdvisorActorCritic,
                    kwargs={
                        "ensure_same_init_aux_weights": self.exp_params.SAME_INIT_VALS_FOR_ADVISOR_HEAD
                    },
                ),
            )
        else:
            # Model for MiniGrid tasks
            return MiniGridSimpleConvRNN(
                action_space=gym.spaces.Discrete(
                    len(task_info["task_class"].class_action_names())
                ),
                num_objects=cast(EgocentricMiniGridSensor, sensors[0]).num_objects,
                num_colors=cast(EgocentricMiniGridSensor, sensors[0]).num_colors,
                num_states=cast(EgocentricMiniGridSensor, sensors[0]).num_states,
                observation_space=SensorSuite(sensors).observation_spaces,
                hidden_size=128,
                rnn_type=self.exp_params.RNN_TYPE,
                head_type=LinearActorCritic
                if not self.exp_params.INCLUDE_AUXILIARY_HEAD
                else Builder(  # type: ignore
                    LinearAdvisorActorCritic,
                    kwargs={
                        "ensure_same_init_aux_weights": self.exp_params.SAME_INIT_VALS_FOR_ADVISOR_HEAD
                    },
                ),
            )
    def make_sampler_fn(
        self, **kwargs
    ) -> Union[PoisonedDoorsTaskSampler, MiniGridTaskSampler]:
        """Instantiate the task sampler class chosen by :meth:`task_info`."""
        return self.task_info()["task_sampler_class"](**kwargs)
    def train_task_sampler_args(
        self,
        process_ind: int,
        total_processes: int,
        devices: Optional[List[int]] = None,
        seeds: Optional[List[int]] = None,
        deterministic_cudnn: bool = False,
    ) -> Dict[str, Any]:
        """Keyword arguments used to construct a training task sampler."""
        info = self.task_info()
        if info["name"] == "PoisonedDoors":
            args_dict = {
                "sensors": self.get_sensors(),
                "env_class": info.get("env_class"),
                "env_info": info.get("env_info"),
                "task_class": info["task_class"],
            }
        else:
            args_dict = {
                "sensors": self.get_sensors(),
                "env_class": info.get("env_class"),
                "env_info": info.get("env_info"),
                "cache_graphs": self.exp_params.CACHE_GRAPHS,
                "task_class": info["task_class"],
            }
        if "task_sampler_args" in info:
            args_dict.update(info["task_sampler_args"])
        if self.exp_params.NUM_TRAIN_TASKS:
            args_dict["max_tasks"] = self.exp_params.NUM_TRAIN_TASKS
        return args_dict
    def valid_task_sampler_args(
        self,
        process_ind: int,
        total_processes: int,
        devices: Optional[List[int]] = None,
        seeds: Optional[List[int]] = None,
        deterministic_cudnn: bool = False,
    ) -> Dict[str, Any]:
        # Validation is not used in these experiments (machine_params sets
        # nprocesses=0 for "valid"), so this should never be called.
        raise RuntimeError
    def test_task_sampler_args(
        self,
        process_ind: int,
        total_processes: int,
        devices: Optional[List[int]] = None,
        seeds: Optional[List[int]] = None,
        deterministic_cudnn: bool = False,
    ) -> Dict[str, Any]:
        """Keyword arguments used to construct a deterministic test sampler."""
        max_tasks = self.exp_params.NUM_TEST_TASKS // total_processes + (
            process_ind < (self.exp_params.NUM_TEST_TASKS % total_processes)
        )
        # Disjoint, reproducible seeds per process (strided by process index).
        task_seeds_list = [
            2 ** 31
            - 1
            + self.exp_params.TEST_SEED_OFFSET
            + process_ind
            + total_processes * i
            for i in range(max_tasks)
        ]
        assert min(task_seeds_list) >= 0 and max(task_seeds_list) <= 2 ** 32 - 1
        train_sampler_args = self.train_task_sampler_args(
            process_ind=process_ind,
            total_processes=total_processes,
            devices=devices,
            seeds=seeds,
            deterministic_cudnn=deterministic_cudnn,
        )
        if "repeat_failed_task_for_min_steps" in train_sampler_args:
            del train_sampler_args["repeat_failed_task_for_min_steps"]
        return {
            **train_sampler_args,
            "task_seeds_list": task_seeds_list,
            "max_tasks": max_tasks,
            "deterministic_sampling": True,
            # Expert sensors are for training supervision only; strip them
            # at test time.
            "sensors": [
                s for s in train_sampler_args["sensors"] if "Expert" not in str(type(s))
            ],
        }
    def offpolicy_demo_defaults(self, also_using_ppo: bool):
        """Defaults for off-policy learning from expert demonstrations,
        optionally interleaved with on-policy PPO updates."""
        ppo_defaults = self.rl_loss_default("ppo", 1)
        assert ppo_defaults["update_repeats"] % 2 == 0
        output_data = {}
        task_info = self.task_info()
        if task_info["name"] == "PoisonedDoors":
            output_data.update(
                {
                    "data_iterator_builder": lambda: create_poisoneddoors_offpolicy_data_iterator(
                        num_doors=task_info["env_info"]["num_doors"],
                        nrollouts=self.exp_params.NUM_TRAIN_SAMPLERS
                        // ppo_defaults["num_mini_batch"],
                        rollout_len=self.exp_params.ROLLOUT_STEPS,
                        dataset_size=task_info["total_train_steps"],
                    ),
                }
            )
        else:
            # Off-policy defaults for MiniGrid tasks
            output_data.update(
                {
                    "data_iterator_builder": lambda: create_minigrid_offpolicy_data_iterator(
                        path=os.path.join(
                            MINIGRID_EXPERT_TRAJECTORIES_DIR,
                            "MiniGrid-{}-v0{}.pkl".format(task_info["name"], "",),
                        ),
                        nrollouts=self.exp_params.NUM_TRAIN_SAMPLERS
                        // ppo_defaults["num_mini_batch"],
                        rollout_len=self.exp_params.ROLLOUT_STEPS,
                        instr_len=None,
                        restrict_max_steps_in_dataset=task_info["total_train_steps"],
                    ),
                }
            )
        # Off-policy defaults common to Poisoned Doors and MiniGrid tasks
        # When PPO runs alongside, it gets half the update repeats and the
        # off-policy loss matches the total number of PPO gradient steps.
        output_data.update(
            {
                "ppo_update_repeats": ppo_defaults["update_repeats"] // 2
                if also_using_ppo
                else 0,
                "ppo_num_mini_batch": ppo_defaults["num_mini_batch"]
                if also_using_ppo
                else 0,
                "offpolicy_updates": ppo_defaults["num_mini_batch"]
                * (
                    ppo_defaults["update_repeats"] // 2
                    if also_using_ppo
                    else ppo_defaults["update_repeats"]
                ),
            }
        )
        return output_data
    def rl_loss_default(self, alg: str, steps: Optional[int] = None):
        """Default loss + batching configuration for an RL/IL algorithm.
        ``steps`` is required for "ppo" (it sets the clip-decay schedule).
        """
        if alg == "ppo":
            assert steps is not None
            return {
                "loss": (PPO(clip_decay=LinearDecay(steps), **PPOConfig)),
                "num_mini_batch": 2,
                "update_repeats": 4,
            }
        elif alg == "a2c":
            return {
                "loss": A2C(**A2CConfig),
                "num_mini_batch": 1,
                "update_repeats": 1,
            }
        elif alg == "imitation":
            return {
                "loss": Imitation(),
                "num_mini_batch": 2,  # if torch.cuda.is_available() else 1,
                "update_repeats": 4,
            }
        else:
            raise NotImplementedError
    def _training_pipeline(
        self,
        named_losses: Dict[str, Union[Loss, Builder]],
        pipeline_stages: List[PipelineStage],
        num_mini_batch: int,
        update_repeats: Optional[int],
    ):
        """Assemble the AllenAct :class:`TrainingPipeline` shared by all
        method-specific subclasses."""
        # When using many mini-batches or update repeats, decrease the learning
        # rate so that the approximate size of the gradient update is similar.
        lr = self.exp_params.LR
        num_steps = self.exp_params.ROLLOUT_STEPS
        metric_accumulate_interval = self.exp_params.METRIC_ACCUMULATE_INTERVAL
        gamma = 0.99
        # GAE is disabled when a REINFORCE loss is present.
        use_gae = "reinforce_loss" not in named_losses
        gae_lambda = 1.0
        max_grad_norm = 0.5
        total_train_steps = self.task_info()["total_train_steps"]
        if self.exp_params.CKPTS_TO_SAVE == 0:
            save_interval = None
        else:
            save_interval = math.ceil(total_train_steps / self.exp_params.CKPTS_TO_SAVE)
        return TrainingPipeline(
            save_interval=save_interval,
            metric_accumulate_interval=metric_accumulate_interval,
            optimizer_builder=Builder(optim.Adam, dict(lr=lr)),
            num_mini_batch=num_mini_batch,
            update_repeats=update_repeats,
            max_grad_norm=max_grad_norm,
            num_steps=num_steps,
            named_losses=named_losses,
            gamma=gamma,
            use_gae=use_gae,
            gae_lambda=gae_lambda,
            advance_scene_rollout_period=None,
            should_log=self.exp_params.SHOULD_LOG,
            pipeline_stages=pipeline_stages,
            lr_scheduler_builder=Builder(
                LambdaLR, {"lr_lambda": LinearDecay(steps=total_train_steps)}  # type: ignore
            ),
        )
| import abc
import math
import os
from typing import (
Optional,
List,
Any,
Dict,
cast,
Sequence,
Callable,
Union,
NamedTuple,
)
import gym
import torch
from gym_minigrid.minigrid import Lava, WorldObj, Wall
from torch import nn, optim
from torch.optim.lr_scheduler import LambdaLR
from allenact.algorithms.onpolicy_sync.losses import PPO, A2C
from allenact.algorithms.onpolicy_sync.losses.a2cacktr import A2CConfig
from allenact.algorithms.onpolicy_sync.losses.imitation import Imitation
from allenact.algorithms.onpolicy_sync.losses.ppo import PPOConfig
from allenact.base_abstractions.experiment_config import ExperimentConfig, MachineParams
from allenact.base_abstractions.misc import Loss
from allenact.base_abstractions.sensor import SensorSuite, Sensor, ExpertActionSensor
from allenact.embodiedai.models.basic_models import LinearActorCritic
from allenact.utils.experiment_utils import (
LinearDecay,
Builder,
PipelineStage,
TrainingPipeline,
)
from allenact_plugins.lighthouse_plugin.lighthouse_models import (
LinearAdvisorActorCritic,
)
from allenact_plugins.minigrid_plugin.minigrid_environments import (
FastCrossing,
AskForHelpSimpleCrossing,
)
from allenact_plugins.minigrid_plugin.minigrid_models import MiniGridSimpleConvRNN
from allenact_plugins.minigrid_plugin.minigrid_offpolicy import (
create_minigrid_offpolicy_data_iterator,
)
from allenact_plugins.minigrid_plugin.minigrid_sensors import EgocentricMiniGridSensor
from allenact_plugins.minigrid_plugin.minigrid_tasks import (
MiniGridTaskSampler,
MiniGridTask,
AskForHelpSimpleCrossingTask,
)
from poisoneddoors_plugin.poisoneddoors_models import RNNActorCriticWithEmbed
from poisoneddoors_plugin.poisoneddoors_offpolicy import (
create_poisoneddoors_offpolicy_data_iterator,
)
from poisoneddoors_plugin.poisoneddoors_sensors import PoisonedDoorCurrentStateSensor
from poisoneddoors_plugin.poisoneddoors_tasks import (
PoisonedDoorsEnvironment,
PoisonedDoorsTask,
PoisonedDoorsTaskSampler,
)
from projects.advisor.minigrid_constants import MINIGRID_EXPERT_TRAJECTORIES_DIR
class MiniGridAndPDExperimentParams(NamedTuple):
    """Tunable parameters shared by the MiniGrid and PoisonedDoors experiments."""
    TASK_NAME: str
    # Default MiniGrid values
    MG_AGENT_VIEW_SIZE: int = 7
    MG_AGENT_VIEW_CHANNELS: int = 3
    # Default Poisoned Doors values
    PD_MAX_STEPS: int = 100
    # Training params
    NUM_TRAIN_SAMPLERS: int = 20  # if torch.cuda.is_available() else 1
    ROLLOUT_STEPS: int = 100
    # NOTE: the unannotated names in this class (MG_TOTAL_TRAIN_STEPS,
    # PD_TOTAL_TRAIN_STEPS, SHOULD_LOG, METRIC_ACCUMULATE_INTERVAL,
    # CKPTS_TO_SAVE) are plain class attributes, not NamedTuple fields —
    # only annotated names become fields. Do not annotate them without
    # checking callers, since that would change the tuple's layout.
    MG_TOTAL_TRAIN_STEPS = int(1e6)
    PD_TOTAL_TRAIN_STEPS = int(3e5)
    NUM_TRAIN_TASKS: Optional[int] = None  # None => no cap on training tasks
    NUM_TEST_TASKS: int = 1000
    GPU_ID: Optional[int] = 1 if torch.cuda.is_available() else None
    USE_EXPERT: bool = False  # whether to attach an ExpertActionSensor
    RNN_TYPE: str = "LSTM"
    CACHE_GRAPHS: bool = False
    SHOULD_LOG = True
    TEST_SEED_OFFSET: int = 0
    # Hyperparameters
    LR: Optional[float] = None
    TF_RATIO: Optional[float] = None
    FIXED_ALPHA: Optional[float] = None
    ALPHA_START: Optional[float] = None
    ALPHA_STOP: Optional[float] = None
    # Auxiliary head parameters
    INCLUDE_AUXILIARY_HEAD: bool = False
    SAME_INIT_VALS_FOR_ADVISOR_HEAD: bool = False
    # Logging / saving
    METRIC_ACCUMULATE_INTERVAL = 10000 if torch.cuda.is_available() else 1000
    CKPTS_TO_SAVE = 4
class BaseExperimentConfig(ExperimentConfig):
    """Base experiment."""

    def __init__(self, task_name: str, **kwargs):
        # Any extra keyword arguments override the defaults declared on
        # MiniGridAndPDExperimentParams.
        self.exp_params = MiniGridAndPDExperimentParams(TASK_NAME=task_name, **kwargs)

    @property
    def task_name(self):
        # Name of the task this config was instantiated for.
        return self.exp_params.TASK_NAME

    def total_train_steps(self) -> int:
        """Total number of training steps for the configured task."""
        task_info = self.task_info()
        return task_info["total_train_steps"]

    def task_info(self):
        """All information needed about the underlying task.
        # Returns
        Dictionary of useful information:
        - env_info: used to initialize the environment
        - tag: string to use for logging
        - env_class: callable of the underlying mini-grid / poisoned doors environment class
        - task_class: callable of the corresponding task class
        """
        name = self.task_name
        output_data = dict()
        if name == "PoisonedDoors":
            # Specific base parameters
            num_doors = 4
            combination_length = 10
            extra_tag = self.extra_tag()
            # Parameters needed for other functions
            output_data["env_info"] = {
                "num_doors": num_doors,
                "combination_length": combination_length,
            }
            output_data["task_sampler_args"] = {
                **output_data["env_info"],
                "max_steps": self.exp_params.PD_MAX_STEPS,
            }
            output_data["tag"] = "PoisonedDoorsN{}{}".format(num_doors, extra_tag,)
            output_data["env_class"] = PoisonedDoorsEnvironment
            output_data["task_class"] = PoisonedDoorsTask
            output_data["task_sampler_class"] = PoisonedDoorsTaskSampler
        elif name == "CrossingS25N10":
            # Specific base parameters
            grid_size = 25
            num_crossings = 10
            obstacle_type: Callable[[], WorldObj] = Lava
            # Parameters needed for other functions
            output_data["env_info"] = {
                "size": grid_size,
                "num_crossings": num_crossings,
                "obstacle_type": obstacle_type,
            }
            output_data["tag"] = "Crossing{}S{}N{}{}".format(
                obstacle_type().__class__.__name__,
                grid_size,
                num_crossings,
                self.extra_tag(),
            )
            output_data["task_sampler_args"] = {
                "repeat_failed_task_for_min_steps": 1000
            }
            output_data["env_class"] = FastCrossing
            output_data["task_class"] = MiniGridTask
            output_data["task_sampler_class"] = MiniGridTaskSampler
        elif name == "WallCrossingS25N10":
            # Specific base parameters
            grid_size = 25
            num_crossings = 10
            obstacle_type: Callable[[], WorldObj] = Wall
            # Parameters needed for other functions
            output_data["env_info"] = {
                "size": grid_size,
                "num_crossings": num_crossings,
                "obstacle_type": obstacle_type,
            }
            output_data["tag"] = "Crossing{}S{}N{}{}".format(
                obstacle_type().__class__.__name__,
                grid_size,
                num_crossings,
                self.extra_tag(),
            )
            # # Each episode takes 4 * 25 * 25 = 2500 steps already, so no need to set
            # # repeat_failed_task_for_min_steps
            # output_data["task_sampler_args"] = {
            #     "repeat_failed_task_for_min_steps": 1000
            # }
            output_data["env_class"] = FastCrossing
            output_data["task_class"] = MiniGridTask
            output_data["task_sampler_class"] = MiniGridTaskSampler
        elif name == "WallCrossingCorruptExpertS25N10":
            # Specific base parameters
            grid_size = 25
            num_crossings = 10
            corrupt_expert_within_actions_of_goal = 15
            obstacle_type: Callable[[], WorldObj] = Wall
            # Parameters needed for other functions
            output_data["env_info"] = {
                "size": grid_size,
                "num_crossings": num_crossings,
                "obstacle_type": obstacle_type,
            }
            output_data["tag"] = "WallCrossingCorruptExpert{}S{}N{}C{}{}".format(
                obstacle_type().__class__.__name__,
                grid_size,
                num_crossings,
                corrupt_expert_within_actions_of_goal,
                self.extra_tag(),
            )
            # # Each episode takes 4 * 25 * 25 = 2500 steps already, so no need to set
            # # repeat_failed_task_for_min_steps
            output_data["task_sampler_args"] = {
                "extra_task_kwargs": {
                    "corrupt_expert_within_actions_of_goal": corrupt_expert_within_actions_of_goal
                }
            }
            # output_data["task_sampler_args"] = {
            #     "repeat_failed_task_for_min_steps": 1000
            # }
            output_data["env_class"] = FastCrossing
            output_data["task_class"] = MiniGridTask
            output_data["task_sampler_class"] = MiniGridTaskSampler
        elif name == "LavaCrossingCorruptExpertS15N7":
            # Specific base parameters
            grid_size = 15
            num_crossings = 7
            corrupt_expert_within_actions_of_goal = 10
            obstacle_type: Callable[[], WorldObj] = Lava
            # Parameters needed for other functions
            output_data["env_info"] = {
                "size": grid_size,
                "num_crossings": num_crossings,
                "obstacle_type": obstacle_type,
            }
            output_data["tag"] = "LavaCrossingCorruptExpert{}S{}N{}C{}{}".format(
                obstacle_type().__class__.__name__,
                grid_size,
                num_crossings,
                corrupt_expert_within_actions_of_goal,
                self.extra_tag(),
            )
            # # Each episode takes 4 * 25 * 25 = 2500 steps already, so no need to set
            # # repeat_failed_task_for_min_steps
            output_data["task_sampler_args"] = {
                "extra_task_kwargs": {
                    "corrupt_expert_within_actions_of_goal": corrupt_expert_within_actions_of_goal
                },
                "repeat_failed_task_for_min_steps": 1000,
            }
            output_data["env_class"] = FastCrossing
            output_data["task_class"] = MiniGridTask
            output_data["task_sampler_class"] = MiniGridTaskSampler
        elif name == "AskForHelpSimpleCrossing":
            # Specific base parameters
            grid_size = 15
            num_crossings = 7
            obstacle_type: Callable[[], WorldObj] = Wall
            # Parameters needed for other functions
            output_data["env_info"] = {
                "size": grid_size,
                "num_crossings": num_crossings,
                "obstacle_type": obstacle_type,
            }
            output_data["tag"] = "AskForHelpSimpleCrossing{}S{}N{}{}".format(
                obstacle_type().__class__.__name__,
                grid_size,
                num_crossings,
                self.extra_tag(),
            )
            # output_data["task_sampler_args"] = {
            #     "repeat_failed_task_for_min_steps": 1000
            # }
            output_data["env_class"] = AskForHelpSimpleCrossing
            output_data["task_class"] = AskForHelpSimpleCrossingTask
            output_data["task_sampler_class"] = MiniGridTaskSampler
        elif name == "AskForHelpSimpleCrossingOnce":
            # Specific base parameters
            grid_size = 25
            num_crossings = 10
            toggle_is_permanent = True
            obstacle_type: Callable[[], WorldObj] = Wall
            # Parameters needed for other functions
            # NOTE(review): the "toggle_is_permenant" key spelling (sic) is kept;
            # it presumably matches AskForHelpSimpleCrossing's constructor
            # argument -- confirm before renaming.
            output_data["env_info"] = {
                "size": grid_size,
                "num_crossings": num_crossings,
                "obstacle_type": obstacle_type,
                "toggle_is_permenant": toggle_is_permanent,
            }
            output_data["tag"] = "AskForHelpSimpleCrossingOnce{}S{}N{}{}".format(
                obstacle_type().__class__.__name__,
                grid_size,
                num_crossings,
                self.extra_tag(),
            )
            output_data["task_sampler_args"] = {
                "repeat_failed_task_for_min_steps": 1000
            }
            output_data["env_class"] = AskForHelpSimpleCrossing
            output_data["task_class"] = AskForHelpSimpleCrossingTask
            output_data["task_sampler_class"] = MiniGridTaskSampler
        elif name == "AskForHelpLavaCrossingOnce":
            # Specific base parameters
            grid_size = 15
            num_crossings = 7
            toggle_is_permanent = True
            obstacle_type: Callable[[], WorldObj] = Lava
            # Parameters needed for other functions
            # NOTE(review): same "toggle_is_permenant" (sic) key as above.
            output_data["env_info"] = {
                "size": grid_size,
                "num_crossings": num_crossings,
                "obstacle_type": obstacle_type,
                "toggle_is_permenant": toggle_is_permanent,
            }
            output_data["tag"] = "AskForHelpLavaCrossingOnce{}S{}N{}{}".format(
                obstacle_type().__class__.__name__,
                grid_size,
                num_crossings,
                self.extra_tag(),
            )
            output_data["task_sampler_args"] = {
                "repeat_failed_task_for_min_steps": 1000
            }
            output_data["env_class"] = AskForHelpSimpleCrossing
            output_data["task_class"] = AskForHelpSimpleCrossingTask
            output_data["task_sampler_class"] = MiniGridTaskSampler
        elif name == "AskForHelpLavaCrossingSmall":
            # Specific base parameters
            grid_size = 9
            num_crossings = 4
            obstacle_type: Callable[[], WorldObj] = Lava
            # Parameters needed for other functions
            output_data["env_info"] = {
                "size": grid_size,
                "num_crossings": num_crossings,
                "obstacle_type": obstacle_type,
            }
            output_data["tag"] = "AskForHelpLavaCrossingSmall{}S{}N{}{}".format(
                obstacle_type().__class__.__name__,
                grid_size,
                num_crossings,
                self.extra_tag(),
            )
            output_data["task_sampler_args"] = {
                "repeat_failed_task_for_min_steps": 1000
            }
            output_data["env_class"] = AskForHelpSimpleCrossing
            output_data["task_class"] = AskForHelpSimpleCrossingTask
            output_data["task_sampler_class"] = MiniGridTaskSampler
        else:
            raise NotImplementedError("Haven't implemented {}".format(name))
        # Train-step budget differs between the two task families.
        if name == "PoisonedDoors":
            output_data["total_train_steps"] = self.exp_params.PD_TOTAL_TRAIN_STEPS
        else:
            # MiniGrid total train steps
            output_data["total_train_steps"] = self.exp_params.MG_TOTAL_TRAIN_STEPS
        output_data["name"] = name
        return output_data

    def tag(self):
        """Experiment tag used for logging / checkpoint naming."""
        return self.task_info()["tag"]

    @abc.abstractmethod
    def extra_tag(self):
        # Subclasses append their own suffix to the experiment tag.
        raise NotImplementedError

    def get_sensors(self) -> Sequence[Sensor]:
        """Build the sensor list for the configured task.

        An ExpertActionSensor is appended only when USE_EXPERT is set.
        """
        task_info = self.task_info()
        if task_info["name"] == "PoisonedDoors":
            action_space = gym.spaces.Discrete(
                len(
                    task_info["task_class"].class_action_names(
                        num_doors=task_info["env_info"]["num_doors"]
                    )
                )
            )
            return [PoisonedDoorCurrentStateSensor()] + (
                [ExpertActionSensor(action_space=action_space)]
                if self.exp_params.USE_EXPERT
                else []
            )
        else:
            # Sensors for MiniGrid tasks
            action_space = gym.spaces.Discrete(
                len(task_info["task_class"].class_action_names())
            )
            return [
                EgocentricMiniGridSensor(
                    agent_view_size=self.exp_params.MG_AGENT_VIEW_SIZE,
                    view_channels=self.exp_params.MG_AGENT_VIEW_CHANNELS,
                )
            ] + (
                [ExpertActionSensor(action_space=action_space)]
                if self.exp_params.USE_EXPERT
                else []
            )

    def machine_params(self, mode="train", gpu_id="default", **kwargs):
        """Number of processes and devices for the given mode.

        Note: the ``gpu_id`` parameter is not used here; the device comes
        from ``self.exp_params.GPU_ID``.
        """
        if mode == "train":
            nprocesses = self.exp_params.NUM_TRAIN_SAMPLERS
        elif mode == "valid":
            nprocesses = 0
        elif mode == "test":
            nprocesses = min(
                self.exp_params.NUM_TEST_TASKS, 500 if torch.cuda.is_available() else 50
            )
        else:
            raise NotImplementedError("mode must be 'train', 'valid', or 'test'.")
        gpu_ids = [] if self.exp_params.GPU_ID is None else [self.exp_params.GPU_ID]
        return MachineParams(nprocesses=nprocesses, devices=gpu_ids)

    def create_model(self, **kwargs) -> nn.Module:
        """Instantiate the actor-critic model matching the configured task."""
        sensors = self.get_sensors()
        task_info = self.task_info()
        if task_info["name"] == "PoisonedDoors":
            return RNNActorCriticWithEmbed(
                input_uuid=sensors[0].uuid,
                num_embeddings=4,
                embedding_dim=128,
                input_len=1,
                # 3 non-door actions plus one action per door.
                action_space=gym.spaces.Discrete(
                    3 + task_info["env_info"]["num_doors"]
                ),
                observation_space=SensorSuite(sensors).observation_spaces,
                rnn_type=self.exp_params.RNN_TYPE,
                head_type=LinearActorCritic
                if not self.exp_params.INCLUDE_AUXILIARY_HEAD
                else Builder(  # type: ignore
                    LinearAdvisorActorCritic,
                    kwargs={
                        "ensure_same_init_aux_weights": self.exp_params.SAME_INIT_VALS_FOR_ADVISOR_HEAD
                    },
                ),
            )
        else:
            # Model for MiniGrid tasks
            return MiniGridSimpleConvRNN(
                action_space=gym.spaces.Discrete(
                    len(task_info["task_class"].class_action_names())
                ),
                num_objects=cast(EgocentricMiniGridSensor, sensors[0]).num_objects,
                num_colors=cast(EgocentricMiniGridSensor, sensors[0]).num_colors,
                num_states=cast(EgocentricMiniGridSensor, sensors[0]).num_states,
                observation_space=SensorSuite(sensors).observation_spaces,
                hidden_size=128,
                rnn_type=self.exp_params.RNN_TYPE,
                head_type=LinearActorCritic
                if not self.exp_params.INCLUDE_AUXILIARY_HEAD
                else Builder(  # type: ignore
                    LinearAdvisorActorCritic,
                    kwargs={
                        "ensure_same_init_aux_weights": self.exp_params.SAME_INIT_VALS_FOR_ADVISOR_HEAD
                    },
                ),
            )

    def make_sampler_fn(
        self, **kwargs
    ) -> Union[PoisonedDoorsTaskSampler, MiniGridTaskSampler]:
        # The concrete sampler class is chosen by task_info().
        return self.task_info()["task_sampler_class"](**kwargs)

    def train_task_sampler_args(
        self,
        process_ind: int,
        total_processes: int,
        devices: Optional[List[int]] = None,
        seeds: Optional[List[int]] = None,
        deterministic_cudnn: bool = False,
    ) -> Dict[str, Any]:
        """Keyword arguments passed to the training task sampler."""
        info = self.task_info()
        if info["name"] == "PoisonedDoors":
            args_dict = {
                "sensors": self.get_sensors(),
                "env_class": info.get("env_class"),
                "env_info": info.get("env_info"),
                "task_class": info["task_class"],
            }
        else:
            args_dict = {
                "sensors": self.get_sensors(),
                "env_class": info.get("env_class"),
                "env_info": info.get("env_info"),
                "cache_graphs": self.exp_params.CACHE_GRAPHS,
                "task_class": info["task_class"],
            }
        if "task_sampler_args" in info:
            args_dict.update(info["task_sampler_args"])
        if self.exp_params.NUM_TRAIN_TASKS:
            args_dict["max_tasks"] = self.exp_params.NUM_TRAIN_TASKS
        return args_dict

    def valid_task_sampler_args(
        self,
        process_ind: int,
        total_processes: int,
        devices: Optional[List[int]] = None,
        seeds: Optional[List[int]] = None,
        deterministic_cudnn: bool = False,
    ) -> Dict[str, Any]:
        # Validation is not supported by this config (machine_params uses
        # 0 validation processes); calling this is an error.
        raise RuntimeError

    def test_task_sampler_args(
        self,
        process_ind: int,
        total_processes: int,
        devices: Optional[List[int]] = None,
        seeds: Optional[List[int]] = None,
        deterministic_cudnn: bool = False,
    ) -> Dict[str, Any]:
        """Deterministic, seeded sampler arguments for evaluation."""
        # Distribute NUM_TEST_TASKS across processes, giving the first
        # (NUM_TEST_TASKS % total_processes) processes one extra task.
        max_tasks = self.exp_params.NUM_TEST_TASKS // total_processes + (
            process_ind < (self.exp_params.NUM_TEST_TASKS % total_processes)
        )
        # Disjoint per-process seeds starting at 2**31 - 1 (+ optional offset).
        task_seeds_list = [
            2 ** 31
            - 1
            + self.exp_params.TEST_SEED_OFFSET
            + process_ind
            + total_processes * i
            for i in range(max_tasks)
        ]
        assert min(task_seeds_list) >= 0 and max(task_seeds_list) <= 2 ** 32 - 1
        train_sampler_args = self.train_task_sampler_args(
            process_ind=process_ind,
            total_processes=total_processes,
            devices=devices,
            seeds=seeds,
            deterministic_cudnn=deterministic_cudnn,
        )
        if "repeat_failed_task_for_min_steps" in train_sampler_args:
            # Failed-task repetition is a training-only mechanism.
            del train_sampler_args["repeat_failed_task_for_min_steps"]
        return {
            **train_sampler_args,
            "task_seeds_list": task_seeds_list,
            "max_tasks": max_tasks,
            "deterministic_sampling": True,
            # Expert sensors are stripped at test time.
            "sensors": [
                s for s in train_sampler_args["sensors"] if "Expert" not in str(type(s))
            ],
        }

    def offpolicy_demo_defaults(self, also_using_ppo: bool):
        """Default settings for off-policy imitation from expert demos.

        When *also_using_ppo* is True, half of the PPO update repeats are
        given to PPO and the other half to the off-policy loss.
        """
        ppo_defaults = self.rl_loss_default("ppo", 1)
        assert ppo_defaults["update_repeats"] % 2 == 0
        output_data = {}
        task_info = self.task_info()
        if task_info["name"] == "PoisonedDoors":
            output_data.update(
                {
                    "data_iterator_builder": lambda: create_poisoneddoors_offpolicy_data_iterator(
                        num_doors=task_info["env_info"]["num_doors"],
                        nrollouts=self.exp_params.NUM_TRAIN_SAMPLERS
                        // ppo_defaults["num_mini_batch"],
                        rollout_len=self.exp_params.ROLLOUT_STEPS,
                        dataset_size=task_info["total_train_steps"],
                    ),
                }
            )
        else:
            # Off-policy defaults for MiniGrid tasks
            output_data.update(
                {
                    "data_iterator_builder": lambda: create_minigrid_offpolicy_data_iterator(
                        path=os.path.join(
                            MINIGRID_EXPERT_TRAJECTORIES_DIR,
                            "MiniGrid-{}-v0{}.pkl".format(task_info["name"], "",),
                        ),
                        nrollouts=self.exp_params.NUM_TRAIN_SAMPLERS
                        // ppo_defaults["num_mini_batch"],
                        rollout_len=self.exp_params.ROLLOUT_STEPS,
                        instr_len=None,
                        restrict_max_steps_in_dataset=task_info["total_train_steps"],
                    ),
                }
            )
        # Off-policy defaults common to Poisoned Doors and MiniGrid tasks
        output_data.update(
            {
                "ppo_update_repeats": ppo_defaults["update_repeats"] // 2
                if also_using_ppo
                else 0,
                "ppo_num_mini_batch": ppo_defaults["num_mini_batch"]
                if also_using_ppo
                else 0,
                "offpolicy_updates": ppo_defaults["num_mini_batch"]
                * (
                    ppo_defaults["update_repeats"] // 2
                    if also_using_ppo
                    else ppo_defaults["update_repeats"]
                ),
            }
        )
        return output_data

    def rl_loss_default(self, alg: str, steps: Optional[int] = None):
        """Default loss object and batching settings for the given algorithm.

        :param alg: one of "ppo", "a2c", "imitation"
        :param steps: required for "ppo" (length of the clip-decay schedule)
        """
        if alg == "ppo":
            assert steps is not None
            return {
                "loss": (PPO(clip_decay=LinearDecay(steps), **PPOConfig)),
                "num_mini_batch": 2,
                "update_repeats": 4,
            }
        elif alg == "a2c":
            return {
                "loss": A2C(**A2CConfig),
                "num_mini_batch": 1,
                "update_repeats": 1,
            }
        elif alg == "imitation":
            return {
                "loss": Imitation(),
                "num_mini_batch": 2,  # if torch.cuda.is_available() else 1,
                "update_repeats": 4,
            }
        else:
            raise NotImplementedError

    def _training_pipeline(
        self,
        named_losses: Dict[str, Union[Loss, Builder]],
        pipeline_stages: List[PipelineStage],
        num_mini_batch: int,
        update_repeats: Optional[int],
    ):
        """Assemble the TrainingPipeline shared by all experiment variants."""
        # When using many mini-batches or update repeats, decrease the learning
        # rate so that the approximate size of the gradient update is similar.
        lr = self.exp_params.LR
        num_steps = self.exp_params.ROLLOUT_STEPS
        metric_accumulate_interval = self.exp_params.METRIC_ACCUMULATE_INTERVAL
        gamma = 0.99
        # GAE is disabled when a REINFORCE loss is present.
        use_gae = "reinforce_loss" not in named_losses
        gae_lambda = 1.0
        max_grad_norm = 0.5
        total_train_steps = self.task_info()["total_train_steps"]
        if self.exp_params.CKPTS_TO_SAVE == 0:
            save_interval = None
        else:
            save_interval = math.ceil(total_train_steps / self.exp_params.CKPTS_TO_SAVE)
        return TrainingPipeline(
            save_interval=save_interval,
            metric_accumulate_interval=metric_accumulate_interval,
            optimizer_builder=Builder(optim.Adam, dict(lr=lr)),
            num_mini_batch=num_mini_batch,
            update_repeats=update_repeats,
            max_grad_norm=max_grad_norm,
            num_steps=num_steps,
            named_losses=named_losses,
            gamma=gamma,
            use_gae=use_gae,
            gae_lambda=gae_lambda,
            advance_scene_rollout_period=None,
            should_log=self.exp_params.SHOULD_LOG,
            pipeline_stages=pipeline_stages,
            lr_scheduler_builder=Builder(
                LambdaLR, {"lr_lambda": LinearDecay(steps=total_train_steps)}  # type: ignore
            ),
        )
| en | 0.480996 | # Default MiniGrid values # Default Poisoned Doors values # Training params # if torch.cuda.is_available() else 1 # Hyperparameters # Auxiliary head parameters # Logging / saving Base experiment. All information needed about the underlying task. # Returns Dictionary of useful information: - env_info: used to initialize the environment - tag: string to use for logging - env_class: callable of the underlying mini-grid / poisoned doors environment class - task_class: callable of the corresponding task class # Specific base parameters # Parameters needed for other functions # Specific base parameters # Parameters needed for other functions # Specific base parameters # Parameters needed for other functions # # Each episode takes 4 * 25 * 25 = 2500 steps already, so no need to set # # repeat_failed_task_for_min_steps # output_data["task_sampler_args"] = { # "repeat_failed_task_for_min_steps": 1000 # } # Specific base parameters # Parameters needed for other functions # # Each episode takes 4 * 25 * 25 = 2500 steps already, so no need to set # # repeat_failed_task_for_min_steps # output_data["task_sampler_args"] = { # "repeat_failed_task_for_min_steps": 1000 # } # Specific base parameters # Parameters needed for other functions # # Each episode takes 4 * 25 * 25 = 2500 steps already, so no need to set # # repeat_failed_task_for_min_steps # Specific base parameters # Parameters needed for other functions # output_data["task_sampler_args"] = { # "repeat_failed_task_for_min_steps": 1000 # } # Specific base parameters # Parameters needed for other functions # Specific base parameters # Parameters needed for other functions # Specific base parameters # Parameters needed for other functions # MiniGrid total train steps # Sensors for MiniGrid tasks # type: ignore # Model for MiniGrid tasks # type: ignore # Off-policy defaults for MiniGrid tasks # Off-policy defaults common to Poisoned Doors and MiniGrid tasks # if torch.cuda.is_available() else 1, # When using 
many mini-batches or update repeats, decrease the learning # rate so that the approximate size of the gradient update is similar. # type: ignore | 1.544206 | 2 |
services/service-api/lib/wikipedia.py | elaisasearch/elaisa.org | 2 | 6621021 | """
Handles the Wikipedia data for the user's search value.
"""
import wikipedia
def getWikiEntry(terms: list, language: str) -> dict:
    """Look up the first search term on Wikipedia in the given language.

    :param terms: list of search terms; only the first one is looked up
    :param language: Wikipedia language code, e.g. "en" or "de"
    :return: dict with "url", "title" and "summary" keys; all values are
        empty strings when the lookup fails (no terms given, unknown page,
        disambiguation, network error, ...)
    """
    empty = {"url": "", "title": "", "summary": ""}
    if not terms:
        # BUGFIX: terms[0] used to raise IndexError on an empty list (only
        # masked by the former bare except).
        return empty
    try:
        wikipedia.set_lang(language)
        page = wikipedia.page(terms[0])
        return {
            "url": page.url,
            "title": page.title,
            "summary": page.summary
        }
    except Exception:
        # Catch library/network errors, but unlike the previous bare
        # ``except:`` do not swallow SystemExit/KeyboardInterrupt.
        return empty
| """
Handles the Wikipedia data for the user's search value.
"""
import wikipedia
def getWikiEntry(terms: list, language: str) -> dict:
    """Look up the first search term on Wikipedia in the given language.

    :param terms: list of search terms; only the first one is looked up
    :param language: Wikipedia language code, e.g. "en" or "de"
    :return: dict with "url", "title" and "summary" keys; all values are
        empty strings when the lookup fails (no terms given, unknown page,
        disambiguation, network error, ...)
    """
    empty = {"url": "", "title": "", "summary": ""}
    if not terms:
        # BUGFIX: terms[0] used to raise IndexError on an empty list (only
        # masked by the former bare except).
        return empty
    try:
        wikipedia.set_lang(language)
        page = wikipedia.page(terms[0])
        return {
            "url": page.url,
            "title": page.title,
            "summary": page.summary
        }
    except Exception:
        # Catch library/network errors, but unlike the previous bare
        # ``except:`` do not swallow SystemExit/KeyboardInterrupt.
        return empty
| en | 0.577977 | Handles the Wikipedia data for the user's search value. Takes the user's search term and chosen language and returns the wikipedia article information. :terms: List :language: String :return: Dictionary | 3.758421 | 4 |
python/qilinguist/worktree.py | vbarbaresi/qibuild | 0 | 6621022 | # Copyright (c) 2012-2018 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the COPYING file.
import os
from qisys import ui
import qisys.worktree
import qisys.qixml
class LinguistWorkTree(qisys.worktree.WorkTreeObserver):
    """Observes a qisys worktree and keeps its linguist projects in sync.

    Projects are parsed from each worktree project's qiproject.xml via
    :func:`new_linguist_project`; duplicate project names are rejected.
    """

    def __init__(self, worktree):
        # Load once, then register as an observer so reload() is invoked
        # when the underlying worktree changes.
        self.worktree = worktree
        self.root = worktree.root
        self.linguist_projects = list()
        self._load_linguist_projects()
        worktree.register(self)

    def _load_linguist_projects(self):
        # Rebuild self.linguist_projects from scratch from the worktree.
        self.linguist_projects = list()
        for worktree_project in self.worktree.projects:
            linguist_project = new_linguist_project(self, worktree_project)
            if linguist_project:
                self.check_unique_name(linguist_project)
                self.linguist_projects.append(linguist_project)

    def reload(self):
        """Worktree-observer callback: re-parse every linguist project."""
        self._load_linguist_projects()

    def get_linguist_project(self, name, raises=False):
        """Return the linguist project called *name*.

        When not found, return None, or raise NoSuchProject (with a
        "did you mean" hint) if *raises* is True.
        """
        for project in self.linguist_projects:
            if project.name == name:
                return project
        if raises:
            mess = ui.did_you_mean("No such linguist project: %s" % name,
                                   name, [x.name for x in self.linguist_projects])
            raise qisys.worktree.NoSuchProject(name, mess)
        else:
            return None

    def check_unique_name(self, new_project):
        """Raise if a project with the same name is already registered."""
        project_with_same_name = self.get_linguist_project(new_project.name,
                                                           raises=False)
        if project_with_same_name:
            raise Exception("""\
Found two projects with the same name ({0})
In:
* {1}
* {2}
""".format(new_project.name, project_with_same_name.path, new_project.path))
def new_linguist_project(linguist_worktree, project):  # pylint: disable=unused-argument
    """Build a linguist project from a worktree project's qiproject.xml.

    Returns None when the project has no qiproject.xml, declares a version
    other than "3", or has no <qilinguist> (or legacy <translate>) element.

    :raises BadProjectConfig: when a mandatory attribute is missing or the
        translation framework is unknown
    """
    if not os.path.exists(project.qiproject_xml):
        return None
    tree = qisys.qixml.read(project.qiproject_xml)
    root = tree.getroot()
    if root.get("version") != "3":
        return None
    elem = root.find("qilinguist")
    if elem is None:
        # try deprecated name too
        elem = root.find("translate")
    if elem is None:
        return None
    name = elem.get("name")
    if not name:
        raise BadProjectConfig(project.qiproject_xml,
                               "Expecting a 'name' attribute")
    domain = elem.get("domain")
    if not domain:
        domain = name
    # BUGFIX: elem.get("linguas") returns None when the attribute is absent,
    # which used to crash with AttributeError on .split(); fall back to the
    # intended "en_US" default instead.
    linguas = (elem.get("linguas") or "").split()
    if not linguas:
        linguas = ["en_US"]
    tr_framework = elem.get("tr")
    if not tr_framework:
        raise BadProjectConfig(project.qiproject_xml,
                               "Expecting a 'tr' attribute")
    if tr_framework not in ["linguist", "gettext"]:
        mess = """ \
Unknown translation framework: {}.
Choose between 'linguist' or 'gettext'
"""
        # BUGFIX: BadProjectConfig.__str__ formats *two* arguments, so the
        # file path must be passed along with the message (a single argument
        # made str() of the exception raise IndexError).
        raise BadProjectConfig(project.qiproject_xml,
                               mess.format(tr_framework))
    if tr_framework == "linguist":
        from qilinguist.qtlinguist import QtLinguistProject
        new_project = QtLinguistProject(name, project.path, domain=domain,
                                        linguas=linguas)
    else:
        from qilinguist.qigettext import GettextProject
        new_project = GettextProject(name, project.path, domain=domain,
                                     linguas=linguas)
    return new_project
class BadProjectConfig(Exception):
    """Raised when a qiproject.xml contains an invalid qilinguist config.

    Expects two args: (path to the qiproject.xml, detail message).
    """

    def __str__(self):
        return (
            "\nIncorrect configuration detected for project in {0}\n{1}\n"
            .format(self.args[0], self.args[1])
        )
| # Copyright (c) 2012-2018 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the COPYING file.
import os
from qisys import ui
import qisys.worktree
import qisys.qixml
class LinguistWorkTree(qisys.worktree.WorkTreeObserver):
    """Observes a qisys worktree and keeps its linguist projects in sync.

    Projects are parsed from each worktree project's qiproject.xml via
    :func:`new_linguist_project`; duplicate project names are rejected.
    """

    def __init__(self, worktree):
        # Load once, then register as an observer so reload() is invoked
        # when the underlying worktree changes.
        self.worktree = worktree
        self.root = worktree.root
        self.linguist_projects = list()
        self._load_linguist_projects()
        worktree.register(self)

    def _load_linguist_projects(self):
        # Rebuild self.linguist_projects from scratch from the worktree.
        self.linguist_projects = list()
        for worktree_project in self.worktree.projects:
            linguist_project = new_linguist_project(self, worktree_project)
            if linguist_project:
                self.check_unique_name(linguist_project)
                self.linguist_projects.append(linguist_project)

    def reload(self):
        """Worktree-observer callback: re-parse every linguist project."""
        self._load_linguist_projects()

    def get_linguist_project(self, name, raises=False):
        """Return the linguist project called *name*.

        When not found, return None, or raise NoSuchProject (with a
        "did you mean" hint) if *raises* is True.
        """
        for project in self.linguist_projects:
            if project.name == name:
                return project
        if raises:
            mess = ui.did_you_mean("No such linguist project: %s" % name,
                                   name, [x.name for x in self.linguist_projects])
            raise qisys.worktree.NoSuchProject(name, mess)
        else:
            return None

    def check_unique_name(self, new_project):
        """Raise if a project with the same name is already registered."""
        project_with_same_name = self.get_linguist_project(new_project.name,
                                                           raises=False)
        if project_with_same_name:
            raise Exception("""\
Found two projects with the same name ({0})
In:
* {1}
* {2}
""".format(new_project.name, project_with_same_name.path, new_project.path))
def new_linguist_project(linguist_worktree, project):  # pylint: disable=unused-argument
    """Build a linguist project from a worktree project's qiproject.xml.

    Returns None when the project has no qiproject.xml, declares a version
    other than "3", or has no <qilinguist> (or legacy <translate>) element.

    :raises BadProjectConfig: when a mandatory attribute is missing or the
        translation framework is unknown
    """
    if not os.path.exists(project.qiproject_xml):
        return None
    tree = qisys.qixml.read(project.qiproject_xml)
    root = tree.getroot()
    if root.get("version") != "3":
        return None
    elem = root.find("qilinguist")
    if elem is None:
        # try deprecated name too
        elem = root.find("translate")
    if elem is None:
        return None
    name = elem.get("name")
    if not name:
        raise BadProjectConfig(project.qiproject_xml,
                               "Expecting a 'name' attribute")
    domain = elem.get("domain")
    if not domain:
        domain = name
    # BUGFIX: elem.get("linguas") returns None when the attribute is absent,
    # which used to crash with AttributeError on .split(); fall back to the
    # intended "en_US" default instead.
    linguas = (elem.get("linguas") or "").split()
    if not linguas:
        linguas = ["en_US"]
    tr_framework = elem.get("tr")
    if not tr_framework:
        raise BadProjectConfig(project.qiproject_xml,
                               "Expecting a 'tr' attribute")
    if tr_framework not in ["linguist", "gettext"]:
        mess = """ \
Unknown translation framework: {}.
Choose between 'linguist' or 'gettext'
"""
        # BUGFIX: BadProjectConfig.__str__ formats *two* arguments, so the
        # file path must be passed along with the message (a single argument
        # made str() of the exception raise IndexError).
        raise BadProjectConfig(project.qiproject_xml,
                               mess.format(tr_framework))
    if tr_framework == "linguist":
        from qilinguist.qtlinguist import QtLinguistProject
        new_project = QtLinguistProject(name, project.path, domain=domain,
                                        linguas=linguas)
    else:
        from qilinguist.qigettext import GettextProject
        new_project = GettextProject(name, project.path, domain=domain,
                                     linguas=linguas)
    return new_project
class BadProjectConfig(Exception):
    """Raised when a qiproject.xml contains an invalid qilinguist config.

    Expects two args: (path to the qiproject.xml, detail message).
    """

    def __str__(self):
        return (
            "\nIncorrect configuration detected for project in {0}\n{1}\n"
            .format(self.args[0], self.args[1])
        )
| en | 0.714176 | # Copyright (c) 2012-2018 SoftBank Robotics. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the COPYING file. \ Found two projects with the same name ({0}) In: * {1} * {2} # pylint: disable=unused-argument # try deprecated name too \ Unknow translation framework: {}. Choose between 'linguist' or 'gettext' Incorrect configuration detected for project in {0} {1} | 2.26642 | 2 |
fiery/models/__init__.py | sty61010/fiery | 0 | 6621023 | from importlib import import_module
from fiery.layers.bev_self_attention import BEVSelfAttention
from fiery.models.encoder import ImageAttention
from fiery.models.head_wrappers.Anchor3DHeadWrapper import Anchor3DHeadWrapper
from fiery.models.head_wrappers.CenterHeadWrapper import CenterHeadWrapper
def import_obj(cfg):
    """Resolve the dotted class path stored under cfg['type'] or cfg['NAME'].

    The resolved object is cached in this module's globals so each dotted
    path is imported only once.  Returns None when *cfg* is None or names
    neither key.
    """
    if cfg is None or (cfg.get('type', None) is None and cfg.get('NAME') is None):
        return None
    dotted = cfg.get('type', None) or cfg.get('NAME', None)
    cache = globals()
    if dotted not in cache:
        module_path = dotted[:dotted.rfind('.')]
        attr_name = dotted.split('.')[-1]
        cache[dotted] = getattr(import_module(module_path), attr_name)
    return cache[dotted]
def build_obj(cfg):
    """Instantiate the class named by *cfg*.

    Resolves the class via import_obj, converts *cfg* to a plain dict,
    strips the 'type'/'NAME' selector keys, and passes the rest as
    keyword arguments.  Returns None when no class is named.
    """
    cls = import_obj(cfg)
    if cls is None:
        return None
    kwargs = cfg.convert_to_dict()
    for selector_key in ('type', 'NAME'):
        kwargs.pop(selector_key, None)
    return cls(**kwargs)
# Public API of this module.
# BUGFIX: 'BottleneckModule' was listed here but is never imported or
# defined in this module, which made ``from fiery.models import *`` fail
# with an AttributeError; it has been removed from the export list.
__all__ = [
    'build_obj',
    'Anchor3DHeadWrapper',
    'BEVSelfAttention',
    'CenterHeadWrapper',
    'ImageAttention',
]
| from importlib import import_module
from fiery.layers.bev_self_attention import BEVSelfAttention
from fiery.models.encoder import ImageAttention
from fiery.models.head_wrappers.Anchor3DHeadWrapper import Anchor3DHeadWrapper
from fiery.models.head_wrappers.CenterHeadWrapper import CenterHeadWrapper
def import_obj(cfg):
    """Resolve the dotted class path stored under cfg['type'] or cfg['NAME'].

    The resolved object is cached in this module's globals so each dotted
    path is imported only once.  Returns None when *cfg* is None or names
    neither key.
    """
    if cfg is None or (cfg.get('type', None) is None and cfg.get('NAME') is None):
        return None
    dotted = cfg.get('type', None) or cfg.get('NAME', None)
    cache = globals()
    if dotted not in cache:
        module_path = dotted[:dotted.rfind('.')]
        attr_name = dotted.split('.')[-1]
        cache[dotted] = getattr(import_module(module_path), attr_name)
    return cache[dotted]
def build_obj(cfg):
    """Instantiate the class named by *cfg*.

    Resolves the class via import_obj, converts *cfg* to a plain dict,
    strips the 'type'/'NAME' selector keys, and passes the rest as
    keyword arguments.  Returns None when no class is named.
    """
    cls = import_obj(cfg)
    if cls is None:
        return None
    kwargs = cfg.convert_to_dict()
    for selector_key in ('type', 'NAME'):
        kwargs.pop(selector_key, None)
    return cls(**kwargs)
# Public API of this module.
# BUGFIX: 'BottleneckModule' was listed here but is never imported or
# defined in this module, which made ``from fiery.models import *`` fail
# with an AttributeError; it has been removed from the export list.
__all__ = [
    'build_obj',
    'Anchor3DHeadWrapper',
    'BEVSelfAttention',
    'CenterHeadWrapper',
    'ImageAttention',
]
| none | 1 | 2.158286 | 2 | |
DaPy/methods/evaluator.py | huihui7987/DaPy | 552 | 6621024 | from DaPy.core import Matrix, SeriesSet, Series
from DaPy.core import LogInfo, LogWarn, LogErr, is_seq
from DaPy.matlib import zeros, mean
from math import sqrt
def ConfuMat(Y, y_, labels=None):
    '''Compute the confusion matrix of true labels *Y* vs predictions *y_*.

    The returned matrix has one row/column per label plus a final row and
    column holding the marginal totals; the grand total sits at [-1, -1].

    :param Y: sequence of ground-truth labels
    :param y_: sequence of predicted labels
    :param labels: ordered label list; defaults to the sorted union of the
        labels appearing in Y and y_.  BUGFIX: this argument used to be
        immediately overwritten and therefore silently ignored; it is now
        honored when provided (and optional for backward compatibility).
    '''
    if labels is None:
        labels = sorted(set(Y) | set(y_))
    confu = zeros((len(labels) + 1, len(labels) + 1))
    temp = SeriesSet({'Y': Y, 'y': y_})
    for i, l1 in enumerate(labels):
        # Rows with true label l1.
        subtemp = temp.select(lambda row: row[0] == l1)
        for j, l2 in enumerate(labels):
            confu[i, j] = len(subtemp.select(lambda row: row[1] == l2))
        confu[i, -1] = sum(confu[i])  # row total
    for j in range(len(labels) + 1):
        confu[-1, j] = sum(confu[:, j].tolist()[0])  # column totals
    return confu
def Accuracy(confumat):
    """Fraction of correct predictions from a ConfuMat-style matrix.

    The matrix is expected to carry marginal totals in its last row and
    column, so the diagonal is summed over all but the last column and
    divided by the grand total at [-1][-1]; rounded to 4 decimals.
    """
    n_labels = confumat.shape[1] - 1
    correct = 0
    for idx in range(n_labels):
        correct += confumat[idx][idx]
    grand_total = float(confumat[-1][-1])
    return round(correct / grand_total, 4)
def Kappa(confumat):
    '''Cohen's kappa statistic from a ConfuMat-style matrix (with margins).

    kappa = (Po - Pe) / (1 - Pe), where Po is the observed agreement
    (accuracy) and Pe the agreement expected by chance from the row and
    column totals.
    '''
    # NOTE(review): ConfuMat reads columns as confu[:, j].tolist()[0], which
    # suggests tolist() returns a nested list; if so, the [:-1] below drops
    # the whole single-row list rather than the last total -- verify against
    # the DaPy Matrix slicing semantics.
    as_ = confumat[:, -1].tolist()[:-1]  # row totals (true-label counts)
    bs_ = confumat[-1][:-1]              # column totals (predicted counts)
    # BUGFIX: Accuracy() already returns a proportion in [0, 1]; dividing it
    # by 100 treated it as a percentage and made kappa badly wrong.
    Po = Accuracy(confumat)
    upper = sum([a * b for a, b in zip(as_, bs_)])
    Pe = float(upper) / confumat[-1][-1] ** 2
    return (Po - Pe) / (1 - Pe)
def Auc(target, predict, n_bins=100):
    """Approximate ROC-AUC via histogram binning of the predicted scores.

    :param target: sequence of binary ground-truth labels (1 = positive)
    :param predict: sequence of scores/probabilities in [0, 1]
    :param n_bins: number of histogram buckets for the approximation
    :return: estimated probability that a random positive is ranked above
        a random negative (ties count 0.5)
    """
    pos_len = sum(target)
    neg_len = len(target) - pos_len
    total = pos_len * neg_len
    pos_histogram = [0] * n_bins
    neg_histogram = [0] * n_bins
    bin_width = 1.0 / n_bins
    for tar, pre in zip(target, predict):
        # BUGFIX: a score of exactly 1.0 used to index one past the last
        # bucket (IndexError); clamp into the top bin instead.
        nth_bin = min(int(pre / bin_width), n_bins - 1)
        if tar == 1:
            pos_histogram[nth_bin] += 1
        else:
            neg_histogram[nth_bin] += 1
    accumulate_neg, satisfied_pair = 0, 0
    for pos_his, neg_his in zip(pos_histogram, neg_histogram):
        # Every negative in a strictly lower bin is a satisfied pair;
        # negatives sharing the bin count half (tie handling).
        satisfied_pair += (pos_his * accumulate_neg + pos_his * neg_his * 0.5)
        accumulate_neg += neg_his
    return satisfied_pair / float(total)
def Performance(predictor, data, target, mode='reg'):
    """Evaluate *predictor* on (*data*, *target*) and log summary metrics.

    mode='clf': logs accuracy, kappa and (for binary problems) AUC and
    returns the confusion matrix.  mode='reg': logs MAE, MSE, MAPE and R2.

    :param predictor: fitted model exposing predict() (and, for binary
        classification, predict_proba() and optionally a ``labels`` attribute)
    :param data: feature rows passed straight to predictor.predict
    :param target: ground-truth values, same length as *data*
    :param mode: 'clf' for classification or 'reg' for regression
    """
    assert mode in ('clf', 'reg'), "`mode` must be `clf` or `reg` only."
    assert len(data) == len(target),"the number of target data is not equal to variable data"
    if mode == 'clf':
        result = predictor.predict(data)
        # Normalize predictions/targets to SeriesSet so .shape is available.
        if hasattr(result, 'shape') is False:
            result = SeriesSet(result)
        if hasattr(target, 'shape') is False:
            target = SeriesSet(target)
        assert target.shape[1] == 1, 'testify target must be a sequence'
        target = target[target.columns[0]]
        if hasattr(predictor, 'labels'):
            labels = predictor.labels
        else:
            labels = sorted(set(result) | set(target))
        confuMat = ConfuMat(target, result, labels)
        LogInfo('Classification Accuracy: %.4f' % Accuracy(confuMat))
        LogInfo('Classification Kappa: %.4f' % Kappa(confuMat))
        # A (2+1)x(2+1) confusion matrix means binary classification:
        # compute AUC from the positive-class probabilities.
        if confuMat.shape[1] == 3:
            proba = predictor.predict_proba(data)
            if proba.shape[1] == 2:
                proba = proba[:, 0]
            # Recode targets so labels[0] is the positive class.
            target = Series(1 if _ == labels[0] else 0 for _ in target)
            LogInfo('Classification AUC: %.4f' % Auc(target, proba))
        return confuMat
    elif mode == 'reg':
        target = Series(target)
        # predict() is assumed to return a column matrix; flatten via .T --
        # TODO confirm this holds for every DaPy regressor.
        predict = Series(predictor.predict(data).T.tolist()[0])
        mean_abs_err = Score.MAE(target, predict)
        mean_sqrt_err = Score.MSE(target, predict)
        R2 = Score.R2_score(target, predict)
        mean_abs_percent_erro = Score.MAPE(target, predict)
        LogInfo('Regression MAE: %.4f' % mean_abs_err)
        LogInfo('Regression MSE: %.4f' % mean_sqrt_err)
        LogInfo('Regression MAPE: %.4f' % mean_abs_percent_erro)
        LogInfo(u'Regression R²: %.4f' % R2)
class Score(object):
    '''Performance scores to evaluate a regressor.'''
    # NOTE(review): these operate on DaPy Series/Matrix objects -- *predict*
    # is expected to expose .shape and .T, so plain Python lists will not
    # work here; confirm the expected input types with the callers.

    @staticmethod
    def error(target, predict):
        # Residuals target - predict; transposes predict so it is
        # column-shaped before subtracting.
        if predict.shape[1] != 1:
            predict = predict.T
        assert predict.shape[0] == target.shape[0]
        return target - predict

    @staticmethod
    def MAE(target, predict):
        # Mean absolute error.
        return mean(abs(Score.error(target, predict)))

    @staticmethod
    def MSE(target, predict):
        # Mean squared error.
        return mean(Score.error(target, predict) ** 2)

    @staticmethod
    def R2_score(target, predict):
        # Coefficient of determination: 1 - SSE/SST.
        SSE = sum(Score.error(target, predict) ** 2)
        SST = sum((target - mean(target)) ** 2)
        return 1 - SSE / SST

    @staticmethod
    def MAPE(target, predict):
        # Mean absolute percentage error; divides by target, so zeros in
        # target will fail -- TODO confirm callers guarantee nonzero targets.
        return mean(abs(Score.error(target, predict) / target))
| from DaPy.core import Matrix, SeriesSet, Series
from DaPy.core import LogInfo, LogWarn, LogErr, is_seq
from DaPy.matlib import zeros, mean
from math import sqrt
def ConfuMat(Y, y_, labels=None):
    '''Compute the confusion matrix of true labels *Y* vs predictions *y_*.

    The returned matrix has one row/column per label plus a final row and
    column holding the marginal totals; the grand total sits at [-1, -1].

    :param Y: sequence of ground-truth labels
    :param y_: sequence of predicted labels
    :param labels: ordered label list; defaults to the sorted union of the
        labels appearing in Y and y_.  BUGFIX: this argument used to be
        immediately overwritten and therefore silently ignored; it is now
        honored when provided (and optional for backward compatibility).
    '''
    if labels is None:
        labels = sorted(set(Y) | set(y_))
    confu = zeros((len(labels) + 1, len(labels) + 1))
    temp = SeriesSet({'Y': Y, 'y': y_})
    for i, l1 in enumerate(labels):
        # Rows with true label l1.
        subtemp = temp.select(lambda row: row[0] == l1)
        for j, l2 in enumerate(labels):
            confu[i, j] = len(subtemp.select(lambda row: row[1] == l2))
        confu[i, -1] = sum(confu[i])  # row total
    for j in range(len(labels) + 1):
        confu[-1, j] = sum(confu[:, j].tolist()[0])  # column totals
    return confu
def Accuracy(confumat):
    """Fraction of correct predictions from a ConfuMat-style matrix.

    The matrix is expected to carry marginal totals in its last row and
    column, so the diagonal is summed over all but the last column and
    divided by the grand total at [-1][-1]; rounded to 4 decimals.
    """
    n_labels = confumat.shape[1] - 1
    correct = 0
    for idx in range(n_labels):
        correct += confumat[idx][idx]
    grand_total = float(confumat[-1][-1])
    return round(correct / grand_total, 4)
def Kappa(confumat):
    '''Return Cohen's kappa, (Po - Pe) / (1 - Pe), from a ConfuMat matrix.

    Po is the observed agreement (overall accuracy) and Pe the agreement
    expected by chance from the row/column marginals.
    '''
    # Marginal totals per true label (as_) and per predicted label (bs_),
    # excluding the grand-total entry appended by ConfuMat.
    as_ = confumat[:, -1].tolist()[:-1]
    bs_ = confumat[-1][:-1]
    # BUG FIX: Accuracy() already returns a proportion in [0, 1]; the
    # original divided it by 100 again, shrinking Po ~100x and making the
    # returned kappa wrong (typically strongly negative).
    Po = Accuracy(confumat)
    upper = sum([a * b for a, b in zip(as_, bs_)])
    Pe = float(upper) / confumat[-1][-1] ** 2
    return (Po - Pe) / (1 - Pe)
def Auc(target, predict, n_bins=100):
    '''Return the ROC AUC of binary `target` (0/1) vs scores `predict`.

    Uses a histogram approximation with `n_bins` equal-width score bins;
    tied positive/negative pairs in the same bin count as half a win.
    '''
    pos_len = sum(target)
    neg_len = len(target) - pos_len
    # Number of (positive, negative) pairs the AUC averages over.
    total = pos_len * neg_len
    pos_histogram = [0] * n_bins
    neg_histogram = [0] * n_bins
    bin_width = 1.0 / n_bins
    for tar, pre in zip(target, predict):
        # BUG FIX: a score of exactly 1.0 used to produce
        # nth_bin == n_bins and raise IndexError; clamp into the last bin.
        nth_bin = min(int(pre / bin_width), n_bins - 1)
        if tar == 1:
            pos_histogram[nth_bin] += 1
        else:
            neg_histogram[nth_bin] += 1
    accumulate_neg, satisfied_pair = 0, 0
    for pos_his, neg_his in zip(pos_histogram, neg_histogram):
        # Positives in this bin beat every negative seen in lower bins,
        # and tie (half credit) with negatives in the same bin.
        satisfied_pair += (pos_his * accumulate_neg + pos_his * neg_his * 0.5)
        accumulate_neg += neg_his
    # NOTE(review): a single-class target makes total == 0 and still
    # raises ZeroDivisionError, exactly as in the original.
    return satisfied_pair / float(total)
def Performance(predictor, data, target, mode='reg'):
    '''Log evaluation metrics for `predictor` on (`data`, `target`).

    In 'clf' mode logs Accuracy/Kappa (plus AUC for binary problems) and
    returns the confusion matrix; in 'reg' mode logs MAE/MSE/MAPE/R2.

    NOTE(review): the 'reg' branch returns None while 'clf' returns the
    confusion matrix -- confirm the asymmetry is intended.
    '''
    assert mode in ('clf', 'reg'), "`mode` must be `clf` or `reg` only."
    assert len(data) == len(target),"the number of target data is not equal to variable data"
    if mode == 'clf':
        result = predictor.predict(data)
        # Normalize predictions/targets into SeriesSet so `.shape` works.
        if hasattr(result, 'shape') is False:
            result = SeriesSet(result)
        if hasattr(target, 'shape') is False:
            target = SeriesSet(target)
        assert target.shape[1] == 1, 'testify target must be a sequence'
        target = target[target.columns[0]]
        if hasattr(predictor, 'labels'):
            labels = predictor.labels
        else:
            labels = sorted(set(result) | set(target))
        confuMat = ConfuMat(target, result, labels)
        LogInfo('Classification Accuracy: %.4f' % Accuracy(confuMat))
        LogInfo('Classification Kappa: %.4f' % Kappa(confuMat))
        # Binary problem: 2 labels + totals row/column => width 3.
        if confuMat.shape[1] == 3:
            proba = predictor.predict_proba(data)
            if proba.shape[1] == 2:
                # NOTE(review): column 0 is assumed to hold the
                # probability of labels[0] -- confirm with the predictor.
                proba = proba[:, 0]
            # Re-encode targets as 1 for labels[0], 0 otherwise.
            target = Series(1 if _ == labels[0] else 0 for _ in target)
            LogInfo('Classification AUC: %.4f' % Auc(target, proba))
        return confuMat
    elif mode == 'reg':
        target = Series(target)
        # Flatten the (1, n) prediction matrix into a Series.
        predict = Series(predictor.predict(data).T.tolist()[0])
        mean_abs_err = Score.MAE(target, predict)
        mean_sqrt_err = Score.MSE(target, predict)
        R2 = Score.R2_score(target, predict)
        mean_abs_percent_erro = Score.MAPE(target, predict)
        LogInfo('Regression MAE: %.4f' % mean_abs_err)
        LogInfo('Regression MSE: %.4f' % mean_sqrt_err)
        LogInfo('Regression MAPE: %.4f' % mean_abs_percent_erro)
        LogInfo(u'Regression R²: %.4f' % R2)
class Score(object):
    '''Static regression metrics (MAE, MSE, R2, MAPE) over DaPy Series.'''
    @staticmethod
    def error(target, predict):
        '''Return the element-wise residual ``target - predict``.'''
        # NOTE(review): assumes `predict` is 2-D (shape[1] is read) and is
        # transposed into a single column when needed -- confirm against
        # the DaPy Series/Matrix API.
        if predict.shape[1] != 1:
            predict = predict.T
        assert predict.shape[0] == target.shape[0]
        return target - predict
    @staticmethod
    def MAE(target, predict):
        '''Mean absolute error.'''
        return mean(abs(Score.error(target, predict)))
    @staticmethod
    def MSE(target, predict):
        '''Mean squared error.'''
        return mean(Score.error(target, predict) ** 2)
    @staticmethod
    def R2_score(target, predict):
        '''Coefficient of determination: 1 - SSE/SST.'''
        SSE = sum(Score.error(target, predict) ** 2)
        SST = sum((target - mean(target)) ** 2)
        return 1 - SSE / SST
    @staticmethod
    def MAPE(target, predict):
        '''Mean absolute percentage error.

        NOTE(review): divides by `target`, so a zero in the target raises
        ZeroDivisionError -- confirm inputs are strictly non-zero.
        '''
        return mean(abs(Score.error(target, predict) / target))
| en | 0.606136 | calculate confution Matrix performace score to evalulate a regressor | 2.224506 | 2 |
planner.py | haeinous/a-relational-db | 0 | 6621025 | #!/usr/bin/python3
"""
Implement a Scan node that yields a single record each time its next method is called, as well as
a Selection node initialized with a predicate function (one which returns true or false), which yields
the next record for which the predicate function returns true whenever its own next method is called.
"""
| #!/usr/bin/python3
"""
Implement a Scan node that yields a single record each time its next method is called, as well as
a Selection node initialized with a predicate function (one which returns true or false), which yields
the next record for which the predicate function returns true whenever its own next method is called.
"""
| en | 0.935807 | #!/usr/bin/python3 Implement a Scan node that yields a single record each time its next method is called, as well as a Selection node initialized with a predicate function (one which returns true or false), which yields the next record for which the predicate function returns true whenever its own next method is called. | 3.218206 | 3 |
factories/go_ethereum_osx.py | vaporyproject/ethereum-buildbot | 35 | 6621026 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import factory
reload(factory)
from factory import *
def osx_go_factory(branch='develop', isPullRequest=False):
    """Build a Buildbot factory that checks out, builds and tests go-ethereum on OS X.

    Args:
        branch: git branch of ethereum/go-ethereum to build.
        isPullRequest: when False, the downstream Homebrew (El Capitan)
            scheduler is triggered after the build, carrying the version
            property extracted from the VERSION file.
    Returns:
        The configured BuildFactory.
    """
    factory = BuildFactory()
    for step in [
        # Fresh full checkout (copy method) of the requested branch.
        Git(
            haltOnFailure=True,
            logEnviron=False,
            repourl='https://github.com/ethereum/go-ethereum.git',
            branch=branch,
            mode='full',
            method='copy',
            codebase='go-ethereum',
            retry=(5, 3)
        ),
        # Extract "major.minor.patch" from VERSION into the `version` property.
        SetPropertyFromCommand(
            haltOnFailure=True,
            logEnviron=False,
            name="update-version",
            command='gsed -ne "s/^\([0-9]*\.[0-9]*\.[0-9]*\).*/\\1/p" VERSION',
            property="version"
        ),
        ShellCommand(
            haltOnFailure=True,
            logEnviron=False,
            name="make-clean",
            description="cleaning up",
            descriptionDone="clean up",
            command=["make", "clean"]
        ),
        ShellCommand(
            haltOnFailure=True,
            logEnviron=False,
            name="make-all",
            description="installing",
            descriptionDone="install",
            command=["make", "all"]
        ),
        # Test run capped at 15 minutes.
        ShellCommand(
            haltOnFailure=True,
            name="go-test",
            description="go testing",
            descriptionDone="go test",
            command=["make", "test"],
            maxTime=900
        )
    ]: factory.addStep(step)
    if not isPullRequest:
        for step in [
            # Kick off Homebrew packaging without blocking this build.
            Trigger(
                name='brew-el-capitan',
                schedulerNames=["go-ethereum-%s-el-capitan" % branch],
                waitForFinish=False,
                set_properties={
                    "version": Interpolate("%(prop:version)s")
                }
            )
        ]: factory.addStep(step)
    return factory
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import factory
reload(factory)
from factory import *
def osx_go_factory(branch='develop', isPullRequest=False):
    """Build a Buildbot factory that checks out, builds and tests go-ethereum on OS X.

    Args:
        branch: git branch of ethereum/go-ethereum to build.
        isPullRequest: when False, the downstream Homebrew (El Capitan)
            scheduler is triggered after the build, carrying the version
            property extracted from the VERSION file.
    Returns:
        The configured BuildFactory.
    """
    factory = BuildFactory()
    for step in [
        # Fresh full checkout (copy method) of the requested branch.
        Git(
            haltOnFailure=True,
            logEnviron=False,
            repourl='https://github.com/ethereum/go-ethereum.git',
            branch=branch,
            mode='full',
            method='copy',
            codebase='go-ethereum',
            retry=(5, 3)
        ),
        # Extract "major.minor.patch" from VERSION into the `version` property.
        SetPropertyFromCommand(
            haltOnFailure=True,
            logEnviron=False,
            name="update-version",
            command='gsed -ne "s/^\([0-9]*\.[0-9]*\.[0-9]*\).*/\\1/p" VERSION',
            property="version"
        ),
        ShellCommand(
            haltOnFailure=True,
            logEnviron=False,
            name="make-clean",
            description="cleaning up",
            descriptionDone="clean up",
            command=["make", "clean"]
        ),
        ShellCommand(
            haltOnFailure=True,
            logEnviron=False,
            name="make-all",
            description="installing",
            descriptionDone="install",
            command=["make", "all"]
        ),
        # Test run capped at 15 minutes.
        ShellCommand(
            haltOnFailure=True,
            name="go-test",
            description="go testing",
            descriptionDone="go test",
            command=["make", "test"],
            maxTime=900
        )
    ]: factory.addStep(step)
    if not isPullRequest:
        for step in [
            # Kick off Homebrew packaging without blocking this build.
            Trigger(
                name='brew-el-capitan',
                schedulerNames=["go-ethereum-%s-el-capitan" % branch],
                waitForFinish=False,
                set_properties={
                    "version": Interpolate("%(prop:version)s")
                }
            )
        ]: factory.addStep(step)
    return factory
| en | 0.352855 | #!/usr/bin/env python # -*- coding: utf-8 -*- | 2.004995 | 2 |
mars_scrape.py | Megaexoplanet/Web_scraping_challenge | 0 | 6621027 | <gh_stars>0
from splinter import Browser
from bs4 import BeautifulSoup as bs
import pandas as pd
import re
import time
def scrape():
    """Run every Mars scraper and return the collected data as one dict.

    NOTE(review): the headless Chrome session created here appears to be
    closed inside hemi() (which calls browser.quit()); mars_facts() runs
    after that and takes no browser -- confirm this ordering is intended.
    """
    browser = Browser('chrome',executable_path='chromedriver', headless=True)
    news_title,news_p = mars_news(browser)
    mars_data = {
        'mars_title': news_title,
        'mars_p': news_p,
        'featured_img': featured_image(browser),
        'mars_weather': mars_weather(browser),
        'hemispheres': hemi(browser),
        'mars_facts': mars_facts()
    }
    return mars_data
def mars_news(browser):
    """Return (title, teaser) of the latest NASA Mars news item, or (None, None)."""
    # Visit the NASA Mars News site.
    url = 'https://mars.nasa.gov/news/'
    browser.visit(url)
    # Wait up to 1 s for the news list to render before parsing.
    browser.is_element_present_by_css('ul.item_list li.slide', wait_time=1)
    html = browser.html
    news_soup = bs(html,'html.parser')
    slide_elem = news_soup.select_one('ul.item_list li.slide')
    try:
        news_title=slide_elem.find('div', class_='content_title').get_text()
        news_p = slide_elem.find('div', class_='article_teaser_body').get_text()
    except AttributeError:
        # select_one/find returned None: layout changed or page not loaded.
        return None, None
    return news_title, news_p
# JPL Space Images Featured Image
def featured_image(browser):
    """Return the absolute URL of JPL's featured Mars image, or None."""
    url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
    browser.visit(url)
    # Open the full-size view, then navigate to the "more info" page that
    # carries the full-resolution <img>.
    full_image_elem = browser.find_by_id('full_image')
    full_image_elem.click()
    browser.is_element_not_present_by_text('more info', wait_time=1)
    more_info_elem = browser.links.find_by_partial_text('more info')
    more_info_elem.click()
    html=browser.html
    img_soup = bs(html,"html.parser")
    try:
        # NOTE(review): only AttributeError is caught; an empty select()
        # result would raise IndexError instead -- confirm if intended.
        img_url_rel = img_soup.select("figure.lede a img")[0].get('src')
        img_url = f"https://www.jpl.nasa.gov{img_url_rel}"
    except AttributeError:
        return None
    return img_url
# Mars Weather
def mars_weather(browser):
    """Return the text of the latest Mars weather tweet."""
    url = 'https://twitter.com/marswxreport?lang=en'
    browser.visit(url)
    # Crude fixed wait for Twitter's client-side rendering to finish.
    time.sleep(5)
    html = browser.html
    weather_soup = bs(html,'html.parser')
    mars_weather_tweet = weather_soup.find('div', attrs={'class': 'tweet', 'data-name':'Mars Weather'})
    try:
        mars_weather = mars_weather_tweet.find('p','tweet-text').get_text()
    except AttributeError:
        # Fallback for the redesigned Twitter markup: first span whose
        # text mentions "sol".
        pattern = re.compile(r'sol')
        mars_weather = weather_soup.find('span', text=pattern).text
    return mars_weather
# Mars Hemispheres
def hemi(browser):
    """Return [{'img_url': ..., 'title': ...}] for the Mars hemispheres.

    Side effect: quits the browser session before returning.
    """
    url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
    browser.visit(url)
    hemisphere_image_urls = []
    links = browser.find_by_css('a.product-item.itemLink h3')
    len(links)  # NOTE(review): no-op leftover; only the loop below uses len(links).
    for i in range(len(links)):
        hemisphere = {}
        # Re-query on every iteration: the previous click navigated away,
        # invalidating the earlier element handles.
        browser.find_by_css('a.product-item h3')[i].click()
        sample_elem = browser.links.find_by_text('Sample').first
        hemisphere['img_url'] = sample_elem['href']
        hemisphere['title'] = browser.find_by_css('h2.title').text
        hemisphere_image_urls.append(hemisphere)
        browser.back()
    browser.quit()
    return hemisphere_image_urls
# Mars Facts
def mars_facts():
    """Scrape the Mars facts table from space-facts.com and return it as HTML.

    Returns:
        str: HTML for a (description, value) table indexed by description.
    """
    df = pd.read_html('https://space-facts.com/mars/')[0]
    df.columns = ['description', 'value']
    # BUG FIX: DataFrame.set_index returns a new frame; the original call
    # discarded the result, so the index was never actually set.
    df = df.set_index('description')
    return df.to_html()
| from splinter import Browser
from bs4 import BeautifulSoup as bs
import pandas as pd
import re
import time
def scrape():
    """Run every Mars scraper and return the collected data as one dict.

    NOTE(review): the headless Chrome session created here appears to be
    closed inside hemi() (which calls browser.quit()); mars_facts() runs
    after that and takes no browser -- confirm this ordering is intended.
    """
    browser = Browser('chrome',executable_path='chromedriver', headless=True)
    news_title,news_p = mars_news(browser)
    mars_data = {
        'mars_title': news_title,
        'mars_p': news_p,
        'featured_img': featured_image(browser),
        'mars_weather': mars_weather(browser),
        'hemispheres': hemi(browser),
        'mars_facts': mars_facts()
    }
    return mars_data
def mars_news(browser):
    """Return (title, teaser) of the latest NASA Mars news item, or (None, None)."""
    # Visit the NASA Mars News site.
    url = 'https://mars.nasa.gov/news/'
    browser.visit(url)
    # Wait up to 1 s for the news list to render before parsing.
    browser.is_element_present_by_css('ul.item_list li.slide', wait_time=1)
    html = browser.html
    news_soup = bs(html,'html.parser')
    slide_elem = news_soup.select_one('ul.item_list li.slide')
    try:
        news_title=slide_elem.find('div', class_='content_title').get_text()
        news_p = slide_elem.find('div', class_='article_teaser_body').get_text()
    except AttributeError:
        # select_one/find returned None: layout changed or page not loaded.
        return None, None
    return news_title, news_p
# JPL Space Images Featured Image
def featured_image(browser):
    """Return the absolute URL of JPL's featured Mars image, or None."""
    url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
    browser.visit(url)
    # Open the full-size view, then navigate to the "more info" page that
    # carries the full-resolution <img>.
    full_image_elem = browser.find_by_id('full_image')
    full_image_elem.click()
    browser.is_element_not_present_by_text('more info', wait_time=1)
    more_info_elem = browser.links.find_by_partial_text('more info')
    more_info_elem.click()
    html=browser.html
    img_soup = bs(html,"html.parser")
    try:
        # NOTE(review): only AttributeError is caught; an empty select()
        # result would raise IndexError instead -- confirm if intended.
        img_url_rel = img_soup.select("figure.lede a img")[0].get('src')
        img_url = f"https://www.jpl.nasa.gov{img_url_rel}"
    except AttributeError:
        return None
    return img_url
# Mars Weather
def mars_weather(browser):
    """Return the text of the latest Mars weather tweet."""
    url = 'https://twitter.com/marswxreport?lang=en'
    browser.visit(url)
    # Crude fixed wait for Twitter's client-side rendering to finish.
    time.sleep(5)
    html = browser.html
    weather_soup = bs(html,'html.parser')
    mars_weather_tweet = weather_soup.find('div', attrs={'class': 'tweet', 'data-name':'Mars Weather'})
    try:
        mars_weather = mars_weather_tweet.find('p','tweet-text').get_text()
    except AttributeError:
        # Fallback for the redesigned Twitter markup: first span whose
        # text mentions "sol".
        pattern = re.compile(r'sol')
        mars_weather = weather_soup.find('span', text=pattern).text
    return mars_weather
# Mars Hemispheres
def hemi(browser):
    """Return [{'img_url': ..., 'title': ...}] for the Mars hemispheres.

    Side effect: quits the browser session before returning.
    """
    url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
    browser.visit(url)
    hemisphere_image_urls = []
    links = browser.find_by_css('a.product-item.itemLink h3')
    len(links)  # NOTE(review): no-op leftover; only the loop below uses len(links).
    for i in range(len(links)):
        hemisphere = {}
        # Re-query on every iteration: the previous click navigated away,
        # invalidating the earlier element handles.
        browser.find_by_css('a.product-item h3')[i].click()
        sample_elem = browser.links.find_by_text('Sample').first
        hemisphere['img_url'] = sample_elem['href']
        hemisphere['title'] = browser.find_by_css('h2.title').text
        hemisphere_image_urls.append(hemisphere)
        browser.back()
    browser.quit()
    return hemisphere_image_urls
# Mars Facts
def mars_facts():
    """Scrape the Mars facts table from space-facts.com and return it as HTML.

    Returns:
        str: HTML for a (description, value) table indexed by description.
    """
    df = pd.read_html('https://space-facts.com/mars/')[0]
    df.columns = ['description', 'value']
    # BUG FIX: DataFrame.set_index returns a new frame; the original call
    # discarded the result, so the index was never actually set.
    df = df.set_index('description')
    return df.to_html()
scrapybot/urlf/__init__.py | roadt/scrapybot | 0 | 6621028 |
import re
from functools import partial
def scheme(url, **kwargs):
    """Return True if *url* starts with one of the accepted URI schemes.

    Keyword Args:
        scheme: comma-separated list of schemes (default 'http,https').
    """
    # dict.get keeps a present-but-empty value, exactly as the fragile
    # `'scheme' in kwargs and ... or ...` chain it replaces did.
    schemes = kwargs.get('scheme', 'http,https').split(',')
    # str.startswith accepts a tuple of prefixes, replacing map/lambda.
    return url.startswith(tuple(s + '://' for s in schemes))
def regex(url, **kwargs):
    """Return True if *url* matches any of the given regular expressions.

    Keyword Args:
        regex: comma-separated list of patterns (default '.*', match all).
    """
    # dict.get replaces the fragile `'regex' in kwargs and ... or ...`
    # chain; a generator expression replaces the inner def + partial.
    exprs = kwargs.get('regex', '.*').split(',')
    return any(re.search(expr, url) is not None for expr in exprs)
import re
from functools import partial
def scheme(url, **kwargs):
    """Return True if *url* starts with one of the accepted URI schemes.

    Keyword Args:
        scheme: comma-separated list of schemes (default 'http,https').
    """
    # dict.get keeps a present-but-empty value, exactly as the fragile
    # `'scheme' in kwargs and ... or ...` chain it replaces did.
    schemes = kwargs.get('scheme', 'http,https').split(',')
    # str.startswith accepts a tuple of prefixes, replacing map/lambda.
    return url.startswith(tuple(s + '://' for s in schemes))
def regex(url, **kwargs):
    """Return True if *url* matches any of the given regular expressions.

    Keyword Args:
        regex: comma-separated list of patterns (default '.*', match all).
    """
    # dict.get replaces the fragile `'regex' in kwargs and ... or ...`
    # chain; a generator expression replaces the inner def + partial.
    exprs = kwargs.get('regex', '.*').split(',')
    return any(re.search(expr, url) is not None for expr in exprs)
src/domainClient/api/demographics_api.py | diabolical-ninja/smart-property-search | 5 | 6621029 | <filename>src/domainClient/api/demographics_api.py<gh_stars>1-10
# coding: utf-8
"""
Domain Group API V1
Provides public access to Domain's microservices # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from domainClient.api_client import ApiClient
class DemographicsApi(object):
    """Client for Domain's public `/v1/demographics` endpoint.

    NOTE: This class is auto generated by the swagger code generator
    program (https://github.com/swagger-api/swagger-codegen); prefer
    regenerating over editing it manually.
    """
    def __init__(self, api_client=None):
        # Fall back to a default-configured ApiClient when none is injected.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client
    def demographics_get(self, level, id, **kwargs): # noqa: E501
        """Retrieve demographic information (synchronous by default). # noqa: E501

        Pass ``async_req=True`` to receive the request thread instead of
        the unwrapped result.

        :param str level: geographic level, `Postcode` or `Suburb` (required)
        :param int id: suburb or postcode identifier; see the
            `/addressLocators` endpoint (required)
        :param str types: comma-separated demographic types; all when omitted
        :param str year: source-data year, `2011` or `2016`
        :return: DomainDemographicsServiceV1ModelDemographicsResultsModel
        """
        # The public wrapper always unwraps the (data, status, headers) tuple.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.demographics_get_with_http_info(level, id, **kwargs) # noqa: E501
        else:
            (data) = self.demographics_get_with_http_info(level, id, **kwargs) # noqa: E501
            return data
    def demographics_get_with_http_info(self, level, id, **kwargs): # noqa: E501
        """Retrieve demographic information with full HTTP response info. # noqa: E501

        Same parameters as :meth:`demographics_get`; this variant honors
        the private `_return_http_data_only`/`_preload_content`/
        `_request_timeout` switches and performs the actual API call.
        """
        all_params = ['level', 'id', 'types', 'year'] # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        # Reject unknown keyword arguments up front.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method demographics_get" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'level' is set
        if ('level' not in params or
                params['level'] is None):
            raise ValueError("Missing the required parameter `level` when calling `demographics_get`") # noqa: E501
        # verify the required parameter 'id' is set
        if ('id' not in params or
                params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `demographics_get`") # noqa: E501
        collection_formats = {}
        path_params = {}
        # All four public parameters travel in the query string.
        query_params = []
        if 'level' in params:
            query_params.append(('level', params['level'])) # noqa: E501
        if 'id' in params:
            query_params.append(('id', params['id'])) # noqa: E501
        if 'types' in params:
            query_params.append(('types', params['types'])) # noqa: E501
        if 'year' in params:
            query_params.append(('year', params['year'])) # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'text/json', 'text/html', 'application/xml', 'text/xml']) # noqa: E501
        # Authentication setting
        auth_settings = ['oauth2'] # noqa: E501
        return self.api_client.call_api(
            '/v1/demographics', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='DomainDemographicsServiceV1ModelDemographicsResultsModel', # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
| <filename>src/domainClient/api/demographics_api.py<gh_stars>1-10
# coding: utf-8
"""
Domain Group API V1
Provides public access to Domain's microservices # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from domainClient.api_client import ApiClient
class DemographicsApi(object):
    """Client for Domain's public `/v1/demographics` endpoint.

    NOTE: This class is auto generated by the swagger code generator
    program (https://github.com/swagger-api/swagger-codegen); prefer
    regenerating over editing it manually.
    """
    def __init__(self, api_client=None):
        # Fall back to a default-configured ApiClient when none is injected.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client
    def demographics_get(self, level, id, **kwargs): # noqa: E501
        """Retrieve demographic information (synchronous by default). # noqa: E501

        Pass ``async_req=True`` to receive the request thread instead of
        the unwrapped result.

        :param str level: geographic level, `Postcode` or `Suburb` (required)
        :param int id: suburb or postcode identifier; see the
            `/addressLocators` endpoint (required)
        :param str types: comma-separated demographic types; all when omitted
        :param str year: source-data year, `2011` or `2016`
        :return: DomainDemographicsServiceV1ModelDemographicsResultsModel
        """
        # The public wrapper always unwraps the (data, status, headers) tuple.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.demographics_get_with_http_info(level, id, **kwargs) # noqa: E501
        else:
            (data) = self.demographics_get_with_http_info(level, id, **kwargs) # noqa: E501
            return data
    def demographics_get_with_http_info(self, level, id, **kwargs): # noqa: E501
        """Retrieve demographic information with full HTTP response info. # noqa: E501

        Same parameters as :meth:`demographics_get`; this variant honors
        the private `_return_http_data_only`/`_preload_content`/
        `_request_timeout` switches and performs the actual API call.
        """
        all_params = ['level', 'id', 'types', 'year'] # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        # Reject unknown keyword arguments up front.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method demographics_get" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'level' is set
        if ('level' not in params or
                params['level'] is None):
            raise ValueError("Missing the required parameter `level` when calling `demographics_get`") # noqa: E501
        # verify the required parameter 'id' is set
        if ('id' not in params or
                params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `demographics_get`") # noqa: E501
        collection_formats = {}
        path_params = {}
        # All four public parameters travel in the query string.
        query_params = []
        if 'level' in params:
            query_params.append(('level', params['level'])) # noqa: E501
        if 'id' in params:
            query_params.append(('id', params['id'])) # noqa: E501
        if 'types' in params:
            query_params.append(('types', params['types'])) # noqa: E501
        if 'year' in params:
            query_params.append(('year', params['year'])) # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'text/json', 'text/html', 'application/xml', 'text/xml']) # noqa: E501
        # Authentication setting
        auth_settings = ['oauth2'] # noqa: E501
        return self.api_client.call_api(
            '/v1/demographics', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='DomainDemographicsServiceV1ModelDemographicsResultsModel', # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
| en | 0.588243 | # coding: utf-8 Domain Group API V1 Provides public access to Domain's microservices # noqa: E501 OpenAPI spec version: v1 Generated by: https://github.com/swagger-api/swagger-codegen.git # noqa: F401 # python 2 and python 3 compatibility library NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen # noqa: E501 Retrieves demographic information # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.demographics_get(level, id, async_req=True) >>> result = thread.get() :param async_req bool :param str level: Geographic level. Valid values are: `Postcode`, `Suburb` (required) :param int id: Location identifier. If the geographic level is Suburb this is a Suburb ID value, if the geographic level is postcode this is a Postcode ID value. See the `/addressLocators` endpoint. (required) :param str types: Comma separated list of demographic data requested. If not provided, all data will be returned. Valid values are: `AgeGroupOfPopulation`, `CountryOfBirth`, `NatureOfOccupancy`, `GeographicalPopulation`, `DwellingStructure`, `HousingLoanRepayment`, `MaritalStatus`, `Religion`, `Occupation`, `EducationAttendance`, `TransportToWork` :param str year: Year of the source data. Valid values are: `2011`, `2016` :return: DomainDemographicsServiceV1ModelDemographicsResultsModel If the method is called asynchronously, returns the request thread. # noqa: E501 # noqa: E501 # noqa: E501 Retrieves demographic information # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.demographics_get_with_http_info(level, id, async_req=True) >>> result = thread.get() :param async_req bool :param str level: Geographic level. 
Valid values are: `Postcode`, `Suburb` (required) :param int id: Location identifier. If the geographic level is Suburb this is a Suburb ID value, if the geographic level is postcode this is a Postcode ID value. See the `/addressLocators` endpoint. (required) :param str types: Comma separated list of demographic data requested. If not provided, all data will be returned. Valid values are: `AgeGroupOfPopulation`, `CountryOfBirth`, `NatureOfOccupancy`, `GeographicalPopulation`, `DwellingStructure`, `HousingLoanRepayment`, `MaritalStatus`, `Religion`, `Occupation`, `EducationAttendance`, `TransportToWork` :param str year: Year of the source data. Valid values are: `2011`, `2016` :return: DomainDemographicsServiceV1ModelDemographicsResultsModel If the method is called asynchronously, returns the request thread. # noqa: E501 # verify the required parameter 'level' is set # noqa: E501 # verify the required parameter 'id' is set # noqa: E501 # noqa: E501 # noqa: E501 # noqa: E501 # noqa: E501 # HTTP header `Accept` # noqa: E501 # Authentication setting # noqa: E501 # noqa: E501 | 2.533288 | 3 |
main/models.py | r4bc1/monaco_2018_racing | 0 | 6621030 | from peewee import *
db = SqliteDatabase("report.db")
class Racer(Model):
    """Peewee model for one race-report row, stored in report.db."""
    position = IntegerField()  # finishing position in the standings
    name = CharField()         # driver's full name
    team = CharField()         # constructor/team name
    time = CharField()         # race/lap time kept as text
    driver_id = CharField()    # short driver code from the source data
    class Meta:
        # Bind the model to the module-level SQLite database.
        database = db
| from peewee import *
db = SqliteDatabase("report.db")
class Racer(Model):
    """Peewee model for one race-report row, stored in report.db."""
    position = IntegerField()  # finishing position in the standings
    name = CharField()         # driver's full name
    team = CharField()         # constructor/team name
    time = CharField()         # race/lap time kept as text
    driver_id = CharField()    # short driver code from the source data
    class Meta:
        # Bind the model to the module-level SQLite database.
        database = db
| none | 1 | 2.443696 | 2 | |
Estrutura_Decisao/pay_your_taxes.py | M3nin0/supreme-broccoli | 0 | 6621031 | <reponame>M3nin0/supreme-broccoli
# Reads the hourly wage and hours worked, then prints the total payroll
# deductions (INSS, FGTS and income tax) and the resulting net salary.
valor = int(input("Insira o valor da hora: "))
hora = int(input("Insira a quntidade de horas trabalhadas: "))
sb = valor * hora  # gross salary
# The income-tax (IR) rate is progressive on the gross salary, while INSS
# (10%) and FGTS (11%) are flat in every bracket -- compute them once
# instead of repeating the same three lines in each branch.
if sb <= 900:
    ir = 0.0
elif sb <= 1500:
    ir = sb * (5 / 100)
elif sb <= 2500:
    ir = sb * (10 / 100)
else:
    ir = sb * (20 / 100)
inss = sb * (10 / 100)
fgts = sb * (11 / 100)
desc = inss + fgts + ir
print ("O valor total dos descontos é:",desc,"\nE seu salario liquido é:",sb - desc)
# Reads the hourly wage and hours worked, then prints the total payroll
# deductions (INSS, FGTS and income tax) and the resulting net salary.
valor = int(input("Insira o valor da hora: "))
hora = int(input("Insira a quntidade de horas trabalhadas: "))
sb = valor * hora  # gross salary
# The income-tax (IR) rate is progressive on the gross salary, while INSS
# (10%) and FGTS (11%) are flat in every bracket -- compute them once
# instead of repeating the same three lines in each branch.
if sb <= 900:
    ir = 0.0
elif sb <= 1500:
    ir = sb * (5 / 100)
elif sb <= 2500:
    ir = sb * (10 / 100)
else:
    ir = sb * (20 / 100)
inss = sb * (10 / 100)
fgts = sb * (11 / 100)
desc = inss + fgts + ir
print ("O valor total dos descontos é:",desc,"\nE seu salario liquido é:",sb - desc)
pkg_tf_micromouse/scripts/obstacle.py | SuyashVerma2311/micromouse_maze_solver | 0 | 6621032 | <filename>pkg_tf_micromouse/scripts/obstacle.py
import rospy
from sensor_msgs.msg import LaserScan
from geometry_msgs.msg import Twist
from nav_msgs.msg import Odometry
from tf import transformations
import numpy as np
class Obstacle(object):
    """Tracks wall distances and robot pose from laser-scan and odometry topics.

    Attributes (filled in by the subscriber callbacks, None until the
    first message arrives):
        dist2wall: five per-sector minimum distances in cm.
        lcr: [left, center, right] subset of dist2wall.
        position: latest odometry position.
        yaw: latest heading in degrees.
    """
    def __init__(self, delta_err=3):
        # NOTE(review): delta_err is currently unused -- confirm whether a
        # tolerance check was planned.
        self.sub = rospy.Subscriber('/my_mm_robot/laser/scan', LaserScan, self.clbk_laser)
        self.sub_odom = rospy.Subscriber('/odom', Odometry, self.clbk_odom)
        self.dist2wall = None
        self.lcr = None  # left-center-right
        self.position = None
        self.yaw = None
    def clbk_laser(self, msg):
        """Split the 360-ray scan into five contiguous 72-ray sectors.

        Each entry is the sector's minimum range, capped at 100 and
        converted to centimetres (rounded int).
        """
        # BUG FIX: the original slices ([0:71], [72:143], ...) silently
        # dropped the rays at indices 71, 143, 215, 287 and 359; use full
        # 72-ray sectors so every ray is considered.
        self.dist2wall = [
            round(100 * min(min(msg.ranges[0:72]), 100)),
            round(100 * min(min(msg.ranges[72:144]), 100)),
            round(100 * min(min(msg.ranges[144:216]), 100)),
            round(100 * min(min(msg.ranges[216:288]), 100)),
            round(100 * min(min(msg.ranges[288:360]), 100)),
        ]
        # Presumably sectors 4/2/0 face left/center/right -- TODO confirm
        # against the sensor mounting.
        self.lcr = [self.dist2wall[4], self.dist2wall[2], self.dist2wall[0]]
    def clbk_odom(self, msg):
        """Cache the latest odometry position and yaw (degrees)."""
        # position
        self.position = msg.pose.pose.position
        # yaw: convert the orientation quaternion to Euler angles and keep
        # the rotation about z, in degrees.
        quaternion = (
            msg.pose.pose.orientation.x,
            msg.pose.pose.orientation.y,
            msg.pose.pose.orientation.z,
            msg.pose.pose.orientation.w)
        euler = transformations.euler_from_quaternion(quaternion)
        self.yaw = (euler[2] * 180.0) / np.pi
    def get_state(self):
        """Return the latest pose as {'pos': position, 'yaw': degrees}."""
        return {"pos": self.position, "yaw": self.yaw}
    def display(self):
        """Print the cached sensor state for debugging."""
        print("d2w: ", self.dist2wall)
        print("lcr: ", self.lcr)
        print("pos: ", self.position)
        print("yaw: ", self.yaw)
print("yaw: ", self.yaw) | <filename>pkg_tf_micromouse/scripts/obstacle.py
import rospy
from sensor_msgs.msg import LaserScan
from geometry_msgs.msg import Twist
from nav_msgs.msg import Odometry
from tf import transformations
import numpy as np
class Obstacle(object):
    """Tracks wall distances and robot pose from laser-scan and odometry topics.

    Attributes (filled in by the subscriber callbacks, None until the
    first message arrives):
        dist2wall: five per-sector minimum distances in cm.
        lcr: [left, center, right] subset of dist2wall.
        position: latest odometry position.
        yaw: latest heading in degrees.
    """
    def __init__(self, delta_err=3):
        # NOTE(review): delta_err is currently unused -- confirm whether a
        # tolerance check was planned.
        self.sub = rospy.Subscriber('/my_mm_robot/laser/scan', LaserScan, self.clbk_laser)
        self.sub_odom = rospy.Subscriber('/odom', Odometry, self.clbk_odom)
        self.dist2wall = None
        self.lcr = None  # left-center-right
        self.position = None
        self.yaw = None
    def clbk_laser(self, msg):
        """Split the 360-ray scan into five contiguous 72-ray sectors.

        Each entry is the sector's minimum range, capped at 100 and
        converted to centimetres (rounded int).
        """
        # BUG FIX: the original slices ([0:71], [72:143], ...) silently
        # dropped the rays at indices 71, 143, 215, 287 and 359; use full
        # 72-ray sectors so every ray is considered.
        self.dist2wall = [
            round(100 * min(min(msg.ranges[0:72]), 100)),
            round(100 * min(min(msg.ranges[72:144]), 100)),
            round(100 * min(min(msg.ranges[144:216]), 100)),
            round(100 * min(min(msg.ranges[216:288]), 100)),
            round(100 * min(min(msg.ranges[288:360]), 100)),
        ]
        # Presumably sectors 4/2/0 face left/center/right -- TODO confirm
        # against the sensor mounting.
        self.lcr = [self.dist2wall[4], self.dist2wall[2], self.dist2wall[0]]
    def clbk_odom(self, msg):
        """Cache the latest odometry position and yaw (degrees)."""
        # position
        self.position = msg.pose.pose.position
        # yaw: convert the orientation quaternion to Euler angles and keep
        # the rotation about z, in degrees.
        quaternion = (
            msg.pose.pose.orientation.x,
            msg.pose.pose.orientation.y,
            msg.pose.pose.orientation.z,
            msg.pose.pose.orientation.w)
        euler = transformations.euler_from_quaternion(quaternion)
        self.yaw = (euler[2] * 180.0) / np.pi
    def get_state(self):
        """Return the latest pose as {'pos': position, 'yaw': degrees}."""
        return {"pos": self.position, "yaw": self.yaw}
    def display(self):
        """Print the cached sensor state for debugging."""
        print("d2w: ", self.dist2wall)
        print("lcr: ", self.lcr)
        print("pos: ", self.position)
        print("yaw: ", self.yaw)
scripts/email-anonymizer.py | bokysan/docker-postfix | 271 | 6621033 | <filename>scripts/email-anonymizer.py<gh_stars>100-1000
#!/usr/bin/env python3
"""
Filter to anonyimize email addresses. It reads input line by line,
finds all emails in the input and masks them using given filter.
Big thanks to [<NAME>](https://github.com/sdelrio)
for the concept and the idea, although not a lot of the code went
into this commit in the end.
"""
import re
import logging
import typing
import json
import sys
import importlib
logger = logging.getLogger(__name__)
# BIG FAT NOTICE on emails and regular expressions:
# If you're planning on using a regular expression to validate an email: don't. Emails
# are much more complext than you would imagine and most regular expressions will not
# cover all usecases. Newer RFCs even allow for international (read: UTF-8) email addresses.
# Most of your favourite programming languages will have a dedicated library for validating
# addresses.
#
# This pattern below, should, however match (hopefully) anything that looks like an email
# It is too broad, though, as it will match things which are not considered valid email
# addresses as well. But for our use case, that's OK and more than sufficient.
EMAIL_CATCH_ALL_PATTERN = '([^ "\\[\\]<>]+|".+")@(\[([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+|[A-Za-z0-9]+:.+)\]|([^ \\{}():;]+(\.[^ \\{}():;]+)*))'
EMAIL_CATCH_ALL = re.compile(EMAIL_CATCH_ALL_PATTERN)
EMPTY_RESPONSE = json.dumps({})
# Postfix formats message IDs like this. Let's not mask them
# 20211207101128.0805BA272@31bfa77a2cab
MESSAGE_ID_PATTERN = '[0-9]+\.[0-9A-F]+@[0-9a-f]+'
MESSAGE_ID = re.compile(MESSAGE_ID_PATTERN)
"""A default filter, if none other is provided."""
DEFAULT_FILTER_CLASS: str = 'SmartFilter'
"""Map filter names to friendly names"""
FILTER_MAPPINGS = {
'default': DEFAULT_FILTER_CLASS,
'smart': 'SmartFilter',
'paranoid': 'ParanoidFilter',
'noop': 'NoopFilter',
}
# ---------------------------------------- #
class Filter():
def init(self, args: list[str]) -> None:
pass
def processMessage(self, msg: str) -> str:
pass
"""
This filter does nothing.
"""
class NoopFilter(Filter):
def processMessage(self, msg: str) -> str:
return EMPTY_RESPONSE
"""
This filter will take an educated guess at how to best mask the emails, specifically:
* It will leave the first and the last letter of the local part (if it's oly one letter, it will get repated)
* If the local part is in quotes, it will remove the quotes (Warning: if the email starts with a space, this might look weird in logs)
* It will replace all the letters inbetween with **ONE** asterisk
* It will replace everything but a TLD with a star
* Address-style domains will see the number replaced with stars
E.g.:
* `<EMAIL>` -> `d*o@*******.org`
* `<EMAIL>` -> `j*e@*******.solutions`
* `sa@localhost` -> `s*a@*********`
* `s@[192.168.8.10]` -> `s*s@[*]`
* `"multi....dot"@[IPv6:2001:db8:85a3:8d3:1319:8a2e:370:7348]` -> `m*t@[IPv6:*]`
"""
class SmartFilter(Filter):
mask_symbol: str = '*'
def mask_local(self, local: str) -> str:
if local[0] == '"' and local[-1] == '"':
return local[:2] + self.mask_symbol + local[-2:]
else:
return local[0] + self.mask_symbol + local[-1]
def mask_domain(self, domain: str) -> str:
if domain[0] == '[' and domain[-1] == ']': # Numerical domain
if ':' in domain[1:-1]:
left, right = domain.split(":", 1)
return left + ':' + (len(right)-1) * self.mask_symbol + ']'
else:
return '[*.*.*.*]'
elif '.' in domain: # Normal domain
s, tld = domain.rsplit('.', 1)
return len(s) * self.mask_symbol + '.' + tld
pass
else: # Local domain
return len(domain) * self.mask_symbol
def replace(self, match: re.match) -> str:
email = match.group()
# Return the details unchanged if they look like Postfix message ID
if bool(MESSAGE_ID.match(email)):
return email
# The "@" can show up in the local part, but shouldn't appear in the
# domain part (at least not that we know).
local, domain = email.rsplit("@", 1)
local = self.mask_local(local)
domain = self.mask_domain(domain)
return local + '@' + domain
def processMessage(self, msg: str) -> typing.Optional[str]:
result = EMAIL_CATCH_ALL.sub(
lambda x: self.replace(x), msg
)
return json.dumps({'msg': result}, ensure_ascii=False) if result != msg else EMPTY_RESPONSE
class ParanoidFilter(SmartFilter):
def mask_local(self, local: str) -> str:
return self.mask_symbol
def mask_domain(self, domain: str) -> str:
if domain[0] == '[' and domain[-1] == ']': # Numerical domain
if ':' in domain[1:-1]:
left, right = domain.split(":", 1)
return left + ':*]'
else:
return '[*]'
elif '.' in domain: # Normal domain
s, tld = domain.rsplit('.', 1)
return self.mask_symbol + '.' + tld
pass
else: # Local domain
return self.mask_symbol
# ---------------------------------------- #
def get_filter() -> Filter:
"""
Initialize the filter
This method will check your configuration and create a new filter
:return: Returns a specific implementation of the `Filter`
"""
opts: list[str] = []
clazz: typing.Optional[str] = None
if len(sys.argv) > 1:
clazz = sys.argv[1].strip()
opts = sys.argv[2:]
if clazz.lower() in FILTER_MAPPINGS:
clazz = FILTER_MAPPINGS[clazz.lower()]
if clazz is None or clazz.strip() == '':
clazz = DEFAULT_FILTER_CLASS
logger.debug(f"Constructing new {clazz} filter.")
try:
if "." in clazz:
module_name, class_name = clazz.rsplit(".", 1)
filter_class = getattr(importlib.import_module(module_name), class_name)
filter_obj: Filter = filter_class()
else:
filter_class = getattr(sys.modules[__name__], clazz)
filter_obj: Filter = filter_class()
except Exception as e:
raise RuntimeError(f'Could not instatiate filter named "{clazz}"!') from e
try:
filter_obj.init(opts)
except Exception as e:
raise RuntimeError(f'Init of filter "{clazz}" with parameters {opts} failed!') from e
return filter_obj
def process(f: Filter) -> None:
while True:
message = sys.stdin.readline()
if message:
message = message[:-1] # Remove line feed
result = f.processMessage(message)
print(result)
sys.stdout.flush()
else:
# Empty line. stdin has been closed
break
process(get_filter()) | <filename>scripts/email-anonymizer.py<gh_stars>100-1000
#!/usr/bin/env python3
"""
Filter to anonyimize email addresses. It reads input line by line,
finds all emails in the input and masks them using given filter.
Big thanks to [<NAME>](https://github.com/sdelrio)
for the concept and the idea, although not a lot of the code went
into this commit in the end.
"""
import re
import logging
import typing
import json
import sys
import importlib
logger = logging.getLogger(__name__)
# BIG FAT NOTICE on emails and regular expressions:
# If you're planning on using a regular expression to validate an email: don't. Emails
# are much more complext than you would imagine and most regular expressions will not
# cover all usecases. Newer RFCs even allow for international (read: UTF-8) email addresses.
# Most of your favourite programming languages will have a dedicated library for validating
# addresses.
#
# This pattern below, should, however match (hopefully) anything that looks like an email
# It is too broad, though, as it will match things which are not considered valid email
# addresses as well. But for our use case, that's OK and more than sufficient.
EMAIL_CATCH_ALL_PATTERN = '([^ "\\[\\]<>]+|".+")@(\[([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+|[A-Za-z0-9]+:.+)\]|([^ \\{}():;]+(\.[^ \\{}():;]+)*))'
EMAIL_CATCH_ALL = re.compile(EMAIL_CATCH_ALL_PATTERN)
EMPTY_RESPONSE = json.dumps({})
# Postfix formats message IDs like this. Let's not mask them
# 20211207101128.0805BA272@31bfa77a2cab
MESSAGE_ID_PATTERN = '[0-9]+\.[0-9A-F]+@[0-9a-f]+'
MESSAGE_ID = re.compile(MESSAGE_ID_PATTERN)
"""A default filter, if none other is provided."""
DEFAULT_FILTER_CLASS: str = 'SmartFilter'
"""Map filter names to friendly names"""
FILTER_MAPPINGS = {
'default': DEFAULT_FILTER_CLASS,
'smart': 'SmartFilter',
'paranoid': 'ParanoidFilter',
'noop': 'NoopFilter',
}
# ---------------------------------------- #
class Filter():
def init(self, args: list[str]) -> None:
pass
def processMessage(self, msg: str) -> str:
pass
"""
This filter does nothing.
"""
class NoopFilter(Filter):
def processMessage(self, msg: str) -> str:
return EMPTY_RESPONSE
"""
This filter will take an educated guess at how to best mask the emails, specifically:
* It will leave the first and the last letter of the local part (if it's oly one letter, it will get repated)
* If the local part is in quotes, it will remove the quotes (Warning: if the email starts with a space, this might look weird in logs)
* It will replace all the letters inbetween with **ONE** asterisk
* It will replace everything but a TLD with a star
* Address-style domains will see the number replaced with stars
E.g.:
* `<EMAIL>` -> `d*o@*******.org`
* `<EMAIL>` -> `j*e@*******.solutions`
* `sa@localhost` -> `s*a@*********`
* `s@[192.168.8.10]` -> `s*s@[*]`
* `"multi....dot"@[IPv6:2001:db8:85a3:8d3:1319:8a2e:370:7348]` -> `m*t@[IPv6:*]`
"""
class SmartFilter(Filter):
mask_symbol: str = '*'
def mask_local(self, local: str) -> str:
if local[0] == '"' and local[-1] == '"':
return local[:2] + self.mask_symbol + local[-2:]
else:
return local[0] + self.mask_symbol + local[-1]
def mask_domain(self, domain: str) -> str:
if domain[0] == '[' and domain[-1] == ']': # Numerical domain
if ':' in domain[1:-1]:
left, right = domain.split(":", 1)
return left + ':' + (len(right)-1) * self.mask_symbol + ']'
else:
return '[*.*.*.*]'
elif '.' in domain: # Normal domain
s, tld = domain.rsplit('.', 1)
return len(s) * self.mask_symbol + '.' + tld
pass
else: # Local domain
return len(domain) * self.mask_symbol
def replace(self, match: re.match) -> str:
email = match.group()
# Return the details unchanged if they look like Postfix message ID
if bool(MESSAGE_ID.match(email)):
return email
# The "@" can show up in the local part, but shouldn't appear in the
# domain part (at least not that we know).
local, domain = email.rsplit("@", 1)
local = self.mask_local(local)
domain = self.mask_domain(domain)
return local + '@' + domain
def processMessage(self, msg: str) -> typing.Optional[str]:
result = EMAIL_CATCH_ALL.sub(
lambda x: self.replace(x), msg
)
return json.dumps({'msg': result}, ensure_ascii=False) if result != msg else EMPTY_RESPONSE
class ParanoidFilter(SmartFilter):
def mask_local(self, local: str) -> str:
return self.mask_symbol
def mask_domain(self, domain: str) -> str:
if domain[0] == '[' and domain[-1] == ']': # Numerical domain
if ':' in domain[1:-1]:
left, right = domain.split(":", 1)
return left + ':*]'
else:
return '[*]'
elif '.' in domain: # Normal domain
s, tld = domain.rsplit('.', 1)
return self.mask_symbol + '.' + tld
pass
else: # Local domain
return self.mask_symbol
# ---------------------------------------- #
def get_filter() -> Filter:
"""
Initialize the filter
This method will check your configuration and create a new filter
:return: Returns a specific implementation of the `Filter`
"""
opts: list[str] = []
clazz: typing.Optional[str] = None
if len(sys.argv) > 1:
clazz = sys.argv[1].strip()
opts = sys.argv[2:]
if clazz.lower() in FILTER_MAPPINGS:
clazz = FILTER_MAPPINGS[clazz.lower()]
if clazz is None or clazz.strip() == '':
clazz = DEFAULT_FILTER_CLASS
logger.debug(f"Constructing new {clazz} filter.")
try:
if "." in clazz:
module_name, class_name = clazz.rsplit(".", 1)
filter_class = getattr(importlib.import_module(module_name), class_name)
filter_obj: Filter = filter_class()
else:
filter_class = getattr(sys.modules[__name__], clazz)
filter_obj: Filter = filter_class()
except Exception as e:
raise RuntimeError(f'Could not instatiate filter named "{clazz}"!') from e
try:
filter_obj.init(opts)
except Exception as e:
raise RuntimeError(f'Init of filter "{clazz}" with parameters {opts} failed!') from e
return filter_obj
def process(f: Filter) -> None:
while True:
message = sys.stdin.readline()
if message:
message = message[:-1] # Remove line feed
result = f.processMessage(message)
print(result)
sys.stdout.flush()
else:
# Empty line. stdin has been closed
break
process(get_filter()) | en | 0.847476 | #!/usr/bin/env python3 Filter to anonyimize email addresses. It reads input line by line, finds all emails in the input and masks them using given filter. Big thanks to [<NAME>](https://github.com/sdelrio) for the concept and the idea, although not a lot of the code went into this commit in the end. # BIG FAT NOTICE on emails and regular expressions: # If you're planning on using a regular expression to validate an email: don't. Emails # are much more complext than you would imagine and most regular expressions will not # cover all usecases. Newer RFCs even allow for international (read: UTF-8) email addresses. # Most of your favourite programming languages will have a dedicated library for validating # addresses. # # This pattern below, should, however match (hopefully) anything that looks like an email # It is too broad, though, as it will match things which are not considered valid email # addresses as well. But for our use case, that's OK and more than sufficient. # Postfix formats message IDs like this. Let's not mask them # 20211207101128.0805BA272@31bfa77a2cab A default filter, if none other is provided. Map filter names to friendly names # ---------------------------------------- # This filter does nothing. 
This filter will take an educated guess at how to best mask the emails, specifically: * It will leave the first and the last letter of the local part (if it's oly one letter, it will get repated) * If the local part is in quotes, it will remove the quotes (Warning: if the email starts with a space, this might look weird in logs) * It will replace all the letters inbetween with **ONE** asterisk * It will replace everything but a TLD with a star * Address-style domains will see the number replaced with stars E.g.: * `<EMAIL>` -> `d*o@*******.org` * `<EMAIL>` -> `j*e@*******.solutions` * `sa@localhost` -> `s*a@*********` * `s@[192.168.8.10]` -> `s*s@[*]` * `"multi....dot"@[IPv6:2001:db8:85a3:8d3:1319:8a2e:370:7348]` -> `m*t@[IPv6:*]` # Numerical domain # Normal domain # Local domain # Return the details unchanged if they look like Postfix message ID # The "@" can show up in the local part, but shouldn't appear in the # domain part (at least not that we know). # Numerical domain # Normal domain # Local domain # ---------------------------------------- # Initialize the filter This method will check your configuration and create a new filter :return: Returns a specific implementation of the `Filter` # Remove line feed # Empty line. stdin has been closed | 3.38834 | 3 |
rascil/__init__.py | SKA-ScienceDataProcessor/rascil | 7 | 6621034 |
from . import data_models
from . import processing_components
from . import workflows
from .processing_components.util.installation_checks import check_data_directory
from astropy.utils import iers, data
check_data_directory()
# iers.conf.auto_max_age = None
iers.conf.remote_timeout = 100.0
data.conf.download_cache_lock_attempts = 10
|
from . import data_models
from . import processing_components
from . import workflows
from .processing_components.util.installation_checks import check_data_directory
from astropy.utils import iers, data
check_data_directory()
# iers.conf.auto_max_age = None
iers.conf.remote_timeout = 100.0
data.conf.download_cache_lock_attempts = 10
| it | 0.542374 | # iers.conf.auto_max_age = None | 1.239912 | 1 |
schema/queries.py | benyakirten/a-weeks-worth-backend | 0 | 6621035 | <filename>schema/queries.py
import graphene
from graphene_django import DjangoListField
from graphql_jwt.decorators import login_required, superuser_required
from aww.models import Individual, Group, Recipe
from .types import (
RecipeStepType,
RecipeIngredientType,
RecipeType,
GroupShoppingItemType,
GroupMealType,
GroupType,
GroupsType,
IndividualShoppingItemType,
IndividualMealType,
IndividualType,
LimitedIndividualType
)
class Query(graphene.ObjectType):
recipes = DjangoListField(RecipeType)
groups = DjangoListField(GroupsType)
recipe = graphene.Field(RecipeType, id=graphene.ID(
required=False), name=graphene.String(required=False))
def resolve_recipe(root, info, id="", name=""):
if not id and not name:
raise Exception("ID or name must be provided")
if id and name:
raise Exception("Both ID and name cannot be provided")
try:
if id:
return Recipe.objects.get(id=id)
if name:
return Recipe.objects.get(name=name)
except:
raise Exception("No recipe found by that id or name")
recipe_urls = graphene.List(graphene.String)
def resolve_recipe_urls(root, info):
_recipes = Recipe.objects.all()
return [recipe.url for recipe in _recipes]
individual = graphene.Field(IndividualType, id=graphene.ID(
required=False), email=graphene.String(required=False))
# This same function is better done by the MeQuery.
# That said, this is a function for a superuser to look for an individual
@superuser_required
def resolve_individual(root, info, id="", email=""):
if not id and not email:
raise Exception("ID or email must be provided")
if id and email:
raise Exception("Both ID and name cannot be provided")
try:
if id:
_user = Individual.objects.get(id=id)
if email:
_individuals = Individual.objects.all()
[_user] = [individualUser for individualUser in _individuals if individualUser.user.email == email]
return _user
except:
raise Exception("No individual found by that id or email")
all_individuals = graphene.List(LimitedIndividualType)
@superuser_required
def resolve_all_individuals(root, info):
return Individual.objects.all()
group = graphene.Field(GroupType, id=graphene.ID(
required=False), name=graphene.String(required=False))
# No idea why you'd ever need this query, given the MeQuery
# But might as well get some experience
@login_required
def resolve_group(root, info, id="", name=""):
if not id and not name:
raise Exception("Id or name must be provided")
try:
if id:
_group = Group.objects.get(id=id)
if name:
_group = Group.objects.get(name=name)
except:
raise Exception("No group found by that id or name")
if info.context.user in [member.user for member in _group.members.all()]:
return _group
else:
raise Exception(
"Single group may only be queried by its members")
my_groups = graphene.Field(graphene.List(GroupType))
@login_required
def resolve_my_groups(root, info):
return info.context.user.individual.groups.all() | <filename>schema/queries.py
import graphene
from graphene_django import DjangoListField
from graphql_jwt.decorators import login_required, superuser_required
from aww.models import Individual, Group, Recipe
from .types import (
RecipeStepType,
RecipeIngredientType,
RecipeType,
GroupShoppingItemType,
GroupMealType,
GroupType,
GroupsType,
IndividualShoppingItemType,
IndividualMealType,
IndividualType,
LimitedIndividualType
)
class Query(graphene.ObjectType):
recipes = DjangoListField(RecipeType)
groups = DjangoListField(GroupsType)
recipe = graphene.Field(RecipeType, id=graphene.ID(
required=False), name=graphene.String(required=False))
def resolve_recipe(root, info, id="", name=""):
if not id and not name:
raise Exception("ID or name must be provided")
if id and name:
raise Exception("Both ID and name cannot be provided")
try:
if id:
return Recipe.objects.get(id=id)
if name:
return Recipe.objects.get(name=name)
except:
raise Exception("No recipe found by that id or name")
recipe_urls = graphene.List(graphene.String)
def resolve_recipe_urls(root, info):
_recipes = Recipe.objects.all()
return [recipe.url for recipe in _recipes]
individual = graphene.Field(IndividualType, id=graphene.ID(
required=False), email=graphene.String(required=False))
# This same function is better done by the MeQuery.
# That said, this is a function for a superuser to look for an individual
@superuser_required
def resolve_individual(root, info, id="", email=""):
if not id and not email:
raise Exception("ID or email must be provided")
if id and email:
raise Exception("Both ID and name cannot be provided")
try:
if id:
_user = Individual.objects.get(id=id)
if email:
_individuals = Individual.objects.all()
[_user] = [individualUser for individualUser in _individuals if individualUser.user.email == email]
return _user
except:
raise Exception("No individual found by that id or email")
all_individuals = graphene.List(LimitedIndividualType)
@superuser_required
def resolve_all_individuals(root, info):
return Individual.objects.all()
group = graphene.Field(GroupType, id=graphene.ID(
required=False), name=graphene.String(required=False))
# No idea why you'd ever need this query, given the MeQuery
# But might as well get some experience
@login_required
def resolve_group(root, info, id="", name=""):
if not id and not name:
raise Exception("Id or name must be provided")
try:
if id:
_group = Group.objects.get(id=id)
if name:
_group = Group.objects.get(name=name)
except:
raise Exception("No group found by that id or name")
if info.context.user in [member.user for member in _group.members.all()]:
return _group
else:
raise Exception(
"Single group may only be queried by its members")
my_groups = graphene.Field(graphene.List(GroupType))
@login_required
def resolve_my_groups(root, info):
return info.context.user.individual.groups.all() | en | 0.962366 | # This same function is better done by the MeQuery. # That said, this is a function for a superuser to look for an individual # No idea why you'd ever need this query, given the MeQuery # But might as well get some experience | 2.588155 | 3 |
vision/api/label/snippets_test.py | baditaflorin/python-docs-samples | 1 | 6621036 | <reponame>baditaflorin/python-docs-samples
#!/usr/bin/env python
# Copyright 2016 Google, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import snippets
def test_crop_hint_response_count(capsys, resource):
snippets.crop_hint(resource('cat.jpg'))
stdout, _ = capsys.readouterr()
result = json.loads(stdout)
assert len(result['responses']) == 1
def test_crop_hint_response_dim(capsys, resource):
snippets.crop_hint(resource('cat.jpg'))
stdout, _ = capsys.readouterr()
result = json.loads(stdout)
crop_hint = result['responses'][0]
crop_hint_annotation = crop_hint['cropHintsAnnotation']['cropHints'][0]
confidence = crop_hint_annotation['confidence']
assert 0.5 < confidence < 0.9
def test_web_annotations(capsys, resource):
snippets.web_annotation(resource('cat.jpg'))
stdout, _ = capsys.readouterr()
result = json.loads(stdout)
web_annotation = result['responses'][0]['webAnnotation']
web_entities = web_annotation['webEntities']
assert len(web_entities) == 10
russian_blue = False
for entity in web_entities:
entity_id = entity['entityId']
desc = entity['description']
if entity_id == '/m/012cc2' and desc == 'Russian Blue':
russian_blue = True
assert russian_blue is True
| #!/usr/bin/env python
# Copyright 2016 Google, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import snippets
def test_crop_hint_response_count(capsys, resource):
snippets.crop_hint(resource('cat.jpg'))
stdout, _ = capsys.readouterr()
result = json.loads(stdout)
assert len(result['responses']) == 1
def test_crop_hint_response_dim(capsys, resource):
snippets.crop_hint(resource('cat.jpg'))
stdout, _ = capsys.readouterr()
result = json.loads(stdout)
crop_hint = result['responses'][0]
crop_hint_annotation = crop_hint['cropHintsAnnotation']['cropHints'][0]
confidence = crop_hint_annotation['confidence']
assert 0.5 < confidence < 0.9
def test_web_annotations(capsys, resource):
snippets.web_annotation(resource('cat.jpg'))
stdout, _ = capsys.readouterr()
result = json.loads(stdout)
web_annotation = result['responses'][0]['webAnnotation']
web_entities = web_annotation['webEntities']
assert len(web_entities) == 10
russian_blue = False
for entity in web_entities:
entity_id = entity['entityId']
desc = entity['description']
if entity_id == '/m/012cc2' and desc == 'Russian Blue':
russian_blue = True
assert russian_blue is True | en | 0.830997 | #!/usr/bin/env python # Copyright 2016 Google, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. | 2.281902 | 2 |
gen_sql_handler.py | freewillfx-azenqos/azm_db_merge | 5 | 6621037 | <gh_stars>1-10
'''
module to handle merging (importing) of (azqdata.db from
azq .azm files) sqlite3 dump lines into a PostgreSQL and Microsoft SQL Server db.
Copyright: Copyright (C) 2016 Freewill FX Co., Ltd. All rights reserved.
'''
from debug_helpers import dprint
import azm_db_constants
from subprocess import call
import os
import sys
import traceback
import time
import datetime
from dateutil.relativedelta import relativedelta
import random
import glob
import pandas as pd
import numpy as np
import pyarrow as pa
from pyarrow import csv
import pyarrow.parquet as pq
PARQUET_COMPRESSION = 'snappy'  # compression codec passed to pyarrow when writing parquet output
WKB_POINT_LAT_LON_BYTES_LEN = 25  # byte length of a WKB 2D point (lat/lon) geometry value
# global vars
g_is_postgre = False  # True when the merge target is PostgreSQL (set in connect())
g_is_ms = False  # True when the merge target is Microsoft SQL Server (set in connect())
g_prev_create_statement_column_names = None  # column names of the last CREATE handled (state shared across handlers; set elsewhere in this module)
g_prev_create_statement_table_name = None  # table name of the last CREATE handled (state shared across handlers; set elsewhere in this module)
g_bulk_insert_mode = True  # always True now - connect() unconditionally re-enables bulk insert mode
g_unmerge_logs_row = None # would be set in --unmerge mode
g_cursor = None  # active DB-API cursor, created in connect(), cleared by close()
g_conn = None  # active DB-API connection, created in connect(), cleared by close()
g_exec_buf = []  # buffer of SQL commands accumulated per-azm, executed before a single commit
"""
now we already use 'autocommit = True' as recommended by MSDN doc
so set g_always_commit to False
old: sometimes imports
work fine without cursor.commit() but after a --unmerge task, imports dont work
anymore until we do commit() after each execute for all tables
"""
# TODO: set/use as global - from args from azm_db_merge - where .db is extracted from azm
g_dir_processing_azm = None  # working dir where the azm's .db is extracted; assigned from args in connect()
# Maps lowercase sqlite/azm column type names to pyarrow types, used to build
# the pyarrow schema when converting dumped CSV data to parquet.
# NOTE(review): if the consumer iterates this dict doing substring replacement,
# insertion order matters ("bigint unique" must precede "bigint"; "int" precedes
# "integer") - confirm against the code that consumes this mapping.
pa_type_replace_dict = {
    "text": pa.string(),
    "bigint unique": pa.int64(),
    "bigint": pa.int64(),
    "biginteger": pa.int64(),
    "int": pa.int32(),
    "integer": pa.int32(),
    "short": pa.int16(),
    "double": pa.float64(),
    "real": pa.float64(),
    "float": pa.float64(),
    "geometry": pa.binary(),
    # timestamps are kept as strings because pyarrow CSV conversion rejects values
    # like '2018-07-24 09:59:48.218' for timestamp[ms]:
    # "In CSV column #0: CSV conversion error to timestamp[ms]: invalid value '2018-07-24 09:59:48.218'"
    "timestamp": pa.string(),
    "datetime": pa.string(),
}
# Maps lowercase column type names to the python/numpy type used for the
# corresponding pandas DataFrame column when preparing parquet output.
# Integer-typed DB columns map to np.float64 (not np.int64) because pandas
# classic integer dtypes cannot hold NULL/NaN; see the inline notes for the
# 'log_hash'-style exceptions.
# NOTE(review): the date/time entries map to the `datetime` *module*
# (see `import datetime` above), not a class - confirm consumers expect that.
KNOWN_COL_TYPES_LOWER_TO_PD_PARQUET_TYPE_DICT = {
    "timestamp": datetime,
    "time": datetime,
    "date": datetime,
    "datetime": datetime,
    "text": str,
    "geometry": str,
    "double": np.float64,
    "real": np.float64,
    "float": np.float64,
    "biginteger": np.float64, # EXCEPT special allowed cols like 'log_hash'
    "bigint": np.float64, # EXCEPT special allowed cols like 'log_hash' that will never be null - they will be np.int64 - but for generic numbers can be null so pd df needs it as float64
    "integer": np.float64, # for generic numbers can be null so pd df needs it as float64
    "int": np.float64, # for generic numbers can be null so pd df needs it as float64
}
### below are functions required/used by azq_db_merge
def connect(args):
global g_bulk_insert_mode
global g_dir_processing_azm
global g_cursor, g_conn
global g_exec_buf
global g_is_ms, g_is_postgre
if (args['target_db_type'] == 'postgresql'):
print("PostgreSQL mode initializing...")
g_is_postgre = True
import psycopg2
elif (args['target_db_type'] == 'mssql'):
g_is_ms = True
import pyodbc
# cleanup old stuff just in case
close(args)
g_bulk_insert_mode = True # always bulk insert mode now
g_dir_processing_azm = args['dir_processing_azm']
if g_is_ms:
print("Connecting... Target DBMS type: mssql")
dprint("connect args: {} {} {} {}".format(args['server_url'],
args['server_user'],
args['server_password'],
args['server_database']
)
)
driver = args['mssql_odbc_driver']
connect_str = 'DRIVER={};SERVER={};DATABASE={};UID={};PWD={}'.format(
driver,
args['server_url'],
args['server_database'],
args['server_user'],
args['server_password'])
#unsafe as users might see in logs print "using connect_str: "+connect_str
"""
https://msdn.microsoft.com/en-us/library/ms131281.aspx
ODBC applications should not use Transact-SQL transaction statements such as
BEGIN TRANSACTION, COMMIT TRANSACTION, or ROLLBACK TRANSACTION because this
can cause indeterminate behavior in the driver. An ODBC application should
run in autocommit mode and not use any transaction management functions or
statements, or run in manual-commit mode and use the ODBC SQLEndTran
function to either commit or roll back transactions.
https://mkleehammer.github.io/pyodbc/api.html >> 'autocommit' in our case set to false and buffer all atomic cmds into g_exec_buf for run once before commit
"""
g_conn = pyodbc.connect(connect_str, autocommit = False)
elif g_is_postgre:
print("Connecting... Target DBMS type: PostgreSQL")
# example: conn = psycopg2.connect("dbname=azqdb user=azqdb")
connect_str = "dbname={} user={} password={} port={}".format(
args['server_database'],
args['server_user'],
args['server_password'],
args['pg_port']
)
print(connect_str)
if args['pg_host'] != None:
connect_str = "host="+args['pg_host']+" "+connect_str
#unsafe as users might see in logs print "using connect_str: "+connect_str
args['connect_str'] = connect_str
g_conn = psycopg2.connect(connect_str)
if (g_conn is None):
print("psycopg2.connect returned None")
return False
print("connected")
g_cursor = g_conn.cursor()
# post connect steps for each dbms
if g_is_postgre and not args['unmerge']:
try_cre_postgis(schema="public") # create postgis at public schema first
if args["pg_schema"] != "public":
print("pg mode create pg_schema:", args["pg_schema"])
try:
with g_conn as c:
g_cursor.execute("create schema if not exists "+args["pg_schema"])
c.commit()
print("success: create schema "+args["pg_schema"]+ " success")
except Exception as e:
estr = str(e)
if 'already exists' in estr:
dprint("schema already exists")
pass
else:
print(("FATAL: CREATE schema failed:"+args["pg_schema"]))
raise e
# create postgis in public only - print "pg using schema start"
# try_cre_postgis(schema=args["pg_schema"]) # inside new schema
if g_is_ms:
pass
''' somehow not working - let qgis detect itself for now...
try:
# set 'f_table_name' to unique so we can blindly insert table_name:geom (on create handlers) to it without checking (let mssql check)
ret = g_cursor.execute("""
CREATE TABLE [dbo].[geometry_columns](
[f_table_catalog] [varchar](50) NULL,
[f_table_schema] [varchar](50) NULL,
[f_table_name] [varchar](100) NULL UNIQUE,
[f_geometry_column] [varchar](50) NULL,
[coord_dimension] [int] NULL,
[srid] [int] NULL,
[geometry_type] [varchar](50) NULL
)
""")
print "created qgis table: geometry_columns"
except Exception as e:
pass
try:
# below execute would raise an exception if it is already created
ret = g_cursor.execute("""
CREATE TABLE spatial_ref_sys (srid INTEGER NOT NULL PRIMARY KEY,auth_name VARCHAR(256) NOT NULL,auth_srid INTEGER NOT NULL,ref_sys_name VARCHAR(256),proj4text VARCHAR(2048) NOT NULL);
""")
print "created qgis table: spatial_ref_sys"
# if control reaches here means the table didn't exist (table was just created and is empty) so insert wgs84 into it...
ret = g_cursor.execute("""
INSERT INTO "spatial_ref_sys" VALUES(4326,'epsg',4326,'WGS 84','+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs');
""")
print "added wgs84 to qgis table: spatial_ref_sys"
except Exception as e:
pass
'''
return True
def try_cre_postgis(schema="public"):
    """Best-effort `CREATE EXTENSION postgis` on the given schema.

    Silently tolerates the extension already existing; any other failure is
    fatal and re-raised (postgis is required for geometry columns).
    NOTE(review): `schema` is interpolated into the SQL - assumed to come from
    trusted config, not user input.
    """
    global g_conn
    global g_cursor
    try:
        # `with g_conn` gives auto-rollback on failure so the connection
        # stays usable; we still commit explicitly on success.
        with g_conn as c:
            sql = "CREATE EXTENSION if not exists postgis SCHEMA {}".format(schema)
            print("try: CREATE EXTENSION postgis on schema:", schema, "sql:", sql)
            g_cursor.execute(sql)
            c.commit()
            print("success: CREATE EXTENSION postgis")
    except Exception as e:
        # guard-clause form: anything other than "already exists" is fatal
        if 'already exists' not in str(e):
            print("FATAL: CREATE EXTENSION postgis - failed - please make sure postgis is correctly installed.")
            raise e
        print("postgis already exists")
def check_if_already_merged(args, log_hash):
    """Check whether this azm log (identified by log_hash) is already in the
    target db's "logs" table, and prepare merge/unmerge accordingly.

    Returns:
        True  - log not present yet, safe to proceed with import, OR
        False - "logs" table itself doesn't exist yet (first-ever import), OR
                (fall-through) unmerge mode queued its delete successfully.
    Raises:
        Exception - when importing a log that already exists (without
        --unmerge), or when --unmerge targets a log/table that isn't there.
    """
    global g_unmerge_logs_row
    global g_cursor
    global g_exec_buf
    global g_is_ms, g_is_postgre
    # NOTE(review): SET search_path is PostgreSQL syntax; assumes pg_schema is
    # always "public" when running in MSSQL mode - TODO confirm.
    if args["pg_schema"] != "public":
        g_cursor.execute("SET search_path = '{}','public';".format(args["pg_schema"]))
    try:
        print("checking if this log_hash has already been imported/merged: "+log_hash)
        # '?' is the pyodbc (MSSQL) placeholder; swapped to '%s' for psycopg2
        sqlstr = "select \"log_hash\" from \"logs\" where \"log_hash\" = ?"
        if g_is_postgre:
            sqlstr = sqlstr.replace("?","%s")
        print(("check log cmd: "+sqlstr))
        row = None
        # use with for auto rollback() on g_conn on exception - otherwise we cant use the cursor again - would fail as: current transaction is aborted, commands ignored until end of transaction block
        with g_conn:
            g_cursor.execute(sqlstr, [log_hash])
            row = g_cursor.fetchone()
        print(("after cmd check if exists row:", row))
        if (row is None):
            # azm never imported
            if (args['unmerge']):
                # unmerge mode - this azm is not in target db
                raise Exception("ABORT: This azm is already not present in target db's logs' table")
            else:
                print("This log hasn't been imported into target db yet - ok to proceed")
                pass
            return True
        else:
            # azm already imported
            if (args['unmerge']):
                #dprint("um0: row: "+str(row))
                if g_is_postgre or g_is_ms:
                    #dprint("upg 0")
                    # row is a tuple - make it a dict
                    #dprint("upg 01")
                    # now we only need 'log_hash' to unmerge and the used odbc cant parse geom too - cols = get_remote_columns(args,'logs')
                    cols = [['log_hash', 'bigint']]
                    #dprint("upg 1: cols: "+str(cols))
                    drow = {}
                    i = 0
                    # zip the fixed column list with the fetched tuple
                    for col in cols:
                        print(row[i])
                        drow[col[0]] = row[i]
                        i = i+1
                    row = drow
                #dprint("um1")
                print("### unmerge mode - delete start for azm: log_hash {}".format(row['log_hash']))
                g_unmerge_logs_row = row
                # queued (not executed) - all deletes run atomically in commit()
                sqlstr = "delete from \"logs\" where \"log_hash\" = '{}'".format(log_hash)
                g_exec_buf.append(sqlstr)
                print("delete from logs table added to g_exec_buf: ", sqlstr)
            else:
                raise Exception("ABORT: This log ({}) has already been imported/exists in target db (use --unmerge to remove first if you want to re-import).".format(log_hash))
    except Exception as e:
        estr = str(e)
        # "Invalid object name"/'42S02' = MSSQL missing table;
        # 'relation ... does not exist' = PostgreSQL missing table
        if ("Invalid object name 'logs'" in estr or '42S02' in estr
            or 'relation "logs" does not exist' in estr):
            print("looks like this is the first-time log import - no table named logs exists yet - ok...")
            if args['unmerge']:
                raise Exception("--unmerge mode called on an empty database: no related 'logs' table exist yet")
            # first time import - no table named logs exists yet
        else:
            type_, value_, traceback_ = sys.exc_info()
            exstr = str(traceback.format_exception(type_, value_, traceback_))
            print("re-raise exception e - ",exstr)
            raise e
    return False
def close(args):
    """Reset the module-level merge state and close cursor/connection.

    Safe to call repeatedly (e.g. as pre-connect cleanup): close failures on
    the cursor/connection are reported as warnings, never raised.
    Always returns True.
    """
    global g_cursor, g_conn
    global g_exec_buf
    global g_prev_create_statement_column_names
    global g_prev_create_statement_table_name
    global g_bulk_insert_mode
    global g_unmerge_logs_row
    print("mssql_handler close() - cleanup()")
    # wipe per-log parser/merge state back to defaults
    g_prev_create_statement_table_name = None
    g_prev_create_statement_column_names = None
    g_unmerge_logs_row = None
    g_bulk_insert_mode = True
    # clear in place so other references to the buffer list stay valid
    del g_exec_buf[:]
    if g_cursor is not None:
        try:
            g_cursor.close()
            g_cursor = None
        except Exception as close_err:
            print("warning: mssql cursor close failed: " + str(close_err))
    if g_conn is not None:
        try:
            g_conn.close()
            g_conn = None
        except Exception as close_err:
            print("warning: mssql conn close failed: " + str(close_err))
    return True
def commit(args, line):
    """Execute every buffered command in g_exec_buf atomically, then commit.

    Buffer entries are either plain SQL strings, or (copy_sql, dump_file_path)
    tuples for psycopg2 COPY-from-stdin bulk loads. In dump_parquet mode most
    SQL is skipped (only the logs-table delete runs) and the produced parquet
    files are pushed to the object store via the `mc` CLI afterwards.
    Returns True on success; raises on any non-tolerated execution failure.
    `line` is unused here - presumably kept for handler-signature uniformity;
    TODO confirm against the dispatching caller.
    """
    global g_cursor, g_conn
    global g_prev_create_statement_table_name
    global g_exec_buf
    g_prev_create_statement_table_name = None
    n = len(g_exec_buf)
    # make sure all create/alters are committed
    g_conn.commit()
    print("### total cmds to execute for operation: "+str(n))
    i = 0
    for buf in g_exec_buf:
        if isinstance(buf, tuple):
            # for COPY from stdin
            buf, dump_fp = buf
            with open(dump_fp, "rb") as dump_fp_fo:
                g_cursor.copy_expert(buf, dump_fp_fo)
        else:
            try:
                if args['dump_parquet']:
                    #print("dump_parquet mode exec buf:", buf)
                    # parquet mode: only the logs-table delete is executed,
                    # all other buffered SQL is skipped
                    skip = True
                    if 'delete from "logs" where' in buf:
                        skip = False
                    if skip:
                        print("dump_parquet mode SKIP exec buf:", buf)
                        continue
                with g_conn: # needed otherwise cursor would become invalid and unmerge would fail for no table cases handled below
                    g_cursor.execute(buf)
            except Exception as e:
                # tolerate delete-from-missing-table only in unmerge mode
                if "does not exist" in str(e) and args['unmerge']:
                    print("WARNING: unmerge exception: {} - but ok for --umnerge mode if exec delete and face - does not exist exception...".format(e))
                else:
                    raise e
        print("# done execute cmd {}/{}: {}".format(i, n, buf))
        i = i + 1
    print("### all cmds exec success - COMMIT now...")
    g_conn.commit()
    print("### COMMIT success...")
    # do mc cp all parquet files to object store...
    if args['dump_parquet']:
        # bucket name: explicit env override, else derived from the web domain
        bucket_name = ""
        if "AZM_BUCKET_NAME_OVERRIDE" in os.environ and os.environ["AZM_BUCKET_NAME_OVERRIDE"]:
            bucket_name = os.environ["AZM_BUCKET_NAME_OVERRIDE"]
        else:
            subdomain = os.environ['WEB_DOMAIN_NAME'].split(".")[0]
            bucket_name = "azm-"+subdomain
        # e.g. "2021_07" -> "2021-07" folder layout in the bucket
        bucket_ym_folder_name = args['log_hash_ym_str'].replace("_", "-")
        if args['unmerge']:
            # deliberately disabled: see comment below about object-store cost
            if False:
                # object listing would cost too much cpu and class a operations so skip this for parquet mode
                rmcmd = "mc find minio_logs/{}/{}/ --name '*_{}.parquet'".format(
                    bucket_name,
                    bucket_ym_folder_name,
                    args['log_hash']
                )
                rmcmd += " --exec 'mc rm {}'"
                print("mc rmcmd:", rmcmd)
                rmcmdret = os.system(rmcmd)
                if rmcmdret != 0:
                    raise Exception("Remove files from object store failed cmcmdret: {}".format(rmcmdret))
            # best-effort: mark the stored size as null for the removed log
            try:
                with g_conn:
                    update_sql = "update uploaded_logs set non_azm_object_size_bytes = null where log_hash = {};".format(args['log_hash'])
                    print("update_sql:", update_sql)
                    g_cursor.execute(update_sql)
            except:
                type_, value_, traceback_ = sys.exc_info()
                exstr = str(traceback.format_exception(type_, value_, traceback_))
                print("WARNING: update uploaded_logs set non_azm_object_size_bytes to null failed exception:", exstr)
        else:
            # upload all produced parquet files for this azm to the bucket
            cpcmd = "mc cp {}/*.parquet minio_logs/{}/{}/".format(
                g_dir_processing_azm,
                bucket_name,
                bucket_ym_folder_name,
            )
            print("mc cpcmd:", cpcmd)
            cpcmdret = os.system(cpcmd)
            if cpcmdret != 0:
                raise Exception("Copy files to object store failed cmcmdret: {}".format(cpcmdret))
            # best-effort: record combined parquet size for accounting
            try:
                combined_pq_size = 0
                for fp in glob.glob('{}/*.parquet'.format(g_dir_processing_azm)):
                    fp_sz = os.path.getsize(fp)
                    combined_pq_size += fp_sz
                with g_conn:
                    update_sql = "update uploaded_logs set non_azm_object_size_bytes = {} where log_hash = {};".format(combined_pq_size, args['log_hash'])
                    print("update_sql:", update_sql)
                    g_cursor.execute(update_sql)
            except:
                type_, value_, traceback_ = sys.exc_info()
                exstr = str(traceback.format_exception(type_, value_, traceback_))
                print("WARNING: update uploaded_logs set non_azm_object_size_bytes to parquets size failed exception:", exstr)
    return True
def find_and_conv_spatialite_blob_to_wkb(csv_line):
    """Find the first Spatialite 'Point' geometry blob (as hex text) in a
    dumped CSV line and replace it with the hex form the target DBMS expects:
    PostGIS EWKB for PostgreSQL, SQL Server CLR geography bytes for MSSQL.

    Returns the (possibly modified) csv_line; if no valid blob candidate is
    found the line is returned unchanged.

    NOTE(review): only the first blob per line is converted - assumes at most
    one geom column per row; confirm against the table dump format.
    """
    # '0001E6100000' = spatialite header 00 + little-endian marker 01
    # + SRID 4326 (0xE6 0x10 0x00 0x00) - start of a wgs84 point blob
    spat_blob_offset = csv_line.find('0001E6100000')
    if spat_blob_offset == -1:
        return csv_line
    # a spatialite point blob is 60 bytes == 120 hex chars; take one extra
    # char so we can verify the blob is followed by a field/line delimiter
    part = csv_line[spat_blob_offset:spat_blob_offset+120+1]
    if len(part) < 121:
        # candidate sits at end-of-line with no trailing delimiter char -
        # cannot validate a full point blob; previously part[120] would
        # raise IndexError here, now we leave the line untouched
        dprint("check of spatialite_geom_part - failed - abort")
        return csv_line
    spatialite_geom_contents = ""
    # delimiter after blob: ',' or newline for postgres csv, '\t' for mssql
    if (g_is_postgre and (part[120] == ',' or part[120] == '\n')) or (g_is_ms and part[120] == '\t'):
        spatialite_geom_contents = part[0:120]
    else:
        dprint("check of spatialite_geom_part - failed - abort")
        return csv_line
    # convert spatialite geometry blob to wkb
    """
    Spatialite BLOB Format (Point)
    ------------------------------
    http://www.gaia-gis.it/gaia-sins/BLOB-Geometry.html
    example:
    0001E6100000DD30C0F46C2A594041432013008E2B40DD30C0F46C2A594041432013008E2B407C01000000DD30C0F46C2A594041432013008E2B40FE
    parse:
    spatialite header: 00 (str_off 0 str_len 2)
    endian: 01 little endian (str_off 2 str_len 2)
    SRID: E6 10 00 00 (str_off 4 str_len 8)
    MBR_MIN_X/Y, MBR_MAX_X/Y: 4 x 8 bytes (str_off 12..74)
    MBR_END: 7C (str_off 76 str_len 2)
    CLASS_TYPE: 01 00 00 00 (str_off 78 str_len 8)
    POINT X: (str_off 86 str_len 16), Y: (str_off 102 str_len 16)
    END: FE (str_off 118 str_len 2)
    ---
    WKB Format: see "3.3.2.6 Description of WKBGeometry Representations"
    in https://portal.opengeospatial.org/files/?artifact_id=829
    WKBPoint { byte byteOrder; uint32 wkbType; Point { double x; double y; } }
    Therefore, for "Point" we need from the spatialite blob only:
    endian, CLASS_TYPE, POINT
    """
    # sanity check: exactly 120 hex chars and starts with the 00 header byte
    if len(spatialite_geom_contents) == 120 and spatialite_geom_contents[0] == '0' and spatialite_geom_contents[1] == '0':
        endian = spatialite_geom_contents[2:4]  # byte-order marker, 2 hex chars
        class_type = "<unset>"
        if g_is_postgre:
            """
            EWKB per https://trac.osgeo.org/postgis/browser/trunk/doc/ZMSgeoms.txt:
            wkbSRID flag = 0x20000000, so point|wkbSRID = 0x20000001
            (little endian: 01000020), followed by srid 4326 = 0x10E6
            (little endian: E6100000) "right after the type integer".
            """
            class_type = "01000020E6100000"
        elif g_is_ms:
            class_type = ""
        point = spatialite_geom_contents[86:118]  # X (16 hex) + Y (16 hex)
        wkb = ""
        if g_is_postgre:
            # example: 01 01000020e6100000 ae17f9ab76565340 59528b140ca03c40
            wkb = endian + class_type + point
        if g_is_ms:
            """
            https://msdn.microsoft.com/en-us/library/ee320529.aspx
            E6100000 SRID=4326, 01 Version=1,
            0C Serialization Properties = V + P (valid, single point),
            then X and Y as 8-byte doubles.
            """
            wkb = "E6100000010C"+point
        csv_line = csv_line.replace(spatialite_geom_contents,wkb,1)
    #dprint("find_and_conv_spatialite_blob_to_wkb ret: "+csv_line)
    return csv_line
def create(args, line):
global g_cursor, g_conn
global g_prev_create_statement_table_name
global g_prev_create_statement_column_names
global g_exec_buf
global g_is_ms, g_is_postgre
global g_unmerge_logs_row
g_prev_create_statement_column_names = None
if args["pg_schema"] != "public":
g_cursor.execute("SET search_path = '{}','public';".format(args["pg_schema"]))
line_adj = sql_adj_line(line)
table_name = get_table_name(line_adj)
schema_per_month_name = "per_month_{}".format(table_name)
if table_name.startswith("spatialite_history"):
return False # omit these tables - import fails
if table_name == "logs":
uline = line.replace('"log_hash" BIGINT,','"log_hash" BIGINT UNIQUE,',1)
print("'logs' table cre - make log_hash unique for this table: ", uline)
line_adj = sql_adj_line(uline)
if table_name == "wifi_scanned":
wifi_scanned_MIN_APP_V0 = 3
wifi_scanned_MIN_APP_V1 = 0
wifi_scanned_MIN_APP_V2 = 742
print("check azm apk ver for wifi_scanned table omit: ", args["azm_apk_version"])
if args["azm_apk_version"] < wifi_scanned_MIN_APP_V0*1000*1000 + wifi_scanned_MIN_APP_V1*1000 + wifi_scanned_MIN_APP_V2:
print("omit invalidly huge wifi_scanned table in older app vers requested by a customer - causes various db issues")
return False
if args['import_geom_column_in_location_table_only'] and table_name != "location":
line_adj = sql_adj_line(line.replace(',"geom" BLOB','',1))
if (g_unmerge_logs_row is not None):
print("### unmerge mode - delete all rows for this azm in table: "+table_name)
""" now we use log_hash - no need to parse time
# remove 3 traling 000 from microsecs str
start_dt_str = str(g_unmerge_logs_row['log_start_time'])[:-3]
end_dt_str = str(g_unmerge_logs_row['log_end_time'])[:-3]
"""
sqlstr = "delete from \""+table_name+"\" where \"log_hash\" = {}".format(g_unmerge_logs_row['log_hash'])
g_exec_buf.append(sqlstr)
return True
g_prev_create_statement_table_name = table_name
sqlstr = line_adj
'''
Now get local columns
Example sqlstr:
CREATE TABLE "browse" ("time" DATETIME,"time_ms" INT,"posid" INT,"seqid" INT,"netid" INT, "Browse_All_Session_Throughput_Avg" real, "Data_Browse_Throughput" real, "Data_Browse_Throughput_Avg" real, "Data_Browse_Total_Loaded_Obj" smallint, "Data_Browse_Total_Page_Obj" smallint, "Data_Browse_Page_Load_Time" real, "Data_Browse_Page_Load_Time_Avg" real, "Data_Browse_Total_Sessions" smallint, "Data_Browse_Total_Success" smallint, "Data_Browse_Total_Fail_Page" smallint, "Data_Browse_Total_Fail_Obj" smallint, "Data_Browse_Total_Timeout" smallint, "Data_Browse_Exterior_Fail_Page" smallint, "Data_Browse_Exterior_Fail_Obj" smallint, "Browse_Throughput" real, "Browse_Throughput_max" real, "Browse_Throughput_min" real, "Browse_Duration" real, "Browse_Duration_max" real, "Browse_Duration_min" real);
'''
# get part inside parenthesis
ls = line_adj.split('" (')
#dprint("ls :" + str(ls))
ls = ls[1].split(");")[0]
# split by comma
ls = ls.split(",")
# parse column names and keep for insert commands
local_column_dict = {}
local_columns = []
local_column_names = []
for lsp in ls:
splitted = lsp.split('"')
if len(splitted) < 3:
raise Exception("failed to get col_name/col_type for lsp: {}".format(lsp))
col_name = splitted[1]
col_type = splitted[2].strip()
omit_col = False
"""
import_geom_column_in_location_table_only feature already implemented at line_adj above
if args['import_geom_column_in_location_table_only'] and col_name == "geom" and table_name != "location":
omit_col = True
"""
if omit_col == False:
local_column_dict[col_name] = col_type
local_columns.append([col_name, col_type])
local_column_names.append(col_name)
# args['prev_create_statement_column_names']
g_prev_create_statement_column_names = str(local_column_names).replace("'","").replace("[","(").replace("]",")")
remote_column_names = None
if (not args['dump_parquet']) or (table_name == "logs"):
try:
#dprint("create sqlstr: "+sqlstr)
if g_is_postgre:
if args['pg10_partition_by_month']:
if table_name == "logs":
# dont partition logs table
pass
else:
# create target partition for this log + table
# ok - partition this table
sqlstr = sqlstr.replace(";","") +" PARTITION BY RANGE (time);"
try:
with g_conn:
g_cursor.execute("SELECT schema_name FROM information_schema.schemata WHERE schema_name = '{}';".format(schema_per_month_name))
if bool(g_cursor.rowcount):
print("schema_per_month_name already exists:", schema_per_month_name)
pass
else:
print("cre schema now because: NOT schema_per_month_name already exists:", schema_per_month_name)
c_table_per_month_sql = "create schema {};".format(schema_per_month_name)
ret = g_cursor.execute(c_table_per_month_sql)
g_conn.commit()
print("success: create per_month ["+c_table_per_month_sql+"] success")
except:
type_, value_, traceback_ = sys.exc_info()
exstr = str(traceback.format_exception(type_, value_, traceback_))
print("WARNING: create table_per_month schema failed - next insert/COPY commands would likely faile now - exstr:", exstr)
#dprint("create sqlstr postgres mod: "+sqlstr)
# postgis automatically creates/maintains "geometry_columns" 'view'
if g_is_ms:
#dprint("create sqlstr mod mssql geom: "+sqlstr)
pass
if g_is_postgre:
with g_conn:
#too slow and high cpu: g_cursor.execute("select * from information_schema.tables where table_schema=%s and table_name=%s", (args["pg_schema"],table_name,))
g_cursor.execute("""
SELECT FROM pg_catalog.pg_class c
JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE n.nspname = %s
AND c.relname = %s
AND c.relkind = 'r'""" , (args["pg_schema"],table_name,))
if bool(g_cursor.rowcount):
print("omit create already existing 'logs' table - raise exception to check columns instead")
raise Exception("table {} already exists - no need to create".format(table_name))
else:
print("table not exists")
ret = None
# use with for auto rollback() on g_conn on expected fails like already exists
with g_conn:
sqlstr = sqlstr.replace('" bigintEGER,', '" bigint,')
print("exec:", sqlstr)
ret = g_cursor.execute(sqlstr)
# commit now otherwise COPY might not see partitions
g_conn.commit()
#dprint("create execute ret: "+str(ret))
""" if control reaches here then the create is successful
- table was not existing earlier - so remote cols must be the same
"""
remote_column_names = local_column_names
except Exception as e:
emsg = str(e)
dprint("create failed: " + emsg + "\n from sqlstr:\n" +
sqlstr+"\nori line:\n"+line)
if ("There is already an object named" in emsg or
" already exists" in emsg):
if args['need_check_remote_cols']:
print(("args['need_check_remote_cols']", args['need_check_remote_cols'], "so must do alter check"))
print("""This table already exists -
checking if all local columns already exist in remote
- otherwise will add each missing cols to
remote table before inserting to it.""")
remote_columns = get_remote_columns(args, table_name)
remote_column_names = get_col_names(remote_columns)
if (len(remote_columns) == 0):
raise Exception("FATAL: failed to parse/list remote columns")
# now get local columns that are not in remote
local_columns_not_in_remote = []
for col in local_columns:
col_name = col[0]
col_type = col[1]
####### quickfix: col_type override for unsigned int32 cols from sqlite (bindLong already) - conv to bigint in pg as pg doesnt have unsigned
if col_name == "lte_volte_rtp_source_ssrc" or col_name == "lte_volte_rtp_timestamp":
# might need to psql to do first manually if log was already imported using older azm_db_merge:
# alter table all_logs.lte_volte_rtp_msg alter column lte_volte_rtp_source_ssrc type bigint;
# alter table all_logs.lte_volte_rtp_msg alter column lte_volte_rtp_timestamp type bigint;
col_type = "bigint"
#######################
is_already_in_table = col_name in remote_column_names
dprint("local_col_name: " + col_name +
" col_type: " + col_type +
" - is_already_in_table: "+str(is_already_in_table))
if (not is_already_in_table):
local_columns_not_in_remote.append(
' "{}" {}'.format(col_name, col_type))
# TODO: handle if different type?
n_cols_to_add = len(local_columns_not_in_remote)
if (n_cols_to_add == 0):
pass
#dprint("n_cols_to_add == 0 - no need to alter table")
else:
print("n_cols_to_add: " + str(n_cols_to_add) + " - need to alter table - add cols:" + str(local_columns_not_in_remote) + "\nremote_cols:\n"+str(remote_columns))
# example: ALTER TABLE dbo.doc_exa ADD column_b VARCHAR(20) NULL, column_c INT NULL ;
alter_str = "ALTER TABLE \"{}\" ".format(table_name)
alter_cols = ""
for new_col in local_columns_not_in_remote:
# not first
prefix = ""
if (alter_cols != ""):
prefix = ", "
alter_cols = alter_cols + prefix + " ADD " + new_col
alter_str = alter_str + alter_cols + ";"
sqlstr = sql_adj_line(alter_str)
print("execute alter_str: " + sqlstr)
exec_creatept_or_alter_handle_concurrency(sqlstr)
# re-get remote cols
remote_columns = get_remote_columns(args, table_name)
remote_column_names = get_col_names(remote_columns)
print(("get_remote_columns after alter: "+str(remote_column_names)))
else:
print(("args['need_check_remote_cols']", args['need_check_remote_cols'], "so no need to do alter check"))
else:
raise Exception("FATAL: create table error - : \nemsg:\n "+emsg+" \nsqlstr:\n"+sqlstr)
local_col_name_to_type_dict = {}
if g_bulk_insert_mode:
if args['pg10_partition_by_month'] and not args['dump_parquet']:
if table_name == "logs":
# dont partition logs table
pass
else:
## check/create partitions for month for log_hash, prev month, after month
ori_log_hash_datetime = args['ori_log_hash_datetime']
months_pt_check_list = [ori_log_hash_datetime+relativedelta(months=-1), ori_log_hash_datetime, ori_log_hash_datetime+relativedelta(months=+1)]
for pre_post_month_log_hash_datetime in months_pt_check_list:
log_hash_ym_str = pre_post_month_log_hash_datetime.strftime('%Y_%m')
#print "log_hash_datetime:", log_hash_datetime
ntn = "logs_{}".format(log_hash_ym_str) # simpler name because we got cases where schema's table name got truncated: activate_dedicated_eps_bearer_context_request_params_3170932708
pltn = "{}.{}".format(schema_per_month_name, ntn)
per_month_table_already_exists = False
with g_conn:
# too slow and high cpu check_sql = "select * from information_schema.tables where table_schema='{}' and table_name='{}'".format(schema_per_month_name, ntn)
check_sql = """SELECT FROM pg_catalog.pg_class c
JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE n.nspname = '{}'
AND c.relname = '{}'
AND c.relkind = 'r'""".format(schema_per_month_name, ntn)
print("check_sql partition of table exists or not:", check_sql)
g_cursor.execute(check_sql)
if bool(g_cursor.rowcount):
per_month_table_already_exists = True
if per_month_table_already_exists:
print("omit create already existing per_month table:", pltn)
pass
else:
print("NOT omit create already existing per_month table:", pltn)
cre_target_pt_sql = "CREATE TABLE {} PARTITION OF {} FOR VALUES from ('{}-1') to ('{}-1');".format(
pltn,
table_name,
pre_post_month_log_hash_datetime.strftime("%Y-%m"),
(pre_post_month_log_hash_datetime+relativedelta(months=+1)).strftime("%Y-%m")
)
if args['pg10_partition_index_log_hash']:
cre_index_for_pt_sql = "CREATE INDEX ON {} (log_hash);".format(pltn)
cre_target_pt_sql += " "+cre_index_for_pt_sql
print(("cre_target_pt_sql:", cre_target_pt_sql))
exec_creatept_or_alter_handle_concurrency(cre_target_pt_sql, allow_exstr_list=[" already exists"])
###### let sqlite3 dump contents of table into file
table_dump_fp = os.path.join(g_dir_processing_azm, table_name + ".csv")
table_dump_format_fp = os.path.join(g_dir_processing_azm, table_name + ".fmt")
#print("table_dump_fp: "+table_dump_fp)
#print("table_dump_format_fp: "+table_dump_format_fp)
# create dump csv of that table
"""
example dump of logs table:
sqlite3 azqdata.db -list -newline "|" -separator "," ".out c:\\azq\\azq_report_gen\\azm_db_merge\\logs.csv" "select * from logs"
"""
# get col list, and hex(col) for blob coulumns
i = 0
col_select = ""
first = True
#dprint("local_columns: "+str(local_columns))
for col in local_columns:
col_name = col[0]
col_type = col[1]
local_col_name_to_type_dict[col_name] = col_type
if first:
first = False
else:
col_select = col_select + ","
pre = " "
post = ""
if col_type == "geometry" or (g_is_postgre and col_type == "bytea") or (g_is_ms and col_type.startswith("varbinary")):
pre = " nullif(hex("
post = "),'')"
if col_name == "geom":
pass
#geom_col_index = i
############## wrong data format fixes
### custom limit bsic len in case matched wrongly entered bsic to long str but pg takes max 5 char len for bsic
if col_name == "modem_time":
# handle invalid modem_time case: 159841018-03-10 07:24:42.191
col_name = "strftime('%Y-%m-%d %H:%M:%f', modem_time) as modem_time"
elif col_name == "gsm_bsic":
col_name = "substr(gsm_bsic, 0, 6) as gsm_bsic" # limit to 5 char len (6 is last index excluding)
elif col_name == "android_cellid_from_cellfile":
col_name = "cast(android_cellid_from_cellfile as int) as android_cellid_from_cellfile" # type cast required to remove non-int in cellfile data
elif col_name.endswith("duration") or col_name.endswith("time"):
# many _duration cols in detected_radion_voice_call_session and in pp_ tables have wrong types or even has right type but values came as "" so would be ,"" in csv which postgres and pyarrow wont allow for double/float/numeric cols - check by col_name only is faster than nullif() on all numericols - as most cases are these _duration cols only
col_name = "nullif({},'') as {}".format(col_name, col_name)
elif table_name == "nr_cell_meas":
# special table handling
if "int" in col_type.lower():
print("nr_cell_meas cast to int: col_name {} col_type {}".format(col_name, col_type))
pre = "cast("
post = " as int)"
elif "double" in col_type.lower():
print("nr_cell_meas cast to double: col_name {} col_type {}".format(col_name, col_type))
pre = "cast("
post = " as double)"
col_select = col_select + pre + col_name + post
i = i + 1
dprint("col_select: "+col_select)
if g_is_ms:
ret = call(
[
args['sqlite3_executable'],
args['file'],
"-ascii",
"-list",
'-separator', azm_db_constants.BULK_INSERT_COL_SEPARATOR_VALUE,
'-newline', azm_db_constants.BULK_INSERT_LINE_SEPARATOR_VALUE,
'.out ' + '"' +table_dump_fp.replace("\\","\\\\") + '"', # double backslash because it needs to go inside sqlite3 cmd parsing again
'select '+col_select+' from '+ table_name + ' where time is not null'
], shell = False
)
if g_is_postgre:
select_sqlstr = 'select '+col_select+' from '+ table_name
# filter all tables but not the main logs table
if table_name != "logs":
pass
select_sqlstr += " where time >= '{}' and time <= '{}'".format(args['log_data_min_time'], args['log_data_max_time'])
#print "select_sqlstr:", select_sqlstr
dump_cmd = [
args['sqlite3_executable'],
args['file'],
"-ascii",
"-csv",
'-separator',',',
'-newline', '\n',
'.out ' + '"' +table_dump_fp.replace("\\","\\\\") + '"', # double backslash because it needs to go inside sqlite3 cmd parsing again
select_sqlstr
]
#dprint("dump_cmd:", dump_cmd)
# if parquet dump mode do only logs table dump to track already imported
start_time = datetime.datetime.now()
if True:#(not args['dump_parquet']) or parquet_arrow_mode or table_name == "logs":
ret = call(
dump_cmd,
shell=False
)
#print("dump_cmd:", dump_cmd)
#print "dump_cmd ret:", ret
append_table_operation_stats(args, table_name, "dump_csv duration:", (datetime.datetime.now() - start_time).total_seconds())
table_dump_fp_ori = table_dump_fp
pqfp = table_dump_fp_ori.replace(".csv","_{}.parquet".format(args['log_hash']))
table_dump_fp_adj = table_dump_fp + "_adj.csv"
geom_format_in_csv_is_wkb = False
# in parquet mode we are modifying geom anyway so assume geom is spatialite format instead of wkb
if (not args['dump_parquet']) or (table_name == "logs"):
geom_format_in_csv_is_wkb = True
start_time = datetime.datetime.now()
with open(table_dump_fp,"rb") as of:
with open(table_dump_fp_adj,"w") as nf: # wb required for windows so that \n is 0x0A - otherwise \n will be 0x0D 0x0A and doest go with our fmt file and only 1 row will be inserted per table csv in bulk inserts...
while True:
ofl = of.readline().decode()
''' this causes python test_browse_performance_timing.py to fail as its json got changed
if g_is_postgre:
ofl = ofl.replace(',""',',') # keep this legacy code for postgres mode code jus to be sure, although we already did nullif checks during sqlite csv dunp...
'''
""" no need to check this, only old stale thread versions would have these cases and will have other cases too so let it crash in all those cases
if ofl.strip() == all_cols_null_line:
continue
"""
ofl = find_and_conv_spatialite_blob_to_wkb(ofl)
if ofl == "":
break
nf.write(ofl)
table_dump_fp = table_dump_fp_adj
append_table_operation_stats(args, table_name, """find_and_conv_spatialite_blob_to_wkb, replace ,"" with , total file duration:""", (datetime.datetime.now() - start_time).total_seconds())
#dprint("dump table: "+table_name+" for bulk insert ret: "+str(ret))
if (ret != 0):
print("WARNING: dump table: "+table_name+" for bulk insert failed - likely sqlite db file error like: database disk image is malformed. In many cases, data is still correct/complete so continue.")
if (os.stat(table_dump_fp).st_size == 0):
print("this table is empty...")
return True
# if control reaches here then the table is not empty
################## read csv to arrow, set types, dump to parqet - return True, but if log_table dont return - let it enter pg too...
# yes, arrow read from csv, convert to pd to mod datetime col and add lat lon is faster than pd.read_sql() and converting fields and to parquet
if args['dump_parquet']:
#print "local_column_names:", local_column_names
pa_column_types = local_column_dict.copy()
for col in list(pa_column_types.keys()):
sqlite_col_type = pa_column_types[col].lower()
if sqlite_col_type in list(pa_type_replace_dict.keys()):
pa_column_types[col] = pa_type_replace_dict[sqlite_col_type]
elif sqlite_col_type.startswith("varchar"):
pa_column_types[col] = "string"
# special cases
if is_datetime_col(col):
# because pyarrow is somehow not taking vals like this so use strings first: In CSV column #0: CSV conversion error to timestamp[ms]: invalid value '2018-07-24 09:59:48.218'
pa_column_types[col] = pa.string()
elif col.endswith("duration"):
pa_column_types[col] = pa.float64()
elif col.endswith("session_master_session_id"):
pa_column_types[col] = pa.string() # some old db invalid type cases
elif pa_column_types[col] == "test":
pa_column_types[col] = pa.string()
elif col == "exynos_basic_info_nr_cellid":
pa_column_types[col] = pa.uint64()
# adj types for pa
start_time = datetime.datetime.now()
print("read csv into pa:", table_dump_fp)
#print("pa_column_types:", pa_column_types)
#print("local_column_names:", local_column_names)
padf = csv.read_csv(
table_dump_fp,
read_options=csv.ReadOptions(
column_names=local_column_names,
autogenerate_column_names=False,
),
parse_options=csv.ParseOptions(
newlines_in_values=True
),
convert_options=csv.ConvertOptions(
column_types=pa_column_types,
null_values=[""],
strings_can_be_null=True,
)
)
append_table_operation_stats(args, table_name, "padf read_csv duration:", (datetime.datetime.now() - start_time).total_seconds())
start_time = datetime.datetime.now()
cur_schema = padf.schema
field_indexes_need_pd_datetime = []
fields_need_pd_datetime = []
field_index_to_drop = []
has_geom_field = False
geom_field_index = None
field_index = -1
signalling_symbol_column_index = None
for field in cur_schema:
field_index += 1
if field.name == "time_ms":
field_index_to_drop.append(field_index)
continue
if table_name == "signalling" and field.name == "symbol":
signalling_symbol_column_index = field_index
# check if has geom
if field.name == "geom":
has_geom_field = True
geom_field_index = field_index
# change type of field in new schema to timestamp if required
if is_datetime_col(field.name):
fields_need_pd_datetime.append(pa.field(field.name, pa.timestamp('ns')))
field_indexes_need_pd_datetime.append(field_index)
##### special mods for each table
if table_name == "signalling":
# create int column 'direction' for faster queries instead of the string 'symbol' column
assert signalling_symbol_column_index is not None
symbol_sr = padf.column(signalling_symbol_column_index).to_pandas().astype(str, copy=False)
direction_sr = pd.Series(np.zeros(len(symbol_sr), dtype=np.uint8))
uplink_mask = symbol_sr == "send"
direction_sr.loc[uplink_mask] = 1
#print "direction_sr.dtype", direction_sr.dtype
padf = padf.append_column(
# org.apache.spark.sql.AnalysisException: Parquet type not supported: INT32 (UINT_8);
# org.apache.spark.sql.AnalysisException: Parquet type not supported: INT32 (UINT_16);
# so had to use uint32
pa.field("direction", pa.uint32()),
pa.Array.from_pandas(direction_sr.astype(np.uint32))
)
#print "symbol_sr:", symbol_sr
#print "direction_sr:", direction_sr
# conv datetime fields with pandas then assign back to padf - do this before adding lat lon as index would change...
for i in range(len(fields_need_pd_datetime)):
index = field_indexes_need_pd_datetime[i]
field = fields_need_pd_datetime[i]
print("converting field index {} name {} to datetime...".format(index, field))
# convert
converted_sr = pd.to_datetime(padf.column(index).to_pandas())
#print "converted_sr head:", converted_sr.head()
# assign it back
# print "padf.schema:\n", padf.schema
padf = padf.set_column(index, field, pa.Array.from_pandas(converted_sr))
if has_geom_field:
# use pandas to decode geom from hex to binary, then extract lat, lon from wkb
geom_sr = padf.column(geom_field_index).to_pandas()
geom_sr_null_mask = pd.isnull(geom_sr)
geom_sr = geom_sr.str.decode('ascii')
geom_sr = geom_sr.fillna("")
#print("ori geom_sr:", geom_sr)
if not geom_format_in_csv_is_wkb:
print("geom in csv is in spatialite format - convert to wkb first...")
spatialite_geom_sr = geom_sr
class_type = "01000020E6100000"
endian = "01" # spatialite_geom_sr.str.slice(start=2, stop=4)
point = spatialite_geom_sr.str.slice(start=86, stop=118) # 86 + 16 + 16
geom_sr = endian + class_type + point # wkb
geom_sr = geom_sr.str.decode("hex")
geom_sr[geom_sr_null_mask] = None
#print('wkb geom_sr.head():', geom_sr.head())
lon_sr = geom_sr.apply(lambda x: None if (pd.isnull(x) or len(x) != WKB_POINT_LAT_LON_BYTES_LEN) else np.frombuffer(x[9:9+8], dtype=np.float64)).astype(np.float64) # X
lat_sr = geom_sr.apply(lambda x: None if (pd.isnull(x) or len(x) != WKB_POINT_LAT_LON_BYTES_LEN) else np.frombuffer(x[9+8:9+8+8], dtype=np.float64)).astype(np.float64) # Y
#print('lon_sr', lon_sr.head())
#print('lat_sr', lat_sr.head())
##### assign all three back to padf
## replace geom with newly converted to binary geom_sr
geom_sr_len = len(geom_sr)
pa_array = None
if pd.isnull(geom_sr).all():
print("geom_sr null all case")
pa_array = pa.array(geom_sr.values.tolist()+[b'']).slice(0, geom_sr_len) # convert tolist() and add [""] then slice() back to ori len required to avoid pyarrow.lib.ArrowInvalid: Field type did not match data type - see azq_report_gen/test_spark_wkb_exception.py
else:
print("not geom_sr null all case")
pa_array = pa.array(geom_sr)
assert pa_array is not None
padf = padf.set_column(geom_field_index, pa.field("geom", "binary"), pa_array)
## insert lat, lon
padf = padf.add_column(geom_field_index+1, pa.field("lat", pa.float64()), pa.Array.from_pandas(lat_sr))
padf = padf.add_column(geom_field_index+2, pa.field("lon", pa.float64()), pa.Array.from_pandas(lon_sr))
# finally drop 'time_ms' legacy column used long ago in mysql where it didnt have milliseconds - not used anymore
for drop_index in field_index_to_drop:
padf = padf.remove_column(drop_index)
#print "padf.schema:\n", padf.schema
append_table_operation_stats(args, table_name, "padf processing and conversion with pd duration:", (datetime.datetime.now() - start_time).total_seconds())
print("padf len:", len(padf))
start_time = datetime.datetime.now()
# use snappy and use_dictionary - https://wesmckinney.com/blog/python-parquet-multithreading/
pq.write_table(padf, pqfp, flavor='spark', compression=PARQUET_COMPRESSION, use_dictionary=True)
assert os.path.isfile(pqfp)
append_table_operation_stats(args, table_name, "pq.write_table duration:", (datetime.datetime.now() - start_time).total_seconds())
print("wrote pqfp:", pqfp)
# if log_table dont return - let it enter pg too...
if table_name == "logs":
pass # import logs table to pg too
else:
return True
if args['target_db_type'] == 'mssql':
# create fmt format file for that table
"""
generate format file:
https://msdn.microsoft.com/en-us/library/ms178129.aspx
format file contents:
https://msdn.microsoft.com/en-us/library/ms191479(v=sql.110).aspx
"""
n_local_cols = len(local_column_names)
fmt = open(table_dump_format_fp,"w")
fmt.write("11.0\n") # ver - 11.0 = SQL Server 2012
fmt.write(str(n_local_cols)+"\n") # n cols
host_field_order = 0 # dyn gen - first inc wil get it to 1
host_file_data_type = "SQLCHAR"
prefix_length = 0
host_file_data_length = 0 # When a delimited text file having a prefix length of 0 and a terminator is imported, the field-length value is ignored, because the storage space used by the field equals the length of the data plus the terminator
terminator = None # dyn gen
server_col_order = None # dyn gen
server_col_name = None # dyn gen
col_coalition = ""
for col in local_column_names:
host_field_order = host_field_order + 1
if (n_local_cols == host_field_order): #last
terminator = azm_db_constants.BULK_INSERT_LINE_SEPARATOR_PARAM
else:
terminator = azm_db_constants.BULK_INSERT_COL_SEPARATOR_PARAM
if not table_name.startswith("wifi_scanned"):
#dprint("remote_column_names: "+str(remote_column_names))
pass
#dprint("col: "+str(col))
server_col_order = remote_column_names.index(col) + 1 # not 0 based
server_col_name = col # always same col name
fmt.write(
'{}\t{}\t{}\t{}\t"{}"\t{}\t"{}"\t"{}"\n'.format(
host_field_order,
host_file_data_type,
prefix_length,
host_file_data_length,
terminator,
server_col_order,
server_col_name,
col_coalition
)
)
fmt.flush()
fmt.close()
# both dump csv and format fmt files are ready
# execute bulk insert sql now
if g_is_ms:
sqlstr = "bulk insert \"{}\" from '{}' with ( formatfile = '{}' );".format(
table_name,
table_dump_fp,
table_dump_format_fp
)
if g_is_postgre:
colnames = ""
first = True
for col in local_column_names:
if not first:
colnames = colnames + ","
if first:
first = False
colnames = colnames + '"' + col + '"'
sqlstr = "copy \"{}\" ({}) from STDIN with (format csv, NULL '')".format(
table_name,
colnames
)
#dprint("START bulk insert sqlstr: "+sqlstr)
g_exec_buf.append((sqlstr, table_dump_fp))
# print("DONE bulk insert - nrows inserted: "+str(ret.rowcount))
return True
### below are functions not used by azq_db_merge
def sql_adj_line(line):
    """Adjust one sqlite3 dump line (CREATE TABLE etc.) so column types are
    valid for the target DBMS (PostgreSQL when g_is_postgre, MSSQL when g_is_ms).

    Returns the adjusted SQL string; the input line is not modified.
    """
    global g_is_postgre
    sqlstr = line
    #sqlstr = sqlstr.replace('`', '"')
    sqlstr = sqlstr.replace("\" Double", "\" float")
    sqlstr = sqlstr.replace("\" double", "\" float")
    sqlstr = sqlstr.replace("\" DOUBLE", "\" float")
    sqlstr = sqlstr.replace("\" FLOAT", "\" float")
    sqlstr = sqlstr.replace("\" smallint", "\" bigint")
    # fix: handle INTEGER before INT - otherwise the INT replace below would
    # corrupt '" INTEGER' into '" bigintEGER' (an invalid SQL type)
    sqlstr = sqlstr.replace("\" INTEGER", "\" bigint")
    sqlstr = sqlstr.replace("\" INT", "\" bigint")
    sqlstr = sqlstr.replace('"geom" BLOB','"geom" geometry',1)
    # sqlite pandas regen db uses lowercase
    sqlstr = sqlstr.replace('"geom" blob','"geom" geometry',1)
    if g_is_postgre:
        sqlstr = sqlstr.replace("\" DATETIME", "\" timestamp")
        sqlstr = sqlstr.replace("\" datetime", "\" timestamp")
        sqlstr = sqlstr.replace("\" BLOB", "\" bytea")
        sqlstr = sqlstr.replace("\" blob", "\" bytea")
        sqlstr = sqlstr.replace('" string', '" text')
    if g_is_ms:
        sqlstr = sqlstr.replace("\" BLOB", "\" varbinary(MAX)")
    # default empty fields to text type
    # sqlstr = sqlstr.replace("\" ,", "\" text,")
    # sqlstr = sqlstr.replace("\" );", "\" text);")
    return sqlstr
def get_table_name(line_adj):
    """Extract the table name from an adjusted CREATE TABLE statement.

    Expects input shaped like: CREATE TABLE "name" (...); - the name is the
    third space-separated token with its double quotes stripped.
    """
    third_token = line_adj.split(" ")[2]
    return third_token.replace('"', "")
def get_col_names(cols):
    """Return only the column names from a list of [col_name, col_type] pairs.

    Idiomatic list comprehension instead of the previous manual append loop.
    """
    return [col[0] for col in cols]
def get_remote_columns(args, table_name):
    """Return [[col_name, col_type], ...] for table_name in the target db.

    PostgreSQL: runs a zero-row select ('where false') so the driver still
    populates cursor.description; col_type is left as an empty string.
    MSSQL: runs sp_columns and parses name/type out of each row's str() repr.

    NOTE(review): the MSSQL parsing splits on ", u" - i.e. the Python 2
    unicode repr of pyodbc rows (u'...') - this likely breaks under
    Python 3; verify against a live pyodbc row repr.
    NOTE(review): returns None implicitly if neither backend flag is set.
    """
    global g_cursor
    global g_is_ms, g_is_postgre
    #dprint("table_name: "+table_name)
    sqlstr = ""
    if g_is_ms:
        sqlstr = "sp_columns @table_name=\"{}\"".format(table_name)
    if g_is_postgre:
        sqlstr = "select * from \"{}\" where false".format(table_name)
    #dprint("check table columns sqlstr: "+sqlstr)
    g_cursor.execute(sqlstr)
    #dprint("query execute ret: "+str(ret))
    rows = g_cursor.fetchall()
    '''
    Now get remote column list for this table...
    '''
    remote_columns = []
    if g_is_postgre:
        # description is available even for the empty result set
        colnames = [desc[0] for desc in g_cursor.description]
        for col in colnames:
            remote_columns.append([col,""])
        return remote_columns
    if g_is_ms:
        # MS SQL
        for row in rows:
            '''
            MSSQL Column str return example:
            row n: {0: u'azqdemo', 1: u'dbo', 2: u'android_metadata', 3: u'locale', 4: -1, 5: u'text', u'DATA_TYPE': -1, 7: 2147483647, 8: None, 9: None, 10: 1, 11: None, 12: None, 13: -1, 14: None, 15: 2147483647, u'COLUMN_DEF': None, 17: u'YES', 18: 35, u'SCALE': None, u'TABLE_NAME': u'android_metadata', u'SQL_DATA_TYPE': -1, 6: 2147483647, u'NULLABLE': 1, u'REMARKS': None, u'CHAR_OCTET_LENGTH': 2147483647, u'COLUMN_NAME': u'locale', u'SQL_DATETIME_SUB': None, u'TABLE_OWNER': u'dbo', 16: 1, u'RADIX': None, u'SS_DATA_TYPE': 35, u'TYPE_NAME': u'text', u'PRECISION': 2147483647, u'IS_NULLABLE': u'YES', u'LENGTH': 2147483647, u'ORDINAL_POSITION': 1, u'TABLE_QUALIFIER': u'azqdemo'}
            Result:
            col_name: locale
            col_type: text
            '''
            rs = str(row)
            #dprint("row n: " + rs)
            # positional parse: token 3 is the column name, token 4 the type
            splitted = rs.split(", u")
            col_name = splitted[3].split("'")[1]
            #dprint("col_name: "+col_name)
            col_type = splitted[4].split("'")[1]
            #dprint("col_type: "+col_type)
            remote_columns.append([col_name,col_type])
        return remote_columns
def exec_creatept_or_alter_handle_concurrency(sqlstr, raise_exception_if_fail=True, allow_exstr_list=None):
    """Execute a CREATE/ALTER statement with retry, tolerating concurrent
    importers racing to create the same object.

    Params:
      sqlstr: the DDL statement to execute via the shared g_cursor.
      raise_exception_if_fail: raise after all retries fail (default True).
      allow_exstr_list: substrings that, when found in the exception
        traceback, mean the failure is acceptable (e.g. 'already exists').

    Returns True on success (or allowed failure), False otherwise.
    """
    global g_conn
    global g_cursor
    # fix: avoid mutable default argument ([] shared across calls)
    if allow_exstr_list is None:
        allow_exstr_list = []
    print(("exec_creatept_or_alter_handle_concurrency START sqlstr: {}".format(sqlstr)))
    ret = False
    prev_exstr = ""
    exec_creatept_or_alter_handle_concurrency_max_retries = 2
    for retry in range(exec_creatept_or_alter_handle_concurrency_max_retries):
        try:
            # use with for auto rollback() on g_conn on expected fails like already exists
            with g_conn as con:
                print(("exec_creatept_or_alter_handle_concurrency retry {} sqlstr: {}".format(retry, sqlstr)))
                execret = g_cursor.execute(sqlstr)
                print(("exec_creatept_or_alter_handle_concurrency retry {} sqlstr: {} execret: {}".format(retry, sqlstr, execret)))
                # commit now otherwise upcoming COPY commands might not see partitions
                con.commit()
                print("exec_creatept_or_alter_handle_concurrency commit done")
                ret = True
                break
        except Exception:
            # fix: was a bare 'except:' which also swallowed KeyboardInterrupt/SystemExit
            type_, value_, traceback_ = sys.exc_info()
            exstr = str(traceback.format_exception(type_, value_, traceback_))
            for allow_case in allow_exstr_list:
                if allow_case in exstr:
                    print("exec_creatept_or_alter_handle_concurrency got exception but matches allow_exstr_list allow_case: {} - so treat as success".format(allow_case))
                    ret = True
                    break
            if ret == True:
                break
            prev_exstr = "WARNING: exec_creatept_or_alter_handle_concurrency retry {} exception: {}".format(retry, exstr)
            print(prev_exstr)
            # jittered backoff so concurrent importers don't retry in lockstep
            sleep_dur = random.random() + 0.5
            time.sleep(sleep_dur)
    print(("exec_creatept_or_alter_handle_concurrency DONE sqlstr: {} - ret {}".format(sqlstr, ret)))
    if ret is False and raise_exception_if_fail:
        raise Exception("exec_creatept_or_alter_handle_concurrency FAILED after max retries: {} prev_exstr: {}".format(exec_creatept_or_alter_handle_concurrency_max_retries, prev_exstr))
    return ret
def is_datetime_col(col):
    """A column holds datetimes when its name ends with 'time', except the
    special 'trip_time' columns which are numeric."""
    if not col.endswith("time"):
        return False
    return not col.endswith("trip_time")
def is_numeric_col_type(col_type):
    """Return True if the sqlite column type (case-insensitive) is numeric.

    Idiom: direct membership test instead of if/return True/return False.
    """
    return col_type.lower() in ("int", "integer", "bigint", "biginteger", "real", "double", "float")
def append_table_operation_stats(args, table, operation, duration):
    """Log one timing sample and record it into args['table_operation_stats'].

    The stats dict holds three parallel lists: 'table', 'operation', 'duration'.
    """
    print("operation_stats: {}:{}:{} seconds".format(table, operation, duration))
    stats = args["table_operation_stats"]
    for key, value in (("table", table), ("operation", operation), ("duration", duration)):
        stats[key].append(value)
| '''
module to handle merging (importing) of (azqdata.db from
azq .azm files) sqlite3 dump lines into a PostgreSQL and Microsoft SQL Server db.
Copyright: Copyright (C) 2016 Freewill FX Co., Ltd. All rights reserved.
'''
from debug_helpers import dprint
import azm_db_constants
from subprocess import call
import os
import sys
import traceback
import time
import datetime
from dateutil.relativedelta import relativedelta
import random
import glob
import pandas as pd
import numpy as np
import pyarrow as pa
from pyarrow import csv
import pyarrow.parquet as pq
PARQUET_COMPRESSION = 'snappy'  # codec passed to pq.write_table() for parquet dumps
# WKB point blob length: 9 header bytes (endian + type + srid) + 8 (X/lon) + 8 (Y/lat)
WKB_POINT_LAT_LON_BYTES_LEN = 25
# global vars
g_is_postgre = False  # True when args['target_db_type'] == 'postgresql'
g_is_ms = False  # True when args['target_db_type'] == 'mssql'
g_prev_create_statement_column_names = None  # column-name list of the last CREATE handled
g_prev_create_statement_table_name = None  # table name of the last CREATE handled
g_bulk_insert_mode = True  # always True now (forced in connect())
g_unmerge_logs_row = None # would be set in --unmerge mode
g_cursor = None  # shared DB cursor, set in connect(), cleared in close()
g_conn = None  # shared DB connection, set in connect(), cleared in close()
g_exec_buf = []  # buffered sql strings or (COPY sql, csv_path) tuples, run in commit()
"""
now we already use 'autocommit = True' as recommended by MSDN doc
so set g_always_commit to False
old: sometimes imports
work fine without cursor.commit() but after a --unmerge task, imports dont work
anymore until we do commit() after each execute for all tables
"""
# TODO: set/use as global - from args from azm_db_merge - where .db is extracted from azm
g_dir_processing_azm = None
# map of lowercase sqlite column type -> pyarrow type for csv -> parquet reads
pa_type_replace_dict = {
    "text": pa.string(),
    "bigint unique": pa.int64(),
    "bigint": pa.int64(),
    "biginteger": pa.int64(),
    "int": pa.int32(),
    "integer": pa.int32(),
    "short": pa.int16(),
    "double": pa.float64(),
    "real": pa.float64(),
    "float": pa.float64(),
    "geometry": pa.binary(),
    # because pyarrow is somehow not taking vals like this so use strings first: In CSV column #0: CSV conversion error to timestamp[ms]: invalid value '2018-07-24 09:59:48.218'
    "timestamp": pa.string(),
    "datetime": pa.string(),
}
# map of lowercase col type -> python/pandas type marker
# NOTE(review): 'datetime' values here are the datetime *module* (import
# datetime), not a class - apparently used only as a marker; confirm
# consumers expect the module object.
KNOWN_COL_TYPES_LOWER_TO_PD_PARQUET_TYPE_DICT = {
    "timestamp": datetime,
    "time": datetime,
    "date": datetime,
    "datetime": datetime,
    "text": str,
    "geometry": str,
    "double": np.float64,
    "real": np.float64,
    "float": np.float64,
    "biginteger": np.float64, # EXCEPT special allowed cols like 'log_hash'
    "bigint": np.float64, # EXCEPT special allowed cols like 'log_hash' that will never be null - they will be np.int64 - but for generic numbers can be null so pd df needs it as float64
    "integer": np.float64, # for generic numbers can be null so pd df needs it as float64
    "int": np.float64, # for generic numbers can be null so pd df needs it as float64
}
### below are functions required/used by azq_db_merge
def connect(args):
    """Open the target DB connection (PostgreSQL or MSSQL) described by args.

    Sets the module mode flags g_is_postgre/g_is_ms, the shared g_conn and
    g_cursor, then performs post-connect setup (PostGIS extension and
    optional pg schema creation). Returns True on success, False if the
    driver returned no connection object.
    """
    global g_bulk_insert_mode
    global g_dir_processing_azm
    global g_cursor, g_conn
    global g_exec_buf
    global g_is_ms, g_is_postgre
    # pick DBMS mode and lazily import only the driver that is needed
    if (args['target_db_type'] == 'postgresql'):
        print("PostgreSQL mode initializing...")
        g_is_postgre = True
        import psycopg2
    elif (args['target_db_type'] == 'mssql'):
        g_is_ms = True
        import pyodbc
    # cleanup old stuff just in case
    close(args)
    g_bulk_insert_mode = True # always bulk insert mode now
    g_dir_processing_azm = args['dir_processing_azm']
    if g_is_ms:
        print("Connecting... Target DBMS type: mssql")
        dprint("connect args: {} {} {} {}".format(
            args['server_url'],
            args['server_user'],
            args['server_password'],
            args['server_database']
            )
        )
        driver = args['mssql_odbc_driver']
        connect_str = 'DRIVER={};SERVER={};DATABASE={};UID={};PWD={}'.format(
            driver,
            args['server_url'],
            args['server_database'],
            args['server_user'],
            args['server_password'])
        #unsafe as users might see in logs print "using connect_str: "+connect_str
        """
        https://msdn.microsoft.com/en-us/library/ms131281.aspx
        ODBC applications should not use Transact-SQL transaction statements such as
        BEGIN TRANSACTION, COMMIT TRANSACTION, or ROLLBACK TRANSACTION because this
        can cause indeterminate behavior in the driver. An ODBC application should
        run in autocommit mode and not use any transaction management functions or
        statements, or run in manual-commit mode and use the ODBC SQLEndTran
        function to either commit or roll back transactions.
        https://mkleehammer.github.io/pyodbc/api.html >> 'autocommit' in our case set to false and buffer all atomic cmds into g_exec_buf for run once before commit
        """
        g_conn = pyodbc.connect(connect_str, autocommit = False)
    elif g_is_postgre:
        print("Connecting... Target DBMS type: PostgreSQL")
        # example: conn = psycopg2.connect("dbname=azqdb user=azqdb")
        connect_str = "dbname={} user={} password={} port={}".format(
            args['server_database'],
            args['server_user'],
            args['server_password'],
            args['pg_port']
        )
        print(connect_str)
        if args['pg_host'] != None:
            connect_str = "host="+args['pg_host']+" "+connect_str
        #unsafe as users might see in logs print "using connect_str: "+connect_str
        args['connect_str'] = connect_str
        g_conn = psycopg2.connect(connect_str)
    if (g_conn is None):
        # NOTE(review): message mentions psycopg2 but this also covers mssql mode
        print("psycopg2.connect returned None")
        return False
    print("connected")
    g_cursor = g_conn.cursor()
    # post connect steps for each dbms
    if g_is_postgre and not args['unmerge']:
        try_cre_postgis(schema="public") # create postgis at public schema first
        if args["pg_schema"] != "public":
            print("pg mode create pg_schema:", args["pg_schema"])
            try:
                with g_conn as c:
                    g_cursor.execute("create schema if not exists "+args["pg_schema"])
                    c.commit()
                    print("success: create schema "+args["pg_schema"]+ " success")
            except Exception as e:
                estr = str(e)
                if 'already exists' in estr:
                    dprint("schema already exists")
                    pass
                else:
                    print(("FATAL: CREATE schema failed:"+args["pg_schema"]))
                    raise e
            # create postgis in public only - print "pg using schema start"
            # try_cre_postgis(schema=args["pg_schema"]) # inside new schema
    if g_is_ms:
        pass
        ''' somehow not working - let qgis detect itself for now...
        try:
            # set 'f_table_name' to unique so we can blindly insert table_name:geom (on create handlers) to it without checking (let mssql check)
            ret = g_cursor.execute("""
            CREATE TABLE [dbo].[geometry_columns](
            [f_table_catalog] [varchar](50) NULL,
            [f_table_schema] [varchar](50) NULL,
            [f_table_name] [varchar](100) NULL UNIQUE,
            [f_geometry_column] [varchar](50) NULL,
            [coord_dimension] [int] NULL,
            [srid] [int] NULL,
            [geometry_type] [varchar](50) NULL
            )
            """)
            print "created qgis table: geometry_columns"
        except Exception as e:
            pass
        try:
            # below execute would raise an exception if it is already created
            ret = g_cursor.execute("""
            CREATE TABLE spatial_ref_sys (srid INTEGER NOT NULL PRIMARY KEY,auth_name VARCHAR(256) NOT NULL,auth_srid INTEGER NOT NULL,ref_sys_name VARCHAR(256),proj4text VARCHAR(2048) NOT NULL);
            """)
            print "created qgis table: spatial_ref_sys"
            # if control reaches here means the table didn't exist (table was just created and is empty) so insert wgs84 into it...
            ret = g_cursor.execute("""
            INSERT INTO "spatial_ref_sys" VALUES(4326,'epsg',4326,'WGS 84','+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs');
            """)
            print "added wgs84 to qgis table: spatial_ref_sys"
        except Exception as e:
            pass
        '''
    return True
def try_cre_postgis(schema="public"):
    """Best-effort CREATE EXTENSION postgis on the given schema.

    Tolerates 'already exists' errors; any other failure is fatal and
    re-raised (postgis is required for geometry columns).
    """
    global g_conn
    global g_cursor
    try:
        with g_conn as con:
            sql = "CREATE EXTENSION if not exists postgis SCHEMA {}".format(schema)
            print("try: CREATE EXTENSION postgis on schema:", schema, "sql:", sql)
            g_cursor.execute(sql)
            con.commit()
            print("success: CREATE EXTENSION postgis")
    except Exception as e:
        if 'already exists' in str(e):
            print("postgis already exists")
        else:
            print("FATAL: CREATE EXTENSION postgis - failed - please make sure postgis is correctly installed.")
            raise e
def check_if_already_merged(args, log_hash):
    """Check the 'logs' table for log_hash and prepare merge/unmerge work.

    Merge mode: returns True when the hash is absent (ok to import),
    raises if it is already present. Unmerge mode: queues the 'delete from
    logs' into g_exec_buf and records the row in g_unmerge_logs_row, or
    raises if the hash is not present.

    NOTE(review): despite the name, the return value is True only when the
    log is absent and the logs table exists; False is returned both after
    queuing an unmerge delete and on first-time import (no logs table yet)
    - confirm callers treat the return accordingly.
    """
    global g_unmerge_logs_row
    global g_cursor
    global g_exec_buf
    global g_is_ms, g_is_postgre
    # non-public schema: make sure the lookups below resolve in that schema
    if args["pg_schema"] != "public":
        g_cursor.execute("SET search_path = '{}','public';".format(args["pg_schema"]))
    try:
        print("checking if this log_hash has already been imported/merged: "+log_hash)
        sqlstr = "select \"log_hash\" from \"logs\" where \"log_hash\" = ?"
        if g_is_postgre:
            # psycopg2 uses %s placeholders, pyodbc uses ?
            sqlstr = sqlstr.replace("?","%s")
        print(("check log cmd: "+sqlstr))
        row = None
        # use with for auto rollback() on g_conn on exception - otherwise we cant use the cursor again - would fail as: current transaction is aborted, commands ignored until end of transaction block
        with g_conn:
            g_cursor.execute(sqlstr, [log_hash])
            row = g_cursor.fetchone()
        print(("after cmd check if exists row:", row))
        if (row is None):
            # azm never imported
            if (args['unmerge']):
                # unmerge mode - this azm is not in target db
                raise Exception("ABORT: This azm is already not present in target db's logs' table")
            else:
                print("This log hasn't been imported into target db yet - ok to proceed")
                pass
            return True
        else:
            # azm already imported
            if (args['unmerge']):
                #dprint("um0: row: "+str(row))
                if g_is_postgre or g_is_ms:
                    #dprint("upg 0")
                    # row is a tuple - make it a dict
                    #dprint("upg 01")
                    # now we only need 'log_hash' to unmerge and the used odbc cant parse geom too - cols = get_remote_columns(args,'logs')
                    cols = [['log_hash', 'bigint']]
                    #dprint("upg 1: cols: "+str(cols))
                    drow = {}
                    i = 0
                    for col in cols:
                        print(row[i])
                        drow[col[0]] = row[i]
                        i = i+1
                    row = drow
                #dprint("um1")
                print("### unmerge mode - delete start for azm: log_hash {}".format(row['log_hash']))
                g_unmerge_logs_row = row
                # queue the delete; it is executed later in commit()
                sqlstr = "delete from \"logs\" where \"log_hash\" = '{}'".format(log_hash)
                g_exec_buf.append(sqlstr)
                print("delete from logs table added to g_exec_buf: ", sqlstr)
            else:
                raise Exception("ABORT: This log ({}) has already been imported/exists in target db (use --unmerge to remove first if you want to re-import).".format(log_hash))
    except Exception as e:
        estr = str(e)
        # missing-table errors mean first-time import (42S02 is the ODBC
        # 'base table not found' SQLSTATE)
        if ("Invalid object name 'logs'" in estr or '42S02' in estr
                or 'relation "logs" does not exist' in estr):
            print("looks like this is the first-time log import - no table named logs exists yet - ok...")
            if args['unmerge']:
                raise Exception("--unmerge mode called on an empty database: no related 'logs' table exist yet")
            # first time import - no table named logs exists yet
        else:
            type_, value_, traceback_ = sys.exc_info()
            exstr = str(traceback.format_exception(type_, value_, traceback_))
            print("re-raise exception e - ",exstr)
            raise e
    return False
def close(args):
    """Reset all module-level merge state and close any open cursor/connection.

    Safe to call repeatedly (connect() calls it as a pre-cleanup). Close
    failures are only logged as warnings. Always returns True.
    """
    global g_cursor, g_conn
    global g_exec_buf
    global g_prev_create_statement_column_names
    global g_prev_create_statement_table_name
    global g_bulk_insert_mode
    global g_unmerge_logs_row
    print("mssql_handler close() - cleanup()")
    # reset per-merge state back to module defaults
    g_prev_create_statement_column_names = None
    g_prev_create_statement_table_name = None
    g_bulk_insert_mode = True
    g_unmerge_logs_row = None
    g_exec_buf.clear()
    if g_cursor is not None:
        try:
            g_cursor.close()
        except Exception as e:
            print("warning: mssql cursor close failed: "+str(e))
        else:
            g_cursor = None
    if g_conn is not None:
        try:
            g_conn.close()
        except Exception as e:
            print("warning: mssql conn close failed: "+str(e))
        else:
            g_conn = None
    return True
def commit(args, line):
    """Execute every buffered command in g_exec_buf, then COMMIT.

    Tuple entries are (COPY sql, csv_path) pairs streamed via copy_expert;
    plain strings are executed directly. In dump_parquet mode only the
    logs-delete (used by --unmerge) is executed and all other buffered sql
    is skipped; the generated .parquet files are then copied to the object
    store with 'mc' and uploaded_logs.non_azm_object_size_bytes is updated.
    The 'line' parameter is unused here (kept for the handler interface).
    Returns True; raises on any failing command or failed 'mc' copy.
    """
    global g_cursor, g_conn
    global g_prev_create_statement_table_name
    global g_exec_buf
    g_prev_create_statement_table_name = None
    n = len(g_exec_buf)
    # make sure all create/alters are committed
    g_conn.commit()
    print("### total cmds to execute for operation: "+str(n))
    i = 0
    for buf in g_exec_buf:
        if isinstance(buf, tuple):
            # for COPY from stdin
            buf, dump_fp = buf
            with open(dump_fp, "rb") as dump_fp_fo:
                g_cursor.copy_expert(buf, dump_fp_fo)
        else:
            try:
                if args['dump_parquet']:
                    #print("dump_parquet mode exec buf:", buf)
                    # parquet mode: only the unmerge logs-delete is executed
                    skip = True
                    if 'delete from "logs" where' in buf:
                        skip = False
                    if skip:
                        print("dump_parquet mode SKIP exec buf:", buf)
                        continue
                with g_conn: # needed otherwise cursor would become invalid and unmerge would fail for no table cases handled below
                    g_cursor.execute(buf)
            except Exception as e:
                if "does not exist" in str(e) and args['unmerge']:
                    print("WARNING: unmerge exception: {} - but ok for --umnerge mode if exec delete and face - does not exist exception...".format(e))
                else:
                    raise e
        print("# done execute cmd {}/{}: {}".format(i, n, buf))
        i = i + 1
    print("### all cmds exec success - COMMIT now...")
    g_conn.commit()
    print("### COMMIT success...")
    # do mc cp all parquet files to object store...
    if args['dump_parquet']:
        bucket_name = ""
        if "AZM_BUCKET_NAME_OVERRIDE" in os.environ and os.environ["AZM_BUCKET_NAME_OVERRIDE"]:
            bucket_name = os.environ["AZM_BUCKET_NAME_OVERRIDE"]
        else:
            # default bucket: 'azm-' + first label of the web domain
            subdomain = os.environ['WEB_DOMAIN_NAME'].split(".")[0]
            bucket_name = "azm-"+subdomain
        bucket_ym_folder_name = args['log_hash_ym_str'].replace("_", "-")
        if args['unmerge']:
            if False:
                # object listing would cost too much cpu and class a operations so skip this for parquet mode
                rmcmd = "mc find minio_logs/{}/{}/ --name '*_{}.parquet'".format(
                    bucket_name,
                    bucket_ym_folder_name,
                    args['log_hash']
                )
                rmcmd += " --exec 'mc rm {}'"
                print("mc rmcmd:", rmcmd)
                rmcmdret = os.system(rmcmd)
                if rmcmdret != 0:
                    raise Exception("Remove files from object store failed cmcmdret: {}".format(rmcmdret))
            try:
                with g_conn:
                    update_sql = "update uploaded_logs set non_azm_object_size_bytes = null where log_hash = {};".format(args['log_hash'])
                    print("update_sql:", update_sql)
                    g_cursor.execute(update_sql)
            except:
                type_, value_, traceback_ = sys.exc_info()
                exstr = str(traceback.format_exception(type_, value_, traceback_))
                print("WARNING: update uploaded_logs set non_azm_object_size_bytes to null failed exception:", exstr)
        else:
            cpcmd = "mc cp {}/*.parquet minio_logs/{}/{}/".format(
                g_dir_processing_azm,
                bucket_name,
                bucket_ym_folder_name,
            )
            print("mc cpcmd:", cpcmd)
            cpcmdret = os.system(cpcmd)
            if cpcmdret != 0:
                raise Exception("Copy files to object store failed cmcmdret: {}".format(cpcmdret))
            try:
                # record the combined parquet size for this log
                combined_pq_size = 0
                for fp in glob.glob('{}/*.parquet'.format(g_dir_processing_azm)):
                    fp_sz = os.path.getsize(fp)
                    combined_pq_size += fp_sz
                with g_conn:
                    update_sql = "update uploaded_logs set non_azm_object_size_bytes = {} where log_hash = {};".format(combined_pq_size, args['log_hash'])
                    print("update_sql:", update_sql)
                    g_cursor.execute(update_sql)
            except:
                type_, value_, traceback_ = sys.exc_info()
                exstr = str(traceback.format_exception(type_, value_, traceback_))
                print("WARNING: update uploaded_logs set non_azm_object_size_bytes to parquets size failed exception:", exstr)
    return True
def find_and_conv_spatialite_blob_to_wkb(csv_line):
    """Find the first spatialite point geometry blob (hex) in a csv line and
    replace it (in the returned string) with the target DBMS's WKB hex form
    (PostgreSQL EWKB-with-SRID, or MSSQL geography serialization).

    Returns the csv line unchanged when no valid spatialite point blob is
    found or the candidate fails validation.
    """
    #print "fac csv_line:", csv_line
    # '0001E6100000' = spatialite header(00) + little endian(01) + SRID 4326
    spat_blob_offset = csv_line.find('0001E6100000')
    if spat_blob_offset == -1:
        return csv_line
    part = csv_line[spat_blob_offset:spat_blob_offset+120+1]
    # fix: guard against IndexError when the candidate blob sits at the very
    # end of the line with no trailing separator/newline - previously
    # part[120] below would raise on such (truncated) candidates
    if len(part) < 121:
        dprint("check of spatialite_geom_part - failed - abort")
        return csv_line
    #print "part[120]:", part[120]
    #dprint("csv_line spatialite_geom_part: "+part)
    spatialite_geom_contents = ""
    # the 120-hex-char blob must be followed by a field/line terminator
    if (g_is_postgre and (part[120] == ',' or part[120] == '\n')) or (g_is_ms and part[120] == '\t'):
        spatialite_geom_contents = part[0:120]
    else:
        dprint("check of spatialite_geom_part - failed - abort")
        return csv_line
    #dprint("spatialite_geom_contents: len "+str(len(spatialite_geom_contents))+" val: "+spatialite_geom_contents)
    # convert spatialite geometry blob to wkb
    """
    Spatialite BLOB Format (Point)
    ------------------------------
    http://www.gaia-gis.it/gaia-sins/BLOB-Geometry.html
    example:
    0001E6100000DD30C0F46C2A594041432013008E2B40DD30C0F46C2A594041432013008E2B407C01000000DD30C0F46C2A594041432013008E2B40FE
    parse:
    spatialite header: 00 (str_off 0 str_len 2)
    endian: 01 little endian (str_off 2 str_len 2) (spec: if this GEOMETRY is BIG_ENDIAN ordered must contain a 0x00 byte value otherwise, if this GEOMETRY is LITTLE_ENDIAN ordered must contain a 0x01 byte value)
    SRID: E6 10 00 00 (str_off 4 str_len 8)
    MBR_MIN_X: DD 30 C0 F4 6C 2A 59 40 (str_off 12 str_len 16)
    MBR_MIN_Y: 41 43 20 13 00 8E 2B 40 (str_off 28 str_len 16)
    MBR_MAX_X: DD 30 C0 F4 6C 2A 59 40 (str_off 42 str_len 16)
    MBR_MAX_Y: 41 43 20 13 00 8E 2B 40 (str_off 58 str_len 16)
    MBR_END: 7C (str_off 76 str_len 2)
    CLASS_TYPE: 01 00 00 00 (str_off 78 str_len 8)
    POINT:
    X: DD 30 C0 F4 6C 2A 59 40 (str_off 86 str_len 16)
    Y: 41 43 20 13 00 8E 2B 40 (str_off 102 str_len 16)
    END: FE (str_off 118 str_len 2)
    ---
    WKB Format
    ----------
    See "3.3.2.6 Description of WKBGeometry Representations"
    in https://portal.opengeospatial.org/files/?artifact_id=829
    Point {
    double x;
    double y;
    };
    WKBPoint {
    byte byteOrder;
    uint32 wkbType; //class_type
    Point point;
    }
    Therefore, for "Point" we need from spatialite blob parts:
    endian, CLASS_TYPE, POINT
    """
    # spatialite blob point size is 60 bytes = 120 chars in hex - as in above example and starts with 00
    if len(spatialite_geom_contents) == 120 and spatialite_geom_contents[0] == '0' and spatialite_geom_contents[1] == '0':
        endian = spatialite_geom_contents[2:4] # 2 + len 2
        class_type = "<unset>"
        if g_is_postgre:
            """
            old code: class_type = spatialite_geom_contents[78:86] # 78 + 8
            change class_type to 'point' BITWISE_OR SRID flag as per https://trac.osgeo.org/postgis/browser/trunk/doc/ZMSgeoms.txt
            "
            wkbSRID = 0x20000000
            If the SRID flag is set it's value is encoded as a 4byte integer
            right after the type integer.
            "
            so our class is pont | wkbSRID = 0x20000001 (little endian 32: 01000020)
            then add srid "right after the type integer"
            our srid = 4326 = 0x10E6 (little endian 32: E6100000)
            therefore, class_type_point_with_srid_wgs84 little_endian is 01000020E6100000
            """
            class_type = "01000020E6100000"
        elif g_is_ms:
            class_type = ""
        point = spatialite_geom_contents[86:118] # 86 + 16 + 16
        wkb = ""
        if g_is_postgre:
            wkb = endian + class_type + point # example: 01 01000020e6100000 ae17f9ab76565340 59528b140ca03c40
        if g_is_ms:
            """
            https://msdn.microsoft.com/en-us/library/ee320529.aspx
            0xE6100000 01 0C 0000000000001440 0000000000002440
            This string is interpreted as shown in the following table.
            Binary value Description
            E6100000 SRID = 4326
            01 Version = 1
            0C Serialization Properties = V + P (geometry is valid, single point)
            0000000000001440 X = 5
            0000000000002440 Y = 10
            """
            wkb = "E6100000010C"+point
        csv_line = csv_line.replace(spatialite_geom_contents,wkb,1)
    else:
        pass
        #dprint("not entering spatialite blob parse - len "+str(len(spatialite_geom_contents)))
    #dprint("find_and_conv_spatialite_blob_to_wkb ret: "+csv_line)
    return csv_line
def create(args, line):
global g_cursor, g_conn
global g_prev_create_statement_table_name
global g_prev_create_statement_column_names
global g_exec_buf
global g_is_ms, g_is_postgre
global g_unmerge_logs_row
g_prev_create_statement_column_names = None
if args["pg_schema"] != "public":
g_cursor.execute("SET search_path = '{}','public';".format(args["pg_schema"]))
line_adj = sql_adj_line(line)
table_name = get_table_name(line_adj)
schema_per_month_name = "per_month_{}".format(table_name)
if table_name.startswith("spatialite_history"):
return False # omit these tables - import fails
if table_name == "logs":
uline = line.replace('"log_hash" BIGINT,','"log_hash" BIGINT UNIQUE,',1)
print("'logs' table cre - make log_hash unique for this table: ", uline)
line_adj = sql_adj_line(uline)
if table_name == "wifi_scanned":
wifi_scanned_MIN_APP_V0 = 3
wifi_scanned_MIN_APP_V1 = 0
wifi_scanned_MIN_APP_V2 = 742
print("check azm apk ver for wifi_scanned table omit: ", args["azm_apk_version"])
if args["azm_apk_version"] < wifi_scanned_MIN_APP_V0*1000*1000 + wifi_scanned_MIN_APP_V1*1000 + wifi_scanned_MIN_APP_V2:
print("omit invalidly huge wifi_scanned table in older app vers requested by a customer - causes various db issues")
return False
if args['import_geom_column_in_location_table_only'] and table_name != "location":
line_adj = sql_adj_line(line.replace(',"geom" BLOB','',1))
if (g_unmerge_logs_row is not None):
print("### unmerge mode - delete all rows for this azm in table: "+table_name)
""" now we use log_hash - no need to parse time
# remove 3 traling 000 from microsecs str
start_dt_str = str(g_unmerge_logs_row['log_start_time'])[:-3]
end_dt_str = str(g_unmerge_logs_row['log_end_time'])[:-3]
"""
sqlstr = "delete from \""+table_name+"\" where \"log_hash\" = {}".format(g_unmerge_logs_row['log_hash'])
g_exec_buf.append(sqlstr)
return True
g_prev_create_statement_table_name = table_name
sqlstr = line_adj
'''
Now get local columns
Example sqlstr:
CREATE TABLE "browse" ("time" DATETIME,"time_ms" INT,"posid" INT,"seqid" INT,"netid" INT, "Browse_All_Session_Throughput_Avg" real, "Data_Browse_Throughput" real, "Data_Browse_Throughput_Avg" real, "Data_Browse_Total_Loaded_Obj" smallint, "Data_Browse_Total_Page_Obj" smallint, "Data_Browse_Page_Load_Time" real, "Data_Browse_Page_Load_Time_Avg" real, "Data_Browse_Total_Sessions" smallint, "Data_Browse_Total_Success" smallint, "Data_Browse_Total_Fail_Page" smallint, "Data_Browse_Total_Fail_Obj" smallint, "Data_Browse_Total_Timeout" smallint, "Data_Browse_Exterior_Fail_Page" smallint, "Data_Browse_Exterior_Fail_Obj" smallint, "Browse_Throughput" real, "Browse_Throughput_max" real, "Browse_Throughput_min" real, "Browse_Duration" real, "Browse_Duration_max" real, "Browse_Duration_min" real);
'''
# get part inside parenthesis
ls = line_adj.split('" (')
#dprint("ls :" + str(ls))
ls = ls[1].split(");")[0]
# split by comma
ls = ls.split(",")
# parse column names and keep for insert commands
local_column_dict = {}
local_columns = []
local_column_names = []
for lsp in ls:
splitted = lsp.split('"')
if len(splitted) < 3:
raise Exception("failed to get col_name/col_type for lsp: {}".format(lsp))
col_name = splitted[1]
col_type = splitted[2].strip()
omit_col = False
"""
import_geom_column_in_location_table_only feature already implemented at line_adj above
if args['import_geom_column_in_location_table_only'] and col_name == "geom" and table_name != "location":
omit_col = True
"""
if omit_col == False:
local_column_dict[col_name] = col_type
local_columns.append([col_name, col_type])
local_column_names.append(col_name)
# args['prev_create_statement_column_names']
g_prev_create_statement_column_names = str(local_column_names).replace("'","").replace("[","(").replace("]",")")
remote_column_names = None
if (not args['dump_parquet']) or (table_name == "logs"):
try:
#dprint("create sqlstr: "+sqlstr)
if g_is_postgre:
if args['pg10_partition_by_month']:
if table_name == "logs":
# dont partition logs table
pass
else:
# create target partition for this log + table
# ok - partition this table
sqlstr = sqlstr.replace(";","") +" PARTITION BY RANGE (time);"
try:
with g_conn:
g_cursor.execute("SELECT schema_name FROM information_schema.schemata WHERE schema_name = '{}';".format(schema_per_month_name))
if bool(g_cursor.rowcount):
print("schema_per_month_name already exists:", schema_per_month_name)
pass
else:
print("cre schema now because: NOT schema_per_month_name already exists:", schema_per_month_name)
c_table_per_month_sql = "create schema {};".format(schema_per_month_name)
ret = g_cursor.execute(c_table_per_month_sql)
g_conn.commit()
print("success: create per_month ["+c_table_per_month_sql+"] success")
except:
type_, value_, traceback_ = sys.exc_info()
exstr = str(traceback.format_exception(type_, value_, traceback_))
print("WARNING: create table_per_month schema failed - next insert/COPY commands would likely faile now - exstr:", exstr)
#dprint("create sqlstr postgres mod: "+sqlstr)
# postgis automatically creates/maintains "geometry_columns" 'view'
if g_is_ms:
#dprint("create sqlstr mod mssql geom: "+sqlstr)
pass
if g_is_postgre:
with g_conn:
#too slow and high cpu: g_cursor.execute("select * from information_schema.tables where table_schema=%s and table_name=%s", (args["pg_schema"],table_name,))
g_cursor.execute("""
SELECT FROM pg_catalog.pg_class c
JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE n.nspname = %s
AND c.relname = %s
AND c.relkind = 'r'""" , (args["pg_schema"],table_name,))
if bool(g_cursor.rowcount):
print("omit create already existing 'logs' table - raise exception to check columns instead")
raise Exception("table {} already exists - no need to create".format(table_name))
else:
print("table not exists")
ret = None
# use with for auto rollback() on g_conn on expected fails like already exists
with g_conn:
sqlstr = sqlstr.replace('" bigintEGER,', '" bigint,')
print("exec:", sqlstr)
ret = g_cursor.execute(sqlstr)
# commit now otherwise COPY might not see partitions
g_conn.commit()
#dprint("create execute ret: "+str(ret))
""" if control reaches here then the create is successful
- table was not existing earlier - so remote cols must be the same
"""
remote_column_names = local_column_names
except Exception as e:
emsg = str(e)
dprint("create failed: " + emsg + "\n from sqlstr:\n" +
sqlstr+"\nori line:\n"+line)
if ("There is already an object named" in emsg or
" already exists" in emsg):
if args['need_check_remote_cols']:
print(("args['need_check_remote_cols']", args['need_check_remote_cols'], "so must do alter check"))
print("""This table already exists -
checking if all local columns already exist in remote
- otherwise will add each missing cols to
remote table before inserting to it.""")
remote_columns = get_remote_columns(args, table_name)
remote_column_names = get_col_names(remote_columns)
if (len(remote_columns) == 0):
raise Exception("FATAL: failed to parse/list remote columns")
# now get local columns that are not in remote
local_columns_not_in_remote = []
for col in local_columns:
col_name = col[0]
col_type = col[1]
####### quickfix: col_type override for unsigned int32 cols from sqlite (bindLong already) - conv to bigint in pg as pg doesnt have unsigned
if col_name == "lte_volte_rtp_source_ssrc" or col_name == "lte_volte_rtp_timestamp":
# might need to psql to do first manually if log was already imported using older azm_db_merge:
# alter table all_logs.lte_volte_rtp_msg alter column lte_volte_rtp_source_ssrc type bigint;
# alter table all_logs.lte_volte_rtp_msg alter column lte_volte_rtp_timestamp type bigint;
col_type = "bigint"
#######################
is_already_in_table = col_name in remote_column_names
dprint("local_col_name: " + col_name +
" col_type: " + col_type +
" - is_already_in_table: "+str(is_already_in_table))
if (not is_already_in_table):
local_columns_not_in_remote.append(
' "{}" {}'.format(col_name, col_type))
# TODO: handle if different type?
n_cols_to_add = len(local_columns_not_in_remote)
if (n_cols_to_add == 0):
pass
#dprint("n_cols_to_add == 0 - no need to alter table")
else:
print("n_cols_to_add: " + str(n_cols_to_add) + " - need to alter table - add cols:" + str(local_columns_not_in_remote) + "\nremote_cols:\n"+str(remote_columns))
# example: ALTER TABLE dbo.doc_exa ADD column_b VARCHAR(20) NULL, column_c INT NULL ;
alter_str = "ALTER TABLE \"{}\" ".format(table_name)
alter_cols = ""
for new_col in local_columns_not_in_remote:
# not first
prefix = ""
if (alter_cols != ""):
prefix = ", "
alter_cols = alter_cols + prefix + " ADD " + new_col
alter_str = alter_str + alter_cols + ";"
sqlstr = sql_adj_line(alter_str)
print("execute alter_str: " + sqlstr)
exec_creatept_or_alter_handle_concurrency(sqlstr)
# re-get remote cols
remote_columns = get_remote_columns(args, table_name)
remote_column_names = get_col_names(remote_columns)
print(("get_remote_columns after alter: "+str(remote_column_names)))
else:
print(("args['need_check_remote_cols']", args['need_check_remote_cols'], "so no need to do alter check"))
else:
raise Exception("FATAL: create table error - : \nemsg:\n "+emsg+" \nsqlstr:\n"+sqlstr)
local_col_name_to_type_dict = {}
if g_bulk_insert_mode:
if args['pg10_partition_by_month'] and not args['dump_parquet']:
if table_name == "logs":
# dont partition logs table
pass
else:
## check/create partitions for month for log_hash, prev month, after month
ori_log_hash_datetime = args['ori_log_hash_datetime']
months_pt_check_list = [ori_log_hash_datetime+relativedelta(months=-1), ori_log_hash_datetime, ori_log_hash_datetime+relativedelta(months=+1)]
for pre_post_month_log_hash_datetime in months_pt_check_list:
log_hash_ym_str = pre_post_month_log_hash_datetime.strftime('%Y_%m')
#print "log_hash_datetime:", log_hash_datetime
ntn = "logs_{}".format(log_hash_ym_str) # simpler name because we got cases where schema's table name got truncated: activate_dedicated_eps_bearer_context_request_params_3170932708
pltn = "{}.{}".format(schema_per_month_name, ntn)
per_month_table_already_exists = False
with g_conn:
# too slow and high cpu check_sql = "select * from information_schema.tables where table_schema='{}' and table_name='{}'".format(schema_per_month_name, ntn)
check_sql = """SELECT FROM pg_catalog.pg_class c
JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE n.nspname = '{}'
AND c.relname = '{}'
AND c.relkind = 'r'""".format(schema_per_month_name, ntn)
print("check_sql partition of table exists or not:", check_sql)
g_cursor.execute(check_sql)
if bool(g_cursor.rowcount):
per_month_table_already_exists = True
if per_month_table_already_exists:
print("omit create already existing per_month table:", pltn)
pass
else:
print("NOT omit create already existing per_month table:", pltn)
cre_target_pt_sql = "CREATE TABLE {} PARTITION OF {} FOR VALUES from ('{}-1') to ('{}-1');".format(
pltn,
table_name,
pre_post_month_log_hash_datetime.strftime("%Y-%m"),
(pre_post_month_log_hash_datetime+relativedelta(months=+1)).strftime("%Y-%m")
)
if args['pg10_partition_index_log_hash']:
cre_index_for_pt_sql = "CREATE INDEX ON {} (log_hash);".format(pltn)
cre_target_pt_sql += " "+cre_index_for_pt_sql
print(("cre_target_pt_sql:", cre_target_pt_sql))
exec_creatept_or_alter_handle_concurrency(cre_target_pt_sql, allow_exstr_list=[" already exists"])
###### let sqlite3 dump contents of table into file
table_dump_fp = os.path.join(g_dir_processing_azm, table_name + ".csv")
table_dump_format_fp = os.path.join(g_dir_processing_azm, table_name + ".fmt")
#print("table_dump_fp: "+table_dump_fp)
#print("table_dump_format_fp: "+table_dump_format_fp)
# create dump csv of that table
"""
example dump of logs table:
sqlite3 azqdata.db -list -newline "|" -separator "," ".out c:\\azq\\azq_report_gen\\azm_db_merge\\logs.csv" "select * from logs"
"""
# get col list, and hex(col) for blob coulumns
i = 0
col_select = ""
first = True
#dprint("local_columns: "+str(local_columns))
for col in local_columns:
col_name = col[0]
col_type = col[1]
local_col_name_to_type_dict[col_name] = col_type
if first:
first = False
else:
col_select = col_select + ","
pre = " "
post = ""
if col_type == "geometry" or (g_is_postgre and col_type == "bytea") or (g_is_ms and col_type.startswith("varbinary")):
pre = " nullif(hex("
post = "),'')"
if col_name == "geom":
pass
#geom_col_index = i
############## wrong data format fixes
### custom limit bsic len in case matched wrongly entered bsic to long str but pg takes max 5 char len for bsic
if col_name == "modem_time":
# handle invalid modem_time case: 159841018-03-10 07:24:42.191
col_name = "strftime('%Y-%m-%d %H:%M:%f', modem_time) as modem_time"
elif col_name == "gsm_bsic":
col_name = "substr(gsm_bsic, 0, 6) as gsm_bsic" # limit to 5 char len (6 is last index excluding)
elif col_name == "android_cellid_from_cellfile":
col_name = "cast(android_cellid_from_cellfile as int) as android_cellid_from_cellfile" # type cast required to remove non-int in cellfile data
elif col_name.endswith("duration") or col_name.endswith("time"):
# many _duration cols in detected_radion_voice_call_session and in pp_ tables have wrong types or even has right type but values came as "" so would be ,"" in csv which postgres and pyarrow wont allow for double/float/numeric cols - check by col_name only is faster than nullif() on all numericols - as most cases are these _duration cols only
col_name = "nullif({},'') as {}".format(col_name, col_name)
elif table_name == "nr_cell_meas":
# special table handling
if "int" in col_type.lower():
print("nr_cell_meas cast to int: col_name {} col_type {}".format(col_name, col_type))
pre = "cast("
post = " as int)"
elif "double" in col_type.lower():
print("nr_cell_meas cast to double: col_name {} col_type {}".format(col_name, col_type))
pre = "cast("
post = " as double)"
col_select = col_select + pre + col_name + post
i = i + 1
dprint("col_select: "+col_select)
if g_is_ms:
ret = call(
[
args['sqlite3_executable'],
args['file'],
"-ascii",
"-list",
'-separator', azm_db_constants.BULK_INSERT_COL_SEPARATOR_VALUE,
'-newline', azm_db_constants.BULK_INSERT_LINE_SEPARATOR_VALUE,
'.out ' + '"' +table_dump_fp.replace("\\","\\\\") + '"', # double backslash because it needs to go inside sqlite3 cmd parsing again
'select '+col_select+' from '+ table_name + ' where time is not null'
], shell = False
)
if g_is_postgre:
select_sqlstr = 'select '+col_select+' from '+ table_name
# filter all tables but not the main logs table
if table_name != "logs":
pass
select_sqlstr += " where time >= '{}' and time <= '{}'".format(args['log_data_min_time'], args['log_data_max_time'])
#print "select_sqlstr:", select_sqlstr
dump_cmd = [
args['sqlite3_executable'],
args['file'],
"-ascii",
"-csv",
'-separator',',',
'-newline', '\n',
'.out ' + '"' +table_dump_fp.replace("\\","\\\\") + '"', # double backslash because it needs to go inside sqlite3 cmd parsing again
select_sqlstr
]
#dprint("dump_cmd:", dump_cmd)
# if parquet dump mode do only logs table dump to track already imported
start_time = datetime.datetime.now()
if True:#(not args['dump_parquet']) or parquet_arrow_mode or table_name == "logs":
ret = call(
dump_cmd,
shell=False
)
#print("dump_cmd:", dump_cmd)
#print "dump_cmd ret:", ret
append_table_operation_stats(args, table_name, "dump_csv duration:", (datetime.datetime.now() - start_time).total_seconds())
table_dump_fp_ori = table_dump_fp
pqfp = table_dump_fp_ori.replace(".csv","_{}.parquet".format(args['log_hash']))
table_dump_fp_adj = table_dump_fp + "_adj.csv"
geom_format_in_csv_is_wkb = False
# in parquet mode we are modifying geom anyway so assume geom is spatialite format instead of wkb
if (not args['dump_parquet']) or (table_name == "logs"):
geom_format_in_csv_is_wkb = True
start_time = datetime.datetime.now()
with open(table_dump_fp,"rb") as of:
with open(table_dump_fp_adj,"w") as nf: # wb required for windows so that \n is 0x0A - otherwise \n will be 0x0D 0x0A and doest go with our fmt file and only 1 row will be inserted per table csv in bulk inserts...
while True:
ofl = of.readline().decode()
''' this causes python test_browse_performance_timing.py to fail as its json got changed
if g_is_postgre:
ofl = ofl.replace(',""',',') # keep this legacy code for postgres mode code jus to be sure, although we already did nullif checks during sqlite csv dunp...
'''
""" no need to check this, only old stale thread versions would have these cases and will have other cases too so let it crash in all those cases
if ofl.strip() == all_cols_null_line:
continue
"""
ofl = find_and_conv_spatialite_blob_to_wkb(ofl)
if ofl == "":
break
nf.write(ofl)
table_dump_fp = table_dump_fp_adj
append_table_operation_stats(args, table_name, """find_and_conv_spatialite_blob_to_wkb, replace ,"" with , total file duration:""", (datetime.datetime.now() - start_time).total_seconds())
#dprint("dump table: "+table_name+" for bulk insert ret: "+str(ret))
if (ret != 0):
print("WARNING: dump table: "+table_name+" for bulk insert failed - likely sqlite db file error like: database disk image is malformed. In many cases, data is still correct/complete so continue.")
if (os.stat(table_dump_fp).st_size == 0):
print("this table is empty...")
return True
# if control reaches here then the table is not empty
################## read csv to arrow, set types, dump to parqet - return True, but if log_table dont return - let it enter pg too...
# yes, arrow read from csv, convert to pd to mod datetime col and add lat lon is faster than pd.read_sql() and converting fields and to parquet
if args['dump_parquet']:
#print "local_column_names:", local_column_names
pa_column_types = local_column_dict.copy()
for col in list(pa_column_types.keys()):
sqlite_col_type = pa_column_types[col].lower()
if sqlite_col_type in list(pa_type_replace_dict.keys()):
pa_column_types[col] = pa_type_replace_dict[sqlite_col_type]
elif sqlite_col_type.startswith("varchar"):
pa_column_types[col] = "string"
# special cases
if is_datetime_col(col):
# because pyarrow is somehow not taking vals like this so use strings first: In CSV column #0: CSV conversion error to timestamp[ms]: invalid value '2018-07-24 09:59:48.218'
pa_column_types[col] = pa.string()
elif col.endswith("duration"):
pa_column_types[col] = pa.float64()
elif col.endswith("session_master_session_id"):
pa_column_types[col] = pa.string() # some old db invalid type cases
elif pa_column_types[col] == "test":
pa_column_types[col] = pa.string()
elif col == "exynos_basic_info_nr_cellid":
pa_column_types[col] = pa.uint64()
# adj types for pa
start_time = datetime.datetime.now()
print("read csv into pa:", table_dump_fp)
#print("pa_column_types:", pa_column_types)
#print("local_column_names:", local_column_names)
padf = csv.read_csv(
table_dump_fp,
read_options=csv.ReadOptions(
column_names=local_column_names,
autogenerate_column_names=False,
),
parse_options=csv.ParseOptions(
newlines_in_values=True
),
convert_options=csv.ConvertOptions(
column_types=pa_column_types,
null_values=[""],
strings_can_be_null=True,
)
)
append_table_operation_stats(args, table_name, "padf read_csv duration:", (datetime.datetime.now() - start_time).total_seconds())
start_time = datetime.datetime.now()
cur_schema = padf.schema
field_indexes_need_pd_datetime = []
fields_need_pd_datetime = []
field_index_to_drop = []
has_geom_field = False
geom_field_index = None
field_index = -1
signalling_symbol_column_index = None
for field in cur_schema:
field_index += 1
if field.name == "time_ms":
field_index_to_drop.append(field_index)
continue
if table_name == "signalling" and field.name == "symbol":
signalling_symbol_column_index = field_index
# check if has geom
if field.name == "geom":
has_geom_field = True
geom_field_index = field_index
# change type of field in new schema to timestamp if required
if is_datetime_col(field.name):
fields_need_pd_datetime.append(pa.field(field.name, pa.timestamp('ns')))
field_indexes_need_pd_datetime.append(field_index)
##### special mods for each table
if table_name == "signalling":
# create int column 'direction' for faster queries instead of the string 'symbol' column
assert signalling_symbol_column_index is not None
symbol_sr = padf.column(signalling_symbol_column_index).to_pandas().astype(str, copy=False)
direction_sr = pd.Series(np.zeros(len(symbol_sr), dtype=np.uint8))
uplink_mask = symbol_sr == "send"
direction_sr.loc[uplink_mask] = 1
#print "direction_sr.dtype", direction_sr.dtype
padf = padf.append_column(
# org.apache.spark.sql.AnalysisException: Parquet type not supported: INT32 (UINT_8);
# org.apache.spark.sql.AnalysisException: Parquet type not supported: INT32 (UINT_16);
# so had to use uint32
pa.field("direction", pa.uint32()),
pa.Array.from_pandas(direction_sr.astype(np.uint32))
)
#print "symbol_sr:", symbol_sr
#print "direction_sr:", direction_sr
# conv datetime fields with pandas then assign back to padf - do this before adding lat lon as index would change...
for i in range(len(fields_need_pd_datetime)):
index = field_indexes_need_pd_datetime[i]
field = fields_need_pd_datetime[i]
print("converting field index {} name {} to datetime...".format(index, field))
# convert
converted_sr = pd.to_datetime(padf.column(index).to_pandas())
#print "converted_sr head:", converted_sr.head()
# assign it back
# print "padf.schema:\n", padf.schema
padf = padf.set_column(index, field, pa.Array.from_pandas(converted_sr))
if has_geom_field:
# use pandas to decode geom from hex to binary, then extract lat, lon from wkb
geom_sr = padf.column(geom_field_index).to_pandas()
geom_sr_null_mask = pd.isnull(geom_sr)
geom_sr = geom_sr.str.decode('ascii')
geom_sr = geom_sr.fillna("")
#print("ori geom_sr:", geom_sr)
if not geom_format_in_csv_is_wkb:
print("geom in csv is in spatialite format - convert to wkb first...")
spatialite_geom_sr = geom_sr
class_type = "01000020E6100000"
endian = "01" # spatialite_geom_sr.str.slice(start=2, stop=4)
point = spatialite_geom_sr.str.slice(start=86, stop=118) # 86 + 16 + 16
geom_sr = endian + class_type + point # wkb
geom_sr = geom_sr.str.decode("hex")
geom_sr[geom_sr_null_mask] = None
#print('wkb geom_sr.head():', geom_sr.head())
lon_sr = geom_sr.apply(lambda x: None if (pd.isnull(x) or len(x) != WKB_POINT_LAT_LON_BYTES_LEN) else np.frombuffer(x[9:9+8], dtype=np.float64)).astype(np.float64) # X
lat_sr = geom_sr.apply(lambda x: None if (pd.isnull(x) or len(x) != WKB_POINT_LAT_LON_BYTES_LEN) else np.frombuffer(x[9+8:9+8+8], dtype=np.float64)).astype(np.float64) # Y
#print('lon_sr', lon_sr.head())
#print('lat_sr', lat_sr.head())
##### assign all three back to padf
## replace geom with newly converted to binary geom_sr
geom_sr_len = len(geom_sr)
pa_array = None
if pd.isnull(geom_sr).all():
print("geom_sr null all case")
pa_array = pa.array(geom_sr.values.tolist()+[b'']).slice(0, geom_sr_len) # convert tolist() and add [""] then slice() back to ori len required to avoid pyarrow.lib.ArrowInvalid: Field type did not match data type - see azq_report_gen/test_spark_wkb_exception.py
else:
print("not geom_sr null all case")
pa_array = pa.array(geom_sr)
assert pa_array is not None
padf = padf.set_column(geom_field_index, pa.field("geom", "binary"), pa_array)
## insert lat, lon
padf = padf.add_column(geom_field_index+1, pa.field("lat", pa.float64()), pa.Array.from_pandas(lat_sr))
padf = padf.add_column(geom_field_index+2, pa.field("lon", pa.float64()), pa.Array.from_pandas(lon_sr))
# finally drop 'time_ms' legacy column used long ago in mysql where it didnt have milliseconds - not used anymore
for drop_index in field_index_to_drop:
padf = padf.remove_column(drop_index)
#print "padf.schema:\n", padf.schema
append_table_operation_stats(args, table_name, "padf processing and conversion with pd duration:", (datetime.datetime.now() - start_time).total_seconds())
print("padf len:", len(padf))
start_time = datetime.datetime.now()
# use snappy and use_dictionary - https://wesmckinney.com/blog/python-parquet-multithreading/
pq.write_table(padf, pqfp, flavor='spark', compression=PARQUET_COMPRESSION, use_dictionary=True)
assert os.path.isfile(pqfp)
append_table_operation_stats(args, table_name, "pq.write_table duration:", (datetime.datetime.now() - start_time).total_seconds())
print("wrote pqfp:", pqfp)
# if log_table dont return - let it enter pg too...
if table_name == "logs":
pass # import logs table to pg too
else:
return True
if args['target_db_type'] == 'mssql':
# create fmt format file for that table
"""
generate format file:
https://msdn.microsoft.com/en-us/library/ms178129.aspx
format file contents:
https://msdn.microsoft.com/en-us/library/ms191479(v=sql.110).aspx
"""
n_local_cols = len(local_column_names)
fmt = open(table_dump_format_fp,"w")
fmt.write("11.0\n") # ver - 11.0 = SQL Server 2012
fmt.write(str(n_local_cols)+"\n") # n cols
host_field_order = 0 # dyn gen - first inc wil get it to 1
host_file_data_type = "SQLCHAR"
prefix_length = 0
host_file_data_length = 0 # When a delimited text file having a prefix length of 0 and a terminator is imported, the field-length value is ignored, because the storage space used by the field equals the length of the data plus the terminator
terminator = None # dyn gen
server_col_order = None # dyn gen
server_col_name = None # dyn gen
col_coalition = ""
for col in local_column_names:
host_field_order = host_field_order + 1
if (n_local_cols == host_field_order): #last
terminator = azm_db_constants.BULK_INSERT_LINE_SEPARATOR_PARAM
else:
terminator = azm_db_constants.BULK_INSERT_COL_SEPARATOR_PARAM
if not table_name.startswith("wifi_scanned"):
#dprint("remote_column_names: "+str(remote_column_names))
pass
#dprint("col: "+str(col))
server_col_order = remote_column_names.index(col) + 1 # not 0 based
server_col_name = col # always same col name
fmt.write(
'{}\t{}\t{}\t{}\t"{}"\t{}\t"{}"\t"{}"\n'.format(
host_field_order,
host_file_data_type,
prefix_length,
host_file_data_length,
terminator,
server_col_order,
server_col_name,
col_coalition
)
)
fmt.flush()
fmt.close()
# both dump csv and format fmt files are ready
# execute bulk insert sql now
if g_is_ms:
sqlstr = "bulk insert \"{}\" from '{}' with ( formatfile = '{}' );".format(
table_name,
table_dump_fp,
table_dump_format_fp
)
if g_is_postgre:
colnames = ""
first = True
for col in local_column_names:
if not first:
colnames = colnames + ","
if first:
first = False
colnames = colnames + '"' + col + '"'
sqlstr = "copy \"{}\" ({}) from STDIN with (format csv, NULL '')".format(
table_name,
colnames
)
#dprint("START bulk insert sqlstr: "+sqlstr)
g_exec_buf.append((sqlstr, table_dump_fp))
# print("DONE bulk insert - nrows inserted: "+str(ret.rowcount))
return True
### below are helper functions (note: several ARE called by the import code above, e.g. sql_adj_line/get_remote_columns - "not used by azq_db_merge" likely refers to the separate driver module)
def sql_adj_line(line):
    """Translate one sqlite CREATE TABLE line into the target db dialect.

    Performs plain textual type substitutions (double->float, INT->bigint,
    first geom BLOB column -> geometry, etc.), then PostgreSQL- or
    MSSQL-specific replacements depending on the module globals
    g_is_postgre / g_is_ms.
    """
    sqlstr = line
    #sqlstr = sqlstr.replace('`', '"')
    # substitutions common to all target databases, applied in order
    common_subs = (
        ('" Double', '" float'),
        ('" double', '" float'),
        ('" DOUBLE', '" float'),
        ('" FLOAT', '" float'),
        ('" smallint', '" bigint'),
        ('" INT', '" bigint'),
    )
    for old, new in common_subs:
        sqlstr = sqlstr.replace(old, new)
    # only the first geom column becomes a postgis geometry type
    # (sqlite pandas regen db uses lowercase "blob")
    sqlstr = sqlstr.replace('"geom" BLOB', '"geom" geometry', 1)
    sqlstr = sqlstr.replace('"geom" blob', '"geom" geometry', 1)
    if g_is_postgre:
        pg_subs = (
            ('" DATETIME', '" timestamp'),
            ('" datetime', '" timestamp'),
            ('" BLOB', '" bytea'),
            ('" blob', '" bytea'),
            ('" string', '" text'),
        )
        for old, new in pg_subs:
            sqlstr = sqlstr.replace(old, new)
    if g_is_ms:
        sqlstr = sqlstr.replace('" BLOB', '" varbinary(MAX)')
    # default empty fields to text type
    # sqlstr = sqlstr.replace("\" ,", "\" text,")
    # sqlstr = sqlstr.replace("\" );", "\" text);")
    return sqlstr
def get_table_name(line_adj):
    """Extract the table name (third whitespace token, quotes stripped) from a CREATE TABLE line."""
    token = line_adj.split(" ")[2]
    return token.replace('"', "")
def get_col_names(cols):
    """Return only the column names from a list of [col_name, col_type] pairs."""
    return [col[0] for col in cols]
def get_remote_columns(args, table_name):
    """Query the target database for the existing columns of *table_name*.

    Returns a list of [col_name, col_type] pairs. In PostgreSQL mode
    col_type is always "" (only names are fetched, via a zero-row select);
    in MSSQL mode both name and type are parsed from sp_columns output.
    NOTE(review): if neither g_is_ms nor g_is_postgre is set this falls
    through and returns None - callers appear to assume one flag is set.
    """
    global g_cursor
    global g_is_ms, g_is_postgre
    #dprint("table_name: "+table_name)
    sqlstr = ""
    if g_is_ms:
        sqlstr = "sp_columns @table_name=\"{}\"".format(table_name)
    if g_is_postgre:
        # 'where false' fetches no rows but still fills cursor.description
        sqlstr = "select * from \"{}\" where false".format(table_name)
    #dprint("check table columns sqlstr: "+sqlstr)
    g_cursor.execute(sqlstr)
    #dprint("query execute ret: "+str(ret))
    rows = g_cursor.fetchall()
    '''
    Now get remote column list for this table...
    '''
    remote_columns = []
    if g_is_postgre:
        # column names come from the cursor description of the empty select
        colnames = [desc[0] for desc in g_cursor.description]
        for col in colnames:
            remote_columns.append([col,""])
        return remote_columns
    if g_is_ms:
        # MS SQL
        for row in rows:
            '''
            MSSQL Column str return example:
            row n: {0: u'azqdemo', 1: u'dbo', 2: u'android_metadata', 3: u'locale', 4: -1, 5: u'text', u'DATA_TYPE': -1, 7: 2147483647, 8: None, 9: None, 10: 1, 11: None, 12: None, 13: -1, 14: None, 15: 2147483647, u'COLUMN_DEF': None, 17: u'YES', 18: 35, u'SCALE': None, u'TABLE_NAME': u'android_metadata', u'SQL_DATA_TYPE': -1, 6: 2147483647, u'NULLABLE': 1, u'REMARKS': None, u'CHAR_OCTET_LENGTH': 2147483647, u'COLUMN_NAME': u'locale', u'SQL_DATETIME_SUB': None, u'TABLE_OWNER': u'dbo', 16: 1, u'RADIX': None, u'SS_DATA_TYPE': 35, u'TYPE_NAME': u'text', u'PRECISION': 2147483647, u'IS_NULLABLE': u'YES', u'LENGTH': 2147483647, u'ORDINAL_POSITION': 1, u'TABLE_QUALIFIER': u'azqdemo'}
            Result:
            col_name: locale
            col_type: text
            '''
            # parse name/type out of the stringified row - fragile: relies on
            # the exact ", u" separated repr format shown in the example above
            rs = str(row)
            #dprint("row n: " + rs)
            splitted = rs.split(", u")
            col_name = splitted[3].split("'")[1]
            #dprint("col_name: "+col_name)
            col_type = splitted[4].split("'")[1]
            #dprint("col_type: "+col_type)
            remote_columns.append([col_name,col_type])
        return remote_columns
def exec_creatept_or_alter_handle_concurrency(sqlstr, raise_exception_if_fail=True, allow_exstr_list=()):
    """Execute a CREATE TABLE/PARTITION or ALTER TABLE statement with retries.

    Multiple importers may race to create the same partition/column, so
    failures are retried with random jitter, and exceptions whose text
    matches an entry in allow_exstr_list (e.g. " already exists") are
    treated as success.

    Args:
        sqlstr: the DDL statement to execute.
        raise_exception_if_fail: raise if all retries fail; otherwise
            return False.
        allow_exstr_list: iterable of substrings marking acceptable
            failures. (Default changed from mutable [] to an immutable
            tuple - same behavior for all callers.)

    Returns:
        True on success or allowed failure, False otherwise.
    """
    global g_conn
    global g_cursor
    print("exec_creatept_or_alter_handle_concurrency START sqlstr: {}".format(sqlstr))
    ret = False
    prev_exstr = ""
    max_retries = 2
    for retry in range(max_retries):
        try:
            # use 'with' for auto rollback() on g_conn on expected fails
            # like "already exists"
            with g_conn as con:
                print("exec_creatept_or_alter_handle_concurrency retry {} sqlstr: {}".format(retry, sqlstr))
                execret = g_cursor.execute(sqlstr)
                print("exec_creatept_or_alter_handle_concurrency retry {} sqlstr: {} execret: {}".format(retry, sqlstr, execret))
                # commit now otherwise upcoming COPY commands might not see
                # newly created partitions
                con.commit()
                print("exec_creatept_or_alter_handle_concurrency commit done")
                ret = True
                break
        except Exception:  # was a bare 'except:' - don't swallow KeyboardInterrupt/SystemExit
            type_, value_, traceback_ = sys.exc_info()
            exstr = str(traceback.format_exception(type_, value_, traceback_))
            for allow_case in allow_exstr_list:
                if allow_case in exstr:
                    print("exec_creatept_or_alter_handle_concurrency got exception but matches allow_exstr_list allow_case: {} - so treat as success".format(allow_case))
                    ret = True
                    break
            if ret:
                break
            prev_exstr = "WARNING: exec_creatept_or_alter_handle_concurrency retry {} exception: {}".format(retry, exstr)
            print(prev_exstr)
            if retry + 1 < max_retries:
                # random jitter before retrying to reduce concurrent-create races
                # (original also slept after the final attempt - pointless delay)
                time.sleep(random.random() + 0.5)
    print("exec_creatept_or_alter_handle_concurrency DONE sqlstr: {} - ret {}".format(sqlstr, ret))
    if ret is False and raise_exception_if_fail:
        raise Exception("exec_creatept_or_alter_handle_concurrency FAILED after max retries: {} prev_exstr: {}".format(max_retries, prev_exstr))
    return ret
def is_datetime_col(col):
    """True for timestamp columns: name ends with 'time', except 'trip_time' columns."""
    if col.endswith("trip_time"):
        return False
    return col.endswith("time")
def is_numeric_col_type(col_type):
    """True if the sql column type (case-insensitive) is a known numeric type."""
    return col_type.lower() in {
        "int", "integer", "bigint", "biginteger", "real", "double", "float",
    }
def append_table_operation_stats(args, table, operation, duration):
print("operation_stats: {}:{}:{} seconds".format(table, operation, duration))
od = args["table_operation_stats"]
od["table"].append(table)
od["operation"].append(operation)
od["duration"].append(duration) | en | 0.627925 | module to handle merging (importing) of (azqdata.db from azq .azm files) sqlite3 dump lines into a PostgreSQL and Microsoft SQL Server db. Copyright: Copyright (C) 2016 Freewill FX Co., Ltd. All rights reserved. # global vars # would be set in --unmerge mode now we already use 'autocommit = True' as recommended by MSDN doc so set g_always_commit to False old: sometimes imports work fine without cursor.commit() but after a --unmerge task, imports dont work anymore until we do commit() after each execute for all tables # TODO: set/use as global - from args from azm_db_merge - where .db is extracted from azm # because pyarrow is somehow not taking vals like this so use strings first: In CSV column #0: CSV conversion error to timestamp[ms]: invalid value '2018-07-24 09:59:48.218' # EXCEPT special allowed cols like 'log_hash' # EXCEPT special allowed cols like 'log_hash' that will never be null - they will be np.int64 - but for generic numbers can be null so pd df needs it as float64 # for generic numbers can be null so pd df needs it as float64 # for generic numbers can be null so pd df needs it as float64 ### below are functions required/used by azq_db_merge # cleanup old stuff just in case # always bulk insert mode now #unsafe as users might see in logs print "using connect_str: "+connect_str https://msdn.microsoft.com/en-us/library/ms131281.aspx ODBC applications should not use Transact-SQL transaction statements such as BEGIN TRANSACTION, COMMIT TRANSACTION, or ROLLBACK TRANSACTION because this can cause indeterminate behavior in the driver. An ODBC application should run in autocommit mode and not use any transaction management functions or statements, or run in manual-commit mode and use the ODBC SQLEndTran function to either commit or roll back transactions. 
https://mkleehammer.github.io/pyodbc/api.html >> 'autocommit' in our case set to false and buffer all atomic cmds into g_exec_buf for run once before commit # example: conn = psycopg2.connect("dbname=azqdb user=azqdb") #unsafe as users might see in logs print "using connect_str: "+connect_str # post connect steps for each dbms # create postgis at public schema first # create postgis in public only - print "pg using schema start" # try_cre_postgis(schema=args["pg_schema"]) # inside new schema somehow not working - let qgis detect itself for now... try: # set 'f_table_name' to unique so we can blindly insert table_name:geom (on create handlers) to it without checking (let mssql check) ret = g_cursor.execute(""" CREATE TABLE [dbo].[geometry_columns]( [f_table_catalog] [varchar](50) NULL, [f_table_schema] [varchar](50) NULL, [f_table_name] [varchar](100) NULL UNIQUE, [f_geometry_column] [varchar](50) NULL, [coord_dimension] [int] NULL, [srid] [int] NULL, [geometry_type] [varchar](50) NULL ) """) print "created qgis table: geometry_columns" except Exception as e: pass try: # below execute would raise an exception if it is already created ret = g_cursor.execute(""" CREATE TABLE spatial_ref_sys (srid INTEGER NOT NULL PRIMARY KEY,auth_name VARCHAR(256) NOT NULL,auth_srid INTEGER NOT NULL,ref_sys_name VARCHAR(256),proj4text VARCHAR(2048) NOT NULL); """) print "created qgis table: spatial_ref_sys" # if control reaches here means the table didn't exist (table was just created and is empty) so insert wgs84 into it... 
ret = g_cursor.execute(""" INSERT INTO "spatial_ref_sys" VALUES(4326,'epsg',4326,'WGS 84','+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs'); """) print "added wgs84 to qgis table: spatial_ref_sys" except Exception as e: pass # use with for auto rollback() on g_conn on exception - otherwise we cant use the cursor again - would fail as: current transaction is aborted, commands ignored until end of transaction block # azm never imported # unmerge mode - this azm is not in target db # azm already imported #dprint("um0: row: "+str(row)) #dprint("upg 0") # row is a tuple - make it a dict #dprint("upg 01") # now we only need 'log_hash' to unmerge and the used odbc cant parse geom too - cols = get_remote_columns(args,'logs') #dprint("upg 1: cols: "+str(cols)) #dprint("um1") ## unmerge mode - delete start for azm: log_hash {}".format(row['log_hash'])) # first time import - no table named logs exists yet # make sure all create/alters are committed ## total cmds to execute for operation: "+str(n)) # for COPY from stdin #print("dump_parquet mode exec buf:", buf) # needed otherwise cursor would become invalid and unmerge would fail for no table cases handled below ## all cmds exec success - COMMIT now...") ## COMMIT success...") # do mc cp all parquet files to object store... 
# object listing would cost too much cpu and class a operations so skip this for parquet mode #print "fac csv_line:", csv_line #print "part[120]:", part[120] #dprint("csv_line spatialite_geom_part: "+part) #dprint("spatialite_geom_contents: len "+str(len(spatialite_geom_contents))+" val: "+spatialite_geom_contents) # convert spatialite geometry blob to wkb Spatialite BLOB Format (Point) ------------------------------ http://www.gaia-gis.it/gaia-sins/BLOB-Geometry.html example: 0001E6100000DD30C0F46C2A594041432013008E2B40DD30C0F46C2A594041432013008E2B407C01000000DD30C0F46C2A594041432013008E2B40FE parse: spatialite header: 00 (str_off 0 str_len 2) endian: 01 little endian (str_off 2 str_len 2) (spec: if this GEOMETRY is BIG_ENDIAN ordered must contain a 0x00 byte value otherwise, if this GEOMETRY is LITTLE_ENDIAN ordered must contain a 0x01 byte value) SRID: E6 10 00 00 (str_off 4 str_len 8) MBR_MIN_X: DD 30 C0 F4 6C 2A 59 40 (str_off 12 str_len 16) MBR_MIN_Y: 41 43 20 13 00 8E 2B 40 (str_off 28 str_len 16) MBR_MAX_X: DD 30 C0 F4 6C 2A 59 40 (str_off 42 str_len 16) MBR_MAX_Y: 41 43 20 13 00 8E 2B 40 (str_off 58 str_len 16) MBR_END: 7C (str_off 76 str_len 2) CLASS_TYPE: 01 00 00 00 (str_off 78 str_len 8) POINT: X: DD 30 C0 F4 6C 2A 59 40 (str_off 86 str_len 16) Y: 41 43 20 13 00 8E 2B 40 (str_off 102 str_len 16) END: FE (str_off 118 str_len 2) --- WKB Format ---------- See "3.3.2.6 Description of WKBGeometry Representations" in https://portal.opengeospatial.org/files/?artifact_id=829 Point { double x; double y; }; WKBPoint { byte byteOrder; uint32 wkbType; //class_type Point point; } Therefore, for "Point" we need from spatialite blob parts: endian, CLASS_TYPE, POINT # spatialite blob point size is 60 bytes = 120 chars in hex - as in above example and starts with 00 # 2 + len 2 old code: class_type = spatialite_geom_contents[78:86] # 78 + 8 change class_type to 'point' BITWISE_OR SRID flag as per https://trac.osgeo.org/postgis/browser/trunk/doc/ZMSgeoms.txt " wkbSRID 
= 0x20000000 If the SRID flag is set it's value is encoded as a 4byte integer right after the type integer. " so our class is pont | wkbSRID = 0x20000001 (little endian 32: 01000020) then add srid "right after the type integer" our srid = 4326 = 0x10E6 (little endian 32: E6100000) therefore, class_type_point_with_srid_wgs84 little_endian is 01000020E6100000 # 86 + 16 + 16 # example: 01 01000020e6100000 ae17f9ab76565340 59528b140ca03c40 https://msdn.microsoft.com/en-us/library/ee320529.aspx 0xE6100000 01 0C 0000000000001440 0000000000002440 This string is interpreted as shown in the following table. Binary value Description E6100000 SRID = 4326 01 Version = 1 0C Serialization Properties = V + P (geometry is valid, single point) 0000000000001440 X = 5 0000000000002440 Y = 10 #dprint("not entering spatialite blob parse - len "+str(len(spatialite_geom_contents))) #dprint("find_and_conv_spatialite_blob_to_wkb ret: "+csv_line) # omit these tables - import fails ## unmerge mode - delete all rows for this azm in table: "+table_name) now we use log_hash - no need to parse time # remove 3 traling 000 from microsecs str start_dt_str = str(g_unmerge_logs_row['log_start_time'])[:-3] end_dt_str = str(g_unmerge_logs_row['log_end_time'])[:-3] Now get local columns Example sqlstr: CREATE TABLE "browse" ("time" DATETIME,"time_ms" INT,"posid" INT,"seqid" INT,"netid" INT, "Browse_All_Session_Throughput_Avg" real, "Data_Browse_Throughput" real, "Data_Browse_Throughput_Avg" real, "Data_Browse_Total_Loaded_Obj" smallint, "Data_Browse_Total_Page_Obj" smallint, "Data_Browse_Page_Load_Time" real, "Data_Browse_Page_Load_Time_Avg" real, "Data_Browse_Total_Sessions" smallint, "Data_Browse_Total_Success" smallint, "Data_Browse_Total_Fail_Page" smallint, "Data_Browse_Total_Fail_Obj" smallint, "Data_Browse_Total_Timeout" smallint, "Data_Browse_Exterior_Fail_Page" smallint, "Data_Browse_Exterior_Fail_Obj" smallint, "Browse_Throughput" real, "Browse_Throughput_max" real, "Browse_Throughput_min" 
real, "Browse_Duration" real, "Browse_Duration_max" real, "Browse_Duration_min" real); # get part inside parenthesis #dprint("ls :" + str(ls)) # split by comma # parse column names and keep for insert commands import_geom_column_in_location_table_only feature already implemented at line_adj above if args['import_geom_column_in_location_table_only'] and col_name == "geom" and table_name != "location": omit_col = True # args['prev_create_statement_column_names'] #dprint("create sqlstr: "+sqlstr) # dont partition logs table # create target partition for this log + table # ok - partition this table #dprint("create sqlstr postgres mod: "+sqlstr) # postgis automatically creates/maintains "geometry_columns" 'view' #dprint("create sqlstr mod mssql geom: "+sqlstr) #too slow and high cpu: g_cursor.execute("select * from information_schema.tables where table_schema=%s and table_name=%s", (args["pg_schema"],table_name,)) SELECT FROM pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace WHERE n.nspname = %s AND c.relname = %s AND c.relkind = 'r' # use with for auto rollback() on g_conn on expected fails like already exists # commit now otherwise COPY might not see partitions #dprint("create execute ret: "+str(ret)) if control reaches here then the create is successful - table was not existing earlier - so remote cols must be the same This table already exists - checking if all local columns already exist in remote - otherwise will add each missing cols to remote table before inserting to it. 
# now get local columns that are not in remote ####### quickfix: col_type override for unsigned int32 cols from sqlite (bindLong already) - conv to bigint in pg as pg doesnt have unsigned # might need to psql to do first manually if log was already imported using older azm_db_merge: # alter table all_logs.lte_volte_rtp_msg alter column lte_volte_rtp_source_ssrc type bigint; # alter table all_logs.lte_volte_rtp_msg alter column lte_volte_rtp_timestamp type bigint; ####################### # TODO: handle if different type? #dprint("n_cols_to_add == 0 - no need to alter table") # example: ALTER TABLE dbo.doc_exa ADD column_b VARCHAR(20) NULL, column_c INT NULL ; # not first # re-get remote cols # dont partition logs table ## check/create partitions for month for log_hash, prev month, after month #print "log_hash_datetime:", log_hash_datetime # simpler name because we got cases where schema's table name got truncated: activate_dedicated_eps_bearer_context_request_params_3170932708 # too slow and high cpu check_sql = "select * from information_schema.tables where table_schema='{}' and table_name='{}'".format(schema_per_month_name, ntn) SELECT FROM pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace WHERE n.nspname = '{}' AND c.relname = '{}' AND c.relkind = 'r' ###### let sqlite3 dump contents of table into file #print("table_dump_fp: "+table_dump_fp) #print("table_dump_format_fp: "+table_dump_format_fp) # create dump csv of that table example dump of logs table: sqlite3 azqdata.db -list -newline "|" -separator "," ".out c:\\azq\\azq_report_gen\\azm_db_merge\\logs.csv" "select * from logs" # get col list, and hex(col) for blob coulumns #dprint("local_columns: "+str(local_columns)) #geom_col_index = i ############## wrong data format fixes ### custom limit bsic len in case matched wrongly entered bsic to long str but pg takes max 5 char len for bsic # handle invalid modem_time case: 159841018-03-10 07:24:42.191 # limit to 5 char len (6 is last 
index excluding) # type cast required to remove non-int in cellfile data # many _duration cols in detected_radion_voice_call_session and in pp_ tables have wrong types or even has right type but values came as "" so would be ,"" in csv which postgres and pyarrow wont allow for double/float/numeric cols - check by col_name only is faster than nullif() on all numericols - as most cases are these _duration cols only # special table handling # double backslash because it needs to go inside sqlite3 cmd parsing again # filter all tables but not the main logs table #print "select_sqlstr:", select_sqlstr # double backslash because it needs to go inside sqlite3 cmd parsing again #dprint("dump_cmd:", dump_cmd) # if parquet dump mode do only logs table dump to track already imported #(not args['dump_parquet']) or parquet_arrow_mode or table_name == "logs": #print("dump_cmd:", dump_cmd) #print "dump_cmd ret:", ret # in parquet mode we are modifying geom anyway so assume geom is spatialite format instead of wkb # wb required for windows so that \n is 0x0A - otherwise \n will be 0x0D 0x0A and doest go with our fmt file and only 1 row will be inserted per table csv in bulk inserts... this causes python test_browse_performance_timing.py to fail as its json got changed if g_is_postgre: ofl = ofl.replace(',""',',') # keep this legacy code for postgres mode code jus to be sure, although we already did nullif checks during sqlite csv dunp... no need to check this, only old stale thread versions would have these cases and will have other cases too so let it crash in all those cases if ofl.strip() == all_cols_null_line: continue find_and_conv_spatialite_blob_to_wkb, replace ,"" with , total file duration: #dprint("dump table: "+table_name+" for bulk insert ret: "+str(ret)) # if control reaches here then the table is not empty ################## read csv to arrow, set types, dump to parqet - return True, but if log_table dont return - let it enter pg too... 
# yes, arrow read from csv, convert to pd to mod datetime col and add lat lon is faster than pd.read_sql() and converting fields and to parquet #print "local_column_names:", local_column_names # special cases # because pyarrow is somehow not taking vals like this so use strings first: In CSV column #0: CSV conversion error to timestamp[ms]: invalid value '2018-07-24 09:59:48.218' # some old db invalid type cases # adj types for pa #print("pa_column_types:", pa_column_types) #print("local_column_names:", local_column_names) # check if has geom # change type of field in new schema to timestamp if required ##### special mods for each table # create int column 'direction' for faster queries instead of the string 'symbol' column #print "direction_sr.dtype", direction_sr.dtype # org.apache.spark.sql.AnalysisException: Parquet type not supported: INT32 (UINT_8); # org.apache.spark.sql.AnalysisException: Parquet type not supported: INT32 (UINT_16); # so had to use uint32 #print "symbol_sr:", symbol_sr #print "direction_sr:", direction_sr # conv datetime fields with pandas then assign back to padf - do this before adding lat lon as index would change... 
# convert #print "converted_sr head:", converted_sr.head() # assign it back # print "padf.schema:\n", padf.schema # use pandas to decode geom from hex to binary, then extract lat, lon from wkb #print("ori geom_sr:", geom_sr) # spatialite_geom_sr.str.slice(start=2, stop=4) # 86 + 16 + 16 # wkb #print('wkb geom_sr.head():', geom_sr.head()) # X # Y #print('lon_sr', lon_sr.head()) #print('lat_sr', lat_sr.head()) ##### assign all three back to padf ## replace geom with newly converted to binary geom_sr # convert tolist() and add [""] then slice() back to ori len required to avoid pyarrow.lib.ArrowInvalid: Field type did not match data type - see azq_report_gen/test_spark_wkb_exception.py ## insert lat, lon # finally drop 'time_ms' legacy column used long ago in mysql where it didnt have milliseconds - not used anymore #print "padf.schema:\n", padf.schema # use snappy and use_dictionary - https://wesmckinney.com/blog/python-parquet-multithreading/ # if log_table dont return - let it enter pg too... 
# import logs table to pg too # create fmt format file for that table generate format file: https://msdn.microsoft.com/en-us/library/ms178129.aspx format file contents: https://msdn.microsoft.com/en-us/library/ms191479(v=sql.110).aspx # ver - 11.0 = SQL Server 2012 # n cols # dyn gen - first inc wil get it to 1 # When a delimited text file having a prefix length of 0 and a terminator is imported, the field-length value is ignored, because the storage space used by the field equals the length of the data plus the terminator # dyn gen # dyn gen # dyn gen #last #dprint("remote_column_names: "+str(remote_column_names)) #dprint("col: "+str(col)) # not 0 based # always same col name # both dump csv and format fmt files are ready # execute bulk insert sql now #dprint("START bulk insert sqlstr: "+sqlstr) # print("DONE bulk insert - nrows inserted: "+str(ret.rowcount)) ### below are functions not used by azq_db_merge #sqlstr = sqlstr.replace('`', '"') # sqlite pandas regen db uses lowercase # default empty fields to text type # sqlstr = sqlstr.replace("\" ,", "\" text,") # sqlstr = sqlstr.replace("\" );", "\" text);") #dprint("table_name: "+table_name) #dprint("check table columns sqlstr: "+sqlstr) #dprint("query execute ret: "+str(ret)) Now get remote column list for this table... 
# MS SQL MSSQL Column str return example: row n: {0: u'azqdemo', 1: u'dbo', 2: u'android_metadata', 3: u'locale', 4: -1, 5: u'text', u'DATA_TYPE': -1, 7: 2147483647, 8: None, 9: None, 10: 1, 11: None, 12: None, 13: -1, 14: None, 15: 2147483647, u'COLUMN_DEF': None, 17: u'YES', 18: 35, u'SCALE': None, u'TABLE_NAME': u'android_metadata', u'SQL_DATA_TYPE': -1, 6: 2147483647, u'NULLABLE': 1, u'REMARKS': None, u'CHAR_OCTET_LENGTH': 2147483647, u'COLUMN_NAME': u'locale', u'SQL_DATETIME_SUB': None, u'TABLE_OWNER': u'dbo', 16: 1, u'RADIX': None, u'SS_DATA_TYPE': 35, u'TYPE_NAME': u'text', u'PRECISION': 2147483647, u'IS_NULLABLE': u'YES', u'LENGTH': 2147483647, u'ORDINAL_POSITION': 1, u'TABLE_QUALIFIER': u'azqdemo'} Result: col_name: locale col_type: text #dprint("row n: " + rs) #dprint("col_name: "+col_name) #dprint("col_type: "+col_type) # use with for auto rollback() on g_conn on expected fails like already exists # commit now otherwise upcoming COPY commands might not see partitions | 1.976641 | 2 |
ui/pypesvds/controllers/project.py | onfire73/pypeskg | 117 | 6621038 | import logging
import traceback
import json
from pylons import request, response, session, tmpl_context as c
from pylons import app_globals
from pylons.controllers.util import abort
# added for auth
from authkit.authorize.pylons_adaptors import authorize
from authkit.permissions import RemoteUser, ValidAuthKitUser, UserIn
from pypesvds.lib.base import BaseController, render
log = logging.getLogger(__name__)
class ProjectController(BaseController):
"""REST Controller styled on the Atom Publishing Protocol"""
# To properly map this controller, ensure your config/routing.py
# file has a resource setup:
# map.resource('project', 'project')
#@authorize(ValidAuthKitUser())
def index(self, format='html'):
"""GET /project: All items in the collection"""
# url('project')
return render('/pypesvds.mako')
def create(self):
"""POST /project: Create a new item"""
# url('project')
try:
config = request.params.getall('config')[0]
except:
traceback.print_exc()
# added because authkit seems to try posting after login
# need toinvestigate further...
return render('/pypesvds.mako')
else:
return app_globals.dfg.update(config)
def new(self, format='html'):
"""GET /project/new: Form to create a new item"""
# url('new_project')
def update(self, id):
"""PUT /project/id: Update an existing item"""
pass
# Forms posted to this method should contain a hidden field:
# <input type="hidden" name="_method" value="PUT" />
# Or using helpers:
# h.form(url('project', id=ID),
# method='put')
# url('project', id=ID)
def delete(self, id):
"""DELETE /project/id: Delete an existing item"""
# Forms posted to this method should contain a hidden field:
# <input type="hidden" name="_method" value="DELETE" />
# Or using helpers:
# h.form(url('project', id=ID),
# method='delete')
# url('project', id=ID)
def show(self, id, format='html'):
"""GET /project/id: Show a specific item"""
# url('project', id=ID)
if id == 'current':
return json.dumps(app_globals.dfg.Config)
else:
return ''
def edit(self, id, format='html'):
"""GET /project/id/edit: Form to edit an existing item"""
# url('edit_project', id=ID)
| import logging
import traceback
import json
from pylons import request, response, session, tmpl_context as c
from pylons import app_globals
from pylons.controllers.util import abort
# added for auth
from authkit.authorize.pylons_adaptors import authorize
from authkit.permissions import RemoteUser, ValidAuthKitUser, UserIn
from pypesvds.lib.base import BaseController, render
log = logging.getLogger(__name__)
class ProjectController(BaseController):
"""REST Controller styled on the Atom Publishing Protocol"""
# To properly map this controller, ensure your config/routing.py
# file has a resource setup:
# map.resource('project', 'project')
#@authorize(ValidAuthKitUser())
def index(self, format='html'):
"""GET /project: All items in the collection"""
# url('project')
return render('/pypesvds.mako')
def create(self):
"""POST /project: Create a new item"""
# url('project')
try:
config = request.params.getall('config')[0]
except:
traceback.print_exc()
# added because authkit seems to try posting after login
# need toinvestigate further...
return render('/pypesvds.mako')
else:
return app_globals.dfg.update(config)
def new(self, format='html'):
"""GET /project/new: Form to create a new item"""
# url('new_project')
def update(self, id):
"""PUT /project/id: Update an existing item"""
pass
# Forms posted to this method should contain a hidden field:
# <input type="hidden" name="_method" value="PUT" />
# Or using helpers:
# h.form(url('project', id=ID),
# method='put')
# url('project', id=ID)
def delete(self, id):
"""DELETE /project/id: Delete an existing item"""
# Forms posted to this method should contain a hidden field:
# <input type="hidden" name="_method" value="DELETE" />
# Or using helpers:
# h.form(url('project', id=ID),
# method='delete')
# url('project', id=ID)
def show(self, id, format='html'):
"""GET /project/id: Show a specific item"""
# url('project', id=ID)
if id == 'current':
return json.dumps(app_globals.dfg.Config)
else:
return ''
def edit(self, id, format='html'):
"""GET /project/id/edit: Form to edit an existing item"""
# url('edit_project', id=ID)
| en | 0.56645 | # added for auth REST Controller styled on the Atom Publishing Protocol # To properly map this controller, ensure your config/routing.py # file has a resource setup: # map.resource('project', 'project') #@authorize(ValidAuthKitUser()) GET /project: All items in the collection # url('project') POST /project: Create a new item # url('project') # added because authkit seems to try posting after login # need toinvestigate further... GET /project/new: Form to create a new item # url('new_project') PUT /project/id: Update an existing item # Forms posted to this method should contain a hidden field: # <input type="hidden" name="_method" value="PUT" /> # Or using helpers: # h.form(url('project', id=ID), # method='put') # url('project', id=ID) DELETE /project/id: Delete an existing item # Forms posted to this method should contain a hidden field: # <input type="hidden" name="_method" value="DELETE" /> # Or using helpers: # h.form(url('project', id=ID), # method='delete') # url('project', id=ID) GET /project/id: Show a specific item # url('project', id=ID) GET /project/id/edit: Form to edit an existing item # url('edit_project', id=ID) | 2.224518 | 2 |
photogrid.py | stnc-python/Tasvirci | 0 | 6621039 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Tasvirci by OsmanDE
# github.com/OsmanDE/Tasvirci
from photogrid_dialog import Ui_GridDialog
from gridsetup_dialog import Ui_GridSetupDialog
from PyQt5.QtCore import Qt, QObject, pyqtSignal, QRect, QPoint
from PyQt5.QtGui import QPixmap, QPainter, QImageReader, QPen
from PyQt5.QtWidgets import QApplication, QLabel, QDialog, QHBoxLayout, QSizePolicy, QFileDialog, QMessageBox
helptext = '''Resim koymak için bir resim thumbnaili seç. Seçtiğin fotoğrafı koymak için boş kutulardan birine tıkla.
Eğer gridi daha farklı resimlerden oluşturmak istiyorsan fotoğraf ekleme kısmını seç.
Yeni bir resim seçerek eski resmi kaldırabilirsin.'''
class GridDialog(QDialog, Ui_GridDialog):
    """Dialog for composing a photo grid from one or more thumbnails.

    Hosts a scrollable :class:`GridPaper` preview, a vertical strip of
    selectable :class:`Thumbnail` widgets, and buttons to configure the
    grid, add photos, toggle borders and show help.
    """

    def __init__(self, pixmap, parent):
        """Build the dialog around an initial photo.

        pixmap -- QPixmap used as the first (pre-selected) thumbnail.
        parent -- parent widget for the dialog.
        """
        QDialog.__init__(self, parent)
        self.setupUi(self)
        self.resize(1020, 640)
        layout = QHBoxLayout(self.scrollAreaWidgetContents)
        layout.setContentsMargins(0, 0, 0, 0)
        self.gridPaper = GridPaper(self)
        layout.addWidget(self.gridPaper)
        self.thumbnailGr = ThumbnailGroup(self)
        thumbnail = Thumbnail(pixmap, self.frame)
        self.verticalLayout.addWidget(thumbnail)
        thumbnail.select(True)
        thumbnail.clicked.connect(self.gridPaper.setPhoto)
        self.thumbnailGr.append(thumbnail)
        self.configureBtn.clicked.connect(self.configure)
        self.addPhotoBtn.clicked.connect(self.addPhoto)
        self.checkAddBorder.clicked.connect(self.gridPaper.toggleBorder)
        self.helpBtn.clicked.connect(self.showHelp)
        # The pre-selected photo is what gets placed on the first cell click.
        self.gridPaper.photo = pixmap

    def configure(self):
        """Open the grid setup dialog and apply accepted layout settings."""
        dialog = GridSetupDialog(self)
        if dialog.exec_() == 1:
            # Copy the computed layout parameters onto the preview paper.
            self.gridPaper.paperW = dialog.paperW
            self.gridPaper.paperH = dialog.paperH
            self.gridPaper.rows = dialog.rows
            self.gridPaper.cols = dialog.cols
            self.gridPaper.W = dialog.W
            self.gridPaper.H = dialog.H
            self.gridPaper.DPI = dialog.DPI
            self.gridPaper.setupGrid()

    def addPhoto(self):
        """Let the user pick an image file and add it as a new thumbnail."""
        # BUG FIX: the filter previously read "*.jpg *jpeg" -- the missing
        # dot meant files with a ".jpeg" extension were not matched.
        filefilter = "JPEG Images (*.jpg *.jpeg);;PNG Images (*.png);;All Files (*)"
        filepath, sel_filter = QFileDialog.getOpenFileName(
            self, 'Seyahatname - Resmi Aç', '', filefilter)
        if not filepath:
            return  # user cancelled the dialog
        image_reader = QImageReader(filepath)
        image_reader.setAutoTransform(True)  # honour EXIF orientation
        pm = QPixmap.fromImageReader(image_reader)
        if pm.isNull():
            return  # unreadable/corrupt image: silently ignore, as before
        thumbnail = Thumbnail(pm, self.frame)
        self.verticalLayout.addWidget(thumbnail)
        thumbnail.clicked.connect(self.gridPaper.setPhoto)
        self.thumbnailGr.append(thumbnail)

    def accept(self):
        """Render the full-resolution grid, then close with Accepted."""
        # Create final grid when ok is clicked
        self.gridPaper.createFinalGrid()
        QDialog.accept(self)

    def showHelp(self):
        """Show usage instructions in a message box."""
        global helptext
        QMessageBox.about(self, 'Nasıl Grid Oluştururum?', helptext)
class Thumbnail(QLabel):
    """Clickable, 100px-wide preview of a photo.

    Emits ``clicked`` with the full-size pixmap when pressed; ``select``
    draws or removes a blue highlight frame around the preview.
    """

    clicked = pyqtSignal(QPixmap)

    def __init__(self, pixmap, parent):
        QLabel.__init__(self, parent)
        self.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        # Keep the full-resolution original; only the label shows a scaled copy.
        self.photo = pixmap
        self.setPixmap(pixmap.scaledToWidth(100))

    def mousePressEvent(self, ev):
        # Report the click by handing out the original (unscaled) pixmap.
        self.clicked.emit(self.photo)

    def select(self, select):
        """Show (True) or hide (False) the blue selection border."""
        preview = self.photo.scaledToWidth(100)
        if not select:
            self.setPixmap(preview)
            return
        painter = QPainter(preview)
        border = QPen(Qt.blue)
        border.setWidth(4)
        painter.setPen(border)
        painter.drawRect(2, 2, 100 - 4, preview.height() - 4)
        painter.end()
        self.setPixmap(preview)
class ThumbnailGroup(QObject):
    """Keeps thumbnails mutually exclusive: selecting one deselects the rest."""

    def __init__(self, parent):
        QObject.__init__(self, parent)
        self.thumbnails = []  # all registered Thumbnail widgets

    def append(self, thumbnail):
        """Register *thumbnail* so it takes part in exclusive selection."""
        self.thumbnails.append(thumbnail)
        thumbnail.clicked.connect(self.selectThumbnail)

    def selectThumbnail(self):
        # Clear every highlight first, then highlight only the clicked sender.
        for member in self.thumbnails:
            member.select(False)
        self.sender().select(True)
class GridPaper(QLabel):
    """Scaled on-screen preview of the print paper.

    Draws an empty grid of photo slots; clicking a slot pastes the
    currently selected photo into it.  ``pixmap_dict`` maps slot index
    -> original full-resolution QPixmap so createFinalGrid() can render
    the grid at print resolution, independent of the screen preview.
    All layout values (paperW/H, W/H, spacing) are in print pixels at
    ``DPI``; on-screen sizes are those values times ``scale``.
    """
    def __init__(self, parent):
        QLabel.__init__(self, parent)
        self.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        self.setMouseTracking(True)  # needed for hover-cursor feedback without a button press
        self.pixmap_dict = {}  # slot index -> placed photo (original QPixmap)
        self.add_border = True  # draw a 1px frame around placed photos
        # Defaults: 1800x1200 px paper at 300 DPI (i.e. 6x4 in),
        # 413x531 px photos, arranged 4 columns x 2 rows.
        self.DPI = 300
        self.paperW, self.paperH = 1800, 1200
        self.W, self.H = 413, 531
        self.cols, self.rows = 4, 2
        self.setupGrid()
    def setupGrid(self):
        """Recompute slot rectangles and repaint the blank grid preview."""
        self.boxes = []
        # Even gutters: the leftover paper space is split into (count + 1) gaps.
        self.spacingX, self.spacingY = (self.paperW-self.cols*self.W)/(self.cols+1), (self.paperH-self.rows*self.H)/(self.rows+1)
        screenDPI = QApplication.desktop().logicalDpiX()
        # Screen-to-print ratio: the preview shows the paper at roughly
        # its physical size on the user's monitor.
        self.scale = screenDPI/self.DPI
        w, h = self.W*self.scale, self.H*self.scale
        spacing_x, spacing_y = self.spacingX*self.scale, self.spacingY*self.scale
        for i in range(self.cols*self.rows):
            row, col = i//self.cols, i%self.cols
            # NOTE(review): these arguments are floats (true division above),
            # while QRect takes ints -- confirm the installed PyQt build
            # coerces them rather than raising TypeError.
            box = QRect(spacing_x+col*(spacing_x+w), spacing_y+row*(spacing_y+h), w-1, h-1)
            self.boxes.append(box)
        fg = QPixmap(self.paperW*self.scale, self.paperH*self.scale)
        fg.fill()  # white background
        painter = QPainter(fg)
        for box in self.boxes:
            painter.drawRect(box)  # outline each empty slot
        painter.end()
        self.setPixmap(fg)
    def setPhoto(self, pixmap):
        # Slot for Thumbnail.clicked: remember which photo to place next.
        self.photo = pixmap
    def toggleBorder(self, ok):
        """Enable/disable photo borders and repaint every placed photo."""
        self.add_border = ok
        grid = self.pixmap()
        painter = QPainter(grid)
        for index in self.pixmap_dict:
            topleft = self.boxes[index].topLeft()
            # scaled(..., 1, 1): Qt enum values 1,1 = KeepAspectRatio,
            # SmoothTransformation.
            pm = self.pixmap_dict[index].scaled(self.W*self.scale, self.H*self.scale, 1, 1)
            painter.drawPixmap(topleft, pm)
            if ok: painter.drawRect(topleft.x(), topleft.y(), pm.width()-1, pm.height()-1)
        painter.end()
        self.setPixmap(grid)
    def mouseMoveEvent(self, ev):
        # Show a pointing-hand cursor while hovering over any slot.
        for box in self.boxes:
            if box.contains(ev.pos()):
                self.setCursor(Qt.PointingHandCursor)
                return
        self.setCursor(Qt.ArrowCursor)
    def mousePressEvent(self, ev):
        """Place the currently selected photo into the clicked slot."""
        # Blank pixmap used to erase whatever was previously in the slot.
        blank_pm = QPixmap(self.W*self.scale, self.H*self.scale)
        blank_pm.fill()
        for box in self.boxes:
            if box.contains(ev.pos()):
                topleft = box.topLeft()
                pm = self.photo.scaled(self.W*self.scale, self.H*self.scale, 1, 1)
                bg = self.pixmap()
                painter = QPainter(bg)
                painter.drawPixmap(topleft, blank_pm)  # clear old content first
                painter.drawPixmap(topleft, pm)
                if self.add_border:
                    painter.drawRect(topleft.x(), topleft.y(), pm.width()-1, pm.height()-1)
                painter.end()
                self.setPixmap(bg)
                # Remember the original pixmap for full-resolution rendering.
                self.pixmap_dict[self.boxes.index(box)] = self.photo
                break
    def createFinalGrid(self):
        """Render the grid at full print resolution into self.photo_grid."""
        self.photo_grid = QPixmap(self.paperW, self.paperH)
        self.photo_grid.fill()
        painter = QPainter(self.photo_grid)
        for index in self.pixmap_dict:
            row, col = index//self.cols, index%self.cols
            topleft = QPoint(self.spacingX+col*(self.spacingX+self.W), self.spacingY+row*(self.spacingY+self.H))
            pm = self.pixmap_dict[index].scaled(self.W, self.H, 1, 1)
            painter.drawPixmap(topleft, pm)
            if self.add_border:
                painter.drawRect(topleft.x(), topleft.y(), pm.width()-1, pm.height()-1)
        painter.end()
class GridSetupDialog(QDialog, Ui_GridSetupDialog):
    """Collects paper/photo dimensions and computes the best-fitting grid."""

    def __init__(self, parent):
        QDialog.__init__(self, parent)
        self.setupUi(self)

    def accept(self):
        """Convert the entered sizes to print pixels, pick the paper
        orientation that fits more photos, store the result on the dialog
        (paperW/paperH/rows/cols/W/H/DPI) and close with Accepted."""
        # Multipliers converting the selected unit (inch / cm / mm) to inches.
        to_inch = [1, 1 / 2.54, 1 / 25.4][self.paperSizeUnit.currentIndex()]
        dpi = self.spinDPI.value()
        page_w = self.spinPaperWidth.value() * to_inch * dpi
        page_h = self.spinPaperHeight.value() * to_inch * dpi
        # Photo size is entered in centimetres.
        photo_w = self.spinPhotoWidth.value() * dpi / 2.54
        photo_h = self.spinPhotoHeight.value() * dpi / 2.54
        # Candidate layouts: paper as entered vs. paper rotated 90 degrees.
        as_entered = (int(page_h // photo_h), int(page_w // photo_w))
        rotated = (int(page_w // photo_h), int(page_h // photo_w))
        if as_entered[0] * as_entered[1] >= rotated[0] * rotated[1]:
            self.paperW, self.paperH = page_w, page_h
            self.rows, self.cols = as_entered
        else:
            # Rotating the paper fits more photos.
            self.paperW, self.paperH = page_h, page_w
            self.rows, self.cols = rotated
        self.W = photo_w
        self.H = photo_h
        self.DPI = dpi
        QDialog.accept(self)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Tasvirci by OsmanDE
# github.com/OsmanDE/Tasvirci
from photogrid_dialog import Ui_GridDialog
from gridsetup_dialog import Ui_GridSetupDialog
from PyQt5.QtCore import Qt, QObject, pyqtSignal, QRect, QPoint
from PyQt5.QtGui import QPixmap, QPainter, QImageReader, QPen
from PyQt5.QtWidgets import QApplication, QLabel, QDialog, QHBoxLayout, QSizePolicy, QFileDialog, QMessageBox
helptext = '''Resim koymak için bir resim thumbnaili seç. Seçtiğin fotoğrafı koymak için boş kutulardan birine tıkla.
Eğer gridi daha farklı resimlerden oluşturmak istiyorsan fotoğraf ekleme kısmını seç.
Yeni bir resim seçerek eski resmi kaldırabilirsin.'''
class GridDialog(QDialog, Ui_GridDialog):
    """Dialog for composing a photo grid from one or more thumbnails.

    Hosts a scrollable :class:`GridPaper` preview, a vertical strip of
    selectable :class:`Thumbnail` widgets, and buttons to configure the
    grid, add photos, toggle borders and show help.
    """

    def __init__(self, pixmap, parent):
        """Build the dialog around an initial photo.

        pixmap -- QPixmap used as the first (pre-selected) thumbnail.
        parent -- parent widget for the dialog.
        """
        QDialog.__init__(self, parent)
        self.setupUi(self)
        self.resize(1020, 640)
        layout = QHBoxLayout(self.scrollAreaWidgetContents)
        layout.setContentsMargins(0, 0, 0, 0)
        self.gridPaper = GridPaper(self)
        layout.addWidget(self.gridPaper)
        self.thumbnailGr = ThumbnailGroup(self)
        thumbnail = Thumbnail(pixmap, self.frame)
        self.verticalLayout.addWidget(thumbnail)
        thumbnail.select(True)
        thumbnail.clicked.connect(self.gridPaper.setPhoto)
        self.thumbnailGr.append(thumbnail)
        self.configureBtn.clicked.connect(self.configure)
        self.addPhotoBtn.clicked.connect(self.addPhoto)
        self.checkAddBorder.clicked.connect(self.gridPaper.toggleBorder)
        self.helpBtn.clicked.connect(self.showHelp)
        # The pre-selected photo is what gets placed on the first cell click.
        self.gridPaper.photo = pixmap

    def configure(self):
        """Open the grid setup dialog and apply accepted layout settings."""
        dialog = GridSetupDialog(self)
        if dialog.exec_() == 1:
            # Copy the computed layout parameters onto the preview paper.
            self.gridPaper.paperW = dialog.paperW
            self.gridPaper.paperH = dialog.paperH
            self.gridPaper.rows = dialog.rows
            self.gridPaper.cols = dialog.cols
            self.gridPaper.W = dialog.W
            self.gridPaper.H = dialog.H
            self.gridPaper.DPI = dialog.DPI
            self.gridPaper.setupGrid()

    def addPhoto(self):
        """Let the user pick an image file and add it as a new thumbnail."""
        # BUG FIX: the filter previously read "*.jpg *jpeg" -- the missing
        # dot meant files with a ".jpeg" extension were not matched.
        filefilter = "JPEG Images (*.jpg *.jpeg);;PNG Images (*.png);;All Files (*)"
        filepath, sel_filter = QFileDialog.getOpenFileName(
            self, 'Seyahatname - Resmi Aç', '', filefilter)
        if not filepath:
            return  # user cancelled the dialog
        image_reader = QImageReader(filepath)
        image_reader.setAutoTransform(True)  # honour EXIF orientation
        pm = QPixmap.fromImageReader(image_reader)
        if pm.isNull():
            return  # unreadable/corrupt image: silently ignore, as before
        thumbnail = Thumbnail(pm, self.frame)
        self.verticalLayout.addWidget(thumbnail)
        thumbnail.clicked.connect(self.gridPaper.setPhoto)
        self.thumbnailGr.append(thumbnail)

    def accept(self):
        """Render the full-resolution grid, then close with Accepted."""
        # Create final grid when ok is clicked
        self.gridPaper.createFinalGrid()
        QDialog.accept(self)

    def showHelp(self):
        """Show usage instructions in a message box."""
        global helptext
        QMessageBox.about(self, 'Nasıl Grid Oluştururum?', helptext)
class Thumbnail(QLabel):
    """Clickable 100px-wide thumbnail; emits the full-size pixmap on click."""

    clicked = pyqtSignal(QPixmap)

    def __init__(self, pixmap, parent):
        QLabel.__init__(self, parent)
        self.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        self.photo = pixmap
        self.setPixmap(pixmap.scaledToWidth(100))

    def mousePressEvent(self, ev):
        # Hand the original (unscaled) photo to listeners.
        self.clicked.emit(self.photo)

    def select(self, select):
        scaled = self.photo.scaledToWidth(100)
        if not select:
            # Deselected: plain scaled photo, no highlight.
            self.setPixmap(scaled)
            return
        # Selected: paint a blue 4px frame onto the scaled copy.
        painter = QPainter(scaled)
        pen = QPen(Qt.blue)
        pen.setWidth(4)
        painter.setPen(pen)
        painter.drawRect(2, 2, 100-4, scaled.height()-4)
        painter.end()
        self.setPixmap(scaled)
class ThumbnailGroup(QObject):
    """Tracks thumbnails so that exactly one is highlighted at a time."""

    def __init__(self, parent):
        QObject.__init__(self, parent)
        self.thumbnails = []

    def append(self, thumbnail):
        # Register the thumbnail and listen for its clicks.
        self.thumbnails.append(thumbnail)
        thumbnail.clicked.connect(self.selectThumbnail)

    def selectThumbnail(self):
        # Clear every highlight, then highlight the clicked sender.
        for member in self.thumbnails:
            member.select(False)
        self.sender().select(True)
class GridPaper(QLabel):
    """Preview widget showing the photo grid at screen resolution.

    Holds the full-resolution layout (paper size, photo cell size,
    spacing, DPI) and renders a screen-scale preview.  pixmap_dict maps
    box index -> the full-size photo placed in that box.

    Fix: PyQt5 geometry calls (QRect, QPixmap, QPixmap.scaled) require
    int arguments; the layout values can be floats, so they are cast at
    every Qt call site.
    """
    def __init__(self, parent):
        QLabel.__init__(self, parent)
        self.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        self.setMouseTracking(True)
        self.pixmap_dict = {}      # box index -> full-size QPixmap
        self.add_border = True
        self.DPI = 300
        self.paperW, self.paperH = 1800, 1200   # paper size in px at self.DPI
        self.W, self.H = 413, 531               # photo cell size in px at self.DPI
        self.cols, self.rows = 4, 2
        self.setupGrid()

    def setupGrid(self):
        """Recompute spacing/scale and draw the empty preview grid."""
        self.boxes = []
        # Even gutters around and between cells (full-resolution units).
        self.spacingX = (self.paperW-self.cols*self.W)/(self.cols+1)
        self.spacingY = (self.paperH-self.rows*self.H)/(self.rows+1)
        screenDPI = QApplication.desktop().logicalDpiX()
        self.scale = screenDPI/self.DPI
        w, h = self.W*self.scale, self.H*self.scale
        spacing_x, spacing_y = self.spacingX*self.scale, self.spacingY*self.scale
        for i in range(self.cols*self.rows):
            row, col = i//self.cols, i%self.cols
            box = QRect(int(spacing_x+col*(spacing_x+w)),
                        int(spacing_y+row*(spacing_y+h)),
                        int(w)-1, int(h)-1)
            self.boxes.append(box)
        fg = QPixmap(int(self.paperW*self.scale), int(self.paperH*self.scale))
        fg.fill()
        painter = QPainter(fg)
        for box in self.boxes:
            painter.drawRect(box)
        painter.end()
        self.setPixmap(fg)

    def setPhoto(self, pixmap):
        # Photo that the next click on a box will place.
        self.photo = pixmap

    def toggleBorder(self, ok):
        """Redraw all placed photos with/without a 1px border."""
        self.add_border = ok
        grid = self.pixmap()
        painter = QPainter(grid)
        for index in self.pixmap_dict:
            topleft = self.boxes[index].topLeft()
            pm = self.pixmap_dict[index].scaled(int(self.W*self.scale), int(self.H*self.scale), 1, 1)
            painter.drawPixmap(topleft, pm)
            if ok:
                painter.drawRect(topleft.x(), topleft.y(), pm.width()-1, pm.height()-1)
        painter.end()
        self.setPixmap(grid)

    def mouseMoveEvent(self, ev):
        # Show a pointing-hand cursor while hovering over a cell.
        for box in self.boxes:
            if box.contains(ev.pos()):
                self.setCursor(Qt.PointingHandCursor)
                return
        self.setCursor(Qt.ArrowCursor)

    def mousePressEvent(self, ev):
        """Place the currently selected photo into the clicked cell."""
        blank_pm = QPixmap(int(self.W*self.scale), int(self.H*self.scale))
        blank_pm.fill()
        for box in self.boxes:
            if box.contains(ev.pos()):
                topleft = box.topLeft()
                pm = self.photo.scaled(int(self.W*self.scale), int(self.H*self.scale), 1, 1)
                bg = self.pixmap()
                painter = QPainter(bg)
                painter.drawPixmap(topleft, blank_pm)   # erase any previous photo
                painter.drawPixmap(topleft, pm)
                if self.add_border:
                    painter.drawRect(topleft.x(), topleft.y(), pm.width()-1, pm.height()-1)
                painter.end()
                self.setPixmap(bg)
                self.pixmap_dict[self.boxes.index(box)] = self.photo
                break

    def createFinalGrid(self):
        """Compose the full-resolution grid into self.photo_grid."""
        self.photo_grid = QPixmap(int(round(self.paperW)), int(round(self.paperH)))
        self.photo_grid.fill()
        painter = QPainter(self.photo_grid)
        for index in self.pixmap_dict:
            row, col = index//self.cols, index%self.cols
            topleft = QPoint(int(round(self.spacingX+col*(self.spacingX+self.W))),
                             int(round(self.spacingY+row*(self.spacingY+self.H))))
            pm = self.pixmap_dict[index].scaled(int(self.W), int(self.H), 1, 1)
            painter.drawPixmap(topleft, pm)
            if self.add_border:
                painter.drawRect(topleft.x(), topleft.y(), pm.width()-1, pm.height()-1)
        painter.end()
class GridSetupDialog(QDialog, Ui_GridSetupDialog):
    """Dialog for choosing paper size, photo size and DPI.

    On accept, computes the paper dimensions in pixels and picks the
    paper orientation (portrait vs landscape) that fits more photos.
    Fix: results are rounded to int, since PyQt5 geometry APIs
    (QPixmap/QRect/QPoint) reject float arguments.
    """
    def __init__(self, parent):
        QDialog.__init__(self, parent)
        self.setupUi(self)
    def accept(self):
        # Paper-size unit -> inches multiplier (inch, cm, mm).
        units = [1, 1/2.54, 1/25.4]
        DPI = self.spinDPI.value()
        unit_mult = units[self.paperSizeUnit.currentIndex()]
        paperW = self.spinPaperWidth.value()*unit_mult*DPI
        paperH = self.spinPaperHeight.value()*unit_mult*DPI
        # Photo size is always entered in centimetres.
        W, H = self.spinPhotoWidth.value()*DPI/2.54, self.spinPhotoHeight.value()*DPI/2.54
        # Try both paper orientations; keep the one holding more photos.
        rows1, cols1 = int(paperH//H), int(paperW//W)
        rows2, cols2 = int(paperW//H), int(paperH//W)
        if rows1*cols1 >= rows2*cols2:
            self.paperW, self.paperH = paperW, paperH
            self.rows, self.cols = rows1, cols1
        else:
            self.paperW, self.paperH = paperH, paperW
            self.rows, self.cols = rows2, cols2
        # Round to ints for downstream Qt drawing calls.
        self.paperW = int(round(self.paperW))
        self.paperH = int(round(self.paperH))
        self.W = int(round(W))
        self.H = int(round(H))
        self.DPI = DPI
        QDialog.accept(self)
| tr | 0.994764 | #!/usr/bin/env python # -*- coding: utf-8 -*- # Tasvirci by OsmanDE # github.com/OsmanDE/Tasvirci Resim koymak için bir resim thumbnaili seç. Seçtiğin fotoğrafı koymak için boş kutulardan birine tıkla. Eğer gridi daha farklı resimlerden oluşturmak istiyorsan fotoğraf ekleme kısmını seç. Yeni bir resim seçerek eski resmi kaldırabilirsin. # Create final grid when ok is clicked | 1.967031 | 2 |
nmt/data/tools/fuse_bpe_all.py | xjz92/Attach-Dictionary | 1 | 6621040 | <reponame>xjz92/Attach-Dictionary<filename>nmt/data/tools/fuse_bpe_all.py<gh_stars>1-10
def load_dictionary(ent_path, def_path):
    """Build an entry -> definition mapping from two parallel text files.

    Lines are paired up positionally; pairs where the entry equals its
    definition are skipped.  The returned dict is ordered by definition
    length, longest first, so longer phrases are substituted before
    their sub-phrases by rewrite().

    Fix: the original leaked both file handles (zip(open(...), open(...)));
    context managers now close them deterministically.
    """
    dicti = {}
    with open(ent_path) as ent_file, open(def_path) as def_file:
        for ents, defs in zip(ent_file, def_file):
            entry = ents.strip('\n')
            defini = defs.strip('\n')
            if entry != defini:
                dicti[entry] = defini
    sorted_items = sorted(dicti.items(), key=lambda item: len(item[1]), reverse=True)
    return dict(sorted_items)
def rewrite(infile, outfile, dic):
    """Copy infile to outfile, applying all dictionary substitutions.

    For each line, every key of `dic` found in the line is replaced by
    its value (keys are tried in dict order, so longest-first dicts from
    load_dictionary win over overlapping shorter keys).  `count` tallies
    one per (line, key) substitution and is printed at the end.

    Fix: the original never closed the output file; context managers
    now guarantee both handles are flushed and closed.
    """
    count = 0
    with open(infile) as ins, open(outfile, 'w') as outs:
        for line in ins:
            templine = line.strip('\n')
            for item in dic:
                if item in templine:
                    templine = templine.replace(item, dic[item])
                    count += 1
            outs.write(templine + '\n')
    print(count, ' multiple words fused')
if __name__ == "__main__":
    import sys
    # CLI: fuse_bpe_all.py <input> <output> <entries-file> <definitions-file>
    # The entries/definitions files are parallel: line i of `mult` maps to
    # line i of `sing` (see load_dictionary).
    infile=str(sys.argv[1])
    outfile=str(sys.argv[2])
    mult=str(sys.argv[3])
    sing=str(sys.argv[4])
    vocab= load_dictionary(mult, sing)
    rewrite(infile, outfile, vocab)
| def load_dictionary(ent_path,def_path):
dicti={}
for ents, defs in zip(open(ent_path),open(def_path)):
entry=ents.strip('\n')
defini=defs.strip('\n')
#print(len(defini.split()))
if(entry!=defini):
dicti[entry]=defini
sorted_items= sorted(dicti.items(), key=lambda item: len(item[1]), reverse=True)
out=dict(sorted_items)
#print(len(out))
return out
def rewrite(infile, outfile, dic):
outs=open(outfile,'w')
count=0
for line in open(infile):
templine=line.strip('\n')
for item in dic:
if item in templine:
templine=templine.replace(item, dic[item])
count+=1
outs.write(templine+'\n')
print(count,' multiple words fused')
if __name__ == "__main__":
import sys
infile=str(sys.argv[1])
outfile=str(sys.argv[2])
mult=str(sys.argv[3])
sing=str(sys.argv[4])
vocab= load_dictionary(mult, sing)
rewrite(infile, outfile, vocab) | ru | 0.260892 | #print(len(defini.split())) #print(len(out)) | 2.915988 | 3 |
rec_app/src/data.py | LukasSteffensen/movielens-imdb-exploration | 30 | 6621041 | <reponame>LukasSteffensen/movielens-imdb-exploration
import pandas as pd
import datetime, time
import os
import random
import numpy as np
import scipy.sparse as sp
import json
from IPython.display import Image
import base64
from imdbpie import Imdb
import requests
DATA_DIR = "../../movielens-imdb-exploration/data"
def string2ts(string, fmt="%Y-%m-%d %H:%M:%S"):
    """Parse a timestamp string and return epoch seconds (local time)."""
    parsed = datetime.datetime.strptime(string, fmt)
    return int(time.mktime(parsed.timetuple()))
def slice_by_lengths(lengths, the_list):
    """Yield consecutive chunks of `the_list` with the given lengths.

    Note: consumes (mutates) `the_list` from the front, matching the
    original implementation.
    """
    for length in lengths:
        yield [the_list.pop(0) for _ in range(length)]
def initial_data():
    """Load the MovieLens CSVs, synthesize random friend groups and return
    (df, df_friends, df_movies, n_friend_groups).

    Side effects: reads CSVs from DATA_DIR and prints summary counts.
    Non-deterministic: friend-group sizes are drawn with random.randint.
    """
    # MOVIES
    df_movies = pd.read_csv(f"{DATA_DIR}/movies_cast_company.csv", encoding='utf8')
    df_movies["cast"] = df_movies["cast"].apply(lambda x: json.loads(x))
    df_movies["company"] = df_movies["company"].apply(lambda x: json.loads(x))
    # TODO: just temporary, later remove
    df_movies = df_movies.drop(['movie_id', 'keyword', 'cast', 'company'], axis=1)
    # RATINGS
    df_ratings = pd.read_csv(f"{DATA_DIR}/ratings.csv")
    df_ratings.rating_timestamp = df_ratings.rating_timestamp.apply(lambda x: string2ts(x))
    # USERS
    df_users = pd.read_csv(f"{DATA_DIR}/users.csv")
    # TODO: just temporary, later remove
    #additional_rows = ["user_zipcode"]
    #df_users = df_users.drop(additional_rows, axis=1)
    # Encode categorical user columns as integer codes.
    num2occupation = dict(enumerate(df_users.user_occupation.unique()))
    occupation2num = {y:x for x,y in num2occupation.items()}
    num2gender = dict(enumerate(df_users.user_gender.unique()))
    gender2num = {y:x for x,y in num2gender.items()}
    df_users.user_occupation = df_users.user_occupation.apply(lambda x: occupation2num[x])
    df_users.user_gender = df_users.user_gender.apply(lambda x: gender2num[x])
    df_posters = pd.read_csv(f"{DATA_DIR}/movie_poster.csv", names=["movie_id_ml", "poster_url"])
    # ALL
    df = pd.merge(df_movies, df_ratings, on="movie_id_ml")
    df = pd.merge(df, df_users, on="user_id")
    df = pd.merge(df, df_posters, on="movie_id_ml")
    # Creating UID, IID, FID
    # movies
    id2movie = dict(enumerate(df.movie_id_ml.unique()))
    movie2id = {y:x for x,y in id2movie.items()}
    # users
    id2user = dict(enumerate(df.user_id.unique()))
    user2id = {y:x for x,y in id2user.items()}
    user_ids = list(df_users.user_id.unique())
    total_users = len(user_ids)
    # Partition the user count into random chunk sizes of 2..8; the last
    # chunk is clipped so that the sizes sum exactly to total_users.
    lengths_sum = 0
    lengths = []
    for i in range(total_users):
        length = random.randint(2, 8)
        if lengths_sum+length > total_users:
            length = total_users - lengths_sum
            lengths_sum += length
            lengths.append(length)
            break
        elif lengths_sum+length == total_users:
            lengths_sum += length
            lengths.append(length)
            break
        else:
            lengths_sum += length
            lengths.append(length)
    # friend_ids: list of (friend_group_id, [user_id, ...]) pairs.
    friend_ids = [i for i in enumerate(slice_by_lengths(lengths, user_ids))]
    print(f"Number of friend groupd: {len(friend_ids)}, max {max(friend_ids)[0]}")
    user2friendsid = {}
    for fid_and_uids in friend_ids:
        for uid in fid_and_uids[1]:
            user2friendsid[uid] = fid_and_uids[0]
    df["iid"] = df.apply(lambda x: movie2id[x.movie_id_ml], axis=1)
    df["uid"] = df.apply(lambda x: user2id[x.user_id], axis=1)
    df["fid"] = df.apply(lambda x: user2friendsid[x.user_id], axis=1)
    # Aggregate per-group features: mean member age and median rating per movie.
    fid2avgage = dict(df.groupby("fid")["user_age"].agg(np.mean))
    fid2medianrating = dict(df.groupby(["fid","iid"])["rating"].agg(np.median))
    df["fid_user_avg_age"] = df.apply(lambda x: fid2avgage[x.fid], axis=1)
    df["rating"] = df.apply(lambda x: fid2medianrating[(x.fid, x.iid)], axis=1)
    # Drop per-user columns; rows then collapse to one per (group, movie).
    df = df.drop(["uid", "user_gender", "user_occupation", "user_age", "user_id", "rating_timestamp"], axis=1)
    df = df.drop_duplicates()
    # shape [n_users, n_user_features]
    df_friends = df[['fid', 'fid_user_avg_age']].drop_duplicates()
    print(f"Number of friends features: {df_friends.shape[0]}")
    df_movies = df[['iid', 'unknown', 'action', 'adventure', 'animation', 'childrens', 'comedy', 'crime', 'documentary', 'drama', 'fantasy', 'noir', 'horror', 'musical', 'mystery', 'romance', 'scifi', 'thriller', 'war', 'western']].drop_duplicates()
    print(f"Number of movies features: {df_movies.shape[0]}")
    return df, df_friends, df_movies, len(friend_ids)
def update_data(friends_id, ratings, rated_movie_ids, df, df_friends, df_movies):
    """Add a new friend group and its seed ratings to the training data.

    ratings: one rating list per group member, aligned with rated_movie_ids.
    The group's rating for each movie is the median over its members.
    Returns (df_train, df_friends, df_movies) where df_train is the old
    data plus the new group's rows, restricted to [fid, iid, rating] as int64.

    Fix: DataFrame.append was removed in pandas 2.0; use pd.concat instead.
    """
    new_friend_row = pd.DataFrame([{"fid": friends_id, "fid_user_avg_age": 0}])
    df_friends = pd.concat([df_friends, new_friend_row], ignore_index=True)
    print(f"New number of friends features: {df_friends.shape[0]}")
    print(f"New number of movies features: {df_movies.shape[0]}")
    data_new_friends_training = []
    for mid, movie_real_id in enumerate(rated_movie_ids):
        # Median rating of the group's members for this movie.
        avg_mv_rating = np.median(np.array([user_ratings[mid] for user_ratings in ratings]))
        data_new_friends_training.append([friends_id, movie_real_id, avg_mv_rating])
    columns = ["fid", "iid", "rating"]
    # User initial input that will be given to him to rate before recommendation.
    df_new_friends_train = pd.DataFrame(data_new_friends_training, columns=columns)
    df_train = pd.concat([df.copy(), df_new_friends_train], sort=False)
    df_train = df_train[["fid", "iid", "rating"]].astype(np.int64)
    return df_train, df_friends, df_movies
def onehotencoding2genre(x):
    """Return the names of the genres whose one-hot flag on `x` equals 1."""
    genres = ['unknown', 'action', 'adventure', 'animation', 'childrens', 'comedy',
              'crime', 'documentary', 'drama', 'fantasy', 'noir', 'horror', 'musical',
              'mystery', 'romance', 'scifi', 'thriller', 'war', 'western']
    return [name for name in genres if getattr(x, name) == 1]
def get_trending_movie_ids(k, df):
    """Return k movie ids sampled from the top-50 movies by weighted rating.

    Uses the IMDB weighted-rating formula WR = v/(v+m)*R + m/(v+m)*C over
    movies whose rating count exceeds the 90th-percentile count.
    Side effects: reads movies_cast_company.csv from DATA_DIR.
    Non-deterministic: df.sample(k) varies between calls.

    Fix: the original's final return line was fused with stray text
    ("| import pandas as pd"), which broke the module; restored it.
    """
    df_movie_count_mean = df.groupby(["movie_id_ml", "title"], as_index=False)["rating"].agg(["count", "mean"]).reset_index()
    C = df_movie_count_mean["mean"].mean()          # mean rating across all movies
    m = df_movie_count_mean["count"].quantile(0.9)  # minimum-votes cutoff
    def weighted_rating(x, m=m, C=C):
        """Calculation based on the IMDB formula"""
        v = x['count']
        R = x['mean']
        return (v/(v+m) * R) + (m/(m+v) * C)
    df_movies = pd.read_csv(f"{DATA_DIR}/movies_cast_company.csv", encoding='utf8')
    df_movies["cast"] = df_movies["cast"].apply(lambda x: json.loads(x))
    df_movies["company"] = df_movies["company"].apply(lambda x: json.loads(x))
    df_movies["genres"] = df_movies.apply(lambda x: onehotencoding2genre(x), axis=1)
    # Keep only movies above the vote-count cutoff, then join metadata.
    df_movies_1 = df_movie_count_mean.copy().loc[df_movie_count_mean["count"] > m]
    df = pd.merge(df_movies, df_movies_1, on=["movie_id_ml", "title"])
    # Define a new feature 'score' and calculate its value with `weighted_rating()`
    df['score'] = df.apply(weighted_rating, axis=1)
    # Sort movies based on score calculated above, then sample k of the top 50.
    df = df.sort_values('score', ascending=False).reset_index()
    df = df.head(50)
    df = df.sample(k)
    return list(df.movie_id_ml)
import datetime, time
import os
import random
import numpy as np
import scipy.sparse as sp
import json
from IPython.display import Image
import base64
from imdbpie import Imdb
import requests
DATA_DIR = "../../movielens-imdb-exploration/data"
def string2ts(string, fmt="%Y-%m-%d %H:%M:%S"):
dt = datetime.datetime.strptime(string, fmt)
t_tuple = dt.timetuple()
return int(time.mktime(t_tuple))
def slice_by_lengths(lengths, the_list):
for length in lengths:
new = []
for i in range(length):
new.append(the_list.pop(0))
yield new
def initial_data():
# MOVIES
df_movies = pd.read_csv(f"{DATA_DIR}/movies_cast_company.csv", encoding='utf8')
df_movies["cast"] = df_movies["cast"].apply(lambda x: json.loads(x))
df_movies["company"] = df_movies["company"].apply(lambda x: json.loads(x))
# TODO: just temporary, later remove
df_movies = df_movies.drop(['movie_id', 'keyword', 'cast', 'company'], axis=1)
# RATINGS
df_ratings = pd.read_csv(f"{DATA_DIR}/ratings.csv")
df_ratings.rating_timestamp = df_ratings.rating_timestamp.apply(lambda x: string2ts(x))
# USERS
df_users = pd.read_csv(f"{DATA_DIR}/users.csv")
# TODO: just temporary, later remove
#additional_rows = ["user_zipcode"]
#df_users = df_users.drop(additional_rows, axis=1)
num2occupation = dict(enumerate(df_users.user_occupation.unique()))
occupation2num = {y:x for x,y in num2occupation.items()}
num2gender = dict(enumerate(df_users.user_gender.unique()))
gender2num = {y:x for x,y in num2gender.items()}
df_users.user_occupation = df_users.user_occupation.apply(lambda x: occupation2num[x])
df_users.user_gender = df_users.user_gender.apply(lambda x: gender2num[x])
df_posters = pd.read_csv(f"{DATA_DIR}/movie_poster.csv", names=["movie_id_ml", "poster_url"])
# ALL
df = pd.merge(df_movies, df_ratings, on="movie_id_ml")
df = pd.merge(df, df_users, on="user_id")
df = pd.merge(df, df_posters, on="movie_id_ml")
# Creating UID, IID, FID
# movies
id2movie = dict(enumerate(df.movie_id_ml.unique()))
movie2id = {y:x for x,y in id2movie.items()}
# users
id2user = dict(enumerate(df.user_id.unique()))
user2id = {y:x for x,y in id2user.items()}
user_ids = list(df_users.user_id.unique())
total_users = len(user_ids)
lengths_sum = 0
lengths = []
for i in range(total_users):
length = random.randint(2, 8)
if lengths_sum+length > total_users:
length = total_users - lengths_sum
lengths_sum += length
lengths.append(length)
break
elif lengths_sum+length == total_users:
lengths_sum += length
lengths.append(length)
break
else:
lengths_sum += length
lengths.append(length)
friend_ids = [i for i in enumerate(slice_by_lengths(lengths, user_ids))]
print(f"Number of friend groupd: {len(friend_ids)}, max {max(friend_ids)[0]}")
user2friendsid = {}
for fid_and_uids in friend_ids:
for uid in fid_and_uids[1]:
user2friendsid[uid] = fid_and_uids[0]
df["iid"] = df.apply(lambda x: movie2id[x.movie_id_ml], axis=1)
df["uid"] = df.apply(lambda x: user2id[x.user_id], axis=1)
df["fid"] = df.apply(lambda x: user2friendsid[x.user_id], axis=1)
fid2avgage = dict(df.groupby("fid")["user_age"].agg(np.mean))
fid2medianrating = dict(df.groupby(["fid","iid"])["rating"].agg(np.median))
df["fid_user_avg_age"] = df.apply(lambda x: fid2avgage[x.fid], axis=1)
df["rating"] = df.apply(lambda x: fid2medianrating[(x.fid, x.iid)], axis=1)
df = df.drop(["uid", "user_gender", "user_occupation", "user_age", "user_id", "rating_timestamp"], axis=1)
df = df.drop_duplicates()
# shape [n_users, n_user_features]
df_friends = df[['fid', 'fid_user_avg_age']].drop_duplicates()
print(f"Number of friends features: {df_friends.shape[0]}")
df_movies = df[['iid', 'unknown', 'action', 'adventure', 'animation', 'childrens', 'comedy', 'crime', 'documentary', 'drama', 'fantasy', 'noir', 'horror', 'musical', 'mystery', 'romance', 'scifi', 'thriller', 'war', 'western']].drop_duplicates()
print(f"Number of movies features: {df_movies.shape[0]}")
return df, df_friends, df_movies, len(friend_ids)
def update_data(friends_id, ratings, rated_movie_ids, df, df_friends, df_movies):
df_friends = df_friends.append({"fid": friends_id, "fid_user_avg_age":0}, ignore_index=True)
print(f"New number of friends features: {df_friends.shape[0]}")
print(f"New number of movies features: {df_movies.shape[0]}")
data_new_friends_training = []
for mid, movie_real_id in enumerate(rated_movie_ids):
avg_mv_rating = np.median(np.array([user_ratings[mid] for user_ratings in ratings]))
data_new_friends_training.append([friends_id, movie_real_id, avg_mv_rating])
columns = ["fid", "iid", "rating"]
# user initial input that will be given to him to rate it before recommendation
df_new_friends_train = pd.DataFrame(data_new_friends_training, columns=columns)
df_train = df.copy()
df_train = pd.concat([df_train, df_new_friends_train], sort=False)
df_train = df_train[["fid", "iid", "rating"]].astype(np.int64)
#df_new_friends_train = df_new_friends_train[["fid", "iid", "rating"]].astype(np.int64)
return df_train, df_friends, df_movies
def onehotencoding2genre(x):
genres= ['unknown','action','adventure','animation','childrens','comedy','crime','documentary','drama','fantasy','noir','horror','musical','mystery','romance','scifi','thriller','war','western']
ret_val = []
for c in genres:
g = getattr(x, c)
if g == 1:
ret_val.append(c)
return ret_val
def get_trending_movie_ids(k, df):
df_movie_count_mean = df.groupby(["movie_id_ml", "title"], as_index=False)["rating"].agg(["count", "mean"]).reset_index()
C = df_movie_count_mean["mean"].mean()
m = df_movie_count_mean["count"].quantile(0.9)
def weighted_rating(x, m=m, C=C):
"""Calculation based on the IMDB formula"""
v = x['count']
R = x['mean']
return (v/(v+m) * R) + (m/(m+v) * C)
df_movies = pd.read_csv(f"{DATA_DIR}/movies_cast_company.csv", encoding='utf8')
df_movies["cast"] = df_movies["cast"].apply(lambda x: json.loads(x))
df_movies["company"] = df_movies["company"].apply(lambda x: json.loads(x))
df_movies["genres"] = df_movies.apply(lambda x: onehotencoding2genre(x), axis=1)
df_movies_1 = df_movie_count_mean.copy().loc[df_movie_count_mean["count"] > m]
df = pd.merge(df_movies, df_movies_1, on=["movie_id_ml", "title"])
# Define a new feature 'score' and calculate its value with `weighted_rating()`
df['score'] = df.apply(weighted_rating, axis=1)
#Sort movies based on score calculated above
df = df.sort_values('score', ascending=False).reset_index()
df = df.head(50)
df = df.sample(k)
return list(df.movie_id_ml) | en | 0.811003 | # MOVIES # TODO: just temporary, later remove # RATINGS # USERS # TODO: just temporary, later remove #additional_rows = ["user_zipcode"] #df_users = df_users.drop(additional_rows, axis=1) # ALL # Creating UID, IID, FID # movies # users # shape [n_users, n_user_features] # user initial input that will be given to him to rate it before recommendation #df_new_friends_train = df_new_friends_train[["fid", "iid", "rating"]].astype(np.int64) Calculation based on the IMDB formula # Define a new feature 'score' and calculate its value with `weighted_rating()` #Sort movies based on score calculated above | 2.731553 | 3 |
myworkspace/translate_tutorial/cnn_test.py | haochen12/dpPython | 0 | 6621042 | <filename>myworkspace/translate_tutorial/cnn_test.py
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
from tensorflow.keras import datasets, models
from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten
from tensorflow.keras.utils import to_categorical
# Load MNIST and normalize pixel values to [0, 1].
# Fixes vs. original: (1) the original divided the *labels* by 255, destroying
# the class ids; (2) x_test4D was built from train_images instead of
# test_images; (3) sparse_categorical_crossentropy expects integer labels,
# so y must be the raw label vector, not a to_categorical() one-hot matrix.
(train_images, train_labels), (test_images, test_labels) = datasets.mnist.load_data()
train_images = train_images / 255.0
test_images = test_images / 255.0
# Add the channel dimension expected by Conv2D: (N, 28, 28, 1).
x_train4D = train_images.reshape(train_images.shape[0], 28, 28, 1).astype('float32')
x_test4D = test_images.reshape(test_images.shape[0], 28, 28, 1).astype('float32')
model = models.Sequential()
model.add(Conv2D(filters=16,
                 kernel_size=(5, 5),
                 input_shape=(28, 28, 1),
                 padding="same",
                 activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=32,
                 kernel_size=(5, 5),
                 padding="same",
                 activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=64,
                 kernel_size=(5, 5),
                 padding="same",
                 activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=128,
                 kernel_size=(5, 5),
                 padding="same",
                 activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation="relu"))
model.add(Dropout(0.25))
model.add(Dense(10, activation='softmax'))
model.compile(loss='sparse_categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
# Integer labels go straight in with the sparse loss.
model.fit(x=x_train4D, y=train_labels, batch_size=100, epochs=2)
| <filename>myworkspace/translate_tutorial/cnn_test.py
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
from tensorflow.keras import datasets, models
from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten
from tensorflow.keras.utils import to_categorical
(train_images, train_labels), (test_images, test_labels) = datasets.mnist.load_data()
train_images, train_labels = train_images / 255.0, train_labels / 255.0
x_train4D = train_images.reshape(train_images.shape[0], 28, 28, 1).astype('float32')
x_test4D = train_images.reshape(train_images.shape[0], 28, 28, 1).astype('float32')
model = models.Sequential()
model.add(Conv2D(filters=16,
kernel_size=(5, 5),
input_shape=(28, 28, 1),
padding="same",
activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(tf.keras.layers.Dropout(0.25))
model.add(Conv2D(filters=32,
kernel_size=(5, 5),
padding="same",
activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=64,
kernel_size=(5, 5),
padding="same",
activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=128,
kernel_size=(5, 5),
padding="same",
activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation="relu"))
model.add(Dropout(0.25))
model.add(Dense(10, activation='softmax'))
model.compile(loss='sparse_categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model.fit(x=x_train4D, y=to_categorical(train_labels), batch_size=100, epochs=2)
| none | 1 | 3.107594 | 3 | |
src/pynn/bin/decode_g2p.py | enesyugan/yapay-nn | 0 | 6621043 | #!/usr/bin/env python3
# encoding: utf-8
# Copyright 2019 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License")
import time
import argparse
import torch
from pynn.util import load_object
from pynn.decoder.s2s import beam_search
from pynn.util.text import load_dict, write_hypo
# Command-line interface for beam-search grapheme-to-phoneme decoding.
parser = argparse.ArgumentParser(description='pynn')
parser.add_argument('--model-dic', help='model dictionary', required=True)
parser.add_argument('--src-dict', help='src dictionary file', required=True)
parser.add_argument('--tgt-dict', help='tgt dictionary file', required=True)
parser.add_argument('--data-path', help='path to data file', required=True)
parser.add_argument('--batch-size', help='batch size', type=int, default=32)
parser.add_argument('--beam-size', help='beam size', type=int, default=6)
parser.add_argument('--max-len', help='max len', type=int, default=40)
parser.add_argument('--fp16', help='float 16 bits', action='store_true')
parser.add_argument('--len-norm', help='length normalization', action='store_true')
parser.add_argument('--output', help='output file', type=str, default='hypos/output.text')
if __name__ == '__main__':
    args = parser.parse_args()
    # Source dictionary file: "<grapheme> <id>" per line -> grapheme to id.
    src_dic = {}
    with open(args.src_dict, 'r') as f:
        for line in f:
            tokens = line.split()
            src_dic[tokens[0]] = int(tokens[1])
    # Target dictionary file: "<phoneme> <id>" per line -> id to phoneme.
    tgt_dic = {}
    with open(args.tgt_dict, 'r') as f:
        for line in f:
            tokens = line.split()
            tgt_dic[int(tokens[1])] = tokens[0]
    use_gpu = torch.cuda.is_available()
    device = torch.device('cuda' if use_gpu else 'cpu')
    # Rebuild the model from the saved dictionary and load its weights.
    mdic = torch.load(args.model_dic)
    model = load_object(mdic['class'], mdic['module'], mdic['params'])
    model = model.to(device)
    model.load_state_dict(mdic['state'])
    model.eval()
    if args.fp16: model.half()
    # Build (word, id-sequence) pairs.  Leading/trailing hyphens are stripped
    # before lookup; words containing out-of-vocabulary characters are skipped.
    # Ids are shifted by +2 and a trailing 2 is appended (presumably reserved
    # specials such as EOS -- TODO confirm against the training pipeline).
    data = []
    for line in open(args.data_path, 'r'):
        word = line.split()[0]
        word_norm = word[1:] if word.startswith('-') else word
        word_norm = word_norm[:-1] if word_norm.endswith('-') else word_norm
        seq = []
        skip = False
        for ch in word_norm:
            if ch not in src_dic:
                skip = True; break
            seq.append(src_dic[ch])
        if skip: continue
        #seq = [src_dic[ch] for ch in word]
        seq = [el+2 for el in seq] + [2]
        data.append((word, seq))
    bs = args.batch_size
    since = time.time()
    fout = open(args.output, 'w')
    with torch.no_grad():
        start = 0
        while True:
            # Take the next batch; stop when the data is exhausted.
            batch = data[start: start+bs]
            if len(batch) == 0: break
            words, seqs = zip(*batch)
            # Right-pad sequences with 0 and build the non-padding mask.
            max_len = max(len(inst) for inst in seqs)
            src = [inst + [0] * (max_len - len(inst)) for inst in seqs]
            src = torch.LongTensor(src)
            mask = src.gt(0)
            src, mask = src.to(device), mask.to(device)
            hypos = beam_search(model, src, mask, device, args.beam_size,
                                args.max_len, len_norm=args.len_norm)[0]
            for word, hypo in zip(words, hypos):
                # Truncate at the first 2 (end token), map ids back to phonemes.
                j = 0
                while j < len(hypo) and hypo[j] != 2: j += 1
                hypo = [tgt_dic[tk-2] for tk in hypo[:j]]
                if len(hypo) == 0: continue
                fout.write(word + ' ' + ' '.join(hypo) + '\n')
            start += bs
    fout.close()
    time_elapsed = time.time() - since
    print(" Elapsed Time: %.0fm %.0fs" % (time_elapsed // 60, time_elapsed % 60))
| #!/usr/bin/env python3
# encoding: utf-8
# Copyright 2019 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License")
import time
import argparse
import torch
from pynn.util import load_object
from pynn.decoder.s2s import beam_search
from pynn.util.text import load_dict, write_hypo
parser = argparse.ArgumentParser(description='pynn')
parser.add_argument('--model-dic', help='model dictionary', required=True)
parser.add_argument('--src-dict', help='src dictionary file', required=True)
parser.add_argument('--tgt-dict', help='tgt dictionary file', required=True)
parser.add_argument('--data-path', help='path to data file', required=True)
parser.add_argument('--batch-size', help='batch size', type=int, default=32)
parser.add_argument('--beam-size', help='beam size', type=int, default=6)
parser.add_argument('--max-len', help='max len', type=int, default=40)
parser.add_argument('--fp16', help='float 16 bits', action='store_true')
parser.add_argument('--len-norm', help='length normalization', action='store_true')
parser.add_argument('--output', help='output file', type=str, default='hypos/output.text')
if __name__ == '__main__':
args = parser.parse_args()
src_dic = {}
with open(args.src_dict, 'r') as f:
for line in f:
tokens = line.split()
src_dic[tokens[0]] = int(tokens[1])
tgt_dic = {}
with open(args.tgt_dict, 'r') as f:
for line in f:
tokens = line.split()
tgt_dic[int(tokens[1])] = tokens[0]
use_gpu = torch.cuda.is_available()
device = torch.device('cuda' if use_gpu else 'cpu')
mdic = torch.load(args.model_dic)
model = load_object(mdic['class'], mdic['module'], mdic['params'])
model = model.to(device)
model.load_state_dict(mdic['state'])
model.eval()
if args.fp16: model.half()
data = []
for line in open(args.data_path, 'r'):
word = line.split()[0]
word_norm = word[1:] if word.startswith('-') else word
word_norm = word_norm[:-1] if word_norm.endswith('-') else word_norm
seq = []
skip = False
for ch in word_norm:
if ch not in src_dic:
skip = True; break
seq.append(src_dic[ch])
if skip: continue
#seq = [src_dic[ch] for ch in word]
seq = [el+2 for el in seq] + [2]
data.append((word, seq))
bs = args.batch_size
since = time.time()
fout = open(args.output, 'w')
with torch.no_grad():
start = 0
while True:
batch = data[start: start+bs]
if len(batch) == 0: break
words, seqs = zip(*batch)
max_len = max(len(inst) for inst in seqs)
src = [inst + [0] * (max_len - len(inst)) for inst in seqs]
src = torch.LongTensor(src)
mask = src.gt(0)
src, mask = src.to(device), mask.to(device)
hypos = beam_search(model, src, mask, device, args.beam_size,
args.max_len, len_norm=args.len_norm)[0]
for word, hypo in zip(words, hypos):
j = 0
while j < len(hypo) and hypo[j] != 2: j += 1
hypo = [tgt_dic[tk-2] for tk in hypo[:j]]
if len(hypo) == 0: continue
fout.write(word + ' ' + ' '.join(hypo) + '\n')
start += bs
fout.close()
time_elapsed = time.time() - since
print(" Elapsed Time: %.0fm %.0fs" % (time_elapsed // 60, time_elapsed % 60))
| en | 0.712648 | #!/usr/bin/env python3 # encoding: utf-8 # Copyright 2019 <NAME> # Licensed under the Apache License, Version 2.0 (the "License") #seq = [src_dic[ch] for ch in word] | 2.157879 | 2 |
utils/util.py | mchirico/gmail | 1 | 6621044 | <reponame>mchirico/gmail
import pickle
from os import path
def pickle_it(file, obj):
    """Serialize `obj` to the given file path with pickle."""
    with open(file, 'wb') as handle:
        pickle.dump(obj, handle)
def unpickle_it(file):
    """Load and return the pickled object stored at the given file path."""
    with open(file, 'rb') as handle:
        return pickle.load(handle)
def findFile(file="bigquery.json"):
if path.exists("credentials/{}".format(file)):
return "credentials/{}".format(file)
if path.exists("../credentials/{}".format(file)):
return "../credentials/{}".format(file)
if path.exists("/credentials/{}".format(file)):
return "/credentials/{}".format(file)
if path.exists("etc/strava/credentials/{}".format(file)):
return "etc/strava/credentials/{}".format(file)
| import pickle
from os import path
def pickle_it(file, obj):
with open(file, 'wb') as f:
pickle.dump(obj, f)
def unpickle_it(file):
with open(file, 'rb') as f:
return pickle.load(f)
def findFile(file="bigquery.json"):
if path.exists("credentials/{}".format(file)):
return "credentials/{}".format(file)
if path.exists("../credentials/{}".format(file)):
return "../credentials/{}".format(file)
if path.exists("/credentials/{}".format(file)):
return "/credentials/{}".format(file)
if path.exists("etc/strava/credentials/{}".format(file)):
return "etc/strava/credentials/{}".format(file) | none | 1 | 2.883862 | 3 | |
regionalSwordFernDieOff/generateDirectories.py | paul-shannon/annotatedMap | 0 | 6621045 | import json
import os
import yaml
sites = json.load(open('v4.js'))
for i in range(len(sites)):
directoryName = "site.%04d" % i
if(not os.path.exists(directoryName)):
os.mkdir(directoryName)
print(" %s exists? %s" % (directoryName, os.path.exists(directoryName)))
site = sites[i]
site["firstReported"] = "2000-01-01"
site["lastUpdate"] = "2000-01-01"
site["contact"] = ""
site["radius"] = 100
site["area"] = 10
site["severity"] = 0
site["notesFile"] = "notes.html"
siteFileName = os.path.join(directoryName, "site.yaml")
print("siteFileName: '%s'" % siteFileName)
siteFile = open(siteFileName, "w")
yaml.dump(site, siteFile, default_flow_style=False)
notesFileName = os.path.join(directoryName, site["notesFile"])
notesFile = open(notesFileName, "w")
notesFile.write(site["text"])
notesFile.close()
# site.yaml
# firstReported: 2013-10-01
# lastUpdate: 2019-03-01
# contact: <NAME>
# radius: 100
# area: 10
# severity: 10
# notesFile: notes.html
# title: Seward Park Ground Zero
# lat: 47.555474
# lon: -122.248945
# firstReported: 2013-10-01
# lastUpdate: 2019-03-01
# contact: <NAME>
# radius: 100
# area: 10
# severity: 10
# notesFile: notes.html
# photoTabs:
# - title: "Before and After: 2011 2017"
# url: https://4.bp.blogspot.com/-xpU4_m8AXW0/WhjplTnA2hI/AAAAAAAAX64/n_GNVcM0E1E8xE3EgrJAvjgi5b37nsO6wCLcBGAs/s1600/Screen%2BShot%2B2017-11-24%2Bat%2B7.54.16%2BPM.png
# - title: <NAME>
# url: "https://2.bp.blogspot.com/-0u1aT9PEBcI/WqtEkMgaQOI/AAAAAAAAX_0/ailkJiE1w6oCvsCXbcipTvhho6rDiGNWACLcBGAs/s1600/IMG_2057.JPG"
# videoTabs:
# - title: KING 5 News 2017
# url: https://www.youtube.com/embed/qtifUa6LTn4
| import json
import os
import yaml
sites = json.load(open('v4.js'))
for i in range(len(sites)):
directoryName = "site.%04d" % i
if(not os.path.exists(directoryName)):
os.mkdir(directoryName)
print(" %s exists? %s" % (directoryName, os.path.exists(directoryName)))
site = sites[i]
site["firstReported"] = "2000-01-01"
site["lastUpdate"] = "2000-01-01"
site["contact"] = ""
site["radius"] = 100
site["area"] = 10
site["severity"] = 0
site["notesFile"] = "notes.html"
siteFileName = os.path.join(directoryName, "site.yaml")
print("siteFileName: '%s'" % siteFileName)
siteFile = open(siteFileName, "w")
yaml.dump(site, siteFile, default_flow_style=False)
notesFileName = os.path.join(directoryName, site["notesFile"])
notesFile = open(notesFileName, "w")
notesFile.write(site["text"])
notesFile.close()
# site.yaml
# firstReported: 2013-10-01
# lastUpdate: 2019-03-01
# contact: <NAME>
# radius: 100
# area: 10
# severity: 10
# notesFile: notes.html
# title: Seward Park Ground Zero
# lat: 47.555474
# lon: -122.248945
# firstReported: 2013-10-01
# lastUpdate: 2019-03-01
# contact: <NAME>
# radius: 100
# area: 10
# severity: 10
# notesFile: notes.html
# photoTabs:
# - title: "Before and After: 2011 2017"
# url: https://4.bp.blogspot.com/-xpU4_m8AXW0/WhjplTnA2hI/AAAAAAAAX64/n_GNVcM0E1E8xE3EgrJAvjgi5b37nsO6wCLcBGAs/s1600/Screen%2BShot%2B2017-11-24%2Bat%2B7.54.16%2BPM.png
# - title: <NAME>
# url: "https://2.bp.blogspot.com/-0u1aT9PEBcI/WqtEkMgaQOI/AAAAAAAAX_0/ailkJiE1w6oCvsCXbcipTvhho6rDiGNWACLcBGAs/s1600/IMG_2057.JPG"
# videoTabs:
# - title: KING 5 News 2017
# url: https://www.youtube.com/embed/qtifUa6LTn4
| en | 0.484802 | # site.yaml # firstReported: 2013-10-01 # lastUpdate: 2019-03-01 # contact: <NAME> # radius: 100 # area: 10 # severity: 10 # notesFile: notes.html # title: Seward Park Ground Zero # lat: 47.555474 # lon: -122.248945 # firstReported: 2013-10-01 # lastUpdate: 2019-03-01 # contact: <NAME> # radius: 100 # area: 10 # severity: 10 # notesFile: notes.html # photoTabs: # - title: "Before and After: 2011 2017" # url: https://4.bp.blogspot.com/-xpU4_m8AXW0/WhjplTnA2hI/AAAAAAAAX64/n_GNVcM0E1E8xE3EgrJAvjgi5b37nsO6wCLcBGAs/s1600/Screen%2BShot%2B2017-11-24%2Bat%2B7.54.16%2BPM.png # - title: <NAME> # url: "https://2.bp.blogspot.com/-0u1aT9PEBcI/WqtEkMgaQOI/AAAAAAAAX_0/ailkJiE1w6oCvsCXbcipTvhho6rDiGNWACLcBGAs/s1600/IMG_2057.JPG" # videoTabs: # - title: KING 5 News 2017 # url: https://www.youtube.com/embed/qtifUa6LTn4 | 2.475645 | 2 |
japanese2phoneme/normalize.py | iory/japanese2phoneme | 0 | 6621046 | def normalize_japanese_text(line: str) -> str:
for char in [u"(", u")", u" ", u".", u"?", u"「", u"」",
u"[", u"]", u"@W", u"@S", u"<", u">", u" ", u"。"]:
line = line.replace(char, "")
for char in [u"・", u"·"]:
line = line.replace(char, " ")
line = line.strip()
return line
| def normalize_japanese_text(line: str) -> str:
for char in [u"(", u")", u" ", u".", u"?", u"「", u"」",
u"[", u"]", u"@W", u"@S", u"<", u">", u" ", u"。"]:
line = line.replace(char, "")
for char in [u"・", u"·"]:
line = line.replace(char, " ")
line = line.strip()
return line
| none | 1 | 3.402674 | 3 | |
rel8/app.py | aucontraire/rel8 | 0 | 6621047 | #!/usr/bin/env python3
"""rel8 Flask app"""
import binascii
import csv
import datetime
from dateutil import relativedelta
from flask import abort, flash, Flask, jsonify, render_template
from flask import redirect, request, session, stream_with_context, url_for
from flask_bcrypt import Bcrypt
from flask_login import LoginManager, login_user, login_required, logout_user, current_user
from io import StringIO
import models
from models.interval import Interval
from models.outcome import Outcome
from models.predictor import Predictor
from models.response import Response
from models.session import Session
from models.user import User
import os
import phonenumbers
import pytz
from pytz import timezone
from rel8.forms import RegistrationForm, PasswordForm, LoginForm, VariablesForm
from rel8.utils import get_local_dt
from twilio.twiml.messaging_response import MessagingResponse
from werkzeug.datastructures import Headers
from werkzeug import wrappers
TWILIO_ACCOUNT_SID = os.getenv('TWILIO_ACCOUNT_SID')
TWILIO_AUTH_TOKEN = os.getenv('TWILIO_AUTH_TOKEN')
app = Flask(__name__)
app.url_map.strict_slashes = False
app.secret_key = os.getenv('SECRET_KEY')
bcrypt = Bcrypt(app)
SITE_URL = os.getenv('SITE_URL')
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'login'
@login_manager.user_loader
def load_user(user_id):
    """flask-login hook: fetch the User for *user_id* from storage."""
    return models.storage.get(User, user_id)
def get_session():
    """Bump the per-session message counter and return SMS-flow state.

    Returns (session, counter, consent, name_req); consent and name_req
    track the enrollment conversation state and default to False.
    """
    session['counter'] = session.get('counter', 0) + 1
    return (
        session,
        session['counter'],
        session.get('consent', False),
        session.get('name_req', False),
    )
def standardize_phone(phone_number):
    """Normalize *phone_number* to E.164 form (e.g. '+15551234567').

    Numbers without a country code are parsed as US numbers.
    Raises phonenumbers.NumberParseException on unparseable input.
    """
    phone_number = phonenumbers.parse(phone_number, "US")
    phone_number_formatted = phonenumbers.format_number(
        phone_number, phonenumbers.PhoneNumberFormat.E164)
    return phone_number_formatted
def find_user_by_phone(phone_number):
    """Return the User whose stored number matches *phone_number*
    (compared in E.164 form), or None when no account exists."""
    target = standardize_phone(phone_number)
    return next(
        (u for u in models.storage.all(User).values()
         if u.phone_number == target),
        None,
    )
@app.route('/register', methods=['GET', 'POST'])
def register():
    """Finish account setup for a user pre-created via SMS.

    Verifies the texted access code, then stores a bcrypt password hash
    and timezone on the User. GET renders the form; a valid POST
    redirects to /login.
    """
    error = None
    form = RegistrationForm()
    if form.validate_on_submit():
        user = find_user_by_phone(form.phone_number.data)
        if user:
            if user.access_code == form.access_code.data:
                # NOTE(review): this line was a redaction artifact
                # ("<PASSWORD>.generate_password_hash(...)", a syntax error);
                # restored to bcrypt, the hasher used by the /password route.
                user.password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
                user.timezone = form.timezone.data
                user.save()
                return redirect(url_for('login'))
            else:
                error = 'Check access code or follow link in text'
        else:
            error = 'Account does not exist'
    return render_template('register.html', form=form, error=error)
@app.route('/')
def index():
    """Landing page: authenticated users go straight to the dashboard."""
    # `current_user` is a flask-login proxy that is truthy even for
    # anonymous visitors, so the original `if current_user:` redirected
    # everyone (then /dashboard bounced them back to /login). Check the
    # authentication flag instead.
    if current_user.is_authenticated:
        return redirect('dashboard')
    return render_template('index.html')
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate by phone number + password.

    Users who exist but never completed registration (no password yet)
    are bounced to the registration form instead of an error page.
    """
    error = None
    form = LoginForm()
    if form.validate_on_submit():
        user = find_user_by_phone(form.phone_number.data)
        if user:
            if not user.password:
                # Account was created via SMS but setup never finished:
                # send them to registration rather than failing login.
                error = 'Set up a password first'
                form = RegistrationForm()
                return render_template('register.html', form=form, error=error)
            elif bcrypt.check_password_hash(user.password, form.password.data):
                login_user(user)
                session['user-id'] = user.id  #TODO: keep?
                return redirect(url_for('dashboard'))
            else:
                error = 'Check your password'
        else:
            error = 'Account does not exist'
    return render_template('login.html', form=form, error=error)
@app.route('/logout')
@login_required
def logout():
    """Log the current user out and clear our session bookkeeping."""
    # Pop with a default: a session that never had 'user-id' written
    # (e.g. restored from a remember-me cookie) must not raise KeyError.
    session.pop('user-id', None)
    logout_user()
    return redirect(url_for('index'))
@app.route('/dashboard', methods=['GET'])
@login_required
def dashboard():
    """Show the user's predictor/outcome sessions, oldest first.

    Each entry passed to the template is either a 1-tuple (predictor
    response only, session still open) or a 3-tuple of (predictor
    response, outcome response, minutes-component between them).
    """
    error = None
    responses = []
    # The loop variable was previously named `session`, shadowing the
    # imported flask `session` proxy inside this function; renamed `sess`.
    current_user.sessions.sort(key=lambda s: s.updated_at, reverse=False)
    for sess in current_user.sessions:
        sess.responses.sort(key=lambda r: r.updated_at, reverse=False)
        if len(sess.responses) == 1:
            responses.append((sess.responses[0], ))
        elif len(sess.responses) == 2:
            # NOTE(review): relativedelta.minutes is only the minutes
            # component (0-59), not a total — gaps of an hour or more are
            # truncated. Kept as-is to match csv_download; confirm intent.
            diff = relativedelta.relativedelta(
                sess.responses[1].updated_at, sess.responses[0].updated_at)
            responses.append((sess.responses[0], sess.responses[1], diff.minutes))
    return render_template('dashboard.html', error=error, user=current_user,
                           responses=responses)
@app.route('/csv')
@login_required
def csv_download():
    """Stream the current user's sessions as a CSV attachment.

    The filename is the user's local timestamp. Rows pair each predictor
    response with its outcome response (when present) and the minutes
    between them.

    Fix: this route reads `current_user` but had no @login_required, so
    anonymous requests could hit it; it is now login-protected like the
    other user-data routes.
    """
    now = datetime.datetime.now()
    filename = "{}.csv".format(get_local_dt(now, human=True, format='%Y-%m-%d_%H.%M.%S'))

    def generate():
        # One reusable StringIO buffer: write a row, yield it, truncate.
        data = StringIO()
        writer = csv.writer(data)
        writer.writerow(('predictor dt', 'predictor', 'outcome dt', 'outcome', 'difference (min)'))
        yield data.getvalue()
        data.seek(0)
        data.truncate(0)
        # Renamed loop var from `session` to avoid shadowing flask's proxy.
        current_user.sessions.sort(key=lambda s: s.updated_at, reverse=False)
        for sess in current_user.sessions:
            sess.responses.sort(key=lambda r: r.updated_at, reverse=False)
            if len(sess.responses) == 1:
                # Open session: predictor recorded, no outcome yet.
                writer.writerow(
                    (
                        get_local_dt(sess.responses[0].updated_at),
                        sess.responses[0].message,
                        '',
                        '',
                        ''
                    )
                )
            elif len(sess.responses) == 2:
                # NOTE(review): relativedelta.minutes is the minutes
                # component only (0-59); kept to match the dashboard view.
                diff = relativedelta.relativedelta(
                    sess.responses[1].updated_at, sess.responses[0].updated_at)
                writer.writerow(
                    (
                        get_local_dt(sess.responses[0].updated_at),
                        sess.responses[0].message,
                        get_local_dt(sess.responses[1].updated_at),
                        sess.responses[1].message,
                        diff.minutes
                    )
                )
            yield data.getvalue()
            data.seek(0)
            data.truncate(0)

    headers = Headers()
    headers.set('Content-Disposition', 'attachment', filename=filename)
    return wrappers.Response(
        stream_with_context(generate()),
        mimetype='text/csv', headers=headers
    )
@app.route('/password', methods=['GET', 'POST'])
@login_required
def password(user=None):
    """Let the logged-in user change their password (stored bcrypt-hashed).

    The `user` parameter is unused by the view itself; it is accepted for
    route-signature compatibility.
    """
    form = PasswordForm()
    if form.validate_on_submit():
        current_user.password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
        current_user.save()
        flash('Updated password')
    return render_template('password.html', form=form)
@app.route('/variables', methods=['GET', 'POST'])
@login_required
def variables():
    """Create or update the user's study variables.

    Each user has exactly one predictor, one outcome, and one interval
    (max hours between predictor and outcome texts). On first submit the
    three records are created; later submits edit them in place.
    """
    error = None
    predictor = ''
    outcome = ''
    duration = ''
    # Pre-fill the form when the user already configured variables.
    if current_user.predictor:
        predictor = current_user.predictor.name
        outcome = current_user.outcome.name
        duration = current_user.interval.duration
    data = {
        'predictor': predictor,
        'outcome': outcome,
        'duration': duration
    }
    form = VariablesForm(data=data)
    if form.validate_on_submit():
        if current_user.predictor:
            # Update path: mutate existing records; names are normalized
            # to lowercase so SMS matching is case-insensitive.
            current_user.predictor.name = form.predictor.data.strip().lower()
            current_user.outcome.name = form.outcome.data.strip().lower()
            current_user.interval.duration = form.duration.data
            flash('Variables updated')
        else:
            # Create path: first-time setup of all three records.
            predictor = Predictor(
                name=form.predictor.data.strip().lower(),
                user_id=current_user.id
            )
            outcome = Outcome(
                name=form.outcome.data.strip().lower(),
                user_id=current_user.id
            )
            interval = Interval(
                duration=form.duration.data,
                user_id=current_user.id
            )
            models.storage.new(predictor)
            models.storage.new(outcome)
            models.storage.new(interval)
            flash('Variables added')
        # Single save covers both the update and create branches.
        models.storage.save()
    return render_template('variables.html', form=form, error=error)
def session_expired(created_at, interval):
    """Return True when more than *interval* hours have elapsed since
    *created_at* (a naive UTC datetime, as stored in the DB)."""
    age = datetime.datetime.utcnow() - created_at
    return age > datetime.timedelta(hours=interval)
def new_session(user, message, response):
    """Open a new predictor/outcome Session for *user*.

    The Session is persisted immediately. If *message* matches the user's
    predictor variable (case-insensitive), a Response row is recorded
    against it; otherwise a correction is texted back via *response*
    (the Twilio MessagingResponse being built by the caller).
    """
    sms_session = Session(
        user_id=user.id,
        interval_id=user.interval.id
    )
    models.storage.new(sms_session)
    models.storage.save()
    if message.strip().lower() == user.predictor.name:
        sms_response = Response(
            session_id=sms_session.id,
            predictor_id=user.predictor.id,
            user_id=user.id,
            message=message,
            twilio_json="{}"
        )
        models.storage.new(sms_response)
        models.storage.save()
    else:
        response.message('This should be the predictor.')
@app.route('/sms', methods=['POST'])
def sms():
    """Twilio webhook: drive both enrollment and data collection over SMS.

    Known users: a text matching their predictor opens a session (or
    replaces an expired one); a text matching their outcome closes the
    open session; anything else gets a correction reply.

    Unknown numbers walk an enrollment flow tracked in the Flask session:
    consent prompt -> yes/no -> ask for a name -> create the User and
    text back a registration link with a one-time access code.
    """
    session, counter, consent, name_req = get_session()
    response = MessagingResponse()
    phone_number = request.form['From']
    message = request.form['Body']
    user = find_user_by_phone(phone_number)
    if user:
        if not user.predictor and not user.outcome:
            # Registered but variables not configured yet.
            response.message(
                "Hi {}. You need to set up your variables first: {}".format(
                    user.username, SITE_URL
                )
            )
        elif message.strip().lower() != user.predictor.name and message.strip().lower() != user.outcome.name:
            response.message('That does not match your variables. Try again.')
        else:
            # Newest session first; sessions[0] is the one in progress.
            user.sessions.sort(key=lambda sess: sess.updated_at, reverse=True)
            if message.strip().lower() == user.predictor.name:
                if len(user.sessions) == 0 or user.sessions[0].complete is True:
                    new_session(user, message, response)
                elif user.sessions[0].complete is False:
                    if session_expired(user.sessions[0].created_at, user.sessions[0].interval.duration):
                        # Stale open session: close it and start fresh.
                        user.sessions[0].complete = True
                        new_session(user, message, response)
                    else:
                        response.message('We were expecting outcome: {}'.format(user.outcome.name))
            elif message.strip().lower() == user.outcome.name:
                if len(user.sessions) == 0 or user.sessions[0].complete is True:
                    response.message('We were expecting predictor: {}'.format(user.predictor.name))
                elif user.sessions[0].complete is False:
                    if session_expired(user.sessions[0].created_at, user.sessions[0].interval.duration):
                        user.sessions[0].complete = True
                        response.message('We were expecting predictor: {}'.format(user.predictor.name))
                    else:
                        # Outcome arrived in time: record it and close.
                        sms_response = Response(
                            session_id=user.sessions[0].id,
                            outcome_id=user.outcome.id,
                            user_id=user.id,
                            message=message,
                            twilio_json="{}"
                        )
                        models.storage.new(sms_response)
                        user.sessions[0].complete = True
                        models.storage.save()
    elif consent is True and name_req is True:
        # Enrollment step 3: the message body is the new user's name.
        access_code = binascii.hexlify(os.urandom(8)).decode()
        session['access-code'] = access_code
        user = User()
        user.username = message.strip()
        user.phone_number = phone_number
        user.access_code = access_code
        models.storage.new(user)
        models.storage.save()
        session['user-id'] = user.id
        response.message(
            "Welcome {}! Please go to: {}/register/?access-code={}".format(
                user.username, SITE_URL, access_code
            )
        )
    elif consent is True and name_req is False:
        # Enrollment step 2: interpret the yes/no consent answer.
        # NOTE(review): name_req is set True even on 'no', and a non-yes/no
        # answer falls through silently — confirm this is intended.
        session['name_req'] = True
        if message.strip().lower() == 'yes':
            session['consent'] = True
            response.message("What's your name?")
        elif message.strip().lower() == 'no':
            response.message("Sorry to hear that. Bye.")
    else:
        # Enrollment step 1: first contact from an unknown number.
        response.message("Would you like to enroll in rel8? [Yes, No]")
        session['consent'] = True
    return str(response)
@app.errorhandler(403)
def forbidden(error):
    """Render the custom 403 page."""
    return render_template('403.html'), 403
@app.errorhandler(404)
def page_not_found(error):
    """Render the custom 404 page."""
    return render_template('404.html'), 404
@app.errorhandler(500)
def server_error(error):
    """Render the custom 500 page."""
    return render_template('500.html'), 500
if __name__ == '__main__':
    # Host/port come from the environment so deployments can override the
    # defaults (0.0.0.0:5000).
    app.run(
        host=os.getenv('REL8_HOST', default='0.0.0.0'),
        port=int(os.getenv('REL8_PORT', default=5000))
    )
| #!/usr/bin/env python3
"""rel8 Flask app"""
import binascii
import csv
import datetime
from dateutil import relativedelta
from flask import abort, flash, Flask, jsonify, render_template
from flask import redirect, request, session, stream_with_context, url_for
from flask_bcrypt import Bcrypt
from flask_login import LoginManager, login_user, login_required, logout_user, current_user
from io import StringIO
import models
from models.interval import Interval
from models.outcome import Outcome
from models.predictor import Predictor
from models.response import Response
from models.session import Session
from models.user import User
import os
import phonenumbers
import pytz
from pytz import timezone
from rel8.forms import RegistrationForm, PasswordForm, LoginForm, VariablesForm
from rel8.utils import get_local_dt
from twilio.twiml.messaging_response import MessagingResponse
from werkzeug.datastructures import Headers
from werkzeug import wrappers
TWILIO_ACCOUNT_SID = os.getenv('TWILIO_ACCOUNT_SID')
TWILIO_AUTH_TOKEN = os.getenv('TWILIO_AUTH_TOKEN')
app = Flask(__name__)
app.url_map.strict_slashes = False
app.secret_key = os.getenv('SECRET_KEY')
bcrypt = Bcrypt(app)
SITE_URL = os.getenv('SITE_URL')
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'login'
@login_manager.user_loader
def load_user(user_id):
return models.storage.get(User, user_id)
def get_session():
counter = session.get('counter', 0)
counter += 1
session['counter'] = counter
consent = session.get('consent', False)
name_req = session.get('name_req', False)
return session, counter, consent, name_req
def standardize_phone(phone_number):
phone_number = phonenumbers.parse(phone_number, "US")
phone_number_formatted = phonenumbers.format_number(
phone_number, phonenumbers.PhoneNumberFormat.E164)
return phone_number_formatted
def find_user_by_phone(phone_number):
phone_number_formatted = standardize_phone(phone_number)
users = models.storage.all(User)
for user in users.values():
if user.phone_number == phone_number_formatted:
return user
return None
@app.route('/register', methods=['GET', 'POST'])
def register():
error = None
form = RegistrationForm()
if form.validate_on_submit():
user = find_user_by_phone(form.phone_number.data)
if user:
if user.access_code == form.access_code.data:
user.password = <PASSWORD>.generate_password_hash(form.password.data).decode('utf-8')
user.timezone = form.timezone.data
user.save()
return redirect(url_for('login'))
else:
error = 'Check access code or follow link in text'
else:
error = 'Account does not exist'
return render_template('register.html', form=form, error=error)
@app.route('/')
def index():
if current_user:
return redirect('dashboard')
return render_template('index.html')
@app.route('/login', methods=['GET', 'POST'])
def login():
error = None
form = LoginForm()
if form.validate_on_submit():
user = find_user_by_phone(form.phone_number.data)
if user:
if not user.password:
error = 'Set up a password first'
form = RegistrationForm()
return render_template('register.html', form=form, error=error)
elif bcrypt.check_password_hash(user.password, form.password.data):
login_user(user)
session['user-id'] = user.id #TODO: keep?
return redirect(url_for('dashboard'))
else:
error = 'Check your password'
else:
error = 'Account does not exist'
return render_template('login.html', form=form, error=error)
@app.route('/logout')
@login_required
def logout():
session.pop('user-id')
logout_user()
return redirect(url_for('index'))
@app.route('/dashboard', methods=['GET'])
@login_required
def dashboard():
error = None
responses = []
current_user.sessions.sort(key=lambda session: session.updated_at, reverse=False)
for session in current_user.sessions:
session.responses.sort(key=lambda response: response.updated_at, reverse=False)
if len(session.responses) == 1:
responses.append((session.responses[0], ))
elif len(session.responses) == 2:
diff = relativedelta.relativedelta(session.responses[1].updated_at, session.responses[0].updated_at)
responses.append((session.responses[0], session.responses[1], diff.minutes))
return render_template('dashboard.html', error=error, user=current_user, responses=responses)
@app.route('/csv')
def csv_download():
now = datetime.datetime.now()
filename = "{}.csv".format(get_local_dt(now, human=True, format='%Y-%m-%d_%H.%M.%S'))
def generate():
data = StringIO()
writer = csv.writer(data)
writer.writerow(('predictor dt', 'predictor', 'outcome dt', 'outcome', 'difference (min)'))
yield data.getvalue()
data.seek(0)
data.truncate(0)
current_user.sessions.sort(key=lambda session: session.updated_at, reverse=False)
for session in current_user.sessions:
session.responses.sort(key=lambda response: response.updated_at, reverse=False)
if len(session.responses) == 1:
writer.writerow(
(
get_local_dt(session.responses[0].updated_at),
session.responses[0].message,
'',
'',
''
)
)
elif len(session.responses) == 2:
diff = relativedelta.relativedelta(session.responses[1].updated_at, session.responses[0].updated_at)
writer.writerow(
(
get_local_dt(session.responses[0].updated_at),
session.responses[0].message,
get_local_dt(session.responses[1].updated_at),
session.responses[1].message,
diff.minutes
)
)
yield data.getvalue()
data.seek(0)
data.truncate(0)
headers = Headers()
headers.set('Content-Disposition', 'attachment', filename=filename)
return wrappers.Response(
stream_with_context(generate()),
mimetype='text/csv', headers=headers
)
@app.route('/password', methods=['GET', 'POST'])
@login_required
def password(user=None):
form = PasswordForm()
if form.validate_on_submit():
current_user.password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
current_user.save()
flash('Updated password')
return render_template('password.html', form=form)
@app.route('/variables', methods=['GET', 'POST'])
@login_required
def variables():
error = None
predictor = ''
outcome = ''
duration = ''
if current_user.predictor:
predictor = current_user.predictor.name
outcome = current_user.outcome.name
duration = current_user.interval.duration
data = {
'predictor': predictor,
'outcome': outcome,
'duration': duration
}
form = VariablesForm(data=data)
if form.validate_on_submit():
if current_user.predictor:
current_user.predictor.name = form.predictor.data.strip().lower()
current_user.outcome.name = form.outcome.data.strip().lower()
current_user.interval.duration = form.duration.data
flash('Variables updated')
else:
predictor = Predictor(
name=form.predictor.data.strip().lower(),
user_id=current_user.id
)
outcome = Outcome(
name=form.outcome.data.strip().lower(),
user_id=current_user.id
)
interval = Interval(
duration=form.duration.data,
user_id=current_user.id
)
models.storage.new(predictor)
models.storage.new(outcome)
models.storage.new(interval)
flash('Variables added')
models.storage.save()
return render_template('variables.html', form=form, error=error)
def session_expired(created_at, interval):
delta = datetime.timedelta(hours=interval)
now = datetime.datetime.utcnow()
return now > created_at + delta
def new_session(user, message, response):
sms_session = Session(
user_id=user.id,
interval_id=user.interval.id
)
models.storage.new(sms_session)
models.storage.save()
if message.strip().lower() == user.predictor.name:
sms_response = Response(
session_id=sms_session.id,
predictor_id=user.predictor.id,
user_id=user.id,
message=message,
twilio_json="{}"
)
models.storage.new(sms_response)
models.storage.save()
else:
response.message('This should be the predictor.')
@app.route('/sms', methods=['POST'])
def sms():
session, counter, consent, name_req = get_session()
response = MessagingResponse()
phone_number = request.form['From']
message = request.form['Body']
user = find_user_by_phone(phone_number)
if user:
if not user.predictor and not user.outcome:
response.message(
"Hi {}. You need to set up your variables first: {}".format(
user.username, SITE_URL
)
)
elif message.strip().lower() != user.predictor.name and message.strip().lower() != user.outcome.name:
response.message('That does not match your variables. Try again.')
else:
user.sessions.sort(key=lambda sess: sess.updated_at, reverse=True)
if message.strip().lower() == user.predictor.name:
if len(user.sessions) == 0 or user.sessions[0].complete is True:
new_session(user, message, response)
elif user.sessions[0].complete is False:
if session_expired(user.sessions[0].created_at, user.sessions[0].interval.duration):
user.sessions[0].complete = True
new_session(user, message, response)
else:
response.message('We were expecting outcome: {}'.format(user.outcome.name))
elif message.strip().lower() == user.outcome.name:
if len(user.sessions) == 0 or user.sessions[0].complete is True:
response.message('We were expecting predictor: {}'.format(user.predictor.name))
elif user.sessions[0].complete is False:
if session_expired(user.sessions[0].created_at, user.sessions[0].interval.duration):
user.sessions[0].complete = True
response.message('We were expecting predictor: {}'.format(user.predictor.name))
else:
sms_response = Response(
session_id=user.sessions[0].id,
outcome_id=user.outcome.id,
user_id=user.id,
message=message,
twilio_json="{}"
)
models.storage.new(sms_response)
user.sessions[0].complete = True
models.storage.save()
elif consent is True and name_req is True:
access_code = binascii.hexlify(os.urandom(8)).decode()
session['access-code'] = access_code
user = User()
user.username = message.strip()
user.phone_number = phone_number
user.access_code = access_code
models.storage.new(user)
models.storage.save()
session['user-id'] = user.id
response.message(
"Welcome {}! Please go to: {}/register/?access-code={}".format(
user.username, SITE_URL, access_code
)
)
elif consent is True and name_req is False:
session['name_req'] = True
if message.strip().lower() == 'yes':
session['consent'] = True
response.message("What's your name?")
elif message.strip().lower() == 'no':
response.message("Sorry to hear that. Bye.")
else:
response.message("Would you like to enroll in rel8? [Yes, No]")
session['consent'] = True
return str(response)
@app.errorhandler(403)
def forbidden(error):
return render_template('403.html'), 403
@app.errorhandler(404)
def page_not_found(error):
return render_template('404.html'), 404
@app.errorhandler(500)
def server_error(error):
return render_template('500.html'), 500
if __name__ == '__main__':
app.run(
host=os.getenv('REL8_HOST', default='0.0.0.0'),
port=int(os.getenv('REL8_PORT', default=5000))
)
| en | 0.315342 | #!/usr/bin/env python3 rel8 Flask app #TODO: keep? | 1.97264 | 2 |
ape_starknet/__init__.py | ApeWorX/ape-starknet | 0 | 6621048 | from ape import plugins
from ape.api.networks import LOCAL_NETWORK_NAME, NetworkAPI, create_network_type
from ape.types import AddressType
from ape_starknet._utils import NETWORKS, PLUGIN_NAME
from ape_starknet.accounts import StarknetAccountContracts, StarknetKeyfileAccount
from ape_starknet.config import StarknetConfig
from ape_starknet.conversion import StarknetAddressConverter
from ape_starknet.ecosystems import Starknet
from ape_starknet.provider import StarknetProvider
from ape_starknet.tokens import TokenManager
# Module-level token manager shared by the plugin's components.
tokens = TokenManager()
@plugins.register(plugins.ConversionPlugin)
def converters():
    """Register the Starknet address converter for AddressType values."""
    yield AddressType, StarknetAddressConverter
@plugins.register(plugins.Config)
def config_class():
    """Return this plugin's configuration class."""
    return StarknetConfig
@plugins.register(plugins.EcosystemPlugin)
def ecosystems():
    """Yield the Starknet ecosystem implementation."""
    yield Starknet
@plugins.register(plugins.NetworkPlugin)
def networks():
    """Yield (plugin name, network name, network type) for each known
    public network, plus the local development network."""
    for network_name, network_params in NETWORKS.items():
        yield PLUGIN_NAME, network_name, create_network_type(*network_params)
    # NOTE: This works for development providers, as they get chain_id from themselves
    yield PLUGIN_NAME, LOCAL_NETWORK_NAME, NetworkAPI
@plugins.register(plugins.ProviderPlugin)
def providers():
    """Register StarknetProvider for the local network and every public one."""
    network_names = [LOCAL_NETWORK_NAME] + [k for k in NETWORKS.keys()]
    for network_name in network_names:
        yield PLUGIN_NAME, network_name, StarknetProvider
@plugins.register(plugins.AccountPlugin)
def account_types():
    """Return the account container class and the account type."""
    return StarknetAccountContracts, StarknetKeyfileAccount
| from ape import plugins
from ape.api.networks import LOCAL_NETWORK_NAME, NetworkAPI, create_network_type
from ape.types import AddressType
from ape_starknet._utils import NETWORKS, PLUGIN_NAME
from ape_starknet.accounts import StarknetAccountContracts, StarknetKeyfileAccount
from ape_starknet.config import StarknetConfig
from ape_starknet.conversion import StarknetAddressConverter
from ape_starknet.ecosystems import Starknet
from ape_starknet.provider import StarknetProvider
from ape_starknet.tokens import TokenManager
tokens = TokenManager()
@plugins.register(plugins.ConversionPlugin)
def converters():
yield AddressType, StarknetAddressConverter
@plugins.register(plugins.Config)
def config_class():
return StarknetConfig
@plugins.register(plugins.EcosystemPlugin)
def ecosystems():
yield Starknet
@plugins.register(plugins.NetworkPlugin)
def networks():
for network_name, network_params in NETWORKS.items():
yield PLUGIN_NAME, network_name, create_network_type(*network_params)
# NOTE: This works for development providers, as they get chain_id from themselves
yield PLUGIN_NAME, LOCAL_NETWORK_NAME, NetworkAPI
@plugins.register(plugins.ProviderPlugin)
def providers():
network_names = [LOCAL_NETWORK_NAME] + [k for k in NETWORKS.keys()]
for network_name in network_names:
yield PLUGIN_NAME, network_name, StarknetProvider
@plugins.register(plugins.AccountPlugin)
def account_types():
return StarknetAccountContracts, StarknetKeyfileAccount
| en | 0.97535 | # NOTE: This works for development providers, as they get chain_id from themselves | 2.01862 | 2 |
tests/test_mysql.py | dropbox/pytest-call-tracer | 5 | 6621049 | <filename>tests/test_mysql.py
from __future__ import absolute_import
import MySQLdb
from unittest import TestCase
class MySQLdbSampleTest(TestCase):
    """Smoke test for a raw MySQLdb connection.

    Requires a reachable MySQL server accepting MySQLdb.connect() with
    library-default credentials.
    """
    def setUp(self):
        # Connects with library defaults (no host/user/password given).
        self.client = MySQLdb.connect()
    def test_simple(self):
        self.client.query('select 1')
        r = self.client.use_result()
        # fetch_row() returns a tuple of row tuples: one row, value 1.
        assert r.fetch_row() == ((1,), )
| <filename>tests/test_mysql.py
from __future__ import absolute_import
import MySQLdb
from unittest import TestCase
class MySQLdbSampleTest(TestCase):
def setUp(self):
self.client = MySQLdb.connect()
def test_simple(self):
self.client.query('select 1')
r = self.client.use_result()
assert r.fetch_row() == ((1,), )
| none | 1 | 2.53408 | 3 | |
vcdynhello.py | tjarrettveracode/veracode-dyn-hello-world | 0 | 6621050 | <gh_stars>0
import sys
import argparse
import logging
import json
import datetime
import anticrlf
from veracode_api_py import VeracodeAPI as vapi, Applications, Analyses, DynUtils
DEFAULT_BUSINESS_CRITICALITY = 'HIGH'
log = logging.getLogger(__name__)
def setup_logger():
    """Configure this module's logger to write INFO+ to vcdynhello.log.

    Uses anticrlf's formatter so values interpolated into log messages
    cannot inject CRLF sequences (log forging).
    """
    handler = logging.FileHandler('vcdynhello.log', encoding='utf8')
    handler.setFormatter(anticrlf.LogFormatter('%(asctime)s - %(levelname)s - %(funcName)s - %(message)s'))
    logger = logging.getLogger(__name__)
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)
def creds_expire_days_warning():
    """Print a warning when the Veracode API credentials expire within 7 days."""
    creds = vapi().get_creds()
    exp = datetime.datetime.strptime(creds['expiration_ts'], "%Y-%m-%dT%H:%M:%S.%f%z")
    # Both datetimes must be timezone-aware for the subtraction to work.
    delta = exp - datetime.datetime.now().astimezone() #we get a datetime with timezone...
    if (delta.days < 7):
        print('These API credentials expire ', creds['expiration_ts'])
def find_app_by_name(search_term):
    """Return Veracode application profiles whose name matches *search_term*."""
    return Applications().get_by_name(search_term)
def create_app(name):
    """Create an application profile named *name* with the default criticality."""
    return Applications().create(app_name=name, business_criticality=DEFAULT_BUSINESS_CRITICALITY)
def find_analysis_by_name(name):
    """Return the dynamic analysis named *name*, if one exists."""
    return Analyses().get_by_name(name)
def create_analysis(name,scan,email, owner):
    """Create a dynamic analysis named *name* containing the single *scan*."""
    thescans = [scan]
    return Analyses().create(name=name,scans=thescans, business_unit_guid=None,email=email,owner=owner)
def configure_scan(url, username, password, email, business_owner, phone, app_id):
theurl = DynUtils().setup_url(url)
auth_config = DynUtils().setup_auth_config(DynUtils().setup_auth('AUTO',username,password))
allowed_hosts = [theurl]
config_request = DynUtils().setup_scan_config_request(url=theurl,allowed_hosts=allowed_hosts,auth_config=auth_config)
contact = DynUtils().setup_scan_contact_info(email=email,first_and_last_name=business_owner,telephone=phone)
scan = DynUtils().setup_scan(scan_config_request=config_request,scan_contact_info=contact,linked_app_guid=app_id)
return scan
def main():
parser = argparse.ArgumentParser(
description='This script creates a dynamic analysis from the provided input.')
parser.add_argument('-u', '--url', required=False, help='URL to scan.',default='https://jarrett2.example.com')
parser.add_argument('--username', '-n', help='Username to use to authenticate to the URL.',default='admin')
parser.add_argument('--password', '-p', help='Password to authenticate to the URL (required if --username is set).',default='<PASSWORD>')
parser.add_argument('--email','-e', help='Contact email for the scan')
parser.add_argument('--business_owner','-b', help='Business owner of the system being scanned')
parser.add_argument('--phone','-ph',help='Contact phone number for the system being scanned')
args = parser.parse_args()
url = args.url
username = args.username
pwd = <PASSWORD>
email = args.email
bu = args.business_owner
phone = args.phone
setup_logger()
# CHECK FOR CREDENTIALS EXPIRATION
creds_expire_days_warning()
# check to see if already an application profile named the URL
app = find_app_by_name(url)
if len(app) > 0:
app_id = app[0]['guid']
log.info('Found app_id {} for application name {}.'.format(app_id,url))
else:
# create the application
app = create_app(url)
app_id = app['guid']
log.info('Created app_id {} for application name {}.'.format(app_id,url))
# check to see if we already have a dynamic analysis for this URL
da = find_analysis_by_name(url)
if len(da) > 0:
message = 'Found existing analysis named {} (analysis ID {}), exiting'.format(url, da[0]['analysis_id'])
log.info(message)
print(message)
return
# configure the scan request
scan = configure_scan(url,username,pwd,email, bu, phone, app_id)
# create the analysis
create_analysis(url, scan,email,bu)
da = find_analysis_by_name(url) # no JSON returned for create_analysis, need to look it up after
msg = "Created analysis id {} for url {}".format(da[0]['analysis_id'],url)
print(msg)
log.info(msg)
if __name__ == '__main__':
main() | import sys
import argparse
import logging
import json
import datetime
import anticrlf
from veracode_api_py import VeracodeAPI as vapi, Applications, Analyses, DynUtils
DEFAULT_BUSINESS_CRITICALITY = 'HIGH'
log = logging.getLogger(__name__)
def setup_logger():
handler = logging.FileHandler('vcdynhello.log', encoding='utf8')
handler.setFormatter(anticrlf.LogFormatter('%(asctime)s - %(levelname)s - %(funcName)s - %(message)s'))
logger = logging.getLogger(__name__)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
def creds_expire_days_warning():
creds = vapi().get_creds()
exp = datetime.datetime.strptime(creds['expiration_ts'], "%Y-%m-%dT%H:%M:%S.%f%z")
delta = exp - datetime.datetime.now().astimezone() #we get a datetime with timezone...
if (delta.days < 7):
print('These API credentials expire ', creds['expiration_ts'])
def find_app_by_name(search_term):
return Applications().get_by_name(search_term)
def create_app(name):
return Applications().create(app_name=name, business_criticality=DEFAULT_BUSINESS_CRITICALITY)
def find_analysis_by_name(name):
return Analyses().get_by_name(name)
def create_analysis(name,scan,email, owner):
thescans = [scan]
return Analyses().create(name=name,scans=thescans, business_unit_guid=None,email=email,owner=owner)
def configure_scan(url, username, password, email, business_owner, phone, app_id):
theurl = DynUtils().setup_url(url)
auth_config = DynUtils().setup_auth_config(DynUtils().setup_auth('AUTO',username,password))
allowed_hosts = [theurl]
config_request = DynUtils().setup_scan_config_request(url=theurl,allowed_hosts=allowed_hosts,auth_config=auth_config)
contact = DynUtils().setup_scan_contact_info(email=email,first_and_last_name=business_owner,telephone=phone)
scan = DynUtils().setup_scan(scan_config_request=config_request,scan_contact_info=contact,linked_app_guid=app_id)
return scan
def main():
parser = argparse.ArgumentParser(
description='This script creates a dynamic analysis from the provided input.')
parser.add_argument('-u', '--url', required=False, help='URL to scan.',default='https://jarrett2.example.com')
parser.add_argument('--username', '-n', help='Username to use to authenticate to the URL.',default='admin')
parser.add_argument('--password', '-p', help='Password to authenticate to the URL (required if --username is set).',default='<PASSWORD>')
parser.add_argument('--email','-e', help='Contact email for the scan')
parser.add_argument('--business_owner','-b', help='Business owner of the system being scanned')
parser.add_argument('--phone','-ph',help='Contact phone number for the system being scanned')
args = parser.parse_args()
url = args.url
username = args.username
pwd = <PASSWORD>
email = args.email
bu = args.business_owner
phone = args.phone
setup_logger()
# CHECK FOR CREDENTIALS EXPIRATION
creds_expire_days_warning()
# check to see if already an application profile named the URL
app = find_app_by_name(url)
if len(app) > 0:
app_id = app[0]['guid']
log.info('Found app_id {} for application name {}.'.format(app_id,url))
else:
# create the application
app = create_app(url)
app_id = app['guid']
log.info('Created app_id {} for application name {}.'.format(app_id,url))
# check to see if we already have a dynamic analysis for this URL
da = find_analysis_by_name(url)
if len(da) > 0:
message = 'Found existing analysis named {} (analysis ID {}), exiting'.format(url, da[0]['analysis_id'])
log.info(message)
print(message)
return
# configure the scan request
scan = configure_scan(url,username,pwd,email, bu, phone, app_id)
# create the analysis
create_analysis(url, scan,email,bu)
da = find_analysis_by_name(url) # no JSON returned for create_analysis, need to look it up after
msg = "Created analysis id {} for url {}".format(da[0]['analysis_id'],url)
print(msg)
log.info(msg)
if __name__ == '__main__':
main() | en | 0.775462 | #we get a datetime with timezone... # CHECK FOR CREDENTIALS EXPIRATION # check to see if already an application profile named the URL # create the application # check to see if we already have a dynamic analysis for this URL # configure the scan request # create the analysis # no JSON returned for create_analysis, need to look it up after | 2.18794 | 2 |