code
stringlengths 22
1.05M
| apis
listlengths 1
3.31k
| extract_api
stringlengths 75
3.25M
|
|---|---|---|
import time
from grove.grove_light_sensor_v1_2 import GroveLightSensor
from grove.grove_led import GroveLed
import paho.mqtt.client as mqtt
import json
light_sensor = GroveLightSensor(0)
led = GroveLed(5)
id = '<ID>'
client_telemetry_topic = 'kekiot/' + id + '/telemetry'
client_name = id + 'nightlight_client'
mqtt_client = mqtt.Client(client_name)
mqtt_client.connect('test.mosquitto.org')
mqtt_client.loop_start()
print("MQTT connected!")
while True:
light = light_sensor.light
telemetry = json.dumps({'light' : light, 'name' : id})
print("Sending telemetry ", telemetry)
mqtt_client.publish(client_telemetry_topic, telemetry)
time.sleep(5)
|
[
"grove.grove_led.GroveLed",
"json.dumps",
"time.sleep",
"paho.mqtt.client.Client",
"grove.grove_light_sensor_v1_2.GroveLightSensor"
] |
[((168, 187), 'grove.grove_light_sensor_v1_2.GroveLightSensor', 'GroveLightSensor', (['(0)'], {}), '(0)\n', (184, 187), False, 'from grove.grove_light_sensor_v1_2 import GroveLightSensor\n'), ((194, 205), 'grove.grove_led.GroveLed', 'GroveLed', (['(5)'], {}), '(5)\n', (202, 205), False, 'from grove.grove_led import GroveLed\n'), ((329, 353), 'paho.mqtt.client.Client', 'mqtt.Client', (['client_name'], {}), '(client_name)\n', (340, 353), True, 'import paho.mqtt.client as mqtt\n'), ((508, 548), 'json.dumps', 'json.dumps', (["{'light': light, 'name': id}"], {}), "({'light': light, 'name': id})\n", (518, 548), False, 'import json\n'), ((660, 673), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (670, 673), False, 'import time\n')]
|
import requests
from bs4 import BeautifulSoup
import re
'''def fate_proxy():
resp=requests.get('https://raw.githubusercontent.com/fate0/proxylist/master/proxy.list')
#print(resp.text)
a=((resp.text).split('\n'))
#print(a)
p_list=[]
for i in a:
try:
p_list.append(json.loads(i))
except Exception as e:
continue
#print(p_list)
np_list=[]
for i in p_list:
if i['country']=='IN':
np_list.append(i)
proxy=[]
fast_proxy=sorted(np_list,key=lambda k: k['response_time'])
for p in fast_proxy:
proxy.append(str(p['host'])+':'+str(p['port']))
return proxy'''
def present():
headers = {
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:49.0) Gecko/20100101 Firefox/49.0'
}
url='https://www.codechef.com/contests'
res = requests.get(url, headers=headers)
soup = BeautifulSoup(res.text,"lxml")
contest = soup.find_all('table',{"class":"dataTable"})
contest = contest[0].find_all('tbody')
#contest = contest[0].find_all('tr')
name = []
link = []
code = []
sdate = []
edate = []
stime = []
etime = []
j = 0
for i in contest[0].findAll('a', attrs={'href': re.compile("^/")}):
link.append('https://www.codechef.com'+i.get('href'))
for i in contest[0].findAll('a'):
name.append(i.text)
for i in contest[0].findAll('td'):
if(j%4==2):
sdate.append(i.text[:-10])
stime.append(i.text[-8:])
if(j%4==0):
code.append(i.text)
if (j % 4 == 3):
edate.append(i.text[:-10])
etime.append(i.text[-8:])
j+=1
#print(stime)
out=[]
for i in range(len(name)):
d = {}
d.update({'code':code[i],'name':name[i],'link':link[i],'sdate':sdate[i],'edate':edate[i],'stime':stime[i],'etime':etime[i]})
out.append(d)
#print(out)
#print(d)
#pot()
return out
def future():
headers = {
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:49.0) Gecko/20100101 Firefox/49.0'
}
url='https://www.codechef.com/contests'
res = requests.get(url, headers=headers)
soup = BeautifulSoup(res.text,"lxml")
contest = soup.find_all('table',{"class":"dataTable"})
contest = contest[1].find_all('tbody')
#contest = contest[0].find_all('tr')
name = []
link = []
code = []
sdate = []
edate = []
stime = []
etime = []
j = 0
for i in contest[0].findAll('a', attrs={'href': re.compile("^/")}):
link.append('https://www.codechef.com'+i.get('href'))
for i in contest[0].findAll('a'):
name.append(i.text)
for i in contest[0].findAll('td'):
if(j%4==2):
sdate.append(i.text[:-10])
stime.append(i.text[-8:])
if(j%4==0):
code.append(i.text)
if (j % 4 == 3):
edate.append(i.text[:-10])
etime.append(i.text[-8:])
j+=1
#print(stime)
out=[]
for i in range(len(name)):
d = {}
d.update({'code':code[i],'name':name[i],'link':link[i],'sdate':sdate[i],'edate':edate[i],'stime':stime[i],'etime':etime[i]})
out.append(d)
#print(out)
#print(d)
#pot()
return out
def past():
headers = {
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:49.0) Gecko/20100101 Firefox/49.0'
}
url='https://www.codechef.com/contests'
res = requests.get(url, headers=headers)
soup = BeautifulSoup(res.text,"lxml")
contest = soup.find_all('table',{"class":"dataTable"})
contest = contest[2].find_all('tbody')
#contest = contest[0].find_all('tr')
name = []
link = []
code = []
sdate = []
edate = []
stime = []
etime = []
j = 0
for i in contest[0].findAll('a', attrs={'href': re.compile("^/")}):
link.append('https://www.codechef.com'+i.get('href'))
for i in contest[0].findAll('a'):
name.append(i.text)
for i in contest[0].findAll('td'):
if(j%4==2):
sdate.append(i.text[:-10])
stime.append(i.text[-8:])
if(j%4==0):
code.append(i.text)
if (j % 4 == 3):
edate.append(i.text[:-10])
etime.append(i.text[-8:])
j+=1
#print(stime)
out=[]
for i in range(len(name)):
d = {}
d.update({'code':code[i],'name':name[i],'link':link[i],'sdate':sdate[i],'edate':edate[i],'stime':stime[i],'etime':etime[i]})
out.append(d)
#print(out)
#print(d)
#pot()
return out
|
[
"bs4.BeautifulSoup",
"requests.get",
"re.compile"
] |
[((860, 894), 'requests.get', 'requests.get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (872, 894), False, 'import requests\n'), ((906, 937), 'bs4.BeautifulSoup', 'BeautifulSoup', (['res.text', '"""lxml"""'], {}), "(res.text, 'lxml')\n", (919, 937), False, 'from bs4 import BeautifulSoup\n'), ((2178, 2212), 'requests.get', 'requests.get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (2190, 2212), False, 'import requests\n'), ((2224, 2255), 'bs4.BeautifulSoup', 'BeautifulSoup', (['res.text', '"""lxml"""'], {}), "(res.text, 'lxml')\n", (2237, 2255), False, 'from bs4 import BeautifulSoup\n'), ((3493, 3527), 'requests.get', 'requests.get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (3505, 3527), False, 'import requests\n'), ((3539, 3570), 'bs4.BeautifulSoup', 'BeautifulSoup', (['res.text', '"""lxml"""'], {}), "(res.text, 'lxml')\n", (3552, 3570), False, 'from bs4 import BeautifulSoup\n'), ((1244, 1260), 're.compile', 're.compile', (['"""^/"""'], {}), "('^/')\n", (1254, 1260), False, 'import re\n'), ((2562, 2578), 're.compile', 're.compile', (['"""^/"""'], {}), "('^/')\n", (2572, 2578), False, 'import re\n'), ((3877, 3893), 're.compile', 're.compile', (['"""^/"""'], {}), "('^/')\n", (3887, 3893), False, 'import re\n')]
|
#!/usr/bin/env python
import uuid
class SMAPI_Response(object):
'''
Implentation of a ICUV Request
'''
def __init__(self, output_parameters):
self._uuid = uuid.uuid1()
self._date = None
self._output_parameters = output_parameters
def get_output_parameters(self):
return self._output_parameters
def get_date(self):
return self._date
def set_date(self, date):
self._date = date
def __repr__(self):
"<{} (name={}, input parameters={})>".format(
self.__class__.__name__,
self.name,
self._output_parameters)
|
[
"uuid.uuid1"
] |
[((187, 199), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (197, 199), False, 'import uuid\n')]
|
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import argparse
import json
import logging
from responsibleai import RAIInsights
from constants import RAIToolType
from rai_component_utilities import (
load_rai_insights_from_input_port,
save_to_output_port,
copy_dashboard_info_file,
)
_logger = logging.getLogger(__file__)
logging.basicConfig(level=logging.INFO)
def parse_args():
# setup arg parser
parser = argparse.ArgumentParser()
parser.add_argument("--rai_insights_dashboard", type=str, required=True)
parser.add_argument("--max_depth", type=int)
parser.add_argument("--num_leaves", type=int)
parser.add_argument("--filter_features", type=json.loads, help="List")
parser.add_argument("--error_analysis_path", type=str)
# parse args
args = parser.parse_args()
# Patch issue with argument passing
if isinstance(args.filter_features, list) and len(args.filter_features) == 0:
args.filter_features = None
# return args
return args
def main(args):
# Load the RAI Insights object
rai_i: RAIInsights = load_rai_insights_from_input_port(args.rai_insights_dashboard)
# Add the error analysis
rai_i.error_analysis.add(
max_depth=args.max_depth,
num_leaves=args.num_leaves,
filter_features=args.filter_features,
)
_logger.info("Added error analysis")
# Compute
rai_i.compute()
_logger.info("Computation complete")
# Save
save_to_output_port(rai_i, args.error_analysis_path, RAIToolType.ERROR_ANALYSIS)
_logger.info("Saved to output port")
# Copy the dashboard info file
copy_dashboard_info_file(args.rai_insights_dashboard, args.error_analysis_path)
_logger.info("Completing")
# run script
if __name__ == "__main__":
# add space in logs
print("*" * 60)
print("\n\n")
# parse args
args = parse_args()
# run main function
main(args)
# add space in logs
print("*" * 60)
print("\n\n")
|
[
"argparse.ArgumentParser",
"logging.basicConfig",
"rai_component_utilities.copy_dashboard_info_file",
"rai_component_utilities.save_to_output_port",
"logging.getLogger",
"rai_component_utilities.load_rai_insights_from_input_port"
] |
[((444, 471), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (461, 471), False, 'import logging\n'), ((472, 511), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (491, 511), False, 'import logging\n'), ((568, 593), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (591, 593), False, 'import argparse\n'), ((1226, 1288), 'rai_component_utilities.load_rai_insights_from_input_port', 'load_rai_insights_from_input_port', (['args.rai_insights_dashboard'], {}), '(args.rai_insights_dashboard)\n', (1259, 1288), False, 'from rai_component_utilities import load_rai_insights_from_input_port, save_to_output_port, copy_dashboard_info_file\n'), ((1604, 1689), 'rai_component_utilities.save_to_output_port', 'save_to_output_port', (['rai_i', 'args.error_analysis_path', 'RAIToolType.ERROR_ANALYSIS'], {}), '(rai_i, args.error_analysis_path, RAIToolType.ERROR_ANALYSIS\n )\n', (1623, 1689), False, 'from rai_component_utilities import load_rai_insights_from_input_port, save_to_output_port, copy_dashboard_info_file\n'), ((1766, 1845), 'rai_component_utilities.copy_dashboard_info_file', 'copy_dashboard_info_file', (['args.rai_insights_dashboard', 'args.error_analysis_path'], {}), '(args.rai_insights_dashboard, args.error_analysis_path)\n', (1790, 1845), False, 'from rai_component_utilities import load_rai_insights_from_input_port, save_to_output_port, copy_dashboard_info_file\n')]
|
# Brickout Game V 0.1
# 2018 by <NAME>
# color constants
# a website for finding out color names
# https://www.w3schools.com/colors/colors_converter.asp
GREY = [105, 105, 105]
BLACK = [0, 0, 0]
PINK = [168, 76, 96]
BROWN = [133, 107, 17]
OTHERBROWN = [157, 90, 48]
GREEN = [28, 120, 29]
LIGHTGREEN = [56, 141, 47]
DARKGREEN = [46, 137, 95]
BLUE = [91, 92, 214]
BALL = [145, 100, 71]
# I - Import and Initialize
import pygame
pygame.mixer.pre_init(22050, -16, 1, 2048)
pygame.mixer.init()
pygame.init()
# D - Display
screen = pygame.display.set_mode([640, 500])
screen.fill(BLACK)
# E - Entitiess
# classes
class Block(pygame.sprite.Sprite):
def __init__(self, color, width, height):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.Surface([width, height])
self.image.fill(color)
self.rect = self.image.get_rect()
# A - Action
# A - Assign Values to key variables
blocks_container = pygame.sprite.Group()
blocks = pygame.sprite.Group()
all_sprites = pygame.sprite.Group()
walls = pygame.sprite.Group()
wall_left = Block(GREY, 32, 430)
wall_right = Block(GREY, 32, 430)
wall_horiz = Block(GREY, 640, 32)
wall_left.rect.x = 0
wall_left.rect.y = 64
wall_right.rect.x = 640 - 32
wall_right.rect.y = 64
wall_horiz.rect.x = 0
wall_horiz.rect.y = 64
all_sprites.add(wall_left, wall_right, wall_horiz)
walls.add(wall_left, wall_right, wall_horiz)
paddle = Block(OTHERBROWN, 64, 8)
paddle.rect.x = (640 - 64) / 2
paddle.rect.y = 500 - 22
all_sprites.add(paddle)
# Create a ball sprite
ball = Block(BALL, 10, 10)
ball.rect.x = (640 - 10) / 2
ball.rect.y = paddle.rect.top - 20
all_sprites.add(ball)
# Score
font = pygame.font.Font("2600.ttf", 38)
text = font.render("000", False, GREY)
# Sounds
pygame.mixer.init()
snd_paddle_hit = pygame.mixer.Sound("hit_paddle.ogg")
snd_side_wall_hit = pygame.mixer.Sound("hit_side_wall.ogg")
snd_block_row1 = pygame.mixer.Sound("hit_block_row_1.ogg")
snd_block_row2 = pygame.mixer.Sound("hit_block_row_2.ogg")
snd_block_row3 = pygame.mixer.Sound("hit_block_row_3.ogg")
snd_block_row4 = pygame.mixer.Sound("hit_block_row_4.ogg")
snd_block_row5 = pygame.mixer.Sound("hit_block_row_5.ogg")
snd_block_row6 = pygame.mixer.Sound("hit_block_row_6.ogg")
"""
BxH = 48x20
165 Score 80 Paddles 96 Level
D 16 D 16 D
"""
# Set the initial ball speed
ball_dx = 1
ball_dy = 1
ball_speed = 1
ball_speed_max = 1.1
# A couple of 'flags' (Boolean values)
ball_in_play = False
just_bounced = False
# 1 block = 32 pixel x 16 pixel
color_list = [GREEN, LIGHTGREEN, PINK, DARKGREEN, BROWN, BLUE]
def setup_blocks():
# Create a horizontal row of blocks for each color
for block_row, block_color in enumerate(color_list):
for block_column in range(1, 19):
# Create a block, leaving 1 pixels around the four edges
block = Block(block_color, 32, 16)
block.rect.x = block_column * 32 + 1
block.rect.y = 145 + block_row * 16
blocks.add(block)
all_sprites.add(block)
# Create a horizontal row of blocks for each color
setup_blocks()
"""
for block_row, block_color in enumerate(color_list):
for block_column in range(1,19):
# Create a block, leaving 1 pixels around the four edges
block = Block(block_color, 32, 16)
block.rect.x = block_column * 32 + 1
block.rect.y = 145 + block_row * 16
blocks.add(block)
all_sprites.add(block)
"""
pygame.mouse.set_visible(False)
pygame.event.set_grab(True)
clock = pygame.time.Clock()
game_over = False
score = 0
lives = 5
live_text = font.render("{}".format(lives), False, GREY)
mouse_x_old = paddle.rect.x
assert isinstance(paddle.rect.x, object)
mouse_x = paddle.rect.x
# L - Loop
while not game_over:
# T - Timer
clock.tick(240)
# E- Event Handling
for event in pygame.event.get():
if event.type == pygame.QUIT:
game_over = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
game_over = True
# b1, b2, b3 = pygame.mouse.get_pressed()
if pygame.mouse.get_pressed()[0]:
if lives > 0 and ball_in_play == False:
ball_in_play = True
ball.rect.x = paddle.rect.centerx
ball.rect.y = paddle.rect.top - 20
all_sprites.add(ball)
if pygame.mouse.get_pressed()[2] and lives == 0:
lives = 5
score = 0
setup_blocks()
ball.rect.x = paddle.rect.centerx
ball.rect.y = paddle.rect.top - 20
ball_in_play = True
all_sprites.add(ball)
# print("Mouse Speed per frame:" , abs(mouse_x_old - mouse_x) )
mouse_x_old = mouse_x
mouse_x = pygame.mouse.get_pos()[0]
mouse_pos_equal = True
while mouse_pos_equal:
mouse_pos_equal = mouse_x != paddle.rect.left
# print(mouse_pos_equal)
if mouse_x < paddle.rect.left:
if paddle.rect.left > 32:
paddle.rect.x = paddle.rect.x - 1
else:
mouse_pos_equal = False
elif mouse_x > paddle.rect.left:
if paddle.rect.right < 640 - 32:
paddle.rect.x = paddle.rect.x + 1
else:
mouse_pos_equal = False
if ball_in_play:
# Move the ball
ball.rect.x += ball_dx
ball.rect.y += ball_dy
# Check if it collidedd with the paddle
if ball.rect.y < paddle.rect.top:
just_bounced = False
# Bounce off the screen edges
if ball.rect.left <= 0 + 32:
snd_side_wall_hit.play()
ball.rect.x = 0 + 32
ball_dx = -ball_dx
if ball.rect.y <= 0 + 96:
snd_side_wall_hit.play()
ball.rect.y = 0 + 96
ball_dy = -ball_dy
if ball.rect.x > 640 - 32 - 10:
snd_side_wall_hit.play()
ball.rect.x = 640 - 32 - 10
ball_dx = -ball_dx
# Check if the ball bounced off the paddle
# Collision detection between two sprites, using rects.
if pygame.sprite.collide_rect(ball, paddle) and not just_bounced:
snd_paddle_hit.play()
ball_dy = -ball_dy
just_bounced = True
# While ball and paddle are in contact, don't bounce again
# Ball didn't - game over
elif ball.rect.y > paddle.rect.top + 10 / 2:
ball_in_play = False
all_sprites.remove(ball)
lives = lives - 1
# Check if the ball bounced off a block
blocks_hit_list = pygame.sprite.spritecollide(ball, blocks, True)
if blocks_hit_list:
if ball_speed < ball_speed_max:
ball_speed += 0.01
print(ball_speed)
for block in blocks_hit_list:
score = score + 1
print(block.rect.y)
if block.rect.y == 145 + 6 * 16:
snd_block_row1.play()
elif block.rect.y == 145 + 5 * 16:
snd_block_row2.play()
elif block.rect.y == 145 + 4 * 16:
snd_block_row3.play()
elif block.rect.y == 145 + 3 * 16:
snd_block_row4.play()
elif block.rect.y == 145 + 2 * 16:
snd_block_row5.play()
elif block.rect.y == 145 + 1 * 16:
snd_block_row6.play()
# ball_dy = -ball_dy * ball_speeds
blocks_container.add(blocks_hit_list)
ball_dy = -ball_dy # * ball_speed
# scorestr = "{:0>3}".format(score)
text = font.render("{:0>3}".format(score), False, GREY)
live_text = font.render("{}".format(lives), False, GREY)
all_sprites.update()
screen.fill(BLACK)
screen.blit(text, (165, 32))
screen.blit(live_text, (165 + text.get_width() + 80, 32))
# R - Refresh Display
all_sprites.draw(screen)
pygame.display.update()
pygame.quit()
|
[
"pygame.mouse.set_visible",
"pygame.event.get",
"pygame.mixer.init",
"pygame.display.update",
"pygame.sprite.spritecollide",
"pygame.font.Font",
"pygame.mouse.get_pos",
"pygame.display.set_mode",
"pygame.mixer.Sound",
"pygame.quit",
"pygame.Surface",
"pygame.mouse.get_pressed",
"pygame.mixer.pre_init",
"pygame.init",
"pygame.time.Clock",
"pygame.sprite.collide_rect",
"pygame.event.set_grab",
"pygame.sprite.Group",
"pygame.sprite.Sprite.__init__"
] |
[((428, 470), 'pygame.mixer.pre_init', 'pygame.mixer.pre_init', (['(22050)', '(-16)', '(1)', '(2048)'], {}), '(22050, -16, 1, 2048)\n', (449, 470), False, 'import pygame\n'), ((471, 490), 'pygame.mixer.init', 'pygame.mixer.init', ([], {}), '()\n', (488, 490), False, 'import pygame\n'), ((492, 505), 'pygame.init', 'pygame.init', ([], {}), '()\n', (503, 505), False, 'import pygame\n'), ((530, 565), 'pygame.display.set_mode', 'pygame.display.set_mode', (['[640, 500]'], {}), '([640, 500])\n', (553, 565), False, 'import pygame\n'), ((936, 957), 'pygame.sprite.Group', 'pygame.sprite.Group', ([], {}), '()\n', (955, 957), False, 'import pygame\n'), ((967, 988), 'pygame.sprite.Group', 'pygame.sprite.Group', ([], {}), '()\n', (986, 988), False, 'import pygame\n'), ((1003, 1024), 'pygame.sprite.Group', 'pygame.sprite.Group', ([], {}), '()\n', (1022, 1024), False, 'import pygame\n'), ((1033, 1054), 'pygame.sprite.Group', 'pygame.sprite.Group', ([], {}), '()\n', (1052, 1054), False, 'import pygame\n'), ((1661, 1693), 'pygame.font.Font', 'pygame.font.Font', (['"""2600.ttf"""', '(38)'], {}), "('2600.ttf', 38)\n", (1677, 1693), False, 'import pygame\n'), ((1743, 1762), 'pygame.mixer.init', 'pygame.mixer.init', ([], {}), '()\n', (1760, 1762), False, 'import pygame\n'), ((1780, 1816), 'pygame.mixer.Sound', 'pygame.mixer.Sound', (['"""hit_paddle.ogg"""'], {}), "('hit_paddle.ogg')\n", (1798, 1816), False, 'import pygame\n'), ((1837, 1876), 'pygame.mixer.Sound', 'pygame.mixer.Sound', (['"""hit_side_wall.ogg"""'], {}), "('hit_side_wall.ogg')\n", (1855, 1876), False, 'import pygame\n'), ((1894, 1935), 'pygame.mixer.Sound', 'pygame.mixer.Sound', (['"""hit_block_row_1.ogg"""'], {}), "('hit_block_row_1.ogg')\n", (1912, 1935), False, 'import pygame\n'), ((1953, 1994), 'pygame.mixer.Sound', 'pygame.mixer.Sound', (['"""hit_block_row_2.ogg"""'], {}), "('hit_block_row_2.ogg')\n", (1971, 1994), False, 'import pygame\n'), ((2012, 2053), 'pygame.mixer.Sound', 'pygame.mixer.Sound', (['"""hit_block_row_3.ogg"""'], {}), "('hit_block_row_3.ogg')\n", (2030, 2053), False, 'import pygame\n'), ((2071, 2112), 'pygame.mixer.Sound', 'pygame.mixer.Sound', (['"""hit_block_row_4.ogg"""'], {}), "('hit_block_row_4.ogg')\n", (2089, 2112), False, 'import pygame\n'), ((2130, 2171), 'pygame.mixer.Sound', 'pygame.mixer.Sound', (['"""hit_block_row_5.ogg"""'], {}), "('hit_block_row_5.ogg')\n", (2148, 2171), False, 'import pygame\n'), ((2189, 2230), 'pygame.mixer.Sound', 'pygame.mixer.Sound', (['"""hit_block_row_6.ogg"""'], {}), "('hit_block_row_6.ogg')\n", (2207, 2230), False, 'import pygame\n'), ((3439, 3470), 'pygame.mouse.set_visible', 'pygame.mouse.set_visible', (['(False)'], {}), '(False)\n', (3463, 3470), False, 'import pygame\n'), ((3471, 3498), 'pygame.event.set_grab', 'pygame.event.set_grab', (['(True)'], {}), '(True)\n', (3492, 3498), False, 'import pygame\n'), ((3508, 3527), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (3525, 3527), False, 'import pygame\n'), ((7924, 7937), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (7935, 7937), False, 'import pygame\n'), ((3826, 3844), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (3842, 3844), False, 'import pygame\n'), ((7900, 7923), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (7921, 7923), False, 'import pygame\n'), ((703, 738), 'pygame.sprite.Sprite.__init__', 'pygame.sprite.Sprite.__init__', (['self'], {}), '(self)\n', (732, 738), False, 'import pygame\n'), ((760, 791), 'pygame.Surface', 'pygame.Surface', (['[width, height]'], {}), '([width, height])\n', (774, 791), False, 'import pygame\n'), ((4085, 4111), 'pygame.mouse.get_pressed', 'pygame.mouse.get_pressed', ([], {}), '()\n', (4109, 4111), False, 'import pygame\n'), ((4687, 4709), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (4707, 4709), False, 'import pygame\n'), ((6537, 6584), 'pygame.sprite.spritecollide', 'pygame.sprite.spritecollide', (['ball', 'blocks', '(True)'], {}), '(ball, blocks, True)\n', (6564, 6584), False, 'import pygame\n'), ((4330, 4356), 'pygame.mouse.get_pressed', 'pygame.mouse.get_pressed', ([], {}), '()\n', (4354, 4356), False, 'import pygame\n'), ((6049, 6089), 'pygame.sprite.collide_rect', 'pygame.sprite.collide_rect', (['ball', 'paddle'], {}), '(ball, paddle)\n', (6075, 6089), False, 'import pygame\n')]
|
# ~/Blog/djr/gql/schema.py
import graphene
from items.models import Movie
from graphene_django.types import DjangoObjectType
# api-movie-model
class MovieType(DjangoObjectType):
id = graphene.Int()
name = graphene.String()
year = graphene.Int()
summary = graphene.String()
poster_url = graphene.String()
slug = graphene.String()
class Meta:
model = Movie
def resolve_id(self, info):
return self.id
def resolve_name(self, info):
return self.name
def resolve_year(self, info):
return self.year
def resolve_summary(self, info):
return self.summary
def resolve_poster_url(self, info):
# Note: in client side app snake_case fields
# will be resolved as camelCase
# Eg: poster_url ==> posterUrl
return self.poster_url
def resolve_slug(self, info):
return self.slug
class Query(graphene.ObjectType):
movie_list = graphene.List(MovieType)
movie = graphene.Field(MovieType, slug=graphene.String())
def resolve_movie_list(self, info, *_):
# for large lists only query what you need
return Movie.objects.all().only("name", "poster_url", "slug")
def resolve_movie(self, info, slug):
movie_queryset = Movie.objects.filter(slug=slug)
if movie_queryset.exists():
return movie_queryset.first()
schema = graphene.Schema(query=Query)
|
[
"graphene.List",
"graphene.String",
"items.models.Movie.objects.all",
"graphene.Int",
"graphene.Schema",
"items.models.Movie.objects.filter"
] |
[((1400, 1428), 'graphene.Schema', 'graphene.Schema', ([], {'query': 'Query'}), '(query=Query)\n', (1415, 1428), False, 'import graphene\n'), ((188, 202), 'graphene.Int', 'graphene.Int', ([], {}), '()\n', (200, 202), False, 'import graphene\n'), ((214, 231), 'graphene.String', 'graphene.String', ([], {}), '()\n', (229, 231), False, 'import graphene\n'), ((243, 257), 'graphene.Int', 'graphene.Int', ([], {}), '()\n', (255, 257), False, 'import graphene\n'), ((272, 289), 'graphene.String', 'graphene.String', ([], {}), '()\n', (287, 289), False, 'import graphene\n'), ((307, 324), 'graphene.String', 'graphene.String', ([], {}), '()\n', (322, 324), False, 'import graphene\n'), ((336, 353), 'graphene.String', 'graphene.String', ([], {}), '()\n', (351, 353), False, 'import graphene\n'), ((956, 980), 'graphene.List', 'graphene.List', (['MovieType'], {}), '(MovieType)\n', (969, 980), False, 'import graphene\n'), ((1280, 1311), 'items.models.Movie.objects.filter', 'Movie.objects.filter', ([], {'slug': 'slug'}), '(slug=slug)\n', (1300, 1311), False, 'from items.models import Movie\n'), ((1024, 1041), 'graphene.String', 'graphene.String', ([], {}), '()\n', (1039, 1041), False, 'import graphene\n'), ((1154, 1173), 'items.models.Movie.objects.all', 'Movie.objects.all', ([], {}), '()\n', (1171, 1173), False, 'from items.models import Movie\n')]
|
from functools import reduce
from pprint import pprint
from typing import Sequence, Tuple
import requests
from graph import Graph
spring_id = 71
spring_id_legacy = 20178
def modify_string(p: str, repl: Sequence[Tuple[str, str]]) -> str:
return reduce(lambda a, kv: a.replace(*kv), repl, p)
url = 'https://api.maui.uiowa.edu/maui/api/pub/registrar/sections'
subject = 'ece'
payload = "json={{sessionId: {}, courseSubject: '{}'}}".format(str(spring_id), subject)
response = requests.get(url=url, params=payload)
if response.status_code != 200:
print('Error: HTTP {}'.format(response.status_code))
data = response.json()
raw_courses = data['payload']
courses_with_prereqs = list(filter(lambda x: x['prerequisite'] is not None, raw_courses))
replacements = (' ', ''), ('and', '+'), ('or', '?')
for d in courses_with_prereqs:
d.update((k, modify_string(v, replacements)) for k, v in d.items() if k ==
'prerequisite')
prereqs = {map(lambda x: x['prerequisite'], courses_with_prereqs)}
pairings = {(x['subjectCourse'], x['prerequisite']) for x in courses_with_prereqs}
courses = set()
replacements = ('(', ''), (')', ''), ('+', ','), ('?', ',')
for pair in pairings:
courses.add(pair[0])
for prereq in modify_string(pair[1], replacements).split(','):
courses.add(prereq)
pprint(courses)
print(len(courses))
connections = []
for pair in pairings:
pprint('{} => {}'.format(pair[0], pair[1]))
# g = Graph(directed=True)
# 'ECE:5220 => (BIOS:4120?STAT:3510)+BME:5320+(CS:5110?ENGR:1300)'
# ('BIOS:4120?', 'ECE:5220',
|
[
"pprint.pprint",
"requests.get"
] |
[((483, 520), 'requests.get', 'requests.get', ([], {'url': 'url', 'params': 'payload'}), '(url=url, params=payload)\n', (495, 520), False, 'import requests\n'), ((1320, 1335), 'pprint.pprint', 'pprint', (['courses'], {}), '(courses)\n', (1326, 1335), False, 'from pprint import pprint\n')]
|
#Leia 10 números inteiros e armazene em um vetor v. Crie dois
#novos vetores v1 e v2. Copie os valores ímpares de v para
#v1, e os valores pares de v para v2. Note que cada um dos
#vetores v1 e v2 têm no máximo 10 elementos, mas nem todos
#os elementos são utilizados. No final escreva os elementos
#UTILIZADOS de v1 e v2.
import random
v=[]
v1=[0,0,0,0,0,0,0,0,0,0]
v2=[0,0,0,0,0,0,0,0,0,0]
for c in range(0,10):
n=random.randint(1,50)
v.append(n)
if(n%2==0):
v2[c]=n
else:
v1[c]=n
print(v)
for c in range(0,10):
if(v1[c]!=0):
print(f"V1 = {v1[c]}")
elif(v2[c]!=0):
print(f"V2 = {v2[c]}")
|
[
"random.randint"
] |
[((421, 442), 'random.randint', 'random.randint', (['(1)', '(50)'], {}), '(1, 50)\n', (435, 442), False, 'import random\n')]
|
import argparse
import subprocess
from dtran.dcat.api import DCatAPI
from funcs.readers.dcat_read_func import DATA_CATALOG_DOWNLOAD_DIR
import os
import csv
import json
import shutil
from datetime import datetime
from datetime import timedelta
from pathlib import Path
from typing import Optional, Dict
import re
import xarray as xr
from netCDF4 import Dataset
from dtran import IFunc, ArgType
from dtran.ifunc import IFuncType
from dtran.metadata import Metadata
from zipfile import ZipFile
import logging, sys
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
class GldasToCyclesBatched(IFunc):
id = "gldas_to_cycles_batched_func"
description = """ A reader-transformation-writer multi-adapter.
Creates Cycles input (weather and soil file zip) from GLDAS NetCDF (climate) files & Soil files.
"""
inputs = {
"gldas_dataset_id": ArgType.String,
"gldas_soil_map_file": ArgType.String,
"start_date": ArgType.String,
"end_date": ArgType.String,
"batch_numdays": ArgType.Number,
"output_path": ArgType.FilePath
}
outputs = {"output_files": ArgType.FilePath}
friendly_name: str = "GldasToCyclesBatched"
func_type = IFuncType.MODEL_TRANS
example = {
"gldas_dataset_id": "5babae3f-c468-4e01-862e-8b201468e3b5",
"gldas_soil_map_file": "/tmp/gldas_soil_43.E_8.4N.json",
"start_date": "2000-01-01",
"end_date": "2018-01-31",
"batch_numdays": 14,
"output_path": "/tmp/output"
}
def __init__(
self,
gldas_dataset_id,
gldas_soil_map_file,
start_date,
end_date,
batch_numdays,
output_path
):
self.gldas_dataset_id = gldas_dataset_id
self.gldas_soil_map_file = gldas_soil_map_file
self.output_path = output_path
self.end_date = end_date
self.start_date = start_date
self.batch_numdays = batch_numdays
def validate(self) -> bool:
return True
def exec(self) -> dict:
output_file = gldas_to_cycles(
self.gldas_dataset_id,
self.gldas_soil_map_file,
self.start_date,
self.end_date,
self.batch_numdays,
self.output_path
)
return {"output_files": output_file}
def change_metadata(
self, metadata: Optional[Dict[str, Metadata]]
) -> Dict[str, Metadata]:
return metadata
def convert_to_cycles_input(ds):
"""
Resample GLDAS data for a location by 24 hours(1day), and convert to Cycles input
"""
# Calculate RH variable values
logging.debug("Reading variables from dataset..")
(_prcp, _temp, _wind, _solar, _rh) = read_variables_from_dataset(ds)
logging.debug("Finished reading variables from dataset..")
logging.debug("Start resampling...")
# Try group_by (time.dayofyear)
# - dataarray
# Resample/Group by 1 Day - Some variables are grouped by averaging, others by max/min
prcp_daily = _prcp.resample(time="1D")
temp_daily = _temp.resample(time="1D")
solar_daily = _solar.resample(time="1D")
rh_daily = _rh.resample(time="1D")
wind_daily = _wind.resample(time="1D")
prcp = prcp_daily.mean().rename("PP")
tx = temp_daily.max().rename("TX")
tn = temp_daily.min().rename("TN")
solar = solar_daily.mean().rename("SOLAR")
rhx = rh_daily.max().rename("RHX")
rhn = rh_daily.min().rename("RHN")
wind = wind_daily.mean().rename("WIND")
logging.debug("Finished resampling...")
logging.debug("Doing unit conversions...")
# Some unit conversions
prcp *= 86400.0
solar *= 86400.0 / 1.0e6
rhx *= 100.0
rhn *= 100.0
tx -= 273.15
tn -= 273.15
logging.debug("Finished unit conversions...")
# Get Year and Day of Year
year = prcp.time.dt.year.rename("YEAR")
doy = prcp.time.dt.dayofyear.rename("DOY")
logging.debug("Merge variables...")
# Create a dataset with all the required variables
cycles_weather_ds = xr.merge([year, doy, prcp, tx, tn, solar, rhx, rhn, wind])
cycles_weather_ds = cycles_weather_ds.reset_coords(names=["lat", "lon"], drop=True)
logging.debug("Finished merging variables...")
return cycles_weather_ds
def create_rh(nc):
"""
Calculate RH (Relative Humidity) value from GLDAS data
"""
_temp = nc["Tair_f_inst"]
_pres = nc["Psurf_f_inst"]
_spfh = nc["Qair_f_inst"]
es = 611.2 * xr.ufuncs.exp(17.67 * (_temp - 273.15) / (_temp - 273.15 + 243.5))
ws = 0.622 * es / (_pres - es)
w = _spfh / (1.0 - _spfh)
nc["rh"] = w / ws
nc["rh"].clip(max=1.0)
return nc
def read_variables_from_dataset(nc):
"""
Read/Calculate relevant variables from GLDAS dataset
"""
_prcp = nc["Rainf_f_tavg"]
_temp = nc["Tair_f_inst"]
_wind = nc["Wind_f_inst"]
_solar = nc["SWdown_f_tavg"]
create_rh(nc)
_rh = nc["rh"]
return _prcp, _temp, _wind, _solar, _rh
def load_gldas_dataset(gldas_files):
"""
Load GLDAS files using XArray
"""
if gldas_files is not None and len(gldas_files) > 0:
# Open a sample gldas file and get all variables to remove from the load (to make the loading faster)
first_file = gldas_files[0]
d1 = xr.open_dataset(first_file)
varnames = list(d1.data_vars.keys())
varnames.remove('Rainf_f_tavg')
varnames.remove('Tair_f_inst')
varnames.remove('Wind_f_inst')
varnames.remove('SWdown_f_tavg')
varnames.remove('Psurf_f_inst')
varnames.remove('Qair_f_inst')
d1.close()
ds=xr.open_mfdataset(gldas_files, drop_variables=varnames, chunks='auto')
return ds
def gldas_to_cycles(
gldas_dataset_id,
gldas_soil_map_file,
start_date,
end_date,
batch_numdays,
output_path
):
gldas_directory = DATA_CATALOG_DOWNLOAD_DIR + "/gldas"
if not os.path.exists(gldas_directory):
Path(gldas_directory).mkdir(exist_ok=True, parents=True)
if not os.path.exists(output_path):
Path(output_path).mkdir(exist_ok=True, parents=True)
# Load soil and weather information from input soil-weather map file
soil_grid_points = {}
weather_grid_points = []
with open(gldas_soil_map_file) as mapf:
weather_grid_points = json.load(mapf)
for weather_point in weather_grid_points:
soils = weather_point["soils"]
for soil in soils:
soil_grid_points[soil["name"]] = {
"weather": weather_point["weather"],
"soil_path": soil["path"]
}
num_weather_points = len(weather_grid_points)
num_soil_points = len(soil_grid_points.keys())
logging.info(f"Processing {num_weather_points} GLDAS grid points for {num_soil_points} Soil points")
# Get latest dates for existing weather files
point_latest_dates = {}
for grid_point in weather_grid_points:
weather_point = grid_point["weather"]
common_weather_fname = weather_point["filename"]
lat = weather_point["lat"]
lon = weather_point["lon"]
elevation = weather_point["elevation"]
common_weather_file = os.path.join(output_path, common_weather_fname)
# Check if the weather file already exists
if os.path.exists(common_weather_file):
# If yes, then get latest start date for this weather file
with open(common_weather_file) as weatherf:
for line in weatherf:
items = re.split(r"\s+", line)
if items[0].isnumeric():
point_latest_dates[common_weather_fname] = datetime.strptime("{} {}".format(items[0], items[1]), "%Y %j")
else:
# If not, then create the weather file headers
outfp = open(common_weather_file, "w")
outfp.write("LATITUDE %.2f\n" % (lat))
#outfp.write("LONGITUDE %.2f\n" % (lon))
outfp.write("ALTITUDE %.2f\n" % (elevation))
outfp.write("SCREENING_HEIGHT 2\n")
outfp.write("%-8s%-8s%-8s%-8s%-8s%-8s%-8s%-8s%-8s\n" % (
'YEAR', 'DOY', 'PP', 'TX', 'TN', 'SOLAR', 'RHX', 'RHN', 'WIND'
))
outfp.close()
# Do the GLDAS to cycles conversion in batches of N number of days
# - For each batch of start-date/end-date, load GLDAS and create cycles inputs
start_date = datetime.strptime(start_date, "%Y-%m-%d")
end_date = datetime.strptime(end_date, "%Y-%m-%d")
cur_start_date = start_date
while cur_start_date < end_date:
cur_end_date = cur_start_date + timedelta(days = batch_numdays)
if cur_end_date > end_date:
cur_end_date = end_date
logging.info(f"Fetching GLDAS files list for dates from {cur_start_date} to {cur_end_date}")
logging.info("Downloading missing GLDAS files..")
# Download GLDAS Datasets for the time period
gldas_resources = DCatAPI.get_instance().find_resources_by_dataset_id(gldas_dataset_id, cur_start_date, cur_end_date)
gldas_files = []
for resource in gldas_resources:
temporal_metadata = resource['resource_metadata']['temporal_coverage']
gldas_date_str = temporal_metadata['start_time'].split("T")[0]
gldas_date = datetime.strptime(gldas_date_str, "%Y-%m-%d")
nc_path = "%s/%4.4d/%3.3d/" % (gldas_directory, gldas_date.timetuple().tm_year, gldas_date.timetuple().tm_yday)
ofile = os.path.join(nc_path, resource['resource_name'])
if not os.path.exists(nc_path):
Path(nc_path).mkdir(parents=True, exist_ok=True)
if not os.path.exists(ofile):
logging.debug(ofile)
subprocess.check_call(f"wget -q \"{resource['resource_data_url']}\" -O {ofile}", shell=True, close_fds=False)
if os.path.exists(ofile):
gldas_files.append(ofile)
num_weather_files = len(gldas_files)
logging.info(f"Loading GLDAS data from {num_weather_files} files..")
gldas_ds = load_gldas_dataset(gldas_files)
logging.info("Loaded GLDAS data")
# Do the cycles conversion for all weather points
for grid_point in weather_grid_points:
weather_point = grid_point["weather"]
common_weather_fname = weather_point["filename"]
lat = weather_point["lat"]
lon = weather_point["lon"]
elevation = weather_point["elevation"]
common_weather_file = os.path.join(output_path, common_weather_fname)
point_start_date = cur_start_date
if common_weather_fname in point_latest_dates:
point_start_date = point_latest_dates[common_weather_fname] + timedelta(days=1)
# If we've already processed this time period for this point, then don't go further
if point_start_date > cur_end_date:
continue
# Load GLDAS data for the exact gridpoint location
logging.debug(f"Loading GLDAS data for grid point {lat}, {lon}")
loc_ds = gldas_ds.sel(lat=lat, lon=lon, time=slice(point_start_date, cur_end_date)).load()
logging.debug("Loaded gldas data for location")
logging.debug("Converting to Cycles input data")
# Convert to Cycles Input
loc_by_day_ds = convert_to_cycles_input(loc_ds)
logging.debug("Finished conversion to cycles input data")
logging.debug("Converting weather input data to Pandas Dataframe...")
loc_by_day_df = loc_by_day_ds.to_dataframe()
loc_by_day_df.sort_values(by=['YEAR', 'DOY'])
logging.debug("Finished converting to Dataframe")
logging.debug ("Writing the cycles weather file..")
# Append to the weather file
outfp = open(common_weather_file, "a")
for index, row in loc_by_day_df.iterrows():
if index < cur_end_date: # Sometimes an extra day is returned (for midnight file of next day. Do a check here to ignore that)
outfp.write("%-8.0f%-8.0f%-8.4f%-8.2f%-8.2f%-8.4f%-8.2f%-8.2f%-8.2f\n" % (
row['YEAR'], row['DOY'],
row['PP'], row['TX'], row['TN'],
row['SOLAR'], row['RHX'], row['RHN'],
row['WIND'])
)
outfp.close()
gldas_ds.close()
cur_start_date = cur_end_date
logging.info(f"Done converting GLDAS data to cycles input weather file for {num_weather_points} points")
logging.info(f"Creating {num_soil_points} cycles input zip files, each containing a weather and a soil file...")
fnames = []
# Create the Zip file for all soil points containing the soil file and the generated weather file
for fname in soil_grid_points.keys():
point = soil_grid_points[fname]
soil_path = point["soil_path"]
weather_point = point["weather"]
if not os.path.exists(soil_path):
continue
common_weather_fname = weather_point["filename"]
common_weather_file = os.path.join(output_path, common_weather_fname)
logging.debug (f"Creating Cycles zip file for {fname}")
weather_fname = fname + ".weather"
soil_fname = fname + ".soil"
zip_fname = fname + ".soil_weather.zip"
tmp_soil_file = os.path.join(output_path, soil_fname)
tmp_weather_file = os.path.join(output_path, weather_fname)
common_weather_file = os.path.join(output_path, common_weather_fname)
soil_weather_file = os.path.join(output_path, zip_fname)
shutil.copyfile(soil_path, Path(tmp_soil_file))
shutil.copyfile(common_weather_file, Path(tmp_weather_file))
zipObj = ZipFile(soil_weather_file, 'w')
zipObj.write(tmp_soil_file, soil_fname)
zipObj.write(tmp_weather_file, weather_fname)
zipObj.close()
logging.debug ("Done writing cycles zip file")
fnames.append(zip_fname)
logging.info(f"Done Creating {num_soil_points} cycles input zip files")
return fnames
|
[
"json.load",
"logging.debug",
"zipfile.ZipFile",
"logging.basicConfig",
"re.split",
"xarray.open_dataset",
"os.path.exists",
"dtran.dcat.api.DCatAPI.get_instance",
"xarray.merge",
"logging.info",
"datetime.datetime.strptime",
"pathlib.Path",
"datetime.timedelta",
"xarray.open_mfdataset",
"os.path.join",
"subprocess.check_call",
"xarray.ufuncs.exp"
] |
[((515, 573), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stderr', 'level': 'logging.INFO'}), '(stream=sys.stderr, level=logging.INFO)\n', (534, 573), False, 'import logging, sys\n'), ((2628, 2677), 'logging.debug', 'logging.debug', (['"""Reading variables from dataset.."""'], {}), "('Reading variables from dataset..')\n", (2641, 2677), False, 'import logging, sys\n'), ((2755, 2813), 'logging.debug', 'logging.debug', (['"""Finished reading variables from dataset.."""'], {}), "('Finished reading variables from dataset..')\n", (2768, 2813), False, 'import logging, sys\n'), ((2819, 2855), 'logging.debug', 'logging.debug', (['"""Start resampling..."""'], {}), "('Start resampling...')\n", (2832, 2855), False, 'import logging, sys\n'), ((3514, 3553), 'logging.debug', 'logging.debug', (['"""Finished resampling..."""'], {}), "('Finished resampling...')\n", (3527, 3553), False, 'import logging, sys\n'), ((3559, 3601), 'logging.debug', 'logging.debug', (['"""Doing unit conversions..."""'], {}), "('Doing unit conversions...')\n", (3572, 3601), False, 'import logging, sys\n'), ((3755, 3800), 'logging.debug', 'logging.debug', (['"""Finished unit conversions..."""'], {}), "('Finished unit conversions...')\n", (3768, 3800), False, 'import logging, sys\n'), ((3933, 3968), 'logging.debug', 'logging.debug', (['"""Merge variables..."""'], {}), "('Merge variables...')\n", (3946, 3968), False, 'import logging, sys\n'), ((4048, 4106), 'xarray.merge', 'xr.merge', (['[year, doy, prcp, tx, tn, solar, rhx, rhn, wind]'], {}), '([year, doy, prcp, tx, tn, solar, rhx, rhn, wind])\n', (4056, 4106), True, 'import xarray as xr\n'), ((4204, 4250), 'logging.debug', 'logging.debug', (['"""Finished merging variables..."""'], {}), "('Finished merging variables...')\n", (4217, 4250), False, 'import logging, sys\n'), ((6784, 6894), 'logging.info', 'logging.info', (['f"""Processing {num_weather_points} GLDAS grid points for {num_soil_points} Soil points"""'], {}), "(\n f'Processing {num_weather_points} GLDAS grid points for {num_soil_points} Soil points'\n )\n", (6796, 6894), False, 'import logging, sys\n'), ((8495, 8536), 'datetime.datetime.strptime', 'datetime.strptime', (['start_date', '"""%Y-%m-%d"""'], {}), "(start_date, '%Y-%m-%d')\n", (8512, 8536), False, 'from datetime import datetime\n'), ((8552, 8591), 'datetime.datetime.strptime', 'datetime.strptime', (['end_date', '"""%Y-%m-%d"""'], {}), "(end_date, '%Y-%m-%d')\n", (8569, 8591), False, 'from datetime import datetime\n'), ((12653, 12767), 'logging.info', 'logging.info', (['f"""Done converting GLDAS data to cycles input weather file for {num_weather_points} points"""'], {}), "(\n f'Done converting GLDAS data to cycles input weather file for {num_weather_points} points'\n )\n", (12665, 12767), False, 'import logging, sys\n'), ((12763, 12885), 'logging.info', 'logging.info', (['f"""Creating {num_soil_points} cycles input zip files, each containing a weather and a soil file..."""'], {}), "(\n f'Creating {num_soil_points} cycles input zip files, each containing a weather and a soil file...'\n )\n", (12775, 12885), False, 'import logging, sys\n'), ((14247, 14318), 'logging.info', 'logging.info', (['f"""Done Creating {num_soil_points} cycles input zip files"""'], {}), "(f'Done Creating {num_soil_points} cycles input zip files')\n", (14259, 14318), False, 'import logging, sys\n'), ((4484, 4550), 'xarray.ufuncs.exp', 'xr.ufuncs.exp', (['(17.67 * (_temp - 273.15) / (_temp - 273.15 + 243.5))'], {}), '(17.67 * (_temp - 273.15) / (_temp - 273.15 + 243.5))\n', (4497, 4550), True, 'import xarray as xr\n'), ((5320, 5347), 'xarray.open_dataset', 'xr.open_dataset', (['first_file'], {}), '(first_file)\n', (5335, 5347), True, 'import xarray as xr\n'), ((5663, 5733), 'xarray.open_mfdataset', 'xr.open_mfdataset', (['gldas_files'], {'drop_variables': 'varnames', 'chunks': '"""auto"""'}), "(gldas_files, drop_variables=varnames, chunks='auto')\n", (5680, 5733), True, 'import xarray as xr\n'), ((5960, 5991), 'os.path.exists', 'os.path.exists', (['gldas_directory'], {}), '(gldas_directory)\n', (5974, 5991), False, 'import os\n'), ((6069, 6096), 'os.path.exists', 'os.path.exists', (['output_path'], {}), '(output_path)\n', (6083, 6096), False, 'import os\n'), ((6361, 6376), 'json.load', 'json.load', (['mapf'], {}), '(mapf)\n', (6370, 6376), False, 'import json\n'), ((7257, 7304), 'os.path.join', 'os.path.join', (['output_path', 'common_weather_fname'], {}), '(output_path, common_weather_fname)\n', (7269, 7304), False, 'import os\n'), ((7367, 7402), 'os.path.exists', 'os.path.exists', (['common_weather_file'], {}), '(common_weather_file)\n', (7381, 7402), False, 'import os\n'), ((8820, 8922), 'logging.info', 'logging.info', (['f"""Fetching GLDAS files list for dates from {cur_start_date} to {cur_end_date}"""'], {}), "(\n f'Fetching GLDAS files list for dates from {cur_start_date} to {cur_end_date}'\n )\n", (8832, 8922), False, 'import logging, sys\n'), ((8922, 8971), 'logging.info', 'logging.info', (['"""Downloading missing GLDAS files.."""'], {}), "('Downloading missing GLDAS files..')\n", (8934, 8971), False, 'import logging, sys\n'), ((10092, 10160), 'logging.info', 'logging.info', (['f"""Loading GLDAS data from {num_weather_files} files.."""'], {}), "(f'Loading GLDAS data from {num_weather_files} files..')\n", (10104, 10160), False, 'import logging, sys\n'), ((10220, 10253), 'logging.info', 'logging.info', (['"""Loaded GLDAS data"""'], {}), "('Loaded GLDAS data')\n", (10232, 10253), False, 'import logging, sys\n'), ((13306, 13353), 'os.path.join', 'os.path.join', (['output_path', 'common_weather_fname'], {}), '(output_path, common_weather_fname)\n', (13318, 13353), False, 'import os\n'), ((13363, 13417), 'logging.debug', 'logging.debug', (['f"""Creating Cycles zip file for {fname}"""'], {}), "(f'Creating Cycles zip file for {fname}')\n", (13376, 13417), False, 'import logging, sys\n'), ((13581, 13618), 'os.path.join', 'os.path.join', (['output_path', 'soil_fname'], {}), '(output_path, soil_fname)\n', (13593, 13618), False, 'import os\n'), ((13650, 13690), 'os.path.join', 'os.path.join', (['output_path', 'weather_fname'], {}), '(output_path, weather_fname)\n', (13662, 13690), False, 'import os\n'), ((13729, 13776), 'os.path.join', 'os.path.join', (['output_path', 'common_weather_fname'], {}), '(output_path, common_weather_fname)\n', (13741, 13776), False, 'import os\n'), ((13806, 13842), 'os.path.join', 'os.path.join', (['output_path', 'zip_fname'], {}), '(output_path, zip_fname)\n', (13818, 13842), False, 'import os\n'), ((13987, 14018), 'zipfile.ZipFile', 'ZipFile', (['soil_weather_file', '"""w"""'], {}), "(soil_weather_file, 'w')\n", (13994, 14018), False, 'from zipfile import ZipFile\n'), ((14161, 14206), 'logging.debug', 'logging.debug', (['"""Done writing cycles zip file"""'], {}), "('Done writing cycles zip file')\n", (14174, 14206), False, 'import logging, sys\n'), ((8703, 8732), 'datetime.timedelta', 'timedelta', ([], {'days': 'batch_numdays'}), '(days=batch_numdays)\n', (8712, 8732), False, 'from datetime import timedelta\n'), ((9405, 9450), 'datetime.datetime.strptime', 'datetime.strptime', (['gldas_date_str', '"""%Y-%m-%d"""'], {}), "(gldas_date_str, '%Y-%m-%d')\n", (9422, 9450), False, 'from datetime import datetime\n'), ((9595, 9643), 'os.path.join', 'os.path.join', (['nc_path', "resource['resource_name']"], {}), "(nc_path, resource['resource_name'])\n", (9607, 9643), False, 'import os\n'), ((9973, 9994), 'os.path.exists', 'os.path.exists', (['ofile'], {}), '(ofile)\n', (9987, 9994), False, 'import os\n'), ((10641, 10688), 'os.path.join', 'os.path.join', (['output_path', 'common_weather_fname'], {}), '(output_path, common_weather_fname)\n', (10653, 10688), False, 'import os\n'), ((11149, 11213), 'logging.debug', 'logging.debug', (['f"""Loading GLDAS data for grid point {lat}, {lon}"""'], {}), "(f'Loading GLDAS data for grid point {lat}, {lon}')\n", (11162, 11213), False, 'import logging, sys\n'), ((11329, 11376), 'logging.debug', 'logging.debug', (['"""Loaded gldas data for location"""'], {}), "('Loaded gldas data for location')\n", (11342, 11376), False, 'import logging, sys\n'), ((11390, 11438), 'logging.debug', 'logging.debug', (['"""Converting to Cycles input data"""'], {}), "('Converting to Cycles input data')\n", (11403, 11438), False, 'import logging, sys\n'), ((11550, 11607), 'logging.debug', 'logging.debug', (['"""Finished conversion to cycles input data"""'], {}), "('Finished conversion to cycles input data')\n", (11563, 11607), False, 'import logging, sys\n'), ((11633, 11702), 'logging.debug', 'logging.debug', (['"""Converting weather input data to Pandas Dataframe..."""'], {}), "('Converting weather input data to Pandas Dataframe...')\n", (11646, 11702), False, 'import logging, sys\n'), ((11830, 11879), 'logging.debug', 'logging.debug', (['"""Finished converting to Dataframe"""'], {}), "('Finished converting to Dataframe')\n", (11843, 11879), False, 'import logging, sys\n'), ((11894, 11944), 'logging.debug', 'logging.debug', (['"""Writing the cycles weather file.."""'], {}), "('Writing the cycles weather file..')\n", (11907, 11944), False, 'import logging, sys\n'), ((13171, 13196), 'os.path.exists', 'os.path.exists', (['soil_path'], {}), '(soil_path)\n', (13185, 13196), False, 'import os\n'), ((13879, 13898), 'pathlib.Path', 'Path', (['tmp_soil_file'], {}), '(tmp_soil_file)\n', (13883, 13898), False, 'from pathlib import Path\n'), ((13945, 13967), 'pathlib.Path', 'Path', (['tmp_weather_file'], {}), '(tmp_weather_file)\n', (13949, 13967), False, 'from pathlib import Path\n'), ((6000, 6021), 'pathlib.Path', 'Path', (['gldas_directory'], {}), '(gldas_directory)\n', (6004, 6021), False, 'from pathlib import Path\n'), ((6105, 6122), 'pathlib.Path', 'Path', (['output_path'], {}), '(output_path)\n', (6109, 6122), False, 'from pathlib import Path\n'), ((9052, 9074), 'dtran.dcat.api.DCatAPI.get_instance', 'DCatAPI.get_instance', ([], {}), '()\n', (9072, 9074), False, 'from dtran.dcat.api import DCatAPI\n'), ((9663, 9686), 'os.path.exists', 'os.path.exists', (['nc_path'], {}), '(nc_path)\n', (9677, 9686), False, 'import os\n'), ((9772, 9793), 'os.path.exists', 'os.path.exists', (['ofile'], {}), '(ofile)\n', (9786, 9793), False, 'import os\n'), ((9811, 9831), 'logging.debug', 'logging.debug', (['ofile'], {}), '(ofile)\n', (9824, 9831), False, 'import logging, sys\n'), ((9848, 9962), 'subprocess.check_call', 'subprocess.check_call', (['f"""wget -q "{resource[\'resource_data_url\']}" -O {ofile}"""'], {'shell': '(True)', 'close_fds': '(False)'}), '(f\'wget -q "{resource[\\\'resource_data_url\\\']}" -O {ofile}\'\n , shell=True, close_fds=False)\n', (9869, 9962), False, 'import subprocess\n'), ((7597, 7619), 're.split', 're.split', (['"""\\\\s+"""', 'line'], {}), "('\\\\s+', line)\n", (7605, 7619), False, 'import re\n'), ((10885, 10902), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (10894, 10902), False, 'from datetime import timedelta\n'), ((9704, 9717), 'pathlib.Path', 'Path', (['nc_path'], {}), '(nc_path)\n', (9708, 9717), False, 'from pathlib import Path\n')]
|
from django.core.exceptions import ValidationError
def only_letters_validator(value):
for ch in value:
if not ch.isalpha():
raise ValidationError("Value must contains only letters")
def file_max_size_in_mb_validator(max_size):
def validate(value):
filesize = value.file.size
if filesize > max_size * 1024 * 1024:
raise ValidationError("Max file size is %sMB" % str(max_size))
return validate
|
[
"django.core.exceptions.ValidationError"
] |
[((156, 207), 'django.core.exceptions.ValidationError', 'ValidationError', (['"""Value must contains only letters"""'], {}), "('Value must contains only letters')\n", (171, 207), False, 'from django.core.exceptions import ValidationError\n')]
|
import torch
from rlpyt.utils.tensor import infer_leading_dims, restore_leading_dims
from rlpyt.models.conv2d import Conv2dModel
from rlpyt.models.mlp import MlpModel
from rlpyt.models.dqn.dueling import DuelingHeadModel
class CartpoleDqnModel(torch.nn.Module):
def __init__(
self,
image_shape,
output_size,
fc_sizes=[64, 64],
dueling=False,
use_maxpool=False,
channels=None, # None uses default.
kernel_sizes=None,
strides=None,
paddings=None,
):
super().__init__()
self.dueling = dueling
input_size = image_shape[0]
# self.mlp = MlpModel(input_size, fc_sizes, output_size)
if dueling:
self.head = DuelingHeadModel(input_size, fc_sizes, output_size)
else:
self.head = MlpModel(input_size, fc_sizes, output_size)
def forward(self, observation, prev_action, prev_reward):
"""Feedforward layers process as [T*B,H]. Return same leading dims as
input, can be [T,B], [B], or []."""
img = observation.type(torch.float) # Expect torch.uint8 inputs
# Infer (presence of) leading dimensions: [T,B], [B], or [].
lead_dim, T, B, img_shape = infer_leading_dims(img, 1)
# conv_out = self.conv(img.view(T * B, *img_shape)) # Fold if T dimension.
q = self.head(img.view(T * B, -1))
# Restore leading dimensions: [T,B], [B], or [], as input.
q = restore_leading_dims(q, lead_dim, T, B)
return q
|
[
"rlpyt.utils.tensor.restore_leading_dims",
"rlpyt.models.dqn.dueling.DuelingHeadModel",
"rlpyt.utils.tensor.infer_leading_dims",
"rlpyt.models.mlp.MlpModel"
] |
[((1291, 1317), 'rlpyt.utils.tensor.infer_leading_dims', 'infer_leading_dims', (['img', '(1)'], {}), '(img, 1)\n', (1309, 1317), False, 'from rlpyt.utils.tensor import infer_leading_dims, restore_leading_dims\n'), ((1526, 1565), 'rlpyt.utils.tensor.restore_leading_dims', 'restore_leading_dims', (['q', 'lead_dim', 'T', 'B'], {}), '(q, lead_dim, T, B)\n', (1546, 1565), False, 'from rlpyt.utils.tensor import infer_leading_dims, restore_leading_dims\n'), ((793, 844), 'rlpyt.models.dqn.dueling.DuelingHeadModel', 'DuelingHeadModel', (['input_size', 'fc_sizes', 'output_size'], {}), '(input_size, fc_sizes, output_size)\n', (809, 844), False, 'from rlpyt.models.dqn.dueling import DuelingHeadModel\n'), ((883, 926), 'rlpyt.models.mlp.MlpModel', 'MlpModel', (['input_size', 'fc_sizes', 'output_size'], {}), '(input_size, fc_sizes, output_size)\n', (891, 926), False, 'from rlpyt.models.mlp import MlpModel\n')]
|
from .base_lot import *
import numpy as np
import os
from .units import *
#TODO get rid of get_energy
class QChem(Lot):
def run(self,geom,multiplicity):
tempfilename = 'tempQCinp'
tempfile = open(tempfilename,'w')
if self.lot_inp_file == False:
tempfile.write(' $rem\n')
tempfile.write(' JOBTYPE FORCE\n')
tempfile.write(' EXCHANGE {}\n'.format(self.functional))
tempfile.write(' SCF_ALGORITHM rca_diis\n')
tempfile.write(' SCF_MAX_CYCLES 300\n')
tempfile.write(' BASIS {}\n'.format(self.basis))
#tempfile.write(' ECP LANL2DZ \n')
tempfile.write(' WAVEFUNCTION_ANALYSIS FALSE\n')
tempfile.write(' GEOM_OPT_MAX_CYCLES 300\n')
tempfile.write('scf_convergence 6\n')
tempfile.write(' SYM_IGNORE TRUE\n')
tempfile.write(' SYMMETRY FALSE\n')
tempfile.write('molden_format true\n')
tempfile.write(' $end\n')
tempfile.write('\n')
tempfile.write('$molecule\n')
else:
with open(self.lot_inp_file) as lot_inp:
lot_inp_lines = lot_inp.readlines()
for line in lot_inp_lines:
tempfile.write(line)
tempfile.write('{} {}\n'.format(self.charge,multiplicity))
if os.path.isfile("link.txt"):
with open("link.txt") as link:
link_lines = link.readlines()
tmp_geom = [list(i) for i in geom]
for i,coord in enumerate(tmp_geom):
coord.append(link_lines[i].rstrip('\n'))
for i in coord:
tempfile.write(str(i)+' ')
tempfile.write('\n')
else:
for coord in geom:
for i in coord:
tempfile.write(str(i)+' ')
tempfile.write('\n')
tempfile.write('$end')
tempfile.close()
cmd = "qchem -nt {} -save {} {}.qchem.out {}.{}".format(self.nproc,tempfilename,tempfilename,self.node_id,multiplicity)
os.system(cmd)
efilepath = os.environ['QCSCRATCH']
efilepath += '/{}.{}/GRAD'.format(self.node_id,multiplicity)
with open(efilepath) as efile:
elines = efile.readlines()
temp = 0
for lines in elines:
if temp == 1:
self.E.append((multiplicity,float(lines.split()[0])))
break
if "$" in lines:
temp += 1
gradfilepath = os.environ['QCSCRATCH']
gradfilepath += '/{}.{}/GRAD'.format(self.node_id,multiplicity)
with open(gradfilepath) as gradfile:
gradlines = gradfile.readlines()
temp = 0
tmp=[]
for lines in gradlines:
if '$' in lines:
temp+=1
elif temp == 2:
tmpline = lines.split()
tmp.append([float(i) for i in tmpline])
elif temp == 3:
break
self.grada.append((multiplicity,tmp))
return
def get_energy(self,coords,multiplicity,state):
#if self.has_nelectrons==False:
# for i in self.states:
# self.get_nelec(geom,i[0])
# self.has_nelectrons==True
if self.hasRanForCurrentCoords==False or (coords != self.currentCoords).any():
self.currentCoords = coords.copy()
geom = manage_xyz.np_to_xyz(self.geom,self.currentCoords)
self.runall(geom)
self.hasRanForCurrentCoords=True
tmp = self.search_tuple(self.E,multiplicity)
return np.asarray(tmp[state][1])*KCAL_MOL_PER_AU
def get_gradient(self,coords,multiplicity,state):
#if self.has_nelectrons==False:
# for i in self.states:
# self.get_nelec(geom,i[0])
# self.has_nelectrons==True
if self.hasRanForCurrentCoords==False or (coords != self.currentCoords).any():
self.currentCoords = coords.copy()
geom = manage_xyz.np_to_xyz(self.geom,self.currentCoords)
self.runall(geom)
tmp = self.search_tuple(self.grada,multiplicity)
return np.asarray(tmp[state][1])*ANGSTROM_TO_AU
@classmethod
def copy(cls,lot,**kwargs):
base = os.environ['QCSCRATCH']
for state in self.states:
multiplicity = state[0]
efilepath_old=base+ '/{}.{}'.format(self.node_id,multiplicity)
efilepath_new =base+ '/{}.{}'.format(node_id,multiplicity)
os.system('cp -r ' + efilepath_old +' ' + efilepath_new)
return cls(lot.options.copy().set_values(options))
|
[
"numpy.asarray",
"os.path.isfile",
"os.system"
] |
[((1355, 1381), 'os.path.isfile', 'os.path.isfile', (['"""link.txt"""'], {}), "('link.txt')\n", (1369, 1381), False, 'import os\n'), ((2103, 2117), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (2112, 2117), False, 'import os\n'), ((3668, 3693), 'numpy.asarray', 'np.asarray', (['tmp[state][1]'], {}), '(tmp[state][1])\n', (3678, 3693), True, 'import numpy as np\n'), ((4228, 4253), 'numpy.asarray', 'np.asarray', (['tmp[state][1]'], {}), '(tmp[state][1])\n', (4238, 4253), True, 'import numpy as np\n'), ((4586, 4643), 'os.system', 'os.system', (["('cp -r ' + efilepath_old + ' ' + efilepath_new)"], {}), "('cp -r ' + efilepath_old + ' ' + efilepath_new)\n", (4595, 4643), False, 'import os\n')]
|
# System imports
import abc
import RPi.GPIO as GPIO
# Local imports
from mtda.usb.switch import UsbSwitch
class RPiGpioUsbSwitch(UsbSwitch):
def __init__(self):
self.dev = None
self.pin = 0
self.enable = GPIO.HIGH
self.disable = GPIO.LOW
GPIO.setwarnings(False)
def configure(self, conf):
""" Configure this USB switch from the provided configuration"""
if 'pin' in conf:
self.pin = int(conf['pin'], 10)
if 'enable' in conf:
if conf['enable'] == 'high':
self.enable = GPIO.HIGH
self.disable = GPIO.LOW
elif conf['enable'] == 'low':
self.enable = GPIO.LOW
self.disable = GPIO.HIGH
if self.pin > 0:
GPIO.setmode(GPIO.BCM)
GPIO.setup(self.pin, GPIO.OUT)
return
def probe(self):
return
def on(self):
""" Power on the target USB port"""
GPIO.output(self.pin, self.enable)
return self.status() == self.POWERED_ON
def off(self):
""" Power off the target USB port"""
GPIO.output(self.pin, self.disable)
return self.status() == self.POWERED_OFF
def status(self):
""" Determine the current power state of the USB port"""
if GPIO.input(self.pin) == self.enable:
return self.POWERED_ON
else:
return self.POWERED_OFF
def toggle(self):
s = self.status()
if s == self.POWERED_ON:
self.off()
return self.POWERED_OFF
else:
self.on()
return self.POWERED_ON
def instantiate():
return RPiGpioUsbSwitch()
|
[
"RPi.GPIO.setmode",
"RPi.GPIO.setup",
"RPi.GPIO.input",
"RPi.GPIO.output",
"RPi.GPIO.setwarnings"
] |
[((294, 317), 'RPi.GPIO.setwarnings', 'GPIO.setwarnings', (['(False)'], {}), '(False)\n', (310, 317), True, 'import RPi.GPIO as GPIO\n'), ((993, 1027), 'RPi.GPIO.output', 'GPIO.output', (['self.pin', 'self.enable'], {}), '(self.pin, self.enable)\n', (1004, 1027), True, 'import RPi.GPIO as GPIO\n'), ((1149, 1184), 'RPi.GPIO.output', 'GPIO.output', (['self.pin', 'self.disable'], {}), '(self.pin, self.disable)\n', (1160, 1184), True, 'import RPi.GPIO as GPIO\n'), ((804, 826), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BCM'], {}), '(GPIO.BCM)\n', (816, 826), True, 'import RPi.GPIO as GPIO\n'), ((839, 869), 'RPi.GPIO.setup', 'GPIO.setup', (['self.pin', 'GPIO.OUT'], {}), '(self.pin, GPIO.OUT)\n', (849, 869), True, 'import RPi.GPIO as GPIO\n'), ((1333, 1353), 'RPi.GPIO.input', 'GPIO.input', (['self.pin'], {}), '(self.pin)\n', (1343, 1353), True, 'import RPi.GPIO as GPIO\n')]
|
import sys
import tensorflow as tf
import leveldb
from absl import app
from absl import flags
from absl import logging
from datetime import datetime
import warnings
import glob
import toml
import re
from contextlib import redirect_stdout
import collections
import datetime
import functools
import itertools
import math
import numpy as np
import os
import random
import sys
import time
from zipfile import ZipFile
from datetime import datetime
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import regularizers
from tensorflow.keras.callbacks import TerminateOnNaN, EarlyStopping, ModelCheckpoint, LambdaCallback, Callback
from tensorflow.keras.layers import BatchNormalization, LayerNormalization, Flatten, Add, Conv2D, Permute
from tensorflow.keras.layers import Dense, Dropout, Input, Embedding, Concatenate, Activation
from tensorflow.keras.layers import GaussianNoise, LeakyReLU, Softmax
from tensorflow.keras.layers.experimental.preprocessing import IntegerLookup, Discretization
from tensorflow.keras.losses import SparseCategoricalCrossentropy
from tensorflow.keras.metrics import AUC
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam, Ftrl, SGD
from tensorflow.python.keras import backend
import pandas as pd
from data import create_input_generator
from plan import load_plan
from model import create_model
from lr import create_lr_schedule
from tf_utils_callbacks.callbacks import BestNModelCheckpoint
from timing_callback import TimingCallback
FLAGS = flags.FLAGS
flags.DEFINE_string('plan', None, 'toml file')
flags.DEFINE_multi_string('d', None, 'override plan settings')
def df_to_csv(df, fn, float_format='%6.4f'):
df.to_csv(fn, index=False, float_format=float_format)
class LogLrCallback(Callback):
def on_epoch_end(self, epoch, logs):
try:
logs['lr'] = float(backend.get_value(self.model.optimizer.lr(epoch)))
except TypeError:
logs['lr'] = float(backend.get_value(self.model.optimizer.lr))
def main(_argv):
flags.mark_flags_as_required(['plan'])
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
logging.set_verbosity('error')
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
warnings.filterwarnings('ignore', category=Warning)
t1 = time.time()
timing = TimingCallback()
timing.record('overall_begin')
out_dir = datetime.today().strftime('%Y-%m-%d_%H:%M:%S')
out_dir = os.path.join('results', out_dir)
print('mkdir', out_dir)
os.mkdir(out_dir)
plan = load_plan(FLAGS.plan)
fn = os.path.join(out_dir, os.path.basename(FLAGS.plan))
print(f'Write {fn}')
with open(fn, 'w') as f:
toml.dump(plan, f)
os.chmod(fn, 0o444)
mplan = plan.model
return_legal_moves = mplan.mask_legal_moves
dplan = plan.data
ds1 = create_input_generator(dplan, dplan.train, is_train=True, return_legal_moves=return_legal_moves)
ds2 = create_input_generator(dplan, dplan.validate, is_train=False, return_legal_moves=return_legal_moves)
ds3 = create_input_generator(dplan, dplan.test, is_train=False, do_repeat=False,
return_legal_moves=return_legal_moves) if 'test' in dplan else None
if dplan.prefetch_to_device:
bs = dplan.get('prefetch_to_device_buffer', None)
ds1 = ds1.apply(tf.data.experimental.prefetch_to_device('/gpu:0', bs))
ds2 = ds2.apply(tf.data.experimental.prefetch_to_device('/gpu:0', bs))
ds3 = ds3.apply(tf.data.experimental.prefetch_to_device('/gpu:0', bs))
m = create_model(mplan)
fn = os.path.join(out_dir, 'model-summary.txt')
print(f'Write {fn}')
with open(fn, 'w') as f:
with redirect_stdout(f):
m.summary()
os.chmod(fn, 0o444)
callbacks = [TerminateOnNaN(),
LogLrCallback()]
tplan = plan.train
(lr_callback, lr) = create_lr_schedule(tplan)
if lr_callback:
callbacks.append(lr_callback)
# lr = CosineDecayRestarts(initial_learning_rate=tplan.lr,
# first_decay_steps=tplan.first_decay_steps,
# t_mul=1,
# m_mul=1,
# alpha=tplan.alpha)
if tplan.optimizer == 'SGD':
optimizer = SGD(learning_rate=lr)
elif tplan.optimizer == 'Adam':
optimizer = Adam(learning_rate=lr)
else:
assert False, tplan.optimizer
m.compile(optimizer=optimizer,
loss=SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
#tf.keras.metrics.Precision(top_k=3, name='p_3'),
#tf.keras.metrics.Recall(top_k=3, name='r_3')])
best_path = os.path.join(out_dir, 'best.model')
callbacks.append(BestNModelCheckpoint(
filepath=best_path,
monitor='val_accuracy',
model='max',
max_to_keep=1,
save_weights_only=False,
verbose=0))
callbacks.append(timing)
timing.record('on_fit_begin')
history = m.fit(x=ds1,
epochs=tplan.epochs,
steps_per_epoch=tplan.steps_per_epoch,
validation_data=ds2,
validation_steps=tplan.validation_steps,
callbacks=callbacks)
timing.record('on_fit_end')
df = pd.DataFrame(history.history)
fn = os.path.join(out_dir, 'last.model')
print(f'Write {fn}')
m.save(fn)
os.chmod(fn, 0o755)
timing.record('after_fit_begin')
test_ev, test_ev2 = None, None
if tplan.test_steps > 0 and ds3:
tt0 = time.time()
print('Test (last)')
test_ev = m.evaluate(x=ds3, return_dict=True, steps=tplan.test_steps)
dt = time.time() - tt0
print('Test:', test_ev, int(dt))
print('Test (best)')
tt0 = time.time()
ds3 = create_input_generator(dplan, dplan.test, is_train=False, return_legal_moves=return_legal_moves) # rewind
if dplan.prefetch_to_device:
ds3 = ds3.apply(tf.data.experimental.prefetch_to_device('/gpu:0', 32))
test_ev2 = tf.keras.models.load_model(best_path).evaluate(x=ds3, return_dict=True, steps=tplan.test_steps)
dt = time.time() - tt0
print('Test/2:', test_ev2, int(dt))
timing.record('after_fit_end')
fn = os.path.join(out_dir, 'history.csv')
print(f'Write {fn}')
with open(fn, 'w') as f:
df_to_csv(df, f)
os.chmod(fn, 0o444)
v1 = df['val_accuracy'].max()
v2 = df['val_accuracy'].values[-1]
fn = os.path.join(out_dir, 'report.txt')
print(f'Write {fn}')
with open(fn, 'w') as f:
print(f'val_accuracy {v1:6.4f} (best)')
print(f' {v2:6.4f} (last)')
if test_ev:
print(f'test_accuracy {test_ev2["accuracy"]:6.4f} (best)')
print(f' {test_ev["accuracy"]:6.4f} (last)')
f.write(f'val_accuracy : {v1:6.4f} (best)\n')
f.write(f'val_accuracy : {v2:6.4f} (last)\n')
if test_ev:
f.write(f'test_accuracy : {test_ev2["accuracy"]:6.4f} (best)\n')
f.write(f'test_accuracy : {test_ev["accuracy"]:6.4f} (last)\n')
f.write(f'time : {int(time.time() - t1)}\n')
os.chmod(fn, 0o444)
timing.record('overall_end')
print('Timing')
for k in timing.tot:
print(f'{k:16s} | {timing.num[k]:8d} | {timing.tot[k]:.2f}')
if __name__ == '__main__':
app.run(main)
|
[
"os.mkdir",
"data.create_input_generator",
"tensorflow.keras.optimizers.SGD",
"os.path.join",
"absl.logging.set_verbosity",
"pandas.DataFrame",
"absl.flags.mark_flags_as_required",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.python.keras.backend.get_value",
"tensorflow.keras.optimizers.Adam",
"model.create_model",
"os.chmod",
"tensorflow.keras.models.load_model",
"datetime.datetime.today",
"os.path.basename",
"tensorflow.keras.callbacks.TerminateOnNaN",
"plan.load_plan",
"contextlib.redirect_stdout",
"timing_callback.TimingCallback",
"tf_utils_callbacks.callbacks.BestNModelCheckpoint",
"tensorflow.data.experimental.prefetch_to_device",
"warnings.filterwarnings",
"lr.create_lr_schedule",
"absl.flags.DEFINE_multi_string",
"time.time",
"absl.flags.DEFINE_string",
"absl.app.run",
"tensorflow.compat.v1.logging.set_verbosity",
"toml.dump"
] |
[((1561, 1607), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""plan"""', 'None', '"""toml file"""'], {}), "('plan', None, 'toml file')\n", (1580, 1607), False, 'from absl import flags\n'), ((1609, 1671), 'absl.flags.DEFINE_multi_string', 'flags.DEFINE_multi_string', (['"""d"""', 'None', '"""override plan settings"""'], {}), "('d', None, 'override plan settings')\n", (1634, 1671), False, 'from absl import flags\n'), ((2044, 2082), 'absl.flags.mark_flags_as_required', 'flags.mark_flags_as_required', (["['plan']"], {}), "(['plan'])\n", (2072, 2082), False, 'from absl import flags\n'), ((2085, 2147), 'tensorflow.compat.v1.logging.set_verbosity', 'tf.compat.v1.logging.set_verbosity', (['tf.compat.v1.logging.ERROR'], {}), '(tf.compat.v1.logging.ERROR)\n', (2119, 2147), True, 'import tensorflow as tf\n'), ((2150, 2180), 'absl.logging.set_verbosity', 'logging.set_verbosity', (['"""error"""'], {}), "('error')\n", (2171, 2180), False, 'from absl import logging\n'), ((2226, 2277), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'Warning'}), "('ignore', category=Warning)\n", (2249, 2277), False, 'import warnings\n'), ((2286, 2297), 'time.time', 'time.time', ([], {}), '()\n', (2295, 2297), False, 'import time\n'), ((2310, 2326), 'timing_callback.TimingCallback', 'TimingCallback', ([], {}), '()\n', (2324, 2326), False, 'from timing_callback import TimingCallback\n'), ((2432, 2464), 'os.path.join', 'os.path.join', (['"""results"""', 'out_dir'], {}), "('results', out_dir)\n", (2444, 2464), False, 'import os\n'), ((2493, 2510), 'os.mkdir', 'os.mkdir', (['out_dir'], {}), '(out_dir)\n', (2501, 2510), False, 'import os\n'), ((2521, 2542), 'plan.load_plan', 'load_plan', (['FLAGS.plan'], {}), '(FLAGS.plan)\n', (2530, 2542), False, 'from plan import load_plan\n'), ((2677, 2694), 'os.chmod', 'os.chmod', (['fn', '(292)'], {}), '(fn, 292)\n', (2685, 2694), False, 'import os\n'), ((2794, 2894), 'data.create_input_generator', 'create_input_generator', (['dplan', 'dplan.train'], {'is_train': '(True)', 'return_legal_moves': 'return_legal_moves'}), '(dplan, dplan.train, is_train=True,\n return_legal_moves=return_legal_moves)\n', (2816, 2894), False, 'from data import create_input_generator\n'), ((2900, 3004), 'data.create_input_generator', 'create_input_generator', (['dplan', 'dplan.validate'], {'is_train': '(False)', 'return_legal_moves': 'return_legal_moves'}), '(dplan, dplan.validate, is_train=False,\n return_legal_moves=return_legal_moves)\n', (2922, 3004), False, 'from data import create_input_generator\n'), ((3502, 3521), 'model.create_model', 'create_model', (['mplan'], {}), '(mplan)\n', (3514, 3521), False, 'from model import create_model\n'), ((3529, 3571), 'os.path.join', 'os.path.join', (['out_dir', '"""model-summary.txt"""'], {}), "(out_dir, 'model-summary.txt')\n", (3541, 3571), False, 'import os\n'), ((3671, 3688), 'os.chmod', 'os.chmod', (['fn', '(292)'], {}), '(fn, 292)\n', (3679, 3688), False, 'import os\n'), ((3801, 3826), 'lr.create_lr_schedule', 'create_lr_schedule', (['tplan'], {}), '(tplan)\n', (3819, 3826), False, 'from lr import create_lr_schedule\n'), ((4571, 4606), 'os.path.join', 'os.path.join', (['out_dir', '"""best.model"""'], {}), "(out_dir, 'best.model')\n", (4583, 4606), False, 'import os\n'), ((5138, 5167), 'pandas.DataFrame', 'pd.DataFrame', (['history.history'], {}), '(history.history)\n', (5150, 5167), True, 'import pandas as pd\n'), ((5176, 5211), 'os.path.join', 'os.path.join', (['out_dir', '"""last.model"""'], {}), "(out_dir, 'last.model')\n", (5188, 5211), False, 'import os\n'), ((5250, 5267), 'os.chmod', 'os.chmod', (['fn', '(493)'], {}), '(fn, 493)\n', (5258, 5267), False, 'import os\n'), ((6053, 6089), 'os.path.join', 'os.path.join', (['out_dir', '"""history.csv"""'], {}), "(out_dir, 'history.csv')\n", (6065, 6089), False, 'import os\n'), ((6163, 6180), 'os.chmod', 'os.chmod', (['fn', '(292)'], {}), '(fn, 292)\n', (6171, 6180), False, 'import os\n'), ((6261, 6296), 'os.path.join', 'os.path.join', (['out_dir', '"""report.txt"""'], {}), "(out_dir, 'report.txt')\n", (6273, 6296), False, 'import os\n'), ((6912, 6929), 'os.chmod', 'os.chmod', (['fn', '(292)'], {}), '(fn, 292)\n', (6920, 6929), False, 'import os\n'), ((7100, 7113), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (7107, 7113), False, 'from absl import app\n'), ((2572, 2600), 'os.path.basename', 'os.path.basename', (['FLAGS.plan'], {}), '(FLAGS.plan)\n', (2588, 2600), False, 'import os\n'), ((2656, 2674), 'toml.dump', 'toml.dump', (['plan', 'f'], {}), '(plan, f)\n', (2665, 2674), False, 'import toml\n'), ((3010, 3127), 'data.create_input_generator', 'create_input_generator', (['dplan', 'dplan.test'], {'is_train': '(False)', 'do_repeat': '(False)', 'return_legal_moves': 'return_legal_moves'}), '(dplan, dplan.test, is_train=False, do_repeat=False,\n return_legal_moves=return_legal_moves)\n', (3032, 3127), False, 'from data import create_input_generator\n'), ((3707, 3723), 'tensorflow.keras.callbacks.TerminateOnNaN', 'TerminateOnNaN', ([], {}), '()\n', (3721, 3723), False, 'from tensorflow.keras.callbacks import TerminateOnNaN, EarlyStopping, ModelCheckpoint, LambdaCallback, Callback\n'), ((4184, 4205), 'tensorflow.keras.optimizers.SGD', 'SGD', ([], {'learning_rate': 'lr'}), '(learning_rate=lr)\n', (4187, 4205), False, 'from tensorflow.keras.optimizers import Adam, Ftrl, SGD\n'), ((4626, 4759), 'tf_utils_callbacks.callbacks.BestNModelCheckpoint', 'BestNModelCheckpoint', ([], {'filepath': 'best_path', 'monitor': '"""val_accuracy"""', 'model': '"""max"""', 'max_to_keep': '(1)', 'save_weights_only': '(False)', 'verbose': '(0)'}), "(filepath=best_path, monitor='val_accuracy', model=\n 'max', max_to_keep=1, save_weights_only=False, verbose=0)\n", (4646, 4759), False, 'from tf_utils_callbacks.callbacks import BestNModelCheckpoint\n'), ((5384, 5395), 'time.time', 'time.time', ([], {}), '()\n', (5393, 5395), False, 'import time\n'), ((5595, 5606), 'time.time', 'time.time', ([], {}), '()\n', (5604, 5606), False, 'import time\n'), ((5617, 5717), 'data.create_input_generator', 'create_input_generator', (['dplan', 'dplan.test'], {'is_train': '(False)', 'return_legal_moves': 'return_legal_moves'}), '(dplan, dplan.test, is_train=False,\n return_legal_moves=return_legal_moves)\n', (5639, 5717), False, 'from data import create_input_generator\n'), ((2373, 2389), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (2387, 2389), False, 'from datetime import datetime\n'), ((3290, 3343), 'tensorflow.data.experimental.prefetch_to_device', 'tf.data.experimental.prefetch_to_device', (['"""/gpu:0"""', 'bs'], {}), "('/gpu:0', bs)\n", (3329, 3343), True, 'import tensorflow as tf\n'), ((3365, 3418), 'tensorflow.data.experimental.prefetch_to_device', 'tf.data.experimental.prefetch_to_device', (['"""/gpu:0"""', 'bs'], {}), "('/gpu:0', bs)\n", (3404, 3418), True, 'import tensorflow as tf\n'), ((3440, 3493), 'tensorflow.data.experimental.prefetch_to_device', 'tf.data.experimental.prefetch_to_device', (['"""/gpu:0"""', 'bs'], {}), "('/gpu:0', bs)\n", (3479, 3493), True, 'import tensorflow as tf\n'), ((3631, 3649), 'contextlib.redirect_stdout', 'redirect_stdout', (['f'], {}), '(f)\n', (3646, 3649), False, 'from contextlib import redirect_stdout\n'), ((4256, 4278), 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {'learning_rate': 'lr'}), '(learning_rate=lr)\n', (4260, 4278), False, 'from tensorflow.keras.optimizers import Adam, Ftrl, SGD\n'), ((4371, 4418), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (4400, 4418), False, 'from tensorflow.keras.losses import SparseCategoricalCrossentropy\n'), ((5504, 5515), 'time.time', 'time.time', ([], {}), '()\n', (5513, 5515), False, 'import time\n'), ((5954, 5965), 'time.time', 'time.time', ([], {}), '()\n', (5963, 5965), False, 'import time\n'), ((5778, 5831), 'tensorflow.data.experimental.prefetch_to_device', 'tf.data.experimental.prefetch_to_device', (['"""/gpu:0"""', '(32)'], {}), "('/gpu:0', 32)\n", (5817, 5831), True, 'import tensorflow as tf\n'), ((5849, 5886), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['best_path'], {}), '(best_path)\n', (5875, 5886), True, 'import tensorflow as tf\n'), ((1979, 2021), 'tensorflow.python.keras.backend.get_value', 'backend.get_value', (['self.model.optimizer.lr'], {}), '(self.model.optimizer.lr)\n', (1996, 2021), False, 'from tensorflow.python.keras import backend\n'), ((6887, 6898), 'time.time', 'time.time', ([], {}), '()\n', (6896, 6898), False, 'import time\n')]
|
import asyncio
import random
from pyckaxe.utils.logging import get_logger
def preview_logging():
log = get_logger("preview_logging")
log.debug("debug")
log.info("info")
log.warning("warning")
log.error("error")
log.critical("critical")
try:
raise ValueError("don't worry this is a fake error")
except:
log.exception("exception")
async def preview_async_logging():
async def do_debug():
await asyncio.sleep(random.randint(500, 2000) / 1000)
log.debug(f"debug")
async def do_info():
await asyncio.sleep(random.randint(500, 2000) / 1000)
log.info(f"info")
async def do_warning():
await asyncio.sleep(random.randint(500, 2000) / 1000)
log.warning(f"warning")
async def do_error():
await asyncio.sleep(random.randint(500, 2000) / 1000)
log.error(f"error")
async def do_critical():
await asyncio.sleep(random.randint(500, 2000) / 1000)
log.critical(f"critical")
log = get_logger("preview_async_logging")
awaitables = [
do_debug(),
do_info(),
do_warning(),
do_error(),
do_critical(),
]
await asyncio.gather(*awaitables)
|
[
"asyncio.gather",
"pyckaxe.utils.logging.get_logger",
"random.randint"
] |
[((110, 139), 'pyckaxe.utils.logging.get_logger', 'get_logger', (['"""preview_logging"""'], {}), "('preview_logging')\n", (120, 139), False, 'from pyckaxe.utils.logging import get_logger\n'), ((1024, 1059), 'pyckaxe.utils.logging.get_logger', 'get_logger', (['"""preview_async_logging"""'], {}), "('preview_async_logging')\n", (1034, 1059), False, 'from pyckaxe.utils.logging import get_logger\n'), ((1199, 1226), 'asyncio.gather', 'asyncio.gather', (['*awaitables'], {}), '(*awaitables)\n', (1213, 1226), False, 'import asyncio\n'), ((471, 496), 'random.randint', 'random.randint', (['(500)', '(2000)'], {}), '(500, 2000)\n', (485, 496), False, 'import random\n'), ((587, 612), 'random.randint', 'random.randint', (['(500)', '(2000)'], {}), '(500, 2000)\n', (601, 612), False, 'import random\n'), ((704, 729), 'random.randint', 'random.randint', (['(500)', '(2000)'], {}), '(500, 2000)\n', (718, 729), False, 'import random\n'), ((825, 850), 'random.randint', 'random.randint', (['(500)', '(2000)'], {}), '(500, 2000)\n', (839, 850), False, 'import random\n'), ((945, 970), 'random.randint', 'random.randint', (['(500)', '(2000)'], {}), '(500, 2000)\n', (959, 970), False, 'import random\n')]
|
from enum import Enum
import numpy as np
import tensorflow as tf
from edward1_utils import get_ancestors, get_descendants
class GenerativeMode(Enum):
UNCONDITIONED = 1 # i.e. sampling the learnt prior
CONDITIONED = 2 # i.e. sampling the posterior, with variational samples substituted
RECONSTRUCTION = 3 # i.e. mode of the posterior
def noncopying_integrated_reparam_klqp(generative_builder, variational_builder, name_to_data_map, discrete_name_to_states_map, sample_count=1, beta=1., alpha=5.e6, grad_clip_magnitude=None):
# Every variable in the generative and variational should have a leading dimension that is 'IID', corresponding to
# an index-into-batch or otherwise sampled independently -- when we make substitutions, this dimension may be
# expanded to incorporate more samples. Thus, all RVs are indexed by iid-index, *
# Generative RVs are created by lambdas, taking zero or one parameters. There should be zero parameters when
# dim0 (i.e. the iid-dimension) has size fixed by ancestral variables; there should be one parameter when it's a 'root'
# variable (i.e. doesn't have any ancestor-RVs) and its base dim0 should be multiplied by that parameter
# Variational RVs are created similarly; the name given to the lambda should match that of the corresponding
# generative variable. Sample/discrete-expanded observations are retrived with a second lambda
# generative_builder is free to return any type or None; for example, it may choose to return an object containing
# some of its random variables; the unconditioned and mode-reconstruction versions of this result are returned to
# the caller
# ** note that we do not allow (but do not check for!) non-leaf non-RV tensors that are iib-indexed and have an RV as a sibling to
# ** be included as parents of RVs in the graph, as these cannot easily be expanded to the correct dimensionality -- their iid-index
# ** will always be of 'base' size, and will not broadcast correctly against 'upsampled' iid-indices of the sibling RV
# ** this could be fixed by handling such things essentially the same as expansion-like-discrete
# ** note that having RVs created with a non-default (i.e. not scalar) sample_shape will not work in general, as we call rv.sample()
# ** directly without passing this in -- so the shape will not be what the caller expects
assert len(discrete_name_to_states_map) < 2
if len(discrete_name_to_states_map) == 1:
assert False # ...as this is broken for now -- discrete states get 'blurred together'
discrete_name, discrete_states = discrete_name_to_states_map.items()[0] # discrete_states is a numpy array indexed by discrete-index, *
assert discrete_name not in name_to_data_map
else:
discrete_name = None
discrete_states = np.zeros([1])
# Build the 'prior', i.e. the generative without variational substitutions, so we can evaluate the prior probability of the variational samples later
name_to_unconditioned_generative_variable = {}
generative_root_variable_names = set()
def make_unconditioned_rv(name, builder):
with tf.name_scope(name):
assert name not in name_to_unconditioned_generative_variable
is_root_variable = builder.__code__.co_argcount == 1 # ideally the below assert would *define* root-ness (indeed, it does, conceptually), but can't evaluate it before the variable is created!
variable = builder(1) if is_root_variable else builder()
assert is_root_variable == (len(get_ancestors(variable, name_to_unconditioned_generative_variable.values())) == 0) # ** could be made more efficient by caching, so quickly know chunks of the graph do/don't have ancestor-RVs
if is_root_variable:
generative_root_variable_names.add(name)
name_to_unconditioned_generative_variable[name] = variable
return variable.value
with tf.variable_scope('generative'), tf.name_scope('unconditioned'):
unconditioned_generative = generative_builder(make_unconditioned_rv, GenerativeMode.UNCONDITIONED)
def expand_like_discrete(substituted_value):
# This will be applied to all variables that aren't indexed by discrete-state
substituted_value = tf.reshape(substituted_value, [sample_count, -1] + list(map(int, substituted_value.get_shape()[1:]))) # indexed by sample-index, iid-index, *
substituted_value = tf.tile(substituted_value, [1, discrete_states.shape[0]] + [1] * (substituted_value.get_shape().ndims - 2)) # indexed by sample-index, discrete-index * iid-index, *
return tf.reshape(substituted_value, [-1] + list(map(int, substituted_value.get_shape()[2:]))) # indexed by sample-index * discrete-index * iid-index, *
name_to_substituted_value = {} # each value is indexed by sample-index * discrete-index * iid-index, *
# Construct expanded copies of the observations (tiled over sample and discrete indices); these are made available
# to the variational so it can reason over them, and are used as substitutions in the generative
for name in name_to_data_map:
assert name != discrete_name # ** need to think about this case!
# ** should also probably assert that the observed variable is not a variational-descendant of the discrete (or any other variable!)
substituted_value = tf.tile(
name_to_data_map[name],
[sample_count] + [1] * (name_to_data_map[name].get_shape().ndims - 1)
) # indexed by sample-index * iid-index, *
# ** is calling expand_like_discrete not strictly less efficient that just adding the discrete-state-count into the above tile?
name_to_substituted_value[name] = expand_like_discrete(substituted_value) # always expand, as an observed variable cannot be variational-descendant of the discrete
def is_variable_discrete_indexed(variable):
# Substituted values are always discrete-indexed, hence having one of them as an ancestor is a sufficient
# condition for being discrete-indexed. In practice we check the reverse, as the substitution is not an RV
# hence won't be returned as an ancestor. It is also a necessary condition, as there is no other route through
# which discrete-indexing can be added
return any(
len(get_descendants(substituted_value, [variable])) > 0
for substituted_value in name_to_substituted_value.values()
)
# Build the variational, substituting samples and expanding all variables to be indexed by sample and discrete indices
name_to_conditioned_variational_variable = {}
def make_variational_rv(name, builder):
with tf.name_scope('q_' + name):
assert name in name_to_unconditioned_generative_variable
assert name not in name_to_data_map
assert name not in name_to_conditioned_variational_variable
is_root_variable = builder.__code__.co_argcount == 1 # ideally the below assert would *define* root-ness (indeed, it does, conceptually), but can't evaluate it before the variable is created!
variable = builder(sample_count) if is_root_variable else builder()
assert is_root_variable == (
len(get_ancestors(variable, name_to_conditioned_variational_variable.values())) == 0 # it's a root variable if it doesn't have any variational RV as an ancestor...
and
all(
len(get_descendants(name_to_substituted_value[observation_name], [variable])) == 0 # ...and no observation has it as a descendant -- i.e. it doesn't have any observation as an ancestor either
for observation_name in name_to_data_map
)
) # ** could be made more efficient by caching, so quickly know chunks of the graph do/don't have ancestor-RVs
substituted_value = variable.value # indexed by sample-index * [discrete-index *] iid-index, *
if discrete_name is not None: # if there's a discrete to be integrated, then *all* substituted values must be discrete-indexed
if name == discrete_name:
assert map(int, substituted_value.get_shape()[1:]) == list(discrete_states.shape[1:]) # check the discrete values have the same shape as samples from the distribution
substituted_value = tf.tile(
discrete_states[np.newaxis, :, np.newaxis, ...],
[sample_count, 1, int(substituted_value.get_shape()[0]) / sample_count / (discrete_states.shape[0] if is_variable_discrete_indexed(variable) else 1)] + [1] * (len(discrete_states.shape) - 1)
) # indexed by sample-index, discrete-index, iid-index, *
substituted_value = tf.reshape(substituted_value, [-1] + list(discrete_states.shape[1:])) # indexed by sample-index * discrete-index * iid-index, *
else:
if not is_variable_discrete_indexed(variable):
substituted_value = expand_like_discrete(substituted_value)
name_to_conditioned_variational_variable[name] = variable # this is used to evaluate the variational density of the variational sample; for both this and next, uses ancestral substitutions in case of non-MF variational
name_to_substituted_value[name] = substituted_value # this is substituted into the generative
return substituted_value
with tf.variable_scope('variational'), tf.name_scope('conditioned'):
variational_builder(make_variational_rv, lambda observation_name: name_to_substituted_value[observation_name])
if discrete_name is not None:
assert discrete_name in name_to_conditioned_variational_variable
assert discrete_name in name_to_substituted_value
# Build the 'conditioned generative', with values substituted from the variational and observations
name_to_conditioned_generative_variable = {}
def make_conditioned_rv(name, builder):
with tf.name_scope(name):
is_root_variable = name in generative_root_variable_names # i.e. whether this is an RV with no ancestor-RVs, meaning that it should be replicated according to sample_count (otherwise, replication of some ancestor should 'bubble down' to us)
variable = builder(sample_count) if is_root_variable else builder()
name_to_conditioned_generative_variable[name] = variable # used to evaluate the generative density of the variational sample (and the observed data), with ancestral substitutions
if name not in name_to_substituted_value:
# Marginalise by sampling from the generative (with ancestral conditioning), as there's no corresponding variational or observation
# ** could condition the warning on whether it actually has descendants!
print('warning: {} has neither variational distribution nor observed value, hence will be marginalised by sampling'.format(name))
substituted_value = variable.value
if discrete_name is not None:
if not is_variable_discrete_indexed(variable):
substituted_value = expand_like_discrete(substituted_value)
name_to_substituted_value[name] = substituted_value
return name_to_substituted_value[name]
with tf.variable_scope('generative', reuse=True), tf.name_scope('conditioned'):
conditioned_generative = generative_builder(make_conditioned_rv, GenerativeMode.CONDITIONED)
if discrete_name is not None:
assert discrete_name in name_to_conditioned_generative_variable
def get_mode_or_mean(variable):
try:
return variable.distribution.mode()
except NotImplementedError:
print('warning: using mean instead of mode for {} in reconstruction'.format(variable.distribution.name))
return variable.distribution.mean() # fall back to mean, e.g. for uniform random variables
# Build a second copy of the variational, with the (variational) mode of each variable substituted, in order to do
# a full 'ancestrally modal' reconstruction in the non-MF case
name_to_variational_mode = {}
def make_variational_reconstruction_rv(name, builder):
with tf.name_scope('q_' + name):
assert name in name_to_unconditioned_generative_variable
is_root_variable = builder.__code__.co_argcount == 1 # ** cache from first variational model creation above?
variable = builder(1) if is_root_variable else builder()
name_to_variational_mode[name] = get_mode_or_mean(variable)
return name_to_variational_mode[name]
with tf.variable_scope('variational', reuse=True), tf.name_scope('modes'):
variational_builder(make_variational_reconstruction_rv, lambda observation_name: name_to_data_map[observation_name])
# This third copy of the generative is not used by inference, but is returned to the caller to use for reconstructions
# It does not perform any sample/discrete expansion, but substitutes variational modes for ancestral latents
def make_reconstruction_rv(name, builder):
with tf.name_scope(name):
if name in name_to_variational_mode:
return name_to_variational_mode[name]
else:
# ** non-use of name_to_data_map here may not be desirable if the variable is not a leaf
variable = builder(1) if name in generative_root_variable_names else builder()
return get_mode_or_mean(variable)
with tf.variable_scope('generative', reuse=True), tf.name_scope('reconstruction'):
reconstruction_modes = generative_builder(make_reconstruction_rv, GenerativeMode.RECONSTRUCTION)
with tf.name_scope('integrated_klqp'):
def lifted_log_prob(variable, value, name): # ** would be nice if we could rely on variable.name == name!
# variable is a random variable, indexed by sample-index * [discrete-index *] iid-index, *
# value is a tensor, indexed by sample-index * discrete-index * iid-index, *
# This function evaluates variable.log_prob on slices of value taken over discrete-index, summing away non-iid dimensions
discrete_state_count = discrete_states.shape[0]
if discrete_name is None:
log_prob = variable.distribution.log_prob(value)
return tf.reduce_sum(log_prob, axis=list(range(1, log_prob.get_shape().ndims)))[np.newaxis, ...]
elif is_variable_discrete_indexed(variable):
log_prob = variable.distribution.log_prob(value) # indexed by sample-index * discrete-index * iid-index, *
log_prob = tf.reduce_sum(log_prob, axis=list(range(1, log_prob.get_shape().ndims))) # indexed by sample-index * discrete-index * iid-index
log_prob = tf.reshape(log_prob, [sample_count, discrete_state_count, -1]) # indexed by sample-index, discrete-index, iid-index
return tf.reshape(tf.transpose(log_prob, [1, 0, 2]), [discrete_state_count, -1]) # indexed by discrete-index, sample-index * iid-index
else:
value = tf.reshape(value, [sample_count, discrete_state_count, -1] + list(map(int, value.get_shape()[1:]))) # indexed by sample-index, discrete-index, iid-index, *
value = tf.transpose(value, [1, 0, 2] + list(range(3, value.get_shape().ndims))) # indexed by discrete-index, sample-index, iid-index, *
value = tf.reshape(value, [discrete_state_count, -1] + list(map(int, value.get_shape()[3:]))) # indexed by discrete-index, sample-index * iid-index, *
log_prob = tf.stack([
variable.distribution.log_prob(value[state_index])
for state_index in range(discrete_state_count)
]) # indexed by discrete-index, sample-index * iid-index, *
return tf.reduce_sum(log_prob, axis=range(2, log_prob.get_shape().ndims)) # indexed by discrete-index, sample-index * iid-index
if discrete_name is not None:
discrete_qz_probs = tf.exp(lifted_log_prob(
name_to_conditioned_variational_variable[discrete_name],
name_to_substituted_value[discrete_name], # this is the discrete states tiled over sample-index and iid-index
discrete_name
)) # indexed by discrete-index, sample-index * iid-index; this is the probability under the variational, of each discrete state
def E_log_prob_wrt_discrete(variable, value, name): # ** again, would be nice if could rely on variable.name == name!
# log_prob is indexed by sample-index * [discrete-index *] iid-index, *
# result is scalar, being a mean over samples, and minibatch-elements, an expectation over discrete-states, and a sum over remaining dimensions
maybe_weighted_log_prob = lifted_log_prob(variable, value, name) # indexed by discrete-index, sample-index * iid-index
if discrete_name is not None:
maybe_weighted_log_prob *= discrete_qz_probs
return tf.reduce_mean(maybe_weighted_log_prob) # that we do a mean over iid-index means we treat the minibatch-indexing as independent sampling, not a joint rv
log_Px = sum(
E_log_prob_wrt_discrete(name_to_conditioned_generative_variable[name], name_to_substituted_value[name], name)
for name in name_to_data_map
)
log_Pz = sum(
E_log_prob_wrt_discrete(name_to_conditioned_generative_variable[name], name_to_substituted_value[name], name)
# for name in name_to_conditioned_generative_variable
for name in name_to_conditioned_variational_variable # variational not generative so we only include things with variational (not prior) substitutions
if name != discrete_name # ...as we use L1 divergence for this instead
if name not in name_to_data_map # ...as it's in P(x) instead
)
log_Qz = sum(
E_log_prob_wrt_discrete(name_to_conditioned_variational_variable[name], name_to_substituted_value[name], name)
for name in name_to_conditioned_variational_variable
if name != discrete_name # ...as we use L1 divergence for this instead
)
for name in name_to_conditioned_variational_variable:
if name != discrete_name:
if name not in name_to_data_map:
tf.summary.scalar('P(z_' + name + ')', E_log_prob_wrt_discrete(name_to_conditioned_generative_variable[name], name_to_substituted_value[name], name))
for name in name_to_data_map:
tf.summary.scalar('P(x_' + name + ')', E_log_prob_wrt_discrete(name_to_conditioned_generative_variable[name], name_to_substituted_value[name], name))
for name in name_to_conditioned_variational_variable:
if name != discrete_name:
value = E_log_prob_wrt_discrete(name_to_conditioned_variational_variable[name], name_to_substituted_value[name], name)
tf.summary.scalar('Q(z_' + name + ')', value)
if discrete_name is not None:
discrete_z_probs = tf.exp(lifted_log_prob(
name_to_unconditioned_generative_variable[discrete_name],
name_to_substituted_value[discrete_name], # this is the discrete states tiled over sample-index and iid-index
discrete_name
)) # indexed by discrete-index, sample-index * iid-index; this is the prior (unconditioned gen.) probability of the discrete states; it will be constant over sample-index and iid-index iff the discrete has no gen. ancestors
discrete_z_probs = tf.reduce_mean(discrete_z_probs, axis=1) # indexed by discrete-index
discrete_qz_probs = tf.reduce_mean(discrete_qz_probs, axis=1) # ditto; the mean here is calculating the aggregated posterior over the batch and samples
discrete_divergence_loss = tf.reduce_mean(tf.abs(discrete_z_probs - discrete_qz_probs)) * alpha # L1 loss
else:
discrete_divergence_loss = 0.
tf.losses.add_loss(0.) # this is needed because get_total_loss throws instead of returning zero if no losses have been registered
additional_losses = tf.losses.get_total_loss()
loss = -(log_Px + (log_Pz - log_Qz) * beta) + discrete_divergence_loss + additional_losses
tf.summary.scalar('inference/loss', loss)
tf.summary.scalar('inference/log_Px', log_Px)
tf.summary.scalar('inference/log_Pz', log_Pz)
tf.summary.scalar('inference/log_Qz', log_Qz)
tf.summary.scalar('inference/Ldd', discrete_divergence_loss)
tf.summary.scalar('inference/L*', additional_losses)
var_list = tf.trainable_variables()
grads = tf.gradients(loss, [v._ref() for v in var_list])
abs_grads = tf.abs(tf.concat([tf.reshape(grad, [-1]) for grad in grads if grad is not None], axis=0))
loss = tf.Print(loss, [log_Px, log_Pz * beta, log_Qz * beta, discrete_divergence_loss, additional_losses, tf.reduce_mean(abs_grads), tf.reduce_max(abs_grads)], 'p(x), p(z), q(z), Ldd, L*, <|g|>, max |g| = ')
if grad_clip_magnitude is not None:
grads, _ = tf.clip_by_global_norm(grads, grad_clip_magnitude)
return loss, list(zip(grads, var_list)), unconditioned_generative, reconstruction_modes, conditioned_generative
|
[
"tensorflow.abs",
"tensorflow.losses.add_loss",
"tensorflow.summary.scalar",
"tensorflow.trainable_variables",
"tensorflow.reshape",
"numpy.zeros",
"tensorflow.variable_scope",
"tensorflow.reduce_mean",
"edward1_utils.get_descendants",
"tensorflow.transpose",
"tensorflow.reduce_max",
"tensorflow.name_scope",
"tensorflow.clip_by_global_norm",
"tensorflow.losses.get_total_loss"
] |
[((20630, 20671), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""inference/loss"""', 'loss'], {}), "('inference/loss', loss)\n", (20647, 20671), True, 'import tensorflow as tf\n'), ((20676, 20721), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""inference/log_Px"""', 'log_Px'], {}), "('inference/log_Px', log_Px)\n", (20693, 20721), True, 'import tensorflow as tf\n'), ((20726, 20771), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""inference/log_Pz"""', 'log_Pz'], {}), "('inference/log_Pz', log_Pz)\n", (20743, 20771), True, 'import tensorflow as tf\n'), ((20776, 20821), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""inference/log_Qz"""', 'log_Qz'], {}), "('inference/log_Qz', log_Qz)\n", (20793, 20821), True, 'import tensorflow as tf\n'), ((20826, 20886), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""inference/Ldd"""', 'discrete_divergence_loss'], {}), "('inference/Ldd', discrete_divergence_loss)\n", (20843, 20886), True, 'import tensorflow as tf\n'), ((20891, 20943), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""inference/L*"""', 'additional_losses'], {}), "('inference/L*', additional_losses)\n", (20908, 20943), True, 'import tensorflow as tf\n'), ((20960, 20984), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (20982, 20984), True, 'import tensorflow as tf\n'), ((2862, 2875), 'numpy.zeros', 'np.zeros', (['[1]'], {}), '([1])\n', (2870, 2875), True, 'import numpy as np\n'), ((3993, 4024), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""generative"""'], {}), "('generative')\n", (4010, 4024), True, 'import tensorflow as tf\n'), ((4026, 4056), 'tensorflow.name_scope', 'tf.name_scope', (['"""unconditioned"""'], {}), "('unconditioned')\n", (4039, 4056), True, 'import tensorflow as tf\n'), ((9570, 9602), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""variational"""'], {}), "('variational')\n", (9587, 9602), True, 'import tensorflow as tf\n'), ((9604, 9632), 'tensorflow.name_scope', 'tf.name_scope', (['"""conditioned"""'], {}), "('conditioned')\n", (9617, 9632), True, 'import tensorflow as tf\n'), ((11489, 11532), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""generative"""'], {'reuse': '(True)'}), "('generative', reuse=True)\n", (11506, 11532), True, 'import tensorflow as tf\n'), ((11534, 11562), 'tensorflow.name_scope', 'tf.name_scope', (['"""conditioned"""'], {}), "('conditioned')\n", (11547, 11562), True, 'import tensorflow as tf\n'), ((12838, 12882), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""variational"""'], {'reuse': '(True)'}), "('variational', reuse=True)\n", (12855, 12882), True, 'import tensorflow as tf\n'), ((12884, 12906), 'tensorflow.name_scope', 'tf.name_scope', (['"""modes"""'], {}), "('modes')\n", (12897, 12906), True, 'import tensorflow as tf\n'), ((13731, 13774), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""generative"""'], {'reuse': '(True)'}), "('generative', reuse=True)\n", (13748, 13774), True, 'import tensorflow as tf\n'), ((13776, 13807), 'tensorflow.name_scope', 'tf.name_scope', (['"""reconstruction"""'], {}), "('reconstruction')\n", (13789, 13807), True, 'import tensorflow as tf\n'), ((13924, 13956), 'tensorflow.name_scope', 'tf.name_scope', (['"""integrated_klqp"""'], {}), "('integrated_klqp')\n", (13937, 13956), True, 'import tensorflow as tf\n'), ((20340, 20363), 'tensorflow.losses.add_loss', 'tf.losses.add_loss', (['(0.0)'], {}), '(0.0)\n', (20358, 20363), True, 'import tensorflow as tf\n'), ((20499, 20525), 'tensorflow.losses.get_total_loss', 'tf.losses.get_total_loss', ([], {}), '()\n', (20523, 20525), True, 'import tensorflow as tf\n'), ((21425, 21475), 'tensorflow.clip_by_global_norm', 'tf.clip_by_global_norm', (['grads', 'grad_clip_magnitude'], {}), '(grads, grad_clip_magnitude)\n', (21447, 21475), True, 'import tensorflow as tf\n'), ((3184, 3203), 'tensorflow.name_scope', 'tf.name_scope', (['name'], {}), '(name)\n', (3197, 3203), True, 'import tensorflow as tf\n'), ((6768, 6794), 'tensorflow.name_scope', 'tf.name_scope', (["('q_' + name)"], {}), "('q_' + name)\n", (6781, 6794), True, 'import tensorflow as tf\n'), ((10129, 10148), 'tensorflow.name_scope', 'tf.name_scope', (['name'], {}), '(name)\n', (10142, 10148), True, 'import tensorflow as tf\n'), ((12419, 12445), 'tensorflow.name_scope', 'tf.name_scope', (["('q_' + name)"], {}), "('q_' + name)\n", (12432, 12445), True, 'import tensorflow as tf\n'), ((13330, 13349), 'tensorflow.name_scope', 'tf.name_scope', (['name'], {}), '(name)\n', (13343, 13349), True, 'import tensorflow as tf\n'), ((17315, 17354), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['maybe_weighted_log_prob'], {}), '(maybe_weighted_log_prob)\n', (17329, 17354), True, 'import tensorflow as tf\n'), ((19921, 19961), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['discrete_z_probs'], {'axis': '(1)'}), '(discrete_z_probs, axis=1)\n', (19935, 19961), True, 'import tensorflow as tf\n'), ((20023, 20064), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['discrete_qz_probs'], {'axis': '(1)'}), '(discrete_qz_probs, axis=1)\n', (20037, 20064), True, 'import tensorflow as tf\n'), ((21263, 21288), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['abs_grads'], {}), '(abs_grads)\n', (21277, 21288), True, 'import tensorflow as tf\n'), ((21290, 21314), 'tensorflow.reduce_max', 'tf.reduce_max', (['abs_grads'], {}), '(abs_grads)\n', (21303, 21314), True, 'import tensorflow as tf\n'), ((19282, 19327), 'tensorflow.summary.scalar', 'tf.summary.scalar', (["('Q(z_' + name + ')')", 'value'], {}), "('Q(z_' + name + ')', value)\n", (19299, 19327), True, 'import tensorflow as tf\n'), ((21081, 21103), 'tensorflow.reshape', 'tf.reshape', (['grad', '[-1]'], {}), '(grad, [-1])\n', (21091, 21103), True, 'import tensorflow as tf\n'), ((15040, 15102), 'tensorflow.reshape', 'tf.reshape', (['log_prob', '[sample_count, discrete_state_count, -1]'], {}), '(log_prob, [sample_count, discrete_state_count, -1])\n', (15050, 15102), True, 'import tensorflow as tf\n'), ((20210, 20254), 'tensorflow.abs', 'tf.abs', (['(discrete_z_probs - discrete_qz_probs)'], {}), '(discrete_z_probs - discrete_qz_probs)\n', (20216, 20254), True, 'import tensorflow as tf\n'), ((6403, 6449), 'edward1_utils.get_descendants', 'get_descendants', (['substituted_value', '[variable]'], {}), '(substituted_value, [variable])\n', (6418, 6449), False, 'from edward1_utils import get_ancestors, get_descendants\n'), ((15191, 15224), 'tensorflow.transpose', 'tf.transpose', (['log_prob', '[1, 0, 2]'], {}), '(log_prob, [1, 0, 2])\n', (15203, 15224), True, 'import tensorflow as tf\n'), ((7557, 7629), 'edward1_utils.get_descendants', 'get_descendants', (['name_to_substituted_value[observation_name]', '[variable]'], {}), '(name_to_substituted_value[observation_name], [variable])\n', (7572, 7629), False, 'from edward1_utils import get_ancestors, get_descendants\n')]
|
# coding=utf-8
# @Time : 2020/10/24 12:13
# @Auto : zzf-jeff
import torch
import torch.nn as nn
import math
from ..builder import BACKBONES
from .base import BaseBackbone
import torch.utils.model_zoo as model_zoo
from torchocr.utils.checkpoints import load_checkpoint
__all__ = [
"DetResNet"
]
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, dcn=None):
super(BasicBlock, self).__init__()
self.with_dcn = dcn is not None
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.with_modulated_dcn = False
if not self.with_dcn:
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, bias=False)
else:
from torchvision.ops import DeformConv2d
deformable_groups = dcn.get('deformable_groups', 1)
offset_channels = 18
self.conv2_offset = nn.Conv2d(planes, deformable_groups * offset_channels, kernel_size=3, padding=1)
self.conv2 = DeformConv2d(planes, planes, kernel_size=3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
# out = self.conv2(out)
if not self.with_dcn:
out = self.conv2(out)
else:
offset = self.conv2_offset(out)
out = self.conv2(out, offset)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, dcn=None):
super(Bottleneck, self).__init__()
self.with_dcn = dcn is not None
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.with_modulated_dcn = False
if not self.with_dcn:
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
else:
deformable_groups = dcn.get('deformable_groups', 1)
from torchvision.ops import DeformConv2d
offset_channels = 18
self.conv2_offset = nn.Conv2d(planes, deformable_groups * offset_channels, stride=stride, kernel_size=3,
padding=1)
self.conv2 = DeformConv2d(planes, planes, kernel_size=3, padding=1, stride=stride, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.dcn = dcn
self.with_dcn = dcn is not None
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
# out = self.conv2(out)
if not self.with_dcn:
out = self.conv2(out)
else:
offset = self.conv2_offset(out)
out = self.conv2(out, offset)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
@BACKBONES.register_module()
class DetResNet(BaseBackbone):
arch_settings = {
18: (BasicBlock, (2, 2, 2, 2)),
34: (BasicBlock, (3, 4, 6, 3)),
50: (Bottleneck, (3, 4, 6, 3)),
101: (Bottleneck, (3, 4, 23, 3)),
152: (Bottleneck, (3, 8, 36, 3))
}
def __init__(self, depth, in_channels, num_classes=1000):
super(DetResNet, self).__init__()
self.inplanes = 64
self.out_channels = []
self.block = self.arch_settings[depth][0]
self.num_block = self.arch_settings[depth][1]
self.conv1 = nn.Conv2d(in_channels, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block=self.block, planes=64, blocks=self.num_block[0], stride=1)
self.layer2 = self._make_layer(block=self.block, planes=128, blocks=self.num_block[1], stride=2)
self.layer3 = self._make_layer(block=self.block, planes=256, blocks=self.num_block[2], stride=2)
self.layer4 = self._make_layer(block=self.block, planes=512, blocks=self.num_block[3], stride=2)
def _make_layer(self, block, planes, blocks, stride=1, dcn=None):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, dcn=dcn))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, dcn=dcn))
self.out_channels.append(planes * block.expansion)
return nn.Sequential(*layers)
def init_weights(self, pretrained=None):
if pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(pretrained, str):
load_checkpoint(self,pretrained)
else:
raise TypeError('pretrained must be a str or None')
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.pool1(x)
c2 = self.layer1(x)
c3 = self.layer2(c2)
c4 = self.layer3(c3)
c5 = self.layer4(c4)
return (c2, c3, c4, c5)
|
[
"torch.nn.ReLU",
"math.sqrt",
"torch.nn.Sequential",
"torch.nn.Conv2d",
"torch.nn.BatchNorm2d",
"torchocr.utils.checkpoints.load_checkpoint",
"torchvision.ops.DeformConv2d",
"torch.nn.MaxPool2d"
] |
[((836, 925), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_planes', 'out_planes'], {'kernel_size': '(3)', 'stride': 'stride', 'padding': '(1)', 'bias': '(False)'}), '(in_planes, out_planes, kernel_size=3, stride=stride, padding=1,\n bias=False)\n', (845, 925), True, 'import torch.nn as nn\n'), ((1240, 1262), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['planes'], {}), '(planes)\n', (1254, 1262), True, 'import torch.nn as nn\n'), ((1284, 1305), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1291, 1305), True, 'import torch.nn as nn\n'), ((1865, 1887), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['planes'], {}), '(planes)\n', (1879, 1887), True, 'import torch.nn as nn\n'), ((2737, 2791), 'torch.nn.Conv2d', 'nn.Conv2d', (['inplanes', 'planes'], {'kernel_size': '(1)', 'bias': '(False)'}), '(inplanes, planes, kernel_size=1, bias=False)\n', (2746, 2791), True, 'import torch.nn as nn\n'), ((2812, 2834), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['planes'], {}), '(planes)\n', (2826, 2834), True, 'import torch.nn as nn\n'), ((3480, 3502), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['planes'], {}), '(planes)\n', (3494, 3502), True, 'import torch.nn as nn\n'), ((3525, 3581), 'torch.nn.Conv2d', 'nn.Conv2d', (['planes', '(planes * 4)'], {'kernel_size': '(1)', 'bias': '(False)'}), '(planes, planes * 4, kernel_size=1, bias=False)\n', (3534, 3581), True, 'import torch.nn as nn\n'), ((3602, 3628), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(planes * 4)'], {}), '(planes * 4)\n', (3616, 3628), True, 'import torch.nn as nn\n'), ((3650, 3671), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (3657, 3671), True, 'import torch.nn as nn\n'), ((5038, 5112), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', '(64)'], {'kernel_size': '(7)', 'stride': '(2)', 'padding': '(3)', 'bias': '(False)'}), '(in_channels, 64, kernel_size=7, stride=2, padding=3, bias=False)\n', (5047, 5112), True, 'import torch.nn as nn\n'), ((5165, 5183), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (5179, 5183), True, 'import torch.nn as nn\n'), ((5205, 5226), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (5212, 5226), True, 'import torch.nn as nn\n'), ((5251, 5299), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)'}), '(kernel_size=3, stride=2, padding=1)\n', (5263, 5299), True, 'import torch.nn as nn\n'), ((6483, 6505), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (6496, 6505), True, 'import torch.nn as nn\n'), ((1404, 1467), 'torch.nn.Conv2d', 'nn.Conv2d', (['planes', 'planes'], {'kernel_size': '(3)', 'padding': '(1)', 'bias': '(False)'}), '(planes, planes, kernel_size=3, padding=1, bias=False)\n', (1413, 1467), True, 'import torch.nn as nn\n'), ((1669, 1754), 'torch.nn.Conv2d', 'nn.Conv2d', (['planes', '(deformable_groups * offset_channels)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(planes, deformable_groups * offset_channels, kernel_size=3, padding=1\n )\n', (1678, 1754), True, 'import torch.nn as nn\n'), ((1776, 1842), 'torchvision.ops.DeformConv2d', 'DeformConv2d', (['planes', 'planes'], {'kernel_size': '(3)', 'padding': '(1)', 'bias': '(False)'}), '(planes, planes, kernel_size=3, padding=1, bias=False)\n', (1788, 1842), False, 'from torchvision.ops import DeformConv2d\n'), ((2933, 3011), 'torch.nn.Conv2d', 'nn.Conv2d', (['planes', 'planes'], {'kernel_size': '(3)', 'stride': 'stride', 'padding': '(1)', 'bias': '(False)'}), '(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n', (2942, 3011), True, 'import torch.nn as nn\n'), ((3213, 3312), 'torch.nn.Conv2d', 'nn.Conv2d', (['planes', '(deformable_groups * offset_channels)'], {'stride': 'stride', 'kernel_size': '(3)', 'padding': '(1)'}), '(planes, deformable_groups * offset_channels, stride=stride,\n kernel_size=3, padding=1)\n', (3222, 3312), True, 'import torch.nn as nn\n'), ((3378, 3464), 'torchvision.ops.DeformConv2d', 'DeformConv2d', (['planes', 'planes'], {'kernel_size': '(3)', 'padding': '(1)', 'stride': 'stride', 'bias': '(False)'}), '(planes, planes, kernel_size=3, padding=1, stride=stride, bias=\n False)\n', (3390, 3464), False, 'from torchvision.ops import DeformConv2d\n'), ((5953, 6050), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.inplanes', '(planes * block.expansion)'], {'kernel_size': '(1)', 'stride': 'stride', 'bias': '(False)'}), '(self.inplanes, planes * block.expansion, kernel_size=1, stride=\n stride, bias=False)\n', (5962, 6050), True, 'import torch.nn as nn\n'), ((6091, 6131), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(planes * block.expansion)'], {}), '(planes * block.expansion)\n', (6105, 6131), True, 'import torch.nn as nn\n'), ((7007, 7040), 'torchocr.utils.checkpoints.load_checkpoint', 'load_checkpoint', (['self', 'pretrained'], {}), '(self, pretrained)\n', (7022, 7040), False, 'from torchocr.utils.checkpoints import load_checkpoint\n'), ((6794, 6812), 'math.sqrt', 'math.sqrt', (['(2.0 / n)'], {}), '(2.0 / n)\n', (6803, 6812), False, 'import math\n')]
|
from poyonga import Groonga
import gevent
from gevent import monkey
monkey.patch_all()
def fetch(cmd, **kwargs):
g = Groonga()
ret = g.call(cmd, **kwargs)
print(ret.status)
print(ret.body)
print("*" * 40)
return ret.body
cmds = [
("status", {}),
("log_level", {"level": "warning"}),
# ("table_create", {"name": "Site", "flags": "TABLE_HASH_KEY"}),
("select", {"table": "Site"}),
]
jobs = [gevent.spawn(fetch, cmd, **kwargs) for cmd, kwargs in cmds]
gevent.joinall(jobs)
results = [job.value for job in jobs]
print(results)
|
[
"gevent.spawn",
"poyonga.Groonga",
"gevent.monkey.patch_all",
"gevent.joinall"
] |
[((69, 87), 'gevent.monkey.patch_all', 'monkey.patch_all', ([], {}), '()\n', (85, 87), False, 'from gevent import monkey\n'), ((494, 514), 'gevent.joinall', 'gevent.joinall', (['jobs'], {}), '(jobs)\n', (508, 514), False, 'import gevent\n'), ((124, 133), 'poyonga.Groonga', 'Groonga', ([], {}), '()\n', (131, 133), False, 'from poyonga import Groonga\n'), ((434, 468), 'gevent.spawn', 'gevent.spawn', (['fetch', 'cmd'], {}), '(fetch, cmd, **kwargs)\n', (446, 468), False, 'import gevent\n')]
|
"""This module offers GUI tools for manipulating table-like step functions
of "elementary" cellular automatons.
Ideas for further utilities:
* Display conflicting rules for horizontal or vertical symmetry, rotational
symmetry, ...
* An editing mode, that handles simple binary logic, like::
c == 1 then result = 1
c == 0 then result = 0
l == 0 and r == 1 then result = 0
* A graphical editing mode that allows adding "pattern matching" for rules with
"dontcare fields" or something of that sort.
* A graphical editing mode with zooming UI.
* ...
"""
from __future__ import absolute_import
import numpy as np
from ..elementarytools import *
from ..cagen import elementary_digits_and_values, rule_nr_to_rule_arr
from ..external.qt import *
from ..display.qt import PALETTE_QC
from itertools import product
import random
GAP = object()
"""The value passed to create_subwidget when a position is not held by a
field."""
CELL_COL = PALETTE_QC
"""What colors to use for what field values."""
CELL_COL[GAP] = QColor("gray")
class CellDisplayWidget(QLabel):
"""A little Widget that displays a cell in a neighbourhood."""
def __init__(self, value, position=None, size=16, palette=None, **kwargs):
"""Create the DisplayWidget.
:param value: The cell value to show.
:param position: Alternatively, the position of the cell in the result
list, to be used for communication to the outside.
:param size: The size of the cell, used for both width and height.
:param palette: The palette of colors to use.
"""
super(CellDisplayWidget, self).__init__(**kwargs)
self.setFixedSize(size, size)
self.setPixmap(self.__pixmap_for_value(value))
self.position = position
self._palette = palette or CELL_COL
def __pixmap_for_value(self, value):
"""Create a pixmap for the value of the cell."""
pixmap = QPixmap(QSize(self.width(), self.height()))
pixmap.fill(CELL_COL[value])
return pixmap
class EditableCellDisplayWidget(QPushButton):
"""A clickable and keyboard-operatable display widget for cells."""
value_changed = Signal([int, int])
"""This signal will be emitted when the user changed the value of the
cell. It will emit the position and the new value."""
def __init__(self, value, position, base=2, size=16, palette=None, **kwargs):
"""Create the editable display widget.
:param value: The start value.
:param position: The position in the result list, used in the
:attr:`value_changed` signal.
:param base: The numerical base for values.
:param size: The size for the display, used for both width and height.
:param palette: The palette of qcolors to use.
"""
super(EditableCellDisplayWidget, self).__init__(**kwargs)
self.value = value
self.base = base
self._palette = palette
self.setFixedSize(size, size)
self.setFlat(True)
self.setAutoFillBackground(True)
self.bg_color = self._palette[self.value]
self.position = position
self.clicked.connect(self._change_value)
def _change_value(self):
"""Called by the clicked signal of the underlying QPushButton."""
self.value = (self.value + 1) % self.base
self.bg_color = self._palette[self.value]
self.update()
self.value_changed.emit(self.position, self.value)
def set_value(self, value, emit=False):
self.value = value
self.bg_color = self._palette[self.value]
self.update()
if emit:
self.value_changed.emit(self.position, self.value)
def paintEvent(self, event):
"""Redraw the button, add a rectangle inside the button if it has the
focus."""
paint = QPainter(self)
paint.fillRect(event.rect(), self.bg_color)
if self.hasFocus():
paint.setPen(QColor("red") if self.bg_color != QColor("red")
else QColor("black"))
paint.drawRect(QRect(1, 1, self.width() - 3, self.height() - 3))
def set_position(self, position):
self.position = position
# TODO implement CellDisplayWidgets for images.
class BaseNeighbourhoodDisplay(QWidget):
"""The BaseNeighbourhoodDisplay offers a skeleton for different ways of
displaying neighbourhoods.
Subclass this and implement create_subwidget, which will be fed an offset
and the corresponding entry from the values dictionary, or :data:`GAP` if
there is no spot in the neighbourhood at that position, and will then be
put into a QGridLayout.
This class itself displays colored blocks in the shape of the
neighbourhood."""
def __init__(self, neighbourhood, values=None, base=2, palette=None, **kwargs):
super(BaseNeighbourhoodDisplay, self).__init__(**kwargs)
self.neighbourhood = neighbourhood
self.offsets = neighbourhood.offsets
self.names = neighbourhood.names
self.bbox = self.neighbourhood.bounding_box()
self.base = base
self.palette = palette or CELL_COL
dims = len(self.bbox)
assert dims in (1, 2), "Only 1d or 2d neighbourhoods are supported"
if dims == 1:
# for making the code easier, we will only handle 2d neighbourhoods
# by trivially turning a 1d neighbourhood into a 2d neighbourhood.
self.offsets = tuple((x[0], 0) for x in self.offsets)
self.bbox = self.bbox[0], (0, 0)
if values is None:
values = dict((offs, 0) for offs in self.offsets)
if values.keys()[0] not in self.offsets:
values = dict((self.offsets[self.names.index(name)],
value) for name, value in values.iteritems())
self.values = values.copy()
self.subwidgets = {}
self.layout = QGridLayout(self)
self.layout.setContentsMargins(0, 0, 0, 0)
self.layout.setSpacing(0)
(grid_w, grid_h), (offs_x, offs_y) = self.determine_size()
widths = [[] for _ in range(grid_h)]
heights = [[] for _ in range(grid_w)]
positions = product(range(grid_w),
range(grid_h))
for (col, row) in positions:
offset = (col + offs_x, row + offs_y)
subwidget = self.create_subwidget(offset, self.values.get(offset, GAP))
subwidget.setObjectName("cell_%d_%d" % offset)
self.subwidgets[offset] = subwidget
if subwidget is not None:
self.layout.addWidget(subwidget, row, col)
w, h = subwidget.width(), subwidget.height()
else:
w, h = 0, 0
widths[row].append(w)
heights[col].append(h)
width = max([sum(part) for part in widths])
height = max([sum(part) for part in heights])
self.setFixedSize(width, height)
def determine_size(self):
"""Determine how many fields to allocate in the grid.
Subclass this, if you want more gaps around the edges.
Return a tuple of width, height and a tuple of x-offset and y-offset."""
return ((self.bbox[0][1] - self.bbox[0][0] + 1,
self.bbox[1][1] - self.bbox[1][0] + 1),
(self.bbox[0][0], self.bbox[1][0]))
def create_subwidget(self, offset, value):
"""Create a widget for a cell in the neighbourhood.
:param offset: A tuple of (x, y) for the position of the cell
:param value: The value of the cell, as per the values dictionary, or
if the widget is to be created for an empty space,
:data:`GAP`.
:returns: a QWidget initialised for the cell. Alternatively, None."""
return CellDisplayWidget(value, self.palette)
def update_value(self, widget, offset, new_value):
"""Manipulate the given widget for the new value.
:returns: None, if the widget was manipulated, alternatively a new
QWidget to take its place."""
widget.setPixmap(self.__pixmap_for_value(new_value))
class NextToResult(QWidget):
"""A simple utility class to display a neighbourhood widget and a result
widget next to each other in different relations."""
def __init__(self, neighbourhood_widget, result_widget, direction="l", **kwargs):
"""Create the widget.
:param neighbourhood_widget: The neighbourhood widget to put in.
:param result_widget: The result widget to put in.
:param direction: The direction the neighbourhood widget to put at.
Valid directions are l, u, d and r for left, up, down and right
respectively."""
super(NextToResult, self).__init__(**kwargs)
assert direction in "udlr"
self.result_widget = result_widget
self.neighbourhood_widget = neighbourhood_widget
self.result_widget.setObjectName("result")
self.neighbourhood_widget.setObjectName("neighbourhood")
if direction in "lr":
layout = QHBoxLayout()
spacing = self.result_widget.width()
else:
layout = QVBoxLayout()
spacing = self.result_widget.height()
if direction in "lu":
layout.addWidget(self.result_widget)
layout.addSpacing(spacing)
layout.addWidget(self.neighbourhood_widget)
if direction in "rd":
layout.addSpacing(spacing)
layout.addWidget(self.result_widget)
self.setLayout(layout)
class ElementaryRuleWindow(QWidget):
"""A window usable to modify the table of an elementary step function."""
def __init__(self, neighbourhood, rule=None, base=2, palette_info=None, **kwargs):
""":param neighbourhood: The `Neighbourhood` instance to get the
data from.
:param rule: The rule to set at the beginning.
:param base: The numerical base for the cells.
:param palette: The palette_info for the simulator.
"""
super(ElementaryRuleWindow, self).__init__(**kwargs)
self.neighbourhood = neighbourhood
self.base = base
self.entries = len(self.neighbourhood.offsets)
if rule is None:
rule = random.randrange(0, base ** (base ** self.entries))
self.rule_nr = rule
self.rule = np.array(rule_nr_to_rule_arr(self.rule_nr, self.entries, self.base))
self.n_r_widgets = []
self.display_widget = QWidget(self)
self.display_widget.setObjectName("display_widget")
self.display_layout = QGridLayout(self.display_widget)
self.display_layout.setSizeConstraint(QLayout.SetFixedSize)
digits_and_values = elementary_digits_and_values(self.neighbourhood,
self.base, self.rule)
for pos, data in enumerate(digits_and_values):
data = data.copy()
result = data["result_value"]
del data["result_value"]
n_w = BaseNeighbourhoodDisplay(neighbourhood, data, parent=self)
r_w = EditableCellDisplayWidget(result, pos, base=base, parent=self, palette=self.palette)
n_r_w = NextToResult(n_w, r_w, parent=self, direction="r")
n_r_w.setObjectName("block_%d" % pos)
r_w.value_changed.connect(self._result_changed)
self.n_r_widgets.append(n_r_w)
self.digits_and_values = digits_and_values
self._rewrap_grid()
self.display_widget.setLayout(self.display_layout)
self.scroll_area = QScrollArea(self)
self.scroll_area.setWidget(self.display_widget)
self.scroll_area.setObjectName("scroll_area")
layout = QVBoxLayout(self)
self.rule_nr_display = QLabel("Editing rule %s" % hex(self.rule_nr), self)
self.rule_nr_display.setObjectName("rule_nr_display")
# make text selectable and links (if any) clickable
self.rule_nr_display.setTextInteractionFlags(Qt.TextBrowserInteraction)
layout.addWidget(self.rule_nr_display)
layout.addWidget(self.scroll_area)
action_buttons = QHBoxLayout(self)
minimize_button = QPushButton("Minimize rule number", self)
minimize_button.clicked.connect(self.minimize_rule_number)
minimize_button.setObjectName("minimize")
action_buttons.addWidget(minimize_button)
action_buttons.addSpacing(11)
for name, action in neighbourhood_actions.iteritems():
act_btn = QPushButton(name, self)
def do_action(act=action):
self.do_neighbourhood_action(act)
act_btn.clicked.connect(do_action)
act_btn.setObjectName("action_%s" % name)
action_buttons.addWidget(act_btn)
layout.addLayout(action_buttons)
self.setLayout(layout)
def _result_changed(self, position, value):
"""React to a change in the results."""
self.digits_and_values[position]["result_value"] = value
self.recalculate_rule_number()
def recalculate_rule_number(self):
"""Recalculate what number corresponds to the result values saved in
:attr:`digits_and_values`.
:returns: the new rule number."""
num = 0
for digit, values in enumerate(self.digits_and_values):
num += values["result_value"] * (self.base ** digit)
self.rule_nr = num
self.rule_nr_display.setText("Editing rule %s" % (hex(self.rule_nr)))
return self.rule_nr
def minimize_rule_number(self):
best_num, (best_route, result), _ = minimize_rule_values(self.neighbourhood, self.digits_and_values)
if best_num == self.rule_nr:
QMessageBox.information(self, "No optimization found",
"""This rule set is already the lowest I can make out of it.""")
else:
okay = QMessageBox.question(self, "Apply optimization?",
"""With these actions, the rule number %d can be reached:
%s""" % (best_num, ", ".join(best_route)),
buttons=QMessageBox.Ok | QMessageBox.Cancel,
defaultButton=QMessageBox.Ok)
if okay == QMessageBox.Ok:
for num, data in enumerate(result):
self.n_r_widgets[num].result_widget.set_value(data)
self.digits_and_values = elementary_digits_and_values(self.neighbourhood, self.base, result)
self.recalculate_rule_number()
def do_neighbourhood_action(self, action):
result = action(self.neighbourhood, self.digits_and_values, base=self.base)
for num, data in enumerate(result):
val = data["result_value"]
self.n_r_widgets[num].result_widget.set_value(val)
self.digits_and_values = result
self.recalculate_rule_number()
def _rewrap_grid(self, old_width=None):
"""Put all the widgets into a grid, so that they fill just enough of
the width, so that there is no horizontal scroll bar."""
count = len(self.n_r_widgets)
# all items should have the same size actually
width_per_bit = self.n_r_widgets[0].sizeHint().width() + \
self.display_layout.spacing()
spacing = self.display_layout.horizontalSpacing()
if spacing == -1:
spacing = 11
available_width = self.contentsRect().width()
columns = available_width / (width_per_bit) - 1
if old_width is not None:
old_columns = old_width / (width_per_bit) - 1
if old_columns == columns:
return
if columns <= 0:
columns = 1
items_per_column = int(count / columns) + 1
for widget in self.n_r_widgets:
self.display_layout.removeWidget(widget)
for num, widget in enumerate(self.n_r_widgets):
col = num / items_per_column
row = num % items_per_column
self.display_layout.addWidget(widget, row, col)
height_per_bit = self.n_r_widgets[0].sizeHint().height()
v_spacing = self.display_layout.verticalSpacing()
if v_spacing == -1:
v_spacing = 11
height = (height_per_bit + v_spacing) * items_per_column
self.display_widget.setFixedSize(available_width, height)
def resizeEvent(self, event):
"""React to a size change of the widget."""
self._rewrap_grid(old_width = event.oldSize().width())
def main():
from ..cagen import VonNeumannNeighbourhood
import sys
vn = VonNeumannNeighbourhood()
dvw = ElementaryRuleWindow(vn, base=3)
dvw.show()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
|
[
"random.randrange"
] |
[((10386, 10435), 'random.randrange', 'random.randrange', (['(0)', '(base ** base ** self.entries)'], {}), '(0, base ** base ** self.entries)\n', (10402, 10435), False, 'import random\n')]
|
import tasks
from time import sleep
print("add 3+5")
ret = tasks.add.delay(3,5)
print("Task ID:")
print(ret)
sleep(10)
print(ret.status)
|
[
"tasks.add.delay",
"time.sleep"
] |
[((60, 81), 'tasks.add.delay', 'tasks.add.delay', (['(3)', '(5)'], {}), '(3, 5)\n', (75, 81), False, 'import tasks\n'), ((110, 119), 'time.sleep', 'sleep', (['(10)'], {}), '(10)\n', (115, 119), False, 'from time import sleep\n')]
|
# -*- coding: future_fstrings -*-
from __future__ import print_function
import argparse
import binascii
import struct
import sys
import logging
from libptmalloc.frontend import printutils as pu
from libptmalloc.ptmalloc import ptmalloc as pt
from libptmalloc.frontend import helpers as h
from libptmalloc.frontend.commands.gdb import ptcmd
log = logging.getLogger("libptmalloc")
log.trace("ptparam.py")
try:
import gdb
except ImportError:
print("Not running inside of GDB, exiting...")
raise Exception("sys.exit()")
class ptparam(ptcmd.ptcmd):
"""Command to print information about malloc parameters represented by the malloc_par structure
"""
def __init__(self, ptm):
log.debug("ptparam.__init__()")
super(ptparam, self).__init__(ptm, "ptparam")
self.parser = argparse.ArgumentParser(
description="""Print malloc parameter(s) information
Analyze the malloc_par structure's fields.""",
add_help=False,
formatter_class=argparse.RawTextHelpFormatter,
epilog='NOTE: Last defined mp_ will be cached for future use')
# self.parser.add_argument(
# "-v", "--verbose", dest="verbose", action="count", default=0,
# help="Use verbose output (multiple for more verbosity)"
# )
self.parser.add_argument(
"-h", "--help", dest="help", action="store_true", default=False,
help="Show this help"
)
self.parser.add_argument(
"-l", dest="list", action="store_true", default=False,
help="List malloc parameter(s)' address only"
)
self.parser.add_argument(
"--use-cache", dest="use_cache", action="store_true", default=False,
help="Do not fetch parameters data if you know they haven't changed since last time they were cached"
)
self.parser.add_argument(
"address", default=None, nargs="?", type=h.string_to_int,
help="A malloc_par struct address. Optional with cached malloc parameters"
)
# allows to enable a different log level during development/debugging
self.parser.add_argument(
"--loglevel", dest="loglevel", default=None,
help=argparse.SUPPRESS
)
@h.catch_exceptions
@ptcmd.ptcmd.init_and_cleanup
def invoke(self, arg, from_tty):
"""Inherited from gdb.Command
See https://sourceware.org/gdb/current/onlinedocs/gdb/Commands-In-Python.html
"""
log.debug("ptparam.invoke()")
self.cache.update_param(self.args.address, show_status=True, use_cache=self.args.use_cache)
if self.args.list:
self.list_parameters()
return
print(self.cache.par)
def list_parameters(self):
"""List malloc parameter(s)' address only"""
par = self.cache.par
print("Parameter(s) found:", end="\n")
print(" parameter @ ", end="")
pu.print_header("{:#x}".format(int(par.address)), end="\n")
|
[
"argparse.ArgumentParser",
"logging.getLogger"
] |
[((349, 381), 'logging.getLogger', 'logging.getLogger', (['"""libptmalloc"""'], {}), "('libptmalloc')\n", (366, 381), False, 'import logging\n'), ((818, 1083), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Print malloc parameter(s) information\n\nAnalyze the malloc_par structure\'s fields."""', 'add_help': '(False)', 'formatter_class': 'argparse.RawTextHelpFormatter', 'epilog': '"""NOTE: Last defined mp_ will be cached for future use"""'}), '(description=\n """Print malloc parameter(s) information\n\nAnalyze the malloc_par structure\'s fields."""\n , add_help=False, formatter_class=argparse.RawTextHelpFormatter, epilog\n =\'NOTE: Last defined mp_ will be cached for future use\')\n', (841, 1083), False, 'import argparse\n')]
|
import sympy as sp
def get_4th_order_rungekutta(dydx, x0, y0, n:int, h, x = sp.Symbol('x'), y = sp.Symbol('y')):
"""
Method to get the values of x, y and dy/dx using fourth-order Runge-Kutta method in a form of a 2d list
Parameters:
dydx: Equation to get the derivative
x0: initial value of x
y0: initial value of y
n: number of iterations
h: value of increment for x
x: x Symbol object
y: y Symbol object
"""
sets = []
sets.append([x0, y0, dydx.evalf(subs={x: x0, y: y0})])
for i in range(1, n+1):
xOld = sets[i-1][0]
yOld = sets[i-1][1]
k1 = dydx.evalf(subs={x:xOld, y:yOld})
k2 = dydx.evalf(subs={x:xOld + h/2, y:yOld + h*k1/2})
k3 = dydx.evalf(subs={x:xOld + h/2, y:yOld + h*k2/2})
k4 = dydx.evalf(subs={x:xOld + h, y:yOld + k3 * h})
yNew = yOld + (1.0/6.0) * (k1 + 2*k2 + 2*k3 + k4) * h
xNew = xOld + h
sets.append([xNew, yNew, dydx.evalf(subs={x:xNew, y:yNew})])
return sets
def example():
#Usage example
sets = get_4th_order_rungekutta(sp.Symbol('y'), 0, 1, 3, 1)
for s in sets:
print(s)
|
[
"sympy.Symbol"
] |
[((81, 95), 'sympy.Symbol', 'sp.Symbol', (['"""x"""'], {}), "('x')\n", (90, 95), True, 'import sympy as sp\n'), ((101, 115), 'sympy.Symbol', 'sp.Symbol', (['"""y"""'], {}), "('y')\n", (110, 115), True, 'import sympy as sp\n'), ((1118, 1132), 'sympy.Symbol', 'sp.Symbol', (['"""y"""'], {}), "('y')\n", (1127, 1132), True, 'import sympy as sp\n')]
|
import os
import sys
import json
class NoEnvironmentFile(Exception):
pass
class KeyNotFound(Exception):
pass
DEFAULT = object()
class LocalEnv:
_BOOLEANS = {'1': True, 'yes': True, 'true': True, 'on': True,
'0': False, 'no': False, 'false': False, 'off': False, '': False}
def __init__(self):
self.files = []
self.data = {}
self.first_load = False
def load(self, file=None):
"""
If no file is defined, the .env file will be searched
in invoker module's directory
"""
if file is None:
file = self._invoker()
self.files.append({'file': file, 'exists': '', 'loaded': False})
# search all files given and load them
for file_dict in self.files:
file_dict['exists'] = os.path.isfile(file_dict['file'])
if file_dict['exists'] and not file_dict['loaded']:
with open(file_dict['file']) as f:
for line in f:
line = line.strip()
if not line or line.startswith('#') or '=' not in line:
continue
key, value = line.split('=', 1)
key = key.replace('export', '')
key = key.strip()
value = value.strip().strip('\'"')
self.data[key] = value
file_dict['loaded'] = True
def _cast(self, cast, data):
if cast is bool and str(data).lower() not in self._BOOLEANS:
raise ValueError(f'value can not be parsed as boolean')
elif cast is bool:
return self._BOOLEANS[str(data).lower()]
else:
return cast(data)
def get(self, key, default=DEFAULT, cast=None):
if not self.first_load:
self.load()
self.first_load = True
try:
ret_val = self.data[key] if cast is None else self._cast(cast, self.data[key])
except KeyError:
if default != DEFAULT:
ret_val = default if cast is None else self._cast(cast, default)
else:
raise KeyNotFound(f'value not found in files: \n{json.dumps(self.files, indent=4)}')
return ret_val
def _invoker(self):
# tip from:
# https://github.com/henriquebastos/python-decouple/blob/master/decouple.py
# MAGIC! Get the caller's module path.
frame = sys._getframe()
path = os.path.dirname(frame.f_back.f_back.f_back.f_code.co_filename)
file = os.path.join(path, '.env')
return file
localenv = LocalEnv()
|
[
"os.path.dirname",
"sys._getframe",
"json.dumps",
"os.path.isfile",
"os.path.join"
] |
[((2494, 2509), 'sys._getframe', 'sys._getframe', ([], {}), '()\n', (2507, 2509), False, 'import sys\n'), ((2525, 2587), 'os.path.dirname', 'os.path.dirname', (['frame.f_back.f_back.f_back.f_code.co_filename'], {}), '(frame.f_back.f_back.f_back.f_code.co_filename)\n', (2540, 2587), False, 'import os\n'), ((2603, 2629), 'os.path.join', 'os.path.join', (['path', '""".env"""'], {}), "(path, '.env')\n", (2615, 2629), False, 'import os\n'), ((823, 856), 'os.path.isfile', 'os.path.isfile', (["file_dict['file']"], {}), "(file_dict['file'])\n", (837, 856), False, 'import os\n'), ((2243, 2275), 'json.dumps', 'json.dumps', (['self.files'], {'indent': '(4)'}), '(self.files, indent=4)\n', (2253, 2275), False, 'import json\n')]
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Utils module for common utility methods used in the tests"""
from itertools import tee
from json import dumps
from requests import Response
def iter_len_plus_one(iterator):
"""Return the length + 1 of the given iterator.
The +1 is because in the tests the first side effect is already consumed"""
return sum(1 for _ in tee(iterator)) + 1
def _get_call_arguments(self):
if len(self) == 2:
# returned tuple is args, kwargs = self
_, kwargs = self
else:
# returned tuple is name, args, kwargs = self
_, _, kwargs = self
return kwargs
def _make_response(status, body, reason):
resp = Response()
resp.status_code = status
resp._content = dumps(body).encode('utf-8')
resp.reason = reason
return resp
|
[
"itertools.tee",
"requests.Response",
"json.dumps"
] |
[((1437, 1447), 'requests.Response', 'Response', ([], {}), '()\n', (1445, 1447), False, 'from requests import Response\n'), ((1498, 1509), 'json.dumps', 'dumps', (['body'], {}), '(body)\n', (1503, 1509), False, 'from json import dumps\n'), ((1123, 1136), 'itertools.tee', 'tee', (['iterator'], {}), '(iterator)\n', (1126, 1136), False, 'from itertools import tee\n')]
|
import subprocess
try:
from flask import Flask, request, send_from_directory
except ImportError:
print('This example needs Flask to run. Try running:\n'
'pip install flask')
app = Flask(__name__)
STATIC_DIR = 'examples/reverse_image_search/static'
# TODO(wcrichto): figure out how to prevent image caching
@app.route('/mystatic/<path:path>')
def mystatic(path):
return send_from_directory('static', path)
@app.route('/', methods=['GET','POST'])
def index():
if request.method == 'POST':
f = request.files['file']
f.save('{}/query.jpg'.format(STATIC_DIR))
subprocess.check_call(['python', 'examples/reverse_image_search/search.py'])
return """
<img src="/mystatic/result0.jpg" />
<img src="/mystatic/result1.jpg" />
<img src="/mystatic/result2.jpg" />
<img src="/mystatic/result3.jpg" />
<img src="/mystatic/result4.jpg" />
"""
else:
return """
<form method="post" enctype="multipart/form-data">
<input type="file" name="file">
<input type="submit" value="Upload">
</form>
"""
if __name__ == "__main__":
app.run(host='0.0.0.0', debug=True)
|
[
"flask.send_from_directory",
"flask.Flask",
"subprocess.check_call"
] |
[((199, 214), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (204, 214), False, 'from flask import Flask, request, send_from_directory\n'), ((396, 431), 'flask.send_from_directory', 'send_from_directory', (['"""static"""', 'path'], {}), "('static', path)\n", (415, 431), False, 'from flask import Flask, request, send_from_directory\n'), ((611, 687), 'subprocess.check_call', 'subprocess.check_call', (["['python', 'examples/reverse_image_search/search.py']"], {}), "(['python', 'examples/reverse_image_search/search.py'])\n", (632, 687), False, 'import subprocess\n')]
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from lib.helpers.decode_helper import _transpose_and_gather_feat
from lib.losses.focal_loss import focal_loss_cornernet
from lib.losses.uncertainty_loss import laplacian_aleatoric_uncertainty_loss
from lib.losses.dim_aware_loss import dim_aware_l1_loss
eps = 1e-6
def compute_centernet3d_loss(input, target):
stats_dict = {}
edge_fusion = False
if 'edge_len' in target.keys():
edge_fusion = True
seg_loss = compute_segmentation_loss(input, target)
offset2d_loss = compute_offset2d_loss(input, target, edge_fusion=edge_fusion)
size2d_loss = compute_size2d_loss(input, target)
offset3d_loss = compute_offset3d_loss(input, target, edge_fusion=edge_fusion)
depth_loss = compute_depth_loss(input, target)
size3d_loss = compute_size3d_loss(input, target)
heading_loss = compute_heading_loss(input, target)
# statistics
stats_dict['seg'] = seg_loss.item()
stats_dict['offset2d'] = offset2d_loss.item()
stats_dict['size2d'] = size2d_loss.item()
stats_dict['offset3d'] = offset3d_loss.item()
stats_dict['depth'] = depth_loss.item()
stats_dict['size3d'] = size3d_loss.item()
stats_dict['heading'] = heading_loss.item()
total_loss = seg_loss + offset2d_loss + size2d_loss + offset3d_loss + \
depth_loss + size3d_loss + heading_loss
return total_loss, stats_dict
def compute_segmentation_loss(input, target):
input['heatmap'] = torch.clamp(input['heatmap'].sigmoid_(), min=1e-4, max=1 - 1e-4)
loss = focal_loss_cornernet(input['heatmap'], target['heatmap'])
return loss
def compute_size2d_loss(input, target):
# compute size2d loss
size2d_input = extract_input_from_tensor(input['size_2d'], target['indices'], target['mask_2d'])
size2d_target = extract_target_from_tensor(target['size_2d'], target['mask_2d'])
size2d_loss = F.l1_loss(size2d_input, size2d_target, reduction='mean')
if torch.any(torch.isnan(size2d_loss)):
size2d_loss = torch.tensor([0.0]).to(size2d_input.device)
return size2d_loss
def compute_offset2d_loss(input, target, edge_fusion=False):
# compute offset2d loss
offset2d_input = extract_input_from_tensor(input['offset_2d'], target['indices'], target['mask_2d'])
offset2d_target = extract_target_from_tensor(target['offset_2d'], target['mask_2d'])
if edge_fusion:
trunc_mask = extract_target_from_tensor(target['trunc_mask'], target['mask_2d']).bool()
offset2d_loss = F.l1_loss(offset2d_input, offset2d_target, reduction='none').sum(dim=1)
# use different loss functions for inside and outside objects
trunc_offset_loss = torch.log(1 + offset2d_loss[trunc_mask]).sum() / torch.clamp(trunc_mask.sum() + eps, min=1)
offset2d_loss = offset2d_loss[~trunc_mask].mean()
return trunc_offset_loss + offset2d_loss
elif(target['mask_2d'].sum() > 0):
offset2d_loss = F.l1_loss(offset2d_input, offset2d_target, reduction='mean')
return offset2d_loss
else:
offset2d_loss = torch.tensor([0.0]).to(offset2d_input.device)
return offset2d_loss
def compute_depth_loss(input, target):
depth_input = extract_input_from_tensor(input['depth'], target['indices'], target['mask_3d'])
depth_input, depth_log_variance = depth_input[:, 0:1], depth_input[:, 1:2]
depth_input = 1. / (depth_input.sigmoid() + 1e-6) - 1.
depth_target = extract_target_from_tensor(target['depth'], target['mask_3d'])
if target['mask_3d'].sum() > 0:
depth_loss = laplacian_aleatoric_uncertainty_loss(depth_input, depth_target, depth_log_variance)
else:
depth_loss = torch.tensor([0.0]).to(depth_input.device)
return depth_loss
def compute_offset3d_loss(input, target, edge_fusion=False):
offset3d_input = extract_input_from_tensor(input['offset_3d'], target['indices'], target['mask_3d'])
offset3d_target = extract_target_from_tensor(target['offset_3d'], target['mask_3d'])
if target['mask_3d'].sum() > 0:
offset3d_loss = F.l1_loss(offset3d_input, offset3d_target, reduction='mean')
else:
offset3d_loss = torch.tensor([0.0]).to(offset3d_input.device)
if edge_fusion:
sum_target_trunc_mask = target['trunc_mask'].sum()
if sum_target_trunc_mask > 0:
trunc_offset3d_input = extract_input_from_tensor(input['offset_3d'], target['indices'], target['trunc_mask'])
trunc_offset3d_target = extract_target_from_tensor(target['offset_3d'], target['trunc_mask'])
trunc_offset3d_loss = torch.log(1 + F.l1_loss(trunc_offset3d_input,
trunc_offset3d_target, reduction='none').sum() / torch.clamp(sum_target_trunc_mask, min=1))
else:
trunc_offset3d_loss = torch.tensor([0.0]).to(offset3d_input.device)
return offset3d_loss + trunc_offset3d_loss
return offset3d_loss
def compute_size3d_loss(input, target):
size3d_input = extract_input_from_tensor(input['size_3d'], target['indices'], target['mask_3d'])
size3d_target = extract_target_from_tensor(target['size_3d'], target['mask_3d'])
# target['dimension'] is size3d_target
dimension_target = extract_target_from_tensor(target['dimension'], target['mask_3d'])
if target['mask_3d'].sum() > 0:
size3d_loss = dim_aware_l1_loss(size3d_input, size3d_target, dimension_target)
else:
size3d_loss = torch.tensor([0.0]).to(size3d_input.device)
return size3d_loss
def compute_heading_loss(input, target):
heading_input = _transpose_and_gather_feat(input['heading'], target['indices']) # B * C * H * W ---> B * K * C
heading_input = heading_input.view(-1, 24)
heading_target_cls = target['heading_bin'].view(-1)
heading_target_res = target['heading_res'].view(-1)
mask = target['mask_2d'].view(-1)
# classification loss
heading_input_cls = heading_input[:, 0:12]
# heading_input_cls, heading_target_cls = heading_input_cls[mask], heading_target_cls[mask]
heading_input_cls, heading_target_cls = heading_input_cls[mask > 0], heading_target_cls[mask > 0]
if mask.sum() > 0:
cls_loss = F.cross_entropy(heading_input_cls, heading_target_cls, reduction='mean')
else:
cls_loss = torch.tensor([0.0]).to(heading_input_cls.device)
# regression loss
heading_input_res = heading_input[:, 12:24]
heading_input_res, heading_target_res = heading_input_res[mask > 0], heading_target_res[mask > 0]
cls_onehot = torch.zeros(heading_target_cls.shape[0], 12).cuda().scatter_(dim=1, index=heading_target_cls.view(-1, 1), value=1)
heading_input_res = torch.sum(heading_input_res * cls_onehot, 1)
reg_loss = F.l1_loss(heading_input_res, heading_target_res, reduction='mean')
if torch.any(torch.isnan(reg_loss)):
reg_loss = torch.tensor([0.0]).to(heading_input_res.device)
return cls_loss + reg_loss
###################### auxiliary functions #########################
def extract_input_from_tensor(input, ind, mask):
input = _transpose_and_gather_feat(input, ind) # B*C*H*W --> B*K*C
return input[mask > 0] # B*K*C --> M * C
def extract_target_from_tensor(target, mask):
return target[mask > 0]
if __name__ == '__main__':
input_cls = torch.zeros(2, 50, 12) # B * 50 * 24
input_reg = torch.zeros(2, 50, 12) # B * 50 * 24
target_cls = torch.zeros(2, 50, 1, dtype=torch.int64)
target_reg = torch.zeros(2, 50, 1)
input_cls, target_cls = input_cls.view(-1, 12), target_cls.view(-1)
cls_loss = F.cross_entropy(input_cls, target_cls, reduction='mean')
a = torch.zeros(2, 24, 10, 10)
b = torch.zeros(2, 10).long()
c = torch.ones(2, 10).long()
d = torch.zeros(2, 10, 1).long()
e = torch.zeros(2, 10, 1)
print(compute_heading_loss(a, b, c, d, e))
|
[
"torch.ones",
"lib.losses.dim_aware_loss.dim_aware_l1_loss",
"lib.losses.focal_loss.focal_loss_cornernet",
"torch.log",
"torch.nn.functional.l1_loss",
"torch.nn.functional.cross_entropy",
"torch.clamp",
"torch.zeros",
"lib.losses.uncertainty_loss.laplacian_aleatoric_uncertainty_loss",
"torch.sum",
"torch.isnan",
"torch.tensor",
"lib.helpers.decode_helper._transpose_and_gather_feat"
] |
[((1577, 1634), 'lib.losses.focal_loss.focal_loss_cornernet', 'focal_loss_cornernet', (["input['heatmap']", "target['heatmap']"], {}), "(input['heatmap'], target['heatmap'])\n", (1597, 1634), False, 'from lib.losses.focal_loss import focal_loss_cornernet\n'), ((1923, 1979), 'torch.nn.functional.l1_loss', 'F.l1_loss', (['size2d_input', 'size2d_target'], {'reduction': '"""mean"""'}), "(size2d_input, size2d_target, reduction='mean')\n", (1932, 1979), True, 'import torch.nn.functional as F\n'), ((5614, 5677), 'lib.helpers.decode_helper._transpose_and_gather_feat', '_transpose_and_gather_feat', (["input['heading']", "target['indices']"], {}), "(input['heading'], target['indices'])\n", (5640, 5677), False, 'from lib.helpers.decode_helper import _transpose_and_gather_feat\n'), ((6703, 6747), 'torch.sum', 'torch.sum', (['(heading_input_res * cls_onehot)', '(1)'], {}), '(heading_input_res * cls_onehot, 1)\n', (6712, 6747), False, 'import torch\n'), ((6763, 6829), 'torch.nn.functional.l1_loss', 'F.l1_loss', (['heading_input_res', 'heading_target_res'], {'reduction': '"""mean"""'}), "(heading_input_res, heading_target_res, reduction='mean')\n", (6772, 6829), True, 'import torch.nn.functional as F\n'), ((7104, 7142), 'lib.helpers.decode_helper._transpose_and_gather_feat', '_transpose_and_gather_feat', (['input', 'ind'], {}), '(input, ind)\n', (7130, 7142), False, 'from lib.helpers.decode_helper import _transpose_and_gather_feat\n'), ((7331, 7353), 'torch.zeros', 'torch.zeros', (['(2)', '(50)', '(12)'], {}), '(2, 50, 12)\n', (7342, 7353), False, 'import torch\n'), ((7386, 7408), 'torch.zeros', 'torch.zeros', (['(2)', '(50)', '(12)'], {}), '(2, 50, 12)\n', (7397, 7408), False, 'import torch\n'), ((7441, 7481), 'torch.zeros', 'torch.zeros', (['(2)', '(50)', '(1)'], {'dtype': 'torch.int64'}), '(2, 50, 1, dtype=torch.int64)\n', (7452, 7481), False, 'import torch\n'), ((7499, 7520), 'torch.zeros', 'torch.zeros', (['(2)', '(50)', '(1)'], {}), '(2, 50, 1)\n', (7510, 7520), False, 'import torch\n'), ((7609, 7665), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['input_cls', 'target_cls'], {'reduction': '"""mean"""'}), "(input_cls, target_cls, reduction='mean')\n", (7624, 7665), True, 'import torch.nn.functional as F\n'), ((7675, 7701), 'torch.zeros', 'torch.zeros', (['(2)', '(24)', '(10)', '(10)'], {}), '(2, 24, 10, 10)\n', (7686, 7701), False, 'import torch\n'), ((7814, 7835), 'torch.zeros', 'torch.zeros', (['(2)', '(10)', '(1)'], {}), '(2, 10, 1)\n', (7825, 7835), False, 'import torch\n'), ((1997, 2021), 'torch.isnan', 'torch.isnan', (['size2d_loss'], {}), '(size2d_loss)\n', (2008, 2021), False, 'import torch\n'), ((3584, 3671), 'lib.losses.uncertainty_loss.laplacian_aleatoric_uncertainty_loss', 'laplacian_aleatoric_uncertainty_loss', (['depth_input', 'depth_target', 'depth_log_variance'], {}), '(depth_input, depth_target,\n depth_log_variance)\n', (3620, 3671), False, 'from lib.losses.uncertainty_loss import laplacian_aleatoric_uncertainty_loss\n'), ((4081, 4141), 'torch.nn.functional.l1_loss', 'F.l1_loss', (['offset3d_input', 'offset3d_target'], {'reduction': '"""mean"""'}), "(offset3d_input, offset3d_target, reduction='mean')\n", (4090, 4141), True, 'import torch.nn.functional as F\n'), ((5387, 5451), 'lib.losses.dim_aware_loss.dim_aware_l1_loss', 'dim_aware_l1_loss', (['size3d_input', 'size3d_target', 'dimension_target'], {}), '(size3d_input, size3d_target, dimension_target)\n', (5404, 5451), False, 'from lib.losses.dim_aware_loss import dim_aware_l1_loss\n'), ((6222, 6294), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['heading_input_cls', 'heading_target_cls'], {'reduction': '"""mean"""'}), "(heading_input_cls, heading_target_cls, reduction='mean')\n", (6237, 6294), True, 'import torch.nn.functional as F\n'), ((6847, 6868), 'torch.isnan', 'torch.isnan', (['reg_loss'], {}), '(reg_loss)\n', (6858, 6868), False, 'import torch\n'), ((2969, 3029), 'torch.nn.functional.l1_loss', 'F.l1_loss', (['offset2d_input', 'offset2d_target'], {'reduction': '"""mean"""'}), "(offset2d_input, offset2d_target, reduction='mean')\n", (2978, 3029), True, 'import torch.nn.functional as F\n'), ((7710, 7728), 'torch.zeros', 'torch.zeros', (['(2)', '(10)'], {}), '(2, 10)\n', (7721, 7728), False, 'import torch\n'), ((7744, 7761), 'torch.ones', 'torch.ones', (['(2)', '(10)'], {}), '(2, 10)\n', (7754, 7761), False, 'import torch\n'), ((7777, 7798), 'torch.zeros', 'torch.zeros', (['(2)', '(10)', '(1)'], {}), '(2, 10, 1)\n', (7788, 7798), False, 'import torch\n'), ((2046, 2065), 'torch.tensor', 'torch.tensor', (['[0.0]'], {}), '([0.0])\n', (2058, 2065), False, 'import torch\n'), ((2537, 2597), 'torch.nn.functional.l1_loss', 'F.l1_loss', (['offset2d_input', 'offset2d_target'], {'reduction': '"""none"""'}), "(offset2d_input, offset2d_target, reduction='none')\n", (2546, 2597), True, 'import torch.nn.functional as F\n'), ((3699, 3718), 'torch.tensor', 'torch.tensor', (['[0.0]'], {}), '([0.0])\n', (3711, 3718), False, 'import torch\n'), ((4176, 4195), 'torch.tensor', 'torch.tensor', (['[0.0]'], {}), '([0.0])\n', (4188, 4195), False, 'import torch\n'), ((5484, 5503), 'torch.tensor', 'torch.tensor', (['[0.0]'], {}), '([0.0])\n', (5496, 5503), False, 'import torch\n'), ((6324, 6343), 'torch.tensor', 'torch.tensor', (['[0.0]'], {}), '([0.0])\n', (6336, 6343), False, 'import torch\n'), ((6890, 6909), 'torch.tensor', 'torch.tensor', (['[0.0]'], {}), '([0.0])\n', (6902, 6909), False, 'import torch\n'), ((2707, 2747), 'torch.log', 'torch.log', (['(1 + offset2d_loss[trunc_mask])'], {}), '(1 + offset2d_loss[trunc_mask])\n', (2716, 2747), False, 'import torch\n'), ((3093, 3112), 'torch.tensor', 'torch.tensor', (['[0.0]'], {}), '([0.0])\n', (3105, 3112), False, 'import torch\n'), ((4846, 4865), 'torch.tensor', 'torch.tensor', (['[0.0]'], {}), '([0.0])\n', (4858, 4865), False, 'import torch\n'), ((6564, 6608), 'torch.zeros', 'torch.zeros', (['heading_target_cls.shape[0]', '(12)'], {}), '(heading_target_cls.shape[0], 12)\n', (6575, 6608), False, 'import torch\n'), ((4754, 4795), 'torch.clamp', 'torch.clamp', (['sum_target_trunc_mask'], {'min': '(1)'}), '(sum_target_trunc_mask, min=1)\n', (4765, 4795), False, 'import torch\n'), ((4615, 4687), 'torch.nn.functional.l1_loss', 'F.l1_loss', (['trunc_offset3d_input', 'trunc_offset3d_target'], {'reduction': '"""none"""'}), "(trunc_offset3d_input, trunc_offset3d_target, reduction='none')\n", (4624, 4687), True, 'import torch.nn.functional as F\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-07-19 10:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hkm', '0021_page_ref'),
]
operations = [
migrations.AddField(
model_name='printproduct',
name='is_museum_only',
field=models.BooleanField(default=False, verbose_name='Museum purchase only'),
),
]
|
[
"django.db.models.BooleanField"
] |
[((399, 470), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'verbose_name': '"""Museum purchase only"""'}), "(default=False, verbose_name='Museum purchase only')\n", (418, 470), False, 'from django.db import migrations, models\n')]
|
# This code calculates compressibility factor (z-factor) for natural hydrocarbon gases
# with 3 different methods. It is the outcomes of the following paper:
# <br>
# <NAME>.; <NAME>., <NAME>.; <NAME>. & <NAME>, <NAME>.
# Using artificial neural networks to estimate the Z-Factor for natural hydrocarbon gases
# Journal of Petroleum Science and Engineering, 2010, 73, 248-257
# <br>
# The original paper can be found at:
# <a href="http://www.sciencedirect.com/science/article/pii/S0920410510001427">here</a>.
# <p>
# Artificial Neural Network (ANN)has been applied and two accurate non-iterative methods are presented.
# The Dranchuk and Abou-Kassem equation of state model, which is an iterative method, is
# also presented here for comparison. All the methods are:
# <ul>
# <li> ANN10: this method is the most accurate ANN method that presented in the paper.
# <li> ANN5: this method is the next accurate ANN method that presented in the paper.
# <li> DAK: this is the Dranchuk and Abou-Kassem equation of state.
# </ul>
#
# @author <a href="mailto:<EMAIL>"><NAME></a>
# @author <a href="mailto:<EMAIL>"><NAME>.</a>
import numpy as np
class CalculateZFactor:
# Minimum and Maximum values used in the neural network to normalize the input and output values.
def __init__(self):
pass
Ppr_min = 0
Ppr_max = 30
Tpr_min = 1
Tpr_max = 3
Z_min = 0.25194
Z_max = 2.66
# -------------START OF NETWORK 2-5-5-1 STRUCTURE-------------
# Weights and Biases for the 1st layer of neurons
wb1_5 = [
[-1.5949, 7.9284, 7.2925],
[-1.7917, 1.2117, 2.221],
[5.3547, -4.5424, -0.9846],
[4.6209, 2.2228, 8.9966],
[-2.3577, -0.1499, -1.5063]
]
# Weights and Biases for the 2nd layer of neurons
wb2_5 = [
[2.3617, -4.0858, 1.2062, -1.1518, -1.2915, 2.0626],
[10.0141, 9.8649, -11.4445, -123.0698, 7.5898, 95.1393],
[10.4103, 14.1358, -10.9061, -125.5468, 6.3448, 93.8916],
[-1.7794, 14.0742, -1.4195, 12.0894, -15.4537, -9.9439],
[-0.5988, -0.4354, -0.336, 9.9429, -0.4029, -8.3371]
]
# Weights and Biases for the 3rd layer of neurons
wb3_5 = [1.4979, -37.466, 37.7958, -7.7463, 6.9079, 2.8462]
# -------------END OF NETWORK 2-5-5-1 STRUCTURE-------------
# -------------START OF NETWORK 2-10-10-1 STRUCTURE-------------
# Weights and Biases for the 1st layer of neurons
wb1_10 = [
[2.2458, -2.2493, -3.7801],
[3.4663, 8.1167, -14.9512],
[5.0509, -1.8244, 3.5017],
[6.1185, -0.2045, 0.3179],
[1.3366, 4.9303, 2.2153],
[-2.8652, 1.1679, 1.0218],
[-6.5716, -0.8414, -8.1646],
[-6.1061, 12.7945, 7.2201],
[13.0884, 7.5387, 19.2231],
[70.7187, 7.6138, 74.6949]
]
# Weights and Biases for the 2nd layer of neurons
wb2_10 = [
[4.674, 1.4481, -1.5131, 0.0461, -0.1427, 2.5454, -6.7991, -0.5948, -1.6361, 0.5801, -3.0336],
[-6.7171, -0.7737, -5.6596, 2.975, 14.6248, 2.7266, 5.5043, -13.2659, -0.7158, 3.076, 15.9058],
[7.0753, -3.0128, -1.1779, -6.445, -1.1517, 7.3248, 24.7022, -0.373, 4.2665, -7.8302, -3.1938],
[2.5847, -12.1313, 21.3347, 1.2881, -0.2724, -1.0393, -19.1914, -0.263, -3.2677, -12.4085, -10.2058],
[-19.8404, 4.8606, 0.3891, -4.5608, -0.9258, -7.3852, 18.6507, 0.0403, -6.3956, -0.9853, 13.5862],
[16.7482, -3.8389, -1.2688, 1.9843, -0.1401, -8.9383, -30.8856, -1.5505, -4.7172, 10.5566, 8.2966],
[2.4256, 2.1989, 18.8572, -14.5366, 11.64, -19.3502, 26.6786, -8.9867, -13.9055, 5.195, 9.7723],
[-16.388, 12.1992, -2.2401, -4.0366, -0.368, -6.9203, -17.8283, -0.0244, 9.3962, -1.7107, -1.0572],
[14.6257, 7.5518, 12.6715, -12.7354, 10.6586, -43.1601, 1.3387, -16.3876, 8.5277, 45.9331, -6.6981],
[-6.9243, 0.6229, 1.6542, -0.6833, 1.3122, -5.588, -23.4508, 0.5679, 1.7561, -3.1352, 5.8675]
]
# Weights and Biases for the 3rd layer of neurons
wb3_10 = [-30.1311, 2.0902, -3.5296, 18.1108, -2.528, -0.7228, 0.0186, 5.3507, -0.1476, -5.0827, 3.9767]
# -------------END OF NETWORK 2-10-10-1 STRUCTURE-------------
# input and output of the 1st layer in 2-5-5-1 network. [,0] ==> inputs, [,1] ==> outputs
n1_5 = np.zeros((5, 2))
# input and output of the 2nd layer in 2-5-5-1 network. [,0] ==> inputs, [,1] ==> outputs
n2_5 = np.zeros((5, 2))
# input and output of the 1st layer in 2-10-10-1 network. [,0] ==> inputs, [,1] ==> outputs
n1_10 = np.zeros((10, 2))
# input and output of the 2nd layer in 2-10-10-1 network. [,0] ==> inputs, [,1] ==> outputs
n2_10 = np.zeros((10, 2))
TOLERANCE = 0.0001 # tolerance of DAK
MAX_NO_Iterations = 20 # Max number of iterations for DAK
def ANN10(self, Ppr: float, Tpr: float) -> float:
"""
his method calculates the z-factor using a 2x10x10x1 Artificial Neural Network
based on training data obtained from Standing-Katz and Katz charts.
It always produces a result, but accuracy is controlled for 0<Ppr<30 and 1<Tpr<3
:param Ppr: pseudo-reduced pressure
:param Tpr: pseudo-reduced temperature
:return: z factor
"""
Ppr_n = 2.0 / (self.Ppr_max - self.Ppr_min) * (Ppr - self.Ppr_min) - 1.0
Tpr_n = 2.0 / (self.Tpr_max - self.Tpr_min) * (Tpr - self.Tpr_min) - 1.0
for i in range(10):
self.n1_10[i][0] = Ppr_n * self.wb1_10[i][0] + Tpr_n * self.wb1_10[i][1] + self.wb1_10[i][2]
self.n1_10[i][1] = log_sig(self.n1_10[i][0])
for i in range(10):
self.n2_10[i][0] = 0
for j in range(len(self.n2_10)):
self.n2_10[i][0] += self.n1_10[j][1] * self.wb2_10[i][j]
self.n2_10[i][0] += self.wb2_10[i][10] # adding the bias value
self.n2_10[i][1] = log_sig(self.n2_10[i][0])
z_n = 0
for j in range(len(self.n2_10)):
z_n += self.n2_10[j][1] * self.wb3_10[j]
z_n += self.wb3_10[10] # adding the bias value
zAnn10 = (z_n + 1) * (self.Z_max - self.Z_min) / 2 + self.Z_min # reverse normalization of normalized z factor.
return zAnn10
def ANN5(self, Ppr: float, Tpr: float) -> float:
"""
This method calculates the z-factor using a 2x5x5x1 Artificial Neural Network
based on training data obtained from Standing-Katz and Katz charts.
It always produces a result, but accuracy is controlled for 0<Ppr<30 and 1<Tpr<3
:param Ppr: pseudo-reduced pressure
:param Tpr: pseudo-reduced temperature
:return: z factor
"""
Ppr_n = 2.0 / (self.Ppr_max - self.Ppr_min) * (Ppr - self.Ppr_min) - 1.0
Tpr_n = 2.0 / (self.Tpr_max - self.Tpr_min) * (Tpr - self.Tpr_min) - 1.0
for i in range(5):
self.n1_5[i][0] = Ppr_n * self.wb1_5[i][0] + Tpr_n * self.wb1_5[i][1] + self.wb1_5[i][2]
self.n1_5[i][1] = log_sig(self.n1_5[i][0])
for i in range(5):
self.n2_5[i][0] = 0
for j in range(len(self.n2_5)):
self.n2_5[i][0] += self.n1_5[j][1] * self.wb2_5[i][j]
self.n2_5[i][0] += self.wb2_5[i][5] # adding the bias value
self.n2_5[i][1] = log_sig(self.n2_5[i][0])
z_n = 0
for j in range(len(self.n2_5)):
z_n += self.n2_5[j][1] * self.wb3_5[j]
z_n += self.wb3_5[5] # adding the bias value
zAnn5 = (z_n + 1) * (
self.Z_max - self.Z_min) / 2 + self.Z_min # reverse normalization of normalized z factor.
return zAnn5
def DAK(self, Ppr: float, Tpr: float) -> float:
"""
This method calculates the z-factor using Dranchuk and Abou-Kassem (DAK) method.
:param Ppr: pseudo-reduced pressure
:param Tpr: pseudo-reduced temperature
:return: z factor
"""
A1 = 0.3265
A2 = -1.07
A3 = -0.5339
A4 = 0.01569
A5 = -0.05165
A6 = 0.5475
A7 = -0.7361
A8 = 0.1844
A9 = 0.1056
A10 = 0.6134
A11 = 0.721
z_new = 1.0
z_old = 1.0
den = calculate_density(Ppr, Tpr, z_old)
for i in range(1, self.MAX_NO_Iterations + 1):
z_old = z_new
z_new = 1 + \
(A1 + A2 / Tpr + A3 / Tpr ** 3 + A4 / Tpr ** 4 + A5 / Tpr ** 5) * den + \
(A6 + A7 / Tpr + A8 / Tpr ** 2) * den ** 2 - \
A9 * (A7 / Tpr + A8 / Tpr ** 2) * den ** 5 + \
A10 * (1 + A11 * den ** 2) * den ** 2 / Tpr ** 3 * np.exp(-1 * A11 * den ** 2)
den = calculate_density(Ppr, Tpr, z_new)
if np.abs(z_new - z_old) < self.TOLERANCE:
break
zDAK = z_new
return zDAK
def log_sig(x):
return 1 / (1 + np.exp(-1 * x))
def calculate_density(pr: float, tr: float, z: float):
return 0.27 * pr / tr / z
|
[
"numpy.abs",
"numpy.zeros",
"numpy.exp"
] |
[((4319, 4335), 'numpy.zeros', 'np.zeros', (['(5, 2)'], {}), '((5, 2))\n', (4327, 4335), True, 'import numpy as np\n'), ((4441, 4457), 'numpy.zeros', 'np.zeros', (['(5, 2)'], {}), '((5, 2))\n', (4449, 4457), True, 'import numpy as np\n'), ((4567, 4584), 'numpy.zeros', 'np.zeros', (['(10, 2)'], {}), '((10, 2))\n', (4575, 4584), True, 'import numpy as np\n'), ((4693, 4710), 'numpy.zeros', 'np.zeros', (['(10, 2)'], {}), '((10, 2))\n', (4701, 4710), True, 'import numpy as np\n'), ((8915, 8929), 'numpy.exp', 'np.exp', (['(-1 * x)'], {}), '(-1 * x)\n', (8921, 8929), True, 'import numpy as np\n'), ((8772, 8793), 'numpy.abs', 'np.abs', (['(z_new - z_old)'], {}), '(z_new - z_old)\n', (8778, 8793), True, 'import numpy as np\n'), ((8674, 8701), 'numpy.exp', 'np.exp', (['(-1 * A11 * den ** 2)'], {}), '(-1 * A11 * den ** 2)\n', (8680, 8701), True, 'import numpy as np\n')]
|
# Copyright 2021 The ML Collections Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for ml_collections.FieldReference."""
import operator
from absl.testing import absltest
from absl.testing import parameterized
import ml_collections
from ml_collections.config_dict import config_dict
class FieldReferenceTest(parameterized.TestCase):
def _test_binary_operator(self,
initial_value,
other_value,
op,
true_value,
new_initial_value,
new_true_value,
assert_fn=None):
"""Helper for testing binary operators.
Generally speaking this checks that:
1. `op(initial_value, other_value) COMP true_value`
2. `op(new_initial_value, other_value) COMP new_true_value
where `COMP` is the comparison function defined by `assert_fn`.
Args:
initial_value: Initial value for the `FieldReference`, this is the first
argument for the binary operator.
other_value: The second argument for the binary operator.
op: The binary operator.
true_value: The expected output of the binary operator.
new_initial_value: The value that the `FieldReference` is changed to.
new_true_value: The expected output of the binary operator after the
`FieldReference` has changed.
assert_fn: Function used to check the output values.
"""
if assert_fn is None:
assert_fn = self.assertEqual
ref = ml_collections.FieldReference(initial_value)
new_ref = op(ref, other_value)
assert_fn(new_ref.get(), true_value)
config = ml_collections.ConfigDict()
config.a = initial_value
config.b = other_value
config.result = op(config.get_ref('a'), config.b)
assert_fn(config.result, true_value)
config.a = new_initial_value
assert_fn(config.result, new_true_value)
def _test_unary_operator(self,
initial_value,
op,
true_value,
new_initial_value,
new_true_value,
assert_fn=None):
"""Helper for testing unary operators.
Generally speaking this checks that:
1. `op(initial_value) COMP true_value`
2. `op(new_initial_value) COMP new_true_value
where `COMP` is the comparison function defined by `assert_fn`.
Args:
initial_value: Initial value for the `FieldReference`, this is the first
argument for the unary operator.
op: The unary operator.
true_value: The expected output of the unary operator.
new_initial_value: The value that the `FieldReference` is changed to.
new_true_value: The expected output of the unary operator after the
`FieldReference` has changed.
assert_fn: Function used to check the output values.
"""
if assert_fn is None:
assert_fn = self.assertEqual
ref = ml_collections.FieldReference(initial_value)
new_ref = op(ref)
assert_fn(new_ref.get(), true_value)
config = ml_collections.ConfigDict()
config.a = initial_value
config.result = op(config.get_ref('a'))
assert_fn(config.result, true_value)
config.a = new_initial_value
assert_fn(config.result, new_true_value)
def testBasic(self):
ref = ml_collections.FieldReference(1)
self.assertEqual(ref.get(), 1)
def testGetRef(self):
config = ml_collections.ConfigDict()
config.a = 1.
config.b = config.get_ref('a') + 10
config.c = config.get_ref('b') + 10
self.assertEqual(config.c, 21.0)
def testFunction(self):
def fn(x):
return x + 5
config = ml_collections.ConfigDict()
config.a = 1
config.b = fn(config.get_ref('a'))
config.c = fn(config.get_ref('b'))
self.assertEqual(config.b, 6)
self.assertEqual(config.c, 11)
config.a = 2
self.assertEqual(config.b, 7)
self.assertEqual(config.c, 12)
def testCycles(self):
config = ml_collections.ConfigDict()
config.a = 1.
config.b = config.get_ref('a') + 10
config.c = config.get_ref('b') + 10
self.assertEqual(config.b, 11.0)
self.assertEqual(config.c, 21.0)
# Introduce a cycle
with self.assertRaisesRegex(config_dict.MutabilityError, 'cycle'):
config.a = config.get_ref('c') - 1.0
# Introduce a cycle on second operand
with self.assertRaisesRegex(config_dict.MutabilityError, 'cycle'):
config.a = ml_collections.FieldReference(5.0) + config.get_ref('c')
# We can create multiple FieldReferences that all point to the same object
l = [0]
config = ml_collections.ConfigDict()
config.a = l
config.b = l
config.c = config.get_ref('a') + ['c']
config.d = config.get_ref('b') + ['d']
self.assertEqual(config.c, [0, 'c'])
self.assertEqual(config.d, [0, 'd'])
# Make sure nothing was mutated
self.assertEqual(l, [0])
self.assertEqual(config.c, [0, 'c'])
config.a = [1]
config.b = [2]
self.assertEqual(l, [0])
self.assertEqual(config.c, [1, 'c'])
self.assertEqual(config.d, [2, 'd'])
@parameterized.parameters(
{
'initial_value': 1,
'other_value': 2,
'true_value': 3,
'new_initial_value': 10,
'new_true_value': 12
}, {
'initial_value': 2.0,
'other_value': 2.5,
'true_value': 4.5,
'new_initial_value': 3.7,
'new_true_value': 6.2
}, {
'initial_value': 'hello, ',
'other_value': 'world!',
'true_value': 'hello, world!',
'new_initial_value': 'foo, ',
'new_true_value': 'foo, world!'
}, {
'initial_value': ['hello'],
'other_value': ['world'],
'true_value': ['hello', 'world'],
'new_initial_value': ['foo'],
'new_true_value': ['foo', 'world']
}, {
'initial_value': ml_collections.FieldReference(10),
'other_value': ml_collections.FieldReference(5.0),
'true_value': 15.0,
'new_initial_value': 12,
'new_true_value': 17.0
}, {
'initial_value': config_dict.placeholder(float),
'other_value': 7.0,
'true_value': None,
'new_initial_value': 12,
'new_true_value': 19.0
}, {
'initial_value': 5.0,
'other_value': config_dict.placeholder(float),
'true_value': None,
'new_initial_value': 8.0,
'new_true_value': None
}, {
'initial_value': config_dict.placeholder(str),
'other_value': 'tail',
'true_value': None,
'new_initial_value': 'head',
'new_true_value': 'headtail'
})
def testAdd(self, initial_value, other_value, true_value, new_initial_value,
new_true_value):
self._test_binary_operator(initial_value, other_value, operator.add,
true_value, new_initial_value, new_true_value)
@parameterized.parameters(
{
'initial_value': 5,
'other_value': 3,
'true_value': 2,
'new_initial_value': -1,
'new_true_value': -4
}, {
'initial_value': 2.0,
'other_value': 2.5,
'true_value': -0.5,
'new_initial_value': 12.3,
'new_true_value': 9.8
}, {
'initial_value': set(['hello', 123, 4.5]),
'other_value': set([123]),
'true_value': set(['hello', 4.5]),
'new_initial_value': set([123]),
'new_true_value': set([])
}, {
'initial_value': ml_collections.FieldReference(10),
'other_value': ml_collections.FieldReference(5.0),
'true_value': 5.0,
'new_initial_value': 12,
'new_true_value': 7.0
}, {
'initial_value': config_dict.placeholder(float),
'other_value': 7.0,
'true_value': None,
'new_initial_value': 12,
'new_true_value': 5.0
})
def testSub(self, initial_value, other_value, true_value, new_initial_value,
new_true_value):
self._test_binary_operator(initial_value, other_value, operator.sub,
true_value, new_initial_value, new_true_value)
@parameterized.parameters(
{
'initial_value': 1,
'other_value': 2,
'true_value': 2,
'new_initial_value': 3,
'new_true_value': 6
}, {
'initial_value': 2.0,
'other_value': 2.5,
'true_value': 5.0,
'new_initial_value': 3.5,
'new_true_value': 8.75
}, {
'initial_value': ['hello'],
'other_value': 3,
'true_value': ['hello', 'hello', 'hello'],
'new_initial_value': ['foo'],
'new_true_value': ['foo', 'foo', 'foo']
}, {
'initial_value': ml_collections.FieldReference(10),
'other_value': ml_collections.FieldReference(5.0),
'true_value': 50.0,
'new_initial_value': 1,
'new_true_value': 5.0
}, {
'initial_value': config_dict.placeholder(float),
'other_value': 7.0,
'true_value': None,
'new_initial_value': 12,
'new_true_value': 84.0
})
def testMul(self, initial_value, other_value, true_value, new_initial_value,
new_true_value):
self._test_binary_operator(initial_value, other_value, operator.mul,
true_value, new_initial_value, new_true_value)
@parameterized.parameters(
{
'initial_value': 3,
'other_value': 2,
'true_value': 1.5,
'new_initial_value': 10,
'new_true_value': 5.0
}, {
'initial_value': 2.0,
'other_value': 2.5,
'true_value': 0.8,
'new_initial_value': 6.3,
'new_true_value': 2.52
}, {
'initial_value': ml_collections.FieldReference(10),
'other_value': ml_collections.FieldReference(5.0),
'true_value': 2.0,
'new_initial_value': 13,
'new_true_value': 2.6
}, {
'initial_value': config_dict.placeholder(float),
'other_value': 7.0,
'true_value': None,
'new_initial_value': 17.5,
'new_true_value': 2.5
})
def testTrueDiv(self, initial_value, other_value, true_value,
new_initial_value, new_true_value):
self._test_binary_operator(initial_value, other_value, operator.truediv,
true_value, new_initial_value, new_true_value)
@parameterized.parameters(
{
'initial_value': 3,
'other_value': 2,
'true_value': 1,
'new_initial_value': 7,
'new_true_value': 3
}, {
'initial_value': ml_collections.FieldReference(10),
'other_value': ml_collections.FieldReference(5),
'true_value': 2,
'new_initial_value': 28,
'new_true_value': 5
}, {
'initial_value': config_dict.placeholder(int),
'other_value': 7,
'true_value': None,
'new_initial_value': 25,
'new_true_value': 3
})
def testFloorDiv(self, initial_value, other_value, true_value,
new_initial_value, new_true_value):
self._test_binary_operator(initial_value, other_value, operator.floordiv,
true_value, new_initial_value, new_true_value)
@parameterized.parameters(
{
'initial_value': 3,
'other_value': 2,
'true_value': 9,
'new_initial_value': 10,
'new_true_value': 100
}, {
'initial_value': 2.7,
'other_value': 3.2,
'true_value': 24.0084457245,
'new_initial_value': 6.5,
'new_true_value': 399.321543621
}, {
'initial_value': ml_collections.FieldReference(10),
'other_value': ml_collections.FieldReference(5),
'true_value': 1e5,
'new_initial_value': 2,
'new_true_value': 32
}, {
'initial_value': config_dict.placeholder(float),
'other_value': 3.0,
'true_value': None,
'new_initial_value': 7.0,
'new_true_value': 343.0
})
def testPow(self, initial_value, other_value, true_value, new_initial_value,
new_true_value):
self._test_binary_operator(
initial_value,
other_value,
operator.pow,
true_value,
new_initial_value,
new_true_value,
assert_fn=self.assertAlmostEqual)
@parameterized.parameters(
{
'initial_value': 3,
'other_value': 2,
'true_value': 1,
'new_initial_value': 10,
'new_true_value': 0
}, {
'initial_value': 5.3,
'other_value': 3.2,
'true_value': 2.0999999999999996,
'new_initial_value': 77,
'new_true_value': 0.2
}, {
'initial_value': ml_collections.FieldReference(10),
'other_value': ml_collections.FieldReference(5),
'true_value': 0,
'new_initial_value': 32,
'new_true_value': 2
}, {
'initial_value': config_dict.placeholder(int),
'other_value': 7,
'true_value': None,
'new_initial_value': 25,
'new_true_value': 4
})
def testMod(self, initial_value, other_value, true_value, new_initial_value,
new_true_value):
self._test_binary_operator(
initial_value,
other_value,
operator.mod,
true_value,
new_initial_value,
new_true_value,
assert_fn=self.assertAlmostEqual)
@parameterized.parameters(
{
'initial_value': True,
'other_value': True,
'true_value': True,
'new_initial_value': False,
'new_true_value': False
}, {
'initial_value': ml_collections.FieldReference(False),
'other_value': ml_collections.FieldReference(False),
'true_value': False,
'new_initial_value': True,
'new_true_value': False
}, {
'initial_value': config_dict.placeholder(bool),
'other_value': True,
'true_value': None,
'new_initial_value': False,
'new_true_value': False
})
def testAnd(self, initial_value, other_value, true_value, new_initial_value,
new_true_value):
self._test_binary_operator(initial_value, other_value, operator.and_,
true_value, new_initial_value, new_true_value)
@parameterized.parameters(
{
'initial_value': False,
'other_value': False,
'true_value': False,
'new_initial_value': True,
'new_true_value': True
}, {
'initial_value': ml_collections.FieldReference(True),
'other_value': ml_collections.FieldReference(True),
'true_value': True,
'new_initial_value': False,
'new_true_value': True
}, {
'initial_value': config_dict.placeholder(bool),
'other_value': False,
'true_value': None,
'new_initial_value': True,
'new_true_value': True
})
def testOr(self, initial_value, other_value, true_value, new_initial_value,
new_true_value):
self._test_binary_operator(initial_value, other_value, operator.or_,
true_value, new_initial_value, new_true_value)
@parameterized.parameters(
{
'initial_value': False,
'other_value': True,
'true_value': True,
'new_initial_value': True,
'new_true_value': False
}, {
'initial_value': ml_collections.FieldReference(True),
'other_value': ml_collections.FieldReference(True),
'true_value': False,
'new_initial_value': False,
'new_true_value': True
}, {
'initial_value': config_dict.placeholder(bool),
'other_value': True,
'true_value': None,
'new_initial_value': True,
'new_true_value': False
})
def testXor(self, initial_value, other_value, true_value, new_initial_value,
new_true_value):
self._test_binary_operator(initial_value, other_value, operator.xor,
true_value, new_initial_value, new_true_value)
@parameterized.parameters(
{
'initial_value': 3,
'true_value': -3,
'new_initial_value': -22,
'new_true_value': 22
}, {
'initial_value': 15.3,
'true_value': -15.3,
'new_initial_value': -0.2,
'new_true_value': 0.2
}, {
'initial_value': ml_collections.FieldReference(7),
'true_value': ml_collections.FieldReference(-7),
'new_initial_value': 123,
'new_true_value': -123
}, {
'initial_value': config_dict.placeholder(int),
'true_value': None,
'new_initial_value': -6,
'new_true_value': 6
})
def testNeg(self, initial_value, true_value, new_initial_value,
new_true_value):
self._test_unary_operator(initial_value, operator.neg, true_value,
new_initial_value, new_true_value)
@parameterized.parameters(
{
'initial_value': config_dict.create(attribute=2),
'true_value': 2,
'new_initial_value': config_dict.create(attribute=3),
'new_true_value': 3,
},
{
'initial_value': config_dict.create(attribute={'a': 1}),
'true_value': config_dict.create(a=1),
'new_initial_value': config_dict.create(attribute={'b': 1}),
'new_true_value': config_dict.create(b=1),
},
{
'initial_value':
ml_collections.FieldReference(config_dict.create(attribute=2)),
'true_value':
ml_collections.FieldReference(2),
'new_initial_value':
config_dict.create(attribute=3),
'new_true_value':
3,
},
{
'initial_value': config_dict.placeholder(config_dict.ConfigDict),
'true_value': None,
'new_initial_value': config_dict.create(attribute=3),
'new_true_value': 3,
},
)
def testAttr(self, initial_value, true_value, new_initial_value,
new_true_value):
self._test_unary_operator(initial_value, lambda x: x.attr('attribute'),
true_value, new_initial_value, new_true_value)
@parameterized.parameters(
{
'initial_value': 3,
'true_value': 3,
'new_initial_value': -101,
'new_true_value': 101
}, {
'initial_value': -15.3,
'true_value': 15.3,
'new_initial_value': 7.3,
'new_true_value': 7.3
}, {
'initial_value': ml_collections.FieldReference(-7),
'true_value': ml_collections.FieldReference(7),
'new_initial_value': 3,
'new_true_value': 3
}, {
'initial_value': config_dict.placeholder(float),
'true_value': None,
'new_initial_value': -6.25,
'new_true_value': 6.25
})
def testAbs(self, initial_value, true_value, new_initial_value,
new_true_value):
self._test_unary_operator(initial_value, operator.abs, true_value,
new_initial_value, new_true_value)
def testToInt(self):
self._test_unary_operator(25.3, lambda ref: ref.to_int(), 25, 27.9, 27)
ref = ml_collections.FieldReference(64.7)
ref = ref.to_int()
self.assertEqual(ref.get(), 64)
self.assertEqual(ref._field_type, int)
def testToFloat(self):
self._test_unary_operator(12, lambda ref: ref.to_float(), 12.0, 0, 0.0)
ref = ml_collections.FieldReference(647)
ref = ref.to_float()
self.assertEqual(ref.get(), 647.0)
self.assertEqual(ref._field_type, float)
def testToString(self):
self._test_unary_operator(12, lambda ref: ref.to_str(), '12', 0, '0')
ref = ml_collections.FieldReference(647)
ref = ref.to_str()
self.assertEqual(ref.get(), '647')
self.assertEqual(ref._field_type, str)
def testSetValue(self):
ref = ml_collections.FieldReference(1.0)
other = ml_collections.FieldReference(3)
ref_plus_other = ref + other
self.assertEqual(ref_plus_other.get(), 4.0)
ref.set(2.5)
self.assertEqual(ref_plus_other.get(), 5.5)
other.set(110)
self.assertEqual(ref_plus_other.get(), 112.5)
# Type checking
with self.assertRaises(TypeError):
other.set('this is a string')
with self.assertRaises(TypeError):
other.set(ml_collections.FieldReference('this is a string'))
with self.assertRaises(TypeError):
other.set(ml_collections.FieldReference(None, field_type=str))
def testSetResult(self):
ref = ml_collections.FieldReference(1.0)
result = ref + 1.0
second_result = result + 1.0
self.assertEqual(ref.get(), 1.0)
self.assertEqual(result.get(), 2.0)
self.assertEqual(second_result.get(), 3.0)
ref.set(2.0)
self.assertEqual(ref.get(), 2.0)
self.assertEqual(result.get(), 3.0)
self.assertEqual(second_result.get(), 4.0)
result.set(4.0)
self.assertEqual(ref.get(), 2.0)
self.assertEqual(result.get(), 4.0)
self.assertEqual(second_result.get(), 5.0)
# All references are broken at this point.
ref.set(1.0)
self.assertEqual(ref.get(), 1.0)
self.assertEqual(result.get(), 4.0)
self.assertEqual(second_result.get(), 5.0)
def testTypeChecking(self):
ref = ml_collections.FieldReference(1)
string_ref = ml_collections.FieldReference('a')
x = ref + string_ref
with self.assertRaises(TypeError):
x.get()
def testNoType(self):
self.assertRaisesRegex(TypeError, 'field_type should be a type.*',
ml_collections.FieldReference, None, 0)
def testEqual(self):
# Simple case
ref1 = ml_collections.FieldReference(1)
ref2 = ml_collections.FieldReference(1)
ref3 = ml_collections.FieldReference(2)
self.assertEqual(ref1, 1)
self.assertEqual(ref1, ref1)
self.assertEqual(ref1, ref2)
self.assertNotEqual(ref1, 2)
self.assertNotEqual(ref1, ref3)
# ConfigDict inside FieldReference
ref1 = ml_collections.FieldReference(ml_collections.ConfigDict({'a': 1}))
ref2 = ml_collections.FieldReference(ml_collections.ConfigDict({'a': 1}))
ref3 = ml_collections.FieldReference(ml_collections.ConfigDict({'a': 2}))
self.assertEqual(ref1, ml_collections.ConfigDict({'a': 1}))
self.assertEqual(ref1, ref1)
self.assertEqual(ref1, ref2)
self.assertNotEqual(ref1, ml_collections.ConfigDict({'a': 2}))
self.assertNotEqual(ref1, ref3)
def testLessEqual(self):
# Simple case
ref1 = ml_collections.FieldReference(1)
ref2 = ml_collections.FieldReference(1)
ref3 = ml_collections.FieldReference(2)
self.assertLessEqual(ref1, 1)
self.assertLessEqual(ref1, 2)
self.assertLessEqual(0, ref1)
self.assertLessEqual(1, ref1)
self.assertGreater(ref1, 0)
self.assertLessEqual(ref1, ref1)
self.assertLessEqual(ref1, ref2)
self.assertLessEqual(ref1, ref3)
self.assertGreater(ref3, ref1)
def testControlFlowError(self):
ref1 = ml_collections.FieldReference(True)
ref2 = ml_collections.FieldReference(False)
with self.assertRaises(NotImplementedError):
if ref1:
pass
with self.assertRaises(NotImplementedError):
_ = ref1 and ref2
with self.assertRaises(NotImplementedError):
_ = ref1 or ref2
with self.assertRaises(NotImplementedError):
_ = not ref1
if __name__ == '__main__':
absltest.main()
|
[
"absl.testing.absltest.main",
"ml_collections.config_dict.config_dict.placeholder",
"ml_collections.ConfigDict",
"ml_collections.config_dict.config_dict.create",
"ml_collections.FieldReference"
] |
[((24528, 24543), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (24541, 24543), False, 'from absl.testing import absltest\n'), ((2095, 2139), 'ml_collections.FieldReference', 'ml_collections.FieldReference', (['initial_value'], {}), '(initial_value)\n', (2124, 2139), False, 'import ml_collections\n'), ((2230, 2257), 'ml_collections.ConfigDict', 'ml_collections.ConfigDict', ([], {}), '()\n', (2255, 2257), False, 'import ml_collections\n'), ((3566, 3610), 'ml_collections.FieldReference', 'ml_collections.FieldReference', (['initial_value'], {}), '(initial_value)\n', (3595, 3610), False, 'import ml_collections\n'), ((3688, 3715), 'ml_collections.ConfigDict', 'ml_collections.ConfigDict', ([], {}), '()\n', (3713, 3715), False, 'import ml_collections\n'), ((3943, 3975), 'ml_collections.FieldReference', 'ml_collections.FieldReference', (['(1)'], {}), '(1)\n', (3972, 3975), False, 'import ml_collections\n'), ((4049, 4076), 'ml_collections.ConfigDict', 'ml_collections.ConfigDict', ([], {}), '()\n', (4074, 4076), False, 'import ml_collections\n'), ((4288, 4315), 'ml_collections.ConfigDict', 'ml_collections.ConfigDict', ([], {}), '()\n', (4313, 4315), False, 'import ml_collections\n'), ((4605, 4632), 'ml_collections.ConfigDict', 'ml_collections.ConfigDict', ([], {}), '()\n', (4630, 4632), False, 'import ml_collections\n'), ((5238, 5265), 'ml_collections.ConfigDict', 'ml_collections.ConfigDict', ([], {}), '()\n', (5263, 5265), False, 'import ml_collections\n'), ((20345, 20380), 'ml_collections.FieldReference', 'ml_collections.FieldReference', (['(64.7)'], {}), '(64.7)\n', (20374, 20380), False, 'import ml_collections\n'), ((20596, 20630), 'ml_collections.FieldReference', 'ml_collections.FieldReference', (['(647)'], {}), '(647)\n', (20625, 20630), False, 'import ml_collections\n'), ((20852, 20886), 'ml_collections.FieldReference', 'ml_collections.FieldReference', (['(647)'], {}), '(647)\n', (20881, 20886), False, 'import ml_collections\n'), ((21029, 21063), 'ml_collections.FieldReference', 'ml_collections.FieldReference', (['(1.0)'], {}), '(1.0)\n', (21058, 21063), False, 'import ml_collections\n'), ((21076, 21108), 'ml_collections.FieldReference', 'ml_collections.FieldReference', (['(3)'], {}), '(3)\n', (21105, 21108), False, 'import ml_collections\n'), ((21677, 21711), 'ml_collections.FieldReference', 'ml_collections.FieldReference', (['(1.0)'], {}), '(1.0)\n', (21706, 21711), False, 'import ml_collections\n'), ((22410, 22442), 'ml_collections.FieldReference', 'ml_collections.FieldReference', (['(1)'], {}), '(1)\n', (22439, 22442), False, 'import ml_collections\n'), ((22460, 22494), 'ml_collections.FieldReference', 'ml_collections.FieldReference', (['"""a"""'], {}), "('a')\n", (22489, 22494), False, 'import ml_collections\n'), ((22790, 22822), 'ml_collections.FieldReference', 'ml_collections.FieldReference', (['(1)'], {}), '(1)\n', (22819, 22822), False, 'import ml_collections\n'), ((22834, 22866), 'ml_collections.FieldReference', 'ml_collections.FieldReference', (['(1)'], {}), '(1)\n', (22863, 22866), False, 'import ml_collections\n'), ((22878, 22910), 'ml_collections.FieldReference', 'ml_collections.FieldReference', (['(2)'], {}), '(2)\n', (22907, 22910), False, 'import ml_collections\n'), ((23640, 23672), 'ml_collections.FieldReference', 'ml_collections.FieldReference', (['(1)'], {}), '(1)\n', (23669, 23672), False, 'import ml_collections\n'), ((23684, 23716), 'ml_collections.FieldReference', 'ml_collections.FieldReference', (['(1)'], {}), '(1)\n', (23713, 23716), False, 'import ml_collections\n'), ((23728, 23760), 'ml_collections.FieldReference', 'ml_collections.FieldReference', (['(2)'], {}), '(2)\n', (23757, 23760), False, 'import ml_collections\n'), ((24122, 24157), 'ml_collections.FieldReference', 'ml_collections.FieldReference', (['(True)'], {}), '(True)\n', (24151, 24157), False, 'import ml_collections\n'), ((24169, 24205), 'ml_collections.FieldReference', 'ml_collections.FieldReference', (['(False)'], {}), '(False)\n', (24198, 24205), False, 'import ml_collections\n'), ((6544, 6577), 'ml_collections.FieldReference', 'ml_collections.FieldReference', (['(10)'], {}), '(10)\n', (6573, 6577), False, 'import ml_collections\n'), ((6604, 6638), 'ml_collections.FieldReference', 'ml_collections.FieldReference', (['(5.0)'], {}), '(5.0)\n', (6633, 6638), False, 'import ml_collections\n'), ((6776, 6806), 'ml_collections.config_dict.config_dict.placeholder', 'config_dict.placeholder', (['float'], {}), '(float)\n', (6799, 6806), False, 'from ml_collections.config_dict import config_dict\n'), ((7004, 7034), 'ml_collections.config_dict.config_dict.placeholder', 'config_dict.placeholder', (['float'], {}), '(float)\n', (7027, 7034), False, 'from ml_collections.config_dict import config_dict\n'), ((7173, 7201), 'ml_collections.config_dict.config_dict.placeholder', 'config_dict.placeholder', (['str'], {}), '(str)\n', (7196, 7201), False, 'from ml_collections.config_dict import config_dict\n'), ((8238, 8271), 'ml_collections.FieldReference', 'ml_collections.FieldReference', (['(10)'], {}), '(10)\n', (8267, 8271), False, 'import ml_collections\n'), ((8298, 8332), 'ml_collections.FieldReference', 'ml_collections.FieldReference', (['(5.0)'], {}), '(5.0)\n', (8327, 8332), False, 'import ml_collections\n'), ((8468, 8498), 'ml_collections.config_dict.config_dict.placeholder', 'config_dict.placeholder', (['float'], {}), '(float)\n', (8491, 8498), False, 'from ml_collections.config_dict import config_dict\n'), ((9513, 9546), 'ml_collections.FieldReference', 'ml_collections.FieldReference', (['(10)'], {}), '(10)\n', (9542, 9546), False, 'import ml_collections\n'), ((9573, 9607), 'ml_collections.FieldReference', 'ml_collections.FieldReference', (['(5.0)'], {}), '(5.0)\n', (9602, 9607), False, 'import ml_collections\n'), ((9743, 9773), 'ml_collections.config_dict.config_dict.placeholder', 'config_dict.placeholder', (['float'], {}), '(float)\n', (9766, 9773), False, 'from ml_collections.config_dict import config_dict\n'), ((10574, 10607), 'ml_collections.FieldReference', 'ml_collections.FieldReference', (['(10)'], {}), '(10)\n', (10603, 10607), False, 'import ml_collections\n'), ((10634, 10668), 'ml_collections.FieldReference', 'ml_collections.FieldReference', (['(5.0)'], {}), '(5.0)\n', (10663, 10668), False, 'import ml_collections\n'), ((10804, 10834), 'ml_collections.config_dict.config_dict.placeholder', 'config_dict.placeholder', (['float'], {}), '(float)\n', (10827, 10834), False, 'from ml_collections.config_dict import config_dict\n'), ((11472, 11505), 'ml_collections.FieldReference', 'ml_collections.FieldReference', (['(10)'], {}), '(10)\n', (11501, 11505), False, 'import ml_collections\n'), ((11532, 11564), 'ml_collections.FieldReference', 'ml_collections.FieldReference', (['(5)'], {}), '(5)\n', (11561, 11564), False, 'import ml_collections\n'), ((11696, 11724), 'ml_collections.config_dict.config_dict.placeholder', 'config_dict.placeholder', (['int'], {}), '(int)\n', (11719, 11724), False, 'from ml_collections.config_dict import config_dict\n'), ((12552, 12585), 'ml_collections.FieldReference', 'ml_collections.FieldReference', (['(10)'], {}), '(10)\n', (12581, 12585), False, 'import ml_collections\n'), ((12612, 12644), 'ml_collections.FieldReference', 'ml_collections.FieldReference', (['(5)'], {}), '(5)\n', (12641, 12644), False, 'import ml_collections\n'), ((12778, 12808), 'ml_collections.config_dict.config_dict.placeholder', 'config_dict.placeholder', (['float'], {}), '(float)\n', (12801, 12808), False, 'from ml_collections.config_dict import config_dict\n'), ((13680, 13713), 'ml_collections.FieldReference', 'ml_collections.FieldReference', (['(10)'], {}), '(10)\n', (13709, 13713), False, 'import ml_collections\n'), ((13740, 13772), 'ml_collections.FieldReference', 'ml_collections.FieldReference', (['(5)'], {}), '(5)\n', (13769, 13772), False, 'import ml_collections\n'), ((13904, 13932), 'ml_collections.config_dict.config_dict.placeholder', 'config_dict.placeholder', (['int'], {}), '(int)\n', (13927, 13932), False, 'from ml_collections.config_dict import config_dict\n'), ((14629, 14665), 'ml_collections.FieldReference', 'ml_collections.FieldReference', (['(False)'], {}), '(False)\n', (14658, 14665), False, 'import ml_collections\n'), ((14692, 14728), 'ml_collections.FieldReference', 'ml_collections.FieldReference', (['(False)'], {}), '(False)\n', (14721, 14728), False, 'import ml_collections\n'), ((14870, 14899), 'ml_collections.config_dict.config_dict.placeholder', 'config_dict.placeholder', (['bool'], {}), '(bool)\n', (14893, 14899), False, 'from ml_collections.config_dict import config_dict\n'), ((15548, 15583), 'ml_collections.FieldReference', 'ml_collections.FieldReference', (['(True)'], {}), '(True)\n', (15577, 15583), False, 'import ml_collections\n'), ((15610, 15645), 'ml_collections.FieldReference', 'ml_collections.FieldReference', (['(True)'], {}), '(True)\n', (15639, 15645), False, 'import ml_collections\n'), ((15786, 15815), 'ml_collections.config_dict.config_dict.placeholder', 'config_dict.placeholder', (['bool'], {}), '(bool)\n', (15809, 15815), False, 'from ml_collections.config_dict import config_dict\n'), ((16459, 16494), 'ml_collections.FieldReference', 'ml_collections.FieldReference', (['(True)'], {}), '(True)\n', (16488, 16494), False, 'import ml_collections\n'), ((16521, 16556), 'ml_collections.FieldReference', 'ml_collections.FieldReference', (['(True)'], {}), '(True)\n', (16550, 16556), False, 'import ml_collections\n'), ((16698, 16727), 'ml_collections.config_dict.config_dict.placeholder', 'config_dict.placeholder', (['bool'], {}), '(bool)\n', (16721, 16727), False, 'from ml_collections.config_dict import config_dict\n'), ((17476, 17508), 'ml_collections.FieldReference', 'ml_collections.FieldReference', (['(7)'], {}), '(7)\n', (17505, 17508), False, 'import ml_collections\n'), ((17534, 17567), 'ml_collections.FieldReference', 'ml_collections.FieldReference', (['(-7)'], {}), '(-7)\n', (17563, 17567), False, 'import ml_collections\n'), ((17676, 17704), 'ml_collections.config_dict.config_dict.placeholder', 'config_dict.placeholder', (['int'], {}), '(int)\n', (17699, 17704), False, 'from ml_collections.config_dict import config_dict\n'), ((18108, 18139), 'ml_collections.config_dict.config_dict.create', 'config_dict.create', ([], {'attribute': '(2)'}), '(attribute=2)\n', (18126, 18139), False, 'from ml_collections.config_dict import config_dict\n'), ((18199, 18230), 'ml_collections.config_dict.config_dict.create', 'config_dict.create', ([], {'attribute': '(3)'}), '(attribute=3)\n', (18217, 18230), False, 'from ml_collections.config_dict import config_dict\n'), ((18307, 18345), 'ml_collections.config_dict.config_dict.create', 'config_dict.create', ([], {'attribute': "{'a': 1}"}), "(attribute={'a': 1})\n", (18325, 18345), False, 'from ml_collections.config_dict import config_dict\n'), ((18371, 18394), 'ml_collections.config_dict.config_dict.create', 'config_dict.create', ([], {'a': '(1)'}), '(a=1)\n', (18389, 18394), False, 'from ml_collections.config_dict import config_dict\n'), ((18427, 18465), 'ml_collections.config_dict.config_dict.create', 'config_dict.create', ([], {'attribute': "{'b': 1}"}), "(attribute={'b': 1})\n", (18445, 18465), False, 'from ml_collections.config_dict import config_dict\n'), ((18495, 18518), 'ml_collections.config_dict.config_dict.create', 'config_dict.create', ([], {'b': '(1)'}), '(b=1)\n', (18513, 18518), False, 'from ml_collections.config_dict import config_dict\n'), ((18680, 18712), 'ml_collections.FieldReference', 'ml_collections.FieldReference', (['(2)'], {}), '(2)\n', (18709, 18712), False, 'import ml_collections\n'), ((18759, 18790), 'ml_collections.config_dict.config_dict.create', 'config_dict.create', ([], {'attribute': '(3)'}), '(attribute=3)\n', (18777, 18790), False, 'from ml_collections.config_dict import config_dict\n'), ((18881, 18928), 'ml_collections.config_dict.config_dict.placeholder', 'config_dict.placeholder', (['config_dict.ConfigDict'], {}), '(config_dict.ConfigDict)\n', (18904, 18928), False, 'from ml_collections.config_dict import config_dict\n'), ((18991, 19022), 'ml_collections.config_dict.config_dict.create', 'config_dict.create', ([], {'attribute': '(3)'}), '(attribute=3)\n', (19009, 19022), False, 'from ml_collections.config_dict import config_dict\n'), ((19665, 19698), 'ml_collections.FieldReference', 'ml_collections.FieldReference', (['(-7)'], {}), '(-7)\n', (19694, 19698), False, 'import ml_collections\n'), ((19724, 19756), 'ml_collections.FieldReference', 'ml_collections.FieldReference', (['(7)'], {}), '(7)\n', (19753, 19756), False, 'import ml_collections\n'), ((19860, 19890), 'ml_collections.config_dict.config_dict.placeholder', 'config_dict.placeholder', (['float'], {}), '(float)\n', (19883, 19890), False, 'from ml_collections.config_dict import config_dict\n'), ((23157, 23192), 'ml_collections.ConfigDict', 'ml_collections.ConfigDict', (["{'a': 1}"], {}), "({'a': 1})\n", (23182, 23192), False, 'import ml_collections\n'), ((23235, 23270), 'ml_collections.ConfigDict', 'ml_collections.ConfigDict', (["{'a': 1}"], {}), "({'a': 1})\n", (23260, 23270), False, 'import ml_collections\n'), ((23313, 23348), 'ml_collections.ConfigDict', 'ml_collections.ConfigDict', (["{'a': 2}"], {}), "({'a': 2})\n", (23338, 23348), False, 'import ml_collections\n'), ((23377, 23412), 'ml_collections.ConfigDict', 'ml_collections.ConfigDict', (["{'a': 1}"], {}), "({'a': 1})\n", (23402, 23412), False, 'import ml_collections\n'), ((23510, 23545), 'ml_collections.ConfigDict', 'ml_collections.ConfigDict', (["{'a': 2}"], {}), "({'a': 2})\n", (23535, 23545), False, 'import ml_collections\n'), ((5076, 5110), 'ml_collections.FieldReference', 'ml_collections.FieldReference', (['(5.0)'], {}), '(5.0)\n', (5105, 5110), False, 'import ml_collections\n'), ((18608, 18639), 'ml_collections.config_dict.config_dict.create', 'config_dict.create', ([], {'attribute': '(2)'}), '(attribute=2)\n', (18626, 18639), False, 'from ml_collections.config_dict import config_dict\n'), ((21479, 21528), 'ml_collections.FieldReference', 'ml_collections.FieldReference', (['"""this is a string"""'], {}), "('this is a string')\n", (21508, 21528), False, 'import ml_collections\n'), ((21586, 21637), 'ml_collections.FieldReference', 'ml_collections.FieldReference', (['None'], {'field_type': 'str'}), '(None, field_type=str)\n', (21615, 21637), False, 'import ml_collections\n')]
|
# Standard
import gc
from pathlib import Path
import time
# PIP
from ignite.metrics import PSNR, SSIM
from lpips import LPIPS
from ptflops import get_model_complexity_info
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
# Custom
from custom.softsplat.model import SoftSplat
from custom.vimeo.dataset import Vimeo
# Timing utilities
start_time = None
def start_timer():
global start_time
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_peak_memory_stats()
torch.cuda.synchronize()
start_time = time.time()
def end_timer_and_print():
global start_time
torch.cuda.synchronize()
end_time = time.time()
print("Total execution time = {:.3f} sec".format(end_time - start_time))
memory = torch.cuda.max_memory_allocated() // 1024 // 1024
print(f"Max memory used by tensors = {memory}MB")
def test(cfg):
print(f"[ {cfg.model.flow_extractor} ]")
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
work_dir = Path(cfg.work_dir).absolute()
data_dir = work_dir / cfg.data_dir
weight_dir = work_dir / cfg.weight_dir
# Load data
test_dataset = Vimeo(
data_dir=data_dir,
state="test",
is_pt=False,
is_aug=False,
is_crop=True,
)
test_dataloader = DataLoader(
test_dataset,
batch_size=cfg.batch_size,
num_workers=cfg.num_workers,
pin_memory=True,
)
# Init model
model = SoftSplat(cfg.model).to(device)
model.eval()
if cfg.flops:
with torch.no_grad():
with torch.cuda.amp.autocast(enabled=cfg.amp):
macs, params = get_model_complexity_info(model, (3, cfg.model.height, cfg.model.width))
print("{:<30} {:<8}".format("Computational complexity: ", macs))
print("{:<30} {:<8}".format("Number of parameters: ", params))
return
# Load model
if cfg.name != "none":
weight_path = weight_dir / f"{cfg.name}.pt"
print(f"Load {cfg.name} model from {weight_path}")
state_dict = torch.load(weight_path)
model.load_state_dict(state_dict)
# Set metrics
if cfg.psnr:
metric_psnr = PSNR(data_range=1.0, device=device)
if cfg.ssim:
metric_ssim = SSIM(data_range=1.0, device=device)
if cfg.lpips:
calculate_lpips = LPIPS(net="alex", verbose=False).to(device)
# Inference
total_psnr = 0
total_ssim = 0
total_lpips = 0
with torch.no_grad():
start_timer()
for batch in tqdm(test_dataloader):
img1, img2, y = batch
img1 = img1.to(device)
img2 = img2.to(device)
if cfg.psnr or cfg.ssim or cfg.lpips:
y = y.to(device)
with torch.cuda.amp.autocast(enabled=cfg.amp):
y_hat = model(img1, img2)
if cfg.amp:
y_hat = y_hat.float()
if cfg.psnr:
metric_psnr.update((y_hat, y))
total_psnr += metric_psnr.compute()
metric_psnr.reset()
if cfg.ssim:
metric_ssim.update((y_hat, y))
total_ssim += metric_ssim.compute()
metric_ssim.reset()
if cfg.lpips:
total_lpips += calculate_lpips(y_hat, y).mean()
end_timer_and_print()
if cfg.psnr:
average_psnr = total_psnr / len(test_dataloader)
print(f"PSNR: {average_psnr:.4f}")
if cfg.ssim:
average_ssim = total_ssim / len(test_dataloader)
print(f"SSIM: {average_ssim:.4f}")
if cfg.lpips:
average_lpips = total_lpips / len(test_dataloader)
print(f"LPIPS: {average_lpips:.5f}")
|
[
"torch.cuda.synchronize",
"custom.vimeo.dataset.Vimeo",
"torch.cuda.max_memory_allocated",
"gc.collect",
"pathlib.Path",
"torch.no_grad",
"torch.cuda.amp.autocast",
"torch.utils.data.DataLoader",
"torch.load",
"ptflops.get_model_complexity_info",
"tqdm.tqdm",
"torch.cuda.reset_peak_memory_stats",
"ignite.metrics.PSNR",
"custom.softsplat.model.SoftSplat",
"ignite.metrics.SSIM",
"torch.cuda.is_available",
"time.time",
"lpips.LPIPS",
"torch.cuda.empty_cache"
] |
[((428, 440), 'gc.collect', 'gc.collect', ([], {}), '()\n', (438, 440), False, 'import gc\n'), ((445, 469), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (467, 469), False, 'import torch\n'), ((474, 510), 'torch.cuda.reset_peak_memory_stats', 'torch.cuda.reset_peak_memory_stats', ([], {}), '()\n', (508, 510), False, 'import torch\n'), ((515, 539), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (537, 539), False, 'import torch\n'), ((557, 568), 'time.time', 'time.time', ([], {}), '()\n', (566, 568), False, 'import time\n'), ((624, 648), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (646, 648), False, 'import torch\n'), ((664, 675), 'time.time', 'time.time', ([], {}), '()\n', (673, 675), False, 'import time\n'), ((1173, 1252), 'custom.vimeo.dataset.Vimeo', 'Vimeo', ([], {'data_dir': 'data_dir', 'state': '"""test"""', 'is_pt': '(False)', 'is_aug': '(False)', 'is_crop': '(True)'}), "(data_dir=data_dir, state='test', is_pt=False, is_aug=False, is_crop=True)\n", (1178, 1252), False, 'from custom.vimeo.dataset import Vimeo\n'), ((1322, 1424), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset'], {'batch_size': 'cfg.batch_size', 'num_workers': 'cfg.num_workers', 'pin_memory': '(True)'}), '(test_dataset, batch_size=cfg.batch_size, num_workers=cfg.\n num_workers, pin_memory=True)\n', (1332, 1424), False, 'from torch.utils.data import DataLoader\n'), ((2112, 2135), 'torch.load', 'torch.load', (['weight_path'], {}), '(weight_path)\n', (2122, 2135), False, 'import torch\n'), ((2236, 2271), 'ignite.metrics.PSNR', 'PSNR', ([], {'data_range': '(1.0)', 'device': 'device'}), '(data_range=1.0, device=device)\n', (2240, 2271), False, 'from ignite.metrics import PSNR, SSIM\n'), ((2311, 2346), 'ignite.metrics.SSIM', 'SSIM', ([], {'data_range': '(1.0)', 'device': 'device'}), '(data_range=1.0, device=device)\n', (2315, 2346), False, 'from ignite.metrics import PSNR, SSIM\n'), ((2519, 2534), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2532, 2534), False, 'import torch\n'), ((2579, 2600), 'tqdm.tqdm', 'tqdm', (['test_dataloader'], {}), '(test_dataloader)\n', (2583, 2600), False, 'from tqdm import tqdm\n'), ((766, 799), 'torch.cuda.max_memory_allocated', 'torch.cuda.max_memory_allocated', ([], {}), '()\n', (797, 799), False, 'import torch\n'), ((971, 996), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (994, 996), False, 'import torch\n'), ((1025, 1043), 'pathlib.Path', 'Path', (['cfg.work_dir'], {}), '(cfg.work_dir)\n', (1029, 1043), False, 'from pathlib import Path\n'), ((1489, 1509), 'custom.softsplat.model.SoftSplat', 'SoftSplat', (['cfg.model'], {}), '(cfg.model)\n', (1498, 1509), False, 'from custom.softsplat.model import SoftSplat\n'), ((1570, 1585), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1583, 1585), False, 'import torch\n'), ((1604, 1644), 'torch.cuda.amp.autocast', 'torch.cuda.amp.autocast', ([], {'enabled': 'cfg.amp'}), '(enabled=cfg.amp)\n', (1627, 1644), False, 'import torch\n'), ((1677, 1749), 'ptflops.get_model_complexity_info', 'get_model_complexity_info', (['model', '(3, cfg.model.height, cfg.model.width)'], {}), '(model, (3, cfg.model.height, cfg.model.width))\n', (1702, 1749), False, 'from ptflops import get_model_complexity_info\n'), ((2391, 2423), 'lpips.LPIPS', 'LPIPS', ([], {'net': '"""alex"""', 'verbose': '(False)'}), "(net='alex', verbose=False)\n", (2396, 2423), False, 'from lpips import LPIPS\n'), ((2807, 2847), 'torch.cuda.amp.autocast', 'torch.cuda.amp.autocast', ([], {'enabled': 'cfg.amp'}), '(enabled=cfg.amp)\n', (2830, 2847), False, 'import torch\n')]
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# BSD 3-Clause License
#
# Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
#
# Author & Contact: <NAME> (<EMAIL>)
###############################################################################
from typing import Tuple
import torch
import torch.nn.functional as F
from torch import nn
class PartialConv1d(nn.Conv1d):
def __init__(self, *args, **kwargs):
self.multi_channel = False
self.return_mask = False
super(PartialConv1d, self).__init__(*args, **kwargs)
self.weight_maskUpdater = torch.ones(1, 1, self.kernel_size[0])
self.slide_winsize = self.weight_maskUpdater.shape[1] * self.weight_maskUpdater.shape[2]
self.last_size = (None, None, None)
self.update_mask = None
self.mask_ratio = None
@torch.jit.ignore
def forward(self, input: torch.Tensor, mask_in: Tuple[int, int, int] = None):
assert len(input.shape) == 3
# if a mask is input, or tensor shape changed, update mask ratio
if mask_in is not None or self.last_size != tuple(input.shape):
self.last_size = tuple(input.shape)
with torch.no_grad():
if self.weight_maskUpdater.type() != input.type():
self.weight_maskUpdater = self.weight_maskUpdater.to(input)
if mask_in is None:
mask = torch.ones(1, 1, input.data.shape[2]).to(input)
else:
mask = mask_in
self.update_mask = F.conv1d(
mask,
self.weight_maskUpdater,
bias=None,
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
groups=1,
)
# for mixed precision training, change 1e-8 to 1e-6
self.mask_ratio = self.slide_winsize / (self.update_mask + 1e-6)
self.update_mask = torch.clamp(self.update_mask, 0, 1)
self.mask_ratio = torch.mul(self.mask_ratio, self.update_mask)
raw_out = super(PartialConv1d, self).forward(torch.mul(input, mask) if mask_in is not None else input)
if self.bias is not None:
bias_view = self.bias.view(1, self.out_channels, 1)
output = torch.mul(raw_out - bias_view, self.mask_ratio) + bias_view
output = torch.mul(output, self.update_mask)
else:
output = torch.mul(raw_out, self.mask_ratio)
if self.return_mask:
return output, self.update_mask
else:
return output
|
[
"torch.ones",
"torch.mul",
"torch.clamp",
"torch.nn.functional.conv1d",
"torch.no_grad"
] |
[((1230, 1267), 'torch.ones', 'torch.ones', (['(1)', '(1)', 'self.kernel_size[0]'], {}), '(1, 1, self.kernel_size[0])\n', (1240, 1267), False, 'import torch\n'), ((3088, 3123), 'torch.mul', 'torch.mul', (['output', 'self.update_mask'], {}), '(output, self.update_mask)\n', (3097, 3123), False, 'import torch\n'), ((3159, 3194), 'torch.mul', 'torch.mul', (['raw_out', 'self.mask_ratio'], {}), '(raw_out, self.mask_ratio)\n', (3168, 3194), False, 'import torch\n'), ((1825, 1840), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1838, 1840), False, 'import torch\n'), ((2192, 2322), 'torch.nn.functional.conv1d', 'F.conv1d', (['mask', 'self.weight_maskUpdater'], {'bias': 'None', 'stride': 'self.stride', 'padding': 'self.padding', 'dilation': 'self.dilation', 'groups': '(1)'}), '(mask, self.weight_maskUpdater, bias=None, stride=self.stride,\n padding=self.padding, dilation=self.dilation, groups=1)\n', (2200, 2322), True, 'import torch.nn.functional as F\n'), ((2662, 2697), 'torch.clamp', 'torch.clamp', (['self.update_mask', '(0)', '(1)'], {}), '(self.update_mask, 0, 1)\n', (2673, 2697), False, 'import torch\n'), ((2732, 2776), 'torch.mul', 'torch.mul', (['self.mask_ratio', 'self.update_mask'], {}), '(self.mask_ratio, self.update_mask)\n', (2741, 2776), False, 'import torch\n'), ((2830, 2852), 'torch.mul', 'torch.mul', (['input', 'mask'], {}), '(input, mask)\n', (2839, 2852), False, 'import torch\n'), ((3007, 3054), 'torch.mul', 'torch.mul', (['(raw_out - bias_view)', 'self.mask_ratio'], {}), '(raw_out - bias_view, self.mask_ratio)\n', (3016, 3054), False, 'import torch\n'), ((2052, 2089), 'torch.ones', 'torch.ones', (['(1)', '(1)', 'input.data.shape[2]'], {}), '(1, 1, input.data.shape[2])\n', (2062, 2089), False, 'import torch\n')]
|
# Generated by Django 2.2.1 on 2019-05-15 09:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('DiseaseClassify', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='uploadimage',
name='predict_image',
field=models.FileField(upload_to='predict_image/'),
),
]
|
[
"django.db.models.FileField"
] |
[((345, 389), 'django.db.models.FileField', 'models.FileField', ([], {'upload_to': '"""predict_image/"""'}), "(upload_to='predict_image/')\n", (361, 389), False, 'from django.db import migrations, models\n')]
|
from rlcore.algo import PPO
from rlcore.storage import RolloutStorage
class Neo(object):
def __init__(self, args, policy, obs_shape, action_space):
super().__init__()
self.obs_shape = obs_shape
self.action_space = action_space
self.actor_critic = policy # it is MPNN instance
self.rollouts = RolloutStorage(args.num_steps, args.num_processes, self.obs_shape, self.action_space,
recurrent_hidden_state_size=1)
self.args = args
self.trainer = PPO(self.actor_critic, args.clip_param, args.ppo_epoch, args.num_mini_batch, args.value_loss_coef,
args.entropy_coef, lr=args.lr,max_grad_norm=args.max_grad_norm)
self.alive = True ##
def load_model(self, policy_state):
self.actor_critic.load_state_dict(policy_state)
def initialize_obs(self, obs):
# this function is called at the start of episode
self.rollouts.reset()
self.rollouts.obs[0].copy_(obs)
def initialize_new_episode(self, step, obs, masks):
## one rollout can have multiple episodes
self.rollouts.obs[step].copy_(obs)
self.rollouts.masks[step].copy_(masks)
def update_rollout(self, obs, reward, mask):
self.rollouts.insert(obs, self.states, self.action, self.action_log_prob, self.value, reward, mask)
def act(self, step, deterministic=False):
self.value, self.action, self.action_log_prob, self.states = self.actor_critic.act(self.rollouts.obs[step],
self.rollouts.recurrent_hidden_states[step],self.rollouts.masks[step],deterministic=deterministic)
return self.action
def wrap_horizon(self, next_value, start_pt, end_pt):
self.rollouts.compute_returns(next_value, True, self.args.gamma, self.args.tau, start_pt, end_pt)
def before_update(self):
self.rollouts.before_update()
def after_update(self):
self.rollouts.after_update()
def update(self):
return self.trainer.update(self.rollouts)
|
[
"rlcore.storage.RolloutStorage",
"rlcore.algo.PPO"
] |
[((325, 446), 'rlcore.storage.RolloutStorage', 'RolloutStorage', (['args.num_steps', 'args.num_processes', 'self.obs_shape', 'self.action_space'], {'recurrent_hidden_state_size': '(1)'}), '(args.num_steps, args.num_processes, self.obs_shape, self.\n action_space, recurrent_hidden_state_size=1)\n', (339, 446), False, 'from rlcore.storage import RolloutStorage\n'), ((518, 690), 'rlcore.algo.PPO', 'PPO', (['self.actor_critic', 'args.clip_param', 'args.ppo_epoch', 'args.num_mini_batch', 'args.value_loss_coef', 'args.entropy_coef'], {'lr': 'args.lr', 'max_grad_norm': 'args.max_grad_norm'}), '(self.actor_critic, args.clip_param, args.ppo_epoch, args.num_mini_batch,\n args.value_loss_coef, args.entropy_coef, lr=args.lr, max_grad_norm=args\n .max_grad_norm)\n', (521, 690), False, 'from rlcore.algo import PPO\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 6 14:52:32 2021
@author: Patrice
Simple utility script to read tiles from drive and compile a large tensor saved as an npy file.
Use only if you have enough ram to contain all your samples at once
"""
import numpy as np
import glob
import skimage.io as io
def tic():
#Homemade version of matlab tic and toc functions
import time
global startTime_for_tictoc
startTime_for_tictoc = time.time()
def toc():
import time
if 'startTime_for_tictoc' in globals():
print ("Elapsed time is " + str(time.time() - startTime_for_tictoc) + " seconds.")
else:
print ("Toc: start time not set")
tic()
folder='/media/patrice/DataDrive/SEE_ICE/JointTrain/'
OutputName='JointTensor5k'
tilesize=50
bands=4
classes=7
subsample=1#percentage subsample in each class
NormFactor=8192 #will save a normalised tensor ready for the CNN, better for memory to normalise now
UINT8=False #if true this will overide NormFactor and reduce the radiometry to 8-bit via normalisation by 16384
FP16=True #cast final tensor in float 16 for mixed precision training
Itot=0
for c in range(1,classes+1):
class_folder=folder+'C'+str(c)+'/'
clist=glob.glob(class_folder+'*.tif')
Itot=Itot+len(clist)
print ('found '+str(Itot)+' tile samples')
MasterTensor=np.zeros((int(subsample*Itot),tilesize,tilesize,bands), dtype='float16')
MasterLabel=np.zeros((int(subsample*Itot)), dtype='float16')
tile=0
for c in range(1,classes+1):
class_folder=folder+'C'+str(c)+'/'
clist=glob.glob(class_folder+'*.tif')
idx = np.random.choice(np.arange(len(clist)), int(len(clist)*subsample), replace=False)
for i in range(len(idx)):
I=io.imread(clist[idx[i]]).reshape((1,tilesize,tilesize,bands))
Label=c
MasterLabel[tile] = Label
if UINT8 and not(FP16):
MasterTensor=np.uint8(MasterTensor)
MasterTensor[tile,:,:,:] = np.uint8(255*I/16384)
elif FP16 and UINT8:
MasterTensor=np.float16(MasterTensor)
I= np.uint8(255*I/16384)
MasterTensor[tile,:,:,:]=np.float16(I/255)
elif not(UINT8) and FP16:
MasterTensor=np.float16(MasterTensor)
MasterTensor[tile,:,:,:]=np.float16(I/NormFactor)
else:
MasterTensor=np.int16(MasterTensor)
MasterTensor[tile,:,:,:]=np.int16(I)
tile+=1
print('Class '+str(c)+' compiled')
if UINT8 and not(FP16):#downsample radiometry and save as uint8
np.save(folder+OutputName+'_T_uint8',MasterTensor)
np.save(folder+OutputName+'_L_uint8',MasterLabel)
elif FP16 and UINT8:#data will be float 16, but first they have been downsampled to 8bit before normalisation
np.save(folder+OutputName+'_T_uint8float16',MasterTensor)
np.save(folder+OutputName+'_L_uint8float16',MasterLabel)
elif not(UINT8) and FP16:
np.save(folder+OutputName+'_T_float16',MasterTensor)
np.save(folder+OutputName+'_L_float16',MasterLabel)
else:
np.save(folder+OutputName+'_T_int16',MasterTensor)
np.save(folder+OutputName+'_L_int16',MasterLabel)
#Output as npy arrays for both the tensor and the label
toc()
|
[
"numpy.float16",
"numpy.uint8",
"numpy.save",
"time.time",
"glob.glob",
"numpy.int16",
"skimage.io.imread"
] |
[((448, 459), 'time.time', 'time.time', ([], {}), '()\n', (457, 459), False, 'import time\n'), ((1210, 1243), 'glob.glob', 'glob.glob', (["(class_folder + '*.tif')"], {}), "(class_folder + '*.tif')\n", (1219, 1243), False, 'import glob\n'), ((1549, 1582), 'glob.glob', 'glob.glob', (["(class_folder + '*.tif')"], {}), "(class_folder + '*.tif')\n", (1558, 1582), False, 'import glob\n'), ((2535, 2590), 'numpy.save', 'np.save', (["(folder + OutputName + '_T_uint8')", 'MasterTensor'], {}), "(folder + OutputName + '_T_uint8', MasterTensor)\n", (2542, 2590), True, 'import numpy as np\n'), ((2590, 2644), 'numpy.save', 'np.save', (["(folder + OutputName + '_L_uint8')", 'MasterLabel'], {}), "(folder + OutputName + '_L_uint8', MasterLabel)\n", (2597, 2644), True, 'import numpy as np\n'), ((2760, 2822), 'numpy.save', 'np.save', (["(folder + OutputName + '_T_uint8float16')", 'MasterTensor'], {}), "(folder + OutputName + '_T_uint8float16', MasterTensor)\n", (2767, 2822), True, 'import numpy as np\n'), ((2822, 2883), 'numpy.save', 'np.save', (["(folder + OutputName + '_L_uint8float16')", 'MasterLabel'], {}), "(folder + OutputName + '_L_uint8float16', MasterLabel)\n", (2829, 2883), True, 'import numpy as np\n'), ((1890, 1912), 'numpy.uint8', 'np.uint8', (['MasterTensor'], {}), '(MasterTensor)\n', (1898, 1912), True, 'import numpy as np\n'), ((1952, 1977), 'numpy.uint8', 'np.uint8', (['(255 * I / 16384)'], {}), '(255 * I / 16384)\n', (1960, 1977), True, 'import numpy as np\n'), ((2919, 2976), 'numpy.save', 'np.save', (["(folder + OutputName + '_T_float16')", 'MasterTensor'], {}), "(folder + OutputName + '_T_float16', MasterTensor)\n", (2926, 2976), True, 'import numpy as np\n'), ((2976, 3032), 'numpy.save', 'np.save', (["(folder + OutputName + '_L_float16')", 'MasterLabel'], {}), "(folder + OutputName + '_L_float16', MasterLabel)\n", (2983, 3032), True, 'import numpy as np\n'), ((3045, 3100), 'numpy.save', 'np.save', (["(folder + OutputName + '_T_int16')", 'MasterTensor'], {}), "(folder + OutputName + '_T_int16', MasterTensor)\n", (3052, 3100), True, 'import numpy as np\n'), ((3100, 3154), 'numpy.save', 'np.save', (["(folder + OutputName + '_L_int16')", 'MasterLabel'], {}), "(folder + OutputName + '_L_int16', MasterLabel)\n", (3107, 3154), True, 'import numpy as np\n'), ((1721, 1745), 'skimage.io.imread', 'io.imread', (['clist[idx[i]]'], {}), '(clist[idx[i]])\n', (1730, 1745), True, 'import skimage.io as io\n'), ((2028, 2052), 'numpy.float16', 'np.float16', (['MasterTensor'], {}), '(MasterTensor)\n', (2038, 2052), True, 'import numpy as np\n'), ((2068, 2093), 'numpy.uint8', 'np.uint8', (['(255 * I / 16384)'], {}), '(255 * I / 16384)\n', (2076, 2093), True, 'import numpy as np\n'), ((2127, 2146), 'numpy.float16', 'np.float16', (['(I / 255)'], {}), '(I / 255)\n', (2137, 2146), True, 'import numpy as np\n'), ((2204, 2228), 'numpy.float16', 'np.float16', (['MasterTensor'], {}), '(MasterTensor)\n', (2214, 2228), True, 'import numpy as np\n'), ((2266, 2292), 'numpy.float16', 'np.float16', (['(I / NormFactor)'], {}), '(I / NormFactor)\n', (2276, 2292), True, 'import numpy as np\n'), ((2330, 2352), 'numpy.int16', 'np.int16', (['MasterTensor'], {}), '(MasterTensor)\n', (2338, 2352), True, 'import numpy as np\n'), ((2390, 2401), 'numpy.int16', 'np.int16', (['I'], {}), '(I)\n', (2398, 2401), True, 'import numpy as np\n'), ((572, 583), 'time.time', 'time.time', ([], {}), '()\n', (581, 583), False, 'import time\n')]
|
"""
Feedforward model construct
number of hidden layers:5
neural units of hidden layers: [2000, 1000, 800, 500, 100]
activation function: elu
"""
import torch as tch
class FNN(tch.nn.Module):
def __init__(self, n_inputs):
# call constructors from superclass
super(FNN, self).__init__()
# define network layers
self.hidden1 = tch.nn.Linear(n_inputs, 1000)
self.hidden2 = tch.nn.Linear(1000, 800)
self.hidden3 = tch.nn.Linear(800, 500)
self.hidden4 = tch.nn.Linear(500, 100)
self.output = tch.nn.Linear(100, 1)
# dropout
self.dropout = tch.nn.Dropout(p=0.1)
# activate
self.fnn = tch.nn.Sequential(self.hidden1, tch.nn.ELU(), self.dropout,
self.hidden2, tch.nn.ELU(), self.dropout,
self.hidden3, tch.nn.ELU(), self.dropout,
self.hidden4, tch.nn.ELU(), self.dropout,
self.output)
def forward(self, x):
return self.fnn(x)
if __name__ == "__main__":
net = FNN(756) #Feedforward_bn(100)
print('initiating an feed forward network....')
print(' construct=\n {:}'.format(net))
|
[
"torch.nn.Dropout",
"torch.nn.ELU",
"torch.nn.Linear"
] |
[((384, 413), 'torch.nn.Linear', 'tch.nn.Linear', (['n_inputs', '(1000)'], {}), '(n_inputs, 1000)\n', (397, 413), True, 'import torch as tch\n'), ((437, 461), 'torch.nn.Linear', 'tch.nn.Linear', (['(1000)', '(800)'], {}), '(1000, 800)\n', (450, 461), True, 'import torch as tch\n'), ((485, 508), 'torch.nn.Linear', 'tch.nn.Linear', (['(800)', '(500)'], {}), '(800, 500)\n', (498, 508), True, 'import torch as tch\n'), ((532, 555), 'torch.nn.Linear', 'tch.nn.Linear', (['(500)', '(100)'], {}), '(500, 100)\n', (545, 555), True, 'import torch as tch\n'), ((578, 599), 'torch.nn.Linear', 'tch.nn.Linear', (['(100)', '(1)'], {}), '(100, 1)\n', (591, 599), True, 'import torch as tch\n'), ((650, 671), 'torch.nn.Dropout', 'tch.nn.Dropout', ([], {'p': '(0.1)'}), '(p=0.1)\n', (664, 671), True, 'import torch as tch\n'), ((742, 754), 'torch.nn.ELU', 'tch.nn.ELU', ([], {}), '()\n', (752, 754), True, 'import torch as tch\n'), ((821, 833), 'torch.nn.ELU', 'tch.nn.ELU', ([], {}), '()\n', (831, 833), True, 'import torch as tch\n'), ((900, 912), 'torch.nn.ELU', 'tch.nn.ELU', ([], {}), '()\n', (910, 912), True, 'import torch as tch\n'), ((979, 991), 'torch.nn.ELU', 'tch.nn.ELU', ([], {}), '()\n', (989, 991), True, 'import torch as tch\n')]
|
import httpx
from asgi_lifespan import LifespanManager
from fastapi import FastAPI
from pytest import mark
from sqlalchemy import text
def test_startup():
from fastapi_sqla import _Session, startup
startup()
session = _Session()
assert session.execute(text("SELECT 1")).scalar() == 1
@mark.asyncio
async def test_fastapi_integration():
from fastapi_sqla import _Session, setup
app = FastAPI()
setup(app)
@app.get("/one")
def now():
session = _Session()
result = session.execute(text("SELECT 1")).scalar()
session.close()
return result
async with LifespanManager(app):
async with httpx.AsyncClient(
app=app, base_url="http://example.local"
) as client:
res = await client.get("/one")
assert res.json() == 1
|
[
"fastapi_sqla._Session",
"asgi_lifespan.LifespanManager",
"httpx.AsyncClient",
"sqlalchemy.text",
"fastapi_sqla.startup",
"fastapi_sqla.setup",
"fastapi.FastAPI"
] |
[((209, 218), 'fastapi_sqla.startup', 'startup', ([], {}), '()\n', (216, 218), False, 'from fastapi_sqla import _Session, startup\n'), ((234, 244), 'fastapi_sqla._Session', '_Session', ([], {}), '()\n', (242, 244), False, 'from fastapi_sqla import _Session, setup\n'), ((415, 424), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (422, 424), False, 'from fastapi import FastAPI\n'), ((429, 439), 'fastapi_sqla.setup', 'setup', (['app'], {}), '(app)\n', (434, 439), False, 'from fastapi_sqla import _Session, setup\n'), ((495, 505), 'fastapi_sqla._Session', '_Session', ([], {}), '()\n', (503, 505), False, 'from fastapi_sqla import _Session, setup\n'), ((628, 648), 'asgi_lifespan.LifespanManager', 'LifespanManager', (['app'], {}), '(app)\n', (643, 648), False, 'from asgi_lifespan import LifespanManager\n'), ((669, 728), 'httpx.AsyncClient', 'httpx.AsyncClient', ([], {'app': 'app', 'base_url': '"""http://example.local"""'}), "(app=app, base_url='http://example.local')\n", (686, 728), False, 'import httpx\n'), ((273, 289), 'sqlalchemy.text', 'text', (['"""SELECT 1"""'], {}), "('SELECT 1')\n", (277, 289), False, 'from sqlalchemy import text\n'), ((539, 555), 'sqlalchemy.text', 'text', (['"""SELECT 1"""'], {}), "('SELECT 1')\n", (543, 555), False, 'from sqlalchemy import text\n')]
|
from setuptools import setup, find_packages
import pathlib
HERE = pathlib.Path(__file__).parent
README = (HERE / "README.md").read_text()
setup(
name='build-flask-app',
description='Set up a modern flask web server by running one command.',
long_description=README,
long_description_content_type="text/markdown",
packages=find_packages(),
version='0.1.0',
license='MIT',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/Kushagrabainsla/build-flask-app',
install_requires=[
'Flask',
'Flask-SQLAlchemy',
'Flask-SocketIO',
'gunicorn',
'eventlet',
'gevent',
'dnspython',
'pymongo',
'Flask-PyMongo',
'PyInquirer',
'termcolor',
'flask-cors',
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
entry_points = {
'console_scripts': ['build-flask-app=build_flask_app.main:main'],
},
)
|
[
"pathlib.Path",
"setuptools.find_packages"
] |
[((67, 89), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (79, 89), False, 'import pathlib\n'), ((344, 359), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (357, 359), False, 'from setuptools import setup, find_packages\n')]
|
from linkedlist import LinkedList
class Queue(object):
def __init__(self):
self._store = LinkedList()
def enqueue(self, data):
self._store.add_back(data)
def dequeue(self):
if(self._store.front() != None):
data = self._store.front().data
self._store.delete(self._store.front())
return data
return None
def peek(self):
if(self._store.front() != None):
return self._store.front().data
return None
def items(self):
for i in self._store.items():
yield i.data
def count(self):
return self._store.count()
|
[
"linkedlist.LinkedList"
] |
[((102, 114), 'linkedlist.LinkedList', 'LinkedList', ([], {}), '()\n', (112, 114), False, 'from linkedlist import LinkedList\n')]
|
#!/usr/bin/python3
#
# Extract audio metadata from m4a file
#
# Author: <NAME>
# Date: 04 Jan 2021
#
import glob
from mutagen.mp4 import MP4
import numpy as np
filez = glob.glob("2020_12_27_AM.m4a")
mp4file = MP4(filez[0])
for tag in mp4file.tags:
print('{}: {}'.format(tag, mp4file.tags[tag]))
|
[
"mutagen.mp4.MP4",
"glob.glob"
] |
[((176, 206), 'glob.glob', 'glob.glob', (['"""2020_12_27_AM.m4a"""'], {}), "('2020_12_27_AM.m4a')\n", (185, 206), False, 'import glob\n'), ((217, 230), 'mutagen.mp4.MP4', 'MP4', (['filez[0]'], {}), '(filez[0])\n', (220, 230), False, 'from mutagen.mp4 import MP4\n')]
|
#!/usr/bin/python3
import tkinter as tk
from tkinter import messagebox
from PIL import ImageTk
from PIL import Image
from os import path
from Crypto.Cipher import AES
from Crypto.Hash import SHA256
from Crypto import Random
import base64
from sys import exit
global mainBgColr, secBgColr, theme
mainBgColr = "#121212"
secBgColr = "#1a1a1a"
global path_entry, msg_entry, password_entry, encode_button, label1, label2, label3, label4
def confirmClose():
if messagebox.askyesno(title='confirmation', message='Are you sure that you want to quit?'):
window.destroy()
window = tk.Tk()
window.title('IMGHide v1.0')
window.geometry('630x370')
window.configure(bg='#121212')
window.minsize(550, 370)
window.maxsize(630, 370)
im1 = Image.open("assets/header.png")
header = ImageTk.PhotoImage(im1)
im2 = Image.open("assets/enc_button.png")
enc_button = ImageTk.PhotoImage(im2)
im3 = Image.open("assets/dec_button.png")
dec_button = ImageTk.PhotoImage(im3)
def encrypt(key, source, encode=True):
key = SHA256.new(key).digest() # use SHA-256 over our key to get a proper-sized AES key
IV = Random.new().read(AES.block_size) # generate IV
encryptor = AES.new(key, AES.MODE_CBC, IV)
padding = AES.block_size - len(source) % AES.block_size # calculate needed padding
source += bytes([padding]) * padding # Python 2.x: source += chr(padding) * padding
data = IV + encryptor.encrypt(source) # store the IV at the beginning and encrypt
return base64.b64encode(data).decode() if encode else data
def decrypt(key, source, decode=True):
if decode:
source = base64.b64decode(source.encode())
key = SHA256.new(key).digest() # use SHA-256 over our key to get a proper-sized AES key
IV = source[:AES.block_size] # extract the IV from the beginning
decryptor = AES.new(key, AES.MODE_CBC, IV)
data = decryptor.decrypt(source[AES.block_size:]) # decrypt
padding = data[-1] # pick the padding value from the end; Python 2.x: ord(data[-1])
if data[-padding:] != bytes([padding]) * padding: # Python 2.x: chr(padding) * padding
messagebox.showerror("Error", "Invalid Padding Detected When Decrypting The Message !!!")
return data[:-padding] # remove the padding
def convertToRGB(img):
try:
rgba_image = img
rgba_image.load()
background = Image.new("RGB", rgba_image.size, (255, 255, 255))
background.paste(rgba_image, mask = rgba_image.split()[3])
info_label.config(text='$ Converted image to RGB')
window.update()
return background
except Exception as e:
info_label.config(text="$ Couldn't convert image to RGB")
window.update()
messagebox.showerror("Error", f"Couldn't convert image to RGB\n{e}")
exit(1)
def getPixelCount(img):
width, height = Image.open(img).size
return width*height
def encodeImage(image,message,filename):
info_label.config(text="$ Encoding The Image")
window.update()
try:
width, height = image.size
pix = image.getdata()
current_pixel = 0
tmp=0
# three_pixels = []
x=0
y=0
info_label.config(text="$ Encoding The Image.")
window.update()
for ch in message:
info_label.config(text="$ Encoding The Image..")
window.update()
binary_value = format(ord(ch), '08b')
# For each character, get 3 pixels at a time
p1 = pix[current_pixel]
p2 = pix[current_pixel+1]
p3 = pix[current_pixel+2]
three_pixels = [val for val in p1+p2+p3]
for i in range(0,8):
current_bit = binary_value[i]
if current_bit == '0':
if three_pixels[i]%2!=0:
three_pixels[i]= three_pixels[i]-1 if three_pixels[i]==255 else three_pixels[i]+1
elif current_bit == '1':
if three_pixels[i]%2==0:
three_pixels[i]= three_pixels[i]-1 if three_pixels[i]==255 else three_pixels[i]+1
current_pixel+=3
tmp+=1
#Set 9th value
if(tmp==len(message)):
# Make as 1 (odd) - stop reading
if three_pixels[-1]%2==0:
three_pixels[-1]= three_pixels[-1]-1 if three_pixels[-1]==255 else three_pixels[-1]+1
else:
# Make as 0 (even) - continue reading
if three_pixels[-1]%2!=0:
three_pixels[-1]= three_pixels[-1]-1 if three_pixels[-1]==255 else three_pixels[-1]+1
three_pixels = tuple(three_pixels)
st=0
end=3
for i in range(0,3):
image.putpixel((x,y), three_pixels[st:end])
st+=3
end+=3
if (x == width - 1):
x = 0
y += 1
else:
x += 1
info_label.config(text="$ Encoding The Image...")
window.update()
encoded_filename = filename.split('.')[0] + "-encrypted.png"
image.save(encoded_filename)
info_label.config(text="$ Image Saved Successfully")
window.update()
messagebox.showinfo("Success", f"Image encoded and saved as {encoded_filename}\nOriginal filename {filename}")
except Exception as e:
messagebox.showerror("Error", f"An error occured\n{e}")
exit(1)
def decodeImage(image):
info_label.config(text="$ Decoding The Image")
window.update()
try:
pix = image.getdata()
current_pixel = 0
decoded=""
info_label.config(text="$ Decoding The Image.")
window.update()
while True:
info_label.config(text="$ Decoding The Image..")
window.update()
# Get 3 pixels each time
binary_value=""
p1 = pix[current_pixel]
p2 = pix[current_pixel+1]
p3 = pix[current_pixel+2]
three_pixels = [val for val in p1+p2+p3]
for i in range(0,8):
if three_pixels[i]%2==0:
binary_value+="0"
elif three_pixels[i]%2!=0:
binary_value+="1"
info_label.config(text="$ Decoding The Image...")
window.update()
#Convert binary value to ascii and add to string
binary_value.strip()
ascii_value = int(binary_value,2)
decoded+=chr(ascii_value)
current_pixel+=3
info_label.config(text="$ Decoding The Image.")
window.update()
if three_pixels[-1]%2!=0:
# stop reading
break
info_label.config(text="$ Image Decoded")
window.update()
return decoded
except Exception as e:
messagebox.showerror("Error", f"An error occured\n{e}")
exit(1)
def insertHeaders(img):
pass
def init_encode():
c2.config(state='disabled')
encode_button.config(state='disabled')
img = path_var.get()
if(not(path.exists(img))):
messagebox.showerror("Error", "Image not found!\nGiven Image Name/Path is Invalid")
encode_button.config(state='normal')
return 1
message = str(msg_var.get())
if(len(message)*3 > getPixelCount(img)):
messagebox.showerror("Error", "Given message is too long to be encoded in the image.\nPlease try another image with more pixels")
encode_button.config(state='normal')
return 1
password = password_var.get()
cipher=""
if password!="":
cipher = encrypt(key=password.encode(),source=message.encode())
else:
cipher = message
image = Image.open(img)
info_label.config(text=f"Image Mode: {image.mode}")
window.update()
if image.mode!='RGB':
image = convertToRGB(image)
newimg = image.copy()
encodeImage(image=newimg,message=cipher,filename=image.filename)
encode_button.config(state='normal')
c2.config(state='normal')
checkbox_var1.set(0)
checkbox_var2.set(0)
disable_checkbox()
def copytext(msg):
r = tk.Tk()
r.withdraw()
r.clipboard_clear()
r.clipboard_append(msg)
r.update()
def init_decode():
c1.config(state='disabled')
encode_button.config(state='disabled')
img = path_var.get()
if(not(path.exists(img))):
messagebox.showerror("Error", "Image not found!\nFilename/Path Provided is Invalid!")
encode_button.config(state='normal')
return 1
password = str(password_var.get())
image = Image.open(img)
cipher = decodeImage(image)
decrypted=""
if password!="":
decrypted = decrypt(key=password.encode(), source=cipher)
else:
decrypted=cipher
response = messagebox.askyesno("Decoded Message", f'"{decrypted.decode("UTF-8")}"\n\nDo You Want To Copy The Message ?')
if response == True:
copytext(decrypted)
c1.config(state='normal')
encode_button.config(state='normal')
checkbox_var1.set(0)
checkbox_var2.set(0)
disable_checkbox()
def disable_checkbox():
global path_entry, msg_entry, password_entry, encode_button, label1, label2, label3, label4
if (checkbox_var1.get() == 1) & (checkbox_var2.get() == 0):
info_label.config(text='$ Encode Selected')
c2.config(state='disabled')
label1 = tk.Label(window,bg=mainBgColr,fg='white', text='File Name/Path', font=('calibre',9,'normal'))
label1.place(relx = 0.3, rely = 0.43, anchor = 'center')
path_entry = tk.Entry(window,textvariable = path_var, font=('calibre',10,'normal'))
path_entry.place(relx = 0.4, rely = 0.43, anchor = 'w')
label2 = tk.Label(window,bg=mainBgColr,fg='white', text=' Message', font=('calibre',9,'normal'))
label2.place(relx = 0.33, rely = 0.53, anchor = 'center')
msg_entry = tk.Entry(window,textvariable = msg_var, font=('calibre',10,'normal'))
msg_entry.place(relx = 0.4, rely = 0.53, anchor = 'w')
label3 = tk.Label(window,bg=mainBgColr,fg='white', text='Password', font=('calibre',9,'normal'))
label3.place(relx = 0.33, rely = 0.63, anchor = 'center')
password_entry = tk.Entry(window,textvariable = password_var, show='*', font=('calibre',10,'normal'))
password_entry.place(relx = 0.4, rely = 0.63, anchor = 'w')
label4 = tk.Label(window,bg=mainBgColr,fg='white',text='(Leave Empty For No Password)', font=('calibre',9,'normal'))
label4.place(relx = 0.5, rely = 0.7, anchor = 'center')
encode_button = tk.Button(window,image=enc_button,borderwidth = 2,relief="flat",command=init_encode)
encode_button.place(relx = 0.4, rely = 0.825, anchor = 'w')
elif (checkbox_var1.get() == 0) & (checkbox_var2.get() == 1):
info_label.config(text='$ Decode Selected')
c1.config(state='disabled')
label1 = tk.Label(window,bg=mainBgColr,fg='white', text='File Name/Path', font=('calibre',9,'normal'))
label1.place(relx = 0.3, rely = 0.45, anchor = 'center')
path_entry = tk.Entry(window,textvariable = path_var, font=('calibre',10,'normal'))
path_entry.place(relx = 0.4, rely = 0.45, anchor = 'w')
label2 = tk.Label(window,bg=mainBgColr,fg='white', text='Password', font=('calibre',9,'normal'))
label2.place(relx = 0.33, rely = 0.55, anchor = 'center')
label3 = tk.Label(window, text='', font=('calibre',9,'normal'))
password_entry = tk.Entry(window,textvariable=password_var, show='*', font=('calibre',10,'normal'))
password_entry.place(relx = 0.4, rely = 0.55, anchor = 'w')
label4 = tk.Label(window,bg=mainBgColr,fg='white', text='(Leave Empty For No Password)', font=('calibre',9,'normal'))
label4.place(relx = 0.5, rely = 0.65, anchor = 'center')
msg_entry = tk.Entry(window,textvariable = msg_var, font=('calibre',10,'normal'))
encode_button = tk.Button(window,image=dec_button,command=init_decode,borderwidth = 2,relief="flat")
encode_button.place(relx = 0.4, rely = 0.8, anchor = 'w')
elif (checkbox_var1.get() == 0) & (checkbox_var2.get() == 0):
info_label.config(text='$ Nothing Selected')
c1.config(state='normal')
c2.config(state='normal')
path_entry.delete('0',tk.END)
path_entry.destroy()
msg_entry.delete('0',tk.END)
msg_entry.destroy()
password_entry.delete('0',tk.END)
password_entry.destroy()
encode_button.destroy()
label1.destroy()
label2.destroy()
label3.destroy()
label4.destroy()
else:
info_label.config(text='$ Selected Nothing')
checkbox_var1 = tk.IntVar()
checkbox_var2 = tk.IntVar()
path_var = tk.StringVar()
password_var = tk.StringVar()
msg_var = tk.StringVar()
title = tk.Label(window, image=header, bg=mainBgColr)
title.place(relx = 0.5, rely = 0.12, anchor = 'center')
c1 = tk.Checkbutton(window, text='Encode', bg='#029dd3', variable=checkbox_var1, onvalue=1, offvalue=0, command=disable_checkbox, activebackground='#00bdff')
c1.place(relx = 0.5, rely = 0.28, anchor = 'center')
c2 = tk.Checkbutton(window,text='Decode', bg='#14c700', variable=checkbox_var2, onvalue=1, offvalue=0, command=disable_checkbox, activebackground='#1aff00')
c2.place(relx = 0.5, rely = 0.35, anchor = 'center')
info_label = tk.Label(window, bg=secBgColr, fg='#1aff00',width=30, text='$ Everything Initialised', font=('calibre',9,'normal'))
info_label.place(relx = 0, rely = 1, anchor ='sw')
author_label = tk.Label(window, bg=mainBgColr, fg='#00c6ff', width=30, text='GUI by heyDevlopr ( GitHub )\nIMGHide by TechRaj ( YouTube )', font=('TkHeadingFont',9,'normal'))
author_label.place(relx = 1, rely = 1, anchor ='se')
window.protocol("WM_DELETE_WINDOW", confirmClose)
window.mainloop()
|
[
"tkinter.StringVar",
"PIL.Image.new",
"Crypto.Random.new",
"tkinter.Label",
"tkinter.Checkbutton",
"tkinter.Button",
"tkinter.Entry",
"os.path.exists",
"tkinter.Tk",
"tkinter.messagebox.showinfo",
"tkinter.IntVar",
"tkinter.messagebox.showerror",
"sys.exit",
"Crypto.Hash.SHA256.new",
"PIL.ImageTk.PhotoImage",
"PIL.Image.open",
"base64.b64encode",
"Crypto.Cipher.AES.new",
"tkinter.messagebox.askyesno"
] |
[((588, 595), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (593, 595), True, 'import tkinter as tk\n'), ((740, 771), 'PIL.Image.open', 'Image.open', (['"""assets/header.png"""'], {}), "('assets/header.png')\n", (750, 771), False, 'from PIL import Image\n'), ((781, 804), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', (['im1'], {}), '(im1)\n', (799, 804), False, 'from PIL import ImageTk\n'), ((812, 847), 'PIL.Image.open', 'Image.open', (['"""assets/enc_button.png"""'], {}), "('assets/enc_button.png')\n", (822, 847), False, 'from PIL import Image\n'), ((861, 884), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', (['im2'], {}), '(im2)\n', (879, 884), False, 'from PIL import ImageTk\n'), ((892, 927), 'PIL.Image.open', 'Image.open', (['"""assets/dec_button.png"""'], {}), "('assets/dec_button.png')\n", (902, 927), False, 'from PIL import Image\n'), ((941, 964), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', (['im3'], {}), '(im3)\n', (959, 964), False, 'from PIL import ImageTk\n'), ((12812, 12823), 'tkinter.IntVar', 'tk.IntVar', ([], {}), '()\n', (12821, 12823), True, 'import tkinter as tk\n'), ((12840, 12851), 'tkinter.IntVar', 'tk.IntVar', ([], {}), '()\n', (12849, 12851), True, 'import tkinter as tk\n'), ((12863, 12877), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (12875, 12877), True, 'import tkinter as tk\n'), ((12893, 12907), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (12905, 12907), True, 'import tkinter as tk\n'), ((12918, 12932), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (12930, 12932), True, 'import tkinter as tk\n'), ((12942, 12987), 'tkinter.Label', 'tk.Label', (['window'], {'image': 'header', 'bg': 'mainBgColr'}), '(window, image=header, bg=mainBgColr)\n', (12950, 12987), True, 'import tkinter as tk\n'), ((13050, 13211), 'tkinter.Checkbutton', 'tk.Checkbutton', (['window'], {'text': '"""Encode"""', 'bg': '"""#029dd3"""', 'variable': 'checkbox_var1', 'onvalue': '(1)', 'offvalue': '(0)', 'command': 'disable_checkbox', 'activebackground': '"""#00bdff"""'}), "(window, text='Encode', bg='#029dd3', variable=checkbox_var1,\n onvalue=1, offvalue=0, command=disable_checkbox, activebackground='#00bdff'\n )\n", (13064, 13211), True, 'import tkinter as tk\n'), ((13262, 13423), 'tkinter.Checkbutton', 'tk.Checkbutton', (['window'], {'text': '"""Decode"""', 'bg': '"""#14c700"""', 'variable': 'checkbox_var2', 'onvalue': '(1)', 'offvalue': '(0)', 'command': 'disable_checkbox', 'activebackground': '"""#1aff00"""'}), "(window, text='Decode', bg='#14c700', variable=checkbox_var2,\n onvalue=1, offvalue=0, command=disable_checkbox, activebackground='#1aff00'\n )\n", (13276, 13423), True, 'import tkinter as tk\n'), ((13481, 13604), 'tkinter.Label', 'tk.Label', (['window'], {'bg': 'secBgColr', 'fg': '"""#1aff00"""', 'width': '(30)', 'text': '"""$ Everything Initialised"""', 'font': "('calibre', 9, 'normal')"}), "(window, bg=secBgColr, fg='#1aff00', width=30, text=\n '$ Everything Initialised', font=('calibre', 9, 'normal'))\n", (13489, 13604), True, 'import tkinter as tk\n'), ((13664, 13838), 'tkinter.Label', 'tk.Label', (['window'], {'bg': 'mainBgColr', 'fg': '"""#00c6ff"""', 'width': '(30)', 'text': '"""GUI by heyDevlopr ( GitHub )\nIMGHide by TechRaj ( YouTube )"""', 'font': "('TkHeadingFont', 9, 'normal')"}), '(window, bg=mainBgColr, fg=\'#00c6ff\', width=30, text=\n """GUI by heyDevlopr ( GitHub )\nIMGHide by TechRaj ( YouTube )""", font\n =(\'TkHeadingFont\', 9, \'normal\'))\n', (13672, 13838), True, 'import tkinter as tk\n'), ((463, 556), 'tkinter.messagebox.askyesno', 'messagebox.askyesno', ([], {'title': '"""confirmation"""', 'message': '"""Are you sure that you want to quit?"""'}), "(title='confirmation', message=\n 'Are you sure that you want to quit?')\n", (482, 556), False, 'from tkinter import messagebox\n'), ((1173, 1203), 'Crypto.Cipher.AES.new', 'AES.new', (['key', 'AES.MODE_CBC', 'IV'], {}), '(key, AES.MODE_CBC, IV)\n', (1180, 1203), False, 'from Crypto.Cipher import AES\n'), ((1816, 1846), 'Crypto.Cipher.AES.new', 'AES.new', (['key', 'AES.MODE_CBC', 'IV'], {}), '(key, AES.MODE_CBC, IV)\n', (1823, 1846), False, 'from Crypto.Cipher import AES\n'), ((7803, 7818), 'PIL.Image.open', 'Image.open', (['img'], {}), '(img)\n', (7813, 7818), False, 'from PIL import Image\n'), ((8224, 8231), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (8229, 8231), True, 'import tkinter as tk\n'), ((8676, 8691), 'PIL.Image.open', 'Image.open', (['img'], {}), '(img)\n', (8686, 8691), False, 'from PIL import Image\n'), ((2101, 2194), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""Invalid Padding Detected When Decrypting The Message !!!"""'], {}), "('Error',\n 'Invalid Padding Detected When Decrypting The Message !!!')\n", (2121, 2194), False, 'from tkinter import messagebox\n'), ((2345, 2395), 'PIL.Image.new', 'Image.new', (['"""RGB"""', 'rgba_image.size', '(255, 255, 255)'], {}), "('RGB', rgba_image.size, (255, 255, 255))\n", (2354, 2395), False, 'from PIL import Image\n'), ((2827, 2842), 'PIL.Image.open', 'Image.open', (['img'], {}), '(img)\n', (2837, 2842), False, 'from PIL import Image\n'), ((5296, 5418), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""Success"""', 'f"""Image encoded and saved as {encoded_filename}\nOriginal filename {filename}"""'], {}), '(\'Success\',\n f"""Image encoded and saved as {encoded_filename}\nOriginal filename {filename}"""\n )\n', (5315, 5418), False, 'from tkinter import messagebox\n'), ((7159, 7175), 'os.path.exists', 'path.exists', (['img'], {}), '(img)\n', (7170, 7175), False, 'from os import path\n'), ((7187, 7277), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""Image not found!\nGiven Image Name/Path is Invalid"""'], {}), '(\'Error\',\n """Image not found!\nGiven Image Name/Path is Invalid""")\n', (7207, 7277), False, 'from tkinter import messagebox\n'), ((7420, 7561), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""Given message is too long to be encoded in the image.\nPlease try another image with more pixels"""'], {}), '(\'Error\',\n """Given message is too long to be encoded in the image.\nPlease try another image with more pixels"""\n )\n', (7440, 7561), False, 'from tkinter import messagebox\n'), ((8448, 8464), 'os.path.exists', 'path.exists', (['img'], {}), '(img)\n', (8459, 8464), False, 'from os import path\n'), ((8476, 8568), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""Image not found!\nFilename/Path Provided is Invalid!"""'], {}), '(\'Error\',\n """Image not found!\nFilename/Path Provided is Invalid!""")\n', (8496, 8568), False, 'from tkinter import messagebox\n'), ((9480, 9582), 'tkinter.Label', 'tk.Label', (['window'], {'bg': 'mainBgColr', 'fg': '"""white"""', 'text': '"""File Name/Path"""', 'font': "('calibre', 9, 'normal')"}), "(window, bg=mainBgColr, fg='white', text='File Name/Path', font=(\n 'calibre', 9, 'normal'))\n", (9488, 9582), True, 'import tkinter as tk\n'), ((9660, 9731), 'tkinter.Entry', 'tk.Entry', (['window'], {'textvariable': 'path_var', 'font': "('calibre', 10, 'normal')"}), "(window, textvariable=path_var, font=('calibre', 10, 'normal'))\n", (9668, 9731), True, 'import tkinter as tk\n'), ((9813, 9909), 'tkinter.Label', 'tk.Label', (['window'], {'bg': 'mainBgColr', 'fg': '"""white"""', 'text': '""" Message"""', 'font': "('calibre', 9, 'normal')"}), "(window, bg=mainBgColr, fg='white', text=' Message', font=(\n 'calibre', 9, 'normal'))\n", (9821, 9909), True, 'import tkinter as tk\n'), ((9987, 10057), 'tkinter.Entry', 'tk.Entry', (['window'], {'textvariable': 'msg_var', 'font': "('calibre', 10, 'normal')"}), "(window, textvariable=msg_var, font=('calibre', 10, 'normal'))\n", (9995, 10057), True, 'import tkinter as tk\n'), ((10138, 10234), 'tkinter.Label', 'tk.Label', (['window'], {'bg': 'mainBgColr', 'fg': '"""white"""', 'text': '"""Password"""', 'font': "('calibre', 9, 'normal')"}), "(window, bg=mainBgColr, fg='white', text='Password', font=(\n 'calibre', 9, 'normal'))\n", (10146, 10234), True, 'import tkinter as tk\n'), ((10317, 10406), 'tkinter.Entry', 'tk.Entry', (['window'], {'textvariable': 'password_var', 'show': '"""*"""', 'font': "('calibre', 10, 'normal')"}), "(window, textvariable=password_var, show='*', font=('calibre', 10,\n 'normal'))\n", (10325, 10406), True, 'import tkinter as tk\n'), ((10487, 10604), 'tkinter.Label', 'tk.Label', (['window'], {'bg': 'mainBgColr', 'fg': '"""white"""', 'text': '"""(Leave Empty For No Password)"""', 'font': "('calibre', 9, 'normal')"}), "(window, bg=mainBgColr, fg='white', text=\n '(Leave Empty For No Password)', font=('calibre', 9, 'normal'))\n", (10495, 10604), True, 'import tkinter as tk\n'), ((10684, 10775), 'tkinter.Button', 'tk.Button', (['window'], {'image': 'enc_button', 'borderwidth': '(2)', 'relief': '"""flat"""', 'command': 'init_encode'}), "(window, image=enc_button, borderwidth=2, relief='flat', command=\n init_encode)\n", (10693, 10775), True, 'import tkinter as tk\n'), ((1016, 1031), 'Crypto.Hash.SHA256.new', 'SHA256.new', (['key'], {}), '(key)\n', (1026, 1031), False, 'from Crypto.Hash import SHA256\n'), ((1108, 1120), 'Crypto.Random.new', 'Random.new', ([], {}), '()\n', (1118, 1120), False, 'from Crypto import Random\n'), ((1647, 1662), 'Crypto.Hash.SHA256.new', 'SHA256.new', (['key'], {}), '(key)\n', (1657, 1662), False, 'from Crypto.Hash import SHA256\n'), ((2697, 2768), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', 'f"""Couldn\'t convert image to RGB\n{e}"""'], {}), '(\'Error\', f"""Couldn\'t convert image to RGB\n{e}""")\n', (2717, 2768), False, 'from tkinter import messagebox\n'), ((2774, 2781), 'sys.exit', 'exit', (['(1)'], {}), '(1)\n', (2778, 2781), False, 'from sys import exit\n'), ((5442, 5500), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', 'f"""An error occured\n{e}"""'], {}), '(\'Error\', f"""An error occured\n{e}""")\n', (5462, 5500), False, 'from tkinter import messagebox\n'), ((5506, 5513), 'sys.exit', 'exit', (['(1)'], {}), '(1)\n', (5510, 5513), False, 'from sys import exit\n'), ((6920, 6978), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', 'f"""An error occured\n{e}"""'], {}), '(\'Error\', f"""An error occured\n{e}""")\n', (6940, 6978), False, 'from tkinter import messagebox\n'), ((6984, 6991), 'sys.exit', 'exit', (['(1)'], {}), '(1)\n', (6988, 6991), False, 'from sys import exit\n'), ((11010, 11112), 'tkinter.Label', 'tk.Label', (['window'], {'bg': 'mainBgColr', 'fg': '"""white"""', 'text': '"""File Name/Path"""', 'font': "('calibre', 9, 'normal')"}), "(window, bg=mainBgColr, fg='white', text='File Name/Path', font=(\n 'calibre', 9, 'normal'))\n", (11018, 11112), True, 'import tkinter as tk\n'), ((11190, 11261), 'tkinter.Entry', 'tk.Entry', (['window'], {'textvariable': 'path_var', 'font': "('calibre', 10, 'normal')"}), "(window, textvariable=path_var, font=('calibre', 10, 'normal'))\n", (11198, 11261), True, 'import tkinter as tk\n'), ((11343, 11439), 'tkinter.Label', 'tk.Label', (['window'], {'bg': 'mainBgColr', 'fg': '"""white"""', 'text': '"""Password"""', 'font': "('calibre', 9, 'normal')"}), "(window, bg=mainBgColr, fg='white', text='Password', font=(\n 'calibre', 9, 'normal'))\n", (11351, 11439), True, 'import tkinter as tk\n'), ((11515, 11571), 'tkinter.Label', 'tk.Label', (['window'], {'text': '""""""', 'font': "('calibre', 9, 'normal')"}), "(window, text='', font=('calibre', 9, 'normal'))\n", (11523, 11571), True, 'import tkinter as tk\n'), ((11595, 11684), 'tkinter.Entry', 'tk.Entry', (['window'], {'textvariable': 'password_var', 'show': '"""*"""', 'font': "('calibre', 10, 'normal')"}), "(window, textvariable=password_var, show='*', font=('calibre', 10,\n 'normal'))\n", (11603, 11684), True, 'import tkinter as tk\n'), ((11764, 11881), 'tkinter.Label', 'tk.Label', (['window'], {'bg': 'mainBgColr', 'fg': '"""white"""', 'text': '"""(Leave Empty For No Password)"""', 'font': "('calibre', 9, 'normal')"}), "(window, bg=mainBgColr, fg='white', text=\n '(Leave Empty For No Password)', font=('calibre', 9, 'normal'))\n", (11772, 11881), True, 'import tkinter as tk\n'), ((11959, 12029), 'tkinter.Entry', 'tk.Entry', (['window'], {'textvariable': 'msg_var', 'font': "('calibre', 10, 'normal')"}), "(window, textvariable=msg_var, font=('calibre', 10, 'normal'))\n", (11967, 12029), True, 'import tkinter as tk\n'), ((12054, 12144), 'tkinter.Button', 'tk.Button', (['window'], {'image': 'dec_button', 'command': 'init_decode', 'borderwidth': '(2)', 'relief': '"""flat"""'}), "(window, image=dec_button, command=init_decode, borderwidth=2,\n relief='flat')\n", (12063, 12144), True, 'import tkinter as tk\n'), ((1479, 1501), 'base64.b64encode', 'base64.b64encode', (['data'], {}), '(data)\n', (1495, 1501), False, 'import base64\n')]
|
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""All parameters."""
from makani.config import mconfig
@mconfig.Config(deps={
'control': 'common.control.control_params',
'monitor': 'common.monitor.monitor_params',
'sim': 'common.sim.sim_params',
'system': mconfig.WING_MODEL + '.system_params'
})
def MakeParams(params):
return {
'control': params['control'],
'monitor': params['monitor'],
'sim': params['sim'],
'system': params['system']
}
|
[
"makani.config.mconfig.Config"
] |
[((648, 846), 'makani.config.mconfig.Config', 'mconfig.Config', ([], {'deps': "{'control': 'common.control.control_params', 'monitor':\n 'common.monitor.monitor_params', 'sim': 'common.sim.sim_params',\n 'system': mconfig.WING_MODEL + '.system_params'}"}), "(deps={'control': 'common.control.control_params', 'monitor':\n 'common.monitor.monitor_params', 'sim': 'common.sim.sim_params',\n 'system': mconfig.WING_MODEL + '.system_params'})\n", (662, 846), False, 'from makani.config import mconfig\n')]
|
"""
Measure: modularity (set)
@auth: <NAME>
@date 2015/10/09
@update 2016/02/13
"""
# 模塊性: Newman's modularity
def modularity(G, community_list):
"""
The estimated time complexity of this version (2016/02/13) is approximating
O(V) + O(E)
"""
import copy as c
NODE_DEGREE = 'node_degree'
# ls, ds variables
intra_degree = {i: 0 for i in range(0, len(community_list))} # ds
intra_edges = {i: 0 for i in range(0, len(community_list))} # ls
# calculate ds, time complexity: O(V)
community_index = 0
community_id = {}
for com in community_list:
tmp_index = c.copy(community_index)
for i in com:
intra_degree[tmp_index] += G.node[i][NODE_DEGREE]
community_id[i] = tmp_index
community_index += 1
# calculate ls, time complexity: O(E)
for (ei, ej) in G.edges():
if community_id[ei] == community_id[ej]:
intra_edges[community_id[ei]] += 1
else:
pass
# calculate modularity Q, time complexity: O(C)
modularity = 0
num_edges = G.number_of_edges()
for i in range(0, len(community_list)):
ls = intra_edges[i] / num_edges
ds = pow((intra_degree[i] / (2 * num_edges)), 2)
modularity += (ls - ds)
return modularity
|
[
"copy.copy"
] |
[((623, 646), 'copy.copy', 'c.copy', (['community_index'], {}), '(community_index)\n', (629, 646), True, 'import copy as c\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2017, Data61
# Commonwealth Scientific and Industrial Research Organisation (CSIRO)
# ABN 41 687 119 230.
#
# This software may be distributed and modified according to the terms of
# the BSD 2-Clause license. Note that NO WARRANTY is provided.
# See "LICENSE_BSD2.txt" for details.
#
# @TAG(DATA61_BSD)
from PyQt5 import QtWidgets
import Instance_Widget
from Interface.Property import PropertyInterface
import Connection_Widget
class InstancePropertyWidget(QtWidgets.QGroupBox):
"""
ConnectionPropertyWidget - shows the properties of a connection.
"""
# Connection Widget that this widget represent
@property
def instance_widget(self):
assert self._instance_widget is not None
return self._instance_widget
@instance_widget.setter
def instance_widget(self, value):
assert isinstance(value, Instance_Widget.InstanceWidget)
self._instance_widget = value
# views
@property
def name_widget(self):
if self._name_widget is None:
self._name_widget = QtWidgets.QLabel(self.instance_widget.name)
return self._name_widget
@property
def type_widget(self):
# When this becomes editable
# This will be not a label, but a drop down menu
if self._type_widget is None:
self._type_widget = QtWidgets.QLabel(self.instance_widget.component_type)
return self._type_widget
@property
def hardware_widget(self):
if self._hardware_widget is None and self.instance_widget.hardware:
self._hardware_widget = QtWidgets.QLabel("Hardware")
return self._hardware_widget
@property
def control_widget(self):
if self._control_widget is None and self.instance_widget.control:
self._control_widget = QtWidgets.QLabel("Control")
return self._control_widget
# --- INITIALISATION ---
def __init__(self, instance_widget):
self._instance_widget = None
self._name_widget = None
self._type_widget = None
self._hardware_widget = None
self._control_widget = None
self.instance_widget = instance_widget
super(InstancePropertyWidget, self).__init__()
grid_layout = QtWidgets.QGridLayout()
row = 0
# Following must be done after setting instance widget
# Name
grid_layout.addWidget(QtWidgets.QLabel("Name: "), row, 0)
grid_layout.addWidget(self.name_widget, row, 1)
row = row + 1
# Type
grid_layout.addWidget(QtWidgets.QLabel("Type: "), row, 0)
grid_layout.addWidget(self.type_widget, row, 1)
row = row + 1
# Hardware
if self.hardware_widget:
grid_layout.addWidget(self.hardware_widget, row, 0, 1, -1)
row = row + 1
# Control
if self.control_widget:
grid_layout.addWidget(self.control_widget, row, 0, 1, -1)
row = row + 1
# Separator
separator = QtWidgets.QFrame()
separator.setFrameStyle(QtWidgets.QFrame.HLine | QtWidgets.QFrame.Plain)
grid_layout.addWidget(separator, row, 0, 1, -1)
row = row + 1
# List all connection
grid_layout.addWidget(QtWidgets.QLabel("Connections"), row, 0, 1, -1)
row = row + 1
for provide_dict in self.instance_widget.provides:
grid_layout.addWidget(QtWidgets.QLabel("Procedure"), row, 0)
grid_layout.addWidget(QtWidgets.QLabel("%s : %s" % (provide_dict["Interface_type"], provide_dict["Name"]) ), row, 1)
row = row + 1
for use_dict in self.instance_widget.uses:
grid_layout.addWidget(QtWidgets.QLabel("Procedure"), row, 0)
grid_layout.addWidget(QtWidgets.QLabel("%s : %s" % (use_dict["Interface_type"], use_dict["Name"]) ), row, 1)
row = row + 1
for emit_dict in self.instance_widget.emits:
grid_layout.addWidget(QtWidgets.QLabel("Event"), row, 0)
grid_layout.addWidget(QtWidgets.QLabel("%s : %s" % (emit_dict["Interface_type"], emit_dict["Name"]) ), row, 1)
row = row + 1
for consume_dict in self.instance_widget.consumes:
grid_layout.addWidget(QtWidgets.QLabel("Event"), row, 0)
grid_layout.addWidget(QtWidgets.QLabel("%s : %s" % (consume_dict["Interface_type"], consume_dict["Name"]) ), row, 1)
row = row + 1
for dataport_dict in self.instance_widget.dataport:
grid_layout.addWidget(QtWidgets.QLabel("Dataport"), row, 0)
grid_layout.addWidget(QtWidgets.QLabel("%s : %s" % (dataport_dict["Interface_type"], dataport_dict["Name"]) ), row, 1)
row = row + 1
self.setLayout(grid_layout)
|
[
"PyQt5.QtWidgets.QGridLayout",
"PyQt5.QtWidgets.QLabel",
"PyQt5.QtWidgets.QFrame"
] |
[((2293, 2316), 'PyQt5.QtWidgets.QGridLayout', 'QtWidgets.QGridLayout', ([], {}), '()\n', (2314, 2316), False, 'from PyQt5 import QtWidgets\n'), ((3054, 3072), 'PyQt5.QtWidgets.QFrame', 'QtWidgets.QFrame', ([], {}), '()\n', (3070, 3072), False, 'from PyQt5 import QtWidgets\n'), ((1106, 1149), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.instance_widget.name'], {}), '(self.instance_widget.name)\n', (1122, 1149), False, 'from PyQt5 import QtWidgets\n'), ((1389, 1442), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.instance_widget.component_type'], {}), '(self.instance_widget.component_type)\n', (1405, 1442), False, 'from PyQt5 import QtWidgets\n'), ((1634, 1662), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['"""Hardware"""'], {}), "('Hardware')\n", (1650, 1662), False, 'from PyQt5 import QtWidgets\n'), ((1854, 1881), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['"""Control"""'], {}), "('Control')\n", (1870, 1881), False, 'from PyQt5 import QtWidgets\n'), ((2442, 2468), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['"""Name: """'], {}), "('Name: ')\n", (2458, 2468), False, 'from PyQt5 import QtWidgets\n'), ((2602, 2628), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['"""Type: """'], {}), "('Type: ')\n", (2618, 2628), False, 'from PyQt5 import QtWidgets\n'), ((3293, 3324), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['"""Connections"""'], {}), "('Connections')\n", (3309, 3324), False, 'from PyQt5 import QtWidgets\n'), ((3457, 3486), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['"""Procedure"""'], {}), "('Procedure')\n", (3473, 3486), False, 'from PyQt5 import QtWidgets\n'), ((3530, 3619), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (["('%s : %s' % (provide_dict['Interface_type'], provide_dict['Name']))"], {}), "('%s : %s' % (provide_dict['Interface_type'], provide_dict[\n 'Name']))\n", (3546, 3619), False, 'from PyQt5 import QtWidgets\n'), ((3737, 3766), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['"""Procedure"""'], {}), "('Procedure')\n", (3753, 3766), False, 'from PyQt5 import QtWidgets\n'), ((3810, 3886), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (["('%s : %s' % (use_dict['Interface_type'], use_dict['Name']))"], {}), "('%s : %s' % (use_dict['Interface_type'], use_dict['Name']))\n", (3826, 3886), False, 'from PyQt5 import QtWidgets\n'), ((4011, 4036), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['"""Event"""'], {}), "('Event')\n", (4027, 4036), False, 'from PyQt5 import QtWidgets\n'), ((4080, 4158), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (["('%s : %s' % (emit_dict['Interface_type'], emit_dict['Name']))"], {}), "('%s : %s' % (emit_dict['Interface_type'], emit_dict['Name']))\n", (4096, 4158), False, 'from PyQt5 import QtWidgets\n'), ((4289, 4314), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['"""Event"""'], {}), "('Event')\n", (4305, 4314), False, 'from PyQt5 import QtWidgets\n'), ((4358, 4447), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (["('%s : %s' % (consume_dict['Interface_type'], consume_dict['Name']))"], {}), "('%s : %s' % (consume_dict['Interface_type'], consume_dict[\n 'Name']))\n", (4374, 4447), False, 'from PyQt5 import QtWidgets\n'), ((4574, 4602), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['"""Dataport"""'], {}), "('Dataport')\n", (4590, 4602), False, 'from PyQt5 import QtWidgets\n'), ((4646, 4736), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (["('%s : %s' % (dataport_dict['Interface_type'], dataport_dict['Name']))"], {}), "('%s : %s' % (dataport_dict['Interface_type'],\n dataport_dict['Name']))\n", (4662, 4736), False, 'from PyQt5 import QtWidgets\n')]
|
# 70. 爬楼梯
#
# 20210716
# huao
from math import comb
class Solution:
def climbStairs(self, n: int) -> int:
count = 0
for i in range(n // 2 + 1):
count += comb(n - i, i)
return count
print(Solution().climbStairs(2))
print(Solution().climbStairs(3))
|
[
"math.comb"
] |
[((189, 203), 'math.comb', 'comb', (['(n - i)', 'i'], {}), '(n - i, i)\n', (193, 203), False, 'from math import comb\n')]
|
"""
Modular arithmetic
"""
from collections import defaultdict
import numpy as np
class ModInt:
"""
Integers of Z/pZ
"""
def __init__(self, a, n):
self.v = a % n
self.n = n
def __eq__(a, b):
if isinstance(b, ModInt):
return not bool(a - b)
else:
return NotImplemented
def __hash__(self):
return hash((self.v, self.n))
def __bool__(self):
return bool(self.v)
def __add__(a, b):
assert isinstance(b, ModInt)
assert a.n == b.n
return ModInt(a.v + b.v, a.n)
def __radd__(a, b):
assert isinstance(b, int)
return ModInt(a.v + b, a.n)
def __neg__(a): return ModInt(-a.v, a.n)
def __sub__(a, b): return ModInt(a.v - b.v, a.n)
def __mul__(a, b):
if isinstance(b, int):
return ModInt(b * a.v, a.n)
elif isinstance(b, ModInt):
assert a.n == b.n
return ModInt(a.v * b.v, a.n)
return NotImplemented
def __rmul__(a, b):
return a * b
def __pow__(P, k):
assert isinstance(k, int)
V = 1
A = P
while k:
if k & 1:
V *= A
k >>= 1
if not k:
break
A *= A
return V
def inv(self):
if self.v == 0:
raise ZeroDivisionError
return ModInt(ModInt._inv(self.v, self.n), self.n)
@staticmethod
def _inv(k, n):
k %= n
if k == 1:
return k
return (n - n // k) * ModInt._inv(n % k, n) % n
def __truediv__(a, b):
assert isinstance(b, ModInt)
assert a.n == b.n
return a * b.inv()
def __rtruediv__(a, k):
assert isinstance(k, int)
return ModInt(k, a.n) / a
@staticmethod
def extended_euclid(a, b):
"""Extended Euclid algorithm
Return
------
x : int
y : int
a * x + b * y = gcd(a, b)
"""
A, B = a, b
sa, sb = (1 if a >= 0 else -1), (1 if b >= 0 else -1)
xp, yp = 1, 0
x, y = 0, 1
while b:
assert A * xp + B * yp == a
assert A * x + B * y == b
r = a // b
a, b = b, a % b
x, xp = xp - r * x, x
y, yp = yp - r * y, y
return sa * xp, sb * yp
def __repr__(self):
return '%s(%s, %s)' % (self.__class__.__name__, self.v, self.n)
def __str__(self):
return '%s' % self.v
class Polynomial:
"""
Generic class for polynomials
Works with int, float and ModInt
"""
def __len__(self):
return len(self.C)
def trim(C):
i = len(C) - 1
while i >= 0 and not C[i]:
i -= 1
return C[:i + 1]
def __init__(self, C=None):
if C is None:
C = []
self.C = Polynomial.trim(C)
@property
def deg(self):
return len(self.C) - 1
def prime(self): return Polynomial([i * self[i]
for i in range(1, len(self))])
def eval(self, x):
if not self:
return 0
v = self[-1]
for c in self[-2::-1]:
v = v * x + c
return v
def shift(self, d): return Polynomial(
[0 * self[0]] * d + self.C if self else [])
def __eq__(P, Q):
return P.deg == Q.deg and all(cP == cQ for cP, cQ in zip(P, Q))
def __hash__(self):
return hash(tuple(self.C))
def __call__(self, x): return Polynomial.eval(self, x)
def __getitem__(self, x): return self.C[x]
def __neg__(P): return Polynomial([-c for c in P.C])
def __add__(P, Q):
if len(P.C) < len(Q.C):
P, Q = Q, P
return Polynomial([P[d] + Q[d] for d in range(len(Q))] + P[len(Q):])
def __sub__(P, Q): return P + (-Q)
def _mulpoly(P, Q):
assert isinstance(Q, Polynomial)
return Polynomial([sum(P[k] * Q[d - k]
for k in range(max(0, d + 1 - len(Q)),
min(d + 1, len(P)))
) for d in range(len(P) + len(Q) - 1)])
def _mulscal(P, k):
return Polynomial([k * c for c in P])
def __mul__(P, Q):
if isinstance(Q, Polynomial):
return P._mulpoly(Q)
return P._mulscal(Q)
def __rmul__(P, Q):
return P * Q
def __pow__(P, k):
assert isinstance(k, int)
V = 1
A = P
while k:
if k & 1:
V *= A
k >>= 1
if not k:
break
A *= A
return V
def __iter__(self):
yield from self.C
def euclidean_division(A, B):
Q = [0 * B[0]] * max(0, len(A) - len(B) + 1)
while len(A.C) >= len(B.C):
Q[len(A.C) - len(B.C)] = A[-1] / B[-1]
A -= B.shift(len(A) - len(B)) * (A[-1] / B[-1])
return Polynomial(Q), A
def __floordiv__(A, B):
assert isinstance(B, Polynomial)
return A.euclidean_division(B)[0]
def __mod__(A, B):
"""
Polynomial euclidian division
or modular reduction
"""
if isinstance(B, Polynomial):
return A.euclidean_division(B)[1]
else:
assert isinstance(B, int)
assert all(isinstance(c, int) for c in A)
return A.reduceP(B)
def __lt__(A, B): return A.deg < B.deg
def __bool__(self): return bool(self.C)
def gcd(A, B):
while B:
A, B = B, A % B
return A * (1 / A[-1])
@staticmethod
def gaussianElimKer(M, zero, one):
"""
Outputs an element of the kernel of M
zero and one are elements of the same field
"""
# V satisfies the invariant
# M = V M_0
V = [Polynomial([zero] * i + [one]) for i in range(len(M))]
pivots = [None] * (len(M) + 1)
for l in range(len(M)):
while M[l].deg >= 0:
idp = M[l].deg
if pivots[idp] is None:
pivots[idp] = l
break
else:
c = M[l][idp] / M[pivots[idp]][idp]
M[l] -= c * M[pivots[idp]]
V[l] -= c * V[pivots[idp]]
else:
# If a line is null, we found an element of the kernel
return V[l]
return None
def computeQ(P):
# only for Z/pZ[X] square-free polynoms, for p prime
p = P[0].n
# We ignore the image of 1 because (F-Id)(1) = 0
M = [Polynomial(([ModInt(0, p)] * (i * p)) + [ModInt(1, p)]) % P
for i in range(1, P.deg)]
# M -= Id
for i in range(1, P.deg):
M[i - 1] -= Polynomial([ModInt(0, p)] * i + [ModInt(1, p)])
# We find an element of the kernel by Gaussian elimination
pQ = Polynomial.gaussianElimKer(M, ModInt(0, p), ModInt(1, p))
# We put back the 1 tha was removed
return pQ.shift(1) if pQ is not None else None
def factor_unit(P):
"""
Berlekamp's algorithm
only in Z/pZ
"""
assert all(isinstance(c, ModInt) for c in P)
assert len(set(c.n for c in P)) == 1
if P.deg == 1:
return defaultdict(int, {P: 1})
p = P[0].n
S = Polynomial.gcd(P, P.prime())
if S.deg == P.deg:
# P' = 0 so P = R^p
R = Polynomial(P.C[::p])
return defaultdict(int,
{D: p * v
for D, v in Polynomial.factor_unit(R).items()})
else:
factors = defaultdict(int)
if S.deg:
for D, v in S.factor_unit().items():
factors[D] += v
P //= S
# P is now square-free
# We look for Q in Ker(F-Id) \ {1}
Q = Polynomial.computeQ(P)
if Q is None:
# P is irreducible
factors[P] += 1
else:
# P is the product of the gcd(P, Q-i)
# that are factored recursively
for i in range(p):
D = Polynomial.gcd(P, Q - Polynomial([ModInt(i, p)]))
if D.deg:
for DD, v in D.factor_unit().items():
factors[DD] += v
return factors
def factor(P):
"""
Factorization of P
only in Z/pZ
"""
cd = P[-1]
if P.deg == 0:
return (cd, defaultdict(int))
P = P * (1 / cd)
return (cd, P.factor_unit())
@staticmethod
def ppfactors(fz):
c, Ds = fz
a = str(c) if not Ds or c * c != c else ''
l = [a] + [(str(D) if D.deg == 1 and not D[0] else ('(%s)' % D))
+ (v > 1) * ('^%s' % v)
for D, v in sorted(Ds.items(),
key=lambda e: (e[0].deg, e[1]))]
return '⋅'.join(i for i in l if i)
def reduceP(P, p):
return Polynomial([ModInt(c, p) for c in P])
@staticmethod
def sign_changes(l):
return sum(a * b < 0 for a, b in zip(l, l[1:]))
def isreal(P):
return not any(isinstance(c, ModInt) for c in P)
def isinteger(P):
return all(isinstance(c, int) for c in P)
def sturm(P):
"""
Number of distinct real roots
by Sturm's theorem.
Only works on int or float coefficients
"""
inf = float('inf')
assert P.isreal()
A = P
B = A.prime()
l1 = [A(-inf)]
l2 = [A(inf)]
while B:
l1.append(B(-inf))
l2.append(B(inf))
B, A = -A % B, B
return Polynomial.sign_changes(l1) - Polynomial.sign_changes(l2)
@property
def r1(P):
"""
Number of real roots with multiplicity
"""
assert P.isreal()
ans = 0
s = P.sturm()
while s:
ans += s
P = P.gcd(P.prime())
s = P.sturm()
return ans
@property
def r2(P):
ans = P.deg - P.r1
assert ans % 2 == 0
return ans // 2
def sylvester(P, Q):
"""
Sylvester's matrix
"""
assert P.isreal()
assert Q.isreal()
p = P.deg
q = Q.deg
P = np.array(P)
Q = np.array(Q)
m = np.zeros((p + q, p + q))
for i in range(q):
m[i][i:i + p + 1] = P
for i in range(p):
m[q + i][i:i + q + 1] = Q
return m
def resultant(P, Q):
"""
Resultant of two real polynomials
"""
return np.linalg.det(P.sylvester(Q))
@property
def disc(P):
"""
Discriminant of a real polynomial
"""
ans = P.resultant(P.prime()) / P[-1]
if P.isinteger():
ans = int(ans.round())
if P.deg % 4 in [0, 1]:
return ans
else:
return -ans
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self.C)
@staticmethod
def _formatmonomial(c, d):
assert c
a = b = ''
if c * c != c or not d:
a = str(c) + (d != 0) * '⋅'
if d > 1:
b = 'X^' + str(d)
elif d == 1:
b = 'X'
return a + b
def __str__(self):
if not self.C:
return "0"
ans = '+'.join(self._formatmonomial(c, d)
for (d, c) in reversed(list(enumerate(self))) if c)
return ans.replace("+-", "-").replace('-1⋅', '-')
|
[
"numpy.zeros",
"collections.defaultdict",
"numpy.array"
] |
[((10508, 10519), 'numpy.array', 'np.array', (['P'], {}), '(P)\n', (10516, 10519), True, 'import numpy as np\n'), ((10532, 10543), 'numpy.array', 'np.array', (['Q'], {}), '(Q)\n', (10540, 10543), True, 'import numpy as np\n'), ((10556, 10580), 'numpy.zeros', 'np.zeros', (['(p + q, p + q)'], {}), '((p + q, p + q))\n', (10564, 10580), True, 'import numpy as np\n'), ((7378, 7402), 'collections.defaultdict', 'defaultdict', (['int', '{P: 1}'], {}), '(int, {P: 1})\n', (7389, 7402), False, 'from collections import defaultdict\n'), ((7754, 7770), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (7765, 7770), False, 'from collections import defaultdict\n'), ((8671, 8687), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (8682, 8687), False, 'from collections import defaultdict\n')]
|
import os
import torch
import matplotlib.pyplot as plt
from torchvision import transforms
from torch.utils.data import Dataset
import cv2
from PIL import Image
class CustomDataSet(Dataset):
def __init__(self, main_dir, type='train', resolution=(128,128)):
self.main_dir = main_dir
self.root_dir = main_dir
self.img_dir = os.path.join(self.root_dir,"images")
self.work_img_dir = os.path.join(self.img_dir,type)
self.all_imgs = os.listdir(self.work_img_dir)
self.type = type
self.compose = transforms.Compose(
[transforms.Resize(resolution),
transforms.Grayscale(),
transforms.PILToTensor()]
)
def __len__(self):
return len(self.all_imgs)
def __getitem__(self, idx):
img_name = f'CLEVR_{self.type}_{str(idx).zfill(6)}'
dir = f'{self.work_img_dir}\\{img_name}.png'
img = Image.open(dir)
img = img.convert('RGB')
tensor_image = self.compose(img)
#tensor_image = ((tensor_image / 255.0) - 0.5) * 2.0 # Rescale to [-1, 1].
# tensor_image = self.compose(image)
return torch.tensor(tensor_image, dtype=torch.float32)
# def main():
# root_dir = os.path.join(os.path.dirname(os.getcwd()),"CLEVR_v1.0\\CLEVR_v1.0\\")
# df = CustomDataSet(root_dir)
# img = df[4]
# plt.imshow(img.permute(1, 2, 0))
# plt.show()
#
# if __name__ == '__main__':
# main()
|
[
"torchvision.transforms.PILToTensor",
"PIL.Image.open",
"torchvision.transforms.Grayscale",
"os.path.join",
"os.listdir",
"torch.tensor",
"torchvision.transforms.Resize"
] |
[((353, 390), 'os.path.join', 'os.path.join', (['self.root_dir', '"""images"""'], {}), "(self.root_dir, 'images')\n", (365, 390), False, 'import os\n'), ((418, 450), 'os.path.join', 'os.path.join', (['self.img_dir', 'type'], {}), '(self.img_dir, type)\n', (430, 450), False, 'import os\n'), ((474, 503), 'os.listdir', 'os.listdir', (['self.work_img_dir'], {}), '(self.work_img_dir)\n', (484, 503), False, 'import os\n'), ((921, 936), 'PIL.Image.open', 'Image.open', (['dir'], {}), '(dir)\n', (931, 936), False, 'from PIL import Image\n'), ((1157, 1204), 'torch.tensor', 'torch.tensor', (['tensor_image'], {'dtype': 'torch.float32'}), '(tensor_image, dtype=torch.float32)\n', (1169, 1204), False, 'import torch\n'), ((585, 614), 'torchvision.transforms.Resize', 'transforms.Resize', (['resolution'], {}), '(resolution)\n', (602, 614), False, 'from torchvision import transforms\n'), ((629, 651), 'torchvision.transforms.Grayscale', 'transforms.Grayscale', ([], {}), '()\n', (649, 651), False, 'from torchvision import transforms\n'), ((666, 690), 'torchvision.transforms.PILToTensor', 'transforms.PILToTensor', ([], {}), '()\n', (688, 690), False, 'from torchvision import transforms\n')]
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=line-too-long
# type: ignore
"""Configuration definitions for MobilenetEdgeTPU losses, learning rates, optimizers, and training."""
import dataclasses
import os
from typing import Any, Mapping, Optional
# Import libraries
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import optimization
from official.vision.beta.configs import common
from official.vision.beta.configs import image_classification as base_config
@dataclasses.dataclass
class MobilenetEdgeTPUModelConfig(base_config.ImageClassificationModel):
"""Configuration for the MobilenetEdgeTPU model.
Attributes:
name: The name of the model. Defaults to 'MobilenetEdgeTPU'.
model_params: A dictionary that represents the parameters of the
EfficientNet model. These will be passed in to the "from_name" function.
"""
model_params: Mapping[str, Any] = dataclasses.field(
default_factory=lambda: { # pylint: disable=g-long-lambda
'model_name': 'mobilenet_edgetpu_v2_xs',
'model_weights_path': '',
'checkpoint_format': 'tf_checkpoint',
'overrides': {
'batch_norm': 'tpu',
'num_classes': 1001,
'rescale_input': False,
'dtype': 'bfloat16'
}
})
@dataclasses.dataclass
class MobilenetEdgeTPUTaskConfig(base_config.ImageClassificationTask):
"""Task defination for MobileNetEdgeTPU.
Attributes:
model: A `ModelConfig` instance.
saved_model_path: Instead of initializing a model from the model config,
the model can be loaded from a file path.
"""
model: MobilenetEdgeTPUModelConfig = MobilenetEdgeTPUModelConfig()
saved_model_path: Optional[str] = None
IMAGENET_TRAIN_EXAMPLES = 1281167
IMAGENET_VAL_EXAMPLES = 50000
IMAGENET_INPUT_PATH_BASE = 'imagenet-2012-tfrecord'
def mobilenet_edgetpu_base_experiment_config(
model_name: str) -> cfg.ExperimentConfig:
"""Image classification on imagenet with mobilenet_edgetpu.
Experiment config common across all mobilenet_edgetpu variants.
Args:
model_name: Name of the mobilenet_edgetpu model variant
Returns:
ExperimentConfig
"""
train_batch_size = 4096
eval_batch_size = 4096
steps_per_epoch = IMAGENET_TRAIN_EXAMPLES // train_batch_size
mobilenet_edgetpu_config = MobilenetEdgeTPUModelConfig(
num_classes=1001, input_size=[224, 224, 3])
mobilenet_edgetpu_config.model_params.model_name = model_name
config = cfg.ExperimentConfig(
task=MobilenetEdgeTPUTaskConfig(
model=mobilenet_edgetpu_config,
losses=base_config.Losses(label_smoothing=0.1),
train_data=base_config.DataConfig(
input_path=os.path.join(IMAGENET_INPUT_PATH_BASE, 'train*'),
is_training=True,
global_batch_size=train_batch_size,
dtype='bfloat16',
aug_type=common.Augmentation(type='autoaug')),
validation_data=base_config.DataConfig(
input_path=os.path.join(IMAGENET_INPUT_PATH_BASE, 'valid*'),
is_training=False,
dtype='bfloat16',
drop_remainder=False,
global_batch_size=eval_batch_size)),
trainer=cfg.TrainerConfig(
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch * 5,
max_to_keep=10,
train_steps=550 * steps_per_epoch,
validation_steps=IMAGENET_VAL_EXAMPLES // eval_batch_size,
validation_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'rmsprop',
'rmsprop': {
'rho': 0.9,
'momentum': 0.9,
'epsilon': 0.001,
}
},
'ema': {
'average_decay': 0.99,
'trainable_weights_only': False,
},
'learning_rate': {
'type': 'exponential',
'exponential': {
'initial_learning_rate':
0.008 * (train_batch_size // 128),
'decay_steps':
int(2.4 * steps_per_epoch),
'decay_rate':
0.97,
'staircase':
True
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 5 * steps_per_epoch,
'warmup_learning_rate': 0
}
},
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
# Registration for MobileNet-EdgeTPU-Search models.
# When this config is used, users need to specify the saved model path via
# --params_override=task.saved_model_path='your/saved_model/path/'.
@exp_factory.register_config_factory('mobilenet_edgetpu_search')
def mobilenet_edgetpu_search() -> cfg.ExperimentConfig:
return mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu_search')
# Registration for MobileNet-EdgeTPU-V2 models.
@exp_factory.register_config_factory('mobilenet_edgetpu_v2_tiny')
def mobilenet_edgetpu_v2_tiny() -> cfg.ExperimentConfig:
return mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu_v2_tiny')
# Registration for MobileNet-EdgeTPU-V2 models.
@exp_factory.register_config_factory('mobilenet_edgetpu_v2_xs')
def mobilenet_edgetpu_v2_xs() -> cfg.ExperimentConfig:
return mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu_v2_xs')
@exp_factory.register_config_factory('mobilenet_edgetpu_v2_s')
def mobilenet_edgetpu_v2_s() -> cfg.ExperimentConfig:
return mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu_v2_s')
@exp_factory.register_config_factory('mobilenet_edgetpu_v2_m')
def mobilenet_edgetpu_v2_m() -> cfg.ExperimentConfig:
return mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu_v2_m')
@exp_factory.register_config_factory('mobilenet_edgetpu_v2_l')
def mobilenet_edgetpu_v2_l() -> cfg.ExperimentConfig:
return mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu_v2_l')
# Registration for MobileNet-EdgeTPU-V1 models.
@exp_factory.register_config_factory('mobilenet_edgetpu')
def mobilenet_edgetpu() -> cfg.ExperimentConfig:
return mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu')
# Registration for MobileNet-EdgeTPU-V1 models.
# We use 'depth_multiplier' to scale the models.
# E.g. dm1p25 implies depth multiplier of 1.25x
@exp_factory.register_config_factory('mobilenet_edgetpu_dm1p25')
def mobilenet_edgetpu_dm1p25() -> cfg.ExperimentConfig:
return mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu_dm1p25')
@exp_factory.register_config_factory('mobilenet_edgetpu_dm1p5')
def mobilenet_edgetpu_dm1p5() -> cfg.ExperimentConfig:
return mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu_dm1p5')
@exp_factory.register_config_factory('mobilenet_edgetpu_dm1p75')
def mobilenet_edgetpu_dm1p75() -> cfg.ExperimentConfig:
return mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu_dm1p75')
# Registration for AutoSeg-EdgeTPU backbones
@exp_factory.register_config_factory('autoseg_edgetpu_backbone_xs')
def autoseg_edgetpu_backbone_xs() -> cfg.ExperimentConfig:
return mobilenet_edgetpu_base_experiment_config('autoseg_edgetpu_backbone_xs')
@exp_factory.register_config_factory('autoseg_edgetpu_backbone_s')
def autoseg_edgetpu_backbone_s() -> cfg.ExperimentConfig:
return mobilenet_edgetpu_base_experiment_config('autoseg_edgetpu_backbone_s')
@exp_factory.register_config_factory('autoseg_edgetpu_backbone_m')
def autoseg_edgetpu_backbone_m() -> cfg.ExperimentConfig:
return mobilenet_edgetpu_base_experiment_config('autoseg_edgetpu_backbone_m')
|
[
"official.core.exp_factory.register_config_factory",
"official.vision.beta.configs.common.Augmentation",
"dataclasses.field",
"official.vision.beta.configs.image_classification.Losses",
"os.path.join"
] |
[((5718, 5781), 'official.core.exp_factory.register_config_factory', 'exp_factory.register_config_factory', (['"""mobilenet_edgetpu_search"""'], {}), "('mobilenet_edgetpu_search')\n", (5753, 5781), False, 'from official.core import exp_factory\n'), ((5967, 6031), 'official.core.exp_factory.register_config_factory', 'exp_factory.register_config_factory', (['"""mobilenet_edgetpu_v2_tiny"""'], {}), "('mobilenet_edgetpu_v2_tiny')\n", (6002, 6031), False, 'from official.core import exp_factory\n'), ((6219, 6281), 'official.core.exp_factory.register_config_factory', 'exp_factory.register_config_factory', (['"""mobilenet_edgetpu_v2_xs"""'], {}), "('mobilenet_edgetpu_v2_xs')\n", (6254, 6281), False, 'from official.core import exp_factory\n'), ((6417, 6478), 'official.core.exp_factory.register_config_factory', 'exp_factory.register_config_factory', (['"""mobilenet_edgetpu_v2_s"""'], {}), "('mobilenet_edgetpu_v2_s')\n", (6452, 6478), False, 'from official.core import exp_factory\n'), ((6612, 6673), 'official.core.exp_factory.register_config_factory', 'exp_factory.register_config_factory', (['"""mobilenet_edgetpu_v2_m"""'], {}), "('mobilenet_edgetpu_v2_m')\n", (6647, 6673), False, 'from official.core import exp_factory\n'), ((6807, 6868), 'official.core.exp_factory.register_config_factory', 'exp_factory.register_config_factory', (['"""mobilenet_edgetpu_v2_l"""'], {}), "('mobilenet_edgetpu_v2_l')\n", (6842, 6868), False, 'from official.core import exp_factory\n'), ((7050, 7106), 'official.core.exp_factory.register_config_factory', 'exp_factory.register_config_factory', (['"""mobilenet_edgetpu"""'], {}), "('mobilenet_edgetpu')\n", (7085, 7106), False, 'from official.core import exp_factory\n'), ((7375, 7438), 'official.core.exp_factory.register_config_factory', 'exp_factory.register_config_factory', (['"""mobilenet_edgetpu_dm1p25"""'], {}), "('mobilenet_edgetpu_dm1p25')\n", (7410, 7438), False, 'from official.core import exp_factory\n'), ((7576, 7638), 'official.core.exp_factory.register_config_factory', 'exp_factory.register_config_factory', (['"""mobilenet_edgetpu_dm1p5"""'], {}), "('mobilenet_edgetpu_dm1p5')\n", (7611, 7638), False, 'from official.core import exp_factory\n'), ((7774, 7837), 'official.core.exp_factory.register_config_factory', 'exp_factory.register_config_factory', (['"""mobilenet_edgetpu_dm1p75"""'], {}), "('mobilenet_edgetpu_dm1p75')\n", (7809, 7837), False, 'from official.core import exp_factory\n'), ((8020, 8086), 'official.core.exp_factory.register_config_factory', 'exp_factory.register_config_factory', (['"""autoseg_edgetpu_backbone_xs"""'], {}), "('autoseg_edgetpu_backbone_xs')\n", (8055, 8086), False, 'from official.core import exp_factory\n'), ((8230, 8295), 'official.core.exp_factory.register_config_factory', 'exp_factory.register_config_factory', (['"""autoseg_edgetpu_backbone_s"""'], {}), "('autoseg_edgetpu_backbone_s')\n", (8265, 8295), False, 'from official.core import exp_factory\n'), ((8437, 8502), 'official.core.exp_factory.register_config_factory', 'exp_factory.register_config_factory', (['"""autoseg_edgetpu_backbone_m"""'], {}), "('autoseg_edgetpu_backbone_m')\n", (8472, 8502), False, 'from official.core import exp_factory\n'), ((1528, 1791), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': "(lambda : {'model_name': 'mobilenet_edgetpu_v2_xs', 'model_weights_path':\n '', 'checkpoint_format': 'tf_checkpoint', 'overrides': {'batch_norm':\n 'tpu', 'num_classes': 1001, 'rescale_input': False, 'dtype': 'bfloat16'}})"}), "(default_factory=lambda : {'model_name':\n 'mobilenet_edgetpu_v2_xs', 'model_weights_path': '',\n 'checkpoint_format': 'tf_checkpoint', 'overrides': {'batch_norm': 'tpu',\n 'num_classes': 1001, 'rescale_input': False, 'dtype': 'bfloat16'}})\n", (1545, 1791), False, 'import dataclasses\n'), ((3232, 3271), 'official.vision.beta.configs.image_classification.Losses', 'base_config.Losses', ([], {'label_smoothing': '(0.1)'}), '(label_smoothing=0.1)\n', (3250, 3271), True, 'from official.vision.beta.configs import image_classification as base_config\n'), ((3343, 3391), 'os.path.join', 'os.path.join', (['IMAGENET_INPUT_PATH_BASE', '"""train*"""'], {}), "(IMAGENET_INPUT_PATH_BASE, 'train*')\n", (3355, 3391), False, 'import os\n'), ((3530, 3565), 'official.vision.beta.configs.common.Augmentation', 'common.Augmentation', ([], {'type': '"""autoaug"""'}), "(type='autoaug')\n", (3549, 3565), False, 'from official.vision.beta.configs import common\n'), ((3643, 3691), 'os.path.join', 'os.path.join', (['IMAGENET_INPUT_PATH_BASE', '"""valid*"""'], {}), "(IMAGENET_INPUT_PATH_BASE, 'valid*')\n", (3655, 3691), False, 'import os\n')]
|
import numpy as np
class Average:
@staticmethod
def aggregate(gradients):
assert len(gradients) > 0, "Empty list of gradient to aggregate"
if len(gradients) > 1:
return np.mean(gradients, axis=0)
else:
return gradients[0]
|
[
"numpy.mean"
] |
[((209, 235), 'numpy.mean', 'np.mean', (['gradients'], {'axis': '(0)'}), '(gradients, axis=0)\n', (216, 235), True, 'import numpy as np\n')]
|
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
def get_sysctl(module, prefixes):
sysctl_cmd = module.get_bin_path('sysctl')
cmd = [sysctl_cmd]
cmd.extend(prefixes)
rc, out, err = module.run_command(cmd)
if rc != 0:
return dict()
sysctl = dict()
for line in out.splitlines():
if not line:
continue
(key, value) = re.split(r'\s?=\s?|: ', line, maxsplit=1)
sysctl[key] = value.strip()
return sysctl
|
[
"re.split"
] |
[((1085, 1127), 're.split', 're.split', (['"""\\\\s?=\\\\s?|: """', 'line'], {'maxsplit': '(1)'}), "('\\\\s?=\\\\s?|: ', line, maxsplit=1)\n", (1093, 1127), False, 'import re\n')]
|
#!/usr/bin/env python
import os
import sys
import re
params = {"port": 9000,
"target": "./example"}
if len(sys.argv)>1:
for arg in sys.argv:
tokens = re.split("=",arg.strip())
if len(tokens)>1:
var = tokens[0]
value = tokens[1]
params[var] = value
#TODO: make this a more pythonic way of controlling the threads
os.system("python userstate.py &")
os.system("python action.py &")
os.system("python webhost.py {} {}".format(params["target"], params["port"]))
|
[
"os.system"
] |
[((354, 388), 'os.system', 'os.system', (['"""python userstate.py &"""'], {}), "('python userstate.py &')\n", (363, 388), False, 'import os\n'), ((389, 420), 'os.system', 'os.system', (['"""python action.py &"""'], {}), "('python action.py &')\n", (398, 420), False, 'import os\n')]
|
"""Provides a dictionary indexed by object identity with a weak reference."""
import weakref
from typing import Any, Dict, Generic, Iterator, TypeVar
T = TypeVar("T")
class WeakIdDict(Generic[T]):
"""Dictionary using object identity with a weak reference as key."""
data: Dict[int, T]
refs: Dict[int, weakref.ref]
def __init__(self) -> None:
self.data = {}
self.refs = {}
def __getitem__(self, obj_key: Any) -> T:
return self.data[id(obj_key)]
def __setitem__(self, obj_key: Any, value: T) -> None:
id_key = id(obj_key)
def clean_stale_ref(_: weakref.ref) -> None:
del self.data[id_key]
del self.refs[id_key]
self.refs[id_key] = weakref.ref(obj_key, clean_stale_ref)
self.data[id_key] = value
def __delitem__(self, obj_key: Any) -> None:
id_key = id(obj_key)
del self.data[id_key]
del self.refs[id_key]
def __iter__(self) -> Iterator[Any]:
for ref in self.refs.values():
strong_ref = ref()
if strong_ref:
yield strong_ref
def __len__(self) -> int:
return len(self.data)
|
[
"typing.TypeVar",
"weakref.ref"
] |
[((156, 168), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (163, 168), False, 'from typing import Any, Dict, Generic, Iterator, TypeVar\n'), ((735, 772), 'weakref.ref', 'weakref.ref', (['obj_key', 'clean_stale_ref'], {}), '(obj_key, clean_stale_ref)\n', (746, 772), False, 'import weakref\n')]
|
import os
from configparser import ConfigParser
infile = os.path.expanduser("~/.abook/addressbook")
class AddressBook(object):
def __init__(self, contacts):
self.contacts = contacts
for i in self.contacts:
i["email"] = list(filter(None, i.get("email", '').split(",")))
def __getitem__(self, key):
if isinstance(key, str):
names = [i['name'] for i in self.contacts]
if key in names:
return self.contacts[names.index(key)]
for x, name in enumerate(names):
if key.lower() in name.lower():
return self.contacts[x]
elif isinstance(key, int):
return self.contacts[key]
raise KeyError
def __len__(self):
return len(self.contacts)
def __repr__(self):
return str(self.contacts)
def __contains__(self, item):
for i in self.contacts:
if i['name'] == item or item in i['email']:
return True
return False
def get_abook():
parser = ConfigParser()
parser.read(infile)
contacts = [dict(parser[i]) for i in parser if i.isdigit()]
return AddressBook(contacts)
|
[
"configparser.ConfigParser",
"os.path.expanduser"
] |
[((58, 100), 'os.path.expanduser', 'os.path.expanduser', (['"""~/.abook/addressbook"""'], {}), "('~/.abook/addressbook')\n", (76, 100), False, 'import os\n'), ((1061, 1075), 'configparser.ConfigParser', 'ConfigParser', ([], {}), '()\n', (1073, 1075), False, 'from configparser import ConfigParser\n')]
|
if __name__=='__main__':
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import sys
sys.argv += ['build_ext','--inplace']
ext = Extension("pyclipper",
sources=["pyclipper.pyx", "clipper.cpp"],
language="c++", # this causes Pyrex/Cython to create C++ source
include_dirs=["./../include"],
)
setup(
ext_modules=[ext],
cmdclass = {'build_ext': build_ext},
)
|
[
"distutils.extension.Extension",
"distutils.core.setup"
] |
[((221, 337), 'distutils.extension.Extension', 'Extension', (['"""pyclipper"""'], {'sources': "['pyclipper.pyx', 'clipper.cpp']", 'language': '"""c++"""', 'include_dirs': "['./../include']"}), "('pyclipper', sources=['pyclipper.pyx', 'clipper.cpp'], language=\n 'c++', include_dirs=['./../include'])\n", (230, 337), False, 'from distutils.extension import Extension\n'), ((482, 541), 'distutils.core.setup', 'setup', ([], {'ext_modules': '[ext]', 'cmdclass': "{'build_ext': build_ext}"}), "(ext_modules=[ext], cmdclass={'build_ext': build_ext})\n", (487, 541), False, 'from distutils.core import setup\n')]
|
# *******************************************************************************
#
# Copyright (c) 2021 <NAME>. All rights reserved.
#
# *******************************************************************************
import math, numpy
from coppertop.pipe import *
from coppertop.std.linalg import tvarray
@coppertop
def cov(A:tvarray) -> tvarray:
return numpy.cov(A).view(tvarray)
@coppertop
def mean(ndOrPy):
# should do full numpy?
return numpy.mean(ndOrPy)
@coppertop
def std(ndOrPy, dof=0):
# should do full numpy? std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=<no value>)
return numpy.std(ndOrPy, dof)
@coppertop
def logisticCDF(x, mu, s):
return 1 / (1 + math.exp(-1 * (x - mu) / s))
@coppertop
def logisticCDFInv(p, mu, s):
return mu + -s * math.log(1 / p - 1)
|
[
"math.exp",
"numpy.std",
"numpy.mean",
"math.log",
"numpy.cov"
] |
[((464, 482), 'numpy.mean', 'numpy.mean', (['ndOrPy'], {}), '(ndOrPy)\n', (474, 482), False, 'import math, numpy\n'), ((627, 649), 'numpy.std', 'numpy.std', (['ndOrPy', 'dof'], {}), '(ndOrPy, dof)\n', (636, 649), False, 'import math, numpy\n'), ((368, 380), 'numpy.cov', 'numpy.cov', (['A'], {}), '(A)\n', (377, 380), False, 'import math, numpy\n'), ((709, 736), 'math.exp', 'math.exp', (['(-1 * (x - mu) / s)'], {}), '(-1 * (x - mu) / s)\n', (717, 736), False, 'import math, numpy\n'), ((801, 820), 'math.log', 'math.log', (['(1 / p - 1)'], {}), '(1 / p - 1)\n', (809, 820), False, 'import math, numpy\n')]
|
import numpy as np
from int_tabulated import *
def GetNDVItoDate(NDVI, Time, Start_End, bpy, DaysPerBand, CurrentBand):
#;
#;jzhu,8/9/2011,This program calculates total ndvi integration (ndvi*day) from start of season to currentband, the currentband is the dayindex of interesting day.
#
FILL=-1.0
ny=1
#;DaysPerBand=365./bpy
NowT=CurrentBand #CurrentBand is the index of NDVI, the index start from 0
NowN=NDVI[NowT]
SeasonLength=NowT-Start_End['SOST'][0]
NDVItoDate=np.zeros(ny)+FILL
if SeasonLength < 0:
SeasonLength = FILL
if SeasonLength > 0 and SeasonLength < bpy: #<2>
#index range
segl=int(np.ceil(Start_End['SOST'][0]))
segh=int(np.floor(NowT )) + 1
XSeg= Time[ segl: segh ] #Xseg[Start_End['SOST'][0]:NowT]
NDVILine= NDVI[ segl : segh ]
#if XSeg[0] != Start_End['SOST'][0]: #<3>
# XSeg = np.concatenate([ np.array( [Start_End['SOST'][0] ] ), XSeg])
# NDVILine = np.concatenate([ np.array([ Start_End['SOSN'][0] ] ), NDVILine])
#<3>
#if XSeg[len(XSeg)-1] != NowT : #<4>
# XSeg = np.concatenate( [XSeg, np.array([NowT]) ] )
# NDVILine= np.concatenate( [NDVILine, np.array([NowN]) ] )
#<4>
BaseLine=XSeg*0+Start_End['SOSN'][0]
# get rid of duplicated point and sort the XSeg
XSeg, index=np.unique(XSeg,return_index=True)
NDVILine=NDVILine[index]
BaseLine=BaseLine[index]
IntNDVI=Int_Tabulated(XSeg, NDVILine)
IntBase=Int_Tabulated(XSeg, BaseLine)
NDVItoDate[0]=(IntNDVI-IntBase)*DaysPerBand
else: #<2>
NDVItoDate[0]=FILL
NDVItoDate={'NDVItoDate':NDVItoDate[0],'NowT':NowT,'NowN':NowN}
return NDVItoDate
|
[
"numpy.floor",
"numpy.zeros",
"numpy.ceil",
"numpy.unique"
] |
[((529, 541), 'numpy.zeros', 'np.zeros', (['ny'], {}), '(ny)\n', (537, 541), True, 'import numpy as np\n'), ((1508, 1542), 'numpy.unique', 'np.unique', (['XSeg'], {'return_index': '(True)'}), '(XSeg, return_index=True)\n', (1517, 1542), True, 'import numpy as np\n'), ((703, 732), 'numpy.ceil', 'np.ceil', (["Start_End['SOST'][0]"], {}), "(Start_End['SOST'][0])\n", (710, 732), True, 'import numpy as np\n'), ((754, 768), 'numpy.floor', 'np.floor', (['NowT'], {}), '(NowT)\n', (762, 768), True, 'import numpy as np\n')]
|
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Suite to ensure comments added onto filings and businesses are valid."""
import copy
from registry_schemas import validate
from registry_schemas.example_data import COMMENT_BUSINESS, COMMENT_FILING
def test_valid_comment_filing():
"""Assert that the schema is performing as expected for filing comments."""
is_valid, errors = validate(COMMENT_FILING, 'comment')
if errors:
for err in errors:
print(err.message)
print(errors)
assert is_valid
def test_valid_comment_business():
"""Assert that the schema is performing as expected for business comments."""
comment = copy.deepcopy(COMMENT_BUSINESS)
is_valid, errors = validate(comment, 'comment')
if errors:
for err in errors:
print(err.message)
print(errors)
assert is_valid
def test_valid_no_timestamp():
"""Assert that the schema does not require a timestamp."""
# check with timestamp set to null
comment = copy.deepcopy(COMMENT_FILING)
comment['comment']['timestamp'] = None
is_valid, errors = validate(comment, 'comment')
if errors:
for err in errors:
print(err.message)
print(errors)
assert is_valid
# check with timestamp removed entirely
del comment['comment']['timestamp']
is_valid, errors = validate(comment, 'comment')
if errors:
for err in errors:
print(err.message)
print(errors)
assert is_valid
def test_invalid_filing_and_business_id():
"""Assert that schema fails with both filing and business id set."""
comment = copy.deepcopy(COMMENT_FILING)
comment['comment']['businessId'] = 1
is_valid, errors = validate(comment, 'comment')
if errors:
for err in errors:
print(err.message)
print(errors)
assert not is_valid
def test_invalid_no_filing_or_business_id():
"""Assert that one of business or filing id is required."""
# check that setting an id to null fails
comment = copy.deepcopy(COMMENT_FILING)
comment['comment']['filingId'] = None
is_valid, errors = validate(comment, 'comment')
if errors:
for err in errors:
print(err.message)
print(errors)
assert not is_valid
# check that having neither id in the json at all fails
del comment['comment']['filingId']
is_valid, errors = validate(comment, 'comment')
if errors:
for err in errors:
print(err.message)
print(errors)
assert not is_valid
def test_invalid_null_submitter():
"""Assert that submitter id cannot be null."""
comment = copy.deepcopy(COMMENT_FILING)
comment['comment']['submitterId'] = None
is_valid, errors = validate(comment, 'comment')
if errors:
for err in errors:
print(err.message)
print(errors)
assert not is_valid
def test_valid_no_submitter():
"""Assert that submitter id is not required."""
comment = copy.deepcopy(COMMENT_FILING)
del comment['comment']['submitterId']
is_valid, errors = validate(comment, 'comment')
if errors:
for err in errors:
print(err.message)
print(errors)
assert is_valid
|
[
"registry_schemas.validate",
"copy.deepcopy"
] |
[((939, 974), 'registry_schemas.validate', 'validate', (['COMMENT_FILING', '"""comment"""'], {}), "(COMMENT_FILING, 'comment')\n", (947, 974), False, 'from registry_schemas import validate\n'), ((1221, 1252), 'copy.deepcopy', 'copy.deepcopy', (['COMMENT_BUSINESS'], {}), '(COMMENT_BUSINESS)\n', (1234, 1252), False, 'import copy\n'), ((1277, 1305), 'registry_schemas.validate', 'validate', (['comment', '"""comment"""'], {}), "(comment, 'comment')\n", (1285, 1305), False, 'from registry_schemas import validate\n'), ((1568, 1597), 'copy.deepcopy', 'copy.deepcopy', (['COMMENT_FILING'], {}), '(COMMENT_FILING)\n', (1581, 1597), False, 'import copy\n'), ((1664, 1692), 'registry_schemas.validate', 'validate', (['comment', '"""comment"""'], {}), "(comment, 'comment')\n", (1672, 1692), False, 'from registry_schemas import validate\n'), ((1912, 1940), 'registry_schemas.validate', 'validate', (['comment', '"""comment"""'], {}), "(comment, 'comment')\n", (1920, 1940), False, 'from registry_schemas import validate\n'), ((2184, 2213), 'copy.deepcopy', 'copy.deepcopy', (['COMMENT_FILING'], {}), '(COMMENT_FILING)\n', (2197, 2213), False, 'import copy\n'), ((2278, 2306), 'registry_schemas.validate', 'validate', (['comment', '"""comment"""'], {}), "(comment, 'comment')\n", (2286, 2306), False, 'from registry_schemas import validate\n'), ((2594, 2623), 'copy.deepcopy', 'copy.deepcopy', (['COMMENT_FILING'], {}), '(COMMENT_FILING)\n', (2607, 2623), False, 'import copy\n'), ((2689, 2717), 'registry_schemas.validate', 'validate', (['comment', '"""comment"""'], {}), "(comment, 'comment')\n", (2697, 2717), False, 'from registry_schemas import validate\n'), ((2956, 2984), 'registry_schemas.validate', 'validate', (['comment', '"""comment"""'], {}), "(comment, 'comment')\n", (2964, 2984), False, 'from registry_schemas import validate\n'), ((3202, 3231), 'copy.deepcopy', 'copy.deepcopy', (['COMMENT_FILING'], {}), '(COMMENT_FILING)\n', (3215, 3231), False, 'import copy\n'), ((3300, 3328), 'registry_schemas.validate', 'validate', (['comment', '"""comment"""'], {}), "(comment, 'comment')\n", (3308, 3328), False, 'from registry_schemas import validate\n'), ((3543, 3572), 'copy.deepcopy', 'copy.deepcopy', (['COMMENT_FILING'], {}), '(COMMENT_FILING)\n', (3556, 3572), False, 'import copy\n'), ((3638, 3666), 'registry_schemas.validate', 'validate', (['comment', '"""comment"""'], {}), "(comment, 'comment')\n", (3646, 3666), False, 'from registry_schemas import validate\n')]
|
import re
import os
import usb
import time
import json
import queue
import struct
import logging
import datetime
from ctypes import *
from typing import TypeVar, Any, Callable
from .SpectrometerSettings import SpectrometerSettings
from .SpectrometerState import SpectrometerState
from .SpectrometerResponse import SpectrometerResponse
from .SpectrometerRequest import SpectrometerRequest
from .SpectrometerResponse import ErrorLevel
from .InterfaceDevice import InterfaceDevice
from .DeviceID import DeviceID
from .Reading import Reading
log = logging.getLogger(__name__)
class AndorDevice(InterfaceDevice):
"""
This is the basic implementation of our interface with Andor cameras
@todo have check_result return a SpectrometerResponse
##########################################################################
This class adopts the external device interface structure
This involves receiving a request through the handle_request function
A request is processed based on the key in the request
The processing function passes the commands to the requested device
Once it receives a response from the connected device it then passes that
back up the chain
Enlighten Request
|
handle_requests
|
------------
/ / | \ \
{ get_laser status, acquire, set_laser_watchdog, etc....}
\ \ | / /
------------
|
{self.driver.some_andor_sdk_call}
############################################################################
"""
SUCCESS = 20002 #!< see load_error_codes()
SHUTTER_SPEED_MS = 50 #!< empirically determined
def __init__(self, device_id, message_queue=None) -> None:
# if passed a string representation of a DeviceID, deserialize it
super().__init__()
if type(device_id) is str:
device_id = DeviceID(label=device_id)
self.device_id = device_id
self.message_queue = message_queue
self.load_error_codes()
self.connected = False
# Receives ENLIGHTEN's 'change settings' commands in the spectrometer
# process. Although a logical queue, has nothing to do with multiprocessing.
self.command_queue = []
self.immediate_mode = False
self.settings = SpectrometerSettings(self.device_id)
self.summed_spectra = None
self.sum_count = 0
self.session_reading_count = 0
self.take_one = False
self.failure_count = 0
self.dll_fail = True
self.toggle_state = True
self.driver = None
self.process_id = os.getpid()
self.last_memory_check = datetime.datetime.now()
self.last_battery_percentage = 0
self.spec_index = 0
self._scan_averaging = 1
self.dark = None
self.boxcar_half_width = 0
# decide appropriate DLL filename for architecture
arch = 64 if 64 == struct.calcsize("P") * 8 else 32
filename = f"atmcd{arch}d.dll"
# Andor libraries may be found in various locations
dll_paths = [ r"C:\Program Files\Andor Driver Pack 2",
r"C:\Program Files\Andor SDK",
r"dist\Andor",
r"dist" ]
# try to find correct DLL in any known location
for path in dll_paths:
pathname = os.path.join(path, filename)
if os.path.exists(pathname):
try:
log.debug(f"attempting to load {pathname}")
self.driver = cdll.LoadLibrary(pathname)
self.dll_fail = False
except Exception as e:
log.error(f"Error loading {pathname}: {e}")
if self.driver is not None:
break
if self.driver is None:
log.error(f"could not find {filename} in search path: {dll_paths}")
# set Andor defaults for important "EEPROM" settings
# (all but has_cooling can be overridden via config file)
# Andor API doesn't have access to detector info
# Note that we use non-iDus cameras, including the Newton
self.settings.eeprom.detector = "iDus"
self.settings.eeprom.wavelength_coeffs = [0,1,0,0]
self.settings.eeprom.has_cooling = True
self.settings.eeprom.startup_integration_time_ms = 10
self.settings.eeprom.startup_temp_degC = -60
self.process_f = self._init_process_funcs()
###############################################################
# Private Methods
###############################################################
def _init_process_funcs(self) -> dict[str, Callable[..., Any]]:
process_f = {}
process_f["connect"] = self.connect
process_f["acquire_data"] = self.acquire_data
process_f["set_shutter_enable"] = self.set_shutter_enable
process_f["set_integration_time_ms"] = self.set_integration_time_ms
process_f["get_serial_number"] = self.get_serial_number
process_f["init_tec_setpoint"] = self.init_tec_setpoint
process_f["set_tec_setpoint"] = self.set_tec_setpoint
process_f["init_detector_area"] = self.init_detector_area
process_f["scans_to_average"] = self.scans_to_average
##################################################################
# What follows is the old init-lambdas that are squashed into process_f
# Long term, the upstream requests should be changed to match the new format
# This is an easy fix for the time being to make things behave
##################################################################
process_f["integration_time_ms"] = lambda x: self.set_integration_time_ms(x) # conversion from millisec to microsec
process_f["fan_enable"] = lambda x: self.set_fan_enable(bool(x))
process_f["shutter_enable"] = lambda x: self.set_shutter_enable(bool(x))
process_f["detector_tec_enable"] = lambda x: self.toggle_tec(bool(x))
process_f["detector_tec_setpoint_degC"] = lambda x: self.set_tec_setpoint(int(round(x)))
return process_f
def _update_wavelength_coeffs(self, coeffs: list[float]) -> None:
self.settings.eeprom.wavelength_coeffs = coeffs
self.config_values['wavelength_coeffs'] = coeffs
f = open(self.config_file, 'w')
json.dump(self.config_values, f)
def set_fan_enable(self, x: bool) -> SpectrometerResponse:
self.check_result(self.driver.SetFanMode(int(x)), f"Andor Fan On {x}")
return SpectrometerResponse()
def _get_default_data_dir(self) -> str:
if os.name == "nt":
return os.path.join(os.path.expanduser("~"), "Documents", "EnlightenSpectra")
return os.path.join(os.environ["HOME"], "EnlightenSpectra")
def _check_config_file(self) -> bool:
self.config_dir = os.path.join(self._get_default_data_dir(), 'config')
self.config_file = os.path.join(self.config_dir, self.serial + '.json')
if not os.path.exists(self.config_dir):
os.makedirs(self.config_dir)
return os.path.isfile(self.config_file)
def _get_spectrum_raw(self) -> list[float]:
log.debug("requesting spectrum");
#################
# read spectrum
#################
#int[] spec = new int[pixels];
spec_arr = c_long * self.pixels
spec_init_vals = [0] * self.pixels
spec = spec_arr(*spec_init_vals)
# ask for spectrum then collect, NOT multithreaded (though we should look into that!), blocks
#spec = new int[pixels]; //defaults to all zeros
self.driver.StartAcquisition();
self.driver.WaitForAcquisition();
success = self.driver.GetAcquiredData(spec, c_ulong(self.pixels));
if (success != self.SUCCESS):
log.debug(f"getting spectra did not succeed. Received code of {success}. Returning")
return
convertedSpec = [x for x in spec]
#if (self.eeprom.featureMask.invertXAxis):
# convertedSpec.reverse()
log.debug(f"getSpectrumRaw: returning {len(spec)} pixels");
return convertedSpec;
def _take_one_averaged_reading(self) -> SpectrometerResponse:
averaging_enabled = (self.settings.state.scans_to_average > 1)
if averaging_enabled and not self.settings.state.free_running_mode:
# collect the entire averaged spectrum at once (added for
# BatchCollection with laser delay)
#
# So: we're NOT in "free-running" mode, so we're basically being
# slaved to parent process and doing exactly what is requested
# "on command." That means we can perform a big, heavy blocking
# scan average all at once, because they requested it.
self.sum_count = 0
loop_count = self.settings.state.scans_to_average
else:
# we're in free-running mode
loop_count = 1
log.debug("take_one_averaged_reading: loop_count = %d", loop_count)
# either take one measurement (normal), or a bunch (blocking averaging)
reading = None
for loop_index in range(0, loop_count):
# start a new reading
# NOTE: reading.timestamp is when reading STARTED, not FINISHED!
reading = Reading(self.device_id)
if self.settings.eeprom.has_cooling and self.toggle_state:
c_temp = c_int()
result = self.driver.GetTemperature(0,c_temp)
if (self.SUCCESS != result):
log.error(f"unable to read tec temp, result was {result}")
else:
log.debug(f"andor read temperature, value of {c_temp.value}")
reading.detector_temperature_degC = c_temp.value
try:
reading.integration_time_ms = self.settings.state.integration_time_ms
reading.laser_power_perc = self.settings.state.laser_power_perc
reading.laser_power_mW = self.settings.state.laser_power_mW
reading.laser_enabled = self.settings.state.laser_enabled
reading.spectrum = self._get_spectrum_raw()
temperature = c_float()
temp_success = self.driver.GetTemperatureF(byref(temperature))
reading.detector_temperature_degC = temperature.value
except usb.USBError:
self.failure_count += 1
log.error(f"Andor Device: encountered USB error in reading for device {self.device}")
if reading.spectrum is None or reading.spectrum == []:
if self.failure_count > 3:
return SpectrometerResponse(data=False,error_msg="exceeded failure for readings")
if not reading.failure:
if averaging_enabled:
if self.sum_count == 0:
self.summed_spectra = [float(i) for i in reading.spectrum]
else:
log.debug("device.take_one_averaged_reading: summing spectra")
for i in range(len(self.summed_spectra)):
self.summed_spectra[i] += reading.spectrum[i]
self.sum_count += 1
log.debug("device.take_one_averaged_reading: summed_spectra : %s ...", self.summed_spectra[0:9])
# count spectra
self.session_reading_count += 1
reading.session_count = self.session_reading_count
reading.sum_count = self.sum_count
# have we completed the averaged reading?
if averaging_enabled:
if self.sum_count >= self.settings.state.scans_to_average:
reading.spectrum = [ x / self.sum_count for x in self.summed_spectra ]
log.debug("device.take_one_averaged_reading: averaged_spectrum : %s ...", reading.spectrum[0:9])
reading.averaged = True
# reset for next average
self.summed_spectra = None
self.sum_count = 0
else:
# if averaging isn't enabled...then a single reading is the
# "averaged" final measurement (check reading.sum_count to confirm)
reading.averaged = True
# were we told to only take one (potentially averaged) measurement?
if self.take_one and reading.averaged:
log.debug("completed take_one")
self.change_setting("cancel_take_one", True)
log.debug("device.take_one_averaged_reading: returning %s", reading)
if reading.spectrum is not None and reading.spectrum != []:
self.failure_count = 0
# reading.dump_area_scan()
return SpectrometerResponse(data=reading)
def _close_ex_shutter(self) -> SpectrometerResponse:
self.check_result(self.driver.SetShutterEx(1, 1, self.SHUTTER_SPEED_MS, self.SHUTTER_SPEED_MS, 2), "SetShutterEx(2)")
self.settings.state.shutter_enabled = False
return SpectrometerResponse(True)
def _open_ex_shutter(self) -> SpectrometerResponse:
self.check_result(self.driver.SetShutterEx(1, 1, self.SHUTTER_SPEED_MS, self.SHUTTER_SPEED_MS, 1), "SetShutterEx(1)")
self.settings.state.shutter_enabled = True
return SpectrometerResponse(True)
###############################################################
# Public Methods
###############################################################
def check_result(self, result, func):
if result != self.SUCCESS:
name = self.get_error_code(result)
msg = f"error calling {func}: {result} ({name})"
log.error(msg)
raise RuntimeError(msg)
log.debug(f"successfully called {func}")
def connect(self) -> SpectrometerResponse:
if self.dll_fail:
return SpectrometerResponse(data=False,error_lvl=ErrorLevel.high,error_msg="couldn't load Andor dll")
cameraHandle = c_int()
self.check_result(self.driver.GetCameraHandle(self.spec_index, byref(cameraHandle)), "GetCameraHandle")
self.check_result(self.driver.SetCurrentCamera(cameraHandle.value), "SetCurrentCamera")
try:
path_to_ini = create_string_buffer(b'\000' * 256)
self.check_result(self.driver.Initialize(path_to_ini), "Initialize")
except:
log.error("Andor.Initialize failed", exc_info=1)
return SpectrometerResponse(data=False, error_lvl=ErrorLevel.high, error_msg="Andor initialization failed")
self.get_serial_number()
self.init_tec_setpoint()
self.init_detector_area()
if not self._check_config_file():
self.config_values = {
'detector_serial_number': self.serial,
'wavelength_coeffs': [0,1,0,0],
'excitation_nm_float': 0,
}
f = open(self.config_file, 'w')
json.dump(self.config_values, f)
else:
self._load_config_values()
self.check_result(self.driver.CoolerON(), "CoolerON")
self.check_result(self.driver.SetAcquisitionMode(1), "SetAcquisitionMode(single_scan)")
self.check_result(self.driver.SetTriggerMode(0), "SetTriggerMode")
self.check_result(self.driver.SetReadMode(0), "SetReadMode(full_vertical_binning)")
self.init_detector_speed()
self.check_result(self.driver.SetShutterEx(1, 1, self.SHUTTER_SPEED_MS, self.SHUTTER_SPEED_MS, 0), "SetShutterEx(fully automatic external with internal always open)")
self.settings.state.shutter_enabled = True
self.set_integration_time_ms(self.settings.eeprom.startup_integration_time_ms)
# success!
log.info("AndorDevice successfully connected")
self.connected = True
self.settings.eeprom.active_pixels_horizontal = self.pixels
self.settings.eeprom.has_cooling = True
return SpectrometerResponse(data=True)
def _load_config_values(self):
f = open(self.config_file,)
self.config_values = dict(json.load(f))
log.debug(f"loaded {self.config_file}: {self.config_values}")
# alternate spellings (deprecated)
if "wp_serial_number" in self.config_values:
self.settings.eeprom.serial_number = self.config_values['wp_serial_number']
if "wp_model" in self.config_values:
self.settings.eeprom.model = self.config_values['wp_model']
# same spelling
for k in [ 'detector',
'model',
'detector',
'serial_number',
'wavelength_coeffs',
'excitation_nm_float',
'startup_temp_degC',
'startup_integration_time_ms' ]:
if k in self.config_values:
setattr(self.settings.eeprom, k, self.config_values[k])
# post-load initialization
if 'startup_temp_degC' in self.config_values:
self.set_tec_setpoint(self.settings.eeprom.startup_temp_degC)
def acquire_data(self) -> SpectrometerResponse:
reading = self._take_one_averaged_reading()
return reading
def set_shutter_enable(self, enable: bool) -> SpectrometerResponse:
if enable:
return self._open_ex_shutter()
else:
return self._close_ex_shutter()
def set_integration_time_ms(self, ms: float) -> SpectrometerResponse:
self.integration_time_ms = ms
log.debug(f"setting integration time to {self.integration_time_ms}ms")
exposure = c_float()
accumulate = c_float()
kinetic = c_float()
sec = ms / 1000.0
self.check_result(self.driver.SetExposureTime(c_float(sec)), f"SetExposureTime({sec})")
self.check_result(self.driver.GetAcquisitionTimings(byref(exposure), byref(accumulate), byref(kinetic)), "GetAcquisitionTimings")
log.debug(f"read integration time of {exposure.value:.3f}sec (expected {ms}ms)")
return SpectrometerResponse(data=True)
def get_serial_number(self) -> SpectrometerResponse:
sn = c_int()
self.check_result(self.driver.GetCameraSerialNumber(byref(sn)), "GetCameraSerialNumber")
self.serial = f"CCD-{sn.value}"
self.settings.eeprom.serial_number = self.serial
log.debug(f"connected to {self.serial}")
return SpectrometerResponse(True)
def init_tec_setpoint(self) -> SpectrometerResponse:
minTemp = c_int()
maxTemp = c_int()
self.check_result(self.driver.GetTemperatureRange(byref(minTemp), byref(maxTemp)), "GetTemperatureRange")
self.settings.eeprom.max_temp_degC = maxTemp.value
self.settings.eeprom.min_temp_degC = minTemp.value
# commenting-out because Andor camera is reporting -120C for a device
# only rated at -60C...leaving hardcoded default for now
#
# self.settings.eeprom.startup_temp_degC = minTemp.value
# however the startup temperature was set (hardcode, JSON, clamped to min)...apply it
self.setpoint_deg_c = self.settings.eeprom.startup_temp_degC
self.check_result(self.driver.SetTemperature(self.setpoint_deg_c), f"SetTemperature({self.setpoint_deg_c})")
log.debug(f"set TEC to {self.setpoint_deg_c}°C (range {self.settings.eeprom.min_temp_degC}, {self.settings.eeprom.max_temp_degC})")
return SpectrometerResponse(True)
def toggle_tec(self, toggle_state):
c_toggle = c_int(toggle_state)
self.toggle_state = c_toggle.value
if toggle_state:
self.check_result(self.driver.CoolerON(), "CoolerON")
else:
self.check_result(self.driver.CoolerOFF(), "CoolerOFF")
return SpectrometerResponse(True)
def set_tec_setpoint(self, set_temp):
if set_temp < self.settings.eeprom.min_temp_degC or set_temp > self.settings.eeprom.max_temp_degC:
log.error(f"requested temp of {set_temp}, but it is outside range ({self.settings.eeprom.min_temp_degC}C, {self.settings.eeprom.max_temp_degC}C)")
return
if not self.toggle_state:
log.error(f"returning because toggle state is {self.toggle_state}")
return
self.setpoint_deg_c = set_temp
# I don't think CoolerON should need to be called, but I'm not seeing temperature changes
# when it is not present here.
self.check_result(self.driver.CoolerON(), "CoolerON")
self.check_result(self.driver.SetTemperature(self.setpoint_deg_c), f"SetTemperature({self.setpoint_deg_c})")
return SpectrometerResponse(True)
def init_detector_area(self) -> SpectrometerResponse:
xPixels = c_int()
yPixels = c_int()
self.check_result(self.driver.GetDetector(byref(xPixels), byref(yPixels)), "GetDetector(x, y)")
log.debug(f"detector {xPixels.value} width x {yPixels.value} height")
self.pixels = xPixels.value
return SpectrometerResponse(True)
def init_detector_speed(self) -> SpectrometerResponse:
# set vertical to recommended
VSnumber = c_int()
speed = c_float()
self.check_result(self.driver.GetFastestRecommendedVSSpeed(byref(VSnumber), byref(speed)), "GetFastestRecommendedVSSpeed")
self.check_result(self.driver.SetVSSpeed(VSnumber.value), f"SetVSSpeed({VSnumber.value})")
# set horizontal to max
nAD = c_int()
sIndex = c_int()
STemp = 0.0
HSnumber = 0
ADnumber = 0
self.check_result(self.driver.GetNumberADChannels(byref(nAD)), "GetNumberADChannels")
for iAD in range(nAD.value):
self.check_result(self.driver.GetNumberHSSpeeds(iAD, 0, byref(sIndex)), f"GetNumberHSSpeeds({iAD})")
for iSpeed in range(sIndex.value):
self.check_result(self.driver.GetHSSpeed(iAD, 0, iSpeed, byref(speed)), f"GetHSSpeed(iAD {iAD}, iSpeed {iSpeed})")
if speed.value > STemp:
STemp = speed.value
HSnumber = iSpeed
ADnumber = iAD
self.check_result(self.driver.SetADChannel(ADnumber), f"SetADChannel({ADnumber})")
self.check_result(self.driver.SetHSSpeed(0, HSnumber), f"SetHSSpeed({HSnumber})")
log.debug(f"set AD channel {ADnumber} with horizontal speed {HSnumber} ({STemp})")
return SpectrometerResponse(True)
def scans_to_average(self, value: int) -> SpectrometerResponse:
self.sum_count = 0
self.settings.state.scans_to_average = int(value)
return SpectrometerResponse(True)
def get_error_code(self, code):
if code in self.error_codes:
return self.error_codes[code]
return "UNKNOWN_ANDOR_ERROR"
## @see ATMCD32D.H
def load_error_codes(self):
self.error_codes = {
20001: "DRV_ERROR_CODES",
20002: "DRV_SUCCESS",
20003: "DRV_VXDNOTINSTALLED",
20004: "DRV_ERROR_SCAN",
20005: "DRV_ERROR_CHECK_SUM",
20006: "DRV_ERROR_FILELOAD",
20007: "DRV_UNKNOWN_FUNCTION",
20008: "DRV_ERROR_VXD_INIT",
20009: "DRV_ERROR_ADDRESS",
20010: "DRV_ERROR_PAGELOCK",
20011: "DRV_ERROR_PAGEUNLOCK",
20012: "DRV_ERROR_BOARDTEST",
20013: "DRV_ERROR_ACK",
20014: "DRV_ERROR_UP_FIFO",
20015: "DRV_ERROR_PATTERN",
20017: "DRV_ACQUISITION_ERRORS",
20018: "DRV_ACQ_BUFFER",
20019: "DRV_ACQ_DOWNFIFO_FULL",
20020: "DRV_PROC_UNKONWN_INSTRUCTION",
20021: "DRV_ILLEGAL_OP_CODE",
20022: "DRV_KINETIC_TIME_NOT_MET",
20023: "DRV_ACCUM_TIME_NOT_MET",
20024: "DRV_NO_NEW_DATA",
20025: "DRV_PCI_DMA_FAIL",
20026: "DRV_SPOOLERROR",
20027: "DRV_SPOOLSETUPERROR",
20028: "DRV_FILESIZELIMITERROR",
20029: "DRV_ERROR_FILESAVE",
20033: "DRV_TEMPERATURE_CODES",
20034: "DRV_TEMPERATURE_OFF",
20035: "DRV_TEMPERATURE_NOT_STABILIZED",
20036: "DRV_TEMPERATURE_STABILIZED",
20037: "DRV_TEMPERATURE_NOT_REACHED",
20038: "DRV_TEMPERATURE_OUT_RANGE",
20039: "DRV_TEMPERATURE_NOT_SUPPORTED",
20040: "DRV_TEMPERATURE_DRIFT",
20033: "DRV_TEMP_CODES",
20034: "DRV_TEMP_OFF",
20035: "DRV_TEMP_NOT_STABILIZED",
20036: "DRV_TEMP_STABILIZED",
20037: "DRV_TEMP_NOT_REACHED",
20038: "DRV_TEMP_OUT_RANGE",
20039: "DRV_TEMP_NOT_SUPPORTED",
20040: "DRV_TEMP_DRIFT",
20049: "DRV_GENERAL_ERRORS",
20050: "DRV_INVALID_AUX",
20051: "DRV_COF_NOTLOADED",
20052: "DRV_FPGAPROG",
20053: "DRV_FLEXERROR",
20054: "DRV_GPIBERROR",
20055: "DRV_EEPROMVERSIONERROR",
20064: "DRV_DATATYPE",
20065: "DRV_DRIVER_ERRORS",
20066: "DRV_P1INVALID",
20067: "DRV_P2INVALID",
20068: "DRV_P3INVALID",
20069: "DRV_P4INVALID",
20070: "DRV_INIERROR",
20071: "DRV_COFERROR",
20072: "DRV_ACQUIRING",
20073: "DRV_IDLE",
20074: "DRV_TEMPCYCLE",
20075: "DRV_NOT_INITIALIZED",
20076: "DRV_P5INVALID",
20077: "DRV_P6INVALID",
20078: "DRV_INVALID_MODE",
20079: "DRV_INVALID_FILTER",
20080: "DRV_I2CERRORS",
20081: "DRV_I2CDEVNOTFOUND",
20082: "DRV_I2CTIMEOUT",
20083: "DRV_P7INVALID",
20084: "DRV_P8INVALID",
20085: "DRV_P9INVALID",
20086: "DRV_P10INVALID",
20087: "DRV_P11INVALID",
20089: "DRV_USBERROR",
20090: "DRV_IOCERROR",
20091: "DRV_VRMVERSIONERROR",
20092: "DRV_GATESTEPERROR",
20093: "DRV_USB_INTERRUPT_ENDPOINT_ERROR",
20094: "DRV_RANDOM_TRACK_ERROR",
20095: "DRV_INVALID_TRIGGER_MODE",
20096: "DRV_LOAD_FIRMWARE_ERROR",
20097: "DRV_DIVIDE_BY_ZERO_ERROR",
20098: "DRV_INVALID_RINGEXPOSURES",
20099: "DRV_BINNING_ERROR",
20100: "DRV_INVALID_AMPLIFIER",
20101: "DRV_INVALID_COUNTCONVERT_MODE",
20102: "DRV_USB_INTERRUPT_ENDPOINT_TIMEOUT",
20990: "DRV_ERROR_NOCAMERA",
20991: "DRV_NOT_SUPPORTED",
20992: "DRV_NOT_AVAILABLE",
20115: "DRV_ERROR_MAP",
20116: "DRV_ERROR_UNMAP",
20117: "DRV_ERROR_MDL",
20118: "DRV_ERROR_UNMDL",
20119: "DRV_ERROR_BUFFSIZE",
20121: "DRV_ERROR_NOHANDLE",
20130: "DRV_GATING_NOT_AVAILABLE",
20131: "DRV_FPGA_VOLTAGE_ERROR",
20150: "DRV_OW_CMD_FAIL",
20151: "DRV_OWMEMORY_BAD_ADDR",
20152: "DRV_OWCMD_NOT_AVAILABLE",
20153: "DRV_OW_NO_SLAVES",
20154: "DRV_OW_NOT_INITIALIZED",
20155: "DRV_OW_ERROR_SLAVE_NUM",
20156: "DRV_MSTIMINGS_ERROR",
20173: "DRV_OA_NULL_ERROR",
20174: "DRV_OA_PARSE_DTD_ERROR",
20175: "DRV_OA_DTD_VALIDATE_ERROR",
20176: "DRV_OA_FILE_ACCESS_ERROR",
20177: "DRV_OA_FILE_DOES_NOT_EXIST",
20178: "DRV_OA_XML_INVALID_OR_NOT_FOUND_ERROR",
20179: "DRV_OA_PRESET_FILE_NOT_LOADED",
20180: "DRV_OA_USER_FILE_NOT_LOADED",
20181: "DRV_OA_PRESET_AND_USER_FILE_NOT_LOADED",
20182: "DRV_OA_INVALID_FILE",
20183: "DRV_OA_FILE_HAS_BEEN_MODIFIED",
20184: "DRV_OA_BUFFER_FULL",
20185: "DRV_OA_INVALID_STRING_LENGTH",
20186: "DRV_OA_INVALID_CHARS_IN_NAME",
20187: "DRV_OA_INVALID_NAMING",
20188: "DRV_OA_GET_CAMERA_ERROR",
20189: "DRV_OA_MODE_ALREADY_EXISTS",
20190: "DRV_OA_STRINGS_NOT_EQUAL",
20191: "DRV_OA_NO_USER_DATA",
20192: "DRV_OA_VALUE_NOT_SUPPORTED",
20193: "DRV_OA_MODE_DOES_NOT_EXIST",
20194: "DRV_OA_CAMERA_NOT_SUPPORTED",
20195: "DRV_OA_FAILED_TO_GET_MODE",
20196: "DRV_OA_CAMERA_NOT_AVAILABLE",
20211: "DRV_PROCESSING_FAILED"
}
|
[
"os.path.expanduser",
"json.dump",
"json.load",
"os.getpid",
"os.path.join",
"os.makedirs",
"os.path.exists",
"struct.calcsize",
"os.path.isfile",
"datetime.datetime.now",
"logging.getLogger"
] |
[((636, 663), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (653, 663), False, 'import logging\n'), ((3095, 3106), 'os.getpid', 'os.getpid', ([], {}), '()\n', (3104, 3106), False, 'import os\n'), ((3140, 3163), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3161, 3163), False, 'import datetime\n'), ((6894, 6926), 'json.dump', 'json.dump', (['self.config_values', 'f'], {}), '(self.config_values, f)\n', (6903, 6926), False, 'import json\n'), ((7286, 7338), 'os.path.join', 'os.path.join', (["os.environ['HOME']", '"""EnlightenSpectra"""'], {}), "(os.environ['HOME'], 'EnlightenSpectra')\n", (7298, 7338), False, 'import os\n'), ((7488, 7540), 'os.path.join', 'os.path.join', (['self.config_dir', "(self.serial + '.json')"], {}), "(self.config_dir, self.serial + '.json')\n", (7500, 7540), False, 'import os\n'), ((7645, 7677), 'os.path.isfile', 'os.path.isfile', (['self.config_file'], {}), '(self.config_file)\n', (7659, 7677), False, 'import os\n'), ((3843, 3871), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (3855, 3871), False, 'import os\n'), ((3887, 3911), 'os.path.exists', 'os.path.exists', (['pathname'], {}), '(pathname)\n', (3901, 3911), False, 'import os\n'), ((7556, 7587), 'os.path.exists', 'os.path.exists', (['self.config_dir'], {}), '(self.config_dir)\n', (7570, 7587), False, 'import os\n'), ((7601, 7629), 'os.makedirs', 'os.makedirs', (['self.config_dir'], {}), '(self.config_dir)\n', (7612, 7629), False, 'import os\n'), ((15641, 15673), 'json.dump', 'json.dump', (['self.config_values', 'f'], {}), '(self.config_values, f)\n', (15650, 15673), False, 'import json\n'), ((16781, 16793), 'json.load', 'json.load', (['f'], {}), '(f)\n', (16790, 16793), False, 'import json\n'), ((7213, 7236), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (7231, 7236), False, 'import os\n'), ((3414, 3434), 'struct.calcsize', 'struct.calcsize', (['"""P"""'], {}), "('P')\n", (3429, 3434), False, 'import struct\n')]
|
"""
Copyright 2019 Samsung SDS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from brightics.common.repr import BrtcReprBuilder, strip_margin, plt2MD, \
pandasDF2MD, keyValues2MD
from brightics.common.groupby import _function_by_group
from brightics.common.utils import check_required_parameters
from brightics.common.utils import get_default_from_parameters_if_required
from brightics.common.validation import validate
from brightics.common.validation import from_under
import pandas as pd
import numpy as np
from scipy.stats import mannwhitneyu
import itertools
def mann_whitney_test(table, group_by=None, **params):
check_required_parameters(_mann_whitney_test, params, ['table'])
params = get_default_from_parameters_if_required(params, _mann_whitney_test)
if group_by is not None:
return _function_by_group(_mann_whitney_test, table, group_by=group_by, **params)
else:
return _mann_whitney_test(table, **params)
def _mann_whitney_test(table, response_col, factor_col, use_continuity=True):
result = dict()
rb = BrtcReprBuilder()
rb.addMD("""## Mann Whitney test Result""")
groups = dict()
uniq_factor = table[factor_col].unique()
for name in uniq_factor:
groups[name] = np.array(table[response_col])[np.where(table[factor_col] == name)]
for name1, name2 in itertools.combinations(uniq_factor, 2):
stats, pval = mannwhitneyu(groups[name1], groups[name2], use_continuity=use_continuity)
rb.addMD(strip_margin("""
| ## {name1} vs {name2}
|
| ### Statistics U value: {stats}
|
| ### P value: {pval}
""".format(name1=name1, name2=name2, stats=stats, pval=pval)))
name = str(name1) + '_' + str(name2)
result[name] = dict()
result[name]['Statistics'] = stats
result[name]['P value'] = pval
result['_repr_brtc_'] = rb.get()
return {'result': result}
|
[
"brightics.common.utils.get_default_from_parameters_if_required",
"scipy.stats.mannwhitneyu",
"brightics.common.repr.BrtcReprBuilder",
"itertools.combinations",
"numpy.where",
"numpy.array",
"brightics.common.utils.check_required_parameters",
"brightics.common.groupby._function_by_group"
] |
[((1165, 1229), 'brightics.common.utils.check_required_parameters', 'check_required_parameters', (['_mann_whitney_test', 'params', "['table']"], {}), "(_mann_whitney_test, params, ['table'])\n", (1190, 1229), False, 'from brightics.common.utils import check_required_parameters\n'), ((1248, 1315), 'brightics.common.utils.get_default_from_parameters_if_required', 'get_default_from_parameters_if_required', (['params', '_mann_whitney_test'], {}), '(params, _mann_whitney_test)\n', (1287, 1315), False, 'from brightics.common.utils import get_default_from_parameters_if_required\n'), ((1610, 1627), 'brightics.common.repr.BrtcReprBuilder', 'BrtcReprBuilder', ([], {}), '()\n', (1625, 1627), False, 'from brightics.common.repr import BrtcReprBuilder, strip_margin, plt2MD, pandasDF2MD, keyValues2MD\n'), ((1889, 1927), 'itertools.combinations', 'itertools.combinations', (['uniq_factor', '(2)'], {}), '(uniq_factor, 2)\n', (1911, 1927), False, 'import itertools\n'), ((1365, 1439), 'brightics.common.groupby._function_by_group', '_function_by_group', (['_mann_whitney_test', 'table'], {'group_by': 'group_by'}), '(_mann_whitney_test, table, group_by=group_by, **params)\n', (1383, 1439), False, 'from brightics.common.groupby import _function_by_group\n'), ((1951, 2024), 'scipy.stats.mannwhitneyu', 'mannwhitneyu', (['groups[name1]', 'groups[name2]'], {'use_continuity': 'use_continuity'}), '(groups[name1], groups[name2], use_continuity=use_continuity)\n', (1963, 2024), False, 'from scipy.stats import mannwhitneyu\n'), ((1798, 1827), 'numpy.array', 'np.array', (['table[response_col]'], {}), '(table[response_col])\n', (1806, 1827), True, 'import numpy as np\n'), ((1828, 1863), 'numpy.where', 'np.where', (['(table[factor_col] == name)'], {}), '(table[factor_col] == name)\n', (1836, 1863), True, 'import numpy as np\n')]
|
from common.page_object import PageObject, PageNotLoaded
from pages.footer import Footer
from pages.locators import HomePageLocators
from pages.signin_page import SigninPage
from pages.top_bar import TopBarNav
class HomePage(PageObject):
""" Quandl's page object """
def is_loaded(self):
"""A Top Bar Navigation is loaded if the top bar element is visible"""
return self._is_loaded_helper(HomePageLocators.HOME_HERO)
def _check_page(self, page_object, name):
"""A helper method to check if a part of the page has not loaded correctly"""
if not page_object.is_loaded():
raise PageNotLoaded("{} was not loaded on home page".format(name))
@property
def top_bar(self):
"""The top bar navigation page object"""
# check to see if the top bar is loaded, return the page object if it is
top_bar = TopBarNav(self._webdriver)
self._check_page(top_bar, "Top bar navigation")
return top_bar
@property
def footer(self):
"""The footer page object found on the home page"""
# check to see if the footer is visible and return it if so
footer = Footer(self._webdriver)
self._check_page(footer, "Footer")
return footer
def navigate_to_sign_in(self):
self.top_bar.navigate_to_signin()
def sign_in(self, username, password):
"""sign a user in
:param username: username string
:param password: <PASSWORD>
"""
# fill in the form and submit it
self.top_bar.navigate_to_signin()
sign_in = SigninPage(self._webdriver)
sign_in.username = username
sign_in.password = password
sign_in.log_in()
def logout(self):
"""Log out of a user account"""
self.top_bar.logout()
|
[
"pages.footer.Footer",
"pages.top_bar.TopBarNav",
"pages.signin_page.SigninPage"
] |
[((882, 908), 'pages.top_bar.TopBarNav', 'TopBarNav', (['self._webdriver'], {}), '(self._webdriver)\n', (891, 908), False, 'from pages.top_bar import TopBarNav\n'), ((1170, 1193), 'pages.footer.Footer', 'Footer', (['self._webdriver'], {}), '(self._webdriver)\n', (1176, 1193), False, 'from pages.footer import Footer\n'), ((1606, 1633), 'pages.signin_page.SigninPage', 'SigninPage', (['self._webdriver'], {}), '(self._webdriver)\n', (1616, 1633), False, 'from pages.signin_page import SigninPage\n')]
|
"""Unit tests for socket timeout feature."""
import unittest
from test import support
# This requires the 'network' resource as given on the regrtest command line.
skip_expected = not support.is_resource_enabled('network')
import time
import errno
import socket
class CreationTestCase(unittest.TestCase):
"""Test case for socket.gettimeout() and socket.settimeout()"""
def setUp(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def tearDown(self):
self.sock.close()
def testObjectCreation(self):
# Test Socket creation
self.assertEqual(self.sock.gettimeout(), None,
"timeout not disabled by default")
def testFloatReturnValue(self):
# Test return value of gettimeout()
self.sock.settimeout(7.345)
self.assertEqual(self.sock.gettimeout(), 7.345)
self.sock.settimeout(3)
self.assertEqual(self.sock.gettimeout(), 3)
self.sock.settimeout(None)
self.assertEqual(self.sock.gettimeout(), None)
def testReturnType(self):
# Test return type of gettimeout()
self.sock.settimeout(1)
self.assertEqual(type(self.sock.gettimeout()), type(1.0))
self.sock.settimeout(3.9)
self.assertEqual(type(self.sock.gettimeout()), type(1.0))
def testTypeCheck(self):
# Test type checking by settimeout()
self.sock.settimeout(0)
self.sock.settimeout(0)
self.sock.settimeout(0.0)
self.sock.settimeout(None)
self.assertRaises(TypeError, self.sock.settimeout, "")
self.assertRaises(TypeError, self.sock.settimeout, "")
self.assertRaises(TypeError, self.sock.settimeout, ())
self.assertRaises(TypeError, self.sock.settimeout, [])
self.assertRaises(TypeError, self.sock.settimeout, {})
self.assertRaises(TypeError, self.sock.settimeout, 0j)
def testRangeCheck(self):
# Test range checking by settimeout()
self.assertRaises(ValueError, self.sock.settimeout, -1)
self.assertRaises(ValueError, self.sock.settimeout, -1)
self.assertRaises(ValueError, self.sock.settimeout, -1.0)
def testTimeoutThenBlocking(self):
# Test settimeout() followed by setblocking()
self.sock.settimeout(10)
self.sock.setblocking(1)
self.assertEqual(self.sock.gettimeout(), None)
self.sock.setblocking(0)
self.assertEqual(self.sock.gettimeout(), 0.0)
self.sock.settimeout(10)
self.sock.setblocking(0)
self.assertEqual(self.sock.gettimeout(), 0.0)
self.sock.setblocking(1)
self.assertEqual(self.sock.gettimeout(), None)
def testBlockingThenTimeout(self):
# Test setblocking() followed by settimeout()
self.sock.setblocking(0)
self.sock.settimeout(1)
self.assertEqual(self.sock.gettimeout(), 1)
self.sock.setblocking(1)
self.sock.settimeout(1)
self.assertEqual(self.sock.gettimeout(), 1)
class TimeoutTestCase(unittest.TestCase):
# There are a number of tests here trying to make sure that an operation
# doesn't take too much longer than expected. But competing machine
# activity makes it inevitable that such tests will fail at times.
# When fuzz was at 1.0, I (tim) routinely saw bogus failures on Win2K
# and Win98SE. Boosting it to 2.0 helped a lot, but isn't a real
# solution.
fuzz = 2.0
localhost = '127.0.0.1'
def setUp(self):
raise NotImplementedError()
tearDown = setUp
def _sock_operation(self, count, timeout, method, *args):
"""
Test the specified socket method.
The method is run at most `count` times and must raise a socket.timeout
within `timeout` + self.fuzz seconds.
"""
self.sock.settimeout(timeout)
method = getattr(self.sock, method)
for i in range(count):
t1 = time.time()
try:
method(*args)
except socket.timeout as e:
delta = time.time() - t1
break
else:
self.fail('socket.timeout was not raised')
# These checks should account for timing unprecision
self.assertLess(delta, timeout + self.fuzz)
self.assertGreater(delta, timeout - 1.0)
class TCPTimeoutTestCase(TimeoutTestCase):
"""TCP test case for socket.socket() timeout functions"""
def setUp(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addr_remote = ('www.python.org.', 80)
def tearDown(self):
self.sock.close()
def testConnectTimeout(self):
# Choose a private address that is unlikely to exist to prevent
# failures due to the connect succeeding before the timeout.
# Use a dotted IP address to avoid including the DNS lookup time
# with the connect time. This avoids failing the assertion that
# the timeout occurred fast enough.
addr = ('10.0.0.0', 12345)
with support.transient_internet(addr[0]):
self._sock_operation(1, 0.001, 'connect', addr)
def testRecvTimeout(self):
# Test recv() timeout
with support.transient_internet(self.addr_remote[0]):
self.sock.connect(self.addr_remote)
self._sock_operation(1, 1.5, 'recv', 1024)
def testAcceptTimeout(self):
# Test accept() timeout
support.bind_port(self.sock, self.localhost)
self.sock.listen(5)
self._sock_operation(1, 1.5, 'accept')
def testSend(self):
# Test send() timeout
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as serv:
support.bind_port(serv, self.localhost)
serv.listen(5)
self.sock.connect(serv.getsockname())
# Send a lot of data in order to bypass buffering in the TCP stack.
self._sock_operation(100, 1.5, 'send', b"X" * 200000)
def testSendto(self):
# Test sendto() timeout
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as serv:
support.bind_port(serv, self.localhost)
serv.listen(5)
self.sock.connect(serv.getsockname())
# The address argument is ignored since we already connected.
self._sock_operation(100, 1.5, 'sendto', b"X" * 200000,
serv.getsockname())
def testSendall(self):
# Test sendall() timeout
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as serv:
support.bind_port(serv, self.localhost)
serv.listen(5)
self.sock.connect(serv.getsockname())
# Send a lot of data in order to bypass buffering in the TCP stack.
self._sock_operation(100, 1.5, 'sendall', b"X" * 200000)
class UDPTimeoutTestCase(TimeoutTestCase):
"""UDP test case for socket.socket() timeout functions"""
def setUp(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def tearDown(self):
self.sock.close()
def testRecvfromTimeout(self):
# Test recvfrom() timeout
# Prevent "Address already in use" socket exceptions
support.bind_port(self.sock, self.localhost)
self._sock_operation(1, 1.5, 'recvfrom', 1024)
def test_main():
support.requires('network')
support.run_unittest(
CreationTestCase,
TCPTimeoutTestCase,
UDPTimeoutTestCase,
)
if __name__ == "__main__":
test_main()
|
[
"test.support.requires",
"socket.socket",
"test.support.transient_internet",
"time.time",
"test.support.bind_port",
"test.support.run_unittest",
"test.support.is_resource_enabled"
] |
[((186, 224), 'test.support.is_resource_enabled', 'support.is_resource_enabled', (['"""network"""'], {}), "('network')\n", (213, 224), False, 'from test import support\n'), ((7361, 7388), 'test.support.requires', 'support.requires', (['"""network"""'], {}), "('network')\n", (7377, 7388), False, 'from test import support\n'), ((7393, 7471), 'test.support.run_unittest', 'support.run_unittest', (['CreationTestCase', 'TCPTimeoutTestCase', 'UDPTimeoutTestCase'], {}), '(CreationTestCase, TCPTimeoutTestCase, UDPTimeoutTestCase)\n', (7413, 7471), False, 'from test import support\n'), ((420, 469), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (433, 469), False, 'import socket\n'), ((4498, 4547), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (4511, 4547), False, 'import socket\n'), ((5462, 5506), 'test.support.bind_port', 'support.bind_port', (['self.sock', 'self.localhost'], {}), '(self.sock, self.localhost)\n', (5479, 5506), False, 'from test import support\n'), ((6999, 7047), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (7012, 7047), False, 'import socket\n'), ((7238, 7282), 'test.support.bind_port', 'support.bind_port', (['self.sock', 'self.localhost'], {}), '(self.sock, self.localhost)\n', (7255, 7282), False, 'from test import support\n'), ((3956, 3967), 'time.time', 'time.time', ([], {}), '()\n', (3965, 3967), False, 'import time\n'), ((5064, 5099), 'test.support.transient_internet', 'support.transient_internet', (['addr[0]'], {}), '(addr[0])\n', (5090, 5099), False, 'from test import support\n'), ((5236, 5283), 'test.support.transient_internet', 'support.transient_internet', (['self.addr_remote[0]'], {}), '(self.addr_remote[0])\n', (5262, 5283), False, 'from test import support\n'), ((5650, 5699), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (5663, 5699), False, 'import socket\n'), ((5721, 5760), 'test.support.bind_port', 'support.bind_port', (['serv', 'self.localhost'], {}), '(serv, self.localhost)\n', (5738, 5760), False, 'from test import support\n'), ((6056, 6105), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (6069, 6105), False, 'import socket\n'), ((6127, 6166), 'test.support.bind_port', 'support.bind_port', (['serv', 'self.localhost'], {}), '(serv, self.localhost)\n', (6144, 6166), False, 'from test import support\n'), ((6513, 6562), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (6526, 6562), False, 'import socket\n'), ((6584, 6623), 'test.support.bind_port', 'support.bind_port', (['serv', 'self.localhost'], {}), '(serv, self.localhost)\n', (6601, 6623), False, 'from test import support\n'), ((4079, 4090), 'time.time', 'time.time', ([], {}), '()\n', (4088, 4090), False, 'import time\n')]
|
import logging
from objective_turk import objective_turk
logger = logging.getLogger(__name__)
EXTERNAL_URL_QUESTION = """<?xml version="1.0"?>
<ExternalQuestion xmlns="http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2006-07-14/ExternalQuestion.xsd">
<ExternalURL>{}</ExternalURL>
<FrameHeight>600</FrameHeight>
</ExternalQuestion>
"""
HTML_QUESTION = """<?xml version="1.0"?>
<HTMLQuestion xmlns="http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2011-11-11/HTMLQuestion.xsd">
<HTMLContent><![CDATA[
{}
]]></HTMLContent>
<FrameHeight>0</FrameHeight>
</HTMLQuestion>
"""
def get_external_question(url: str):
"""
Return a Question string for an External URL HIT pointing to the given URL.
"""
return EXTERNAL_URL_QUESTION.format(url)
def get_html_question(html: str):
"""
Return a Question string for an HTMLQuestion with the given content.
"""
return HTML_QUESTION.format(html)
class BuiltinQualificationType:
"""
An Enum for QualificationTypeId constants
https://github.com/nmalkin/mturk-python/blob/master/mturk/mturk.py#L16
"""
P_SUBMITTED = "00000000000000000000"
P_ABANDONED = "00000000000000000070"
P_RETURNED = "000000000000000000E0"
P_APPROVED = "000000000000000000L0"
P_REJECTED = "000000000000000000S0"
N_APPROVED = "00000000000000000040"
LOCALE = "00000000000000000071"
ADULT = "00000000000000000060"
S_MASTERS = "2ARFPLSP75KLA8M8DH1HTEQVJT3SY6"
MASTERS = "2F1QJWKUDD8XADTFD2Q0G6UTO95ALH"
S_CATMASTERS = "2F1KVCNHMVHV8E9PBUB2A4J79LU20F"
CATMASTERS = "2NDP2L92HECWY8NS8H3CK0CP5L9GHO"
S_PHOTOMASTERS = "2TGBB6BFMFFOM08IBMAFGGESC1UWJX"
PHOTOMASTERS = "21VZU98JHSTLZ5BPP4A9NOBJEK3DPG"
MINIMUM_PERCENTAGE_APPROVED = 95
def get_qualifications(exclude: str = None, include: str = None):
qualifications = [
{
'QualificationTypeId': BuiltinQualificationType.LOCALE,
'Comparator': 'EqualTo',
'LocaleValues': [{'Country': 'US'}],
'RequiredToPreview': True,
},
{
'QualificationTypeId': BuiltinQualificationType.P_APPROVED,
'Comparator': 'GreaterThan',
'IntegerValues': [MINIMUM_PERCENTAGE_APPROVED],
'RequiredToPreview': True,
},
]
if exclude is not None:
for qualification_id in exclude:
logging.debug('excluding workers with qualification %s', qualification_id)
qualifications.append(
{
'QualificationTypeId': qualification_id,
'Comparator': 'DoesNotExist',
'RequiredToPreview': True,
}
)
if include is not None:
for qualification_id in include:
logging.debug(
'allowing only workers with qualification %s', qualification_id
)
qualifications.append(
{
'QualificationTypeId': qualification_id,
'Comparator': 'Exists',
'RequiredToPreview': True,
}
)
return qualifications
def create_hit_with_hit_type(**kwargs):
"""
Create HIT using provided HITTypeId.
You still need to pass 'LifetimeInSeconds', 'MaxAssignments', 'Question'.
Full list of valid parameters:
HITTypeId, MaxAssignments, LifetimeInSeconds, Question, RequesterAnnotation, UniqueRequestToken, AssignmentReviewPolicy, HITReviewPolicy, HITLayoutId, HITLayoutParameters
Other fields will be ignored:
Title, Description, Reward, and Keywords
"""
if 'HITTypeId' not in kwargs:
raise ValueError('missing required argument HITTypeId')
elif 'Question' not in kwargs:
raise ValueError('missing required argument Question')
elif 'MaxAssignments' not in kwargs:
raise ValueError('missing required argument MaxAssignments')
hit_type = kwargs['HITTypeId']
logger.info(
'creating HIT using HITTypeId %s. Title, Description, Reward, and Keywords from calling script will be ignored.',
hit_type,
)
response = objective_turk.client().create_hit_with_hit_type(**kwargs)
logger.debug(response)
#pylint: disable=protected-access
return objective_turk.Hit._new_from_response(response['HIT'])
def create_hit(**kwargs):
"""
Create a HIT with the given arguments.
For arguments, see:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/mturk.html#MTurk.Client.create_hit
"""
response = objective_turk.client().create_hit(**kwargs)
logger.debug(response)
#pylint: disable=protected-access
return objective_turk.Hit._new_from_response(response['HIT'])
|
[
"objective_turk.objective_turk.Hit._new_from_response",
"objective_turk.objective_turk.client",
"logging.debug",
"logging.getLogger"
] |
[((68, 95), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (85, 95), False, 'import logging\n'), ((4303, 4357), 'objective_turk.objective_turk.Hit._new_from_response', 'objective_turk.Hit._new_from_response', (["response['HIT']"], {}), "(response['HIT'])\n", (4340, 4357), False, 'from objective_turk import objective_turk\n'), ((4724, 4778), 'objective_turk.objective_turk.Hit._new_from_response', 'objective_turk.Hit._new_from_response', (["response['HIT']"], {}), "(response['HIT'])\n", (4761, 4778), False, 'from objective_turk import objective_turk\n'), ((2406, 2480), 'logging.debug', 'logging.debug', (['"""excluding workers with qualification %s"""', 'qualification_id'], {}), "('excluding workers with qualification %s', qualification_id)\n", (2419, 2480), False, 'import logging\n'), ((2806, 2884), 'logging.debug', 'logging.debug', (['"""allowing only workers with qualification %s"""', 'qualification_id'], {}), "('allowing only workers with qualification %s', qualification_id)\n", (2819, 2884), False, 'import logging\n'), ((4168, 4191), 'objective_turk.objective_turk.client', 'objective_turk.client', ([], {}), '()\n', (4189, 4191), False, 'from objective_turk import objective_turk\n'), ((4603, 4626), 'objective_turk.objective_turk.client', 'objective_turk.client', ([], {}), '()\n', (4624, 4626), False, 'from objective_turk import objective_turk\n')]
|
from src.templating import Request, url_path, redirect, form, render_template
lang = {
"ru": {
"title": "Редирект",
"route": {
"panel": "Панель управления",
"redirect": "Редирект",
},
"redirect_index": "Редирект на главную",
},
}
async def response(request: Request) -> render_template:
if "redirect" in form():
return redirect(url=url_path("index"))
return await render_template("route/panel/redirect.html", context={
"lc": lang[request.lang],
})
|
[
"src.templating.render_template",
"src.templating.form",
"src.templating.url_path"
] |
[((375, 381), 'src.templating.form', 'form', ([], {}), '()\n', (379, 381), False, 'from src.templating import Request, url_path, redirect, form, render_template\n'), ((447, 532), 'src.templating.render_template', 'render_template', (['"""route/panel/redirect.html"""'], {'context': "{'lc': lang[request.lang]}"}), "('route/panel/redirect.html', context={'lc': lang[request.lang]}\n )\n", (462, 532), False, 'from src.templating import Request, url_path, redirect, form, render_template\n'), ((411, 428), 'src.templating.url_path', 'url_path', (['"""index"""'], {}), "('index')\n", (419, 428), False, 'from src.templating import Request, url_path, redirect, form, render_template\n')]
|
"""
This is the official list of CEA colors to use in plots
"""
import os
import pandas as pd
import yaml
import warnings
import functools
from typing import List, Callable
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, Architecture and Building Systems - ETH Zurich"
__credits__ = ["<NAME>"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
COLORS_TO_RGB = {"red": "rgb(240,75,91)",
"red_light": "rgb(246,148,143)",
"red_lighter": "rgb(252,217,210)",
"blue": "rgb(63,192,194)",
"blue_light": "rgb(171,221,222)",
"blue_lighter": "rgb(225,242,242)",
"yellow": "rgb(255,209,29)",
"yellow_light": "rgb(255,225,133)",
"yellow_lighter": "rgb(255,243,211)",
"brown": "rgb(174,148,72)",
"brown_light": "rgb(201,183,135)",
"brown_lighter": "rgb(233,225,207)",
"purple": "rgb(171,95,127)",
"purple_light": "rgb(198,149,167)",
"purple_lighter": "rgb(231,214,219)",
"green": "rgb(126,199,143)",
"green_light": "rgb(178,219,183)",
"green_lighter": "rgb(227,241,228)",
"grey": "rgb(68,76,83)",
"grey_light": "rgb(126,127,132)",
"black": "rgb(35,31,32)",
"white": "rgb(255,255,255)",
"orange": "rgb(245,131,69)",
"orange_light": "rgb(248,159,109)",
"orange_lighter": "rgb(254,220,198)"}
def color_to_rgb(color):
try:
return COLORS_TO_RGB[color]
except KeyError:
import re
if re.match("rgb\(\s*\d+\s*,\s*\d+\s*,\s*\d+\s*\)", color):
# already an rgb formatted color
return color
return COLORS_TO_RGB["black"]
|
[
"re.match"
] |
[((1784, 1850), 're.match', 're.match', (['"""rgb\\\\(\\\\s*\\\\d+\\\\s*,\\\\s*\\\\d+\\\\s*,\\\\s*\\\\d+\\\\s*\\\\)"""', 'color'], {}), "('rgb\\\\(\\\\s*\\\\d+\\\\s*,\\\\s*\\\\d+\\\\s*,\\\\s*\\\\d+\\\\s*\\\\)', color)\n", (1792, 1850), False, 'import re\n')]
|
import math
import statistics
def fuzzyAnd(m):
"""
fuzzy anding
m = list of membership values to be anded
returns smallest value in the list
"""
return min(m)
FuzzyAnd = fuzzyAnd
def fuzzyOr(m):
"""
fuzzy oring
m = list of membership values to be ored
returns largest value in the list
"""
return max(m)
FuzzyOr = fuzzyOr
def fuzzyNot(x):
"""
fuzzy not
x = single membership value to be noted
returns the inverse membership value
"""
return 1 - x
def compensatoryAnd(m, g=0.5):
"""
anding function
m = list of membership values for x derived from n membership functions
g = gamma value 0=product 1=algebraic sum
returns compensatory AND value of x
"""
g = float(g)
product1 = 1
product2 = 1
for mem in m:
product1 *= mem
product2 *= (1 - mem)
return math.pow(product1, 1 - g) * math.pow((1 - product2), g)
CompensatoryAnd = compensatoryAnd
def gowa(w, wm, l=1.0):
"""
Generalized Ordered Weighted Averaging Operator
More info can be found here:
https://pdfs.semanticscholar.org/2810/c971af0d01d085c799fb2295dc5668d055c8.pdf
l = -1 = Ordered Weighted Harmonic Averaging Operator
l = -.000000000001 = Ordered Weighted Geometric Averaging Operator
l = 1 = Ordered Weighted Arithmetic Averaging Operator
l = 2 = Ordered Weighted Quadratic Averaging Operator
w = list of weights
wm = list of importance weighted membership values
l = lambda real number specifying type of owa to use
returns ordered weighted average
"""
if len(w) != len(wm):
raise ValueError("Weights and membership value lists must be of equal length.")
if l == 0:
raise ZeroDivisionError("Param l cannot be 0. Use -.000000000001 for owg.")
wm.sort(reverse=True)
s = 0
for i in range(len(w)):
s += w[i] * math.pow(wm[i], l)
return math.pow(s, 1/l)
Gowa = gowa
def owa(w, wm):
"""
Ordered Weighted Arithmetic Averaging Operator
w = [1,0,0,0] = AND
w = [0,0,0,1] = OR
w = [1/n,1/n,1/n,1/n] = Arithmetic Average where n=len(w)
w = list of weights
wm = list of importance weighted membership values
returns ordered arithmetic weighted average
"""
if len(w) != len(wm):
raise ValueError("Weights and membership value lists must be of equal length.")
wm.sort(reverse=True)
s = 0
for i in range(len(w)):
s += w[i] * wm[i]
return s
Owa = owa
def owg(w, wm):
"""
Ordered Weighted Geometric Averaging Operator
More info can be found here:
ftp://decsai.ugr.es/pub/arai/tech_rep/decision/libroOWG.pdf
w = [1,0,0,0] = AND
w = [0,0,0,1] = OR
w = [1/n,1/n,1/n,1/n] = Geometric Average where n=len(w)
w = list of weights
wm = list of importance weighted membership values
returns ordered geometric weighted average
"""
if len(w) != len(wm):
raise ValueError("Weights and membership value lists must be of equal length.")
wm.sort(reverse=True)
s = 1
for i in range(len(w)):
s *= math.pow(wm[i], w[i])
return s
Owg = owa
def owh(w, wm):
"""
Ordered Weighted Harmonic Averaging Operator
w = [1,0,0,0] = AND
w = [0,0,0,1] = OR
w = [1/n,1/n,1/n,1/n] = Harmonic Average where n=len(w)
w = list of weights
wm = list of importance weighted membership values
returns ordered harmonic weighted average
"""
return gowa(w, wm, -1)
Owh = owh
def owq(w, wm):
"""
Ordered Weighted Quadratic Averaging Operator
w = [1,0,0,0] = AND
w = [0,0,0,1] = OR
w = [1/n,1/n,1/n,1/n] = Quadratic Average where n=len(w)
w = list of weights
wm = list of importance weighted membership values
returns ordered quadratic weighted average
"""
return gowa(w, wm, 2)
Owq = owq
def median(wm):
"""
Median Operator
wm = list of importance weighted membership values
returns the middle value in the set
"""
return statistics.median(wm)
Median = median
|
[
"statistics.median",
"math.pow"
] |
[((1999, 2017), 'math.pow', 'math.pow', (['s', '(1 / l)'], {}), '(s, 1 / l)\n', (2007, 2017), False, 'import math\n'), ((4215, 4236), 'statistics.median', 'statistics.median', (['wm'], {}), '(wm)\n', (4232, 4236), False, 'import statistics\n'), ((910, 935), 'math.pow', 'math.pow', (['product1', '(1 - g)'], {}), '(product1, 1 - g)\n', (918, 935), False, 'import math\n'), ((938, 963), 'math.pow', 'math.pow', (['(1 - product2)', 'g'], {}), '(1 - product2, g)\n', (946, 963), False, 'import math\n'), ((3243, 3264), 'math.pow', 'math.pow', (['wm[i]', 'w[i]'], {}), '(wm[i], w[i])\n', (3251, 3264), False, 'import math\n'), ((1968, 1986), 'math.pow', 'math.pow', (['wm[i]', 'l'], {}), '(wm[i], l)\n', (1976, 1986), False, 'import math\n')]
|
import bcrypt
from functools import lru_cache, wraps
import os
import pytest
from pyrsistent import freeze, thaw
import yaml
from app import create_app
from app.config import Config
from app.models import db, BaseModel, User, SiteMetadata
from app.caching import cache
from app.auth import auth_provider
from test.utilities import recursively_update, add_config_to_site_metadata
@pytest.fixture
def test_config():
"""Extra configuration values to be used in a test."""
return {}
def _make_config(config_dict) -> Config:
"""Create a standard testing Config from the given dictionary."""
return Config(
config_dict=config_dict,
use_environment=False,
model=SiteMetadata,
cache=cache,
)
def _freeze_dict_arg(func):
"""Freeze the argument of a function that takes a single dictionary.
This decorator exists purely to get a hashable dictionary that can
be cached with lru_cache, so func should be a function wrapped with
the lru_cache decorator.
"""
@wraps(func)
def inner(arg: dict):
return func(freeze(arg))
return inner
@_freeze_dict_arg
@lru_cache()
def get_app(frozen_config_dict):
"""Create the Flask application, cached by config dictionary."""
config = _make_config(thaw(frozen_config_dict))
app = create_app(config)
return app, config
@pytest.fixture
def app_before_init_db(test_config):
"""Create the Flask app with an uninitialized database."""
db_name = ":memory:"
config_filename = os.environ.get("TEST_CONFIG", None)
if config_filename is None:
custom_config = {}
else:
with open(config_filename) as stream:
custom_config = yaml.safe_load(stream)
# Set some things that make sense for testing.
test_defaults = {
"app": {
"debug": False,
"development": False,
"languages": ["en"],
"testing": True,
},
# TODO set Redis database number to different than dev-server and use Redis here.
"cache": {"type": "simple"},
"database": {"engine": "SqliteDatabase", "name": db_name},
"mail": {
"server": "smtp.example.com",
"port": 8025,
"default_from": "<EMAIL>",
},
"ratelimit": {"enabled": False},
}
config = {}
recursively_update(config, test_defaults)
recursively_update(config, custom_config)
recursively_update(config, test_config)
app, conf_obj = get_app(config)
app_context = app.app_context()
app_context.push()
yield app, conf_obj
app_context.pop()
# The fixture "client" is generated from this one by pytest-flask.
@pytest.fixture
def app(app_before_init_db):
"""Create the Flask app with an intialized database."""
app, conf_obj = app_before_init_db
cache.clear()
if conf_obj.database.engine == "PostgresqlDatabase":
db.execute_sql("DROP SCHEMA public CASCADE;")
db.execute_sql("CREATE SCHEMA public;")
db.execute_sql("GRANT ALL ON SCHEMA public TO public;")
db.create_tables(BaseModel.__subclasses__())
add_config_to_site_metadata(conf_obj)
yield app
if conf_obj.auth.provider != "LOCAL":
for user in User.select():
try:
auth_provider.actually_delete_user(user)
except Exception as err:
print(f"Error trying to clean up {user.name} in Keycloak realm:", err)
raise err
if conf_obj.database.engine == "Sqlitedatabase":
db.detach(conf_obj.database.name)
@pytest.fixture(autouse=True)
def fast_hashing(monkeypatch):
def just_add_salt(data, salt):
assert isinstance(data, bytes)
assert isinstance(salt, bytes)
data = bytearray(data)
data.append(salt[-1])
return bytes(data)
monkeypatch.setattr(bcrypt, "hashpw", just_add_salt)
@pytest.fixture
def user_info():
return dict(
username="supertester", email="<EMAIL>", password="<PASSWORD>"
)
@pytest.fixture
def user2_info():
return dict(
username="administrator", email="<EMAIL>", password="<PASSWORD>###"
)
@pytest.fixture
def user3_info():
return dict(
username="moderator", email="<EMAIL>", password="<PASSWORD>###"
)
|
[
"test.utilities.recursively_update",
"app.auth.auth_provider.actually_delete_user",
"pytest.fixture",
"app.caching.cache.clear",
"app.create_app",
"os.environ.get",
"pyrsistent.thaw",
"app.models.db.detach",
"app.config.Config",
"yaml.safe_load",
"functools.wraps",
"app.models.BaseModel.__subclasses__",
"functools.lru_cache",
"test.utilities.add_config_to_site_metadata",
"app.models.db.execute_sql",
"pyrsistent.freeze",
"app.models.User.select"
] |
[((1142, 1153), 'functools.lru_cache', 'lru_cache', ([], {}), '()\n', (1151, 1153), False, 'from functools import lru_cache, wraps\n'), ((3591, 3619), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (3605, 3619), False, 'import pytest\n'), ((615, 706), 'app.config.Config', 'Config', ([], {'config_dict': 'config_dict', 'use_environment': '(False)', 'model': 'SiteMetadata', 'cache': 'cache'}), '(config_dict=config_dict, use_environment=False, model=SiteMetadata,\n cache=cache)\n', (621, 706), False, 'from app.config import Config\n'), ((1032, 1043), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (1037, 1043), False, 'from functools import lru_cache, wraps\n'), ((1318, 1336), 'app.create_app', 'create_app', (['config'], {}), '(config)\n', (1328, 1336), False, 'from app import create_app\n'), ((1526, 1561), 'os.environ.get', 'os.environ.get', (['"""TEST_CONFIG"""', 'None'], {}), "('TEST_CONFIG', None)\n", (1540, 1561), False, 'import os\n'), ((2352, 2393), 'test.utilities.recursively_update', 'recursively_update', (['config', 'test_defaults'], {}), '(config, test_defaults)\n', (2370, 2393), False, 'from test.utilities import recursively_update, add_config_to_site_metadata\n'), ((2398, 2439), 'test.utilities.recursively_update', 'recursively_update', (['config', 'custom_config'], {}), '(config, custom_config)\n', (2416, 2439), False, 'from test.utilities import recursively_update, add_config_to_site_metadata\n'), ((2444, 2483), 'test.utilities.recursively_update', 'recursively_update', (['config', 'test_config'], {}), '(config, test_config)\n', (2462, 2483), False, 'from test.utilities import recursively_update, add_config_to_site_metadata\n'), ((2845, 2858), 'app.caching.cache.clear', 'cache.clear', ([], {}), '()\n', (2856, 2858), False, 'from app.caching import cache\n'), ((3137, 3174), 'test.utilities.add_config_to_site_metadata', 'add_config_to_site_metadata', (['conf_obj'], {}), '(conf_obj)\n', (3164, 3174), False, 'from test.utilities import recursively_update, add_config_to_site_metadata\n'), ((1282, 1306), 'pyrsistent.thaw', 'thaw', (['frozen_config_dict'], {}), '(frozen_config_dict)\n', (1286, 1306), False, 'from pyrsistent import freeze, thaw\n'), ((2925, 2970), 'app.models.db.execute_sql', 'db.execute_sql', (['"""DROP SCHEMA public CASCADE;"""'], {}), "('DROP SCHEMA public CASCADE;')\n", (2939, 2970), False, 'from app.models import db, BaseModel, User, SiteMetadata\n'), ((2979, 3018), 'app.models.db.execute_sql', 'db.execute_sql', (['"""CREATE SCHEMA public;"""'], {}), "('CREATE SCHEMA public;')\n", (2993, 3018), False, 'from app.models import db, BaseModel, User, SiteMetadata\n'), ((3027, 3082), 'app.models.db.execute_sql', 'db.execute_sql', (['"""GRANT ALL ON SCHEMA public TO public;"""'], {}), "('GRANT ALL ON SCHEMA public TO public;')\n", (3041, 3082), False, 'from app.models import db, BaseModel, User, SiteMetadata\n'), ((3105, 3131), 'app.models.BaseModel.__subclasses__', 'BaseModel.__subclasses__', ([], {}), '()\n', (3129, 3131), False, 'from app.models import db, BaseModel, User, SiteMetadata\n'), ((3253, 3266), 'app.models.User.select', 'User.select', ([], {}), '()\n', (3264, 3266), False, 'from app.models import db, BaseModel, User, SiteMetadata\n'), ((3554, 3587), 'app.models.db.detach', 'db.detach', (['conf_obj.database.name'], {}), '(conf_obj.database.name)\n', (3563, 3587), False, 'from app.models import db, BaseModel, User, SiteMetadata\n'), ((1090, 1101), 'pyrsistent.freeze', 'freeze', (['arg'], {}), '(arg)\n', (1096, 1101), False, 'from pyrsistent import freeze, thaw\n'), ((1705, 1727), 'yaml.safe_load', 'yaml.safe_load', (['stream'], {}), '(stream)\n', (1719, 1727), False, 'import yaml\n'), ((3301, 3341), 'app.auth.auth_provider.actually_delete_user', 'auth_provider.actually_delete_user', (['user'], {}), '(user)\n', (3335, 3341), False, 'from app.auth import auth_provider\n')]
|
from google.appengine.ext import ndb
from protorpc import messages
class Session(ndb.Model):
"""Session -- Session object"""
organizerUserId = ndb.StringProperty()
name = ndb.StringProperty(required=True)
highlights = ndb.StringProperty(repeated=True)
speaker = ndb.StringProperty()
duration = ndb.StringProperty()
typeOfSession = ndb.StringProperty(repeated=True)
date = ndb.DateProperty()
startTime = ndb.IntegerProperty()
conferenceKeyBelongTo = ndb.StringProperty()
class SessionForm(messages.Message):
"""SessionForm -- Session outbound form message"""
organizerUserId = messages.StringField(1)
name = messages.StringField(2)
highlights = messages.StringField(3, repeated=True)
speaker = messages.StringField(4)
duration = messages.StringField(5)
typeOfSession = messages.EnumField('TypeOfSession', 6, repeated=True)
date = messages.StringField(7)
startTime = messages.StringField(8)
organizerDisplayName = messages.StringField(9)
class SessionForms(messages.Message):
"""SessionForms -- multiple Session outbound form message"""
items = messages.MessageField(SessionForm, 1, repeated=True)
class SessionQueryForm(messages.Message):
"""SessionQueryForm -- Session query inbound form message"""
field = messages.StringField(1)
operator = messages.StringField(2)
value = messages.StringField(3)
class SessionQueryForms(messages.Message):
"""SessionQueryForms -- multiple SessionQueryForm inbound form message"""
filters = messages.MessageField(SessionQueryForm, 1, repeated=True)
class TypeOfSession(messages.Enum):
"""TypeOfSession -- session type enumeration value"""
NOT_SPECIFIED = 1
LECTURE = 2
KEYNOTE = 3
WORKSHOP = 4
DEMO = 5
SOCIAL = 6
class FeaturedSpeaker(messages.Message):
"""FeaturedSpeaker -- Featured speaker info. outbound message"""
speaker = messages.StringField(1)
sessionNames = messages.StringField(2, repeated=True)
class FeaturedSpeakerList(messages.Message):
"""FeaturedSpeakerList
-- multiple Featured speaker info. outbound message"""
items = messages.MessageField(FeaturedSpeaker, 1, repeated=True)
|
[
"protorpc.messages.StringField",
"google.appengine.ext.ndb.IntegerProperty",
"google.appengine.ext.ndb.StringProperty",
"google.appengine.ext.ndb.DateProperty",
"protorpc.messages.MessageField",
"protorpc.messages.EnumField"
] |
[((153, 173), 'google.appengine.ext.ndb.StringProperty', 'ndb.StringProperty', ([], {}), '()\n', (171, 173), False, 'from google.appengine.ext import ndb\n'), ((185, 218), 'google.appengine.ext.ndb.StringProperty', 'ndb.StringProperty', ([], {'required': '(True)'}), '(required=True)\n', (203, 218), False, 'from google.appengine.ext import ndb\n'), ((236, 269), 'google.appengine.ext.ndb.StringProperty', 'ndb.StringProperty', ([], {'repeated': '(True)'}), '(repeated=True)\n', (254, 269), False, 'from google.appengine.ext import ndb\n'), ((284, 304), 'google.appengine.ext.ndb.StringProperty', 'ndb.StringProperty', ([], {}), '()\n', (302, 304), False, 'from google.appengine.ext import ndb\n'), ((320, 340), 'google.appengine.ext.ndb.StringProperty', 'ndb.StringProperty', ([], {}), '()\n', (338, 340), False, 'from google.appengine.ext import ndb\n'), ((361, 394), 'google.appengine.ext.ndb.StringProperty', 'ndb.StringProperty', ([], {'repeated': '(True)'}), '(repeated=True)\n', (379, 394), False, 'from google.appengine.ext import ndb\n'), ((406, 424), 'google.appengine.ext.ndb.DateProperty', 'ndb.DateProperty', ([], {}), '()\n', (422, 424), False, 'from google.appengine.ext import ndb\n'), ((441, 462), 'google.appengine.ext.ndb.IntegerProperty', 'ndb.IntegerProperty', ([], {}), '()\n', (460, 462), False, 'from google.appengine.ext import ndb\n'), ((491, 511), 'google.appengine.ext.ndb.StringProperty', 'ndb.StringProperty', ([], {}), '()\n', (509, 511), False, 'from google.appengine.ext import ndb\n'), ((628, 651), 'protorpc.messages.StringField', 'messages.StringField', (['(1)'], {}), '(1)\n', (648, 651), False, 'from protorpc import messages\n'), ((663, 686), 'protorpc.messages.StringField', 'messages.StringField', (['(2)'], {}), '(2)\n', (683, 686), False, 'from protorpc import messages\n'), ((704, 742), 'protorpc.messages.StringField', 'messages.StringField', (['(3)'], {'repeated': '(True)'}), '(3, repeated=True)\n', (724, 742), False, 'from protorpc import messages\n'), ((757, 780), 'protorpc.messages.StringField', 'messages.StringField', (['(4)'], {}), '(4)\n', (777, 780), False, 'from protorpc import messages\n'), ((796, 819), 'protorpc.messages.StringField', 'messages.StringField', (['(5)'], {}), '(5)\n', (816, 819), False, 'from protorpc import messages\n'), ((840, 893), 'protorpc.messages.EnumField', 'messages.EnumField', (['"""TypeOfSession"""', '(6)'], {'repeated': '(True)'}), "('TypeOfSession', 6, repeated=True)\n", (858, 893), False, 'from protorpc import messages\n'), ((905, 928), 'protorpc.messages.StringField', 'messages.StringField', (['(7)'], {}), '(7)\n', (925, 928), False, 'from protorpc import messages\n'), ((945, 968), 'protorpc.messages.StringField', 'messages.StringField', (['(8)'], {}), '(8)\n', (965, 968), False, 'from protorpc import messages\n'), ((996, 1019), 'protorpc.messages.StringField', 'messages.StringField', (['(9)'], {}), '(9)\n', (1016, 1019), False, 'from protorpc import messages\n'), ((1137, 1189), 'protorpc.messages.MessageField', 'messages.MessageField', (['SessionForm', '(1)'], {'repeated': '(True)'}), '(SessionForm, 1, repeated=True)\n', (1158, 1189), False, 'from protorpc import messages\n'), ((1311, 1334), 'protorpc.messages.StringField', 'messages.StringField', (['(1)'], {}), '(1)\n', (1331, 1334), False, 'from protorpc import messages\n'), ((1350, 1373), 'protorpc.messages.StringField', 'messages.StringField', (['(2)'], {}), '(2)\n', (1370, 1373), False, 'from protorpc import messages\n'), ((1386, 1409), 'protorpc.messages.StringField', 'messages.StringField', (['(3)'], {}), '(3)\n', (1406, 1409), False, 'from protorpc import messages\n'), ((1547, 1604), 'protorpc.messages.MessageField', 'messages.MessageField', (['SessionQueryForm', '(1)'], {'repeated': '(True)'}), '(SessionQueryForm, 1, repeated=True)\n', (1568, 1604), False, 'from protorpc import messages\n'), ((1926, 1949), 'protorpc.messages.StringField', 'messages.StringField', (['(1)'], {}), '(1)\n', (1946, 1949), False, 'from protorpc import messages\n'), ((1969, 2007), 'protorpc.messages.StringField', 'messages.StringField', (['(2)'], {'repeated': '(True)'}), '(2, repeated=True)\n', (1989, 2007), False, 'from protorpc import messages\n'), ((2158, 2214), 'protorpc.messages.MessageField', 'messages.MessageField', (['FeaturedSpeaker', '(1)'], {'repeated': '(True)'}), '(FeaturedSpeaker, 1, repeated=True)\n', (2179, 2214), False, 'from protorpc import messages\n')]
|
from boc_python_demo import my_sum
def test_my_sum():
assert my_sum(1) == 1
assert my_sum(2) == 2
assert my_sum(3) == 3
assert my_sum(4) == 5
assert my_sum(5) == 8
assert my_sum(6) == 13
|
[
"boc_python_demo.my_sum"
] |
[((67, 76), 'boc_python_demo.my_sum', 'my_sum', (['(1)'], {}), '(1)\n', (73, 76), False, 'from boc_python_demo import my_sum\n'), ((93, 102), 'boc_python_demo.my_sum', 'my_sum', (['(2)'], {}), '(2)\n', (99, 102), False, 'from boc_python_demo import my_sum\n'), ((119, 128), 'boc_python_demo.my_sum', 'my_sum', (['(3)'], {}), '(3)\n', (125, 128), False, 'from boc_python_demo import my_sum\n'), ((145, 154), 'boc_python_demo.my_sum', 'my_sum', (['(4)'], {}), '(4)\n', (151, 154), False, 'from boc_python_demo import my_sum\n'), ((171, 180), 'boc_python_demo.my_sum', 'my_sum', (['(5)'], {}), '(5)\n', (177, 180), False, 'from boc_python_demo import my_sum\n'), ((197, 206), 'boc_python_demo.my_sum', 'my_sum', (['(6)'], {}), '(6)\n', (203, 206), False, 'from boc_python_demo import my_sum\n')]
|
import tensorflow as tf
from capsule.utils import squash
import numpy as np
layers = tf.keras.layers
models = tf.keras.models
class GammaCapsule(tf.keras.Model):
def __init__(self, in_capsules, in_dim, out_capsules, out_dim, stdev=0.2, routing_iterations=2, use_bias=True, name=''):
super(GammaCapsule, self).__init__(name=name)
self.in_capsules = in_capsules
self.in_dim = in_dim
self.out_capsules = out_capsules
self.out_dim = out_dim
self.routing_iterations = routing_iterations
self.use_bias = use_bias
with tf.name_scope(self.name):
w_init = tf.random_normal_initializer(stddev=stdev)
self.W = tf.Variable(name="W", initial_value=w_init(shape=(1, out_capsules, in_capsules, out_dim, in_dim),
dtype='float32'),
trainable=True)
if self.use_bias:
bias_init = tf.constant_initializer(0.1)
self.bias = tf.Variable(name="bias", initial_value=bias_init(shape=(1, out_capsules, out_dim),
dtype='float32'),
trainable=True)
def call(self, u):
"""
param: u - (batch_size, in_caps, in_dim)
"""
batch_size = tf.shape(u)[0]
u_norm = tf.norm(u, axis=-1) # (batch_size, in_caps)
# Reshape u into (batch_size, out_caps, in_caps, out_dim, in_dim)
u = tf.expand_dims(u, 1)
u = tf.expand_dims(u, 3)
u = tf.tile(u, [1, self.out_capsules, 1, 1, 1])
u = tf.tile(u, [1, 1, 1, self.out_dim, 1])
# Duplicate transformation matrix for each batch
w = tf.tile(self.W, [batch_size, 1, 1, 1, 1])
# Dotwise product between u and w to get all votes
# shape = (batch_size, out_caps, in_caps, out_dim)
u_hat = tf.reduce_sum(u * w, axis=-1)
# Ensure that ||u_hat|| <= ||v_i||
u_hat_norm = tf.norm(u_hat, axis=-1, keepdims=True)
u_norm = tf.expand_dims(u_norm, axis=1)
u_norm = tf.expand_dims(u_norm, axis=3)
u_norm = tf.tile(u_norm, [1, self.out_capsules, 1, self.out_dim])
new_u_hat_norm = tf.math.minimum(u_hat_norm, u_norm)
u_hat = u_hat / u_hat_norm * new_u_hat_norm
# Scaled-distance-agreement routing
bias = tf.tile(self.bias, [batch_size, 1, 1])
b_ij = tf.zeros(shape=[batch_size, self.out_capsules, self.in_capsules, 1])
for r in range(self.routing_iterations):
c_ij = tf.nn.softmax(b_ij, axis=1)
c_ij_tiled = tf.tile(c_ij, [1, 1, 1, self.out_dim])
s_j = tf.reduce_sum(c_ij_tiled * u_hat, axis=2) + bias
v_j = squash(s_j)
if(r < self.routing_iterations - 1):
v_j = tf.expand_dims(v_j, 2)
v_j = tf.tile(v_j, [1, 1, self.in_capsules, 1]) # (batch_size, out_caps, in_caps, out_dim)
# Calculate scale factor t
p_p = 0.9
d = tf.norm(v_j - u_hat, axis=-1, keepdims=True)
d_o = tf.reduce_mean(tf.reduce_mean(d))
d_p = d_o * 0.5
t = tf.constant(np.log(p_p * (self.out_capsules - 1)) - np.log(1 - p_p), dtype=tf.float32) \
/ (d_p - d_o + 1e-12)
t = tf.expand_dims(t, axis=-1)
# Calc log prior using inverse distances
b_ij = t * d
return v_j
|
[
"tensorflow.nn.softmax",
"tensorflow.reduce_sum",
"numpy.log",
"tensorflow.constant_initializer",
"tensorflow.reduce_mean",
"tensorflow.tile",
"tensorflow.zeros",
"tensorflow.random_normal_initializer",
"tensorflow.shape",
"capsule.utils.squash",
"tensorflow.name_scope",
"tensorflow.norm",
"tensorflow.math.minimum",
"tensorflow.expand_dims"
] |
[((1410, 1429), 'tensorflow.norm', 'tf.norm', (['u'], {'axis': '(-1)'}), '(u, axis=-1)\n', (1417, 1429), True, 'import tensorflow as tf\n'), ((1545, 1565), 'tensorflow.expand_dims', 'tf.expand_dims', (['u', '(1)'], {}), '(u, 1)\n', (1559, 1565), True, 'import tensorflow as tf\n'), ((1579, 1599), 'tensorflow.expand_dims', 'tf.expand_dims', (['u', '(3)'], {}), '(u, 3)\n', (1593, 1599), True, 'import tensorflow as tf\n'), ((1614, 1657), 'tensorflow.tile', 'tf.tile', (['u', '[1, self.out_capsules, 1, 1, 1]'], {}), '(u, [1, self.out_capsules, 1, 1, 1])\n', (1621, 1657), True, 'import tensorflow as tf\n'), ((1670, 1708), 'tensorflow.tile', 'tf.tile', (['u', '[1, 1, 1, self.out_dim, 1]'], {}), '(u, [1, 1, 1, self.out_dim, 1])\n', (1677, 1708), True, 'import tensorflow as tf\n'), ((1779, 1820), 'tensorflow.tile', 'tf.tile', (['self.W', '[batch_size, 1, 1, 1, 1]'], {}), '(self.W, [batch_size, 1, 1, 1, 1])\n', (1786, 1820), True, 'import tensorflow as tf\n'), ((1956, 1985), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(u * w)'], {'axis': '(-1)'}), '(u * w, axis=-1)\n', (1969, 1985), True, 'import tensorflow as tf\n'), ((2051, 2089), 'tensorflow.norm', 'tf.norm', (['u_hat'], {'axis': '(-1)', 'keepdims': '(True)'}), '(u_hat, axis=-1, keepdims=True)\n', (2058, 2089), True, 'import tensorflow as tf\n'), ((2107, 2137), 'tensorflow.expand_dims', 'tf.expand_dims', (['u_norm'], {'axis': '(1)'}), '(u_norm, axis=1)\n', (2121, 2137), True, 'import tensorflow as tf\n'), ((2155, 2185), 'tensorflow.expand_dims', 'tf.expand_dims', (['u_norm'], {'axis': '(3)'}), '(u_norm, axis=3)\n', (2169, 2185), True, 'import tensorflow as tf\n'), ((2203, 2259), 'tensorflow.tile', 'tf.tile', (['u_norm', '[1, self.out_capsules, 1, self.out_dim]'], {}), '(u_norm, [1, self.out_capsules, 1, self.out_dim])\n', (2210, 2259), True, 'import tensorflow as tf\n'), ((2285, 2320), 'tensorflow.math.minimum', 'tf.math.minimum', (['u_hat_norm', 'u_norm'], {}), '(u_hat_norm, u_norm)\n', (2300, 2320), True, 'import tensorflow as tf\n'), ((2433, 2471), 'tensorflow.tile', 'tf.tile', (['self.bias', '[batch_size, 1, 1]'], {}), '(self.bias, [batch_size, 1, 1])\n', (2440, 2471), True, 'import tensorflow as tf\n'), ((2487, 2555), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[batch_size, self.out_capsules, self.in_capsules, 1]'}), '(shape=[batch_size, self.out_capsules, self.in_capsules, 1])\n', (2495, 2555), True, 'import tensorflow as tf\n'), ((587, 611), 'tensorflow.name_scope', 'tf.name_scope', (['self.name'], {}), '(self.name)\n', (600, 611), True, 'import tensorflow as tf\n'), ((634, 676), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'stdev'}), '(stddev=stdev)\n', (662, 676), True, 'import tensorflow as tf\n'), ((1378, 1389), 'tensorflow.shape', 'tf.shape', (['u'], {}), '(u)\n', (1386, 1389), True, 'import tensorflow as tf\n'), ((2624, 2651), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['b_ij'], {'axis': '(1)'}), '(b_ij, axis=1)\n', (2637, 2651), True, 'import tensorflow as tf\n'), ((2677, 2715), 'tensorflow.tile', 'tf.tile', (['c_ij', '[1, 1, 1, self.out_dim]'], {}), '(c_ij, [1, 1, 1, self.out_dim])\n', (2684, 2715), True, 'import tensorflow as tf\n'), ((2801, 2812), 'capsule.utils.squash', 'squash', (['s_j'], {}), '(s_j)\n', (2807, 2812), False, 'from capsule.utils import squash\n'), ((981, 1009), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.1)'], {}), '(0.1)\n', (1004, 1009), True, 'import tensorflow as tf\n'), ((2734, 2775), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(c_ij_tiled * u_hat)'], {'axis': '(2)'}), '(c_ij_tiled * u_hat, axis=2)\n', (2747, 2775), True, 'import tensorflow as tf\n'), ((2885, 2907), 'tensorflow.expand_dims', 'tf.expand_dims', (['v_j', '(2)'], {}), '(v_j, 2)\n', (2899, 2907), True, 'import tensorflow as tf\n'), ((2930, 2971), 'tensorflow.tile', 'tf.tile', (['v_j', '[1, 1, self.in_capsules, 1]'], {}), '(v_j, [1, 1, self.in_capsules, 1])\n', (2937, 2971), True, 'import tensorflow as tf\n'), ((3121, 3165), 'tensorflow.norm', 'tf.norm', (['(v_j - u_hat)'], {'axis': '(-1)', 'keepdims': '(True)'}), '(v_j - u_hat, axis=-1, keepdims=True)\n', (3128, 3165), True, 'import tensorflow as tf\n'), ((3426, 3452), 'tensorflow.expand_dims', 'tf.expand_dims', (['t'], {'axis': '(-1)'}), '(t, axis=-1)\n', (3440, 3452), True, 'import tensorflow as tf\n'), ((3203, 3220), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['d'], {}), '(d)\n', (3217, 3220), True, 'import tensorflow as tf\n'), ((3286, 3323), 'numpy.log', 'np.log', (['(p_p * (self.out_capsules - 1))'], {}), '(p_p * (self.out_capsules - 1))\n', (3292, 3323), True, 'import numpy as np\n'), ((3326, 3341), 'numpy.log', 'np.log', (['(1 - p_p)'], {}), '(1 - p_p)\n', (3332, 3341), True, 'import numpy as np\n')]
|
"""
store the current version info of the server.
"""
from jupyter_packaging import get_version_info
# Version string must appear intact for tbump versioning
__version__ = '1.6.2'
version_info = get_version_info(__version__)
|
[
"jupyter_packaging.get_version_info"
] |
[((197, 226), 'jupyter_packaging.get_version_info', 'get_version_info', (['__version__'], {}), '(__version__)\n', (213, 226), False, 'from jupyter_packaging import get_version_info\n')]
|
import pygame, math, time
from enum import Enum
class WeaponType(Enum):
MELEE = 1
LOADABLE = 2
DOUBLE_SHOT = 3
# bazuka, granat, paluch, strzelba
class Weapon(object):
def __init__(self, team, battle, game):
self.team = team
self.owner = team.get_selected_worm()
self.force = 0
self.ammo = -1
self.gravity = game.gravity
self.type = WeaponType.LOADABLE
self.shooting = False
self.bullet_x = int(self.owner.x + int(self.owner.face_right) * self.owner.worm_size[0])
self.bullet_y = int(self.owner.y + self.owner.worm_size[1]/2)
self.bullet_current_y = 0
self.t_init_shot = 0
self.t_shot = 10e8
self.bullet_v_vertical = 0
self.bullet_v_horizontal = 0
self.battle = battle
def set_current_owner(self):
"""
Sets current owner of the weapon.
"""
self.owner = self.team.get_selected_worm()
self.bullet_x = int(self.owner.x + self.owner.worm_size[0]/2)
self.bullet_y = int(self.owner.y + self.owner.worm_size[1]/2)
def draw(self, screen):
"""
Draws bullet and rectangle of shot force.
"""
if self.type == WeaponType.LOADABLE:
if self.shooting == False and self.force != 0:
pygame.draw.rect(screen, (255,255,255), pygame.Rect(1100, 650, 70, 20), 1)
pygame.draw.rect(screen, (255,255,255), pygame.Rect(1100, 650, int(self.force), 20))
if self.shooting == True:
pygame.draw.circle(screen, (200,20,0), (int(self.bullet_x), int(self.bullet_y)), 4)
def action(self, key, key_event_type):
"""
Makes alien shoot if Spacebar is pressed.
"""
if key == pygame.K_SPACE and self.type == WeaponType.LOADABLE:
self.force += 0.1
if self.force >= 70 or key_event_type == pygame.KEYUP:
self.__shoot()
# if self.battle.get_preparation() == False and self.battle.get_time() <=1:
# self.__shoot()
# self.force = 0
if self.battle.sound:
shooting_sound = pygame.mixer.Sound("shoot.wav")
shooting_sound.play()
self.check_preparation()
def check_preparation(self):
"""
Starts new round if alien starts to shoot during preparation time.
"""
if self.battle.get_preparation() and self.shooting:
self.battle.next_round()
def update(self):
"""
Updates bullet coordinates.
"""
self.__update_bullet_position()
def __shoot(self):
if self.type == WeaponType.LOADABLE and self.shooting == False :
self.shooting = True
self.bullet_x = int(self.owner.x + self.owner.worm_size[0]/2)
self.bullet_y = int(self.owner.y + self.owner.worm_size[1]/2)
self.bullet_current_y = self.bullet_y + self.owner.worm_size[1]/2
self.t_init_shot = time.clock()
self.t_shot = self.t_init_shot
angle_radians = math.radians(self.owner.angle)
self.bullet_v_vertical = 20*self.force*math.sin(angle_radians)
if self.owner.face_right:
self.bullet_v_horizontal = 20*self.force*math.cos(angle_radians)
if self.owner.face_right == False:
self.bullet_v_horizontal = -20*self.force*math.cos(angle_radians)
self.force = 0
def __update_bullet_position(self):
"""
Sets bullet coordinates.
Checks if any alien is hit.
Removes alien when its hp = 0.
"""
if self.shooting:
delta = 1/300
if(time.clock()-self.t_shot >= delta):
self.t_shot = time.clock()
v_vert = self.bullet_v_vertical - (self.t_shot - self.t_init_shot) * self.gravity
self.bullet_x = self.bullet_x + self.bullet_v_horizontal*delta
self.bullet_y = self.bullet_y - v_vert*delta
if self.bullet_y >= 700:
self.shooting = False
for worm in self.battle.get_all_worms():
if (worm != self.owner and worm.is_alive and
int(self.bullet_x) in range(int(worm.x), int(worm.x)+worm.worm_size[0]) and int(self.bullet_y) in range(int(worm.y), int(worm.y)+worm.worm_size[1])):
self.shooting = False
worm.update_hp(50)
if worm.is_alive == False:
for team in self.battle.teams:
if worm in team.worms:
team.worms.remove(worm)
if len(team.worms) == 0:
self.battle.teams.remove(team)
if len(self.battle.teams) == 1:
self.battle.show = False
self.battle.end_show = True
|
[
"math.radians",
"pygame.Rect",
"math.sin",
"time.clock",
"math.cos",
"pygame.mixer.Sound"
] |
[((3048, 3060), 'time.clock', 'time.clock', ([], {}), '()\n', (3058, 3060), False, 'import pygame, math, time\n'), ((3132, 3162), 'math.radians', 'math.radians', (['self.owner.angle'], {}), '(self.owner.angle)\n', (3144, 3162), False, 'import pygame, math, time\n'), ((3214, 3237), 'math.sin', 'math.sin', (['angle_radians'], {}), '(angle_radians)\n', (3222, 3237), False, 'import pygame, math, time\n'), ((3815, 3827), 'time.clock', 'time.clock', ([], {}), '()\n', (3825, 3827), False, 'import pygame, math, time\n'), ((1368, 1398), 'pygame.Rect', 'pygame.Rect', (['(1100)', '(650)', '(70)', '(20)'], {}), '(1100, 650, 70, 20)\n', (1379, 1398), False, 'import pygame, math, time\n'), ((2188, 2219), 'pygame.mixer.Sound', 'pygame.mixer.Sound', (['"""shoot.wav"""'], {}), "('shoot.wav')\n", (2206, 2219), False, 'import pygame, math, time\n'), ((3333, 3356), 'math.cos', 'math.cos', (['angle_radians'], {}), '(angle_radians)\n', (3341, 3356), False, 'import pygame, math, time\n'), ((3462, 3485), 'math.cos', 'math.cos', (['angle_radians'], {}), '(angle_radians)\n', (3470, 3485), False, 'import pygame, math, time\n'), ((3749, 3761), 'time.clock', 'time.clock', ([], {}), '()\n', (3759, 3761), False, 'import pygame, math, time\n')]
|
import asyncio
import ssl
import aiohttp
# if sys.version_info >= (3, 5):
# EventLoopType = t.Union[asyncio.BaseEventLoop, asyncio.AbstractEventLoop]
# else:
# EventLoopType = asyncio.AbstractEventLoop
def get_or_create_event_loop() -> asyncio.AbstractEventLoop:
try:
loop = asyncio.get_event_loop()
return loop
except (RuntimeError, AssertionError):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
return loop
# noinspection PyUnresolvedReferences
class TCPConnectorMixIn:
# noinspection PyUnresolvedReferences
def get_tcp_connector(self) -> aiohttp.TCPConnector:
if not self._connector_owner:
return self._tcp_connector
# return valid connector
if self._tcp_connector and not self._tcp_connector.closed:
return self._tcp_connector
# create ssl context if no valid connector is present
_ssl = ssl.create_default_context(cafile=self.cafile)
# memoize tcp_connector for reuse
# noinspection PyAttributeOutsideInit
self._tcp_connector = aiohttp.TCPConnector(
loop=self.loop,
ssl=_ssl,
keepalive_timeout=self.keepalive_timeout,
)
return self._tcp_connector
def __del__(self):
"""
Properly close owned connector on exit
:return:
"""
if self._connector_owner:
connector = self.get_tcp_connector()
not connector.closed and connector.close()
IMPORT_EXCEPTION_NAMES = ['ImportError', 'ImproperlyConfigured', 'ModuleNotFoundError']
|
[
"asyncio.get_event_loop",
"asyncio.set_event_loop",
"ssl.create_default_context",
"aiohttp.TCPConnector",
"asyncio.new_event_loop"
] |
[((299, 323), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (321, 323), False, 'import asyncio\n'), ((943, 989), 'ssl.create_default_context', 'ssl.create_default_context', ([], {'cafile': 'self.cafile'}), '(cafile=self.cafile)\n', (969, 989), False, 'import ssl\n'), ((1109, 1202), 'aiohttp.TCPConnector', 'aiohttp.TCPConnector', ([], {'loop': 'self.loop', 'ssl': '_ssl', 'keepalive_timeout': 'self.keepalive_timeout'}), '(loop=self.loop, ssl=_ssl, keepalive_timeout=self.\n keepalive_timeout)\n', (1129, 1202), False, 'import aiohttp\n'), ((402, 426), 'asyncio.new_event_loop', 'asyncio.new_event_loop', ([], {}), '()\n', (424, 426), False, 'import asyncio\n'), ((435, 463), 'asyncio.set_event_loop', 'asyncio.set_event_loop', (['loop'], {}), '(loop)\n', (457, 463), False, 'import asyncio\n')]
|
'''
This is the central location for driving the other modules. It should primarily
contain seasons and SCVL specific location.
'''
import facility
from optimizer import make_schedule, save_schedules
from optimizer import make_round_robin_game, get_default_potential_sch_loc
import datetime
from facility import SCVL_Facility_Day
from facility import Facility
from pprint import pprint
def make_round_robin_schedule(sch_template_path, team_counts):
canned_path = 'test/scratch/'
total_schedules = 1 # 12000
summary, schedules = make_round_robin_game(team_counts, sch_template_path, total_schedules)
choosing_a_winner = True
if choosing_a_winner:
preferred_winner = 'min hour and no-sit'
sch = summary[preferred_winner]['sch']
print(sch.get_audit_text())
current_sum = summary[preferred_winner]
make_final_report = True
if make_final_report:
file_path = 'scratch/{}_round_robin_sch.csv'.format(datetime.date.today())
sch.gen_csv(file_path)
print(sch.get_team_round_robin_audit())
print('''\n\n\n\nThe final schedule has these properties:
{}
was seed {} and looks like this:
{}'''.format(current_sum['team_sit_report'], current_sum['seed'], sch))
def make_2018_spring_round_robin_schedule():
dir_name = '2018-1-spring'
file_name = 'round_robin_input_template_maker_2018_1_spring.csv - machine_version.csv'
sch_template_path = 'inputs/{}/{}'.format(dir_name, file_name)
team_counts = [6, 10, 13, 11, 4]
make_round_robin_schedule(sch_template_path, team_counts)
if __name__ == '__main__':
make_2018_spring_round_robin_schedule()
#make_regular_season_fall_2016()
#make_regular_season_spring_2017()
|
[
"datetime.date.today",
"optimizer.make_round_robin_game"
] |
[((543, 613), 'optimizer.make_round_robin_game', 'make_round_robin_game', (['team_counts', 'sch_template_path', 'total_schedules'], {}), '(team_counts, sch_template_path, total_schedules)\n', (564, 613), False, 'from optimizer import make_round_robin_game, get_default_potential_sch_loc\n'), ((976, 997), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (995, 997), False, 'import datetime\n')]
|
# code-checked
# server-checked
import cv2
import numpy as np
import os
import os.path as osp
import random
import torch
from torch.utils import data
import pickle
def generate_scale_label(image, label):
f_scale = 0.5 + random.randint(0, 16)/10.0
image = cv2.resize(image, None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_LINEAR)
label = cv2.resize(label, None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_NEAREST)
return image, label
def id2trainId(label, id_to_trainid):
label_copy = label.copy()
for k, v in id_to_trainid.items():
label_copy[label == k] = v
return label_copy
################################################################################
# Cityscapes
################################################################################
class DatasetCityscapesAugmentation(data.Dataset):
def __init__(self, root, list_path, max_iters=None, crop_size=(512, 512), ignore_label=255):
self.root = root
self.list_path = list_path
self.crop_h, self.crop_w = crop_size
self.ignore_label = ignore_label
self.img_ids = [i_id.strip().split() for i_id in open(list_path)]
print ("DatasetCityscapesAugmentation - num unique examples: %d" % len(self.img_ids))
if not max_iters==None:
self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids)))
print ("DatasetCityscapesAugmentation - num examples: %d" % len(self.img_ids))
self.files = []
for item in self.img_ids:
image_path, label_path = item
name = osp.splitext(osp.basename(label_path))[0]
img_file = osp.join(self.root, image_path)
label_file = osp.join(self.root, label_path)
self.files.append({
"img": img_file,
"label": label_file,
"name": name,
"weight": 1
})
self.id_to_trainid = {-1: ignore_label, 0: ignore_label, 1: ignore_label, 2: ignore_label,
3: ignore_label, 4: ignore_label, 5: ignore_label, 6: ignore_label,
7: 0, 8: 1, 9: ignore_label, 10: ignore_label, 11: 2, 12: 3, 13: 4,
14: ignore_label, 15: ignore_label, 16: ignore_label, 17: 5,
18: ignore_label, 19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11, 25: 12, 26: 13, 27: 14,
28: 15, 29: ignore_label, 30: ignore_label, 31: 16, 32: 17, 33: 18}
def __len__(self):
return len(self.files)
def __getitem__(self, index):
datafiles = self.files[index]
image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
label = cv2.imread(datafiles["label"], cv2.IMREAD_GRAYSCALE)
label = id2trainId(label, self.id_to_trainid)
size = image.shape
name = datafiles["name"]
image, label = generate_scale_label(image, label)
image = np.asarray(image, np.float32)
mean = (102.9801, 115.9465, 122.7717)
image = image[:,:,::-1]
image -= mean
img_h, img_w = label.shape
pad_h = max(self.crop_h - img_h, 0)
pad_w = max(self.crop_w - img_w, 0)
if pad_h > 0 or pad_w > 0:
img_pad = cv2.copyMakeBorder(image, 0, pad_h, 0,
pad_w, cv2.BORDER_CONSTANT,
value=(0.0, 0.0, 0.0))
label_pad = cv2.copyMakeBorder(label, 0, pad_h, 0,
pad_w, cv2.BORDER_CONSTANT,
value=(self.ignore_label,))
else:
img_pad, label_pad = image, label
img_h, img_w = label_pad.shape
h_off = random.randint(0, img_h - self.crop_h)
w_off = random.randint(0, img_w - self.crop_w)
image = np.asarray(img_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
label = np.asarray(label_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
image = image.transpose((2, 0, 1))
flip = np.random.choice(2)*2 - 1
image = image[:, :, ::flip]
label = label[:, ::flip]
return image.copy(), label.copy(), np.array(size), name
class DatasetCityscapesEval(data.Dataset):
def __init__(self, root, list_path, ignore_label=255):
self.root = root
self.list_path = list_path
self.ignore_label = ignore_label
self.img_ids = [i_id.strip().split() for i_id in open(list_path)]
print ("DatasetCityscapesEval - num examples: %d" % len(self.img_ids))
self.files = []
for item in self.img_ids:
image_path, label_path = item
name = osp.splitext(osp.basename(label_path))[0]
img_file = osp.join(self.root, image_path)
label_file = osp.join(self.root, label_path)
self.files.append({
"img": img_file,
"label": label_file,
"name": name,
"weight": 1
})
self.id_to_trainid = {-1: ignore_label, 0: ignore_label, 1: ignore_label, 2: ignore_label,
3: ignore_label, 4: ignore_label, 5: ignore_label, 6: ignore_label,
7: 0, 8: 1, 9: ignore_label, 10: ignore_label, 11: 2, 12: 3, 13: 4,
14: ignore_label, 15: ignore_label, 16: ignore_label, 17: 5,
18: ignore_label, 19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11, 25: 12, 26: 13, 27: 14,
28: 15, 29: ignore_label, 30: ignore_label, 31: 16, 32: 17, 33: 18}
def __len__(self):
return len(self.files)
def __getitem__(self, index):
datafiles = self.files[index]
image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
label = cv2.imread(datafiles["label"], cv2.IMREAD_GRAYSCALE)
if not os.path.exists(datafiles["img"]): # (26 out of 25000 images are missing)
return self.__getitem__(0)
label = id2trainId(label, self.id_to_trainid)
size = image.shape
name = datafiles["name"]
image = np.asarray(image, np.float32)
mean = (102.9801, 115.9465, 122.7717)
image = image[:,:,::-1]
image -= mean
image = image.transpose((2, 0, 1))
return image.copy(), label.copy(), np.array(size), name
class DatasetCityscapesEvalSeq(data.Dataset):
def __init__(self, data_path, sequence="00"):
self.data_path = data_path
self.img_dir = self.data_path + "/leftImg8bit/demoVideo/stuttgart_" + sequence + "/"
self.examples = []
file_names = os.listdir(self.img_dir)
for file_name in file_names:
img_id = file_name.split("_leftImg8bit.png")[0]
img_path = self.img_dir + file_name
example = {}
example["img_path"] = img_path
example["img_id"] = img_id
self.examples.append(example)
self.num_examples = len(self.examples)
print ("DatasetCityscapesEvalSeq - num examples: %d" % self.num_examples)
def __len__(self):
return len(self.examples)
def __getitem__(self, index):
datafiles = self.examples[index]
image = cv2.imread(datafiles["img_path"], cv2.IMREAD_COLOR)
size = image.shape
name = datafiles["img_id"]
image = np.asarray(image, np.float32)
mean = (102.9801, 115.9465, 122.7717)
image = image[:,:,::-1]
image -= mean
image = image.transpose((2, 0, 1))
return image.copy(), np.array(size), name
################################################################################
# Synscapes
################################################################################
class DatasetSynscapesAugmentation(data.Dataset):
def __init__(self, root, root_meta, type="train", max_iters=None, crop_size=(512, 512), ignore_label=255):
self.root = root
self.root_meta = root_meta
self.crop_h, self.crop_w = crop_size
self.ignore_label = ignore_label
if type == "train":
with open(root_meta + "/train_img_ids.pkl", "rb") as file: # (needed for python3)
self.img_ids = pickle.load(file)
elif type == "val":
with open(root_meta + "/val_img_ids.pkl", "rb") as file: # (needed for python3)
self.img_ids = pickle.load(file)
else:
raise Exception("type must be either 'train' or 'val'!")
print ("DatasetSynscapesAugmentation - num unique examples: %d" % len(self.img_ids))
if not max_iters==None:
self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids)))
print ("DatasetSynscapesAugmentation - num examples: %d" % len(self.img_ids))
self.files = []
for img_id in self.img_ids:
self.files.append({
"img": self.root + "/img/rgb-2k/" + img_id + ".png",
"label": self.root_meta + "/gtFine/" + img_id + ".png",
"name": img_id,
"weight": 1
})
self.id_to_trainid = {-1: ignore_label, 0: ignore_label, 1: ignore_label, 2: ignore_label,
3: ignore_label, 4: ignore_label, 5: ignore_label, 6: ignore_label,
7: 0, 8: 1, 9: ignore_label, 10: ignore_label, 11: 2, 12: 3, 13: 4,
14: ignore_label, 15: ignore_label, 16: ignore_label, 17: 5,
18: ignore_label, 19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11, 25: 12, 26: 13, 27: 14,
28: 15, 29: ignore_label, 30: ignore_label, 31: 16, 32: 17, 33: 18}
def __len__(self):
return len(self.files)
def __getitem__(self, index):
datafiles = self.files[index]
image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
label = cv2.imread(datafiles["label"], cv2.IMREAD_GRAYSCALE)
if not os.path.exists(datafiles["img"]): # (26 out of 25000 images are missing)
return self.__getitem__(0)
label = id2trainId(label, self.id_to_trainid)
size = image.shape
name = datafiles["name"]
image, label = generate_scale_label(image, label)
image = np.asarray(image, np.float32)
mean = (102.9801, 115.9465, 122.7717)
image = image[:,:,::-1]
image -= mean
img_h, img_w = label.shape
pad_h = max(self.crop_h - img_h, 0)
pad_w = max(self.crop_w - img_w, 0)
if pad_h > 0 or pad_w > 0:
img_pad = cv2.copyMakeBorder(image, 0, pad_h, 0,
pad_w, cv2.BORDER_CONSTANT,
value=(0.0, 0.0, 0.0))
label_pad = cv2.copyMakeBorder(label, 0, pad_h, 0,
pad_w, cv2.BORDER_CONSTANT,
value=(self.ignore_label,))
else:
img_pad, label_pad = image, label
img_h, img_w = label_pad.shape
h_off = random.randint(0, img_h - self.crop_h)
w_off = random.randint(0, img_w - self.crop_w)
image = np.asarray(img_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
label = np.asarray(label_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
image = image.transpose((2, 0, 1))
flip = np.random.choice(2)*2 - 1
image = image[:, :, ::flip]
label = label[:, ::flip]
return image.copy(), label.copy(), np.array(size), name
class DatasetSynscapesEval(data.Dataset):
def __init__(self, root, root_meta, type="val", ignore_label=255):
self.root = root
self.root_meta = root_meta
self.ignore_label = ignore_label
if type == "train":
with open(root_meta + "/train_img_ids.pkl", "rb") as file: # (needed for python3)
self.img_ids = pickle.load(file)
elif type == "val":
with open(root_meta + "/val_img_ids.pkl", "rb") as file: # (needed for python3)
self.img_ids = pickle.load(file)
else:
raise Exception("type must be either 'train' or 'val'!")
print ("DatasetSynscapesEval - num examples: %d" % len(self.img_ids))
self.files = []
for img_id in self.img_ids:
self.files.append({
"img": self.root + "/img/rgb-2k/" + img_id + ".png",
"label": self.root_meta + "/gtFine/" + img_id + ".png",
"name": img_id,
"weight": 1
})
self.id_to_trainid = {-1: ignore_label, 0: ignore_label, 1: ignore_label, 2: ignore_label,
3: ignore_label, 4: ignore_label, 5: ignore_label, 6: ignore_label,
7: 0, 8: 1, 9: ignore_label, 10: ignore_label, 11: 2, 12: 3, 13: 4,
14: ignore_label, 15: ignore_label, 16: ignore_label, 17: 5,
18: ignore_label, 19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11, 25: 12, 26: 13, 27: 14,
28: 15, 29: ignore_label, 30: ignore_label, 31: 16, 32: 17, 33: 18}
def __len__(self):
return len(self.files)
def __getitem__(self, index):
datafiles = self.files[index]
image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
label = cv2.imread(datafiles["label"], cv2.IMREAD_GRAYSCALE)
if not os.path.exists(datafiles["img"]): # (26 out of 25000 images are missing)
return self.__getitem__(0)
label = id2trainId(label, self.id_to_trainid)
size = image.shape
name = datafiles["name"]
image = np.asarray(image, np.float32)
mean = (102.9801, 115.9465, 122.7717)
image = image[:,:,::-1]
image -= mean
image = image.transpose((2, 0, 1))
return image.copy(), label.copy(), np.array(size), name
|
[
"random.randint",
"os.path.basename",
"numpy.asarray",
"cv2.copyMakeBorder",
"os.path.exists",
"cv2.imread",
"pickle.load",
"numpy.array",
"numpy.random.choice",
"os.path.join",
"os.listdir",
"cv2.resize"
] |
[((266, 345), 'cv2.resize', 'cv2.resize', (['image', 'None'], {'fx': 'f_scale', 'fy': 'f_scale', 'interpolation': 'cv2.INTER_LINEAR'}), '(image, None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_LINEAR)\n', (276, 345), False, 'import cv2\n'), ((358, 443), 'cv2.resize', 'cv2.resize', (['label', 'None'], {'fx': 'f_scale', 'fy': 'f_scale', 'interpolation': 'cv2.INTER_NEAREST'}), '(label, None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_NEAREST\n )\n', (368, 443), False, 'import cv2\n'), ((2677, 2723), 'cv2.imread', 'cv2.imread', (["datafiles['img']", 'cv2.IMREAD_COLOR'], {}), "(datafiles['img'], cv2.IMREAD_COLOR)\n", (2687, 2723), False, 'import cv2\n'), ((2740, 2792), 'cv2.imread', 'cv2.imread', (["datafiles['label']", 'cv2.IMREAD_GRAYSCALE'], {}), "(datafiles['label'], cv2.IMREAD_GRAYSCALE)\n", (2750, 2792), False, 'import cv2\n'), ((2983, 3012), 'numpy.asarray', 'np.asarray', (['image', 'np.float32'], {}), '(image, np.float32)\n', (2993, 3012), True, 'import numpy as np\n'), ((3684, 3722), 'random.randint', 'random.randint', (['(0)', '(img_h - self.crop_h)'], {}), '(0, img_h - self.crop_h)\n', (3698, 3722), False, 'import random\n'), ((3739, 3777), 'random.randint', 'random.randint', (['(0)', '(img_w - self.crop_w)'], {}), '(0, img_w - self.crop_w)\n', (3753, 3777), False, 'import random\n'), ((3794, 3883), 'numpy.asarray', 'np.asarray', (['img_pad[h_off:h_off + self.crop_h, w_off:w_off + self.crop_w]', 'np.float32'], {}), '(img_pad[h_off:h_off + self.crop_h, w_off:w_off + self.crop_w],\n np.float32)\n', (3804, 3883), True, 'import numpy as np\n'), ((3896, 3987), 'numpy.asarray', 'np.asarray', (['label_pad[h_off:h_off + self.crop_h, w_off:w_off + self.crop_w]', 'np.float32'], {}), '(label_pad[h_off:h_off + self.crop_h, w_off:w_off + self.crop_w],\n np.float32)\n', (3906, 3987), True, 'import numpy as np\n'), ((5755, 5801), 'cv2.imread', 'cv2.imread', (["datafiles['img']", 'cv2.IMREAD_COLOR'], {}), "(datafiles['img'], cv2.IMREAD_COLOR)\n", (5765, 5801), False, 'import cv2\n'), ((5818, 5870), 'cv2.imread', 'cv2.imread', (["datafiles['label']", 'cv2.IMREAD_GRAYSCALE'], {}), "(datafiles['label'], cv2.IMREAD_GRAYSCALE)\n", (5828, 5870), False, 'import cv2\n'), ((6132, 6161), 'numpy.asarray', 'np.asarray', (['image', 'np.float32'], {}), '(image, np.float32)\n', (6142, 6161), True, 'import numpy as np\n'), ((6648, 6672), 'os.listdir', 'os.listdir', (['self.img_dir'], {}), '(self.img_dir)\n', (6658, 6672), False, 'import os\n'), ((7249, 7300), 'cv2.imread', 'cv2.imread', (["datafiles['img_path']", 'cv2.IMREAD_COLOR'], {}), "(datafiles['img_path'], cv2.IMREAD_COLOR)\n", (7259, 7300), False, 'import cv2\n'), ((7379, 7408), 'numpy.asarray', 'np.asarray', (['image', 'np.float32'], {}), '(image, np.float32)\n', (7389, 7408), True, 'import numpy as np\n'), ((9879, 9925), 'cv2.imread', 'cv2.imread', (["datafiles['img']", 'cv2.IMREAD_COLOR'], {}), "(datafiles['img'], cv2.IMREAD_COLOR)\n", (9889, 9925), False, 'import cv2\n'), ((9942, 9994), 'cv2.imread', 'cv2.imread', (["datafiles['label']", 'cv2.IMREAD_GRAYSCALE'], {}), "(datafiles['label'], cv2.IMREAD_GRAYSCALE)\n", (9952, 9994), False, 'import cv2\n'), ((10313, 10342), 'numpy.asarray', 'np.asarray', (['image', 'np.float32'], {}), '(image, np.float32)\n', (10323, 10342), True, 'import numpy as np\n'), ((11014, 11052), 'random.randint', 'random.randint', (['(0)', '(img_h - self.crop_h)'], {}), '(0, img_h - self.crop_h)\n', (11028, 11052), False, 'import random\n'), ((11069, 11107), 'random.randint', 'random.randint', (['(0)', '(img_w - self.crop_w)'], {}), '(0, img_w - self.crop_w)\n', (11083, 11107), False, 'import random\n'), ((11124, 11213), 'numpy.asarray', 'np.asarray', (['img_pad[h_off:h_off + self.crop_h, w_off:w_off + self.crop_w]', 'np.float32'], {}), '(img_pad[h_off:h_off + self.crop_h, w_off:w_off + self.crop_w],\n np.float32)\n', (11134, 11213), True, 'import numpy as np\n'), ((11226, 11317), 'numpy.asarray', 'np.asarray', (['label_pad[h_off:h_off + self.crop_h, w_off:w_off + self.crop_w]', 'np.float32'], {}), '(label_pad[h_off:h_off + self.crop_h, w_off:w_off + self.crop_w],\n np.float32)\n', (11236, 11317), True, 'import numpy as np\n'), ((13305, 13351), 'cv2.imread', 'cv2.imread', (["datafiles['img']", 'cv2.IMREAD_COLOR'], {}), "(datafiles['img'], cv2.IMREAD_COLOR)\n", (13315, 13351), False, 'import cv2\n'), ((13368, 13420), 'cv2.imread', 'cv2.imread', (["datafiles['label']", 'cv2.IMREAD_GRAYSCALE'], {}), "(datafiles['label'], cv2.IMREAD_GRAYSCALE)\n", (13378, 13420), False, 'import cv2\n'), ((13682, 13711), 'numpy.asarray', 'np.asarray', (['image', 'np.float32'], {}), '(image, np.float32)\n', (13692, 13711), True, 'import numpy as np\n'), ((227, 248), 'random.randint', 'random.randint', (['(0)', '(16)'], {}), '(0, 16)\n', (241, 248), False, 'import random\n'), ((1668, 1699), 'os.path.join', 'osp.join', (['self.root', 'image_path'], {}), '(self.root, image_path)\n', (1676, 1699), True, 'import os.path as osp\n'), ((1725, 1756), 'os.path.join', 'osp.join', (['self.root', 'label_path'], {}), '(self.root, label_path)\n', (1733, 1756), True, 'import os.path as osp\n'), ((3295, 3389), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['image', '(0)', 'pad_h', '(0)', 'pad_w', 'cv2.BORDER_CONSTANT'], {'value': '(0.0, 0.0, 0.0)'}), '(image, 0, pad_h, 0, pad_w, cv2.BORDER_CONSTANT, value=(\n 0.0, 0.0, 0.0))\n', (3313, 3389), False, 'import cv2\n'), ((3441, 3540), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['label', '(0)', 'pad_h', '(0)', 'pad_w', 'cv2.BORDER_CONSTANT'], {'value': '(self.ignore_label,)'}), '(label, 0, pad_h, 0, pad_w, cv2.BORDER_CONSTANT, value=(\n self.ignore_label,))\n', (3459, 3540), False, 'import cv2\n'), ((4182, 4196), 'numpy.array', 'np.array', (['size'], {}), '(size)\n', (4190, 4196), True, 'import numpy as np\n'), ((4746, 4777), 'os.path.join', 'osp.join', (['self.root', 'image_path'], {}), '(self.root, image_path)\n', (4754, 4777), True, 'import os.path as osp\n'), ((4803, 4834), 'os.path.join', 'osp.join', (['self.root', 'label_path'], {}), '(self.root, label_path)\n', (4811, 4834), True, 'import os.path as osp\n'), ((5887, 5919), 'os.path.exists', 'os.path.exists', (["datafiles['img']"], {}), "(datafiles['img'])\n", (5901, 5919), False, 'import os\n'), ((6351, 6365), 'numpy.array', 'np.array', (['size'], {}), '(size)\n', (6359, 6365), True, 'import numpy as np\n'), ((7584, 7598), 'numpy.array', 'np.array', (['size'], {}), '(size)\n', (7592, 7598), True, 'import numpy as np\n'), ((10011, 10043), 'os.path.exists', 'os.path.exists', (["datafiles['img']"], {}), "(datafiles['img'])\n", (10025, 10043), False, 'import os\n'), ((10625, 10719), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['image', '(0)', 'pad_h', '(0)', 'pad_w', 'cv2.BORDER_CONSTANT'], {'value': '(0.0, 0.0, 0.0)'}), '(image, 0, pad_h, 0, pad_w, cv2.BORDER_CONSTANT, value=(\n 0.0, 0.0, 0.0))\n', (10643, 10719), False, 'import cv2\n'), ((10771, 10870), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['label', '(0)', 'pad_h', '(0)', 'pad_w', 'cv2.BORDER_CONSTANT'], {'value': '(self.ignore_label,)'}), '(label, 0, pad_h, 0, pad_w, cv2.BORDER_CONSTANT, value=(\n self.ignore_label,))\n', (10789, 10870), False, 'import cv2\n'), ((11512, 11526), 'numpy.array', 'np.array', (['size'], {}), '(size)\n', (11520, 11526), True, 'import numpy as np\n'), ((13437, 13469), 'os.path.exists', 'os.path.exists', (["datafiles['img']"], {}), "(datafiles['img'])\n", (13451, 13469), False, 'import os\n'), ((13901, 13915), 'numpy.array', 'np.array', (['size'], {}), '(size)\n', (13909, 13915), True, 'import numpy as np\n'), ((4043, 4062), 'numpy.random.choice', 'np.random.choice', (['(2)'], {}), '(2)\n', (4059, 4062), True, 'import numpy as np\n'), ((8246, 8263), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (8257, 8263), False, 'import pickle\n'), ((11373, 11392), 'numpy.random.choice', 'np.random.choice', (['(2)'], {}), '(2)\n', (11389, 11392), True, 'import numpy as np\n'), ((11902, 11919), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (11913, 11919), False, 'import pickle\n'), ((1616, 1640), 'os.path.basename', 'osp.basename', (['label_path'], {}), '(label_path)\n', (1628, 1640), True, 'import os.path as osp\n'), ((4694, 4718), 'os.path.basename', 'osp.basename', (['label_path'], {}), '(label_path)\n', (4706, 4718), True, 'import os.path as osp\n'), ((8415, 8432), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (8426, 8432), False, 'import pickle\n'), ((12071, 12088), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (12082, 12088), False, 'import pickle\n')]
|
import search
from math import(cos, pi)
stl_map = search.UndirectedGraph(dict(
Kirkwood=dict(Webster=10, Clayton=17, MapleWood=17, Oakland=5, Glendale=7,),
St_Louis=dict(Clayton=12),
Glendale=dict(St_Louis=19),
Oakland=dict(Glendale=4),
MapleWood=dict(St_Louis=11),
Clayton=dict(Webster=14, St_Louis=12, Kirkwood=17),
Webster=dict(Kirkwood=10, Clayton=14, MapleWood=8),
))
stl_map.locations = dict(
St_Louis=(38.6270, 90.1994),Webster=(38.5926, 90.3573),Kirkwood=(38.5834, 90.4068),
Glendale=(38.5959, 90.3771), MapleWood=(38.6104, 90.3228), Clayton=(38.6426, 90.3237),
Oakland=(38.5764, 90.3856),
)
stl_puzzle = search.GraphProblem('Kirkwood', 'St_Louis', stl_map)
stl_puzzle1 = search.GraphProblem('Oakland', 'Webster', stl_map)
stl_puzzle2 = search.GraphProblem('MapleWood', 'Oakland', stl_map)
stl_puzzle.description = '''
An abbreviated map of Sumner County, TN.
This map is unique, to the best of my knowledge.
'''
class LightSwitch(search.Problem):
game_state = [[0,2],[0,3],[0,4],[1,2],[1,3],[1,4],]
def actions(self, state):
return ['jump up', 'jump down','jump left', 'jump right']
def result(self, state, action):
if action == 'jump up':
return 'on'
else:
return 'off'
def goal_test(self, state):
return state == 'on'
def h(self, node):
state = node.state
if self.goal_test(state):
return 0
else:
return 1
switch_puzzle = LightSwitch('off')
switch_puzzle.label = 'Light Switch'
myPuzzles = [
stl_puzzle,
stl_puzzle1,
stl_puzzle2,
switch_puzzle,
]
|
[
"search.GraphProblem"
] |
[((655, 707), 'search.GraphProblem', 'search.GraphProblem', (['"""Kirkwood"""', '"""St_Louis"""', 'stl_map'], {}), "('Kirkwood', 'St_Louis', stl_map)\n", (674, 707), False, 'import search\n'), ((722, 772), 'search.GraphProblem', 'search.GraphProblem', (['"""Oakland"""', '"""Webster"""', 'stl_map'], {}), "('Oakland', 'Webster', stl_map)\n", (741, 772), False, 'import search\n'), ((787, 839), 'search.GraphProblem', 'search.GraphProblem', (['"""MapleWood"""', '"""Oakland"""', 'stl_map'], {}), "('MapleWood', 'Oakland', stl_map)\n", (806, 839), False, 'import search\n')]
|
import argparse
from etoLib.log_logger import log_make_logger
from etoLib.s3_func import s3_hello
from etoLib.util_func import unique
from etoLib.util_func import grepfxn
def get_parser():
parser = argparse.ArgumentParser(description='Run the eto code')
parser.add_argument('tile', metavar='TILE', type=str, nargs='*',
help='the tile to process - example: 40N-80E')
parser.add_argument('-c', '--configdir', help='specify and alternate config_dict dir example: -c sample_config ', default='./sample_config', type=str)
parser.add_argument('-o', '--optimize', help='optimize caching on ', default='yes', type=str)
return parser
def command_line_runner():
parser = get_parser()
args = vars(parser.parse_args())
if args['configdir']:
print("configdir", args['configdir'])
optimize = False
opt = args['optimize']
config_directory = args['configdir']
log.info('USing configdir {}'.format(config_directory))
log.info('this is just a starter kit for our cmdline api for eto - Help Greg!')
log.info('or logging agents and logging backends ... docker deployments')
# RUN the class Veget
#myveg = VegET(config_directory, tile, shp, optimize)
#myveg.run_veg_et()
log.info('this is how you call one of your functions')
s3_hello('Greg')
if __name__ == '__main__':
log = log_make_logger('THE_ETO_CREATOR')
command_line_runner()
|
[
"etoLib.log_logger.log_make_logger",
"argparse.ArgumentParser",
"etoLib.s3_func.s3_hello"
] |
[((206, 261), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run the eto code"""'}), "(description='Run the eto code')\n", (229, 261), False, 'import argparse\n'), ((1315, 1331), 'etoLib.s3_func.s3_hello', 's3_hello', (['"""Greg"""'], {}), "('Greg')\n", (1323, 1331), False, 'from etoLib.s3_func import s3_hello\n'), ((1371, 1405), 'etoLib.log_logger.log_make_logger', 'log_make_logger', (['"""THE_ETO_CREATOR"""'], {}), "('THE_ETO_CREATOR')\n", (1386, 1405), False, 'from etoLib.log_logger import log_make_logger\n')]
|
"""Utility functions for file manipulation"""
import logging
import os
import shutil
import sys
import urllib.error
import urllib.request
import zipfile
def download_file(source, dest, verbose=False, overwrite=None):
"""Get a file from a url and save it locally"""
if verbose:
print(f"Downloading {source} to {dest}")
if os.path.exists(dest):
if overwrite is None:
if verbose:
logging.warning(f"WARNING: {dest} already exists, not downloading")
return
if not overwrite:
raise OSError(f"{dest} already exists")
try:
urllib.request.urlretrieve(source, dest)
except urllib.error.HTTPError as e:
print(f"Url {source} does not exist", file=sys.stderr)
raise e
def make_directory(path, overwrite=None, verbose=False):
"""Convenience function to create a directory and handle cases where
it already exists.
Args
----
path: str
The path of the directory to create
overwrite: boolean or None
If the path already exists, if overwrite is: True - delete the
existing path; False - return error; None - leave the existing
path as it is and throw a warning
verbose: bool
Verbosity of printing
"""
if verbose:
print(f"Making directory at {path}")
mkdir = os.makedirs
try:
mkdir(path)
except FileExistsError as e:
if overwrite is True:
if verbose:
print(f"Deleting existing directory: {path}")
shutil.rmtree(path)
mkdir(path)
elif overwrite is None:
if verbose:
logging.warning(
f"WARNING: {path} already exists, writing "
"files only if they do not already exist.",
)
elif overwrite is False:
raise e
else:
raise ValueError(
"overwrite should be boolean or None, not " f'"{overwrite}"'
)
def extract_zip(zip_path, out_path, overwrite=None, verbose=False):
"""Convenience function to extract zip file to out_path."""
if verbose:
print(f"Extracting {zip_path} to {out_path}")
dirname = os.path.splitext(os.path.basename(zip_path))[0]
extracted_path = os.path.join(out_path, dirname)
if os.path.exists(extracted_path):
if overwrite is True:
if verbose:
logging.warning("Deleting existing directory: " f"{extracted_path}")
shutil.rmtree(extracted_path)
elif overwrite is None:
if verbose:
logging.warning(
f"{extracted_path} already exists. Assuming "
"this zip has already been extracted, not "
"extracting.",
)
return extracted_path
elif overwrite is False:
raise FileExistsError(f"{extracted_path} already exists")
with zipfile.ZipFile(zip_path, "r") as zz:
zz.extractall(path=out_path)
return extracted_path
def copy_file(filepath, output_path, overwrite=None, mkdir=False):
"""Convenience function to copy a file from filepath to output_path."""
path = os.path.join(output_path, os.path.basename(filepath))
if os.path.exists(path):
if overwrite is True:
shutil.copy(filepath, output_path)
elif overwrite is None:
return
elif overwrite is False:
raise FileExistsError(f"{path} already exists")
else:
shutil.copy(filepath, output_path)
|
[
"zipfile.ZipFile",
"os.path.basename",
"logging.warning",
"os.path.exists",
"shutil.rmtree",
"os.path.join",
"shutil.copy"
] |
[((343, 363), 'os.path.exists', 'os.path.exists', (['dest'], {}), '(dest)\n', (357, 363), False, 'import os\n'), ((2358, 2389), 'os.path.join', 'os.path.join', (['out_path', 'dirname'], {}), '(out_path, dirname)\n', (2370, 2389), False, 'import os\n'), ((2397, 2427), 'os.path.exists', 'os.path.exists', (['extracted_path'], {}), '(extracted_path)\n', (2411, 2427), False, 'import os\n'), ((3347, 3367), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (3361, 3367), False, 'import os\n'), ((3029, 3059), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zip_path', '"""r"""'], {}), "(zip_path, 'r')\n", (3044, 3059), False, 'import zipfile\n'), ((3312, 3338), 'os.path.basename', 'os.path.basename', (['filepath'], {}), '(filepath)\n', (3328, 3338), False, 'import os\n'), ((3608, 3642), 'shutil.copy', 'shutil.copy', (['filepath', 'output_path'], {}), '(filepath, output_path)\n', (3619, 3642), False, 'import shutil\n'), ((2306, 2332), 'os.path.basename', 'os.path.basename', (['zip_path'], {}), '(zip_path)\n', (2322, 2332), False, 'import os\n'), ((2580, 2609), 'shutil.rmtree', 'shutil.rmtree', (['extracted_path'], {}), '(extracted_path)\n', (2593, 2609), False, 'import shutil\n'), ((3411, 3445), 'shutil.copy', 'shutil.copy', (['filepath', 'output_path'], {}), '(filepath, output_path)\n', (3422, 3445), False, 'import shutil\n'), ((435, 502), 'logging.warning', 'logging.warning', (['f"""WARNING: {dest} already exists, not downloading"""'], {}), "(f'WARNING: {dest} already exists, not downloading')\n", (450, 502), False, 'import logging\n'), ((1604, 1623), 'shutil.rmtree', 'shutil.rmtree', (['path'], {}), '(path)\n', (1617, 1623), False, 'import shutil\n'), ((2499, 2564), 'logging.warning', 'logging.warning', (['f"""Deleting existing directory: {extracted_path}"""'], {}), "(f'Deleting existing directory: {extracted_path}')\n", (2514, 2564), False, 'import logging\n'), ((2682, 2806), 'logging.warning', 'logging.warning', (['f"""{extracted_path} already exists. Assuming this zip has already been extracted, not extracting."""'], {}), "(\n f'{extracted_path} already exists. Assuming this zip has already been extracted, not extracting.'\n )\n", (2697, 2806), False, 'import logging\n'), ((1720, 1830), 'logging.warning', 'logging.warning', (['f"""WARNING: {path} already exists, writing files only if they do not already exist."""'], {}), "(\n f'WARNING: {path} already exists, writing files only if they do not already exist.'\n )\n", (1735, 1830), False, 'import logging\n')]
|
#!/usr/bin/env python
import sys
import os
import time
import json
import golfir.model
import golfir.utils
import yaml
def run(root, argv=[]):
#ds9 = None
defaults = {'ds9': None,
'patch_arcmin': 1.0, # Size of patch to fit
'patch_overlap': 0.2, # Overlap of automatic patches
'mag_limit': [24, 27], # Two-pass modeling. Fit sources brighter than mag_limit in HST catalog
'run_alignment': True, # Run fine alignment between IRAC-HST, in between two steps of `mag_limit`
'galfit_flux_limit': -50, # Brightness limit (uJy) of objects to fit with GALFIT. Or S/N if negative
'refine_brightest': True, # Refine masked bright objects with galfit
'any_limit': 16, # Brightness limit below which to mask *any* sources
'point_limit': 16, # Brightness limit below which to mask point-like sources
'bright_sn': 30, # S/N threshold for masked pixels of bright object
'bkg_kwargs': {'order_npix': 32}, # Arguments to the local background routine
'channels': ['ch1', 'ch2'], # Channels to try
'psf_only': False,
'use_saved_components': False, # Use models from a "components" file if found
'window': None, # PSF-match windowing
'fetch': True,
'PATH': '/GrizliImaging/',
'use_patches': True,
'sync_results': True,
'clean_PATH': True,
'skip_if_exists': True}
print('xxx', defaults)
defaults['patch_arcmin'] = -1
args, kwargs = golfir.utils.argv_to_dict(argv, defaults=defaults)
print('xxx', kwargs)
run_dir = os.path.join(kwargs['PATH'], root)
if os.path.exists(run_dir) & kwargs['skip_if_exists']:
print('directory {0} exists'.format(run_dir))
return True
if not os.path.exists(run_dir):
os.mkdir(run_dir)
if os.path.exists('/tmp/{0}.finished.txt'.format(root)):
print('/tmp/{0}.finished.txt'.format(root))
return True
with open(os.path.join(run_dir, root + '.golfir.yml'), 'w') as fp:
yaml.dump(kwargs, fp)
if isinstance(kwargs['ds9'], str):
if kwargs['ds9'] == 'connect':
target = 'DS9:*'
else:
target = kwargs['ds9']
import grizli.ds9
print('Use DS9: ', target)
kwargs['ds9'] = grizli.ds9.DS9(target=target)
golfir.model.run_all_patches(root, **kwargs)
if kwargs['clean_PATH']:
os.chdir(kwargs['PATH'])
os.system(f'rm -rf ./{root}')
fp = open(f'/tmp/{root}.finished.txt','w')
fp.write(time.ctime())
fp.close()
return True
if __name__ == '__main__':
root = sys.argv[1]
print('xxx run', root, sys.argv[1:])
run(root, argv=sys.argv[1:])
|
[
"os.mkdir",
"yaml.dump",
"os.path.exists",
"os.system",
"time.ctime",
"os.path.join",
"os.chdir"
] |
[((1846, 1880), 'os.path.join', 'os.path.join', (["kwargs['PATH']", 'root'], {}), "(kwargs['PATH'], root)\n", (1858, 1880), False, 'import os\n'), ((1888, 1911), 'os.path.exists', 'os.path.exists', (['run_dir'], {}), '(run_dir)\n', (1902, 1911), False, 'import os\n'), ((2026, 2049), 'os.path.exists', 'os.path.exists', (['run_dir'], {}), '(run_dir)\n', (2040, 2049), False, 'import os\n'), ((2059, 2076), 'os.mkdir', 'os.mkdir', (['run_dir'], {}), '(run_dir)\n', (2067, 2076), False, 'import os\n'), ((2303, 2324), 'yaml.dump', 'yaml.dump', (['kwargs', 'fp'], {}), '(kwargs, fp)\n', (2312, 2324), False, 'import yaml\n'), ((2727, 2751), 'os.chdir', 'os.chdir', (["kwargs['PATH']"], {}), "(kwargs['PATH'])\n", (2735, 2751), False, 'import os\n'), ((2760, 2789), 'os.system', 'os.system', (['f"""rm -rf ./{root}"""'], {}), "(f'rm -rf ./{root}')\n", (2769, 2789), False, 'import os\n'), ((2855, 2867), 'time.ctime', 'time.ctime', ([], {}), '()\n', (2865, 2867), False, 'import time\n'), ((2238, 2281), 'os.path.join', 'os.path.join', (['run_dir', "(root + '.golfir.yml')"], {}), "(run_dir, root + '.golfir.yml')\n", (2250, 2281), False, 'import os\n')]
|
import re
from setuptools import setup
with open('wumpus/__init__.py') as f:
contents = f.read()
try:
version = re.search(
r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', contents, re.M
).group(1)
except AttributeError:
raise RuntimeError('Could not identify version') from None
# look at this boilerplate code
try:
author = re.search(
r'^__author__\s*=\s*[\'"]([^\'"]*)[\'"]', contents, re.M
).group(1)
except AttributeError:
author = 'jay3332'
with open('README.md', encoding='utf-8') as f:
readme = f.read()
with open('requirements.txt', encoding='utf-8') as f:
requirements = f.readlines()
setup(
name='wumpus.py',
author=author,
url='https://github.com/jay3332/wumpus.py',
project_urls={
"Issue tracker": "https://github.com/jay3332/wumpus.py/issues",
"Discord": "https://discord.gg/FqtZ6akWpd"
},
version='0.0.0', # version (Reserve 0.1.0 for the finished release)
packages=[
'wumpus',
'wumpus.core',
'wumpus.models',
'wumpus.typings'
],
license='MIT',
description="An asynchronous wrapper around Discord's API.",
long_description=readme,
long_description_content_type="text/markdown",
include_package_data=True,
install_requires=requirements,
extras_require={
'docs': [
'sphinx>=4.1.1',
'furo',
],
'performance': [
'orjson>=1.3.0'
]
},
python_requires='>=3.8.0',
classifiers=[
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Internet',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
]
)
|
[
"re.search",
"setuptools.setup"
] |
[((701, 1779), 'setuptools.setup', 'setup', ([], {'name': '"""wumpus.py"""', 'author': 'author', 'url': '"""https://github.com/jay3332/wumpus.py"""', 'project_urls': "{'Issue tracker': 'https://github.com/jay3332/wumpus.py/issues', 'Discord':\n 'https://discord.gg/FqtZ6akWpd'}", 'version': '"""0.0.0"""', 'packages': "['wumpus', 'wumpus.core', 'wumpus.models', 'wumpus.typings']", 'license': '"""MIT"""', 'description': '"""An asynchronous wrapper around Discord\'s API."""', 'long_description': 'readme', 'long_description_content_type': '"""text/markdown"""', 'include_package_data': '(True)', 'install_requires': 'requirements', 'extras_require': "{'docs': ['sphinx>=4.1.1', 'furo'], 'performance': ['orjson>=1.3.0']}", 'python_requires': '""">=3.8.0"""', 'classifiers': "['License :: OSI Approved :: MIT License',\n 'Intended Audience :: Developers', 'Natural Language :: English',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9', 'Topic :: Internet',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Utilities']"}), '(name=\'wumpus.py\', author=author, url=\n \'https://github.com/jay3332/wumpus.py\', project_urls={\'Issue tracker\':\n \'https://github.com/jay3332/wumpus.py/issues\', \'Discord\':\n \'https://discord.gg/FqtZ6akWpd\'}, version=\'0.0.0\', packages=[\'wumpus\',\n \'wumpus.core\', \'wumpus.models\', \'wumpus.typings\'], license=\'MIT\',\n description="An asynchronous wrapper around Discord\'s API.",\n long_description=readme, long_description_content_type=\'text/markdown\',\n include_package_data=True, install_requires=requirements,\n extras_require={\'docs\': [\'sphinx>=4.1.1\', \'furo\'], \'performance\': [\n \'orjson>=1.3.0\']}, python_requires=\'>=3.8.0\', classifiers=[\n \'License :: OSI Approved :: MIT License\',\n \'Intended Audience :: Developers\', \'Natural Language :: English\',\n \'Operating System :: OS Independent\',\n \'Programming Language :: Python :: 3.8\',\n \'Programming Language :: Python :: 3.9\', \'Topic :: Internet\',\n \'Topic :: Software Development :: Libraries\',\n \'Topic :: Software Development :: Libraries :: Python Modules\',\n \'Topic :: Utilities\'])\n', (706, 1779), False, 'from setuptools import setup\n'), ((131, 206), 're.search', 're.search', (['"""^__version__\\\\s*=\\\\s*[\\\\\'"]([^\\\\\'"]*)[\\\\\'"]"""', 'contents', 're.M'], {}), '(\'^__version__\\\\s*=\\\\s*[\\\\\\\'"]([^\\\\\\\'"]*)[\\\\\\\'"]\', contents, re.M)\n', (140, 206), False, 'import re\n'), ((388, 462), 're.search', 're.search', (['"""^__author__\\\\s*=\\\\s*[\\\\\'"]([^\\\\\'"]*)[\\\\\'"]"""', 'contents', 're.M'], {}), '(\'^__author__\\\\s*=\\\\s*[\\\\\\\'"]([^\\\\\\\'"]*)[\\\\\\\'"]\', contents, re.M)\n', (397, 462), False, 'import re\n')]
|
import unittest
import spydrnet as sdn
from spydrnet.ir.first_class_element import FirstClassElement
class TestWire(unittest.TestCase):
def setUp(self):
self.definition_top = sdn.Definition()
self.port_top = self.definition_top.create_port()
self.inner_pin = self.port_top.create_pin()
self.cable = self.definition_top.create_cable()
self.wire = self.cable.create_wire()
self.definition_leaf = sdn.Definition()
self.port = self.definition_leaf.create_port()
self.pin1 = self.port.create_pin()
self.pin2 = self.port.create_pin()
self.instance = self.definition_top.create_child()
self.instance.reference = self.definition_leaf
def test_constructor(self):
self.assertFalse(isinstance(self.wire, FirstClassElement), "Wire should not extend element")
wire2 = sdn.Wire()
self.assertNotEqual(self.wire, wire2, "Unique items are considered equal")
def test_pins_assignement(self):
self.wire.connect_pin(self.instance.pins[self.pin1])
self.wire.connect_pin(self.instance.pins[self.pin2])
self.assertEqual(self.wire.pins, [self.instance.pins[self.pin1], self.instance.pins[self.pin2]])
self.wire.pins = [self.instance.pins[self.pin2], self.instance.pins[self.pin1]]
self.assertEqual(self.wire.pins, [self.instance.pins[self.pin2], self.instance.pins[self.pin1]])
def test_connect_and_disconnect_inner_port(self):
self.wire.connect_pin(self.inner_pin)
self.assertTrue(self.inner_pin in self.wire.pins)
self.assertEqual(self.inner_pin.wire, self.wire)
self.assertEqual(len(self.wire.pins), 1)
self.wire.disconnect_pin(self.inner_pin)
self.assertFalse(self.inner_pin in self.wire.pins)
self.assertIsNone(self.inner_pin.wire)
self.assertEqual(len(self.wire.pins), 0)
def test_connect_and_disconnect_outer_pin_by_reference(self):
self.wire.connect_pin(self.instance.pins[self.pin1])
self.assertEqual(len(self.wire.pins), 1)
self.assertTrue(all(x is self.instance.pins[x] for x in self.wire.pins))
self.assertTrue(all(x.wire is self.wire for x in self.wire.pins))
self.assertTrue(all(x.instance is self.instance for x in self.wire.pins))
self.assertEqual(self.instance.pins[self.pin1].inner_pin, self.pin1)
self.wire.disconnect_pin(self.instance.pins[self.pin1])
self.assertEqual(len(self.wire.pins), 0)
self.assertFalse(self.instance.pins[self.pin1] in self.wire.pins)
self.assertIsNone(self.instance.pins[self.pin1].wire)
self.assertTrue(self.pin1 in self.instance.pins)
def test_connect_and_disconnect_outer_pin_by_object(self):
self.wire.connect_pin(sdn.OuterPin.from_instance_and_inner_pin(self.instance, self.pin2), position=0)
self.assertEqual(len(self.wire.pins), 1)
self.assertTrue(all(x is self.instance.pins[x] for x in self.wire.pins))
self.assertTrue(all(x.wire is self.wire for x in self.wire.pins))
self.assertTrue(all(x.instance is self.instance for x in self.wire.pins))
self.assertEqual(self.instance.pins[self.pin2].inner_pin, self.pin2)
self.wire.disconnect_pin(sdn.OuterPin(self.instance, self.pin2))
self.assertEqual(len(self.wire.pins), 0)
self.assertFalse(self.instance.pins[self.pin2] in self.wire.pins)
self.assertIsNone(self.instance.pins[self.pin1].wire)
self.assertTrue(self.pin1 in self.instance.pins)
def test_disconnect_pin_from(self):
self.wire.connect_pin(self.inner_pin)
self.wire.connect_pin(self.instance.pins[self.pin1])
self.wire.connect_pin(self.instance.pins[self.pin2])
self.wire.disconnect_pins_from(iter((self.inner_pin, self.instance.pins[self.pin1])))
self.wire.disconnect_pins_from({self.instance.pins[self.pin2]})
self.assertEqual(len(self.wire.pins), 0)
self.assertTrue(self.pin1 in self.instance.pins and isinstance(self.instance.pins[self.pin1], sdn.OuterPin) and
self.instance.pins[self.pin1].inner_pin == self.pin1)
self.assertIsNone(self.inner_pin.wire)
self.assertIsNone(self.instance.pins[self.pin1].wire)
self.assertIsNone(self.instance.pins[self.pin2].wire)
self.assertTrue(self.pin1 in self.instance.pins and isinstance(self.instance.pins[self.pin2], sdn.OuterPin) and
self.instance.pins[self.pin2].inner_pin == self.pin2)
@unittest.expectedFailure
def test_disconnect_inner_pin_from_outside_wire(self):
inner_pin = sdn.InnerPin()
self.wire.disconnect_pins_from([inner_pin])
@unittest.expectedFailure
def test_disconnect_outer_pin_from_outside_wire(self):
outer_pin = sdn.OuterPin()
self.wire.disconnect_pins_from([outer_pin])
|
[
"spydrnet.Definition",
"spydrnet.OuterPin",
"spydrnet.Wire",
"spydrnet.OuterPin.from_instance_and_inner_pin",
"spydrnet.InnerPin"
] |
[((190, 206), 'spydrnet.Definition', 'sdn.Definition', ([], {}), '()\n', (204, 206), True, 'import spydrnet as sdn\n'), ((449, 465), 'spydrnet.Definition', 'sdn.Definition', ([], {}), '()\n', (463, 465), True, 'import spydrnet as sdn\n'), ((871, 881), 'spydrnet.Wire', 'sdn.Wire', ([], {}), '()\n', (879, 881), True, 'import spydrnet as sdn\n'), ((4645, 4659), 'spydrnet.InnerPin', 'sdn.InnerPin', ([], {}), '()\n', (4657, 4659), True, 'import spydrnet as sdn\n'), ((4822, 4836), 'spydrnet.OuterPin', 'sdn.OuterPin', ([], {}), '()\n', (4834, 4836), True, 'import spydrnet as sdn\n'), ((2785, 2851), 'spydrnet.OuterPin.from_instance_and_inner_pin', 'sdn.OuterPin.from_instance_and_inner_pin', (['self.instance', 'self.pin2'], {}), '(self.instance, self.pin2)\n', (2825, 2851), True, 'import spydrnet as sdn\n'), ((3262, 3300), 'spydrnet.OuterPin', 'sdn.OuterPin', (['self.instance', 'self.pin2'], {}), '(self.instance, self.pin2)\n', (3274, 3300), True, 'import spydrnet as sdn\n')]
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018, ESS LLP and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import json
from frappe.utils import cint
from erpnext.healthcare.utils import render_docs_as_html
@frappe.whitelist()
def get_feed(name, document_types=None, date_range=None, start=0, page_length=20):
"""get feed"""
filters = get_filters(name, document_types, date_range)
result = frappe.db.get_all('Patient Medical Record',
fields=['name', 'owner', 'communication_date',
'reference_doctype', 'reference_name', 'subject'],
filters=filters,
order_by='communication_date DESC',
limit=cint(page_length),
start=cint(start)
)
return result
def get_filters(name, document_types=None, date_range=None):
filters = {'patient': name}
if document_types:
document_types = json.loads(document_types)
if len(document_types):
filters['reference_doctype'] = ['IN', document_types]
if date_range:
try:
date_range = json.loads(date_range)
if date_range:
filters['communication_date'] = ['between', [date_range[0], date_range[1]]]
except json.decoder.JSONDecodeError:
pass
return filters
@frappe.whitelist()
def get_feed_for_dt(doctype, docname):
"""get feed"""
result = frappe.db.get_all('Patient Medical Record',
fields=['name', 'owner', 'communication_date',
'reference_doctype', 'reference_name', 'subject'],
filters={
'reference_doctype': doctype,
'reference_name': docname
},
order_by='communication_date DESC'
)
return result
@frappe.whitelist()
def get_patient_history_doctypes():
document_types = []
settings = frappe.get_single("Patient History Settings")
for entry in settings.standard_doctypes:
document_types.append(entry.document_type)
for entry in settings.custom_doctypes:
document_types.append(entry.document_type)
return document_types
|
[
"json.loads",
"frappe.whitelist",
"frappe.db.get_all",
"frappe.utils.cint",
"frappe.get_single"
] |
[((277, 295), 'frappe.whitelist', 'frappe.whitelist', ([], {}), '()\n', (293, 295), False, 'import frappe\n'), ((1202, 1220), 'frappe.whitelist', 'frappe.whitelist', ([], {}), '()\n', (1218, 1220), False, 'import frappe\n'), ((1571, 1589), 'frappe.whitelist', 'frappe.whitelist', ([], {}), '()\n', (1587, 1589), False, 'import frappe\n'), ((1286, 1542), 'frappe.db.get_all', 'frappe.db.get_all', (['"""Patient Medical Record"""'], {'fields': "['name', 'owner', 'communication_date', 'reference_doctype',\n 'reference_name', 'subject']", 'filters': "{'reference_doctype': doctype, 'reference_name': docname}", 'order_by': '"""communication_date DESC"""'}), "('Patient Medical Record', fields=['name', 'owner',\n 'communication_date', 'reference_doctype', 'reference_name', 'subject'],\n filters={'reference_doctype': doctype, 'reference_name': docname},\n order_by='communication_date DESC')\n", (1303, 1542), False, 'import frappe\n'), ((1659, 1704), 'frappe.get_single', 'frappe.get_single', (['"""Patient History Settings"""'], {}), "('Patient History Settings')\n", (1676, 1704), False, 'import frappe\n'), ((864, 890), 'json.loads', 'json.loads', (['document_types'], {}), '(document_types)\n', (874, 890), False, 'import json\n'), ((675, 692), 'frappe.utils.cint', 'cint', (['page_length'], {}), '(page_length)\n', (679, 692), False, 'from frappe.utils import cint\n'), ((702, 713), 'frappe.utils.cint', 'cint', (['start'], {}), '(start)\n', (706, 713), False, 'from frappe.utils import cint\n'), ((1014, 1036), 'json.loads', 'json.loads', (['date_range'], {}), '(date_range)\n', (1024, 1036), False, 'import json\n')]
|
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
import math
import pandas
def parseErrorCode(code):
"""에러코드 메시지
:param code: 에러 코드
:type code: str
:return: 에러코드 메시지를 반환
::
parseErrorCode("00310") # 모의투자 조회가 완료되었습니다
"""
code = str(code)
ht = {
"-1" : "통신소켓 생성에 실패하였습니다",
"-2" : "서버접속에 실패하였습니다",
"-3" : "서버주소가 틀렸습니다",
"-4" : "서버 접속시간이 초과되었습니다",
"-5" : "이미 서버에 연결중입니다",
"-6" : "해당TR은 사용할수 없습니다",
"-7" : "로그인을 해야 사용이 가능합니다",
"-8" : "시세전용에서는 사용이 불가능합니다",
"-9" : "해당 계좌번호를 가지고 있지 않습니다",
"-10" : "패킷의 크기가 잘못되었습니다",
"-11" : "Data의 크기가 다릅니다",
"-12" : "계좌가 존재하지 않습니다",
"-13" : "Request ID 부족",
"-14" : "소켓이 생성되지 않았습니다",
"-15" : "암호화 생성에 실패했습니다",
"-16" : "데이터 전송에 실패했습니다",
"-17" : "암호화(RTN)처리에 실패했습니다",
"-18" : "공인인증 파일이 없습니다",
"-19" : "공인인증 Function이 없습니다",
"-20" : "메모리가 충분하지 않습니다",
"-21" : "TR의 시간당 전송제한에 걸렸습니다",
"-22" : "해당 TR은 해당 함수를 이용할 수 없습니다",
"-23" : "로그인이 안되었거나, TR에 대한 정보를 찾을 수 없습니다",
"-24" : "계좌위치가 지정되지 않았습니다",
"-25" : "계좌를 가지고 있지 않습니다",
"-26" : "파일 읽기에 실패했습니다 (종목 검색 조회 시, 파일이 없는 경우)",
"0000" : "정상완료되었습니다",
"00310" : "모의투자 조회가 완료되었습니다",
"00136" : "조회가 완료되었습니다",
"00020" : "application program exit[TR:CSPAQ]",
"03669" : "비밀번호 오류입니다. (5회중 4회 남았습니다)",
"01796" : "비밀번호 연속 오류허용횟수를 초과하였습니다. 콜센터로 문의하시기 바랍니다"
}
return ht[code] + " (%s)" % code if code in ht else code
def parseTR(trCode):
"""요청 TR 코드 파싱
:param trCode: TR 코드
:type trCode: str
:return: TR코드 내역을 반환
::
parseTR("t0425") # 주식체결/미체결
"""
ht = {
"t0424" : "주식잔고",
"t0425" : "주식체결/미체결",
"t8407" : "멀티현재가조회",
"t8412" : "주식챠트(N분)",
"t8413" : "주식챠트(일주월)",
"t8430" : "주식종목조회",
"t1833" : "종목검색(씽API용)",
"t1101" : "주식현재가호가조회",
"t1102" : "주식현재가(시세)조회",
"t1411" : "증거금율별종목조회",
"t1702" : "외인기관종목별동향",
"t1301" : "주식시간대별체결조회",
"t0167" : "서버시간조회",
"t9945" : "주식마스터조회API용",
"CSPAQ12200" : "현물계좌예수금 주문가능금액 총평가 조회",
"CSPAT00600" : "현물주문",
"CSPAT00700" : "현물정정주문",
"CSPAT00800" : "현물취소주문",
"CSPBQ00200" : "현물계좌 증거금률별 주문가능 수량 조회",
"HA_" : "KOSDAQ호가잔량",
"H1_" : "KOSPI호가잔량",
"SC0" : "주식주문접수",
"SC1" : "주식주문체결",
"SC2" : "주식주문정정",
"SC3" : "주식주문취소",
"SC4" : "주식주문거부",
"JIF" : "장운영정보"
}
return ht[trCode] if trCode in ht else ""
def parseJstatus(jstatus):
"""장 운영시간 파싱
:param jstatus: 장 운영시간 코드
:type jstatus: str
:return: 장 운영시간 내역을 반환
::
parseJstatus("66") # 사이드카 매수발동
.. note::
- 코스피로 장시간을 확인해야함.
- 선물/옵션 장마감 5분전, 1분전, 10초전은 들어오지 않음
"""
ht = {
"11" : "장전동시호가개시",
"21" : "장시작",
"22" : "장개시10초전",
"23" : "장개시1분전",
"24" : "장개시5분전",
"25" : "장개시10분전",
"31" : "장후동시호가개시",
"41" : "장마감",
"42" : "장마감10초전",
"43" : "장마감1분전",
"44" : "장마감5분전",
"51" : "시간외종가매매개시",
"52" : "시간외종가매매종료",
"53" : "시간외단일가매매개시",
"54" : "시간외단일가매매종료",
"61" : "서킷브레이크발동",
"62" : "서킷브레이크해제",
"63" : "서킷브레이크단일가접수",
"64" : "사이드카 매도발동",
"65" : "사이드카 매도해제",
"66" : "사이드카 매수발동"
}
return ht[jstatus] if jstatus in ht else ""
def parseMarket(jangubun):
"""장 구분
:param jangubun: 시장 구분 코드
:type jangubun: str
:return: 시장 내역을 반환
::
parseMarket("1") # 코스피
"""
ht = {
"1" : "코스피",
"2" : "코스닥",
"5" : "선물/옵션",
"7" : "CME야간선물",
"8" : "EUREX야간옵션선물"
}
return ht[jangubun] if jangubun in ht else ""
def timeType(base = None):
"""장 전,후 시간을 반환
:param base: 기준일시
:type base: datetime
:return: 기준일시에 맞는 타입문자를 반환
BEFORE(장시작 전),SHOWTIME(장 운영시간),AFTER(장종료 후)
::
timeType()
timeType(datetime.today())
"""
today = base if base else datetime.today()
mainStart = today.replace(hour=8, minute=50, second=0, microsecond=0)
mainEnd = today.replace(hour=15, minute=0, second=0, microsecond=0)
if today.weekday() < 5:
if today >= mainStart and today <= mainEnd:
return "SHOWTIME"
else:
if today < mainStart:
return "BEFORE"
elif today > mainEnd:
return "AFTER"
else:
return "NONE"
def today():
"""오늘 날자를 yyyymmdd 형태로 반환
::
today() # 20160101
"""
return datetime.today().strftime("%Y%m%d")
def latestBusinessDay():
"""가장 최근 영업일을 yyyymmdd 형태로 반환
::
latestBusinessDay() # 20160104
"""
baseday = datetime.today()
if baseday.weekday() > 4:
while baseday.weekday() > 4:
baseday = baseday - timedelta(days=1)
return baseday.strftime("%Y%m%d")
# def printMax(x):
# pandas.set_option("display.max_rows", len(x))
# pandas.set_option("display.max_columns", len(x.columns))
# print(x)
# pandas.reset_option("display.max_rows")
# pandas.reset_option("display.max_columns")
#
# def split(arr, size):
# arrs = []
# while len(arr) > size:
# pice = arr[:size]
# arrs.append(pice)
# arr = arr[size:]
# arrs.append(arr)
# return arrs
# # 호가 단위
# def callValueUnit(price, isKospi = False):
# unit = None
# price = int(price)
# if price < 1000:
# unit = 1
# elif price >= 1000 and price < 5000:
# unit = 5
# elif price >= 5000 and price < 10000:
# unit = 10
# elif price >= 10000 and price < 50000:
# unit = 50
# elif price >= 50000:
# if isKospi:
# if price < 100000:
# unit = 100
# elif price >= 100000 and price < 500000:
# unit = 500
# elif price >= 500000:
# unit = 1000
# else:
# unit = 100
# return unit
#
# # 구분
# def sign(type):
# result = None
# type = int(type)
# if type < 3:
# #상승
# result = 1
# elif type == 3:
# #보합
# result = 0
# elif type > 3:
# #하락
# result = -1
# return result
#
# # candle
# def candle(price, open, high, low):
# # print(price, open, high, low)
# p = int(price)
# o = int(open)
# h = int(high)
# l = int(low)
# height = h-l
# body = 0 if height == 0 else round((p-o)/height,2)
#
# if body > 0:
# #양봉
# type = 1
# top = (h-p)/height
# bottom = (o-l)/height
# elif body < 0:
# #음봉
# type = -1
# top = (h-o)/height
# bottom = (p-l)/height
# else:
# #보합
# type = 0.0
# top = 0.0
# bottom = 0.0
#
# return {
# "type" : type,
# "top" : round(top,2) * 100,
# "bottom" : round(bottom,2) * 100,
# "body" : math.fabs(body) * 100
# }
#
# def profit(buy, sell):
# #매매수수료
# fee = (float(buy) * 0.00015) + (float(sell) * 0.00315)
# profit = sell - buy - fee
#
# return {
# "profit" : profit,
# "rate" : round(profit/buy * 100,2)
# }
|
[
"datetime.timedelta",
"datetime.datetime.today"
] |
[((4842, 4858), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (4856, 4858), False, 'from datetime import datetime, timedelta\n'), ((4238, 4254), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (4252, 4254), False, 'from datetime import datetime, timedelta\n'), ((4691, 4707), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (4705, 4707), False, 'from datetime import datetime, timedelta\n'), ((4940, 4957), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (4949, 4957), False, 'from datetime import datetime, timedelta\n')]
|
import json # pylint: disable=import-error
import os # pylint: disable=import-error
import time # pylint: disable=import-error
import requests # pylint: disable=import-error
from flask import Flask, request # pylint: disable=import-error
app = Flask(__name__)
print("app",app)
@app.route("/", methods=["POST"])
def webhook():
# Store incoming json data from webhook
payload = request.get_json()
user = "learnazcloud"
cred = os.environ["GH_TOKEN"]
if payload is None:
print("POST was not formatted in JSON")
# Verify the repo was created
try:
if payload["action"] == "created" or payload["action"] == "publicized" or payload["ref"] == "main":
# Delay needed for server to be create the page, otherwise a 404 returns
time.sleep(1)
# Create branch protection for the master branch of the repo
branch_protection = {
"required_status_checks": None,
"pull_request_reviews_enforcement_level": "off",
"required_approving_review_count": 1,
"dismiss_stale_reviews_on_push": True,
"require_code_owner_review": True,
"authorized_dismissal_actors_only": False,
"ignore_approvals_from_contributors": False,
"required_status_checks_enforcement_level": "non_admins",
"strict_required_status_checks_policy": False,
"signature_requirement_enforcement_level": "off",
"linear_history_requirement_enforcement_level": "off",
"enforce_admins": False,
"allow_force_pushes_enforcement_level": "off",
"allow_deletions_enforcement_level": "off",
"merge_queue_enforcement_level": "off",
"required_deployments_enforcement_level": "off",
"required_conversation_resolution_level": "off",
"authorized_actors_only": True,
"authorized_actor_names": [
"learnazcloud-user00"
],
"required_pull_request_reviews": None,
"restrictions": None,
}
session = requests.session()
session.auth = (user, cred)
response_1 = session.put(
payload["repository"]["url"] + "/branches/main/protection",
json.dumps(branch_protection),
)
if response_1.status_code == 200:
print(
"Branch protection created successfully. Status code: ",
response_1.status_code,
)
# Create issue in repo notifying user of branch protection
try:
if payload["repository"]["has_issues"]:
issue = {
"title": "New Protection Added",
"body": "@"
+ user
+ " @learnazcloud-secteam A new branch protection was added to the master branch.",
}
session = requests.session()
session.auth = (user, cred)
response_2 = session.post(
payload["repository"]["url"] + "/issues", json.dumps(issue)
)
if response_2.status_code == 201:
print(
"Issue created successfully. Status code: ",
response_2.status_code,
)
else:
print(
"Unable to create issue. Status code: ",
response_2.status_code,
)
else:
print(
"This repo has no issues so one cannot be created at this time."
)
except KeyError:
# Request did not contain information about if the repository has issues enabled
pass
else:
print(response_1.content)
print(
"Unable to create branch protection. Status code: ",
response_1.status_code,
"No Branch found- Creating one",
)
except KeyError:
# Ignore POST payload since it is not a create action
pass
return "OK"
if __name__ == "__main__":
app.run()
|
[
"requests.session",
"flask.Flask",
"json.dumps",
"time.sleep",
"flask.request.get_json"
] |
[((251, 266), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (256, 266), False, 'from flask import Flask, request\n'), ((392, 410), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (408, 410), False, 'from flask import Flask, request\n'), ((797, 810), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (807, 810), False, 'import time\n'), ((1951, 1969), 'requests.session', 'requests.session', ([], {}), '()\n', (1967, 1969), False, 'import requests\n'), ((2140, 2169), 'json.dumps', 'json.dumps', (['branch_protection'], {}), '(branch_protection)\n', (2150, 2169), False, 'import json\n'), ((2893, 2911), 'requests.session', 'requests.session', ([], {}), '()\n', (2909, 2911), False, 'import requests\n'), ((3085, 3102), 'json.dumps', 'json.dumps', (['issue'], {}), '(issue)\n', (3095, 3102), False, 'import json\n')]
|
import base64
import string
from random import randint, choice
from Crypto.Cipher import AES
from Crypto.Hash import SHA256
from Crypto import Random as CryptoRandom
class Encryption():
def __init__(self, key):
self.key = key # Key in bytes
self.salted_key = None # Placeholder for optional salted key
def digest_key(self):
"""
Use SHA-256 over our key to get a proper-sized AES key
"""
# Add optional salt to key
key = self.key
if self.salted_key:
key = self.salted_key
return SHA256.new(key).digest()
def get_aes(self, IV):
"""
AES instance
"""
return AES.new(self.digest_key(), AES.MODE_CBC, IV)
def gen_salt(self, set_=True):
"""
Generate a random salt
"""
min_char = 8
max_char = 12
allchar = string.ascii_letters + string.punctuation + string.digits
salt = "".join(choice(allchar)
for x in range(randint(min_char, max_char))).encode()
# Set the salt in the same instance if required
if set_:
self.set_salt(salt)
return salt
def set_salt(self, salt=None):
"""
Add a salt to the secret key for this specific encryption or decryption
"""
if salt:
self.salted_key = salt + self.key
else:
self.salted_key = None
def encrypt(self, secret):
"""
Encrypt a secret
"""
# generate IV
IV = CryptoRandom.new().read(AES.block_size)
# Retrieve AES instance
aes = self.get_aes(IV)
# calculate needed padding
padding = AES.block_size - len(secret) % AES.block_size
# Python 2.x: secret += chr(padding) * padding
secret += bytes([padding]) * padding
# store the IV at the beginning and encrypt
data = IV + aes.encrypt(secret)
# Reset salted key
self.set_salt()
# Return base 64 encoded bytes
return base64.b64encode(data)
def decrypt(self, enc_secret):
"""
Decrypt a secret
"""
# Decode base 64
enc_secret = base64.b64decode(enc_secret)
# extract the IV from the beginning
IV = enc_secret[:AES.block_size]
# Retrieve AES instance
aes = self.get_aes(IV)
# Decrypt
data = aes.decrypt(enc_secret[AES.block_size:])
# pick the padding value from the end; Python 2.x: ord(data[-1])
padding = data[-1]
# Python 2.x: chr(padding) * padding
if data[-padding:] != bytes([padding]) * padding:
raise ValueError("Invalid padding...")
# Reset salted key
self.set_salt()
# Remove the padding and return the bytes
return data[:-padding]
|
[
"Crypto.Hash.SHA256.new",
"random.randint",
"random.choice",
"base64.b64decode",
"base64.b64encode",
"Crypto.Random.new"
] |
[((2087, 2109), 'base64.b64encode', 'base64.b64encode', (['data'], {}), '(data)\n', (2103, 2109), False, 'import base64\n'), ((2246, 2274), 'base64.b64decode', 'base64.b64decode', (['enc_secret'], {}), '(enc_secret)\n', (2262, 2274), False, 'import base64\n'), ((583, 598), 'Crypto.Hash.SHA256.new', 'SHA256.new', (['key'], {}), '(key)\n', (593, 598), False, 'from Crypto.Hash import SHA256\n'), ((1582, 1600), 'Crypto.Random.new', 'CryptoRandom.new', ([], {}), '()\n', (1598, 1600), True, 'from Crypto import Random as CryptoRandom\n'), ((984, 999), 'random.choice', 'choice', (['allchar'], {}), '(allchar)\n', (990, 999), False, 'from random import randint, choice\n'), ((1038, 1065), 'random.randint', 'randint', (['min_char', 'max_char'], {}), '(min_char, max_char)\n', (1045, 1065), False, 'from random import randint, choice\n')]
|
import setuptools
# Reads the content of your README.md into a variable to be used in the setup below
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name='maddress', # should match the package folder
packages=['maddress'], # should match the package folder
version='1.0.0-alpha', # important for updates
license='MIT', # should match your chosen license
description='Testing installation of Package',
long_description=long_description, # loads your README.md
long_description_content_type='text/markdown', # README.md is of type 'markdown'
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/scottdraper8/maddress',
install_requires=[], # list all packages that your package uses
keywords=['pypi', 'maddress', 'email', 'phone number', 'address', 'geolocation', 'data cleaning'], #descriptive meta-data
classifiers=[ # https://pypi.org/classifiers
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Documentation',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
download_url="https://github.com/scottdraper8/maddress/archive/refs/tags/v1.0.0-alpha.tar.gz",
)
|
[
"setuptools.setup"
] |
[((190, 1120), 'setuptools.setup', 'setuptools.setup', ([], {'name': '"""maddress"""', 'packages': "['maddress']", 'version': '"""1.0.0-alpha"""', 'license': '"""MIT"""', 'description': '"""Testing installation of Package"""', 'long_description': 'long_description', 'long_description_content_type': '"""text/markdown"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'url': '"""https://github.com/scottdraper8/maddress"""', 'install_requires': '[]', 'keywords': "['pypi', 'maddress', 'email', 'phone number', 'address', 'geolocation',\n 'data cleaning']", 'classifiers': "['Development Status :: 3 - Alpha', 'Intended Audience :: Developers',\n 'Topic :: Software Development :: Documentation',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9']", 'download_url': '"""https://github.com/scottdraper8/maddress/archive/refs/tags/v1.0.0-alpha.tar.gz"""'}), "(name='maddress', packages=['maddress'], version=\n '1.0.0-alpha', license='MIT', description=\n 'Testing installation of Package', long_description=long_description,\n long_description_content_type='text/markdown', author='<NAME>',\n author_email='<EMAIL>', url='https://github.com/scottdraper8/maddress',\n install_requires=[], keywords=['pypi', 'maddress', 'email',\n 'phone number', 'address', 'geolocation', 'data cleaning'], classifiers\n =['Development Status :: 3 - Alpha', 'Intended Audience :: Developers',\n 'Topic :: Software Development :: Documentation',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9'], download_url=\n 'https://github.com/scottdraper8/maddress/archive/refs/tags/v1.0.0-alpha.tar.gz'\n )\n", (206, 1120), False, 'import setuptools\n')]
|
from googleapiclient.discovery import build
from os import getenv
from auth import get_credentials
class AppsScript():
def __init__(self, id: str):
self._name = getenv("API_SERVICE_NAME")
self._version = getenv("API_VERSION")
self._id = id
def run(self, function: str):
body = {"function": function}
with build(self._name, self._version, credentials=get_credentials()) as service:
return service.scripts().run(scriptId=self._id, body=body).execute()
|
[
"auth.get_credentials",
"os.getenv"
] |
[((176, 202), 'os.getenv', 'getenv', (['"""API_SERVICE_NAME"""'], {}), "('API_SERVICE_NAME')\n", (182, 202), False, 'from os import getenv\n'), ((227, 248), 'os.getenv', 'getenv', (['"""API_VERSION"""'], {}), "('API_VERSION')\n", (233, 248), False, 'from os import getenv\n'), ((402, 419), 'auth.get_credentials', 'get_credentials', ([], {}), '()\n', (417, 419), False, 'from auth import get_credentials\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""ObjectID test."""
import json
from unittest import TestCase
from bson import ObjectId
from mongoengine.document import Document
from mongoengine.errors import ValidationError
from mongoengine.fields import StringField
from mongoengine_goodjson.fields import ObjectIDField
from mongoengine_goodjson.document import Document as JSONDoc
class NormalSchema(Document):
"""Normal document schama."""
uid = ObjectIDField()
name = StringField(required=True)
class CustomSchema(JSONDoc):
"""Original document schema."""
uid = ObjectIDField()
name = StringField(required=True)
class NormalDocumentTest(TestCase):
"""Normal (or Standard) Document Test."""
def setUp(self):
"""Set up."""
self.model = NormalSchema
self.doc = self.model(
uid=ObjectId(),
name="Test",
)
# self.doc.save()
self.expected_dict = {
"uid": {"$oid": str(self.doc.uid)},
"name": self.doc.name,
}
def test_encode(self):
"""Should be serialized."""
dct = json.loads(self.doc.to_json())
self.assertEqual(dct, self.expected_dict)
def test_decode(self):
"""Should be deserialized."""
doc = self.model.from_json(json.dumps(self.expected_dict))
# In pytohn checking equivalence between objects is impossible.
# And therefore, it needs to convert into dict with to_mongo.
self.assertEqual(doc.uid, self.doc.uid)
self.assertEqual(doc.to_mongo(), self.doc.to_mongo())
class NormalSchemaCastingTest(NormalDocumentTest):
"""Normal Schema casting test."""
def setUp(self):
"""Set up."""
super().setUp()
oid = str(ObjectId())
self.doc = self.model(
uid=oid,
name="Test",
)
# self.doc.save()
self.expected_dict = {"uid": {"$oid": oid}, "name": self.doc.name}
class NormalSchemaInvalidCastingTest(TestCase):
"""Normal Schema casting test."""
def setUp(self):
"""Set up."""
oid = str("あばばばばばば")
self.doc = NormalSchema(
uid=oid,
name="Test",
)
# self.doc.save()
self.expected_dict = {"uid": {"$oid": oid}, "name": self.doc.name}
def test_encode(self):
"""Serialization should be failed."""
with self.assertRaises(ValidationError) as e:
self.doc.to_json()
self.assertEqual(e.exception.field_name, "uid")
class CustomSchemaTest(NormalDocumentTest):
"""Custom schema document test."""
def setUp(self):
"""Set up."""
super().setUp()
self.model = CustomSchema
self.doc = self.model(
uid=ObjectId(),
name="Test",
)
# self.doc.save()
self.expected_dict = {
"uid": str(self.doc.uid),
"name": self.doc.name,
}
class CustomSchemaCastingTest(CustomSchemaTest):
"""Custom Schema casting test."""
def setUp(self):
"""Set up."""
super().setUp()
oid = str(ObjectId())
self.doc = self.model(
uid=oid,
name="Test",
)
# self.doc.save()
self.expected_dict = {"uid": oid, "name": self.doc.name}
class CustomSchemaInvalidCastingTest(TestCase):
"""Custom Schema casting test."""
def setUp(self):
"""Set up."""
oid = str("あばばばばばば")
self.doc = CustomSchema(
uid=oid,
name="Test",
)
# self.doc.save()
self.expected_dict = {"uid": oid, "name": self.doc.name}
def test_encode(self):
"""Serialization should be failed."""
with self.assertRaises(ValidationError) as e:
self.doc.to_json()
self.assertEqual(e.exception.field_name, "uid")
|
[
"mongoengine.fields.StringField",
"bson.ObjectId",
"mongoengine_goodjson.fields.ObjectIDField",
"json.dumps"
] |
[((464, 479), 'mongoengine_goodjson.fields.ObjectIDField', 'ObjectIDField', ([], {}), '()\n', (477, 479), False, 'from mongoengine_goodjson.fields import ObjectIDField\n'), ((491, 517), 'mongoengine.fields.StringField', 'StringField', ([], {'required': '(True)'}), '(required=True)\n', (502, 517), False, 'from mongoengine.fields import StringField\n'), ((596, 611), 'mongoengine_goodjson.fields.ObjectIDField', 'ObjectIDField', ([], {}), '()\n', (609, 611), False, 'from mongoengine_goodjson.fields import ObjectIDField\n'), ((623, 649), 'mongoengine.fields.StringField', 'StringField', ([], {'required': '(True)'}), '(required=True)\n', (634, 649), False, 'from mongoengine.fields import StringField\n'), ((1316, 1346), 'json.dumps', 'json.dumps', (['self.expected_dict'], {}), '(self.expected_dict)\n', (1326, 1346), False, 'import json\n'), ((1777, 1787), 'bson.ObjectId', 'ObjectId', ([], {}), '()\n', (1785, 1787), False, 'from bson import ObjectId\n'), ((3139, 3149), 'bson.ObjectId', 'ObjectId', ([], {}), '()\n', (3147, 3149), False, 'from bson import ObjectId\n'), ((859, 869), 'bson.ObjectId', 'ObjectId', ([], {}), '()\n', (867, 869), False, 'from bson import ObjectId\n'), ((2777, 2787), 'bson.ObjectId', 'ObjectId', ([], {}), '()\n', (2785, 2787), False, 'from bson import ObjectId\n')]
|
from collections import Counter
def partition_labels(s: str) -> list:
res = []
count = Counter(s)
addr = {}
for i,c in enumerate(s):
if c in addr:
addr[c].append(i)
else:
addr[c] = [i]
lst = []
added = set()
for c in s:
if c in added:
continue
loc = addr[c]
item = (c, loc[0], loc[-1])
lst.append(item)
added.add(c)
total = count[lst[0][0]]
prev = lst[0][2]
for i in range(1, len(lst)):
item = lst[i]
if item[1] < prev:
total += count[item[0]]
if item[2] > prev:
prev = item[2]
else:
res.append(total)
total = count[item[0]]
prev = item[2]
res.append(total)
return res
def standard_solution(s: str) -> list:
res = []
last = {c:i for i,c in enumerate(s)}
j = anchor = 0
for i,c in enumerate(s):
j = max(j, last[c])
if i == j:
res.append(i - anchor + 1)
anchor = i + 1
return res
s = "ababcbacadefegdehijhklij"
res = partition_labels(s)
print(res)
res = standard_solution(s)
print(res)
|
[
"collections.Counter"
] |
[((98, 108), 'collections.Counter', 'Counter', (['s'], {}), '(s)\n', (105, 108), False, 'from collections import Counter\n')]
|
#!/usr/bin/env python3
# Copyright 2018 <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
from varsome_api.vcf import VCFAnnotator
__author__ = 'ckopanos'
def annotate_vcf(argv):
parser = argparse.ArgumentParser(description='VCF Annotator command line')
parser.add_argument('-k', help='Your key to the API', type=str, metavar='API Key', required=True)
parser.add_argument('-g', help='Reference genome either hg19 or hg38', type=str, metavar='Reference Genome',
required=False, default='hg19')
parser.add_argument('-i',
help='Path to vcf file',
type=str, metavar='Input VCF File', required=True)
parser.add_argument('-o',
help='Path to output vcf file',
type=str, metavar='Output VCF File', required=False)
parser.add_argument('-p',
help='Request parameters e.g. add-all-data=1 expand-pubmed-articles=0',
type=str, metavar='Request Params', required=False, nargs='+')
parser.add_argument('-t', help='Run vcf annotator using x threads', type=int, default=3, required=False,
metavar='Number of threads')
args = parser.parse_args()
api_key = args.k
vcf_file = args.i
output_vcf_file = args.o
ref_genome = args.g
num_threads = args.t
request_parameters = None
if args.p:
request_parameters = {param[0]: param[1] for param in [param.split("=") for param in args.p]}
vcf_annotator = VCFAnnotator(api_key=api_key, ref_genome=ref_genome, get_parameters=request_parameters,
max_threads=num_threads)
vcf_annotator.annotate(vcf_file, output_vcf_file)
if __name__ == "__main__":
annotate_vcf(sys.argv[1:])
|
[
"varsome_api.vcf.VCFAnnotator",
"argparse.ArgumentParser"
] |
[((729, 794), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""VCF Annotator command line"""'}), "(description='VCF Annotator command line')\n", (752, 794), False, 'import argparse\n'), ((2077, 2194), 'varsome_api.vcf.VCFAnnotator', 'VCFAnnotator', ([], {'api_key': 'api_key', 'ref_genome': 'ref_genome', 'get_parameters': 'request_parameters', 'max_threads': 'num_threads'}), '(api_key=api_key, ref_genome=ref_genome, get_parameters=\n request_parameters, max_threads=num_threads)\n', (2089, 2194), False, 'from varsome_api.vcf import VCFAnnotator\n')]
|
#!/usr/bin/env python
from setuptools import setup
import subprocess
import sys
import pkg_resources
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
def get_semantic_version():
global VERSION
proc1 = subprocess.Popen("git describe --tags", stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out = proc1.communicate()
if proc1.returncode != 0:
sys.stdout.write("fourbars must install from cloned folder. make sure .git folder exists\n")
sys.stdout.write(out[1])
raise SystemExit(32)
v = out[0].decode('ascii').replace('\n', '')
if v.startswith('v.'):
v = v[2:]
elif v.startswith('v'):
v = v[1:]
li = v.split('.')
lii = li[1].split('-')
if len(lii) == 3:
v = '{0}.{1}.{2}'.format(li[0],lii[0],lii[1])
else:
v = '{0}.{1}'.format(li[0], li[1])
return v
VERSION = get_semantic_version()
setup(
name = 'fourbars',
version = VERSION,
description = 'Ableton Live CLI - High Precision Loop Production and Asset Management',
long_description = long_description,
long_description_content_type = "text/markdown",
author = '<NAME>',
author_email = '<EMAIL>',
url = 'https://github.com/styk-tv/4bars',
packages = ['fourbars'],
install_requires = [
'Cython==0.29.13',
'pyliblo >= 0.9.1',
'termcolor==1.1.0',
'randomnames@git+https://github.com/styk-tv/python-randomnames.git@beaa1<PASSWORD>3bf03ac5bc6f3ace2eaed119585f80#egg=randomnames',
'yamlordereddictloader==0.4.0',
'pyliblo >= 0.9.1'
],
keywords = ['sound', 'music', 'ableton', 'osc', 'pylive'],
classifiers = [
'Topic :: Multimedia :: Sound/Audio',
'Topic :: Artistic Software',
'Topic :: Communications',
'Development Status :: 4 - Beta',
'Intended Audience :: Developers'
]
)
|
[
"sys.stdout.write",
"subprocess.Popen",
"setuptools.setup",
"os.path.dirname",
"os.path.join"
] |
[((1039, 1900), 'setuptools.setup', 'setup', ([], {'name': '"""fourbars"""', 'version': 'VERSION', 'description': '"""Ableton Live CLI - High Precision Loop Production and Asset Management"""', 'long_description': 'long_description', 'long_description_content_type': '"""text/markdown"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'url': '"""https://github.com/styk-tv/4bars"""', 'packages': "['fourbars']", 'install_requires': "['Cython==0.29.13', 'pyliblo >= 0.9.1', 'termcolor==1.1.0',\n 'randomnames@git+https://github.com/styk-tv/python-randomnames.git@beaa1<PASSWORD>3bf03ac5bc6f3ace2eaed119585f80#egg=randomnames'\n , 'yamlordereddictloader==0.4.0', 'pyliblo >= 0.9.1']", 'keywords': "['sound', 'music', 'ableton', 'osc', 'pylive']", 'classifiers': "['Topic :: Multimedia :: Sound/Audio', 'Topic :: Artistic Software',\n 'Topic :: Communications', 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers']"}), "(name='fourbars', version=VERSION, description=\n 'Ableton Live CLI - High Precision Loop Production and Asset Management',\n long_description=long_description, long_description_content_type=\n 'text/markdown', author='<NAME>', author_email='<EMAIL>', url=\n 'https://github.com/styk-tv/4bars', packages=['fourbars'],\n install_requires=['Cython==0.29.13', 'pyliblo >= 0.9.1',\n 'termcolor==1.1.0',\n 'randomnames@git+https://github.com/styk-tv/python-randomnames.git@beaa1<PASSWORD>3bf03ac5bc6f3ace2eaed119585f80#egg=randomnames'\n , 'yamlordereddictloader==0.4.0', 'pyliblo >= 0.9.1'], keywords=[\n 'sound', 'music', 'ableton', 'osc', 'pylive'], classifiers=[\n 'Topic :: Multimedia :: Sound/Audio', 'Topic :: Artistic Software',\n 'Topic :: Communications', 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers'])\n", (1044, 1900), False, 'from setuptools import setup\n'), ((154, 176), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (166, 176), False, 'from os import path\n'), ((346, 450), 'subprocess.Popen', 'subprocess.Popen', (['"""git describe --tags"""'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'shell': '(True)'}), "('git describe --tags', stdout=subprocess.PIPE, stderr=\n subprocess.PIPE, shell=True)\n", (362, 450), False, 'import subprocess\n'), ((188, 226), 'os.path.join', 'path.join', (['this_directory', '"""README.md"""'], {}), "(this_directory, 'README.md')\n", (197, 226), False, 'from os import path\n'), ((515, 612), 'sys.stdout.write', 'sys.stdout.write', (['"""fourbars must install from cloned folder. make sure .git folder exists\n"""'], {}), "(\n 'fourbars must install from cloned folder. make sure .git folder exists\\n')\n", (531, 612), False, 'import sys\n'), ((616, 640), 'sys.stdout.write', 'sys.stdout.write', (['out[1]'], {}), '(out[1])\n', (632, 640), False, 'import sys\n')]
|
from __future__ import absolute_import, division, print_function, unicode_literals
# NB: see head of `datasets.py'
from training_utils import *
from utils_io import os, tempdir
from datasets import image_kinds
print ("Using TensorFlow version:", tf.__version__)
def train_n_save_classifier (model, class_names, input_kind,
train_data, test_data = None,
optimizer = 'adam',
kind = 'sparse_categorical',
outdir = tempdir,
early_stopping = True,
validate_on_test_data = False,
cm_plot_args = {},
**kwds):
x_train, y_train = train_data
path = os.path.join (outdir, model.name)
log_dir = path + '_logs'
fw_train, fw_confision_matrix = \
tf.summary.create_file_writer (os.path.join (log_dir, 'train')), \
tf.summary.create_file_writer (os.path.join (log_dir, 'confusion_matrix'))
# Very basic & dumb test for detecting images...
if input_kind in image_kinds:
log_25_img_dataset_grid (fw_train, class_names, 'Training data (some)', train_data)
model.summary ()
loss, metric = (tf.losses.SparseCategoricalCrossentropy (from_logits=True),
tf.metrics.SparseCategoricalAccuracy ()) # if kind = 'sparse_categorical' else ?
model.compile (optimizer = optimizer,
loss = loss,
metrics = [metric])
callbacks = [
tf.keras.callbacks.ModelCheckpoint (
# Path where to save the model
# The two parameters below mean that we will overwrite
# the current checkpoint if and only if
# the `val_loss` score has improved.
# The saved model name will include the current epoch.
filepath = path + "_{epoch}",
save_best_only = True, # Only save a model if `val_loss` has improved.
monitor = "val_loss",
verbose = 1,
),
tf.keras.callbacks.TensorBoard (
log_dir = log_dir,
histogram_freq = 1, # How often to log histogram visualizations
embeddings_freq = 1, # How often to log embedding visualizations
update_freq = "epoch", # How often to write logs (default: once per epoch)
),
] + ([
# https://www.tensorflow.org/guide/keras/train_and_evaluate#checkpointing_models
tf.keras.callbacks.EarlyStopping (
# Stop training when `val_loss` is no longer improving
monitor = "val_loss",
# "no longer improving" being defined as "no better than 1e-2 less"
min_delta = 1e-2,
# "no longer improving" being further defined as "for at least 2 epochs"
patience = 3,
verbose = 1,
),
] if early_stopping else []) + ([
log_confusion_matrix_callback (
fw_confision_matrix,
model, class_names, test_data,
**cm_plot_args),
] if test_data is not None else [])
valargs = dict (validation_data = test_data) \
if validate_on_test_data and test_data is not None \
else {}
model.fit (x_train, y_train,
callbacks = callbacks,
**{'epochs': 20, # some defaults:
'shuffle': True,
'batch_size': 64,
'validation_split': 0.2,
**valargs,
**kwds})
if test_data is not None:
x_test, y_test = test_data
print ('Performing final validation on given test data:')
# Just check and show accuracy on "official" test data:
_, test_accuracy = model.evaluate (x_test, y_test, verbose = 1)
print ('Validation accuracy on given test data:', test_accuracy)
print ('Saving model in', path + '.h5')
model.save (path + '.h5')
# ---
def classifier (load_data, make_model, model_name = None,
load_data_args = {}, make_model_args = {}, **kwds):
train_data, test_data, input_shape, input_kind, class_names = load_data (**load_data_args)
train_n_save_classifier (make_model (input_shape, name = model_name, **make_model_args),
class_names, input_kind, train_data, test_data, **kwds)
# ---
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Reshape, Dense
def make_dense (input_shape, n_neurons = (100,), n_classes = 5,
input_reshape = False, **kwds):
"""Builds a very basic DNN.
n_neurons: gives the number of neurons for each layer, as a list or
tuple
n_classes: number of output neurons (= |classes|)
input_reshape: whether to include a dummy reshape input layer
(useful to access input features as activations, for DeepConcolic's
internal statistical analysis and layerwise abstractions).
"""
assert len (n_neurons) > 0
layer_args = [dict (activation = 'relu') for _ in n_neurons]
layer_args[0]['input_shape'] = input_shape
layer_args[-1]['activation'] = 'softmax'
layers = (Reshape (input_shape = input_shape, target_shape = input_shape),) if input_reshape else ()
layers += tuple (Dense (n, **args) for n, args in zip (n_neurons, layer_args))
return Sequential (layers, **kwds)
# ---
def make_dense_classifier (load_data, prefix, n_features, n_classes, n_neurons, **kwds):
"""A wrapper for training DNNs built using {make_dense}."""
model_name = (f'{prefix}{n_features}_{n_classes}_dense'
f'_{"_".join (str (c) for c in n_neurons)}')
model_args = dict (n_classes = n_classes, n_neurons = n_neurons)
classifier (load_data, make_dense, epochs = 50,
model_name = model_name, make_model_args = model_args,
**kwds)
# ---
|
[
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.models.Sequential",
"utils_io.os.path.join",
"tensorflow.keras.layers.Dense"
] |
[((781, 813), 'utils_io.os.path.join', 'os.path.join', (['outdir', 'model.name'], {}), '(outdir, model.name)\n', (793, 813), False, 'from utils_io import os, tempdir\n'), ((5227, 5253), 'tensorflow.keras.models.Sequential', 'Sequential', (['layers'], {}), '(layers, **kwds)\n', (5237, 5253), False, 'from tensorflow.keras.models import Sequential\n'), ((921, 951), 'utils_io.os.path.join', 'os.path.join', (['log_dir', '"""train"""'], {}), "(log_dir, 'train')\n", (933, 951), False, 'from utils_io import os, tempdir\n'), ((996, 1037), 'utils_io.os.path.join', 'os.path.join', (['log_dir', '"""confusion_matrix"""'], {}), "(log_dir, 'confusion_matrix')\n", (1008, 1037), False, 'from utils_io import os, tempdir\n'), ((5046, 5104), 'tensorflow.keras.layers.Reshape', 'Reshape', ([], {'input_shape': 'input_shape', 'target_shape': 'input_shape'}), '(input_shape=input_shape, target_shape=input_shape)\n', (5053, 5104), False, 'from tensorflow.keras.layers import Reshape, Dense\n'), ((5156, 5172), 'tensorflow.keras.layers.Dense', 'Dense', (['n'], {}), '(n, **args)\n', (5161, 5172), False, 'from tensorflow.keras.layers import Reshape, Dense\n')]
|
# -*- coding: utf-8 -*-
"""
@author: <NAME>
@copyright 2017
@licence: 2-clause BSD licence
This file contains the main code for the phase-state machine
"""
import numpy as _np
import pandas as _pd
import itertools
from numba import jit
import warnings as _warnings
@jit(nopython=True, cache=True)
def _limit(a):
"""
faster version of numpy clip, also modifies array in place
"""
#numba doesn't support indexing by boolean
#a[a<lower]=lower
#a[a>upper]=upper
shape = a.shape
for j in range(shape[1]):
for i in range(shape[0]):
if a[i,j] < 0.0:
a[i,j] = 0.0
if a[i,j] > 1.0:
a[i,j] = 1.0
@jit(nopython=True, cache=True)
def _signfunc(x):
return 1.0-2*(x<0)
@jit(nopython=True, cache=True)
def ReLU(x):
return 0.5*(_np.abs(x)+x)
# Alternative, differentiable "sign" function
# Also improves stability of the state's sign
#@jit(nopython=True)
#def _signfunc(x, epsilon=1e-3):
# return _np.tanh(x/epsilon)
#_np.set_printoptions(precision=3, suppress=True)
@jit(nopython=True, cache=True)
def _step(statevector, #main state vector. Input and output, modified in-place
#outputs, modified in place:
dotstatevector, #velocity of main state vector
activationMatrix, #Activation for each potential state and transition
phasesMatrix, #Phases for each transition
phaseVelocitiesMatrix, #Derivative of phases for each transition
#inputs:
phaseVelocityExponentInput, #input to modify velocity of each transition individually (exponential scale, basis 2)
BiasMatrix, #input to depart / avert departure from states
stateConnectivityGreedinessAdjustment, #input to modify how strong a successor state pulls the system towards itself, relative to the predecessor state
stateConnectivityCompetingGreedinessAdjustment, #input to adjust greediness in between compeeting successor states
phasesInput, # phase target in case a transition is enslaved to an external phase
velocityAdjustmentGain, # gain related to enslaving phase
noise_velocity, # vector that gets added to state velocity (usually in order to inject some base noise)
#parameters:
numStates, #number of states / dimensions
betaInv, #precomputed from beta parameter (state locations / scale)
stateConnectivityAbs, #precomputed from state graph
stateConnectivitySignMap, #precomputed from state graph
stateConnectivityIsBidirectional, #precomputed from state graph
stateConnectivityNrEdges, #precomputed from state graph
rhoZero, #coupling values for creating discrete states
rhoDelta, #coupling values for creating stable heteroclinic channels
alpha, #growth rate of states, determines speed of transitioning
dt, # time step duration in seconds
dtInv, #precomputed from dt
nonlinearityParamsLambda, #Kumaraswamy distribution parameters to modify gradualness of activation
nonlinearityParamsPsi, #Kumaraswamy distribution parameters to modify gradualness of phase progress
stateVectorExponent, #modifies the bending of heteroclinic channels
speedLimit, #safety limit to state velocity
epsilonLambda, #determines the region of zero activation around the coordinates axes
#for comparative study:
emulateHybridAutomaton, #set this to true to hack phasta into acting like a discrete state graph / hybrid automaton
triggervalue_successors, #for HA emulation mode, modified in-place
):
"""
Core phase-state machine computation.
Written as a function in order to be able to optimize it with numba
Note: The function modifies several arguments (numpy arrays) in place.
"""
#compute adjustment to the instantaneously effective growth factor
scaledactivation = activationMatrix * (1.0 / max(1.0, _np.sum(activationMatrix)))
kd = 2** _np.sum( scaledactivation * phaseVelocityExponentInput)
#compute mu for phase control:
phaseerrors = activationMatrix * (phasesInput-phasesMatrix)
correctiveAction = phaseerrors * velocityAdjustmentGain
correctiveActionPredecessor = _np.zeros((numStates))
for i in range(numStates):
correctiveActionPredecessor += correctiveAction[:,i]
correctiveActionSuccessor = _np.zeros((numStates))
for i in range(numStates):
correctiveActionSuccessor += correctiveAction[i,:]
mu = correctiveActionPredecessor - correctiveActionSuccessor
statevector_abs = _np.abs(statevector)
#adjust signs of the bias values depending on the transition direction:
biases = _np.dot(BiasMatrix * stateConnectivitySignMap * _np.outer(1-statevector_abs,statevector_abs), statevector)
noise_statevector = noise_velocity * dt
#If requested, decide whether to start a transition using a threshold, and stick to that decision no matter what until the transition finishes
if emulateHybridAutomaton:
predecessors = 1.0*(_np.abs(statevector)*betaInv > 0.99)
successors = (_np.dot(stateConnectivityAbs, predecessors) > 0.5 )
notsuccessors = (_np.dot(stateConnectivityAbs, predecessors) < 0.5 )
triggervalue_successors[notsuccessors] = 0.0
noise_statevector = _np.zeros((numStates))
threshold = 0.1
if _np.any(triggervalue_successors >= threshold ):
chosensuccessor = _np.argmax(triggervalue_successors)
value_chosen = triggervalue_successors[chosensuccessor]
notchosensuccessors = successors.copy()
notchosensuccessors[chosensuccessor] = 0
triggervalue_successors[:] = 0.0
triggervalue_successors[chosensuccessor] = value_chosen
if triggervalue_successors[chosensuccessor] < 1e5:
triggervalue_successors[ chosensuccessor ] = 1e6
#print(chosensuccessor)
noise_statevector[chosensuccessor] = 1.0
else:
triggervalue_successors[:] += biases * dt + noise_velocity
statevector[:] = statevector #for numba
statesigns = _signfunc(statevector)
statesignsOuterProduct = _np.outer(statesigns,statesigns) #precompute this, as we need it several times
#stateVectorExponent=1 #straight channels: |x| (original SHC by Horchler/Rabinovich)
#stateVectorExponent=2 #spherical channels: |x|**2 (default for phasta)
x_gamma = (statevector*statesigns)**stateVectorExponent
#Compute a mask that ensures the attractor works with negative state values too, that the transition's "sign" is observed, and that unidirectional edges do not accidentally change between positive and negative state values
#the computation is formulated such that only algebraic and continuous functions (e.g. ReLu) are used
M_T = ReLU(statesignsOuterProduct*stateConnectivitySignMap)
#Appropriate signs for transition-related greediness adjustment, depending on whether a graph edge is bidirectional or not:
TransitionGreedinessAdjustmentSign = (stateConnectivityNrEdges * ReLU(statesignsOuterProduct) - stateConnectivityIsBidirectional) * stateConnectivitySignMap
#sum everything into a transition/greedinesses matrix (T+G):
T_G = M_T*stateConnectivityAbs + TransitionGreedinessAdjustmentSign*stateConnectivityGreedinessAdjustment + stateConnectivityCompetingGreedinessAdjustment
#This is the core computation and time integration of the dynamical system:
growth = alpha + _np.dot(rhoZero, x_gamma) + _np.dot(rhoDelta * T_G, x_gamma)
dotstatevector[:] = statevector * growth * kd + mu + biases #estimate velocity. do not add noise to velocity, promp mixer doesnt like jumps
dotstatevector_L2 = _np.sqrt(_np.sum(dotstatevector**2))
velocity_limitfactor = _np.minimum(1.0, speedLimit/(1e-8 + dotstatevector_L2)) #limit speed of the motion in state space to avoid extreme phase velocities that a robot cannot
statevector[:] = (statevector + dotstatevector*dt*velocity_limitfactor + noise_statevector) #set the new state
#prepare a normalized state vector for the subsequent operations:
statevector_abs = _np.abs(statevector)
S = statevector_abs.reshape((numStates,1))
S2 = S*S
S_plus_P = S + S.T
statevectorL1 = _np.sum(S)
statevectorL2 = _np.sum(S2)
#compute the transition/state activation matrix (Lambda)
activations = stateConnectivitySignMap * _np.outer(statevector, statevector) * 16 * (statevectorL2) / (S_plus_P**4+statevectorL1**4)
activationMatrix[:,:] = activations * stateConnectivityAbs #function shown in visualization_of_activationfunction.py
_limit(activationMatrix)
#apply nonlinearity:
if (nonlinearityParamsLambda[0] != 1.0 or nonlinearityParamsLambda[1] != 1.0 ):
activationMatrix[:,:] = 1.0-(1.0-activationMatrix**nonlinearityParamsLambda[0])**nonlinearityParamsLambda[1] #Kumaraswamy CDF
#compute the state activation and put it into the diagonal of Lambda:
residual = max(0.0, 1.0 - _np.sum(activationMatrix))
stateactivation_normalized = S2/ _np.sum(S2)
for i in range(numStates):
activationMatrix[i,i] = stateactivation_normalized[i,0] * residual
#compute the phase progress matrix (Psi)
epsilonPsi = 0.0001
newphases = (S+epsilonPsi) / (S_plus_P+2*epsilonPsi)
_limit(newphases)
#apply nonlinearity:
if (nonlinearityParamsPsi[0] != 1.0 or nonlinearityParamsPsi[1] != 1.0 ):
newphases = 1.0-(1.0-newphases**nonlinearityParamsPsi[0])**nonlinearityParamsPsi[1] #Kumaraswamy CDF
phaseVelocitiesMatrix[:,:] = (newphases - phasesMatrix) * dtInv
phasesMatrix[:,:] = newphases
return
_KumaraswamyCDFParameters = {
'kumaraswamy1,1': (1.,1.),
'kumaraswamy2,1': (2.,1.),
'kumaraswamy1,2': (1.,2.),
#values for the Kumaraswamy CDF that approximate the given incomplete beta function:
'beta2,2': (1.913227338072261,2.2301669931409323),
'beta3,3': (2.561444544688591,3.680069490606511),
'beta2,5': (1.6666251656562021,5.9340642444701555),
}
class Kernel():
"""
This class provides a dynamical system that can behave like a state machine.
The transitions are smooth though, which enables interestingbehaviors like online-synchronisation and negotiation of branch alternatives
The most important parameters are:
numStates: the number of quasi-discrete states the system should have
predecessors: a list of lists which defines the preceeding states of each state
Note: Don't set up mutual predecessors (i.e. a loop with two states). This does not work. You need at least 3 states for a loop.
alpha: determines the speed at which a state becomes dominant. Effectively speeds up or slows down the machine
epsilon: "noise" added to the states, which has the effect of reducing the average dwell time for the preceeding states
Less important paramters:
beta: scaling factor for the state variable (usually 1.0)
nu: determines how easy it is to push away from a state (usually 1.5).
dt: time step at which the system is simulated (default: 1e-2)
Inputs:
Observed phase Psi_d: A matrix analogous to the phase matrix, containing phase estimates conditional to the transition or phase being activated
phase control gain K_p: A matrix analogous to the activation matrix, which indicates how confident the state observation is
inputbias: vector that signals which state should currently be the next (e.g. from perception)
Output:
stateVector: The actual, evolving state of the dynamical system.
phase matrix Psi: A (numStates x numStates) matrix aggregating all phase variables for each possible transition, plus the state vector on the diagonal
activation matrix Lambda: A matrix which contains the corresponding transition activation values. state
activations correspond to the 1-sum(transition activations), so that sum(matrix) = 1 (i.e.e can be used as a
weighing matrix)
"""
def __init__(self, **kwargs):
self.numStates = 0
self.t = 0.0
self.statehistorylen = 0
self.historyIndex = 0
self.setParameters(**kwargs)
def setParameters(self,
numStates=3,
predecessors=None,
successors=[[1],[2],[0]],
alphaTime=None,
alpha=40.0,
epsilon=1e-9,
nu=1.0,
beta=1.0,
dt=1e-2,
stateVectorExponent=2.0,
speedLimit = _np.inf,
initialState=0,
nonlinearityLambda='kumaraswamy1,1',
nonlinearityPsi='kumaraswamy1,1',
inputFilterTimeConstant = 0.1,
reuseNoiseSampleTimes = 10,
reset=False,
recordSteps=-1,
emulateHybridAutomaton=False):
"""
Method to set or reconfigure the phase-state-machine
numStates: The number of states the system should have
predecessors: A list of lists which contain the state indices of the respective predecessors
successors: A list of lists which contain the state indices of the respective successors
Note: use of predecessors and successors parameter is mutually exclusive!
For the meaning of the other parameters, please consult the paper or the code
"""
oldcount = self.numStates
#parameters:
self.numStates = numStates
if alphaTime is None: #backwards compatibility: if no alphatime is provided, use dt-dependent alpha value
self.alphaTime = self._sanitizeParam(alpha)/dt
else:
self.alphaTime = self._sanitizeParam(alphaTime)
self.beta = self._sanitizeParam(beta)
self.betaInv = 1.0/self.beta #this is used often, so precompute once
self.nu = self._sanitizeParam(nu)
self.nu_term = self.nu/(1 + self.nu) #equations usually use this term - precompute it
self.epsilon = self._sanitizeParam(epsilon) * self.beta #Wiener process noise
self.epsilonLambda=0.01 #regularization parameter of activation function
self.maxGreediness=10.0 #maximum factor to allow for increasing decisiveness (mainly to guard against input errors)
self.reuseNoiseSampleTimes = reuseNoiseSampleTimes
self.stateVectorExponent =stateVectorExponent
self.speedLimit = speedLimit
if initialState >= self.numStates:
raise ValueError()
self.initialState = initialState
if predecessors is not None: #convert list of predecessors into list of successors
self.successors = self._predecessorListToSuccessorList(predecessors)
else:
self.successors = successors
self.updateDt(dt) #also calls self._updateRho
self.nonlinearityParamsLambda = _KumaraswamyCDFParameters[nonlinearityLambda] #nonlinearity for sparsifying activation values
self.nonlinearityParamsPsi = _KumaraswamyCDFParameters[nonlinearityPsi] #nonlinearity that linearizes phase variables
#inputs:
self.BiasMatrix = _np.zeros((self.numStates,self.numStates)) #determines transition preferences and state timeout duration
self.BiasMatrixDesired = _np.zeros((self.numStates,self.numStates)) #determines transition preferences and state timeout duration
self.emulateHybridAutomaton = emulateHybridAutomaton #set this to true to emulate discrete switching behavior on bias input
self.triggervalue_successors = _np.zeros((self.numStates))
self.phasesInput = _np.zeros((self.numStates,self.numStates)) #input to synchronize state transitions (slower/faster)
self.velocityAdjustmentGain = _np.zeros((self.numStates,self.numStates)) #gain of the control enslaving the given state transition
self.phaseVelocityExponentInput = _np.zeros((self.numStates,self.numStates)) #contains values that limit transition velocity
self.stateConnectivityGreedinessAdjustment = _np.zeros((self.numStates,self.numStates)) #contains values that adjust transition greediness
self.stateConnectivityCompetingGreedinessAdjustment = _np.zeros((self.numStates,self.numStates)) #contains values that adjust competing transition greediness
self.stateConnectivityGreedinessTransitions = _np.zeros((self.numStates,self.numStates))
self.stateConnectivityGreedinessCompetingSuccessors = _np.zeros((self.numStates,self.numStates))
self.inputfilterK = dt / max(dt , inputFilterTimeConstant) #how much inputs should be low-passed (to avoid sudden changes in phasta state)
#internal data structures
if self.numStates != oldcount or reset: #force a reset if number of states change
self.statevector = _np.zeros((numStates))
self.dotstatevector = _np.zeros((numStates))
self.statevector[self.initialState] = self.beta[self.initialState] #start at a state
self.phasesActivation = _np.zeros((self.numStates,self.numStates))
self.phasesProgress = _np.zeros((self.numStates,self.numStates))
self.phasesProgressVelocities = _np.zeros((self.numStates,self.numStates))
self.biases = _np.zeros((self.numStates, self.numStates))
self.noise_velocity = 0.0
self.noiseValidCounter = 0
#these data structures are used to save the history of the system:
if recordSteps< 0:
pass
elif recordSteps == 0:
self.statehistorylen = 0
self.historyIndex = 0
else:
self.statehistorylen = recordSteps
self.statehistory = _np.empty((self.statehistorylen, self.numStates+1))
self.statehistory.fill(_np.nan)
self.phasesActivationHistory= _np.zeros((self.statehistorylen, self.numStates,self.numStates))
self.phasesProgressHistory = _np.zeros((self.statehistorylen, self.numStates,self.numStates))
self.historyIndex = 0
def _updateRho(self):
"""
internal method to compute the P matrix from preset parameters
also computes the state connectivity matrix
reimplements the computation by the SHCtoolbox code
"""
stateConnectivityAbs = _np.zeros((self.numStates, self.numStates))
stateConnectivitySignMap =_np.tri(self.numStates, self.numStates, k=0) - _np.tri(self.numStates, self.numStates, k=-1).T
for state, successorsPerState in enumerate(self.successors):
#precedecessorcount = len(predecessorsPerState)
for successor in successorsPerState:
if state == successor: raise ValueError("Cannot set a state ({0}) as successor of itself!".format(state))
stateConnectivityAbs[successor,state] = 1
stateConnectivitySignMap[successor,state] = 1
stateConnectivitySignMap[state, successor] = -1
self.stateConnectivityAbs = stateConnectivityAbs
self.stateConnectivitySignMap = stateConnectivitySignMap
#precompute some things:
self.stateConnectivityIsBidirectional = _np.sqrt(self.stateConnectivityAbs * self.stateConnectivityAbs.T)
self.stateConnectivityNrEdges = stateConnectivityAbs + stateConnectivityAbs.T
self.stateConnectivity = self.stateConnectivityAbs
#compute a matrix that has ones for states that have a common predecessor, i.e. pairs of states which compete (except for self-competition)
self.connectivitySigned = self.stateConnectivitySignMap*self.stateConnectivityAbs
self.competingStates = _np.dot(self.stateConnectivityAbs, self.stateConnectivityAbs.T) * (1-_np.eye(self.numStates))
#first, fill in the standard values in rhoZero
# rhoZero = beta^-1 x alpha * (1 - I + alpha^-1 x alpha)
alphaInv = 1/self.alpha
s = _np.dot(self.alpha[:,_np.newaxis],self.betaInv[_np.newaxis,:])
rhoZero = s * (_np.eye(self.numStates) - 1 - _np.dot(self.alpha[:,_np.newaxis],alphaInv[_np.newaxis,:]))
#then fill the rhoDelta:
rhoDelta = (self.alpha[:,_np.newaxis]*self.betaInv[_np.newaxis,:] / self.nu_term[:,_np.newaxis])
self.rhoZero = rhoZero
self.rhoDelta = rhoDelta
successorCountInv = 1.0/_np.maximum(_np.sum(self.stateConnectivityAbs, axis=0)[_np.newaxis,:],1.0)
self.BiasMeanBalancingWeights = self.stateConnectivityAbs * successorCountInv
def step(self, until=None, period=None, nr_steps=1):
"""
Main algorithm, implementing the integration step, state space decomposition, phase control and velocity adjustment.
period: give a period to simulate
until: give a time until to simulate
nr_steps: give the number of steps to simulate at self.dt
If more than one argument is given, then precedence is: until > period > nr_steps
"""
if until is not None:
period = until - self.t
if period < 0.0:
raise RuntimeError("argument until is in the past")
#if a period is given, iterate until we finished that period:
if period is not None:
nr_steps = int(period // self.dt)
for i in range(nr_steps):
#execute a single step:
self.t = self.t + self.dt #advance time
self.noiseValidCounter = self.noiseValidCounter - 1
if self.noiseValidCounter <= 0: #do not sample every timestep as the dynamical system cannot react that fast anyway. Effectively low-pass-filters the noise.
self.noise_velocity = _np.random.normal(scale = self.epsilonPerSample, size=self.numStates) #sample a discretized wiener process noise
self.noiseValidCounter = self.reuseNoiseSampleTimes
#low-pass filter input to avoid sudden jumps in velocity
self.BiasMatrix += self.inputfilterK * (self.BiasMatrixDesired-self.BiasMatrix)
self.stateConnectivityGreedinessAdjustment += self.inputfilterK * (self.stateConnectivityGreedinessTransitions - self.stateConnectivityGreedinessAdjustment)
self.stateConnectivityCompetingGreedinessAdjustment += self.inputfilterK * (self.stateConnectivityGreedinessCompetingSuccessors -self.stateConnectivityCompetingGreedinessAdjustment)
_step( #arrays modified in-place:
self.statevector,
self.dotstatevector,
self.phasesActivation,
self.phasesProgress,
self.phasesProgressVelocities,
#inputs
self.phaseVelocityExponentInput,
self.BiasMatrix,
self.stateConnectivityGreedinessAdjustment,
self.stateConnectivityCompetingGreedinessAdjustment,
self.phasesInput,
self.velocityAdjustmentGain,
self.noise_velocity,
#parameters
self.numStates,
self.betaInv ,
self.stateConnectivityAbs,
self.stateConnectivitySignMap,
self.stateConnectivityIsBidirectional,
self.stateConnectivityNrEdges,
self.rhoZero,
self.rhoDelta,
self.alpha,
self.dt,
self.dtInv,
self.nonlinearityParamsLambda,
self.nonlinearityParamsPsi,
self.stateVectorExponent,
self.speedLimit,
self.epsilonLambda,
self.emulateHybridAutomaton,
self.triggervalue_successors
)
#note the currently most active state/transition (for informative purposes)
i = _np.argmax(self.phasesActivation)
self.currentPredecessor = i % self.numStates
self.currentSuccessor = i // self.numStates
self._recordState()
return self.statevector
def get1DState(self):
"""
return value of a one-dimensional signal that indicates which state we are in, or in which transition
"""
value = self.currentPredecessor + (self.currentSuccessor - self.currentPredecessor) * self.phasesProgress[self.currentSuccessor,self.currentPredecessor]
return value
def sayState(self):
"""
returns a string describing the current state
"""
if self.currentPredecessor == self.currentSuccessor:
return "{0}".format(self.currentPredecessor )
else:
return "{0}->{1}".format(self.currentPredecessor , self.currentSuccessor)
def updateDt(self, dt):
"""
upadate the time step used to integrate the dynamical system:
"""
self.dt = dt
self.dtInv = 1.0 / dt
self.epsilonPerSample = self.epsilon *_np.sqrt(self.dt*self.reuseNoiseSampleTimes)/dt #factor accounts for the accumulation during a time step (assuming a Wiener process)
self.alpha = self.alphaTime * self.dt
self._updateRho()
def updateEpsilon(self, epsilon):
"""
Update the noise vector
"""
self.epsilon = epsilon
self.updateDt(self.dt) #need to recompute self.epsilonPerSample
def updateSuccessors(self, listoflist):
"""
recompute the system according to the given list of predecessors
"""
self.successors=listoflist
self._updateRho()
def updateGreediness(self, greedinesses):
"""
update the greediness for competing transitions / successor states
Low values make the system maintain co-activated transitions for a long time, high values make transitions very competitive.
0.0: complete indecisiveness (transitions do not compete at all and may not converge towards an exclusive successor state)
1.0: behavior of the original SHC network by [1]
20.0: extremely greedy transitions, behaves much like a discrete state machine
negative values: abort transition and return to the predecessor state
Absolute values less than 1.0 also reduce speed of transitions, 0.0 stops transitions completely.
This value is considered during a transition away from the predecessor state,
i.e. it influences the transition dynamics while honoring the basic state connectivity
greediness: vector of size self.numStates or matrix of size (numStates,numStates)
scalar: set a common greediness value for all competing transitions
vector: greediness values for all competing transitions leading to the related successor state
matrix: set greediness value for each competing transition individually
"""
greedinesses = _np.asarray(greedinesses)
if greedinesses.ndim == 1:
greedinesses = greedinesses[_np.newaxis,:]
elif greedinesses.ndim == 0:
greedinesses = _np.full((1, self.numStates),greedinesses)
#adjust the strength / reverse direction of the outgoing shc's according to greedinesses:
greediness_successorstates = _np.clip((0.5*greedinesses-0.5), -1.0, 0.0) # _np.clip(g, -self.nu_term, 0.0)
strength = self.stateConnectivityAbs * greediness_successorstates.T #works for (1,-1) transition pairs too
self.stateConnectivityGreedinessTransitions = strength + strength.T
#Adjust competition between nodes according to their greediness:
kappa=0.
# self.stateConnectivityGreedinessCompetingSuccessors = self.competingStates * 0.5*(1-(1.+kappa)*greedinesses+kappa*greedinesses.T)
self.stateConnectivityGreedinessCompetingSuccessors = self.competingStates * 0.5*(1-greedinesses)
def updateCompetingTransitionGreediness(self,greedinesses):
_warnings.warn("Please replace updateCompetingTransitionGreediness with updateGreediness asap!", DeprecationWarning, stacklevel=2)
self.updateGreediness(greedinesses)
def _predecessorListToSuccessorList(self, predecessors):
""" helper to convert lists of predecessor states into lists of successor states"""
successors = [ [] for i in range(self.numStates) ] #create a list of lists
for i, predecessorsPerState in enumerate(predecessors):
for pre in predecessorsPerState:
successors[pre].append(i)
return successors
def updatePredecessors(self, listoflist):
"""
recompute the system according to the given list of predecessors
"""
self.successors = self._predecessorListToSuccessorList(predecessors)
self._updateRho()
def getPredecessors(self):
"""
return the predecessors
"""
successors = [ [] for i in range(self.numStates) ] #create a list of lists
for i, predecessorsPerState in enumerate(predecessors):
for pre in predecessorsPerState:
successors[pre].append(i)
return successors
def updateBiases(self, successorBias):
"""
changes the "bias" input array
Small values bias the system to hasten transitions towards that state
Large, short spikes can be used to override any state and force the system into any state,
regardless of state connectivity
successorBias: numpy array of biases for each (successor state biased towards, current state) pair
if scalar: set all successor biases to the same value
if vector: set successor biases to the given vector for every state
if matrix: set each (successor state, current state) pair individually
"""
bias = _np.asarray(successorBias)
if bias.ndim == 1:
self.BiasMatrixDesired[:,:] = (self.stateConnectivity) * bias[:,_np.newaxis]
else:
self.BiasMatrixDesired[:,:] = bias
def updateB(self, successorBias):
_warnings.warn("Please replace updateB() with updateBiases() asap!",stacklevel=2)
self.updateBiases(successorBias)
def updateTransitionTriggerInput(self, successorBias):
_warnings.warn("Please replace updateTransitionTriggerInput() with updateBiases() asap!",stacklevel=2)
self.updateBiases(successorBias)
def updatePhasesInput(self, phases):
"""
changes the Psi_d matrix
Use this as phase reference to sync the system with a phase from perception
"""
_np.copyto(self.phasesInput, phases)
def updateVelocityEnslavementGain(self, gains):
"""
changes the K_p matrix
Set the gain values to use for each phase transition.
"""
_np.copyto(self.velocityAdjustmentGain, gains)
def updateTransitionPhaseVelocityExponentInput(self, limits):
"""
Update the matrix that specifies how fast the given phases should progress
Each element effectively is an exponent with base 2 for adjusting each phase velocity individually
limits[j,i]: exponent for the transition from i to j
limits[i,i]: 0 (enforced implicitly)
if limits is a vector: treat it as common exponent for transitions of the same predecessor state
if limits is a scalar: set as common exponent for all transitions
While phase velocity can also be controlled by the self.alpha vector directly,
large variations to individual states' alpha parameter can alter the
convergence behavior and we may lose the stable heteroclinic channel properties
This method here effectly "scales" the timeline during transitions
"""
limits = _np.asarray(limits)
if limits.ndim == 1:
limits = limits[_np.newaxis,:]
elif limits.ndim == 0:
limits = limits[_np.newaxis,_np.newaxis]
self.phaseVelocityExponentInput[:,:] = limits
#_np.fill_diagonal(self.phaseVelocityExponentInput , 0.0)
def getHistory(self):
"""
return the historic values for plotting
"""
if self.statehistorylen == 0:
raise RuntimeError("no history is being recorded")
return (self.statehistory[:self.historyIndex,:],
self.phasesActivationHistory[:self.historyIndex,:,:],
self.phasesProgressHistory[:self.historyIndex,:,:]
)
def _sanitizeParam(self, p):
"""
internal helper to provide robust handling of lists and numpy array input data
"""
if _np.isscalar(p):
sanitizedP = _np.empty((self.numStates))
sanitizedP.fill(float(p))
else:
try:
p = p[0:self.numStates]
except IndexError:
raise Exception("Parameter has not the length of numStates!")
sanitizedP = _np.array(p)
return sanitizedP
def _recordState(self):
"""
internal helper to save the current state for later plotting
"""
if self.historyIndex < self.statehistorylen:
self.statehistory[self.historyIndex, 0] = self.t
self.statehistory[self.historyIndex, 1:self.numStates+1] = self.statevector
self.phasesActivationHistory[self.historyIndex, :,:] = self.phasesActivation
self.phasesProgressHistory[self.historyIndex, :,:] = self.phasesProgress
if self.historyIndex < self.statehistorylen:
self.historyIndex = self.historyIndex + 1
|
[
"numpy.abs",
"numpy.sum",
"numpy.argmax",
"numpy.empty",
"numpy.clip",
"numpy.random.normal",
"numpy.full",
"numpy.tri",
"numpy.minimum",
"numpy.asarray",
"numpy.dot",
"numpy.copyto",
"numpy.outer",
"numpy.isscalar",
"numpy.zeros",
"numpy.any",
"numba.jit",
"numpy.array",
"numpy.eye",
"warnings.warn",
"numpy.sqrt"
] |
[((271, 301), 'numba.jit', 'jit', ([], {'nopython': '(True)', 'cache': '(True)'}), '(nopython=True, cache=True)\n', (274, 301), False, 'from numba import jit\n'), ((692, 722), 'numba.jit', 'jit', ([], {'nopython': '(True)', 'cache': '(True)'}), '(nopython=True, cache=True)\n', (695, 722), False, 'from numba import jit\n'), ((767, 797), 'numba.jit', 'jit', ([], {'nopython': '(True)', 'cache': '(True)'}), '(nopython=True, cache=True)\n', (770, 797), False, 'from numba import jit\n'), ((1075, 1105), 'numba.jit', 'jit', ([], {'nopython': '(True)', 'cache': '(True)'}), '(nopython=True, cache=True)\n', (1078, 1105), False, 'from numba import jit\n'), ((4858, 4878), 'numpy.zeros', '_np.zeros', (['numStates'], {}), '(numStates)\n', (4867, 4878), True, 'import numpy as _np\n'), ((5017, 5037), 'numpy.zeros', '_np.zeros', (['numStates'], {}), '(numStates)\n', (5026, 5037), True, 'import numpy as _np\n'), ((5234, 5254), 'numpy.abs', '_np.abs', (['statevector'], {}), '(statevector)\n', (5241, 5254), True, 'import numpy as _np\n'), ((7001, 7034), 'numpy.outer', '_np.outer', (['statesigns', 'statesigns'], {}), '(statesigns, statesigns)\n', (7010, 7034), True, 'import numpy as _np\n'), ((8704, 8762), 'numpy.minimum', '_np.minimum', (['(1.0)', '(speedLimit / (1e-08 + dotstatevector_L2))'], {}), '(1.0, speedLimit / (1e-08 + dotstatevector_L2))\n', (8715, 8762), True, 'import numpy as _np\n'), ((9097, 9117), 'numpy.abs', '_np.abs', (['statevector'], {}), '(statevector)\n', (9104, 9117), True, 'import numpy as _np\n'), ((9237, 9247), 'numpy.sum', '_np.sum', (['S'], {}), '(S)\n', (9244, 9247), True, 'import numpy as _np\n'), ((9272, 9283), 'numpy.sum', '_np.sum', (['S2'], {}), '(S2)\n', (9279, 9283), True, 'import numpy as _np\n'), ((4584, 4638), 'numpy.sum', '_np.sum', (['(scaledactivation * phaseVelocityExponentInput)'], {}), '(scaledactivation * phaseVelocityExponentInput)\n', (4591, 4638), True, 'import numpy as _np\n'), ((6023, 6043), 'numpy.zeros', '_np.zeros', (['numStates'], {}), '(numStates)\n', (6032, 6043), True, 'import numpy as _np\n'), ((6089, 6134), 'numpy.any', '_np.any', (['(triggervalue_successors >= threshold)'], {}), '(triggervalue_successors >= threshold)\n', (6096, 6134), True, 'import numpy as _np\n'), ((8425, 8457), 'numpy.dot', '_np.dot', (['(rhoDelta * T_G)', 'x_gamma'], {}), '(rhoDelta * T_G, x_gamma)\n', (8432, 8457), True, 'import numpy as _np\n'), ((8645, 8673), 'numpy.sum', '_np.sum', (['(dotstatevector ** 2)'], {}), '(dotstatevector ** 2)\n', (8652, 8673), True, 'import numpy as _np\n'), ((10093, 10104), 'numpy.sum', '_np.sum', (['S2'], {}), '(S2)\n', (10100, 10104), True, 'import numpy as _np\n'), ((16425, 16468), 'numpy.zeros', '_np.zeros', (['(self.numStates, self.numStates)'], {}), '((self.numStates, self.numStates))\n', (16434, 16468), True, 'import numpy as _np\n'), ((16563, 16606), 'numpy.zeros', '_np.zeros', (['(self.numStates, self.numStates)'], {}), '((self.numStates, self.numStates))\n', (16572, 16606), True, 'import numpy as _np\n'), ((16840, 16865), 'numpy.zeros', '_np.zeros', (['self.numStates'], {}), '(self.numStates)\n', (16849, 16865), True, 'import numpy as _np\n'), ((16904, 16947), 'numpy.zeros', '_np.zeros', (['(self.numStates, self.numStates)'], {}), '((self.numStates, self.numStates))\n', (16913, 16947), True, 'import numpy as _np\n'), ((17041, 17084), 'numpy.zeros', '_np.zeros', (['(self.numStates, self.numStates)'], {}), '((self.numStates, self.numStates))\n', (17050, 17084), True, 'import numpy as _np\n'), ((17185, 17228), 'numpy.zeros', '_np.zeros', (['(self.numStates, self.numStates)'], {}), '((self.numStates, self.numStates))\n', (17194, 17228), True, 'import numpy as _np\n'), ((17330, 17373), 'numpy.zeros', '_np.zeros', (['(self.numStates, self.numStates)'], {}), '((self.numStates, self.numStates))\n', (17339, 17373), True, 'import numpy as _np\n'), ((17486, 17529), 'numpy.zeros', '_np.zeros', (['(self.numStates, self.numStates)'], {}), '((self.numStates, self.numStates))\n', (17495, 17529), True, 'import numpy as _np\n'), ((17644, 17687), 'numpy.zeros', '_np.zeros', (['(self.numStates, self.numStates)'], {}), '((self.numStates, self.numStates))\n', (17653, 17687), True, 'import numpy as _np\n'), ((17749, 17792), 'numpy.zeros', '_np.zeros', (['(self.numStates, self.numStates)'], {}), '((self.numStates, self.numStates))\n', (17758, 17792), True, 'import numpy as _np\n'), ((19669, 19712), 'numpy.zeros', '_np.zeros', (['(self.numStates, self.numStates)'], {}), '((self.numStates, self.numStates))\n', (19678, 19712), True, 'import numpy as _np\n'), ((20531, 20596), 'numpy.sqrt', '_np.sqrt', (['(self.stateConnectivityAbs * self.stateConnectivityAbs.T)'], {}), '(self.stateConnectivityAbs * self.stateConnectivityAbs.T)\n', (20539, 20596), True, 'import numpy as _np\n'), ((21289, 21354), 'numpy.dot', '_np.dot', (['self.alpha[:, _np.newaxis]', 'self.betaInv[_np.newaxis, :]'], {}), '(self.alpha[:, _np.newaxis], self.betaInv[_np.newaxis, :])\n', (21296, 21354), True, 'import numpy as _np\n'), ((25665, 25698), 'numpy.argmax', '_np.argmax', (['self.phasesActivation'], {}), '(self.phasesActivation)\n', (25675, 25698), True, 'import numpy as _np\n'), ((28762, 28787), 'numpy.asarray', '_np.asarray', (['greedinesses'], {}), '(greedinesses)\n', (28773, 28787), True, 'import numpy as _np\n'), ((29129, 29174), 'numpy.clip', '_np.clip', (['(0.5 * greedinesses - 0.5)', '(-1.0)', '(0.0)'], {}), '(0.5 * greedinesses - 0.5, -1.0, 0.0)\n', (29137, 29174), True, 'import numpy as _np\n'), ((29817, 29957), 'warnings.warn', '_warnings.warn', (['"""Please replace updateCompetingTransitionGreediness with updateGreediness asap!"""', 'DeprecationWarning'], {'stacklevel': '(2)'}), "(\n 'Please replace updateCompetingTransitionGreediness with updateGreediness asap!'\n , DeprecationWarning, stacklevel=2)\n", (29831, 29957), True, 'import warnings as _warnings\n'), ((31742, 31768), 'numpy.asarray', '_np.asarray', (['successorBias'], {}), '(successorBias)\n', (31753, 31768), True, 'import numpy as _np\n'), ((32001, 32087), 'warnings.warn', '_warnings.warn', (['"""Please replace updateB() with updateBiases() asap!"""'], {'stacklevel': '(2)'}), "('Please replace updateB() with updateBiases() asap!',\n stacklevel=2)\n", (32015, 32087), True, 'import warnings as _warnings\n'), ((32192, 32304), 'warnings.warn', '_warnings.warn', (['"""Please replace updateTransitionTriggerInput() with updateBiases() asap!"""'], {'stacklevel': '(2)'}), "(\n 'Please replace updateTransitionTriggerInput() with updateBiases() asap!',\n stacklevel=2)\n", (32206, 32304), True, 'import warnings as _warnings\n'), ((32544, 32580), 'numpy.copyto', '_np.copyto', (['self.phasesInput', 'phases'], {}), '(self.phasesInput, phases)\n', (32554, 32580), True, 'import numpy as _np\n'), ((32776, 32822), 'numpy.copyto', '_np.copyto', (['self.velocityAdjustmentGain', 'gains'], {}), '(self.velocityAdjustmentGain, gains)\n', (32786, 32822), True, 'import numpy as _np\n'), ((33788, 33807), 'numpy.asarray', '_np.asarray', (['limits'], {}), '(limits)\n', (33799, 33807), True, 'import numpy as _np\n'), ((34650, 34665), 'numpy.isscalar', '_np.isscalar', (['p'], {}), '(p)\n', (34662, 34665), True, 'import numpy as _np\n'), ((828, 838), 'numpy.abs', '_np.abs', (['x'], {}), '(x)\n', (835, 838), True, 'import numpy as _np\n'), ((5400, 5447), 'numpy.outer', '_np.outer', (['(1 - statevector_abs)', 'statevector_abs'], {}), '(1 - statevector_abs, statevector_abs)\n', (5409, 5447), True, 'import numpy as _np\n'), ((5798, 5841), 'numpy.dot', '_np.dot', (['stateConnectivityAbs', 'predecessors'], {}), '(stateConnectivityAbs, predecessors)\n', (5805, 5841), True, 'import numpy as _np\n'), ((5881, 5924), 'numpy.dot', '_np.dot', (['stateConnectivityAbs', 'predecessors'], {}), '(stateConnectivityAbs, predecessors)\n', (5888, 5924), True, 'import numpy as _np\n'), ((6171, 6206), 'numpy.argmax', '_np.argmax', (['triggervalue_successors'], {}), '(triggervalue_successors)\n', (6181, 6206), True, 'import numpy as _np\n'), ((8397, 8422), 'numpy.dot', '_np.dot', (['rhoZero', 'x_gamma'], {}), '(rhoZero, x_gamma)\n', (8404, 8422), True, 'import numpy as _np\n'), ((10025, 10050), 'numpy.sum', '_np.sum', (['activationMatrix'], {}), '(activationMatrix)\n', (10032, 10050), True, 'import numpy as _np\n'), ((18105, 18125), 'numpy.zeros', '_np.zeros', (['numStates'], {}), '(numStates)\n', (18114, 18125), True, 'import numpy as _np\n'), ((18162, 18182), 'numpy.zeros', '_np.zeros', (['numStates'], {}), '(numStates)\n', (18171, 18182), True, 'import numpy as _np\n'), ((18319, 18362), 'numpy.zeros', '_np.zeros', (['(self.numStates, self.numStates)'], {}), '((self.numStates, self.numStates))\n', (18328, 18362), True, 'import numpy as _np\n'), ((18396, 18439), 'numpy.zeros', '_np.zeros', (['(self.numStates, self.numStates)'], {}), '((self.numStates, self.numStates))\n', (18405, 18439), True, 'import numpy as _np\n'), ((18483, 18526), 'numpy.zeros', '_np.zeros', (['(self.numStates, self.numStates)'], {}), '((self.numStates, self.numStates))\n', (18492, 18526), True, 'import numpy as _np\n'), ((18552, 18595), 'numpy.zeros', '_np.zeros', (['(self.numStates, self.numStates)'], {}), '((self.numStates, self.numStates))\n', (18561, 18595), True, 'import numpy as _np\n'), ((19747, 19791), 'numpy.tri', '_np.tri', (['self.numStates', 'self.numStates'], {'k': '(0)'}), '(self.numStates, self.numStates, k=0)\n', (19754, 19791), True, 'import numpy as _np\n'), ((21022, 21085), 'numpy.dot', '_np.dot', (['self.stateConnectivityAbs', 'self.stateConnectivityAbs.T'], {}), '(self.stateConnectivityAbs, self.stateConnectivityAbs.T)\n', (21029, 21085), True, 'import numpy as _np\n'), ((34692, 34717), 'numpy.empty', '_np.empty', (['self.numStates'], {}), '(self.numStates)\n', (34701, 34717), True, 'import numpy as _np\n'), ((34963, 34975), 'numpy.array', '_np.array', (['p'], {}), '(p)\n', (34972, 34975), True, 'import numpy as _np\n'), ((4539, 4564), 'numpy.sum', '_np.sum', (['activationMatrix'], {}), '(activationMatrix)\n', (4546, 4564), True, 'import numpy as _np\n'), ((19794, 19839), 'numpy.tri', '_np.tri', (['self.numStates', 'self.numStates'], {'k': '(-1)'}), '(self.numStates, self.numStates, k=-1)\n', (19801, 19839), True, 'import numpy as _np\n'), ((21091, 21114), 'numpy.eye', '_np.eye', (['self.numStates'], {}), '(self.numStates)\n', (21098, 21114), True, 'import numpy as _np\n'), ((21405, 21466), 'numpy.dot', '_np.dot', (['self.alpha[:, _np.newaxis]', 'alphaInv[_np.newaxis, :]'], {}), '(self.alpha[:, _np.newaxis], alphaInv[_np.newaxis, :])\n', (21412, 21466), True, 'import numpy as _np\n'), ((23217, 23284), 'numpy.random.normal', '_np.random.normal', ([], {'scale': 'self.epsilonPerSample', 'size': 'self.numStates'}), '(scale=self.epsilonPerSample, size=self.numStates)\n', (23234, 23284), True, 'import numpy as _np\n'), ((26776, 26822), 'numpy.sqrt', '_np.sqrt', (['(self.dt * self.reuseNoiseSampleTimes)'], {}), '(self.dt * self.reuseNoiseSampleTimes)\n', (26784, 26822), True, 'import numpy as _np\n'), ((28942, 28985), 'numpy.full', '_np.full', (['(1, self.numStates)', 'greedinesses'], {}), '((1, self.numStates), greedinesses)\n', (28950, 28985), True, 'import numpy as _np\n'), ((5734, 5754), 'numpy.abs', '_np.abs', (['statevector'], {}), '(statevector)\n', (5741, 5754), True, 'import numpy as _np\n'), ((9398, 9433), 'numpy.outer', '_np.outer', (['statevector', 'statevector'], {}), '(statevector, statevector)\n', (9407, 9433), True, 'import numpy as _np\n'), ((19023, 19076), 'numpy.empty', '_np.empty', (['(self.statehistorylen, self.numStates + 1)'], {}), '((self.statehistorylen, self.numStates + 1))\n', (19032, 19076), True, 'import numpy as _np\n'), ((19169, 19234), 'numpy.zeros', '_np.zeros', (['(self.statehistorylen, self.numStates, self.numStates)'], {}), '((self.statehistorylen, self.numStates, self.numStates))\n', (19178, 19234), True, 'import numpy as _np\n'), ((19279, 19344), 'numpy.zeros', '_np.zeros', (['(self.statehistorylen, self.numStates, self.numStates)'], {}), '((self.statehistorylen, self.numStates, self.numStates))\n', (19288, 19344), True, 'import numpy as _np\n'), ((21375, 21398), 'numpy.eye', '_np.eye', (['self.numStates'], {}), '(self.numStates)\n', (21382, 21398), True, 'import numpy as _np\n'), ((21729, 21771), 'numpy.sum', '_np.sum', (['self.stateConnectivityAbs'], {'axis': '(0)'}), '(self.stateConnectivityAbs, axis=0)\n', (21736, 21771), True, 'import numpy as _np\n')]
|
import pygame
import random
pygame.init()
COLOR_BLACK = (0, 0, 0)
COLOR_WHITE = (255, 255, 255)
SCORE_MAX = 10
tn = [1, 2, 3, 4, 5]
size = (1280, 720)
screen = pygame.display.set_mode(size)
pygame.display.set_caption("MyPong - PyGame Edition - 2021.01.30")
# score text
score_font = pygame.font.Font('C:/Users/Pichau/Documents/stem-games/mypong2/assets/PressStart2P.ttf', 44)
score_text = score_font.render('00 x 00', True, COLOR_WHITE, COLOR_BLACK)
score_text_rect = score_text.get_rect()
score_text_rect.center = (680, 50)
# victory text
victory_font = pygame.font.Font('C:/Users/Pichau/Documents/stem-games/mypong2/assets/PressStart2P.ttf', 100)
victory_text = victory_font .render('VICTORY', True, COLOR_WHITE, COLOR_BLACK)
victory_text_rect = score_text.get_rect()
victory_text_rect.center = (450, 350)
# sound effects
bounce_sound_effect = pygame.mixer.Sound('C:/Users/Pichau/Documents/stem-games/mypong2/assets/bounce.wav')
scoring_sound_effect = pygame.mixer.Sound('C:/Users/Pichau/Documents/stem-games/mypong2/assets'
'/258020__kodack__arcade-bleep-sound.wav')
# player 1
player_1 = pygame.image.load("C:/Users/Pichau/Documents/stem-games/mypong2/assets/emanuel.henrique_Paddle_Player.png")
player_1_y = 290
player_1_move_up = False
player_1_move_down = False
# player 2 - robot
player_2 = pygame.image.load("C:/Users/Pichau/Documents/stem-games/mypong2/assets/emanuel.henrique_Paddle_AI.png")
player_2_y = 290
# ball
ball = pygame.image.load("C:/Users/Pichau/Documents/stem-games/mypong2/assets/emanuel.henrique_ball.png")
ball_x = 640
ball_y = 360
ball_dx = 3
ball_dy = 3
# score
score_1 = 0
score_2 = 0
game_loop = True
game_clock = pygame.time.Clock()
# game loop
while game_loop:
for event in pygame.event.get():
if event.type == pygame.QUIT:
game_loop = False
# keystroke events
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_UP:
player_1_move_up = True
if event.key == pygame.K_DOWN:
player_1_move_down = True
if event.type == pygame.KEYUP:
if event.key == pygame.K_UP:
player_1_move_up = False
if event.key == pygame.K_DOWN:
player_1_move_down = False
# checking the victory condition
if score_1 < SCORE_MAX and score_2 < SCORE_MAX:
# clear screen
screen.fill(COLOR_BLACK)
# ball movement
ball_x = ball_x + ball_dx
ball_y = ball_y + ball_dy
# ball collision with the wall
if ball_y > 700:
ball_dy *= -1
bounce_sound_effect.play()
elif ball_y <= 0:
ball_dy *= -1
bounce_sound_effect.play()
# ball collision with the player 1 's paddle
if (ball_x == 100) and (player_1_y < ball_y + 25) and (player_1_y + 150 > ball_y):
ball_dx *= -1
ball_dy = random.randrange(-15, 16)
bounce_sound_effect.play()
print(ball_dy)
# ball collision with the player 2 's paddle
if (ball_x > 1140) and (player_2_y < ball_y + 25) and (player_2_y + 150 > ball_y):
ball_dx *= -1
ball_dy = random.randrange(-15, 16)
bounce_sound_effect.play()
print(ball_dy)
# scoring points
if ball_x < -50:
ball_x = 640
ball_y = 360
ball_dy *= -1
ball_dx *= -1
score_2 += 1
scoring_sound_effect.play()
elif ball_x > 1280:
ball_x = 640
ball_y = 360
ball_dy *= -1
ball_dx *= -1
score_1 += 1
scoring_sound_effect.play()
# player 1 movement
if player_1_move_up:
player_1_y -= 10
else:
player_1_y += 0
if player_1_move_down:
player_1_y += 10
else:
player_1_y += 0
if player_1_y <= 0:
player_1_y = 0
elif player_1_y >= 570:
player_1_y = 570
# player 2 "Artificial Intelligence"
# player_2_y = ball_y
if (ball_y < 0) or (ball_y < player_2_y+20) and random.randrange(31) == 10:
player_2_y = ball_y-30
if (ball_y > 0) or (ball_y > player_2_y-20) and random.randrange(31) == 10:
player_2_y = ball_y+30
if player_2_y <= 0:
player_2_y = 0
elif player_2_y >= 570:
player_2_y = 570
score_text = score_font.render(str(score_1) + ' x ' + str(score_2), True, COLOR_WHITE, COLOR_BLACK)
# drawing objects
screen.blit(ball, (ball_x, ball_y))
screen.blit(player_1, (60, player_1_y))
screen.blit(player_2, (1180, player_2_y))
screen.blit(score_text, score_text_rect)
else:
# drawing victory
screen.fill(COLOR_BLACK)
screen.blit(score_text, score_text_rect)
screen.blit(victory_text, victory_text_rect)
# update screen
pygame.display.flip()
game_clock.tick(60)
pygame.quit()
|
[
"pygame.quit",
"pygame.event.get",
"pygame.display.set_mode",
"pygame.init",
"pygame.display.flip",
"random.randrange",
"pygame.font.Font",
"pygame.image.load",
"pygame.display.set_caption",
"pygame.time.Clock",
"pygame.mixer.Sound"
] |
[((29, 42), 'pygame.init', 'pygame.init', ([], {}), '()\n', (40, 42), False, 'import pygame\n'), ((165, 194), 'pygame.display.set_mode', 'pygame.display.set_mode', (['size'], {}), '(size)\n', (188, 194), False, 'import pygame\n'), ((195, 261), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""MyPong - PyGame Edition - 2021.01.30"""'], {}), "('MyPong - PyGame Edition - 2021.01.30')\n", (221, 261), False, 'import pygame\n'), ((289, 386), 'pygame.font.Font', 'pygame.font.Font', (['"""C:/Users/Pichau/Documents/stem-games/mypong2/assets/PressStart2P.ttf"""', '(44)'], {}), "(\n 'C:/Users/Pichau/Documents/stem-games/mypong2/assets/PressStart2P.ttf', 44)\n", (305, 386), False, 'import pygame\n'), ((562, 665), 'pygame.font.Font', 'pygame.font.Font', (['"""C:/Users/Pichau/Documents/stem-games/mypong2/assets/PressStart2P.ttf"""', '(100)'], {}), "(\n 'C:/Users/Pichau/Documents/stem-games/mypong2/assets/PressStart2P.ttf', 100\n )\n", (578, 665), False, 'import pygame\n'), ((854, 943), 'pygame.mixer.Sound', 'pygame.mixer.Sound', (['"""C:/Users/Pichau/Documents/stem-games/mypong2/assets/bounce.wav"""'], {}), "(\n 'C:/Users/Pichau/Documents/stem-games/mypong2/assets/bounce.wav')\n", (872, 943), False, 'import pygame\n'), ((962, 1084), 'pygame.mixer.Sound', 'pygame.mixer.Sound', (['"""C:/Users/Pichau/Documents/stem-games/mypong2/assets/258020__kodack__arcade-bleep-sound.wav"""'], {}), "(\n 'C:/Users/Pichau/Documents/stem-games/mypong2/assets/258020__kodack__arcade-bleep-sound.wav'\n )\n", (980, 1084), False, 'import pygame\n'), ((1143, 1260), 'pygame.image.load', 'pygame.image.load', (['"""C:/Users/Pichau/Documents/stem-games/mypong2/assets/emanuel.henrique_Paddle_Player.png"""'], {}), "(\n 'C:/Users/Pichau/Documents/stem-games/mypong2/assets/emanuel.henrique_Paddle_Player.png'\n )\n", (1160, 1260), False, 'import pygame\n'), ((1351, 1464), 'pygame.image.load', 'pygame.image.load', (['"""C:/Users/Pichau/Documents/stem-games/mypong2/assets/emanuel.henrique_Paddle_AI.png"""'], {}), "(\n 'C:/Users/Pichau/Documents/stem-games/mypong2/assets/emanuel.henrique_Paddle_AI.png'\n )\n", (1368, 1464), False, 'import pygame\n'), ((1487, 1595), 'pygame.image.load', 'pygame.image.load', (['"""C:/Users/Pichau/Documents/stem-games/mypong2/assets/emanuel.henrique_ball.png"""'], {}), "(\n 'C:/Users/Pichau/Documents/stem-games/mypong2/assets/emanuel.henrique_ball.png'\n )\n", (1504, 1595), False, 'import pygame\n'), ((1700, 1719), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (1717, 1719), False, 'import pygame\n'), ((5093, 5106), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (5104, 5106), False, 'import pygame\n'), ((1768, 1786), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (1784, 1786), False, 'import pygame\n'), ((5046, 5067), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (5065, 5067), False, 'import pygame\n'), ((2953, 2978), 'random.randrange', 'random.randrange', (['(-15)', '(16)'], {}), '(-15, 16)\n', (2969, 2978), False, 'import random\n'), ((3238, 3263), 'random.randrange', 'random.randrange', (['(-15)', '(16)'], {}), '(-15, 16)\n', (3254, 3263), False, 'import random\n'), ((4225, 4245), 'random.randrange', 'random.randrange', (['(31)'], {}), '(31)\n', (4241, 4245), False, 'import random\n'), ((4344, 4364), 'random.randrange', 'random.randrange', (['(31)'], {}), '(31)\n', (4360, 4364), False, 'import random\n')]
|
import os
AWS_REGION = os.environ.get('AWS_REGION')
BUCKET = ""
CACHE_MAX_AGE = 3600
DEFAULT_QUALITY_RATE = 80
LOSSY_IMAGE_FMTS = ('jpg', 'jpeg', 'webp')
|
[
"os.environ.get"
] |
[((24, 52), 'os.environ.get', 'os.environ.get', (['"""AWS_REGION"""'], {}), "('AWS_REGION')\n", (38, 52), False, 'import os\n')]
|
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from paddle.proto.ParameterConfig_pb2 import ParameterConfig
from collections import OrderedDict
import paddle.trainer.config_parser as cp
import struct
import tarfile
import cStringIO
from topology import Topology
__all__ = ['Parameters', 'create']
def create(layers):
"""
Create parameter pool by topology.
:param layers:
:return:
"""
topology = Topology(layers)
pool = Parameters()
initializers = cp.g_parameter_initializer_map
for param in topology.proto().parameters:
pool.__append_config__(param)
if param.name in initializers:
pool[param.name] = initializers[param.name](param.name)
return pool
class Parameters(object):
"""
`Parameters` manages all the learnable parameters in a neural network.
It stores parameters' information in an OrderedDict. The key is
the name of a parameter, and value is a parameter's configuration(in
protobuf format), such as initialization mean and std, its size, whether it
is a static parameter, and so on.
:param __param_conf__: store the configurations of learnable parameters in
the network in an OrderedDict. Parameter is added one by one into the
dict by following their created order in the network: parameters of
the previous layers in a network are careted first. You can visit the
parameters from bottom to top by iterating over this dict.
:type __param_conf__: OrderedDict
:param __gradient_machines__: all of the parameters in a neural network are
appended to a PaddlePaddle gradient machine, which is used internally to
copy parameter values between C++ and Python end.
:type __gradient_machines__: list
:param __tmp_params__: a dict to store dummy parameters if no
__gradient_machines__ is appended to `Parameters`.
:type __tmp_params__: dict
Basically usage is
.. code-block:: python
data = paddle.layers.data(...)
...
out = paddle.layers.fc(...)
parameters = paddle.parameters.create(out)
parameter_names = parameters.names()
fc_mat = parameters.get('fc')
print fc_mat
"""
def __init__(self):
self.__param_conf__ = OrderedDict()
self.__gradient_machines__ = []
self.__tmp_params__ = dict()
def __append_config__(self, param_conf):
"""
Append a parameter configuration. It used to initialize Parameters and
should be invoked only in paddle.parameters.create
:param param_conf: The parameter configuration in protobuf
:type param_conf: ParameterConfig
:return: Nothing
"""
if not isinstance(param_conf, ParameterConfig):
raise ValueError("param_conf must be paddle.proto.ParameterConfig")
if param_conf.name in self.__param_conf__:
raise ValueError("duplicated parameter %s" % param_conf.name)
self.__param_conf__[param_conf.name] = param_conf
def keys(self):
"""
keys are the names of each parameter.
:return: list of parameter name
:rtype: list
"""
return self.__param_conf__.keys()
def names(self):
"""
names of each parameter.
:return: list of parameter name
:rtype: list
"""
return self.keys()
def has_key(self, key):
"""
has_key return true if there are such parameter name == key
:param key: Parameter name
:type key: basestring
:return: True if contains such key
"""
return key in self.__param_conf__.keys()
def __iter__(self):
"""
Return an iterator of parameter name. It is used by `for loop`
or `in` operator.
.. code-block:: python
parameters = paddle.parameters.create(...)
if "fc_param" in parameters:
print 'OK'
:return: an iterator of parameter name
:rtype: iterator
"""
return iter(self.__param_conf__)
def __getter_inner(self, key, param_type):
import py_paddle.swig_paddle as api
shape = self.get_shape(key)
if len(self.__gradient_machines__) == 0:
# create new parameter in python numpy.
if key in self.__tmp_params__:
return self.__tmp_params__[key]
else:
return np.ndarray(shape=shape, dtype=np.float32)
else:
for each_gradient_machine in self.__gradient_machines__:
param = __get_parameter_in_gradient_machine__(
each_gradient_machine, key)
# for simplify implementation now, we always copy from C++
assert isinstance(param, api.Parameter)
val = param.getBuf(param_type)
assert isinstance(val, api.Vector)
val = val.copyToNumpyArray()
return val
# else continue
raise RuntimeError("Unexpected branch")
def __getitem__(self, key):
"""
Get parameter by parameter name. It uses Python dict syntax.
:note: It will always copy the parameter from C++ side.
:param key: Parameter name
:type key: basestring
:return: parameter value
:rtype: np.ndarray
"""
import py_paddle.swig_paddle as api
return self.__getter_inner(key, api.PARAMETER_VALUE)
def get_shape(self, key):
"""
get shape of the parameter.
:param key: parameter name
:type key: basestring
:return: parameter's shape
:rtype: tuple
"""
if not isinstance(key, basestring):
raise ValueError("parameter name should be string")
if not self.has_key(key):
raise ValueError("No such parameter %s" % key)
conf = self.__param_conf__[key]
dims = conf.dims if conf.dims else (1, conf.size)
return tuple(map(int, dims))
def __setitem__(self, key, value):
"""
Set parameter by parameter name & value. It use Python dict syntax.
:note: It will always copy the parameter to C++ side.
:param key: Parameter name
:type key: basestring
:param value: Parameter matrix.
:type value: np.ndarray
:return: Nothing
"""
if not isinstance(value, np.ndarray):
raise ValueError("Must return ndarray")
value = value.astype(dtype=np.float32)
shape = self.get_shape(key)
if value.shape != shape:
raise ValueError("Value shape mismatch, expect %s, should %s" %
(shape, value.shape))
if len(self.__gradient_machines__) == 0:
self.__tmp_params__[key] = value
else:
for each_gradient_machine in self.__gradient_machines__:
__copy_parameter_to_gradient_machine__(each_gradient_machine,
key, value)
def get(self, parameter_name):
"""
Get parameter by parameter name.
:note: It will always copy the parameter from C++ side.
:param parameter_name: parameter name
:type parameter_name: basestring
:return: The parameter matrix.
:rtype: np.ndarray
"""
return self.__getitem__(key=parameter_name)
def get_grad(self, key):
"""
Get grandient by parameter name.
:note: It will always copy the parameter from C++ side.
:param key: parameter name
:type key: basestring
:return: The grandient matrix.
:rtype: np.ndarray
"""
import py_paddle.swig_paddle as api
if self.__param_conf__[key].is_static:
return np.zeros(self.__param_conf__[key].size, dtype=np.float32)
return self.__getter_inner(key, api.PARAMETER_GRADIENT)
def set(self, parameter_name, value):
"""
Set parameter by parameter name & matrix.
:param parameter_name: parameter name
:type parameter_name: basestring
:param value: parameter matrix
:type value: np.ndarray
:return: Nothing.
"""
self.__setitem__(key=parameter_name, value=value)
def append_gradient_machine(self, gradient_machine):
"""
append gradient machine to parameters. This method is used internally in
Trainer.train.
:param gradient_machine: PaddlePaddle C++ GradientMachine object.
:type gradient_machine: api.GradientMachine
:return:
"""
import py_paddle.swig_paddle as api
if not isinstance(gradient_machine, api.GradientMachine):
raise ValueError("gradient_machine should be api.GradientMachine")
if len(self.__tmp_params__) != 0:
for name, val in self.__tmp_params__.iteritems():
try:
__copy_parameter_to_gradient_machine__(gradient_machine,
name, val)
except ValueError:
# If no such parameter in gradient machine, then don't copy
pass
self.__gradient_machines__.append(gradient_machine)
def serialize(self, name, f):
"""
:param name:
:param f:
:type f: file
:return:
"""
param = self.get(name)
size = reduce(lambda a, b: a * b, param.shape)
f.write(struct.pack("IIQ", 0, 4, size))
param = param.astype(np.float32)
s = param.tostring()
wrote_size = 0
buf = buffer(s, wrote_size, 65535)
while buf: # f.write crashes with big data blog.
f.write(buf)
wrote_size += 65535
buf = buffer(s, wrote_size, 65535)
def deserialize(self, name, f):
"""
:param name:
:param f:
:type f: file
:return:
"""
f.read(16) # header
arr = np.frombuffer(f.read(), dtype=np.float32)
self.set(name, arr.reshape(self.get_shape(name)))
def to_tar(self, f):
tar = tarfile.TarFile(fileobj=f, mode='w')
for nm in self.names():
buf = cStringIO.StringIO()
self.serialize(nm, buf)
tarinfo = tarfile.TarInfo(name=nm)
buf.seek(0)
tarinfo.size = len(buf.getvalue())
tar.addfile(tarinfo, buf)
conf = self.__param_conf__[nm]
confStr = conf.SerializeToString()
tarinfo = tarfile.TarInfo(name="%s.protobuf" % nm)
tarinfo.size = len(confStr)
buf = cStringIO.StringIO(confStr)
buf.seek(0)
tar.addfile(tarinfo, fileobj=buf)
@staticmethod
def from_tar(f):
"""
Create a `Parameters` object from the given file. And
the `Parameters` only contains the parameters in this
file. It is adapted the parameters are same in the
defined network and the given file. For example, it
can be used in the inference.
:param f: the initialized model file.
:type f: tar file
:return: A Parameters object.
:rtype: Parameters.
"""
params = Parameters()
tar = tarfile.TarFile(fileobj=f, mode='r')
for finfo in tar:
assert isinstance(finfo, tarfile.TarInfo)
if finfo.name.endswith('.protobuf'):
f = tar.extractfile(finfo)
conf = ParameterConfig()
conf.ParseFromString(f.read())
params.__append_config__(conf)
for param_name in params.names():
f = tar.extractfile(param_name)
params.deserialize(param_name, f)
return params
def init_from_tar(self, f):
"""
Different from `from_tar`, this interface can be used to
init partial network parameters from another saved model.
:param f: the initialized model file.
:type f: tar file
:return: Nothing.
"""
tar_param = Parameters.from_tar(f)
for pname in tar_param.names():
if pname in self.names():
self.set(pname, tar_param.get(pname))
def __get_parameter_in_gradient_machine__(gradient_machine, name):
"""
:param gradient_machine:
:type gradient_machine: api.GradientMachine
:param name:
:return:
:rtype: api.Parameter
"""
params = filter(lambda p: p.getName() == name,
gradient_machine.getParameters())
if len(params) == 0:
raise ValueError("No such parameter")
elif len(params) > 1:
raise ValueError("Unexpected branch")
else:
return params[0]
def __copy_parameter_to_gradient_machine__(gradient_machine, name, arr):
"""
Copy a python ndarray into the gradient machine.
:param gradient_machine:
:type gradient_machine: api.GradientMachine
:param name:
:param arr:
:type arr: np.ndarray
:return:
:rtype: api.Parameter
"""
import py_paddle.swig_paddle as api
param = __get_parameter_in_gradient_machine__(gradient_machine, name)
vec = param.getBuf(api.PARAMETER_VALUE)
assert isinstance(vec, api.Vector)
vec.copyFromNumpyArray(arr.flatten())
|
[
"topology.Topology",
"tarfile.TarFile",
"paddle.proto.ParameterConfig_pb2.ParameterConfig",
"numpy.zeros",
"tarfile.TarInfo",
"struct.pack",
"collections.OrderedDict",
"cStringIO.StringIO",
"numpy.ndarray"
] |
[((1005, 1021), 'topology.Topology', 'Topology', (['layers'], {}), '(layers)\n', (1013, 1021), False, 'from topology import Topology\n'), ((2864, 2877), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2875, 2877), False, 'from collections import OrderedDict\n'), ((10804, 10840), 'tarfile.TarFile', 'tarfile.TarFile', ([], {'fileobj': 'f', 'mode': '"""w"""'}), "(fileobj=f, mode='w')\n", (10819, 10840), False, 'import tarfile\n'), ((11942, 11978), 'tarfile.TarFile', 'tarfile.TarFile', ([], {'fileobj': 'f', 'mode': '"""r"""'}), "(fileobj=f, mode='r')\n", (11957, 11978), False, 'import tarfile\n'), ((8435, 8492), 'numpy.zeros', 'np.zeros', (['self.__param_conf__[key].size'], {'dtype': 'np.float32'}), '(self.__param_conf__[key].size, dtype=np.float32)\n', (8443, 8492), True, 'import numpy as np\n'), ((10151, 10181), 'struct.pack', 'struct.pack', (['"""IIQ"""', '(0)', '(4)', 'size'], {}), "('IIQ', 0, 4, size)\n", (10162, 10181), False, 'import struct\n'), ((10891, 10911), 'cStringIO.StringIO', 'cStringIO.StringIO', ([], {}), '()\n', (10909, 10911), False, 'import cStringIO\n'), ((10970, 10994), 'tarfile.TarInfo', 'tarfile.TarInfo', ([], {'name': 'nm'}), '(name=nm)\n', (10985, 10994), False, 'import tarfile\n'), ((11217, 11257), 'tarfile.TarInfo', 'tarfile.TarInfo', ([], {'name': "('%s.protobuf' % nm)"}), "(name='%s.protobuf' % nm)\n", (11232, 11257), False, 'import tarfile\n'), ((11316, 11343), 'cStringIO.StringIO', 'cStringIO.StringIO', (['confStr'], {}), '(confStr)\n', (11334, 11343), False, 'import cStringIO\n'), ((5040, 5081), 'numpy.ndarray', 'np.ndarray', ([], {'shape': 'shape', 'dtype': 'np.float32'}), '(shape=shape, dtype=np.float32)\n', (5050, 5081), True, 'import numpy as np\n'), ((12174, 12191), 'paddle.proto.ParameterConfig_pb2.ParameterConfig', 'ParameterConfig', ([], {}), '()\n', (12189, 12191), False, 'from paddle.proto.ParameterConfig_pb2 import ParameterConfig\n')]
|
"""
:: deftwit.forms ::
A source of truthyness for deftwit wtforms.
"""
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, SelectField
from wtforms.validators import DataRequired, Length
from deftwit.models import DB, User, Tweet
class GetUserForm(FlaskForm):
"""
A general class for a Twitter handle input form.
Inherits from flask_wtf.FlaskForm.
Flask-specific subclass of WTForms :class:`~wtforms.form.Form`.
"""
# Text field for user to input target Twitter handle
handle = StringField(
"Twitter Handle", validators=[DataRequired(), Length(min=2, max=15)]
)
# Submit button to add the user to the db
submit = SubmitField("Add User")
class PredictForm(FlaskForm):
"""
A general class for selecting two Twitter users and comparing them
based on a text input field.
Inherits from flask_wtf.FlaskForm.
Flask-specific subclass of WTForms :class:`~wtforms.form.Form`.
"""
# Get list of choices (users) from database
users = User.query.all()
# Create the selection fields - choice tuples defined using list comprehension
user_1 = SelectField(
"Twit #1", choices=[(user.handle, user.handle) for user in users],
)
user_2 = SelectField(
"Twit #2", choices=[(user.handle, user.handle) for user in users],
)
# TODO: create function that generates a random tweet
tweet_text = StringField(
"Tweet text", validators=[DataRequired(), Length(min=1, max=240)]
)
submit = SubmitField("Predict")
|
[
"wtforms.SelectField",
"wtforms.validators.Length",
"deftwit.models.User.query.all",
"wtforms.SubmitField",
"wtforms.validators.DataRequired"
] |
[((699, 722), 'wtforms.SubmitField', 'SubmitField', (['"""Add User"""'], {}), "('Add User')\n", (710, 722), False, 'from wtforms import StringField, SubmitField, SelectField\n'), ((1052, 1068), 'deftwit.models.User.query.all', 'User.query.all', ([], {}), '()\n', (1066, 1068), False, 'from deftwit.models import DB, User, Tweet\n'), ((1166, 1244), 'wtforms.SelectField', 'SelectField', (['"""Twit #1"""'], {'choices': '[(user.handle, user.handle) for user in users]'}), "('Twit #1', choices=[(user.handle, user.handle) for user in users])\n", (1177, 1244), False, 'from wtforms import StringField, SubmitField, SelectField\n'), ((1273, 1351), 'wtforms.SelectField', 'SelectField', (['"""Twit #2"""'], {'choices': '[(user.handle, user.handle) for user in users]'}), "('Twit #2', choices=[(user.handle, user.handle) for user in users])\n", (1284, 1351), False, 'from wtforms import StringField, SubmitField, SelectField\n'), ((1550, 1572), 'wtforms.SubmitField', 'SubmitField', (['"""Predict"""'], {}), "('Predict')\n", (1561, 1572), False, 'from wtforms import StringField, SubmitField, SelectField\n'), ((595, 609), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (607, 609), False, 'from wtforms.validators import DataRequired, Length\n'), ((611, 632), 'wtforms.validators.Length', 'Length', ([], {'min': '(2)', 'max': '(15)'}), '(min=2, max=15)\n', (617, 632), False, 'from wtforms.validators import DataRequired, Length\n'), ((1490, 1504), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (1502, 1504), False, 'from wtforms.validators import DataRequired, Length\n'), ((1506, 1528), 'wtforms.validators.Length', 'Length', ([], {'min': '(1)', 'max': '(240)'}), '(min=1, max=240)\n', (1512, 1528), False, 'from wtforms.validators import DataRequired, Length\n')]
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import logging
import re
import pyforms as app
from pyforms.basewidget import BaseWidget
from pyforms.controls import ControlList
from pyforms.controls import ControlCheckBox
from pybpodgui_plugin.models.setup.task_variable import TaskVariableWindow
from pybpodgui_api.models.setup.board_task import BoardTask
logger = logging.getLogger(__name__)
class BoardTaskWindow(BoardTask, BaseWidget):
"""
Define here which fields from the board_task model should appear on the setup configuration window.
The model fields shall be defined as UI components like text fields, buttons, combo boxes, etc.
You may also assign actions to these components.
.. seealso::
This class heavy relies on the corresponding API module.
:py:class:`pybpodgui_api.models.setup.board_task.BoardTask`
**Properties**
states
A list of task states associated with this BoardTask. States are defined on the task code.
events
A list of task events associated with this BoardTask. Events are defined on the task code.
variables
A list of task variables associated with this BoardTask. Variables are defined on the task code.
**Private attributes**
_states
:class:`pyforms.controls.ControlList`
UI list to show BoardTask states.
_events
:class:`pyforms.controls.ControlList`
UI list to show BoardTask events.
_vars
:class:`pyforms.controls.ControlList`
UI list to show BoardTask variables.
_sync_btn
:class:`pyforms.controls.ControlButton`
Button to sync variables with board. Pressing the button fires the event :meth:`BoardTaskWindow.sync_variables`.
_load_btn
:class:`pyforms.controls.ControlButton`
Button to read task variables from board. Pressing the button fires the event :meth:`BoardTaskWindow._BoardTaskWindow__load_task_details`.
_formset
Describe window fields organization to PyForms.
**Methods**
"""
def __init__(self, setup):
BaseWidget.__init__(self, "Variables config for {0}".format(setup.name))
self._var_is_being_added = False
self._updvars = ControlCheckBox('Update variables')
self._vars = ControlList('Variables',
add_function=self.__add_variable,
remove_function=self.__remove_variable)
BoardTask.__init__(self, setup)
self._vars.horizontal_headers = ['NAME', 'TYPE', 'VALUE']
self._vars.data_changed_event = self.__varslist_data_changed_evt
self._formset = ['_updvars', '_vars']
self._variable_rule = re.compile('^[A-Z0-9\_]+$')
@property
def update_variables(self):
return self._updvars.value
@update_variables.setter
def update_variables(self, value):
self._updvars.value = value
def create_variable(self, name=None, value=None, datatype='string'):
return TaskVariableWindow(self, name, value, datatype)
def __varslist_data_changed_evt(self, row, col, item):
# only verify if the list is being edited
if self._var_is_being_added is True:
return
if col == 0 and item is not None:
if not (self._variable_rule.match(item) and item.startswith('VAR_')):
self.critical("The name of the variable should start with VAR_, should be alphanumeric and upper case.",
"Error")
self._vars.set_value(
col, row,
'VAR_{0}'.format( self._vars.rows_count))
elif col == 2:
datatype_combo = self._vars.get_value(1, row)
datatype = datatype_combo.value if datatype_combo else None
if datatype == 'number' and isinstance(item, str) and not item.isnumeric():
self.message("The value should be numeric.", "Error")
self._vars.set_value(
col, row,
'0'
)
def __add_variable(self):
self._var_is_being_added = True
var = self.create_variable(
'VAR_{0}'.format(self._vars.rows_count),
'0'
)
self._var_is_being_added = False
def __remove_variable(self):
if self._vars.selected_row_index is not None:
var = self.variables[self._vars.selected_row_index]
self.variables.remove(var)
self._vars -= -1
def before_close(self):
return False
# Execute the application
if __name__ == "__main__":
app.start_app(BoardTaskWindow)
|
[
"pybpodgui_api.models.setup.board_task.BoardTask.__init__",
"pyforms.controls.ControlList",
"re.compile",
"pyforms.controls.ControlCheckBox",
"pybpodgui_plugin.models.setup.task_variable.TaskVariableWindow",
"logging.getLogger",
"pyforms.start_app"
] |
[((366, 393), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (383, 393), False, 'import logging\n'), ((4724, 4754), 'pyforms.start_app', 'app.start_app', (['BoardTaskWindow'], {}), '(BoardTaskWindow)\n', (4737, 4754), True, 'import pyforms as app\n'), ((2322, 2357), 'pyforms.controls.ControlCheckBox', 'ControlCheckBox', (['"""Update variables"""'], {}), "('Update variables')\n", (2337, 2357), False, 'from pyforms.controls import ControlCheckBox\n'), ((2379, 2482), 'pyforms.controls.ControlList', 'ControlList', (['"""Variables"""'], {'add_function': 'self.__add_variable', 'remove_function': 'self.__remove_variable'}), "('Variables', add_function=self.__add_variable, remove_function=\n self.__remove_variable)\n", (2390, 2482), False, 'from pyforms.controls import ControlList\n'), ((2553, 2584), 'pybpodgui_api.models.setup.board_task.BoardTask.__init__', 'BoardTask.__init__', (['self', 'setup'], {}), '(self, setup)\n', (2571, 2584), False, 'from pybpodgui_api.models.setup.board_task import BoardTask\n'), ((2803, 2831), 're.compile', 're.compile', (['"""^[A-Z0-9\\\\_]+$"""'], {}), "('^[A-Z0-9\\\\_]+$')\n", (2813, 2831), False, 'import re\n'), ((3107, 3154), 'pybpodgui_plugin.models.setup.task_variable.TaskVariableWindow', 'TaskVariableWindow', (['self', 'name', 'value', 'datatype'], {}), '(self, name, value, datatype)\n', (3125, 3154), False, 'from pybpodgui_plugin.models.setup.task_variable import TaskVariableWindow\n')]
|
"""create table budget_item
Revision ID: 7b47983c2ea0
Revises: 89794c69ffab
Create Date: 2019-09-07 11:46:49.554912
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '7b47983c2ea0'
down_revision = '89794c69ffab'
branch_labels = None
depends_on = None
def upgrade():
op.create_table('budget_item',
sa.Column('uid',
sa.String(36),
primary_key=True),
sa.Column('name',
sa.String(250),
nullable=False),
sa.Column('quantity',
sa.Integer,
nullable=False),
sa.Column('price',
sa.Float,
nullable=False),
sa.Column('total',
sa.Float,
nullable=False),
sa.Column('budget_uid',
sa.String(36),
sa.ForeignKey('budget.uid'),
nullable=False))
def downgrade():
op.drop_table('budget_item')
|
[
"alembic.op.drop_table",
"sqlalchemy.String",
"sqlalchemy.ForeignKey",
"sqlalchemy.Column"
] |
[((1222, 1250), 'alembic.op.drop_table', 'op.drop_table', (['"""budget_item"""'], {}), "('budget_item')\n", (1235, 1250), False, 'from alembic import op\n'), ((641, 690), 'sqlalchemy.Column', 'sa.Column', (['"""quantity"""', 'sa.Integer'], {'nullable': '(False)'}), "('quantity', sa.Integer, nullable=False)\n", (650, 690), True, 'import sqlalchemy as sa\n'), ((772, 816), 'sqlalchemy.Column', 'sa.Column', (['"""price"""', 'sa.Float'], {'nullable': '(False)'}), "('price', sa.Float, nullable=False)\n", (781, 816), True, 'import sqlalchemy as sa\n'), ((898, 942), 'sqlalchemy.Column', 'sa.Column', (['"""total"""', 'sa.Float'], {'nullable': '(False)'}), "('total', sa.Float, nullable=False)\n", (907, 942), True, 'import sqlalchemy as sa\n'), ((426, 439), 'sqlalchemy.String', 'sa.String', (['(36)'], {}), '(36)\n', (435, 439), True, 'import sqlalchemy as sa\n'), ((558, 572), 'sqlalchemy.String', 'sa.String', (['(250)'], {}), '(250)\n', (567, 572), True, 'import sqlalchemy as sa\n'), ((1078, 1091), 'sqlalchemy.String', 'sa.String', (['(36)'], {}), '(36)\n', (1087, 1091), True, 'import sqlalchemy as sa\n'), ((1123, 1150), 'sqlalchemy.ForeignKey', 'sa.ForeignKey', (['"""budget.uid"""'], {}), "('budget.uid')\n", (1136, 1150), True, 'import sqlalchemy as sa\n')]
|
import numpy as np
import imageio
import os
AVAILABLE_IMAGES = ['barbara']
def _add_noise(img, sigma):
noise = np.random.normal(scale=sigma,
size=img.shape).astype(img.dtype)
return img + noise
def example_image(img_name, noise_std=0):
imgf = os.path.join('sparselandtools', 'applications', 'assets', img_name + '.png')
# read image
try:
img = imageio.imread(imgf)[:, :, 0].astype('float32')
except IndexError:
img = imageio.imread(imgf).astype('float32')
# add noise
img = _add_noise(img, sigma=noise_std)
return img
|
[
"imageio.imread",
"os.path.join",
"numpy.random.normal"
] |
[((290, 366), 'os.path.join', 'os.path.join', (['"""sparselandtools"""', '"""applications"""', '"""assets"""', "(img_name + '.png')"], {}), "('sparselandtools', 'applications', 'assets', img_name + '.png')\n", (302, 366), False, 'import os\n'), ((118, 163), 'numpy.random.normal', 'np.random.normal', ([], {'scale': 'sigma', 'size': 'img.shape'}), '(scale=sigma, size=img.shape)\n', (134, 163), True, 'import numpy as np\n'), ((408, 428), 'imageio.imread', 'imageio.imread', (['imgf'], {}), '(imgf)\n', (422, 428), False, 'import imageio\n'), ((493, 513), 'imageio.imread', 'imageio.imread', (['imgf'], {}), '(imgf)\n', (507, 513), False, 'import imageio\n')]
|
import mnist
import numpy as np
import pickle
import cnn
training_images = mnist.train_images()
training_labels = mnist.train_labels()
## uncomment below to train mnist images as RGB data
# import cv2
# training_images_rgb = []
# for i, image in enumerate(training_images):
# training_images_rgb.append(cv2.cvtColor(image, cv2.COLOR_GRAY2RGB))
# training_images = np.array(training_images_rgb)
classes = [x for x in range(10)]
# initialize
net = None
answer = input("Would you like to load a model? (enter 'y' to load): ")
should_load = answer == 'y'
if should_load:
filename = input("Enter a filename (without the extension): ")
pickle_in = open(f'{filename}.pickle','rb')
net = pickle.load(pickle_in)
else:
layers = [
cnn.layers.Conv(num_kernels=16),
cnn.layers.MaxPool(),
cnn.layers.SoftMax(num_classes=10),
]
net = cnn.CNN(layers)
# train
answer = input("Would you like to train? (enter 'y' to train): ")
should_train = answer == 'y'
if should_train:
net.train(training_images, training_labels, classes, num_epochs=5, rate=0.005)
# predict
answer = input("Would you like to test the model? (enter 'y' to test): ")
should_test = answer == 'y'
if should_test:
print('\n\n>>> Testing model...\n')
test_images = mnist.test_images()[:1000]
test_labels = mnist.test_labels()[:1000]
num_correct = 0
for image, label in zip(test_images, test_labels):
prediction_index = net.predict(image)
prediction = classes[prediction_index]
correct_add = 1 if prediction == label else 0
num_correct += correct_add
num_tests = len(test_images)
percent_accurate = round(((num_correct / num_tests) * 100), 3)
print(f'Prediction accuracy ({num_tests} attempts): {percent_accurate}%\n')
# save model
answer = input("Would you like to save the model? (enter 'y' to save): ")
should_save = answer == 'y'
if should_save:
filename = input("Enter a filename (without the extension): ")
with open(f'{filename}.pickle','wb') as f:
pickle.dump(net, f)
|
[
"mnist.train_images",
"cnn.CNN",
"mnist.train_labels",
"pickle.dump",
"mnist.test_labels",
"cnn.layers.SoftMax",
"pickle.load",
"cnn.layers.MaxPool",
"mnist.test_images",
"cnn.layers.Conv"
] |
[((77, 97), 'mnist.train_images', 'mnist.train_images', ([], {}), '()\n', (95, 97), False, 'import mnist\n'), ((116, 136), 'mnist.train_labels', 'mnist.train_labels', ([], {}), '()\n', (134, 136), False, 'import mnist\n'), ((701, 723), 'pickle.load', 'pickle.load', (['pickle_in'], {}), '(pickle_in)\n', (712, 723), False, 'import pickle\n'), ((876, 891), 'cnn.CNN', 'cnn.CNN', (['layers'], {}), '(layers)\n', (883, 891), False, 'import cnn\n'), ((753, 784), 'cnn.layers.Conv', 'cnn.layers.Conv', ([], {'num_kernels': '(16)'}), '(num_kernels=16)\n', (768, 784), False, 'import cnn\n'), ((794, 814), 'cnn.layers.MaxPool', 'cnn.layers.MaxPool', ([], {}), '()\n', (812, 814), False, 'import cnn\n'), ((824, 858), 'cnn.layers.SoftMax', 'cnn.layers.SoftMax', ([], {'num_classes': '(10)'}), '(num_classes=10)\n', (842, 858), False, 'import cnn\n'), ((1283, 1302), 'mnist.test_images', 'mnist.test_images', ([], {}), '()\n', (1300, 1302), False, 'import mnist\n'), ((1328, 1347), 'mnist.test_labels', 'mnist.test_labels', ([], {}), '()\n', (1345, 1347), False, 'import mnist\n'), ((2049, 2068), 'pickle.dump', 'pickle.dump', (['net', 'f'], {}), '(net, f)\n', (2060, 2068), False, 'import pickle\n')]
|
from flask import Flask
from resume_builder.config import Configuration
app = Flask(__name__)
app.config.from_object(Configuration)
from resume_builder import routes,models
|
[
"flask.Flask"
] |
[((79, 94), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (84, 94), False, 'from flask import Flask\n')]
|